prompt (stringlengths 19 to 1.03M) | completion (stringlengths 4 to 2.12k) | api (stringlengths 8 to 90) |
---|---|---|
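Each row below appears to pair a truncated Python source file (prompt) with the pandas call that completes its final statement (completion) and that call's fully qualified name (api). A minimal sketch of inspecting such rows, assuming a hypothetical local export of the dataset with those three columns (the file name is an assumption, not given in the source):
import pandas as pd

rows = pd.read_parquet("code-completion-rows.parquet")  # hypothetical local export
print(rows.columns.tolist())        # expected: ['prompt', 'completion', 'api']
sample = rows.iloc[0]
print(sample["api"])                # e.g. 'pandas.read_csv'
print(sample["completion"])         # the call the model is asked to produce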
from collections import abc, deque
from decimal import Decimal
from io import StringIO
from warnings import catch_warnings
import numpy as np
from numpy.random import randn
import pytest
from pandas.core.dtypes.dtypes import CategoricalDtype
import pandas as pd
from pandas import (
Categorical,
DataFrame,
DatetimeIndex,
Index,
MultiIndex,
Series,
concat,
date_range,
read_csv,
)
import pandas._testing as tm
from pandas.core.arrays import SparseArray
from pandas.core.construction import create_series_with_explicit_dtype
from pandas.tests.extension.decimal import to_decimal
@pytest.fixture(params=[True, False])
def sort(request):
"""Boolean sort keyword for concat and DataFrame.append."""
return request.param
class TestConcatenate:
def test_concat_copy(self):
df = DataFrame(np.random.randn(4, 3))
df2 = DataFrame(np.random.randint(0, 10, size=4).reshape(4, 1))
df3 = DataFrame({5: "foo"}, index=range(4))
# These are actual copies.
result = concat([df, df2, df3], axis=1, copy=True)
for b in result._mgr.blocks:
assert b.values.base is None
# These are the same.
result = concat([df, df2, df3], axis=1, copy=False)
for b in result._mgr.blocks:
if b.is_float:
assert b.values.base is df._mgr.blocks[0].values.base
elif b.is_integer:
assert b.values.base is df2._mgr.blocks[0].values.base
elif b.is_object:
assert b.values.base is not None
# Float block was consolidated.
df4 = DataFrame(np.random.randn(4, 1))
result = concat([df, df2, df3, df4], axis=1, copy=False)
for b in result._mgr.blocks:
if b.is_float:
assert b.values.base is None
elif b.is_integer:
assert b.values.base is df2._mgr.blocks[0].values.base
elif b.is_object:
assert b.values.base is not None
def test_concat_with_group_keys(self):
df = DataFrame(np.random.randn(4, 3))
df2 = DataFrame(np.random.randn(4, 4))
# axis=0
df = DataFrame(np.random.randn(3, 4))
df2 = DataFrame(np.random.randn(4, 4))
result = concat([df, df2], keys=[0, 1])
exp_index = MultiIndex.from_arrays(
[[0, 0, 0, 1, 1, 1, 1], [0, 1, 2, 0, 1, 2, 3]]
)
expected = DataFrame(np.r_[df.values, df2.values], index=exp_index)
tm.assert_frame_equal(result, expected)
result = concat([df, df], keys=[0, 1])
exp_index2 = MultiIndex.from_arrays([[0, 0, 0, 1, 1, 1], [0, 1, 2, 0, 1, 2]])
expected = DataFrame(np.r_[df.values, df.values], index=exp_index2)
tm.assert_frame_equal(result, expected)
# axis=1
df = DataFrame(np.random.randn(4, 3))
df2 = DataFrame(np.random.randn(4, 4))
result = concat([df, df2], keys=[0, 1], axis=1)
expected = DataFrame(np.c_[df.values, df2.values], columns=exp_index)
tm.assert_frame_equal(result, expected)
result = concat([df, df], keys=[0, 1], axis=1)
expected = DataFrame(np.c_[df.values, df.values], columns=exp_index2)
tm.assert_frame_equal(result, expected)
def test_concat_keys_specific_levels(self):
df = DataFrame(np.random.randn(10, 4))
pieces = [df.iloc[:, [0, 1]], df.iloc[:, [2]], df.iloc[:, [3]]]
level = ["three", "two", "one", "zero"]
result = concat(
pieces,
axis=1,
keys=["one", "two", "three"],
levels=[level],
names=["group_key"],
)
tm.assert_index_equal(result.columns.levels[0], Index(level, name="group_key"))
tm.assert_index_equal(result.columns.levels[1], Index([0, 1, 2, 3]))
assert result.columns.names == ["group_key", None]
def test_concat_dataframe_keys_bug(self, sort):
t1 = DataFrame(
{"value": Series([1, 2, 3], index=Index(["a", "b", "c"], name="id"))}
)
t2 = DataFrame({"value": Series([7, 8], index=Index(["a", "b"], name="id"))})
# it works
result = concat([t1, t2], axis=1, keys=["t1", "t2"], sort=sort)
assert list(result.columns) == [("t1", "value"), ("t2", "value")]
def test_concat_series_partial_columns_names(self):
# GH10698
foo = Series([1, 2], name="foo")
bar = Series([1, 2])
baz = Series([4, 5])
result = concat([foo, bar, baz], axis=1)
expected = DataFrame(
{"foo": [1, 2], 0: [1, 2], 1: [4, 5]}, columns=["foo", 0, 1]
)
tm.assert_frame_equal(result, expected)
result = concat([foo, bar, baz], axis=1, keys=["red", "blue", "yellow"])
expected = DataFrame(
{"red": [1, 2], "blue": [1, 2], "yellow": [4, 5]},
columns=["red", "blue", "yellow"],
)
tm.assert_frame_equal(result, expected)
result = concat([foo, bar, baz], axis=1, ignore_index=True)
expected = DataFrame({0: [1, 2], 1: [1, 2], 2: [4, 5]})
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("mapping", ["mapping", "dict"])
def test_concat_mapping(self, mapping, non_dict_mapping_subclass):
constructor = dict if mapping == "dict" else non_dict_mapping_subclass
frames = constructor(
{
"foo": DataFrame(np.random.randn(4, 3)),
"bar": DataFrame(np.random.randn(4, 3)),
"baz": DataFrame(np.random.randn(4, 3)),
"qux": DataFrame(np.random.randn(4, 3)),
}
)
sorted_keys = list(frames.keys())
result = concat(frames)
expected = concat([frames[k] for k in sorted_keys], keys=sorted_keys)
tm.assert_frame_equal(result, expected)
result = concat(frames, axis=1)
expected = concat([frames[k] for k in sorted_keys], keys=sorted_keys, axis=1)
tm.assert_frame_equal(result, expected)
keys = ["baz", "foo", "bar"]
result = concat(frames, keys=keys)
expected = concat([frames[k] for k in keys], keys=keys)
tm.assert_frame_equal(result, expected)
def test_concat_ignore_index(self, sort):
frame1 = DataFrame(
{"test1": ["a", "b", "c"], "test2": [1, 2, 3], "test3": [4.5, 3.2, 1.2]}
)
frame2 = DataFrame({"test3": [5.2, 2.2, 4.3]})
frame1.index = Index(["x", "y", "z"])
frame2.index = Index(["x", "y", "q"])
v1 = concat([frame1, frame2], axis=1, ignore_index=True, sort=sort)
nan = np.nan
expected = DataFrame(
[
[nan, nan, nan, 4.3],
["a", 1, 4.5, 5.2],
["b", 2, 3.2, 2.2],
["c", 3, 1.2, nan],
],
index=Index(["q", "x", "y", "z"]),
)
if not sort:
expected = expected.loc[["x", "y", "z", "q"]]
tm.assert_frame_equal(v1, expected)
@pytest.mark.parametrize(
"name_in1,name_in2,name_in3,name_out",
[
("idx", "idx", "idx", "idx"),
("idx", "idx", None, None),
("idx", None, None, None),
("idx1", "idx2", None, None),
("idx1", "idx1", "idx2", None),
("idx1", "idx2", "idx3", None),
(None, None, None, None),
],
)
def test_concat_same_index_names(self, name_in1, name_in2, name_in3, name_out):
# GH13475
indices = [
Index(["a", "b", "c"], name=name_in1),
Index(["b", "c", "d"], name=name_in2),
Index(["c", "d", "e"], name=name_in3),
]
frames = [
DataFrame({c: [0, 1, 2]}, index=i) for i, c in zip(indices, ["x", "y", "z"])
]
result = pd.concat(frames, axis=1)
exp_ind = Index(["a", "b", "c", "d", "e"], name=name_out)
expected = DataFrame(
{
"x": [0, 1, 2, np.nan, np.nan],
"y": [np.nan, 0, 1, 2, np.nan],
"z": [np.nan, np.nan, 0, 1, 2],
},
index=exp_ind,
)
tm.assert_frame_equal(result, expected)
def test_concat_multiindex_with_keys(self):
index = MultiIndex(
levels=[["foo", "bar", "baz", "qux"], ["one", "two", "three"]],
codes=[[0, 0, 0, 1, 1, 2, 2, 3, 3, 3], [0, 1, 2, 0, 1, 1, 2, 0, 1, 2]],
names=["first", "second"],
)
frame = DataFrame(
np.random.randn(10, 3),
index=index,
columns=Index(["A", "B", "C"], name="exp"),
)
result = concat([frame, frame], keys=[0, 1], names=["iteration"])
assert result.index.names == ("iteration",) + index.names
tm.assert_frame_equal(result.loc[0], frame)
tm.assert_frame_equal(result.loc[1], frame)
assert result.index.nlevels == 3
def test_concat_multiindex_with_none_in_index_names(self):
# GH 15787
index = pd.MultiIndex.from_product([[1], range(5)], names=["level1", None])
df = DataFrame({"col": range(5)}, index=index, dtype=np.int32)
result = concat([df, df], keys=[1, 2], names=["level2"])
index = pd.MultiIndex.from_product(
[[1, 2], [1], range(5)], names=["level2", "level1", None]
)
expected = DataFrame({"col": list(range(5)) * 2}, index=index, dtype=np.int32)
tm.assert_frame_equal(result, expected)
result = concat([df, df[:2]], keys=[1, 2], names=["level2"])
level2 = [1] * 5 + [2] * 2
level1 = [1] * 7
no_name = list(range(5)) + list(range(2))
tuples = list(zip(level2, level1, no_name))
index = pd.MultiIndex.from_tuples(tuples, names=["level2", "level1", None])
expected = DataFrame({"col": no_name}, index=index, dtype=np.int32)
tm.assert_frame_equal(result, expected)
def test_concat_keys_and_levels(self):
df = DataFrame(np.random.randn(1, 3))
df2 = DataFrame(np.random.randn(1, 4))
levels = [["foo", "baz"], ["one", "two"]]
names = ["first", "second"]
result = concat(
[df, df2, df, df2],
keys=[("foo", "one"), ("foo", "two"), ("baz", "one"), ("baz", "two")],
levels=levels,
names=names,
)
expected = concat([df, df2, df, df2])
exp_index = MultiIndex(
levels=levels + [[0]],
codes=[[0, 0, 1, 1], [0, 1, 0, 1], [0, 0, 0, 0]],
names=names + [None],
)
expected.index = exp_index
tm.assert_frame_equal(result, expected)
# no names
result = concat(
[df, df2, df, df2],
keys=[("foo", "one"), ("foo", "two"), ("baz", "one"), ("baz", "two")],
levels=levels,
)
assert result.index.names == (None,) * 3
# no levels
result = concat(
[df, df2, df, df2],
keys=[("foo", "one"), ("foo", "two"), ("baz", "one"), ("baz", "two")],
names=["first", "second"],
)
assert result.index.names == ("first", "second", None)
tm.assert_index_equal(
result.index.levels[0], Index(["baz", "foo"], name="first")
)
def test_concat_keys_levels_no_overlap(self):
# GH #1406
df = DataFrame(np.random.randn(1, 3), index=["a"])
df2 = DataFrame(np.random.randn(1, 4), index=["b"])
msg = "Values not found in passed level"
with pytest.raises(ValueError, match=msg):
concat([df, df], keys=["one", "two"], levels=[["foo", "bar", "baz"]])
msg = "Key one not in level"
with pytest.raises(ValueError, match=msg):
concat([df, df2], keys=["one", "two"], levels=[["foo", "bar", "baz"]])
def test_concat_rename_index(self):
a = DataFrame(
np.random.rand(3, 3),
columns=list("ABC"),
index=Index(list("abc"), name="index_a"),
)
b = DataFrame(
np.random.rand(3, 3),
columns=list("ABC"),
index=Index(list("abc"), name="index_b"),
)
result = concat([a, b], keys=["key0", "key1"], names=["lvl0", "lvl1"])
exp = concat([a, b], keys=["key0", "key1"], names=["lvl0"])
names = list(exp.index.names)
names[1] = "lvl1"
exp.index.set_names(names, inplace=True)
tm.assert_frame_equal(result, exp)
assert result.index.names == exp.index.names
def test_crossed_dtypes_weird_corner(self):
columns = ["A", "B", "C", "D"]
df1 = DataFrame(
{
"A": np.array([1, 2, 3, 4], dtype="f8"),
"B": np.array([1, 2, 3, 4], dtype="i8"),
"C": np.array([1, 2, 3, 4], dtype="f8"),
"D": np.array([1, 2, 3, 4], dtype="i8"),
},
columns=columns,
)
df2 = DataFrame(
{
"A": np.array([1, 2, 3, 4], dtype="i8"),
"B": np.array([1, 2, 3, 4], dtype="f8"),
"C": np.array([1, 2, 3, 4], dtype="i8"),
"D": np.array([1, 2, 3, 4], dtype="f8"),
},
columns=columns,
)
appended = df1.append(df2, ignore_index=True)
expected = DataFrame(
np.concatenate([df1.values, df2.values], axis=0), columns=columns
)
tm.assert_frame_equal(appended, expected)
df = DataFrame(np.random.randn(1, 3), index=["a"])
df2 = DataFrame(np.random.randn(1, 4), index=["b"])
result = concat([df, df2], keys=["one", "two"], names=["first", "second"])
assert result.index.names == ("first", "second")
def test_dups_index(self):
# GH 4771
# single dtypes
df = DataFrame(
np.random.randint(0, 10, size=40).reshape(10, 4),
columns=["A", "A", "C", "C"],
)
result = concat([df, df], axis=1)
tm.assert_frame_equal(result.iloc[:, :4], df)
tm.assert_frame_equal(result.iloc[:, 4:], df)
result = concat([df, df], axis=0)
tm.assert_frame_equal(result.iloc[:10], df)
tm.assert_frame_equal(result.iloc[10:], df)
# multi dtypes
df = concat(
[
DataFrame(np.random.randn(10, 4), columns=["A", "A", "B", "B"]),
DataFrame(
np.random.randint(0, 10, size=20).reshape(10, 2), columns=["A", "C"]
),
],
axis=1,
)
result = concat([df, df], axis=1)
tm.assert_frame_equal(result.iloc[:, :6], df)
tm.assert_frame_equal(result.iloc[:, 6:], df)
result = concat([df, df], axis=0)
tm.assert_frame_equal(result.iloc[:10], df)
tm.assert_frame_equal(result.iloc[10:], df)
# append
result = df.iloc[0:8, :].append(df.iloc[8:])
tm.assert_frame_equal(result, df)
result = df.iloc[0:8, :].append(df.iloc[8:9]).append(df.iloc[9:10])
tm.assert_frame_equal(result, df)
expected = concat([df, df], axis=0)
result = df.append(df)
tm.assert_frame_equal(result, expected)
def test_with_mixed_tuples(self, sort):
# 10697
# columns have mixed tuples, so handle properly
df1 = DataFrame({"A": "foo", ("B", 1): "bar"}, index=range(2))
df2 = DataFrame({"B": "foo", ("B", 1): "bar"}, index=range(2))
# it works
concat([df1, df2], sort=sort)
def test_handle_empty_objects(self, sort):
df = DataFrame(np.random.randn(10, 4), columns=list("abcd"))
baz = df[:5].copy()
baz["foo"] = "bar"
empty = df[5:5]
frames = [baz, empty, empty, df[5:]]
concatted = concat(frames, axis=0, sort=sort)
expected = df.reindex(columns=["a", "b", "c", "d", "foo"])
expected["foo"] = expected["foo"].astype("O")
expected.loc[0:4, "foo"] = "bar"
tm.assert_frame_equal(concatted, expected)
# empty as first element with time series
# GH3259
df = DataFrame(
dict(A=range(10000)), index=date_range("20130101", periods=10000, freq="s")
)
empty = DataFrame()
result = concat([df, empty], axis=1)
tm.assert_frame_equal(result, df)
result = concat([empty, df], axis=1)
tm.assert_frame_equal(result, df)
result = concat([df, empty])
tm.assert_frame_equal(result, df)
result = concat([empty, df])
tm.assert_frame_equal(result, df)
def test_concat_mixed_objs(self):
# concat mixed series/frames
# G2385
# axis 1
index = date_range("01-Jan-2013", periods=10, freq="H")
arr = np.arange(10, dtype="int64")
s1 = Series(arr, index=index)
s2 = Series(arr, index=index)
df = DataFrame(arr.reshape(-1, 1), index=index)
expected = DataFrame(
np.repeat(arr, 2).reshape(-1, 2), index=index, columns=[0, 0]
)
result = concat([df, df], axis=1)
tm.assert_frame_equal(result, expected)
expected = DataFrame(
np.repeat(arr, 2).reshape(-1, 2), index=index, columns=[0, 1]
)
result = concat([s1, s2], axis=1)
tm.assert_frame_equal(result, expected)
expected = DataFrame(
np.repeat(arr, 3).reshape(-1, 3), index=index, columns=[0, 1, 2]
)
result = concat([s1, s2, s1], axis=1)
tm.assert_frame_equal(result, expected)
expected = DataFrame(
np.repeat(arr, 5).reshape(-1, 5), index=index, columns=[0, 0, 1, 2, 3]
)
result = concat([s1, df, s2, s2, s1], axis=1)
tm.assert_frame_equal(result, expected)
# with names
s1.name = "foo"
expected = DataFrame(
np.repeat(arr, 3).reshape(-1, 3), index=index, columns=["foo", 0, 0]
)
result = concat([s1, df, s2], axis=1)
tm.assert_frame_equal(result, expected)
s2.name = "bar"
expected = DataFrame(
np.repeat(arr, 3).reshape(-1, 3), index=index, columns=["foo", 0, "bar"]
)
result = concat([s1, df, s2], axis=1)
tm.assert_frame_equal(result, expected)
# ignore index
expected = DataFrame(
np.repeat(arr, 3).reshape(-1, 3), index=index, columns=[0, 1, 2]
)
result = concat([s1, df, s2], axis=1, ignore_index=True)
tm.assert_frame_equal(result, expected)
# axis 0
expected = DataFrame(
np.tile(arr, 3).reshape(-1, 1), index=index.tolist() * 3, columns=[0]
)
result = concat([s1, df, s2])
tm.assert_frame_equal(result, expected)
expected = DataFrame(np.tile(arr, 3).reshape(-1, 1), columns=[0])
result = concat([s1, df, s2], ignore_index=True)
tm.assert_frame_equal(result, expected)
def test_empty_dtype_coerce(self):
# xref to #12411
# xref to #12045
# xref to #11594
# see below
# 10571
df1 = DataFrame(data=[[1, None], [2, None]], columns=["a", "b"])
df2 = DataFrame(data=[[3, None], [4, None]], columns=["a", "b"])
result = concat([df1, df2])
expected = df1.dtypes
tm.assert_series_equal(result.dtypes, expected)
def test_dtype_coerceion(self):
# 12411
df = DataFrame({"date": [pd.Timestamp("20130101").tz_localize("UTC"), pd.NaT]})
result = concat([df.iloc[[0]], df.iloc[[1]]])
tm.assert_series_equal(result.dtypes, df.dtypes)
# 12045
import datetime
df = DataFrame(
{"date": [datetime.datetime(2012, 1, 1), datetime.datetime(1012, 1, 2)]}
)
result = concat([df.iloc[[0]], df.iloc[[1]]])
tm.assert_series_equal(result.dtypes, df.dtypes)
# 11594
df = DataFrame({"text": ["some words"] + [None] * 9})
result = concat([df.iloc[[0]], df.iloc[[1]]])
tm.assert_series_equal(result.dtypes, df.dtypes)
def test_concat_series(self):
ts = tm.makeTimeSeries()
ts.name = "foo"
pieces = [ts[:5], ts[5:15], ts[15:]]
result = concat(pieces)
| tm.assert_series_equal(result, ts) | pandas._testing.assert_series_equal |
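The completion above fills in pandas' Series comparison helper. The following standalone sketch is not taken from the original test file and uses the public pandas.testing path rather than the private pandas._testing module; it shows what the assertion checks.
import pandas as pd
from pandas.testing import assert_series_equal

s1 = pd.Series([1.0, 2.0, 3.0], name="foo")
# Compares values, dtype, index and name; passes silently, raises AssertionError on mismatch.
assert_series_equal(s1, s1.copy())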
import numpy as np
import pandas as pd
from pathlib import Path
from datetime import datetime
import random
import sys
from sklearn.model_selection import ParameterSampler
from scipy.stats import randint as sp_randint
from scipy.stats import uniform
from functions import (
under_over_sampler,
classifier_train,
classifier_train_manual,
make_generic_df,
get_xy_from_df,
plot_precision_recall_vs_threshold,
plot_precision_vs_recall,
)
from classification_methods import (
random_forest_classifier,
# knn_classifier,
# logistic_regression,
# sgd_classifier,
# ridge_classifier,
# svm_classifier,
# gaussian_nb_classifier,
xgboost_classifier,
)
# stop warnings from sklearn
# https://stackoverflow.com/questions/32612180/eliminating-warnings-from-scikit-learn
def warn(*args, **kwargs):
pass
import warnings
warnings.warn = warn
# to profile script for memory usage, use:
# /usr/bin/time -f "mem=%K RSS=%M elapsed=%E cpu.sys=%S .user=%U" python random_search_run.py
# from https://unix.stackexchange.com/questions/375889/unix-command-to-tell-how-much-ram-was-used-during-program-runtime
#############################################################################
# RANDOM SEARCH PARAMETERS
# fill these out to set parameters for the random search
# set a seed for the parameter sampler
sampler_seed = random.randint(0, 2 ** 16)
no_iterations = 30000
# create list of tools that we want to look over
# these are only the tools that we know we have wear-failures [57, 54, 32, 36, 22, 8, 2]
# tool_list_all = [57, 54, 32, 36, 22, 8, 2]
tool_list_all = [54]
# tool_list_some = [57, 32, 22, 8, 2, 36]
tool_list_some = []
# other parameters
scaler_methods = ["standard", "min_max"]
imbalance_ratios = [0.1, 0.5, 0.8, 1]
average_across_indices = [True, False]
# list of classifiers to test
classifier_list_all = [
random_forest_classifier,
# knn_classifier,
# logistic_regression,
# sgd_classifier,
# ridge_classifier,
# svm_classifier,
# gaussian_nb_classifier,
xgboost_classifier,
]
over_under_sampling_methods = [
"random_over",
"random_under",
"random_under_bootstrap",
"smote",
"adasyn",
None,
]
# no cut indices past 9 that are valid
index_list = [
list(range(0, 10)),
list(range(1, 10)),
list(range(1, 9)),
list(range(1, 8)),
list(range(2, 8)),
list(range(3, 7)),
list(range(2, 9)),
list(range(2, 10)),
]
#############################################################################
# test and train folds
# failures for tool 54 on following dates:
# 2018-11-15
# 2019-01-28
# 2019-01-29
# 2019-01-30
# 2019-02-04
# 2019-02-07
# 2019-02-08
# 2019-09-11 - These are resampled into pickle files (in case that matters)
# 2019-11-27
# 2019-01-23 - These are from January data without speed
# update 8/6/2020: does not look like we use the 'test_fold'
# therefore, I have divided the dates into the other three folds
test_fold = [
"2018-10-23",
"2018-11-15", # failures
"2018-11-16",
"2018-11-19",
"2019-09-11", # failures
"2019-09-13",
]
train_fold_1 = [
"2018-11-21",
"2019-01-25",
"2019-01-28", # failures
"2019-11-27", # failures
"2019-01-23", # failures, from Jan without speed
"2019-05-03",
"2019-09-11", # failures
"2019-09-13",
]
train_fold_2 = [
"2019-01-29", # failures
"2019-01-30", # failures
"2019-02-01",
"2019-02-08", # failures
"2019-09-10",
"2019-09-12",
"2018-11-20",
"2019-02-11",
"2019-01-24", # i forgot this one earlier
"2019-05-04",
"2018-11-16",
"2018-11-19",
]
train_fold_3 = [
"2019-02-04", # failures
"2019-02-05",
"2019-02-07", # failures
"2019-05-06",
"2019-01-22", # from Jan without speed
"2018-10-23",
"2018-11-15", # failures
]
train_folds = [train_fold_1, train_fold_2, train_fold_3]
train_dates_all = [date for sublist in train_folds for date in sublist]
#############################################################################
# start by loading the csv with the features
# file_folder = Path(
# "/home/tim/Documents/Checkfluid-Project/data/processed/"
# "_tables/low_levels_labels_created_2020-03-11"
# )
# for HPC
file_folder = Path(
"/home/tvhahn/projects/def-mechefsk/tvhahn/_tables/low_levels_labels_created_2020-03-11/"
)
file = file_folder / "low_level_labels_created_2020.03.11_v3_updated_2020.08.06.csv"
df = | pd.read_csv(file) | pandas.read_csv |
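The masked call here is pandas.read_csv. A self-contained sketch of the same pattern; the CSV text and column names below are invented for illustration, whereas the real script reads the HPC feature table referenced above.
import io
import pandas as pd

csv_text = "tool,feature_1,label\n54,0.12,0\n54,0.87,1\n"   # made-up sample data
df = pd.read_csv(io.StringIO(csv_text))                      # same call, in-memory source
print(df.shape)        # (2, 3)
print(df.dtypes)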
import numpy as np
import pandas as pd
import os
from sklearn.preprocessing import OneHotEncoder
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import accuracy_score
from sklearn.svm import SVC
from sklearn.ensemble import RandomForestClassifier
data = pd.read_csv('data.csv')
# print("Number data is null: ", data.isnull().sum())
# print("Description: ", data.describe())
# print("Types: ", data.dtypes)
# print("Nunique: ", data.nunique())
df = | pd.DataFrame(data) | pandas.DataFrame |
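This completion is the plain DataFrame constructor. A short illustration, with made-up column names, of the two constructions involved: building a frame from a dict and building a new DataFrame from an existing one, which is what `pd.DataFrame(data)` does in the row above.
import pandas as pd

raw = {"age": [25, 32], "label": ["yes", "no"]}
df = pd.DataFrame(raw)    # from a dict of columns
df2 = pd.DataFrame(df)    # from an existing frame, as the row above does
print(df2.dtypes)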
from numpy.core.fromnumeric import var
import pytest
import pandas as pd
import numpy as np
from dowhy import CausalModel
class TestIDIdentifier(object):
def test_1(self):
treatment = "T"
outcome = "Y"
causal_graph = "digraph{T->Y;}"
columns = list(treatment) + list(outcome)
df = | pd.DataFrame(columns=columns) | pandas.DataFrame |
import os
import json
import pytest
import numpy as np
import pandas as pd
from sklearn import datasets
import lightgbm as lgb
import matplotlib as mpl
import mlflow
import mlflow.lightgbm
mpl.use('Agg')
client = mlflow.tracking.MlflowClient()
def get_latest_run():
return client.get_run(client.list_run_infos(experiment_id='0')[0].run_id)
@pytest.fixture(scope="session")
def bst_params():
return {
'objective': 'multiclass',
'num_class': 3,
}
@pytest.fixture(scope="session")
def train_set():
iris = datasets.load_iris()
X = | pd.DataFrame(iris.data[:, :2], columns=iris.feature_names[:2]) | pandas.DataFrame |
import logging
import os
import csv
import pandas as pd
import src.helper as helper
import src.business as business
def process_csv_in_chunks(**args) -> tuple:
filename = args.get('filename')
config = args.get('config')
delimiter = args.get('delimiter')
with pd.read_csv(filename,
delimiter=delimiter,
header=0,
dtype=object,
na_values='',
na_filter=False,
engine='c',
chunksize=config.CHUNK_SIZE) as reader:
for indx, data_frame in enumerate(reader):
args['chunk_index'] = indx
args['data_frame'] = data_frame
args = helper.middle_logic(business.process_dataframe(), **args)
if args.get('critical_error'):
return False, args
# DEBUG data_frame.info()
return True, args
def write_dataframe_to_file(**args) -> tuple:
data_frame = args.get('data_frame')
config = args.get('config')
header_record = args.get('header_record')
chunk_index = args.get('chunk_index')
is_initial_write = chunk_index == 0
destination_filename = args.get('destination_filename')
filepath_list = os.path.split(destination_filename)
logging.debug('count of records in dataframe: ' + str(data_frame.shape[0]))
with open(destination_filename, "w") as openfile:
data_frame.to_csv(openfile,
index=False,
mode='a',
sep='|',
header=is_initial_write,
line_terminator='\n',
columns=header_record,
quoting=csv.QUOTE_NONE)
logging.info('records written to {}: {}'.format(
filepath_list[1],
config.CHUNK_SIZE * (chunk_index + 1)))
return True, args
def trim_string_values(**args) -> tuple:
data_frame = args.get('data_frame')
df_obj = data_frame.select_dtypes(['object'])
data_frame[df_obj.columns] = df_obj.apply(lambda x: x.str.strip())
return True, args
def is_first_dataframe(**args) -> tuple:
chunk_index = args.get('chunk_index')
return chunk_index == 0, args
def create_header_record(**args) -> tuple:
missing_columns = args.get('missing_columns')
data_frame = args.get('data_frame')
header = [column for column in data_frame.columns if column not in missing_columns]
logging.info("columns to be imported: " + str(header))
args['header_record'] = header
return True, args
def get_list_of_date_columns(**args) -> tuple:
header_record = args.get('header_record')
destination_schema = args.get('destination_schema')
result = list()
for column in header_record:
if destination_schema[column]['DATA_TYPE'] == 'date':
result.append(column)
args['date_columns'] = result
return True, args
def check_date_format_set_if_date_fields_present(**args) -> tuple:
date_columns = args.get('date_columns')
day_first = args.get('day_first')
if len(date_columns) > 0 and day_first is None:
logging.critical('date format not set')
args['critical_error'] = True
return False, args
return True, args
def format_numeric_values(**args) -> tuple:
data_frame = args.get('data_frame')
header_record = args.get('header_record')
destination_schema = args.get('destination_schema')
for column_name in header_record:
if destination_schema[column_name]['DATA_TYPE'] == 'numeric':
data_frame[column_name] = pd.to_numeric(data_frame[column_name])
# TODO https://stackoverflow.com/a/62546734/14205069 for better solution
return True, args
def convert_dates(**args) -> tuple:
data_frame = args.get('data_frame')
header_record = args.get('header_record')
destination_schema = args.get('destination_schema')
day_first = args.get('day_first')
for column_name in header_record:
if destination_schema[column_name]['DATA_TYPE'][0:4] == 'date':
try:
data_frame[column_name] = | pd.to_datetime(data_frame[column_name], dayfirst=day_first) | pandas.to_datetime |
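The masked call is pandas.to_datetime with the dayfirst flag. A small standalone example, using invented dates, of how that flag disambiguates day/month order.
import pandas as pd

s = pd.Series(["01/02/2020", "15/02/2020"])
# dayfirst=True reads "01/02/2020" as 1 February rather than 2 January.
parsed = pd.to_datetime(s, dayfirst=True)
print(parsed.tolist())   # [Timestamp('2020-02-01'), Timestamp('2020-02-15')]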
""" Veracity Data Fabric API
"""
from typing import Any, AnyStr, Mapping, Sequence
from urllib.error import HTTPError
import pandas as pd
from azure.storage.blob.aio import ContainerClient
from .base import ApiBase
class DataFabricError(RuntimeError):
pass
class DataFabricAPI(ApiBase):
""" Access to the data fabric endpoints (/datafabric) in the Veracity API.
All web calls are async using aiohttp. Returns web responses exactly as
received, usually JSON.
Arguments:
credential (veracity.Credential): Provides oauth access tokens for the
API (the user has to log in to retrieve these unless your client
application has permissions to use the service.)
subscription_key (str): Your application's API subscription key. Gets
sent in th Ocp-Apim-Subscription-Key header.
version (str): Not currently used.
"""
API_ROOT = "https://api.veracity.com/veracity/datafabric"
def __init__(self, credential, subscription_key, version=None, **kwargs):
super().__init__(credential, subscription_key, scope=kwargs.pop('scope', 'veracity_datafabric'), **kwargs)
self._url = f"{DataFabricAPI.API_ROOT}/data/api/1"
self.sas_cache = {}
self.access_cache = {}
@property
def url(self):
return self._url
# APPLICATIONS.
async def get_current_application(self):
url = f'{self._url}/application'
resp = await self.session.get(url)
data = await resp.json()
if resp.status != 200:
raise HTTPError(url, resp.status, data, resp.headers, None)
return data
async def get_application(self, applicationId):
url = f'{self._url}/application/{applicationId}'
resp = await self.session.get(url)
data = await resp.json()
if resp.status != 200:
raise HTTPError(url, resp.status, data, resp.headers, None)
return data
async def add_application(self, *args, **kwargs):
raise NotImplementedError()
async def update_application_role(self, applicationId, role):
url = f'{self._url}/application/{applicationId}?role={role}'
resp = await self.session.get(url)
data = await resp.json()
if resp.status != 200:
raise HTTPError(url, resp.status, data, resp.headers, None)
return data
# GROUPS.
async def get_groups(self):
raise NotImplementedError()
async def add_group(self, *args, **kwargs):
raise NotImplementedError()
async def get_group(self, groupId):
raise NotImplementedError()
async def update_group(self, groupId, *args, **kwargs):
raise NotImplementedError()
async def delete_group(self, groupId):
raise NotImplementedError()
# KEY TEMPLATES.
async def get_keytemplates(self):
url = f'{self._url}/keytemplates'
resp = await self.session.get(url)
data = await resp.json()
if resp.status != 200:
raise HTTPError(url, resp.status, data, resp.headers, None)
return data
# LEDGER.
async def get_ledger(self, containerId: AnyStr) -> pd.DataFrame:
url = f'{self._url}/resource/{containerId}/ledger'
resp = await self.session.get(url)
data = await resp.json()
if resp.status == 200:
df = pd.DataFrame(data)
df['dateOfEvent'] = | pd.to_datetime(df['dateOfEvent'], format="%Y-%m-%dT%H:%M:%SZ") | pandas.to_datetime |
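Here the completion parses ledger timestamps with an explicit format string. A minimal sketch of the same pattern; the timestamps below are invented.
import pandas as pd

stamps = pd.Series(["2021-03-01T12:00:00Z", "2021-03-02T08:30:00Z"])
# An explicit format both validates the input shape and avoids per-element format inference.
parsed = pd.to_datetime(stamps, format="%Y-%m-%dT%H:%M:%SZ")
print(parsed.dtype)      # datetime64[ns]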
import pandas as pd
import cv2
import pygame
import numpy as np
from movement_detector.detectors import AbstractMovementDetector
class Interface:
"""
This class displays the video, overlays metadata, and enables user-control.
"""
def __init__(self, detector: AbstractMovementDetector):
self.detector = detector
self._play_video = False
self._frame_index = 0
self._playback_frame_rate = self.detector.video.frame_rate
self._player = pygame.display.set_mode(
self.detector.video.frame_shape[1::-1],
pygame.RESIZABLE
)
self._clock = pygame.time.Clock()
self._space_pressed = False
self._key_repeat_buffer = 600
def display(self, stop_keys=('N', 'P', 27)):
vid_name = self.detector.video.vid_name
self._play_video = False
self._frame_index = 0
time_since_last_frame = 0
quit_ = False
keys = None
command_text = ''
command_print_count = 0
command_print_max = max(self._key_repeat_buffer, 10)
keys_pressed = []
time_since_key_press = 0
while True:
tick = self._clock.tick()
if self._frame_index == len(self.detector.video):
self._play_video = False
else:
frame = self._build_frame(action_text=command_text)
if command_text != '':
command_print_count += 1
if command_print_count == command_print_max:
command_text = ''
command_print_count = 0
pygame.display.set_caption(
f'{vid_name} - Frame {self._frame_index + 1}'
)
pygame.surfarray.blit_array(self._player, frame)
pygame.display.update()
keys_pressed = pygame.key.get_pressed()
if any(keys_pressed):
if (time_since_key_press == 0
or time_since_key_press >= self._key_repeat_buffer):
new_command_text = self._parse_command(keys=keys_pressed)
time_since_key_press += tick
if new_command_text != '':
command_text = new_command_text
else:
time_since_key_press = 0
if self._space_pressed:
self._space_pressed = False
for event in pygame.event.get():
if event.type == pygame.QUIT:
pygame.quit()
quit_ = True
if quit_:
break
if self._play_video:
time_since_last_frame += tick
if time_since_last_frame >= 1/self._playback_frame_rate:
self._frame_index += 1
time_since_last_frame = 0
else:
time_since_last_frame = 0
return keys_pressed
def _build_frame(self, action_text=''):
frame = self.detector.video[self._frame_index]
meta_data = self.detector.meta(
start=self._frame_index,
stop=self._frame_index + 1
)
self._add_moving_text(frame=frame, meta_data=meta_data)
self._add_outlier_text(frame=frame, meta_data=meta_data)
self._add_frame_rate_text(frame=frame)
self._add_action_text(frame=frame, action_text=action_text)
frame = np.flipud(np.rot90(frame))
# frame = pygame.surfarray.make_surface(frame)
return frame
@staticmethod
def _add_moving_text(frame, meta_data):
if | pd.isna(meta_data['moving'].iloc[0]) | pandas.isna |
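The completion is pandas.isna; a brief standalone illustration (not part of the interface module above) of how it treats the usual missing-value markers on scalars and Series.
import numpy as np
import pandas as pd

print(pd.isna(None), pd.isna(np.nan), pd.isna(pd.NaT))   # True True True
print(pd.isna(pd.Series([1.0, None, 3.0])).tolist())     # [False, True, False]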
from datetime import date as dt
import numpy as np
import pandas as pd
import pytest
import talib
import os
from finance_tools_py.simulation import Simulation
from finance_tools_py.simulation.callbacks import talib as cb_talib
from finance_tools_py.simulation import callbacks
@pytest.fixture
def init_global_data():
pytest.global_code = '000001'
pytest.global_data = pd.DataFrame({
'code': [pytest.global_code for x in range(1998, 2020)],
'date': [dt(y, 1, 1) for y in range(1998, 2020)],
'close':
np.random.random((len(list(range(1998, 2020))), )),
'high':
np.random.random((len(list(range(1998, 2020))), )),
'low':
np.random.random((len(list(range(1998, 2020))), )),
})
@pytest.fixture
def mock_data():
pytest.mock_code = '600036'
if "TRAVIS" in os.environ and os.environ["TRAVIS"] == "true":
pytest.mock_data = | pd.read_csv('tests/data/600036.csv', index_col=None) | pandas.read_csv |
from datetime import timedelta
import operator
import numpy as np
import pytest
import pytz
from pandas._libs.tslibs import IncompatibleFrequency
from pandas.core.dtypes.common import is_datetime64_dtype, is_datetime64tz_dtype
import pandas as pd
from pandas import (
Categorical,
Index,
IntervalIndex,
Series,
Timedelta,
bdate_range,
date_range,
isna,
)
import pandas._testing as tm
from pandas.core import nanops, ops
def _permute(obj):
return obj.take(np.random.permutation(len(obj)))
class TestSeriesFlexArithmetic:
@pytest.mark.parametrize(
"ts",
[
(lambda x: x, lambda x: x * 2, False),
(lambda x: x, lambda x: x[::2], False),
(lambda x: x, lambda x: 5, True),
(lambda x: tm.makeFloatSeries(), lambda x: tm.makeFloatSeries(), True),
],
)
@pytest.mark.parametrize(
"opname", ["add", "sub", "mul", "floordiv", "truediv", "pow"]
)
def test_flex_method_equivalence(self, opname, ts):
# check that Series.{opname} behaves like Series.__{opname}__,
tser = tm.makeTimeSeries().rename("ts")
series = ts[0](tser)
other = ts[1](tser)
check_reverse = ts[2]
op = getattr(Series, opname)
alt = getattr(operator, opname)
result = op(series, other)
expected = alt(series, other)
tm.assert_almost_equal(result, expected)
if check_reverse:
rop = getattr(Series, "r" + opname)
result = rop(series, other)
expected = alt(other, series)
tm.assert_almost_equal(result, expected)
def test_flex_method_subclass_metadata_preservation(self, all_arithmetic_operators):
# GH 13208
class MySeries(Series):
_metadata = ["x"]
@property
def _constructor(self):
return MySeries
opname = all_arithmetic_operators
op = getattr(Series, opname)
m = MySeries([1, 2, 3], name="test")
m.x = 42
result = op(m, 1)
assert result.x == 42
def test_flex_add_scalar_fill_value(self):
# GH12723
s = Series([0, 1, np.nan, 3, 4, 5])
exp = s.fillna(0).add(2)
res = s.add(2, fill_value=0)
tm.assert_series_equal(res, exp)
pairings = [(Series.div, operator.truediv, 1), (Series.rdiv, ops.rtruediv, 1)]
for op in ["add", "sub", "mul", "pow", "truediv", "floordiv"]:
fv = 0
lop = getattr(Series, op)
lequiv = getattr(operator, op)
rop = getattr(Series, "r" + op)
# bind op at definition time...
requiv = lambda x, y, op=op: getattr(operator, op)(y, x)
pairings.append((lop, lequiv, fv))
pairings.append((rop, requiv, fv))
@pytest.mark.parametrize("op, equiv_op, fv", pairings)
def test_operators_combine(self, op, equiv_op, fv):
def _check_fill(meth, op, a, b, fill_value=0):
exp_index = a.index.union(b.index)
a = a.reindex(exp_index)
b = b.reindex(exp_index)
amask = isna(a)
bmask = isna(b)
exp_values = []
for i in range(len(exp_index)):
with np.errstate(all="ignore"):
if amask[i]:
if bmask[i]:
exp_values.append(np.nan)
continue
exp_values.append(op(fill_value, b[i]))
elif bmask[i]:
if amask[i]:
exp_values.append(np.nan)
continue
exp_values.append(op(a[i], fill_value))
else:
exp_values.append(op(a[i], b[i]))
result = meth(a, b, fill_value=fill_value)
expected = Series(exp_values, exp_index)
tm.assert_series_equal(result, expected)
a = Series([np.nan, 1.0, 2.0, 3.0, np.nan], index=np.arange(5))
b = Series([np.nan, 1, np.nan, 3, np.nan, 4.0], index=np.arange(6))
result = op(a, b)
exp = equiv_op(a, b)
tm.assert_series_equal(result, exp)
_check_fill(op, equiv_op, a, b, fill_value=fv)
# should accept axis=0 or axis='rows'
op(a, b, axis=0)
class TestSeriesArithmetic:
# Some of these may end up in tests/arithmetic, but are not yet sorted
def test_add_series_with_period_index(self):
rng = pd.period_range("1/1/2000", "1/1/2010", freq="A")
ts = Series(np.random.randn(len(rng)), index=rng)
result = ts + ts[::2]
expected = ts + ts
expected.iloc[1::2] = np.nan
tm.assert_series_equal(result, expected)
result = ts + _permute(ts[::2])
tm.assert_series_equal(result, expected)
msg = "Input has different freq=D from PeriodIndex\\(freq=A-DEC\\)"
with pytest.raises(IncompatibleFrequency, match=msg):
ts + ts.asfreq("D", how="end")
@pytest.mark.parametrize(
"target_add,input_value,expected_value",
[
("!", ["hello", "world"], ["hello!", "world!"]),
("m", ["hello", "world"], ["hellom", "worldm"]),
],
)
def test_string_addition(self, target_add, input_value, expected_value):
# GH28658 - ensure adding 'm' does not raise an error
a = Series(input_value)
result = a + target_add
expected = Series(expected_value)
tm.assert_series_equal(result, expected)
def test_divmod(self):
# GH#25557
a = Series([1, 1, 1, np.nan], index=["a", "b", "c", "d"])
b = Series([2, np.nan, 1, np.nan], index=["a", "b", "d", "e"])
result = a.divmod(b)
expected = divmod(a, b)
tm.assert_series_equal(result[0], expected[0])
tm.assert_series_equal(result[1], expected[1])
result = a.rdivmod(b)
expected = divmod(b, a)
tm.assert_series_equal(result[0], expected[0])
tm.assert_series_equal(result[1], expected[1])
@pytest.mark.parametrize("index", [None, range(9)])
def test_series_integer_mod(self, index):
# GH#24396
s1 = Series(range(1, 10))
s2 = Series("foo", index=index)
msg = "not all arguments converted during string formatting"
with pytest.raises(TypeError, match=msg):
s2 % s1
def test_add_with_duplicate_index(self):
# GH14227
s1 = Series([1, 2], index=[1, 1])
s2 = Series([10, 10], index=[1, 2])
result = s1 + s2
expected = Series([11, 12, np.nan], index=[1, 1, 2])
tm.assert_series_equal(result, expected)
def test_add_na_handling(self):
from datetime import date
from decimal import Decimal
s = Series(
[Decimal("1.3"), Decimal("2.3")], index=[date(2012, 1, 1), date(2012, 1, 2)]
)
result = s + s.shift(1)
result2 = s.shift(1) + s
assert isna(result[0])
assert isna(result2[0])
def test_add_corner_cases(self, datetime_series):
empty = Series([], index=Index([]), dtype=np.float64)
result = datetime_series + empty
assert np.isnan(result).all()
result = empty + empty.copy()
assert len(result) == 0
# FIXME: dont leave commented-out
# TODO: this returned NotImplemented earlier, what to do?
# deltas = Series([timedelta(1)] * 5, index=np.arange(5))
# sub_deltas = deltas[::2]
# deltas5 = deltas * 5
# deltas = deltas + sub_deltas
# float + int
int_ts = datetime_series.astype(int)[:-5]
added = datetime_series + int_ts
expected = Series(
datetime_series.values[:-5] + int_ts.values,
index=datetime_series.index[:-5],
name="ts",
)
tm.assert_series_equal(added[:-5], expected)
def test_mul_empty_int_corner_case(self):
s1 = Series([], [], dtype=np.int32)
s2 = Series({"x": 0.0})
tm.assert_series_equal(s1 * s2, Series([np.nan], index=["x"]))
def test_sub_datetimelike_align(self):
# GH#7500
# datetimelike ops need to align
dt = Series(date_range("2012-1-1", periods=3, freq="D"))
dt.iloc[2] = np.nan
dt2 = dt[::-1]
expected = Series([timedelta(0), timedelta(0), pd.NaT])
# name is reset
result = dt2 - dt
tm.assert_series_equal(result, expected)
expected = Series(expected, name=0)
result = (dt2.to_frame() - dt.to_frame())[0]
tm.assert_series_equal(result, expected)
def test_alignment_doesnt_change_tz(self):
# GH#33671
dti = pd.date_range("2016-01-01", periods=10, tz="CET")
dti_utc = dti.tz_convert("UTC")
ser = Series(10, index=dti)
ser_utc = Series(10, index=dti_utc)
# we don't care about the result, just that original indexes are unchanged
ser * ser_utc
assert ser.index is dti
assert ser_utc.index is dti_utc
def test_arithmetic_with_duplicate_index(self):
# GH#8363
# integer ops with a non-unique index
index = [2, 2, 3, 3, 4]
ser = Series(np.arange(1, 6, dtype="int64"), index=index)
other = Series(np.arange(5, dtype="int64"), index=index)
result = ser - other
expected = Series(1, index=[2, 2, 3, 3, 4])
tm.assert_series_equal(result, expected)
# GH#8363
# datetime ops with a non-unique index
ser = Series(date_range("20130101 09:00:00", periods=5), index=index)
other = Series(date_range("20130101", periods=5), index=index)
result = ser - other
expected = Series(Timedelta("9 hours"), index=[2, 2, 3, 3, 4])
tm.assert_series_equal(result, expected)
# ------------------------------------------------------------------
# Comparisons
class TestSeriesFlexComparison:
@pytest.mark.parametrize("axis", [0, None, "index"])
def test_comparison_flex_basic(self, axis, all_compare_operators):
op = all_compare_operators.strip("__")
left = Series(np.random.randn(10))
right = Series(np.random.randn(10))
result = getattr(left, op)(right, axis=axis)
expected = getattr(operator, op)(left, right)
tm.assert_series_equal(result, expected)
def test_comparison_bad_axis(self, all_compare_operators):
op = all_compare_operators.strip("__")
left = Series(np.random.randn(10))
right = Series(np.random.randn(10))
msg = "No axis named 1 for object type"
with pytest.raises(ValueError, match=msg):
getattr(left, op)(right, axis=1)
@pytest.mark.parametrize(
"values, op",
[
([False, False, True, False], "eq"),
([True, True, False, True], "ne"),
([False, False, True, False], "le"),
([False, False, False, False], "lt"),
([False, True, True, False], "ge"),
([False, True, False, False], "gt"),
],
)
def test_comparison_flex_alignment(self, values, op):
left = Series([1, 3, 2], index=list("abc"))
right = Series([2, 2, 2], index=list("bcd"))
result = getattr(left, op)(right)
expected = Series(values, index=list("abcd"))
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize(
"values, op, fill_value",
[
([False, False, True, True], "eq", 2),
([True, True, False, False], "ne", 2),
([False, False, True, True], "le", 0),
([False, False, False, True], "lt", 0),
([True, True, True, False], "ge", 0),
([True, True, False, False], "gt", 0),
],
)
def test_comparison_flex_alignment_fill(self, values, op, fill_value):
left = Series([1, 3, 2], index=list("abc"))
right = Series([2, 2, 2], index=list("bcd"))
result = getattr(left, op)(right, fill_value=fill_value)
expected = Series(values, index=list("abcd"))
tm.assert_series_equal(result, expected)
class TestSeriesComparison:
def test_comparison_different_length(self):
a = Series(["a", "b", "c"])
b = Series(["b", "a"])
msg = "only compare identically-labeled Series"
with pytest.raises(ValueError, match=msg):
a < b
a = Series([1, 2])
b = Series([2, 3, 4])
with pytest.raises(ValueError, match=msg):
a == b
@pytest.mark.parametrize("opname", ["eq", "ne", "gt", "lt", "ge", "le"])
def test_ser_flex_cmp_return_dtypes(self, opname):
# GH#15115
ser = Series([1, 3, 2], index=range(3))
const = 2
result = getattr(ser, opname)(const).dtypes
expected = np.dtype("bool")
assert result == expected
@pytest.mark.parametrize("opname", ["eq", "ne", "gt", "lt", "ge", "le"])
def test_ser_flex_cmp_return_dtypes_empty(self, opname):
# GH#15115 empty Series case
ser = Series([1, 3, 2], index=range(3))
empty = ser.iloc[:0]
const = 2
result = getattr(empty, opname)(const).dtypes
expected = np.dtype("bool")
assert result == expected
@pytest.mark.parametrize(
"op",
[operator.eq, operator.ne, operator.le, operator.lt, operator.ge, operator.gt],
)
@pytest.mark.parametrize(
"names", [(None, None, None), ("foo", "bar", None), ("baz", "baz", "baz")]
)
def test_ser_cmp_result_names(self, names, op):
# datetime64 dtype
dti = pd.date_range("1949-06-07 03:00:00", freq="H", periods=5, name=names[0])
ser = Series(dti).rename(names[1])
result = op(ser, dti)
assert result.name == names[2]
# datetime64tz dtype
dti = dti.tz_localize("US/Central")
dti = pd.DatetimeIndex(dti, freq="infer") # freq not preserved by tz_localize
ser = Series(dti).rename(names[1])
result = op(ser, dti)
assert result.name == names[2]
# timedelta64 dtype
tdi = dti - dti.shift(1)
ser = Series(tdi).rename(names[1])
result = op(ser, tdi)
assert result.name == names[2]
# interval dtype
if op in [operator.eq, operator.ne]:
# interval dtype comparisons not yet implemented
ii = pd.interval_range(start=0, periods=5, name=names[0])
ser = Series(ii).rename(names[1])
result = op(ser, ii)
assert result.name == names[2]
# categorical
if op in [operator.eq, operator.ne]:
# categorical dtype comparisons raise for inequalities
cidx = tdi.astype("category")
ser = Series(cidx).rename(names[1])
result = op(ser, cidx)
assert result.name == names[2]
def test_comparisons(self):
left = np.random.randn(10)
right = np.random.randn(10)
left[:3] = np.nan
result = nanops.nangt(left, right)
with np.errstate(invalid="ignore"):
expected = (left > right).astype("O")
expected[:3] = np.nan
tm.assert_almost_equal(result, expected)
s = Series(["a", "b", "c"])
s2 = Series([False, True, False])
# it works!
exp = Series([False, False, False])
tm.assert_series_equal(s == s2, exp)
tm.assert_series_equal(s2 == s, exp)
# -----------------------------------------------------------------
# Categorical Dtype Comparisons
def test_categorical_comparisons(self):
# GH#8938
# allow equality comparisons
a = Series(list("abc"), dtype="category")
b = Series(list("abc"), dtype="object")
c = Series(["a", "b", "cc"], dtype="object")
d = Series(list("acb"), dtype="object")
e = Categorical(list("abc"))
f = Categorical(list("acb"))
# vs scalar
assert not (a == "a").all()
assert ((a != "a") == ~(a == "a")).all()
assert not ("a" == a).all()
assert (a == "a")[0]
assert ("a" == a)[0]
assert not ("a" != a)[0]
# vs list-like
assert (a == a).all()
assert not (a != a).all()
assert (a == list(a)).all()
assert (a == b).all()
assert (b == a).all()
assert ((~(a == b)) == (a != b)).all()
assert ((~(b == a)) == (b != a)).all()
assert not (a == c).all()
assert not (c == a).all()
assert not (a == d).all()
assert not (d == a).all()
# vs a cat-like
assert (a == e).all()
assert (e == a).all()
assert not (a == f).all()
assert not (f == a).all()
assert (~(a == e) == (a != e)).all()
assert (~(e == a) == (e != a)).all()
assert (~(a == f) == (a != f)).all()
assert (~(f == a) == (f != a)).all()
# non-equality is not comparable
msg = "can only compare equality or not"
with pytest.raises(TypeError, match=msg):
a < b
with pytest.raises(TypeError, match=msg):
b < a
with pytest.raises(TypeError, match=msg):
a > b
with pytest.raises(TypeError, match=msg):
b > a
def test_unequal_categorical_comparison_raises_type_error(self):
# unequal comparison should raise for unordered cats
cat = Series(Categorical(list("abc")))
msg = "can only compare equality or not"
with pytest.raises(TypeError, match=msg):
cat > "b"
cat = Series(Categorical(list("abc"), ordered=False))
with pytest.raises(TypeError, match=msg):
cat > "b"
# https://github.com/pandas-dev/pandas/issues/9836#issuecomment-92123057
# and following comparisons with scalars not in categories should raise
# for unequal comps, but not for equal/not equal
cat = Series(Categorical(list("abc"), ordered=True))
msg = "Invalid comparison between dtype=category and str"
with pytest.raises(TypeError, match=msg):
cat < "d"
with pytest.raises(TypeError, match=msg):
cat > "d"
with pytest.raises(TypeError, match=msg):
"d" < cat
with pytest.raises(TypeError, match=msg):
"d" > cat
tm.assert_series_equal(cat == "d", Series([False, False, False]))
tm.assert_series_equal(cat != "d", Series([True, True, True]))
# -----------------------------------------------------------------
def test_comparison_tuples(self):
# GH#11339
# comparisons vs tuple
s = Series([(1, 1), (1, 2)])
result = s == (1, 2)
expected = Series([False, True])
tm.assert_series_equal(result, expected)
result = s != (1, 2)
expected = Series([True, False])
tm.assert_series_equal(result, expected)
result = s == (0, 0)
expected = Series([False, False])
tm.assert_series_equal(result, expected)
result = s != (0, 0)
expected = Series([True, True])
tm.assert_series_equal(result, expected)
s = Series([(1, 1), (1, 1)])
result = s == (1, 1)
expected = Series([True, True])
tm.assert_series_equal(result, expected)
result = s != (1, 1)
expected = Series([False, False])
tm.assert_series_equal(result, expected)
s = Series([frozenset([1]), frozenset([1, 2])])
result = s == frozenset([1])
expected = | Series([True, False]) | pandas.Series |
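The completion simply constructs the expected boolean Series; a one-line illustration, separate from the test, of the dtype pandas infers for it.
import pandas as pd

expected = pd.Series([True, False])
print(expected.dtype)    # bool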
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from pandas.core.accessor import AccessorProperty
import pandas.plotting._core as gfx
def torque_column_labels():
return [f'torque_{i}' for i in range(361)]
class RevolutionPlotMethods(gfx.FramePlotMethods):
polar_angles = np.arange(0, 361) / (180 / np.pi)
torque_columns = torque_column_labels()
def polar(self, *args, **kwargs):
ax = plt.subplot(111, projection='polar')
torque = self._data[self.torque_columns].mean()
ax.plot(self.polar_angles, torque, *args, **kwargs)
ax.set_theta_offset(np.pi/2)
# ax.set_theta_direction(-1)
xticks_num = 8
xticks = np.arange(0, 2*np.pi, 2 * np.pi / xticks_num)
ax.set_xticks(xticks)
rad_to_label = lambda i: '{}°'.format(int(i / (2 * np.pi) * 360) % 180)
ax.set_xticklabels([rad_to_label(i) for i in xticks])
ax.set_yticklabels([])
return ax
class RevolutionDataFrame(pd.DataFrame):
@property
def _constructor(self):
return RevolutionDataFrame
def compute_min_max_angles(self):
# @TODO this method is quite memory inefficient. Row by row calculation is better
torque_columns = torque_column_labels()
torque_T = self.loc[:, torque_columns].transpose().reset_index(drop=True)
left_max_angle = torque_T.iloc[:180].idxmax()
right_max_angle = torque_T.iloc[180:].idxmax() - 180
left_min_angle = | pd.concat([torque_T.iloc[:135], torque_T.iloc[315:]]) | pandas.concat |
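The masked call concatenates two slices of the transposed torque table. A standalone sketch of the same pd.concat pattern, using a plain integer Series instead of the torque data.
import pandas as pd

s = pd.Series(range(10))
# Gluing the start and end slices back together keeps the original index labels.
joined = pd.concat([s.iloc[:3], s.iloc[8:]])
print(joined.index.tolist())    # [0, 1, 2, 8, 9]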
# -*- coding: utf-8 -*-
from __future__ import print_function
import pytest
import operator
from collections import OrderedDict
from datetime import datetime
from itertools import chain
import warnings
import numpy as np
from pandas import (notna, DataFrame, Series, MultiIndex, date_range,
Timestamp, compat)
import pandas as pd
from pandas.core.dtypes.dtypes import CategoricalDtype
from pandas.core.apply import frame_apply
from pandas.util.testing import (assert_series_equal,
assert_frame_equal)
import pandas.util.testing as tm
from pandas.conftest import _get_cython_table_params
from pandas.tests.frame.common import TestData
class TestDataFrameApply(TestData):
def test_apply(self):
with np.errstate(all='ignore'):
# ufunc
applied = self.frame.apply(np.sqrt)
tm.assert_series_equal(np.sqrt(self.frame['A']), applied['A'])
# aggregator
applied = self.frame.apply(np.mean)
assert applied['A'] == np.mean(self.frame['A'])
d = self.frame.index[0]
applied = self.frame.apply(np.mean, axis=1)
assert applied[d] == np.mean(self.frame.xs(d))
assert applied.index is self.frame.index # want this
# invalid axis
df = DataFrame(
[[1, 2, 3], [4, 5, 6], [7, 8, 9]], index=['a', 'a', 'c'])
pytest.raises(ValueError, df.apply, lambda x: x, 2)
# see gh-9573
df = DataFrame({'c0': ['A', 'A', 'B', 'B'],
'c1': ['C', 'C', 'D', 'D']})
df = df.apply(lambda ts: ts.astype('category'))
assert df.shape == (4, 2)
assert isinstance(df['c0'].dtype, CategoricalDtype)
assert isinstance(df['c1'].dtype, CategoricalDtype)
def test_apply_mixed_datetimelike(self):
# mixed datetimelike
# GH 7778
df = DataFrame({'A': date_range('20130101', periods=3),
'B': pd.to_timedelta(np.arange(3), unit='s')})
result = df.apply(lambda x: x, axis=1)
assert_frame_equal(result, df)
def test_apply_empty(self):
# empty
applied = self.empty.apply(np.sqrt)
assert applied.empty
applied = self.empty.apply(np.mean)
assert applied.empty
no_rows = self.frame[:0]
result = no_rows.apply(lambda x: x.mean())
expected = Series(np.nan, index=self.frame.columns)
assert_series_equal(result, expected)
no_cols = self.frame.loc[:, []]
result = no_cols.apply(lambda x: x.mean(), axis=1)
expected = Series(np.nan, index=self.frame.index)
assert_series_equal(result, expected)
# 2476
xp = DataFrame(index=['a'])
rs = xp.apply(lambda x: x['a'], axis=1)
assert_frame_equal(xp, rs)
def test_apply_with_reduce_empty(self):
# reduce with an empty DataFrame
x = []
result = self.empty.apply(x.append, axis=1, result_type='expand')
assert_frame_equal(result, self.empty)
result = self.empty.apply(x.append, axis=1, result_type='reduce')
assert_series_equal(result, Series(
[], index=pd.Index([], dtype=object)))
empty_with_cols = DataFrame(columns=['a', 'b', 'c'])
result = empty_with_cols.apply(x.append, axis=1, result_type='expand')
assert_frame_equal(result, empty_with_cols)
result = empty_with_cols.apply(x.append, axis=1, result_type='reduce')
assert_series_equal(result, Series(
[], index=pd.Index([], dtype=object)))
# Ensure that x.append hasn't been called
assert x == []
def test_apply_deprecate_reduce(self):
with warnings.catch_warnings(record=True):
x = []
self.empty.apply(x.append, axis=1, result_type='reduce')
def test_apply_standard_nonunique(self):
df = DataFrame(
[[1, 2, 3], [4, 5, 6], [7, 8, 9]], index=['a', 'a', 'c'])
rs = df.apply(lambda s: s[0], axis=1)
xp = Series([1, 4, 7], ['a', 'a', 'c'])
assert_series_equal(rs, xp)
rs = df.T.apply(lambda s: s[0], axis=0)
assert_series_equal(rs, xp)
def test_with_string_args(self):
for arg in ['sum', 'mean', 'min', 'max', 'std']:
result = self.frame.apply(arg)
expected = getattr(self.frame, arg)()
tm.assert_series_equal(result, expected)
result = self.frame.apply(arg, axis=1)
expected = getattr(self.frame, arg)(axis=1)
tm.assert_series_equal(result, expected)
def test_apply_broadcast_deprecated(self):
with tm.assert_produces_warning(FutureWarning):
self.frame.apply(np.mean, broadcast=True)
def test_apply_broadcast(self):
# scalars
result = self.frame.apply(np.mean, result_type='broadcast')
expected = DataFrame([self.frame.mean()], index=self.frame.index)
tm.assert_frame_equal(result, expected)
result = self.frame.apply(np.mean, axis=1, result_type='broadcast')
m = self.frame.mean(axis=1)
expected = DataFrame({c: m for c in self.frame.columns})
| tm.assert_frame_equal(result, expected) | pandas.util.testing.assert_frame_equal |
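The completion is the DataFrame comparison helper, imported in this row through the legacy pandas.util.testing path. An independent sketch using the public pandas.testing equivalent, with made-up data.
import pandas as pd
from pandas.testing import assert_frame_equal

df1 = pd.DataFrame({"a": [1, 2], "b": [3.0, 4.0]})
# Compares values, dtypes, index and columns; raises AssertionError on any mismatch.
assert_frame_equal(df1, df1.copy())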
# -*- coding: utf-8 -*-
"""
Created on Sun Jun 28 21:24:44 2020
@author: omars
"""
#%% Libraries
import pickle
from datetime import datetime, timedelta
import pandas as pd
import numpy as np
from params import (target_col, date_col, region_col, training_cutoff,
df_path, default_path, nmin, restriction_dict, region_exceptions_dict)
import os
import warnings
warnings.filterwarnings("ignore")
#%% Helper Functions
def save_model(model,
filename):
file_pi = open(filename, 'wb')
pickle.dump(model, file_pi)
def load_model(filename):
filehandler = open(filename, 'rb')
return(pickle.load(filehandler))
def load_data(file=df_path,
target=target_col,
date=date_col,
region=region_col,
training_cutoff=training_cutoff,
validation_cutoff=None,
nmin=nmin,
restriction_dict=restriction_dict[region_col],
region_exceptions=region_exceptions_dict[region_col],
default_path=default_path,
add_countries=False):
if file is None:
df = get_public_data(default_path)
else:
df = pd.read_csv(file)
df.columns = map(str.lower, df.columns)
# delete excepctions
if not (region_exceptions is None):
df = df[~df[region].isin(region_exceptions)].copy()
df = df[df[target] >= nmin[region]]
df.sort_values(by=[region, date], inplace=True)
try:
df["cases_nom"] = df["cases"] / df["population"]
df["deaths_nom"] = df["deaths"] / df["population"]
except KeyError:
pass
df["cases_pct3"] = df.groupby(region)["cases"].pct_change(3).values
df["cases_pct5"] = df.groupby(region)["cases"].pct_change(5).values
try:
df[date] = df[date].apply(lambda x: datetime.strptime(x, '%Y-%m-%d'))
except:
df[date] = df[date].apply(lambda x: datetime.strptime(x, '%m/%d/%Y'))
df = df.sort_values(by=[region, date])
if (region == "state") & add_countries:
data = pd.read_csv('https://covid.ourworldindata.org/data/owid-covid-data.csv')
data.rename(columns={'location': region_col, 'total_cases': 'cases', 'total_tests': 'people_tested', 'total_deaths': 'deaths'}, inplace=True)
data = data.loc[:, [region_col, 'date', 'cases', 'deaths', 'people_tested']]
#data = data[~data.cases.isna()]
data['cases'] = data.groupby('state')['cases'].fillna(method='ffill')
data['deaths'] = data.groupby('state')['deaths'].fillna(method='ffill')
data[date] = pd.to_datetime(df[date])
df.rename(columns={'state': region_col}, inplace=True)
df = df.append(data)
# restrict to a subset of obervations
if not (restriction_dict is None):
masks = []
for col, values in restriction_dict.items():
try:
masks.append(df[col].isin(values))
except:
pass
if masks:
mask_ = masks.pop(0)
for other_mask in masks:
mask_ = (mask_ | other_mask)
df = df[mask_].copy()
df_train = df[df[date] <= training_cutoff]
print("Training set contains {} {}.".format(df[region].nunique(), region))
if validation_cutoff is None:
df_test = df[df[date] > training_cutoff]
df_val = df.copy()
else:
df_test = df[[a and b for a, b in zip(df[date] > training_cutoff, df[date] <= validation_cutoff)]]
df_val = df[df[date] <= validation_cutoff]
return(df, df_val, df_train, df_test)
def dict_to_df(output,
df_validation,
region_col=region_col,
date_col=date_col,
target_col=target_col):
models = list(output.keys())
regions = list(set(df_validation[region_col]))
dates = list(set(df_validation[date_col]))
predictions_rows = []
for region in regions:
for date in dates:
prediction = [region, date]
for model in models:
if region in output[model].keys():
try:
prediction.append(output[model][region].loc[date])
except:
prediction.append(np.nan)
else:
prediction.append(np.nan)
predictions_rows.append(prediction)
df_predictions = pd.DataFrame(predictions_rows, columns=[region_col, date_col] + models)
df_agg = df_predictions.merge(df_validation.loc[:, [region_col, date_col, target_col]], how='left', on=[region_col, date_col])
return df_agg
def mape(y_true, y_pred):
y_true, y_pred = np.array(y_true), np.array(y_pred)
return np.mean(np.abs((y_true - y_pred) / y_true))
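# Worked example (illustrative numbers): mape([100, 200], [110, 180])
# = mean(|100 - 110| / 100, |200 - 180| / 200) = mean(0.10, 0.10) = 0.10, i.e. a 10% mean absolute percentage error.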
def get_mapes(df,
models,
region_col='state',
target_col='cases'):
results = []
for region in set(df[region_col]):
df_sub = df[df[region_col] == region]
results.append([region] + [mape(df_sub[target_col], df_sub[model]) for model in models])
results.append(['Average'] + [mape(df[target_col], df[model]) for model in models])
return(pd.DataFrame(results, columns=[region_col] + ['MAPE_' + model for model in models]))
def get_public_data(path=df_path):
# Import the latest data using the raw data urls
meas_url = 'https://raw.githubusercontent.com/COVID19StatePolicy/SocialDistancing/master/data/USstatesCov19distancingpolicy.csv'
case_url = 'https://raw.githubusercontent.com/CSSEGISandData/COVID-19/master/csse_covid_19_data/csse_covid_19_time_series/time_series_covid19_confirmed_US.csv'
deaths_url = 'https://raw.githubusercontent.com/CSSEGISandData/COVID-19/master/csse_covid_19_data/csse_covid_19_time_series/time_series_covid19_deaths_US.csv'
mob_url = 'https://www.gstatic.com/covid19/mobility/Global_Mobility_Report.csv'
measures = pd.read_csv(meas_url,
encoding="ISO-8859-1")
cases = pd.read_csv(case_url,
encoding='utf-8')
deaths = pd.read_csv(deaths_url,
encoding='utf-8')
mobility = pd.read_csv(mob_url,
encoding='utf-8')
    # Johns Hopkins University daily reports
last_available_date = (datetime.today() - timedelta(1)).strftime('%Y-%m-%d')
dates = pd.date_range(start='2020-04-12', end=datetime.today() - timedelta(3)).strftime('%m-%d-%Y').tolist()
daily_df = pd.DataFrame()
for date in dates:
daily_url = 'https://raw.githubusercontent.com/CSSEGISandData/COVID-19/master/csse_covid_19_data/csse_covid_19_daily_reports_us/' + date + '.csv'
this_daily = pd.read_csv(daily_url,encoding='utf-8')
this_daily['date'] = date # fill the date column with the date from the file name
daily_df = daily_df.append(this_daily)
daily_df.drop(['Country_Region', 'Last_Update', 'Lat', 'Long_', 'UID', 'ISO3'], axis=1, inplace=True)
daily_df.columns = daily_df.columns.str.replace(
'Province_State', 'state').str.replace(
'Confirmed','cases').str.replace(
'Deaths', 'deaths')
daily_df['date'] = | pd.to_datetime(daily_df['date'], format="%m-%d-%Y") | pandas.to_datetime |
import functools
import itertools
import warnings
from collections import OrderedDict
import cupy
import numpy as np
import pandas as pd
from pandas.api.types import is_dtype_equal
import cudf
import cudf._lib as libcudf
from cudf._lib.nvtx import annotate
from cudf._lib.scalar import Scalar
from cudf.core import column
from cudf.core.column import as_column, build_categorical_column
from cudf.utils.dtypes import (
is_categorical_dtype,
is_datetime_dtype,
is_numerical_dtype,
is_scalar,
is_string_dtype,
min_scalar_type,
)
class Frame(libcudf.table.Table):
"""
Frame: A collection of Column objects with an optional index.
Parameters
----------
data : OrderedColumnDict
An OrderedColumnDict mapping column names to Columns
index : Table
A Frame representing the (optional) index columns.
"""
@classmethod
def _from_table(cls, table):
return cls(table._data, index=table._index)
@classmethod
@annotate("CONCAT", color="orange", domain="cudf_python")
def _concat(cls, objs, axis=0, ignore_index=False):
# shallow-copy the input DFs in case the same DF instance
# is concatenated with itself
objs = [f.copy(deep=False) for f in objs]
from cudf.core.index import as_index
from cudf.core.column.column import column_empty
from cudf.core.column.column import build_categorical_column
# Create a dictionary of the common, non-null columns
def get_non_null_cols_and_dtypes(col_idxs, list_of_columns):
# A mapping of {idx: np.dtype}
dtypes = dict()
# A mapping of {idx: [...columns]}, where `[...columns]`
# is a list of columns with at least one valid value for each
# column name across all input dataframes
non_null_columns = dict()
for idx in col_idxs:
for cols in list_of_columns:
# Skip columns not in this frame
if idx >= len(cols) or cols[idx] is None:
continue
# Store the first dtype we find for a column, even if it's
# all-null. This ensures we always have at least one dtype
# for each name. This dtype will be overwritten later if a
# non-null Column with the same name is found.
if idx not in dtypes:
dtypes[idx] = cols[idx].dtype
if cols[idx].valid_count > 0:
if idx not in non_null_columns:
non_null_columns[idx] = [cols[idx]]
else:
non_null_columns[idx].append(cols[idx])
return non_null_columns, dtypes
def find_common_dtypes_and_categories(non_null_columns, dtypes):
# A mapping of {idx: categories}, where `categories` is a
# column of all the unique categorical values from each
# categorical column across all input dataframes
categories = dict()
for idx, cols in non_null_columns.items():
# default to the first non-null dtype
dtypes[idx] = cols[0].dtype
# If all the non-null dtypes are int/float, find a common dtype
if all(is_numerical_dtype(col.dtype) for col in cols):
dtypes[idx] = np.find_common_type(
[col.dtype for col in cols], []
)
# If all categorical dtypes, combine the categories
elif all(is_categorical_dtype(col.dtype) for col in cols):
# Combine and de-dupe the categories
categories[idx] = (
cudf.concat([col.cat().categories for col in cols])
.to_series()
.drop_duplicates()
._column
)
# Set the column dtype to the codes' dtype. The categories
# will be re-assigned at the end
dtypes[idx] = min_scalar_type(len(categories[idx]))
# Otherwise raise an error if columns have different dtypes
elif not all(
is_dtype_equal(c.dtype, dtypes[idx]) for c in cols
):
raise ValueError("All columns must be the same type")
return categories
def cast_cols_to_common_dtypes(
col_idxs, list_of_columns, dtypes, categories
):
# Cast all columns to a common dtype, assign combined categories,
# and back-fill missing columns with all-null columns
for idx in col_idxs:
dtype = dtypes[idx]
for cols in list_of_columns:
# If column not in this df, fill with an all-null column
if idx >= len(cols) or cols[idx] is None:
n = len(next(filter(lambda x: x is not None, cols)))
cols[idx] = column_empty(n, dtype, masked=True)
else:
# If column is categorical, rebase the codes with the
# combined categories, and cast the new codes to the
# min-scalar-sized dtype
if idx in categories:
cols[idx] = (
cols[idx]
.cat()
._set_categories(
categories[idx], is_unique=True
)
.codes
)
cols[idx] = cols[idx].astype(dtype)
def reassign_categories(categories, cols, col_idxs):
for name, idx in zip(cols, col_idxs):
if idx in categories:
cols[name] = build_categorical_column(
categories=categories[idx],
codes=as_column(
cols[name].base_data, dtype=cols[name].dtype
),
mask=cols[name].base_mask,
offset=cols[name].offset,
size=cols[name].size,
)
# Get a list of the unique table column names
names = [name for f in objs for name in f._column_names]
names = list(OrderedDict.fromkeys(names).keys())
# Combine the index and table columns for each Frame into a
# list of [...index_cols, ...table_cols]. If a table is
# missing a column, that list will have None in the slot instead
columns = [
([] if ignore_index else list(f._index._data.columns))
+ [f._data[name] if name in f._data else None for name in names]
for i, f in enumerate(objs)
]
# Get a list of the combined index and table column indices
indices = list(range(functools.reduce(max, map(len, columns))))
        # The position of the first table column in each
# combined index + table columns list
first_data_column_position = len(indices) - len(names)
# Get the non-null columns and their dtypes
non_null_cols, dtypes = get_non_null_cols_and_dtypes(indices, columns)
# Infer common dtypes between numeric columns
# and combine CategoricalColumn categories
categories = find_common_dtypes_and_categories(non_null_cols, dtypes)
# Cast all columns to a common dtype, assign combined categories,
# and back-fill missing columns with all-null columns
cast_cols_to_common_dtypes(indices, columns, dtypes, categories)
# Construct input tables with the index and data columns in the same
# order. This strips the given index/column names and replaces the
# names with their integer positions in the `cols` list
tables = []
for i, cols in enumerate(columns):
table_cols = cols[first_data_column_position:]
table_names = indices[first_data_column_position:]
table = cls(data=dict(zip(table_names, table_cols)))
if 1 == first_data_column_position:
table._index = as_index(cols[0])
elif first_data_column_position > 1:
index_cols = cols[:first_data_column_position]
index_names = indices[:first_data_column_position]
table._index = cls(data=dict(zip(index_names, index_cols)))
tables.append(table)
# Concatenate the Tables
out = cls._from_table(
libcudf.concat.concat_tables(tables, ignore_index=ignore_index)
)
# Reassign the categories for any categorical table cols
reassign_categories(
categories, out._data, indices[first_data_column_position:]
)
# Reassign the categories for any categorical index cols
reassign_categories(
categories, out._index._data, indices[:first_data_column_position]
)
# Reassign index and column names
if isinstance(objs[0].columns, pd.MultiIndex):
out.columns = objs[0].columns
else:
out.columns = names
out._index.name = objs[0]._index.name
out._index.names = objs[0]._index.names
return out
def _get_columns_by_label(self, labels, downcast=False):
"""
Returns columns of the Frame specified by `labels`
If downcast is True, try and downcast from a DataFrame to a Series
"""
new_data = self._data.get_by_label(labels)
if downcast:
if is_scalar(labels):
nlevels = 1
elif isinstance(labels, tuple):
nlevels = len(labels)
if self._data.multiindex is False or nlevels == self._data.nlevels:
return self._constructor_sliced(
new_data, name=labels, index=self.index
)
return self._constructor(
new_data, columns=new_data.to_pandas_index(), index=self.index
)
def _get_columns_by_index(self, indices):
"""
Returns columns of the Frame specified by `labels`
"""
data = self._data.get_by_index(indices)
return self._constructor(
data, columns=data.to_pandas_index(), index=self.index
)
def _gather(self, gather_map, keep_index=True):
if not | pd.api.types.is_integer_dtype(gather_map.dtype) | pandas.api.types.is_integer_dtype |
import pytest
import pandas as pd
import geopandas as gp
from shapely.geometry import Polygon, Point, LinearRing, LineString, MultiLineString, MultiPolygon, MultiPoint
from types import GeneratorType
from pam.samplers import attributes, basic, facility, spatial
@pytest.fixture
def michael():
return {
'age': 16,
'agebin': 'younger',
'gender': 'male'
}
@pytest.fixture
def kasia():
return {
'age': 96,
'agebin': 'older',
'gender': 'female'
}
@pytest.fixture
def fred():
return {
'age': -3,
'agebin': '',
'gender': 1
}
@pytest.fixture
def bins():
return {
(0,50): 'younger',
(51,100): 'older'
}
@pytest.fixture
def cat_joint_distribution():
mapping = ['agebin', 'gender']
distribution = {
'younger': {'male': 0, 'female': 0},
'older': {'male': 0, 'female': 1}
}
return mapping, distribution
def test_apply_bin_integer_transformer_to_michael(michael, bins):
assert attributes.bin_integer_transformer(michael, 'age', bins) == 'younger'
def test_apply_bin_integer_transformer_with_missing_bin(fred, bins):
assert attributes.bin_integer_transformer(fred, 'age', bins) is None
def test_apply_discrete_joint_distribution_sampler_to_michael(michael, cat_joint_distribution):
mapping, dist = cat_joint_distribution
assert attributes.discrete_joint_distribution_sampler(michael, mapping, dist) == False
def test_apply_discrete_joint_distribution_sampler_to_kasia(kasia, cat_joint_distribution):
mapping, dist = cat_joint_distribution
assert attributes.discrete_joint_distribution_sampler(kasia, mapping, dist) == True
def test_apply_discrete_joint_distribution_sampler_to_fred_carefully(fred, cat_joint_distribution):
mapping, dist = cat_joint_distribution
with pytest.raises(KeyError):
attributes.discrete_joint_distribution_sampler(fred, mapping, dist, careful=True)
def test_apply_discrete_joint_distribution_sampler_to_fred_not_carefully(fred, cat_joint_distribution):
mapping, dist = cat_joint_distribution
assert attributes.discrete_joint_distribution_sampler(fred, mapping, dist) == False
testdata = [
(0, 1.5, 0),
(10, 1., 10),
(0, 0.0, 0),
(10, 2., 20),
(10, 1.5, 15),
(10, .5, 5),
]
@pytest.mark.parametrize("freq,sample,result", testdata)
def test_freq_sampler_determined(freq, sample, result):
assert basic.freq_sample(freq, sample) == result
testdata = [
(1, 1.5, 1, 2),
(1, .5, 0, 1),
(1, 0.0001, 0, 1),
(1, 1.0001, 1, 2),
]
@pytest.mark.parametrize("freq,sample,lower,upper", testdata)
def test_freq_sampler_random_round(freq, sample, lower, upper):
assert basic.freq_sample(freq, sample) in [lower, upper]
def test_sample_point_from_geoseries_of_polygons():
df = pd.DataFrame({1:[1,2,3], 2: [4,5,6]})
poly = Polygon(((0,0), (1,0), (1,1), (0,1)))
gdf = gp.GeoDataFrame(df, geometry=[poly]*3)
sampler = spatial.RandomPointSampler(gdf.geometry)
assert isinstance(sampler.sample_point_from_polygon(gdf.geometry[0]), Point)
def test_sample_point_from_geoseries_of_polygons_invalid():
df = pd.DataFrame({1:[1,2,3], 2: [4,5,6]})
poly = Polygon(((0,0), (1,0), (0,1), (1,1)))
gdf = gp.GeoDataFrame(df, geometry=[poly]*3)
sampler = spatial.RandomPointSampler(gdf.geometry)
assert isinstance(sampler.sample_point_from_polygon(gdf.geometry[0]), Point)
def test_sample_point_from_geoseries_of_polygons_no_area():
df = pd.DataFrame({1:[1,2,3], 2: [4,5,6]})
poly = Polygon(((0,0), (1,0), (0,0)))
gdf = gp.GeoDataFrame(df, geometry=[poly]*3)
sampler = spatial.RandomPointSampler(gdf.geometry, patience=0)
assert isinstance(sampler.sample_point_from_polygon(gdf.geometry[0]), Point)
def test_random_sample_point_from_multipolygon():
df = pd.DataFrame({1:[1,2,3], 2: [4,5,6]})
p1 = Polygon(((0,0), (1,0), (1,1), (0,1)))
p2 = Polygon(((10,10), (11,10), (11,11), (10,11)))
poly = MultiPolygon([p1, p2])
gdf = gp.GeoDataFrame(df, geometry=[poly]*3)
sampler = spatial.RandomPointSampler(gdf.geometry, patience=0)
assert isinstance(sampler.sample_point_from_multipolygon(gdf.geometry[0]), Point)
def test_random_sample_point_from_multilinestring():
df = pd.DataFrame({1:[1,2,3], 2: [4,5,6]})
p1 = LinearRing(((0,0), (1,0), (1,1), (0,1)))
p2 = LineString(((10,10), (11,10), (11,11), (10,11)))
poly = MultiLineString([p1, p2])
gdf = gp.GeoDataFrame(df, geometry=[poly]*3)
sampler = spatial.RandomPointSampler(gdf.geometry, patience=0)
assert isinstance(sampler.sample_point_from_multilinestring(gdf.geometry[0]), Point)
def test_random_sample_point_from_multipoint():
df = | pd.DataFrame({1:[1,2,3], 2: [4,5,6]}) | pandas.DataFrame |
from collections import deque
from datetime import datetime
import operator
import re
import numpy as np
import pytest
import pytz
import pandas as pd
from pandas import DataFrame, MultiIndex, Series
import pandas._testing as tm
import pandas.core.common as com
from pandas.core.computation.expressions import _MIN_ELEMENTS, _NUMEXPR_INSTALLED
from pandas.tests.frame.common import _check_mixed_float, _check_mixed_int
# -------------------------------------------------------------------
# Comparisons
class TestFrameComparisons:
# Specifically _not_ flex-comparisons
def test_frame_in_list(self):
# GH#12689 this should raise at the DataFrame level, not blocks
df = pd.DataFrame(np.random.randn(6, 4), columns=list("ABCD"))
msg = "The truth value of a DataFrame is ambiguous"
with pytest.raises(ValueError, match=msg):
df in [None]
def test_comparison_invalid(self):
def check(df, df2):
for (x, y) in [(df, df2), (df2, df)]:
# we expect the result to match Series comparisons for
# == and !=, inequalities should raise
result = x == y
expected = pd.DataFrame(
{col: x[col] == y[col] for col in x.columns},
index=x.index,
columns=x.columns,
)
tm.assert_frame_equal(result, expected)
result = x != y
expected = pd.DataFrame(
{col: x[col] != y[col] for col in x.columns},
index=x.index,
columns=x.columns,
)
tm.assert_frame_equal(result, expected)
msgs = [
r"Invalid comparison between dtype=datetime64\[ns\] and ndarray",
"invalid type promotion",
(
# npdev 1.20.0
r"The DTypes <class 'numpy.dtype\[.*\]'> and "
r"<class 'numpy.dtype\[.*\]'> do not have a common DType."
),
]
msg = "|".join(msgs)
with pytest.raises(TypeError, match=msg):
x >= y
with pytest.raises(TypeError, match=msg):
x > y
with pytest.raises(TypeError, match=msg):
x < y
with pytest.raises(TypeError, match=msg):
x <= y
# GH4968
# invalid date/int comparisons
df = pd.DataFrame(np.random.randint(10, size=(10, 1)), columns=["a"])
df["dates"] = pd.date_range("20010101", periods=len(df))
df2 = df.copy()
df2["dates"] = df["a"]
check(df, df2)
df = pd.DataFrame(np.random.randint(10, size=(10, 2)), columns=["a", "b"])
df2 = pd.DataFrame(
{
"a": pd.date_range("20010101", periods=len(df)),
"b": pd.date_range("20100101", periods=len(df)),
}
)
check(df, df2)
def test_timestamp_compare(self):
# make sure we can compare Timestamps on the right AND left hand side
# GH#4982
df = pd.DataFrame(
{
"dates1": pd.date_range("20010101", periods=10),
"dates2": pd.date_range("20010102", periods=10),
"intcol": np.random.randint(1000000000, size=10),
"floatcol": np.random.randn(10),
"stringcol": list(tm.rands(10)),
}
)
df.loc[np.random.rand(len(df)) > 0.5, "dates2"] = pd.NaT
ops = {"gt": "lt", "lt": "gt", "ge": "le", "le": "ge", "eq": "eq", "ne": "ne"}
for left, right in ops.items():
left_f = getattr(operator, left)
right_f = getattr(operator, right)
# no nats
if left in ["eq", "ne"]:
expected = left_f(df, pd.Timestamp("20010109"))
result = right_f(pd.Timestamp("20010109"), df)
tm.assert_frame_equal(result, expected)
else:
msg = (
"'(<|>)=?' not supported between "
"instances of 'numpy.ndarray' and 'Timestamp'"
)
with pytest.raises(TypeError, match=msg):
left_f(df, pd.Timestamp("20010109"))
with pytest.raises(TypeError, match=msg):
right_f(pd.Timestamp("20010109"), df)
# nats
expected = left_f(df, pd.Timestamp("nat"))
result = right_f(pd.Timestamp("nat"), df)
tm.assert_frame_equal(result, expected)
def test_mixed_comparison(self):
# GH#13128, GH#22163 != datetime64 vs non-dt64 should be False,
# not raise TypeError
# (this appears to be fixed before GH#22163, not sure when)
df = pd.DataFrame([["1989-08-01", 1], ["1989-08-01", 2]])
other = pd.DataFrame([["a", "b"], ["c", "d"]])
result = df == other
assert not result.any().any()
result = df != other
assert result.all().all()
def test_df_boolean_comparison_error(self):
# GH#4576, GH#22880
# comparing DataFrame against list/tuple with len(obj) matching
# len(df.columns) is supported as of GH#22800
df = pd.DataFrame(np.arange(6).reshape((3, 2)))
expected = pd.DataFrame([[False, False], [True, False], [False, False]])
result = df == (2, 2)
tm.assert_frame_equal(result, expected)
result = df == [2, 2]
tm.assert_frame_equal(result, expected)
def test_df_float_none_comparison(self):
df = pd.DataFrame(
np.random.randn(8, 3), index=range(8), columns=["A", "B", "C"]
)
result = df.__eq__(None)
assert not result.any().any()
def test_df_string_comparison(self):
df = pd.DataFrame([{"a": 1, "b": "foo"}, {"a": 2, "b": "bar"}])
mask_a = df.a > 1
tm.assert_frame_equal(df[mask_a], df.loc[1:1, :])
tm.assert_frame_equal(df[-mask_a], df.loc[0:0, :])
mask_b = df.b == "foo"
tm.assert_frame_equal(df[mask_b], df.loc[0:0, :])
tm.assert_frame_equal(df[-mask_b], df.loc[1:1, :])
class TestFrameFlexComparisons:
# TODO: test_bool_flex_frame needs a better name
def test_bool_flex_frame(self):
data = np.random.randn(5, 3)
other_data = np.random.randn(5, 3)
df = pd.DataFrame(data)
other = pd.DataFrame(other_data)
ndim_5 = np.ones(df.shape + (1, 3))
# Unaligned
def _check_unaligned_frame(meth, op, df, other):
part_o = other.loc[3:, 1:].copy()
rs = meth(part_o)
xp = op(df, part_o.reindex(index=df.index, columns=df.columns))
tm.assert_frame_equal(rs, xp)
# DataFrame
assert df.eq(df).values.all()
assert not df.ne(df).values.any()
for op in ["eq", "ne", "gt", "lt", "ge", "le"]:
f = getattr(df, op)
o = getattr(operator, op)
# No NAs
tm.assert_frame_equal(f(other), o(df, other))
_check_unaligned_frame(f, o, df, other)
# ndarray
tm.assert_frame_equal(f(other.values), o(df, other.values))
# scalar
tm.assert_frame_equal(f(0), o(df, 0))
# NAs
msg = "Unable to coerce to Series/DataFrame"
tm.assert_frame_equal(f(np.nan), o(df, np.nan))
with pytest.raises(ValueError, match=msg):
f(ndim_5)
# Series
def _test_seq(df, idx_ser, col_ser):
idx_eq = df.eq(idx_ser, axis=0)
col_eq = df.eq(col_ser)
idx_ne = df.ne(idx_ser, axis=0)
col_ne = df.ne(col_ser)
tm.assert_frame_equal(col_eq, df == pd.Series(col_ser))
tm.assert_frame_equal(col_eq, -col_ne)
tm.assert_frame_equal(idx_eq, -idx_ne)
tm.assert_frame_equal(idx_eq, df.T.eq(idx_ser).T)
tm.assert_frame_equal(col_eq, df.eq(list(col_ser)))
tm.assert_frame_equal(idx_eq, df.eq(pd.Series(idx_ser), axis=0))
tm.assert_frame_equal(idx_eq, df.eq(list(idx_ser), axis=0))
idx_gt = df.gt(idx_ser, axis=0)
col_gt = df.gt(col_ser)
idx_le = df.le(idx_ser, axis=0)
col_le = df.le(col_ser)
tm.assert_frame_equal(col_gt, df > pd.Series(col_ser))
tm.assert_frame_equal(col_gt, -col_le)
tm.assert_frame_equal(idx_gt, -idx_le)
tm.assert_frame_equal(idx_gt, df.T.gt(idx_ser).T)
idx_ge = df.ge(idx_ser, axis=0)
col_ge = df.ge(col_ser)
idx_lt = df.lt(idx_ser, axis=0)
col_lt = df.lt(col_ser)
tm.assert_frame_equal(col_ge, df >= pd.Series(col_ser))
tm.assert_frame_equal(col_ge, -col_lt)
tm.assert_frame_equal(idx_ge, -idx_lt)
tm.assert_frame_equal(idx_ge, df.T.ge(idx_ser).T)
idx_ser = pd.Series(np.random.randn(5))
col_ser = pd.Series(np.random.randn(3))
_test_seq(df, idx_ser, col_ser)
# list/tuple
_test_seq(df, idx_ser.values, col_ser.values)
# NA
df.loc[0, 0] = np.nan
rs = df.eq(df)
assert not rs.loc[0, 0]
rs = df.ne(df)
assert rs.loc[0, 0]
rs = df.gt(df)
assert not rs.loc[0, 0]
rs = df.lt(df)
assert not rs.loc[0, 0]
rs = df.ge(df)
assert not rs.loc[0, 0]
rs = df.le(df)
assert not rs.loc[0, 0]
def test_bool_flex_frame_complex_dtype(self):
# complex
arr = np.array([np.nan, 1, 6, np.nan])
arr2 = np.array([2j, np.nan, 7, None])
df = pd.DataFrame({"a": arr})
df2 = pd.DataFrame({"a": arr2})
msg = "|".join(
[
"'>' not supported between instances of '.*' and 'complex'",
r"unorderable types: .*complex\(\)", # PY35
]
)
with pytest.raises(TypeError, match=msg):
# inequalities are not well-defined for complex numbers
df.gt(df2)
with pytest.raises(TypeError, match=msg):
# regression test that we get the same behavior for Series
df["a"].gt(df2["a"])
with pytest.raises(TypeError, match=msg):
# Check that we match numpy behavior here
df.values > df2.values
rs = df.ne(df2)
assert rs.values.all()
arr3 = np.array([2j, np.nan, None])
df3 = pd.DataFrame({"a": arr3})
with pytest.raises(TypeError, match=msg):
# inequalities are not well-defined for complex numbers
df3.gt(2j)
with pytest.raises(TypeError, match=msg):
# regression test that we get the same behavior for Series
df3["a"].gt(2j)
with pytest.raises(TypeError, match=msg):
# Check that we match numpy behavior here
df3.values > 2j
def test_bool_flex_frame_object_dtype(self):
# corner, dtype=object
df1 = pd.DataFrame({"col": ["foo", np.nan, "bar"]})
df2 = pd.DataFrame({"col": ["foo", datetime.now(), "bar"]})
result = df1.ne(df2)
exp = pd.DataFrame({"col": [False, True, False]})
tm.assert_frame_equal(result, exp)
def test_flex_comparison_nat(self):
# GH 15697, GH 22163 df.eq(pd.NaT) should behave like df == pd.NaT,
# and _definitely_ not be NaN
df = pd.DataFrame([pd.NaT])
result = df == pd.NaT
# result.iloc[0, 0] is a np.bool_ object
assert result.iloc[0, 0].item() is False
result = df.eq(pd.NaT)
assert result.iloc[0, 0].item() is False
result = df != pd.NaT
assert result.iloc[0, 0].item() is True
result = df.ne(pd.NaT)
assert result.iloc[0, 0].item() is True
@pytest.mark.parametrize("opname", ["eq", "ne", "gt", "lt", "ge", "le"])
def test_df_flex_cmp_constant_return_types(self, opname):
# GH 15077, non-empty DataFrame
df = pd.DataFrame({"x": [1, 2, 3], "y": [1.0, 2.0, 3.0]})
const = 2
result = getattr(df, opname)(const).dtypes.value_counts()
tm.assert_series_equal(result, pd.Series([2], index=[np.dtype(bool)]))
@pytest.mark.parametrize("opname", ["eq", "ne", "gt", "lt", "ge", "le"])
def test_df_flex_cmp_constant_return_types_empty(self, opname):
# GH 15077 empty DataFrame
df = pd.DataFrame({"x": [1, 2, 3], "y": [1.0, 2.0, 3.0]})
const = 2
empty = df.iloc[:0]
result = getattr(empty, opname)(const).dtypes.value_counts()
tm.assert_series_equal(result, pd.Series([2], index=[np.dtype(bool)]))
def test_df_flex_cmp_ea_dtype_with_ndarray_series(self):
ii = pd.IntervalIndex.from_breaks([1, 2, 3])
df = pd.DataFrame({"A": ii, "B": ii})
ser = pd.Series([0, 0])
res = df.eq(ser, axis=0)
expected = pd.DataFrame({"A": [False, False], "B": [False, False]})
tm.assert_frame_equal(res, expected)
ser2 = pd.Series([1, 2], index=["A", "B"])
res2 = df.eq(ser2, axis=1)
tm.assert_frame_equal(res2, expected)
# -------------------------------------------------------------------
# Arithmetic
class TestFrameFlexArithmetic:
def test_floordiv_axis0(self):
# make sure we df.floordiv(ser, axis=0) matches column-wise result
arr = np.arange(3)
ser = pd.Series(arr)
df = pd.DataFrame({"A": ser, "B": ser})
result = df.floordiv(ser, axis=0)
expected = pd.DataFrame({col: df[col] // ser for col in df.columns})
tm.assert_frame_equal(result, expected)
result2 = df.floordiv(ser.values, axis=0)
tm.assert_frame_equal(result2, expected)
@pytest.mark.skipif(not _NUMEXPR_INSTALLED, reason="numexpr not installed")
@pytest.mark.parametrize("opname", ["floordiv", "pow"])
def test_floordiv_axis0_numexpr_path(self, opname):
# case that goes through numexpr and has to fall back to masked_arith_op
op = getattr(operator, opname)
arr = np.arange(_MIN_ELEMENTS + 100).reshape(_MIN_ELEMENTS // 100 + 1, -1) * 100
df = pd.DataFrame(arr)
df["C"] = 1.0
ser = df[0]
result = getattr(df, opname)(ser, axis=0)
expected = pd.DataFrame({col: op(df[col], ser) for col in df.columns})
tm.assert_frame_equal(result, expected)
result2 = getattr(df, opname)(ser.values, axis=0)
tm.assert_frame_equal(result2, expected)
def test_df_add_td64_columnwise(self):
# GH 22534 Check that column-wise addition broadcasts correctly
dti = pd.date_range("2016-01-01", periods=10)
tdi = pd.timedelta_range("1", periods=10)
tser = pd.Series(tdi)
df = pd.DataFrame({0: dti, 1: tdi})
result = df.add(tser, axis=0)
expected = pd.DataFrame({0: dti + tdi, 1: tdi + tdi})
tm.assert_frame_equal(result, expected)
def test_df_add_flex_filled_mixed_dtypes(self):
# GH 19611
dti = pd.date_range("2016-01-01", periods=3)
ser = pd.Series(["1 Day", "NaT", "2 Days"], dtype="timedelta64[ns]")
df = pd.DataFrame({"A": dti, "B": ser})
other = pd.DataFrame({"A": ser, "B": ser})
fill = pd.Timedelta(days=1).to_timedelta64()
result = df.add(other, fill_value=fill)
expected = pd.DataFrame(
{
"A": pd.Series(
["2016-01-02", "2016-01-03", "2016-01-05"], dtype="datetime64[ns]"
),
"B": ser * 2,
}
)
tm.assert_frame_equal(result, expected)
def test_arith_flex_frame(
self, all_arithmetic_operators, float_frame, mixed_float_frame
):
# one instance of parametrized fixture
op = all_arithmetic_operators
def f(x, y):
# r-versions not in operator-stdlib; get op without "r" and invert
if op.startswith("__r"):
return getattr(operator, op.replace("__r", "__"))(y, x)
return getattr(operator, op)(x, y)
result = getattr(float_frame, op)(2 * float_frame)
expected = f(float_frame, 2 * float_frame)
tm.assert_frame_equal(result, expected)
# vs mix float
result = getattr(mixed_float_frame, op)(2 * mixed_float_frame)
expected = f(mixed_float_frame, 2 * mixed_float_frame)
tm.assert_frame_equal(result, expected)
_check_mixed_float(result, dtype=dict(C=None))
@pytest.mark.parametrize("op", ["__add__", "__sub__", "__mul__"])
def test_arith_flex_frame_mixed(
self, op, int_frame, mixed_int_frame, mixed_float_frame
):
f = getattr(operator, op)
# vs mix int
result = getattr(mixed_int_frame, op)(2 + mixed_int_frame)
expected = f(mixed_int_frame, 2 + mixed_int_frame)
# no overflow in the uint
dtype = None
if op in ["__sub__"]:
dtype = dict(B="uint64", C=None)
elif op in ["__add__", "__mul__"]:
dtype = dict(C=None)
tm.assert_frame_equal(result, expected)
_check_mixed_int(result, dtype=dtype)
# vs mix float
result = getattr(mixed_float_frame, op)(2 * mixed_float_frame)
expected = f(mixed_float_frame, 2 * mixed_float_frame)
tm.assert_frame_equal(result, expected)
_check_mixed_float(result, dtype=dict(C=None))
# vs plain int
result = getattr(int_frame, op)(2 * int_frame)
expected = f(int_frame, 2 * int_frame)
tm.assert_frame_equal(result, expected)
def test_arith_flex_frame_raise(self, all_arithmetic_operators, float_frame):
# one instance of parametrized fixture
op = all_arithmetic_operators
# Check that arrays with dim >= 3 raise
for dim in range(3, 6):
arr = np.ones((1,) * dim)
msg = "Unable to coerce to Series/DataFrame"
with pytest.raises(ValueError, match=msg):
getattr(float_frame, op)(arr)
def test_arith_flex_frame_corner(self, float_frame):
const_add = float_frame.add(1)
tm.assert_frame_equal(const_add, float_frame + 1)
# corner cases
result = float_frame.add(float_frame[:0])
tm.assert_frame_equal(result, float_frame * np.nan)
result = float_frame[:0].add(float_frame)
tm.assert_frame_equal(result, float_frame * np.nan)
with pytest.raises(NotImplementedError, match="fill_value"):
float_frame.add(float_frame.iloc[0], fill_value=3)
with pytest.raises(NotImplementedError, match="fill_value"):
float_frame.add(float_frame.iloc[0], axis="index", fill_value=3)
def test_arith_flex_series(self, simple_frame):
df = simple_frame
row = df.xs("a")
col = df["two"]
# after arithmetic refactor, add truediv here
ops = ["add", "sub", "mul", "mod"]
for op in ops:
f = getattr(df, op)
op = getattr(operator, op)
tm.assert_frame_equal(f(row), op(df, row))
tm.assert_frame_equal(f(col, axis=0), op(df.T, col).T)
# special case for some reason
tm.assert_frame_equal(df.add(row, axis=None), df + row)
# cases which will be refactored after big arithmetic refactor
tm.assert_frame_equal(df.div(row), df / row)
tm.assert_frame_equal(df.div(col, axis=0), (df.T / col).T)
# broadcasting issue in GH 7325
df = pd.DataFrame(np.arange(3 * 2).reshape((3, 2)), dtype="int64")
expected = pd.DataFrame([[np.nan, np.inf], [1.0, 1.5], [1.0, 1.25]])
result = df.div(df[0], axis="index")
tm.assert_frame_equal(result, expected)
df = pd.DataFrame(np.arange(3 * 2).reshape((3, 2)), dtype="float64")
expected = pd.DataFrame([[np.nan, np.inf], [1.0, 1.5], [1.0, 1.25]])
result = df.div(df[0], axis="index")
tm.assert_frame_equal(result, expected)
def test_arith_flex_zero_len_raises(self):
# GH 19522 passing fill_value to frame flex arith methods should
# raise even in the zero-length special cases
ser_len0 = pd.Series([], dtype=object)
df_len0 = pd.DataFrame(columns=["A", "B"])
df = pd.DataFrame([[1, 2], [3, 4]], columns=["A", "B"])
with pytest.raises(NotImplementedError, match="fill_value"):
df.add(ser_len0, fill_value="E")
with pytest.raises(NotImplementedError, match="fill_value"):
df_len0.sub(df["A"], axis=None, fill_value=3)
def test_flex_add_scalar_fill_value(self):
# GH#12723
dat = np.array([0, 1, np.nan, 3, 4, 5], dtype="float")
df = pd.DataFrame({"foo": dat}, index=range(6))
exp = df.fillna(0).add(2)
res = df.add(2, fill_value=0)
tm.assert_frame_equal(res, exp)
class TestFrameArithmetic:
def test_td64_op_nat_casting(self):
# Make sure we don't accidentally treat timedelta64(NaT) as datetime64
# when calling dispatch_to_series in DataFrame arithmetic
ser = pd.Series(["NaT", "NaT"], dtype="timedelta64[ns]")
df = pd.DataFrame([[1, 2], [3, 4]])
result = df * ser
expected = pd.DataFrame({0: ser, 1: ser})
tm.assert_frame_equal(result, expected)
def test_df_add_2d_array_rowlike_broadcasts(self):
# GH#23000
arr = np.arange(6).reshape(3, 2)
df = pd.DataFrame(arr, columns=[True, False], index=["A", "B", "C"])
rowlike = arr[[1], :] # shape --> (1, ncols)
assert rowlike.shape == (1, df.shape[1])
expected = pd.DataFrame(
[[2, 4], [4, 6], [6, 8]],
columns=df.columns,
index=df.index,
# specify dtype explicitly to avoid failing
# on 32bit builds
dtype=arr.dtype,
)
result = df + rowlike
tm.assert_frame_equal(result, expected)
result = rowlike + df
tm.assert_frame_equal(result, expected)
def test_df_add_2d_array_collike_broadcasts(self):
# GH#23000
arr = np.arange(6).reshape(3, 2)
df = pd.DataFrame(arr, columns=[True, False], index=["A", "B", "C"])
collike = arr[:, [1]] # shape --> (nrows, 1)
assert collike.shape == (df.shape[0], 1)
expected = pd.DataFrame(
[[1, 2], [5, 6], [9, 10]],
columns=df.columns,
index=df.index,
# specify dtype explicitly to avoid failing
# on 32bit builds
dtype=arr.dtype,
)
result = df + collike
tm.assert_frame_equal(result, expected)
result = collike + df
tm.assert_frame_equal(result, expected)
def test_df_arith_2d_array_rowlike_broadcasts(self, all_arithmetic_operators):
# GH#23000
opname = all_arithmetic_operators
arr = np.arange(6).reshape(3, 2)
df = pd.DataFrame(arr, columns=[True, False], index=["A", "B", "C"])
rowlike = arr[[1], :] # shape --> (1, ncols)
assert rowlike.shape == (1, df.shape[1])
exvals = [
getattr(df.loc["A"], opname)(rowlike.squeeze()),
getattr(df.loc["B"], opname)(rowlike.squeeze()),
getattr(df.loc["C"], opname)(rowlike.squeeze()),
]
expected = pd.DataFrame(exvals, columns=df.columns, index=df.index)
result = getattr(df, opname)(rowlike)
tm.assert_frame_equal(result, expected)
def test_df_arith_2d_array_collike_broadcasts(self, all_arithmetic_operators):
# GH#23000
opname = all_arithmetic_operators
arr = np.arange(6).reshape(3, 2)
df = pd.DataFrame(arr, columns=[True, False], index=["A", "B", "C"])
collike = arr[:, [1]] # shape --> (nrows, 1)
assert collike.shape == (df.shape[0], 1)
exvals = {
True: getattr(df[True], opname)(collike.squeeze()),
False: getattr(df[False], opname)(collike.squeeze()),
}
dtype = None
if opname in ["__rmod__", "__rfloordiv__"]:
# Series ops may return mixed int/float dtypes in cases where
# DataFrame op will return all-float. So we upcast `expected`
dtype = np.common_type(*[x.values for x in exvals.values()])
expected = pd.DataFrame(exvals, columns=df.columns, index=df.index, dtype=dtype)
result = getattr(df, opname)(collike)
tm.assert_frame_equal(result, expected)
def test_df_bool_mul_int(self):
# GH 22047, GH 22163 multiplication by 1 should result in int dtype,
# not object dtype
df = pd.DataFrame([[False, True], [False, False]])
result = df * 1
# On appveyor this comes back as np.int32 instead of np.int64,
# so we check dtype.kind instead of just dtype
kinds = result.dtypes.apply(lambda x: x.kind)
assert (kinds == "i").all()
result = 1 * df
kinds = result.dtypes.apply(lambda x: x.kind)
assert (kinds == "i").all()
def test_arith_mixed(self):
left = pd.DataFrame({"A": ["a", "b", "c"], "B": [1, 2, 3]})
result = left + left
expected = pd.DataFrame({"A": ["aa", "bb", "cc"], "B": [2, 4, 6]})
tm.assert_frame_equal(result, expected)
def test_arith_getitem_commute(self):
df = | pd.DataFrame({"A": [1.1, 3.3], "B": [2.5, -3.9]}) | pandas.DataFrame |
"""Module with functions to read time series with observations from knmi.
The main functions to obtain a time series are:
- get_knmi_timeseries_xy: obtain a knmi station based on the xy location
- get_knmi_timeseries_stn: obtain a knmi station based on the station number
There is some ugly code involved if you want to obtain precipitation data.
This code combines the data from the meteo_stations and the neerslagstations.
"""
import datetime as dt
import logging
import os
import re
import tempfile
from io import StringIO
import numpy as np
import pandas as pd
import requests
from .. import util
logger = logging.getLogger(__name__)
URL_DAILY_NEERSLAG = 'https://www.daggegevens.knmi.nl/klimatologie/monv/reeksen'
URL_DAILY_METEO = 'https://www.daggegevens.knmi.nl/klimatologie/daggegevens'
URL_HOURLY_METEO = 'https://www.daggegevens.knmi.nl/klimatologie/uurgegevens'
def get_stations(meteo_var='RH', use_precipitation_stn=True):
"""get knmi stations from json files according to variable.
Parameters
----------
meteo_var : str, optional
type of meteodata, by default 'RH'
use_precipitation_stn : bool, optional
if True a combination of neerslagstations and meteo stations are used.
If False only meteo stations are used to obtain precipitation data.
Default is True.
Returns
-------
pandas DataFrame with stations, names and coordinates (Lat/Lon & RD)
"""
dir_path = os.path.dirname(os.path.realpath(__file__))
fname = "../data/knmi_meteostation.json"
stations = pd.read_json(os.path.join(dir_path, fname))
# in case we want precipitation we use both meteo and neerslagstations
if (meteo_var == "RH") and use_precipitation_stn:
fname = "../data/knmi_neerslagstation.json"
stations2 = pd.read_json(os.path.join(dir_path, fname))
stations2.index = stations2.index.astype(str) + '_neerslag_station'
stations = pd.concat([stations, stations2], axis=0)
if meteo_var == 'PG':
        # no air pressure is measured in Ell
stations.drop(377, inplace=True)
elif meteo_var == 'EV24':
        # no evaporation is measured in Woensdrecht
stations.drop(340, inplace=True)
return stations
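# Example usage (a sketch; assumes the bundled knmi_*.json metadata files are present):
# stations = get_stations(meteo_var='EV24', use_precipitation_stn=False)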
def get_station_name(stn, stations):
"""Returns the station name from a KNMI station.
Modifies the station name in such a way that a valid url can be obtained.
Parameters
----------
stn : int
station number.
stations : pandas DataFrame
DataFrame with the station metadata.
Returns
-------
str
Name of the station.
"""
stn_name = stations.loc[stn, 'naam']
stn_name = stn_name.upper().replace(' ', '-')
stn_name = stn_name.replace('(', '').replace(')', '')
return stn_name
def get_nearest_stations_xy(x, y, meteo_var, use_precipitation_stn=True,
n=1, stations=None, ignore=None):
"""find the KNMI stations that measure 'variable' closest to the x, y
coordinates.
Parameters
----------
x : int or float
x coordinate in RD
y : int or float
        y coordinate in RD
meteo_var : str
measurement variable e.g. 'RH' or 'EV24'
use_precipitation_stn : bool, optional
if True a combination of neerslagstations and meteo stations are used.
If False only meteo stations are used to obtain precipitation data.
Default is True.
n : int, optional
number of stations you want to return. The default is 1.
stations : pd.DataFrame, optional
if None stations will be obtained using the get_stations function.
The default is None.
ignore : list, optional
list of stations to ignore. The default is None.
Returns
-------
list
station numbers.
"""
if stations is None:
stations = get_stations(meteo_var=meteo_var,
use_precipitation_stn=use_precipitation_stn)
if ignore is not None:
stations.drop(ignore, inplace=True)
if stations.empty:
return None
distance = np.sqrt((stations.x - x)**2 + (stations.y - y)**2)
stns = distance.nsmallest(n).index.to_list()
return stns
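# Example usage (a sketch; the RD coordinates below are illustrative):
# stns = get_nearest_stations_xy(150000, 450000, meteo_var='EV24', n=2)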
def get_nearest_station_df(locations, xcol='x', ycol='y', stations=None,
use_precipitation_stn=True,
meteo_var="RH", ignore=None):
"""find the KNMI stations that measure 'meteo_var' closest to the
coordinates in 'locations'.
Parameters
----------
locations : pd.DataFrame
x and y coordinates
xcol : str
name of the column in the locations dataframe with the x values
ycol : str
name of the column in the locations dataframe with the y values
stations : pd.DataFrame, optional
if None stations will be obtained using the get_stations function.
The default is None.
use_precipitation_stn : bool, optional
if True a combination of neerslagstations and meteo stations are used.
If False only meteo stations are used to obtain precipitation data.
Default is True.
meteo_var : str
measurement variable e.g. 'RH' or 'EV24'
ignore : list, optional
list of stations to ignore. The default is None.
Returns
-------
stns : list
station numbers.
"""
if stations is None:
stations = get_stations(meteo_var=meteo_var,
use_precipitation_stn=use_precipitation_stn)
if ignore is not None:
stations.drop(ignore, inplace=True)
if stations.empty:
return None
xo = pd.to_numeric(locations[xcol])
xt = pd.to_numeric(stations.x)
yo = pd.to_numeric(locations[ycol])
yt = pd.to_numeric(stations.y)
xh, xi = np.meshgrid(xt, xo)
yh, yi = np.meshgrid(yt, yo)
distances = pd.DataFrame(np.sqrt((xh - xi) ** 2 + (yh - yi) ** 2),
index=locations.index,
columns=stations.index)
stns = distances.idxmin(axis=1).unique()
return stns
def get_nearest_station_grid(xmid, ymid, stations=None, meteo_var="RH",
use_precipitation_stn=True,
ignore=None):
"""find the KNMI stations that measure 'meteo_var' closest to all cells in
a grid.
Parameters
----------
xmid : np.array
x coördinates of the cell centers of your grid shape(ncol)
ymid : np.array
y coördinates of the cell centers of your grid shape(nrow)
stations : pd.DataFrame, optional
if None stations will be obtained using the get_stations function.
The default is None.
meteo_var : str
measurement variable e.g. 'RH' or 'EV24'
use_precipitation_stn : bool, optional
if True a combination of neerslagstations and meteo stations are used.
If False only meteo stations are used to obtain precipitation data.
Default is True.
ignore : list, optional
list of stations to ignore. The default is None.
Returns
-------
stns : list
station numbers.
Notes
-----
assumes you have a structured rectangular grid.
"""
mg = np.meshgrid(xmid, ymid)
locations = pd.DataFrame(data={'x': mg[0].ravel(),
'y': mg[1].ravel()})
stns = get_nearest_station_df(locations, xcol='x', ycol='y',
stations=stations,
use_precipitation_stn=use_precipitation_stn,
meteo_var=meteo_var,
ignore=ignore)
return stns
def _start_end_to_datetime(start, end):
"""convert start and endtime to datetime.
Parameters
----------
start : str, datetime, None
start time
end : str, datetime, None
        end time
    Returns
    -------
    start : pd.Timestamp
        start time
    end : pd.Timestamp
end time
"""
if start is None:
start = pd.Timestamp(pd.Timestamp.today().year - 1, 1, 1)
else:
start = pd.to_datetime(start)
if end is None:
end = pd.Timestamp.today() - pd.Timedelta(1, unit='D')
else:
end = | pd.to_datetime(end) | pandas.to_datetime |
import datetime
import math
import os
import time
import HPGe_Calibration as clb
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import uncertainties
from lmfit.models import GaussianModel
from mpl_toolkits.axes_grid.anchored_artists import AnchoredText
from scipy.optimize import curve_fit
today = datetime.datetime.now().strftime('%Y%m%d_%H%M%S')
print(today)
plt.rc('text', usetex=True)
plt.rc('font', family='serif')
plt.rcParams['text.latex.preamble'] = [
r'\usepackage{siunitx}',
r'\usepackage{isotope}',
r'\sisetup{detect-all}',
r'\usepackage{fourier}',
]
plt.rcParams['ps.usedistiller'] = 'xpdf'
plt.rcParams['ps.distiller.res'] = '16000'
energymatch = []
mystmatch = []
energymatchuncertainty = []
mystmatchuncertainty = []
nuclidematch = []
intensitymatch = []
chisss = []
formattednuclide = []
def overlap(point1, point2, uncert1, uncert2, sigma):
lower1 = point1 - uncert1 * sigma
upper1 = point1 + uncert1 * sigma
lower2 = point2 - uncert2 * sigma
upper2 = point2 + uncert2 * sigma
index1 = np.where((lower1 <= point2) and (upper1 >= point2) or (lower2 <= point1) and (upper2 >= point1))
if index1[0] == 0:
return index1[0]
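# Worked example (illustrative values): overlap(10.0, 10.4, 0.1, 0.2, sigma=2)
# -> point2's 2-sigma window is [10.0, 10.8], which contains point1, so a match is signalled
# (the caller tests the return value against 0); with no overlap the function falls through and returns None.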
def datamatch(measured, known, sigma, type, xraySigma):
for i in range(0, len(known)):
for k in range(0, len(measured)):
b = None
if i <= 329:
b = overlap(known.at[i, "Energy"], measured.at[k, "Mean"], known.at[i, "EnergyUncert"], measured.at[k, "MeanError"], sigma)
if i > 329:
b = overlap(known.at[i, "Energy"], measured.at[k, "Mean"], known.at[i, "EnergyUncert"], measured.at[k, "MeanError"], xraySigma)
if b == 0:
energymatch.append(known.at[i, "Energy"])
mystmatch.append(measured.at[k, "Mean"])
energymatchuncertainty.append(known.at[i, "EnergyUncert"])
mystmatchuncertainty.append(measured.at[k, "MeanError"])
nuclidematch.append(known.at[i, "Source"])
intensitymatch.append(measured.at[k, "Amplitude"])
chisss.append(measured.at[k, "ReducedChisq"])
num1 = known.at[i, "Source"].split('-')
try:
num2 = num1[2].split(' ')
formattednuclide.append(r'$\isotope[{}][{}]{{{}}}$ {} {}'.format(num2[0], num1[0], num1[1], num2[1], num2[2]))
except IndexError:
try:
formattednuclide.append(r'$\isotope[{}][{}]{{{}}}$'.format(num1[2], num1[0], num1[1]))
except IndexError:
formattednuclide.append(known.at[i, "Source"])
continue
match_frame = pd.DataFrame({'Accepted energy': energymatch, 'Accepted uncertainty': energymatchuncertainty,
'Measured energy': mystmatch, 'Measured uncertainty': mystmatchuncertainty,
'Nuclide': formattednuclide, 'Height': intensitymatch})
match_frame.to_csv('Matches/oldMatches/{}_{}.csv'.format(type, today))
match_frame.to_csv('Matches/{}.csv'.format(type))
with open('Latex/OldVersions/{}Match_{}.tex'.format(type, today), 'w') as texfile:
texfile.write('\\documentclass[a4paper,twoside]{article}\n')
texfile.write('\\usepackage[margin=0in]{geometry}\n')
texfile.write('\\usepackage{isotope}\n')
texfile.write('\\usepackage{ltxtable}\n')
texfile.write('\\usepackage{ltablex}\n')
texfile.write('\\usepackage{longtable}\n')
texfile.write('\\pagenumbering{gobble}\n')
texfile.write('\\begin{document}\n')
texfile.write(r'\ {\def\arraystretch{0.9}\tabcolsep=3pt')
texfile.write('\\\n')
texfile.write(r'\begin{tabularx}{\textwidth}{XXXXX}')
texfile.write('\\\n')
row_fields = ('$E_{Measured}$ (keV)', '$E_{Accepted}$ (keV)',
'Source', 'RI', '$\chi^2$')
texfile.write(' {} & {} & {} & {} & {} \\\\ \n'.format(row_fields[0], row_fields[1],
row_fields[2], row_fields[3],
row_fields[4]))
texfile.write('\hline ')
for i in range(0, len(energymatch)):
mystvals = uncertainties.ufloat(mystmatch[i], mystmatchuncertainty[i])
knownvals = uncertainties.ufloat(energymatch[i], energymatchuncertainty[i])
texfile.write(' ${:.2uL}$ & ${:.2uL}$ & {} & {:.2f} & {:.3f} \\\\ \n'.format(mystvals, knownvals,
formattednuclide[i],
intensitymatch[i], chisss[i]))
texfile.write('\hline')
texfile.write('\\end{tabularx}\n')
texfile.write('\\ \ }\n')
texfile.write('\\end{document}\n')
items = np.unique(nuclidematch)
match_count = pd.DataFrame(columns=['Isotope', 'Number'])
for item in items:
num = nuclidematch.count(item)
num1 = item.split('-')
try:
num2 = num1[2].split(' ')
isotope = r'$\isotope[{}][{}]{{{}}}$ {} {}'.format(num1[0], num2[0], num1[1], num2[1], num2[2])
except IndexError:
try:
isotope = r'$\isotope[{}][{}]{{{}}}$'.format(num1[0], num1[2], num1[1])
except IndexError:
isotope = item
pass
match_count = match_count.append({'Isotope': isotope, 'Number': num}, ignore_index=True)
with open('Latex/OldVersions/{}MatchList_{}.tex'.format(type, today), 'w') as texfile:
texfile.write('\\documentclass[a4paper,twoside]{article}\n')
texfile.write('\\usepackage[margin=0in]{geometry}\n')
texfile.write('\\usepackage{mathtools}\n')
texfile.write('\\usepackage[math]{cellspace}\n')
texfile.write('\\usepackage{isotope}\n')
texfile.write('\\usepackage{longtable}\n')
texfile.write('\\pagenumbering{gobble}\n')
texfile.write('\\cellspacetoplimit 4pt\n')
texfile.write('\\cellspacebottomlimit 4pt\n')
texfile.write('\n')
texfile.write(r'\setlength{\topmargin}{1in}')
texfile.write('\n')
texfile.write('\\begin{document}\n')
texfile.write(r'''\ {\def\arraystretch{1.2}\tabcolsep=3pt''')
texfile.write('\\ \n')
texfile.write('\\begin{tabular}[h!]{cc} \n')
texfile.write('\hline')
row_fields = ('Isotope', 'Hits')
texfile.write('\\ {} & {} \\\\ \n'.format(row_fields[0], row_fields[1]))
texfile.write('\hline ')
for i in range(0, len(match_count)):
texfile.write(' {} & {} \\\\ \n'.format(
match_count['Isotope'][i], match_count['Number'][i]))
texfile.write('\hline')
texfile.write('\\end{tabular}\n')
texfile.write('\\ }\n')
texfile.write('\\end{document}\n')
with open('Latex/{}Match.tex'.format(type), 'w') as texfile:
texfile.write('\\documentclass[a4paper,twoside]{article}\n')
texfile.write('\\usepackage[margin=0in]{geometry}\n')
texfile.write('\\usepackage{isotope}\n')
texfile.write('\\usepackage{ltxtable}\n')
texfile.write('\\usepackage{ltablex}\n')
texfile.write('\\usepackage{longtable}\n')
texfile.write('\\pagenumbering{gobble}\n')
texfile.write('\\begin{document}\n')
texfile.write(r'\ {\def\arraystretch{0.9}\tabcolsep=3pt')
texfile.write('\\\n')
texfile.write(r'\begin{tabularx}{\textwidth}{XXXXX}')
texfile.write('\\\n')
row_fields = ('$E_{Measured}$ (keV)', '$E_{Accepted}$ (keV)',
'Source', 'RI', '$\chi^2$')
texfile.write(' {} & {} & {} & {} & {} \\\\ \n'.format(row_fields[0], row_fields[1],
row_fields[2], row_fields[3],
row_fields[4]))
texfile.write('\hline ')
for i in range(0, len(energymatch)):
mystvals = uncertainties.ufloat(mystmatch[i], mystmatchuncertainty[i])
knownvals = uncertainties.ufloat(energymatch[i], energymatchuncertainty[i])
texfile.write(' ${:.2uL}$ & ${:.2uL}$ & {} & {:.2f} & {:.3f} \\\\ \n'.format(mystvals, knownvals,
formattednuclide[i],
intensitymatch[i], chisss[i]))
texfile.write('\hline')
texfile.write('\\end{tabularx}\n')
texfile.write('\\ \ }\n')
texfile.write('\\end{document}\n')
items = np.unique(nuclidematch)
match_count = pd.DataFrame(columns=['Isotope', 'Number'])
for item in items:
num = nuclidematch.count(item)
num1 = item.split('-')
try:
num2 = num1[2].split(' ')
isotope = r'$\isotope[{}][{}]{{{}}}$ {} {}'.format(num1[0], num2[0], num1[1], num2[1], num2[2])
except IndexError:
try:
isotope = r'$\isotope[{}][{}]{{{}}}$'.format(num1[0], num1[2], num1[1])
except IndexError:
isotope = item
pass
match_count = match_count.append({'Isotope': isotope, 'Number': num}, ignore_index=True)
with open('Latex/{}MatchList.tex'.format(type), 'w') as texfile:
texfile.write('\\documentclass[a4paper,twoside]{article}\n')
texfile.write('\\usepackage[margin=0in]{geometry}\n')
texfile.write('\\usepackage{mathtools}\n')
texfile.write('\\usepackage[math]{cellspace}\n')
texfile.write('\\usepackage{isotope}\n')
texfile.write('\\usepackage{longtable}\n')
texfile.write('\\pagenumbering{gobble}\n')
texfile.write('\\cellspacetoplimit 4pt\n')
texfile.write('\\cellspacebottomlimit 4pt\n')
texfile.write('\n')
texfile.write(r'\setlength{\topmargin}{1in}')
texfile.write('\n')
texfile.write('\\begin{document}\n')
texfile.write(r'''\ {\def\arraystretch{1.2}\tabcolsep=3pt''')
texfile.write('\\ \n')
texfile.write('\\begin{tabular}[h!]{|c|c|} \n')
texfile.write('\hline')
row_fields = ('Isotope', 'Hits')
texfile.write('\\ {} & {} \\\\ \n'.format(row_fields[0], row_fields[1]))
texfile.write('\hline \hline')
for i in range(0, len(match_count)):
texfile.write('\\ {} & {} \\\\ \n'.format(
match_count['Isotope'][i], match_count['Number'][i]))
texfile.write('\hline')
texfile.write('\\end{tabular}\n')
texfile.write('\\ }\n')
texfile.write('\\end{document}\n')
trinititePeaks = pd.read_csv("Peaks/ShieldedTrinititeFIN2.csv")
acceptedPeaks = pd.read_csv("Energies8.csv", names=["Source", "Energy", "EnergyUncert", "Prob", "ProbUncert", "Notes"])
acceptedPeaks.replace(r'^\s*$', np.nan, inplace=True, regex=True)
acceptedPeaks = acceptedPeaks[pd.notnull(acceptedPeaks['Energy'])]
acceptedPeaks = acceptedPeaks[ | pd.notnull(acceptedPeaks['EnergyUncert']) | pandas.notnull |
"""
# Contains a custom Engine for querying the PATSTAT installation on the Server I am using
# You may want to define a different engine according to your installation
"""
# Required libraries
import pandas as pd
import sqlalchemy
from sqlalchemy import create_engine
import tempfile
import pandas.core.common as com
from pandas.io.sql import SQLTable, pandasSQL_builder
import warnings
# Loading model parameters
import Parameters as param
class CustomEngineForPATSTAT:
"""
    This class defines an engine able to query PATSTAT and retrieve the data via SQLAlchemy
"""
def __init__(self, engine):
"""
Instantiation of the model
"""
self.engine = engine
print('---------------------------------------')
        print('CustomEngineForPATSTAT instantiated.')
print('---------------------------------------')
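    # Example instantiation (a sketch; the connection string below is purely hypothetical):
    # eng = create_engine('postgresql://user:password@host:5432/patstat')
    # patstat_engine = CustomEngineForPATSTAT(eng)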
def _Run_Engine_step_1(self, technology_classes_list, start_date, end_date):
"""
        We retrieve the primary information about the patents, so as to filter them before querying again:
        # 1. IDs
        # 2. Technology class to select them (other technology classes will be with other queries)
        # 3. Family id
        # 4. Filing date
# 5. Number of patent citations at the DOCDB family level
"""
# Unpacking parameters
eng = self.engine
TABLE_PRIMARY_INFO = pd.DataFrame()
for technology_class in technology_classes_list:
            print('-> Retrieving the patent ids corresponding to the technology class', technology_class,
                  'filed between', start_date, 'and', end_date)
t = self.read_sql_tmpfile(param.sql_query_PATENT_PRIMARY_INFO.format(technology_class, start_date, end_date), eng)
TABLE_PRIMARY_INFO = pd.concat([TABLE_PRIMARY_INFO, t])
return TABLE_PRIMARY_INFO
def _Run_Engine_step_2(self,
list_patent_ids,
technology_classes_list,
start_date,
end_date):
"""
        Running the engine to retrieve all necessary data from the PATSTAT Postgres database
"""
# Unpacking parameters
eng = self.engine
# Local variables
temp_SQL_table_1 = 'temporary_table_patent_ids'
temp_SQL_table_2 = 'docdb_family_ids'
# (1)
        print('-> Creating a temporary table in the SQL database containing the patent ids')
#t = tuple(TABLE_PATENT_IDS[param.VAR_APPLN_ID].unique().tolist())
t = tuple(list_patent_ids)
df = pd.DataFrame(t)
df.columns = [param.VAR_APPLN_ID]
self.create_temporary_table(df = df,
temporary_table_name = temp_SQL_table_1,
key = param.VAR_APPLN_ID,
engine = eng)
# (2)
print('-> Retrieving general information about the selected patents')
TABLE_MAIN_PATENT_INFOS = self.read_sql_tmpfile(param.sql_query_PATENT_MAIN_INFO, eng)
# (3)
print('-> Retrieving CPC technology classes of the selected patents')
TABLE_CPC = self.read_sql_tmpfile(param.sql_query_CPC_INFO, eng)
# (4)
print('-> Retrieving information about the patentees (individuals) of the selected patents')
TABLE_PATENTEES_INFO = self.read_sql_tmpfile(param.sql_query_PATENTEES_INFO, eng)
# (5)
print('-> Creating a temporary table in the SQL database containing the docdb_family ids')
df = TABLE_MAIN_PATENT_INFOS[[param.VAR_DOCDC_FAMILY_ID]]
df.drop_duplicates(inplace = True)
self.create_temporary_table(df = df,
temporary_table_name = temp_SQL_table_2,
key = param.VAR_DOCDC_FAMILY_ID,
engine = eng)
# (6)
print('-> Retrieving information about backward citations of the selected families')
TABLE_DOCBD_backwards_citations = self.read_sql_tmpfile(param.sql_query_DOCBD_backwards_citations, eng)
# (7)
print('-> Retrieving information about forward citations of the selected families')
TABLE_FORWARD_CITATIONS = self.read_sql_tmpfile(param.sql_query_FORWARD_CITATIONS, eng)
# Regrouping a bit the tables to simplify the output
TABLE_ALL_PATENTS_INFO = TABLE_MAIN_PATENT_INFOS.append([TABLE_CPC, TABLE_PATENTEES_INFO])
TABLE_ALL_PATENTS_INFO = pd.merge(TABLE_ALL_PATENTS_INFO,
TABLE_DOCBD_backwards_citations,
how = 'left',
left_on = param.VAR_DOCDC_FAMILY_ID,
right_on = param.VAR_DOCDC_FAMILY_ID)
return TABLE_ALL_PATENTS_INFO, TABLE_FORWARD_CITATIONS
def create_temporary_table(self, df, temporary_table_name, key, engine):
"""
Snippet to create a temporary table in the SQL database
        Inspired by https://stackoverflow.com/questions/30867390/python-pandas-to-sql-how-to-create-a-table-with-a-primary-key
"""
# Local variables
eng = self.engine
with eng.connect() as conn, conn.begin():
pandas_engine = | pandasSQL_builder(conn) | pandas.io.sql.pandasSQL_builder |
"""
Unit test of Inverse Transform
"""
import unittest
import pandas as pd
import numpy as np
import category_encoders as ce
from sklearn.compose import ColumnTransformer
import sklearn.preprocessing as skp
import catboost as cb
import sklearn
import lightgbm
import xgboost
from shapash.utils.transform import inverse_transform, apply_preprocessing
from shapash.utils.columntransformer_backend import get_feature_names, get_names, get_list_features_names
# TODO
# StandardScaler return object vs float vs int
# Target encoding return object vs float
class TestInverseTransformColumnsTransformer(unittest.TestCase):
def test_inv_transform_ct_1(self):
"""
test inv_transform_ct with multiple encoding and drop option
"""
train = pd.DataFrame({'city': ['chicago', 'paris'],
'state': ['US', 'FR'],
'other': ['A', 'B']},
index=['index1', 'index2'])
enc = ColumnTransformer(
transformers=[
('onehot_ce', ce.OneHotEncoder(), ['city', 'state']),
('onehot_skp', skp.OneHotEncoder(), ['city', 'state'])
],
remainder='drop')
enc.fit(train)
test = pd.DataFrame({'city': ['chicago', 'chicago', 'paris'],
'state': ['US', 'FR', 'FR'],
'other': ['A', 'B', 'C']},
index=['index1', 'index2', 'index3'])
expected = pd.DataFrame({'onehot_ce_city': ['chicago', 'chicago', 'paris'],
'onehot_ce_state': ['US', 'FR', 'FR'],
'onehot_skp_city': ['chicago', 'chicago', 'paris'],
'onehot_skp_state': ['US', 'FR', 'FR']},
index=['index1', 'index2', 'index3'])
result = pd.DataFrame(enc.transform(test))
result.columns = ['col1_0', 'col1_1', 'col2_0', 'col2_1', 'col3_0', 'col3_1', 'col4_0', 'col4_1']
result.index = ['index1', 'index2', 'index3']
original = inverse_transform(result, enc)
pd.testing.assert_frame_equal(original, expected)
def test_inv_transform_ct_2(self):
"""
test inv_transform_ct with multiple encoding and passthrough option
"""
train = pd.DataFrame({'city': ['chicago', 'paris'],
'state': ['US', 'FR'],
'other': ['A', 'B']},
index=['index1', 'index2'])
enc = ColumnTransformer(
transformers=[
('onehot_ce', ce.OneHotEncoder(), ['city', 'state']),
('onehot_skp', skp.OneHotEncoder(), ['city', 'state'])
],
remainder='passthrough')
enc.fit(train)
test = pd.DataFrame({'city': ['chicago', 'chicago', 'paris'],
'state': ['US', 'FR', 'FR'],
'other': ['A', 'B', 'C']},
index=['index1', 'index2', 'index3'])
expected = pd.DataFrame({'onehot_ce_city': ['chicago', 'chicago', 'paris'],
'onehot_ce_state': ['US', 'FR', 'FR'],
'onehot_skp_city': ['chicago', 'chicago', 'paris'],
'onehot_skp_state': ['US', 'FR', 'FR'],
'other': ['A', 'B', 'C']},
index=['index1', 'index2', 'index3'])
result = pd.DataFrame(enc.transform(test))
result.columns = ['col1_0', 'col1_1', 'col2_0', 'col2_1', 'col3_0', 'col3_1', 'col4_0', 'col4_1', 'other']
result.index = ['index1', 'index2', 'index3']
original = inverse_transform(result, enc)
pd.testing.assert_frame_equal(original, expected)
def test_inv_transform_ct_3(self):
"""
        test inv_transform_ct with multiple encoding and dictionary
"""
train = pd.DataFrame({'city': ['chicago', 'paris'],
'state': ['US', 'FR'],
'other': ['A', 'B']},
index=['index1', 'index2'])
enc = ColumnTransformer(
transformers=[
('onehot_ce', ce.OneHotEncoder(), ['city', 'state']),
('onehot_skp', skp.OneHotEncoder(), ['city', 'state'])
],
remainder='passthrough')
enc.fit(train)
test = pd.DataFrame({'city': ['chicago', 'chicago', 'paris'],
'state': ['US', 'FR', 'FR'],
'other': ['A', 'B', 'C']},
index=['index1', 'index2', 'index3'])
expected = pd.DataFrame({'onehot_ce_city': ['CH', 'CH', 'PR'],
'onehot_ce_state': ['US-FR', 'US-FR', 'US-FR'],
'onehot_skp_city': ['chicago', 'chicago', 'paris'],
'onehot_skp_state': ['US', 'FR', 'FR'],
'other': ['A-B', 'A-B', 'C']},
index=['index1', 'index2', 'index3'])
result = pd.DataFrame(enc.transform(test))
result.columns = ['col1_0', 'col1_1', 'col2_0', 'col2_1', 'col3_0', 'col3_1', 'col4_0', 'col4_1', 'other']
result.index = ['index1', 'index2', 'index3']
input_dict1 = dict()
input_dict1['col'] = 'onehot_ce_city'
input_dict1['mapping'] = pd.Series(data=['chicago', 'paris'], index=['CH', 'PR'])
input_dict1['data_type'] = 'object'
input_dict2 = dict()
input_dict2['col'] = 'other'
input_dict2['mapping'] = pd.Series(data=['A', 'B', 'C'], index=['A-B', 'A-B', 'C'])
input_dict2['data_type'] = 'object'
input_dict3 = dict()
input_dict3['col'] = 'onehot_ce_state'
input_dict3['mapping'] = pd.Series(data=['US', 'FR'], index=['US-FR', 'US-FR'])
input_dict3['data_type'] = 'object'
list_dict = [input_dict2, input_dict3]
original = inverse_transform(result, [enc,input_dict1,list_dict])
pd.testing.assert_frame_equal(original, expected)
def test_inv_transform_ct_4(self):
"""
test inv_transform_ct with single target category encoders and passthrough option
"""
y = pd.DataFrame(data=[0, 1, 1, 1], columns=['y'])
train = pd.DataFrame({'city': ['chicago', 'paris', 'paris', 'chicago'],
'state': ['US', 'FR', 'FR', 'US'],
'other': ['A', 'B', 'B', 'B']})
enc = ColumnTransformer(
transformers=[
('target', ce.TargetEncoder(), ['city', 'state'])
],
remainder='passthrough')
test = pd.DataFrame({'city': ['chicago', 'chicago', 'paris'],
'state': ['US', 'FR', 'FR'],
'other': ['A', 'B', 'C']})
expected = pd.DataFrame(data={'target_city': ['chicago', 'chicago', 'paris'],
'target_state': ['US', 'FR', 'FR'],
'other': ['A', 'B', 'C']},
dtype=object)
enc.fit(train, y)
result = pd.DataFrame(enc.transform(test))
result.columns = ['col1', 'col2', 'other']
original = inverse_transform(result, enc)
pd.testing.assert_frame_equal(original, expected)
def test_inv_transform_ct_5(self):
"""
test inv_transform_ct with single target category encoders and drop option
"""
y = pd.DataFrame(data=[0, 1, 0, 0], columns=['y'])
train = pd.DataFrame({'city': ['chicago', 'paris', 'chicago', 'paris'],
'state': ['US', 'FR', 'US', 'FR'],
'other': ['A', 'B', 'A', 'B']})
enc = ColumnTransformer(
transformers=[
('target', ce.TargetEncoder(), ['city', 'state'])
],
remainder='drop')
enc.fit(train, y)
test = pd.DataFrame({'city': ['chicago', 'chicago', 'paris'],
'state': ['US', 'FR', 'FR'],
'other': ['A', 'B', 'C']})
expected = pd.DataFrame(data={
'target_city': ['chicago', 'chicago', 'paris'],
'target_state': ['US', 'FR', 'FR']})
result = pd.DataFrame(enc.transform(test))
result.columns = ['col1', 'col2']
original = inverse_transform(result, enc)
pd.testing.assert_frame_equal(original, expected)
def test_inv_transform_ct_6(self):
"""
test inv_transform_ct with Ordinal Category Encoder and drop option
"""
y = pd.DataFrame(data=[0, 1], columns=['y'])
train = pd.DataFrame({'city': ['chicago', 'paris'],
'state': ['US', 'FR'],
'other': ['A', 'B']})
enc = ColumnTransformer(
transformers=[
('ordinal', ce.OrdinalEncoder(), ['city', 'state'])
],
remainder='drop')
enc.fit(train, y)
test = pd.DataFrame({'city': ['chicago', 'chicago', 'paris'],
'state': ['US', 'FR', 'FR'],
'other': ['A', 'B', 'C']})
expected = pd.DataFrame({'ordinal_city': ['chicago', 'chicago', 'paris'],
'ordinal_state': ['US', 'FR', 'FR']})
result = pd.DataFrame(enc.transform(test))
result.columns = ['col1', 'col2']
original = inverse_transform(result, enc)
pd.testing.assert_frame_equal(original, expected)
def test_inv_transform_ct_7(self):
"""
test inv_transform_ct with category Ordinal Encoder and passthrough option
"""
y = pd.DataFrame(data=[0, 1], columns=['y'])
train = pd.DataFrame({'city': ['chicago', 'paris'],
'state': ['US', 'FR'],
'other': ['A', 'B']})
enc = ColumnTransformer(
transformers=[
('ordinal', ce.OrdinalEncoder(), ['city', 'state'])
],
remainder='passthrough')
enc.fit(train, y)
test = pd.DataFrame({'city': ['chicago', 'chicago', 'paris'],
'state': ['US', 'FR', 'FR'],
'other': ['A', 'B', 'C']})
expected = pd.DataFrame({'ordinal_city': ['chicago', 'chicago', 'paris'],
'ordinal_state': ['US', 'FR', 'FR'],
'other': ['A', 'B', 'C']})
result = pd.DataFrame(enc.transform(test))
result.columns = ['col1', 'col2', 'other']
original = inverse_transform(result, enc)
pd.testing.assert_frame_equal(original, expected)
def test_inv_transform_ct_8(self):
"""
test inv_transform_ct with Binary encoder and drop option
"""
y = pd.DataFrame(data=[0, 1], columns=['y'])
train = pd.DataFrame({'city': ['chicago', 'paris'],
'state': ['US', 'FR'],
'other': ['A', 'B']})
enc = ColumnTransformer(
transformers=[
('binary', ce.BinaryEncoder(), ['city', 'state'])
],
remainder='drop')
enc.fit(train, y)
test = pd.DataFrame({'city': ['chicago', 'chicago', 'paris'],
'state': ['US', 'FR', 'FR'],
'other': ['A', 'B', 'C']})
expected = pd.DataFrame({'binary_city': ['chicago', 'chicago', 'paris'],
'binary_state': ['US', 'FR', 'FR']})
result = pd.DataFrame(enc.transform(test))
result.columns = ['col1_0', 'col1_1', 'col2_0', 'col2_1']
original = inverse_transform(result, enc)
pd.testing.assert_frame_equal(original, expected)
def test_inv_transform_ct_9(self):
"""
test inv_transform_ct with Binary Encoder and passthrough option
"""
y = pd.DataFrame(data=[0, 1], columns=['y'])
train = pd.DataFrame({'city': ['chicago', 'paris'],
'state': ['US', 'FR'],
'other': ['A', 'B']})
enc = ColumnTransformer(
transformers=[
('binary', ce.BinaryEncoder(), ['city', 'state'])
],
remainder='passthrough')
enc.fit(train, y)
test = pd.DataFrame({'city': ['chicago', 'chicago', 'paris'],
'state': ['US', 'FR', 'FR'],
'other': ['A', 'B', 'C']})
expected = pd.DataFrame({'binary_city': ['chicago', 'chicago', 'paris'],
'binary_state': ['US', 'FR', 'FR'],
'other': ['A', 'B', 'C']})
result = pd.DataFrame(enc.transform(test))
result.columns = ['col1_0', 'col1_1', 'col2_0', 'col2_1', 'other']
original = inverse_transform(result, enc)
pd.testing.assert_frame_equal(original, expected)
def test_inv_transform_ct_10(self):
"""
test inv_transform_ct with BaseN Encoder and drop option
"""
y = pd.DataFrame(data=[0, 1], columns=['y'])
train = pd.DataFrame({'city': ['chicago', 'paris'],
'state': ['US', 'FR'],
'other': ['A', 'B']})
enc = ColumnTransformer(
transformers=[
('basen', ce.BaseNEncoder(), ['city', 'state'])
],
remainder='drop')
enc.fit(train, y)
test = pd.DataFrame({'city': ['chicago', 'chicago', 'paris'],
'state': ['US', 'FR', 'FR'],
'other': ['A', 'B', 'C']})
expected = pd.DataFrame({'basen_city': ['chicago', 'chicago', 'paris'],
'basen_state': ['US', 'FR', 'FR']})
result = pd.DataFrame(enc.transform(test))
result.columns = ['col1_0', 'col1_1', 'col2_0', 'col2_1']
original = inverse_transform(result, enc)
pd.testing.assert_frame_equal(original, expected)
def test_inv_transform_ct_11(self):
"""
test inv_transform_ct with BaseN Encoder and passthrough option
"""
y = pd.DataFrame(data=[0, 1], columns=['y'])
train = pd.DataFrame({'city': ['chicago', 'paris'],
'state': ['US', 'FR'],
'other': ['A', 'B']})
enc = ColumnTransformer(
transformers=[
('basen', ce.BaseNEncoder(), ['city', 'state'])
],
remainder='passthrough')
enc.fit(train, y)
test = pd.DataFrame({'city': ['chicago', 'chicago', 'paris'],
'state': ['US', 'FR', 'FR'],
'other': ['A', 'B', 'C']})
expected = pd.DataFrame({'basen_city': ['chicago', 'chicago', 'paris'],
'basen_state': ['US', 'FR', 'FR'],
'other': ['A', 'B', 'C']})
result = pd.DataFrame(enc.transform(test))
result.columns = ['col1_0', 'col1_1', 'col2_0', 'col2_1', 'other']
original = inverse_transform(result, enc)
pd.testing.assert_frame_equal(original, expected)
def test_inv_transform_ct_12(self):
"""
test inv_transform_ct with single OneHotEncoder and drop option
"""
y = pd.DataFrame(data=[0, 1], columns=['y'])
train = pd.DataFrame({'city': ['chicago', 'paris'],
'state': ['US', 'FR'],
'other': ['A', 'B']})
enc = ColumnTransformer(
transformers=[
('onehot', ce.OneHotEncoder(), ['city', 'state'])
],
remainder='drop')
enc.fit(train, y)
test = pd.DataFrame({'city': ['chicago', 'chicago', 'paris'],
'state': ['US', 'FR', 'FR'],
'other': ['A', 'B', 'C']})
expected = pd.DataFrame({'onehot_city': ['chicago', 'chicago', 'paris'],
'onehot_state': ['US', 'FR', 'FR']})
result = pd.DataFrame(enc.transform(test))
result.columns = ['col1_0', 'col1_1', 'col2_0', 'col2_1']
original = inverse_transform(result, enc)
pd.testing.assert_frame_equal(original, expected)
def test_inv_transform_ct_13(self):
"""
test inv_transform_ct with OneHotEncoder and passthrough option
"""
y = pd.DataFrame(data=[0, 1], columns=['y'])
train = pd.DataFrame({'city': ['chicago', 'paris'],
'state': ['US', 'FR'],
'other': ['A', 'B']})
enc = ColumnTransformer(
transformers=[
('onehot', ce.OneHotEncoder(), ['city', 'state'])
],
remainder='passthrough')
enc.fit(train, y)
test = pd.DataFrame({'city': ['chicago', 'chicago', 'paris'],
'state': ['US', 'FR', 'FR'],
'other': ['A', 'B', 'C']})
expected = pd.DataFrame({'onehot_city': ['chicago', 'chicago', 'paris'],
'onehot_state': ['US', 'FR', 'FR'],
'other': ['A', 'B', 'C']})
result = pd.DataFrame(enc.transform(test))
result.columns = ['col1_0', 'col1_1', 'col2_0', 'col2_1', 'other']
original = inverse_transform(result, enc)
| pd.testing.assert_frame_equal(original, expected) | pandas.testing.assert_frame_equal |
import requests
from requests import get
from bs4 import BeautifulSoup
import pandas as pd
import numpy as np
import json
import time
import re
from time import sleep
from random import randint
# Not needed but I like a timer to see how long the code takes to run
then = time.time()
# Create empty lists to hold data
reviews = []
headings = []
stars = []
dates = []
lang = "sv"
# The number of pages to scrape is no longer hard-coded: for each site below we read the
# total number of Swedish-language reviews from TrustPilot and divide by 20 reviews per
# page to work out how many pages to loop over.
webPageList = ["https://se.trustpilot.com/review/telness.se", "https://se.trustpilot.com/review/hifi-punkten.se",
"https://se.trustpilot.com/review/www.fyndborsen.se","https://se.trustpilot.com/review/www.cdon.se",
"https://se.trustpilot.com/review/oljemagasinet.se","https://se.trustpilot.com/review/filmhyllan.nu?languages=sv",
"https://se.trustpilot.com/review/petworld.se","https://se.trustpilot.com/review/hm.com",
"https://se.trustpilot.com/review/www.spotify.com","https://se.trustpilot.com/review/verisure.se",
"https://se.trustpilot.com/review/www.vvsochbad.se","https://se.trustpilot.com/review/vinoteket.se",
"https://se.trustpilot.com/review/www.inkclub.com","https://se.trustpilot.com/review/teknikdelar.se",
"https://se.trustpilot.com/review/fyndiq.se","https://se.trustpilot.com/review/advisa.se",
"https://se.trustpilot.com/review/www.tele2.se","https://se.trustpilot.com/review/www.comhem.se",
"https://se.trustpilot.com/review/bodylab.se","https://se.trustpilot.com/review/www.solfaktor.se",
"https://se.trustpilot.com/review/tui.se","https://se.trustpilot.com/review/www.flygpoolen.se",
"https://se.trustpilot.com/review/www.ticket.se"]
for webPageName in webPageList:
webPage =requests.get(webPageName)
soup = BeautifulSoup(webPage.text, "html.parser")
numRev = soup.find('div', class_="reviews-overview card card--related")
temp = numRev.script.contents[0]
svPos = temp.find(lang)
svRev_str = temp[(svPos+43):(svPos+50)]
svRev = re.findall(r'\d+', svRev_str)
if len(svRev)>1:
svRev = int(svRev[0]+svRev[1])
else:
svRev = int(svRev[0])
numPages = int(svRev/20)+1
pages = np.arange(1, numPages, 1)
# Create a loop to go over the reviews
for page in pages:
page = requests.get(webPageName + "?languages=" + lang + "&page=" + str(page))
soup = BeautifulSoup(page.text, "html.parser")
# Set the tag we wish to start at, this is like a parent tag where we will go in and get everything below it
review_div = soup.find_all('div', class_="review-content")
        # Sleep is not strictly needed, but many websites block scrapers, so a random pause helps
        # here we pause for a random 0.01 to 0.04 seconds between page requests to slow the loop down
sleep(randint(1, 4)/100)
# loop to iterate through each reviews
for container in review_div:
# Get the body of the review
            # If the user left no review, TrustPilot adds no text tag at all, so find_all returns
            # an empty list and the expression below stores '-' instead of the review text
            # In other words: if nv holds a review, keep its text; otherwise record a dash
# We now tell the code to go into the tag 'p' 'class' 'review-content__text'
nv = container.find_all('p', attrs={'class': 'review-content__text'})
review = container.p.text if len(nv) == True else '-'
reviews.append(review)
# Get the title of the review
nv1 = container.find_all('h2', attrs={'class': 'review-content__title'})
heading = container.a.text if len(nv1) == True else '-'
headings.append(heading)
# Get the star rating review given
star = container.find("div", {"class": "star-rating star-rating--medium"}).find('img').get('alt')
stars.append(star)
# Get the date
# cont = container.find('script', {"data-initial-state": "review-dates"}).text
# date_json = json.loads(container.find('script').text)
# date = date_json['publishedDate']
# dates.append(date)
# Cleaning up the data; this could be done in memory, but I saved it to a .csv and opened it back up
# Create a DataFrame using the lists we created
TrustPilot = pd.DataFrame({'Title': headings, 'Body': reviews, 'Rating': stars})
# The review had a lot of white space on both sides so we strip that out and create a .csv with the data
TrustPilot['Body'] = TrustPilot['Body'].str.strip()
TrustPilot.to_csv('TrustPilot.csv', index=False)
# Read the csv file
data = | pd.read_csv('TrustPilot.csv') | pandas.read_csv |
#!/usr/bin/ipython
# author: <NAME>
# Referring to the excel table, perform dimensionality reduction.
import pandas as pd
import ipdb
import numpy as np
import glob
from time import time
import sys
import paths
excel_path_var = paths.root_dir + 'misc_derived/ref_excel/varref_excel_v6.tsv'
excel_path_lab = paths.root_dir + 'misc_derived/ref_excel/labref_excel_v6.tsv'
in_version = 'v6b'
merged_dir = paths.root_dir + '/3_merged/' + in_version + '/'
merged_reduced_dir = merged_dir + 'reduced/'
def preproc_excel(excel_var, excel_lab):
# process labs
excel_lab['MetaVariableID'] = excel_lab['MetaVariableID'].astype('int')
excel_lab.rename(columns={'VariableName': 'MetaVariableName'}, inplace=True)
excel = pd.concat([excel_var, excel_lab], axis=0)
# this is to delete the single row (ECMO) with NaN variable ID
excel = excel.loc[excel['VariableID'] > 0, :]
# change IDs to have p, v
pharma_rows = excel['Type'] == 'Pharma'
excel.loc[pharma_rows, 'VariableID'] = list(map(lambda x: 'p' + str(int(x)), excel.loc[pharma_rows, 'VariableID']))
excel.loc[~pharma_rows, 'VariableID'] = list(map(lambda x: 'v' + str(int(x)), excel.loc[~pharma_rows, 'VariableID']))
excel['Subgroup'] = excel['Subgroup'].fillna('Unknown')
# convert fractions in text into floats, for flow rate
fractions = excel.loc[excel['UnitConversionFactor'].notnull()&(excel['MetaVariableUnit'] == 'Flow rate'), 'UnitConversionFactor']
fractions_as_floats = list(map(lambda x: np.float32(eval(x)), fractions))
excel.loc[excel['UnitConversionFactor'].notnull()&(excel['MetaVariableUnit'] == 'Flow rate'), 'UnitConversionFactor'] = fractions_as_floats
return excel
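# --- Hedged aside, not part of the original pipeline: the block above converts textual
# fractions (e.g. '1/2') for flow-rate rows with eval(). A safer, equivalent sketch using
# fractions.Fraction is shown below; it is illustrative only and is not called anywhere.
from fractions import Fraction

def parse_fraction_string(text):
    """Parse strings such as '1/2' or '0.25' into floats without eval().

    >>> parse_fraction_string('1/2')
    0.5
    """
    return float(Fraction(str(text)))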
def get_mID_column(m_excel, df, IDs):
"""
"""
parameter_unit = m_excel['MetaVariableUnit'].dropna().unique()
if len(parameter_unit) == 0:
# print('WARNING: parameter has no units... assuming non drugs')
parameter_unit = ['missing unit']
assert len(parameter_unit) == 1
if parameter_unit in ['[yes/no]', ' [yes/no]']:
merge_logic = 'binary'
elif parameter_unit == 'count of drugs':
merge_logic = 'count presence'
elif parameter_unit == 'Flow rate':
merge_logic = 'scale drugs'
else:
merge_logic = 'non drugs'
assert not 'Pharma' in m_excel['Type'].unique()
# nothing to do here exactly (conversion to binary/count happens at the end)
if len(IDs) == 1:
try:
mID_column = df[IDs[0]]
except KeyError:
print('WARNING: couldnt find', IDs[0], 'in data frame')
mID_column = np.random.normal(size=df.shape[0])
if merge_logic in ['binary', 'count presence']:
mID_column = (mID_column > 0).astype(int)
else:
# now we have to merge
if merge_logic == 'scale drugs':
# need scaling factors
scaling_factors = []
for ID in IDs:
scaling_factor = m_excel.loc[m_excel['VariableID'] == ID, 'UnitConversionFactor'].values[0]
scaling_factors.append(scaling_factor)
# sometimes there is no scaling factor - assume 1
if np.isnan(scaling_factors).any():
try:
assert np.isnan(scaling_factors).all()
except AssertionError:
print(scaling_factors)
ipdb.set_trace()
scaling_factors = [1]*len(IDs)
else:
scaling_factors = [1]*len(IDs)
mID_column = merge_IDs(df, merge_logic, scaling_factors)
if merge_logic == 'binary':
mID_column = mID_column.astype(int)
return mID_column
def merge_IDs(df, merge_logic, scaling_factors):
if merge_logic == 'binary':
columns = (df > 0).any(axis=1)
elif merge_logic == 'count presence':
columns = (df > 0).sum(axis=1, min_count=1)
elif merge_logic == 'scale drugs':
assert len(scaling_factors) == df.shape[1]
columns = (df*scaling_factors).sum(axis=1, min_count=1)
elif merge_logic == 'non drugs':
# pandas median automatically deals with NaN
# there is no min_count for median, it just produces NaN if there are only NaNs
columns = df.median(axis=1)
return columns
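# --- Hedged usage sketch (not in the original script): how merge_IDs behaves on a tiny
# invented frame with two variable columns. Expected results are given in the comments.
def _demo_merge_IDs():
    demo = pd.DataFrame({'v1': [1.0, 0.0, np.nan],
                         'v2': [3.0, 2.0, np.nan]})
    binary = merge_IDs(demo, 'binary', [1, 1])           # True, True, False
    counts = merge_IDs(demo, 'count presence', [1, 1])   # 2, 1, 0
    scaled = merge_IDs(demo, 'scale drugs', [0.5, 2.0])  # 6.5, 4.0, NaN
    medians = merge_IDs(demo, 'non drugs', [1, 1])       # 2.0, 1.0, NaN
    return binary, counts, scaled, medians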
def merge_parameter(mID, excel, df_full):
m_excel = excel.loc[excel['MetaVariableID'] == mID, :]
assert len(m_excel['MetaVariableName'].dropna().unique()) == 1
if 'Pharma' in m_excel['Type'].dropna().unique():
assert len(m_excel['Type'].dropna().unique()) == 1
mID = 'pm' + str(int(mID))
else:
mID = 'vm' + str(int(mID))
parameter = m_excel['MetaVariableName'].dropna().unique()[0]
IDs = m_excel['VariableID'].unique()
print(mID, parameter)
try:
df = df_full.loc[:, IDs]
except KeyError:
print('WARNING: some of', IDs, 'missing in dataframe for parameter', parameter)
ipdb.set_trace()
return np.nan, mID
mID_column = get_mID_column(m_excel, df, IDs)
return mID_column, mID
#DEBUG_STOP = 5000
def process_chunk(chunkname, excel):
print('Processing', chunkname)
inpath = merged_dir + chunkname
outpath = merged_reduced_dir + 'reduced_' + chunkname
print('Reading in chunk', merged_dir + chunkname)
chunk = pd.read_hdf(merged_dir + chunkname)
mIDs = excel['MetaVariableID'].unique()
df_reduced = chunk.loc[:, ['PatientID', 'Datetime']]
# now run
for mID in mIDs:
# mID gets changed in here, to add a pm or vm
mID_column, mID = merge_parameter(mID, excel, chunk)
df_reduced[mID] = mID_column
print('Saving to', outpath)
df_reduced.to_hdf(outpath, 'reduced', append=False, complevel=5,
complib='blosc:lz4', data_columns=['PatientID'], format='table')
# csv is a lot quicker...
#df_reduced.to_csv(merged_reduced_csv, mode='a')
def main(idx):
#excel = pd.read_excel(excel_path)
excel_var = pd.read_csv(excel_path_var, sep='\t', encoding='cp1252')
excel_lab = | pd.read_csv(excel_path_lab, sep='\t', encoding='cp1252') | pandas.read_csv |
import pandas as pd
import pprint
import numpy as np
import sys
output_dir = "../../../derivation/python/output/"
from auxfunction_tablecreation import create_table_df
'''
@Author <NAME>
Produces summary statistics on the coverage of meetings by
the Financial Times, New York Times, and Wall Street Journal,
reading in our master news data and exporting the result to a LaTeX file
'''
def main():
comp_df = get_merge()
#print(comp_df)
comp_df.rename(columns={'meeting_date':'Meetings'},inplace=True)
pivot = pd.pivot_table(comp_df,
values=['Meetings','NYT', 'WSJ', 'FT'],
columns="year",
aggfunc={'Meetings':np.count_nonzero,
'NYT':np.sum,
'WSJ':np.sum,
'FT':np.sum})
pivot = pivot.reset_index()
pivot.rename(columns={"index": "Newspaper"}, inplace=True)
pivot = pivot.reindex([1,0,2,3])
pivot = pivot.astype(int,errors="ignore")
#print(pivot)
#print(pivot.shape)
create_table_df(pivot,"tab_news_coverage",12)
def get_merge():
derived_df = | pd.read_csv("../../../collection/python/output/derived_data.csv") | pandas.read_csv |
"""Create road OD matrices matched to network nodes in Argentina
"""
import csv
import os
import types
import fiona
import pandas as pd
import geopandas as gpd
import numpy as np
import igraph as ig
import copy
import unidecode
from scipy.spatial import Voronoi
from atra.utils import *
import datetime
from tqdm import tqdm
def assign_node_weights_by_area_population_proximity(region_path,nodes,region_pop_col):
"""Assign weights to nodes based on their nearest regional populations
- By finding the regions_data that intersect with the Voronoi extents of nodes
Parameters
- region_path - Path of region shapefile
    - nodes - Geopandas dataframe of nodes
- region_pop_col - String name of column containing region population values
Outputs
- nodes - Geopandas dataframe of nodes with new column called weight
"""
# load provinces and get geometry of the right regions data
tqdm.pandas()
regions_data = gpd.read_file(region_path,encoding='utf-8')
regions_data = regions_data.to_crs({'init': 'epsg:4326'})
sindex_regions_data = regions_data.sindex
# create Voronoi polygons for the nodes
xy_list = []
for values in nodes.itertuples():
xy = list(values.geometry.coords)
xy_list += [list(xy[0])]
vor = Voronoi(np.array(xy_list))
regions, vertices = voronoi_finite_polygons_2d(vor)
min_x = vor.min_bound[0] - 0.1
max_x = vor.max_bound[0] + 0.1
min_y = vor.min_bound[1] - 0.1
max_y = vor.max_bound[1] + 0.1
mins = np.tile((min_x, min_y), (vertices.shape[0], 1))
bounded_vertices = np.max((vertices, mins), axis=0)
maxs = np.tile((max_x, max_y), (vertices.shape[0], 1))
bounded_vertices = np.min((bounded_vertices, maxs), axis=0)
box = Polygon([[min_x, min_y], [min_x, max_y], [max_x, max_y], [max_x, min_y]])
poly_list = []
for region in regions:
polygon = vertices[region]
# Clipping polygon
poly = Polygon(polygon)
poly = poly.intersection(box)
poly_list.append(poly)
poly_index = list(np.arange(0, len(poly_list), 1))
poly_df = pd.DataFrame(list(zip(poly_index, poly_list)),
columns=['gid', 'geometry'])
gdf_voronoi = gpd.GeoDataFrame(poly_df, crs='epsg:4326')
gdf_voronoi['node_id'] = gdf_voronoi.progress_apply(
lambda x: extract_nodes_within_gdf(x, nodes, 'node_id'), axis=1)
gdf_voronoi[region_pop_col] = 0
gdf_voronoi = assign_value_in_area_proportions(regions_data, gdf_voronoi, region_pop_col)
gdf_voronoi.rename(columns={region_pop_col: 'weight'}, inplace=True)
gdf_pops = gdf_voronoi[['node_id', 'weight']]
del gdf_voronoi, poly_list, poly_df
nodes = | pd.merge(nodes, gdf_pops, how='left', on=['node_id']) | pandas.merge |
import pandas as pd
import statsmodels.formula.api as api
from sklearn.preprocessing import scale, StandardScaler
from sklearn.linear_model import RidgeCV
from plotnine import *
import torch
import numpy as np
def sumcode(col):
return (col * 2 - 1).astype(int)
def massage(dat, scaleall=False):
dat['durationsum'] = dat['duration1'] + dat['duration2']
keep = ['samespeaker', 'sameepisode', 'sametype', 'semsim',
'durationdiff', 'durationsum', 'sim_1', 'sim_2']
data = dat[keep].dropna().query("semsim != 0.0").assign(
samespeaker = lambda x: scale(x.samespeaker) if scaleall else sumcode(x.samespeaker),
sameepisode = lambda x: scale(x.sameepisode) if scaleall else sumcode(x.sameepisode),
sametype = lambda x: scale(x.sametype) if scaleall else sumcode(x.sametype),
semsim = lambda x: scale(x.semsim),
durationdiff = lambda x: scale(x.durationdiff),
durationsum = lambda x: scale(x.durationsum),
sim_1 = lambda x: scale(x.sim_1),
sim_2 = lambda x: scale(x.sim_2))
return data
def standardize(data):
keep = ['samespeaker', 'sameepisode', 'sametype', 'semsim',
'distance', 'durationdiff', 'durationsum', 'sim_1', 'sim_2']
scaler = StandardScaler()
data = data[keep].astype(float)
return pd.DataFrame(scaler.fit_transform(data.values), columns=data.columns, index=data.index)
def rer(red, full):
return (red - full) / red
def partial_r2(model, data):
r2 = []
mse_full = model.fit().mse_resid
predictors = [ name for name in model.exog_names if name != 'Intercept' ]
# drop intercept
mse_red = model.from_formula(f"{model.endog_names} ~ {' + '.join(predictors)}",
drop_cols=['Intercept'],
data=data).fit().mse_resid
r2.append(rer(mse_red, mse_full))
for predictor in predictors:
exog = ' + '.join([ name for name in predictors if name != predictor ])
formula = f"{model.endog_names} ~ {exog}"
mse_red = model.from_formula(formula, data).fit().mse_resid
r2.append(rer(mse_red, mse_full))
return pd.DataFrame(index=['Intercept']+predictors, data=dict(partial_r2=r2))
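# --- Hedged usage sketch (not part of the original analysis): partial_r2 expects an
# *unfitted* statsmodels formula model plus the data used to build it. The toy data and
# formula below are invented purely for illustration.
def _demo_partial_r2():
    rng = np.random.RandomState(0)
    toy = pd.DataFrame({'x1': rng.normal(size=100),
                        'x2': rng.normal(size=100)})
    toy['y'] = 1.5 * toy['x1'] + 0.5 * toy['x2'] + rng.normal(size=100)
    model = api.ols('y ~ x1 + x2', data=toy)
    # One partial-R^2 row for the intercept and one per predictor.
    return partial_r2(model, toy)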
def plot_coef(table, fragment_type, multiword):
data = table.query(f"multiword == {multiword} & fragment_type == '{fragment_type}'")
data['version'] = data['version'].map(str)
g = ggplot(data, aes('Variable', 'Coefficient')) + \
geom_hline(yintercept=0, color='gray', linetype='dashed') + \
geom_errorbar(aes(color='version', ymin='Lower', ymax='Upper', lwd=1, width=0.25)) + \
geom_point(aes(color='version')) + \
coord_flip()
ggsave(g, f"results/grsa_{fragment_type}_{'multi' if multiword else ''}word_coef.pdf")
def frameit(matrix, prefix="dim"):
return pd.DataFrame(matrix, columns=[f"{prefix}{i}" for i in range(matrix.shape[1])])
def backprobes(version):
for fragment_type in ['dialog', 'narration']:
data = torch.load(f"data/out/words_{version}_{fragment_type}.pt")
backprobe(data['words']).to_csv(f"results/backprobe_{version}_{fragment_type}.csv",
index=False,
header=True)
def backprobe(words):
rows = []
embedding_2 = frameit(scale(torch.stack([word.embedding_2 for word in words]).cpu().numpy()),
prefix="emb_2")
embedding_1 = frameit(scale(torch.stack([word.embedding_1 for word in words]).cpu().numpy()),
prefix="emb_1")
embedding_0 = frameit(scale(torch.stack([word.embedding_0 for word in words]).cpu().numpy()),
prefix="emb_0")
semsim = frameit(torch.stack([word.semsim for word in words]).cpu().numpy(),
prefix="semsim")
speaker = pd.get_dummies([word.speaker for word in words], prefix="speaker")
episode = | pd.get_dummies([word.episode for word in words], prefix="episode") | pandas.get_dummies |
#!/usr/bin/env python
# coding: utf-8
# # GenCode Explore
#
# Explore the human RNA sequences from GenCode.
#
# Assume user downloaded files from GenCode 38 [FTP](http://ftp.ebi.ac.uk/pub/databases/gencode/Gencode_human/release_38/)
# to a subdirectory called data.
#
# Build on 102 which excluded mitochondrial genes by ID. (If the number of exclusions grows, we may need to move from an exclusion list to an annotation gff parser.)
#
# Explore remaining PC mRNA that have tiny ORFs.
# In[1]:
import time
def show_time():
t = time.time()
s = time.strftime('%Y-%m-%d %H:%M:%S %Z', time.localtime(t))
print(s)
show_time()
# In[2]:
import numpy as np
import pandas as pd
import gzip
import sys
import re
try:
from google.colab import drive
IN_COLAB = True
print("On Google CoLab, mount cloud-local file, get our code from GitHub.")
PATH='/content/drive/'
#drive.mount(PATH,force_remount=True) # hardly ever need this
drive.mount(PATH) # Google will require login credentials
DATAPATH=PATH+'My Drive/data/' # must end in "/"
import requests
s = requests.get('https://raw.githubusercontent.com/ShepherdCode/Soars2021/master/SimTools/RNA_describe.py')
with open('RNA_describe.py', 'w') as f:
f.write(s.text) # writes to cloud local, delete the file later?
s = requests.get('https://raw.githubusercontent.com/ShepherdCode/Soars2021/master/SimTools/GenCode_Protein_Include.py')
with open('GenCode_Protein_Include', 'w') as f:
f.write(s.text) # writes to cloud local, delete the file later?
from RNA_describe import *
from GenCode_preprocess import prot_incl
except:
print("CoLab not working. On my PC, use relative paths.")
IN_COLAB = False
DATAPATH='../data/' # must end in "/"
sys.path.append("..") # append parent dir in order to use sibling dirs
from SimTools.RNA_describe import *
from SimTools.GenCode_Protein_Include import prot_incl
MODELPATH="BestModel" # saved on cloud instance and lost after logout
#MODELPATH=DATAPATH+MODELPATH # saved on Google Drive but requires login
if not assert_imported_RNA_describe():
print("ERROR: Cannot use RNA_describe.")
# In[3]:
PC_FILENAME='gencode.v38.pc_transcripts.fa.gz'
NC_FILENAME='gencode.v38.lncRNA_transcripts.fa.gz'
# In[4]:
def load_gencode(filename,label,check_list=None):
DEFLINE='>' # start of line with ids in a FASTA FILE
DELIM='|' # character between ids
VERSION='.' # character between id and version
EMPTY='' # use this to avoid saving "previous" sequence in first iteration
labels=[] # usually 1 for protein-coding or 0 for non-coding
seqs=[] # usually strings of ACGT
lens=[] # sequence length
ids=[] # GenCode transcript ID, always starts ENST, excludes version
one_seq = EMPTY
one_id = None
pattern5=re.compile('.*UTR5:')
pattern3=re.compile('.*UTR3:')
has_utr=False
with gzip.open (filename,'rt') as infile:
for line in infile:
if line[0]==DEFLINE:
if not one_seq == EMPTY and (check_list is None or one_id in check_list) and has_utr:
labels.append(label)
seqs.append(one_seq)
lens.append(len(one_seq))
ids.append(one_id)
one_id = line[1:].split(VERSION)[0]
one_seq = EMPTY
has_utr = not (pattern5.match(line) is None or pattern3.match(line) is None)
else:
# Continue loading sequence lines till next defline.
additional = line.rstrip()
one_seq = one_seq + additional
# Don't forget to save the last sequence after end-of-file.
    if not one_seq == EMPTY and (check_list is None or one_id in check_list) and has_utr:
labels.append(label)
seqs.append(one_seq)
lens.append(len(one_seq))
ids.append(one_id)
df1=pd.DataFrame(ids,columns=['tid'])
df2= | pd.DataFrame(labels,columns=['class']) | pandas.DataFrame |
import numpy as np
import pandas as pd
import datetime
from scipy.stats import ttest_ind
from statsmodels.tsa.holtwinters import ExponentialSmoothing
from decimal import Decimal
import warnings
warnings.filterwarnings("ignore")
from scipy.optimize import basinhopping
def load_data(path,index):
df = pd.read_csv(path)
df.set_index(index,inplace=True)
df.columns = df.columns.map(lambda x: x.title())
# Returns the dataframe
return df
# Converts compact YYYYMMDDHHMMSS timestamp strings to datetime and sorts by date
# I keep the full YYYY-mm-dd-HH:MM:SS format since a priori we don't know the time scale of events
def to_datetime(df,var):
# Copies the dataframe
df = df.copy()
df[var] = pd.to_datetime(df[var], utc=True, format = "%Y%m%d%H%M%S").dt.strftime("%Y-%m-%d-%H:%M:%S")
df.sort_values(by=[var],inplace=True)
# Returns the dataframe
return df
# Checks for duplicated data
def duplicated_data(df):
# Copies the dataframe
df = df.copy()
# Rows containing duplicate data
print("Removed ", df[df.duplicated()].shape[0], ' duplicated rows.')
# Returns a dataframe with the duplicated rows removed
return df.drop_duplicates()
# Checks for columns with missing values (NaNs)
def check_missing_values(df,cols=None,axis=0):
# Copies the dataframe
df = df.copy()
if cols != None:
df = df[cols]
missing_num = df.isnull().sum(axis).to_frame().rename(columns={0:'missing_num'})
missing_num['missing_percent'] = df.isnull().mean(axis)*100
result = missing_num.sort_values(by='missing_percent',ascending = False)
# Returns a dataframe with columns with missing data as index and the number and percent of NaNs
return result[result["missing_percent"]>0.0]
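# --- Hedged usage sketch (invented data): only columns that actually contain NaNs are
# returned, together with their count and percentage of missing values.
def _demo_check_missing_values():
    demo = pd.DataFrame({'a': [1, None, 3], 'b': [1, 2, 3]})
    # Returns a single row for 'a' with missing_num == 1 and missing_percent ~= 33.3
    return check_missing_values(demo)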
# Encodes the categorical variable Action
def cat_encoder(df,variables):
# Copies the dataframe
df = df.copy()
df_to_encode = df[variables]
df_encoded = | pd.get_dummies(df_to_encode,drop_first=False) | pandas.get_dummies |
import pandas as pd
import numpy as np
def convert_minutes_to_seconds(time_minutes):
"""
Convert time expressed in (float) minutes into (float) seconds.
:param float time_minutes: Time expressed in minutes.
:return: Time expressed in seconds.
:rtype: float
"""
time_seconds = time_minutes * 60
return time_seconds
def transform_llimllib_boston_data(df, year):
"""
Transform 2013-2104 Boston Marathon data from llimllib's Github repo into a standard form for downstream processing.
Namely, split times are converted to integer seconds.
:param pandas.DataFrame df: DataFrame representing 2013-2014 Boston Marathon data from llimllib's Github repo.
:param int year: Year of Boston Marathon
:return: DataFrame of transformed marathon data
:rtype: pandas.DataFrame
"""
# Header names for split time field in llimllib marathon data
headers_split = ['5k', '10k', '20k', 'half', '25k', '30k', '35k', '40k', 'official']
# Replace nan placeholders with actual nan values
for header in headers_split:
df[header].replace('-', np.nan, inplace=True)
# Cast split times to float
dtypes_new = dict(zip(headers_split, [float] * len(headers_split)))
df = df.astype(dtypes_new)
# Convert split time from decimal minutes to seconds
for header in headers_split + ['pace']:
df[header] = df[header].apply(convert_minutes_to_seconds)
# Add year field
df['year'] = year
# Add empty columns for 15k split time and gender_place rank
df['15k'] = np.nan
df['gender_place'] = np.nan
df = df.rename(columns={'official': 'official_time', 'ctz': 'citizen'})
return df
# at least one row in 2015 had an incomprehensible official finish time: 0.124548611111111
# I believe there was only one, but the try/catch below should handle
# missing values, placeholder '-', and bad values
def convert_string_to_seconds(time_str):
"""
Convert time in a string format 'HH:MM:SS' into (int) seconds.
:param str time_str: Time in a string format 'HH:MM:SS'
:return: Time expressed in seconds
:rtype: int
"""
try:
hours_str, minutes_str, seconds_str = time_str.split(':')
return int(hours_str) * 3600 + int(minutes_str) * 60 + int(seconds_str)
except:
return np.nan
def transform_rojour_boston_data(df, year):
"""
Transform 2015-2107 Boston Marathon data from rojour's Github repo into a standard form for downstream processing.
Namely, split times are converted to integer seconds.
:param pandas.DataFrame df: DataFrame representing 2015-2017 Boston Marathon data from rojour's Github repo.
:param int year: Year of Boston Marathon
:return: DataFrame of transformed marathon data
:rtype: pandas.DataFrame
"""
# Drop unnecessary columns
if year == 2016:
df.drop('Proj Time', axis=1)
else:
df.drop([df.columns[0], 'Proj Time'], axis=1)
# The split times in the Kaggle data are formatted as strings hh:mm:ss. We want these times in total seconds.
headers_split = ['5K', '10K', '15K', '20K', 'Half', '25K', '30K', '35K', '40K', 'Official Time']
for header in headers_split:
df[header] = df[header].apply(convert_string_to_seconds)
if year == 2015:
        df = df.dropna(subset=['Official Time'])
# Create a year field and an empty field for 'genderdiv'
df['year'] = year
df['genderdiv'] = np.nan
# Map of field names to rename headers of df to ensure consistency with transform_llimllib_boston_data
headers_map = {'Age': 'age', 'Official Time': 'official_time', 'Bib': 'bib', 'Citizen': 'citizen',
'Overall': 'overall', 'Pace': 'pace', 'State': 'state', 'Country': 'country', 'City': 'city',
'Name': 'name', 'Division': 'division', 'M/F': 'gender', '5K': '5k', '10K': '10k', '15K': '15k',
'20K': '20k', 'Half': 'half', '25K': '25k', '30K': '30k', '35K': '35k', '40K': '40k',
'Gender': 'gender_place'}
# The rojour data has an unnamed field that varies depending on the year.
# We can't drop this field since it's used later to remove certain records.
if year == 2016:
headers_map.update({'Unnamed: 8': 'para_status'})
else:
headers_map.update({'Unnamed: 9': 'para_status'})
df = df.rename(columns=headers_map)
# Drop all runners with a 'para' status and then drop the para_status field
df = df[df.para_status != 'MI']
df = df[df.para_status != 'VI']
df = df.drop('para_status', axis=1)
return df
def band_age(age):
"""
Banding method that maps a Boston Marathon runner's (integer) age to a labeled age band and level.
**Note**: The age brackets on the BAA website are as follows:
* 14-19*, 20-24, 25-29, 30-34, 35-39, 40-44, 45-49, 50-54, 55-59, 60-64, 65-70, 70-74, 75-79, and 80
This places 70 into two brackets. We have assumed this is a typo and use the bands '65-69' and '70-74'.
We have also ignored the minimum age in case it has not been the same in every year
:param int age: Age of Boston Marathon runner
:return: (banded_level, age_banding) where: banded_level is banded level of age for Boston Marathon runner and
age_banding is banding of age for Boston Marathon runner in 5 year increments
:rtype: (int, str)
"""
if age <= 19:
bid = 1
bstr = '<= 19'
elif age <= 24:
bid = 2
bstr = '20-24'
elif age <= 29:
bid = 3
bstr = '25-29'
elif age <= 34:
bid = 4
bstr = '30-34'
elif age <= 39:
bid = 5
bstr = '35-39'
elif age <= 44:
bid = 6
bstr = '40-44'
elif age <= 49:
bid = 7
bstr = '45-49'
elif age <= 54:
bid = 8
bstr = '50-54'
elif age <= 59:
bid = 9
bstr = '55-59'
elif age <= 64:
bid = 10
bstr = '60-64'
elif age <= 69:
bid = 11
bstr = '65-69'
elif age <= 74:
bid = 12
bstr = '70-74'
elif age <= 79:
bid = 13
bstr = '75-79'
else:
bid = 14
bstr = '80+'
return bid, bstr
def append_age_banding(df):
"""
Method that appends a banding of the age field, which is consistent with the method `band_age`.
**Note**: This method assumes that the DataFrame `df` include the (int) field named 'age', which is
:param pandas.DataFrame df: DataFrame of transformed marathon data
:return: DataFrame of transformed marathon data that includes a banding of age consistent with method `band_age`
:rtype: pandas.DataFrame
"""
return pd.concat((
df,
df['age'].apply(lambda cell: pd.Series(band_age(cell), index=['age_bucket', 'age_range']))
), axis=1)
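# --- Hedged illustration (not part of the original pipeline): what append_age_banding
# adds for a tiny invented frame. band_age maps 18 -> (1, '<= 19'), 42 -> (6, '40-44'),
# 81 -> (14, '80+'), so two extra columns appear next to 'age'.
def _demo_append_age_banding():
    demo = pd.DataFrame({'age': [18, 42, 81]})
    return append_age_banding(demo)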
def combine_boston_data(list_dfs):
"""
Method that takes the union of a list of DataFrames each representing different years of Boston Marathon data. The
field named 'age' is also used to append a banding for runners' age.
:param list[pandas.DataFrame] list_dfs: List of DataFrames containing transformed marathon data
:return: DataFrame of transformed and unioned marathon data that includes a banding of age consistent with method
`band_age`
:rtype: pandas.DataFrame
"""
df_combine = pd.concat(list_dfs, sort=True)
df_combine = append_age_banding(df_combine)
df_combine.drop(['pace', 'Proj Time', 'Unnamed: 0'], axis=1, inplace=True)
return df_combine
def pipe_reader(input_file):
"""
    Read a pipe-delimited dataset without pandas read_csv, for files whose
    columns contain commas
:param str input_file: File path
:return: The pipe delimited file as a DataFrame
:rtype: pandas.DataFrame
"""
with open(input_file, 'r') as f:
temp_file = f.read()
temp_file = temp_file.split('\n')
lis = []
for row in temp_file:
row = row.split('|')
if len(row) == 20:
lis.append(row)
temp_df = pd.DataFrame(lis, columns=lis[0])
temp_df = temp_df.drop(0, axis=0)
return temp_df
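# --- Hedged alternative (not part of the original module): pandas can also read a
# pipe-delimited file directly. The manual loop above is kept because it silently drops
# rows that do not have exactly 20 fields; the flag below needs pandas >= 1.3 (older
# versions use error_bad_lines=False), so treat this as a sketch, not a drop-in swap.
def pipe_reader_with_pandas(input_file):
    return pd.read_csv(input_file, sep='|', engine='python', on_bad_lines='skip')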
def process_boston_data():
"""
Method to import, transform, and combine Boston Marathon data.
:return: DataFrame of transformed and combined Boston Marathon data.
:rtype: pandas.DataFrame
"""
# Read in data
llimllib_boston_results_2013 = pd.read_csv('dashathon/data/external_data/llimllib_boston_results_2013.csv',
delimiter=',')
llimllib_boston_results_2014 = pd.read_csv('dashathon/data/external_data/llimllib_boston_results_2014.csv',
delimiter=',')
rojour_boston_results_2015 = pd.read_csv('dashathon/data/external_data/rojour_boston_results_2015.csv',
delimiter=',')
rojour_boston_results_2016 = pd.read_csv('dashathon/data/external_data/rojour_boston_results_2016.csv',
delimiter=',')
rojour_boston_results_2017 = pd.read_csv('dashathon/data/external_data/rojour_boston_results_2017.csv',
delimiter=',')
# Transform data
boston_results_2013 = transform_llimllib_boston_data(df=llimllib_boston_results_2013, year=2013)
boston_results_2014 = transform_llimllib_boston_data(df=llimllib_boston_results_2014, year=2014)
boston_results_2015 = transform_rojour_boston_data(df=rojour_boston_results_2015, year=2015)
boston_results_2016 = transform_rojour_boston_data(df=rojour_boston_results_2016, year=2016)
boston_results_2017 = transform_rojour_boston_data(df=rojour_boston_results_2017, year=2017)
# Combine Boston data
boston_results = combine_boston_data(list_dfs=[boston_results_2013, boston_results_2014, boston_results_2015,
boston_results_2016, boston_results_2017])
# Append host city to distinguish among other marathon results
boston_results['host_city'] = 'Boston'
# Removing gender 'W' from bib in boston base
boston_results.bib = boston_results.bib.str.replace('W', '')
return boston_results
def process_nyc_data():
"""
Method to import, transform, and combine NYC Marathon data.
:return: DataFrame of transformed and combine NYC Marathon data.
:rtype: pandas.DataFrame
"""
andreanr_nyc_results_2015 = pd.read_csv('dashathon/data/external_data/andreanr_nyc_results_2015.csv')
andreanr_nyc_results_2016 = pd.read_csv('dashathon/data/external_data/andreanr_nyc_results_2016.csv')
andreanr_nyc_results_2017 = pd.read_csv('dashathon/data/external_data/andreanr_nyc_results_2017.csv')
andreanr_nyc_results_2018 = | pd.read_csv('dashathon/data/external_data/andreanr_nyc_results_2018.csv') | pandas.read_csv |
import abc
import itertools
import inspect
import pandas as pd
import numpy as np
from skimage.measure import regionprops_table, perimeter
from scipy.ndimage.measurements import labeled_comprehension
from scipy.ndimage.morphology import distance_transform_edt
from scipy.ndimage import find_objects
# TODO test for 3D feature extraction/conversion
# TODO test with empty labels
class BaseFeatureExtractor():
'''Base class for feature extractors. Extract features from all combinations
    of label-channel in labels,channels input dicts. Optionally target keys can be filtered.
    Returns the result as a pandas dataframe.
'''
def __init__(self,
label_targets='all',
channel_targets='all',
*args,
**kwargs):
'''
Args:
label_targets: list of keys to filter label images
channel_targets: list of keys to filter channel images
'''
self.label_targets = label_targets
self.channel_targets = channel_targets
def __call__(self, labels, channels):
if not (isinstance(labels, dict) or labels is None):
raise ValueError(
                'Expects labels to be a dictionary of images. Received {}'.
format(type(labels)))
if not (isinstance(channels, dict) or channels is None):
raise ValueError(
                'Expects channels to be a dictionary of images. Received {}'.
format(type(channels)))
# filter targets
if self.label_targets is None:
labels = None
elif self.label_targets != 'all':
labels = {
key: val
for key, val in labels.items() if key in self.label_targets
}
if self.channel_targets is None:
channels = None
elif self.channel_targets != 'all':
channels = {
key: val
for key, val in channels.items() if key in self.channel_targets
}
if labels is None and channels is None:
raise ValueError(
'At least one label image or one intensity channel must be provided'
)
if labels is None:
# measure image properties --> single label covering entire image
labels = {
'img': np.ones_like(next(iter(channels.values())),
dtype=np.uint8)
}
if channels is None:
channels = {'na': None}
all_props = pd.DataFrame(columns=[
'channel', 'region', 'object_id', 'feature_name', 'feature_value'
])
# all combination of labels and channel
for (label_key,
label), (ch_key, ch) in itertools.product(labels.items(),
channels.items()):
if label.max() == 0: # empty label image
continue
props = self._extract_features(label, ch)
props = pd.DataFrame(props)
props = props.set_index(
'label').stack().reset_index() #.set_index('label')
props.columns = ['object_id', 'feature_name', 'feature_value']
props['channel'] = ch_key
props['region'] = label_key
all_props = all_props.append(props, sort=False)
all_props = all_props.apply(self._dataframe_hook, axis=1)
return all_props
def _dataframe_hook(self, row):
'''Function applied to each row of the final dataframe'''
return row
@abc.abstractmethod
def _extract_features(self, label, intensity):
        '''Method to extract features for the given label, intensity_image pair.
        Is expected to return a dict with the following format:
example:
{'label':[1,2,3],
'area':[101,45,1000],
'mean_intensity': [10,100,25]}
'''
pass
class QuantilesFeatureExtractor(BaseFeatureExtractor):
'''Extract quantiles intensities over each labeled region'''
def __init__(self, quantiles=[0., 0.25, 0.5, 0.75, 1.0], *args, **kwargs):
super().__init__(*args, **kwargs)
self.quantiles = quantiles
def _extract_features(self, labels, intensity):
unique_l = np.unique(labels)
unique_l = unique_l[unique_l != 0]
q_vals = np.stack([
np.quantile(intensity[labels == l], self.quantiles)
for l in unique_l
],
axis=-1)
props = {
'q{:.3f}'.format(q).replace('.', '_'): qv
for q, qv in zip(self.quantiles, q_vals)
}
props['label'] = unique_l
return props
class IntensityFeatureExtractor(BaseFeatureExtractor):
'''Extract mean,std,mad (median absolute deviation) intensities
over each labeled region'''
_features_functions = {
'mean': np.mean,
'std': np.std,
'mad': lambda x: np.median(
np.abs(x - np.median(x))
), # median absolute deviation defined as median(|xi - median(x)|)
}
_implemented_features = set(_features_functions.keys())
def __init__(self, features=['mean'], *args, **kwargs):
super().__init__(*args, **kwargs)
for f in set(features) - self._implemented_features:
raise NotImplementedError('feature {} not implemented'.format(f))
self.features = features
def _extract_features(self, labels, intensity):
unique_l = np.unique(labels)
unique_l = unique_l[unique_l != 0]
props = {
feature_name:
labeled_comprehension(intensity,
labels,
unique_l,
self._features_functions[feature_name],
out_dtype=float,
default=np.nan)
for feature_name in self.features
}
props['label'] = unique_l
return props
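# --- Hedged usage sketch (not part of the original module): calling the extractor on a
# tiny invented label image / intensity channel pair. Assumes the pandas version this
# module targets (the base class relies on DataFrame.append above).
def _demo_intensity_extractor():
    labels = {'cells': np.array([[1, 1, 0], [0, 2, 2]])}
    channels = {'dapi': np.array([[1.0, 3.0, 0.0], [0.0, 5.0, 7.0]])}
    extractor = IntensityFeatureExtractor(features=['mean', 'std'])
    # Long-format frame: one row per (channel, region, object_id, feature_name).
    return extractor(labels, channels)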
class DistanceTransformFeatureExtractor(BaseFeatureExtractor):
'''Extract features based on distance transform (mean|max|median radius)
over each labeled region'''
_features_functions = {
'mean_radius': np.mean,
'max_radius': np.max,
'median_radius': np.median,
}
_implemented_features = set(_features_functions.keys())
_require_isotropic = {'median_radius'}
def __init__(self,
features=['mean_radius', 'max_radius', 'median_radius'],
physical_coords=False,
spacing=1,
*args,
**kwargs):
'''
Args:
features: list of features to compute
physical_coords: whether to convert px coordinates to physical coordinates
spacing: voxel size to do the coordinate conversion
'''
# override channel target
try:
del kwargs['channel_targets']
except KeyError:
pass
super().__init__(channel_targets=None, *args, **kwargs)
for f in set(features) - self._implemented_features:
raise NotImplementedError('feature {} not implemented'.format(f))
if not isinstance(
spacing,
(int, float)) and not np.all(np.array(spacing) == spacing[0]):
for f in self._require_isotropic.intersection(features):
raise ValueError(
'{} feature requires isotropic spacing'.format(f))
# add compulsory 'label' needed of indexing
self.features = set(features)
self.spacing = spacing
self.physical_coords = physical_coords
def _extract_features(self, labels, intensity):
if self.physical_coords:
self.ndim = labels.ndim
self.spacing = np.broadcast_to(np.array(self.spacing), self.ndim)
sampling = self.spacing
else:
sampling = None
props = {f: [] for f in self.features}
unique_l = []
# compute distance transform separately for each label (in case they are touching)
locs = find_objects(labels)
for l, loc in enumerate(locs, start=1):
if loc:
unique_l.append(l)
mask = np.pad(labels[loc] > 0, 1)
dist = distance_transform_edt(mask, sampling=sampling)
radii = dist[mask]
for f in self.features:
props[f].append(self._features_functions[f](radii))
props['label'] = unique_l
return props
class SKRegionPropFeatureExtractor(BaseFeatureExtractor):
'''scikit-image regionprops wrapper.
Notes:
for details see https://scikit-image.org/docs/dev/api/skimage.measure.html#skimage.measure.regionprops
Also compute the convex_perimeter.
'''
# TODO complete name mapping and coord conv
# skimage uses 2D names for 3D features (e.g. area, perimeter, etc.)
_name_mapping_2D_3D = {'area': 'volume'}
_name_mapping_3D_2D = {
val: key
for key, val in _name_mapping_2D_3D.items()
}
_implemented_features = {
'label', 'volume', 'area', 'centroid', 'weighted_centroid',
'minor_axis_length', 'major_axis_length', 'eccentricity', 'perimeter',
'convex_area', 'convex_perimeter', 'solidity', 'moments_hu',
'weighted_moments_hu'
}
_require_isotropic = {
'minor_axis_length', 'major_axis_length', 'perimeter',
'convex_perimeter'
}
_physical_coords_conversion = {
'volume': lambda x, spacing: x * np.prod(spacing),
'perimeter': lambda x, spacing: x * spacing[0],
'convex_perimeter': lambda x, spacing: x * spacing[0],
'area': lambda x, spacing: x * np.prod(spacing),
'convex_area': lambda x, spacing: x * np.prod(spacing),
'centroid-0': lambda c, spacing: c * spacing[0],
'centroid-1': lambda c, spacing: c * spacing[1],
'centroid-2': lambda c, spacing: c * spacing[2],
'weighted_centroid-0': lambda c, spacing: c * spacing[0],
'weighted_centroid-1': lambda c, spacing: c * spacing[1],
'weighted_centroid-2': lambda c, spacing: c * spacing[2],
'minor_axis_length': lambda x, spacing: x * spacing[0],
'major_axis_length': lambda x, spacing: x * spacing[0],
}
def __init__(self,
features=['centroid'],
physical_coords=False,
spacing=1,
*args,
**kwargs):
'''
Args:
features: list of features to compute
physical_coords: whether to convert px coordinates to physical coordinates
spacing: voxel size to do the coordinate conversion
'''
super().__init__(*args, **kwargs)
for f in set(features) - self._implemented_features:
raise NotImplementedError('feature {} not implemented'.format(f))
# add compulsory 'label' needed of indexing
self.features = set(features).union({'label'})
self.spacing = spacing
self.physical_coords = physical_coords
def _px_to_phy(self, row):
if self.physical_coords:
if not self.isotropic and row.feature_name in self._require_isotropic:
raise ValueError(
'{} requires isotropic spacing. spacing: {}'.format(
row.feature_name, self.spacing))
convert_fun = self._physical_coords_conversion.get(
row.feature_name)
if convert_fun is not None:
row.feature_value = convert_fun(row.feature_value,
self.spacing)
return row
def _dataframe_hook(self, row):
'''Function applied to each row of the final dataframe'''
row = self._px_to_phy(row)
return row
def _extract_features(self, labels, intensity):
self.ndim = labels.ndim
self.spacing = np.broadcast_to(np.array(self.spacing), self.ndim)
self.isotropic = np.all(self.spacing == self.spacing[0])
# map 2D feature names if 3D image
if self.ndim == 3:
# skimage regions props uses 2D feature names (e.g. perimeter, area instead of surface, volume respectively)
features = {
self._name_mapping_3D_2D.get(f, f)
for f in self.features
}
else:
features = self.features
# special case pre: extract "convex_image" to compute missing "convex_perimeter" feature
if 'convex_perimeter' in features:
features = [
'convex_image' if x == 'convex_perimeter' else x
for x in features
]
# extract actual features
props = regionprops_table(labels,
intensity_image=intensity,
properties=features,
separator='-')
# special case post: extract compute missing "convex_perimeter" feature
convex_images = props.pop('convex_image', None)
if convex_images is not None:
props['convex_perimeter'] = [
perimeter(hull) for hull in convex_images
]
# map back 3D feature names if 3D image
if self.ndim == 3:
props = {
self._name_mapping_2D_3D.get(key, key): val
for key, val in props.items()
}
return props
class BasedDerivedFeatureCalculator():
'''Base class to compute derived features from a dataframe of existing features.
    Add new features as static methods with base feature names as arguments.'''
@property
@classmethod
@abc.abstractmethod
def grouping(cls):
'''List of keys to groupby props DataFrame to compute derived features'''
return NotImplementedError
def __init__(self, features, label_targets='all', channel_targets='all'):
for f in features:
fun = getattr(self, f, None)
if fun is None:
raise NotImplementedError(
'feature {} not implemented'.format(f))
self.features = features
self.label_targets = label_targets
self.channel_targets = channel_targets
self.arg_keys = [
a for a in ['channel', 'region', 'object_id']
if a not in self.grouping
] + ['feature_name']
def __call__(self, props):
props = props.set_index(['channel', 'region',
'object_id']).sort_index()
if self.label_targets != 'all':
props = props.loc(axis=0)[:, self.label_targets]
if self.channel_targets != 'all':
props = props.loc(axis=0)[self.channel_targets + ['na']]
derived_props = props.groupby(self.grouping).apply(
self._compute_subdf_features, props=props)
if len(derived_props) > 0:
derived_props = derived_props.droplevel(-1).reset_index()
return derived_props
def _get_arg_value(self, arg, subdf):
        '''Return the feature_value entries of subdf whose arg_keys columns prefix-match the '__'-separated components of arg.'''
rows_masks = [
subdf[k].str.startswith(a)
for k, a in zip(self.arg_keys, arg.split('__'))
]
rows_mask = np.prod(rows_masks, axis=0).astype(bool)
return subdf[rows_mask].feature_value.values.squeeze()
def _compute_subdf_features(self, subdf, props):
subdf = subdf.reset_index()
derived_features = []
for feature in self.features:
# get the function computing the requested features
fun = getattr(self, feature)
# get a list of required base features
fun_args = inspect.getfullargspec(fun).args
# get required base features' value
kwargs = {arg: self._get_arg_value(arg, subdf) for arg in fun_args}
try:
feature_value = fun(**kwargs)
except Exception as e:
feature_value = None
if feature_value is not None and isinstance(
feature_value, (float, int, bool)):
derived_features.append({
'feature_name': feature,
'feature_value': feature_value
})
return | pd.DataFrame(derived_features) | pandas.DataFrame |
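# --- Hedged illustration (not part of the original library): a concrete subclass written
# the way the base-class docstring describes, i.e. derived features are static methods
# whose argument names refer to base feature names. The grouping keys and the feature
# below (an axis ratio from SKRegionPropFeatureExtractor output) are invented examples.
class DemoDerivedFeatureCalculator(BasedDerivedFeatureCalculator):
    grouping = ['channel', 'region', 'object_id']

    @staticmethod
    def aspect_ratio(minor_axis_length, major_axis_length):
        # Ratio of the fitted-ellipse axes; assumes both base features were extracted.
        return float(minor_axis_length / major_axis_length)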
"""
Library of standardized plotting functions for basic plot formats
"""
import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
import matplotlib.dates as mdates
import pandas as pd
import xarray as xr
from scipy.interpolate import interp1d
from scipy.signal import welch
# Standard field labels
# - default: e.g., "Km/s"
# - all superscript: e.g., "K m s^{-1}"
fieldlabels_default_units = {
'wspd': r'Wind speed [m/s]',
'wdir': r'Wind direction [$^\circ$]',
'u': r'u [m/s]',
'v': r'v [m/s]',
'w': r'Vertical wind speed [m/s]',
'theta': r'$\theta$ [K]',
'thetav': r'$\theta_v$ [K]',
'uu': r'$\langle u^\prime u^\prime \rangle \;[\mathrm{m^2/s^2}]$',
'vv': r'$\langle v^\prime v^\prime \rangle \;[\mathrm{m^2/s^2}]$',
'ww': r'$\langle w^\prime w^\prime \rangle \;[\mathrm{m^2/s^2}]$',
'uv': r'$\langle u^\prime v^\prime \rangle \;[\mathrm{m^2/s^2}]$',
'uw': r'$\langle u^\prime w^\prime \rangle \;[\mathrm{m^2/s^2}]$',
'vw': r'$\langle v^\prime w^\prime \rangle \;[\mathrm{m^2/s^2}]$',
'tw': r'$\langle w^\prime \theta^\prime \rangle \;[\mathrm{Km/s}]$',
'TI': r'TI $[-]$',
'TKE': r'TKE $[\mathrm{m^2/s^2}]$',
}
fieldlabels_superscript_units = {
'wspd': r'Wind speed [m s$^{-1}$]',
'wdir': r'Wind direction [$^\circ$]',
'u': r'u [m s$^{-1}$]',
'v': r'v [m s$^{-1}$]',
'w': r'Vertical wind speed [m s$^{-1}$]',
'theta': r'$\theta$ [K]',
'thetav': r'$\theta_v$ [K]',
'uu': r'$\langle u^\prime u^\prime \rangle \;[\mathrm{m^2 s^{-2}}]$',
'vv': r'$\langle v^\prime v^\prime \rangle \;[\mathrm{m^2 s^{-2}}]$',
'ww': r'$\langle w^\prime w^\prime \rangle \;[\mathrm{m^2 s^{-2}}]$',
'uv': r'$\langle u^\prime v^\prime \rangle \;[\mathrm{m^2 s^{-2}}]$',
'uw': r'$\langle u^\prime w^\prime \rangle \;[\mathrm{m^2 s^{-2}}]$',
'vw': r'$\langle v^\prime w^\prime \rangle \;[\mathrm{m^2 s^{-2}}]$',
'tw': r'$\langle w^\prime \theta^\prime \rangle \;[\mathrm{K m s^{-1}}]$',
'TI': r'TI $[-]$',
'TKE': r'TKE $[\mathrm{m^2 s^{-2}}]$',
}
# Standard field labels for frequency spectra
spectrumlabels_default_units = {
'u': r'$E_{uu}\;[\mathrm{m^2/s}]$',
'v': r'$E_{vv}\;[\mathrm{m^2/s}]$',
'w': r'$E_{ww}\;[\mathrm{m^2/s}]$',
'theta': r'$E_{\theta\theta}\;[\mathrm{K^2 s}]$',
'thetav': r'$E_{\theta\theta}\;[\mathrm{K^2 s}]$',
'wspd': r'$E_{UU}\;[\mathrm{m^2/s}]$',
}
spectrumlabels_superscript_units = {
'u': r'$E_{uu}\;[\mathrm{m^2\;s^{-1}}]$',
'v': r'$E_{vv}\;[\mathrm{m^2\;s^{-1}}]$',
'w': r'$E_{ww}\;[\mathrm{m^2\;s^{-1}}]$',
'theta': r'$E_{\theta\theta}\;[\mathrm{K^2\;s}]$',
'thetav': r'$E_{\theta\theta}\;[\mathrm{K^2\;s}]$',
'wspd': r'$E_{UU}\;[\mathrm{m^2\;s^{-1}}]$',
}
# Default settings
default_colors = plt.rcParams['axes.prop_cycle'].by_key()['color']
standard_fieldlabels = fieldlabels_default_units
standard_spectrumlabels = spectrumlabels_default_units
# Supported dimensions and associated names
dimension_names = {
'time': ['datetime','time','Time'],
'height': ['height','heights','z'],
'frequency': ['frequency','f',]
}
# Show debug information
debug = False
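# Illustrative helper (a sketch added for clarity; the field names and values
# are arbitrary): the plotting functions below operate on long-format pandas
# objects indexed by the dimensions listed in `dimension_names`, e.g. a
# DataFrame with a (datetime, height) MultiIndex and one column per field.
def _example_timeheight_dataset(ntimes=6, heights=(10., 40., 80.)):
    """Build a small synthetic dataset of the expected shape."""
    times = pd.date_range('2020-01-01', periods=ntimes, freq='10min')
    index = pd.MultiIndex.from_product([times, list(heights)],
                                       names=['datetime', 'height'])
    return pd.DataFrame({'wspd': 10 * np.random.rand(len(index)),
                         'wdir': 360 * np.random.rand(len(index))},
                        index=index)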
def plot_timeheight(datasets,
fields=None,
fig=None,ax=None,
colorschemes={},
fieldlimits=None,
heightlimits=None,
timelimits=None,
fieldlabels={},
labelsubplots=False,
showcolorbars=True,
fieldorder='C',
ncols=1,
subfigsize=(12,4),
plot_local_time=False,
local_time_offset=0,
datasetkwargs={},
**kwargs
):
"""
Plot time-height contours for different datasets and fields
Usage
=====
datasets : pandas.DataFrame or dict
Dataset(s). If more than one set, datasets should
be a dictionary with entries <dataset_name>: dataset
fields : str, list, 'all' (or None)
Fieldname(s) corresponding to particular column(s) of
the datasets. fields can be None if input are MultiIndex Series.
'all' means all fields will be plotted (in this case all
datasets should have the same fields)
fig : figure handle
Custom figure handle. Should be specified together with ax
ax : axes handle, or list or numpy ndarray with axes handles
        Custom axes handle(s).
Size of ax should equal ndatasets*nfields
colorschemes : str or dict
Name of colorschemes. If only one field is plotted, colorschemes
can be a string. Otherwise, it should be a dictionary with
entries <fieldname>: name_of_colorschemes
        Missing colorschemes are set to 'viridis'
fieldlimits : list or tuple, or dict
Value range for the various fields. If only one field is
plotted, fieldlimits can be a list or tuple. Otherwise, it
should be a dictionary with entries <fieldname>: fieldlimit.
Missing fieldlimits are set automatically
heightlimits : list or tuple
Height axis limits
timelimits : list or tuple
Time axis limits
fieldlabels : str or dict
Custom field labels. If only one field is plotted, fieldlabels
can be a string. Otherwise it should be a dictionary with
entries <fieldname>: fieldlabel
labelsubplots : bool, list or tuple
Label subplots as (a), (b), (c), ... If a list or tuple is given
their values should be the horizontal and vertical position
relative to each subaxis.
showcolorbars : bool
Show colorbar per subplot
fieldorder : 'C' or 'F'
Index ordering for assigning fields and datasets to axes grid
(row by row). Fields is considered the first axis, so 'C' means
fields change slowest, 'F' means fields change fastest.
ncols : int
Number of columns in axes grid, must be a true divisor of total
number of axes.
subfigsize : list or tuple
Standard size of subfigures
plot_local_time : bool or str
Plot dual x axes with both UTC time and local time. If a str is
provided, then plot_local_time is assumed to be True and the str
is used as the datetime format.
local_time_offset : float
Local time offset from UTC
datasetkwargs : dict
Dataset-specific options that are passed on to the actual
plotting function. These options overwrite general options
specified through **kwargs. The argument should be a dictionary
with entries <dataset_name>: {**kwargs}
**kwargs : other keyword arguments
Options that are passed on to the actual plotting function.
Note that these options should be the same for all datasets and
fields and can not be used to set dataset or field specific
        limits, colorschemes, norms, etc.
Example uses include setting shading, rasterized, etc.
"""
args = PlottingInput(
datasets=datasets,
fields=fields,
fieldlimits=fieldlimits,
fieldlabels=fieldlabels,
colorschemes=colorschemes,
fieldorder=fieldorder
)
args.set_missing_fieldlimits()
nfields = len(args.fields)
ndatasets = len(args.datasets)
ntotal = nfields * ndatasets
# Concatenate custom and standard field labels
# (custom field labels overwrite standard fields labels if existent)
args.fieldlabels = {**standard_fieldlabels, **args.fieldlabels}
fig, ax, nrows, ncols = _create_subplots_if_needed(
ntotal,
ncols,
sharex=True,
sharey=True,
subfigsize=subfigsize,
hspace=0.2,
fig=fig,
ax=ax
)
# Create flattened view of axes
axv = np.asarray(ax).reshape(-1)
# Initialise list of colorbars
cbars = []
# Loop over datasets, fields and times
for i, dfname in enumerate(args.datasets):
df = args.datasets[dfname]
heightvalues = _get_dim_values(df,'height')
timevalues = _get_dim_values(df,'time')
assert(heightvalues is not None), 'timeheight plot needs a height axis'
assert(timevalues is not None), 'timeheight plot needs a time axis'
if isinstance(timevalues, pd.DatetimeIndex):
# If plot local time, shift timevalues
if plot_local_time is not False:
timevalues = timevalues + pd.to_timedelta(local_time_offset,'h')
# Convert to days since 0001-01-01 00:00 UTC, plus one
numerical_timevalues = mdates.date2num(timevalues.values)
else:
if isinstance(timevalues, pd.TimedeltaIndex):
timevalues = timevalues.total_seconds()
# Timevalues is already a numerical array
numerical_timevalues = timevalues
# Create time-height mesh grid
tst = _get_staggered_grid(numerical_timevalues)
zst = _get_staggered_grid(heightvalues)
Ts,Zs = np.meshgrid(tst,zst,indexing='xy')
# Create list with available fields only
available_fields = _get_available_fieldnames(df,args.fields)
# Pivot all fields in a dataset at once
df_pivot = _get_pivot_table(df,'height',available_fields)
for j, field in enumerate(args.fields):
# If available_fields is [None,], fieldname is unimportant
if available_fields == [None]:
pass
# Else, check if field is available
elif not field in available_fields:
print('Warning: field "'+field+'" not available in dataset '+dfname)
continue
# Store plotting options in dictionary
plotting_properties = {
'vmin': args.fieldlimits[field][0],
'vmax': args.fieldlimits[field][1],
'cmap': args.cmap[field]
}
# Index of axis corresponding to dataset i and field j
if args.fieldorder=='C':
axi = i*nfields + j
else:
axi = j*ndatasets + i
# Extract data from dataframe
fieldvalues = _get_pivoted_field(df_pivot,field)
# Gather label, color, general options and dataset-specific options
# (highest priority to dataset-specific options, then general options)
try:
plotting_properties = {**plotting_properties,**kwargs,**datasetkwargs[dfname]}
except KeyError:
plotting_properties = {**plotting_properties,**kwargs}
# Plot data
im = axv[axi].pcolormesh(Ts,Zs,fieldvalues.T,**plotting_properties)
# Colorbar mark up
if showcolorbars:
cbar = fig.colorbar(im,ax=axv[axi],shrink=1.0)
# Set field label if known
try:
cbar.set_label(args.fieldlabels[field])
except KeyError:
pass
# Save colorbar
cbars.append(cbar)
# Set title if more than one dataset
if ndatasets>1:
axv[axi].set_title(dfname,fontsize=16)
# Format time axis
if isinstance(timevalues, (pd.DatetimeIndex, pd.TimedeltaIndex)):
ax2 = _format_time_axis(fig,axv[(nrows-1)*ncols:],plot_local_time,local_time_offset,timelimits)
else:
ax2 = None
# Set time limits if specified
if not timelimits is None:
axv[-1].set_xlim(timelimits)
# Set time label
for axi in axv[(nrows-1)*ncols:]:
axi.set_xlabel('time [s]')
if not heightlimits is None:
axv[-1].set_ylim(heightlimits)
# Add y labels
for r in range(nrows):
axv[r*ncols].set_ylabel(r'Height [m]')
# Align time, height and color labels
_align_labels(fig,axv,nrows,ncols)
if showcolorbars:
_align_labels(fig,[cb.ax for cb in cbars],nrows,ncols)
# Number sub figures as a, b, c, ...
if labelsubplots is not False:
try:
hoffset, voffset = labelsubplots
except (TypeError, ValueError):
hoffset, voffset = -0.14, 1.0
for i,axi in enumerate(axv):
axi.text(hoffset,voffset,'('+chr(i+97)+')',transform=axi.transAxes,size=16)
# Return cbar instead of array if ntotal==1
if len(cbars)==1:
cbars=cbars[0]
if (plot_local_time is not False) and ax2 is not None:
return fig, ax, ax2, cbars
else:
return fig, ax, cbars
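# Hypothetical usage sketch for plot_timeheight (kept as a comment so that
# importing this module stays free of side effects; relies on the synthetic
# helper defined above):
#     df = _example_timeheight_dataset()
#     fig, ax, cbars = plot_timeheight(df, fields=['wspd'], colorschemes='viridis')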
def plot_timehistory_at_height(datasets,
fields=None,
heights=None,
fig=None,ax=None,
fieldlimits=None,
timelimits=None,
fieldlabels={},
cmap=None,
stack_by_datasets=None,
labelsubplots=False,
showlegend=None,
ncols=1,
subfigsize=(12,3),
plot_local_time=False,
local_time_offset=0,
datasetkwargs={},
**kwargs
):
"""
Plot time history at specified height(s) for various dataset(s)
and/or field(s).
By default, data for multiple datasets or multiple heights are
stacked in a single subplot. When multiple datasets and multiple
heights are specified together, heights are stacked in a subplot
per field and per dataset.
Usage
=====
datasets : pandas.DataFrame or dict
Dataset(s). If more than one set, datasets should
be a dictionary with entries <dataset_name>: dataset
fields : str, list, 'all' (or None)
Fieldname(s) corresponding to particular column(s) of
the datasets. fields can be None if input are Series.
'all' means all fields will be plotted (in this case all
datasets should have the same fields)
heights : float, list, 'all' (or None)
Height(s) for which time history is plotted. heights can be
None if all datasets combined have no more than one height
value. 'all' means the time history for all heights in the
datasets will be plotted (in this case all datasets should
have the same heights)
fig : figure handle
Custom figure handle. Should be specified together with ax
ax : axes handle, or list or numpy ndarray with axes handles
        Custom axes handle(s).
Size of ax should equal nfields * (ndatasets or nheights)
fieldlimits : list or tuple, or dict
Value range for the various fields. If only one field is
plotted, fieldlimits can be a list or tuple. Otherwise, it
should be a dictionary with entries <fieldname>: fieldlimit.
Missing fieldlimits are set automatically
timelimits : list or tuple
Time axis limits
fieldlabels : str or dict
Custom field labels. If only one field is plotted, fieldlabels
can be a string. Otherwise it should be a dictionary with
entries <fieldname>: fieldlabel
cmap : str
Colormap used when stacking heights
stack_by_datasets : bool (or None)
Flag to specify what is plotted ("stacked") together per subfigure.
If True, stack datasets together, otherwise stack by heights. If
None, stack_by_datasets will be set based on the number of heights
and datasets.
labelsubplots : bool, list or tuple
Label subplots as (a), (b), (c), ... If a list or tuple is given
their values should be the horizontal and vertical position
relative to each subaxis.
showlegend : bool (or None)
Label different plots and show legend. If None, showlegend is set
to True if legend will have more than one entry, otherwise it is
set to False.
ncols : int
Number of columns in axes grid, must be a true divisor of total
number of axes.
subfigsize : list or tuple
Standard size of subfigures
plot_local_time : bool or str
Plot dual x axes with both UTC time and local time. If a str is
provided, then plot_local_time is assumed to be True and the str
is used as the datetime format.
local_time_offset : float
Local time offset from UTC
datasetkwargs : dict
Dataset-specific options that are passed on to the actual
plotting function. These options overwrite general options
specified through **kwargs. The argument should be a dictionary
with entries <dataset_name>: {**kwargs}
**kwargs : other keyword arguments
Options that are passed on to the actual plotting function.
Note that these options should be the same for all datasets,
fields and heights, and they can not be used to set dataset,
field or height specific colors, limits, etc.
Example uses include setting linestyle/width, marker, etc.
"""
# Avoid FutureWarning concerning the use of an implicitly registered
# datetime converter for a matplotlib plotting method. The converter
# was registered by pandas on import. Future versions of pandas will
# require explicit registration of matplotlib converters, as done here.
from pandas.plotting import register_matplotlib_converters
register_matplotlib_converters()
args = PlottingInput(
datasets=datasets,
fields=fields,
heights=heights,
fieldlimits=fieldlimits,
fieldlabels=fieldlabels,
)
nfields = len(args.fields)
nheights = len(args.heights)
ndatasets = len(args.datasets)
# Concatenate custom and standard field labels
# (custom field labels overwrite standard fields labels if existent)
args.fieldlabels = {**standard_fieldlabels, **args.fieldlabels}
# Set up subplot grid
if stack_by_datasets is None:
if nheights>1:
stack_by_datasets = False
else:
stack_by_datasets = True
if stack_by_datasets:
ntotal = nfields*nheights
else:
ntotal = nfields*ndatasets
fig, ax, nrows, ncols = _create_subplots_if_needed(
ntotal,
ncols,
sharex=True,
subfigsize=subfigsize,
hspace=0.2,
fig=fig,
ax=ax
)
# Create flattened view of axes
axv = np.asarray(ax).reshape(-1)
# Set showlegend if not specified
if showlegend is None:
if (stack_by_datasets and ndatasets>1) or (not stack_by_datasets and nheights>1):
showlegend = True
else:
showlegend = False
# Loop over datasets and fields
for i,dfname in enumerate(args.datasets):
df = args.datasets[dfname]
timevalues = _get_dim_values(df,'time',default_idx=True)
assert(timevalues is not None), 'timehistory plot needs a time axis'
heightvalues = _get_dim_values(df,'height')
if isinstance(timevalues, pd.TimedeltaIndex):
timevalues = timevalues.total_seconds()
# If plot local time, shift timevalues
if (plot_local_time is not False) and \
isinstance(timevalues, (pd.DatetimeIndex, pd.TimedeltaIndex)):
timevalues = timevalues + pd.to_timedelta(local_time_offset,'h')
# Create list with available fields only
available_fields = _get_available_fieldnames(df,args.fields)
# If any of the requested heights is not available,
# pivot the dataframe to allow interpolation.
# Pivot all fields in a dataset at once to reduce computation time
if (not heightvalues is None) and (not all([h in heightvalues for h in args.heights])):
df_pivot = _get_pivot_table(df,'height',available_fields)
pivoted = True
if debug: print('Pivoting '+dfname)
else:
pivoted = False
for j, field in enumerate(args.fields):
# If available_fields is [None,], fieldname is unimportant
if available_fields == [None]:
pass
# Else, check if field is available
elif not field in available_fields:
print('Warning: field "'+field+'" not available in dataset '+dfname)
continue
for k, height in enumerate(args.heights):
# Store plotting options in dictionary
# Set default linestyle to '-' and no markers
plotting_properties = {
'linestyle':'-',
'marker':None,
}
# Axis order, label and title depend on value of stack_by_datasets
if stack_by_datasets:
# Index of axis corresponding to field j and height k
axi = k*nfields + j
# Use datasetname as label
if showlegend:
plotting_properties['label'] = dfname
# Set title if multiple heights are compared
if nheights>1:
axv[axi].set_title('z = {:.1f} m'.format(height),fontsize=16)
# Set colors
plotting_properties['color'] = default_colors[i % len(default_colors)]
else:
# Index of axis corresponding to field j and dataset i
axi = i*nfields + j
# Use height as label
if showlegend:
plotting_properties['label'] = 'z = {:.1f} m'.format(height)
# Set title if multiple datasets are compared
if ndatasets>1:
axv[axi].set_title(dfname,fontsize=16)
# Set colors
if cmap is not None:
cmap = mpl.cm.get_cmap(cmap)
plotting_properties['color'] = cmap(k/(nheights-1))
else:
plotting_properties['color'] = default_colors[k % len(default_colors)]
# Extract data from dataframe
if pivoted:
signal = interp1d(heightvalues,_get_pivoted_field(df_pivot,field).values,axis=-1,fill_value="extrapolate")(height)
else:
slice_z = _get_slice(df,height,'height')
signal = _get_field(slice_z,field).values
# Gather label, color, general options and dataset-specific options
# (highest priority to dataset-specific options, then general options)
try:
plotting_properties = {**plotting_properties,**kwargs,**datasetkwargs[dfname]}
except KeyError:
plotting_properties = {**plotting_properties,**kwargs}
# Plot data
axv[axi].plot(timevalues,signal,**plotting_properties)
# Set field label if known
try:
axv[axi].set_ylabel(args.fieldlabels[field])
except KeyError:
pass
# Set field limits if specified
try:
axv[axi].set_ylim(args.fieldlimits[field])
except KeyError:
pass
# Set axis grid
for axi in axv:
axi.xaxis.grid(True,which='both')
axi.yaxis.grid(True)
# Format time axis
if isinstance(timevalues, (pd.DatetimeIndex, pd.TimedeltaIndex)):
ax2 = _format_time_axis(fig,axv[(nrows-1)*ncols:],plot_local_time,local_time_offset,timelimits)
else:
ax2 = None
# Set time limits if specified
if not timelimits is None:
axv[-1].set_xlim(timelimits)
# Set time label
for axi in axv[(nrows-1)*ncols:]:
axi.set_xlabel('time [s]')
# Number sub figures as a, b, c, ...
if labelsubplots is not False:
try:
hoffset, voffset = labelsubplots
except (TypeError, ValueError):
hoffset, voffset = -0.14, 1.0
for i,axi in enumerate(axv):
axi.text(hoffset,voffset,'('+chr(i+97)+')',transform=axi.transAxes,size=16)
# Add legend
if showlegend:
leg = _format_legend(axv,index=ncols-1)
# Align labels
_align_labels(fig,axv,nrows,ncols)
if (plot_local_time is not False) and ax2 is not None:
return fig, ax, ax2
else:
return fig, ax
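# Hypothetical usage sketch for plot_timehistory_at_height (comment only;
# uses the synthetic helper defined near the top of this module):
#     df = _example_timeheight_dataset()
#     fig, ax = plot_timehistory_at_height(df, fields=['wspd'], heights=[40.])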
def plot_profile(datasets,
fields=None,
times=None,
timerange=None,
fig=None,ax=None,
fieldlimits=None,
heightlimits=None,
fieldlabels={},
cmap=None,
stack_by_datasets=None,
labelsubplots=False,
showlegend=None,
fieldorder='C',
ncols=None,
subfigsize=(4,5),
plot_local_time=False,
local_time_offset=0,
datasetkwargs={},
**kwargs
):
"""
Plot vertical profile at specified time(s) for various dataset(s)
and/or field(s).
By default, data for multiple datasets or multiple times are
stacked in a single subplot. When multiple datasets and multiple
times are specified together, times are stacked in a subplot
per field and per dataset.
Usage
=====
datasets : pandas.DataFrame or dict
Dataset(s). If more than one set, datasets should
be a dictionary with entries <dataset_name>: dataset
fields : str, list, 'all' (or None)
Fieldname(s) corresponding to particular column(s) of
the datasets. fields can be None if input are Series.
'all' means all fields will be plotted (in this case all
datasets should have the same fields)
times : str, int, float, list (or None)
Time(s) for which vertical profiles are plotted, specified as
either datetime strings or numerical values (seconds, e.g.,
simulation time). times can be None if all datasets combined
have no more than one time value, or if timerange is specified.
timerange : tuple or list
Start and end times (inclusive) between which all times are
plotted. If cmap is None, then it will automatically be set to
viridis by default. This overrides times when specified.
fig : figure handle
Custom figure handle. Should be specified together with ax
ax : axes handle, or list or numpy ndarray with axes handles
        Custom axes handle(s).
Size of ax should equal nfields * (ndatasets or ntimes)
fieldlimits : list or tuple, or dict
Value range for the various fields. If only one field is
plotted, fieldlimits can be a list or tuple. Otherwise, it
should be a dictionary with entries <fieldname>: fieldlimit.
Missing fieldlimits are set automatically
heightlimits : list or tuple
Height axis limits
fieldlabels : str or dict
Custom field labels. If only one field is plotted, fieldlabels
can be a string. Otherwise it should be a dictionary with
entries <fieldname>: fieldlabel
cmap : str
Colormap used when stacking times
stack_by_datasets : bool (or None)
Flag to specify what is plotted ("stacked") together per subfigure.
If True, stack datasets together, otherwise stack by times. If
None, stack_by_datasets will be set based on the number of times
and datasets.
labelsubplots : bool, list or tuple
Label subplots as (a), (b), (c), ... If a list or tuple is given
their values should be the horizontal and vertical position
relative to each subaxis.
showlegend : bool (or None)
Label different plots and show legend. If None, showlegend is set
to True if legend will have more than one entry, otherwise it is
set to False.
fieldorder : 'C' or 'F'
Index ordering for assigning fields and datasets/times (depending
on stack_by_datasets) to axes grid (row by row). Fields is considered the
first axis, so 'C' means fields change slowest, 'F' means fields
change fastest.
ncols : int
Number of columns in axes grid, must be a true divisor of total
number of axes.
subfigsize : list or tuple
Standard size of subfigures
plot_local_time : bool or str
Plot dual x axes with both UTC time and local time. If a str is
provided, then plot_local_time is assumed to be True and the str
is used as the datetime format.
local_time_offset : float
Local time offset from UTC
datasetkwargs : dict
Dataset-specific options that are passed on to the actual
plotting function. These options overwrite general options
specified through **kwargs. The argument should be a dictionary
with entries <dataset_name>: {**kwargs}
**kwargs : other keyword arguments
Options that are passed on to the actual plotting function.
Note that these options should be the same for all datasets,
fields and times, and they can not be used to set dataset,
field or time specific colors, limits, etc.
Example uses include setting linestyle/width, marker, etc.
"""
args = PlottingInput(
datasets=datasets,
fields=fields,
times=times,
timerange=timerange,
fieldlimits=fieldlimits,
fieldlabels=fieldlabels,
fieldorder=fieldorder,
)
nfields = len(args.fields)
ntimes = len(args.times)
ndatasets = len(args.datasets)
# Concatenate custom and standard field labels
# (custom field labels overwrite standard fields labels if existent)
args.fieldlabels = {**standard_fieldlabels, **args.fieldlabels}
# Set up subplot grid
if stack_by_datasets is None:
if ntimes>1:
stack_by_datasets = False
else:
stack_by_datasets = True
if stack_by_datasets:
ntotal = nfields * ntimes
else:
ntotal = nfields * ndatasets
fig, ax, nrows, ncols = _create_subplots_if_needed(
ntotal,
ncols,
default_ncols=int(ntotal/nfields),
fieldorder=args.fieldorder,
avoid_single_column=True,
sharey=True,
subfigsize=subfigsize,
hspace=0.4,
fig=fig,
ax=ax,
)
# Create flattened view of axes
axv = np.asarray(ax).reshape(-1)
# Set showlegend if not specified
if showlegend is None:
if (stack_by_datasets and ndatasets>1) or (not stack_by_datasets and ntimes>1):
showlegend = True
else:
showlegend = False
# Set default sequential colormap if timerange was specified
if (timerange is not None) and (cmap is None):
cmap = 'viridis'
# Loop over datasets, fields and times
for i, dfname in enumerate(args.datasets):
df = args.datasets[dfname]
heightvalues = _get_dim_values(df,'height',default_idx=True)
assert(heightvalues is not None), 'profile plot needs a height axis'
timevalues = _get_dim_values(df,'time')
# If plot local time, shift timevalues
timedelta_to_local = None
if plot_local_time is not False:
timedelta_to_local = pd.to_timedelta(local_time_offset,'h')
timevalues = timevalues + timedelta_to_local
# Create list with available fields only
available_fields = _get_available_fieldnames(df,args.fields)
# Pivot all fields in a dataset at once
if timevalues is not None:
df_pivot = _get_pivot_table(df,'height',available_fields)
for j, field in enumerate(args.fields):
# If available_fields is [None,], fieldname is unimportant
if available_fields == [None]:
pass
# Else, check if field is available
elif not field in available_fields:
print('Warning: field "'+field+'" not available in dataset '+dfname)
continue
for k, time in enumerate(args.times):
plotting_properties = {}
# Axis order, label and title depend on value of stack_by_datasets
if stack_by_datasets:
# Index of axis corresponding to field j and time k
if args.fieldorder == 'C':
axi = j*ntimes + k
else:
axi = k*nfields + j
# Use datasetname as label
if showlegend:
plotting_properties['label'] = dfname
# Set title if multiple times are compared
if ntimes>1:
if isinstance(time, (int,float,np.number)):
tstr = '{:g} s'.format(time)
else:
if plot_local_time is False:
tstr = pd.to_datetime(time).strftime('%Y-%m-%d %H%M UTC')
elif plot_local_time is True:
tstr = pd.to_datetime(time).strftime('%Y-%m-%d %H:%M')
else:
assert isinstance(plot_local_time,str), 'Unexpected plot_local_time format'
tstr = pd.to_datetime(time).strftime(plot_local_time)
axv[axi].set_title(tstr, fontsize=16)
# Set color
plotting_properties['color'] = default_colors[i % len(default_colors)]
else:
# Index of axis corresponding to field j and dataset i
if args.fieldorder == 'C':
axi = j*ndatasets + i
else:
axi = i*nfields + j
# Use time as label
if showlegend:
if isinstance(time, (int,float,np.number)):
plotting_properties['label'] = '{:g} s'.format(time)
else:
if plot_local_time is False:
plotting_properties['label'] = pd.to_datetime(time).strftime('%Y-%m-%d %H%M UTC')
elif plot_local_time is True:
plotting_properties['label'] = pd.to_datetime(time).strftime('%Y-%m-%d %H:%M')
else:
assert isinstance(plot_local_time,str), 'Unexpected plot_local_time format'
plotting_properties['label'] = pd.to_datetime(time).strftime(plot_local_time)
# Set title if multiple datasets are compared
if ndatasets>1:
axv[axi].set_title(dfname,fontsize=16)
# Set colors
if cmap is not None:
cmap = mpl.cm.get_cmap(cmap)
plotting_properties['color'] = cmap(k/(ntimes-1))
else:
plotting_properties['color'] = default_colors[k % len(default_colors)]
# Extract data from dataframe
if timevalues is None:
# Dataset will not be pivoted
fieldvalues = _get_field(df,field).values
else:
if plot_local_time is not False:
# specified times are in local time, convert back to UTC
slice_t = _get_slice(df_pivot,time-timedelta_to_local,'time')
else:
slice_t = _get_slice(df_pivot,time,'time')
fieldvalues = _get_pivoted_field(slice_t,field).values.squeeze()
# Gather label, color, general options and dataset-specific options
# (highest priority to dataset-specific options, then general options)
try:
plotting_properties = {**plotting_properties,**kwargs,**datasetkwargs[dfname]}
except KeyError:
plotting_properties = {**plotting_properties,**kwargs}
# Plot data
try:
axv[axi].plot(fieldvalues,heightvalues,**plotting_properties)
except ValueError as e:
print(e,'--', time, 'not found in index?')
# Set field label if known
try:
axv[axi].set_xlabel(args.fieldlabels[field])
except KeyError:
pass
# Set field limits if specified
try:
axv[axi].set_xlim(args.fieldlimits[field])
except KeyError:
pass
for axi in axv:
axi.grid(True,which='both')
# Set height limits if specified
if not heightlimits is None:
axv[0].set_ylim(heightlimits)
# Add y labels
for r in range(nrows):
axv[r*ncols].set_ylabel(r'Height [m]')
# Align labels
_align_labels(fig,axv,nrows,ncols)
# Number sub figures as a, b, c, ...
if labelsubplots is not False:
try:
hoffset, voffset = labelsubplots
except (TypeError, ValueError):
hoffset, voffset = -0.14, -0.18
for i,axi in enumerate(axv):
axi.text(hoffset,voffset,'('+chr(i+97)+')',transform=axi.transAxes,size=16)
# Add legend
if showlegend:
leg = _format_legend(axv,index=ncols-1)
return fig,ax
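# Hypothetical usage sketch for plot_profile (comment only; the requested time
# must exist in the dataset's time index):
#     fig, ax = plot_profile(df, fields=['wspd'], times='2020-01-01 00:30')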
def plot_spectrum(datasets,
fields=None,
height=None,
times=None,
fig=None,ax=None,
fieldlimits=None,
freqlimits=None,
fieldlabels={},
labelsubplots=False,
showlegend=None,
ncols=None,
subfigsize=(4,5),
datasetkwargs={},
**kwargs
):
"""
Plot frequency spectrum at a given height for different datasets,
time(s) and field(s), using a subplot per time and per field.
Note that this function does not interpolate to the requested height,
i.e., if height is not None, the specified value should be available
in all datasets.
Usage
=====
datasets : pandas.DataFrame or dict
Dataset(s) with spectrum data. If more than one set,
datasets should be a dictionary with entries
<dataset_name>: dataset
fields : str, list, 'all' (or None)
Fieldname(s) corresponding to particular column(s) of
the datasets. fields can be None if input are Series.
'all' means all fields will be plotted (in this case all
datasets should have the same fields)
height : float (or None)
        Height for which frequency spectra are plotted. If datasets
have no height dimension, height does not need to be specified.
times : str, int, float, list (or None)
Time(s) for which frequency spectra are plotted, specified as
either datetime strings or numerical values (seconds, e.g.,
simulation time). times can be None if all datasets combined
have no more than one time value.
fig : figure handle
Custom figure handle. Should be specified together with ax
ax : axes handle, or list or numpy ndarray with axes handles
        Custom axes handle(s).
Size of ax should equal nfields * ntimes
fieldlimits : list or tuple, or dict
Value range for the various fields. If only one field is
plotted, fieldlimits can be a list or tuple. Otherwise, it
should be a dictionary with entries <fieldname>: fieldlimit.
Missing fieldlimits are set automatically
freqlimits : list or tuple
Frequency axis limits
fieldlabels : str or dict
Custom field labels. If only one field is plotted, fieldlabels
can be a string. Otherwise it should be a dictionary with
entries <fieldname>: fieldlabel
labelsubplots : bool, list or tuple
Label subplots as (a), (b), (c), ... If a list or tuple is given
their values should be the horizontal and vertical position
relative to each subaxis.
showlegend : bool (or None)
Label different plots and show legend. If None, showlegend is set
to True if legend will have more than one entry, otherwise it is
set to False.
ncols : int
Number of columns in axes grid, must be a true divisor of total
number of axes.
subfigsize : list or tuple
Standard size of subfigures
datasetkwargs : dict
Dataset-specific options that are passed on to the actual
plotting function. These options overwrite general options
specified through **kwargs. The argument should be a dictionary
with entries <dataset_name>: {**kwargs}
**kwargs : other keyword arguments
Options that are passed on to the actual plotting function.
Note that these options should be the same for all datasets,
fields and times, and they can not be used to set dataset,
field or time specific colors, limits, etc.
Example uses include setting linestyle/width, marker, etc.
"""
args = PlottingInput(
datasets=datasets,
fields=fields,
times=times,
fieldlimits=fieldlimits,
fieldlabels=fieldlabels,
)
nfields = len(args.fields)
ntimes = len(args.times)
ndatasets = len(args.datasets)
ntotal = nfields * ntimes
# Concatenate custom and standard field labels
# (custom field labels overwrite standard fields labels if existent)
args.fieldlabels = {**standard_spectrumlabels, **args.fieldlabels}
fig, ax, nrows, ncols = _create_subplots_if_needed(
ntotal,
ncols,
default_ncols=ntimes,
avoid_single_column=True,
sharex=True,
subfigsize=subfigsize,
wspace=0.3,
fig=fig,
ax=ax,
)
# Create flattened view of axes
axv = np.asarray(ax).reshape(-1)
# Set showlegend if not specified
if showlegend is None:
if ndatasets>1:
showlegend = True
else:
showlegend = False
# Loop over datasets, fields and times
for i, dfname in enumerate(args.datasets):
df = args.datasets[dfname]
frequencyvalues = _get_dim_values(df,'frequency',default_idx=True)
assert(frequencyvalues is not None), 'spectrum plot needs a frequency axis'
timevalues = _get_dim_values(df,'time')
# Create list with available fields only
available_fields = _get_available_fieldnames(df,args.fields)
for j, field in enumerate(args.fields):
# If available_fields is [None,], fieldname is unimportant
if available_fields == [None]:
pass
# Else, check if field is available
elif not field in available_fields:
print('Warning: field "'+field+'" not available in dataset '+dfname)
continue
for k, time in enumerate(args.times):
plotting_properties = {}
if showlegend:
plotting_properties['label'] = dfname
# Index of axis corresponding to field j and time k
axi = j*ntimes + k
# Axes mark up
if i==0 and ntimes>1:
axv[axi].set_title(pd.to_datetime(time).strftime('%Y-%m-%d %H%M UTC'),fontsize=16)
# Gather label, general options and dataset-specific options
# (highest priority to dataset-specific options, then general options)
try:
plotting_properties = {**plotting_properties,**kwargs,**datasetkwargs[dfname]}
except KeyError:
plotting_properties = {**plotting_properties,**kwargs}
# Get field spectrum
slice_t = _get_slice(df,time,'time')
slice_tz = _get_slice(slice_t,height,'height')
spectrum = _get_field(slice_tz,field).values
# Plot data
axv[axi].loglog(frequencyvalues[1:],spectrum[1:],**plotting_properties)
                # Set field limits if specified
try:
axv[axi].set_ylim(args.fieldlimits[field])
except KeyError:
pass
# Set frequency label
for c in range(ncols):
axv[ncols*(nrows-1)+c].set_xlabel('$f$ [Hz]')
    # Set field label if known
for r in range(nrows):
try:
axv[r*ncols].set_ylabel(args.fieldlabels[args.fields[r]])
except KeyError:
pass
# Align labels
_align_labels(fig,axv,nrows,ncols)
# Set frequency limits if specified
if not freqlimits is None:
axv[0].set_xlim(freqlimits)
# Number sub figures as a, b, c, ...
if labelsubplots is not False:
try:
hoffset, voffset = labelsubplots
except (TypeError, ValueError):
hoffset, voffset = -0.14, -0.18
for i,axi in enumerate(axv):
axi.text(hoffset,voffset,'('+chr(i+97)+')',transform=axi.transAxes,size=16)
# Add legend
if showlegend:
leg = _format_legend(axv,index=ncols-1)
return fig, ax
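# Hypothetical usage sketch for plot_spectrum (comment only). The input is
# assumed to be a DataFrame indexed by frequency (optionally also by time and
# height) with one spectral column per field, e.g.:
#     fig, ax = plot_spectrum(spectra, fields=['u'])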
# ---------------------------------------------
#
# DEFINITION OF AUXILIARY CLASSES AND FUNCTIONS
#
# ---------------------------------------------
class InputError(Exception):
"""Exception raised for errors in the input.
Attributes:
message -- explanation of the error
"""
def __init__(self, message):
self.message = message
class PlottingInput(object):
"""
Auxiliary class to collect input data and options for plotting
functions, and to check if the inputs are consistent
"""
supported_datatypes = (
pd.Series,
pd.DataFrame,
xr.DataArray,
xr.Dataset,
)
def __init__(self, datasets, fields, **argd):
# Add all arguments as class attributes
self.__dict__.update({'datasets':datasets,
'fields':fields,
**argd})
# Check consistency of all attributes
self._check_consistency()
def _check_consistency(self):
"""
Check consistency of all input data
"""
# ----------------------
# Check dataset argument
# ----------------------
# If a single dataset is provided, convert to a dictionary
# under a generic key 'Dataset'
if isinstance(self.datasets, self.supported_datatypes):
self.datasets = {'Dataset': self.datasets}
for dfname,df in self.datasets.items():
# convert dataset types here
if isinstance(df, (xr.Dataset,xr.DataArray)):
# handle xarray datatypes
self.datasets[dfname] = df.to_dataframe()
columns = self.datasets[dfname].columns
if len(columns) == 1:
# convert to pd.Series
self.datasets[dfname] = self.datasets[dfname][columns[0]]
else:
assert(isinstance(df, self.supported_datatypes)), \
"Dataset {:s} of type {:s} not supported".format(dfname,str(type(df)))
# ----------------------
# Check fields argument
# ----------------------
# If no fields are specified, check that
# - all datasets are series
# - the name of every series is either None or matches other series names
if self.fields is None:
assert(all([isinstance(self.datasets[dfname],pd.Series) for dfname in self.datasets])), \
"'fields' argument must be specified unless all datasets are pandas Series"
series_names = set()
for dfname in self.datasets:
series_names.add(self.datasets[dfname].name)
if len(series_names)==1:
self.fields = list(series_names)
else:
raise InputError('attempting to plot multiple series with different field names')
elif isinstance(self.fields,str):
# If fields='all', retrieve fields from dataset
if self.fields=='all':
self.fields = _get_fieldnames(list(self.datasets.values())[0])
assert(all([_get_fieldnames(df)==self.fields for df in self.datasets.values()])), \
"The option fields = 'all' only works when all datasets have the same fields"
# If fields is a single instance, convert to a list
else:
self.fields = [self.fields,]
# ----------------------------------
# Check match of fields and datasets
# ----------------------------------
# Check if all datasets have at least one of the requested fields
for dfname in self.datasets:
df = self.datasets[dfname]
if isinstance(df,pd.DataFrame):
assert(any([field in df.columns for field in self.fields])), \
'DataFrame '+dfname+' does not contain any of the requested fields'
elif isinstance(df,pd.Series):
if df.name is None:
assert(len(self.fields)==1), \
                        'Series must have a name if more than one field is specified'
else:
assert(df.name in self.fields), \
'Series '+dfname+' does not match any of the requested fields'
# ---------------------------------
# Check heights argument (optional)
# ---------------------------------
try:
# If no heights are specified, check that all datasets combined have
# no more than one height value
if self.heights is None:
av_heights = set()
for df in self.datasets.values():
heightvalues = _get_dim_values(df,'height')
try:
for height in heightvalues:
av_heights.add(height)
except TypeError:
# heightvalues is None
pass
if len(av_heights)==0:
# None of the datasets have height values
self.heights = [None,]
elif len(av_heights)==1:
self.heights = list(av_heights)
else:
raise InputError("found more than one height value so 'heights' argument must be specified")
# If heights='all', retrieve heights from dataset
elif isinstance(self.heights,str) and self.heights=='all':
self.heights = _get_dim_values(list(self.datasets.values())[0],'height')
assert(all([np.allclose(_get_dim_values(df,'height'),self.heights) for df in self.datasets.values()])), \
"The option heights = 'all' only works when all datasets have the same vertical levels"
# If heights is single instance, convert to list
elif isinstance(self.heights,(int,float)):
self.heights = [self.heights,]
except AttributeError:
pass
# -----------------------------------
# Check timerange argument (optional)
# -----------------------------------
try:
if self.timerange is not None:
if self.times is not None:
print('Using specified time range',self.timerange,
'and ignoring',self.times)
assert isinstance(self.timerange,(tuple,list)), \
'Need to specify timerange as (starttime,endtime)'
assert (len(self.timerange) == 2)
try:
starttime = pd.to_datetime(self.timerange[0])
endtime = pd.to_datetime(self.timerange[1])
except ValueError:
print('Unable to convert timerange to timestamps')
else:
# get unique times from all datasets
alltimes = []
for df in self.datasets.values():
alltimes += list(_get_dim_values(df,'time'))
alltimes = pd.DatetimeIndex(np.unique(alltimes))
inrange = (alltimes >= starttime) & (alltimes <= endtime)
self.times = alltimes[inrange]
except AttributeError:
pass
# ---------------------------------
# Check times argument (optional)
# ---------------------------------
# If times is single instance, convert to list
try:
# If no times are specified, check that all datasets combined have
# no more than one time value
if self.times is None:
av_times = set()
for df in self.datasets.values():
timevalues = _get_dim_values(df,'time')
try:
for time in timevalues.values:
av_times.add(time)
except AttributeError:
pass
if len(av_times)==0:
# None of the datasets have time values
self.times = [None,]
elif len(av_times)==1:
self.times = list(av_times)
else:
raise InputError("found more than one time value so 'times' argument must be specified")
elif isinstance(self.times,(str,int,float,np.number,pd.Timestamp)):
self.times = [self.times,]
except AttributeError:
pass
# -------------------------------------
# Check fieldlimits argument (optional)
# -------------------------------------
# If one set of fieldlimits is specified, check number of fields
# and convert to dictionary
try:
if self.fieldlimits is None:
self.fieldlimits = {}
elif isinstance(self.fieldlimits, (list, tuple)):
assert(len(self.fields)==1), 'Unclear to what field fieldlimits corresponds'
self.fieldlimits = {self.fields[0]:self.fieldlimits}
except AttributeError:
self.fieldlimits = {}
# -------------------------------------
# Check fieldlabels argument (optional)
# -------------------------------------
# If one fieldlabel is specified, check number of fields
try:
if isinstance(self.fieldlabels, str):
assert(len(self.fields)==1), 'Unclear to what field fieldlabels corresponds'
self.fieldlabels = {self.fields[0]: self.fieldlabels}
except AttributeError:
self.fieldlabels = {}
# -------------------------------------
# Check colorscheme argument (optional)
# -------------------------------------
# If one colorscheme is specified, check number of fields
try:
self.cmap = {}
if isinstance(self.colorschemes, str):
assert(len(self.fields)==1), 'Unclear to what field colorschemes corresponds'
self.cmap[self.fields[0]] = mpl.cm.get_cmap(self.colorschemes)
else:
# Set missing colorschemes to viridis
for field in self.fields:
if field not in self.colorschemes.keys():
if field == 'wdir':
self.colorschemes[field] = 'twilight'
else:
self.colorschemes[field] = 'viridis'
self.cmap[field] = mpl.cm.get_cmap(self.colorschemes[field])
except AttributeError:
pass
# -------------------------------------
# Check fieldorder argument (optional)
# -------------------------------------
# Make sure fieldorder is recognized
try:
assert(self.fieldorder in ['C','F']), "Error: fieldorder '"\
+self.fieldorder+"' not recognized, must be either 'C' or 'F'"
except AttributeError:
pass
def set_missing_fieldlimits(self):
"""
Set missing fieldlimits to min and max over all datasets
"""
for field in self.fields:
if field not in self.fieldlimits.keys():
try:
self.fieldlimits[field] = [
min([_get_field(df,field).min() for df in self.datasets.values() if _contains_field(df,field)]),
max([_get_field(df,field).max() for df in self.datasets.values() if _contains_field(df,field)])
]
except ValueError:
self.fieldlimits[field] = [None,None]
def _get_dim(df,dim,default_idx=False):
"""
Search for specified dimension in dataset and return
level (referred to by either label or position) and
    axis {0 or 'index', 1 or 'columns'}.
    If default_idx is True, a single unnamed index is
    assumed to correspond to the requested dimension.
"""
assert(dim in dimension_names.keys()), \
"Dimension '"+dim+"' not supported"
# 1. Try to find dim based on name
for name in dimension_names[dim]:
if name in df.index.names:
if debug: print("Found "+dim+" dimension in index with name '{}'".format(name))
return name, 0
else:
try:
if name in df.columns:
if debug: print("Found "+dim+" dimension in column with name '{}'".format(name))
return name, 1
except AttributeError:
# pandas Series has no columns
pass
# 2. Look for Datetime or Timedelta index
if dim=='time':
for idx in range(len(df.index.names)):
if isinstance(df.index.get_level_values(idx),(pd.DatetimeIndex,pd.TimedeltaIndex,pd.PeriodIndex)):
if debug: print("Found "+dim+" dimension in index with level {} without a name ".format(idx))
return idx, 0
# 3. If default index is True, assume that a
# single nameless index corresponds to the
# requested dimension
if (not isinstance(df.index,(pd.MultiIndex,pd.DatetimeIndex,pd.TimedeltaIndex,pd.PeriodIndex))
and default_idx and (df.index.name is None) ):
if debug: print("Assuming nameless index corresponds to '{}' dimension".format(dim))
return 0,0
    # 4. Did not find the requested dimension
if debug: print("Found no "+dim+" dimension")
return None, None
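# Illustration (based on the synthetic dataset sketched near the top of this
# module, whose MultiIndex levels are named 'datetime' and 'height'):
#     _get_dim(df, 'time')   returns ('datetime', 0)
#     _get_dim(df, 'height') returns ('height', 0)
# i.e. both dimensions are found in the index (axis 0).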
def _get_available_fieldnames(df,fieldnames):
"""
Return subset of fields available in df
"""
available_fieldnames = []
if isinstance(df,pd.DataFrame):
for field in fieldnames:
if field in df.columns:
available_fieldnames.append(field)
# A Series only has one field, so return that field name
# (if that field is not in fields, an error would have been raised)
elif isinstance(df,pd.Series):
available_fieldnames.append(df.name)
return available_fieldnames
def _get_fieldnames(df):
"""
Return list of fieldnames in df
"""
if isinstance(df,pd.DataFrame):
fieldnames = list(df.columns)
# Remove any column corresponding to
# a dimension (time, height or frequency)
for dim in dimension_names.keys():
name, axis = _get_dim(df,dim)
if axis==1:
fieldnames.remove(name)
return fieldnames
elif isinstance(df,pd.Series):
return [df.name,]
def _contains_field(df,fieldname):
if isinstance(df,pd.DataFrame):
return fieldname in df.columns
elif isinstance(df,pd.Series):
return (df.name is None) or (df.name==fieldname)
def _get_dim_values(df,dim,default_idx=False):
"""
Return values for a given dimension
"""
level, axis = _get_dim(df,dim,default_idx)
# Requested dimension is an index
if axis==0:
return df.index.get_level_values(level).unique()
# Requested dimension is a column
elif axis==1:
return df[level].unique()
# Requested dimension not available
else:
return None
def _get_pivot_table(df,dim,fieldnames):
"""
Return pivot table with given fieldnames as columns
"""
level, axis = _get_dim(df,dim)
# Unstack an index
if axis==0:
return df.unstack(level=level)
# Pivot about a column
elif axis==1:
return df.pivot(columns=level,values=fieldnames)
# Dimension not found, return dataframe
else:
return df
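# Illustration: for a (datetime, height) MultiIndex,
# _get_pivot_table(df, 'height', ['wspd']) amounts to
# df.unstack(level='height'); the result carries one column per
# (field, height) pair, which _get_pivoted_field below selects by field name.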
def _get_slice(df,key,dim):
"""
Return cross-section of dataset
"""
if key is None:
return df
# Get dimension level and axis
level, axis = _get_dim(df,dim)
# Requested dimension is an index
if axis==0:
if isinstance(df.index,pd.MultiIndex):
return df.xs(key,level=level)
else:
return df.loc[df.index==key]
# Requested dimension is a column
elif axis==1:
return df.loc[df[level]==key]
# Requested dimension not available, return dataframe
else:
return df
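# Illustration: _get_slice(df, 40., 'height') returns the cross-section
# df.xs(40., level='height') for a MultiIndex input, leaving the remaining
# (time) index intact.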
def _get_field(df,fieldname):
"""
Return field from dataset
"""
if isinstance(df,pd.DataFrame):
return df[fieldname]
elif isinstance(df,pd.Series):
if df.name is None or df.name==fieldname:
return df
else:
return None
def _get_pivoted_field(df,fieldname):
"""
Return field from pivoted dataset
"""
if isinstance(df.columns,pd.MultiIndex):
return df[fieldname]
else:
return df
def _create_subplots_if_needed(ntotal,
ncols=None,
default_ncols=1,
fieldorder='C',
avoid_single_column=False,
sharex=False,
sharey=False,
subfigsize=(12,3),
wspace=0.2,
hspace=0.2,
fig=None,
ax=None
):
"""
Auxiliary function to create fig and ax
If fig and ax are None:
- Set nrows and ncols based on ntotal and specified ncols,
accounting for fieldorder and avoid_single_column
- Create fig and ax with nrows and ncols, taking into account
sharex, sharey, subfigsize, wspace, hspace
If fig and ax are not None:
- Try to determine nrows and ncols from ax
- Check whether size of ax corresponds to ntotal
"""
if ax is None:
if not ncols is None:
# Use ncols if specified and appropriate
assert(ntotal%ncols==0), 'Error: Specified number of columns is not a true divisor of total number of subplots'
nrows = int(ntotal/ncols)
else:
            # Default number of columns
ncols = default_ncols
nrows = int(ntotal/ncols)
if fieldorder=='F':
# Swap number of rows and columns
nrows, ncols = ncols, nrows
if avoid_single_column and ncols==1:
# Swap number of rows and columns
nrows, ncols = ncols, nrows
# Create fig and ax with nrows and ncols
fig,ax = plt.subplots(nrows=nrows,ncols=ncols,sharex=sharex,sharey=sharey,figsize=(subfigsize[0]*ncols,subfigsize[1]*nrows))
# Adjust subplot spacing
fig.subplots_adjust(wspace=wspace,hspace=hspace)
else:
# Make sure user-specified axes has appropriate size
assert(np.asarray(ax).size==ntotal), 'Specified axes does not have the right size'
# Determine nrows and ncols in specified axes
if isinstance(ax,mpl.axes.Axes):
nrows, ncols = (1,1)
else:
try:
nrows,ncols = np.asarray(ax).shape
except ValueError:
# ax array has only one dimension
# Determine whether ax is single row or single column based
# on individual ax positions x0 and y0
x0s = [axi.get_position().x0 for axi in ax]
y0s = [axi.get_position().y0 for axi in ax]
if all(x0==x0s[0] for x0 in x0s):
                    # All axes have the same relative x0 position
nrows = np.asarray(ax).size
ncols = 1
elif all(y0==y0s[0] for y0 in y0s):
                    # All axes have the same relative y0 position
nrows = 1
ncols = np.asarray(ax).size
else:
# More complex axes configuration,
# currently not supported
raise InputError('could not determine nrows and ncols in specified axes, complex axes configuration currently not supported')
return fig, ax, nrows, ncols
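# Illustration: _create_subplots_if_needed(6, ncols=3) creates and returns a
# new figure with a 2x3 axes grid, whereas passing an existing fig/ax pair
# only infers nrows/ncols and checks that ax indeed holds 6 axes.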
def _format_legend(axv,index):
"""
Auxiliary function to format legend
Usage
=====
axv : numpy 1d array
Flattened array of axes
index : int
Index of the axis where to place the legend
"""
all_handles = []
all_labels = []
# Check each axes and add new handle
for axi in axv:
handles, labels = axi.get_legend_handles_labels()
for handle,label in zip(handles,labels):
if not label in all_labels:
all_labels.append(label)
all_handles.append(handle)
leg = axv[index].legend(all_handles,all_labels,loc='upper left',bbox_to_anchor=(1.05,1.0),fontsize=16)
return leg
def _format_time_axis(fig,ax,
plot_local_time,
local_time_offset,
timelimits
):
"""
Auxiliary function to format time axis
"""
ax[-1].xaxis_date()
if timelimits is not None:
timelimits = [pd.to_datetime(tlim) for tlim in timelimits]
hour_interval = _determine_hourlocator_interval(ax[-1],timelimits)
if plot_local_time is not False:
if plot_local_time is True:
localtimefmt = '%I %p'
else:
assert isinstance(plot_local_time,str), 'Unexpected plot_local_time format'
localtimefmt = plot_local_time
# Format first axis (local time)
ax[-1].xaxis.set_minor_locator(mdates.HourLocator(byhour=range(0,24,hour_interval)))
ax[-1].xaxis.set_minor_formatter(mdates.DateFormatter(localtimefmt))
ax[-1].xaxis.set_major_locator(mdates.DayLocator(interval=12)) #Choose large interval so dates are not plotted
ax[-1].xaxis.set_major_formatter(mdates.DateFormatter(''))
# Set time limits if specified
if not timelimits is None:
            local_timelimits = pd.to_datetime(timelimits)
# IN DEVELOPMENT
from .. import settings
from .. import logging as logg
from ..preprocessing.moments import get_connectivities
from .utils import make_unique_list, test_bimodality
from .dynamical_model_utils import BaseDynamics, linreg, convolve, tau_inv, unspliced, spliced
import numpy as np
import pandas as pd
import matplotlib.pyplot as pl
from matplotlib import rcParams
from scipy.optimize import minimize
class DynamicsRecovery(BaseDynamics):
def __init__(self, adata=None, gene=None, load_pars=None, **kwargs):
super(DynamicsRecovery, self).__init__(adata, gene, **kwargs)
if load_pars and 'fit_alpha' in adata.var.keys():
self.load_pars(adata, gene)
elif self.recoverable:
self.initialize()
def initialize(self):
# set weights
u, s, w, perc = self.u, self.s, self.weights, 98
        u_w, s_w = u[w], s[w]
# initialize scaling
self.std_u, self.std_s = np.std(u_w), np.std(s_w)
scaling = self.std_u / self.std_s if isinstance(self.fit_scaling, bool) else self.fit_scaling
u, u_w = u / scaling, u_w / scaling
# initialize beta and gamma from extreme quantiles of s
weights_s = s_w >= np.percentile(s_w, perc, axis=0)
weights_u = u_w >= np.percentile(u_w, perc, axis=0)
weights_g = weights_s if self.steady_state_prior is None else weights_s | self.steady_state_prior[w]
beta, gamma = 1, linreg(convolve(u_w, weights_g), convolve(s_w, weights_g)) + 1e-6 # 1e-6 to avoid beta = gamma
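        # Note: at steady state du/dt = ds/dt = 0 implies u_ss/s_ss = gamma/beta,
        # so with beta fixed to 1 the (origin-constrained) regression of u on s
        # over the near-steady, extreme-quantile cells yields an initial
        # estimate of gamma.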
# initialize gamma / beta * scaling clipped to adapt faster to extreme ratios
gamma = gamma * 1.2 if gamma < .05 / scaling else gamma / 1.2 if gamma > 1.5 / scaling else gamma
u_inf, s_inf = u_w[weights_u | weights_s].mean(), s_w[weights_s].mean()
u0_, s0_ = u_inf, s_inf
alpha = u_inf * beta # np.mean([s_inf * gamma, u_inf * beta]) # np.mean([s0_ * gamma, u0_ * beta])
# initialize switching from u quantiles and alpha from s quantiles
tstat_u, pval_u, means_u = test_bimodality(u_w, kde=True)
tstat_s, pval_s, means_s = test_bimodality(s_w, kde=True)
self.pval_steady = max(pval_u, pval_s)
self.steady_u = means_u[1]
self.steady_s = means_s[1]
if self.pval_steady < 1e-3:
u_inf = np.mean([u_inf, self.steady_u])
alpha = gamma * s_inf
beta = alpha / u_inf
u0_, s0_ = u_inf, s_inf
# alpha, beta, gamma = np.array([alpha, beta, gamma]) * scaling
t_ = tau_inv(u0_, s0_, 0, 0, alpha, beta, gamma)
# update object with initialized vars
self.alpha, self.beta, self.gamma, self.scaling, self.alpha_, = alpha, beta, gamma, scaling, 0
self.u0_, self.s0_, self.t_ = u0_, s0_, t_
self.pars = np.array([alpha, beta, gamma, self.t_, self.scaling])[:, None]
# initialize time point assignment
self.t, self.tau, self.o = self.get_time_assignment()
self.loss = [self.get_loss()]
self.initialize_scaling(sight=.5)
self.initialize_scaling(sight=.1)
self.steady_state_ratio = self.gamma / self.beta
self.set_callbacks()
def initialize_scaling(self, sight=.5): # fit scaling and update if improved
z_vals = self.scaling + np.linspace(-1, 1, num=4) * self.scaling * sight
for z in z_vals:
self.update(scaling=z, beta=self.beta / self.scaling * z)
def fit(self, assignment_mode=None):
if self.max_iter > 0:
# pre-train with explicit time assignment
self.fit_t_and_alpha()
self.fit_scaling_()
self.fit_rates()
self.fit_t_()
            # actual EM (each iteration of simplex downhill is one EM iteration)
self.fit_t_and_rates()
# train with optimal time assignment (oth. projection)
self.assignment_mode = assignment_mode
self.update(adjust_t_=False)
self.fit_t_and_rates(refit_time=False)
# self.update(adjust_t_=False)
# self.t, self.tau, self.o = self.get_time_assignment()
self.update()
self.tau, self.tau_ = self.get_divergence(mode='tau')
self.likelihood = self.get_likelihood(refit_time=False)
def fit_t_and_alpha(self, **kwargs):
alpha_vals = self.alpha + np.linspace(-1, 1, num=5) * self.alpha / 10
for alpha in alpha_vals: self.update(alpha=alpha)
def mse(x):
return self.get_mse(t_=x[0], alpha=x[1], **kwargs)
res = minimize(mse, np.array([self.t_, self.alpha]), callback=self.cb_fit_t_and_alpha, **self.simplex_kwargs)# method='Nelder-Mead')
self.update(t_=res.x[0], alpha=res.x[1])
def fit_rates(self, **kwargs):
def mse(x):
return self.get_mse(alpha=x[0], gamma=x[1], **kwargs)
res = minimize(mse, np.array([self.alpha, self.gamma]), tol=1e-2, callback=self.cb_fit_rates, **self.simplex_kwargs)
self.update(alpha=res.x[0], gamma=res.x[1])
def fit_t_(self, **kwargs):
def mse(x):
return self.get_mse(t_=x[0], **kwargs)
res = minimize(mse, self.t_, callback=self.cb_fit_t_, **self.simplex_kwargs)
self.update(t_=res.x[0])
def fit_rates_all(self, **kwargs):
def mse(x):
return self.get_mse(alpha=x[0], beta=x[1], gamma=x[2], **kwargs)
res = minimize(mse, np.array([self.alpha, self.beta, self.gamma]), tol=1e-2, callback=self.cb_fit_rates_all, **self.simplex_kwargs)
self.update(alpha=res.x[0], beta=res.x[1], gamma=res.x[2])
def fit_t_and_rates(self, **kwargs):
def mse(x):
return self.get_mse(t_=x[0], alpha=x[1], beta=x[2], gamma=x[3], **kwargs)
res = minimize(mse, np.array([self.t_, self.alpha, self.beta, self.gamma]), tol=1e-2,
callback=self.cb_fit_t_and_rates, **self.simplex_kwargs)
self.update(t_=res.x[0], alpha=res.x[1], beta=res.x[2], gamma=res.x[3])
def fit_scaling_(self, **kwargs):
def mse(x):
return self.get_mse(t_=x[0], beta=x[1], scaling=x[2], **kwargs)
res = minimize(mse, np.array([self.t_, self.beta, self.scaling]), callback=self.cb_fit_scaling_, **self.simplex_kwargs)
self.update(t_=res.x[0], beta=res.x[1], scaling=res.x[2])
# Callback functions for the Optimizer
def cb_fit_t_and_alpha(self, x):
self.update(t_=x[0], alpha=x[1])
def cb_fit_scaling_(self, x):
self.update(t_=x[0], beta=x[1], scaling=x[2])
def cb_fit_rates(self, x):
self.update(alpha=x[0], gamma=x[1])
def cb_fit_t_(self, x):
self.update(t_=x[0])
def cb_fit_t_and_rates(self, x):
self.update(t_=x[0], alpha=x[1], beta=x[2], gamma=x[3])
def cb_fit_rates_all(self, x):
self.update(alpha=x[0], beta=x[1], gamma=x[2])
def set_callbacks(self):
# Overwrite callbacks
if not self.high_pars_resolution:
self.cb_fit_t_and_alpha = None
self.cb_fit_scaling_ = None
self.cb_fit_rates = None
self.cb_fit_t_ = None
self.cb_fit_t_and_rates = None
self.cb_fit_rates_all = None
def update(self, t=None, t_=None, alpha=None, beta=None, gamma=None, scaling=None, u0_=None, s0_=None, adjust_t_=True):
loss_prev = self.loss[-1] if len(self.loss) > 0 else 1e6
alpha, beta, gamma, scaling, t_ = self.get_vars(alpha, beta, gamma, scaling, t_, u0_, s0_)
t, tau, o = self.get_time_assignment(alpha, beta, gamma, scaling, t_, u0_, s0_, t)
loss = self.get_loss(t, t_, alpha, beta, gamma, scaling)
perform_update = loss < loss_prev
on = self.o == 1
if adjust_t_ and np.any(on):
if not perform_update:
alpha, beta, gamma, scaling, t_ = self.get_vars()
t, tau, o = self.get_time_assignment()
loss = self.get_loss()
alt_t_ = t[on].max()
if 0 < alt_t_ < t_:
# alt_u0_, alt_s0_ = mRNA(alt_t_, 0, 0, alpha, beta, gamma)
alt_t_ += np.max(t) / len(t) * np.sum(t == t_) # np.sum((self.u / self.scaling >= alt_u0_) | (self.s >= alt_s0_))
alt_t, alt_tau, alt_o = self.get_time_assignment(alpha, beta, gamma, scaling, alt_t_)
alt_loss = self.get_loss(alt_t, alt_t_, alpha, beta, gamma, scaling)
ut_cur = unspliced(t_, 0, alpha, beta)
ut_alt = unspliced(alt_t_, 0, alpha, beta)
if alt_loss * .99 <= np.min([loss, loss_prev]) or ut_cur * .99 < ut_alt:
t, tau, o, t_, loss, perform_update = alt_t, alt_tau, alt_o, alt_t_, alt_loss, True
if False:
steady_states = t == t_
if perform_update and np.any(steady_states):
t_ += t.max() / len(t) * np.sum(steady_states)
t, tau, o = self.get_time_assignment(alpha, beta, gamma, scaling, t_)
loss = self.get_loss(t, t_, alpha, beta, gamma, scaling)
if perform_update:
if scaling is not None:
self.steady_u *= self.scaling / scaling
self.u0_ *= self.scaling / scaling
if u0_ is not None: self.u0_ = u0_
if s0_ is not None: self.s0_ = s0_
self.t, self.tau, self.o = t, tau, o
self.alpha, self.beta, self.gamma, self.scaling, self.t_ = alpha, beta, gamma, scaling, t_
self.pars = np.c_[self.pars, np.array([alpha, beta, gamma, t_, scaling])[:, None]]
self.loss.append(loss)
return perform_update
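# Added illustration (not part of the original class): each fit_* method above wraps
# get_mse in a closure over the free parameters and hands it to scipy.optimize.minimize
# (Nelder-Mead simplex, judging by self.simplex_kwargs), with a callback that mirrors
# intermediate parameters back into the model via update(). A minimal standalone sketch
# of that pattern, using a toy quadratic objective instead of the model's MSE:
def _simplex_fit_sketch():
    from scipy.optimize import minimize
    import numpy as np
    trace = []
    def objective(x):
        return float(np.sum((x - np.array([1.0, 2.0])) ** 2))
    res = minimize(objective, x0=np.zeros(2), method='Nelder-Mead', callback=trace.append)
    return res.x, len(trace)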
default_pars_names = ['alpha', 'beta', 'gamma', 't_', 'scaling', 'std_u', 'std_s', 'likelihood', 'u0', 's0',
'pval_steady', 'steady_u', 'steady_s']
def read_pars(adata, pars_names=None, key='fit'):
pars = []
for name in (default_pars_names if pars_names is None else pars_names):
pkey = key + '_' + name
par = adata.var[pkey].values if pkey in adata.var.keys() else np.zeros(adata.n_vars) * np.nan
pars.append(par)
return pars
def write_pars(adata, pars, pars_names=None, add_key='fit'):
for i, name in enumerate(default_pars_names if pars_names is None else pars_names):
adata.var[add_key + '_' + name] = pars[i]
def recover_dynamics(data, var_names='velocity_genes', n_top_genes=None, max_iter=10, assignment_mode='projection',
t_max=None, fit_time=True, fit_scaling=True, fit_steady_states=True, fit_connected_states=None,
fit_basal_transcription=None, use_raw=False, load_pars=None, return_model=None, plot_results=False,
steady_state_prior=None, add_key='fit', copy=False, **kwargs):
"""Recovers the full splicing kinetics of specified genes.
The model infers transcription rates, splicing rates, degradation rates,
as well as cell-specific latent time and transcriptional states, estimated iteratively by expectation-maximization.
.. image:: https://user-images.githubusercontent.com/31883718/69636459-ef862800-1056-11ea-8803-0a787ede5ce9.png
Arguments
---------
data: :class:`~anndata.AnnData`
Annotated data matrix.
var_names: `str`, list of `str` (default: `'velocity_genes'`)
Names of variables/genes to use for the fitting.
n_top_genes: `int` or `None` (default: `None`)
Number of top velocity genes to use for the dynamical model.
max_iter: `int` (default: `10`)
Maximal iterations in the EM-Algorithm.
assignment_mode: `str` (default: `projection`)
Determines how times are assigned to observations.
If `projection`, observations are projected onto the model trajectory.
Else uses an inverse approximating formula.
t_max: `float` or `None` (default: `None`)
Total range for time assignments.
fit_scaling: `bool` or `float` or `None` (default: `True`)
Whether to fit scaling between unspliced and spliced or keep initially given scaling fixed.
fit_time: `bool` or `float` or `None` (default: `True`)
Whether to fit time or keep initially given time fixed.
fit_steady_states: `bool` or `None` (default: `True`)
Allows fitting of observations to steady states next to repression and induction.
fit_connected_states: `bool` or `None` (default: `None`)
Restricts fitting to neighbors given by connectivities.
fit_basal_transcription: `bool` or `None` (default: `None`)
Enables the model to incorporate basal transcription.
use_raw: `bool` (default: `False`)
If True, use .layers['spliced'], else use moments from .layers['Ms']
load_pars: `bool` or `None` (default: `None`)
Load parameters from past fits.
return_model: `bool` or `None` (default: `None`)
Whether to return the model as a `DynamicsRecovery` object.
plot_results: `bool` or `None` (default: `False`)
Plot results after parameter inference.
steady_state_prior: list of `bool` or `None` (default: `None`)
Mask for indices used for steady state regression.
add_key: `str` (default: `'fit'`)
Key to add to parameter names, e.g. 'fit_t' for fitted time.
copy: `bool` (default: `False`)
Return a copy instead of writing to `adata`.
Returns
-------
Returns or updates `adata`
"""
adata = data.copy() if copy else data
logg.info('recovering dynamics', r=True)
if 'Ms' not in adata.layers.keys() or 'Mu' not in adata.layers.keys(): use_raw = True
if fit_connected_states is None: fit_connected_states = not use_raw
adata.uns['recover_dynamics'] = {'fit_connected_states': fit_connected_states,
'fit_basal_transcription': fit_basal_transcription, 'use_raw': use_raw}
if isinstance(var_names, str) and var_names not in adata.var_names:
if var_names in adata.var.keys():
var_names = adata.var_names[adata.var[var_names].values]
elif use_raw or var_names == 'all':
var_names = adata.var_names
elif '_genes' in var_names:
from .velocity import Velocity
velo = Velocity(adata, use_raw=use_raw)
velo.compute_deterministic(perc=[5, 95])
var_names = adata.var_names[velo._velocity_genes]
else:
raise ValueError('Variable name not found in var keys.')
var_names = np.array([name for name in make_unique_list(var_names, allow_array=True) if name in adata.var_names])
if len(var_names) == 0:
raise ValueError('Variable name not found in var keys.')
if n_top_genes is not None and len(var_names) > n_top_genes:
X = adata[:, var_names].layers[('spliced' if use_raw else 'Ms')]
var_names = var_names[np.argsort(np.sum(X, 0))[::-1][:n_top_genes]]
if return_model is None:
return_model = len(var_names) < 5
alpha, beta, gamma, t_, scaling, std_u, std_s, likelihood, u0, s0, pval, steady_u, steady_s = read_pars(adata)
likelihood[np.isnan(likelihood)] = 0
idx, L, P = [], [], []
T = adata.layers['fit_t'] if 'fit_t' in adata.layers.keys() else np.zeros(adata.shape) * np.nan
Tau = adata.layers['fit_tau'] if 'fit_tau' in adata.layers.keys() else np.zeros(adata.shape) * np.nan
Tau_ = adata.layers['fit_tau_'] if 'fit_tau_' in adata.layers.keys() else np.zeros(adata.shape) * np.nan
conn = get_connectivities(adata) if fit_connected_states else None
progress = logg.ProgressReporter(len(var_names))
for i, gene in enumerate(var_names):
dm = DynamicsRecovery(adata, gene, use_raw=use_raw, load_pars=load_pars, max_iter=max_iter, fit_time=fit_time,
fit_steady_states=fit_steady_states, fit_connected_states=conn, fit_scaling=fit_scaling,
fit_basal_transcription=fit_basal_transcription, steady_state_prior=steady_state_prior, **kwargs)
if dm.recoverable:
dm.fit(assignment_mode=assignment_mode)
ix = np.where(adata.var_names == gene)[0][0]
idx.append(ix)
T[:, ix], Tau[:, ix], Tau_[:, ix] = dm.t, dm.tau, dm.tau_
alpha[ix], beta[ix], gamma[ix], t_[ix], scaling[ix] = dm.pars[:, -1]
u0[ix], s0[ix], pval[ix], steady_u[ix], steady_s[ix] = dm.u0, dm.s0, dm.pval_steady, dm.steady_u, dm.steady_s
beta[ix] /= scaling[ix]
steady_u[ix] *= scaling[ix]
std_u[ix], std_s[ix], likelihood[ix] = dm.std_u, dm.std_s, dm.likelihood
L.append(dm.loss)
if plot_results and i < 4:
P.append(np.array(dm.pars))
progress.update()
else:
logg.warn(dm.gene, 'not recoverable due to insufficient samples.')
dm = None
progress.finish()
write_pars(adata, [alpha, beta, gamma, t_, scaling, std_u, std_s, likelihood, u0, s0, pval, steady_u, steady_s])
adata.layers['fit_t'] = T if conn is None else conn.dot(T)
adata.layers['fit_tau'] = Tau
adata.layers['fit_tau_'] = Tau_
if L: # is False if only one invalid / irrecoverable gene was given in var_names
cur_len = adata.varm['loss'].shape[1] if 'loss' in adata.varm.keys() else 2
max_len = max(np.max([len(l) for l in L]), cur_len) if L else cur_len
loss = np.ones((adata.n_vars, max_len)) * np.nan
if 'loss' in adata.varm.keys():
loss[:, :cur_len] = adata.varm['loss']
loss[idx] = np.vstack([np.concatenate([l, np.ones(max_len-len(l)) * np.nan]) for l in L])
adata.varm['loss'] = loss
if t_max is not False:
dm = align_dynamics(adata, t_max=t_max, dm=dm, idx=idx)
logg.info(' finished', time=True, end=' ' if settings.verbosity > 2 else '\n')
logg.hint('added \n'
' \'' + add_key + '_pars' + '\', fitted parameters for splicing dynamics (adata.var)')
if plot_results: # Plot Parameter Stats
n_rows, n_cols = len(var_names[:4]), 6
figsize = [2 * n_cols, 1.5 * n_rows] # rcParams['figure.figsize']
fontsize = rcParams['font.size']
fig, axes = pl.subplots(nrows=n_rows, ncols=6, figsize=figsize)
pl.subplots_adjust(wspace=0.7, hspace=0.5)
for i, gene in enumerate(var_names[:4]):
if t_max is not False:
mi = dm.m[i]
P[i] *= np.array([1 / mi, 1 / mi, 1 / mi, mi, 1])[:, None]
ax = axes[i] if n_rows > 1 else axes
for j, pij in enumerate(P[i]):
ax[j].plot(pij)
ax[len(P[i])].plot(L[i])
if i == 0:
for j, name in enumerate(['alpha', 'beta', 'gamma', 't_', 'scaling', 'loss']):
ax[j].set_title(name, fontsize=fontsize)
if return_model:
logg.info('\noutputs model fit of gene:', dm.gene)
return dm if return_model else adata if copy else None
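# Added usage sketch (not part of the original module): `adata` is assumed to be an
# AnnData object that already carries moments in .layers['Ms'] / .layers['Mu']
# (e.g. via scvelo.pp.moments); the chosen var_names value is illustrative.
def _example_recover_dynamics(adata):
    dm = recover_dynamics(adata, var_names='velocity_genes', max_iter=10, return_model=True)
    fitted = adata.var[['fit_alpha', 'fit_beta', 'fit_gamma', 'fit_likelihood']]
    return dm, fitted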
def align_dynamics(data, t_max=None, dm=None, idx=None, mode=None, remove_outliers=None, copy=False):
"""Align dynamics to a common set of parameters
Arguments
---------
data: :class:`~anndata.AnnData`
Annotated data matrix.
t_max: `float` or `None` (default: `None`)
Total range for time assignments.
dm: :class:`~DynamicsRecovery`
DynamicsRecovery object to perform alignment on.
idx: list of `bool` or `None` (default: `None`)
Mask for indices used for alignment.
mode: `str` or `None` (default: `'align_total_time'`)
What to align. Takes the following arguments:
common_splicing_rate, common_scaling, align_increments, align_total_time
remove_outliers: `bool` or `None` (default: `None`)
Whether to remove outliers.
copy: `bool` (default: `False`)
Return a copy instead of writing to `adata`.
Returns
-------
Returns or updates `adata` with the attributes
alpha, beta, gamma, t_, alignment_scaling: `.var`
aligned parameters
fit_t, fit_tau, fit_tau_: `.layer`
aligned time
"""
adata = data.copy() if copy else data
alpha, beta, gamma, t_, scaling, mz = read_pars(adata, pars_names=['alpha', 'beta', 'gamma', 't_', 'scaling', 'alignment_scaling'])
T = adata.layers['fit_t'] if 'fit_t' in adata.layers.keys() else np.zeros(adata.shape) * np.nan
Tau = adata.layers['fit_tau'] if 'fit_tau' in adata.layers.keys() else np.zeros(adata.shape) * np.nan
Tau_ = adata.layers['fit_tau_'] if 'fit_tau_' in adata.layers.keys() else np.zeros(adata.shape) * np.nan
idx = ~ np.isnan(np.sum(T, axis=0)) if idx is None else idx
if 'fit_alignment_scaling' not in adata.var.keys(): mz = np.ones(adata.n_vars)
if mode is None: mode = 'align_total_time'
m = np.ones(adata.n_vars)
mz_prev = np.array(mz)
if dm is not None: # newly fitted
mz[idx] = 1
if mode == 'align_total_time' and t_max is not False:
T_max = np.max(T[:, idx] * (T[:, idx] < t_[idx]), axis=0) \
+ np.max((T[:, idx] - t_[idx]) * (T[:, idx] > t_[idx]), axis=0)
denom = 1 - np.sum((T[:, idx] == t_[idx]) | (T[:, idx] == 0), axis=0) / len(T)
denom += denom == 0
T_max = T_max / denom
T_max += T_max == 0
t_max = 20 if t_max is None else t_max
m[idx] = t_max / T_max
mz *= m
else:
m = 1 / mz
mz = np.ones(adata.n_vars)
if remove_outliers:
mu, std = np.nanmean(mz), np.nanstd(mz)
mz = np.clip(mz, mu - 3 * std, mu + 3 * std)
m = mz / mz_prev
alpha, beta, gamma, T, t_, Tau, Tau_ = alpha / m, beta / m, gamma / m, T * m, t_ * m, Tau * m, Tau_ * m
write_pars(adata, [alpha, beta, gamma, t_, mz], pars_names=['alpha', 'beta', 'gamma', 't_', 'alignment_scaling'])
adata.layers['fit_t'] = T
adata.layers['fit_tau'] = Tau
adata.layers['fit_tau_'] = Tau_
if dm is not None:
dm.m = m[idx]
dm.alpha, dm.beta, dm.gamma, dm.pars[:3] = np.array([dm.alpha, dm.beta, dm.gamma, dm.pars[:3]]) / dm.m[-1]
dm.t, dm.tau, dm.t_, dm.pars[4] = np.array([dm.t, dm.tau, dm.t_, dm.pars[4]]) * dm.m[-1]
return adata if copy else dm
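# Added sketch (assumes `adata` was already fitted by recover_dynamics above):
# rescale all gene-wise fits so that they share a common overall time span t_max.
def _example_align_dynamics(adata, t_max=20):
    align_dynamics(adata, t_max=t_max, mode='align_total_time')
    return adata.layers['fit_t']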
def recover_latent_time(data, vkey='velocity', min_likelihood=.1, min_confidence=.75, min_corr_diffusion=None,
weight_diffusion=None, root_key=None, end_key=None, t_max=None, copy=False):
"""Computes a gene-shared latent time.
Gene-specific latent timepoints obtained from the dynamical model are coupled to a universal gene-shared
latent time, which represents the cell’s internal clock and is based only on its transcriptional dynamics.
.. image:: https://user-images.githubusercontent.com/31883718/69636500-03318e80-1057-11ea-9e14-ae9f907711cc.png
Arguments
---------
data: :class:`~anndata.AnnData`
Annotated data matrix
vkey: `str` (default: `'velocity'`)
Name of velocity estimates to be used.
min_likelihood: `float` between `0` and `1` or `None` (default: `.1`)
Minimal likelihood fitness for genes to be included to the weighting.
min_confidence: `float` between `0` and `1` (default: `.75`)
Parameter for local coherence selection.
min_corr_diffusion: `float` between `0` and `1` or `None` (default: `None`)
Only select genes that correlate with velocity pseudotime obtained from diffusion random walk on velocity graph.
weight_diffusion: `float` or `None` (default: `None`)
Weight to be applied to couple latent time with diffusion-based velocity pseudotime.
root_key: `str` or `None` (default: `None`)
Key (.uns, .obs) of root cell to be used. If not set, it obtains root cells from velocity-inferred transition matrix.
end_key: `str` or `None` (default: `None`)
Key (.obs) of end points to be used. If not set, it obtains end points from velocity-inferred transition matrix.
t_max: `float` or `None` (default: `None`)
Overall duration of differentiation process. If not set, a splicing duration of 20 hours is used as prior.
copy: `bool` (default: `False`)
Return a copy instead of writing to `adata`.
Returns
-------
Returns or updates `adata` with the attributes
latent_time: `.obs`
latent time from learned dynamics for each cell
"""
adata = data.copy() if copy else data
from .utils import vcorrcoef
from .dynamical_model_utils import root_time, compute_shared_time
from .terminal_states import terminal_states
from .velocity_graph import velocity_graph
from .velocity_pseudotime import velocity_pseudotime
if vkey + '_graph' not in adata.uns.keys():
velocity_graph(adata, approx=True)
if root_key not in adata.uns.keys() and root_key not in adata.obs.keys():
root_key = 'root_cells'
if root_key not in adata.obs.keys():
terminal_states(adata, vkey=vkey)
if end_key is None:
if 'end_points' in adata.obs.keys():
end_key = 'end_points'
elif 'final_cells' in adata.obs.keys():
end_key = 'final_cells'
t = np.array(adata.layers['fit_t'])
idx_valid = ~np.isnan(t.sum(0))
if min_likelihood is not None:
idx_valid &= np.array(adata.var['fit_likelihood'].values >= min_likelihood, dtype=bool)
t = t[:, idx_valid]
t_sum = np.sum(t, 1)
conn = get_connectivities(adata)
logg.info('computing latent time', r=True)
roots = np.argsort(t_sum)
idx_roots = adata.obs[root_key]
idx_roots[pd.isnull(idx_roots)] = 0
if np.any([isinstance(ix, str) for ix in idx_roots]):
idx_roots = np.array(idx_roots, dtype=bool)
idx_roots = idx_roots.astype(int) > 1 - 1e-3
if np.sum(idx_roots) > 0:
roots = roots[idx_roots]
else:
logg.warn('No root cells detected. Consider specifying root cells to improve latent time prediction.')
if end_key in adata.obs.keys():
fates = np.argsort(t_sum)[::-1]
idx_fates = adata.obs[end_key]
idx_fates[ | pd.isnull(idx_fates) | pandas.isnull |
import pandas as pd
from bs4 import BeautifulSoup as bs
import requests
import pickle
import os
import os.path
import datetime
import time
def promt_time_stamp():
return str(datetime.datetime.fromtimestamp(time.time()).strftime('[%H:%M:%S] '))
def get_index_tickers(list_indexes=list(), load_all=False):
tickers_all = []
path = 'indexes/'
if not os.path.exists(path):
os.mkdir(path)
if load_all:
list_indexes = ['dowjones', 'sp500', 'dax', 'sptsxc', 'bovespa', 'ftse100', 'cac40', 'ibex35',
'eustoxx50', 'sensex', 'smi', 'straitstimes', 'rts', 'nikkei', 'ssec', 'hangseng',
'spasx200', 'mdax', 'sdax', 'tecdax']
for index in list_indexes:
tickers = []
implemented = True
if os.path.isfile(path + index + '.pic'):
print(promt_time_stamp() + 'load ' + index + ' tickers from db ..')
with open(path + index + '.pic', "rb") as input_file:
for ticker in pickle.load(input_file):
tickers.append(ticker)
elif index == 'dowjones':
print(promt_time_stamp() + 'load dowjones tickers ..')
r = pd.read_html('https://en.wikipedia.org/wiki/Dow_Jones_Industrial_Average')
for ticker in r[1][2][1:].tolist():
tickers.append(ticker)
elif index == 'sp500':
print(promt_time_stamp() + 'load sp500 tickers ..')
r = pd.read_html('https://en.wikipedia.org/wiki/List_of_S%26P_500_companies')
for ticker in r[0][0][1:].tolist():
tickers.append(ticker)
elif index == 'dax':
print(promt_time_stamp() + 'load dax tickers ..')
r = pd.read_html('https://it.wikipedia.org/wiki/DAX_30')[1]
for ticker in pd.DataFrame(r)[1][1:].tolist():
tickers.append(ticker)
elif index == 'sptsxc':
print(promt_time_stamp() + 'load sptsxc tickers ..')
r = pd.read_html('https://en.wikipedia.org/wiki/S%26P/TSX_Composite_Index')
for ticker in r[0][0][1:].tolist():
tickers.append(ticker)
elif index == 'bovespa':
print(promt_time_stamp() + 'load bovespa tickers ..')
r = pd.read_html('https://id.wikipedia.org/wiki/Indeks_Bovespa')
for ticker in r[0][1][1:].tolist():
tickers.append(ticker)
elif index == 'ftse100':
print(promt_time_stamp() + 'load ftse100 tickers ..')
r = pd.read_html('https://en.wikipedia.org/wiki/FTSE_100_Index')
for ticker in r[2][1][1:].tolist():
tickers.append(ticker)
elif index == 'cac40':
print(promt_time_stamp() + 'load cac40 tickers ..')
r = pd.read_html('https://en.wikipedia.org/wiki/CAC_40')
for ticker in r[2][0][1:].tolist():
tickers.append(ticker)
elif index == 'ibex35':
print(promt_time_stamp() + 'load ibex35 tickers ..')
r = pd.read_html('https://en.wikipedia.org/wiki/IBEX_35')
for ticker in r[1][1][1:].tolist():
tickers.append(ticker)
elif index == 'eustoxx50':
print(promt_time_stamp() + '-eustoxx50 not implemented-')
implemented = False
elif index == 'sensex':
print(promt_time_stamp() + '-sensex not implemented-')
implemented = False
elif index == 'smi':
print(promt_time_stamp() + 'load smi tickers ..')
r = | pd.read_html('https://en.wikipedia.org/wiki/Swiss_Market_Index') | pandas.read_html |
from contextlib import closing
from datetime import datetime
from django.contrib.gis.geos import Point
from django.db import transaction
from bulk_sync import bulk_sync
from pandas import isnull
import codecs
import csv
import dateparser
import requests
import pandas as pd
import pytz
from .models import AirNowForecastSource, AirNowReportingArea, AirNowReportingAreaZipCode, AirNowObservation
CITY_ZIPCODES_URL = "https://files.airnowtech.org/airnow/today/cityzipcodes.csv"
AQI_DATA_URL = "https://s3-us-west-1.amazonaws.com//files.airnowtech.org/airnow/today/reportingarea.dat"
AQI_DATA_COLUMN_NAMES = [
'IssueDate',
'ValidDate',
'ValidTime',
'TimeZone',
'RecordSequence',
'DataType',
'Primary',
'ReportingArea',
'StateCode',
'Latitude',
'Longitude',
'ParameterName',
'AQIValue',
'AQICategory',
'ActionDay',
'Discussion',
'ForecastSource',
]
AQI_DATA_DATES = {
'IssueDateTime': [0],
}
TIMEZONE_CONVERSIONS = {
# The Alaska time zones don't seem to be reported correctly.
'AKT': 'AKST',
'ADT': 'AKDT',
}
def _print_status(status):
print("Results of bulk_sync: "
"{created} created, {updated} updated, {deleted} deleted."
.format(**status['stats']))
def _find_first(items, condition):
return next((i for i in items if condition(i)))
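# Added illustration (literal values are assumptions): _find_first returns the first
# element of an iterable that satisfies the given predicate.
def _demo_find_first():
    return _find_first([3, 8, 12], lambda n: n > 5)  # -> 8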
def _convert_valid_time(obs):
if | isnull(obs['ValidTime']) | pandas.isnull |
#! coding=utf-8
import os
import torch
import pandas as pd
import read_data
import warnings
import time
import cal_smilar
import joblib
import match
import tldextract
import tqdm
import Sentiment_RNN_Solution
def get_host(x):
val = tldextract.extract(x)
return val.domain + '.' + val.suffix
def in_ip(x):
if x < 0:
return 1
return 0
def not_in_ip(x, y):
if x <= 0.12:
return 1
elif (x > 0.12) & (y == 1):
return 1
else:
return 0
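# Added illustration (example URL and score values are assumptions): get_host collapses
# a URL to its registered domain via tldextract, while in_ip/not_in_ip turn numeric
# scores into the binary flags used further down.
def _demo_url_features():
    host = get_host('http://mail.example.co.uk/login')  # -> 'example.co.uk'
    return host, in_ip(-1), not_in_ip(0.05, 0)          # -> ('example.co.uk', 1, 1)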
def predict(df=None):
"""
:param df: 一条或多条http请求,DataFrame格式
:return: result 其中pre为预测结果
"""
# 读取模型
net = torch.load('model/LSTM_model.pkl')
gs = joblib.load('model/gs.m')
result = read_data.read('data/data.csv')
if df is None:
result['tmp'] = 0
else:
df = read_data.get_data(df)
result['tmp'] = 1
df['tmp'] = 0
result = | pd.concat([result, df], axis=0) | pandas.concat |
import itertools
from numpy import nan
import numpy as np
from pandas.core.index import Index, _ensure_index
import pandas.core.common as com
import pandas._tseries as lib
class Block(object):
"""
Canonical n-dimensional unit of homogeneous dtype contained in a pandas data
structure
Index-ignorant; let the container take care of that
"""
__slots__ = ['items', 'ref_items', '_ref_locs', 'values', 'ndim']
def __init__(self, values, items, ref_items, ndim=2,
do_integrity_check=False):
if issubclass(values.dtype.type, basestring):
values = np.array(values, dtype=object)
assert(values.ndim == ndim)
assert(len(items) == len(values))
self.values = values
self.ndim = ndim
self.items = _ensure_index(items)
self.ref_items = _ensure_index(ref_items)
if do_integrity_check:
self._check_integrity()
def _check_integrity(self):
if len(self.items) < 2:
return
# monotonicity
return (self.ref_locs[1:] > self.ref_locs[:-1]).all()
_ref_locs = None
@property
def ref_locs(self):
if self._ref_locs is None:
indexer = self.ref_items.get_indexer(self.items)
assert((indexer != -1).all())
self._ref_locs = indexer
return self._ref_locs
def set_ref_items(self, ref_items, maybe_rename=True):
"""
If maybe_rename=True, need to set the items for this guy
"""
assert(isinstance(ref_items, Index))
if maybe_rename:
self.items = ref_items.take(self.ref_locs)
self.ref_items = ref_items
def __repr__(self):
shape = ' x '.join([str(s) for s in self.shape])
name = type(self).__name__
return '%s: %s, %s, dtype %s' % (name, self.items, shape, self.dtype)
def __contains__(self, item):
return item in self.items
def __len__(self):
return len(self.values)
def __getstate__(self):
# should not pickle generally (want to share ref_items), but here for
# completeness
return (self.items, self.ref_items, self.values)
def __setstate__(self, state):
items, ref_items, values = state
self.items = _ensure_index(items)
self.ref_items = _ensure_index(ref_items)
self.values = values
self.ndim = values.ndim
@property
def shape(self):
return self.values.shape
@property
def dtype(self):
return self.values.dtype
def copy(self, deep=True):
values = self.values
if deep:
values = values.copy()
return make_block(values, self.items, self.ref_items)
def merge(self, other):
assert(self.ref_items.equals(other.ref_items))
# Not sure whether to allow this or not
# if not union_ref.equals(other.ref_items):
# union_ref = self.ref_items + other.ref_items
return _merge_blocks([self, other], self.ref_items)
def reindex_axis(self, indexer, mask, needs_masking, axis=0):
"""
Reindex using pre-computed indexer information
"""
if self.values.size > 0:
new_values = com.take_fast(self.values, indexer, mask,
needs_masking, axis=axis)
else:
shape = list(self.shape)
shape[axis] = len(indexer)
new_values = np.empty(shape)
new_values.fill(np.nan)
return make_block(new_values, self.items, self.ref_items)
def reindex_items_from(self, new_ref_items, copy=True):
"""
Reindex to only those items contained in the input set of items
E.g. if you have ['a', 'b'], and the input items is ['b', 'c', 'd'],
then the resulting items will be ['b']
Returns
-------
reindexed : Block
"""
new_ref_items, indexer = self.items.reindex(new_ref_items)
if indexer is None:
new_items = new_ref_items
new_values = self.values.copy() if copy else self.values
else:
mask = indexer != -1
masked_idx = indexer[mask]
if self.values.ndim == 2:
new_values = com.take_2d(self.values, masked_idx, axis=0,
needs_masking=False)
else:
new_values = self.values.take(masked_idx, axis=0)
new_items = self.items.take(masked_idx)
return make_block(new_values, new_items, new_ref_items)
def get(self, item):
loc = self.items.get_loc(item)
return self.values[loc]
def set(self, item, value):
"""
Modify Block in-place with new item value
Returns
-------
None
"""
loc = self.items.get_loc(item)
self.values[loc] = value
def delete(self, item):
"""
Returns
-------
y : Block (new object)
"""
loc = self.items.get_loc(item)
new_items = self.items.delete(loc)
new_values = np.delete(self.values, loc, 0)
return make_block(new_values, new_items, self.ref_items)
def split_block_at(self, item):
"""
Split block around given column, for "deleting" a column without
having to copy data by returning views on the original array
Returns
-------
leftb, rightb : (Block or None, Block or None)
"""
loc = self.items.get_loc(item)
if len(self.items) == 1:
# no blocks left
return None, None
if loc == 0:
# at front
left_block = None
right_block = make_block(self.values[1:], self.items[1:].copy(),
self.ref_items)
elif loc == len(self.values) - 1:
# at back
left_block = make_block(self.values[:-1], self.items[:-1].copy(),
self.ref_items)
right_block = None
else:
# in the middle
left_block = make_block(self.values[:loc],
self.items[:loc].copy(), self.ref_items)
right_block = make_block(self.values[loc + 1:],
self.items[loc + 1:].copy(), self.ref_items)
return left_block, right_block
def fillna(self, value):
new_values = self.values.copy()
mask = com.isnull(new_values.ravel())
new_values.flat[mask] = value
return make_block(new_values, self.items, self.ref_items)
#-------------------------------------------------------------------------------
# Is this even possible?
class FloatBlock(Block):
def should_store(self, value):
# when inserting a column should not coerce integers to floats
# unnecessarily
return issubclass(value.dtype.type, np.floating)
class IntBlock(Block):
def should_store(self, value):
return issubclass(value.dtype.type, np.integer)
class BoolBlock(Block):
def should_store(self, value):
return issubclass(value.dtype.type, np.bool_)
class ObjectBlock(Block):
def should_store(self, value):
return not issubclass(value.dtype.type,
(np.integer, np.floating, np.bool_))
def make_block(values, items, ref_items, do_integrity_check=False):
dtype = values.dtype
vtype = dtype.type
if issubclass(vtype, np.floating):
klass = FloatBlock
elif issubclass(vtype, np.integer):
if vtype != np.int64:
values = values.astype('i8')
klass = IntBlock
elif dtype == np.bool_:
klass = BoolBlock
else:
klass = ObjectBlock
return klass(values, items, ref_items, ndim=values.ndim,
do_integrity_check=do_integrity_check)
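# Added sketch (illustrative only): make_block picks the Block subclass from the values'
# dtype and ties the block's items to the manager-wide ref_items. Note this legacy
# snapshot is Python 2-era code (see `basestring` above), so the sketch assumes that
# environment.
def _demo_make_block():
    ref_items = Index(['a', 'b', 'c'])
    values = np.array([[1.0, 2.0, 3.0, 4.0]])
    blk = make_block(values, Index(['a']), ref_items)
    return type(blk).__name__, blk.shape  # -> ('FloatBlock', (1, 4))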
# TODO: flexible with index=None and/or items=None
class BlockManager(object):
"""
Core internal data structure to implement DataFrame
Manage a bunch of labeled 2D mixed-type ndarrays. Essentially it's a
lightweight blocked set of labeled data to be manipulated by the DataFrame
public API class
Parameters
----------
Notes
-----
This is *not* a public API class
"""
__slots__ = ['axes', 'blocks', 'ndim']
def __init__(self, blocks, axes, do_integrity_check=True):
self.axes = [_ensure_index(ax) for ax in axes]
self.blocks = blocks
ndim = len(axes)
for block in blocks:
assert(ndim == block.values.ndim)
if do_integrity_check:
self._verify_integrity()
def __nonzero__(self):
return True
@property
def ndim(self):
return len(self.axes)
def is_mixed_dtype(self):
counts = set()
for block in self.blocks:
counts.add(block.dtype)
if len(counts) > 1:
return True
return False
def set_axis(self, axis, value):
cur_axis = self.axes[axis]
if len(value) != len(cur_axis):
raise Exception('Length mismatch (%d vs %d)'
% (len(value), len(cur_axis)))
self.axes[axis] = _ensure_index(value)
if axis == 0:
for block in self.blocks:
block.set_ref_items(self.items, maybe_rename=True)
# make items read only for now
def _get_items(self):
return self.axes[0]
items = property(fget=_get_items)
def set_items_norename(self, value):
value = _ensure_index(value)
self.axes[0] = value
for block in self.blocks:
block.set_ref_items(value, maybe_rename=False)
def __getstate__(self):
block_values = [b.values for b in self.blocks]
block_items = [b.items for b in self.blocks]
axes_array = [ax for ax in self.axes]
return axes_array, block_values, block_items
def __setstate__(self, state):
# discard anything after 3rd, support beta pickling format for a little
# while longer
ax_arrays, bvalues, bitems = state[:3]
self.axes = [_ensure_index(ax) for ax in ax_arrays]
blocks = []
for values, items in zip(bvalues, bitems):
blk = make_block(values, items, self.axes[0],
do_integrity_check=True)
blocks.append(blk)
self.blocks = blocks
def __len__(self):
return len(self.items)
def __repr__(self):
output = 'BlockManager'
for i, ax in enumerate(self.axes):
if i == 0:
output += '\nItems: %s' % ax
else:
output += '\nAxis %d: %s' % (i, ax)
for block in self.blocks:
output += '\n%s' % repr(block)
return output
@property
def shape(self):
return tuple(len(ax) for ax in self.axes)
def _verify_integrity(self):
_union_block_items(self.blocks)
mgr_shape = self.shape
for block in self.blocks:
assert(block.values.shape[1:] == mgr_shape[1:])
tot_items = sum(len(x.items) for x in self.blocks)
assert(len(self.items) == tot_items)
def astype(self, dtype):
new_blocks = []
for block in self.blocks:
newb = make_block(block.values.astype(dtype), block.items,
block.ref_items)
new_blocks.append(newb)
new_mgr = BlockManager(new_blocks, self.axes)
return new_mgr.consolidate()
def is_consolidated(self):
"""
Return True if more than one block with the same dtype
"""
dtypes = [blk.dtype for blk in self.blocks]
return len(dtypes) == len(set(dtypes))
def get_slice(self, slobj, axis=0):
new_axes = list(self.axes)
new_axes[axis] = new_axes[axis][slobj]
if axis == 0:
new_items = new_axes[0]
if len(self.blocks) == 1:
blk = self.blocks[0]
newb = make_block(blk.values[slobj], new_items,
new_items)
new_blocks = [newb]
else:
return self.reindex_items(new_items)
else:
new_blocks = self._slice_blocks(slobj, axis)
return BlockManager(new_blocks, new_axes, do_integrity_check=False)
def _slice_blocks(self, slobj, axis):
new_blocks = []
slicer = [slice(None, None) for _ in range(self.ndim)]
slicer[axis] = slobj
slicer = tuple(slicer)
for block in self.blocks:
newb = make_block(block.values[slicer], block.items,
block.ref_items)
new_blocks.append(newb)
return new_blocks
def get_series_dict(self):
# For DataFrame
return _blocks_to_series_dict(self.blocks, self.axes[1])
@classmethod
def from_blocks(cls, blocks, index):
# also checks for overlap
items = _union_block_items(blocks)
return BlockManager(blocks, [items, index])
def __contains__(self, item):
return item in self.items
@property
def nblocks(self):
return len(self.blocks)
def copy(self, deep=True):
"""
Make deep or shallow copy of BlockManager
Parameters
----------
deep : boolean, default True
If False, return shallow copy (do not copy data)
Returns
-------
copy : BlockManager
"""
copy_blocks = [block.copy(deep=deep) for block in self.blocks]
# copy_axes = [ax.copy() for ax in self.axes]
copy_axes = list(self.axes)
return BlockManager(copy_blocks, copy_axes, do_integrity_check=False)
def as_matrix(self, items=None):
if len(self.blocks) == 0:
mat = np.empty(self.shape, dtype=float)
elif len(self.blocks) == 1:
blk = self.blocks[0]
if items is None or blk.items.equals(items):
# if not, then just call interleave per below
mat = blk.values
else:
mat = self.reindex_items(items).as_matrix()
else:
if items is None:
mat = self._interleave(self.items)
else:
mat = self.reindex_items(items).as_matrix()
return mat
def _interleave(self, items):
"""
Return ndarray from blocks with specified item order
Items must be contained in the blocks
"""
dtype = _interleaved_dtype(self.blocks)
items = _ensure_index(items)
result = np.empty(self.shape, dtype=dtype)
itemmask = np.zeros(len(items), dtype=bool)
# By construction, all of the item should be covered by one of the
# blocks
for block in self.blocks:
indexer = items.get_indexer(block.items)
assert((indexer != -1).all())
result[indexer] = block.values
itemmask[indexer] = 1
assert(itemmask.all())
return result
def xs(self, key, axis=1, copy=True):
assert(axis >= 1)
loc = self.axes[axis].get_loc(key)
slicer = [slice(None, None) for _ in range(self.ndim)]
slicer[axis] = loc
slicer = tuple(slicer)
new_axes = list(self.axes)
# could be an array indexer!
if isinstance(loc, (slice, np.ndarray)):
new_axes[axis] = new_axes[axis][loc]
else:
new_axes.pop(axis)
new_blocks = []
if len(self.blocks) > 1:
if not copy:
raise Exception('cannot get view of mixed-type or '
'non-consolidated DataFrame')
for blk in self.blocks:
newb = make_block(blk.values[slicer], blk.items, blk.ref_items)
new_blocks.append(newb)
elif len(self.blocks) == 1:
vals = self.blocks[0].values[slicer]
if copy:
vals = vals.copy()
new_blocks = [make_block(vals, self.items, self.items)]
return BlockManager(new_blocks, new_axes)
def fast_2d_xs(self, loc, copy=False):
"""
"""
if len(self.blocks) == 1:
result = self.blocks[0].values[:, loc]
if copy:
result = result.copy()
return result
if not copy:
raise Exception('cannot get view of mixed-type or '
'non-consolidated DataFrame')
dtype = _interleaved_dtype(self.blocks)
items = self.items
n = len(items)
result = np.empty(n, dtype=dtype)
for blk in self.blocks:
values = blk.values
for j, item in enumerate(blk.items):
i = items.get_loc(item)
result[i] = values[j, loc]
return result
def consolidate(self):
"""
Join together blocks having same dtype
Returns
-------
y : BlockManager
"""
if self.is_consolidated():
return self
new_blocks = _consolidate(self.blocks, self.items)
return BlockManager(new_blocks, self.axes)
def get(self, item):
_, block = self._find_block(item)
return block.get(item)
def get_scalar(self, tup):
"""
Retrieve single item
"""
item = tup[0]
_, blk = self._find_block(item)
# this could obviously be seriously sped up in cython
item_loc = blk.items.get_loc(item),
full_loc = item_loc + tuple(ax.get_loc(x)
for ax, x in zip(self.axes[1:], tup[1:]))
return blk.values[full_loc]
def delete(self, item):
i, _ = self._find_block(item)
loc = self.items.get_loc(item)
new_items = Index(np.delete(np.asarray(self.items), loc))
self._delete_from_block(i, item)
self.set_items_norename(new_items)
def set(self, item, value):
"""
Set new item in-place. Does not consolidate. Adds new Block if not
contained in the current set of items
"""
if value.ndim == self.ndim - 1:
value = value.reshape((1,) + value.shape)
assert(value.shape[1:] == self.shape[1:])
if item in self.items:
i, block = self._find_block(item)
if not block.should_store(value):
# delete from block, create and append new block
self._delete_from_block(i, item)
self._add_new_block(item, value)
else:
block.set(item, value)
else:
# insert at end
self.insert(len(self.items), item, value)
def insert(self, loc, item, value):
if item in self.items:
raise Exception('cannot insert %s, already exists' % item)
new_items = self.items.insert(loc, item)
self.set_items_norename(new_items)
# new block
self._add_new_block(item, value)
def _delete_from_block(self, i, item):
"""
Delete and maybe remove the whole block
"""
block = self.blocks.pop(i)
new_left, new_right = block.split_block_at(item)
if new_left is not None:
self.blocks.append(new_left)
if new_right is not None:
self.blocks.append(new_right)
def _add_new_block(self, item, value):
# Do we care about dtype at the moment?
# hm, elaborate hack?
loc = self.items.get_loc(item)
new_block = make_block(value, self.items[loc:loc+1].copy(),
self.items)
self.blocks.append(new_block)
def _find_block(self, item):
self._check_have(item)
for i, block in enumerate(self.blocks):
if item in block:
return i, block
def _check_have(self, item):
if item not in self.items:
raise KeyError('no item named %s' % str(item))
def reindex_axis(self, new_axis, method=None, axis=0, copy=True):
new_axis = | _ensure_index(new_axis) | pandas.core.index._ensure_index |
from IMLearn.utils import split_train_test
from IMLearn.learners.regressors import LinearRegression
from typing import NoReturn
import numpy as np
import pandas as pd
import plotly.graph_objects as go
import plotly.express as px
import plotly.io as pio
pio.templates.default = "simple_white"
def load_data(filename: str):
"""
Load house prices dataset and preprocess data.
Parameters
----------
filename: str
Path to house prices dataset
Returns
-------
Design matrix and response vector (prices) - either as a single
DataFrame or a Tuple[DataFrame, Series]
"""
df = | pd.read_csv(filename) | pandas.read_csv |
import torch
import os, sys
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import tqdm
from sklearn.metrics import roc_auc_score, roc_curve, precision_recall_curve, accuracy_score
#####################################################################################
experiment_name = "Jun1"
mapping_file_location = "./data/mappings/mapping_all.csv"
scores_location = "./results/EVE_scores/BPU/all_EVE_scores_May23.csv"
labels_location = "./data/labels/All_3k_proteins_ClinVar_labels_May21.csv"
create_concatenation_baseline_scores = False
raw_baseline_scores_location = '/n/groups/marks/projects/marks_lab_and_oatml/DRP_part_2/dbNSFP_single_trascript_files'
concatenated_baseline_scores_location = './data/baseline_scores/all_baseline_scores_'+experiment_name+'.csv'
merged_file_eve_clinvar_baseline_location = './results/EVE_scores/BPU/all_EVE_Clinvar_baselines_BP_'+experiment_name+'.csv'
AUC_accuracy_all_location = './results/AUC_Accuracy/AUC_accuracy_all_'+experiment_name+'.csv'
AUC_accuracy_75pct_location = './results/AUC_Accuracy/AUC_accuracy_75pct_'+experiment_name+'.csv'
AUC_accuracy_all_position_level_location = './results/AUC_Accuracy/AUC_accuracy_all_position_level_'+experiment_name+'.csv'
AUC_accuracy_75pct_position_level_location = './results/AUC_Accuracy/AUC_accuracy_75pct_position_level_'+experiment_name+'.csv'
#####################################################################################
mapping_file = pd.read_csv(mapping_file_location,low_memory=False)
list_proteins = list(mapping_file.protein_name)
num_proteins_to_score = len(mapping_file.protein_name)
print("Number of proteins to score: "+str(num_proteins_to_score))
#####################################################################################
## Create concatenated file with all baseline scores
#####################################################################################
mapping_pid_filename = pd.read_csv('./data/mappings/mapping_pid_baseline-filename.csv',low_memory=False)
mapping_baseline_score = pd.read_csv('./data/mappings/mapping_baseline_score_cleanup.csv',low_memory=False)
variables_to_keep=[
'pid',
'mutant',
'clinvar_clnsig',
'BayesDel_addAF_score',
'BayesDel_noAF_score',
'CADD_phred',
'CADD_phred_hg19',
'CADD_raw',
'CADD_raw_hg19',
'ClinPred_score',
'DANN_score',
'DEOGEN2_score',
'Eigen-PC-phred_coding',
'Eigen-PC-raw_coding',
'Eigen-phred_coding',
'Eigen-raw_coding',
'FATHMM_score',
'fathmm-MKL_coding_score',
'fathmm-XF_coding_score',
'GenoCanyon_score',
'LIST-S2_score',
'LRT_score',
'M-CAP_score',
'MetaLR_score',
'MetaSVM_score',
'MPC_score',
'MutationAssessor_score',
'MutationTaster_score',
'MutPred_score',
'MVP_score',
'Polyphen2_HDIV_score',
'Polyphen2_HVAR_score',
'PrimateAI_score',
'PROVEAN_score',
'REVEL_score',
'SIFT_score',
'SIFT4G_score',
'VEST4_score',
'BayesDel_addAF_pred',
'BayesDel_noAF_pred',
'ClinPred_pred',
'DEOGEN2_pred',
'FATHMM_pred',
'fathmm-MKL_coding_pred',
'fathmm-XF_coding_pred',
'LIST-S2_pred',
'LRT_pred',
'M-CAP_pred',
'MetaLR_pred',
'MetaSVM_pred',
'MutationAssessor_pred',
'MutationTaster_pred',
'PrimateAI_pred',
'PROVEAN_pred',
'SIFT_pred',
'SIFT4G_pred'
#'Aloft_pred'
]
scoring_variables=[
'BayesDel_addAF_score',
'BayesDel_noAF_score',
'CADD_phred',
'CADD_phred_hg19',
'CADD_raw',
'CADD_raw_hg19',
'ClinPred_score',
'DANN_score',
'DEOGEN2_score',
'Eigen-PC-phred_coding',
'Eigen-PC-raw_coding',
'Eigen-phred_coding',
'Eigen-raw_coding',
'FATHMM_score',
'fathmm-MKL_coding_score',
'fathmm-XF_coding_score',
'GenoCanyon_score',
'LIST-S2_score',
'LRT_score',
'M-CAP_score',
'MetaLR_score',
'MetaSVM_score',
'MPC_score',
'MutationAssessor_score',
'MutationTaster_score',
'MutPred_score',
'MVP_score',
'Polyphen2_HDIV_score',
'Polyphen2_HVAR_score',
'PrimateAI_score',
'PROVEAN_score',
'REVEL_score',
'SIFT_score',
'SIFT4G_score',
'VEST4_score'
]
pred_variables_mapping_DT=[
'BayesDel_addAF_pred',
'BayesDel_noAF_pred',
'ClinPred_pred',
'DEOGEN2_pred',
'FATHMM_pred',
'LIST-S2_pred',
'M-CAP_pred',
'MetaLR_pred',
'MetaSVM_pred',
'PrimateAI_pred',
'SIFT_pred',
'SIFT4G_pred'
]
pred_variables_mapping_DN=[
'fathmm-MKL_coding_pred',
'fathmm-XF_coding_pred',
'LRT_pred',
'PROVEAN_pred'
]
pred_variables_to_threshold=[
'MVP_score',
'Polyphen2_HDIV_score',
'Polyphen2_HVAR_score'
]
if create_concatenation_baseline_scores:
list_processed_scoring_files=[]
for protein_name in tqdm.tqdm(list_proteins):
try:
baseline_filename = mapping_pid_filename['filename'][mapping_pid_filename['pid']==protein_name].iloc[0]
scoring_file = pd.read_csv(raw_baseline_scores_location+os.sep+baseline_filename, low_memory=False)
scoring_file['pid']=[protein_name]*len(scoring_file)
scoring_file['mutant']=scoring_file['aaref']+scoring_file['aapos'].astype(str)+scoring_file['aaalt']
scoring_file=scoring_file[variables_to_keep]
for score_var in scoring_variables:
scoring_file[score_var]=pd.to_numeric(scoring_file[score_var], errors="coerce") * int(mapping_baseline_score['directionality'][mapping_baseline_score['prediction_name']==score_var].iloc[0])
for pred_var in pred_variables_mapping_DT:
scoring_file[pred_var]=scoring_file[pred_var].map({"D":"Pathogenic", "T":"Benign"})
for pred_var in pred_variables_mapping_DN:
scoring_file[pred_var]=scoring_file[pred_var].map({"D":"Pathogenic", "N":"Benign"})
#scoring_file['Aloft_pred']=scoring_file['Aloft_pred'].map({"R":"Pathogenic", "D":"Pathogenic","T":"Benign"})
scoring_file['MutationAssessor_pred']=scoring_file['MutationAssessor_pred'].map({"H":"Pathogenic","M":"Pathogenic", "L":"Benign", "N":"Benign"})
scoring_file['Polyphen2_HDIV_pred']=(scoring_file['Polyphen2_HDIV_score']>0.5).map({True:"Pathogenic", False:"Benign"})
scoring_file['Polyphen2_HVAR_pred']=(scoring_file['Polyphen2_HVAR_score']>0.5).map({True:"Pathogenic", False:"Benign"})
scoring_file['MutationTaster_pred']=(scoring_file['MutationTaster_score']<0.5).map({True:"Pathogenic", False:"Benign"})
scoring_file['MVP_pred']=(scoring_file['MVP_score']>0.7).map({True:"Pathogenic", False:"Benign"})
list_processed_scoring_files.append(scoring_file)
except:
print("Problem processing baseline scores for: "+str(protein_name))
#try:
# all_baseline_scores = pd.concat([all_baseline_scores,scoring_file], axis=0)
#except:
# all_baseline_scores = scoring_file
all_baseline_scores = | pd.concat(list_processed_scoring_files, axis=0) | pandas.concat |
import gc
import itertools
import multiprocessing
import time
from collections import Counter
import numpy as np
import pandas as pd
def create_customer_feature_set(train):
customer_feats = pd.DataFrame()
customer_feats['customer_id'] = train.customer_id
customer_feats['customer_max_ratio'] = train.customer_id / \
np.max(train.customer_id)
customer_feats['index_max_ratio'] = train.customer_id / \
(train.index + 1e-14)
customer_feats['customer_count'] = train.customer_id.map(
train.customer_id.value_counts())
customer_feats['cust_first'] = train.customer_id.apply(
lambda x: int(str(x)[:1]))
customer_feats['cust_2first'] = train.customer_id.apply(
lambda x: int(str(x)[:2]))
customer_feats['cust_3first'] = train.customer_id.apply(
lambda x: int(str(x)[:3]))
customer_feats['cust_4first'] = train.customer_id.apply(
lambda x: int(str(x)[:4]))
customer_feats['cust_6first'] = train.customer_id.apply(
lambda x: int(str(x)[:6]))
# customer_feats.cust_3first = pd.factorize(customer_feats.cust_3first)[0]
customer_feats.drop(['customer_id'], axis=1, inplace=True)
return customer_feats
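# Added demo (toy customer ids are made up): shows the engineered customer feature block.
def _demo_customer_feature_set():
    toy = pd.DataFrame({'customer_id': [101, 2023, 101, 555777]})
    feats = create_customer_feature_set(toy)
    return feats.columns.tolist(), feats.shape  # 8 engineered columns, 4 rows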
def create_groupings_feature_set(data, features, transform=True):
df_features = pd.DataFrame()
year_mean = group_feat_by_feat(
data, 'year', features, 'mean', transform)
year_max = group_feat_by_feat(
data, 'year', features, 'max', transform)
year_count = group_feat_by_feat(
data, 'year', features, 'count', transform)
month_mean = group_feat_by_feat(
data, 'month', features, 'mean', transform)
month_max = group_feat_by_feat(
data, 'month', features, 'max', transform)
month_count = group_feat_by_feat(
data, 'month', features, 'count', transform)
market_mean = group_feat_by_feat(
data, 'market', features, 'mean', transform)
market_max = group_feat_by_feat(
data, 'market', features, 'max', transform)
market_count = group_feat_by_feat(
data, 'market', features, 'count', transform)
customer_mean = group_feat_by_feat(
data, 'customer_id', features, 'mean', transform)
customer_max = group_feat_by_feat(
data, 'customer_id', features, 'max', transform)
customer_count = group_feat_by_feat(
data, 'customer_id', features, 'count', transform)
df_features = pd.concat([year_mean, year_max, year_count, month_mean, month_max, month_count,
market_mean, market_max, market_count,
customer_mean, customer_max, customer_count], axis=1)
del year_mean, year_max, year_count, month_mean, month_max, month_count, \
market_mean, market_max, market_count, \
customer_mean, customer_max, customer_count
gc.collect()
return df_features
def create_aggregated_lags(df, current_month,
only_target=False, features=None, agg_func='mean',
month_merge=False):
assert(current_month > 0 and current_month < 16)
if month_merge:
df_result = df[df.date == current_month][['customer_id']]
else:
df_result = df[['customer_id']]
print('Creating grouping features based on aggregated data before {} date.'.format(
current_month))
print('Beginning shape:', df_result.shape)
if features is not None:
if 'customer_id' not in features:
features.append('customer_id')
df_lag = df[df.date < current_month]
if only_target:
df_lag = df_lag[['customer_id', 'target']].groupby(
'customer_id', as_index=False).agg('{}'.format(agg_func))
else:
if features is not None:
df_lag = df_lag[features].groupby(
'customer_id', as_index=False).agg('{}'.format(agg_func))
df_lag.columns = ['{}_lag_agg'.format(
x) if 'customer' not in x else x for x in df_lag.columns]
df_result = df_result.merge(
df_lag, on=['customer_id'], how='left', copy=False)
to_drop = [x for x in df_result.columns if 'customer' in x]
df_result.drop(to_drop, axis=1, inplace=True)
print('Final shape:', df_result.shape)
return df_result
def create_lag_features(df, current_month=1, start_lag=0, incremental=False,
only_target=False, features=None, agg_func='mean',
month_merge=False):
if month_merge:
df_result = df[df.date == current_month][['customer_id', 'target']]
else:
df_result = df[['customer_id', 'target']]
lag_subset = np.arange(start_lag, current_month, 1)
print('Beginning shape:', df_result.shape, 'Lag subset:', lag_subset)
if features is not None:
if 'customer_id' not in features:
features.append('customer_id')
if incremental:
print('Creating grouping features based on incremental lags.')
if not incremental:
print('Creating grouping features based on non-incremental lags.')
print('For non-incremental lags only mean aggregation can be used - switch to it.')
agg_func = 'mean'
for i in range(len(lag_subset)):
if incremental:
print('Dates subset:', lag_subset[lag_subset <= lag_subset[i]])
df_lag = df[df.date <= lag_subset[i]]
else:
df_lag = df[df.date == lag_subset[i]]
if only_target:
df_lag = df_lag[['customer_id', 'target']].groupby(
'customer_id', as_index=False).agg('{}'.format(agg_func))
else:
if features is not None:
df_lag = df_lag[features].groupby(
'customer_id', as_index=False).agg('{}'.format(agg_func))
df_lag.columns = ['{}_lag{}'.format(
x, i) if 'customer' not in x else x for x in df_lag.columns]
df_result = df_result.merge(
df_lag, on=['customer_id'], how='left', copy=False)
to_drop = [x for x in df_result.columns if 'customer' in x]
to_drop.append('target')
df_result.drop(to_drop, axis=1, inplace=True)
print('Final shape:', df_result.shape)
return df_result
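# Added demo (toy frame; values are assumptions): builds per-customer target lags for
# dates 0 and 1 and merges them back onto every row.
def _demo_create_lag_features():
    toy = pd.DataFrame({'customer_id': [1, 1, 2, 2],
                        'date': [0, 1, 0, 1],
                        'target': [10.0, 12.0, 3.0, 5.0]})
    return create_lag_features(toy, current_month=2, start_lag=0, only_target=True)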
def prepare_lags_data(train, test,
start_train=1, end_train=11,
start_test=12, end_test=15,
only_target=False, features=None,
incremental=False, agg_func='mean'):
df_train = pd.DataFrame()
df_test = pd.DataFrame()
print('Create training set.\n')
for i in range(start_train, end_train + 1, 1):
if incremental:
lag_features = create_lag_features(
train, i, start_train, incremental=incremental,
only_target=only_target, features=features, agg_func=agg_func)
else:
lag_features = create_lag_features(
train, i, i - 1, incremental=incremental,
only_target=only_target, features=features, agg_func=agg_func)
df_train = pd.concat([df_train, lag_features])
print('Current train shape:', df_train.shape)
print('\nCreate test set.\n')
for i in range(start_test, end_test + 1, 1):
if incremental:
lag_features = create_lag_features(
test, i, start_test, incremental=incremental,
only_target=only_target, features=features, agg_func=agg_func)
else:
lag_features = create_lag_features(
test, i, i - 1, incremental=incremental,
only_target=only_target, features=features, agg_func=agg_func)
df_test = pd.concat([df_test, lag_features])
print('Current test shape:', df_test.shape)
print('Final shapes:', df_train.shape, df_test.shape)
df_train.drop(['target'], axis=1, inplace=True)
df_train.reset_index(inplace=True, drop=True)
df_test.drop(['target'], axis=1, inplace=True)
df_test.reset_index(inplace=True, drop=True)
return df_train, df_test
def prepare_aggregated_lags(train, test,
start_train=0, end_train=11,
start_test=12, end_test=15,
only_target=False,
features=None, agg_func='mean'):
df_train = pd.DataFrame()
df_test = pd.DataFrame()
print('Create training set.\n')
for i in range(start_train, end_train + 1, 1):
lag_features = create_aggregated_lags(
train, i,
only_target=only_target, features=features, agg_func=agg_func)
df_train = pd.concat([df_train, lag_features])
print('\nCreate test set.\n')
for i in range(start_test, end_test + 1, 1):
lag_features = create_aggregated_lags(
test, i,
only_target=only_target, features=features, agg_func=agg_func)
df_test = pd.concat([df_test, lag_features])
print('Final shapes:', df_train.shape, df_test.shape)
df_train.reset_index(inplace=True, drop=True)
df_test.reset_index(inplace=True, drop=True)
return df_train, df_test
def labelcount_encode(df, categorical_features, ascending=False):
print('LabelCount encoding:', categorical_features)
new_df = pd.DataFrame()
for cat_feature in categorical_features:
cat_feature_value_counts = df[cat_feature].value_counts()
value_counts_list = cat_feature_value_counts.index.tolist()
if ascending:
# for ascending ordering
value_counts_range = list(
reversed(range(len(cat_feature_value_counts))))
else:
# for descending ordering
value_counts_range = list(range(len(cat_feature_value_counts)))
labelcount_dict = dict(zip(value_counts_list, value_counts_range))
new_df[cat_feature] = df[cat_feature].map(
labelcount_dict)
new_df.columns = ['{}_lc_encode'.format(x) for x in new_df.columns]
return new_df
def count_encode(df, categorical_features, normalize=False):
print('Count encoding:', categorical_features)
new_df = pd.DataFrame()
for cat_feature in categorical_features:
new_df[cat_feature] = df[cat_feature].astype(
'object').map(df[cat_feature].value_counts())
if normalize:
new_df[cat_feature] = new_df[cat_feature] / np.max(new_df[cat_feature])
new_df.columns = ['{}_count_encode'.format(x) for x in new_df.columns]
return new_df
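# Added demo (toy categories are made up): contrasts the two encoders above --
# labelcount_encode ranks categories by frequency, count_encode maps them to their
# raw or normalized frequency.
def _demo_categorical_encoders():
    toy = pd.DataFrame({'market': ['EU', 'EU', 'US', 'EU', 'ASIA']})
    return labelcount_encode(toy, ['market']), count_encode(toy, ['market'], normalize=True)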
def target_encode(df_train, df_test, categorical_features, smoothing=10):
print('Target encoding:', categorical_features)
df_te_train = pd.DataFrame()
df_te_test = pd.DataFrame()
for cat_feature in categorical_features:
df_te_train[cat_feature], df_te_test[cat_feature] = \
target_encode_feature(
df_train[cat_feature], df_test[cat_feature], df_train.target, smoothing)
df_te_train.columns = ['{}_target_encode'.format(x) for x in df_te_train.columns]
df_te_test.columns = ['{}_target_encode'.format(x) for x in df_te_test.columns]
return df_te_train, df_te_test
def bin_numerical(df, cols, step):
numerical_features = cols
new_df = pd.DataFrame()
for i in numerical_features:
try:
feature_range = np.arange(0, np.max(df[i]), step)
new_df['{}_binned'.format(i)] = np.digitize(
df[i], feature_range, right=True)
except ValueError:
df[i] = df[i].replace(np.inf, 999)
feature_range = np.arange(0, np.max(df[i]), step)
new_df['{}_binned'.format(i)] = np.digitize(
df[i], feature_range, right=True)
return new_df
def add_statistics(df, features_list):
X = pd.DataFrame()
X['sum_row_{}cols'.format(len(features_list))
] = df[features_list].sum(axis=1)
X['mean_row{}cols'.format(len(features_list))
] = df[features_list].mean(axis=1)
X['std_row{}cols'.format(len(features_list))
] = df[features_list].std(axis=1)
X['max_row{}cols'.format(len(features_list))] = np.amax(
df[features_list], axis=1)
print('Statistics of {} columns done.'.format(features_list))
return X
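# Added demo (toy numeric columns): row-wise sum/mean/std/max statistics as features.
def _demo_add_statistics():
    toy = pd.DataFrame({'f1': [1.0, 2.0], 'f2': [3.0, 4.0], 'f3': [5.0, 6.0]})
    return add_statistics(toy, ['f1', 'f2', 'f3'])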
def feature_combinations(df, features_list):
X = pd.DataFrame()
for comb in itertools.combinations(features_list, 2):
feat = comb[0] + "_" + comb[1]
X[feat] = df[comb[0]] * df[comb[1]]
print('Interactions on {} columns done.'.format(features_list))
return X
def group_feat_by_feat(df, feature_by, features_to, statistic='mean', transform=False):
X = pd.DataFrame()
for i in range(len(features_to)):
if statistic == 'mean':
if transform:
X['{0}_by_{1}_{2}'.format(feature_by, features_to[i], statistic)] = (
df.groupby(feature_by))[features_to[i]].transform('{}'.format(statistic))
else:
X['{0}_by_{1}_{2}'.format(feature_by, features_to[i], statistic)] = (
df.groupby(feature_by))[features_to[i]].mean()
if statistic == 'max':
if transform:
X['{0}_by_{1}_{2}'.format(feature_by, features_to[i], statistic)] = (
df.groupby(feature_by))[features_to[i]].transform('{}'.format(statistic))
else:
X['{0}_by_{1}_{2}'.format(feature_by, features_to[i], statistic)] = (
df.groupby(feature_by))[features_to[i]].max()
if statistic == 'min':
if transform:
X['{0}_by_{1}_{2}'.format(feature_by, features_to[i], statistic)] = (
df.groupby(feature_by))[features_to[i]].transform('{}'.format(statistic))
else:
X['{0}_by_{1}_{2}'.format(feature_by, features_to[i], statistic)] = (
df.groupby(feature_by))[features_to[i]].min()
if statistic == 'count':
if transform:
X['{0}_by_{1}_{2}'.format(feature_by, features_to[i], statistic)] = (
df.groupby(feature_by))[features_to[i]].transform('{}'.format(statistic))
else:
X['{0}_by_{1}_{2}'.format(feature_by, features_to[i], statistic)] = (
df.groupby(feature_by))[features_to[i]].count()
if not transform:
X['{}'.format(feature_by)] = X.index
X.reset_index(inplace=True, drop=True)
print('Groupings of {} columns by: {} done using {} statistic.'.format(features_to, feature_by,
statistic))
return X
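# Added demo (toy frame): per-market mean of a numeric column broadcast back to every
# row via transform=True.
def _demo_group_feat_by_feat():
    toy = pd.DataFrame({'market': ['EU', 'EU', 'US'], 'spend': [10.0, 20.0, 7.0]})
    return group_feat_by_feat(toy, 'market', ['spend'], statistic='mean', transform=True)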
def group_feat_by_feat_multiple(df, feature_by, features_to, statistic='mean', transform=False):
X = pd.DataFrame()
if statistic == 'mean':
if transform:
X = (df.groupby(feature_by))[
features_to].transform('{}'.format(statistic))
else:
X = (df.groupby(feature_by))[features_to].mean()
if statistic == 'max':
if transform:
X = (df.groupby(feature_by))[
features_to].transform('{}'.format(statistic))
else:
X = (df.groupby(feature_by))[features_to].max()
if statistic == 'min':
if transform:
X = (df.groupby(feature_by))[
features_to].transform('{}'.format(statistic))
else:
X = (df.groupby(feature_by))[features_to].min()
if statistic == 'count':
if transform:
X = (df.groupby(feature_by))[
features_to].transform('{}'.format(statistic))
else:
X = (df.groupby(feature_by))[features_to].count()
X.columns = ['{}_by_{}_{}'.format(
feature_by, i, statistic) for i in features_to]
print('Groupings of {} columns by: {} done using {} statistic.'.format(features_to, feature_by,
statistic))
return X
def group_feat_by_feat_list(df, features_list, transformation):
X = pd.DataFrame()
for i in range(len(features_list) - 1):
X['{0}_by_{1}_{2}'.format(features_list[i], features_list[i + 1], transformation)] = (
df.groupby(features_list[i]))[features_list[i + 1]].transform('{}'.format(transformation))
print('Groupings of {} columns done using {} transformation.'.format(
features_list, transformation))
return X
def feature_combinations_grouping(df, features_list, transformation):
X = pd.DataFrame()
for comb in itertools.combinations(features_list, 2):
X['{}_by_{}_{}_combinations'.format(comb[0], comb[1], transformation)] = (
df.groupby(comb[0]))[comb[1]].transform('{}'.format(transformation))
print('Groupings of {} columns done using {} transformation.'.format(
features_list, transformation))
return X
def get_duplicate_cols(df):
    # Sample rows (capped at the frame length) to keep the transpose/drop_duplicates cheap.
    dfc = df.sample(n=min(10000, len(df)))
    dfc = dfc.T.drop_duplicates().T
    duplicate_cols = sorted(list(set(df.columns).difference(set(dfc.columns))))
    print('Duplicate columns:', duplicate_cols)
    del dfc
    gc.collect()
    return duplicate_cols
def add_noise(series, noise_level):
return series * (1 + noise_level * np.random.randn(len(series)))
def target_encode_feature(trn_series=None,
tst_series=None,
target=None,
min_samples_leaf=100,
smoothing=10,
noise_level=1e-3):
"""
    Smoothing is computed as in the following paper by <NAME>:
https://kaggle2.blob.core.windows.net/forum-message-attachments/225952/7441/high%20cardinality%20categoricals.pdf
trn_series : training categorical feature as a pd.Series
tst_series : test categorical feature as a pd.Series
target : target data as a pd.Series
min_samples_leaf (int) : minimum samples to take category average into account
smoothing (int) : smoothing effect to balance categorical average vs prior
"""
assert len(trn_series) == len(target)
assert trn_series.name == tst_series.name
    temp = pd.concat([trn_series, target], axis=1)
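# The body of target_encode_feature is cut off above, so the following is only a
# hedged, self-contained sketch of the smoothed target-encoding blend that the
# docstring describes (prior vs. per-category mean, weighted by a sigmoid of the
# category count). It is an assumption, not the author's exact implementation,
# and it omits the test-series mapping and the noise step.
def _target_encode_sketch(trn_series, target, min_samples_leaf=100, smoothing=10):
    temp = pd.concat([trn_series, target], axis=1)
    averages = temp.groupby(trn_series.name)[target.name].agg(['mean', 'count'])
    prior = target.mean()
    # Weight moves from the global prior toward the category mean as the count grows.
    weight = 1 / (1 + np.exp(-(averages['count'] - min_samples_leaf) / smoothing))
    averages['encoded'] = prior * (1 - weight) + averages['mean'] * weight
    return trn_series.map(averages['encoded']).fillna(prior)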
import numpy as np
import pandas as pd
from avaml.aggregatedata.download import CAUSES, DIRECTIONS
from avaml.machine import BulletinMachine
__author__ = 'arwi'
class NaiveYesterday(BulletinMachine):
def __init__(self):
self.fitted = True
super().__init__()
def fit(self, labeled_data):
"""Does nothing. Here for compability."""
def predict(self, labeled_data, force_subprobs=False):
"""Predict data using supplied LabeledData.
:param labeled_data: LabeledData. Dataset to predict. May have empty LabeledData.label.
:return: LabeledData. A copy of data, with LabeledData.pred filled in.
"""
labeled_data = labeled_data.denormalize()
label = labeled_data.data.reorder_levels([1, 0], axis=1)["1"]
main_class = ["danger_level", "emergency_warning", "problem_1", "problem_2", "problem_3", "problem_amount"]
subprobs = ["drift-slab", "glide", "new-loose", "new-slab", "pwl-slab", "wet-loose", "wet-slab", ]
subprob_class = ["cause", "dist", "dsize", "lev_fill", "prob", "trig"]
subprob_multi = ["aspect"]
subprob_real = ["lev_min", "lev_max"]
columns = pd.MultiIndex.from_product([["CLASS"], [""], main_class])
        columns = columns.append(pd.MultiIndex.from_product([["CLASS"], subprobs, subprob_class]))
"""
utility functions for working with DataFrames
"""
import pandas
TEST_DF = pandas.DataFrame([1, 2, 3, 4, 5, 6])
#macroOLS.py
import pandas as pd
pd.core.common.is_list_like = pd.api.types.is_list_like
import pandas_datareader.data as web
import datetime
import matplotlib.pyplot as plt
import numpy as np
import os
from matplotlib.backends.backend_pdf import PdfPages
import statsmodels.api as sm
def OLSRegression(df, endogVar, exogVars):
    # Select endogenous data for Y
    Y = df[endogVar]
    # Select exogenous data for X
    X = df[exogVars]
    # Add a column of ones for the constant term
    X = sm.add_constant(X)
    # Run the regression and save the fitted results
    model = sm.OLS(Y, X)
    results = model.fit()
    return results
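# Hypothetical usage sketch (not in the original script): once the logged
# first-difference frame is built further below, the OLSRegression wrapper can
# be called on any column subset. The column names used here do appear later in
# this script, but this particular regression is only illustrative.
def _example_ols_run(df):
    results = OLSRegression(df, 'Real GDP', ['Base Money', 'IOER'])
    print(results.summary())
    return results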
def plotValues(df, key, datatype):
    df[key].plot.line(figsize=(24,12), legend=False)
    plt.title(key + "\n" + datatype, fontsize=40)
    plt.show()
    plt.close()
def buildSummaryCSV(csvSummary, csvName):
#Create a new csv file
file = open(csvName + ".csv", "w")
#write results in csv file
file.write(csvSummary)
#close CSV
file.close()
start = datetime.datetime(2009, 1, 1)
end = datetime.datetime(2018, 8, 1)
dfDict = {}
dfDict["QData"] = web.DataReader("GDPC1", "fred", start, end).resample("Q").first()
dfDict["QData"] = dfDict["QData"].rename(columns = {"GDPC1":"Real GDP"})
dfDict["QData"]["Nominal GDP"] = web.DataReader("GDP", "fred", start, end).resample("Q").first()
dfDict["QData"]["GDP Deflator"] = web.DataReader("GDPDEF", "fred", start, end).resample("Q").first()
dfDict["QData"]["Base Money"] = web.DataReader("AMBNS", "fred",start, end).resample("Q").first() * 1000
dfDict["QData"]["IOER"] = web.DataReader("IOER", "fred", start, end).resample("Q").first()
dfDict["QData"]["Excess Reserves"] = web.DataReader("EXCSRESNS", "fred", start, end).resample("Q").first()
dfDict["QData"]["Effective Base"] = dfDict["QData"]["Base Money"] - dfDict["QData"]["Excess Reserves"]
dfDict["Logged Data"] = {}
dfDict["Logged First Difference Data"] = {}
#for loop to create logged data and logged first difference data
for key in dfDict["QData"]:
# create logged data by np.log(dataframe)
dfDict["Logged Data"][key] = np.log(dfDict["QData"][key])
# create first difference data by dataframe.diff()
dfDict["Logged First Difference Data"][key] = dfDict["Logged Data"][key].diff().dropna()
sumStatsDict = {}
sumStatsCats = ["Mean", "Median", "Variance"]
for key1 in dfDict:
sumStatsDict[key1] = {}
for key2 in dfDict["QData"]:
df = dfDict[key1][key2]
sumStatsDict[key1][key2]={}
for j in range(len(sumStatsCats)):
key3 = sumStatsCats[j]
if key3 == "Mean":
sumStatsDict[key1][key2][key3] = np.mean(df)
if key3 == "Median":
sumStatsDict[key1][key2][key3] = np.median(df)
if key3 == "Variance":
sumStatsDict[key1][key2][key3] = np.var(df)
# adjust text size in plots
plt.rcParams.update({'font.size': 24})
# reduce white space in margins to zero
plt.rcParams['axes.ymargin'] = 0
plt.rcParams['axes.xmargin'] = 0
for datatype in dfDict:
for key in dfDict["QData"]:
df = dfDict[datatype]
        plotValues(df, key, datatype)
# make dataframe out of all Logged First Difference Data
# drop NAN values
dataFrame = pd.DataFrame(dfDict["Logged First Difference Data"])
# -*- coding: utf-8 -*-
"""
Created on Tue May 12 17:07:00 2020
@author: hexx
This code does the following:
(1) saves policy, COVID, and projection data downloaded online to a local folder
(2) processes and saves the data used to project mobility
"""
import pandas as pd
import numpy as np
import os
from sklearn import datasets, linear_model
import matplotlib.pyplot as plt
from myFunctions import def_add_datashift, createFolder
import warnings
warnings.filterwarnings("ignore")
createFolder('./Mobility projection')
scenario_cases = ['lower', 'mean', 'upper'] #'upper', 'lower',
startDate = '2020-02-24'
today_x = pd.to_datetime('today')
today =today_x.strftime("%Y-%m-%d")
# today ='2020-07-08'
PODA_Model = np.load("./PODA_Model_"+today+".npy",allow_pickle='TRUE').item()
YYG_Date = PODA_Model['YYG_File_Date']
moving_avg = PODA_Model['Moving_Average']
#create folder to save YYG Projection
createFolder('./YYG Data/'+YYG_Date)
# createFolder('./COVID/'+today)
df_StateName_Code = PODA_Model['StateName_StateCode']
ML_Data = PODA_Model['ML_Data']
# load Policy Data
df_Policy = pd.read_csv('https://raw.githubusercontent.com/COVID19StatePolicy/SocialDistancing/master/data/USstatesCov19distancingpolicy.csv', encoding= 'unicode_escape')
createFolder('./Policy File')
df_Policy.to_excel('./Policy File/Policy'+today+'.xlsx') # save policy data
# Read Population Data
df_Population = PODA_Model['State Population']
#Read County Area
df_Area = PODA_Model['State Area']
#Employment
df_Employee = PODA_Model['State Employment']
confirmed = ML_Data[ML_Data['State Name']=='Michigan']
confirmed = confirmed[['US Total Confirmed', 'US Daily Confirmed', 'US Daily Death']]
confirmed = confirmed.rename(columns={"US Total Confirmed":"ML US Total Confirmed", "US Daily Confirmed":"ML US Daily Confirmed",
"US Daily Death":"ML US Daily Death"})
infected_to_Confirmed = pd.DataFrame(columns = ['Country Name', 'scenario', 'shiftDay', 'regr_coef', 'regr_interp'])
infected_to_Confirmed_State = pd.DataFrame(columns = ['State Name', 'scenario', 'shiftDay', 'regr_coef', 'regr_interp'])
for zz, scenario in enumerate(scenario_cases):
'''
Calculate the new infected to confirmed correlation
'''
df_US_Projection = pd.read_csv('https://raw.githubusercontent.com/youyanggu/covid19_projections/master/projections/'+YYG_Date+'/US.csv')
df_US_Projection.to_csv('./YYG Data/'+YYG_Date+'/US.csv') # save US Projection data
df_US_Projection['date'] = pd.to_datetime(df_US_Projection['date'])
df_US_Projection.set_index('date', inplace=True)
YYG_Daily_Infected = df_US_Projection[['predicted_new_infected_'+scenario]]
YYG_Daily_Infected = YYG_Daily_Infected[(YYG_Daily_Infected.index < today_x) & (YYG_Daily_Infected.index > pd.to_datetime('2020-05-01'))]
R2_old=0
for j in range(0, 20):
YYG_Data_shifted = YYG_Daily_Infected['predicted_new_infected_'+scenario].shift(j).to_frame()
YYG_Data_shifted['date']=YYG_Data_shifted.index
YYG_Data_shifted=YYG_Data_shifted.set_index('date')
# merged = pd.merge_asof(YYG_Data_shifted, confirmed, left_index=True, right_index=True).dropna()
merged = confirmed.join(YYG_Data_shifted).dropna()
x_conv=merged['predicted_new_infected_'+scenario].to_numpy()
y_conv = merged['ML US Daily Confirmed'].to_numpy()
x_length = len(x_conv)
x_conv = x_conv.reshape(x_length, 1)
y_conv = y_conv.reshape(x_length, 1)
regr = linear_model.LinearRegression(fit_intercept = False)
regr.fit(x_conv, y_conv)
R2_new = regr.score(x_conv, y_conv)
if R2_new > R2_old:
new_row = {'Country Name': 'US', 'scenario': scenario, 'shiftDay': j,
'regr_coef': regr.coef_[0][0], 'regr_interp':regr.intercept_, 'R2': R2_new}
merged_select = merged
regr_selected = regr
R2_old = R2_new
infected_to_Confirmed=infected_to_Confirmed.append(new_row, ignore_index =True)
fig = plt.figure(figsize=(6, 5))
ax = fig.add_subplot(1, 1, 1)
# normalized scale
ax.plot(merged_select.index, merged_select['predicted_new_infected_'+scenario]*new_row['regr_coef'] + new_row['regr_interp'], 'o', label='YYG Predicted')
# ax.plot(merged_select.index, merged_select['predicted_total_infected_mean'], 'o', label='YYG Predicted')
ax.plot(merged_select.index, merged_select['ML US Daily Confirmed'], label='confirmed')
ax.set_xlabel('Label')
ax.set_ylabel('Prediction')
ax.set_xlim(pd.to_datetime('2020-05-01'), pd.to_datetime('today'))
fig.autofmt_xdate(rotation=45)
ax.legend()
ax.set_title('US'+scenario)
'''
'''
all_Data=pd.DataFrame()
#YYG State level projection
df_US_Projection['State Name']='US'
df_US_Projection['country_region_code'] = 'US'
df_US_Projection['country_region'] = 'United States'
df_US_Projection['retail_and_recreation'] =1
df_US_Projection['grocery_and_pharmacy'] =1
df_US_Projection['parks'] = 1
df_US_Projection['transit_stations'] = 1
df_US_Projection['workplaces'] = 1
df_US_Projection['residential'] = 1
df_US_Projection['US Daily Confirmed'] = df_US_Projection['predicted_new_infected_'+scenario].shift(new_row['shiftDay'])*new_row['regr_coef'] + new_row['regr_interp']
df_US_Projection['US Daily Confirmed'] = df_US_Projection['US Daily Confirmed'].rolling(window=moving_avg).mean()
# df_US_Projection['US Daily Confirmed'] = df_US_Projection['US Total Confirmed'].diff().rolling(window=moving_avg).mean()
for i, da in enumerate(confirmed.index):
df_US_Projection.loc[da,'US Total Confirmed']= confirmed.loc[da, 'ML US Total Confirmed']
df_US_Projection.loc[da,'US Daily Confirmed']= confirmed.loc[da, 'ML US Daily Confirmed']
# df_US_Projection['US Daily Confirmed'] = (df_US_Projection['predicted_new_infected_'+scenario].shift(shiftDay))/infected_Confirmed_Ratio
df_US_Projection['US Daily Confirmed Dfdt'] = df_US_Projection['US Daily Confirmed'].diff()
# df_US_Projection = def_add_datashift (df_US_Projection, 'US Total Confirmed', [1, 3, 7, 10])
df_US_Projection = def_add_datashift (df_US_Projection, 'US Daily Confirmed', [1, 3, 7, 10])
df_US_Projection = def_add_datashift (df_US_Projection, 'US Daily Confirmed Dfdt', [1, 3, 7, 10])
df_US_Projection['US Total Death'] = df_US_Projection['predicted_total_deaths_'+scenario].fillna(0) + df_US_Projection['total_deaths'].fillna(0)
df_US_Projection['US Daily Death'] = (df_US_Projection['predicted_deaths_'+scenario].fillna(0) + df_US_Projection['actual_deaths'].fillna(0)).rolling(window=moving_avg).mean()
for i, da in enumerate(confirmed.index):
df_US_Projection.loc[da,'US Daily Death']= confirmed.loc[da, 'ML US Daily Death']
df_US_Projection['US Daily Death Dfdt'] = df_US_Projection['US Daily Death'].diff()
# df_US_Projection = def_add_datashift (df_US_Projection, 'US Total Death', [1, 3, 7, 10])
df_US_Projection = def_add_datashift (df_US_Projection, 'US Daily Death', [1, 3, 7, 10])
df_US_Projection = def_add_datashift (df_US_Projection, 'US Daily Death Dfdt', [1, 3, 7, 10])
df_US_Projection = df_US_Projection.iloc[:, 18:100]
df_US_Projection = df_US_Projection[df_US_Projection.index > pd.to_datetime(startDate)]
stateNameList = df_StateName_Code['State Name'].drop_duplicates().dropna().tolist()
ML_Data_StateDailyDeath=pd.DataFrame()
for stateName in stateNameList:
if stateName == 'District of Columbia':
continue
state_Code = df_StateName_Code.loc[df_StateName_Code['State Name'] == stateName, 'State Code'].iloc[0]
print (scenario +': '+ stateName)
YYG_State_Proj_Location ='https://raw.githubusercontent.com/youyanggu/covid19_projections/master/projections/'+ YYG_Date +'/US_'+ state_Code+'.csv'
df_State_Projection = pd.read_csv(YYG_State_Proj_Location, header=0)
# save YYG State Projection data
if zz==0:
df_State_Projection.to_csv('./YYG Data/'+YYG_Date+'/US_'+state_Code+'.csv')
df_State_Projection['date'] = pd.to_datetime(df_State_Projection['date'])
df_State_Projection.set_index('date', inplace=True)
ML_Data_State = ML_Data[ML_Data['State Name'] == stateName]
ML_Data_StateDailyDeath = ML_Data_State[['State Daily Death']]
ML_Data_StateDailyDeath.rename(columns={'State Daily Death': 'ML State Daily Death'},inplace=True)
ML_Data_StateDailyDeath = ML_Data_StateDailyDeath[ML_Data_StateDailyDeath.index > df_State_Projection.index[0]]
'''
Calculate the new infected to confirmed correlation
'''
# df_State_Projection = pd.read_csv('https://raw.githubusercontent.com/youyanggu/covid19_projections/master/projections/'+YYG_Date+'/US.csv')
# df_US_Projection.to_csv('./YYG Data/'+YYG_Date+'/US.csv') # save US Projection data
# df_US_Projection['date'] = pd.to_datetime(df_US_Projection['date'])
# df_US_Projection.set_index('date', inplace=True)
YYG_Total_Infected = df_State_Projection[['predicted_total_infected_'+scenario]]
YYG_Total_Infected = YYG_Total_Infected[(YYG_Total_Infected.index < today_x) & (YYG_Total_Infected.index > pd.to_datetime('2020-05-01'))]
confirmed_State = ML_Data_State[['State Total Confirmed', 'State Daily Confirmed']]
confirmed_State = confirmed_State.rename(columns={"State Total Confirmed":"ML State Total Confirmed", "State Daily Confirmed":"ML State Daily Confirmed"})
R2_old=0
for j in range(0, 20):
YYG_Data_shifted = YYG_Total_Infected['predicted_total_infected_'+scenario].shift(j)
YYG_Data_shifted = YYG_Total_Infected['predicted_total_infected_'+scenario].shift(j).to_frame()
YYG_Data_shifted['date']=YYG_Data_shifted.index
YYG_Data_shifted=YYG_Data_shifted.set_index('date')
merged = confirmed_State.join(YYG_Data_shifted).dropna()
# merged = pd.merge_asof(YYG_Data_shifted, confirmed_State, left_index=True, right_index=True).dropna()
x_conv=merged['predicted_total_infected_'+scenario].to_numpy()
y_conv = merged['ML State Total Confirmed'].to_numpy()
x_length = len(x_conv)
x_conv = x_conv.reshape(x_length, 1)
y_conv = y_conv.reshape(x_length, 1)
regr = linear_model.LinearRegression(fit_intercept = True)
regr.fit(x_conv, y_conv)
R2_new = regr.score(x_conv, y_conv)
if R2_new > R2_old:
new_row_State = {'State Name': stateName, 'scenario': scenario, 'shiftDay': j,
'regr_coef': regr.coef_[0][0], 'regr_interp':regr.intercept_, 'R2': R2_new}
merged_select = merged
regr_selected = regr
R2_old = R2_new
infected_to_Confirmed_State=infected_to_Confirmed_State.append(new_row_State, ignore_index =True)
fig = plt.figure(figsize=(6, 5))
ax = fig.add_subplot(1, 1, 1)
# normalized scale
ax.plot(merged_select.index, merged_select['predicted_total_infected_'+scenario]*new_row_State['regr_coef'] + new_row_State['regr_interp'], 'o', label='YYG Predicted')
# ax.plot(merged_select.index, merged_select['predicted_total_infected_mean'], 'o', label='YYG Predicted')
ax.plot(merged_select.index, merged_select['ML State Total Confirmed'], label='confirmed')
ax.set_xlabel('Label')
ax.set_ylabel('Prediction')
ax.set_xlim(pd.to_datetime('2020-05-01'), pd.to_datetime('today'))
fig.autofmt_xdate(rotation=45)
ax.legend()
ax.set_title(stateName+scenario)
'''
'''
df_State_Projection['State Total Confirmed'] = df_State_Projection['predicted_total_infected_'+scenario].shift(new_row_State['shiftDay'])*new_row_State['regr_coef'] + new_row_State['regr_interp']
df_State_Projection['State Daily Confirmed'] = df_State_Projection['State Total Confirmed'].diff().rolling(window=moving_avg).mean()
for i, da in enumerate(confirmed_State.index):
df_State_Projection.loc[da,'State Total Confirmed']= confirmed_State.loc[da, 'ML State Total Confirmed']
df_State_Projection.loc[da,'State Daily Confirmed']= confirmed_State.loc[da, 'ML State Daily Confirmed']
df_State_Projection=df_State_Projection[df_State_Projection.index >= pd.to_datetime('2020-03-01')].sort_index()
# df_US_Projection['US Daily Confirmed'] = (df_US_Projection['predicted_new_infected_'+scenario].shift(shiftDay))/infected_Confirmed_Ratio
df_State_Projection['State Daily Confirmed Dfdt'] = df_State_Projection['State Daily Confirmed'].diff()
# df_US_Projection = def_add_datashift (df_US_Projection, 'US Total Confirmed', [1, 3, 7, 10])
df_State_Projection = def_add_datashift (df_State_Projection, 'State Daily Confirmed', [1, 3, 7, 10])
df_State_Projection = def_add_datashift (df_State_Projection, 'State Daily Confirmed Dfdt', [1, 3, 7, 10])
# df_State_Projection = df_State_Projection[df_State_Projection.index >= pd.to_datetime('2020-03-01')].sort_index()
df_State_Data = (df_State_Projection['predicted_total_deaths_'+scenario].fillna(0) + df_State_Projection['total_deaths'].fillna(0)).rename('State Total Death').to_frame()
# df_State_Data = df_State_Data[df_State_Data.index >= pd.to_datetime('2020-03-01')].sort_index()
df_State_Data = pd.merge_asof(df_State_Data, ML_Data_StateDailyDeath, left_index=True,right_index=True, direction='forward')
# df_State_Data['State Daily Death'] = df_State_Projection['predicted_deaths_'+scenario].fillna(0) + df_State_Data['ML State Daily Death'].fillna(0)
df_State_Data['State Daily Death'] = (df_State_Projection['predicted_deaths_'+scenario].fillna(0) + df_State_Projection['actual_deaths'].fillna(0)).rolling(window=moving_avg).mean()
'''
replace the YYG historical daily death data by the ML historical data
'''
for i, da in enumerate(ML_Data_StateDailyDeath.index):
df_State_Data.loc[da,'State Daily Death']= ML_Data_StateDailyDeath.loc[da, 'ML State Daily Death']
        df_State_Data = df_State_Data[df_State_Data.index >= pd.to_datetime('2020-03-01')]
import pickle
import pandas as pd
vectorizer = pickle.load(open('models/vectorizer.pkl','rb'))
finalized_model = pickle.load(open('models/finalized_model.pkl','rb'))
label_binarizer = pickle.load(open('models/label_binarizer.pkl','rb'))
medium_cleaned = pickle.load(open('models/medium_cleaned.pkl','rb'))
medium_tags = pickle.load(open('models/medium_tag_data.pkl','rb'))
def get_tag(cleaned_query):
"""
Function which predicts the tag of the sentence
based on the labeled tags given in the dataset
    It first converts the processed sentence
    into a vector using a TF-IDF vectorizer that
    has been fitted on the whole corpus of words
    in the Title column of Medium stories.
    Logistic Regression wrapped in a OneVsRestClassifier
    is used for predicting the label/tag.
    The tags were converted to binary format using
    MultiLabelBinarizer; its inverse_transform
    method is used to recover the tag name as a string.
Parameters:
cleaned_query (str): pre-processed input
Returns:
tag_name (list of strings): list of predicted tags
Example:
cleaned query : "extract data from csv"
tag_name: ['Tag-data-science']
"""
# TF-IDF Vectorizer
    query_vector = vectorizer.transform(pd.Series(cleaned_query))
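# The function above is truncated at the vectorizing step, so here is a hedged
# sketch of the remaining steps the docstring describes (predict with the
# OneVsRest model, then map the binary output back to tag names). This is an
# assumption about the flow, not the author's exact code; it reuses the
# vectorizer, finalized_model and label_binarizer pickles loaded above.
def _get_tag_sketch(cleaned_query):
    query_vector = vectorizer.transform(pd.Series(cleaned_query))
    binary_prediction = finalized_model.predict(query_vector)
    # inverse_transform returns one tuple of tag names per input row.
    tag_name = list(label_binarizer.inverse_transform(binary_prediction)[0])
    return tag_name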
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""ML Program."""
from __future__ import (absolute_import, division,
print_function, unicode_literals)
import pandas as pd
import numpy as np
import os
from sklearn.preprocessing import LabelEncoder
from sklearn.metrics import make_scorer
import xgboost as xg
from sklearn.model_selection import GridSearchCV
from random import randrange, uniform
def my_custom_loss_func(ground_truth, predictions):
diff = np.abs(ground_truth - predictions).max()
loss_value = np.log(1 + diff)
return loss_value
def preprocess(x):
x['signup_time'] = pd.to_datetime(x['signup_time'])
x['purchase_time'] = pd.to_datetime(x['purchase_time'])
x['signup_week'] = x['signup_time'].dt.week
x['signup_year'] = x['signup_time'].dt.year
x['purchase_week'] = x['purchase_time'].dt.week
x['purchase_year'] = x['purchase_time'].dt.year
x['signup_to_purchase'] = (x['purchase_time'] - x['signup_time']).dt.days
x['signup_time'] = (pd.to_datetime(x['signup_time']) - pd.to_datetime('1900-01-01')).dt.days
x['purchase_time'] = (pd.to_datetime(x['purchase_time']) - pd.to_datetime('1900-01-01')).dt.days
x['purchase_deviation'] = (x['purchase_value'] - x['purchase_value'].mean()) / x['purchase_value'].std()
x = x.sort_values(by=['user_id', 'purchase_time'], ascending=True)
return x
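# A small, hypothetical illustration (not in the original program) of what
# preprocess derives from the two timestamp columns: week/year parts, the
# signup-to-purchase gap in days, days-since-1900 encodings of both stamps,
# and a z-score style purchase deviation. The single-row frame is made up.
def _example_preprocess():
    demo = pd.DataFrame({
        'user_id': [1],
        'signup_time': ['2015-02-01 10:00:00'],
        'purchase_time': ['2015-02-03 12:00:00'],
        'purchase_value': [30.0],
    })
    out = preprocess(demo)
    # out['signup_to_purchase'] equals 2 (whole days between the two timestamps)
    return out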
def train(cost):
my_path = os.path.abspath(os.path.dirname(__file__))
train_data_file = os.path.join(my_path,'fraud.csv')
    fraud_df = pd.read_csv(train_data_file)
import numpy as np, pandas as pd, matplotlib.pyplot as plt
import os, pickle, subprocess, itertools
from numpy import array as nparr
from glob import glob
from matplotlib.ticker import MultipleLocator, FormatStrFormatter
def scp_rms_percentile_files(statsdir, outdir):
if not os.path.exists(outdir):
os.mkdir(outdir)
print('made {}'.format(outdir))
p = subprocess.Popen([
"scp", "lbouma@phn12:{}/percentiles_RMS_vs_med_mag*.csv".format(statsdir),
"{}/.".format(outdir),
])
sts = os.waitpid(p.pid, 0)
return 1
def scp_knownplanets(statsdir, outdir):
if not os.path.exists(outdir):
os.mkdir(outdir)
print('made {}'.format(outdir))
p = subprocess.Popen([
"scp", "lbouma@phn12:{}/*-0*/*_TFA*_snr_of_fit.csv".format(statsdir),
"{}/.".format(outdir),
])
sts = os.waitpid(p.pid, 0)
return 1
def get_rms_file_dict(projids, camccdstr, ap='TF1'):
d = {}
for projid in projids:
run_id = "{}-{}".format(camccdstr, projid)
indir = (
'../results/optimizing_pipeline_parameters/{}_stats'.format(run_id)
)
inpath = os.path.join(
indir, "percentiles_RMS_vs_med_mag_{}.csv".format(ap))
d[projid] = pd.read_csv(inpath)
return d
def get_knownplanet_file_dict(projids, camccdstr, ap='TFA1'):
d = {}
for projid in projids:
d[projid] = {}
run_id = "{}-{}".format(camccdstr, projid)
indir = (
'../results/optimizing_pipeline_parameters/{}_knownplanets'.format(run_id)
)
inpaths = glob(
os.path.join(
indir, "*-0*_{}_snr_of_fit.csv".format(ap)
)
)
for inpath in inpaths:
toi_id = str(
os.path.basename(inpath).split('_')[0].replace('-','.')
)
d[projid][toi_id] = pd.read_csv(inpath)
return d
def plot_rms_comparison(d, projids, camccdstr, expmtstr, overplot_theory=True,
apstr='TF1', yaxisval='RMS',
outdir='../results/optimizing_pipeline_parameters/',
xlim=[6,16], ylim=[3e-5*1e6, 1e-2*1e6],
descriptionkey=None):
"""
descriptionkey: e.g., "kernelspec". or "aperturelist", or whatever.
"""
fig,ax = plt.subplots(figsize=(4,3))
pctiles = [25,50,75]
defaultcolors = plt.rcParams['axes.prop_cycle'].by_key()['color']
defaultlines = ['-.','-',':','--']*2
if len(projids) > len(defaultcolors):
raise NotImplementedError('need a new color scheme')
if len(projids) > len(defaultlines):
raise NotImplementedError('need a new line scheme')
linestyles = defaultlines[:len(projids)]
colors = defaultcolors[:len(projids)]
#linestyles = itertools.cycle(defaultlines[:len(projids)])
#colors = itertools.cycle(defaultcolors[:len(projids)])
for projid, color in zip(projids, colors):
run_id = "{}-{}".format(camccdstr, projid)
for ix, ls in zip([0,1,2], linestyles):
midbins = nparr(d[projid].iloc[ix].index).astype(float)
rms_vals = nparr(d[projid].iloc[ix])
if isinstance(descriptionkey,str):
pdict = pickle.load(open(
'../data/reduction_param_pickles/projid_{}.pickle'.
format(projid), 'rb')
)
description = pdict[descriptionkey]
label = '{}. {}%'.format(
description, str(pctiles[ix])
)
ax.plot(midbins, rms_vals*1e6/(2**(1/2.)), label=label,
color=color, lw=1, ls=ls)
ax.text(0.98, 0.02, camccdstr, transform=ax.transAxes, ha='right',
va='bottom', fontsize='xx-small')
if overplot_theory:
# show the sullivan+2015 interpolated model
Tmag = np.linspace(6, 13, num=200)
lnA = 3.29685004771
B = 0.8500214657
C = -0.2850416324
D = 0.039590832137
E = -0.00223080159
F = 4.73508403525e-5
ln_sigma_1hr = (
lnA + B*Tmag + C*Tmag**2 + D*Tmag**3 +
E*Tmag**4 + F*Tmag**5
)
sigma_1hr = np.exp(ln_sigma_1hr)
sigma_30min = sigma_1hr * np.sqrt(2)
ax.plot(Tmag, sigma_1hr, 'k-', zorder=3, lw=1,
label='S+15 $\sigma_{\mathrm{1hr}}$ (interp)')
ax.legend(loc='upper left', fontsize=4)
ax.set_yscale('log')
ax.set_xlabel('{:s} median instrument magnitude'.
format(apstr.upper()))
ax.set_ylabel('{:s} {:s}'.
format(apstr.upper(), yaxisval)
+' [$\mathrm{ppm}\,\mathrm{hr}^{1/2}$]')
ax.set_xlim(xlim)
ax.set_ylim(ylim)
projidstr = ''
for p in projids:
projidstr += '_{}'.format(p)
theorystr = '_withtheory' if overplot_theory else ''
savname = ( os.path.join(
outdir,'{}compare_percentiles{}{}_{:s}_vs_med_mag_{:s}.png'.
format(expmtstr, projidstr, theorystr, yaxisval, apstr.upper())
))
fig.tight_layout()
fig.savefig(savname, dpi=300)
print('made {}'.format(savname))
def plot_knownplanet_comparison(d, projids, camccdstr, expmtstr, apstr='TFA1',
descriptionkey='kernelspec',
outdir='../results/optimizing_pipeline_parameters/',
ylim=None, divbymaxsnr=False):
plt.close('all')
fig,ax = plt.subplots(figsize=(4,3))
defaultcolors = plt.rcParams['axes.prop_cycle'].by_key()['color']
if len(projids) < len(defaultcolors):
colors = defaultcolors[:len(projids)]
if len(projids) > len(defaultcolors):
if len(projids)==13:
# cf http://colorbrewer2.org/#type=qualitative&scheme=Paired&n=12
# seaborn paired 12 color + black.
colors = ["#a6cee3", "#1f78b4", "#b2df8a", "#33a02c", "#fb9a99",
"#e31a1c", "#fdbf6f", "#ff7f00", "#cab2d6", "#6a3d9a",
"#ffed6f", "#b15928", "#000000"]
else:
# give up trying to do smart color schemes. just loop it, and
# overlap. (the use of individual colors becomes small at N>13
# anyway).
colors = ["#a6cee3", "#1f78b4", "#b2df8a", "#33a02c", "#fb9a99",
"#e31a1c", "#fdbf6f", "#ff7f00", "#cab2d6", "#6a3d9a",
"#ffed6f", "#b15928", "#000000"]
colors *= 4
colors = colors[:len(projids)]
#colors = itertools.cycle(defaultcolors[:len(projids)])
# may need to get max snr over projids, for each toi.
n_projids = len(projids)
toi_counts, temp_tois = [], []
for projid in projids:
toi_ids = np.sort(list(d[projid].keys()))
toi_counts.append(len(toi_ids))
temp_tois.append(toi_ids)
# some projids have different numbers of TOIs. they will get nans.
utois = np.sort(np.unique(np.concatenate(temp_tois)))
# some TOIs are "bad" and need to be filtered. (e.g., if they have
# systematics that mess with BLS, and so you dont want them in this
# comparison). NOTE: these are manually appended.
manual_bad_tois = ['243', '354', '359']
utois = [u for u in utois if u.split('.')[0] not in manual_bad_tois]
n_tois = len(utois)
arr = np.zeros((n_projids, n_tois))
for i, projid in enumerate(projids):
for j, toi in enumerate(utois):
if toi in list(d[projid].keys()):
arr[i,j] = d[projid][toi]['trapz_snr']
else:
arr[i,j] = np.nan
arr[arr == np.inf] = 0
max_snrs = np.nanmax(arr, axis=0)
max_snr_d = {}
for toi, max_snr in zip(utois, max_snrs):
max_snr_d[toi] = max_snr
# if you didn't get anything, set snr to 0. this way helps with the mean.
snrs_normalized = arr/max_snrs[None,:]
snrs_normalized[np.isnan(snrs_normalized)] = 0
normalized_snr_avgd_over_tois = np.mean(snrs_normalized, axis=1)
nzeros = np.sum((snrs_normalized==0).astype(int),axis=1)
for projid, color, nsnr, nz in zip(
projids, colors, normalized_snr_avgd_over_tois, nzeros
):
run_id = "{}-{}".format(camccdstr, projid)
labeldone = False
for ix, toi_id in enumerate(utois):
if toi_id in list(d[projid].keys()):
snr_val = float(d[projid][toi_id]['trapz_snr'])
else:
snr_val = np.nan
print(toi_id, snr_val)
if isinstance(descriptionkey,str):
pdict = pickle.load(open(
'../data/reduction_param_pickles/projid_{}.pickle'.
format(projid), 'rb')
)
description = pdict[descriptionkey]
label = '{}'.format(description)
if divbymaxsnr:
label = '{} ({:.2f}) [{}/{}]'.format(
description, nsnr, nz, len(utois)
)
            if not pd.isnull(snr_val):
import dash
import dash_core_components as dcc
import dash_html_components as html
from dash.dependencies import Input, Output
from dash_html_components.Div import Div
import yfinance as yf
import pandas as pd
pd.options.mode.chained_assignment = None
from dash.exceptions import PreventUpdate
from datetime import date, datetime
from functions import *
app = dash.Dash(external_stylesheets=['https://codepen.io/chriddyp/pen/bWLwgP.css'])
app.layout = html.Div([
html.Div([
html.Div([
html.H1('Dashboard',style={'text-align':'center'}, className = "start"),
html.H6("Time Period",style={'color':'white'}),
dcc.Dropdown(id="time_period", options=[
{'label': '6 Months', 'value': '6m'},
{'label': '1 year', 'value': '1y'},
{'label': '3 years', 'value': '3y'},
{'label': '5 years', 'value': '5y'},
], placeholder='Time Period', value='1y'),
html.Br(),
html.H6("Technical Indicators",style={'color':'white'}),
dcc.Dropdown(id="indicators", options=[
{'label': 'RSI', 'value': 'RSI'},
{'label': 'SMA', 'value': 'SMA'},
{'label': 'EMA', 'value': 'EMA'},
{'label': 'MACD', 'value': 'MACD'},
{'label': 'Bollinger Bands', 'value': 'Bollinger Bands'}
],placeholder='Indicator', value='Bollinger Bands' ),
html.Br(),
html.H6("Returns",style={'color':'white'}),
dcc.Dropdown(id="returns", options=[
{'label': 'Daily Returns', 'value': 'Daily Returns'},
{'label': 'Cumulative Returns', 'value': 'Cumulative Returns'}
],placeholder='Returns', value='Daily Returns'),
]),
], className="Navigation"),
html.Br(),html.Br(),
html.Div([
html.Div([
html.Div([
dcc.Dropdown(id="dropdown_tickers", options=[
{"label":"HDFC Bank Limited", "value":"HDFCBANK.NS"},
{"label":"ICICI Bank Limited", "value":"ICICIBANK.NS"},
{"label":"RBL Bank Limited", "value":"RBLBANK.NS"},
{"label":"Equitas Small Finance Bank Limited", "value":"EQUITASBNK.NS"},
{"label":"DCB Bank Limited", "value":"DCBBANK.NS"},
{"label":"Maruti Suzuki India Limited", "value":"MARUTI.NS"},
{"label":"Tata Motors Limited ", "value":"TATAMOTORS.NS"},
{"label":"Escorts Limited", "value":"ESCORTS.NS"},
{"label":"Atul Auto Limited", "value":"ATULAUTO.NS"},
{"label":"Force Motors Limited", "value":"FORCEMOT.BO"},
{"label":"Tata Chemicals Limited", "value":"TATACHEM.NS"},
{"label":"Pidilite Industries Limited", "value":"PIDILITIND.NS"},
{"label":"Deepak Nitrite Limited", "value":"DEEPAKNTR.NS"},
{"label":"Navin Fluorine International Limited", "value":"NAVINFLUOR.NS"},
{"label":"Valiant Organics Limited", "value":"VALIANTORG.NS"},
{"label":"Avenue Supermarts Limited ", "value":"DMART.NS"},
{"label":"Trent Limited", "value":"TRENT.NS"},
{"label":"V-Mart Retail Limited", "value":"VMART.NS"},
{"label":"Future Retail Limited", "value":"FRETAIL.NS"},
{"label":"Shoppers Stop Limited", "value":"SHOPERSTOP.NS"},
{"label":"Zomato Limited", "value":"ZOMATO.NS"},
{"label":"G R Infraprojects Limited", "value":"GRINFRA.NS"},
{"label":"Dodla Dairy Limited", "value":"DODLA.NS"},
{"label":"India Pesticides Limited ", "value":"IPL.NS"},
{"label":"Times Green Energy (India) Lim", "value":"TIMESGREEN.BO"},
{"label":"DLF Limited", "value":"DLF.NS"},
{"label":"Godrej Properties Limited", "value":"GODREJPROP.NS"},
{"label":"Oberoi Realty Limited", "value":"OBEROIRLTY.NS"},
{"label":"Sunteck Realty Limited ", "value":"SUNTECK.NS"},
{"label":"Nirlon Limited", "value":"NIRLON.BO"},
], placeholder='Select Stock'),
html.Div([], id="c_graph"),
html.Div([], id="graphs"),
html.Br(),
html.H4('Past Trend vs. Future Projections',style={'text-align':'center'}),
html.H5('Closing Prices',style={'text-align':'center'}),
html.Div([], id="gbm_graph"),
html.Br(),
html.H5('Daily Volatility (%)',style={'text-align':'center'}),
html.Div([], id="garch_graph"),
html.Br(),
html.H4('Risk Ratios',style={'text-align':'center'}),
html.Div([
html.Div([
html.H6("Alpha (NIFTY 50)"),
html.Div(id="a_val"),
],style={'width': '49%', 'display': 'inline-block'}),
html.Div([
html.H6("Beta (NIFTY 50)"),
html.Div(id="b_val"),
],style={'width': '49%', 'display': 'inline-block'}),
]),
html.Div([
html.Div([
html.H6("Sharpe Ratio"),
html.Div(id="sr_val"),
],style={'width': '49%', 'display': 'inline-block'}),
html.Div([
html.H6("Sortino Ratio"),
html.Div(id="sor_val"),
],style={'width': '49%', 'display': 'inline-block'}),
]),
html.Div([
html.H6("Standard Deviation"),
html.Div(id="sd_val"),
]),
], id="main-content"),
],className="Panel1"),
html.Div([
html.Div([
dcc.Dropdown(id="dropdown_tickers2", options=[
{"label":"HDFC Bank Limited", "value":"HDFCBANK.NS"},
{"label":"ICICI Bank Limited", "value":"ICICIBANK.NS"},
{"label":"RBL Bank Limited", "value":"RBLBANK.NS"},
{"label":"Equitas Small Finance Bank Limited", "value":"EQUITASBNK.NS"},
{"label":"DCB Bank Limited", "value":"DCBBANK.NS"},
{"label":"Maruti Suzuki India Limited", "value":"MARUTI.NS"},
{"label":"Tata Motors Limited ", "value":"TATAMOTORS.NS"},
{"label":"Escorts Limited", "value":"ESCORTS.NS"},
{"label":"Atul Auto Limited", "value":"ATULAUTO.NS"},
{"label":"Tata Chemicals Limited", "value":"TATACHEM.NS"},
{"label":"Pidilite Industries Limited", "value":"PIDILITIND.NS"},
{"label":"Deepak Nitrite Limited", "value":"DEEPAKNTR.NS"},
{"label":"Navin Fluorine International Limited", "value":"NAVINFLUOR.NS"},
{"label":"Valiant Organics Limited", "value":"VALIANTORG.NS"},
{"label":"Avenue Supermarts Limited ", "value":"DMART.NS"},
{"label":"Trent Limited", "value":"TRENT.NS"},
{"label":"V-Mart Retail Limited", "value":"VMART.NS"},
{"label":"Future Retail Limited", "value":"FRETAIL.NS"},
{"label":"Shoppers Stop Limited", "value":"SHOPERSTOP.NS"},
{"label":"Zomato Limited", "value":"ZOMATO.NS"},
{"label":"G R Infraprojects Limited", "value":"GRINFRA.NS"},
{"label":"Dodla Dairy Limited", "value":"DODLA.NS"},
{"label":"India Pesticides Limited ", "value":"IPL.NS"},
{"label":"Times Green Energy (India) Lim", "value":"TIMESGREEN.BO"},
{"label":"DLF Limited", "value":"DLF.NS"},
{"label":"Godrej Properties Limited", "value":"GODREJPROP.NS"},
{"label":"Oberoi Realty Limited", "value":"OBEROIRLTY.NS"},
{"label":"Sunteck Realty Limited ", "value":"SUNTECK.NS"},
{"label":"Nirlon Limited", "value":"NIRLON.BO"},
], placeholder='Select Stock'),
html.Div([], id="c_graph2"),
html.Div([], id="graphs2"),
html.Br(),
html.H4('Past Trend vs. Future Projections',style={'text-align':'center'}),
html.H5('Closing Prices',style={'text-align':'center'}),
html.Div([], id="gbm_graph2"),
html.Br(),
html.H5('Daily Volatility (%)',style={'text-align':'center'}),
html.Div([], id="garch_graph2"),
html.Br(),
html.H4('Risk Ratios',style={'text-align':'center'}),
html.Div([
html.Div([
html.H6("Alpha (NIFTY 50)"),
html.Div(id="a_val2"),
],style={'width': '49%', 'display': 'inline-block'}),
html.Div([
html.H6("Beta (NIFTY 50)"),
html.Div(id="b_val2"),
],style={'width': '49%', 'display': 'inline-block'}),
]),
html.Div([
html.Div([
html.H6("Sharpe Ratio"),
html.Div(id="sr_val2"),
],style={'width': '49%', 'display': 'inline-block'}),
html.Div([
html.H6("Sortino Ratio"),
html.Div(id="sor_val2"),
],style={'width': '49%', 'display': 'inline-block'}),
]),
html.Div([
html.H6("Standard Deviation"),
html.Div(id="sd_val2"),
]),
], id="main-content2"),
], className="Panel2"),
html.Br(),
html.Div([
html.H3('Interpretation',style={'text-align':'center'}),
html.H5('Technical indicators'),
        html.Li('Bollinger Bands are a measure of volatility. High volatility is signified by wide bands while low volatility is signified by narrow bands. Generally, high volatility is followed by low volatility.'),
        html.Li('RSI, or Relative Strength Index, is a measure used to evaluate overbought and oversold conditions.'),
        html.Li('SMA, or Simple Moving Average, using 50-day (fast) and 200-day (slow) lines - the short term crossing above the long term signals a bullish trend, while crossing below signals a bearish trend.'),
        html.Li('EMA, or Exponential Moving Average, gives higher significance to recent price data.'),
        html.Li('MACD, or Moving Average Convergence Divergence, signifies no trend reversal unless there are crossovers. The market is bullish when the signal line crosses above the blue line, bearish when it crosses below.'),
        html.H5('Risk ratios'),
        html.Li('Alpha: return performance compared to the market benchmark.'),
        html.Li('Beta: relative tendency of a stock price to move up and down compared to the market trend.'),
        html.Li('Sharpe Ratio: returns generated per unit of risk - the higher the better.'),
        html.Li('Sortino Ratio: returns relative to downside risk only.'),
])
],className="Panels"),
],className="container")
beta_r = N50()
@app.callback(
[Output("c_graph", "children")],
[Output("graphs", "children")],
[Output("a_val", "children")],
[Output("b_val", "children")],
[Output("sr_val", "children")],
[Output("sor_val", "children")],
[Output("sd_val", "children")],
[Output("gbm_graph", "children")],
[Output("garch_graph", "children")],
[Input("time_period", "value")],
[Input("dropdown_tickers", "value")],
[Input("indicators", "value")],
[Input("returns", "value")],
)
def stock_prices(v, v2, v3, v4):
if v2 == None:
raise PreventUpdate
if os.path.exists(v2+'.csv'):
df = pd.read_csv(v2+'.csv')
now = datetime.now()
today345pm = now.replace(hour=15, minute=45, second=0, microsecond=0)
if df['Date'].iloc[-1]!=date.today().isoformat() and date.today().isoweekday() in range(1,6) and now>today345pm:
df = yf.download(v2,start='2016-01-01')
df.reset_index(inplace=True)
df.to_csv(v2+'.csv')
else:
df = yf.download(v2,start='2016-01-01')
df.reset_index(inplace=True)
df.to_csv(v2+'.csv')
df = df.tail(1800)
df['Date']= pd.to_datetime(df['Date'])
if v=='6m':
time_period = 126
elif v=='1y':
time_period = 252
elif v=='3y':
time_period = 756
elif v=='5y':
time_period = 1800
# Alpha & Beta Ratio
beta_r = pd.read_csv('benchmark.csv')
beta_r = beta_r.tail(time_period)
df_data = df.tail(time_period)
Alpha_Ratio, Beta_Ratio = alpha_beta(beta_r, df_data)
# Standard Deviation
SD = round(df_data['Adj Close'].std(),2)
# Sharpe & Sortino Ratio
Sharpe_Ratio, Sortino_Ratio = sharpe_sortino(df_data)
# Plotting over the time period's data
MACD(df)
RSI(df)
BB(df)
df['SMA_50'] = SMA(df, 50)
df['SMA_200'] = SMA(df, 200)
df['EMA'] = EMA(df)
fig = get_stock_price_fig(df.tail(time_period),v3,v4)
current = df_data.iloc[-1][2]
yesterday = df_data.iloc[-2][2]
# Change graph
fig1 = change_graph(current,yesterday)
df = df[['Date','Adj Close']]
# GBM Model
fig2= gbm(df.tail(30))
# GARCH Model
fig3 = garch(df.tail(30))
return [dcc.Graph(figure=fig1,config={'displayModeBar': False}),
dcc.Graph(figure=fig,config={'displayModeBar': False}),
Alpha_Ratio,
Beta_Ratio,
Sharpe_Ratio,
Sortino_Ratio,
SD,
dcc.Graph(figure=fig2,config={'displayModeBar': False}),
dcc.Graph(figure=fig3,config={'displayModeBar': False}),]
@app.callback(
[Output("c_graph2", "children")],
[Output("graphs2", "children")],
[Output("a_val2", "children")],
[Output("b_val2", "children")],
[Output("sr_val2", "children")],
[Output("sor_val2", "children")],
[Output("sd_val2", "children")],
[Output("gbm_graph2", "children")],
[Output("garch_graph2", "children")],
[Input("time_period", "value")],
[Input("dropdown_tickers2", "value")],
[Input("indicators", "value")],
[Input("returns", "value")],
)
def stock_prices2(v, v2, v3, v4):
if v2 == None:
raise PreventUpdate
if os.path.exists(v2+'.csv'):
df2 = pd.read_csv(v2+'.csv')
now = datetime.now()
today345pm = now.replace(hour=15, minute=45, second=0, microsecond=0)
if df2['Date'].iloc[-1]!=date.today().isoformat() and date.today().isoweekday() in range(1,6) and now>today345pm:
df2 = yf.download(v2,start='2016-01-01')
df2.reset_index(inplace=True)
df2.to_csv(v2+'.csv')
else:
df2 = yf.download(v2,start='2016-01-01')
df2.reset_index(inplace=True)
df2.to_csv(v2+'.csv')
df2 = df2.tail(1800)
df2['Date']= pd.to_datetime(df2['Date'])
if v=='6m':
time_period = 126
elif v=='1y':
time_period = 252
elif v=='3y':
time_period = 756
elif v=='5y':
time_period = 1800
# Alpha & Beta Ratio
beta_r2 = pd.read_csv('benchmark.csv')
import unittest
import pandas as pd
from PEC4 import DataframeOperations
class TestDataframeOperations(unittest.TestCase):
def test_merge_two_dataframes_deleting_column(self):
df1 = pd.DataFrame.from_dict({'col_1': [1, 2], 'col_2': ['a', 'b']})
        df2 = pd.DataFrame.from_dict({'col_3': [1, 2], 'col_4': ['A', 'B']})
"""Tools for generating and forecasting with ensembles of models."""
import datetime
import numpy as np
import pandas as pd
import json
from autots.models.base import PredictionObject
from autots.models.model_list import no_shared
from autots.tools.impute import fill_median
horizontal_aliases = ['horizontal', 'probabilistic']
def summarize_series(df):
"""Summarize time series data. For now just df.describe()."""
df_sum = df.describe(percentiles=[0.1, 0.25, 0.5, 0.75, 0.9])
return df_sum
def mosaic_or_horizontal(all_series: dict):
"""Take a mosaic or horizontal model and return series or models.
Args:
all_series (dict): dict of series: model (or list of models)
"""
first_value = all_series[next(iter(all_series))]
if isinstance(first_value, dict):
return "mosaic"
else:
return "horizontal"
def parse_horizontal(all_series: dict, model_id: str = None, series_id: str = None):
"""Take a mosaic or horizontal model and return series or models.
Args:
all_series (dict): dict of series: model (or list of models)
model_id (str): name of model to find series for
series_id (str): name of series to find models for
Returns:
list
"""
if model_id is None and series_id is None:
raise ValueError(
"either series_id or model_id must be specified in parse_horizontal."
)
if mosaic_or_horizontal(all_series) == 'mosaic':
if model_id is not None:
return [ser for ser, mod in all_series.items() if model_id in mod.values()]
else:
return list(set(all_series[series_id].values()))
else:
if model_id is not None:
return [ser for ser, mod in all_series.items() if mod == model_id]
else:
# list(set([mod for ser, mod in all_series.items() if ser == series_id]))
return [all_series[series_id]]
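# Hedged example (not in the library) of the two lookup directions the docstring
# describes, using a tiny horizontal mapping with invented names: by model_id it
# returns the series assigned to that model, by series_id it returns the
# model(s) used for that series.
def _example_parse_horizontal():
    all_series = {'wiki_views': 'model_a', 'sales': 'model_b', 'traffic': 'model_a'}
    series_for_a = parse_horizontal(all_series, model_id='model_a')     # ['wiki_views', 'traffic']
    models_for_sales = parse_horizontal(all_series, series_id='sales')  # ['model_b']
    return series_for_a, models_for_sales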
def BestNEnsemble(
ensemble_params,
forecasts,
lower_forecasts,
upper_forecasts,
forecasts_runtime: dict,
prediction_interval: float = 0.9,
):
"""Generate mean forecast for ensemble of models.
Args:
ensemble_params (dict): BestN ensemble param dict
should have "model_weights": {model_id: weight} where 1 is default weight per model
forecasts (dict): {forecast_id: forecast dataframe} for all models
same for lower_forecasts, upper_forecasts
forecast_runtime (dict): dictionary of {forecast_id: timedelta of runtime}
prediction_interval (float): metadata on interval
"""
startTime = datetime.datetime.now()
forecast_keys = list(forecasts.keys())
model_weights = dict(ensemble_params.get("model_weights", {}))
ensemble_params['model_weights'] = model_weights
ensemble_params['models'] = {
k: v
for k, v in dict(ensemble_params.get('models')).items()
if k in forecast_keys
}
model_count = len(forecast_keys)
if model_count < 1:
raise ValueError("BestN failed, no component models available.")
sample_df = next(iter(forecasts.values()))
columnz = sample_df.columns
indices = sample_df.index
model_divisor = 0
ens_df = pd.DataFrame(0, index=indices, columns=columnz)
ens_df_lower = pd.DataFrame(0, index=indices, columns=columnz)
ens_df_upper = pd.DataFrame(0, index=indices, columns=columnz)
for idx, x in forecasts.items():
current_weight = float(model_weights.get(idx, 1))
ens_df = ens_df + (x * current_weight)
# also .get(idx, 0)
ens_df_lower = ens_df_lower + (lower_forecasts[idx] * current_weight)
ens_df_upper = ens_df_upper + (upper_forecasts[idx] * current_weight)
model_divisor = model_divisor + current_weight
ens_df = ens_df / model_divisor
ens_df_lower = ens_df_lower / model_divisor
ens_df_upper = ens_df_upper / model_divisor
ens_runtime = datetime.timedelta(0)
for x in forecasts_runtime.values():
ens_runtime = ens_runtime + x
ens_result = PredictionObject(
model_name="Ensemble",
forecast_length=len(ens_df.index),
forecast_index=ens_df.index,
forecast_columns=ens_df.columns,
lower_forecast=ens_df_lower,
forecast=ens_df,
upper_forecast=ens_df_upper,
prediction_interval=prediction_interval,
predict_runtime=datetime.datetime.now() - startTime,
fit_runtime=ens_runtime,
model_parameters=ensemble_params,
)
return ens_result
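# Hedged numeric sketch (not in the library) of the weighted blend above: with
# weights {'m1': 2, 'm2': 1}, each forecast cell becomes (2*m1 + 1*m2) / 3. The
# model ids, series name and values are invented, and the same frames are
# reused for the lower/upper bounds purely to keep the example short.
def _example_bestn_weighting():
    idx = pd.date_range('2022-01-01', periods=2)
    f1 = pd.DataFrame({'series': [10.0, 12.0]}, index=idx)
    f2 = pd.DataFrame({'series': [16.0, 18.0]}, index=idx)
    params = {'models': {'m1': {}, 'm2': {}}, 'model_weights': {'m1': 2, 'm2': 1}}
    runtimes = {'m1': datetime.timedelta(seconds=1), 'm2': datetime.timedelta(seconds=2)}
    result = BestNEnsemble(params, {'m1': f1, 'm2': f2},
                           {'m1': f1, 'm2': f2}, {'m1': f1, 'm2': f2}, runtimes)
    # result.forecast['series'] equals [12.0, 14.0]
    return result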
def DistEnsemble(
ensemble_params,
forecasts_list,
forecasts,
lower_forecasts,
upper_forecasts,
forecasts_runtime,
prediction_interval,
):
"""Generate forecast for distance ensemble."""
# handle that the inputs are now dictionaries
forecasts = list(forecasts.values())
lower_forecasts = list(lower_forecasts.values())
upper_forecasts = list(upper_forecasts.values())
forecasts_runtime = list(forecasts_runtime.values())
first_model_index = forecasts_list.index(ensemble_params['FirstModel'])
second_model_index = forecasts_list.index(ensemble_params['SecondModel'])
forecast_length = forecasts[0].shape[0]
dis_frac = ensemble_params['dis_frac']
first_bit = int(np.ceil(forecast_length * dis_frac))
second_bit = int(np.floor(forecast_length * (1 - dis_frac)))
ens_df = (
forecasts[first_model_index]
.head(first_bit)
.append(forecasts[second_model_index].tail(second_bit))
)
ens_df_lower = (
lower_forecasts[first_model_index]
.head(first_bit)
.append(lower_forecasts[second_model_index].tail(second_bit))
)
ens_df_upper = (
upper_forecasts[first_model_index]
.head(first_bit)
.append(upper_forecasts[second_model_index].tail(second_bit))
)
id_list = list(ensemble_params['models'].keys())
model_indexes = [idx for idx, x in enumerate(forecasts_list) if x in id_list]
ens_runtime = datetime.timedelta(0)
for idx, x in enumerate(forecasts_runtime):
if idx in model_indexes:
ens_runtime = ens_runtime + forecasts_runtime[idx]
ens_result_obj = PredictionObject(
model_name="Ensemble",
forecast_length=len(ens_df.index),
forecast_index=ens_df.index,
forecast_columns=ens_df.columns,
lower_forecast=ens_df_lower,
forecast=ens_df,
upper_forecast=ens_df_upper,
prediction_interval=prediction_interval,
predict_runtime=datetime.timedelta(0),
fit_runtime=ens_runtime,
model_parameters=ensemble_params,
)
return ens_result_obj
def horizontal_classifier(df_train, known: dict, method: str = "whatever"):
"""
    Classify unknown series with the appropriate model for horizontal ensembling.
Args:
df_train (pandas.DataFrame): historical data about the series. Columns = series_ids.
known (dict): dict of series_id: classifier outcome including some but not all series in df_train.
Returns:
dict.
"""
# known = {'EXUSEU': 'xx1', 'MCOILWTICO': 'xx2', 'CSUSHPISA': 'xx3'}
columnz = df_train.columns.tolist()
X = summarize_series(df_train).transpose()
X = fill_median(X)
known_l = list(known.keys())
unknown = list(set(columnz) - set(known_l))
Xt = X.loc[known_l]
Xf = X.loc[unknown]
Y = np.array(list(known.values()))
from sklearn.naive_bayes import GaussianNB
clf = GaussianNB()
clf.fit(Xt, Y)
result = clf.predict(Xf)
result_d = dict(zip(Xf.index.tolist(), result))
# since this only has estimates, overwrite with known that includes more
final = {**result_d, **known}
# temp = pd.DataFrame({'series': list(final.keys()), 'model': list(final.values())})
# temp2 = temp.merge(X, left_on='series', right_index=True)
return final
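# Hedged, minimal sketch (not in the library) of how the classifier generalizes
# a partial series->model mapping: series with known models train a Gaussian NB
# on their summary statistics, and the remaining series are assigned a model by
# that classifier. The series and model names below are invented.
def _example_horizontal_classifier():
    rng = np.random.default_rng(0)
    df_train = pd.DataFrame({
        'low_scale': rng.normal(0, 1, 50),
        'high_scale': rng.normal(100, 10, 50),
        'unlabeled': rng.normal(0, 1, 50),
    })
    known = {'low_scale': 'model_a', 'high_scale': 'model_b'}
    full = horizontal_classifier(df_train, known)
    # full now also contains an estimated model assignment for 'unlabeled'
    return full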
def mosaic_classifier(df_train, known):
"""CLassify unknown series with the appropriate model for mosaic ensembles."""
known.index.name = "forecast_period"
upload = pd.melt(
known,
var_name="series_id",
value_name="model_id",
ignore_index=False,
).reset_index(drop=False)
upload['forecast_period'] = upload['forecast_period'].astype(int)
missing_cols = df_train.columns[
~df_train.columns.isin(upload['series_id'].unique())
]
if not missing_cols.empty:
forecast_p = np.arange(upload['forecast_period'].max() + 1)
p_full = np.tile(forecast_p, len(missing_cols))
missing_rows = pd.DataFrame(
{
'forecast_period': p_full,
'series_id': np.repeat(missing_cols.values, len(forecast_p)),
'model_id': np.nan,
},
index=None if len(p_full) > 1 else [0],
)
upload = pd.concat([upload, missing_rows])
X = fill_median(
(summarize_series(df_train).transpose()).merge(
upload, left_index=True, right_on="series_id"
)
)
X.set_index("series_id", inplace=True) # .drop(columns=['series_id'], inplace=True)
to_predict = X[X['model_id'].isna()].drop(columns=['model_id'])
X = X[~X['model_id'].isna()]
Y = X['model_id']
Xf = X.drop(columns=['model_id'])
# from sklearn.linear_model import RidgeClassifier
# from sklearn.naive_bayes import GaussianNB
from sklearn.ensemble import RandomForestClassifier
clf = RandomForestClassifier()
clf.fit(Xf, Y)
predicted = clf.predict(to_predict)
result = pd.concat(
[to_predict.reset_index(drop=False), pd.Series(predicted, name="model_id")],
axis=1,
)
cols_needed = ['model_id', 'series_id', 'forecast_period']
final = pd.concat(
[X.reset_index(drop=False)[cols_needed], result[cols_needed]], sort=True, axis=0
)
final['forecast_period'] = final['forecast_period'].astype(str)
final = final.pivot(values="model_id", columns="series_id", index="forecast_period")
try:
final = final[df_train.columns]
if final.isna().to_numpy().sum() > 0:
raise KeyError("NaN in mosaic generalization")
except KeyError as e:
raise ValueError(
f"mosaic_classifier failed to generalize for all columns: {repr(e)}"
)
return final
def generalize_horizontal(
df_train, known_matches: dict, available_models: list, full_models: list = None
):
"""generalize a horizontal model trained on a subset of all series
Args:
df_train (pd.DataFrame): time series data
known_matches (dict): series:model dictionary for some to all series
available_models (dict): list of models actually available
full_models (dict): models that are available for every single series
"""
org_idx = df_train.columns
org_list = org_idx.tolist()
# remove any unnecessary series
known_matches = {ser: mod for ser, mod in known_matches.items() if ser in org_list}
# here split for mosaic or horizontal
if mosaic_or_horizontal(known_matches) == "mosaic":
# make it a dataframe
mosaicy = pd.DataFrame.from_dict(known_matches)
# remove unavailable models
mosaicy = pd.DataFrame(mosaicy[mosaicy.isin(available_models)])
# so we can fill some missing by just using a forward fill, should be good enough
mosaicy.fillna(method='ffill', limit=5, inplace=True)
mosaicy.fillna(method='bfill', limit=5, inplace=True)
if mosaicy.isna().any().any() or mosaicy.shape[1] != df_train.shape[1]:
if full_models is not None:
k2 = pd.DataFrame(mosaicy[mosaicy.isin(full_models)])
else:
k2 = mosaicy.copy()
final = mosaic_classifier(df_train, known=k2)
return final.to_dict()
else:
return mosaicy.to_dict()
else:
# remove any unavailable models
k = {ser: mod for ser, mod in known_matches.items() if mod in available_models}
# check if any series are missing from model list
if not k:
raise ValueError("Horizontal template has no models matching this data!")
# test if generalization is needed
if len(set(org_list) - set(list(k.keys()))) > 0:
# filter down to only models available for all
# print(f"Models not available: {[ser for ser, mod in known_matches.items() if mod not in available_models]}")
# print(f"Series not available: {[ser for ser in df_train.columns if ser not in list(known_matches.keys())]}")
if full_models is not None:
k2 = {ser: mod for ser, mod in k.items() if mod in full_models}
else:
k2 = k.copy()
all_series_part = horizontal_classifier(df_train, k2)
# since this only has "full", overwrite with known that includes more
all_series = {**all_series_part, **k}
else:
all_series = known_matches
return all_series
def HorizontalEnsemble(
ensemble_params,
forecasts_list,
forecasts,
lower_forecasts,
upper_forecasts,
forecasts_runtime,
prediction_interval,
df_train=None,
prematched_series: dict = None,
):
"""Generate forecast for per_series ensembling."""
startTime = datetime.datetime.now()
# this is meant to fill in any failures
available_models = [mod for mod, fcs in forecasts.items() if fcs.shape[0] > 0]
train_size = df_train.shape
# print(f"running inner generalization with training size: {train_size}")
full_models = [
mod for mod, fcs in forecasts.items() if fcs.shape[1] == train_size[1]
]
if not full_models:
print("No full models available for horizontal generalization!")
full_models = available_models # hope it doesn't need to fill
# print(f"FULLMODEL {len(full_models)}: {full_models}")
if prematched_series is None:
prematched_series = ensemble_params['series']
all_series = generalize_horizontal(
df_train, prematched_series, available_models, full_models
)
# print(f"ALLSERIES {len(all_series.keys())}: {all_series}")
org_idx = df_train.columns
forecast_df, u_forecast_df, l_forecast_df = (
pd.DataFrame(),
        pd.DataFrame(),
import csv
import glob
import math
import os
import socket
import sys
from random import random, seed
from timeit import default_timer as timer
import time
from statistics import mean
from pathlib import Path
import networkx as nx
import numpy as np
from scapy.layers.inet import IP, UDP
from scapy.utils import PcapWriter, PcapReader
import tkinter as tk
from tkinter import filedialog
import zat
from zat.log_to_dataframe import LogToDataFrame
import pandas as pd
import matplotlib.pyplot as plt
from matplotlib.font_manager import FontProperties
from matplotlib.pyplot import cm
import matplotlib.transforms as mtrans
class For_Malpaca_Preparation_Netflow():
@staticmethod
def get_data_equal_to_fixed_threshold_for_malpaca_enriched(threshold, folder_to_filtered_files,
folder_to_move_data_to, old_file_addition):
threshold = int(threshold)
folder_to_filtered_files = folder_to_filtered_files
folder_to_move_data_to = folder_to_move_data_to
new_folder_path = folder_to_move_data_to + "/" + (str(threshold)) + "_fixed_threshold"
os.mkdir(new_folder_path)
scan_file_order_path = folder_to_filtered_files + "/" + "scan_order.txt"
scanned_files = []
with open(scan_file_order_path, 'r') as inputfile:
scanned_files = inputfile.readlines()
scanned_files_list = [x.strip() for x in scanned_files]
scanned_files_list = list(map(lambda x: (x.split(",")[0], x.split(",")[1]), scanned_files_list))
scanned_files_list = sorted(list(set(scanned_files_list)))
for index, (scenario_name, file_name) in enumerate(scanned_files_list):
print("Scenario name: " + scenario_name)
print("File name : " + file_name)
print("Number: " + str(index + 1) + "/" + str(len(scanned_files_list)))
print("Create pcap file")
path_to_csv_file = folder_to_filtered_files + "/" + scenario_name + "/" + file_name + "/" + file_name + "_summary.csv"
path_to_pcap_file = folder_to_filtered_files + "/" + scenario_name + "/" + file_name + "/" + file_name + "_" + old_file_addition + ".pcap"
file_packet_dic = {}
connections_used = []
new_file_path = new_folder_path + "/" + scenario_name + "_" + file_name
write_count = 1
with PcapReader(path_to_pcap_file) as packets:
for packet_count, packet in enumerate(packets):
packet_string = packet.show(dump=True)
packet_for_print = packet_string
packet_string = packet_string.split("\n")
packet_string = [x.replace(" ", "") for x in packet_string]
current_layer = "none"
packet_dic = {}
for line in packet_string:
if len(line) > 0:
if line[0] == '#':
new_layer = line.split('[')[1].split(']')[0]
current_layer = new_layer
packet_dic[current_layer] = {}
elif (line[0] != '\\') & (line[0] != '|'):
key = line.split("=")[0]
value = line.split("=")[1]
packet_dic[current_layer][key] = value
src_ip = packet_dic["IP"]["src"]
dst_ip = packet_dic["IP"]["dst"]
ip_protocol = packet_dic["IP"]["proto"].upper()
if ip_protocol == "UDP" and "UDP" in packet_dic:
src_port = packet_dic["UDP"]["sport"]
dst_port = packet_dic["UDP"]["dport"]
elif ip_protocol == "TCP" and "TCP" in packet_dic:
src_port = packet_dic["TCP"]["sport"]
dst_port = packet_dic["TCP"]["dport"]
elif ip_protocol == "ICMP" and "ICMP" in packet_dic:
src_port = 0
dst_port = str(packet_dic["ICMP"]["type"]) + "/" + str(packet_dic["ICMP"]["code"])
else:
src_port = 0
dst_port = 0
if not isinstance(src_port, int):
if not all(char.isdigit() for char in src_port):
try:
src_port = socket.getservbyname(src_port, ip_protocol)
except:
src_port = src_port
if not isinstance(dst_port, int) or ():
if not all(char.isdigit() for char in dst_port):
try:
dst_port = socket.getservbyname(dst_port, ip_protocol)
except:
dst_port = dst_port
src_ip = str(src_ip.strip())
dst_ip = str(dst_ip.strip())
ip_protocol = str(ip_protocol.strip())
src_port = str(src_port).strip()
dst_port = str(dst_port).strip()
if (src_ip, dst_ip, ip_protocol, src_port, dst_port) not in file_packet_dic:
file_packet_dic[(src_ip, dst_ip, ip_protocol, src_port, dst_port)] = [packet]
else:
file_packet_dic[(src_ip, dst_ip, ip_protocol, src_port, dst_port)].append(packet)
if (packet_count % 500000) == 0:
if packet_count != 0:
print("Write " + str(write_count) + " Start")
for (src_ip, dst_ip, ip_protocol, src_port, dst_port), packets_value in file_packet_dic.items():
amount = len(packets_value)
if amount >= threshold:
connections_used.append((src_ip, dst_ip, ip_protocol, src_port, dst_port))
pktdump = PcapWriter(new_file_path, append=True, sync=True)
for index, packet in enumerate(packets_value):
if index < threshold:
pktdump.write(packet)
else:
break
pktdump.close()
file_packet_dic.clear()
print("Write " + str(write_count) + " End")
write_count = write_count + 1
packets.close()
if len(file_packet_dic) > 0:
print("Write Last Packets Start")
for (src_ip, dst_ip, ip_protocol, src_port, dst_port), packets_value in file_packet_dic.items():
amount = len(packets_value)
if amount >= threshold:
connections_used.append((src_ip, dst_ip, ip_protocol, src_port, dst_port))
pktdump = PcapWriter(new_file_path, append=True, sync=True)
for index, packet in enumerate(packets_value):
if index < threshold:
pktdump.write(packet)
else:
break
pktdump.close()
file_packet_dic.clear()
print("Write Last Packets End")
print("Create csv file")
csv_df = pd.read_csv(path_to_csv_file)
csv_df["src_ip"] = csv_df["src_ip"].apply(lambda x: str(x).strip())
csv_df["dst_ip"] = csv_df["dst_ip"].apply(lambda x: str(x).strip())
csv_df["src_port"] = csv_df["src_port"].apply(lambda x: str(x).strip())
csv_df["dst_port"] = csv_df["dst_port"].apply(lambda x: str(x).strip())
csv_df["ip_protocol"] = csv_df["ip_protocol"].apply(lambda x: str(x).strip())
csv_df["src_ip"] = csv_df["src_ip"].astype(str)
csv_df["dst_ip"] = csv_df["dst_ip"].astype(str)
csv_df["src_port"] = csv_df["src_port"].astype(str)
csv_df["dst_port"] = csv_df["dst_port"].astype(str)
csv_df["ip_protocol"] = csv_df["ip_protocol"].astype(str)
if len(connections_used) > 0:
for index, (src_ip, dst_ip, ip_protocol, src_port, dst_port) in enumerate(connections_used):
src_ip = str(src_ip).strip()
dst_ip = str(dst_ip).strip()
ip_protocol = str(ip_protocol).strip()
src_port = str(src_port).strip()
dst_port = str(dst_port).strip()
row = csv_df[(csv_df["src_ip"] == src_ip) & (csv_df["dst_ip"] == dst_ip) &
(csv_df["ip_protocol"] == ip_protocol) & (csv_df["src_port"] == src_port) & (csv_df["dst_port"] == dst_port)]
if index == 0:
combined_df = row
else:
combined_df = combined_df.append(row)
file_packet_dic.clear()
connections_used.clear()
new_csv_file_path = new_folder_path + "/" + scenario_name + "_" + file_name + "_summary.csv"
combined_df["connection_length"] = threshold
combined_df.to_csv(new_csv_file_path, index=False)
file_packet_dic.clear()
connections_used.clear()
@staticmethod
def get_data_skip_x_then_take_fixed_threshold_for_malpaca_enriched(skip, threshold, folder_to_filtered_files,
folder_to_move_data_to, old_file_addition):
skip = int(skip)
threshold = int(threshold)
folder_to_filtered_files = folder_to_filtered_files
folder_to_move_data_to = folder_to_move_data_to
new_folder_path = folder_to_move_data_to + "/" + str(threshold) + "_fixed_threshold_" + str(skip) + "_skip"
os.mkdir(new_folder_path)
scan_file_order_path = folder_to_filtered_files + "/" + "scan_order.txt"
scanned_files = []
with open(scan_file_order_path, 'r') as inputfile:
scanned_files = inputfile.readlines()
scanned_files_list = [x.strip() for x in scanned_files]
scanned_files_list = list(map(lambda x: (x.split(",")[0], x.split(",")[1]), scanned_files_list))
scanned_files_list = sorted(list(set(scanned_files_list)))
for index, (scenario_name, file_name) in enumerate(scanned_files_list):
print("Scenario name: " + scenario_name)
print("File name : " + file_name)
print("Number: " + str(index + 1) + "/" + str(len(scanned_files_list)))
print("Create pcap file")
path_to_csv_file = folder_to_filtered_files + "/" + scenario_name + "/" + file_name + "/" + file_name + "_summary.csv"
path_to_pcap_file = folder_to_filtered_files + "/" + scenario_name + "/" + file_name + "/" + file_name + "_" + old_file_addition + ".pcap"
file_packet_dic = {}
connections_used = []
new_file_path = new_folder_path + "/" + scenario_name + "_" + file_name
write_count = 1
with PcapReader(path_to_pcap_file) as packets:
for packet_count, packet in enumerate(packets):
packet_string = packet.show(dump=True)
packet_for_print = packet_string
packet_string = packet_string.split("\n")
packet_string = [x.replace(" ", "") for x in packet_string]
current_layer = "none"
packet_dic = {}
for line in packet_string:
if len(line) > 0:
if line[0] == '#':
new_layer = line.split('[')[1].split(']')[0]
current_layer = new_layer
packet_dic[current_layer] = {}
elif (line[0] != '\\') & (line[0] != '|'):
key = line.split("=")[0]
value = line.split("=")[1]
packet_dic[current_layer][key] = value
src_ip = packet_dic["IP"]["src"]
dst_ip = packet_dic["IP"]["dst"]
ip_protocol = packet_dic["IP"]["proto"].upper()
if ip_protocol == "UDP" and "UDP" in packet_dic:
src_port = packet_dic["UDP"]["sport"]
dst_port = packet_dic["UDP"]["dport"]
elif ip_protocol == "TCP" and "TCP" in packet_dic:
src_port = packet_dic["TCP"]["sport"]
dst_port = packet_dic["TCP"]["dport"]
elif ip_protocol == "ICMP" and "ICMP" in packet_dic:
src_port = 0
dst_port = str(packet_dic["ICMP"]["type"]) + "/" + str(packet_dic["ICMP"]["code"])
else:
src_port = 0
dst_port = 0
if not isinstance(src_port, int):
if not all(char.isdigit() for char in src_port):
try:
src_port = socket.getservbyname(src_port, ip_protocol)
except:
src_port = src_port
                    if not isinstance(dst_port, int):
if not all(char.isdigit() for char in dst_port):
try:
dst_port = socket.getservbyname(dst_port, ip_protocol)
except:
dst_port = dst_port
if (src_ip, dst_ip, ip_protocol, src_port, dst_port) not in file_packet_dic:
file_packet_dic[(src_ip, dst_ip, ip_protocol, src_port, dst_port)] = [packet]
else:
file_packet_dic[(src_ip, dst_ip, ip_protocol, src_port, dst_port)].append(packet)
if (packet_count % 500000) == 0:
if packet_count != 0:
print("Write " + str(write_count) + " Start")
for address, packets_value in file_packet_dic.items():
amount = len(packets_value)
if amount >= (threshold + skip):
connections_used.append(address)
pktdump = PcapWriter(new_file_path, append=True, sync=True)
for index, packet in enumerate(packets_value):
                                        if (index >= skip):  # skip the first `skip` packets
                                            if (index < (skip + threshold)):  # then keep exactly `threshold` packets
pktdump.write(packet)
pktdump.close()
file_packet_dic.clear()
print("Write " + str(write_count) + " End")
write_count = write_count + 1
packets.close()
if len(file_packet_dic) > 0:
print("Write Last Packets Start")
for (src_ip, dst_ip, ip_protocol, src_port, dst_port), packets_value in file_packet_dic.items():
amount = len(packets_value)
if amount >= (threshold + skip):
connections_used.append((src_ip, dst_ip, ip_protocol, src_port, dst_port))
pktdump = PcapWriter(new_file_path, append=True, sync=True)
for index, packet in enumerate(packets_value):
                            if (index >= skip):  # skip the first `skip` packets
                                if (index < (skip + threshold)):  # then keep exactly `threshold` packets
pktdump.write(packet)
pktdump.close()
file_packet_dic.clear()
print("Write Last Packets End")
print("Create csv file")
csv_df = pd.read_csv(path_to_csv_file)
csv_df["src_ip"] = csv_df["src_ip"].apply(lambda x: str(x).strip())
csv_df["dst_ip"] = csv_df["dst_ip"].apply(lambda x: str(x).strip())
csv_df["src_port"] = csv_df["src_port"].apply(lambda x: str(x).strip())
csv_df["dst_port"] = csv_df["dst_port"].apply(lambda x: str(x).strip())
csv_df["ip_protocol"] = csv_df["ip_protocol"].apply(lambda x: str(x).strip())
csv_df["src_ip"] = csv_df["src_ip"].astype(str)
csv_df["dst_ip"] = csv_df["dst_ip"].astype(str)
csv_df["src_port"] = csv_df["src_port"].astype(str)
csv_df["dst_port"] = csv_df["dst_port"].astype(str)
csv_df["ip_protocol"] = csv_df["ip_protocol"].astype(str)
if len(connections_used) > 0:
for index, (src_ip, dst_ip, ip_protocol, src_port, dst_port) in enumerate(connections_used):
src_ip = str(src_ip).strip()
dst_ip = str(dst_ip).strip()
ip_protocol = str(ip_protocol).strip()
src_port = str(src_port).strip()
dst_port = str(dst_port).strip()
row = csv_df[(csv_df["src_ip"] == src_ip) & (csv_df["dst_ip"] == dst_ip) &
(csv_df["ip_protocol"] == ip_protocol) & (csv_df["src_port"] == src_port) & (
csv_df["dst_port"] == dst_port)]
if index == 0:
combined_df = row
else:
combined_df = combined_df.append(row)
file_packet_dic.clear()
connections_used.clear()
new_csv_file_path = new_folder_path + "/" + scenario_name + "_" + file_name + "_summary.csv"
combined_df["connection_length"] = threshold
combined_df.to_csv(new_csv_file_path, index=False)
file_packet_dic.clear()
connections_used.clear()
@staticmethod
def get_data_skip_x_then_take_fixed_threshold_from_end_for_malpaca_enriched(skip, threshold,
folder_to_filtered_files,
folder_to_move_data_to,
old_file_addition):
skip = int(skip)
threshold = int(threshold)
folder_to_filtered_files = folder_to_filtered_files
folder_to_move_data_to = folder_to_move_data_to
new_folder_path = folder_to_move_data_to + "/" + (str(threshold)) + "_fixed_threshold_" + str(
skip) + "_skip_from_end"
os.mkdir(new_folder_path)
scan_file_order_path = folder_to_filtered_files + "/" + "scan_order.txt"
scanned_files = []
with open(scan_file_order_path, 'r') as inputfile:
scanned_files = inputfile.readlines()
scanned_files_list = [x.strip() for x in scanned_files]
scanned_files_list = list(map(lambda x: (x.split(",")[0], x.split(",")[1]), scanned_files_list))
scanned_files_list = sorted(list(set(scanned_files_list)))
for index, (scenario_name, file_name) in enumerate(scanned_files_list):
print("Scenario name: " + scenario_name)
print("File name : " + file_name)
print("Number: " + str(index + 1) + "/" + str(len(scanned_files_list)))
print("Create pcap file")
path_to_csv_file = folder_to_filtered_files + "/" + scenario_name + "/" + file_name + "/" + file_name + "_summary.csv"
path_to_pcap_file = folder_to_filtered_files + "/" + scenario_name + "/" + file_name + "/" + file_name + "_" + old_file_addition + ".pcap"
file_packet_dic = {}
connections_used = []
new_file_path = new_folder_path + "/" + scenario_name + "_" + file_name
write_count = 1
with PcapReader(path_to_pcap_file) as packets:
for packet_count, packet in enumerate(packets):
packet_string = packet.show(dump=True)
packet_for_print = packet_string
packet_string = packet_string.split("\n")
packet_string = [x.replace(" ", "") for x in packet_string]
current_layer = "none"
packet_dic = {}
for line in packet_string:
if len(line) > 0:
if line[0] == '#':
new_layer = line.split('[')[1].split(']')[0]
current_layer = new_layer
packet_dic[current_layer] = {}
elif (line[0] != '\\') & (line[0] != '|'):
key = line.split("=")[0]
value = line.split("=")[1]
packet_dic[current_layer][key] = value
src_ip = packet_dic["IP"]["src"]
dst_ip = packet_dic["IP"]["dst"]
ip_protocol = packet_dic["IP"]["proto"].upper()
if ip_protocol == "UDP" and "UDP" in packet_dic:
src_port = packet_dic["UDP"]["sport"]
dst_port = packet_dic["UDP"]["dport"]
elif ip_protocol == "TCP" and "TCP" in packet_dic:
src_port = packet_dic["TCP"]["sport"]
dst_port = packet_dic["TCP"]["dport"]
elif ip_protocol == "ICMP" and "ICMP" in packet_dic:
src_port = 0
dst_port = str(packet_dic["ICMP"]["type"]) + "/" + str(packet_dic["ICMP"]["code"])
else:
src_port = 0
dst_port = 0
if not isinstance(src_port, int):
if not all(char.isdigit() for char in src_port):
try:
src_port = socket.getservbyname(src_port, ip_protocol)
except:
src_port = src_port
                    if not isinstance(dst_port, int):
if not all(char.isdigit() for char in dst_port):
try:
dst_port = socket.getservbyname(dst_port, ip_protocol)
except:
dst_port = dst_port
if (src_ip, dst_ip, ip_protocol, src_port, dst_port) not in file_packet_dic:
file_packet_dic[(src_ip, dst_ip, ip_protocol, src_port, dst_port)] = [packet]
else:
file_packet_dic[(src_ip, dst_ip, ip_protocol, src_port, dst_port)].append(packet)
if (packet_count % 500000) == 0:
if packet_count != 0:
print("Write " + str(write_count) + " Start")
for address, packets_value in file_packet_dic.items():
amount = len(packets_value)
if amount >= (threshold + skip):
connections_used.append(address)
pktdump = PcapWriter(new_file_path, append=True, sync=True)
threshold_int = (threshold + skip) * (-1)
packets_value = packets_value[threshold_int:]
for index, packet in enumerate(packets_value):
if index < threshold:
pktdump.write(packet)
else:
break
pktdump.close()
file_packet_dic.clear()
print("Write " + str(write_count) + " End")
write_count = write_count + 1
packets.close()
if len(file_packet_dic) > 0:
print("Write Last Packets Start")
for (src_ip, dst_ip, ip_protocol, src_port, dst_port), packets_value in file_packet_dic.items():
amount = len(packets_value)
if amount >= (threshold + skip):
connections_used.append((src_ip, dst_ip, ip_protocol, src_port, dst_port))
pktdump = PcapWriter(new_file_path, append=True, sync=True)
threshold_int = (threshold + skip) * (-1)
packets_value = packets_value[threshold_int:]
for index, packet in enumerate(packets_value):
if index < threshold:
pktdump.write(packet)
else:
break
pktdump.close()
file_packet_dic.clear()
print("Write Last Packets End")
print("Create csv file")
csv_df = pd.read_csv(path_to_csv_file)
csv_df["src_ip"] = csv_df["src_ip"].apply(lambda x: str(x).strip())
csv_df["dst_ip"] = csv_df["dst_ip"].apply(lambda x: str(x).strip())
csv_df["src_port"] = csv_df["src_port"].apply(lambda x: str(x).strip())
csv_df["dst_port"] = csv_df["dst_port"].apply(lambda x: str(x).strip())
csv_df["ip_protocol"] = csv_df["ip_protocol"].apply(lambda x: str(x).strip())
csv_df["src_ip"] = csv_df["src_ip"].astype(str)
csv_df["dst_ip"] = csv_df["dst_ip"].astype(str)
csv_df["src_port"] = csv_df["src_port"].astype(str)
csv_df["dst_port"] = csv_df["dst_port"].astype(str)
csv_df["ip_protocol"] = csv_df["ip_protocol"].astype(str)
if len(connections_used) > 0:
for index, (src_ip, dst_ip, ip_protocol, src_port, dst_port) in enumerate(connections_used):
src_ip = str(src_ip).strip()
dst_ip = str(dst_ip).strip()
ip_protocol = str(ip_protocol).strip()
src_port = str(src_port).strip()
dst_port = str(dst_port).strip()
row = csv_df[(csv_df["src_ip"] == src_ip) & (csv_df["dst_ip"] == dst_ip) &
(csv_df["ip_protocol"] == ip_protocol) & (csv_df["src_port"] == src_port) & (
csv_df["dst_port"] == dst_port)]
if index == 0:
combined_df = row
else:
combined_df = combined_df.append(row)
file_packet_dic.clear()
connections_used.clear()
new_csv_file_path = new_folder_path + "/" + scenario_name + "_" + file_name + "_summary.csv"
combined_df["connection_length"] = threshold
combined_df.to_csv(new_csv_file_path, index=False)
file_packet_dic.clear()
connections_used.clear()
@staticmethod
def get_data_equal_to_fixed_threshold_from_end_for_malpaca_enriched(threshold, folder_to_filtered_files,
folder_to_move_data_to, old_file_addition):
        threshold = int(threshold)
folder_to_filtered_files = folder_to_filtered_files
folder_to_move_data_to = folder_to_move_data_to
new_folder_path = folder_to_move_data_to + "/" + (str(threshold)) + "_fixed_threshold_from_end"
os.mkdir(new_folder_path)
scan_file_order_path = folder_to_filtered_files + "/" + "scan_order.txt"
scanned_files = []
with open(scan_file_order_path, 'r') as inputfile:
scanned_files = inputfile.readlines()
scanned_files_list = [x.strip() for x in scanned_files]
scanned_files_list = list(map(lambda x: (x.split(",")[0], x.split(",")[1]), scanned_files_list))
scanned_files_list = sorted(list(set(scanned_files_list)))
for index, (scenario_name, file_name) in enumerate(scanned_files_list):
print("Scenario name: " + scenario_name)
print("File name : " + file_name)
print("Number: " + str(index + 1) + "/" + str(len(scanned_files_list)))
print("Create pcap file")
path_to_csv_file = folder_to_filtered_files + "/" + scenario_name + "/" + file_name + "/" + file_name + "_summary.csv"
path_to_pcap_file = folder_to_filtered_files + "/" + scenario_name + "/" + file_name + "/" + file_name + "_" + old_file_addition + ".pcap"
file_packet_dic = {}
connections_used = []
new_file_path = new_folder_path + "/" + scenario_name + "_" + file_name
write_count = 1
with PcapReader(path_to_pcap_file) as packets:
for packet_count, packet in enumerate(packets):
packet_string = packet.show(dump=True)
packet_for_print = packet_string
packet_string = packet_string.split("\n")
packet_string = [x.replace(" ", "") for x in packet_string]
current_layer = "none"
packet_dic = {}
for line in packet_string:
if len(line) > 0:
if line[0] == '#':
new_layer = line.split('[')[1].split(']')[0]
current_layer = new_layer
packet_dic[current_layer] = {}
elif (line[0] != '\\') & (line[0] != '|'):
key = line.split("=")[0]
value = line.split("=")[1]
packet_dic[current_layer][key] = value
src_ip = packet_dic["IP"]["src"]
dst_ip = packet_dic["IP"]["dst"]
ip_protocol = packet_dic["IP"]["proto"].upper()
if ip_protocol == "UDP" and "UDP" in packet_dic:
src_port = packet_dic["UDP"]["sport"]
dst_port = packet_dic["UDP"]["dport"]
elif ip_protocol == "TCP" and "TCP" in packet_dic:
src_port = packet_dic["TCP"]["sport"]
dst_port = packet_dic["TCP"]["dport"]
elif ip_protocol == "ICMP" and "ICMP" in packet_dic:
src_port = 0
dst_port = str(packet_dic["ICMP"]["type"]) + "/" + str(packet_dic["ICMP"]["code"])
else:
src_port = 0
dst_port = 0
if not isinstance(src_port, int):
if not all(char.isdigit() for char in src_port):
try:
src_port = socket.getservbyname(src_port, ip_protocol)
except:
src_port = src_port
                    if not isinstance(dst_port, int):
if not all(char.isdigit() for char in dst_port):
try:
dst_port = socket.getservbyname(dst_port, ip_protocol)
except:
dst_port = dst_port
if (src_ip, dst_ip, ip_protocol, src_port, dst_port) not in file_packet_dic:
file_packet_dic[(src_ip, dst_ip, ip_protocol, src_port, dst_port)] = [packet]
else:
file_packet_dic[(src_ip, dst_ip, ip_protocol, src_port, dst_port)].append(packet)
if (packet_count % 500000) == 0:
if packet_count != 0:
print("Write " + str(write_count) + " Start")
for address, packets_value in file_packet_dic.items():
amount = len(packets_value)
if amount >= threshold:
connections_used.append(address)
pktdump = PcapWriter(new_file_path, append=True, sync=True)
threshold_int = int(threshold) * (-1)
packets_value = packets_value[threshold_int:]
for index, packet in enumerate(packets_value):
if index < threshold:
pktdump.write(packet)
else:
break
pktdump.close()
file_packet_dic.clear()
print("Write " + str(write_count) + " End")
write_count = write_count + 1
packets.close()
if len(file_packet_dic) > 0:
print("Write Last Packets Start")
for (src_ip, dst_ip, ip_protocol, src_port, dst_port), packets_value in file_packet_dic.items():
amount = len(packets_value)
if amount >= threshold:
connections_used.append((src_ip, dst_ip, ip_protocol, src_port, dst_port))
pktdump = PcapWriter(new_file_path, append=True, sync=True)
threshold_int = int(threshold) * (-1)
packets_value = packets_value[threshold_int:]
for index, packet in enumerate(packets_value):
if index < threshold:
pktdump.write(packet)
else:
break
pktdump.close()
file_packet_dic.clear()
print("Write Last Packets End")
print("Create csv file")
csv_df = pd.read_csv(path_to_csv_file)
csv_df["src_ip"] = csv_df["src_ip"].apply(lambda x: str(x).strip())
csv_df["dst_ip"] = csv_df["dst_ip"].apply(lambda x: str(x).strip())
csv_df["src_port"] = csv_df["src_port"].apply(lambda x: str(x).strip())
csv_df["dst_port"] = csv_df["dst_port"].apply(lambda x: str(x).strip())
csv_df["ip_protocol"] = csv_df["ip_protocol"].apply(lambda x: str(x).strip())
csv_df["src_ip"] = csv_df["src_ip"].astype(str)
csv_df["dst_ip"] = csv_df["dst_ip"].astype(str)
csv_df["src_port"] = csv_df["src_port"].astype(str)
csv_df["dst_port"] = csv_df["dst_port"].astype(str)
csv_df["ip_protocol"] = csv_df["ip_protocol"].astype(str)
if len(connections_used) > 0:
for index, (src_ip, dst_ip, ip_protocol, src_port, dst_port) in enumerate(connections_used):
src_ip = str(src_ip).strip()
dst_ip = str(dst_ip).strip()
ip_protocol = str(ip_protocol).strip()
src_port = str(src_port).strip()
dst_port = str(dst_port).strip()
row = csv_df[(csv_df["src_ip"] == src_ip) & (csv_df["dst_ip"] == dst_ip) &
(csv_df["ip_protocol"] == ip_protocol) & (csv_df["src_port"] == src_port) & (
csv_df["dst_port"] == dst_port)]
if index == 0:
combined_df = row
else:
combined_df = combined_df.append(row)
file_packet_dic.clear()
connections_used.clear()
new_csv_file_path = new_folder_path + "/" + scenario_name + "_" + file_name + "_summary.csv"
combined_df["connection_length"] = threshold
combined_df.to_csv(new_csv_file_path, index=False)
file_packet_dic.clear()
connections_used.clear()
@staticmethod
def get_data_equal_to_fixed_window_size_for_malpaca(folder_to_filtered_files, folder_to_move_data_to, window_size, old_file_addition):
        window_size = int(window_size)
folder_to_filtered_files = folder_to_filtered_files
folder_to_move_data_to = folder_to_move_data_to
new_folder_path = folder_to_move_data_to + "/" + (str(window_size)) + "_window_size"
os.mkdir(new_folder_path)
scan_file_order_path = folder_to_filtered_files + "/" + "scan_order.txt"
scanned_files = []
with open(scan_file_order_path, 'r') as inputfile:
scanned_files = inputfile.readlines()
scanned_files_list = [x.strip() for x in scanned_files]
scanned_files_list = list(map(lambda x: (x.split(",")[0], x.split(",")[1]), scanned_files_list))
scanned_files_list = sorted(list(set(scanned_files_list)))
for index, (scenario_name, file_name) in enumerate(scanned_files_list):
print("Scenario name: " + scenario_name)
print("File name : " + file_name)
print("Number: " + str(index + 1) + "/" + str(len(scanned_files_list)))
print("Create pcap file")
path_to_csv_file = folder_to_filtered_files + "/" + scenario_name + "/" + file_name + "/" + file_name + "_summary.csv"
path_to_pcap_file = folder_to_filtered_files + "/" + scenario_name + "/" + file_name + "/" + file_name + "_" + old_file_addition + ".pcap"
file_packet_dic = {}
window_dic = {}
with PcapReader(path_to_pcap_file) as packets:
for packet_count, packet in enumerate(packets):
packet_string = packet.show(dump=True)
packet_for_print = packet_string
packet_string = packet_string.split("\n")
packet_string = [x.replace(" ", "") for x in packet_string]
current_layer = "none"
packet_dic = {}
for line in packet_string:
if len(line) > 0:
if line[0] == '#':
new_layer = line.split('[')[1].split(']')[0]
current_layer = new_layer
packet_dic[current_layer] = {}
elif (line[0] != '\\') & (line[0] != '|'):
key = line.split("=")[0]
value = line.split("=")[1]
packet_dic[current_layer][key] = value
src_ip = packet_dic["IP"]["src"]
dst_ip = packet_dic["IP"]["dst"]
ip_protocol = packet_dic["IP"]["proto"].upper()
if ip_protocol == "UDP" and "UDP" in packet_dic:
src_port = packet_dic["UDP"]["sport"]
dst_port = packet_dic["UDP"]["dport"]
elif ip_protocol == "TCP" and "TCP" in packet_dic:
src_port = packet_dic["TCP"]["sport"]
dst_port = packet_dic["TCP"]["dport"]
elif ip_protocol == "ICMP" and "ICMP" in packet_dic:
src_port = 0
dst_port = str(packet_dic["ICMP"]["type"]) + "/" + str(packet_dic["ICMP"]["code"])
else:
src_port = 0
dst_port = 0
if not isinstance(src_port, int):
if not all(char.isdigit() for char in src_port):
try:
src_port = socket.getservbyname(src_port, ip_protocol)
except:
src_port = src_port
                    if not isinstance(dst_port, int):
if not all(char.isdigit() for char in dst_port):
try:
dst_port = socket.getservbyname(dst_port, ip_protocol)
except:
dst_port = dst_port
if (src_ip, dst_ip, ip_protocol, src_port, dst_port) not in file_packet_dic:
file_packet_dic[(src_ip, dst_ip, ip_protocol, src_port, dst_port)] = [packet]
else:
file_packet_dic[(src_ip, dst_ip, ip_protocol, src_port, dst_port)].append(packet)
new_file_path = new_folder_path + "/" + scenario_name + "_" + file_name
for (src_ip, dst_ip, ip_protocol, src_port, dst_port), packets_value in file_packet_dic.items():
amount_packets = len(packets_value)
if amount_packets >= window_size:
amount_windows = (math.floor(amount_packets / window_size))
amount_packets = amount_windows * window_size
window_dic[(src_ip, dst_ip, ip_protocol, src_port, dst_port)] = amount_windows
pktdump = PcapWriter(new_file_path, append=True, sync=True)
for index, packet in enumerate(packets_value):
if index < amount_packets:
pktdump.write(packet)
else:
break
pktdump.close()
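            # Every kept connection contributes floor(len/window_size) windows; its summary row is
            # duplicated once per window below and tagged with a 'window' index.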
print("Create csv file")
csv_df = pd.read_csv(path_to_csv_file)
csv_df["src_ip"] = csv_df["src_ip"].apply(lambda x: str(x).strip())
csv_df["dst_ip"] = csv_df["dst_ip"].apply(lambda x: str(x).strip())
csv_df["src_port"] = csv_df["src_port"].apply(lambda x: str(x).strip())
csv_df["dst_port"] = csv_df["dst_port"].apply(lambda x: str(x).strip())
csv_df["ip_protocol"] = csv_df["ip_protocol"].apply(lambda x: str(x).strip())
csv_df["src_ip"] = csv_df["src_ip"].astype(str)
csv_df["dst_ip"] = csv_df["dst_ip"].astype(str)
csv_df["src_port"] = csv_df["src_port"].astype(str)
csv_df["dst_port"] = csv_df["dst_port"].astype(str)
csv_df["ip_protocol"] = csv_df["ip_protocol"].astype(str)
if len(window_dic) > 0:
row_list = []
for index, (address, amount_windows) in enumerate(window_dic.items()):
#src_ip, dst_ip, ip_protocol, src_port, dst_port, ip_tos
src_ip = str(address[0]).strip()
dst_ip = str(address[1]).strip()
ip_protocol = str(address[2]).strip()
src_port = str(address[3]).strip()
dst_port = str(address[4]).strip()
row = csv_df[(csv_df["src_ip"] == src_ip) & (csv_df["dst_ip"] == dst_ip) &
(csv_df["ip_protocol"] == ip_protocol) & (csv_df["src_port"] == src_port) & (
csv_df["dst_port"] == dst_port)]
for window_index in range(0, amount_windows):
new_row = row.copy()
new_row["connection_length"] = window_size
new_row["window"] = window_index
row_list.append(new_row)
combined_df = pd.concat(row_list)
file_packet_dic.clear()
window_dic.clear()
new_csv_file_path = new_folder_path + "/" + scenario_name + "_" + file_name + "_summary.csv"
combined_df = combined_df.sort_values(by=["src_ip", "dst_ip", "ip_protocol", "src_port", "dst_port", "window"], ascending=True)
combined_df.to_csv(new_csv_file_path, index=False)
@staticmethod
def split_connection_into_X_equal_parts_for_malpaca(threshold, parts, folder_to_filtered_files, folder_to_move_data_to, old_file_addition):
folder_to_filtered_files = folder_to_filtered_files
folder_to_move_data_to = folder_to_move_data_to
threshold = int(threshold)
parts = int(parts)
new_folder_name = folder_to_move_data_to + "/" + str(threshold) + "_threshold_" + str(parts) + "_parts"
os.mkdir(new_folder_name)
for piece in range(1, (parts + 1)):
new_folder = new_folder_name + "/" + str(threshold) + "_threshold_" + str(piece) + "_part"
os.mkdir(new_folder)
scan_file_order_path = folder_to_filtered_files + "/" + "scan_order.txt"
scanned_files = []
with open(scan_file_order_path, 'r') as inputfile:
scanned_files = inputfile.readlines()
scanned_files_list = [x.strip() for x in scanned_files]
scanned_files_list = list(map(lambda x: (x.split(",")[0], x.split(",")[1]), scanned_files_list))
scanned_files_list = sorted(list(set(scanned_files_list)))
for index, (scenario_name, file_name) in enumerate(scanned_files_list):
print("Scenario name: " + scenario_name)
print("File name : " + file_name)
print("Number: " + str(index + 1) + "/" + str(len(scanned_files_list)))
print("Create pcap file")
path_to_csv_file = folder_to_filtered_files + "/" + scenario_name + "/" + file_name + "/" + file_name + "_summary.csv"
path_to_pcap_file = folder_to_filtered_files + "/" + scenario_name + "/" + file_name + "/" + file_name + "_" + old_file_addition + ".pcap"
parts_list = []
for part in range(parts):
parts_list.append([])
file_packet_dic = {}
connections_used = []
with PcapReader(path_to_pcap_file) as packets:
for packet_count, packet in enumerate(packets):
packet_string = packet.show(dump=True)
packet_for_print = packet_string
packet_string = packet_string.split("\n")
packet_string = [x.replace(" ", "") for x in packet_string]
current_layer = "none"
packet_dic = {}
for line in packet_string:
if len(line) > 0:
if line[0] == '#':
new_layer = line.split('[')[1].split(']')[0]
current_layer = new_layer
packet_dic[current_layer] = {}
elif (line[0] != '\\') & (line[0] != '|'):
key = line.split("=")[0]
value = line.split("=")[1]
packet_dic[current_layer][key] = value
src_ip = packet_dic["IP"]["src"]
dst_ip = packet_dic["IP"]["dst"]
ip_protocol = packet_dic["IP"]["proto"].upper()
if ip_protocol == "UDP" and "UDP" in packet_dic:
src_port = packet_dic["UDP"]["sport"]
dst_port = packet_dic["UDP"]["dport"]
elif ip_protocol == "TCP" and "TCP" in packet_dic:
src_port = packet_dic["TCP"]["sport"]
dst_port = packet_dic["TCP"]["dport"]
elif ip_protocol == "ICMP" and "ICMP" in packet_dic:
src_port = 0
dst_port = str(packet_dic["ICMP"]["type"]) + "/" + str(packet_dic["ICMP"]["code"])
else:
src_port = 0
dst_port = 0
if not isinstance(src_port, int):
if not all(char.isdigit() for char in src_port):
try:
src_port = socket.getservbyname(src_port, ip_protocol)
except:
src_port = src_port
                    if not isinstance(dst_port, int):
if not all(char.isdigit() for char in dst_port):
try:
dst_port = socket.getservbyname(dst_port, ip_protocol)
except:
dst_port = dst_port
if (src_ip, dst_ip, ip_protocol, src_port, dst_port) not in file_packet_dic:
file_packet_dic[(src_ip, dst_ip, ip_protocol, src_port, dst_port)] = [packet]
else:
file_packet_dic[(src_ip, dst_ip, ip_protocol, src_port, dst_port)].append(packet)
for address, packets_value in file_packet_dic.items():
len_connection = len(packets_value)
if len_connection >= (threshold * parts):
connections_used.append(address)
remainder = len_connection - (threshold * parts)
to_skip_packets = math.floor((remainder / (parts - 1)))
one_move = threshold + to_skip_packets
one_to_last_packet = one_move * (parts - 1)
index = 0
for start_value in range(0, one_to_last_packet, one_move):
packet_slice = packets_value[start_value:(start_value + threshold)]
parts_list[index].append(packet_slice)
index = index + 1
parts_list[index].append(packets_value[-threshold:])
            summary_df = pd.read_csv(path_to_csv_file)
import os
import torch.nn as nn
import torch
import warnings
import argparse
from Logger import *
import pickle
from Dataset import *
warnings.filterwarnings("ignore")
from Functions import *
from Network import *
import pandas as pd
def get_args():
parser = argparse.ArgumentParser()
parser.add_argument('--gpu_id', type=str, default='0', help='which gpu to use')
parser.add_argument('--path', type=str, default='../', help='path of csv file with DNA sequences and labels')
parser.add_argument('--epochs', type=int, default=150, help='number of epochs to train')
parser.add_argument('--batch_size', type=int, default=24, help='size of each batch during training')
    parser.add_argument('--weight_decay', type=float, default=0, help='weight decay used in optimizer')
parser.add_argument('--ntoken', type=int, default=4, help='number of tokens to represent DNA nucleotides (should always be 4)')
parser.add_argument('--nclass', type=int, default=919, help='number of classes from the linear decoder')
parser.add_argument('--ninp', type=int, default=512, help='ninp for transformer encoder')
parser.add_argument('--nhead', type=int, default=8, help='nhead for transformer encoder')
parser.add_argument('--nhid', type=int, default=2048, help='nhid for transformer encoder')
parser.add_argument('--nlayers', type=int, default=6, help='nlayers for transformer encoder')
parser.add_argument('--save_freq', type=int, default=1, help='saving checkpoints per save_freq epochs')
parser.add_argument('--dropout', type=float, default=.1, help='transformer dropout')
parser.add_argument('--warmup_steps', type=int, default=3200, help='training schedule warmup steps')
parser.add_argument('--lr_scale', type=float, default=1, help='learning rate scale')
parser.add_argument('--nmute', type=int, default=18, help='number of mutations during training')
parser.add_argument('--kmers', type=int, nargs='+', default=[7], help='k-mers to be aggregated')
#parser.add_argument('--kmer_aggregation', type=bool, default=True, help='k-mers to be aggregated')
parser.add_argument('--kmer_aggregation', dest='kmer_aggregation', action='store_true')
parser.add_argument('--no_kmer_aggregation', dest='kmer_aggregation', action='store_false')
parser.set_defaults(kmer_aggregation=True)
parser.add_argument('--nfolds', type=int, default=5, help='number of cross validation folds')
parser.add_argument('--fold', type=int, default=0, help='which fold to train')
    parser.add_argument('--val_freq', type=int, default=1, help='run validation every val_freq epochs')
parser.add_argument('--num_workers', type=int, default=1, help='num_workers')
opts = parser.parse_args()
return opts
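# Example invocation (illustrative values only; every flag maps to an option defined in get_args above):
#   python <this_script>.py --gpu_id 0 --path ../ --epochs 150 --batch_size 24 --nfolds 5 --fold 0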
#def train_fold():
args=get_args()
# DECLARE HOW MANY GPUS YOU WISH TO USE.
# KAGGLE ONLY HAS 1, BUT OFFLINE, YOU CAN USE MORE
os.environ["CUDA_VISIBLE_DEVICES"]=args.gpu_id #0,1,2,3 for four gpu
if torch.cuda.device_count() > 1:
DATAPARALLEL=True
else:
DATAPARALLEL=False
# print(torch.cuda.device_count())
#
# exit()
# VERSION FOR SAVING MODEL WEIGHTS
VER=26
# IF VARIABLE IS NONE, THEN NOTEBOOK COMPUTES TOKENS
# OTHERWISE NOTEBOOK LOADS TOKENS FROM PATH
LOAD_TOKENS_FROM = '../../input/py-bigbird-v26'
# IF VARIABLE IS NONE, THEN NOTEBOOK TRAINS A NEW MODEL
# OTHERWISE IT LOADS YOUR PREVIOUSLY TRAINED MODEL
#LOAD_MODEL_FROM = '../input/whitespace'
LOAD_MODEL_FROM = None
# IF FOLLOWING IS NONE, THEN NOTEBOOK
# USES INTERNET AND DOWNLOADS HUGGINGFACE
# CONFIG, TOKENIZER, AND MODEL
DOWNLOADED_MODEL_PATH = "../../input/deberta-xlarge"
if DOWNLOADED_MODEL_PATH is None:
DOWNLOADED_MODEL_PATH = 'model'
MODEL_NAME = 'microsoft/deberta-xlarge'
#MODEL_NAME = "google/bigbird-roberta-large"
from torch import cuda
config = {'model_name': MODEL_NAME,
'max_length': 1536,
'train_batch_size':2,
'valid_batch_size':1,
'epochs':7,
'learning_rates': [2.5e-5, 2.5e-5, 2.5e-5, 2.5e-6, 2.5e-6, 2.5e-6, 2.5e-7],
'max_grad_norm':1,
'device': 'cuda' if cuda.is_available() else 'cpu'}
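# NOTE: 'learning_rates' holds one rate per epoch (7 entries for epochs=7); each entry is
# multiplied by --lr_scale immediately below.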
config['learning_rates']=[lr*args.lr_scale for lr in config['learning_rates']]
print('learning_rates:')
print(config['learning_rates'])
#lr_scale
# THIS WILL COMPUTE VAL SCORE DURING COMMIT BUT NOT DURING SUBMIT
COMPUTE_VAL_SCORE = True
if len( os.listdir('../../input/test') )>5:
COMPUTE_VAL_SCORE = False
from transformers import *
if DOWNLOADED_MODEL_PATH == 'model':
os.system('mkdir model')
tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME, add_prefix_space=True)
tokenizer.save_pretrained('model')
config_model = AutoConfig.from_pretrained(MODEL_NAME)
config_model.num_labels = 15
config_model.save_pretrained('model')
backbone = AutoModelForTokenClassification.from_pretrained(MODEL_NAME,
config=config_model)
backbone.save_pretrained('model')
#load data and libraries
import numpy as np, os
from scipy import stats
import pandas as pd, gc
from tqdm import tqdm
from transformers import AutoTokenizer, AutoModelForTokenClassification
from torch.utils.data import Dataset, DataLoader
import torch
from sklearn.metrics import accuracy_score
train_df = pd.read_csv('../../input/train.csv')
# coding=utf-8
# pylint: disable-msg=E1101,W0612
from datetime import datetime, timedelta
from numpy import nan
import numpy as np
import pandas as pd
from pandas.types.common import is_integer, is_scalar
from pandas import Index, Series, DataFrame, isnull, date_range
from pandas.core.index import MultiIndex
from pandas.core.indexing import IndexingError
from pandas.tseries.index import Timestamp
from pandas.tseries.offsets import BDay
from pandas.tseries.tdi import Timedelta
from pandas.compat import lrange, range
from pandas import compat
from pandas.util.testing import assert_series_equal, assert_almost_equal
import pandas.util.testing as tm
from pandas.tests.series.common import TestData
JOIN_TYPES = ['inner', 'outer', 'left', 'right']
class TestSeriesIndexing(TestData, tm.TestCase):
_multiprocess_can_split_ = True
def test_get(self):
# GH 6383
s = Series(np.array([43, 48, 60, 48, 50, 51, 50, 45, 57, 48, 56, 45,
51, 39, 55, 43, 54, 52, 51, 54]))
result = s.get(25, 0)
expected = 0
self.assertEqual(result, expected)
s = Series(np.array([43, 48, 60, 48, 50, 51, 50, 45, 57, 48, 56,
45, 51, 39, 55, 43, 54, 52, 51, 54]),
index=pd.Float64Index(
[25.0, 36.0, 49.0, 64.0, 81.0, 100.0,
121.0, 144.0, 169.0, 196.0, 1225.0,
1296.0, 1369.0, 1444.0, 1521.0, 1600.0,
1681.0, 1764.0, 1849.0, 1936.0],
dtype='object'))
result = s.get(25, 0)
expected = 43
self.assertEqual(result, expected)
# GH 7407
# with a boolean accessor
df = pd.DataFrame({'i': [0] * 3, 'b': [False] * 3})
vc = df.i.value_counts()
result = vc.get(99, default='Missing')
self.assertEqual(result, 'Missing')
vc = df.b.value_counts()
result = vc.get(False, default='Missing')
self.assertEqual(result, 3)
result = vc.get(True, default='Missing')
self.assertEqual(result, 'Missing')
def test_delitem(self):
# GH 5542
# should delete the item inplace
s = Series(lrange(5))
del s[0]
expected = Series(lrange(1, 5), index=lrange(1, 5))
assert_series_equal(s, expected)
del s[1]
expected = Series(lrange(2, 5), index=lrange(2, 5))
assert_series_equal(s, expected)
# empty
s = Series()
def f():
del s[0]
self.assertRaises(KeyError, f)
# only 1 left, del, add, del
s = Series(1)
del s[0]
assert_series_equal(s, Series(dtype='int64', index=Index(
[], dtype='int64')))
s[0] = 1
assert_series_equal(s, Series(1))
del s[0]
assert_series_equal(s, Series(dtype='int64', index=Index(
[], dtype='int64')))
# Index(dtype=object)
s = Series(1, index=['a'])
del s['a']
assert_series_equal(s, Series(dtype='int64', index=Index(
[], dtype='object')))
s['a'] = 1
assert_series_equal(s, Series(1, index=['a']))
del s['a']
assert_series_equal(s, Series(dtype='int64', index=Index(
[], dtype='object')))
def test_getitem_setitem_ellipsis(self):
s = Series(np.random.randn(10))
np.fix(s)
result = s[...]
assert_series_equal(result, s)
s[...] = 5
self.assertTrue((result == 5).all())
def test_getitem_negative_out_of_bounds(self):
s = Series(tm.rands_array(5, 10), index=tm.rands_array(10, 10))
self.assertRaises(IndexError, s.__getitem__, -11)
self.assertRaises(IndexError, s.__setitem__, -11, 'foo')
def test_pop(self):
# GH 6600
df = DataFrame({'A': 0, 'B': np.arange(5, dtype='int64'), 'C': 0, })
k = df.iloc[4]
result = k.pop('B')
self.assertEqual(result, 4)
expected = Series([0, 0], index=['A', 'C'], name=4)
assert_series_equal(k, expected)
def test_getitem_get(self):
idx1 = self.series.index[5]
idx2 = self.objSeries.index[5]
self.assertEqual(self.series[idx1], self.series.get(idx1))
self.assertEqual(self.objSeries[idx2], self.objSeries.get(idx2))
self.assertEqual(self.series[idx1], self.series[5])
self.assertEqual(self.objSeries[idx2], self.objSeries[5])
self.assertEqual(
self.series.get(-1), self.series.get(self.series.index[-1]))
self.assertEqual(self.series[5], self.series.get(self.series.index[5]))
# missing
d = self.ts.index[0] - BDay()
self.assertRaises(KeyError, self.ts.__getitem__, d)
# None
# GH 5652
for s in [Series(), Series(index=list('abc'))]:
result = s.get(None)
self.assertIsNone(result)
def test_iget(self):
s = Series(np.random.randn(10), index=lrange(0, 20, 2))
# 10711, deprecated
with tm.assert_produces_warning(FutureWarning):
s.iget(1)
# 10711, deprecated
with tm.assert_produces_warning(FutureWarning):
s.irow(1)
# 10711, deprecated
with tm.assert_produces_warning(FutureWarning):
s.iget_value(1)
for i in range(len(s)):
result = s.iloc[i]
exp = s[s.index[i]]
assert_almost_equal(result, exp)
# pass a slice
result = s.iloc[slice(1, 3)]
expected = s.ix[2:4]
assert_series_equal(result, expected)
# test slice is a view
result[:] = 0
self.assertTrue((s[1:3] == 0).all())
# list of integers
result = s.iloc[[0, 2, 3, 4, 5]]
expected = s.reindex(s.index[[0, 2, 3, 4, 5]])
assert_series_equal(result, expected)
def test_iget_nonunique(self):
s = Series([0, 1, 2], index=[0, 1, 0])
self.assertEqual(s.iloc[2], 2)
def test_getitem_regression(self):
s = Series(lrange(5), index=lrange(5))
result = s[lrange(5)]
assert_series_equal(result, s)
def test_getitem_setitem_slice_bug(self):
s = Series(lrange(10), lrange(10))
result = s[-12:]
assert_series_equal(result, s)
result = s[-7:]
assert_series_equal(result, s[3:])
result = s[:-12]
assert_series_equal(result, s[:0])
s = Series(lrange(10), lrange(10))
s[-12:] = 0
self.assertTrue((s == 0).all())
s[:-12] = 5
self.assertTrue((s == 0).all())
def test_getitem_int64(self):
idx = np.int64(5)
self.assertEqual(self.ts[idx], self.ts[5])
def test_getitem_fancy(self):
slice1 = self.series[[1, 2, 3]]
slice2 = self.objSeries[[1, 2, 3]]
self.assertEqual(self.series.index[2], slice1.index[1])
self.assertEqual(self.objSeries.index[2], slice2.index[1])
self.assertEqual(self.series[2], slice1[1])
self.assertEqual(self.objSeries[2], slice2[1])
def test_getitem_boolean(self):
s = self.series
mask = s > s.median()
# passing list is OK
result = s[list(mask)]
expected = s[mask]
assert_series_equal(result, expected)
self.assert_index_equal(result.index, s.index[mask])
def test_getitem_boolean_empty(self):
s = Series([], dtype=np.int64)
s.index.name = 'index_name'
s = s[s.isnull()]
self.assertEqual(s.index.name, 'index_name')
self.assertEqual(s.dtype, np.int64)
# GH5877
# indexing with empty series
s = Series(['A', 'B'])
expected = Series(np.nan, index=['C'], dtype=object)
result = s[Series(['C'], dtype=object)]
assert_series_equal(result, expected)
s = Series(['A', 'B'])
expected = Series(dtype=object, index=Index([], dtype='int64'))
result = s[Series([], dtype=object)]
assert_series_equal(result, expected)
# invalid because of the boolean indexer
# that's empty or not-aligned
def f():
s[Series([], dtype=bool)]
self.assertRaises(IndexingError, f)
def f():
s[Series([True], dtype=bool)]
self.assertRaises(IndexingError, f)
def test_getitem_generator(self):
gen = (x > 0 for x in self.series)
result = self.series[gen]
result2 = self.series[iter(self.series > 0)]
expected = self.series[self.series > 0]
assert_series_equal(result, expected)
assert_series_equal(result2, expected)
def test_type_promotion(self):
# GH12599
s = pd.Series()
s["a"] = pd.Timestamp("2016-01-01")
s["b"] = 3.0
s["c"] = "foo"
expected = Series([pd.Timestamp("2016-01-01"), 3.0, "foo"],
index=["a", "b", "c"])
assert_series_equal(s, expected)
def test_getitem_boolean_object(self):
# using column from DataFrame
s = self.series
mask = s > s.median()
omask = mask.astype(object)
# getitem
result = s[omask]
expected = s[mask]
assert_series_equal(result, expected)
# setitem
s2 = s.copy()
cop = s.copy()
cop[omask] = 5
s2[mask] = 5
assert_series_equal(cop, s2)
# nans raise exception
omask[5:10] = np.nan
self.assertRaises(Exception, s.__getitem__, omask)
self.assertRaises(Exception, s.__setitem__, omask, 5)
def test_getitem_setitem_boolean_corner(self):
ts = self.ts
mask_shifted = ts.shift(1, freq=BDay()) > ts.median()
# these used to raise...??
self.assertRaises(Exception, ts.__getitem__, mask_shifted)
self.assertRaises(Exception, ts.__setitem__, mask_shifted, 1)
# ts[mask_shifted]
# ts[mask_shifted] = 1
self.assertRaises(Exception, ts.ix.__getitem__, mask_shifted)
self.assertRaises(Exception, ts.ix.__setitem__, mask_shifted, 1)
# ts.ix[mask_shifted]
# ts.ix[mask_shifted] = 2
def test_getitem_setitem_slice_integers(self):
s = Series(np.random.randn(8), index=[2, 4, 6, 8, 10, 12, 14, 16])
result = s[:4]
expected = s.reindex([2, 4, 6, 8])
assert_series_equal(result, expected)
s[:4] = 0
self.assertTrue((s[:4] == 0).all())
self.assertTrue(not (s[4:] == 0).any())
def test_getitem_out_of_bounds(self):
# don't segfault, GH #495
self.assertRaises(IndexError, self.ts.__getitem__, len(self.ts))
# GH #917
s = Series([])
self.assertRaises(IndexError, s.__getitem__, -1)
def test_getitem_setitem_integers(self):
# caused bug without test
s = Series([1, 2, 3], ['a', 'b', 'c'])
self.assertEqual(s.ix[0], s['a'])
s.ix[0] = 5
self.assertAlmostEqual(s['a'], 5)
def test_getitem_box_float64(self):
value = self.ts[5]
tm.assertIsInstance(value, np.float64)
def test_getitem_ambiguous_keyerror(self):
s = Series(lrange(10), index=lrange(0, 20, 2))
self.assertRaises(KeyError, s.__getitem__, 1)
self.assertRaises(KeyError, s.ix.__getitem__, 1)
def test_getitem_unordered_dup(self):
obj = Series(lrange(5), index=['c', 'a', 'a', 'b', 'b'])
self.assertTrue(is_scalar(obj['c']))
self.assertEqual(obj['c'], 0)
def test_getitem_dups_with_missing(self):
# breaks reindex, so need to use .ix internally
# GH 4246
s = Series([1, 2, 3, 4], ['foo', 'bar', 'foo', 'bah'])
expected = s.ix[['foo', 'bar', 'bah', 'bam']]
result = s[['foo', 'bar', 'bah', 'bam']]
assert_series_equal(result, expected)
def test_getitem_dups(self):
s = Series(range(5), index=['A', 'A', 'B', 'C', 'C'], dtype=np.int64)
expected = Series([3, 4], index=['C', 'C'], dtype=np.int64)
result = s['C']
assert_series_equal(result, expected)
def test_getitem_dataframe(self):
rng = list(range(10))
s = pd.Series(10, index=rng)
df = pd.DataFrame(rng, index=rng)
self.assertRaises(TypeError, s.__getitem__, df > 5)
def test_getitem_callable(self):
# GH 12533
s = pd.Series(4, index=list('ABCD'))
result = s[lambda x: 'A']
self.assertEqual(result, s.loc['A'])
result = s[lambda x: ['A', 'B']]
tm.assert_series_equal(result, s.loc[['A', 'B']])
result = s[lambda x: [True, False, True, True]]
tm.assert_series_equal(result, s.iloc[[0, 2, 3]])
def test_setitem_ambiguous_keyerror(self):
s = Series(lrange(10), index=lrange(0, 20, 2))
# equivalent of an append
s2 = s.copy()
s2[1] = 5
expected = s.append(Series([5], index=[1]))
assert_series_equal(s2, expected)
s2 = s.copy()
s2.ix[1] = 5
expected = s.append(Series([5], index=[1]))
assert_series_equal(s2, expected)
def test_setitem_float_labels(self):
# note labels are floats
s = Series(['a', 'b', 'c'], index=[0, 0.5, 1])
tmp = s.copy()
s.ix[1] = 'zoo'
tmp.iloc[2] = 'zoo'
assert_series_equal(s, tmp)
def test_setitem_callable(self):
# GH 12533
s = pd.Series([1, 2, 3, 4], index=list('ABCD'))
s[lambda x: 'A'] = -1
tm.assert_series_equal(s, pd.Series([-1, 2, 3, 4], index=list('ABCD')))
def test_setitem_other_callable(self):
# GH 13299
inc = lambda x: x + 1
s = pd.Series([1, 2, -1, 4])
s[s < 0] = inc
expected = pd.Series([1, 2, inc, 4])
tm.assert_series_equal(s, expected)
def test_slice(self):
numSlice = self.series[10:20]
numSliceEnd = self.series[-10:]
objSlice = self.objSeries[10:20]
self.assertNotIn(self.series.index[9], numSlice.index)
self.assertNotIn(self.objSeries.index[9], objSlice.index)
self.assertEqual(len(numSlice), len(numSlice.index))
self.assertEqual(self.series[numSlice.index[0]],
numSlice[numSlice.index[0]])
self.assertEqual(numSlice.index[1], self.series.index[11])
self.assertTrue(tm.equalContents(numSliceEnd, np.array(self.series)[
-10:]))
# test return view
sl = self.series[10:20]
sl[:] = 0
self.assertTrue((self.series[10:20] == 0).all())
def test_slice_can_reorder_not_uniquely_indexed(self):
s = Series(1, index=['a', 'a', 'b', 'b', 'c'])
s[::-1] # it works!
def test_slice_float_get_set(self):
self.assertRaises(TypeError, lambda: self.ts[4.0:10.0])
def f():
self.ts[4.0:10.0] = 0
self.assertRaises(TypeError, f)
self.assertRaises(TypeError, self.ts.__getitem__, slice(4.5, 10.0))
self.assertRaises(TypeError, self.ts.__setitem__, slice(4.5, 10.0), 0)
def test_slice_floats2(self):
s = Series(np.random.rand(10), index=np.arange(10, 20, dtype=float))
self.assertEqual(len(s.ix[12.0:]), 8)
self.assertEqual(len(s.ix[12.5:]), 7)
i = np.arange(10, 20, dtype=float)
i[2] = 12.2
s.index = i
self.assertEqual(len(s.ix[12.0:]), 8)
self.assertEqual(len(s.ix[12.5:]), 7)
def test_slice_float64(self):
values = np.arange(10., 50., 2)
index = Index(values)
start, end = values[[5, 15]]
s = Series(np.random.randn(20), index=index)
result = s[start:end]
expected = s.iloc[5:16]
assert_series_equal(result, expected)
result = s.loc[start:end]
assert_series_equal(result, expected)
df = DataFrame(np.random.randn(20, 3), index=index)
result = df[start:end]
expected = df.iloc[5:16]
tm.assert_frame_equal(result, expected)
result = df.loc[start:end]
tm.assert_frame_equal(result, expected)
def test_setitem(self):
self.ts[self.ts.index[5]] = np.NaN
self.ts[[1, 2, 17]] = np.NaN
self.ts[6] = np.NaN
self.assertTrue(np.isnan(self.ts[6]))
self.assertTrue(np.isnan(self.ts[2]))
self.ts[np.isnan(self.ts)] = 5
self.assertFalse(np.isnan(self.ts[2]))
# caught this bug when writing tests
series = Series(tm.makeIntIndex(20).astype(float),
index=tm.makeIntIndex(20))
series[::2] = 0
self.assertTrue((series[::2] == 0).all())
# set item that's not contained
s = self.series.copy()
s['foobar'] = 1
app = Series([1], index=['foobar'], name='series')
expected = self.series.append(app)
assert_series_equal(s, expected)
# Test for issue #10193
key = pd.Timestamp('2012-01-01')
series = pd.Series()
series[key] = 47
expected = pd.Series(47, [key])
assert_series_equal(series, expected)
series = pd.Series([], pd.DatetimeIndex([], freq='D'))
series[key] = 47
expected = pd.Series(47, pd.DatetimeIndex([key], freq='D'))
assert_series_equal(series, expected)
def test_setitem_dtypes(self):
# change dtypes
# GH 4463
expected = Series([np.nan, 2, 3])
s = Series([1, 2, 3])
s.iloc[0] = np.nan
assert_series_equal(s, expected)
s = Series([1, 2, 3])
s.loc[0] = np.nan
assert_series_equal(s, expected)
s = Series([1, 2, 3])
s[0] = np.nan
assert_series_equal(s, expected)
s = Series([False])
s.loc[0] = np.nan
assert_series_equal(s, Series([np.nan]))
s = Series([False, True])
s.loc[0] = np.nan
assert_series_equal(s, Series([np.nan, 1.0]))
def test_set_value(self):
idx = self.ts.index[10]
res = self.ts.set_value(idx, 0)
self.assertIs(res, self.ts)
self.assertEqual(self.ts[idx], 0)
# equiv
s = self.series.copy()
res = s.set_value('foobar', 0)
self.assertIs(res, s)
self.assertEqual(res.index[-1], 'foobar')
self.assertEqual(res['foobar'], 0)
s = self.series.copy()
s.loc['foobar'] = 0
self.assertEqual(s.index[-1], 'foobar')
self.assertEqual(s['foobar'], 0)
def test_setslice(self):
sl = self.ts[5:20]
self.assertEqual(len(sl), len(sl.index))
self.assertTrue(sl.index.is_unique)
def test_basic_getitem_setitem_corner(self):
# invalid tuples, e.g. self.ts[:, None] vs. self.ts[:, 2]
with tm.assertRaisesRegexp(ValueError, 'tuple-index'):
self.ts[:, 2]
with tm.assertRaisesRegexp(ValueError, 'tuple-index'):
self.ts[:, 2] = 2
# weird lists. [slice(0, 5)] will work but not two slices
result = self.ts[[slice(None, 5)]]
expected = self.ts[:5]
assert_series_equal(result, expected)
# OK
self.assertRaises(Exception, self.ts.__getitem__,
[5, slice(None, None)])
self.assertRaises(Exception, self.ts.__setitem__,
[5, slice(None, None)], 2)
def test_basic_getitem_with_labels(self):
indices = self.ts.index[[5, 10, 15]]
result = self.ts[indices]
expected = self.ts.reindex(indices)
assert_series_equal(result, expected)
result = self.ts[indices[0]:indices[2]]
expected = self.ts.ix[indices[0]:indices[2]]
assert_series_equal(result, expected)
# integer indexes, be careful
s = Series(np.random.randn(10), index=lrange(0, 20, 2))
inds = [0, 2, 5, 7, 8]
arr_inds = np.array([0, 2, 5, 7, 8])
result = s[inds]
expected = s.reindex(inds)
assert_series_equal(result, expected)
result = s[arr_inds]
expected = s.reindex(arr_inds)
assert_series_equal(result, expected)
# GH12089
# with tz for values
s = Series(pd.date_range("2011-01-01", periods=3, tz="US/Eastern"),
index=['a', 'b', 'c'])
expected = Timestamp('2011-01-01', tz='US/Eastern')
result = s.loc['a']
self.assertEqual(result, expected)
result = s.iloc[0]
self.assertEqual(result, expected)
result = s['a']
self.assertEqual(result, expected)
def test_basic_setitem_with_labels(self):
indices = self.ts.index[[5, 10, 15]]
cp = self.ts.copy()
exp = self.ts.copy()
cp[indices] = 0
exp.ix[indices] = 0
assert_series_equal(cp, exp)
cp = self.ts.copy()
exp = self.ts.copy()
cp[indices[0]:indices[2]] = 0
exp.ix[indices[0]:indices[2]] = 0
assert_series_equal(cp, exp)
# integer indexes, be careful
s = Series(np.random.randn(10), index=lrange(0, 20, 2))
inds = [0, 4, 6]
arr_inds = np.array([0, 4, 6])
cp = s.copy()
exp = s.copy()
        cp[inds] = 0
        exp.ix[inds] = 0
assert_series_equal(cp, exp)
cp = s.copy()
exp = s.copy()
        cp[arr_inds] = 0
        exp.ix[arr_inds] = 0
assert_series_equal(cp, exp)
inds_notfound = [0, 4, 5, 6]
arr_inds_notfound = np.array([0, 4, 5, 6])
self.assertRaises(Exception, s.__setitem__, inds_notfound, 0)
self.assertRaises(Exception, s.__setitem__, arr_inds_notfound, 0)
# GH12089
# with tz for values
s = Series(pd.date_range("2011-01-01", periods=3, tz="US/Eastern"),
index=['a', 'b', 'c'])
s2 = s.copy()
expected = Timestamp('2011-01-03', tz='US/Eastern')
s2.loc['a'] = expected
result = s2.loc['a']
self.assertEqual(result, expected)
s2 = s.copy()
s2.iloc[0] = expected
result = s2.iloc[0]
self.assertEqual(result, expected)
s2 = s.copy()
s2['a'] = expected
result = s2['a']
self.assertEqual(result, expected)
def test_ix_getitem(self):
inds = self.series.index[[3, 4, 7]]
assert_series_equal(self.series.ix[inds], self.series.reindex(inds))
assert_series_equal(self.series.ix[5::2], self.series[5::2])
# slice with indices
d1, d2 = self.ts.index[[5, 15]]
result = self.ts.ix[d1:d2]
expected = self.ts.truncate(d1, d2)
assert_series_equal(result, expected)
# boolean
mask = self.series > self.series.median()
assert_series_equal(self.series.ix[mask], self.series[mask])
# ask for index value
self.assertEqual(self.ts.ix[d1], self.ts[d1])
self.assertEqual(self.ts.ix[d2], self.ts[d2])
def test_ix_getitem_not_monotonic(self):
d1, d2 = self.ts.index[[5, 15]]
ts2 = self.ts[::2][[1, 2, 0]]
self.assertRaises(KeyError, ts2.ix.__getitem__, slice(d1, d2))
self.assertRaises(KeyError, ts2.ix.__setitem__, slice(d1, d2), 0)
def test_ix_getitem_setitem_integer_slice_keyerrors(self):
s = Series(np.random.randn(10), index=lrange(0, 20, 2))
# this is OK
cp = s.copy()
cp.ix[4:10] = 0
self.assertTrue((cp.ix[4:10] == 0).all())
# so is this
cp = s.copy()
cp.ix[3:11] = 0
self.assertTrue((cp.ix[3:11] == 0).values.all())
result = s.ix[4:10]
result2 = s.ix[3:11]
expected = s.reindex([4, 6, 8, 10])
assert_series_equal(result, expected)
assert_series_equal(result2, expected)
# non-monotonic, raise KeyError
s2 = s.iloc[lrange(5) + lrange(5, 10)[::-1]]
self.assertRaises(KeyError, s2.ix.__getitem__, slice(3, 11))
self.assertRaises(KeyError, s2.ix.__setitem__, slice(3, 11), 0)
def test_ix_getitem_iterator(self):
idx = iter(self.series.index[:10])
result = self.series.ix[idx]
assert_series_equal(result, self.series[:10])
def test_setitem_with_tz(self):
for tz in ['US/Eastern', 'UTC', 'Asia/Tokyo']:
orig = pd.Series(pd.date_range('2016-01-01', freq='H', periods=3,
tz=tz))
self.assertEqual(orig.dtype, 'datetime64[ns, {0}]'.format(tz))
# scalar
s = orig.copy()
s[1] = pd.Timestamp('2011-01-01', tz=tz)
exp = pd.Series([pd.Timestamp('2016-01-01 00:00', tz=tz),
pd.Timestamp('2011-01-01 00:00', tz=tz),
pd.Timestamp('2016-01-01 02:00', tz=tz)])
tm.assert_series_equal(s, exp)
s = orig.copy()
s.loc[1] = pd.Timestamp('2011-01-01', tz=tz)
tm.assert_series_equal(s, exp)
s = orig.copy()
s.iloc[1] = pd.Timestamp('2011-01-01', tz=tz)
tm.assert_series_equal(s, exp)
# vector
vals = pd.Series([pd.Timestamp('2011-01-01', tz=tz),
pd.Timestamp('2012-01-01', tz=tz)], index=[1, 2])
self.assertEqual(vals.dtype, 'datetime64[ns, {0}]'.format(tz))
s[[1, 2]] = vals
exp = pd.Series([pd.Timestamp('2016-01-01 00:00', tz=tz),
pd.Timestamp('2011-01-01 00:00', tz=tz),
pd.Timestamp('2012-01-01 00:00', tz=tz)])
tm.assert_series_equal(s, exp)
s = orig.copy()
s.loc[[1, 2]] = vals
tm.assert_series_equal(s, exp)
s = orig.copy()
s.iloc[[1, 2]] = vals
tm.assert_series_equal(s, exp)
def test_setitem_with_tz_dst(self):
# GH XXX
tz = 'US/Eastern'
orig = pd.Series(pd.date_range('2016-11-06', freq='H', periods=3,
tz=tz))
self.assertEqual(orig.dtype, 'datetime64[ns, {0}]'.format(tz))
# scalar
s = orig.copy()
s[1] = pd.Timestamp('2011-01-01', tz=tz)
exp = pd.Series([pd.Timestamp('2016-11-06 00:00', tz=tz),
pd.Timestamp('2011-01-01 00:00', tz=tz),
pd.Timestamp('2016-11-06 02:00', tz=tz)])
tm.assert_series_equal(s, exp)
s = orig.copy()
s.loc[1] = pd.Timestamp('2011-01-01', tz=tz)
tm.assert_series_equal(s, exp)
s = orig.copy()
s.iloc[1] = pd.Timestamp('2011-01-01', tz=tz)
tm.assert_series_equal(s, exp)
# vector
vals = pd.Series([pd.Timestamp('2011-01-01', tz=tz),
pd.Timestamp('2012-01-01', tz=tz)], index=[1, 2])
self.assertEqual(vals.dtype, 'datetime64[ns, {0}]'.format(tz))
s[[1, 2]] = vals
exp = pd.Series([pd.Timestamp('2016-11-06 00:00', tz=tz),
pd.Timestamp('2011-01-01 00:00', tz=tz),
pd.Timestamp('2012-01-01 00:00', tz=tz)])
tm.assert_series_equal(s, exp)
s = orig.copy()
s.loc[[1, 2]] = vals
tm.assert_series_equal(s, exp)
s = orig.copy()
s.iloc[[1, 2]] = vals
tm.assert_series_equal(s, exp)
def test_where(self):
s = Series(np.random.randn(5))
cond = s > 0
rs = s.where(cond).dropna()
rs2 = s[cond]
assert_series_equal(rs, rs2)
rs = s.where(cond, -s)
assert_series_equal(rs, s.abs())
rs = s.where(cond)
assert (s.shape == rs.shape)
assert (rs is not s)
# test alignment
cond = Series([True, False, False, True, False], index=s.index)
s2 = -(s.abs())
expected = s2[cond].reindex(s2.index[:3]).reindex(s2.index)
rs = s2.where(cond[:3])
assert_series_equal(rs, expected)
expected = s2.abs()
expected.ix[0] = s2[0]
rs = s2.where(cond[:3], -s2)
assert_series_equal(rs, expected)
self.assertRaises(ValueError, s.where, 1)
self.assertRaises(ValueError, s.where, cond[:3].values, -s)
# GH 2745
s = Series([1, 2])
s[[True, False]] = [0, 1]
expected = Series([0, 2])
assert_series_equal(s, expected)
# failures
self.assertRaises(ValueError, s.__setitem__, tuple([[[True, False]]]),
[0, 2, 3])
self.assertRaises(ValueError, s.__setitem__, tuple([[[True, False]]]),
[])
# unsafe dtype changes
for dtype in [np.int8, np.int16, np.int32, np.int64, np.float16,
np.float32, np.float64]:
s = Series(np.arange(10), dtype=dtype)
mask = s < 5
s[mask] = lrange(2, 7)
expected = Series(lrange(2, 7) + lrange(5, 10), dtype=dtype)
assert_series_equal(s, expected)
self.assertEqual(s.dtype, expected.dtype)
# these are allowed operations, but are upcasted
for dtype in [np.int64, np.float64]:
s = Series(np.arange(10), dtype=dtype)
mask = s < 5
values = [2.5, 3.5, 4.5, 5.5, 6.5]
s[mask] = values
expected = Series(values + lrange(5, 10), dtype='float64')
assert_series_equal(s, expected)
self.assertEqual(s.dtype, expected.dtype)
# GH 9731
s = Series(np.arange(10), dtype='int64')
mask = s > 5
values = [2.5, 3.5, 4.5, 5.5]
s[mask] = values
expected = Series(lrange(6) + values, dtype='float64')
assert_series_equal(s, expected)
# can't do these as we are forced to change the itemsize of the input
# to something we cannot
for dtype in [np.int8, np.int16, np.int32, np.float16, np.float32]:
s = Series(np.arange(10), dtype=dtype)
mask = s < 5
values = [2.5, 3.5, 4.5, 5.5, 6.5]
self.assertRaises(Exception, s.__setitem__, tuple(mask), values)
# GH3235
s = Series(np.arange(10), dtype='int64')
mask = s < 5
s[mask] = lrange(2, 7)
expected = Series(lrange(2, 7) + lrange(5, 10), dtype='int64')
assert_series_equal(s, expected)
self.assertEqual(s.dtype, expected.dtype)
s = Series(np.arange(10), dtype='int64')
mask = s > 5
s[mask] = [0] * 4
expected = Series([0, 1, 2, 3, 4, 5] + [0] * 4, dtype='int64')
assert_series_equal(s, expected)
s = Series(np.arange(10))
mask = s > 5
def f():
s[mask] = [5, 4, 3, 2, 1]
self.assertRaises(ValueError, f)
def f():
s[mask] = [0] * 5
self.assertRaises(ValueError, f)
# dtype changes
s = Series([1, 2, 3, 4])
result = s.where(s > 2, np.nan)
expected = Series([np.nan, np.nan, 3, 4])
assert_series_equal(result, expected)
# GH 4667
# setting with None changes dtype
s = Series(range(10)).astype(float)
s[8] = None
result = s[8]
self.assertTrue(isnull(result))
s = Series(range(10)).astype(float)
s[s > 8] = None
result = s[isnull(s)]
expected = Series(np.nan, index=[9])
assert_series_equal(result, expected)
def test_where_setitem_invalid(self):
# GH 2702
# make sure correct exceptions are raised on invalid list assignment
# slice
s = Series(list('abc'))
def f():
s[0:3] = list(range(27))
self.assertRaises(ValueError, f)
s[0:3] = list(range(3))
expected = Series([0, 1, 2])
assert_series_equal(s.astype(np.int64), expected)
# slice with step
s = Series(list('abcdef'))
def f():
s[0:4:2] = list(range(27))
self.assertRaises(ValueError, f)
s = Series(list('abcdef'))
s[0:4:2] = list(range(2))
expected = Series([0, 'b', 1, 'd', 'e', 'f'])
assert_series_equal(s, expected)
# neg slices
s = Series(list('abcdef'))
def f():
s[:-1] = list(range(27))
self.assertRaises(ValueError, f)
s[-3:-1] = list(range(2))
expected = Series(['a', 'b', 'c', 0, 1, 'f'])
assert_series_equal(s, expected)
# list
s = Series(list('abc'))
def f():
s[[0, 1, 2]] = list(range(27))
self.assertRaises(ValueError, f)
s = Series(list('abc'))
def f():
s[[0, 1, 2]] = list(range(2))
self.assertRaises(ValueError, f)
# scalar
s = Series(list('abc'))
s[0] = list(range(10))
expected = Series([list(range(10)), 'b', 'c'])
assert_series_equal(s, expected)
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Race-car Data Creation Class.
This script contains all utilities to create proper dataset.
Revision History:
2020-05-10 (Animesh): Baseline Software.
2020-08-22 (Animesh): Updated Docstring.
Example:
from _data_handler import DataHandler
"""
#___Import Modules:
import os
import random
import pandas as pd
import matplotlib.pyplot as plt
from rc_nn_utility import ParseData
#___Global Variables:
SEED = 717
#__Classes:
class DataHandler:
"""Data Creation Utility Class.
This class contains all methods to create datasets, such as a randomly
shuffled dataset or a 5-fold cross-validation dataset.
"""
def __init__(self):
"""Constructor.
"""
pass
def merge_all(self, idir, output):
"""File Merger.
This method merges contents from multiple csv files.
Args:
idir (directory path): Directory path containing all csv files.
output (csv file): File containing all contents.
Returns:
None.
"""
# read all files from provided folder
files = os.listdir(idir)
content = []
for ifile in files:
# collect contents from files in provided folder
if ifile[-4:] == ".csv":
content.extend(pd.read_csv(os.path.join(idir, \
ifile))['image'].to_list())
# write merged contents to output file
pd.DataFrame(content, columns =['image']).to_csv(output, index=False)
return None
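# Minimal usage sketch for merge_all (the paths below are hypothetical, not
# part of the original module):
#
#   handler = DataHandler()
#   handler.merge_all(idir="data/runs/", output="data/all_images.csv")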
def list_merge(self, lists):
"""List Merger.
This method merges contents from multiple lists.
Args:
lists (list): List of multiple lists to merge.
Returns:
data (list): Merged list.
"""
# loop over lists and put them all in one list
data = []
for list in lists:
data.extend(list)
return data
def refine_running(self, input, output, speed = 15):
"""Refine Running.
This method removes data with provided motor value from a list.
Args:
input (csv file): File containing contents to refine.
output (csv file): File containing refined contents.
speed (int): Motor value to be removed.
"""
parsedata = ParseData()
# read file contents
data = pd.read_csv(input)
file = []
for index in range(len(data)):
# parse motor data to verify speed
_,_,mot = parsedata.parse_data(data["image"][index])
# append data if the car is running
if mot != speed:
file.append(data["image"][index])
# write merged contents to output file
pd.DataFrame(file, columns=["image"]).to_csv(output, index=False)
return None
def histogram(self, ilist, odir):
"""Plot Histogram.
This method plots histogram from servo and motor value parsed from a
list of images.
Args:
ilist (csv file): File containing list of images.
odir (directory path): Output directory.
"""
parsedata = ParseData()
# read file contents
data = pd.read_csv(ilist)
servo = []
motor = []
for index in range(len(data)):
# parse servo and motor data
_,ser,mot = parsedata.parse_data(data["image"][index])
servo.append(ser)
motor.append(mot)
# plot histogram of servo data
plt.figure()
plt.hist(servo, bins=11)
plt.title("Servo Data Histogram")
plt.savefig(os.path.join(odir,"Servo Data Histogram.png"))
# plot histogram of motor data
plt.figure()
plt.hist(motor, bins=11)
plt.title("Motor Data Histogram")
plt.savefig(os.path.join(odir,"Motor Data Histogram.png"))
return None
def devide_data(self, ilist, odir):
"""Dataset Devider.
This method devides dataset according to servo value.
Args:
ilist (csv file): File containing list of images.
odir (directory path): Output directory.
"""
parsedata = ParseData()
# read file contents
data = pd.read_csv(ilist)
data_10 = []
data_11 = []
data_12 = []
data_13 = []
data_14 = []
data_15 = []
data_16 = []
data_17 = []
data_18 = []
data_19 = []
data_20 = []
for index in range(len(data)):
# parse servo and motor data
_,servo,_ = parsedata.parse_data(data["image"][index])
# divide dataset by servo value
if servo == 10:
data_10.append(data["image"][index])
elif servo == 11:
data_11.append(data["image"][index])
elif servo == 12:
data_12.append(data["image"][index])
elif servo == 13:
data_13.append(data["image"][index])
elif servo == 14:
data_14.append(data["image"][index])
elif servo == 15:
data_15.append(data["image"][index])
elif servo == 16:
data_16.append(data["image"][index])
elif servo == 17:
data_17.append(data["image"][index])
elif servo == 18:
data_18.append(data["image"][index])
elif servo == 19:
data_19.append(data["image"][index])
elif servo == 20:
data_20.append(data["image"][index])
# write data
pd.DataFrame(data_10, columns=["image"]).to_csv(os.path.join(odir, \
"servo_10.csv"), index=False)
pd.DataFrame(data_11, columns=["image"]).to_csv(os.path.join(odir, \
"servo_11.csv"), index=False)
pd.DataFrame(data_12, columns=["image"]).to_csv(os.path.join(odir, \
"servo_12.csv"), index=False)
pd.DataFrame(data_13, columns=["image"]).to_csv(os.path.join(odir, \
"servo_13.csv"), index=False)
pd.DataFrame(data_14, columns=["image"]).to_csv(os.path.join(odir, \
"servo_14.csv"), index=False)
pd.DataFrame(data_15, columns=["image"]).to_csv(os.path.join(odir, \
"servo_15.csv"), index=False)
pd.DataFrame(data_16, columns=["image"]).to_csv(os.path.join(odir, \
"servo_16.csv"), index=False)
pd.DataFrame(data_17, columns=["image"]).to_csv(os.path.join(odir, \
"servo_17.csv"), index=False)
pd.DataFrame(data_18, columns=["image"]).to_csv(os.path.join(odir, \
"servo_18.csv"), index=False)
pd.DataFrame(data_19, columns=["image"]).to_csv(os.path.join(odir, \
"servo_19.csv"), index=False)
pd.DataFrame(data_20, columns=["image"]).to_csv(os.path.join(odir, \
"servo_20.csv"), index=False)
return None
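# Usage sketch for devide_data (hypothetical paths): writes one CSV per servo
# value (servo_10.csv ... servo_20.csv) under the given output directory.
#
#   handler = DataHandler()
#   handler.devide_data(ilist="data/all_images.csv", odir="data/by_servo")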
def train_test_dev(self, type, idir, odir, ratio=None, total=None):
"""Final Dataset Creator.
This method creates train, test and dev dataset.
Args:
type (string): Determines the type of input dataset
idir (directory path): Directory containing input CSV files.
odir (directory path): Output directory.
ratio (list): List containing ratio of train, test and dev dataset.
total (list): List containing the number of total data to be parsed
from each CSV file.
"""
if type == "random":
self.random(idir, odir, ratio)
elif type == "folded":
self.folded(idir, odir)
elif type == "controlled":
self.controlled(idir, odir, ratio, total)
return None
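# Usage sketch for train_test_dev (paths and ratio are illustrative
# assumptions; odir must end with a separator because it is concatenated
# directly with the output file names):
#
#   handler = DataHandler()
#   handler.train_test_dev(type="random", idir="data/lists/",
#                          odir="data/sets/", ratio=[0.7, 0.2, 0.1])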
def random(self, idir, odir, ratio):
"""Randomly Shuffled Dataset Creator.
This method creates a randomly shuffled train, test and dev dataset.
Args:
idir (directory path): Directory containing input CSV files.
odir (directory path): Output directory.
ratio (list): List containing ratio of train, test and dev dataset.
"""
# read all files from provided folder
files = os.listdir(idir)
content = []
for ifile in files:
# collect contents from files in provided folder
if ifile[-4:] == ".csv":
content.extend(pd.read_csv(os.path.join(idir, \
ifile))['image'].to_list())
# randomly shuffle dataset
random.shuffle(content)
# divide dataset into train, test, dev sets according to the given ratio
train = content[0:int(ratio[0]*len(content))]
test = content[int(ratio[0]*len(content)):
int((ratio[0]+ratio[1])*len(content))]
dev = content[int((ratio[0]+ratio[1])*len(content)):]
# write data
pd.DataFrame(train, columns=["image"]).to_csv(odir + 'train.csv',
index=False)
pd.DataFrame(test, columns=["image"]).to_csv(odir + 'test.csv',
index=False)
pd.DataFrame(dev, columns=["image"]).to_csv(odir + 'dev.csv',
index=False)
return None
def folded(self, idir, odir):
"""5 Fold Cross-Validation Dataset Creator.
This method creates 5 fold cross validation dataset.
Args:
idir (directory path): Directory containing input CSV files.
odir (directory path): Output directory.
"""
# read all files from provided folder
files = os.listdir(idir)
D10 = []
D11 = []
D20 = []
D21 = []
D30 = []
D31 = []
D40 = []
D41 = []
D50 = []
D51 = []
for ifile in files:
# collect contents from files in provided folder
if ifile[-4:] == ".csv":
data = pd.read_csv(idir + ifile)
D10.extend(data['image'][0:int(len(data)/10)])
D11.extend(data['image'][int(len(data)/10):2*int(len(data)/10)])
D20.extend(data['image'][2*int(len(data)/10):3*int(len(data)/10)])
D21.extend(data['image'][3*int(len(data)/10):4*int(len(data)/10)])
D30.extend(data['image'][4*int(len(data)/10):5*int(len(data)/10)])
D31.extend(data['image'][5*int(len(data)/10):6*int(len(data)/10)])
D40.extend(data['image'][6*int(len(data)/10):7*int(len(data)/10)])
D41.extend(data['image'][7*int(len(data)/10):8*int(len(data)/10)])
D50.extend(data['image'][8*int(len(data)/10):9*int(len(data)/10)])
D51.extend(data['image'][9*int(len(data)/10):])
# create 5 folds of train, test and dev dataset
train1 = self.list_merge([D10,D11,D20,D21,D30,D31,D40,D41])
train2 = self.list_merge([D20,D21,D30,D31,D40,D41,D50,D51])
train3 = self.list_merge([D10,D11,D30,D31,D40,D41,D50,D51])
train4 = self.list_merge([D10,D11,D20,D21,D40,D41,D50,D51])
train5 = self.list_merge([D10,D11,D20,D21,D30,D31,D50,D51])
test1 = D50
test2 = D10
test3 = D20
test4 = D30
test5 = D40
dev1 = D51
dev2 = D11
dev3 = D21
dev4 = D31
dev5 = D41
# create required directories
if not os.path.exists(odir + 'fold1/'):
os.mkdir(odir + 'fold1/')
if not os.path.exists(odir + 'fold2/'):
os.mkdir(odir + 'fold2/')
if not os.path.exists(odir + 'fold3/'):
os.mkdir(odir + 'fold3/')
if not os.path.exists(odir + 'fold4/'):
os.mkdir(odir + 'fold4/')
if not os.path.exists(odir + 'fold5/'):
os.mkdir(odir + 'fold5/')
# write data
pd.DataFrame(train1,columns=["image"]).to_csv(odir + 'fold1/train.csv',
index=False)
pd.DataFrame(train2,columns=["image"]).to_csv(odir + 'fold2/train.csv',
index=False)
pd.DataFrame(train3,columns=["image"]).to_csv(odir + 'fold3/train.csv',
index=False)
pd.DataFrame(train4,columns=["image"]).to_csv(odir + 'fold4/train.csv',
index=False)
pd.DataFrame(train5,columns=["image"]).to_csv(odir + 'fold5/train.csv',
index=False)
pd.DataFrame(test1,columns=["image"]).to_csv(odir + 'fold1/test.csv',
index=False)
pd.DataFrame(test2,columns=["image"]).to_csv(odir + 'fold2/test.csv',
index=False)
pd.DataFrame(test3,columns=["image"]).to_csv(odir + 'fold3/test.csv',
index=False)
pd.DataFrame(test4,columns=["image"]).to_csv(odir + 'fold4/test.csv',
index=False)
pd.DataFrame(test5,columns=["image"]).to_csv(odir + 'fold5/test.csv',
index=False)
pd.DataFrame(dev1,columns=["image"]).to_csv(odir + 'fold1/dev.csv',
index=False)
pd.DataFrame(dev2,columns=["image"]).to_csv(odir + 'fold2/dev.csv',
index=False)
# created by <NAME> <EMAIL>
import os
import logging
# import pkg_resources
from datetime import datetime
import attr
import pandas as pd
import numpy as np
from BuildingControlsSimulator.DataClients.DataStates import (
UNITS,
CHANNELS,
STATES,
)
from BuildingControlsSimulator.DataClients.DataSpec import (
Internal,
convert_spec,
)
from BuildingControlsSimulator.DataClients.DateTimeChannel import (
DateTimeChannel,
)
from BuildingControlsSimulator.DataClients.ThermostatChannel import (
ThermostatChannel,
)
from BuildingControlsSimulator.DataClients.EquipmentChannel import (
EquipmentChannel,
)
from BuildingControlsSimulator.DataClients.SensorsChannel import SensorsChannel
from BuildingControlsSimulator.DataClients.WeatherChannel import WeatherChannel
from BuildingControlsSimulator.DataClients.DataSource import DataSource
from BuildingControlsSimulator.DataClients.DataDestination import (
DataDestination,
)
logger = logging.getLogger(__name__)
@attr.s(kw_only=True)
class DataClient:
# data channels
thermostat = attr.ib(default=None)
equipment = attr.ib(default=None)
sensors = attr.ib(default=None)
weather = attr.ib(default=None)
datetime = attr.ib(default=None)
full_data_periods = attr.ib(factory=list)
# input variables
source = attr.ib(validator=attr.validators.instance_of(DataSource))
destination = attr.ib(validator=attr.validators.instance_of(DataDestination))
nrel_dev_api_key = attr.ib(default=None)
nrel_dev_email = attr.ib(default=None)
archive_tmy3_dir = attr.ib(default=os.environ.get("ARCHIVE_TMY3_DIR"))
archive_tmy3_meta = attr.ib(default=None)
archive_tmy3_data_dir = attr.ib(default=os.environ.get("ARCHIVE_TMY3_DATA_DIR"))
ep_tmy3_cache_dir = attr.ib(default=os.environ.get("EP_TMY3_CACHE_DIR"))
simulation_epw_dir = attr.ib(default=os.environ.get("SIMULATION_EPW_DIR"))
weather_dir = attr.ib(default=os.environ.get("WEATHER_DIR"))
# state variables
sim_config = attr.ib(default=None)
start_utc = attr.ib(default=None)
end_utc = attr.ib(default=None)
eplus_fill_to_day_seconds = attr.ib(default=None)
eplus_warmup_seconds = attr.ib(default=None)
internal_spec = attr.ib(factory=Internal)
def __attrs_post_init__(self):
# first, post init class specification
self.make_data_directories()
def make_data_directories(self):
os.makedirs(self.weather_dir, exist_ok=True)
os.makedirs(self.archive_tmy3_data_dir, exist_ok=True)
os.makedirs(self.ep_tmy3_cache_dir, exist_ok=True)
os.makedirs(self.simulation_epw_dir, exist_ok=True)
if self.source.local_cache:
os.makedirs(
os.path.join(
self.source.local_cache,
self.source.operator_name,
self.source.source_name,
),
exist_ok=True,
)
if self.destination.local_cache:
os.makedirs(
os.path.join(
self.destination.local_cache,
self.destination.operator_name,
),
exist_ok=True,
)
def get_data(self):
# check for invalid start/end combination
if self.sim_config["end_utc"] <= self.sim_config["start_utc"]:
raise ValueError("sim_config contains invalid start_utc >= end_utc.")
# load from cache or download data from source
_data = self.source.get_data(self.sim_config)
if _data.empty:
logger.error(
"EMPTY DATA SOURCE: \nsim_config={} \nsource={}\n".format(
self.sim_config, self.source
)
)
_data = self.internal_spec.get_empty_df()
# remove any fully duplicated records
_data = _data.drop_duplicates(ignore_index=True)
# remove multiple records for same datetime
# there may also be multiple entries for same exact datetime in ISM
# in this case keep the record that has the most combined runtime
# because in observed cases of this the extra record has 0 runtime.
_runtime_sum_column = "sum_runtime"
_data[_runtime_sum_column] = _data[
set(self.internal_spec.equipment.spec.keys()) & set(_data.columns)
].sum(axis=1)
# last duplicate datetime value will have maximum sum_runtime
_data = _data.sort_values(
[self.internal_spec.datetime_column, _runtime_sum_column],
ascending=True,
)
_data = _data.drop_duplicates(
subset=[STATES.DATE_TIME], keep="last", ignore_index=True
)
_data = _data.drop(columns=[_runtime_sum_column])
# truncate the data to desired simulation start and end time
_data = _data[
(_data[self.internal_spec.datetime_column] >= self.sim_config["start_utc"])
& (_data[self.internal_spec.datetime_column] <= self.sim_config["end_utc"])
].reset_index(drop=True)
# remove unused categories from categorical columns after date range
# for simulation is selected
for _cat_col in [
_col
for _col in _data.columns
if isinstance(_data[_col].dtype, pd.api.types.CategoricalDtype)
]:
_data[_cat_col].cat = _data[_cat_col].cat.remove_unused_categories()
# run settings change point detection before filling missing data
# the fill data would create false positive change points
# the change points can also be used to correctly fill the schedule
# and comfort preferences
(
_change_points_schedule,
_change_points_comfort_prefs,
_change_points_hvac_mode,
) = ThermostatChannel.get_settings_change_points(
_data, self.internal_spec.data_period_seconds
)
_expected_period = f"{self.internal_spec.data_period_seconds}S"
# ffill first 15 minutes of missing data periods
_data = DataClient.fill_missing_data(
full_data=_data,
expected_period=_expected_period,
data_spec=self.internal_spec,
)
# compute full_data_periods with only first 15 minutes ffilled
self.full_data_periods = DataClient.get_full_data_periods(
full_data=_data,
data_spec=self.internal_spec,
expected_period=_expected_period,
min_sim_period=self.sim_config["min_sim_period"],
)
# need time zone before init of DatetimeChannel
internal_timezone = DateTimeChannel.get_timezone(
self.sim_config["latitude"], self.sim_config["longitude"]
)
# there will be filled data even if there are no full_data_periods
# the fill data is present to run continuous simulations smoothly
# in the presence of potentially many missing data periods
if self.full_data_periods:
# the simulation period must be full days starting at 0 hour to use
# SimulationControl: Run Simulation for Weather File Run Periods
_start_utc, _end_utc = self.get_simulation_period(
expected_period=_expected_period,
internal_timezone=internal_timezone,
)
# add records for warmup period
_data = DataClient.add_fill_records(
df=_data,
data_spec=self.internal_spec,
start_utc=_start_utc,
end_utc=_end_utc,
expected_period=_expected_period,
)
# drop records before and after full simulation time
# end is less than
_data = _data[
(_data[self.internal_spec.datetime_column] >= _start_utc)
& (_data[self.internal_spec.datetime_column] <= _end_utc)
].reset_index(drop=True)
# bfill to interpolate missing data
# first and last records must be full because we used full data periods
# need to add a NA_code to stop fillna from clobbering columns
# where NA means something
na_code_name = "NA_code"
_data[STATES.CALENDAR_EVENT].cat.add_categories(
new_categories=na_code_name, inplace=True
)
_data[STATES.CALENDAR_EVENT] = _data[STATES.CALENDAR_EVENT].fillna(
na_code_name
)
# bfill then ffill to handle where no data after null
_data = _data.fillna(method="bfill", limit=None)
_data = _data.fillna(method="ffill", limit=None)
_data = DataClient.resample_to_step_size(
df=_data,
step_size_seconds=self.sim_config["sim_step_size_seconds"],
data_spec=self.internal_spec,
)
# we can replace na_code_name now that filling is complete
_data.loc[
_data[STATES.CALENDAR_EVENT] == na_code_name,
[STATES.CALENDAR_EVENT],
] = pd.NA
# finally convert dtypes to final types now that nulls in
# non-nullable columns have been properly filled or removed
_data = convert_spec(
_data,
src_spec=self.internal_spec,
dest_spec=self.internal_spec,
src_nullable=True,
dest_nullable=False
)
else:
raise ValueError(
f"ID={self.sim_config['identifier']} has no full_data_periods "
+ "for requested duration: "
+ f"start_utc={self.sim_config['start_utc']}, "
+ f"end_utc={self.sim_config['end_utc']} "
+ f"with min_sim_period={self.sim_config['min_sim_period']}"
)
self.datetime = DateTimeChannel(
data=_data[
self.internal_spec.intersect_columns(
_data.columns, self.internal_spec.datetime.spec
)
],
spec=self.internal_spec.datetime,
latitude=self.sim_config["latitude"],
longitude=self.sim_config["longitude"],
internal_timezone=internal_timezone,
)
# finally create the data channel objs for usage during simulation
self.thermostat = ThermostatChannel(
data=_data[
self.internal_spec.intersect_columns(
_data.columns, self.internal_spec.thermostat.spec
)
],
spec=self.internal_spec.thermostat,
change_points_schedule=_change_points_schedule,
change_points_comfort_prefs=_change_points_comfort_prefs,
change_points_hvac_mode=_change_points_hvac_mode,
)
self.equipment = EquipmentChannel(
data=_data[
self.internal_spec.intersect_columns(
_data.columns, self.internal_spec.equipment.spec
)
],
spec=self.internal_spec.equipment,
)
self.sensors = SensorsChannel(
data=_data[
self.internal_spec.intersect_columns(
_data.columns, self.internal_spec.sensors.spec
)
],
spec=self.internal_spec.sensors,
)
self.sensors.drop_unused_room_sensors()
self.weather = WeatherChannel(
data=_data[
self.internal_spec.intersect_columns(
_data.columns, self.internal_spec.weather.spec
)
],
spec=self.internal_spec.weather,
archive_tmy3_dir=self.archive_tmy3_dir,
archive_tmy3_data_dir=self.archive_tmy3_data_dir,
ep_tmy3_cache_dir=self.ep_tmy3_cache_dir,
simulation_epw_dir=self.simulation_epw_dir,
)
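# Minimal usage sketch (the source/destination objects and the sim_config
# values are assumptions, not taken from this module): after construction,
# get_data() populates the datetime, thermostat, equipment, sensors and
# weather channel objects used by the simulation.
#
#   dc = DataClient(source=my_source, destination=my_destination)
#   dc.sim_config = {
#       "identifier": "example", "latitude": 43.7, "longitude": -79.4,
#       "start_utc": pd.Timestamp("2019-01-01", tz="UTC"),
#       "end_utc": pd.Timestamp("2019-02-01", tz="UTC"),
#       "min_sim_period": "7D", "sim_step_size_seconds": 300,
#   }
#   dc.get_data()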
def get_simulation_period(self, expected_period, internal_timezone):
# set start and end times from full_data_periods and simulation config
# take limiting period as start_utc and end_utc
if not self.full_data_periods:
self.start_utc = None
self.end_utc = None
return self.start_utc, self.end_utc
if self.sim_config["start_utc"] >= self.full_data_periods[0][0]:
self.start_utc = self.sim_config["start_utc"]
else:
logger.info(
f"config start_utc={self.sim_config['start_utc']} is before "
+ f"first full data period={self.full_data_periods[0][0]}. "
+ "Simulation start_utc set to first full data period."
)
self.start_utc = self.full_data_periods[0][0]
if self.sim_config["end_utc"] <= self.full_data_periods[-1][-1]:
self.end_utc = self.sim_config["end_utc"]
else:
logger.info(
f"config end_utc={self.sim_config['end_utc']} is after "
+ f"last full data period={self.full_data_periods[-1][-1]}. "
+ "Simulation end_utc set to last full data period."
)
self.end_utc = self.full_data_periods[-1][-1]
if self.end_utc < self.start_utc:
raise ValueError(
f"end_utc={self.end_utc} before start_utc={self.start_utc}.\n"
+ f"Set sim_config start_utc and end_utc within "
+ f"full_data_period: {self.full_data_periods[0][0]} to "
+ f"{self.full_data_periods[-1][-1]}"
)
# fill an additional day before the simulation and up to the end of day at the end of the simulation
(self.start_utc, self.end_utc,) = DataClient.eplus_day_fill_simulation_time(
start_utc=self.start_utc,
end_utc=self.end_utc,
expected_period=expected_period,
internal_timezone=internal_timezone,
)
return self.start_utc, self.end_utc
def store_output(self, output, sim_name, src_spec):
self.destination.put_data(df=output, sim_name=sim_name, src_spec=src_spec)
def store_input(
self,
filepath_or_buffer,
df_input=None,
src_spec=None,
dest_spec=None,
file_extension=None,
):
"""For usage capturing input data for unit tests."""
if not df_input:
df_input = self.get_full_input()
if not src_spec:
src_spec = self.internal_spec
if not dest_spec:
dest_spec = self.destination.data_spec
if not file_extension:
file_extension = self.destination.file_extension
_df = convert_spec(
df=df_input, src_spec=src_spec, dest_spec=dest_spec, copy=True
)
self.destination.write_data_by_extension(
_df,
filepath_or_buffer,
data_spec=dest_spec,
file_extension=file_extension,
)
@staticmethod
def add_fill_records(df, data_spec, start_utc, end_utc, expected_period):
if not (start_utc and end_utc):
return df
rec = pd.Series(pd.NA, index=df.columns)
should_resample = False
if df[(df[data_spec.datetime_column] == start_utc)].empty:
# append record with start_utc time
rec[data_spec.datetime_column] = start_utc
df = df.append(rec, ignore_index=True).sort_values(
data_spec.datetime_column
)
should_resample = True
if df[(df[data_spec.datetime_column] == end_utc)].empty:
# append record with end_utc time
rec[data_spec.datetime_column] = end_utc
df = df.append(rec, ignore_index=True).sort_values(
data_spec.datetime_column
)
should_resample = True
if should_resample:
# frequency rules have different str format
_str_format_dict = {
"M": "T", # covert minutes formats
"S": "S",
}
# replace last char using format conversion dict
resample_freq = (
expected_period[0:-1] + _str_format_dict[expected_period[-1]]
)
# resampling
df = df.set_index(data_spec.datetime_column)
df = df.resample(resample_freq).asfreq()
df = df.reset_index()
# adding a null record breaks categorical dtypes
# convert back to categories
for state in df.columns:
if data_spec.full.spec[state]["dtype"] == "category":
df[state] = df[state].astype("category")
return df
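# Frequency-string conversion example (values assumed): an expected_period of
# "5M" maps to the pandas resample rule "5T", while "300S" stays "300S".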
@staticmethod
def eplus_day_fill_simulation_time(
start_utc, end_utc, expected_period, internal_timezone
):
# EPlus requires that total simulation time be divisible by 86400 seconds
# or whole days. EPlus also has some transient behaviour at t_init;
# adding time to the beginning of the simulation input data that will be
# backfilled is more desirable than adding time to the end of the simulation
# this time will not be included in the full_data_periods and thus
# will not be considered during analysis
# fill extra day before simulation and up to end of day at end of simulation
# the added timedelta is the difference to whole days minus one period
# this period can be considered 23:55 to 00:00
# EnergyPlus will be initialized for this extra period but not simulated
# a date 10 days into the year is used for the offset because it won't cross
# a DST or year boundary under any circumstances
tz_offset_seconds = internal_timezone.utcoffset(
datetime(start_utc.year, 1, 10)
).total_seconds()
filled_start_utc = start_utc - pd.Timedelta(
days=1,
hours=start_utc.hour,
minutes=start_utc.minute,
seconds=start_utc.second + tz_offset_seconds,
)
filled_end_utc = end_utc
return filled_start_utc, filled_end_utc
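# Worked example (illustrative values): with start_utc = 2019-01-15 10:20:00+00:00
# and internal_timezone = US/Eastern, the offset taken on Jan 10 is -5 h
# (-18000 s), so the subtracted Timedelta is days=1, hours=10, minutes=20,
# seconds=-18000, giving filled_start_utc = 2019-01-14 05:00:00+00:00, i.e.
# local midnight one calendar day before the requested start.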
@staticmethod
def get_full_data_periods(
full_data, data_spec, expected_period="300S", min_sim_period="7D"
):
"""Get full data periods. These are the periods for which there is data
on all channels. Preliminary forward filling of the data is used to
fill small periods of missing data where padding values is advantageous;
for example, the majority of missing data periods are less than 15 minutes
(3 message intervals).
The remaining missing data is back filled after the full_data_periods are
computed to allow the simulations to run continuously. Back fill is used
because set point changes during the missing data period should be
assumed to be not in tracking mode and in regulation mode after greater
than
"""
if full_data.empty:
return []
# compute time deltas between records
diffs = full_data.dropna(axis="rows", subset=data_spec.full.null_check_columns)[
data_spec.datetime_column
].diff()
# separate periods by missing data
periods_df = diffs[diffs > pd.to_timedelta(expected_period)].reset_index()
# make df of periods
periods_df["start"] = full_data.loc[
periods_df["index"], data_spec.datetime_column
].reset_index(drop=True)
periods_df["end"] = periods_df["start"] - periods_df[1]
periods_df = periods_df.drop(axis="columns", columns=["index", 1])
# append start and end datetimes from full_data
periods_df.loc[len(periods_df)] = [
pd.NA,
full_data.loc[len(full_data) - 1, data_spec.datetime_column],
]
periods_df["start"] = periods_df["start"].shift(1)
periods_df.loc[0, "start"] = full_data.loc[0, data_spec.datetime_column]
# only include full_data_periods that are geq min_sim_period
# convert all np.arrays to lists for ease of use
_full_data_periods = [
list(rec)
for rec in periods_df[
periods_df["end"] - periods_df["start"] >= pd.Timedelta(min_sim_period)
].to_numpy()
]
return _full_data_periods
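# Illustrative return shape (assumed timestamps): one [start, end] pair per
# contiguous stretch of data at least min_sim_period long, e.g.
#   [[Timestamp("2019-01-01 00:00+00:00"), Timestamp("2019-01-10 12:00+00:00")],
#    [Timestamp("2019-01-11 03:00+00:00"), Timestamp("2019-02-01 00:00+00:00")]]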
@staticmethod
def fill_missing_data(
full_data,
data_spec,
expected_period,
limit=3,
method="ffill",
):
"""Fill periods of missing data within limit using method.
Periods larger than limit will not be partially filled."""
if full_data.empty:
return full_data
# frequency rules have different str format
_str_format_dict = {
"M": "T", # covert minutes formats
"S": "S",
}
# replace last char using format conversion dict
resample_freq = expected_period[0:-1] + _str_format_dict[expected_period[-1]]
# resample to add any timesteps that are fully missing
full_data = full_data.set_index(data_spec.datetime_column)
full_data = full_data.resample(resample_freq).asfreq()
full_data = full_data.reset_index()
# compute timesteps between steps of data
diffs = full_data.dropna(axis="rows", subset=data_spec.full.null_check_columns)[
data_spec.datetime_column
].diff()
fill_start_df = (
(
diffs[
(diffs > pd.to_timedelta(expected_period))
& (diffs <= pd.to_timedelta(expected_period) * limit)
]
/ pd.Timedelta(expected_period)
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sun May 23 22:05:18 2021
@author: rayin
"""
import os, sys
import numpy as np
import pandas as pd
import torch
import warnings
import torchvision.models as models
import matplotlib.pyplot as plt
import shap
import seaborn as sns
from matplotlib import rcParams
from model import draw_roc_curve
from model import patient_variable_influence
from model import draw_roc_train_test_split
from model import randomforest_base
from model import xgboost_base
from model import draw_roc_syn_test
from model import draw_roc_curve
from feature_data_imputation import data_imputation
from sklearn import preprocessing
from sklearn.model_selection import train_test_split
from sklearn import ensemble
from sklearn.inspection import permutation_importance
from sklearn.model_selection import StratifiedKFold
from sklearn.metrics import accuracy_score
from sklearn.metrics import precision_score
from sklearn.metrics import recall_score
from sklearn.metrics import f1_score
from sklearn.metrics import roc_auc_score
from sklearn.metrics import matthews_corrcoef
warnings.filterwarnings("ignore")
os.chdir("/Users/rayin/Google Drive/Harvard/5_data/UDN/work")
sys.path.append(os.path.abspath("/Users/rayin/Google Drive/Harvard/5_data/UDN/work/code/"))
# import all the available gene data
case_gene_update = pd.read_csv('data/processed/variant_clean.csv', index_col=0)
case_gene_update['\\12_Candidate variants\\03 Interpretation\\'].replace('pathogenic', 1, inplace=True)
case_gene_update['\\12_Candidate variants\\03 Interpretation\\'].replace('less_pathogenic', 0, inplace=True)
label = case_gene_update['\\12_Candidate variants\\03 Interpretation\\'].reset_index()
label = label[label.columns[1]]
feature = pd.read_csv('data/feature/feature.csv', index_col=0)
feature_imputation = data_imputation(feature, 'MICE')
patient_level_variable_onehot = pd.read_csv('data/feature/patient_level_variable_onehot.csv', index_col=0)
syn_test_sample = pd.read_csv('data/synthetic/syn_test.csv', index_col=0)
##################################################################################################################################
# #feature correlation heatmap
# plt.figure(figsize=(16, 16))
# # Store heatmap object in a variable to easily access it when you want to include more features (such as title).
# # Set the range of values to be displayed on the colormap from -1 to 1, and set the annotation to True to display the correlation values on the heatmap.
# heatmap = sns.heatmap(feature.corr(), vmin=-1, vmax=1, annot=False)
# # Give a title to the heatmap. Pad defines the distance of the title from the top of the heatmap.
# heatmap.set_xticklabels(
# heatmap.get_xticklabels(),
# rotation=45,
# horizontalalignment='right')
# heatmap.set_title('Correlation Heatmap', fontdict={'fontsize':12}, pad=12)
# plt.savefig("result/figure/heatmap.png", bbox_inches='tight', dpi=600)
##################################################################################################################################
#interpretation: shap values
np.random.seed(0)
train_x, test_x, train_y, test_y = train_test_split(feature_imputation, label, test_size = 0.2)
model = ensemble.RandomForestClassifier(n_estimators=340, max_depth=5, max_features=7, min_samples_split=30,
min_samples_leaf=10, random_state=42)
model.fit(train_x, train_y)
shap_values = shap.TreeExplainer(model).shap_values(test_x)
shap.plots.force(shap_values[1])
#shap.summary_plot(shap_values, train_x, plot_type="bar")
f = plt.figure(figsize=(16, 8))
shap.summary_plot(shap_values[1], test_x)
#f.savefig("result/figure/shap.eps", bbox_inches='tight', dpi=600)
#variable importance bar plot (shape and random forest)
shap.summary_plot(shap_values[0], test_x, plot_type="bar", max_display=20)
fig = plt.figure(figsize=(12, 16))
feature_names = feature.columns.values
# sorted_idx = model.feature_importances_.argsort()
# plt.barh(feature_names[sorted_idx], model.feature_importances_[sorted_idx], color='g')
# fig.xlabel("Random Forest Feature Importance")
#random forest model
result = permutation_importance(
model, test_x, test_y, random_state=42, n_jobs=1)
result_mean = result.importances_mean
sorted_idx = np.argsort(result_mean, axis=-1)
plt.barh(feature_names[sorted_idx[18:38]], result_mean[sorted_idx[18:38]], color='g')
plt.xlabel("Random Forest Feature Importance", size=20)
#fig.set_xlabel(fontsize=15)
plt.tick_params(labelsize=20)
plt.xlim(0, 0.04)
#shap interaction values
f = plt.figure(figsize=(20, 20))
shap_interaction_values = shap.TreeExplainer(model).shap_interaction_values(test_x)
shap.summary_plot(shap_interaction_values[0], test_x, max_display=20)
f.savefig("result/figure/shap_interaction.png", bbox_inches='tight', dpi=100)
#independence
shap.dependence_plot("number of phenotypes", shap_values[1], test_x, interaction_index="number of total diseases")
f = plt.figure()
shap.summary_plot(shap_values, test_x)
f.savefig("result/figure/summary_plot1.png", bbox_inches='tight', dpi=600)
##################################################################################################################################
# measure the contribution of adding patient demographic information to pathogenicity prediction
raw_auc = pd.read_csv('result/auc_difference.csv', index_col=0)
auc_index = raw_auc.index.values
auc_columns = raw_auc.columns.values
age_symptom_onset = []
current_age = []
ethnicity = []
gender = []
race = []
for i in range(0, raw_auc.shape[1]):
age_symptom_onset.append(round(raw_auc[auc_columns[i]].iloc[1] - raw_auc[auc_columns[i]].iloc[0], 4))
current_age.append(round(raw_auc[auc_columns[i]].iloc[2] - raw_auc[auc_columns[i]].iloc[0], 4))
ethnicity.append(round(raw_auc[auc_columns[i]].iloc[3] - raw_auc[auc_columns[i]].iloc[0], 4))
gender.append(round(raw_auc[auc_columns[i]].iloc[4] - raw_auc[auc_columns[i]].iloc[0], 4))
race.append(round(raw_auc[auc_columns[i]].iloc[5] - raw_auc[auc_columns[i]].iloc[0], 4))
attri_diff = pd.concat([pd.Series(age_symptom_onset), pd.Series(current_age), pd.Series(ethnicity),
pd.Series(gender), pd.Series(race)])
import numpy as np
import numpy.testing as npt
import pandas as pd
import pandas.testing as pdt
import pytest
from plateau.utils.pandas import (
aggregate_to_lists,
concat_dataframes,
drop_sorted_duplicates_keep_last,
is_dataframe_sorted,
mask_sorted_duplicates_keep_last,
merge_dataframes_robust,
sort_dataframe,
)
class TestConcatDataframes:
@pytest.fixture(params=[True, False])
def dummy_default(self, request):
if request.param:
return pd.DataFrame(data={"a": [-2, -3], "b": 1.0}, columns=["a", "b"])
else:
return None
@pytest.fixture(params=[True, False])
def maybe_iter(self, request):
if request.param:
return iter
else:
return list
def test_many(self, dummy_default, maybe_iter):
dfs = [
pd.DataFrame(
data={"a": [0, 1], "b": 1.0}, columns=["a", "b"], index=[10, 11]
),
pd.DataFrame(
data={"a": [2, 3], "b": 2.0}, columns=["a", "b"], index=[10, 11]
),
pd.DataFrame(data={"a": [4, 5], "b": 3.0}, columns=["a", "b"]),
]
expected = pd.DataFrame(
{"a": [0, 1, 2, 3, 4, 5], "b": [1.0, 1.0, 2.0, 2.0, 3.0, 3.0]},
columns=["a", "b"],
)
actual = concat_dataframes(maybe_iter(dfs), dummy_default)
pdt.assert_frame_equal(actual, expected)
def test_single(self, dummy_default, maybe_iter):
df = pd.DataFrame(
data={"a": [0, 1], "b": 1.0}, columns=["a", "b"], index=[10, 11]
)
actual = concat_dataframes(maybe_iter([df.copy()]), dummy_default)
pdt.assert_frame_equal(actual, df)
def test_default(self, maybe_iter):
df = pd.DataFrame(
data={"a": [0, 1], "b": 1.0}, columns=["a", "b"], index=[10, 11]
)
actual = concat_dataframes(maybe_iter([]), df)
pdt.assert_frame_equal(actual, df)
def test_fail_no_default(self, maybe_iter):
with pytest.raises(ValueError) as exc:
concat_dataframes(maybe_iter([]), None)
assert str(exc.value) == "Cannot concatenate 0 dataframes."
@pytest.mark.parametrize(
"dfs",
[
[pd.DataFrame({"a": [0, 1]})],
[pd.DataFrame({"a": [0, 1]}), pd.DataFrame({"a": [2, 3]})],
],
)
def test_whipe_list(self, dfs):
concat_dataframes(dfs)
assert dfs == []
@pytest.mark.parametrize(
"dfs,expected",
[
(
# dfs
[pd.DataFrame(index=range(3))],
# expected
pd.DataFrame(index=range(3)),
),
(
# dfs
[pd.DataFrame(index=range(3)), pd.DataFrame(index=range(2))],
# expected
pd.DataFrame(index=range(5)),
),
],
)
def test_no_columns(self, dfs, expected):
actual = concat_dataframes(dfs)
pdt.assert_frame_equal(actual, expected)
def test_fail_different_colsets(self, maybe_iter):
dfs = [pd.DataFrame({"a": [1]}), pd.DataFrame({"a": [1], "b": [2]})]
with pytest.raises(
ValueError, match="Not all DataFrames have the same set of columns!"
):
concat_dataframes(maybe_iter(dfs))
@pytest.mark.parametrize(
"df,columns",
[
(
# df
pd.DataFrame({"a": [1, 2, 3], "b": [1, 2, 3]}),
# columns
["a"],
),
(
# df
pd.DataFrame({"a": [3, 2, 1], "b": [1, 2, 3]}),
# columns
["a"],
),
(
# df
pd.DataFrame({"a": [3, 2, 1, 3, 2, 1], "b": [2, 2, 2, 1, 1, 1]}),
# columns
["a", "b"],
),
(
# df
pd.DataFrame({"a": [3, 2, 1], "b": [1, 2, 3]}, index=[1000, 2000, 3000]),
# columns
["a"],
),
(
# df
pd.DataFrame({"a": [3.0, 2.0, 1.0], "b": [1, 2, 3]}),
# columns
["a"],
),
(
# df
pd.DataFrame({"a": ["3", "2", "1"], "b": [1, 2, 3]}),
# columns
["a"],
),
(
# df
pd.DataFrame({"a": [True, False], "b": [1, 2]}),
# columns
["a"],
),
(
# df
pd.DataFrame(
{
"a": [
pd.Timestamp("2018-01-03"),
pd.Timestamp("2018-01-02"),
pd.Timestamp("2018-01-01"),
],
"b": [1, 2, 3],
}
),
# columns
["a"],
),
(
# df
pd.DataFrame(
{"a": pd.Series(["3", "2", "1"]).astype("category"), "b": [1, 2, 3]}
),
# columns
["a"],
),
],
)
def test_sort_dataframe(df, columns):
expected = df.sort_values(columns).reset_index(drop=True)
actual = sort_dataframe(df, columns)
pdt.assert_frame_equal(actual, expected)
@pytest.mark.parametrize(
"df,columns,expected_mask",
[
(
# df
pd.DataFrame({"a": [1, 2, 3]}),
# columns
["a"],
# expected_mask
np.array([False, False, False]),
),
(
# df
pd.DataFrame({"a": [1, 1, 3]}),
# columns
["a"],
# expected_mask
np.array([True, False, False]),
),
(
# df
pd.DataFrame({"a": [1, 1, 3], "b": [1, 2, 3]}),
# columns
["a"],
# expected_mask
np.array([True, False, False]),
),
(
# df
pd.DataFrame({"a": [1, 1, 3], "b": [1, 2, 3]}),
# columns
["a", "b"],
# expected_mask
np.array([False, False, False]),
),
(
# df
pd.DataFrame({"a": [1, 1, 3], "b": [1, 1, 3]}),
# columns
["a", "b"],
# expected_mask
np.array([True, False, False]),
),
(
# df
pd.DataFrame({"a": [1]}),
# columns
["a"],
# expected_mask
np.array([False]),
),
(
# df
pd.DataFrame({"a": []}),
# columns
["a"],
# expected_mask
np.array([], dtype=bool),
),
(
# df
pd.DataFrame(
{
"a": [1, 1, 3],
"b": [1.0, 1.0, 3.0],
"c": ["a", "a", "b"],
"d": [True, True, False],
"e": [
| pd.Timestamp("2018") | pandas.Timestamp |
import ccxt
import numpy as np
import pandas as pd
import time
from func_get import get_json, get_currency, get_bid_price, get_ask_price, get_last_price, get_base_currency_free, get_quote_currency_free, get_base_currency_value, get_quote_currency_value, get_order_fee, get_greed_index, get_available_cash_flow
from func_cal import round_amount, cal_unrealised, cal_available_budget, cal_end_balance
from func_update import update_json, append_order, remove_order, append_error_log, append_cash_flow_df, update_last_loop_price, update_transfer
from func_noti import noti_success_order, noti_warning, print_current_balance, print_hold_assets, print_pending_order
def cal_buy_price_list(n_buy_orders, bid_price, open_orders_df_path, config_params):
# Update open_orders_df before cal price.
open_orders_df = pd.read_csv(open_orders_df_path)
open_buy_orders_df = open_orders_df[open_orders_df['side'] == 'buy']
open_sell_orders_df = open_orders_df[open_orders_df['side'] == 'sell']
min_open_sell_price = min(open_sell_orders_df['price'], default=np.inf)
if len(open_buy_orders_df) > 0:
buy_price = min(open_buy_orders_df['price']) - config_params['grid']
else:
if len(open_sell_orders_df) == 0:
buy_price = bid_price - (config_params['grid'] * config_params['start_safety'])
else:
# Use (grid * 2) to prevent duplicate orders.
buy_price = min(min_open_sell_price - (config_params['grid'] * 2), bid_price)
buy_price_list = []
for _ in range(n_buy_orders):
buy_price_list.append(buy_price)
buy_price -= config_params['grid']
return buy_price_list
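# Worked example (assumed numbers): with config_params grid=10, start_safety=2,
# a bid price of 1000 and no open orders, the first buy price is
# 1000 - 10 * 2 = 980 and each subsequent order steps down one grid, giving
# [980, 970, 960, ...] for the requested number of buy orders.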
def cal_sell_price(order, ask_price, config_params):
sell_price = max(order['price'] + config_params['grid'], ask_price)
return sell_price
def open_buy_orders_grid(exchange, bot_name, config_params, transfer_path, open_orders_df_path, transactions_df_path, error_log_df_path, cash_flow_df_path):
base_currency, quote_currency = get_currency(config_params['symbol'])
bid_price = get_bid_price(exchange, config_params['symbol'])
print(f"Bid price: {bid_price} {quote_currency}")
open_orders_df = pd.read_csv(open_orders_df_path)
open_buy_orders_df = open_orders_df[open_orders_df['side'] == 'buy']
open_sell_orders_df = open_orders_df[open_orders_df['side'] == 'sell']
max_open_buy_price = max(open_buy_orders_df['price'], default=0)
min_open_sell_price = min(open_sell_orders_df['price'], default=np.inf)
cash_flow_df_path = cash_flow_df_path.format(bot_name)
cash_flow_df = pd.read_csv(cash_flow_df_path)
if min(bid_price, min_open_sell_price - config_params['grid']) - max_open_buy_price > config_params['grid']:
cancel_open_buy_orders_grid(exchange, config_params, open_orders_df_path, transactions_df_path, error_log_df_path)
n_open_buy_orders = 0
else:
n_open_buy_orders = len(open_buy_orders_df)
n_buy_orders = max(config_params['circuit_limit'] - n_open_buy_orders, 0)
print(f"Open {n_buy_orders} buy orders")
transfer = get_json(transfer_path)
available_cash_flow = get_available_cash_flow(transfer, cash_flow_df)
quote_currency_free = get_quote_currency_free(exchange, quote_currency)
available_budget = cal_available_budget(quote_currency_free, available_cash_flow, transfer)
buy_price_list = cal_buy_price_list(n_buy_orders, bid_price, open_orders_df_path, config_params)
for price in buy_price_list:
amount = config_params['value'] / price
amount = round_amount(amount, exchange, config_params['symbol'], type='down')
if available_budget >= config_params['value']:
buy_order = exchange.create_order(config_params['symbol'], 'limit', 'buy', amount, price, params={'postOnly':True})
append_order(buy_order, 'amount', open_orders_df_path)
print(f"Open buy {amount} {base_currency} at {price} {quote_currency}")
quote_currency_free = get_quote_currency_free(exchange, quote_currency)
available_budget = cal_available_budget(quote_currency_free, available_cash_flow, transfer)
else:
print(f"Error: Cannot buy at price {price} {quote_currency} due to insufficient fund!!!")
break
def open_sell_orders_grid(buy_order, exchange, config_params, open_orders_df_path, error_log_df_path):
base_currency, quote_currency = get_currency(config_params['symbol'])
ask_price = get_ask_price(exchange, config_params['symbol'])
sell_price = cal_sell_price(buy_order, ask_price, config_params)
try:
sell_amount = buy_order['filled']
sell_order = exchange.create_order(config_params['symbol'], 'limit', 'sell', sell_amount, sell_price)
append_order(sell_order, 'amount', open_orders_df_path)
except (ccxt.InvalidOrder, ccxt.InsufficientFunds):
# InvalidOrder: Filled with a small amount before being force closed.
# InvalidOrder: The order was closed by the system (could be caused by the postOnly param for buy orders).
# InvalidOrder: The exchange failed to update the actual filled amount.
# InsufficientFunds: No available amount to sell (could be caused by decimal rounding).
free_amount = get_base_currency_free(exchange, base_currency)
sell_amount = round_amount(free_amount, exchange, config_params['symbol'], type='down')
if sell_amount > 0:
# Free amount more than minimum order, sell all.
sell_order = exchange.create_order(config_params['symbol'], 'market', 'sell', sell_amount)
else:
sell_order = None
append_error_log('CannotOpenSell', error_log_df_path)
print(f"Open sell {sell_amount} {base_currency} at {sell_price} {quote_currency}")
def clear_orders_grid(side, exchange, bot_name, config_params, open_orders_df_path, transactions_df_path, error_log_df_path):
open_orders_df = pd.read_csv(open_orders_df_path)
open_orders_list = open_orders_df[open_orders_df['side'] == side]['order_id'].to_list()
if side == 'sell':
# Buy orders: FIFO.
# Sell orders: LIFO.
open_orders_list.reverse()
for order_id in open_orders_list:
order = exchange.fetch_order(order_id, config_params['symbol'])
if order['status'] == 'closed':
noti_success_order(order, bot_name, config_params['symbol'])
if side == 'buy':
open_sell_orders_grid(order, exchange, config_params, open_orders_df_path, error_log_df_path)
remove_order(order_id, open_orders_df_path)
append_order(order, 'filled', transactions_df_path)
elif order['status'] == 'canceled':
# Canceled by the postOnly param.
remove_order(order_id, open_orders_df_path)
def cancel_open_buy_orders_grid(exchange, config_params, open_orders_df_path, transactions_df_path, error_log_df_path):
open_orders_df = pd.read_csv(open_orders_df_path)
open_buy_orders_df = open_orders_df[open_orders_df['side'] == 'buy']
open_buy_orders_list = open_buy_orders_df['order_id'].to_list()
if len(open_buy_orders_list) > 0:
for order_id in open_buy_orders_list:
order = exchange.fetch_order(order_id, config_params['symbol'])
try:
exchange.cancel_order(order_id, config_params['symbol'])
print(f"Cancel order {order_id}")
if order['filled'] > 0:
append_order(order, 'filled', transactions_df_path)
open_sell_orders_grid(order, exchange, config_params, open_orders_df_path, error_log_df_path)
remove_order(order_id, open_orders_df_path)
except ccxt.OrderNotFound:
# No order in the system (could be caused by the order being queued), skip until the next loop.
append_error_log('OrderNotFound', error_log_df_path)
print(f"Error: Cannot cancel order {order_id} due to unavailable order!!!")
except ccxt.InvalidOrder:
# The order was closed by the system (could be caused by the postOnly param for buy orders).
append_error_log(f'InvalidOrder', error_log_df_path)
remove_order(order_id, open_orders_df_path)
def check_circuit_breaker(exchange, bot_name, config_system, config_params, last_loop_path, open_orders_df_path, transactions_df_path, error_log_df_path):
cont_flag = 1
_, quote_currency = get_currency(config_params['symbol'])
last_loop = get_json(last_loop_path)
transactions_df = pd.read_csv(transactions_df_path)
update_last_loop_price(exchange, config_params['symbol'], last_loop_path)
if len(transactions_df) >= config_params['circuit_limit']:
side_list = transactions_df['side'][-config_params['circuit_limit']:].unique()
last_price = get_last_price(exchange, config_params['symbol'])
if (len(side_list) == 1) & (side_list[0] == 'buy') & (last_price <= last_loop['price']):
cancel_open_buy_orders_grid(exchange, config_params, open_orders_df_path, transactions_df_path, error_log_df_path)
noti_warning(f"Circuit breaker at {last_price} {quote_currency}", bot_name)
time.sleep(config_system['idle_rest'])
return cont_flag
def check_cut_loss(exchange, bot_name, config_system, config_params, config_params_path, last_loop_path, transfer_path, open_orders_df_path, error_log_df_path, cash_flow_df_path):
cont_flag = 1
_, quote_currency = get_currency(config_params['symbol'])
quote_currency_free = get_quote_currency_free(exchange, quote_currency)
open_orders_df = pd.read_csv(open_orders_df_path)
cash_flow_df_path = cash_flow_df_path.format(bot_name)
cash_flow_df = pd.read_csv(cash_flow_df_path)
min_sell_price = min(open_orders_df['price'], default=0)
last_price = get_last_price(exchange, config_params['symbol'])
transfer = get_json(transfer_path)
available_cash_flow = get_available_cash_flow(transfer, cash_flow_df)
available_budget = cal_available_budget(quote_currency_free, available_cash_flow, transfer)
# No available budget to buy while the price is down to buying level.
if (available_budget < config_params['value']) & ((min_sell_price - last_price) >= (config_params['grid'] * 2)):
cont_flag = 0
while available_budget < config_params['value']:
cut_loss(exchange, bot_name, config_system, config_params, config_params_path, last_loop_path, open_orders_df_path, error_log_df_path, withdraw_flag=False)
quote_currency_free = get_quote_currency_free(exchange, quote_currency)
available_budget = cal_available_budget(quote_currency_free, available_cash_flow, transfer)
return cont_flag
def update_loss(loss, last_loop_path):
last_loop = get_json(last_loop_path)
total_loss = last_loop['loss']
total_loss -= loss
last_loop['loss'] = total_loss
update_json(last_loop, last_loop_path)
def cut_loss(exchange, bot_name, config_system, config_params, config_params_path, last_loop_path, open_orders_df_path, error_log_df_path, withdraw_flag):
open_orders_df = pd.read_csv(open_orders_df_path)
max_sell_price = max(open_orders_df['price'])
canceled_df = open_orders_df[open_orders_df['price'] == max_sell_price]
_, quote_currency = get_currency(config_params['symbol'])
canceled_id = canceled_df['order_id'].reset_index(drop=True)[0]
buy_amount = canceled_df['amount'].reset_index(drop=True)[0]
buy_price = max_sell_price - config_params['grid']
buy_value = buy_price * buy_amount
try:
exchange.cancel_order(canceled_id, config_params['symbol'])
time.sleep(config_system['idle_stage'])
canceled_order = exchange.fetch_order(canceled_id, config_params['symbol'])
while canceled_order['status'] != 'canceled':
# Canceled orders will be removed from the db on the next loop by check_orders_status.
time.sleep(config_system['idle_stage'])
canceled_order = exchange.fetch_order(canceled_id, config_params['symbol'])
remove_order(canceled_id, open_orders_df_path)
sell_order = exchange.create_order(config_params['symbol'], 'market', 'sell', buy_amount)
time.sleep(config_system['idle_stage'])
while sell_order['status'] != 'closed':
time.sleep(config_system['idle_stage'])
sell_order = exchange.fetch_order(sell_order['id'], config_params['symbol'])
fee = get_order_fee(sell_order, exchange, config_params['symbol'])
new_sell_price = sell_order['price']
new_sell_amount = sell_order['amount']
new_sell_value = new_sell_price * new_sell_amount
loss = new_sell_value - buy_value + fee
update_loss(loss, last_loop_path)
noti_warning(f"Cut loss {loss} {quote_currency} at {new_sell_price} {quote_currency}", bot_name)
if withdraw_flag == False:
time.sleep(config_system['idle_rest'])
except ccxt.InvalidOrder:
# Order has already been canceled from last loop but failed to update df.
append_error_log(f'InvalidOrder:LastLoopClose', error_log_df_path)
remove_order(canceled_id, open_orders_df_path)
def reset_loss(last_loop_path):
last_loop = get_json(last_loop_path)
last_loop['loss'] = 0
update_json(last_loop, last_loop_path)
def update_reinvest(new_value, config_params_path):
config_params = get_json(config_params_path)
config_params['value'] = new_value
update_json(config_params, config_params_path)
def update_end_date_grid(prev_date, exchange, bot_name, config_system, config_params, config_params_path, last_loop_path, transfer_path, open_orders_df_path, transactions_df_path, error_log_df_path, cash_flow_df_path):
cash_flow_df_path = cash_flow_df_path.format(bot_name)
cash_flow_df = pd.read_csv(cash_flow_df_path)
open_orders_df = | pd.read_csv(open_orders_df_path) | pandas.read_csv |
# -*- coding: utf-8 -*-
"""
Created on Mon Nov 18 09:32:29 2019
@author: Gary
"""
import pandas as pd
import numpy as np
# =============================================================================
# def add_Elsner_table(df,sources='./sources/',
# outdir='./out/',
# ehfn='elsner_corrected_table.csv'):
# #print('Adding Elsner/Hoelzer table to CAS table')
# ehdf = pd.read_csv(sources+ehfn,quotechar='$')
# # =============================================================================
# # # checking overlap first:
# # ehcas = list(ehdf.eh_CAS.unique())
# # dfcas = list(df.bgCAS.unique())
# # with open(outdir+'elsner_non_overlap.txt','w') as f:
# # f.write('**** bgCAS numbers without an Elsner entry: *****\n')
# # for c in dfcas:
# # if c not in ehcas:
# # f.write(f'{c}\n')
# # f.write('\n\n***** Elsner CAS numbers without a FF entry: *****\n')
# # for c in ehcas:
# # if c not in dfcas:
# # f.write(f'{c}\n')
# #
# # =============================================================================
# mg = pd.merge(df,ehdf,left_on='bgCAS',right_on='eh_CAS',
# how='left',validate='1:1')
# return mg
# =============================================================================
# =============================================================================
# def add_WellExplorer_table(df,sources='./sources/',
# outdir='./out/',
# wefn='well_explorer_corrected.csv'):
# """Add the WellExplorer data table. """
# #print('Adding WellExplorer table to CAS table')
# wedf = pd.read_csv(sources+wefn)
# #print(wedf.head())
# # checking overlap first:
# # =============================================================================
# # wecas = list(wedf.we_CASNumber.unique())
# # dfcas = list(df.bgCAS.unique())
# # with open(outdir+'wellexplorer_non_overlap.txt','w') as f:
# # f.write('**** bgCAS numbers without an WellExplorer entry: *****\n')
# # for c in dfcas:
# # if c not in wecas:
# # f.write(f'{c}\n')
# # f.write('\n\n***** WellExplorer CAS numbers without a FF entry: *****\n')
# # for c in wecas:
# # if c not in dfcas:
# # f.write(f'{c}\n')
# #
# # =============================================================================
# mg = pd.merge(df,wedf,left_on='bgCAS',right_on='we_CASNumber',
# how='left',validate='1:1')
# return mg
# =============================================================================
def add_TEDX_ref(df,sources='./sources/',
tedx_fn = 'TEDX_EDC_trimmed.xls'):
#print('Adding TEDX link to CAS table')
tedxdf = pd.read_excel(sources+tedx_fn)
tedx_cas = tedxdf.CAS_Num.unique().tolist()
df['is_on_TEDX'] = df.bgCAS.isin(tedx_cas)
return df
# =============================================================================
# def add_TSCA_ref(df,sources='./sources/',
# tsca_fn = 'TSCAINV_092019.csv'):
# #print('Adding TSCA to CAS table')
# tscadf = pd.read_csv(sources+tsca_fn)
# tsca_cas = tscadf.CASRN.unique().tolist()
# df['is_on_TSCA'] = df.bgCAS.isin(tsca_cas)
# return df
# =============================================================================
def add_Prop65_ref(df,sources='./sources/',
p65_fn = 'p65list12182020.csv'):
#print('Adding California Prop 65 to CAS table')
p65df = pd.read_csv(sources+p65_fn,encoding='iso-8859-1')
p65_cas = p65df['CAS No.'].unique().tolist()
df['is_on_prop65'] = df.bgCAS.isin(p65_cas)
return df
# =============================================================================
# def add_CWA_primary_ref(df,sources='./sources/',
# cwa_fn = 'sara_sdwa_cwa.csv'):
# # this file is used to provide the CWA priority list
# #print('Adding SDWA/CWA lists to CAS table')
# cwadf = pd.read_csv(sources+cwa_fn)
# cwadf = cwadf[cwadf.Legislation=='CWA'] # keep only CWA
# cwa_cas = cwadf['CASNo'].unique().tolist()
# df['is_on_EPA_priority'] = df.bgCAS.isin(cwa_cas)
# return df
# =============================================================================
def add_diesel_list(df):
print(' -- processing epa diesel list')
cas = ['68334-30-5','68476-34-6','68476-30-2','68476-31-3','8008-20-6']
df['is_on_diesel'] = df.bgCAS.isin(cas)
return df
def add_UVCB_list(df,sources='./sources/'):
print(' -- processing TSCA UVCB list')
uvcb = pd.read_csv(sources+'TSCA_UVCB_202202.csv')
cas = uvcb.CASRN.unique().tolist()
df['is_on_UVCB'] = df.bgCAS.isin(cas)
return df
def add_NPDWR_list(df,sources='./sources/'):
    # add list curated by Angelica
print(' -- processing NPDWR list')
npdwr = pd.read_csv(sources+'NationalPrimaryDrinkingWaterRegulations_machine_readable_FEB2022.csv')
cas = npdwr[npdwr.CASRN.notna()].CASRN.unique().tolist()
df['is_on_NPDWR'] = df.bgCAS.isin(cas)
return df
def add_CompTox_refs(df,sources='./sources/'):
ctfiles = {'CWA': 'Chemical List CWA311HS-2022-03-31.csv',
'DWSHA' : 'Chemical List EPADWS-2022-03-31.csv',
'AQ_CWA': 'Chemical List WATERQUALCRIT-2022-03-31.csv',
'HH_CWA': 'Chemical List NWATRQHHC-2022-03-31.csv',
'IRIS': 'Chemical List IRIS-2022-03-31.csv',
'PFAS_list': 'Chemical List PFASMASTER-2022-04-01.csv',
'volatile_list': 'Chemical List VOLATILOME-2022-04-01.csv'}
reffn = 'CCD-Batch-Search_2022-04-01_12_32_54.csv'
for lst in ctfiles.keys():
print(f' -- processing {lst}')
ctdf = pd.read_csv(sources+ctfiles[lst],low_memory=False,
dtype={'CASRN':'str'})
# ctdf['DTXSID'] = ctdf.DTXSID.str[-13:]
clst= ctdf.CASRN.unique().tolist()
df['is_on_'+lst] = df.bgCAS.isin(clst)
# now add the epa ref numbers and names
    refdf = pd.read_csv(sources+reffn)
import pytest
import numpy as np
from datetime import date, timedelta, time, datetime
import dateutil
import pandas as pd
import pandas.util.testing as tm
from pandas.compat import lrange
from pandas.compat.numpy import np_datetime64_compat
from pandas import (DatetimeIndex, Index, date_range, DataFrame,
Timestamp, offsets)
from pandas.util.testing import assert_almost_equal
randn = np.random.randn
class TestDatetimeIndexLikeTimestamp(object):
# Tests for DatetimeIndex behaving like a vectorized Timestamp
def test_dti_date_out_of_range(self):
# see gh-1475
pytest.raises(ValueError, DatetimeIndex, ['1400-01-01'])
pytest.raises(ValueError, DatetimeIndex, [datetime(1400, 1, 1)])
def test_timestamp_fields(self):
# extra fields from DatetimeIndex like quarter and week
idx = tm.makeDateIndex(100)
fields = ['dayofweek', 'dayofyear', 'week', 'weekofyear', 'quarter',
'days_in_month', 'is_month_start', 'is_month_end',
'is_quarter_start', 'is_quarter_end', 'is_year_start',
'is_year_end', 'weekday_name']
for f in fields:
expected = getattr(idx, f)[-1]
result = getattr(Timestamp(idx[-1]), f)
assert result == expected
assert idx.freq == Timestamp(idx[-1], idx.freq).freq
assert idx.freqstr == Timestamp(idx[-1], idx.freq).freqstr
class TestDatetimeIndex(object):
def test_get_loc(self):
idx = pd.date_range('2000-01-01', periods=3)
for method in [None, 'pad', 'backfill', 'nearest']:
assert idx.get_loc(idx[1], method) == 1
assert idx.get_loc(idx[1].to_pydatetime(), method) == 1
assert idx.get_loc(str(idx[1]), method) == 1
if method is not None:
assert idx.get_loc(idx[1], method,
tolerance=pd.Timedelta('0 days')) == 1
assert idx.get_loc('2000-01-01', method='nearest') == 0
assert idx.get_loc('2000-01-01T12', method='nearest') == 1
assert idx.get_loc('2000-01-01T12', method='nearest',
tolerance='1 day') == 1
assert idx.get_loc('2000-01-01T12', method='nearest',
tolerance=pd.Timedelta('1D')) == 1
assert idx.get_loc('2000-01-01T12', method='nearest',
tolerance=np.timedelta64(1, 'D')) == 1
assert idx.get_loc('2000-01-01T12', method='nearest',
tolerance=timedelta(1)) == 1
with tm.assert_raises_regex(ValueError,
'unit abbreviation w/o a number'):
idx.get_loc('2000-01-01T12', method='nearest', tolerance='foo')
with pytest.raises(KeyError):
idx.get_loc('2000-01-01T03', method='nearest', tolerance='2 hours')
with pytest.raises(
ValueError,
match='tolerance size must match target index size'):
idx.get_loc('2000-01-01', method='nearest',
tolerance=[pd.Timedelta('1day').to_timedelta64(),
pd.Timedelta('1day').to_timedelta64()])
assert idx.get_loc('2000', method='nearest') == slice(0, 3)
assert idx.get_loc('2000-01', method='nearest') == slice(0, 3)
assert idx.get_loc('1999', method='nearest') == 0
assert idx.get_loc('2001', method='nearest') == 2
with pytest.raises(KeyError):
idx.get_loc('1999', method='pad')
with pytest.raises(KeyError):
idx.get_loc('2001', method='backfill')
with pytest.raises(KeyError):
idx.get_loc('foobar')
with pytest.raises(TypeError):
idx.get_loc(slice(2))
idx = pd.to_datetime(['2000-01-01', '2000-01-04'])
assert idx.get_loc('2000-01-02', method='nearest') == 0
assert idx.get_loc('2000-01-03', method='nearest') == 1
assert idx.get_loc('2000-01', method='nearest') == slice(0, 2)
# time indexing
idx = pd.date_range('2000-01-01', periods=24, freq='H')
tm.assert_numpy_array_equal(idx.get_loc(time(12)),
np.array([12]), check_dtype=False)
tm.assert_numpy_array_equal(idx.get_loc(time(12, 30)),
np.array([]), check_dtype=False)
with pytest.raises(NotImplementedError):
idx.get_loc(time(12, 30), method='pad')
def test_get_indexer(self):
idx = pd.date_range('2000-01-01', periods=3)
exp = np.array([0, 1, 2], dtype=np.intp)
tm.assert_numpy_array_equal(idx.get_indexer(idx), exp)
target = idx[0] + pd.to_timedelta(['-1 hour', '12 hours',
'1 day 1 hour'])
tm.assert_numpy_array_equal(idx.get_indexer(target, 'pad'),
np.array([-1, 0, 1], dtype=np.intp))
tm.assert_numpy_array_equal(idx.get_indexer(target, 'backfill'),
np.array([0, 1, 2], dtype=np.intp))
tm.assert_numpy_array_equal(idx.get_indexer(target, 'nearest'),
np.array([0, 1, 1], dtype=np.intp))
tm.assert_numpy_array_equal(
idx.get_indexer(target, 'nearest',
tolerance=pd.Timedelta('1 hour')),
np.array([0, -1, 1], dtype=np.intp))
tol_raw = [pd.Timedelta('1 hour'),
pd.Timedelta('1 hour'),
pd.Timedelta('1 hour').to_timedelta64(), ]
tm.assert_numpy_array_equal(
idx.get_indexer(target, 'nearest',
tolerance=[np.timedelta64(x) for x in tol_raw]),
np.array([0, -1, 1], dtype=np.intp))
tol_bad = [pd.Timedelta('2 hour').to_timedelta64(),
pd.Timedelta('1 hour').to_timedelta64(),
'foo', ]
with pytest.raises(
ValueError, match='abbreviation w/o a number'):
idx.get_indexer(target, 'nearest', tolerance=tol_bad)
with pytest.raises(ValueError):
idx.get_indexer(idx[[0]], method='nearest', tolerance='foo')
def test_reasonable_keyerror(self):
# GH #1062
index = DatetimeIndex(['1/3/2000'])
try:
index.get_loc('1/1/2000')
except KeyError as e:
assert '2000' in str(e)
def test_roundtrip_pickle_with_tz(self):
# GH 8367
# round-trip of timezone
index = date_range('20130101', periods=3, tz='US/Eastern', name='foo')
unpickled = tm.round_trip_pickle(index)
tm.assert_index_equal(index, unpickled)
def test_reindex_preserves_tz_if_target_is_empty_list_or_array(self):
# GH7774
index = date_range('20130101', periods=3, tz='US/Eastern')
assert str(index.reindex([])[0].tz) == 'US/Eastern'
assert str(index.reindex(np.array([]))[0].tz) == 'US/Eastern'
def test_time_loc(self): # GH8667
from datetime import time
from pandas._libs.index import _SIZE_CUTOFF
ns = _SIZE_CUTOFF + np.array([-100, 100], dtype=np.int64)
key = time(15, 11, 30)
start = key.hour * 3600 + key.minute * 60 + key.second
step = 24 * 3600
for n in ns:
idx = pd.date_range('2014-11-26', periods=n, freq='S')
ts = pd.Series(np.random.randn(n), index=idx)
i = np.arange(start, n, step)
tm.assert_numpy_array_equal(ts.index.get_loc(key), i,
check_dtype=False)
tm.assert_series_equal(ts[key], ts.iloc[i])
left, right = ts.copy(), ts.copy()
left[key] *= -10
right.iloc[i] *= -10
tm.assert_series_equal(left, right)
def test_time_overflow_for_32bit_machines(self):
# GH8943. On some machines NumPy defaults to np.int32 (for example,
# 32-bit Linux machines). In the function _generate_regular_range
# found in tseries/index.py, `periods` gets multiplied by `strides`
# (which has value 1e9) and since the max value for np.int32 is ~2e9,
# and since those machines won't promote np.int32 to np.int64, we get
# overflow.
periods = np.int_(1000)
idx1 = pd.date_range(start='2000', periods=periods, freq='S')
assert len(idx1) == periods
idx2 = pd.date_range(end='2000', periods=periods, freq='S')
assert len(idx2) == periods
def test_nat(self):
assert DatetimeIndex([np.nan])[0] is pd.NaT
def test_week_of_month_frequency(self):
# GH 5348: "ValueError: Could not evaluate WOM-1SUN" shouldn't raise
d1 = date(2002, 9, 1)
d2 = date(2013, 10, 27)
d3 = date(2012, 9, 30)
idx1 = DatetimeIndex([d1, d2])
idx2 = DatetimeIndex([d3])
result_append = idx1.append(idx2)
expected = DatetimeIndex([d1, d2, d3])
tm.assert_index_equal(result_append, expected)
result_union = idx1.union(idx2)
expected = DatetimeIndex([d1, d3, d2])
tm.assert_index_equal(result_union, expected)
# GH 5115
result = date_range("2013-1-1", periods=4, freq='WOM-1SAT')
dates = ['2013-01-05', '2013-02-02', '2013-03-02', '2013-04-06']
expected = DatetimeIndex(dates, freq='WOM-1SAT')
tm.assert_index_equal(result, expected)
def test_hash_error(self):
index = date_range('20010101', periods=10)
with tm.assert_raises_regex(TypeError, "unhashable type: %r" %
type(index).__name__):
hash(index)
def test_stringified_slice_with_tz(self):
# GH2658
import datetime
start = datetime.datetime.now()
idx = DatetimeIndex(start=start, freq="1d", periods=10)
df = DataFrame(lrange(10), index=idx)
df["2013-01-14 23:44:34.437768-05:00":] # no exception here
def test_append_join_nondatetimeindex(self):
rng = date_range('1/1/2000', periods=10)
idx = Index(['a', 'b', 'c', 'd'])
result = rng.append(idx)
assert isinstance(result[0], Timestamp)
# it works
rng.join(idx, how='outer')
def test_comparisons_coverage(self):
rng = date_range('1/1/2000', periods=10)
# raise TypeError for now
pytest.raises(TypeError, rng.__lt__, rng[3].value)
result = rng == list(rng)
exp = rng == rng
tm.assert_numpy_array_equal(result, exp)
def test_comparisons_nat(self):
fidx1 = pd.Index([1.0, np.nan, 3.0, np.nan, 5.0, 7.0])
fidx2 = pd.Index([2.0, 3.0, np.nan, np.nan, 6.0, 7.0])
didx1 = pd.DatetimeIndex(['2014-01-01', pd.NaT, '2014-03-01', pd.NaT,
'2014-05-01', '2014-07-01'])
didx2 = pd.DatetimeIndex(['2014-02-01', '2014-03-01', pd.NaT, pd.NaT,
'2014-06-01', '2014-07-01'])
darr = np.array([np_datetime64_compat('2014-02-01 00:00Z'),
np_datetime64_compat('2014-03-01 00:00Z'),
np_datetime64_compat('nat'), np.datetime64('nat'),
np_datetime64_compat('2014-06-01 00:00Z'),
np_datetime64_compat('2014-07-01 00:00Z')])
cases = [(fidx1, fidx2), (didx1, didx2), (didx1, darr)]
        # Check pd.NaT is handled the same as np.nan
with tm.assert_produces_warning(None):
for idx1, idx2 in cases:
result = idx1 < idx2
expected = np.array([True, False, False, False, True, False])
tm.assert_numpy_array_equal(result, expected)
result = idx2 > idx1
expected = np.array([True, False, False, False, True, False])
tm.assert_numpy_array_equal(result, expected)
result = idx1 <= idx2
expected = np.array([True, False, False, False, True, True])
tm.assert_numpy_array_equal(result, expected)
result = idx2 >= idx1
expected = np.array([True, False, False, False, True, True])
tm.assert_numpy_array_equal(result, expected)
result = idx1 == idx2
expected = np.array([False, False, False, False, False, True])
tm.assert_numpy_array_equal(result, expected)
result = idx1 != idx2
expected = np.array([True, True, True, True, True, False])
tm.assert_numpy_array_equal(result, expected)
with tm.assert_produces_warning(None):
for idx1, val in [(fidx1, np.nan), (didx1, pd.NaT)]:
result = idx1 < val
expected = np.array([False, False, False, False, False, False])
tm.assert_numpy_array_equal(result, expected)
result = idx1 > val
tm.assert_numpy_array_equal(result, expected)
result = idx1 <= val
tm.assert_numpy_array_equal(result, expected)
result = idx1 >= val
tm.assert_numpy_array_equal(result, expected)
result = idx1 == val
tm.assert_numpy_array_equal(result, expected)
result = idx1 != val
expected = np.array([True, True, True, True, True, True])
tm.assert_numpy_array_equal(result, expected)
        # Check pd.NaT is handled the same as np.nan
with tm.assert_produces_warning(None):
for idx1, val in [(fidx1, 3), (didx1, datetime(2014, 3, 1))]:
result = idx1 < val
expected = np.array([True, False, False, False, False, False])
tm.assert_numpy_array_equal(result, expected)
result = idx1 > val
expected = np.array([False, False, False, False, True, True])
tm.assert_numpy_array_equal(result, expected)
result = idx1 <= val
expected = np.array([True, False, True, False, False, False])
tm.assert_numpy_array_equal(result, expected)
result = idx1 >= val
expected = np.array([False, False, True, False, True, True])
tm.assert_numpy_array_equal(result, expected)
result = idx1 == val
expected = np.array([False, False, True, False, False, False])
tm.assert_numpy_array_equal(result, expected)
result = idx1 != val
expected = np.array([True, True, False, True, True, True])
tm.assert_numpy_array_equal(result, expected)
def test_map(self):
rng = date_range('1/1/2000', periods=10)
f = lambda x: x.strftime('%Y%m%d')
result = rng.map(f)
exp = Index([f(x) for x in rng], dtype='<U8')
tm.assert_index_equal(result, exp)
def test_iteration_preserves_tz(self):
# see gh-8890
index = date_range("2012-01-01", periods=3, freq='H', tz='US/Eastern')
for i, ts in enumerate(index):
result = ts
expected = index[i]
assert result == expected
index = date_range("2012-01-01", periods=3, freq='H',
tz=dateutil.tz.tzoffset(None, -28800))
for i, ts in enumerate(index):
result = ts
expected = index[i]
assert result._repr_base == expected._repr_base
assert result == expected
# 9100
index = pd.DatetimeIndex(['2014-12-01 03:32:39.987000-08:00',
'2014-12-01 04:12:34.987000-08:00'])
for i, ts in enumerate(index):
result = ts
expected = index[i]
assert result._repr_base == expected._repr_base
assert result == expected
def test_misc_coverage(self):
rng = date_range('1/1/2000', periods=5)
result = rng.groupby(rng.day)
assert isinstance(list(result.values())[0][0], Timestamp)
idx = DatetimeIndex(['2000-01-03', '2000-01-01', '2000-01-02'])
assert not idx.equals(list(idx))
non_datetime = Index(list('abc'))
assert not idx.equals(list(non_datetime))
def test_string_index_series_name_converted(self):
# #1644
df = DataFrame(np.random.randn(10, 4),
index=date_range('1/1/2000', periods=10))
result = df.loc['1/3/2000']
assert result.name == df.index[2]
result = df.T['1/3/2000']
assert result.name == df.index[2]
def test_get_duplicates(self):
idx = DatetimeIndex(['2000-01-01', '2000-01-02', '2000-01-02',
'2000-01-03', '2000-01-03', '2000-01-04'])
result = idx.get_duplicates()
ex = DatetimeIndex(['2000-01-02', '2000-01-03'])
tm.assert_index_equal(result, ex)
def test_argmin_argmax(self):
idx = DatetimeIndex(['2000-01-04', '2000-01-01', '2000-01-02'])
assert idx.argmin() == 1
assert idx.argmax() == 0
def test_sort_values(self):
idx = DatetimeIndex(['2000-01-04', '2000-01-01', '2000-01-02'])
ordered = idx.sort_values()
assert ordered.is_monotonic
ordered = idx.sort_values(ascending=False)
assert ordered[::-1].is_monotonic
ordered, dexer = idx.sort_values(return_indexer=True)
assert ordered.is_monotonic
tm.assert_numpy_array_equal(dexer, np.array([1, 2, 0], dtype=np.intp))
ordered, dexer = idx.sort_values(return_indexer=True, ascending=False)
assert ordered[::-1].is_monotonic
tm.assert_numpy_array_equal(dexer, np.array([0, 2, 1], dtype=np.intp))
def test_map_bug_1677(self):
index = DatetimeIndex(['2012-04-25 09:30:00.393000'])
f = index.asof
result = index.map(f)
expected = Index([f(index[0])])
tm.assert_index_equal(result, expected)
def test_groupby_function_tuple_1677(self):
df = DataFrame(np.random.rand(100),
index=date_range("1/1/2000", periods=100))
monthly_group = df.groupby(lambda x: (x.year, x.month))
result = monthly_group.mean()
assert isinstance(result.index[0], tuple)
def test_append_numpy_bug_1681(self):
# another datetime64 bug
dr = date_range('2011/1/1', '2012/1/1', freq='W-FRI')
a = DataFrame()
c = DataFrame({'A': 'foo', 'B': dr}, index=dr)
result = a.append(c)
assert (result['B'] == dr).all()
def test_isin(self):
index = tm.makeDateIndex(4)
result = index.isin(index)
assert result.all()
result = index.isin(list(index))
assert result.all()
assert_almost_equal(index.isin([index[2], 5]),
np.array([False, False, True, False]))
def test_time(self):
        rng = pd.date_range('1/1/2000', freq='12min', periods=10)
# sentiment_analysis.py
# Data pre-processing for sentiment analysis of Nature articles.
import numpy as np
import pandas as pd
import altair as alt
import streamlit as st
def getTopByYear(data):
df = {}
    cols = data.columns
for col in cols:
colName = str(col)
df[colName] = data[col][0]
return pd.DataFrame(df)
def getSentiment(data):
newData = data.T
cols = data.columns
newData["Year"] = np.asarray(cols)
newData.columns = ["Average Sentiment", " Positive Articles", "Year"]
newData.set_index("Year")
return newData.melt("Year", var_name="Legend", value_name="Percent")
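# Minimal illustration of getSentiment's reshape (synthetic numbers, not the real dataset):
#   getSentiment(pd.DataFrame({"2019": [0.12, 0.40], "2020": [0.15, 0.45]}))
# returns long-form rows (Year, Legend, Percent) that Altair can encode directly.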
def public():
# Load data
top10 = pd.read_json("data/top10_nature.json")
sentiment = pd.read_json("data/natureArticlesSentiment.json")
# Build Data
topByYear = getTopByYear(top10)
topByYear.index += 1
sentByYear = getSentiment(sentiment)
# Build slider
top10s = {
"2000": "2000",
"2001": "2001",
"2002": "2002",
"2003": "2003",
"2004": "2004",
"2005": "2005",
"2006": "2006",
"2007": "2007",
"2008": "2008",
"2009": "2009",
"2010": "2010",
"2011": "2011",
"2012": "2012",
"2013": "2013",
"2014": "2014",
"2015": "2015",
"2016": "2016",
"2017": "2017",
"2018": "2018",
"2019": "2019",
"2020": "2020",
}
tops = st.select_slider(
"Select the year for the 'Top 10 Words' found in news articles",
list(top10s.keys()),
)
sentChart = (
alt.Chart(sentByYear)
.mark_line()
.encode(
x=alt.X("Year:N"),
y=alt.Y("Percent:Q", axis=alt.Axis(format="%")),
color=alt.Color(
"Legend:N",
legend=alt.Legend(orient="bottom"),
scale=alt.Scale(scheme="redyellowblue"),
),
)
.properties(title="What is the perception of AI in the media?")
)
    line = alt.Chart(pd.DataFrame({" ": [tops]}))
import numpy as np
import pandas as pd
import numba
from vtools.functions.filter import cosine_lanczos
def get_smoothed_resampled(df, cutoff_period='2H', resample_period='1T', interpolate_method='pchip'):
"""Resample the dataframe (indexed by time) to the regular period of resample_period using the interpolate method
Furthermore the cosine lanczos filter is used with a cutoff_period to smooth the signal to remove high frequency noise
Args:
df (DataFrame): A single column dataframe indexed by datetime
cutoff_period (str, optional): cutoff period for cosine lanczos filter. Defaults to '2H'.
resample_period (str, optional): Resample to regular period. Defaults to '1T'.
interpolate_method (str, optional): interpolation for resampling. Defaults to 'pchip'.
Returns:
DataFrame: smoothed and resampled dataframe indexed by datetime
"""
dfb = df.resample(resample_period).fillna(method='backfill')
df = df.resample(resample_period).interpolate(method=interpolate_method)
df[dfb.iloc[:, 0].isna()] = np.nan
return cosine_lanczos(df, cutoff_period)
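# Minimal usage sketch (the variable name `df_stage` is an illustrative assumption, not
# part of this module):
#   df_smooth = get_smoothed_resampled(df_stage, cutoff_period='2H', resample_period='1T')
# where `df_stage` is a single-column water-level DataFrame indexed by datetime.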
@numba.jit(nopython=True)
def lmax(arr):
'''Local maximum: Returns value only when centered on maximum
'''
idx = np.argmax(arr)
if idx == len(arr)/2:
return arr[idx]
else:
return np.NaN
@numba.jit(nopython=True)
def lmin(arr):
'''Local minimum: Returns value only when centered on minimum
'''
idx = np.argmin(arr)
if idx == len(arr)/2:
return arr[idx]
else:
return np.NaN
def periods_per_window(moving_window_size: str, period_str: str) -> int:
"""Number of period size in moving window
Args:
moving_window_size (str): moving window size as a string e.g 7H for 7 hour
period_str (str): period as str e.g. 1T for 1 min
Returns:
int: number of periods in the moving window rounded to an integer
"""
return int(pd.Timedelta(moving_window_size)/pd.to_timedelta(pd.tseries.frequencies.to_offset(period_str)))
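# Illustrative check (not part of the original module): a 7-hour window sampled at
# 1-minute intervals spans 420 periods.
#   >>> periods_per_window('7H', '1T')
#   420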
def tidal_highs(df, moving_window_size='7H'):
"""Tidal highs (could be upto two highs in a 25 hr period)
Args:
df (DataFrame): a time series with a regular frequency
moving_window_size (str, optional): moving window size to look for highs within. Defaults to '7H'.
Returns:
DataFrame: an irregular time series with highs at resolution of df.index
"""
period_str = df.index.freqstr
periods = periods_per_window(moving_window_size, period_str)
dfmax = df.rolling(moving_window_size, min_periods=periods).apply(lmax, raw=True)
dfmax = dfmax.shift(periods=-(periods//2-1))
dfmax = dfmax.dropna()
dfmax.columns = ['max']
return dfmax
def tidal_lows(df, moving_window_size='7H'):
"""Tidal lows (could be upto two lows in a 25 hr period)
Args:
df (DataFrame): a time series with a regular frequency
moving_window_size (str, optional): moving window size to look for lows within. Defaults to '7H'.
Returns:
DataFrame: an irregular time series with lows at resolution of df.index
"""
period_str = df.index.freqstr
periods = periods_per_window(moving_window_size, period_str)
dfmin = df.rolling(moving_window_size, min_periods=periods).apply(lmin, raw=True)
dfmin = dfmin.shift(periods=-(periods//2-1))
dfmin = dfmin.dropna()
dfmin.columns = ['min']
return dfmin
def get_tidal_hl(df, cutoff_period='2H', resample_period='1T', interpolate_method='pchip', moving_window_size='7H'):
"""Get Tidal highs and lows
Args:
df (DataFrame): A single column dataframe indexed by datetime
cutoff_period (str, optional): cutoff period for cosine lanczos filter. Defaults to '2H'.
resample_period (str, optional): Resample to regular period. Defaults to '1T'.
interpolate_method (str, optional): interpolation for resampling. Defaults to 'pchip'.
moving_window_size (str, optional): moving window size to look for lows within. Defaults to '7H'.
Returns:
tuple of DataFrame: Tidal high and tidal low time series
"""
dfs = get_smoothed_resampled(df, cutoff_period, resample_period, interpolate_method)
return tidal_highs(dfs), tidal_lows(dfs)
get_tidal_hl_rolling = get_tidal_hl # for older refs. #FIXME
def get_tidal_amplitude(dfh, dfl):
"""Tidal amplitude given tidal highs and lows
Args:
dfh (DataFrame): Tidal highs time series
dfl (DataFrame): Tidal lows time series
Returns:
DataFrame: Amplitude timeseries, at the times of the low following the high being used for amplitude calculation
"""
dfamp = pd.concat([dfh, dfl], axis=1)
dfamp = dfamp[['min']].dropna().join(dfamp[['max']].ffill())
return pd.DataFrame(dfamp['max']-dfamp['min'], columns=['amplitude'])
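# Minimal end-to-end sketch (names are illustrative assumptions, not from this module):
#   dfh, dfl = get_tidal_hl(df_stage)        # tidal highs and lows from a stage series
#   dfamp = get_tidal_amplitude(dfh, dfl)    # amplitude at the time of each low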
def get_value_diff(df, percent_diff=False):
'''
Get the difference of values of each element in the dataframe
The times in the dataframe may or may not coincide as this is a slice of irregularly sampled time series
On any error, the returned value is np.nan
'''
try:
arr = [df[c].dropna() for c in df.columns]
if percent_diff:
value_diff = 100.0 * (arr[0].values[0]-arr[1].values[0])/arr[1].values[0]
else:
value_diff = arr[0].values[0]-arr[1].values[0]
return value_diff
except:
return np.nan
def get_tidal_amplitude_diff(dfamp1, dfamp2, percent_diff=False):
"""Get the difference of values within +/- 4H of values in the two amplitude arrays
Args:
dfamp1 (DataFrame): Amplitude time series
dfamp2 (DataFrame): Amplitude time series
percent_diff (bool, optional): If true do percent diff. Defaults to False.
Returns:
DataFrame: Difference dfamp1-dfamp2 or % Difference (dfamp1-dfamp2)/dfamp2*100 for values within +/- 4H of each other
"""
dfamp = pd.concat([dfamp1, dfamp2], axis=1).dropna(how='all')
dfamp.columns = ['2', '1']
tdelta = '4H'
sliceamp = [slice(t-pd.to_timedelta(tdelta), t+pd.to_timedelta(tdelta)) for t in dfamp.index]
ampdiff = [get_value_diff(dfamp[sl], percent_diff) for sl in sliceamp]
return pd.DataFrame(ampdiff, index=dfamp.index)
def get_index_diff(df):
'''
Get the difference of index values of each element in the dataframe
The times in the dataframe may or may not coincide
The difference is in Timedelta and is converted to minutes
On any error, the returned value is np.nan
'''
try:
arr = [df[c].dropna() for c in df.columns]
tidal_phase_diff = (arr[0].index[0]-arr[1].index[0]).total_seconds()/60.
return tidal_phase_diff
except:
return np.nan
def get_tidal_phase_diff(dfh2, dfl2, dfh1, dfl1):
"""Calculates the phase difference between df2 and df1 tidal highs and lows
    Scans +/- 4 hours in df1 to get the highs and lows in that window for df2 to
get the tidal highs and lows at the times of df1
Args:
dfh2 (DataFrame): Timeseries of tidal highs
dfl2 (DataFrame): Timeseries of tidal lows
dfh1 (DataFrame): Timeseries of tidal highs
dfl1 (DataFRame): Timeseries of tidal lows
Returns:
DataFrame: Phase difference (dfh2-dfh1) and (dfl2-dfl1) in minutes
"""
tdelta = '4H'
sliceh1 = [slice(t-pd.to_timedelta(tdelta), t+pd.to_timedelta(tdelta)) for t in dfh1.index]
slicel1 = [slice(t-pd.to_timedelta(tdelta), t+pd.to_timedelta(tdelta)) for t in dfl1.index]
dfh21 = pd.concat([dfh2, dfh1], axis=1)
dfh21.columns = ['2', '1']
dfl21 = pd.concat([dfl2, dfl1], axis=1)
dfl21.columns = ['2', '1']
high_phase_diff, low_phase_diff = [get_index_diff(dfh21[sl]) for sl in sliceh1], [
get_index_diff(dfl21[sl]) for sl in slicel1]
merged_diff = pd.merge(pd.DataFrame(high_phase_diff, index=dfh1.index), pd.DataFrame(
low_phase_diff, index=dfl1.index), how='outer', left_index=True, right_index=True)
return merged_diff.iloc[:, 0].fillna(merged_diff.iloc[:, 1])
def get_tidal_hl_zerocrossing(df, round_to='1T'):
'''
Finds the tidal high and low times using zero crossings of the first derivative.
This works for all situations but is not robust in the face of noise and perturbations in the signal
'''
zc, zi = zerocross(df)
if round_to:
zc = pd.to_datetime(zc).round(round_to)
return zc
def zerocross(df):
'''
Calculates the gradient of the time series and identifies locations where gradient changes sign
Returns the time rounded to nearest minute where the zero crossing happens (based on linear derivative assumption)
'''
diffdfv = pd.Series(np.gradient(df[df.columns[0]].values), index=df.index)
indi = np.where((np.diff(np.sign(diffdfv))) & (diffdfv[1:] != 0))[0]
# Find the zero crossing by linear interpolation
zdb = diffdfv[indi].index
zda = diffdfv[indi+1].index
x = diffdfv.index
y = diffdfv.values
dx = x[indi+1] - x[indi]
dy = y[indi+1] - y[indi]
zc = -y[indi] * (dx/dy) + x[indi]
return zc, indi
##---- FUNCTIONS CACHED BELOW THIS LINE PERHAPS TO USE LATER? ---#
def where_changed(df):
    '''
    Return the rows of df whose value differs from the value on the previous row.
    '''
diff = np.diff(df[df.columns[0]].values)
wdiff = np.where(diff != 0)[0]
wdiff = np.insert(wdiff, 0, 0) # insert the first value i.e. zero index
return df.iloc[wdiff+1, :]
def where_same(dfg, df):
'''
return dfg only where its value is the same as df for the same time stamps
i.e. the interesection locations with df
'''
    dfall = pd.concat([dfg, df], axis=1)
import warnings
from pkg_resources import resource_filename
from tqdm import tqdm
import numpy as np
import pandas as pd
from tensorflow.keras.models import load_model
from sklearn.externals import joblib
# import concise
from mmsplice.utils import logit, predict_deltaLogitPsi, \
predict_pathogenicity, predict_splicing_efficiency, encodeDNA, \
read_ref_psi_annotation, delta_logit_PSI_to_delta_PSI, \
mmsplice_ref_modules, mmsplice_alt_modules, df_batch_writer
from mmsplice.exon_dataloader import SeqSpliter
from mmsplice.mtsplice import MTSplice, tissue_names
from mmsplice.layers import GlobalAveragePooling1D_Mask0, ConvDNA
ACCEPTOR_INTRON = resource_filename('mmsplice', 'models/Intron3.h5')
DONOR = resource_filename('mmsplice', 'models/Donor.h5')
EXON = resource_filename('mmsplice', 'models/Exon.h5')
EXON3 = resource_filename('mmsplice', 'models/Exon_prime3.h5')
ACCEPTOR = resource_filename('mmsplice', 'models/Acceptor.h5')
DONOR_INTRON = resource_filename('mmsplice', 'models/Intron5.h5')
LINEAR_MODEL = joblib.load(resource_filename(
'mmsplice', 'models/linear_model.pkl'))
LOGISTIC_MODEL = joblib.load(resource_filename(
'mmsplice', 'models/Pathogenicity.pkl'))
EFFICIENCY_MODEL = joblib.load(resource_filename(
'mmsplice', 'models/splicing_efficiency.pkl'))
custom_objects = {
'ConvDNA': ConvDNA
}
class MMSplice(object):
"""
Load modules of mmsplice model, perform prediction on batch of dataloader.
Args:
acceptor_intronM: acceptor intron model,
score acceptor intron sequence.
        acceptorM: acceptor splice site model. Score acceptor sequence
with 50bp from intron, 3bp from exon.
exonM: exon model, score exon sequence.
donorM: donor splice site model, score donor sequence
with 13bp in the intron, 5bp in the exon.
donor_intronM: donor intron model, score donor intron sequence.
"""
def __init__(self,
acceptor_intronM=ACCEPTOR_INTRON,
acceptorM=ACCEPTOR,
exonM=EXON,
donorM=DONOR,
donor_intronM=DONOR_INTRON,
seq_spliter=None,
deep=True):
self.spliter = seq_spliter or SeqSpliter()
self.acceptor_intronM = load_model(
acceptor_intronM, compile=False,
custom_objects=custom_objects)
self.acceptorM = load_model(acceptorM, compile=False,
custom_objects=custom_objects)
self.exonM = load_model(exonM, compile=False, custom_objects={
"GlobalAveragePooling1D_Mask0": GlobalAveragePooling1D_Mask0,
'ConvDNA': ConvDNA
})
self.donorM = load_model(donorM, compile=False,
custom_objects=custom_objects)
self.donor_intronM = load_model(donor_intronM, compile=False,
custom_objects=custom_objects)
self.deep = deep
def predict_on_batch(self, batch):
warnings.warn(
"`self.predict_on_batch` is deprecated,"
" use `self.predict_modular_scores_on_batch instead`",
DeprecationWarning
)
return self.predict_modular_scores_on_batch(batch)
def predict_modular_scores_on_batch(self, batch):
'''
Perform prediction on batch of dataloader.
Args:
batch: batch of dataloader.
Returns:
np.matrix of modular predictions
as [[acceptor_intronM, acceptor, exon, donor, donor_intron]]
'''
score = np.concatenate([
self.acceptor_intronM.predict(batch['acceptor_intron']),
logit(self.acceptorM.predict(batch['acceptor'])),
self.exonM.predict(batch['exon']),
logit(self.donorM.predict(batch['donor'])),
self.donor_intronM.predict(batch['donor_intron'])
], axis=1)
return score
def predict(self, *args, **kwargs):
warnings.warn(
"self.predict is deprecated, use self.predict_on_seq instead",
DeprecationWarning
)
return self.predict_on_seq(*args, **kwargs)
def predict_on_seq(self, seq, overhang=(100, 100)):
"""
        Perform prediction on an overhanged exon sequence string.
Args:
seq (str): sequence of overhanged exon.
            overhang (Tuple[int, int]): overhang of sequence.
Returns:
np.array of modular predictions
as [[acceptor_intronM, acceptor, exon, donor, donor_intron]].
"""
batch = self.spliter.split(seq, overhang)
batch = {k: encodeDNA([v]) for k, v in batch.items()}
return self.predict_modular_scores_on_batch(batch)[0]
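    # Minimal usage sketch (the sequence variable is a placeholder, not from the package docs):
    #   model = MMSplice()
    #   scores = model.predict_on_seq(exon_with_flanks, overhang=(100, 100))
    #   # scores -> [acceptor_intron, acceptor, exon, donor, donor_intron]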
def _predict_batch(self, batch, optional_metadata=None):
optional_metadata = optional_metadata or []
X_ref = self.predict_modular_scores_on_batch(
batch['inputs']['seq'])
X_alt = self.predict_modular_scores_on_batch(
batch['inputs']['mut_seq'])
ref_pred = pd.DataFrame(X_ref, columns=mmsplice_ref_modules)
alt_pred = pd.DataFrame(X_alt, columns=mmsplice_alt_modules)
df = pd.DataFrame({
'ID': batch['metadata']['variant']['annotation'],
'exons': batch['metadata']['exon']['annotation'],
})
for key in optional_metadata:
for k, v in batch['metadata'].items():
if key in v:
df[key] = v[key]
df['delta_logit_psi'] = predict_deltaLogitPsi(X_ref, X_alt)
        df = pd.concat([df, ref_pred, alt_pred], axis=1)
# Copyright 2020 The Merlin Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from typing import Union
import joblib
import mlflow
import numpy as np
import pandas as pd
import pytest
from merlin.model import PyFuncV2Model
from mlflow.pyfunc import log_model
from sklearn.datasets import load_iris
from merlinpyspark.config import ModelConfig
from merlinpyspark.model import create_model_udf
from merlinpyspark.spec.prediction_job_pb2 import Model, ResultType, ModelType
from pandas.testing import assert_frame_equal
class IrisModel(PyFuncV2Model):
def initialize(self, artifacts: dict):
self._model = joblib.load(artifacts["model_path"])
def infer(self, model_input: pd.DataFrame) -> Union[np.ndarray,
pd.Series,
pd.DataFrame]:
return self._model.predict(model_input)
@pytest.mark.ci
def test_pyfuncv2_model(spark_session):
log_model("model", python_model=IrisModel(), artifacts={
"model_path": "test-model/model.joblib"}, code_path=["merlinpyspark", "test"],
conda_env="test-model/conda.yaml")
model_path = os.path.join(mlflow.get_artifact_uri(), "model")
iris = load_iris()
columns = ["sepal_length", "sepal_width", "petal_length", "petal_width"]
pdf = pd.DataFrame(iris.data, columns=columns)
df = spark_session.createDataFrame(pdf)
model_cfg_proto = Model(type=ModelType.PYFUNC_V2,
uri=model_path,
result=Model.ModelResult(type=ResultType.DOUBLE))
model_udf = create_model_udf(spark_session, ModelConfig(model_cfg_proto), columns)
iris_model = mlflow.pyfunc.load_model(model_path)
df = df.withColumn("prediction",
model_udf("sepal_length", "sepal_width", "petal_length",
"petal_width"))
result_pandas = df.toPandas()
r = result_pandas.drop(columns=["prediction"])
    assert_frame_equal(r, pdf)
#!/usr/bin/env python
# coding: utf-8
# In[1]:
# Imports
import random
import sys
import numpy as np
import time
import matplotlib.pyplot as plt
import seaborn as sns
import pandas as pd
import torch.nn as nn
import torch.optim as optim
import torchvision.utils as vutils
import matplotlib.animation as animation
from IPython.display import HTML
import model_v4_small as model
import torch
import keijzer_exogan as ke
# initialize random seeds
manualSeed = 999
random.seed(manualSeed)
torch.manual_seed(manualSeed)
#get_ipython().run_line_magic('matplotlib', 'inline')
#get_ipython().run_line_magic('config', 'InlineBackend.print_figure_kwargs={\'facecolor\' : "w"} # Make sure the axis background of plots is white, this is usefull for the black theme in JupyterLab')
# Initialize default seaborn layout
sns.set_palette(sns.hls_palette(8, l=.3, s=.8))
sns.set(style='ticks')
"""
Local variables
"""
workers = 0 # Number of workers for dataloader, 0 when to_vram is enabled
batch_size = 1 # using one image ofcourse
image_size = 32
nz = 100 # size of latent vector
n_iters = 1000 #25*10**3 # number of iterations to do for inpainting
torch.backends.cudnn.benchmark=True # Uses cudnn auto-tuner to find the best algorithm to use for your hardware, speeds up training by almost 50%
lr = 1e-1
lamb1 = 1 #1e4
lamb2 = 1e-1 # 1 , total_loss = lamb1*loss_context + lamb2*loss_perceptual
beta1 = 0.5 # Beta1 hyperparam for Adam optimizers
selected_gpus = [3] # Number of GPUs available. Use 0 for CPU mode.
#n_images = 500
inpaint_n_times = 15
save_array_results = True
load_array_results = False
filename = 'debug_0_1000_1e-1_15_wgan_simple' # 0:100 lamb1=10, lamb2=1
# debug_0_5000_1_1e-1_* c is exogan data with original brian mask, d is with binary mask
# In[2]:
path = '/datb/16011015/ExoGAN_data/selection//' #notice how you dont put the last folder in here...
# # Load all ASPAs
images = np.load(path+'last_chunks_25_percent_images_v4.npy').astype('float32')
np.random.shuffle(images)
len(images)
# np.save(path+'last_chunks_mini_selection.npy', images[:3000])
# # Load smaller selection of ASPAs
# In[3]:
#images = np.load(path+'last_chunks_25_percent_images_v4.1.npy') # 4.1 is a random selection of 5k images
images = np.load(path+'last_chunks_25_percent_images_v4.2.npy')
print('Loaded %s images' % len(images))
print('Batch size: ', batch_size)
# Number of training epochs
# Learning rate for optimizers
ngpu = len(selected_gpus)
print('Number of GPUs used: ', ngpu)
"""
Load data and prepare DataLoader
"""
shuffle = False
if shuffle:
np.random.shuffle(images) # shuffles the images
images = images[0:1000]
n_images = len(images)
#images = images[:int(len(images)*0.005)]
print('Number of images: ', n_images)
dataset = ke.numpy_dataset(data=images, to_vram=True) # to_vram pins it to all GPU's
#dataset = numpy_dataset(data=images, to_vram=True, transform=transforms.Compose([transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))])) # to_vram pins it to all GPU's
# Create the dataloader
dataloader = torch.utils.data.DataLoader(dataset, batch_size=batch_size,
shuffle=True, num_workers=workers, pin_memory=False)
# In[4]:
"""
Load and setup models
"""
# Initialize cuda
device = torch.device("cuda:"+str(selected_gpus[0]) if (torch.cuda.is_available() and ngpu > 0) else "cpu")
# Load models, set to evaluation mode since training is not needed (this also allows batchsize 1 to work with batchnorm2d layers)
netG = model.Generator(ngpu).eval().to(device)
netD = model.Discriminator(ngpu).eval().to(device)
# Apply weights
print('Loading weights...')
try:
# Load saved weights
netG.load_state_dict(torch.load('gan_data//weights//netG_state_dict_wgan_model_v4_small', map_location=device)) #net.module..load_... for parallel model , net.load_... for single gpu model
netD.load_state_dict(torch.load('gan_data//weights//netD_state_dict_wgan_model_v4_small', map_location=device))
except:
print('Could not load saved weights.')
sys.exit()
"""
Define input training stuff (fancy this up)
"""
G = netG
D = netD
z = torch.randn(1, nz, 1, 1, requires_grad=True, device=device)
# Handle multi-gpu if desired
if (device.type == 'cuda') and (ngpu > 1):
G = nn.DataParallel(G, device_ids=selected_gpus, output_device=device)
D = nn.DataParallel(D, device_ids=selected_gpus, output_device=device)
#z = nn.DataParallel(z, device_ids=selected_gpus, output_device=device)
# Setup Adam optimizers for both G and D
optimizerD = optim.Adam(netD.parameters(), lr=lr, betas=(beta1, 0.999)) # should be sgd
optimizerG = optim.Adam(netG.parameters(), lr=lr, betas=(beta1, 0.999))
print('done')
# # Show generated images
# In[ ]:
from sklearn.preprocessing import MinMaxScaler
z_tests = [torch.randn(1, nz, 1, 1, device=device) for _ in range(9)]
#plt.figure(figsize=(10,10))
for i in range(9):
img = G(z_tests[i]).detach().cpu()[0, 0, :, :]
#plt.subplot(3,3,i+1)
#scaler = MinMaxScaler((0, 1.2))
#img = scaler.fit_transform(img)
#plt.imshow(img, cmap='gray', vmin=-1.2, vmax=1.2)
#plt.imshow(img, cmap='gray')
#plt.tight_layout()
img.min(), img.max(), img.mean(), img.std()
# # Show first 9 selected images
# In[ ]:
#plt.figure(figsize=(10,10))
for i in range(9):
try:
img = images[i]
#plt.subplot(3,3,i+1)
#plt.imshow(img[0, :, :], cmap='gray', vmin=-1.2, vmax=1.2)
except:
pass
#plt.tight_layout()
img.min(), img.max(), img.mean(), img.std()
# # Visualizing the weights
# In[ ]:
weights = [param.data.cpu().numpy().flatten() for param in netD.parameters()]
"""
plt.figure(figsize=(10,10))
for i,layer_weights in enumerate(weights):
print('Layer: %s \t n_weights: %s \t std: %.4f \t mean: %.4f' % (i, len(layer_weights), layer_weights.std(), layer_weights.mean()))
plt.subplot(3,2,i+1)
plt.title('netD layer %s weights' % i)
plt.hist(layer_weights, bins=100)
plt.grid()
plt.tight_layout()
"""
# In[ ]:
weights = [param.data.cpu().numpy().flatten() for param in netG.parameters()] # where param.data are the weights of the i-th layer
"""
plt.figure(figsize=(10,10))
for i,layer_weights in enumerate(weights):
print('Layer: %s \t n_weights: %s \t std: %.4f \t mean: %.4f' % (i, len(layer_weights), layer_weights.std(), layer_weights.mean()))
plt.subplot(3,2,i+1)
plt.title('netG layer %s weights' % i)
plt.hist(layer_weights, bins=100)
plt.grid()
plt.tight_layout()
"""
# # Inpainting
# The corrupted image $y$ is mapped to the closest $z$ in the latent representation space, this mapping is denoted as $\hat{z}$.
#
# $\hat{z} = \operatorname{arg\,min}_z \{ \mathcal{L}_c(z |y, M) + \mathcal{L}_p (z) \}$
#
# where
#
# $\mathcal{L}_c(z |y, M) = || M \bigodot G(z) - M \bigodot y||_1 = || M \bigodot (G(z)-y) ||_1 $
#
# with $\mathcal{L}_c$ being contextual loss and $M$ being a binary mask with the same size as $y$,
#
# $\mathcal{L}_p (z) = \lambda \operatorname{log}(1-D(G(z)))$
#
# with $\mathcal{L}_p$ being perceptual loss and $D$ being the discriminator.
#
# Once $G(\hat{z})$ is generated, the final solution $\hat{x}$ is calculated as
#
# $\hat{x} = \operatorname{arg\, min}_x ||\nabla x - \nabla G(\hat{z}) ||^2_2$
#
# (substitute $x_i = y_i$ for $M_i = 1$).
#
# -----
#
# $|| ... ||$ is done by `torch.norm()`.
# $... \bigodot ...$ is done by `torch.mul()`.
# -----
# TODO: Implement $\hat{x} = \operatorname{arg\, min}_x ||\nabla x - \nabla G(\hat{z}) ||^2_2$
# Currently $\hat{x} = G(\hat{z}) \bigodot (1 -M)+y$
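# A minimal sketch (an assumption, not the author's implementation) of the missing
# $\hat{x} = \operatorname{arg\, min}_x ||\nabla x - \nabla G(\hat{z}) ||^2_2$ step:
# plain gradient descent on the blended image while re-imposing the known pixels
# ($x_i = y_i$ where $M_i = 1$) after every update. A binary mask is assumed here.
# In[ ]:
def poisson_blend(g_z_hat, masked_image, mask, n_steps=500, blend_lr=1e-2):
    g_z_hat = g_z_hat.detach()
    x = g_z_hat.clone().requires_grad_(True)
    opt_blend = optim.Adam([x], lr=blend_lr)
    for _ in range(n_steps):
        opt_blend.zero_grad()
        # finite-difference image gradients of x and of G(z_hat), matched in both directions
        loss_blend = ((x[..., 1:, :] - x[..., :-1, :])
                      - (g_z_hat[..., 1:, :] - g_z_hat[..., :-1, :])).pow(2).sum()
        loss_blend = loss_blend + ((x[..., :, 1:] - x[..., :, :-1])
                                   - (g_z_hat[..., :, 1:] - g_z_hat[..., :, :-1])).pow(2).sum()
        loss_blend.backward()
        opt_blend.step()
        with torch.no_grad():  # substitute x_i = y_i for M_i = 1
            x.data = torch.mul(x.data, 1 - mask) + masked_image
    return x.detach()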
# ## Create the mask
# In[ ]:
def create_mask():
mask = np.full([1,1,32,32], 1) # init array with 0.5's
mask = torch.from_numpy(mask).to(device)
#mask = torch.ones([1, 1, 32, 32]).to(device) # create mask with 1's in the shape of image
#print("mask.shape", mask.shape)
# use a random 'easy' mask
# set all params to 0
mask[:, :, :16, 25:] = 0
# set noise to 0
mask[:, :, 18:, :] = 0
"""Weighted mask"""
# Normalization factors
mask[:, :, 16:18, :] = 6 #6
# Planet mass
mask[:, :, :16, 25:26] = 0
mask = mask.float() # make sure dtype is float, torch was complaining during inpainting that this is a double
return mask
# In[ ]:
m = create_mask().cpu()[0, 0, :, :]
#plt.imshow(m, cmap='gray', vmin=0, vmax=2)
# # Inpaiting functions
# In[ ]:
def save_inpainting_results():
# save real aspa's
all_reals = []
for selected_aspa in range(len(real_images)):
reals = np.array([real_images[selected_aspa][i].detach().cpu().numpy()[0, 0, :, :] for i in range(inpaint_n_times)])
all_reals.append(reals)
all_reals = np.array(all_reals)
np.save('gan_data//val_errors//'+filename+'_reals.npy', all_reals)
# save inpained aspa's
all_inpainteds = []
for selected_aspa in range(len(real_images)):
inpainteds = np.array([final_inpainted_images[selected_aspa][i].detach().cpu().numpy()[0, 0, :, :] for i in range(inpaint_n_times)])
all_inpainteds.append(inpainteds)
all_inpainteds = np.array(all_inpainteds)
np.save('gan_data//val_errors//'+filename+'_inpainteds.npy', all_inpainteds)
np.save('gan_data//val_errors//'+filename+'_n_iterations.npy', n_iteration)
np.save('gan_data//val_errors//'+filename+'_contextual_losses.npy', contextual_losses)
np.save('gan_data//val_errors//'+filename+'_perceptual_losses.npy', perceptual_losses)
return
# ## Inpainting loop
# 22.33 iters / s
# In[ ]:
# Lists to keep track of progress
real_images = []
masked_images= []
#inpainted_images = []
final_inpainted_images = [] # last n inpainted images, one index location for each input image [[aspa1, aspa1, aspa1], [aspa2,aspa2,aspa2]] .... where aspa1, aspa1, aspa1 are 3 unique inpaintings
n_iteration = []
perceptual_losses = []
contextual_losses = []
MSELoss = nn.MSELoss()
L1Loss = nn.L1Loss() # MAE
SmoothL1Loss = nn.SmoothL1Loss()
"""
Inpainting
"""
t3 = time.time()
past_s_image = 0
for i, data in enumerate(dataloader, 0): # batches per epoch
real_images_n_times = []
final_inpainted_images_n_times = [] # list of (n) last inpainted image(s), for one aspa
t1 = time.time()
for j in range(inpaint_n_times): # inpaint n times per image
z = torch.randn(1, nz, 1, 1, requires_grad=True, device=device)
opt = optim.Adam([z], lr=lr)
real_cpu = data.to(device)
b_size = real_cpu.size(0) # this is one ofc, it's one image we're trying to inpaint
#print("data.shape: ", data.shape)
image = data.to(device) # select the image (Channel, Height, Width), this is the original unmasked input image
real_images_n_times.append(image)
#print("image.shape: ", image.shape)
"""Mask the image"""
mask = create_mask()
masked_image = torch.mul(image, mask).to(device) #image bigodot mask
masked_images.append(masked_image)
#print('masked image shape', masked_image.shape)
#plt.imshow(masked_image.detach().cpu()[0, 0, :, :], cmap='gray') # plot the masked image
# what's v and m?
v = torch.tensor(0, dtype=torch.float32, device=device)
m = torch.tensor(0, dtype=torch.float32, device=device)
"""Start the inpainting process"""
early_stopping_n_iters = 0
early_stopping_min_loss = 999999999999 # set to random high number to initialize
if j != 0:
n_iteration.append(iteration)
for iteration in range(n_iters):
if z.grad is not None:
z.grad.data.zero_()
G.zero_grad()
D.zero_grad()
image_generated = G(z) # generated image G(z)
image_generated_masked = torch.mul(image_generated, mask) # G(z) bigodot M
image_generated_inpainted = torch.mul(image_generated, (1-mask))+masked_image
#if (iteration % 100 == 0):
# inpainted_images.append(image_generated_inpainted)
#print("image_generated_inpainted.shape : ",image_generated_inpainted.shape)
t = image_generated_inpainted.detach().cpu()[0, 0, :, :]
# TODO: why does this already look real?
#plt.imshow(t, cmap='gray') # plot the masked image
"""Calculate losses"""
loss_context = lamb1*torch.norm(image_generated_masked-masked_image, p=1) #what's p=1?
#loss_context = lamb1*MSELoss(image_generated_masked,masked_image)
#loss_context = L1Loss(image_generated_masked, masked_image)*10
#loss_context = SmoothL1Loss(image_generated_masked, masked_image)*10
discriminator_output = netD(image_generated_inpainted) - 0.005 # -0.005 offset so loss_perceptual doesn't become 1 when D(G(z)) == 1.000000
#print("Discriminator output: ", discriminator_output)
labels = torch.full((b_size,), 1, device=device)
loss_perceptual = lamb2*torch.log(1-discriminator_output)
#if loss_perceptual == -np.inf:
# #print('loss perceptual == -np.inf()')
# loss_perceptual = torch.tensor(-10, dtype=torch.float32, device=device)
#print(loss_perceptual.data.cpu().numpy().flatten()[0])
total_loss = loss_context + loss_perceptual
#total_loss = loss_context + 10*discriminator_output
# grab the values from losses for printing
loss_perceptual = loss_perceptual.data.cpu().numpy().flatten()[0]
#loss_perceptual = 0
loss_context = loss_context.data.cpu().numpy().flatten()[0]
total_loss.sum().backward() # TODO: find out why .sum() is needed (why does the loss tensor have 2 elements?)
opt.step()
total_loss = total_loss.data.cpu().numpy().flatten()[0]
"""Early stopping""" # TODO:
if iteration > 0:
delta_loss = early_stopping_min_loss - total_loss
delta_iters = iteration - iter1
if (delta_loss < 0.1) or (total_loss > early_stopping_min_loss):
early_stopping_n_iters += 1
else:
#print('set to zero')
early_stopping_n_iters = 0
if early_stopping_n_iters > 1000:
#n_iteration.append(iteration)
#break
#z = z_best
#early_stopping_n_iters = 0
#print('z copied')
pass
loss1 = total_loss
iter1 = iteration
if total_loss < early_stopping_min_loss:
early_stopping_min_loss = total_loss
best_inpained_image = image_generated.detach().cpu()
contextual_loss_best = loss_context
perceptual_loss_best = loss_perceptual
early_stopping_n_iters = 0
z_best = z
#print('min loss: ', early_stopping_min_loss)
t2 = time.time()
"""Calculate ETA"""
#t_per_iter = t2 - t1 # time per iteration in seconds
past_time = t2 - t3
#eta = t_per_iter * (n_iters - iteration) + t_per_iter* (len(images)-i+1) * n_iters # time left to finish epoch/image + time left to finish all epochs/images in SECONDS
#eta_h = (eta/ 60) // 60 # divisor integer
#eta_m = eta % 60 # get remainer
            past_m = past_time // 60
past_s = past_time % 60
if (iteration % 50 == 0):
print("\r image [{}/{}] inpainting [{}/{}] iteration : {:4} , context_loss: {:.3f}, perceptual_loss: {:3f}, total_loss: {:3f}, min L: {:3f}, {:1f}, D(G(z)): {:3f}, Run time: {:.0f}m {:.0f}s, s per image {:.0f}s".format(i+1,
len(images), j+1, inpaint_n_times, iteration, loss_context,loss_perceptual, total_loss,early_stopping_min_loss, early_stopping_n_iters, discriminator_output.data.cpu().numpy().flatten()[0], past_m, past_s, past_s_image),end="")
"""NaN monitor"""
#if (loss_context or loss_perceptual == np.nan()) and iteration >64:
# print(r'='*10 + ' NaN '+ '='*10)
            # print(loss_context, loss_perceptual)
            #break
final_inpainted_images_n_times.append(best_inpained_image.detach().cpu())
past_s_image = (t2-t1) % 60
final_inpainted_images.append(final_inpainted_images_n_times)
real_images.append(real_images_n_times)
contextual_losses.append(contextual_loss_best)
perceptual_losses.append(perceptual_loss_best)
if save_array_results:
save_inpainting_results()
# In[ ]:
perceptual_losses
# # Error of one ASPA
# In[ ]:
selected_aspa = 0
reals = [real_images[selected_aspa][i].detach().cpu().numpy()[0, 0, :, :] for i in range(inpaint_n_times)]
inpainteds = [final_inpainted_images[selected_aspa][i].detach().cpu().numpy()[0, 0, :, :] for i in range(inpaint_n_times)]
# In[ ]:
# :16, :25 is the spectrum location within the ASPA
real = reals[0][:16, :25].flatten()
inpainted = inpainteds[0][:16, :25].flatten()
"""
plt.figure(figsize=(15,5))
plt.plot(real, 'x-', c='r', linewidth=0.5)
plt.plot(inpainted, '.-', linewidth=0.5)
"""
# In[ ]:
# Pixel difference
# In[ ]:
#plt.plot(inpainted-real, '.-')
# In[ ]:
i = 0
xhat,yhat = ke.decode_spectrum_from_aspa(reals[i])
x,y = ke.decode_spectrum_from_aspa(inpainteds[i])
"""
plt.plot(xhat, yhat, label='real', c='r')
plt.plot(x,y,label='inpainted')
plt.gca().set_xscale('log')
plt.legend()
"""
# In[ ]:
# In[ ]:
# In[ ]:
# In[ ]:
reals[0].shape
# In[ ]:
reals = [ke.decode_params_from_aspa(aspa_real) for aspa_real in reals]
inpainteds = [ke.decode_params_from_aspa(aspa_inpainted) for aspa_inpainted in inpainteds]
# In[ ]:
# Initialize ExoGAN params with zero's
inpainted_params = {
'planet_mass': [],
'temp_profile': [],
'ch4_mixratio': [],
'planet_radius': [],
'h2o_mixratio': [],
'co2_mixratio': [],
'co_mixratio': []
}
# In[ ]:
# iterate over all params
for i,param in enumerate(inpainted_params):
# iterate over all inpainted values (of above param)
for j,inpainted in enumerate(inpainteds):
y_hat = reals[j][param] # real value
y = inpainted[param] # inpainted value
percentage_error = ((y - y_hat) / y_hat)*100
inpainted_params[param] += [percentage_error]
# In[ ]:
df = pd.DataFrame(inpainted_params)
# -*- coding: utf-8 -*-
# pylint: disable=W0612,E1101
from datetime import datetime
import operator
import nose
from functools import wraps
import numpy as np
import pandas as pd
from pandas import Series, DataFrame, Index, isnull, notnull, pivot, MultiIndex
from pandas.core.datetools import bday
from pandas.core.nanops import nanall, nanany
from pandas.core.panel import Panel
from pandas.core.series import remove_na
import pandas.core.common as com
from pandas import compat
from pandas.compat import range, lrange, StringIO, OrderedDict, signature
from pandas import SparsePanel
from pandas.util.testing import (assert_panel_equal, assert_frame_equal,
assert_series_equal, assert_almost_equal,
assert_produces_warning, ensure_clean,
assertRaisesRegexp, makeCustomDataframe as
mkdf, makeMixedDataFrame)
import pandas.core.panel as panelm
import pandas.util.testing as tm
def ignore_sparse_panel_future_warning(func):
"""
decorator to ignore FutureWarning if we have a SparsePanel
can be removed when SparsePanel is fully removed
"""
@wraps(func)
def wrapper(self, *args, **kwargs):
if isinstance(self.panel, SparsePanel):
with assert_produces_warning(FutureWarning,
check_stacklevel=False):
return func(self, *args, **kwargs)
else:
return func(self, *args, **kwargs)
return wrapper
class PanelTests(object):
panel = None
def test_pickle(self):
unpickled = self.round_trip_pickle(self.panel)
assert_frame_equal(unpickled['ItemA'], self.panel['ItemA'])
def test_rank(self):
self.assertRaises(NotImplementedError, lambda: self.panel.rank())
def test_cumsum(self):
cumsum = self.panel.cumsum()
assert_frame_equal(cumsum['ItemA'], self.panel['ItemA'].cumsum())
def not_hashable(self):
c_empty = Panel()
c = Panel(Panel([[[1]]]))
self.assertRaises(TypeError, hash, c_empty)
self.assertRaises(TypeError, hash, c)
class SafeForLongAndSparse(object):
_multiprocess_can_split_ = True
def test_repr(self):
repr(self.panel)
@ignore_sparse_panel_future_warning
def test_copy_names(self):
for attr in ('major_axis', 'minor_axis'):
getattr(self.panel, attr).name = None
cp = self.panel.copy()
getattr(cp, attr).name = 'foo'
self.assertIsNone(getattr(self.panel, attr).name)
def test_iter(self):
tm.equalContents(list(self.panel), self.panel.items)
def test_count(self):
f = lambda s: notnull(s).sum()
self._check_stat_op('count', f, obj=self.panel, has_skipna=False)
def test_sum(self):
self._check_stat_op('sum', np.sum)
def test_mean(self):
self._check_stat_op('mean', np.mean)
def test_prod(self):
self._check_stat_op('prod', np.prod)
def test_median(self):
def wrapper(x):
if isnull(x).any():
return np.nan
return np.median(x)
self._check_stat_op('median', wrapper)
def test_min(self):
self._check_stat_op('min', np.min)
def test_max(self):
self._check_stat_op('max', np.max)
def test_skew(self):
try:
from scipy.stats import skew
except ImportError:
raise nose.SkipTest("no scipy.stats.skew")
def this_skew(x):
if len(x) < 3:
return np.nan
return skew(x, bias=False)
self._check_stat_op('skew', this_skew)
# def test_mad(self):
# f = lambda x: np.abs(x - x.mean()).mean()
# self._check_stat_op('mad', f)
def test_var(self):
def alt(x):
if len(x) < 2:
return np.nan
return np.var(x, ddof=1)
self._check_stat_op('var', alt)
def test_std(self):
def alt(x):
if len(x) < 2:
return np.nan
return np.std(x, ddof=1)
self._check_stat_op('std', alt)
def test_sem(self):
def alt(x):
if len(x) < 2:
return np.nan
return np.std(x, ddof=1) / np.sqrt(len(x))
self._check_stat_op('sem', alt)
# def test_skew(self):
# from scipy.stats import skew
# def alt(x):
# if len(x) < 3:
# return np.nan
# return skew(x, bias=False)
# self._check_stat_op('skew', alt)
def _check_stat_op(self, name, alternative, obj=None, has_skipna=True):
if obj is None:
obj = self.panel
# # set some NAs
# obj.ix[5:10] = np.nan
# obj.ix[15:20, -2:] = np.nan
f = getattr(obj, name)
if has_skipna:
def skipna_wrapper(x):
nona = remove_na(x)
if len(nona) == 0:
return np.nan
return alternative(nona)
def wrapper(x):
return alternative(np.asarray(x))
for i in range(obj.ndim):
result = f(axis=i, skipna=False)
assert_frame_equal(result, obj.apply(wrapper, axis=i))
else:
skipna_wrapper = alternative
wrapper = alternative
for i in range(obj.ndim):
result = f(axis=i)
if not tm._incompat_bottleneck_version(name):
assert_frame_equal(result, obj.apply(skipna_wrapper, axis=i))
self.assertRaises(Exception, f, axis=obj.ndim)
# Unimplemented numeric_only parameter.
if 'numeric_only' in signature(f).args:
self.assertRaisesRegexp(NotImplementedError, name, f,
numeric_only=True)
class SafeForSparse(object):
_multiprocess_can_split_ = True
@classmethod
def assert_panel_equal(cls, x, y):
assert_panel_equal(x, y)
def test_get_axis(self):
assert (self.panel._get_axis(0) is self.panel.items)
assert (self.panel._get_axis(1) is self.panel.major_axis)
assert (self.panel._get_axis(2) is self.panel.minor_axis)
def test_set_axis(self):
new_items = Index(np.arange(len(self.panel.items)))
new_major = Index(np.arange(len(self.panel.major_axis)))
new_minor = Index(np.arange(len(self.panel.minor_axis)))
# ensure propagate to potentially prior-cached items too
item = self.panel['ItemA']
self.panel.items = new_items
if hasattr(self.panel, '_item_cache'):
self.assertNotIn('ItemA', self.panel._item_cache)
self.assertIs(self.panel.items, new_items)
# TODO: unused?
item = self.panel[0] # noqa
self.panel.major_axis = new_major
self.assertIs(self.panel[0].index, new_major)
self.assertIs(self.panel.major_axis, new_major)
# TODO: unused?
item = self.panel[0] # noqa
self.panel.minor_axis = new_minor
self.assertIs(self.panel[0].columns, new_minor)
self.assertIs(self.panel.minor_axis, new_minor)
def test_get_axis_number(self):
self.assertEqual(self.panel._get_axis_number('items'), 0)
self.assertEqual(self.panel._get_axis_number('major'), 1)
self.assertEqual(self.panel._get_axis_number('minor'), 2)
def test_get_axis_name(self):
self.assertEqual(self.panel._get_axis_name(0), 'items')
self.assertEqual(self.panel._get_axis_name(1), 'major_axis')
self.assertEqual(self.panel._get_axis_name(2), 'minor_axis')
def test_get_plane_axes(self):
# what to do here?
index, columns = self.panel._get_plane_axes('items')
index, columns = self.panel._get_plane_axes('major_axis')
index, columns = self.panel._get_plane_axes('minor_axis')
index, columns = self.panel._get_plane_axes(0)
@ignore_sparse_panel_future_warning
def test_truncate(self):
dates = self.panel.major_axis
start, end = dates[1], dates[5]
trunced = self.panel.truncate(start, end, axis='major')
expected = self.panel['ItemA'].truncate(start, end)
assert_frame_equal(trunced['ItemA'], expected)
trunced = self.panel.truncate(before=start, axis='major')
expected = self.panel['ItemA'].truncate(before=start)
assert_frame_equal(trunced['ItemA'], expected)
trunced = self.panel.truncate(after=end, axis='major')
expected = self.panel['ItemA'].truncate(after=end)
assert_frame_equal(trunced['ItemA'], expected)
# XXX test other axes
def test_arith(self):
self._test_op(self.panel, operator.add)
self._test_op(self.panel, operator.sub)
self._test_op(self.panel, operator.mul)
self._test_op(self.panel, operator.truediv)
self._test_op(self.panel, operator.floordiv)
self._test_op(self.panel, operator.pow)
self._test_op(self.panel, lambda x, y: y + x)
self._test_op(self.panel, lambda x, y: y - x)
self._test_op(self.panel, lambda x, y: y * x)
self._test_op(self.panel, lambda x, y: y / x)
self._test_op(self.panel, lambda x, y: y ** x)
self._test_op(self.panel, lambda x, y: x + y) # panel + 1
self._test_op(self.panel, lambda x, y: x - y) # panel - 1
self._test_op(self.panel, lambda x, y: x * y) # panel * 1
self._test_op(self.panel, lambda x, y: x / y) # panel / 1
self._test_op(self.panel, lambda x, y: x ** y) # panel ** 1
self.assertRaises(Exception, self.panel.__add__, self.panel['ItemA'])
@staticmethod
def _test_op(panel, op):
result = op(panel, 1)
assert_frame_equal(result['ItemA'], op(panel['ItemA'], 1))
def test_keys(self):
tm.equalContents(list(self.panel.keys()), self.panel.items)
def test_iteritems(self):
# Test panel.iteritems()
# just test that it works
for k, v in self.panel.iteritems():
pass
self.assertEqual(len(list(self.panel.iteritems())),
len(self.panel.items))
@ignore_sparse_panel_future_warning
def test_combineFrame(self):
def check_op(op, name):
# items
df = self.panel['ItemA']
func = getattr(self.panel, name)
result = func(df, axis='items')
assert_frame_equal(result['ItemB'], op(self.panel['ItemB'], df))
# major
xs = self.panel.major_xs(self.panel.major_axis[0])
result = func(xs, axis='major')
idx = self.panel.major_axis[1]
assert_frame_equal(result.major_xs(idx),
op(self.panel.major_xs(idx), xs))
# minor
xs = self.panel.minor_xs(self.panel.minor_axis[0])
result = func(xs, axis='minor')
idx = self.panel.minor_axis[1]
assert_frame_equal(result.minor_xs(idx),
op(self.panel.minor_xs(idx), xs))
ops = ['add', 'sub', 'mul', 'truediv', 'floordiv']
if not compat.PY3:
ops.append('div')
# pow, mod not supported for SparsePanel as flex ops (for now)
if not isinstance(self.panel, SparsePanel):
ops.extend(['pow', 'mod'])
else:
idx = self.panel.minor_axis[1]
with assertRaisesRegexp(ValueError, "Simple arithmetic.*scalar"):
self.panel.pow(self.panel.minor_xs(idx), axis='minor')
with assertRaisesRegexp(ValueError, "Simple arithmetic.*scalar"):
self.panel.mod(self.panel.minor_xs(idx), axis='minor')
for op in ops:
try:
check_op(getattr(operator, op), op)
except:
com.pprint_thing("Failing operation: %r" % op)
raise
if compat.PY3:
try:
check_op(operator.truediv, 'div')
except:
com.pprint_thing("Failing operation: %r" % 'div')
raise
@ignore_sparse_panel_future_warning
def test_combinePanel(self):
result = self.panel.add(self.panel)
self.assert_panel_equal(result, self.panel * 2)
@ignore_sparse_panel_future_warning
def test_neg(self):
self.assert_panel_equal(-self.panel, self.panel * -1)
# issue 7692
def test_raise_when_not_implemented(self):
p = Panel(np.arange(3 * 4 * 5).reshape(3, 4, 5),
items=['ItemA', 'ItemB', 'ItemC'],
major_axis=pd.date_range('20130101', periods=4),
minor_axis=list('ABCDE'))
d = p.sum(axis=1).ix[0]
ops = ['add', 'sub', 'mul', 'truediv', 'floordiv', 'div', 'mod', 'pow']
for op in ops:
with self.assertRaises(NotImplementedError):
getattr(p, op)(d, axis=0)
@ignore_sparse_panel_future_warning
def test_select(self):
p = self.panel
# select items
result = p.select(lambda x: x in ('ItemA', 'ItemC'), axis='items')
expected = p.reindex(items=['ItemA', 'ItemC'])
self.assert_panel_equal(result, expected)
# select major_axis
result = p.select(lambda x: x >= datetime(2000, 1, 15), axis='major')
new_major = p.major_axis[p.major_axis >= datetime(2000, 1, 15)]
expected = p.reindex(major=new_major)
self.assert_panel_equal(result, expected)
# select minor_axis
result = p.select(lambda x: x in ('D', 'A'), axis=2)
expected = p.reindex(minor=['A', 'D'])
self.assert_panel_equal(result, expected)
# corner case, empty thing
result = p.select(lambda x: x in ('foo', ), axis='items')
self.assert_panel_equal(result, p.reindex(items=[]))
def test_get_value(self):
for item in self.panel.items:
for mjr in self.panel.major_axis[::2]:
for mnr in self.panel.minor_axis:
result = self.panel.get_value(item, mjr, mnr)
expected = self.panel[item][mnr][mjr]
assert_almost_equal(result, expected)
@ignore_sparse_panel_future_warning
def test_abs(self):
result = self.panel.abs()
result2 = abs(self.panel)
expected = np.abs(self.panel)
self.assert_panel_equal(result, expected)
self.assert_panel_equal(result2, expected)
df = self.panel['ItemA']
result = df.abs()
result2 = abs(df)
expected = np.abs(df)
assert_frame_equal(result, expected)
assert_frame_equal(result2, expected)
s = df['A']
result = s.abs()
result2 = abs(s)
expected = np.abs(s)
assert_series_equal(result, expected)
assert_series_equal(result2, expected)
self.assertEqual(result.name, 'A')
self.assertEqual(result2.name, 'A')
class CheckIndexing(object):
_multiprocess_can_split_ = True
def test_getitem(self):
self.assertRaises(Exception, self.panel.__getitem__, 'ItemQ')
def test_delitem_and_pop(self):
expected = self.panel['ItemA']
result = self.panel.pop('ItemA')
assert_frame_equal(expected, result)
self.assertNotIn('ItemA', self.panel.items)
del self.panel['ItemB']
self.assertNotIn('ItemB', self.panel.items)
self.assertRaises(Exception, self.panel.__delitem__, 'ItemB')
values = np.empty((3, 3, 3))
values[0] = 0
values[1] = 1
values[2] = 2
panel = Panel(values, lrange(3), lrange(3), lrange(3))
# did we delete the right row?
panelc = panel.copy()
del panelc[0]
assert_frame_equal(panelc[1], panel[1])
assert_frame_equal(panelc[2], panel[2])
panelc = panel.copy()
del panelc[1]
assert_frame_equal(panelc[0], panel[0])
assert_frame_equal(panelc[2], panel[2])
panelc = panel.copy()
del panelc[2]
assert_frame_equal(panelc[1], panel[1])
assert_frame_equal(panelc[0], panel[0])
def test_setitem(self):
# LongPanel with one item
lp = self.panel.filter(['ItemA', 'ItemB']).to_frame()
with tm.assertRaises(ValueError):
self.panel['ItemE'] = lp
# DataFrame
df = self.panel['ItemA'][2:].filter(items=['A', 'B'])
self.panel['ItemF'] = df
self.panel['ItemE'] = df
df2 = self.panel['ItemF']
assert_frame_equal(df, df2.reindex(index=df.index, columns=df.columns))
# scalar
self.panel['ItemG'] = 1
self.panel['ItemE'] = True
self.assertEqual(self.panel['ItemG'].values.dtype, np.int64)
self.assertEqual(self.panel['ItemE'].values.dtype, np.bool_)
# object dtype
self.panel['ItemQ'] = 'foo'
self.assertEqual(self.panel['ItemQ'].values.dtype, np.object_)
# boolean dtype
self.panel['ItemP'] = self.panel['ItemA'] > 0
self.assertEqual(self.panel['ItemP'].values.dtype, np.bool_)
self.assertRaises(TypeError, self.panel.__setitem__, 'foo',
self.panel.ix[['ItemP']])
# bad shape
p = Panel(np.random.randn(4, 3, 2))
with tm.assertRaisesRegexp(ValueError,
"shape of value must be \(3, 2\), "
"shape of given object was \(4, 2\)"):
p[0] = np.random.randn(4, 2)
def test_setitem_ndarray(self):
from pandas import date_range, datetools
timeidx = date_range(start=datetime(2009, 1, 1),
end=datetime(2009, 12, 31),
freq=datetools.MonthEnd())
lons_coarse = np.linspace(-177.5, 177.5, 72)
lats_coarse = np.linspace(-87.5, 87.5, 36)
P = Panel(items=timeidx, major_axis=lons_coarse,
minor_axis=lats_coarse)
data = np.random.randn(72 * 36).reshape((72, 36))
key = datetime(2009, 2, 28)
P[key] = data
assert_almost_equal(P[key].values, data)
def test_set_minor_major(self):
# GH 11014
df1 = DataFrame(['a', 'a', 'a', np.nan, 'a', np.nan])
df2 = DataFrame([1.0, np.nan, 1.0, np.nan, 1.0, 1.0])
panel = Panel({'Item1': df1, 'Item2': df2})
newminor = notnull(panel.iloc[:, :, 0])
panel.loc[:, :, 'NewMinor'] = newminor
assert_frame_equal(panel.loc[:, :, 'NewMinor'],
newminor.astype(object))
newmajor = notnull(panel.iloc[:, 0, :])
panel.loc[:, 'NewMajor', :] = newmajor
assert_frame_equal(panel.loc[:, 'NewMajor', :],
newmajor.astype(object))
def test_major_xs(self):
ref = self.panel['ItemA']
idx = self.panel.major_axis[5]
xs = self.panel.major_xs(idx)
result = xs['ItemA']
assert_series_equal(result, ref.xs(idx), check_names=False)
self.assertEqual(result.name, 'ItemA')
# not contained
idx = self.panel.major_axis[0] - bday
self.assertRaises(Exception, self.panel.major_xs, idx)
def test_major_xs_mixed(self):
self.panel['ItemD'] = 'foo'
xs = self.panel.major_xs(self.panel.major_axis[0])
self.assertEqual(xs['ItemA'].dtype, np.float64)
self.assertEqual(xs['ItemD'].dtype, np.object_)
def test_minor_xs(self):
ref = self.panel['ItemA']
idx = self.panel.minor_axis[1]
xs = self.panel.minor_xs(idx)
assert_series_equal(xs['ItemA'], ref[idx], check_names=False)
# not contained
self.assertRaises(Exception, self.panel.minor_xs, 'E')
def test_minor_xs_mixed(self):
self.panel['ItemD'] = 'foo'
xs = self.panel.minor_xs('D')
self.assertEqual(xs['ItemA'].dtype, np.float64)
self.assertEqual(xs['ItemD'].dtype, np.object_)
def test_xs(self):
itemA = self.panel.xs('ItemA', axis=0)
expected = self.panel['ItemA']
assert_frame_equal(itemA, expected)
# get a view by default
itemA_view = self.panel.xs('ItemA', axis=0)
itemA_view.values[:] = np.nan
self.assertTrue(np.isnan(self.panel['ItemA'].values).all())
# mixed-type yields a copy
self.panel['strings'] = 'foo'
result = self.panel.xs('D', axis=2)
self.assertIsNotNone(result.is_copy)
def test_getitem_fancy_labels(self):
p = self.panel
items = p.items[[1, 0]]
dates = p.major_axis[::2]
cols = ['D', 'C', 'F']
# all 3 specified
assert_panel_equal(p.ix[items, dates, cols],
p.reindex(items=items, major=dates, minor=cols))
# 2 specified
assert_panel_equal(p.ix[:, dates, cols],
p.reindex(major=dates, minor=cols))
assert_panel_equal(p.ix[items, :, cols],
p.reindex(items=items, minor=cols))
assert_panel_equal(p.ix[items, dates, :],
p.reindex(items=items, major=dates))
# only 1
assert_panel_equal(p.ix[items, :, :], p.reindex(items=items))
assert_panel_equal(p.ix[:, dates, :], p.reindex(major=dates))
assert_panel_equal(p.ix[:, :, cols], p.reindex(minor=cols))
def test_getitem_fancy_slice(self):
pass
def test_getitem_fancy_ints(self):
p = self.panel
# #1603
result = p.ix[:, -1, :]
expected = p.ix[:, p.major_axis[-1], :]
assert_frame_equal(result, expected)
def test_getitem_fancy_xs(self):
p = self.panel
item = 'ItemB'
date = p.major_axis[5]
col = 'C'
# get DataFrame
# item
assert_frame_equal(p.ix[item], p[item])
assert_frame_equal(p.ix[item, :], p[item])
assert_frame_equal(p.ix[item, :, :], p[item])
# major axis, axis=1
assert_frame_equal(p.ix[:, date], p.major_xs(date))
assert_frame_equal(p.ix[:, date, :], p.major_xs(date))
# minor axis, axis=2
assert_frame_equal(p.ix[:, :, 'C'], p.minor_xs('C'))
# get Series
assert_series_equal(p.ix[item, date], p[item].ix[date])
assert_series_equal(p.ix[item, date, :], p[item].ix[date])
assert_series_equal(p.ix[item, :, col], p[item][col])
assert_series_equal(p.ix[:, date, col], p.major_xs(date).ix[col])
def test_getitem_fancy_xs_check_view(self):
item = 'ItemB'
date = self.panel.major_axis[5]
# make sure it's always a view
NS = slice(None, None)
# DataFrames
comp = assert_frame_equal
self._check_view(item, comp)
self._check_view((item, NS), comp)
self._check_view((item, NS, NS), comp)
self._check_view((NS, date), comp)
self._check_view((NS, date, NS), comp)
self._check_view((NS, NS, 'C'), comp)
# Series
comp = assert_series_equal
self._check_view((item, date), comp)
self._check_view((item, date, NS), comp)
self._check_view((item, NS, 'C'), comp)
self._check_view((NS, date, 'C'), comp)
def test_ix_setitem_slice_dataframe(self):
a = Panel(items=[1, 2, 3], major_axis=[11, 22, 33],
minor_axis=[111, 222, 333])
b = DataFrame(np.random.randn(2, 3), index=[111, 333],
columns=[1, 2, 3])
a.ix[:, 22, [111, 333]] = b
assert_frame_equal(a.ix[:, 22, [111, 333]], b)
def test_ix_align(self):
from pandas import Series
b = Series(np.random.randn(10), name=0)
b.sort()
df_orig = Panel(np.random.randn(3, 10, 2))
df = df_orig.copy()
df.ix[0, :, 0] = b
assert_series_equal(df.ix[0, :, 0].reindex(b.index), b)
df = df_orig.swapaxes(0, 1)
df.ix[:, 0, 0] = b
assert_series_equal(df.ix[:, 0, 0].reindex(b.index), b)
df = df_orig.swapaxes(1, 2)
df.ix[0, 0, :] = b
assert_series_equal(df.ix[0, 0, :].reindex(b.index), b)
def test_ix_frame_align(self):
p_orig = tm.makePanel()
df = p_orig.ix[0].copy()
| assert_frame_equal(p_orig['ItemA'], df) | pandas.util.testing.assert_frame_equal |
# Copyright 1999-2020 Alibaba Group Holding Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from io import StringIO
import numpy as np
import pandas as pd
from ... import opcodes as OperandDef
from ...core import OutputType
from ...filesystem import open_file
from ...operands import OperandStage
from ...serialize import KeyField, AnyField, StringField, ListField, \
BoolField, Int32Field, Int64Field, DictField
from ...tensor.core import TensorOrder
from ...tensor.operands import TensorOperand, TensorOperandMixin
from ..operands import DataFrameOperand, DataFrameOperandMixin
from ..utils import parse_index
class DataFrameToCSV(DataFrameOperand, DataFrameOperandMixin):
_op_type_ = OperandDef.TO_CSV
_input = KeyField('input')
_path = AnyField('path')
_sep = StringField('sep')
_na_rep = StringField('na_rep')
_float_format = StringField('float_format')
_columns = ListField('columns')
_header = AnyField('header')
_index = BoolField('index')
_index_label = AnyField('index_label')
_mode = StringField('mode')
_encoding = StringField('encoding')
_compression = AnyField('compression')
_quoting = Int32Field('quoting')
_quotechar = StringField('quotechar')
_line_terminator = StringField('line_terminator')
_chunksize = Int64Field('chunksize')
_date_format = StringField('date_format')
_doublequote = BoolField('doublequote')
_escapechar = StringField('escapechar')
_decimal = StringField('decimal')
_storage_options = DictField('storage_options')
# for chunk
_output_stat = BoolField('output_stat')
def __init__(self, path=None, sep=None, na_rep=None, float_format=None,
columns=None, header=None, index=None, index_label=None,
mode=None, encoding=None, compression=None, quoting=None,
quotechar=None, line_terminator=None, chunksize=None, date_format=None,
doublequote=None, escapechar=None, decimal=None, output_stat=None,
storage_options=None, stage=None, output_types=None, **kw):
super().__init__(_path=path, _sep=sep, _na_rep=na_rep, _float_format=float_format,
_columns=columns, _header=header, _index=index, _index_label=index_label,
_mode=mode, _encoding=encoding, _compression=compression, _quoting=quoting,
_quotechar=quotechar, _line_terminator=line_terminator, _chunksize=chunksize,
_date_format=date_format, _doublequote=doublequote,
_escapechar=escapechar, _decimal=decimal, _output_stat=output_stat,
_storage_options=storage_options, _output_types=output_types, _stage=stage, **kw)
@property
def input(self):
return self._input
@property
def path(self):
return self._path
@property
def sep(self):
return self._sep
@property
def na_rep(self):
return self._na_rep
@property
def float_format(self):
return self._float_format
@property
def columns(self):
return self._columns
@property
def header(self):
return self._header
@property
def index(self):
return self._index
@property
def index_label(self):
return self._index_label
@property
def mode(self):
return self._mode
@property
def encoding(self):
return self._encoding
@property
def compression(self):
return self._compression
@property
def quoting(self):
return self._quoting
@property
def quotechar(self):
return self._quotechar
@property
def line_terminator(self):
return self._line_terminator
@property
def chunksize(self):
return self._chunksize
@property
def date_format(self):
return self._date_format
@property
def doublequote(self):
return self._doublequote
@property
def escapechar(self):
return self._escapechar
@property
def decimal(self):
return self._decimal
@property
def storage_options(self):
return self._storage_options
@property
def one_file(self):
# if wildcard in path, write csv into multiple files
return '*' not in self._path
@property
def output_stat(self):
return self._output_stat
@property
def output_limit(self):
return 1 if not self.output_stat else 2
def _set_inputs(self, inputs):
super()._set_inputs(inputs)
self._input = self._inputs[0]
@classmethod
def tile(cls, op: 'DataFrameToCSV'):
in_df = op.input
out_df = op.outputs[0]
if in_df.ndim == 2:
# make sure only 1 chunk on the column axis
in_df = in_df.rechunk({1: in_df.shape[1]})._inplace_tile()
one_file = op.one_file
out_chunks = [], []
for chunk in in_df.chunks:
chunk_op = op.copy().reset_key()
if not one_file:
index_value = parse_index(chunk.index_value.to_pandas()[:0], chunk)
if chunk.ndim == 2:
out_chunk = chunk_op.new_chunk([chunk], shape=(0, 0),
index_value=index_value,
columns_value=out_df.columns_value,
dtypes=out_df.dtypes,
index=chunk.index)
else:
out_chunk = chunk_op.new_chunk([chunk], shape=(0,),
index_value=index_value,
dtype=out_df.dtype,
index=chunk.index)
out_chunks[0].append(out_chunk)
else:
chunk_op._output_stat = True
chunk_op._stage = OperandStage.map
# bytes of csv
kws = [{
'shape': (),
'dtype': np.dtype(np.str_),
'index': chunk.index,
'order': TensorOrder.C_ORDER,
'output_type': OutputType.scalar,
'type': 'csv',
}, {
'shape': (),
'dtype': np.dtype(np.intp),
'index': chunk.index,
'order': TensorOrder.C_ORDER,
'output_type': OutputType.scalar,
'type': 'stat',
}]
chunks = chunk_op.new_chunks([chunk], kws=kws, output_limit=len(kws))
out_chunks[0].append(chunks[0])
out_chunks[1].append(chunks[1])
if not one_file:
out_chunks = out_chunks[0]
else:
stat_chunk = DataFrameToCSVStat(path=op.path, dtype=np.dtype(np.int64),
storage_options=op.storage_options).new_chunk(
out_chunks[1], shape=(len(out_chunks[0]),), order=TensorOrder.C_ORDER)
new_out_chunks = []
for c in out_chunks[0]:
op = DataFrameToCSV(stage=OperandStage.agg, path=op.path,
storage_options=op.storage_options,
output_types=op.output_types)
if out_df.ndim == 2:
out_chunk = op.new_chunk(
[c, stat_chunk], shape=(0, 0), dtypes=out_df.dtypes,
index_value=out_df.index_value,
columns_value=out_df.columns_value,
index=c.index)
else:
out_chunk = op.new_chunk(
[c, stat_chunk], shape=(0,), dtype=out_df.dtype,
index_value=out_df.index_value, index=c.index)
new_out_chunks.append(out_chunk)
out_chunks = new_out_chunks
new_op = op.copy()
params = out_df.params.copy()
if out_df.ndim == 2:
params.update(dict(chunks=out_chunks, nsplits=((0,) * in_df.chunk_shape[0], (0,))))
else:
params.update(dict(chunks=out_chunks, nsplits=((0,) * in_df.chunk_shape[0],)))
return new_op.new_tileables([in_df], **params)
def __call__(self, df):
index_value = parse_index(df.index_value.to_pandas()[:0], df)
if df.ndim == 2:
columns_value = parse_index(df.columns_value.to_pandas()[:0], store_data=True)
return self.new_dataframe([df], shape=(0, 0), dtypes=df.dtypes[:0],
index_value=index_value, columns_value=columns_value)
else:
return self.new_series([df], shape=(0,), dtype=df.dtype, index_value=index_value)
@classmethod
def _to_csv(cls, op, df, path, header=None):
if header is None:
header = op.header
df.to_csv(path, sep=op.sep, na_rep=op.na_rep, float_format=op.float_format,
columns=op.columns, header=header, index=op.index, index_label=op.index_label,
mode=op.mode, encoding=op.encoding, compression=op.compression, quoting=op.quoting,
quotechar=op.quotechar, line_terminator=op.line_terminator, chunksize=op.chunksize,
date_format=op.date_format, doublequote=op.doublequote, escapechar=op.escapechar,
decimal=op.decimal)
@classmethod
def _execute_map(cls, ctx, op):
out = op.outputs[0]
df = ctx[op.input.key]
sio = StringIO()
header = op.header if out.index[0] == 0 else False
# do not output header if index of chunk > 0
cls._to_csv(op, df, sio, header=header)
ret = sio.getvalue().encode(op.encoding or 'utf-8')
ctx[op.outputs[0].key] = ret
ctx[op.outputs[1].key] = len(ret)
@classmethod
def _execute_agg(cls, ctx, op):
out = op.outputs[0]
i = out.index[0]
path = cls._get_path(op.path, i)
csv_bytes, offsets = [ctx[inp.key] for inp in op.inputs]
offset_start = offsets[i]
# write csv bytes into file
with open_file(path, mode='r+b', storage_options=op.storage_options) as f:
f.seek(offset_start)
f.write(csv_bytes)
ctx[out.key] = pd.DataFrame() if out.ndim == 2 else pd.Series([], dtype=out.dtype)
@classmethod
def _get_path(cls, path, i):
if '*' not in path:
return path
return path.replace('*', str(i))
@classmethod
def execute(cls, ctx, op):
if op.stage == OperandStage.map:
cls._execute_map(ctx, op)
elif op.stage == OperandStage.agg:
cls._execute_agg(ctx, op)
else:
assert op.stage is None
df = ctx[op.input.key]
out = op.outputs[0]
path = cls._get_path(op.path, op.outputs[0].index[0])
with open_file(path, mode='w', storage_options=op.storage_options) as f:
cls._to_csv(op, df, f)
ctx[out.key] = pd.DataFrame() if out.ndim == 2 else | pd.Series([], dtype=out.dtype) | pandas.Series |
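# --- Illustrative sketch, separate from the DataFrameToCSV operand above ---
# The map/agg split writes a single shared CSV file in parallel: every chunk is
# rendered to bytes together with its byte length, the lengths are combined
# into start offsets (the DataFrameToCSVStat operand, not shown here, is
# assumed to do a cumulative sum), and each agg task then seeks to its own
# offset and writes. A plain-Python sketch of that idea; the helper name and
# output path are hypothetical.
import io
import pandas as pd

def write_chunks_to_one_csv(chunks, path):
    # "map": serialize each chunk, emitting the header only for the first one
    blobs = []
    for i, chunk in enumerate(chunks):
        sio = io.StringIO()
        chunk.to_csv(sio, header=(i == 0))
        blobs.append(sio.getvalue().encode("utf-8"))
    # "stat": byte lengths -> start offsets (assumed cumulative sum, starting at 0)
    offsets = [0]
    for blob in blobs[:-1]:
        offsets.append(offsets[-1] + len(blob))
    # "agg": seek to this chunk's offset and write its bytes
    with open(path, "wb") as f:
        for blob, start in zip(blobs, offsets):
            f.seek(start)
            f.write(blob)

frame = pd.DataFrame({"a": range(6)})
write_chunks_to_one_csv([frame.iloc[:3], frame.iloc[3:]], "to_csv_sketch.csv")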
# -*- coding: utf-8 -*-#
"""
File: Auto_Loan_ML.py
Author: <NAME>
Date: 3/15/20
Desc: Analysis of GM Financial Consumer Automobile Receivables Trust Data Tape
Prediction of Delinquency via Tree-Based Feature Importance Methods
"""
""" ======================= Import dependencies ========================== """
import numpy as np
import pandas as pd
import os
import glob
from collections import OrderedDict
import matplotlib.pyplot as plt
import seaborn as sns
import sklearn.metrics as sm
from sklearn.model_selection import train_test_split, cross_val_score
from sklearn.tree import DecisionTreeClassifier, DecisionTreeRegressor
from sklearn.ensemble import AdaBoostClassifier, AdaBoostRegressor
from sklearn.ensemble import RandomForestClassifier, RandomForestRegressor
from sklearn.metrics import accuracy_score, classification_report
from sklearn.inspection import permutation_importance
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import OneHotEncoder
""" ====================== Function definitions ========================== """
def getIndexes(dfObj, value):
# Empty list
listOfPos = []
# isin() method will return a dataframe with boolean values, True at the positions where element exists
result = dfObj.isin([value])
# any() method will return a boolean series
seriesObj = result.any()
# Get list of columns where element exists
columnNames = list(seriesObj[seriesObj == True].index)
# Iterate over the list of columns and extract the row index where element exists
for col in columnNames:
rows = list(result[col][result[col] == True].index)
for row in rows:
listOfPos.append((row, col))
# This list contains (row, column) tuples giving the positions of the
# element in the dataframe
return listOfPos
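# Illustrative usage of getIndexes (made-up frame): the value 2 appears at
# row 1 of column 'a' and row 0 of column 'b', so both positions come back.
_example_frame = pd.DataFrame({"a": [1, 2], "b": [2, 3]})
assert getIndexes(_example_frame, 2) == [(1, "a"), (0, "b")]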
def plot_feature_importances(feature_importances, title, feature_names):
# Normalize the importance values
feature_importances = 100.0 * (feature_importances / max(feature_importances))
# Sort the index values and flip them so that they are arranged in decreasing order of importance
index_sorted = np.flipud(np.argsort(feature_importances))
# Center the location of the labels on the X-axis (for display purposes only)
pos = np.arange(index_sorted.shape[0]) + 0.5
# Plot the bar graph
feature_names_ord = [feature_names[i] for i in index_sorted]  # names in decreasing order of importance
print(feature_names_ord[:10])
plt.figure()
plt.bar(pos[:10], feature_importances[index_sorted][:10], align='center')
plt.xticks(pos[:10], feature_names_ord[:10], fontsize=12, rotation=45)
plt.xlabel("Feature Names", fontdict={'size': 18}, labelpad=27)
plt.ylabel('Relative Importance')
plt.title(title)
plt.show()
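# Illustrative check of the ordering used above (synthetic importances and
# hypothetical feature names): index_sorted ranks positions from most to least
# important, and feature_names_ord should list the names in that same order.
_imp = np.array([0.1, 0.5, 0.4])
_names = ["f0", "f1", "f2"]
_order = np.flipud(np.argsort(_imp))
assert [_names[i] for i in _order] == ["f1", "f2", "f0"]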
def z_score(df_std, norm):
assert isinstance(df_std, pd.DataFrame)
for column in norm:
df_std[column] = pd.to_numeric(df_std[column], errors='coerce', downcast='float')
df_std[column] = (df_std[column] - df_std[column].mean()) / df_std[column].std()
return df_std
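# Illustrative usage of z_score (hypothetical column name): the listed column
# is coerced to float and standardised to mean ~0 and sample std 1.
_std = z_score(pd.DataFrame({"LoanAmount": [10000.0, 20000.0, 30000.0]}), ["LoanAmount"])
assert abs(_std["LoanAmount"].mean()) < 1e-6
assert abs(_std["LoanAmount"].std() - 1.0) < 1e-6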
def clean_dataset(df, num_cols):
assert isinstance(df, pd.DataFrame), "df needs to be a pd.DataFrame"
df.dropna(inplace=True)
result = df.copy()
indices_to_keep = ~df[df.columns[~df.columns.isin(num_cols)]].isin([np.nan, np.inf, -np.inf]).any(1)
df = df[df.columns[~df.columns.isin(num_cols)]].astype(np.float64)
for numz in num_cols:
df[numz] = result[numz]
return df[indices_to_keep]
def cleaner(df):
assert isinstance(df, pd.DataFrame), "df needs to be a pd.DataFrame"
df = df.replace(to_replace=['None', '-'], value=np.nan).dropna()
df = df.replace(to_replace='false', value=0)
df = df.replace(to_replace=['true'], value=1)
df = df.replace(to_replace=['1; 2'], value=1)
df = df.replace(to_replace=['2; 1'], value=2)
df = df.replace([np.inf, -np.inf], np.nan).dropna()
return df
def clean_time_series(df, timez):
assert isinstance(df, pd.DataFrame)
for col in timez:
df[col] = (df[col]/np.timedelta64(1, 'D')).astype(np.float64)
df[col] = df[col][~((df[col] - df[col].mean()).abs() > 3.5 * df[col].std())]
df = df.dropna()
return df
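# Illustrative usage of clean_time_series (hypothetical column name): a
# timedelta column is converted to float days, and rows more than 3.5 sample
# standard deviations from the mean are dropped.
_days = pd.DataFrame({"DaysToCleanup": pd.to_timedelta([1] * 30 + [1000], unit="D")})
_days = clean_time_series(_days, ["DaysToCleanup"])
assert len(_days) == 30 and _days["DaysToCleanup"].max() == 1.0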
""" ====================== Class definitions ========================== """
class RandomForestExample:
def __init__(self, path):
# reading the data from the csv file
self.path = path
def getDFS(self, path):
all_files = glob.iglob(os.path.join(path + "*.csv"))
return pd.concat(( | pd.read_csv(f, dtype='unicode') | pandas.read_csv |
import pandas as pd
import numpy as np
import datetime
name = ['IP', 'app', 'daytime', 'platform', 'channel_type', 'channel', 'user_id',
'device_id', 'system_version', 'brand', 'model', 'version', 'event_id', 'para']
# If the file is not a csv (comma-separated by default), you need to pass sep to specify the delimiter, otherwise '\t' ends up in the parsed values; also set header=None, otherwise the first row of data is used as the column names
f1 = pd.DataFrame( | pd.read_csv('/Users/yuanfang/Desktop/download/logs/1/2/3/2/2019/3/26/2/logs12405.log',
sep='\t', header=None, names=name) | pandas.read_csv |
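# Illustrative sketch of the point in the comment above (made-up in-memory
# data): without sep='\t' and header=None, a tab-separated, header-less log
# collapses into a single column and loses its first record to the header.
import io
import pandas as pd

raw = "1.2.3.4\tmyapp\t2019-03-26\n5.6.7.8\tmyapp\t2019-03-27\n"
cols = ["IP", "app", "daytime"]  # a subset of the names defined above
good = pd.read_csv(io.StringIO(raw), sep="\t", header=None, names=cols)
bad = pd.read_csv(io.StringIO(raw))  # default sep=',' and inferred header
assert good.shape == (2, 3)
assert bad.shape == (1, 1)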
from collections import OrderedDict
from datetime import datetime, timedelta
import numpy as np
import numpy.ma as ma
import pytest
from pandas._libs import iNaT, lib
from pandas.core.dtypes.common import is_categorical_dtype, is_datetime64tz_dtype
from pandas.core.dtypes.dtypes import (
CategoricalDtype,
DatetimeTZDtype,
IntervalDtype,
PeriodDtype,
)
import pandas as pd
from pandas import (
Categorical,
DataFrame,
Index,
Interval,
IntervalIndex,
MultiIndex,
NaT,
Period,
Series,
Timestamp,
date_range,
isna,
period_range,
timedelta_range,
)
import pandas._testing as tm
from pandas.core.arrays import IntervalArray, period_array
class TestSeriesConstructors:
@pytest.mark.parametrize(
"constructor,check_index_type",
[
# NOTE: some overlap with test_constructor_empty but that test does not
# test for None or an empty generator.
# test_constructor_pass_none tests None but only with the index also
# passed.
(lambda: Series(), True),
(lambda: Series(None), True),
(lambda: Series({}), True),
(lambda: Series(()), False), # creates a RangeIndex
(lambda: Series([]), False), # creates a RangeIndex
(lambda: Series((_ for _ in [])), False), # creates a RangeIndex
(lambda: Series(data=None), True),
(lambda: Series(data={}), True),
(lambda: Series(data=()), False), # creates a RangeIndex
(lambda: Series(data=[]), False), # creates a RangeIndex
(lambda: Series(data=(_ for _ in [])), False), # creates a RangeIndex
],
)
def test_empty_constructor(self, constructor, check_index_type):
with tm.assert_produces_warning(DeprecationWarning, check_stacklevel=False):
expected = Series()
result = constructor()
assert len(result.index) == 0
tm.assert_series_equal(result, expected, check_index_type=check_index_type)
def test_invalid_dtype(self):
# GH15520
msg = "not understood"
invalid_list = [pd.Timestamp, "pd.Timestamp", list]
for dtype in invalid_list:
with pytest.raises(TypeError, match=msg):
Series([], name="time", dtype=dtype)
def test_invalid_compound_dtype(self):
# GH#13296
c_dtype = np.dtype([("a", "i8"), ("b", "f4")])
cdt_arr = np.array([(1, 0.4), (256, -13)], dtype=c_dtype)
with pytest.raises(ValueError, match="Use DataFrame instead"):
Series(cdt_arr, index=["A", "B"])
def test_scalar_conversion(self):
# Pass in scalar is disabled
scalar = Series(0.5)
assert not isinstance(scalar, float)
# Coercion
assert float(Series([1.0])) == 1.0
assert int(Series([1.0])) == 1
def test_constructor(self, datetime_series):
with tm.assert_produces_warning(DeprecationWarning, check_stacklevel=False):
empty_series = Series()
assert datetime_series.index.is_all_dates
# Pass in Series
derived = Series(datetime_series)
assert derived.index.is_all_dates
assert tm.equalContents(derived.index, datetime_series.index)
# Ensure new index is not created
assert id(datetime_series.index) == id(derived.index)
# Mixed type Series
mixed = Series(["hello", np.NaN], index=[0, 1])
assert mixed.dtype == np.object_
assert mixed[1] is np.NaN
assert not empty_series.index.is_all_dates
with tm.assert_produces_warning(DeprecationWarning, check_stacklevel=False):
assert not Series().index.is_all_dates
# exception raised is of type Exception
with pytest.raises(Exception, match="Data must be 1-dimensional"):
Series(np.random.randn(3, 3), index=np.arange(3))
mixed.name = "Series"
rs = Series(mixed).name
xp = "Series"
assert rs == xp
# raise on MultiIndex GH4187
m = MultiIndex.from_arrays([[1, 2], [3, 4]])
msg = "initializing a Series from a MultiIndex is not supported"
with pytest.raises(NotImplementedError, match=msg):
Series(m)
@pytest.mark.parametrize("input_class", [list, dict, OrderedDict])
def test_constructor_empty(self, input_class):
with tm.assert_produces_warning(DeprecationWarning, check_stacklevel=False):
empty = Series()
empty2 = Series(input_class())
# these are Index() and RangeIndex() which don't compare type equal
# but are just .equals
tm.assert_series_equal(empty, empty2, check_index_type=False)
# With explicit dtype:
empty = Series(dtype="float64")
empty2 = Series(input_class(), dtype="float64")
tm.assert_series_equal(empty, empty2, check_index_type=False)
# GH 18515 : with dtype=category:
empty = Series(dtype="category")
empty2 = Series(input_class(), dtype="category")
tm.assert_series_equal(empty, empty2, check_index_type=False)
if input_class is not list:
# With index:
with tm.assert_produces_warning(DeprecationWarning, check_stacklevel=False):
empty = Series(index=range(10))
empty2 = Series(input_class(), index=range(10))
tm.assert_series_equal(empty, empty2)
# With index and dtype float64:
empty = Series(np.nan, index=range(10))
empty2 = Series(input_class(), index=range(10), dtype="float64")
tm.assert_series_equal(empty, empty2)
# GH 19853 : with empty string, index and dtype str
empty = Series("", dtype=str, index=range(3))
empty2 = Series("", index=range(3))
tm.assert_series_equal(empty, empty2)
@pytest.mark.parametrize("input_arg", [np.nan, float("nan")])
def test_constructor_nan(self, input_arg):
empty = Series(dtype="float64", index=range(10))
empty2 = Series(input_arg, index=range(10))
tm.assert_series_equal(empty, empty2, check_index_type=False)
@pytest.mark.parametrize(
"dtype",
["f8", "i8", "M8[ns]", "m8[ns]", "category", "object", "datetime64[ns, UTC]"],
)
@pytest.mark.parametrize("index", [None, pd.Index([])])
def test_constructor_dtype_only(self, dtype, index):
# GH-20865
result = pd.Series(dtype=dtype, index=index)
assert result.dtype == dtype
assert len(result) == 0
def test_constructor_no_data_index_order(self):
with tm.assert_produces_warning(DeprecationWarning, check_stacklevel=False):
result = pd.Series(index=["b", "a", "c"])
assert result.index.tolist() == ["b", "a", "c"]
def test_constructor_no_data_string_type(self):
# GH 22477
result = pd.Series(index=[1], dtype=str)
assert np.isnan(result.iloc[0])
@pytest.mark.parametrize("item", ["entry", "ѐ", 13])
def test_constructor_string_element_string_type(self, item):
# GH 22477
result = pd.Series(item, index=[1], dtype=str)
assert result.iloc[0] == str(item)
def test_constructor_dtype_str_na_values(self, string_dtype):
# https://github.com/pandas-dev/pandas/issues/21083
ser = Series(["x", None], dtype=string_dtype)
result = ser.isna()
expected = Series([False, True])
tm.assert_series_equal(result, expected)
assert ser.iloc[1] is None
ser = Series(["x", np.nan], dtype=string_dtype)
assert np.isnan(ser.iloc[1])
def test_constructor_series(self):
index1 = ["d", "b", "a", "c"]
index2 = sorted(index1)
s1 = Series([4, 7, -5, 3], index=index1)
s2 = Series(s1, index=index2)
tm.assert_series_equal(s2, s1.sort_index())
def test_constructor_iterable(self):
# GH 21987
class Iter:
def __iter__(self):
for i in range(10):
yield i
expected = Series(list(range(10)), dtype="int64")
result = Series(Iter(), dtype="int64")
tm.assert_series_equal(result, expected)
def test_constructor_sequence(self):
# GH 21987
expected = Series(list(range(10)), dtype="int64")
result = Series(range(10), dtype="int64")
tm.assert_series_equal(result, expected)
def test_constructor_single_str(self):
# GH 21987
expected = Series(["abc"])
result = Series("abc")
tm.assert_series_equal(result, expected)
def test_constructor_list_like(self):
# make sure that we are coercing different
# list-likes to standard dtypes and not
# platform specific
expected = Series([1, 2, 3], dtype="int64")
for obj in [[1, 2, 3], (1, 2, 3), np.array([1, 2, 3], dtype="int64")]:
result = Series(obj, index=[0, 1, 2])
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize("dtype", ["bool", "int32", "int64", "float64"])
def test_constructor_index_dtype(self, dtype):
# GH 17088
s = Series(Index([0, 2, 4]), dtype=dtype)
assert s.dtype == dtype
@pytest.mark.parametrize(
"input_vals",
[
([1, 2]),
(["1", "2"]),
(list(pd.date_range("1/1/2011", periods=2, freq="H"))),
(list(pd.date_range("1/1/2011", periods=2, freq="H", tz="US/Eastern"))),
([pd.Interval(left=0, right=5)]),
],
)
def test_constructor_list_str(self, input_vals, string_dtype):
# GH 16605
# Ensure that data elements from a list are converted to strings
# when dtype is str, 'str', or 'U'
result = Series(input_vals, dtype=string_dtype)
expected = Series(input_vals).astype(string_dtype)
tm.assert_series_equal(result, expected)
def test_constructor_list_str_na(self, string_dtype):
result = Series([1.0, 2.0, np.nan], dtype=string_dtype)
expected = Series(["1.0", "2.0", np.nan], dtype=object)
tm.assert_series_equal(result, expected)
assert np.isnan(result[2])
def test_constructor_generator(self):
gen = (i for i in range(10))
result = Series(gen)
exp = Series(range(10))
tm.assert_series_equal(result, exp)
gen = (i for i in range(10))
result = Series(gen, index=range(10, 20))
exp.index = range(10, 20)
tm.assert_series_equal(result, exp)
def test_constructor_map(self):
# GH8909
m = map(lambda x: x, range(10))
result = Series(m)
exp = Series(range(10))
tm.assert_series_equal(result, exp)
m = map(lambda x: x, range(10))
result = Series(m, index=range(10, 20))
exp.index = range(10, 20)
tm.assert_series_equal(result, exp)
def test_constructor_categorical(self):
cat = pd.Categorical([0, 1, 2, 0, 1, 2], ["a", "b", "c"], fastpath=True)
res = Series(cat)
tm.assert_categorical_equal(res.values, cat)
# can cast to a new dtype
result = Series(pd.Categorical([1, 2, 3]), dtype="int64")
expected = pd.Series([1, 2, 3], dtype="int64")
tm.assert_series_equal(result, expected)
# GH12574
cat = Series(pd.Categorical([1, 2, 3]), dtype="category")
assert is_categorical_dtype(cat)
assert is_categorical_dtype(cat.dtype)
s = Series([1, 2, 3], dtype="category")
assert is_categorical_dtype(s)
assert is_categorical_dtype(s.dtype)
def test_constructor_categorical_with_coercion(self):
factor = Categorical(["a", "b", "b", "a", "a", "c", "c", "c"])
# test basic creation / coercion of categoricals
s = Series(factor, name="A")
assert s.dtype == "category"
assert len(s) == len(factor)
str(s.values)
str(s)
# in a frame
df = DataFrame({"A": factor})
result = df["A"]
tm.assert_series_equal(result, s)
result = df.iloc[:, 0]
tm.assert_series_equal(result, s)
assert len(df) == len(factor)
str(df.values)
str(df)
df = DataFrame({"A": s})
result = df["A"]
tm.assert_series_equal(result, s)
assert len(df) == len(factor)
str(df.values)
str(df)
# multiples
df = DataFrame({"A": s, "B": s, "C": 1})
result1 = df["A"]
result2 = df["B"]
tm.assert_series_equal(result1, s)
tm.assert_series_equal(result2, s, check_names=False)
assert result2.name == "B"
assert len(df) == len(factor)
str(df.values)
str(df)
# GH8623
x = DataFrame(
[[1, "<NAME>"], [2, "<NAME>"], [1, "<NAME>"]],
columns=["person_id", "person_name"],
)
x["person_name"] = Categorical(x.person_name) # doing this breaks transform
expected = x.iloc[0].person_name
result = x.person_name.iloc[0]
assert result == expected
result = x.person_name[0]
assert result == expected
result = x.person_name.loc[0]
assert result == expected
def test_constructor_categorical_dtype(self):
result = pd.Series(
["a", "b"], dtype=CategoricalDtype(["a", "b", "c"], ordered=True)
)
assert is_categorical_dtype(result.dtype) is True
tm.assert_index_equal(result.cat.categories, pd.Index(["a", "b", "c"]))
assert result.cat.ordered
result = pd.Series(["a", "b"], dtype=CategoricalDtype(["b", "a"]))
assert is_categorical_dtype(result.dtype)
tm.assert_index_equal(result.cat.categories, pd.Index(["b", "a"]))
assert result.cat.ordered is False
# GH 19565 - Check broadcasting of scalar with Categorical dtype
result = Series(
"a", index=[0, 1], dtype=CategoricalDtype(["a", "b"], ordered=True)
)
expected = Series(
["a", "a"], index=[0, 1], dtype=CategoricalDtype(["a", "b"], ordered=True)
)
tm.assert_series_equal(result, expected)
def test_constructor_categorical_string(self):
# GH 26336: the string 'category' maintains existing CategoricalDtype
cdt = CategoricalDtype(categories=list("dabc"), ordered=True)
expected = Series(list("abcabc"), dtype=cdt)
# Series(Categorical, dtype='category') keeps existing dtype
cat = Categorical(list("abcabc"), dtype=cdt)
result = Series(cat, dtype="category")
tm.assert_series_equal(result, expected)
# Series(Series[Categorical], dtype='category') keeps existing dtype
result = Series(result, dtype="category")
tm.assert_series_equal(result, expected)
def test_categorical_sideeffects_free(self):
# Passing a categorical to a Series and then changing values in either
# the series or the categorical should not change the values in the
# other one, IF you specify copy!
cat = Categorical(["a", "b", "c", "a"])
s = Series(cat, copy=True)
assert s.cat is not cat
s.cat.categories = [1, 2, 3]
exp_s = np.array([1, 2, 3, 1], dtype=np.int64)
exp_cat = np.array(["a", "b", "c", "a"], dtype=np.object_)
tm.assert_numpy_array_equal(s.__array__(), exp_s)
tm.assert_numpy_array_equal(cat.__array__(), exp_cat)
# setting
s[0] = 2
exp_s2 = np.array([2, 2, 3, 1], dtype=np.int64)
tm.assert_numpy_array_equal(s.__array__(), exp_s2)
tm.assert_numpy_array_equal(cat.__array__(), exp_cat)
# however, copy is False by default
# so this WILL change values
cat = Categorical(["a", "b", "c", "a"])
s = Series(cat)
assert s.values is cat
s.cat.categories = [1, 2, 3]
exp_s = np.array([1, 2, 3, 1], dtype=np.int64)
tm.assert_numpy_array_equal(s.__array__(), exp_s)
tm.assert_numpy_array_equal(cat.__array__(), exp_s)
s[0] = 2
exp_s2 = np.array([2, 2, 3, 1], dtype=np.int64)
tm.assert_numpy_array_equal(s.__array__(), exp_s2)
tm.assert_numpy_array_equal(cat.__array__(), exp_s2)
def test_unordered_compare_equal(self):
left = pd.Series(["a", "b", "c"], dtype=CategoricalDtype(["a", "b"]))
right = pd.Series(pd.Categorical(["a", "b", np.nan], categories=["a", "b"]))
tm.assert_series_equal(left, right)
def test_constructor_maskedarray(self):
data = ma.masked_all((3,), dtype=float)
result = Series(data)
expected = Series([np.nan, np.nan, np.nan])
tm.assert_series_equal(result, expected)
data[0] = 0.0
data[2] = 2.0
index = ["a", "b", "c"]
result = Series(data, index=index)
expected = Series([0.0, np.nan, 2.0], index=index)
tm.assert_series_equal(result, expected)
data[1] = 1.0
result = Series(data, index=index)
expected = Series([0.0, 1.0, 2.0], index=index)
tm.assert_series_equal(result, expected)
data = ma.masked_all((3,), dtype=int)
result = Series(data)
expected = Series([np.nan, np.nan, np.nan], dtype=float)
tm.assert_series_equal(result, expected)
data[0] = 0
data[2] = 2
index = ["a", "b", "c"]
result = Series(data, index=index)
expected = Series([0, np.nan, 2], index=index, dtype=float)
tm.assert_series_equal(result, expected)
data[1] = 1
result = Series(data, index=index)
expected = Series([0, 1, 2], index=index, dtype=int)
tm.assert_series_equal(result, expected)
data = ma.masked_all((3,), dtype=bool)
result = Series(data)
expected = Series([np.nan, np.nan, np.nan], dtype=object)
tm.assert_series_equal(result, expected)
data[0] = True
data[2] = False
index = ["a", "b", "c"]
result = Series(data, index=index)
expected = Series([True, np.nan, False], index=index, dtype=object)
tm.assert_series_equal(result, expected)
data[1] = True
result = Series(data, index=index)
expected = Series([True, True, False], index=index, dtype=bool)
tm.assert_series_equal(result, expected)
data = ma.masked_all((3,), dtype="M8[ns]")
result = Series(data)
expected = Series([iNaT, iNaT, iNaT], dtype="M8[ns]")
tm.assert_series_equal(result, expected)
data[0] = datetime(2001, 1, 1)
data[2] = datetime(2001, 1, 3)
index = ["a", "b", "c"]
result = Series(data, index=index)
expected = Series(
[datetime(2001, 1, 1), iNaT, datetime(2001, 1, 3)],
index=index,
dtype="M8[ns]",
)
tm.assert_series_equal(result, expected)
data[1] = datetime(2001, 1, 2)
result = Series(data, index=index)
expected = Series(
[datetime(2001, 1, 1), datetime(2001, 1, 2), datetime(2001, 1, 3)],
index=index,
dtype="M8[ns]",
)
tm.assert_series_equal(result, expected)
def test_constructor_maskedarray_hardened(self):
# Check numpy masked arrays with hard masks -- from GH24574
data = ma.masked_all((3,), dtype=float).harden_mask()
result = pd.Series(data)
expected = pd.Series([np.nan, np.nan, np.nan])
tm.assert_series_equal(result, expected)
def test_series_ctor_plus_datetimeindex(self):
rng = date_range("20090415", "20090519", freq="B")
data = {k: 1 for k in rng}
result = Series(data, index=rng)
assert result.index is rng
def test_constructor_default_index(self):
s = Series([0, 1, 2])
tm.assert_index_equal(s.index, pd.Index(np.arange(3)))
@pytest.mark.parametrize(
"input",
[
[1, 2, 3],
(1, 2, 3),
list(range(3)),
pd.Categorical(["a", "b", "a"]),
(i for i in range(3)),
map(lambda x: x, range(3)),
],
)
def test_constructor_index_mismatch(self, input):
# GH 19342
# test that construction of a Series with an index of different length
# raises an error
msg = "Length of passed values is 3, index implies 4"
with pytest.raises(ValueError, match=msg):
Series(input, index=np.arange(4))
def test_constructor_numpy_scalar(self):
# GH 19342
# construction with a numpy scalar
# should not raise
result = Series(np.array(100), index=np.arange(4), dtype="int64")
expected = Series(100, index=np.arange(4), dtype="int64")
tm.assert_series_equal(result, expected)
def test_constructor_broadcast_list(self):
# GH 19342
# construction with single-element container and index
# should raise
msg = "Length of passed values is 1, index implies 3"
with pytest.raises(ValueError, match=msg):
Series(["foo"], index=["a", "b", "c"])
def test_constructor_corner(self):
df = tm.makeTimeDataFrame()
objs = [df, df]
s = Series(objs, index=[0, 1])
assert isinstance(s, Series)
def test_constructor_sanitize(self):
s = Series(np.array([1.0, 1.0, 8.0]), dtype="i8")
assert s.dtype == np.dtype("i8")
s = Series(np.array([1.0, 1.0, np.nan]), copy=True, dtype="i8")
assert s.dtype == np.dtype("f8")
def test_constructor_copy(self):
# GH15125
# test dtype parameter has no side effects on copy=True
for data in [[1.0], np.array([1.0])]:
x = Series(data)
y = pd.Series(x, copy=True, dtype=float)
# copy=True maintains original data in Series
tm.assert_series_equal(x, y)
# changes to origin of copy does not affect the copy
x[0] = 2.0
assert not x.equals(y)
assert x[0] == 2.0
assert y[0] == 1.0
@pytest.mark.parametrize(
"index",
[
pd.date_range("20170101", periods=3, tz="US/Eastern"),
pd.date_range("20170101", periods=3),
pd.timedelta_range("1 day", periods=3),
pd.period_range("2012Q1", periods=3, freq="Q"),
pd.Index(list("abc")),
pd.Int64Index([1, 2, 3]),
pd.RangeIndex(0, 3),
],
ids=lambda x: type(x).__name__,
)
def test_constructor_limit_copies(self, index):
# GH 17449
# limit copies of input
s = pd.Series(index)
# we make 1 copy; this is just a smoke test here
assert s._mgr.blocks[0].values is not index
def test_constructor_pass_none(self):
with tm.assert_produces_warning(DeprecationWarning, check_stacklevel=False):
s = Series(None, index=range(5))
assert s.dtype == np.float64
s = Series(None, index=range(5), dtype=object)
assert s.dtype == np.object_
# GH 7431
# inference on the index
with tm.assert_produces_warning(DeprecationWarning, check_stacklevel=False):
s = Series(index=np.array([None]))
expected = Series(index=Index([None]))
tm.assert_series_equal(s, expected)
def test_constructor_pass_nan_nat(self):
# GH 13467
exp = Series([np.nan, np.nan], dtype=np.float64)
assert exp.dtype == np.float64
tm.assert_series_equal(Series([np.nan, np.nan]), exp)
tm.assert_series_equal(Series(np.array([np.nan, np.nan])), exp)
exp = Series([pd.NaT, pd.NaT])
assert exp.dtype == "datetime64[ns]"
tm.assert_series_equal(Series([pd.NaT, pd.NaT]), exp)
tm.assert_series_equal(Series(np.array([pd.NaT, pd.NaT])), exp)
tm.assert_series_equal(Series([pd.NaT, np.nan]), exp)
tm.assert_series_equal(Series(np.array([pd.NaT, np.nan])), exp)
tm.assert_series_equal(Series([np.nan, pd.NaT]), exp)
tm.assert_series_equal(Series(np.array([np.nan, pd.NaT])), exp)
def test_constructor_cast(self):
msg = "could not convert string to float"
with pytest.raises(ValueError, match=msg):
Series(["a", "b", "c"], dtype=float)
def test_constructor_unsigned_dtype_overflow(self, uint_dtype):
# see gh-15832
msg = "Trying to coerce negative values to unsigned integers"
with pytest.raises(OverflowError, match=msg):
Series([-1], dtype=uint_dtype)
def test_constructor_coerce_float_fail(self, any_int_dtype):
# see gh-15832
msg = "Trying to coerce float values to integers"
with pytest.raises(ValueError, match=msg):
Series([1, 2, 3.5], dtype=any_int_dtype)
def test_constructor_coerce_float_valid(self, float_dtype):
s = Series([1, 2, 3.5], dtype=float_dtype)
expected = Series([1, 2, 3.5]).astype(float_dtype)
tm.assert_series_equal(s, expected)
def test_constructor_dtype_no_cast(self):
# see gh-1572
s = Series([1, 2, 3])
s2 = Series(s, dtype=np.int64)
s2[1] = 5
assert s[1] == 5
def test_constructor_datelike_coercion(self):
# GH 9477
# incorrectly inferring on datetimelike-looking data when object dtype is
# specified
s = Series([Timestamp("20130101"), "NOV"], dtype=object)
assert s.iloc[0] == Timestamp("20130101")
assert s.iloc[1] == "NOV"
assert s.dtype == object
# the dtype was being reset on the slicing and re-inferred to datetime
# even though the blocks are mixed
belly = "216 3T19".split()
wing1 = "2T15 4H19".split()
wing2 = "416 4T20".split()
mat = pd.to_datetime("2016-01-22 2019-09-07".split())
df = pd.DataFrame({"wing1": wing1, "wing2": wing2, "mat": mat}, index=belly)
result = df.loc["3T19"]
assert result.dtype == object
result = df.loc["216"]
assert result.dtype == object
def test_constructor_datetimes_with_nulls(self):
# gh-15869
for arr in [
np.array([None, None, None, None, datetime.now(), None]),
np.array([None, None, datetime.now(), None]),
]:
result = Series(arr)
assert result.dtype == "M8[ns]"
def test_constructor_dtype_datetime64(self):
s = Series(iNaT, dtype="M8[ns]", index=range(5))
assert isna(s).all()
# in theory this should be all nulls, but since
# we are not specifying a dtype, it is ambiguous
s = Series(iNaT, index=range(5))
assert not isna(s).all()
s = Series(np.nan, dtype="M8[ns]", index=range(5))
assert isna(s).all()
s = Series([datetime(2001, 1, 2, 0, 0), iNaT], dtype="M8[ns]")
assert isna(s[1])
assert s.dtype == "M8[ns]"
s = Series([datetime(2001, 1, 2, 0, 0), np.nan], dtype="M8[ns]")
assert isna(s[1])
assert s.dtype == "M8[ns]"
# GH3416
dates = [
np.datetime64(datetime(2013, 1, 1)),
np.datetime64(datetime(2013, 1, 2)),
np.datetime64(datetime(2013, 1, 3)),
]
s = Series(dates)
assert s.dtype == "M8[ns]"
s.iloc[0] = np.nan
assert s.dtype == "M8[ns]"
# GH3414 related
expected = Series(
[datetime(2013, 1, 1), datetime(2013, 1, 2), datetime(2013, 1, 3)],
dtype="datetime64[ns]",
)
result = Series(Series(dates).astype(np.int64) / 1000000, dtype="M8[ms]")
tm.assert_series_equal(result, expected)
result = Series(dates, dtype="datetime64[ns]")
tm.assert_series_equal(result, expected)
expected = Series(
[pd.NaT, datetime(2013, 1, 2), datetime(2013, 1, 3)], dtype="datetime64[ns]"
)
result = Series([np.nan] + dates[1:], dtype="datetime64[ns]")
tm.assert_series_equal(result, expected)
dts = Series(dates, dtype="datetime64[ns]")
# valid astype
dts.astype("int64")
# invalid casting
msg = r"cannot astype a datetimelike from \[datetime64\[ns\]\] to \[int32\]"
with pytest.raises(TypeError, match=msg):
dts.astype("int32")
# ints are ok
# we test with np.int64 to get similar results on
# windows / 32-bit platforms
result = Series(dts, dtype=np.int64)
expected = Series(dts.astype(np.int64))
tm.assert_series_equal(result, expected)
# invalid dates can be held as object
result = Series([datetime(2, 1, 1)])
assert result[0] == datetime(2, 1, 1, 0, 0)
result = Series([datetime(3000, 1, 1)])
assert result[0] == datetime(3000, 1, 1, 0, 0)
# don't mix types
result = Series([Timestamp("20130101"), 1], index=["a", "b"])
assert result["a"] == Timestamp("20130101")
assert result["b"] == 1
# GH6529
# coerce datetime64 non-ns properly
dates = date_range("01-Jan-2015", "01-Dec-2015", freq="M")
values2 = dates.view(np.ndarray).astype("datetime64[ns]")
expected = Series(values2, index=dates)
for dtype in ["s", "D", "ms", "us", "ns"]:
values1 = dates.view(np.ndarray).astype(f"M8[{dtype}]")
result = Series(values1, dates)
tm.assert_series_equal(result, expected)
# GH 13876
# coerce non-ns to object properly
expected = Series(values2, index=dates, dtype=object)
for dtype in ["s", "D", "ms", "us", "ns"]:
values1 = dates.view(np.ndarray).astype(f"M8[{dtype}]")
result = Series(values1, index=dates, dtype=object)
tm.assert_series_equal(result, expected)
# leave datetime.date alone
dates2 = np.array([d.date() for d in dates.to_pydatetime()], dtype=object)
series1 = Series(dates2, dates)
tm.assert_numpy_array_equal(series1.values, dates2)
assert series1.dtype == object
# these will correctly infer a datetime
s = Series([None, pd.NaT, "2013-08-05 15:30:00.000001"])
assert s.dtype == "datetime64[ns]"
s = Series([np.nan, pd.NaT, "2013-08-05 15:30:00.000001"])
assert s.dtype == "datetime64[ns]"
s = Series([pd.NaT, None, "2013-08-05 15:30:00.000001"])
assert s.dtype == "datetime64[ns]"
s = Series([pd.NaT, np.nan, "2013-08-05 15:30:00.000001"])
assert s.dtype == "datetime64[ns]"
# tz-aware (UTC and other tz's)
# GH 8411
dr = date_range("20130101", periods=3)
assert Series(dr).iloc[0].tz is None
dr = date_range("20130101", periods=3, tz="UTC")
assert str(Series(dr).iloc[0].tz) == "UTC"
dr = date_range("20130101", periods=3, tz="US/Eastern")
assert str(Series(dr).iloc[0].tz) == "US/Eastern"
# non-convertible
s = Series([1479596223000, -1479590, pd.NaT])
assert s.dtype == "object"
assert s[2] is pd.NaT
assert "NaT" in str(s)
# if we passed a NaT it remains
s = Series([datetime(2010, 1, 1), datetime(2, 1, 1), pd.NaT])
assert s.dtype == "object"
assert s[2] is pd.NaT
assert "NaT" in str(s)
# if we passed a nan it remains
s = Series([datetime(2010, 1, 1), datetime(2, 1, 1), np.nan])
assert s.dtype == "object"
assert s[2] is np.nan
assert "NaN" in str(s)
def test_constructor_with_datetime_tz(self):
# 8260
# support datetime64 with tz
dr = date_range("20130101", periods=3, tz="US/Eastern")
s = Series(dr)
assert s.dtype.name == "datetime64[ns, US/Eastern]"
assert s.dtype == "datetime64[ns, US/Eastern]"
assert is_datetime64tz_dtype(s.dtype)
assert "datetime64[ns, US/Eastern]" in str(s)
# export
result = s.values
assert isinstance(result, np.ndarray)
assert result.dtype == "datetime64[ns]"
exp = pd.DatetimeIndex(result)
exp = exp.tz_localize("UTC").tz_convert(tz=s.dt.tz)
tm.assert_index_equal(dr, exp)
# indexing
result = s.iloc[0]
assert result == Timestamp(
"2013-01-01 00:00:00-0500", tz="US/Eastern", freq="D"
)
result = s[0]
assert result == Timestamp(
"2013-01-01 00:00:00-0500", tz="US/Eastern", freq="D"
)
result = s[Series([True, True, False], index=s.index)]
tm.assert_series_equal(result, s[0:2])
result = s.iloc[0:1]
tm.assert_series_equal(result, Series(dr[0:1]))
# concat
result = pd.concat([s.iloc[0:1], s.iloc[1:]])
tm.assert_series_equal(result, s)
# short str
assert "datetime64[ns, US/Eastern]" in str(s)
# formatting with NaT
result = s.shift()
assert "datetime64[ns, US/Eastern]" in str(result)
assert "NaT" in str(result)
# long str
t = Series(date_range("20130101", periods=1000, tz="US/Eastern"))
assert "datetime64[ns, US/Eastern]" in str(t)
result = pd.DatetimeIndex(s, freq="infer")
tm.assert_index_equal(result, dr)
# inference
s = Series(
[
pd.Timestamp("2013-01-01 13:00:00-0800", tz="US/Pacific"),
pd.Timestamp("2013-01-02 14:00:00-0800", tz="US/Pacific"),
]
)
assert s.dtype == "datetime64[ns, US/Pacific]"
assert lib.infer_dtype(s, skipna=True) == "datetime64"
s = Series(
[
pd.Timestamp("2013-01-01 13:00:00-0800", tz="US/Pacific"),
pd.Timestamp("2013-01-02 14:00:00-0800", tz="US/Eastern"),
]
)
assert s.dtype == "object"
assert lib.infer_dtype(s, skipna=True) == "datetime"
# with all NaT
s = Series(pd.NaT, index=[0, 1], dtype="datetime64[ns, US/Eastern]")
expected = Series(pd.DatetimeIndex(["NaT", "NaT"], tz="US/Eastern"))
tm.assert_series_equal(s, expected)
@pytest.mark.parametrize("arr_dtype", [np.int64, np.float64])
@pytest.mark.parametrize("dtype", ["M8", "m8"])
@pytest.mark.parametrize("unit", ["ns", "us", "ms", "s", "h", "m", "D"])
def test_construction_to_datetimelike_unit(self, arr_dtype, dtype, unit):
# tests all units
# gh-19223
dtype = f"{dtype}[{unit}]"
arr = np.array([1, 2, 3], dtype=arr_dtype)
s = Series(arr)
result = s.astype(dtype)
expected = Series(arr.astype(dtype))
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize("arg", ["2013-01-01 00:00:00", pd.NaT, np.nan, None])
def test_constructor_with_naive_string_and_datetimetz_dtype(self, arg):
# GH 17415: With naive string
result = Series([arg], dtype="datetime64[ns, CET]")
expected = Series(pd.Timestamp(arg)).dt.tz_localize("CET")
tm.assert_series_equal(result, expected)
def test_constructor_datetime64_bigendian(self):
# GH#30976
ms = np.datetime64(1, "ms")
arr = np.array([np.datetime64(1, "ms")], dtype=">M8[ms]")
result = Series(arr)
expected = Series([Timestamp(ms)])
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize("interval_constructor", [IntervalIndex, IntervalArray])
def test_construction_interval(self, interval_constructor):
# construction from interval & array of intervals
intervals = interval_constructor.from_breaks(np.arange(3), closed="right")
result = Series(intervals)
assert result.dtype == "interval[int64]"
tm.assert_index_equal(Index(result.values), Index(intervals))
@pytest.mark.parametrize(
"data_constructor", [list, np.array], ids=["list", "ndarray[object]"]
)
def test_constructor_infer_interval(self, data_constructor):
# GH 23563: consistent closed results in interval dtype
data = [pd.Interval(0, 1), pd.Interval(0, 2), None]
result = pd.Series(data_constructor(data))
expected = pd.Series(IntervalArray(data))
assert result.dtype == "interval[float64]"
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize(
"data_constructor", [list, np.array], ids=["list", "ndarray[object]"]
)
def test_constructor_interval_mixed_closed(self, data_constructor):
# GH 23563: mixed closed results in object dtype (not interval dtype)
data = [pd.Interval(0, 1, closed="both"), pd.Interval(0, 2, closed="neither")]
result = Series(data_constructor(data))
assert result.dtype == object
assert result.tolist() == data
def test_construction_consistency(self):
# make sure that we are not re-localizing upon construction
# GH 14928
s = Series(pd.date_range("20130101", periods=3, tz="US/Eastern"))
result = Series(s, dtype=s.dtype)
tm.assert_series_equal(result, s)
result = Series(s.dt.tz_convert("UTC"), dtype=s.dtype)
tm.assert_series_equal(result, s)
result = Series(s.values, dtype=s.dtype)
tm.assert_series_equal(result, s)
@pytest.mark.parametrize(
"data_constructor", [list, np.array], ids=["list", "ndarray[object]"]
)
def test_constructor_infer_period(self, data_constructor):
data = [pd.Period("2000", "D"), pd.Period("2001", "D"), None]
result = pd.Series(data_constructor(data))
expected = pd.Series(period_array(data))
tm.assert_series_equal(result, expected)
assert result.dtype == "Period[D]"
def test_constructor_period_incompatible_frequency(self):
data = [pd.Period("2000", "D"), pd.Period("2001", "A")]
result = pd.Series(data)
assert result.dtype == object
assert result.tolist() == data
def test_constructor_periodindex(self):
# GH7932
# converting a PeriodIndex when put in a Series
pi = period_range("20130101", periods=5, freq="D")
s = Series(pi)
assert s.dtype == "Period[D]"
expected = Series(pi.astype(object))
tm.assert_series_equal(s, expected)
def test_constructor_dict(self):
d = {"a": 0.0, "b": 1.0, "c": 2.0}
result = Series(d, index=["b", "c", "d", "a"])
expected = Series([1, 2, np.nan, 0], index=["b", "c", "d", "a"])
tm.assert_series_equal(result, expected)
pidx = tm.makePeriodIndex(100)
d = {pidx[0]: 0, pidx[1]: 1}
result = Series(d, index=pidx)
expected = Series(np.nan, pidx, dtype=np.float64)
expected.iloc[0] = 0
expected.iloc[1] = 1
tm.assert_series_equal(result, expected)
def test_constructor_dict_list_value_explicit_dtype(self):
# GH 18625
d = {"a": [[2], [3], [4]]}
result = Series(d, index=["a"], dtype="object")
expected = Series(d, index=["a"])
tm.assert_series_equal(result, expected)
def test_constructor_dict_order(self):
# GH19018
# initialization ordering: by insertion order if python>= 3.6, else
# order by value
d = {"b": 1, "a": 0, "c": 2}
result = Series(d)
expected = Series([1, 0, 2], index=list("bac"))
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize(
"data,dtype",
[
(Period("2020-01"), PeriodDtype("M")),
(Interval(left=0, right=5), IntervalDtype("int64")),
(
Timestamp("2011-01-01", tz="US/Eastern"),
DatetimeTZDtype(tz="US/Eastern"),
),
],
)
def test_constructor_dict_extension(self, data, dtype):
d = {"a": data}
result = Series(d, index=["a"])
expected = Series(data, index=["a"], dtype=dtype)
assert result.dtype == dtype
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize("value", [2, np.nan, None, float("nan")])
def test_constructor_dict_nan_key(self, value):
# GH 18480
d = {1: "a", value: "b", float("nan"): "c", 4: "d"}
result = Series(d).sort_values()
expected = Series(["a", "b", "c", "d"], index=[1, value, np.nan, 4])
tm.assert_series_equal(result, expected)
# MultiIndex:
d = {(1, 1): "a", (2, np.nan): "b", (3, value): "c"}
result = Series(d).sort_values()
expected = Series(
["a", "b", "c"], index=Index([(1, 1), (2, np.nan), (3, value)])
)
tm.assert_series_equal(result, expected)
def test_constructor_dict_datetime64_index(self):
# GH 9456
dates_as_str = ["1984-02-19", "1988-11-06", "1989-12-03", "1990-03-15"]
values = [42544017.198965244, 1234565, 40512335.181958228, -1]
def create_data(constructor):
return dict(zip((constructor(x) for x in dates_as_str), values))
data_datetime64 = create_data(np.datetime64)
data_datetime = create_data(lambda x: datetime.strptime(x, "%Y-%m-%d"))
data_Timestamp = create_data(Timestamp)
expected = Series(values, (Timestamp(x) for x in dates_as_str))
result_datetime64 = Series(data_datetime64)
result_datetime = Series(data_datetime)
result_Timestamp = Series(data_Timestamp)
tm.assert_series_equal(result_datetime64, expected)
tm.assert_series_equal(result_datetime, expected)
tm.assert_series_equal(result_Timestamp, expected)
def test_constructor_dict_tuple_indexer(self):
# GH 12948
data = {(1, 1, None): -1.0}
result = Series(data)
expected = Series(
-1.0, index=MultiIndex(levels=[[1], [1], [np.nan]], codes=[[0], [0], [-1]])
)
tm.assert_series_equal(result, expected)
def test_constructor_mapping(self, non_dict_mapping_subclass):
# GH 29788
ndm = non_dict_mapping_subclass({3: "three"})
result = Series(ndm)
expected = | Series(["three"], index=[3]) | pandas.Series |
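# A minimal, hedged sketch of the Series-from-dict behaviour exercised by test_constructor_dict
# above: dict keys absent from the supplied index come back as NaN. Values are illustrative only.
import numpy as np
import pandas as pd
s = pd.Series({"a": 0.0, "b": 1.0, "c": 2.0}, index=["b", "c", "d", "a"])
# -> b=1.0, c=2.0, d=NaN, a=0.0
assert np.isnan(s["d"]) and s["a"] == 0.0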
import os
import copy
import pytest
import numpy as np
import pandas as pd
import pyarrow as pa
from pyarrow import feather as pf
from pyarrow import parquet as pq
from time_series_transform.io.base import io_base
from time_series_transform.io.numpy import (
from_numpy,
to_numpy
)
from time_series_transform.io.pandas import (
from_pandas,
to_pandas
)
from time_series_transform.io.arrow import (
from_arrow_record_batch,
from_arrow_table,
to_arrow_record_batch,
to_arrow_table
)
from time_series_transform.transform_core_api.base import (
Time_Series_Data,
Time_Series_Data_Collection
)
from time_series_transform.io.parquet import (
from_parquet,
to_parquet
)
from time_series_transform.io.feather import (
from_feather,
to_feather
)
@pytest.fixture(scope = 'class')
def dictList_single():
return {
'time': [1, 2],
'data': [1, 2]
}
@pytest.fixture(scope = 'class')
def dictList_collection():
return {
'time': [1,2,1,3],
'data':[1,2,1,2],
'category':[1,1,2,2]
}
@pytest.fixture(scope = 'class')
def expect_single_expandTime():
return {
'data_1':[1],
'data_2':[2]
}
@pytest.fixture(scope = 'class')
def expect_single_seperateLabel():
return [{
'time': [1, 2],
'data': [1, 2]
},
{
'data_label': [1, 2]
}]
@pytest.fixture(scope = 'class')
def expect_collection_seperateLabel():
return [{
'time': [1,2,1,3],
'data':[1,2,1,2],
'category':[1,1,2,2]
},
{
'data_label':[1,2,1,2]
}
]
@pytest.fixture(scope = 'class')
def expect_collection_expandTime():
return {
'pad': {
'data_1':[1,1],
'data_2':[2,np.nan],
'data_3':[np.nan,2],
'category':[1,2]
},
'remove': {
'data_1':[1,1],
'category':[1,2]
}
}
@pytest.fixture(scope = 'class')
def expect_collection_expandCategory():
return {
'pad': {
'time':[1,2,3],
'data_1':[1,2,np.nan],
'data_2':[1,np.nan,2]
},
'remove': {
'time':[1],
'data_1':[1],
'data_2':[1]
}
}
@pytest.fixture(scope = 'class')
def expect_collection_expandFull():
return {
'pad': {
'data_1_1':[1],
'data_2_1':[1],
'data_1_2':[2],
'data_2_2':[np.nan],
'data_1_3':[np.nan],
'data_2_3':[2]
},
'remove': {
'data_1_1':[1],
'data_2_1':[1],
}
}
@pytest.fixture(scope = 'class')
def expect_collection_noExpand():
return {
'ignore':{
'time': [1,2,1,3],
'data':[1,2,1,2],
'category':[1,1,2,2]
},
'pad': {
'time': [1,2,3,1,2,3],
'data':[1,2,np.nan,1,np.nan,2],
'category':[1,1,1,2,2,2]
},
'remove': {
'time': [1,1],
'data':[1,1],
'category':[1,2]
}
}
@pytest.fixture(scope = 'class')
def seq_single():
return {
'time':[1,2,3],
'data':[[1,2,3],[11,12,13],[21,22,23]]
}
@pytest.fixture(scope = 'class')
def seq_collection():
return {
'time':[1,2,1,2],
'data':[[1,2],[1,2],[2,2],[2,2]],
'category':[1,1,2,2]
}
@pytest.fixture(scope = 'class')
def expect_seq_collection():
return {
'data_1_1':[[1,2]],
'data_2_1':[[2,2]],
'data_1_2':[[1,2]],
'data_2_2':[[2,2]]
}
class Test_base_io:
def test_base_io_from_single(self, dictList_single,expect_single_expandTime):
ExpandTimeAns = expect_single_expandTime
data = dictList_single
ts = Time_Series_Data()
ts = ts.set_time_index(data['time'], 'time')
ts = ts.set_data(data['data'], 'data')
io = io_base(ts, 'time', None)
timeSeries = io.from_single(False)
for i in timeSeries:
assert timeSeries[i].tolist() == data[i]
timeSeries = io.from_single(True)
for i in timeSeries:
assert timeSeries[i] == ExpandTimeAns[i]
def test_base_io_to_single(self, dictList_single):
data = dictList_single
ts = Time_Series_Data()
ts = ts.set_time_index(data['time'], 'time')
ts = ts.set_data(data['data'], 'data')
io = io_base(data, 'time', None)
assert io.to_single() == ts
def test_base_io_from_collection_expandTime(self, dictList_collection,expect_collection_expandTime):
noChange = dictList_collection
expand = expect_collection_expandTime
fullExpand = []
data = dictList_collection
ts = Time_Series_Data()
ts = ts.set_time_index(data['time'], 'time')
ts = ts.set_data(data['data'], 'data')
ts = ts.set_data(data['category'],'category')
tsc = Time_Series_Data_Collection(ts,'time','category')
io = io_base(tsc, 'time', 'category')
with pytest.raises(ValueError):
timeSeries = io.from_collection(False,True,'ignore')
timeSeries = io.from_collection(False,True,'pad')
for i in timeSeries:
np.testing.assert_equal(timeSeries[i],expand['pad'][i])
timeSeries = io.from_collection(False,True,'remove')
for i in timeSeries:
np.testing.assert_equal(timeSeries[i],expand['remove'][i])
def test_base_io_from_collection_expandCategory(self, dictList_collection,expect_collection_expandCategory):
noChange = dictList_collection
expand = expect_collection_expandCategory
fullExpand = []
data = dictList_collection
ts = Time_Series_Data()
ts = ts.set_time_index(data['time'], 'time')
ts = ts.set_data(data['data'], 'data')
ts = ts.set_data(data['category'],'category')
tsc = Time_Series_Data_Collection(ts,'time','category')
io = io_base(tsc, 'time', 'category')
with pytest.raises(ValueError):
timeSeries = io.from_collection(True,False,'ignore')
timeSeries = io.from_collection(True,False,'pad')
for i in timeSeries:
np.testing.assert_equal(timeSeries[i],expand['pad'][i])
timeSeries = io.from_collection(True,False,'remove')
for i in timeSeries:
np.testing.assert_equal(timeSeries[i],expand['remove'][i])
def test_base_io_from_collection_expandFull(self, dictList_collection,expect_collection_expandFull):
noChange = dictList_collection
expand = expect_collection_expandFull
fullExpand = []
data = dictList_collection
ts = Time_Series_Data()
ts = ts.set_time_index(data['time'], 'time')
ts = ts.set_data(data['data'], 'data')
ts = ts.set_data(data['category'],'category')
tsc = Time_Series_Data_Collection(ts,'time','category')
io = io_base(tsc, 'time', 'category')
timeSeries = io.from_collection(True,True,'pad')
for i in timeSeries:
np.testing.assert_equal(timeSeries[i],expand['pad'][i])
timeSeries = io.from_collection(True,True,'remove')
for i in timeSeries:
np.testing.assert_equal(timeSeries[i],expand['remove'][i])
def test_base_io_to_collection(self, dictList_collection):
dataList = dictList_collection
io = io_base(dataList, 'time', 'category')
testData = io.to_collection()
tsd = Time_Series_Data(dataList,'time')
tsc = Time_Series_Data_Collection(tsd,'time','category')
assert testData== tsc
def test_base_io_from_collection_no_expand(self,dictList_collection,expect_collection_noExpand):
noChange = dictList_collection
expand = expect_collection_noExpand
data = dictList_collection
ts = Time_Series_Data()
ts = ts.set_time_index(data['time'], 'time')
ts = ts.set_data(data['data'], 'data')
ts = ts.set_data(data['category'],'category')
tsc = Time_Series_Data_Collection(ts,'time','category')
io = io_base(tsc, 'time', 'category')
timeSeries = io.from_collection(False,False,'ignore')
for i in timeSeries:
np.testing.assert_array_equal(timeSeries[i],expand['ignore'][i])
timeSeries = io.from_collection(False,False,'pad')
for i in timeSeries:
np.testing.assert_equal(timeSeries[i],expand['pad'][i])
timeSeries = io.from_collection(False,False,'remove')
for i in timeSeries:
np.testing.assert_equal(timeSeries[i],expand['remove'][i])
class Test_Pandas_IO:
def test_from_pandas_single(self,dictList_single):
data = dictList_single
df = pd.DataFrame(dictList_single)
tsd = Time_Series_Data(data,'time')
testData = from_pandas(df,'time',None)
assert tsd == testData
def test_from_pandas_collection(self,dictList_collection):
data = dictList_collection
df = pd.DataFrame(dictList_collection)
tsd = Time_Series_Data(data,'time')
tsc = Time_Series_Data_Collection(tsd,'time','category')
testData = from_pandas(df,'time','category')
assert tsc == testData
def test_to_pandas_single(self,dictList_single,expect_single_expandTime):
data = dictList_single
df = pd.DataFrame(data)
expandTime = pd.DataFrame(expect_single_expandTime)
tsd = Time_Series_Data(data,'time')
testData = to_pandas(
tsd,
expandCategory= None,
expandTime=False,
preprocessType= None
)
pd.testing.assert_frame_equal(testData,df,check_dtype=False)
testData = to_pandas(
tsd,
expandCategory= None,
expandTime=True,
preprocessType= None
)
pd.testing.assert_frame_equal(testData,expandTime,check_dtype=False)
def test_to_pandas_collection_expandTime(self,dictList_collection,expect_collection_expandTime):
data = dictList_collection
expandTime_pad = pd.DataFrame(expect_collection_expandTime['pad'])
expandTime_remove = pd.DataFrame(expect_collection_expandTime['remove'])
tsd = Time_Series_Data(data,'time')
tsc = Time_Series_Data_Collection(tsd,'time','category')
testData = to_pandas(
tsc,
expandCategory= False,
expandTime=True,
preprocessType= 'pad'
)
pd.testing.assert_frame_equal(testData,expandTime_pad,check_dtype=False)
testData = to_pandas(
tsc,
expandCategory= False,
expandTime=True,
preprocessType= 'remove'
)
pd.testing.assert_frame_equal(testData,expandTime_remove,check_dtype=False)
with pytest.raises(ValueError):
timeSeries = to_pandas(tsc,False,True,'ignore')
def test_to_pandas_collection_expandCategory(self,dictList_collection,expect_collection_expandCategory):
data = dictList_collection
expandTime_pad = pd.DataFrame(expect_collection_expandCategory['pad'])
expandTime_remove = pd.DataFrame(expect_collection_expandCategory['remove'])
tsd = Time_Series_Data(data,'time')
tsc = Time_Series_Data_Collection(tsd,'time','category')
testData = to_pandas(
tsc,
expandCategory= True,
expandTime=False,
preprocessType= 'pad'
)
pd.testing.assert_frame_equal(testData,expandTime_pad,check_dtype=False)
testData = to_pandas(
tsc,
expandCategory= True,
expandTime=False,
preprocessType= 'remove'
)
pd.testing.assert_frame_equal(testData,expandTime_remove,check_dtype=False)
with pytest.raises(ValueError):
timeSeries = to_pandas(tsc,True,False,'ignore')
def test_to_pandas_collection_expandFull(self,dictList_collection,expect_collection_expandFull):
data = dictList_collection
expandTime_pad = pd.DataFrame(expect_collection_expandFull['pad'])
expandTime_remove = pd.DataFrame(expect_collection_expandFull['remove'])
tsd = Time_Series_Data(data,'time')
tsc = Time_Series_Data_Collection(tsd,'time','category')
testData = to_pandas(
tsc,
expandCategory= True,
expandTime=True,
preprocessType= 'pad'
)
pd.testing.assert_frame_equal(testData,expandTime_pad,check_dtype=False)
testData = to_pandas(
tsc,
expandCategory= True,
expandTime=True,
preprocessType= 'remove'
)
pd.testing.assert_frame_equal(testData,expandTime_remove,check_dtype=False)
def test_to_pandas_collection_noExpand(self,dictList_collection,expect_collection_noExpand):
data = dictList_collection
expandTime_ignore = pd.DataFrame(expect_collection_noExpand['ignore'])
expandTime_pad = pd.DataFrame(expect_collection_noExpand['pad'])
expandTime_remove = pd.DataFrame(expect_collection_noExpand['remove'])
tsd = Time_Series_Data(data,'time')
tsc = Time_Series_Data_Collection(tsd,'time','category')
testData = to_pandas(
tsc,
expandCategory= False,
expandTime=False,
preprocessType= 'pad'
)
pd.testing.assert_frame_equal(testData,expandTime_pad,check_dtype=False)
testData = to_pandas(
tsc,
expandCategory= False,
expandTime=False,
preprocessType= 'remove'
)
pd.testing.assert_frame_equal(testData,expandTime_remove,check_dtype=False)
testData = to_pandas(
tsc,
expandCategory= False,
expandTime=False,
preprocessType= 'ignore'
)
pd.testing.assert_frame_equal(testData,expandTime_ignore,check_dtype=False)
def test_to_pandas_seperateLabels_single(self,dictList_single,expect_single_seperateLabel):
data = dictList_single
expectedX, expectedY = expect_single_seperateLabel
expectedX = pd.DataFrame(expectedX)
expectedY = pd.DataFrame(expectedY)
tsd = Time_Series_Data(data,'time')
tsd.set_labels([1,2],'data_label')
x, y = to_pandas(tsd,False,False,'ignore',True)
print(x)
print(y)
pd.testing.assert_frame_equal(x,expectedX,check_dtype=False)
pd.testing.assert_frame_equal(y,expectedY,check_dtype=False)
def test_to_pandas_seperateLabels_collection(self,dictList_collection,expect_collection_seperateLabel):
data = dictList_collection
expectedX, expectedY = expect_collection_seperateLabel
expectedX = pd.DataFrame(expectedX)
expectedY = pd.DataFrame(expectedY)
tsd = Time_Series_Data(data,'time')
tsd = tsd.set_labels([1,2,1,2],'data_label')
tsc = Time_Series_Data_Collection(tsd,'time','category')
x, y = to_pandas(tsc,False,False,'ignore',True)
print(y)
pd.testing.assert_frame_equal(x,expectedX,check_dtype=False)
pd.testing.assert_frame_equal(y,expectedY,check_dtype=False)
def test_to_pandas_single_sequence(self,seq_single):
data = seq_single
df= pd.DataFrame(data)
tsd = Time_Series_Data(data,'time')
test = to_pandas(tsd,False,False,'ignore',False)
pd.testing.assert_frame_equal(test,df,False)
def test_to_pandas_collection_sequence(self,seq_collection,expect_seq_collection):
data = seq_collection
df = pd.DataFrame(data)
tsd = Time_Series_Data(data,'time')
tsc = Time_Series_Data_Collection(tsd,'time','category')
test = to_pandas(tsc,False,False,'ignore')
pd.testing.assert_frame_equal(df,test,False)
test = to_pandas(tsc,True,True,'ignore')
full = pd.DataFrame(expect_seq_collection)
print(test)
print(full)
test = test.reindex(sorted(df.columns), axis=1)
full = full.reindex(sorted(df.columns), axis=1)
pd.testing.assert_frame_equal(test,full,False)
class Test_Numpy_IO:
def test_from_numpy_single(self,dictList_single):
data = dictList_single
tsd = Time_Series_Data()
tsd.set_time_index(data['time'],0)
tsd.set_data(data['data'],1)
numpydata = pd.DataFrame(dictList_single).values
testData = from_numpy(numpydata,0,None)
assert tsd == testData
def test_from_numpy_collection(self,dictList_collection):
data = dictList_collection
numpyData = pd.DataFrame(data).values
numpyDataDict = pd.DataFrame(pd.DataFrame(data).values).to_dict('list')
testData = from_numpy(numpyData,0,2)
tsd = Time_Series_Data(numpyDataDict,0)
assert testData == Time_Series_Data_Collection(tsd,0,2)
def test_to_numpy_single(self,dictList_single,expect_single_expandTime):
data = dictList_single
numpyData = pd.DataFrame(data).values
expandTime = pd.DataFrame(expect_single_expandTime).values
tsd = Time_Series_Data()
tsd.set_time_index(data['time'],0)
tsd.set_data(data['data'],1)
testData = to_numpy(
tsd,
expandCategory= None,
expandTime=False,
preprocessType= None
)
np.testing.assert_equal(testData,numpyData)
testData = to_numpy(
tsd,
expandCategory= None,
expandTime=True,
preprocessType= None
)
np.testing.assert_equal(testData,expandTime)
def test_to_numpy_collection_expandTime(self,dictList_collection,expect_collection_expandTime):
data = dictList_collection
results = expect_collection_expandTime
numpyData = pd.DataFrame(data).values
tsd = Time_Series_Data(data,'time')
tsc = Time_Series_Data_Collection(tsd,'time','category')
pad_numpy = to_numpy(
tsc,
expandCategory = False,
expandTime = True,
preprocessType='pad'
)
remove_numpy = to_numpy(
tsc,
expandCategory = False,
expandTime = True,
preprocessType='remove'
)
np.testing.assert_equal(pad_numpy,pd.DataFrame(results['pad']).values)
np.testing.assert_equal(remove_numpy,pd.DataFrame(results['remove']).values)
with pytest.raises(ValueError):
timeSeries = to_numpy(tsc,False,True,'ignore')
def test_to_numpy_collection_expandCategory(self,dictList_collection,expect_collection_expandCategory):
data = dictList_collection
results = expect_collection_expandCategory
numpyData = pd.DataFrame(data).values
tsd = Time_Series_Data(data,'time')
tsc = Time_Series_Data_Collection(tsd,'time','category')
pad_numpy = to_numpy(
tsc,
expandCategory = True,
expandTime = False,
preprocessType='pad'
)
remove_numpy = to_numpy(
tsc,
expandCategory = True,
expandTime = False,
preprocessType='remove'
)
np.testing.assert_equal(pad_numpy,pd.DataFrame(results['pad']).values)
np.testing.assert_equal(remove_numpy,pd.DataFrame(results['remove']).values)
with pytest.raises(ValueError):
timeSeries = to_numpy(tsc,False,True,'ignore')
def test_to_numpy_collection_expandFull(self,dictList_collection,expect_collection_expandFull):
data = dictList_collection
results = expect_collection_expandFull
numpyData = pd.DataFrame(data).values
tsd = Time_Series_Data(data,'time')
tsc = Time_Series_Data_Collection(tsd,'time','category')
pad_numpy = to_numpy(
tsc,
expandCategory = True,
expandTime = True,
preprocessType='pad'
)
np.testing.assert_equal(pad_numpy,pd.DataFrame(results['pad']).values)
remove_numpy = to_numpy(
tsc,
expandCategory = True,
expandTime = True,
preprocessType='remove'
)
np.testing.assert_equal(remove_numpy,pd.DataFrame(results['remove']).values)
def test_to_numpy_collection_noExpand(self,dictList_collection,expect_collection_noExpand):
data = dictList_collection
results = expect_collection_noExpand
numpyData = pd.DataFrame(data).values
tsd = Time_Series_Data(data,'time')
tsc = Time_Series_Data_Collection(tsd,'time','category')
pad_numpy = to_numpy(
tsc,
expandCategory = False,
expandTime = False,
preprocessType='pad'
)
np.testing.assert_equal(pad_numpy,pd.DataFrame(results['pad']).values)
remove_numpy = to_numpy(
tsc,
expandCategory = False,
expandTime = False,
preprocessType='remove'
)
np.testing.assert_equal(remove_numpy,pd.DataFrame(results['remove']).values)
ignore_numpy = to_numpy(
tsc,
expandCategory = False,
expandTime = False,
preprocessType='ignore'
)
np.testing.assert_equal(ignore_numpy,pd.DataFrame(results['ignore']).values)
def test_to_numpy_seperateLabel_single(self,dictList_single,expect_single_seperateLabel):
data = dictList_single
expectedX, expectedY = expect_single_seperateLabel
expectedX = pd.DataFrame(expectedX).values
expectedY = pd.DataFrame(expectedY).values
tsd = Time_Series_Data(data,'time')
tsd.set_labels([1,2],'data_label')
x, y = to_numpy(tsd,False,False,'ignore',True)
print(x)
print(y)
np.testing.assert_equal(x,expectedX)
np.testing.assert_equal(y,expectedY)
def test_to_numpy_seperateLabel_collection(self,dictList_collection,expect_collection_seperateLabel):
data = dictList_collection
expectedX, expectedY = expect_collection_seperateLabel
expectedX = pd.DataFrame(expectedX).values
expectedY = pd.DataFrame(expectedY).values
tsd = Time_Series_Data(data,'time')
tsd = tsd.set_labels([1,2,1,2],'data_label')
tsc = Time_Series_Data_Collection(tsd,'time','category')
x, y = to_numpy(tsc,False,False,'ignore',True)
np.testing.assert_equal(x,expectedX)
np.testing.assert_equal(y,expectedY)
def test_to_numpy_single_sequence(self,seq_single):
data = seq_single
df= pd.DataFrame(data).values
tsd = Time_Series_Data(data,'time')
test = to_numpy(tsd,False,False,'ignore',False)
np.testing.assert_equal(df,test)
def test_to_numpy_collection_sequence(self,seq_collection,expect_seq_collection):
data = seq_collection
df = pd.DataFrame(data).values
tsd = Time_Series_Data(data,'time')
tsc = Time_Series_Data_Collection(tsd,'time','category')
test = to_numpy(tsc,False,False,'ignore')
for i in range(len(test)):
if isinstance(test[i][1],np.ndarray):
test[i][1] = test[i][1].tolist()
np.testing.assert_equal(df,test)
test = to_numpy(tsc,True,True,'ignore')
full = pd.DataFrame(expect_seq_collection).values
for i in range(len(test[0])):
if isinstance(test[0][i],np.ndarray):
test[0][i] = test[0][i].tolist()
np.testing.assert_equal(full,test)
class Test_Arrow_IO:
def test_from_arrow_table_single(self,dictList_single):
data = dictList_single
df = pd.DataFrame(dictList_single)
table = pa.Table.from_pandas(df)
testData = from_arrow_table(table,'time',None)
tsd = Time_Series_Data(data,'time')
assert tsd == testData
def test_from_arrow_table_collection(self,dictList_collection):
data = dictList_collection
df = pd.DataFrame(dictList_collection)
tsd = Time_Series_Data(data,'time')
tsc = Time_Series_Data_Collection(tsd,'time','category')
table = pa.Table.from_pandas(df)
testData = from_arrow_table(table,'time','category')
assert tsc == testData
def test_from_arrow_batch_single(self,dictList_single):
data = dictList_single
df = pd.DataFrame(dictList_single)
table = pa.RecordBatch.from_pandas(df,preserve_index = False)
testData = from_arrow_record_batch(table,'time',None)
tsd = Time_Series_Data(data,'time')
assert tsd == testData
def test_from_arrow_batch_collection(self,dictList_collection):
data = dictList_collection
df = pd.DataFrame(dictList_collection)
tsd = Time_Series_Data(data,'time')
tsc = Time_Series_Data_Collection(tsd,'time','category')
table = pa.RecordBatch.from_pandas(df,preserve_index = False)
testData = from_arrow_record_batch(table,'time','category')
assert tsc == testData
def test_to_arrow_table_single(self,dictList_single,expect_single_expandTime):
data = dictList_single
df = pd.DataFrame(data)
expandTime = pd.DataFrame(expect_single_expandTime)
tsd = Time_Series_Data(data,'time')
testData = to_arrow_table(
tsd,
expandCategory= None,
expandTime=False,
preprocessType= None
).to_pandas()
pd.testing.assert_frame_equal(testData,df,check_dtype=False)
testData = to_arrow_table(
tsd,
expandCategory= None,
expandTime=True,
preprocessType= None
).to_pandas()
pd.testing.assert_frame_equal(testData,expandTime,check_dtype=False)
def test_to_arrow_table_collection_expandTime(self,dictList_collection,expect_collection_expandTime):
data = dictList_collection
expandTime_pad = pd.DataFrame(expect_collection_expandTime['pad'])
expandTime_remove = pd.DataFrame(expect_collection_expandTime['remove'])
tsd = Time_Series_Data(data,'time')
tsc = Time_Series_Data_Collection(tsd,'time','category')
testData = to_arrow_table(
tsc,
expandCategory= False,
expandTime=True,
preprocessType= 'pad'
).to_pandas()
| pd.testing.assert_frame_equal(testData,expandTime_pad,check_dtype=False) | pandas.testing.assert_frame_equal |
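# A minimal, hedged round-trip sketch of the pandas I/O exercised in Test_Pandas_IO above; it only
# uses calls the tests themselves make (from_pandas, to_pandas) and mirrors the dictList_single fixture.
import pandas as pd
from time_series_transform.io.pandas import from_pandas, to_pandas
df = pd.DataFrame({'time': [1, 2], 'data': [1, 2]})
tsd = from_pandas(df, 'time', None)  # DataFrame -> Time_Series_Data
roundtrip = to_pandas(tsd, expandCategory=None, expandTime=False, preprocessType=None)
pd.testing.assert_frame_equal(roundtrip, df, check_dtype=False)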
import streamlit as st
# Essentials
import numpy as np
import pandas as pd
import datetime
import random
# Plots
import matplotlib.pyplot as plt
import seaborn as sns
# Models
from sklearn.kernel_ridge import KernelRidge
from sklearn.linear_model import Lasso
from sklearn.linear_model import LinearRegression
from sklearn.linear_model import Ridge, RidgeCV
from sklearn.linear_model import ElasticNet, ElasticNetCV
import xgboost
from xgboost.sklearn import XGBRegressor
import lightgbm
from lightgbm import LGBMRegressor
# Misc
from sklearn.model_selection import StratifiedKFold
from sklearn.model_selection import RandomizedSearchCV, GridSearchCV
from sklearn.model_selection import train_test_split
from sklearn.model_selection import KFold, cross_val_score
from sklearn.metrics import mean_squared_error
from sklearn.preprocessing import OneHotEncoder
from sklearn.preprocessing import LabelEncoder
from sklearn.decomposition import PCA
import pickle
import joblib
import folium
import branca.colormap as cm
from streamlit_folium import folium_static
import bs4
from bs4 import BeautifulSoup as bs
import requests
import json
import re
import base64
def main():
| pd.set_option('display.max_colwidth', None) | pandas.set_option |
import os
import ast
import pandas as pd
import numpy as np
from datetime import datetime
import time
import logging
level_config = {'debug': logging.DEBUG, 'info': logging.INFO}
FILE_SIZE = 500
BYTES_PER_PKT = 1500.0*8
MILLISEC_IN_SEC = 1000.0
EXP_LEN = 1000 # millisecond
class Metric:
def __init__(self,name,mi=1., lbd=1., mi_s=1.,log_level='debug'):
self.name = name
self.mi = mi
self.lbd = lbd
self.mi_s = mi_s
log_level = level_config[log_level.lower()]
logging.basicConfig(level=log_level)
self.logger = logging.getLogger(__name__)
def calc(self,listRate,listRebuffer):
pass
def tabulation(self,listQoE,scores = pd.DataFrame(),abrRule = 'abr Rule',prefix=''):
scores_tmp = pd.DataFrame()
scores_tmp['abr Rule'] = [ abrRule for i in listQoE]
scores_tmp['Average value'] = np.asarray([i[0] for i in listQoE])
scores_tmp['Metrics'] = [ self.name for i in listQoE]
scores = scores.append(scores_tmp)
scores_tmp = pd.DataFrame()
scores_tmp['Average value'] = np.asarray([i[1] for i in listQoE])
scores_tmp['Metrics'] = [ prefix+'_'+'Bitrate Utility' for i in listQoE]
scores_tmp['abr Rule'] = [ abrRule for i in listQoE]
scores = scores.append(scores_tmp)
scores_tmp['Average value'] = np.asarray([i[2] for i in listQoE])
scores_tmp['Metrics'] = [ prefix+'_'+'Smoothness Penalty' for i in listQoE]
scores_tmp['abr Rule'] = [ abrRule for i in listQoE]
scores = scores.append(scores_tmp)
scores_tmp = | pd.DataFrame() | pandas.DataFrame |
import matplotlib.pyplot as plt
from plotnine import *
import pandas as pd
df_inflasi = | pd.read_csv('https://storage.googleapis.com/dqlab-dataset/inflasi.csv') | pandas.read_csv |
"""Make it easier to work with the CheXpert .csv files.
- Create explicit columns for patient ID, study number, and view number, extracted from the file
paths.
- Adjust the column data types to reduce memory usage.
- Add a column for age groups to help cross-sectional analysis.
- Labels are encoded as integers, including the "no mention" label (encoded as an empty string in the
validation set), which is converted for consistency.
Usage from the command line: ``python3 -m preprocess > chexpert.csv``
From another module::
import chexpert_dataset as cd
cxdata = cd.CheXpertDataset()
cxdata.fix_dataset() # optional
cxdata.df.head()
IMPORTANT: because we are using categories, set observed=True when using groupby with the
categorical columns to avoid surprises (https://github.com/pandas-dev/pandas/issues/17594)
"""
# pylint: disable=too-few-public-methods
import logging
import os
import re
import pandas as pd
import imagesize
# Dataset invariants that must hold when we manipulate it (groupby, pivot_table, filters, etc.)
# Numbers come from analyzing the .csv files shipped with the dataset (see chexpert_csv_eda.py)
# If `assert` start to fail in the code, either the code is broken or the dataset has changed
PATIENT_NUM_TRAINING = 64_540
PATIENT_NUM_VALIDATION = 200
PATIENT_NUM_TOTAL = PATIENT_NUM_VALIDATION + PATIENT_NUM_TRAINING
STUDY_NUM_TRAINING = 187_641
STUDY_NUM_VALIDATION = 200
STUDY_NUM_TOTAL = STUDY_NUM_TRAINING + STUDY_NUM_VALIDATION
IMAGE_NUM_TRAINING = 223_414
IMAGE_NUM_VALIDATION = 234
IMAGE_NUM_TOTAL = IMAGE_NUM_VALIDATION + IMAGE_NUM_TRAINING
# Number of unique combinations of "patient id/age group"
# This number is larger than the number of patients because some patients have studies over multiple
# years, crossing age groups - if we group by age group we need to take this into account when
# checking the consistency of the datasets we are working with
# See how it was calculated in chexpert_statistics.py
PATIENT_NUM_TOTAL_BY_AGE_GROUP = 66_366
# Labels as used in the DataFrame
LABEL_POSITIVE = 1
LABEL_NEGATIVE = 0
LABEL_UNCERTAIN = -1
LABEL_NO_MENTION = -99
# Observations (must match the names in the .csv files)
OBSERVATION_NO_FINDING = 'No Finding'
OBSERVATION_PATHOLOGY = sorted(['Enlarged Cardiomediastinum', 'Cardiomegaly', 'Lung Opacity',
'Lung Lesion', 'Edema', 'Consolidation', 'Pneumonia', 'Atelectasis',
'Pneumothorax', 'Pleural Effusion', 'Pleural Other'])
OBSERVATION_OTHER = [OBSERVATION_NO_FINDING, 'Fracture', 'Support Devices']
OBSERVATION_ALL = OBSERVATION_OTHER + OBSERVATION_PATHOLOGY
# Names of some commonly-used columns already in the dataset
COL_SEX = 'Sex'
COL_AGE = 'Age'
COL_FRONTAL_LATERAL = 'Frontal/Lateral'
COL_AP_PA = 'AP/PA'
# Names of the columns added with this code
COL_PATIENT_ID = 'Patient ID'
COL_STUDY_NUMBER = 'Study Number'
COL_VIEW_NUMBER = 'View Number'
COL_AGE_GROUP = 'Age Group'
COL_TRAIN_VALIDATION = 'Training/Validation'
# Values of columns added with this code
TRAINING = 'Training'
VALIDATION = 'Validation'
class CheXpertDataset:
"""An augmented version of the CheXPert dataset.
Create one DataFrame that combines the train.csv and valid.csv files, then augments it. The
combined DataFrame appends the following columns to the existing dataset columns:
- Patient number
- Study number
- View number
- Age group (MeSH age group - https://www.ncbi.nlm.nih.gov/pmc/articles/PMC1794003/)
- "Train" or "Validation" image
It also normalizes the labels to 0, 1, and -1 by converting floating point labels to integers
(e.g. 0.0 to 0) and by filling the empty "no mention" labels with the LABEL_NO_MENTION constant.
"""
def __init__(self, directory: str = None, add_image_size: bool = False, verbose: bool = True):
"""Populate the augmented dataset.
Once the class is initialized, the augmented dataset is available as a Pandas DataFrame in
the ``df`` class variable.
Args:
directory (str, optional): The directory where the dataset is saved, or ``None`` to
search for the directory. Defaults to None.
add_image_size (bool, optional): Add the image size (takes a few seconds). Defaults to
False.
verbose (bool, optional): Turn verbose logging on/off. Defaults to off.
"""
self.__init_logger(verbose)
self.__directory = directory
self.__add_image_size = add_image_size
self.__df = self.__get_augmented_chexpert()
@property
def df(self):
"""Return the DataFrame that contains the training and validation test sets.
Make a copy before modifying it. This code does not return a copy to increase performace.
"""
return self.__df
def fix_dataset(self):
"""Fix issues with the dataset (in place).
See code for what is fixed.
"""
# There is one record with sex 'Unknown'. There is only one image for that patient, so we
# don't have another record where the sex could be copied from. Change it to "Female"
# (it doesn't matter much which sex we pick because it is one record out of 200,000+).
self.df.loc[self.df.Sex == 'Unknown', ['Sex']] = 'Female'
# remove_unused_categories() is not in-place, so reassign the result
self.df.Sex = self.df.Sex.cat.remove_unused_categories()
@ staticmethod
def find_directory() -> str:
"""Determine the directory where the dataset is stored.
There are two versions of the dataset, small and large. They are stored in
CheXpert-v1.0-small and CheXpert-v1.0-large respectively. To make the code generic, this
function finds out what version is installed.
Note: assumes that 1) only one of the versions is installed and 2) that it is at the same
level where this code is being executed.
Returns:
str: The name of the images directory or an empty string if it can't find one.
"""
for entry in os.scandir('.'):
if entry.is_dir() and re.match(r'CheXpert-v\d\.\d-', entry.name):
return entry.name
return ''
def __init_logger(self, verbose: bool):
"""Init the logger.
Args:
verbose (bool): Turn verbose logging on/off.
"""
self.__ch = logging.StreamHandler()
self.__ch.setFormatter(logging.Formatter('%(message)s'))
self.__logger = logging.getLogger(__name__)
self.__logger.addHandler(self.__ch)
self.__logger.setLevel(logging.INFO if verbose else logging.ERROR)
def __get_augmented_chexpert(self) -> pd.DataFrame:
"""Get and augmented vresion of the CheXpert dataset.
Add columns described in the file header and compacts the DataFrame to use less memory.
Raises:
RuntimeError: Cannot find the dataset directory and no directory was specified.
Returns:
pd.DataFrame: The dataset with the original and augmented columns.
"""
directory = CheXpertDataset.find_directory() \
if self.__directory is None else self.__directory
if not directory:
raise RuntimeError('Cannot find the CheXpert directory')
self.__logger.info('Using the dataset in %s', directory)
df = pd.concat(pd.read_csv(os.path.join(directory, f)) for f in ['train.csv', 'valid.csv'])
# Convert the "no mention" label to an integer representation
# IMPORTANT: assumes this is the only case of NaN after reading the .csv files
self.__logger.info('Converting "no mention" to integer')
df.fillna(LABEL_NO_MENTION, inplace=True)
# Add the patient ID column by extracting it from the filename
# Assume that the 'Path' column follows a well-defined format and extract from "patientNNN"
self.__logger.info('Adding patient ID')
df[COL_PATIENT_ID] = df.Path.apply(lambda x: int(x.split('/')[2][7:]))
# Add the study number column, also assuming that the 'Path' column is well-defined
self.__logger.info('Adding study number')
df[COL_STUDY_NUMBER] = df.Path.apply(lambda x: int(x.split('/')[3][5:]))
# Add the view number column, also assuming that the 'Path' column is well-defined
self.__logger.info('Adding view number')
view_regex = re.compile('/|_')
df[COL_VIEW_NUMBER] = df.Path.apply(lambda x: int(re.split(view_regex, x)[4][4:]))
# Add the MeSH age group column
# Best reference I found for that: https://www.ncbi.nlm.nih.gov/pmc/articles/PMC1794003/
# We have only complete years, so we can't use 'newborn'
# Also prefix with zero because visualizers sort by ASCII code, not numeric value
self.__logger.info('Adding age group')
bins = [0, 2, 6, 13, 19, 45, 65, 80, 120]
ages = ['(0-1) Infant', '(02-5) Preschool', '(06-12) Child', '(13-18) Adolescent',
'(19-44) Adult', '(45-64) Middle age', '(65-79) Aged', '(80+) Aged 80']
df[COL_AGE_GROUP] = | pd.cut(df.Age, bins=bins, labels=ages, right=False) | pandas.cut |
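# A minimal, hedged sketch of the MeSH age-group bucketing above; bins and labels are copied from
# the code, the sample ages are made up.
import pandas as pd
bins = [0, 2, 6, 13, 19, 45, 65, 80, 120]
ages = ['(0-1) Infant', '(02-5) Preschool', '(06-12) Child', '(13-18) Adolescent',
        '(19-44) Adult', '(45-64) Middle age', '(65-79) Aged', '(80+) Aged 80']
sample = pd.Series([1, 17, 44, 45, 80])
print(pd.cut(sample, bins=bins, labels=ages, right=False).tolist())
# -> ['(0-1) Infant', '(13-18) Adolescent', '(19-44) Adult', '(45-64) Middle age', '(80+) Aged 80']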
# -*- coding: utf-8 -*-
from __future__ import print_function, division
# Built-ins
from collections import OrderedDict, defaultdict
import sys, datetime, copy, warnings
# External
import numpy as np
import pandas as pd
import networkx as nx
import matplotlib.pyplot as plt
from scipy.interpolate import interp1d
from scipy.stats import entropy, mannwhitneyu
from scipy.spatial.distance import squareform, pdist
from itertools import combinations
# soothsayer_utils
from soothsayer_utils import assert_acceptable_arguments, is_symmetrical, is_graph, is_nonstring_iterable, dict_build, dict_filter, is_dict, is_dict_like, is_color, is_number, write_object, format_memory, format_header, check_packages
try:
from . import __version__
except ImportError:
__version__ = "ImportError: attempted relative import with no known parent package"
# ensemble_networkx
from ensemble_networkx import Symmetric, condensed_to_dense
# ==========
# Conversion
# ==========
# Polar to cartesian coordinates
def polar_to_cartesian(r, theta):
x = r * np.cos(theta)
y = r * np.sin(theta)
return(x, y)
# Cartesian to polar coordinates
def cartesian_to_polar(x, y):
r = np.sqrt(x**2 + y**2)
theta = np.arctan2(y, x)
return(r, theta)
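# A hedged example of the two conversion helpers above (values illustrative):
#     polar_to_cartesian(1.0, np.pi/2)  ->  (~0.0, 1.0)
#     cartesian_to_polar(1.0, 1.0)      ->  (np.sqrt(2), np.pi/4)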
# =============
# Normalization
# =============
# Normalize MinMax
def normalize_minmax(x, feature_range=(0,1)):
"""
Adapted from the following source:
* https://scikit-learn.org/stable/modules/generated/sklearn.preprocessing.MinMaxScaler.html
"""
x_std = (x - x.min())/(x.max() - x.min())
return x_std * (feature_range[1] - feature_range[0]) + feature_range[0]
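# A hedged usage sketch for normalize_minmax (values illustrative):
#     normalize_minmax(np.array([1.0, 2.0, 3.0]), feature_range=(0, 10))  ->  array([ 0.,  5., 10.])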
# =======================================================
# Hive
# =======================================================
class Hive(object):
def __init__(self, data, name=None, node_type=None, edge_type=None, axis_type=None, description=None, tol=1e-10):
"""
Hive plots for undirected networks
Hive plots:
Should only be used with 2-3 axis unless intelligently ordered b/c the arcs will overlap.
Notes:
* Does not store networkx graph to overuse memory just use .to_networkx as generate them in real time.
Usage:
import soothsayer_utils as syu
import ensemble_networkx as enx
import hive_networkx as hx
# Load data
X, y, colors = syu.get_iris_data(["X", "y", "colors"])
n, m = X.shape
# Get association matrix (n,n)
method = "pearson"
df_sim = X.T.corr(method=method)
ratio = 0.382
number_of_edges = int((n**2 - n)/2)
number_of_edges_negative = int(ratio*number_of_edges)
# Make half of the edges negative to showcase edge coloring (not statistically meaningful at all)
for a, b in zip(np.random.RandomState(0).randint(low=0, high=149, size=number_of_edges_negative), np.random.RandomState(1).randint(low=0, high=149, size=number_of_edges_negative)):
if a != b:
df_sim.values[a,b] = df_sim.values[b,a] = df_sim.values[a,b]*-1
# Create a Symmetric object from the association matrix
sym_iris = enx.Symmetric(data=df_sim, node_type="<NAME>", edge_type=method, name="iris", association="network")
# ====================================
# Symmetric(Name:iris, dtype: float64)
# ====================================
# * Number of nodes (iris sample): 150
# * Number of edges (correlation): 11175
# * Association: network
# * Memory: 174.609 KB
# --------------------------------
# | Weights
# --------------------------------
# (iris_1, iris_0) 0.995999
# (iris_0, iris_2) 0.999974
# (iris_3, iris_0) 0.998168
# (iris_0, iris_4) 0.999347
# (iris_0, iris_5) 0.999586
# ...
# (iris_148, iris_146) 0.988469
# (iris_149, iris_146) 0.986481
# (iris_147, iris_148) 0.995708
# (iris_149, iris_147) 0.994460
# (iris_149, iris_148) 0.999916
# Create NetworkX graph from the Symmetric object
graph_iris = sym_iris.to_networkx()
# # Create Hive
hive = hx.Hive(graph_iris, axis_type="species")
# Organize nodes by species for each axis
number_of_query_nodes = 3
axis_nodes = OrderedDict()
for species, _y in y.groupby(y):
axis_nodes[species] = _y.index[:number_of_query_nodes]
# Make sure each node is specific to an axis (not the fastest way, but easiest to understand)
nodelist = list()
for name_axis, nodes in axis_nodes.items():
nodelist += nodes.tolist()
assert pd.Index(nodelist).value_counts().max() == 1, "Each node must be on only one axis"
# Add axis for each species
node_styles = dict(zip(['setosa', 'versicolor', 'virginica'], ["o", "p", "D"]))
for name_axis, nodes in axis_nodes.items():
hive.add_axis(name_axis, nodes, sizes=150, colors=colors[nodes], split_axis=True, node_style=node_styles[name_axis])
hive.compile()
# ===============================
# Hive(Name:iris, dtype: float64)
# ===============================
# * Number of nodes (iris sample): 150
# * Number of edges (pearson): 11175
# * Axes (species): ['setosa', 'versicolor', 'virginica']
# * Memory: 174.609 KB
# * Compiled: True
# ---------------------------
# | Axes
# ---------------------------
# 0. setosa (3) [iris_0, iris_1, iris_2]
# 1. versicolor (3) [iris_50, iris_51, iris_52]
# 2. virginica (3) [iris_100, iris_101, iris_102]
# Plot Hive
color_negative, color_positive = ('#278198', '#dc3a23')
edge_colors = hive.weights.map(lambda w: {True:color_negative, False:color_positive}[w < 0])
legend = dict(zip(["Positive", "Negative"], [color_positive, color_negative]))
fig, axes = hive.plot(func_edgeweight=lambda w: (w**10), edge_colors=edge_colors, style="light", show_node_labels=True, title="Iris", legend=legend)
"""
# Placeholders
self.nodes_in_hive = None
self.edges_in_hive = None
self.weights = None
# self.graph = None
self.name = name
self.node_type = node_type
self.edge_type = edge_type
# Propagate
if isinstance(data, pd.DataFrame):
data = self._from_pandas_adjacency(data, name, node_type, edge_type, tol) # -> Symmetric
if isinstance(data, Symmetric):
self._from_symmetric(data, name, node_type, edge_type)
if all([
(self.nodes_in_hive is None),
(self.edges_in_hive is None),
(self.weights is None),
]):
assert is_graph(data), "`data` must be either a pd.DataFrame adjacency, a Symmetric, or a networkx graph object" # Last resort, use this if Symmetric isn't provided
self._from_networkx(data)
# Initialize
self.axes = OrderedDict()
self.node_mapping_ = OrderedDict()
self.compiled = False
self.axis_type = axis_type
self.description = description
self.version = __version__
self.number_of_nodes_ = None
self.memory = self.weights.memory_usage()
self.__synthesized__ = datetime.datetime.utcnow()
def _from_pandas_adjacency(self, data, name, node_type, edge_type, tol):
# Convert pd.DataFrame into a Symmetric object
assert isinstance(data, pd.DataFrame), "Must be a 2-dimensional pandas DataFrame object"
assert is_symmetrical(data, tol=tol), "DataFrame must be symmetrical. Please force symmetry with (X + X.T)/2"
return Symmetric(data=data, name=name, node_type=node_type, edge_type=edge_type, association="network", nans_ok=False, tol=tol)
def _from_symmetric(self, data, name, node_type, edge_type):
# Propagate information from Symmetric
if name is None:
self.name = data.name
if node_type is None:
self.node_type = data.node_type
if edge_type is None:
self.edge_type = data.edge_type
self.nodes_in_hive = data.nodes
self.edges_in_hive = data.edges
self.weights = data.weights
# return data.to_networkx()
def _from_networkx(self, graph):
# Propagate information from graph
for attr in ["name", "node_type", "edge_type"]:
if getattr(self, attr) is None:
if attr in graph.graph:
value =graph.graph[attr]
if bool(value):
setattr(self, attr, value)
# if self.graph is None:
# self.graph = graph
if self.nodes_in_hive is None:
self.nodes_in_hive = pd.Index(sorted(graph.nodes()))
if (self.edges_in_hive is None) or (self.weights is None):
self.weights = dict()
for edge_data in graph.edges(data=True):
edge = frozenset(edge_data[:-1])
weight = edge_data[-1]["weight"]
self.weights[edge] = weight
self.weights = pd.Series(self.weights, name="Weights")#.sort_index()
self.edges_in_hive = pd.Index(self.weights.index, name="Edges")
# Built-ins
def __repr__(self):
pad = 4
header = format_header("Hive(Name:{}, dtype: {})".format(self.name, self.weights.dtype),line_character="=")
n = len(header.split("\n")[0])
fields = [
header,
pad*" " + "* Number of nodes ({}): {}".format(self.node_type, len(self.nodes_in_hive)),
pad*" " + "* Number of edges ({}): {}".format(self.edge_type, len(self.edges_in_hive)),
pad*" " + "* Axes ({}): {}".format(self.axis_type, list(self.axes.keys())),
pad*" " + "* Memory: {}".format(format_memory(self.memory)),
pad*" " + "* Compiled: {}".format(self.compiled),
]
if self.compiled:
for field in map(lambda line:pad*" " + line, format_header("| Axes", "-", n=n-pad).split("\n")):
fields.append(field)
for field in map(lambda line: pad*" " + str(line), repr(self.axes_preview_).split("\n")[:-1]):
fields.append(field)
return "\n".join(fields)
def __call__(self, name_axis=None):
return self.get_axis_data(name_axis=name_axis)
# def __getitem__(self, key):
# return self.weights[key]
# Add axis to HivePlot
def add_axis(self, name_axis, nodes, sizes=None, colors=None, split_axis:bool=False, node_style="o", scatter_kws=dict()):
"""
Add or update axis
nodes: Can be either an iterable of nodes or a dict-like with node positions {node:position}
"""
# Initialize axis container
self.axes[name_axis] = defaultdict(dict)
self.axes[name_axis]["colors"] = None
self.axes[name_axis]["sizes"] = None
self.axes[name_axis]["split_axis"] = split_axis
self.axes[name_axis]["node_style"] = node_style
self.axes[name_axis]["scatter_kws"] = scatter_kws
# Assign (preliminary) node positions
if is_nonstring_iterable(nodes) and not isinstance(nodes, pd.Series):
nodes = pd.Series(np.arange(len(nodes)), index=nodes)
if is_dict(nodes):
nodes = pd.Series(nodes)
nodes = nodes.sort_values()
assert set(nodes.index) <= set(self.nodes_in_hive), "All nodes in axis should be in the Hive and they aren't..."
# Set values
self.axes[name_axis]["node_positions"] = pd.Series(nodes, name=(name_axis, "node_positions"))
self.axes[name_axis]["nodes"] = pd.Index(nodes.index, name=(name_axis, "nodes"))
self.axes[name_axis]["number_of_nodes"] = nodes.size
# Group node with axis
self.node_mapping_.update(dict_build([(name_axis, self.axes[name_axis]["nodes"])]))
# Assign component colors
if colors is None:
colors = "white"
if is_color(colors):
colors = dict_build([(colors, self.axes[name_axis]["nodes"])])
if is_dict(colors):
colors = pd.Series(colors)
if not is_color(colors):
if is_nonstring_iterable(colors) and not isinstance(colors, pd.Series):
colors = pd.Series(colors, index=self.axes[name_axis]["nodes"])
self.axes[name_axis]["colors"] = pd.Series(colors[self.axes[name_axis]["nodes"]], name=(name_axis, "node_colors"))
# Assign component sizes
if sizes is None:
sizes = 100
if is_number(sizes):
sizes = dict_build([(sizes, self.axes[name_axis]["nodes"])])
if is_dict(sizes):
sizes = pd.Series(sizes)
self.axes[name_axis]["sizes"] = pd.Series(sizes[nodes.index], name=(name_axis, "node_sizes"))
# Compile the data for plotting
def compile(self, axes_theta_degrees=None, split_theta_degree=None, inner_radius=None, theta_center=90, axis_normalize=True, axis_maximum=1000):
"""
inner_radius should be in units similar to axis_maximum
"""
number_of_axes = len(self.axes)
if split_theta_degree is None:
split_theta_degree = (360/number_of_axes)*0.16180339887
self.split_theta_degree = split_theta_degree
self.axis_maximum = axis_maximum
if inner_radius is None:
if axis_normalize:
inner_radius = (1/5)*self.axis_maximum
else:
inner_radius = 3
self.inner_radius = inner_radius
self.outer_radius = self.axis_maximum - self.inner_radius
self.theta_center = theta_center
# Adjust all of the node_positions
for i, query_axis in enumerate(self.axes):
# If the axis is normalized, force everything between the minimum position and the `outer_radius` (that is, the axis_maximum - inner_radius. This ensures the axis_maximum is actually what is defined)
if axis_normalize:
node_positions = self.axes[query_axis]["node_positions"]
self.axes[query_axis]["node_positions_normalized"] = normalize_minmax(node_positions, feature_range=(min(node_positions), self.outer_radius) )
else:
self.axes[query_axis]["node_positions_normalized"] = self.axes[query_axis]["node_positions"].copy()
# Offset the node positions by the inner radius
self.axes[query_axis]["node_positions_normalized"] = self.axes[query_axis]["node_positions_normalized"] + self.inner_radius
# Axis thetas
if axes_theta_degrees is not None:
assert hasattr(axes_theta_degrees, "__iter__"), "`axes_theta_degrees` must be either None or an iterable of {} angles in degrees".format(number_of_axes)
assert len(axes_theta_degrees) == number_of_axes, "`axes_theta_degrees` must be either None or an iterable of {} angles in degrees".format(number_of_axes)
if axes_theta_degrees is None:
axes_theta_degrees = list()
for i in range(number_of_axes):
theta_add = (360/number_of_axes)*i
axes_theta_degrees.append(theta_add)
# Adjust all of the axes angles
for i, query_axis in enumerate(self.axes):
# If the axis is in single mode
theta_add = axes_theta_degrees[i] #(360/number_of_axes)*i
if not self.axes[query_axis]["split_axis"]:
# If the query axis is the first then the `theta_add` will be 0
self.axes[query_axis]["theta"] = np.array([self.theta_center + theta_add])
else:
self.axes[query_axis]["theta"] = np.array([self.theta_center + theta_add - split_theta_degree,
self.theta_center + theta_add + split_theta_degree])
self.axes[query_axis]["theta"] = np.deg2rad(self.axes[query_axis]["theta"])
self.axes_theta_degrees_ = dict(zip(self.axes.keys(), axes_theta_degrees))
# Nodes
self.nodes_ = list()
for axes_data in self.axes.values():
self.nodes_ += list(axes_data["nodes"])
assert len(self.nodes_) == len(set(self.nodes_)), "Axes cannot contain duplicate nodes"
self.number_of_nodes_ = len(self.nodes_)
# Edges
self.edges_ = list(map(frozenset, combinations(self.nodes_, r=2)))
self.number_of_edges_ = len(self.edges_)
# Axes
self.number_of_axes_ = number_of_axes
self.axes_preview_ = pd.Series(dict(zip(self.axes.keys(), map(lambda data:list(data["nodes"]), self.axes.values()))), name="Axes preview")
self.axes_preview_.index = self.axes_preview_.index.map(lambda name_axis: "{}. {} ({})".format(self.axes_preview_.index.get_loc(name_axis), name_axis, len(self.axes_preview_[name_axis])))
# Compile
self.compiled = True
def _get_quadrant_info(self, theta_representative):
# 0/360
if theta_representative in np.deg2rad([0,360]):
horizontalalignment = "left"
verticalalignment = "center"
quadrant = 0
# 90
if theta_representative == np.deg2rad(90):
horizontalalignment = "center"
verticalalignment = "bottom"
quadrant = 90
# 180
if theta_representative == np.deg2rad(180):
horizontalalignment = "right"
verticalalignment = "center"
quadrant = 180
# 270
if theta_representative == np.deg2rad(270):
horizontalalignment = "center"
verticalalignment = "top"
quadrant = 270
# Quadrant 1
if np.deg2rad(0) < theta_representative < np.deg2rad(90):
horizontalalignment = "left"
verticalalignment = "bottom"
quadrant = 1
# Quadrant 2
if np.deg2rad(90) < theta_representative < np.deg2rad(180):
horizontalalignment = "right"
verticalalignment = "bottom"
quadrant = 2
# Quadrant 3
if np.deg2rad(180) < theta_representative < np.deg2rad(270):
horizontalalignment = "right"
verticalalignment = "top"
quadrant = 3
# Quadrant 4
if np.deg2rad(270) < theta_representative < np.deg2rad(360):
horizontalalignment = "left"
verticalalignment = "top"
quadrant = 4
return quadrant, horizontalalignment, verticalalignment
def plot(self,
title=None,
# Arc style
arc_style="curved",
# Show components
show_axis=True,
show_nodes=True,
show_edges=True,
show_border = False,
show_axis_labels=True,
show_node_labels=False,
show_polar_grid=False,
show_cartesian_grid=False,
node_label_mapping=None,
# Colors
axis_color=None,
edge_colors=None,
background_color=None,
# Alphas
edge_alpha=0.5,
node_alpha=0.8,
axis_alpha=0.618,
# Keywords
title_kws=dict(),
axis_kws=dict(),
axis_label_kws=dict(),
node_label_kws=dict(),
node_label_line_kws=dict(),
node_kws=dict(),
edge_kws=dict(),
legend_kws=dict(),
legend_label_kws=dict(),
# Figure
style="dark",
edge_linestyle="-",
axis_linestyle="-",
node_label_linestyle=":",
legend_markerstyle="s",
legend=None,
# polar=True,
ax_polar=None,
ax_cartesian=None,
clip_edgeweight=5,
granularity=100,
func_edgeweight=None,
figsize=(10,10),
# Padding
pad_axis_label = "infer",
pad_node_label = 5,
# pad_node_label_line = 0,
# node_label_position_vertical_axis="right",
):
if node_label_mapping is None:
node_label_mapping = dict()
polar = True #! Address this in future versions
assert self.compiled == True, "Please `compile` before plotting"
accepted_arc_styles = {"curved", "linear"}
assert_acceptable_arguments(arc_style, accepted_arc_styles)
if arc_style == "linear":
granularity = 2
if style in ["dark", "black", "night", "sith"]:
style = "dark_background"
if style in ["light", "white", "day", "jedi"] :
style = "seaborn-white"
with plt.style.context(style):
# Create figure
if ax_polar is not None:
fig = plt.gcf()
figsize = fig.get_size_inches()
# Polar canvas
if ax_polar is None:
fig = plt.figure(figsize=figsize)
ax_polar = plt.subplot(111, polar=polar)
# Cartesian canvas
if ax_cartesian is None:
ax_cartesian = fig.add_axes(ax_polar.get_position(), frameon=False, polar=False)
if polar == True:
y = 0.95
if polar == False:
y = 1.1
# Remove clutter from plot
ax_polar.grid(show_polar_grid)
ax_polar.set_xticklabels([])
ax_polar.set_yticklabels([])
ax_cartesian.grid(show_cartesian_grid)
ax_cartesian.set_xticklabels([])
ax_cartesian.set_yticklabels([])
if not show_border: # Not using ax.axis('off') becuase it removes facecolor
for spine in ax_polar.spines.values():
spine.set_visible(False)
for spine in ax_cartesian.spines.values():
spine.set_visible(False)
node_padding = " "*pad_node_label
# Default colors
if axis_color is None:
if style == "dark_background":
axis_color = "white"
axis_label_color = "white"
else:
axis_color = "darkslategray"
axis_label_color = "black"
if background_color is not None:
ax_polar.set_facecolor(background_color)
ax_cartesian.set_facecolor(background_color)
# Title
_title_kws = {"fontweight":"bold", "y":y}
_title_kws.update(title_kws)
if "fontsize" not in _title_kws:
_title_kws["fontsize"] = figsize[0] * np.sqrt(figsize[0])/2 + 2
# Axis labels
_axis_label_kws = {"fontweight":None, "color":axis_label_color}
_axis_label_kws.update(axis_label_kws)
if "fontsize" not in _axis_label_kws:
_axis_label_kws["fontsize"] = figsize[0] * np.sqrt(figsize[0])/2
# Node labels
_node_label_kws = {"fontsize":12}
_node_label_kws.update(node_label_kws)
_node_label_line_kws = {"linestyle":node_label_linestyle, "color":axis_color}
_node_label_line_kws.update(node_label_line_kws)
# Axis plotting
_axis_kws = {"linewidth":3.382, "alpha":axis_alpha, "color":axis_color, "linestyle":axis_linestyle, "zorder":0}
_axis_kws.update(axis_kws)
# Edge plotting
_edge_kws = {"alpha":edge_alpha, "linestyle":edge_linestyle} # "zorder", _node_kws["zorder"]+1}
_edge_kws.update(edge_kws)
# Node plotting
_node_kws = {"linewidth":1.618, "edgecolor":axis_color, "alpha":node_alpha,"zorder":2}
_node_kws.update(node_kws)
# Legend plotting
_legend_label_kws = {"marker":legend_markerstyle, "markeredgecolor":axis_color, "markeredgewidth":1, "linewidth":0}
_legend_label_kws.update(legend_label_kws)
_legend_kws = {'fontsize': 15, 'frameon': True, 'facecolor': background_color, 'edgecolor': axis_color, 'loc': 'center left', 'bbox_to_anchor': (1.1, 0.5), "markerscale":1.6180339887}
_legend_kws.update(legend_kws)
# Edge info
edges = self.weights[self.edges_].abs()
if func_edgeweight is not None:
edges = func_edgeweight(edges)
if clip_edgeweight is not None:
edges = np.clip(edges, a_min=None, a_max=clip_edgeweight)
if edge_colors is None:
edge_colors = axis_color
if is_color(edge_colors):
edge_colors = dict_build([(edge_colors, edges.index)])
if is_dict(edge_colors):
edge_colors = pd.Series(edge_colors)
if not is_color(edge_colors):
if is_nonstring_iterable(edge_colors) and not isinstance(edge_colors, pd.Series):
edge_colors = pd.Series(edge_colors, index=edges.index)
edge_colors = pd.Series(edge_colors[edges.index], name="edge_colors").to_dict()
# Axes label pads
if pad_axis_label is None:
pad_axis_label = 0
if pad_axis_label == "infer":
pad_axis_label = list()
for i, (name_axis, axes_data) in enumerate(self.axes.items()):
node_positions = axes_data["node_positions_normalized"]
pad_axis_label.append(0.06180339887*(node_positions.max() - node_positions.min()))
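# Note: 0.06180339887 is 0.618/10, a golden-ratio-derived fraction of each
# axis's normalized node-position span, used here as the inferred label padding.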
if isinstance(pad_axis_label, (int,float)):
pad_axis_label = [pad_axis_label]*self.number_of_axes_
assert hasattr(pad_axis_label, "__iter__"), "`pad_axis_label` must be either None, 'infer', a scalar, or an iterable of {} pads".format(self.number_of_axes_)
assert len(pad_axis_label) == self.number_of_axes_, "`pad_axis_label` must be either None, 'infer', a scalar, or an iterable of {} pads".format(self.number_of_axes_)
# ================
# Plot edges
# ================
# Draw edges
if show_edges:
for (edge, weight) in edges.iteritems():
if abs(weight) > 0:
node_A, node_B = edge
name_axis_A = self.node_mapping_[node_A]
name_axis_B = self.node_mapping_[node_B]
# Check axis
intraaxis_edge = (name_axis_A == name_axis_B)
# Within axis edges
if intraaxis_edge:
name_consensus_axis = name_axis_A
# Plot edges on split axis
if self.axes[name_consensus_axis]["split_axis"]:
# print(type(edge), edge, edge in edge_colors)
color = edge_colors[edge]
# Draw edges between same axis
# Node A -> B
ax_polar.plot([*self.axes[name_consensus_axis]["theta"]], # Unpack
[self.axes[name_consensus_axis]["node_positions_normalized"][node_A], self.axes[name_consensus_axis]["node_positions_normalized"][node_B]],
c=color,
linewidth=weight,
**_edge_kws,
)
# Node B -> A
ax_polar.plot([*self.axes[name_consensus_axis]["theta"]], # Unpack
[self.axes[name_consensus_axis]["node_positions_normalized"][node_B], self.axes[name_consensus_axis]["node_positions_normalized"][node_A]],
c=color,
linewidth=weight,
**_edge_kws,
)
# Between axis
if not intraaxis_edge:
axes_ordered = list(self.axes.keys())
terminal_axis_edge = False
# Last connected to the first
if (name_axis_A == axes_ordered[-1]):
if (name_axis_B == axes_ordered[0]):
thetas = [self.axes[name_axis_A]["theta"].max(), self.axes[name_axis_B]["theta"].min()]
radii = [self.axes[name_axis_A]["node_positions_normalized"][node_A], self.axes[name_axis_B]["node_positions_normalized"][node_B]]
terminal_axis_edge = True
# First connected to the last
if (name_axis_A == axes_ordered[0]):
if (name_axis_B == axes_ordered[-1]):
thetas = [self.axes[name_axis_B]["theta"].max(), self.axes[name_axis_A]["theta"].min()]
radii = [self.axes[name_axis_B]["node_positions_normalized"][node_B], self.axes[name_axis_A]["node_positions_normalized"][node_A]]
terminal_axis_edge = True
if not terminal_axis_edge:
if axes_ordered.index(name_axis_A) < axes_ordered.index(name_axis_B):
thetas = [self.axes[name_axis_A]["theta"].max(), self.axes[name_axis_B]["theta"].min()]
if axes_ordered.index(name_axis_A) > axes_ordered.index(name_axis_B):
thetas = [self.axes[name_axis_A]["theta"].min(), self.axes[name_axis_B]["theta"].max()]
radii = [self.axes[name_axis_A]["node_positions_normalized"][node_A], self.axes[name_axis_B]["node_positions_normalized"][node_B]]
# Radii node positions
#
# Necessary to account for the directionality of the edge.
# Without this adjustment the arc is drawn the long way around
# (counter-clockwise) instead of taking the short clockwise path.
# If straight lines were plotted, the thetas and radii before the curve adjustment below would be used directly.
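# Illustrative (assumed) values: with thetas = [5.9, 0.3] in radians,
# adding 2*pi to the second theta gives roughly [5.9, 6.58], so the
# interpolated arc sweeps only ~0.68 rad across theta=0 instead of looping back the long way.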
if terminal_axis_edge:
theta_end_rotation = thetas[0]
theta_next_rotation = thetas[1] + np.deg2rad(360)
thetas = [theta_end_rotation, theta_next_rotation]
# Create grid for thetas
t = np.linspace(start=thetas[0], stop=thetas[1], num=granularity)
# Get radii for thetas
radii = interp1d(thetas, radii)(t)
thetas = t
ax_polar.plot(thetas,
radii,
c=edge_colors[edge],
linewidth=weight,
**_edge_kws,
)
# ===================
# Plot axis and nodes
# ===================
for i, (name_axis, axes_data) in enumerate(self.axes.items()):
# Retrieve
node_positions = axes_data["node_positions_normalized"]
colors = axes_data["colors"].tolist() # Needs `.tolist()` for Matplotlib version < 2.0.0
sizes = axes_data["sizes"].tolist()
# Positions
# =========
# Get a theta value for each node on the axis
if not axes_data["split_axis"]:
theta_single = np.repeat(axes_data["theta"][0], repeats=node_positions.size)
theta_vectors = [theta_single]
# Split the axis so within axis interactions can be visualized
if axes_data["split_axis"]:
theta_split_A = np.repeat(axes_data["theta"][0], repeats=node_positions.size)
theta_split_B = np.repeat(axes_data["theta"][1], repeats=node_positions.size)
theta_vectors = [theta_split_A, theta_split_B]
theta_representative = np.mean(axes_data["theta"])
# Quadrant
# =======
quadrant, horizontalalignment, verticalalignment = self._get_quadrant_info(theta_representative)
# Plot axis
# =========
if show_axis:
for theta in axes_data["theta"]:
ax_polar.plot(
2*[theta],
[min(node_positions), max(node_positions)],
**_axis_kws,
)
# Plot axis labels
# ================
if show_axis_labels:
ax_polar.text(
s = name_axis,
x = theta_representative,
y = node_positions.size + node_positions.max() + pad_axis_label[i],
horizontalalignment=horizontalalignment,
verticalalignment=verticalalignment,
**_axis_label_kws,
)
# Plot nodes
# ========
if show_nodes:
for theta in theta_vectors:
# Filled
ax_polar.scatter(
theta,
node_positions,
c=axes_data["colors"],
s=axes_data["sizes"],
marker=axes_data["node_style"],
**_node_kws,
)
# Empty
ax_polar.scatter(
theta,
node_positions,
facecolors='none',
s=axes_data["sizes"],
marker=axes_data["node_style"],
alpha=1,
zorder=_node_kws["zorder"]+1,
# zorder=-1,
edgecolor=_node_kws["edgecolor"],
linewidth=_node_kws["linewidth"],
)
# Plot node labels
# ================
index_labels = node_positions.index
if is_nonstring_iterable(show_node_labels):
index_labels = pd.Index(show_node_labels) & index_labels
show_node_labels = True
if show_node_labels:
if not polar:
warnings.warn("`show_node_labels` is not available in version: {}".format(__version__))
else:
horizontalalignment_nodelabels = None
for name_node, r in node_positions[index_labels].iteritems():
#! Address this in future version
# # Vertical axis case
# vertical_axis_left = (quadrant in {90,270}) and (node_label_position_vertical_axis == "left")
# vertical_axis_right = (quadrant in {90,270}) and (node_label_position_vertical_axis == "right")
# if vertical_axis_left:
# horizontalalignment_nodelabels = "right" # These are opposite b/c nodes should be on the left which means padding on the right
# if vertical_axis_right:
# horizontalalignment_nodelabels = "left" # Vice versa
# Pad on the right and push label to left
# if (quadrant == 3) or vertical_axis_left:
# node_label = "{}{}".format(name_node,node_padding)
# theta_anchor_padding = max(axes_data["theta"])
# # Pad on left and push label to the right
# if (quadrant == 4) or vertical_axis_right:
# node_label = "{}{}".format(node_padding,name_node)
# theta_anchor_padding = min(axes_data["theta"])
# theta_anchor is where the padding ends up
# Relabel node
name_node = node_label_mapping.get(name_node, name_node)
# Pad on the right and push label to left
if quadrant in {2,3, 180} :
node_label = "{}{}".format(name_node,node_padding)
theta_anchor_padding = max(axes_data["theta"])
x, y = polar_to_cartesian(r, theta_anchor_padding)
xs_line = [-self.axis_maximum, x]
x_text = -self.axis_maximum
horizontalalignment_nodelabels = "right"
# Pad on the left and push label to the right
if quadrant in {0, 1,4, 90, 270} :
node_label = "{}{}".format(node_padding,name_node)
theta_anchor_padding = min(axes_data["theta"])
x, y = polar_to_cartesian(r, theta_anchor_padding)
xs_line = [x, self.axis_maximum]
x_text = self.axis_maximum
horizontalalignment_nodelabels = "left"
# Node label line
ax_cartesian.plot(
xs_line,
[y, y],
**_node_label_line_kws,
)
if all([
not axes_data["split_axis"],
quadrant in {0,180},
]):
warnings.warn("Cannot plot node labels when axis is not split for angles 0 or 180 in version: {}".format(__version__))
else:
# Node label text
ax_cartesian.text(
x=x_text,
y=y,
s=node_label,
horizontalalignment=horizontalalignment_nodelabels,
verticalalignment="center",
**_node_label_kws,
)
# Adjust limits
# ===========
r_max = max(ax_polar.get_ylim())
if title is not None:
fig.suptitle(title, **_title_kws)
ax_cartesian.set_xlim(-r_max, r_max)
ax_cartesian.set_ylim(-r_max, r_max)
# Plot Legend
# ===========
if legend is not None:
assert is_dict_like(legend), "`legend` must be dict-like"
handles = list()
for label, color in legend.items():
handle = plt.Line2D([0,0],[0,0], color=color, **_legend_label_kws)
handles.append(handle)
ax_cartesian.legend(handles, legend.keys(), **_legend_kws)
return fig, [ax_polar, ax_cartesian]
# Axis data
def get_axis_data(self, name_axis=None, field=None):
if name_axis is None:
print("Available axes:", set(self.axes.keys()), file=sys.stderr)
else:
assert name_axis in self.axes, "{} is not in the axes".format(name_axis)
df = pd.DataFrame(dict_filter(self.axes[name_axis], ["colors", "sizes", "node_positions", "node_positions_normalized"]))
if self.compiled:
df["theta"] = [self.axes[name_axis]["theta"]]*df.shape[0]
df.index.name = name_axis
if field is not None:
return df[field]
else:
return df
# Connections
def get_axis_connections(self, name_axis=None, sort_by=None, ascending=False, return_multiindex=False):
assert self.compiled == True, "Please `compile` before getting connections"
if name_axis is not None:
assert name_axis in self.axes, "{} is not in the available axes for `name_axis`. Please add and recompile or choose one of the available axes:\n{}".format(name_axis, list(self.axes.keys()))
df_dense = condensed_to_dense(self.weights, index=self.nodes_)
df_connections = df_dense.groupby(self.node_mapping_, axis=1).sum()
if name_axis is not None:
idx_axis_nodes = self.axes[name_axis]["nodes"]
df_connections = df_connections.loc[idx_axis_nodes,:]
df_connections.index.name = name_axis
if sort_by is not None:
assert sort_by in self.axes, f"{sort_by} is not in the available axes for `sort_by`. Please add and recompile or choose one of the available axes:\n{self.axes.keys()}"
df_connections = df_connections.sort_values(by=sort_by, axis=0, ascending=ascending)
if return_multiindex:
df_connections.index = pd.MultiIndex.from_tuples(df_connections.index.map(lambda id_node: (self.node_mapping_[id_node], id_node)))
return df_connections
# Stats
# =====
def compare(self, data, func_stats=mannwhitneyu, name_stat=None, tol=1e-10):
"""
Compare the connections between 2 Hives or adjacencies using the specified axes assignments.
"""
assert self.compiled == True, "Please `compile` before comparing adjacencies"
assert_acceptable_arguments(type(data), {pd.DataFrame, Symmetric, Hive})
if isinstance(data, (Hive, Symmetric)):
df_dense__query = condensed_to_dense(data.weights)
if isinstance(data, pd.DataFrame):
assert is_symmetric(data, tol=tol)
df_dense__query = data
assert set(self.nodes_) <= set(df_dense__query.index), "`data` must contain all nodes from reference Hive"
df_dense__reference = self.to_dense()
d_statsdata = OrderedDict()
# Get nodes
d_statsdata = OrderedDict()
for id_node in df_dense__reference.index:
# Get axis groups
stats_axes_data = list()
for name_axis in self.axes:
idx_axis_nodes = self.axes[name_axis]["nodes"]
n = self.axes[name_axis]["number_of_nodes"]
# Get comparison data
u = df_dense__reference.loc[id_node,idx_axis_nodes]
v = df_dense__query.loc[id_node,idx_axis_nodes]
# Get stats
stat, p = func_stats(u,v)
if name_stat is None:
if hasattr(func_stats, "__name__"):
name_stat = func_stats.__name__
else:
name_stat = str(func_stats)
# Store data
row = pd.Series(OrderedDict([
((name_axis, "number_of_nodes"), n),
((name_axis, "∑(reference)"), u.sum()),
((name_axis, "∑(query)"), v.sum()),
((name_axis, name_stat), stat),
((name_axis, "p_value"), p)
]))
stats_axes_data.append(row)
# Build pd.DataFrame
d_statsdata[id_node] = | pd.concat(stats_axes_data) | pandas.concat |
import pandas as pd
from sklearn.preprocessing import LabelEncoder, StandardScaler
import gc
import numpy as np
train_data = pd.read_csv(r'D:\DATASET\ntut-ml-regression-2020\train-v3.csv')
val_data = pd.read_csv(r'D:\DATASET\ntut-ml-regression-2020\valid-v3.csv')
test_data = pd.read_csv(r'D:\DATASET\ntut-ml-regression-2020\test-v3.csv')
# Merge all of the data and one-hot encode the sale year/month/day columns
test_data['price'] = -1
data = pd.concat((train_data, val_data, test_data))
cate_feature = ['sale_yr', 'sale_month', 'sale_day']
a = ['sale_yr', 'sale_month']
for item in a:
data[item] = LabelEncoder().fit_transform(data[item])
item_dummies = pd.get_dummies(data[item])
item_dummies.columns = [
item + str(i + 1) for i in range(item_dummies.shape[1])
]
data = | pd.concat([data, item_dummies], axis=1) | pandas.concat |
from context import dero
import pandas as pd
from pandas.util.testing import assert_frame_equal
from pandas import Timestamp
from numpy import nan
import numpy
class DataFrameTest:
df = pd.DataFrame([
(10516, 'a', '1/1/2000', 1.01),
(10516, 'a', '1/2/2000', 1.02),
(10516, 'a', '1/3/2000', 1.03),
(10516, 'a', '1/4/2000', 1.04),
(10516, 'b', '1/1/2000', 1.05),
(10516, 'b', '1/2/2000', 1.06),
(10516, 'b', '1/3/2000', 1.07),
(10516, 'b', '1/4/2000', 1.08),
(10517, 'a', '1/1/2000', 1.09),
(10517, 'a', '1/2/2000', 1.10),
(10517, 'a', '1/3/2000', 1.11),
(10517, 'a', '1/4/2000', 1.12),
], columns = ['PERMNO','byvar','Date', 'RET'])
df_duplicate_row = pd.DataFrame([
(10516, 'a', '1/1/2000', 1.01),
(10516, 'a', '1/2/2000', 1.02),
(10516, 'a', '1/3/2000', 1.03),
(10516, 'a', '1/3/2000', 1.03), #this is a duplicated row
(10516, 'a', '1/4/2000', 1.04),
(10516, 'b', '1/1/2000', 1.05),
(10516, 'b', '1/2/2000', 1.06),
(10516, 'b', '1/3/2000', 1.07),
(10516, 'b', '1/4/2000', 1.08),
(10517, 'a', '1/1/2000', 1.09),
(10517, 'a', '1/2/2000', 1.10),
(10517, 'a', '1/3/2000', 1.11),
(10517, 'a', '1/4/2000', 1.12),
], columns = ['PERMNO','byvar','Date', 'RET'])
df_weight = pd.DataFrame(data = [
(10516, 'a', '1/1/2000', 1.01, 0),
(10516, 'a', '1/2/2000', 1.02, 1),
(10516, 'a', '1/3/2000', 1.03, 1),
(10516, 'a', '1/4/2000', 1.04, 0),
(10516, 'b', '1/1/2000', 1.05, 1),
(10516, 'b', '1/2/2000', 1.06, 1),
(10516, 'b', '1/3/2000', 1.07, 1),
(10516, 'b', '1/4/2000', 1.08, 1),
(10517, 'a', '1/1/2000', 1.09, 0),
(10517, 'a', '1/2/2000', 1.1, 0),
(10517, 'a', '1/3/2000', 1.11, 0),
(10517, 'a', '1/4/2000', 1.12, 1),
], columns = ['PERMNO', 'byvar', 'Date', 'RET', 'weight'])
df_nan_byvar = pd.DataFrame(data = [
('a', 1),
(nan, 2),
('b', 3),
('b', 4),
], columns = ['byvar', 'val'])
df_nan_byvar_and_val = pd.DataFrame(data = [
('a', 1),
(nan, 2),
('b', nan),
('b', 4),
], columns = ['byvar', 'val'])
single_ticker_df = pd.DataFrame(data = [
('a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
], columns = ['byvar', 'Date', 'TICKER'])
df_datetime = df.copy()
df_datetime['Date'] = pd.to_datetime(df_datetime['Date'])
df_datetime_no_ret = df_datetime.copy()
df_datetime_no_ret.drop('RET', axis=1, inplace=True)
df_gvkey_str = pd.DataFrame([
('001076','3/1/1995'),
('001076','4/1/1995'),
('001722','1/1/2012'),
('001722','7/1/2012'),
('001722', nan),
(nan ,'1/1/2012')
], columns=['GVKEY','Date'])
df_gvkey_str['Date'] = pd.to_datetime(df_gvkey_str['Date'])
df_gvkey_num = df_gvkey_str.copy()
df_gvkey_num['GVKEY'] = df_gvkey_num['GVKEY'].astype('float64')
df_gvkey_str2 = pd.DataFrame([
('001076','2/1/1995'),
('001076','3/2/1995'),
('001722','11/1/2011'),
('001722','10/1/2011'),
('001722', nan),
(nan ,'1/1/2012')
], columns=['GVKEY','Date'])
df_gvkey_str2['Date'] = pd.to_datetime(df_gvkey_str2['Date'])
df_fill_data = pd.DataFrame(
data=[
(4, 'c', nan, 'a'),
(1, 'd', 3, 'a'),
(10, 'e', 100, 'a'),
(2, nan, 6, 'b'),
(5, 'f', 8, 'b'),
(11, 'g', 150, 'b'),
],
columns=['y', 'x1', 'x2', 'group']
)
class TestCumulate(DataFrameTest):
expect_between_1_3 = pd.DataFrame(data = [
(10516, 'a', '1/1/2000', 1.01, 1.01),
(10516, 'a', '1/2/2000', 1.02, 1.02),
(10516, 'a', '1/3/2000', 1.03, 1.0506),
(10516, 'a', '1/4/2000', 1.04, 1.04),
(10516, 'b', '1/1/2000', 1.05, 1.05),
(10516, 'b', '1/2/2000', 1.06, 1.06),
(10516, 'b', '1/3/2000', 1.07, 1.1342),
(10516, 'b', '1/4/2000', 1.08, 1.08),
(10517, 'a', '1/1/2000', 1.09, 1.09),
(10517, 'a', '1/2/2000', 1.1, 1.1),
(10517, 'a', '1/3/2000', 1.11, 1.2210000000000003),
(10517, 'a', '1/4/2000', 1.12, 1.12),
], columns = ['PERMNO', 'byvar', 'Date', 'RET', 'cum_RET'])
expect_first = pd.DataFrame([
(10516, 'a', '1/1/2000', 1.01, 1.01),
(10516, 'a', '1/2/2000', 1.02, 1.02),
(10516, 'a', '1/3/2000', 1.03, 1.0506),
(10516, 'a', '1/4/2000', 1.04, 1.092624),
(10516, 'b', '1/1/2000', 1.05, 1.05),
(10516, 'b', '1/2/2000', 1.06, 1.06),
(10516, 'b', '1/3/2000', 1.07, 1.1342),
(10516, 'b', '1/4/2000', 1.08, 1.224936),
(10517, 'a', '1/1/2000', 1.09, 1.09),
(10517, 'a', '1/2/2000', 1.10, 1.10),
(10517, 'a', '1/3/2000', 1.11, 1.221),
(10517, 'a', '1/4/2000', 1.12, 1.36752),
], columns = ['PERMNO','byvar','Date', 'RET', 'cum_RET'])
def test_method_between_1_3(self):
cum_df = dero.pandas.cumulate(self.df, 'RET', 'between', periodvar='Date',
byvars=['PERMNO','byvar'], time=[1,3])
assert_frame_equal(self.expect_between_1_3, cum_df, check_dtype=False)
def test_method_between_m2_0(self):
cum_df = dero.pandas.cumulate(self.df, 'RET', 'between', periodvar='Date',
byvars=['PERMNO','byvar'], time=[-2,0])
#Actually same result as [1,3]
assert_frame_equal(self.expect_between_1_3, cum_df, check_dtype=False)
def test_shifted_index(self):
df = self.df.copy()
df.index = df.index + 10
cum_df = dero.pandas.cumulate(df, 'RET', 'between', periodvar='Date',
byvars=['PERMNO','byvar'], time=[-2,0])
assert_frame_equal(self.expect_between_1_3, cum_df, check_dtype=False)
def test_method_first(self):
cum_df = dero.pandas.cumulate(self.df, 'RET', 'first', periodvar='Date',
byvars=['PERMNO','byvar'])
assert_frame_equal(self.expect_first, cum_df, check_dtype=False)
def test_grossify(self):
df = self.df.copy() #don't overwrite original
df['RET'] -= 1 #ungrossify
expect_first_grossify = self.expect_first.copy()
expect_first_grossify['cum_RET'] -= 1
expect_first_grossify['RET'] -= 1
cum_df = dero.pandas.cumulate(df, 'RET', 'first', periodvar='Date',
byvars=['PERMNO','byvar'], grossify=True)
assert_frame_equal(expect_first_grossify, cum_df, check_dtype=False)
class TestGroupbyMerge(DataFrameTest):
def test_subset_max(self):
byvars = ['PERMNO','byvar']
out = dero.pandas.groupby_merge(self.df, byvars, 'max', subset='RET')
expect_df = pd.DataFrame(
[(10516, 'a', '1/1/2000', 1.01, 1.04),
(10516, 'a', '1/2/2000', 1.02, 1.04),
(10516, 'a', '1/3/2000', 1.03, 1.04),
(10516, 'a', '1/4/2000', 1.04, 1.04),
(10516, 'b', '1/1/2000', 1.05, 1.08),
(10516, 'b', '1/2/2000', 1.06, 1.08),
(10516, 'b', '1/3/2000', 1.07, 1.08),
(10516, 'b', '1/4/2000', 1.08, 1.08),
(10517, 'a', '1/1/2000', 1.09, 1.12),
(10517, 'a', '1/2/2000', 1.10, 1.12),
(10517, 'a', '1/3/2000', 1.11, 1.12),
(10517, 'a', '1/4/2000', 1.12, 1.12)],
columns = ['PERMNO','byvar','Date', 'RET', 'RET_max'])
assert_frame_equal(expect_df, out)
def test_subset_std(self):
byvars = ['PERMNO','byvar']
out = dero.pandas.groupby_merge(self.df, byvars, 'std', subset='RET')
expect_df = pd.DataFrame(
[(10516, 'a', '1/1/2000', 1.01, 0.012909944487358068),
(10516, 'a', '1/2/2000', 1.02, 0.012909944487358068),
(10516, 'a', '1/3/2000', 1.03, 0.012909944487358068),
(10516, 'a', '1/4/2000', 1.04, 0.012909944487358068),
(10516, 'b', '1/1/2000', 1.05, 0.012909944487358068),
(10516, 'b', '1/2/2000', 1.06, 0.012909944487358068),
(10516, 'b', '1/3/2000', 1.07, 0.012909944487358068),
(10516, 'b', '1/4/2000', 1.08, 0.012909944487358068),
(10517, 'a', '1/1/2000', 1.09, 0.012909944487358068),
(10517, 'a', '1/2/2000', 1.10, 0.012909944487358068),
(10517, 'a', '1/3/2000', 1.11, 0.012909944487358068),
(10517, 'a', '1/4/2000', 1.12, 0.012909944487358068)],
columns = ['PERMNO','byvar','Date', 'RET', 'RET_std'])
assert_frame_equal(expect_df, out)
def test_nan_byvar_transform(self):
expect_df = self.df_nan_byvar.copy()
expect_df['val_transform'] = expect_df['val']
out = dero.pandas.groupby_merge(self.df_nan_byvar, 'byvar', 'transform', (lambda x: x))
assert_frame_equal(expect_df, out)
def test_nan_byvar_and_nan_val_transform_numeric(self):
non_standard_index = self.df_nan_byvar_and_val.copy()
non_standard_index.index = [5,6,7,8]
expect_df = self.df_nan_byvar_and_val.copy()
expect_df['val_transform'] = expect_df['val'] + 1
expect_df.index = [5,6,7,8]
out = dero.pandas.groupby_merge(non_standard_index, 'byvar', 'transform', (lambda x: x + 1))
assert_frame_equal(expect_df, out)
def test_nan_byvar_and_nan_val_and_nonstandard_index_transform_numeric(self):
expect_df = self.df_nan_byvar_and_val.copy()
expect_df['val_transform'] = expect_df['val'] + 1
def test_nan_byvar_sum(self):
expect_df = pd.DataFrame(data = [
('a', 1, 1.0),
(nan, 2, nan),
('b', 3, 7.0),
('b', 4, 7.0),
], columns = ['byvar', 'val', 'val_sum'])
out = dero.pandas.groupby_merge(self.df_nan_byvar, 'byvar', 'sum')
assert_frame_equal(expect_df, out)
class TestLongToWide:
expect_df_with_colindex = pd.DataFrame(data = [
(10516, 'a', 1.01, 1.02, 1.03, 1.04),
(10516, 'b', 1.05, 1.06, 1.07, 1.08),
(10517, 'a', 1.09, 1.1, 1.11, 1.12),
], columns = ['PERMNO', 'byvar',
'RET1/1/2000', 'RET1/2/2000',
'RET1/3/2000', 'RET1/4/2000'])
expect_df_no_colindex = pd.DataFrame(data = [
(10516, 'a', '1/1/2000', 1.01, 1.02, 1.03, 1.04),
(10516, 'a', '1/2/2000', 1.01, 1.02, 1.03, 1.04),
(10516, 'a', '1/3/2000', 1.01, 1.02, 1.03, 1.04),
(10516, 'a', '1/4/2000', 1.01, 1.02, 1.03, 1.04),
(10516, 'b', '1/1/2000', 1.05, 1.06, 1.07, 1.08),
(10516, 'b', '1/2/2000', 1.05, 1.06, 1.07, 1.08),
(10516, 'b', '1/3/2000', 1.05, 1.06, 1.07, 1.08),
(10516, 'b', '1/4/2000', 1.05, 1.06, 1.07, 1.08),
(10517, 'a', '1/1/2000', 1.09, 1.1, 1.11, 1.12),
(10517, 'a', '1/2/2000', 1.09, 1.1, 1.11, 1.12),
(10517, 'a', '1/3/2000', 1.09, 1.1, 1.11, 1.12),
(10517, 'a', '1/4/2000', 1.09, 1.1, 1.11, 1.12),
], columns = ['PERMNO', 'byvar', 'Date', 'RET0',
'RET1', 'RET2', 'RET3'])
input_data = DataFrameTest()
ltw_no_dup_colindex = dero.pandas.long_to_wide(input_data.df,
['PERMNO', 'byvar'], 'RET', colindex='Date')
ltw_dup_colindex = dero.pandas.long_to_wide(input_data.df_duplicate_row,
['PERMNO', 'byvar'], 'RET', colindex='Date')
ltw_no_dup_no_colindex = dero.pandas.long_to_wide(input_data.df,
['PERMNO', 'byvar'], 'RET')
ltw_dup_no_colindex = dero.pandas.long_to_wide(input_data.df_duplicate_row,
['PERMNO', 'byvar'], 'RET')
df_list = [ltw_no_dup_colindex, ltw_dup_colindex,
ltw_no_dup_no_colindex, ltw_dup_no_colindex]
def test_no_duplicates_with_colindex(self):
assert_frame_equal(self.expect_df_with_colindex, self.ltw_no_dup_colindex)
def test_duplicates_with_colindex(self):
assert_frame_equal(self.expect_df_with_colindex, self.ltw_dup_colindex)
def test_no_duplicates_no_colindex(self):
assert_frame_equal(self.expect_df_no_colindex, self.ltw_no_dup_no_colindex)
def test_duplicates_no_colindex(self):
assert_frame_equal(self.expect_df_no_colindex, self.ltw_dup_no_colindex)
def test_no_extra_vars(self):
for df in self.df_list:
assert ('__idx__','__key__') not in df.columns
class TestPortfolioAverages:
input_data = DataFrameTest()
expect_avgs_no_wt = pd.DataFrame(data = [
(1, 'a', 1.0250000000000001),
(1, 'b', 1.0550000000000002),
(2, 'a', 1.1050000000000002),
(2, 'b', 1.0750000000000002),
], columns = ['portfolio', 'byvar', 'RET'])
expect_avgs_wt = pd.DataFrame(data = [
(1, 'a', 1.0250000000000001, 1.025),
(1, 'b', 1.0550000000000002, 1.0550000000000002),
(2, 'a', 1.1050000000000002, 1.12),
(2, 'b', 1.0750000000000002, 1.0750000000000002),
], columns = ['portfolio', 'byvar', 'RET', 'RET_wavg'])
expect_ports = pd.DataFrame(data = [
(10516, 'a', '1/1/2000', 1.01, 0, 1),
(10516, 'a', '1/2/2000', 1.02, 1, 1),
(10516, 'a', '1/3/2000', 1.03, 1, 1),
(10516, 'a', '1/4/2000', 1.04, 0, 1),
(10516, 'b', '1/1/2000', 1.05, 1, 1),
(10516, 'b', '1/2/2000', 1.06, 1, 1),
(10516, 'b', '1/3/2000', 1.07, 1, 2),
(10516, 'b', '1/4/2000', 1.08, 1, 2),
(10517, 'a', '1/1/2000', 1.09, 0, 2),
(10517, 'a', '1/2/2000', 1.1, 0, 2),
(10517, 'a', '1/3/2000', 1.11, 0, 2),
(10517, 'a', '1/4/2000', 1.12, 1, 2),
], columns = ['PERMNO', 'byvar', 'Date', 'RET', 'weight', 'portfolio'])
avgs, ports = dero.pandas.portfolio_averages(input_data.df_weight, 'RET', 'RET', ngroups=2,
byvars='byvar')
w_avgs, w_ports = dero.pandas.portfolio_averages(input_data.df_weight, 'RET', 'RET', ngroups=2,
byvars='byvar', wtvar='weight')
def test_simple_averages(self):
assert_frame_equal(self.expect_avgs_no_wt, self.avgs, check_dtype=False)
def test_weighted_averages(self):
assert_frame_equal(self.expect_avgs_wt, self.w_avgs, check_dtype=False)
def test_portfolio_construction(self):
print(self.ports)
assert_frame_equal(self.expect_ports, self.ports, check_dtype=False)
assert_frame_equal(self.expect_ports, self.w_ports, check_dtype=False)
class TestWinsorize(DataFrameTest):
def test_winsor_40_subset_byvars(self):
expect_df = pd.DataFrame(data = [
(10516, 'a', '1/1/2000', 1.022624),
(10516, 'a', '1/2/2000', 1.022624),
(10516, 'a', '1/3/2000', 1.02672),
(10516, 'a', '1/4/2000', 1.02672),
(10516, 'b', '1/1/2000', 1.062624),
(10516, 'b', '1/2/2000', 1.062624),
(10516, 'b', '1/3/2000', 1.06672),
(10516, 'b', '1/4/2000', 1.06672),
(10517, 'a', '1/1/2000', 1.102624),
(10517, 'a', '1/2/2000', 1.102624),
(10517, 'a', '1/3/2000', 1.10672),
(10517, 'a', '1/4/2000', 1.10672),
], columns = ['PERMNO', 'byvar', 'Date', 'RET'])
wins = dero.pandas.winsorize(self.df, .4, subset='RET', byvars=['PERMNO','byvar'])
assert_frame_equal(expect_df, wins, check_less_precise=True)
class TestRegBy(DataFrameTest):
def create_indf(self):
indf = self.df_weight.copy()
indf['key'] = indf['PERMNO'].astype(str) + '_' + indf['byvar']
return indf
def test_regby_nocons(self):
indf = self.create_indf()
expect_df = pd.DataFrame(data = [
(0.48774684748988806, '10516_a'),
(0.9388636664168903, '10516_b'),
(0.22929206076239614, '10517_a'),
], columns = ['coef_RET', 'key'])
rb = dero.pandas.reg_by(indf, 'weight', 'RET', 'key', cons=False)
print('Reg by: ', rb)
assert_frame_equal(expect_df, rb)
def test_regby_cons(self):
indf = self.create_indf()
expect_df = pd.DataFrame(data = [
(0.49999999999999645, 5.329070518200751e-15, '10516_a'),
(0.9999999999999893, 1.0658141036401503e-14, '10516_b'),
(-32.89999999999997, 29.999999999999982, '10517_a'),
], columns = ['const', 'coef_RET', 'key'])
rb = dero.pandas.reg_by(indf, 'weight', 'RET', 'key')
print('Reg by: ', rb)
assert_frame_equal(expect_df, rb)
def test_regby_cons_low_obs(self):
indf = self.create_indf().loc[:8,:] #makes it so that one byvar only has one obs
expect_df = pd.DataFrame(data = [
(0.49999999999999645, 5.329070518200751e-15, '10516_a'),
(0.9999999999999893, 1.0658141036401503e-14, '10516_b'),
(nan, nan, '10517_a'),
], columns = ['const', 'coef_RET', 'key'])
rb = dero.pandas.reg_by(indf, 'weight', 'RET', 'key')
print('Reg by: ', rb)
assert_frame_equal(expect_df, rb)
class TestExpandMonths(DataFrameTest):
def test_expand_months_tradedays(self):
expect_df = pd.DataFrame(data = [
(Timestamp('2000-01-03 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
(Timestamp('2000-01-04 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
(Timestamp('2000-01-05 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
(Timestamp('2000-01-06 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
(Timestamp('2000-01-07 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
(Timestamp('2000-01-10 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
(Timestamp('2000-01-11 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
(Timestamp('2000-01-12 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
(Timestamp('2000-01-13 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
(Timestamp('2000-01-14 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
(Timestamp('2000-01-18 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
(Timestamp('2000-01-19 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
(Timestamp('2000-01-20 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
(Timestamp('2000-01-21 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
(Timestamp('2000-01-24 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
(Timestamp('2000-01-25 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
(Timestamp('2000-01-26 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
(Timestamp('2000-01-27 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
(Timestamp('2000-01-28 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
(Timestamp('2000-01-31 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
], columns = ['Daily Date', 'byvar', 'Date', 'TICKER'])
em = dero.pandas.expand_months(self.single_ticker_df)
assert_frame_equal(expect_df.sort_index(axis=1), em.sort_index(axis=1))
def test_expand_months_calendardays(self):
expect_df = pd.DataFrame(data = [
(Timestamp('2000-01-01 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
(Timestamp('2000-01-02 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
(Timestamp('2000-01-03 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
(Timestamp('2000-01-04 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
(Timestamp('2000-01-05 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
(Timestamp('2000-01-06 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
(Timestamp('2000-01-07 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
(Timestamp('2000-01-08 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
(Timestamp('2000-01-09 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
(Timestamp('2000-01-10 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
(Timestamp('2000-01-11 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
(Timestamp('2000-01-12 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
(Timestamp('2000-01-13 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
(Timestamp('2000-01-14 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
(Timestamp('2000-01-15 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
(Timestamp('2000-01-16 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
(Timestamp('2000-01-17 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
(Timestamp('2000-01-18 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
(Timestamp('2000-01-19 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
(Timestamp('2000-01-20 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
(Timestamp('2000-01-21 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
(Timestamp('2000-01-22 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
(Timestamp('2000-01-23 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
(Timestamp('2000-01-24 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
(Timestamp('2000-01-25 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
(Timestamp('2000-01-26 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
(Timestamp('2000-01-27 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
(Timestamp('2000-01-28 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
(Timestamp('2000-01-29 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
(Timestamp('2000-01-30 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
(Timestamp('2000-01-31 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
], columns = ['Daily Date', 'byvar', 'Date', 'TICKER'])
em = dero.pandas.expand_months(self.single_ticker_df, trade_days=False)
assert_frame_equal(expect_df.sort_index(axis=1), em.sort_index(axis=1))
class TestPortfolio(DataFrameTest):
def test_portfolio_byvars(self):
expect_df = pd.DataFrame(data = [
(10516, 'a', '1/1/2000', 1.01, 1),
(10516, 'a', '1/2/2000', 1.02, 1),
(10516, 'a', '1/3/2000', 1.03, 2),
(10516, 'a', '1/4/2000', 1.04, 2),
(10516, 'b', '1/1/2000', 1.05, 1),
(10516, 'b', '1/2/2000', 1.06, 1),
(10516, 'b', '1/3/2000', 1.07, 2),
(10516, 'b', '1/4/2000', 1.08, 2),
(10517, 'a', '1/1/2000', 1.09, 1),
(10517, 'a', '1/2/2000', 1.1, 1),
(10517, 'a', '1/3/2000', 1.11, 2),
(10517, 'a', '1/4/2000', 1.12, 2),
], columns = ['PERMNO', 'byvar', 'Date', 'RET', 'portfolio'])
p = dero.pandas.portfolio(self.df, 'RET', ngroups=2, byvars=['PERMNO','byvar'])
assert_frame_equal(expect_df, p, check_dtype=False)
def test_portfolio_with_nan_and_byvars(self):
expect_df = pd.DataFrame(data = [
(10516, 'a', '1/1/2000', nan, 0),
(10516, 'a', '1/2/2000', 1.02, 1),
(10516, 'a', '1/3/2000', 1.03, 1), #changed from 2 to 1 when updated nan handling
(10516, 'a', '1/4/2000', 1.04, 2),
(10516, 'b', '1/1/2000', 1.05, 1),
(10516, 'b', '1/2/2000', 1.06, 1),
(10516, 'b', '1/3/2000', 1.07, 2),
(10516, 'b', '1/4/2000', 1.08, 2),
(10517, 'a', '1/1/2000', 1.09, 1),
(10517, 'a', '1/2/2000', 1.1, 1),
(10517, 'a', '1/3/2000', 1.11, 2),
(10517, 'a', '1/4/2000', 1.12, 2),
], columns = ['PERMNO', 'byvar', 'Date', 'RET', 'portfolio'])
indf = self.df.copy()
indf.loc[0, 'RET'] = nan
p = dero.pandas.portfolio(indf, 'RET', ngroups=2, byvars=['PERMNO','byvar'])
assert_frame_equal(expect_df, p, check_dtype=False)
class TestConvertSASDateToPandasDate:
df_sasdate = pd.DataFrame(data = [
('011508', 16114.0),
('011508', 16482.0),
('011508', 17178.0),
('011508', 17197.0),
('011508', 17212.0),
], columns = ['gvkey', 'datadate'])
df_sasdate_nan = pd.DataFrame(data = [
('011508', 16114.0),
('011508', 16482.0),
('011508', 17178.0),
('011508', 17197.0),
('011508', nan),
('011508', 17212.0),
], columns = ['gvkey', 'datadate'])
def test_convert(self):
expect_df = pd.DataFrame(data = [
(numpy.datetime64('2004-02-13T00:00:00.000000000'),),
(numpy.datetime64('2005-02-15T00:00:00.000000000'),),
(numpy.datetime64('2007-01-12T00:00:00.000000000'),),
(numpy.datetime64('2007-01-31T00:00:00.000000000'),),
(numpy.datetime64('2007-02-15T00:00:00.000000000'),),
], columns = [0])
converted = pd.DataFrame(dero.pandas.convert_sas_date_to_pandas_date(self.df_sasdate['datadate']))
assert_frame_equal(expect_df, converted)
def test_convert_nan(self):
expect_df = pd.DataFrame(data = [
(numpy.datetime64('2004-02-13T00:00:00.000000000'),),
(numpy.datetime64('2005-02-15T00:00:00.000000000'),),
(numpy.datetime64('2007-01-12T00:00:00.000000000'),),
(numpy.datetime64('2007-01-31T00:00:00.000000000'),),
(numpy.datetime64('NaT'),),
(numpy.datetime64('2007-02-15T00:00:00.000000000'),),
], columns = [0])
converted = pd.DataFrame(dero.pandas.convert_sas_date_to_pandas_date(self.df_sasdate_nan['datadate']))
assert_frame_equal(expect_df, converted)
class TestMapWindows(DataFrameTest):
times = [
[-4, -2, 0],
[-3, 1, 2],
[4, 5, 6],
[0, 1, 2],
[-1, 0, 1]
]
df_period_str = pd.DataFrame([
(10516, '1/1/2000', 1.01),
(10516, '1/2/2000', 1.02),
(10516, '1/3/2000', 1.03),
(10516, '1/4/2000', 1.04),
(10516, '1/5/2000', 1.05),
(10516, '1/6/2000', 1.06),
(10516, '1/7/2000', 1.07),
(10516, '1/8/2000', 1.08),
(10517, '1/1/2000', 1.09),
(10517, '1/2/2000', 1.10),
(10517, '1/3/2000', 1.11),
(10517, '1/4/2000', 1.12),
(10517, '1/5/2000', 1.05),
(10517, '1/6/2000', 1.06),
(10517, '1/7/2000', 1.07),
(10517, '1/8/2000', 1.08),
], columns = ['PERMNO','Date', 'RET'])
df_period = df_period_str.copy()
df_period['Date'] = pd.to_datetime(df_period['Date'])
expect_dfs = [
pd.DataFrame(data = [
(10516, Timestamp('2000-01-01 00:00:00'), 1.01, 0),
(10516, Timestamp('2000-01-02 00:00:00'), 1.02, 1),
(10516, Timestamp('2000-01-03 00:00:00'), 1.03, 1),
(10516, Timestamp('2000-01-04 00:00:00'), 1.04, 2),
(10516, Timestamp('2000-01-05 00:00:00'), 1.05, 2),
(10516, Timestamp('2000-01-06 00:00:00'), 1.06, 3),
(10516, Timestamp('2000-01-07 00:00:00'), 1.07, 3),
(10516, Timestamp('2000-01-08 00:00:00'), 1.08, 3),
(10517, Timestamp('2000-01-01 00:00:00'), 1.09, 0),
(10517, Timestamp('2000-01-02 00:00:00'), 1.1, 1),
(10517, Timestamp('2000-01-03 00:00:00'), 1.11, 1),
(10517, Timestamp('2000-01-04 00:00:00'), 1.12, 2),
(10517, Timestamp('2000-01-05 00:00:00'), 1.05, 2),
(10517, Timestamp('2000-01-06 00:00:00'), 1.06, 3),
(10517, Timestamp('2000-01-07 00:00:00'), 1.07, 3),
(10517, Timestamp('2000-01-08 00:00:00'), 1.08, 3),
], columns = ['PERMNO', 'Date', 'RET', '__map_window__']),
pd.DataFrame(data = [
(10516, Timestamp('2000-01-01 00:00:00'), 1.01, 0),
(10516, Timestamp('2000-01-02 00:00:00'), 1.02, 1),
(10516, Timestamp('2000-01-03 00:00:00'), 1.03, 1),
(10516, Timestamp('2000-01-04 00:00:00'), 1.04, 1),
(10516, Timestamp('2000-01-05 00:00:00'), 1.05, 1),
(10516, Timestamp('2000-01-06 00:00:00'), 1.06, 2),
(10516, Timestamp('2000-01-07 00:00:00'), 1.07, 3),
(10516, Timestamp('2000-01-08 00:00:00'), 1.08, 3),
(10517, Timestamp('2000-01-01 00:00:00'), 1.09, 0),
(10517, Timestamp('2000-01-02 00:00:00'), 1.1, 1),
(10517, Timestamp('2000-01-03 00:00:00'), 1.11, 1),
(10517, Timestamp('2000-01-04 00:00:00'), 1.12, 1),
(10517, Timestamp('2000-01-05 00:00:00'), 1.05, 1),
(10517, Timestamp('2000-01-06 00:00:00'), 1.06, 2),
(10517, Timestamp('2000-01-07 00:00:00'), 1.07, 3),
(10517, Timestamp('2000-01-08 00:00:00'), 1.08, 3),
], columns = ['PERMNO', 'Date', 'RET', '__map_window__']),
pd.DataFrame(data = [
(10516, Timestamp('2000-01-01 00:00:00'), 1.01, 0),
(10516, Timestamp('2000-01-02 00:00:00'), 1.02, 1),
(10516, Timestamp('2000-01-03 00:00:00'), 1.03, 2),
(10516, Timestamp('2000-01-04 00:00:00'), 1.04, 3),
(10516, Timestamp('2000-01-05 00:00:00'), 1.05, 3),
(10516, Timestamp('2000-01-06 00:00:00'), 1.06, 3),
(10516, Timestamp('2000-01-07 00:00:00'), 1.07, 3),
(10516, Timestamp('2000-01-08 00:00:00'), 1.08, 3),
(10517, Timestamp('2000-01-01 00:00:00'), 1.09, 0),
(10517, Timestamp('2000-01-02 00:00:00'), 1.1, 1),
(10517, Timestamp('2000-01-03 00:00:00'), 1.11, 2),
(10517, Timestamp('2000-01-04 00:00:00'), 1.12, 3),
(10517, Timestamp('2000-01-05 00:00:00'), 1.05, 3),
(10517, Timestamp('2000-01-06 00:00:00'), 1.06, 3),
(10517, Timestamp('2000-01-07 00:00:00'), 1.07, 3),
(10517, Timestamp('2000-01-08 00:00:00'), 1.08, 3),
], columns = ['PERMNO', 'Date', 'RET', '__map_window__']),
pd.DataFrame(data = [
(10516, Timestamp('2000-01-01 00:00:00'), 1.01, 0),
(10516, Timestamp('2000-01-02 00:00:00'), 1.02, 1),
(10516, | Timestamp('2000-01-03 00:00:00') | pandas.Timestamp |
import os
import pandas as pd
from bs4 import BeautifulSoup
DATA_FOLDER = "../data/Energy_Price"
RESULT_FILENAME = "../data/sm_price/price_time.csv"
# Script to collect data in dataframes and save it in the data folder
def load_xml(data_file):
print(data_file)
with open(data_file, 'r') as src:
soup = BeautifulSoup(src, 'lxml')
return soup
def get_dataframes(data_files):
dframe = | pd.DataFrame(columns=["lmp_value", "time"]) | pandas.DataFrame |
from pybaseball import schedule_and_record
import pandas as pd
import numpy as np
import datetime
import pickle
def get_games(date):
"""
Takes a datetime object.
Returns a list of game matchups as strings.
"""
year = date.year
date = pd.to_datetime(date)
teams = ['OAK', 'LAD', 'TOR', 'PHI', 'ATL', 'LAA', 'BAL', 'HOU', 'BOS',
'CIN', 'SD', 'TEX', 'PIT', 'COL', 'STL', 'CHW', 'CHC', 'TB',
'MIN', 'DET', 'ARI', 'SF', 'KC', 'WSN', 'SEA', 'MIA', 'NYY',
'MIL', 'CLE', 'NYM']
dates_games = []
for team in teams:
data = schedule_and_record(year, team)
data['year'] = np.ones(data.shape[0], int) * year
data['month'] = data['Date'].str.split(' ').apply(lambda x: x[1])
months = ['Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', 'Jul', 'Aug',
'Sep', 'Oct', 'Nov', 'Dec']
month_mapping = dict(zip(months, list(range(1,13))))
data['month'] = data['month'].map(month_mapping)
data['day'] = data['Date'].str.split(' ').apply(lambda x: x[2])
data['Date'] = (data['year'].astype(str) + ', ' +
data['month'].astype(str) + ', ' +
data['day'].astype(str))
data['Date'] = | pd.to_datetime(data['Date']) | pandas.to_datetime |
import numpy as np
import pytest
import pandas.util._test_decorators as td
from pandas.core.dtypes.common import is_integer
import pandas as pd
from pandas import (
Series,
Timestamp,
date_range,
isna,
)
import pandas._testing as tm
def test_where_unsafe_int(any_signed_int_numpy_dtype):
s = Series(np.arange(10), dtype=any_signed_int_numpy_dtype)
mask = s < 5
s[mask] = range(2, 7)
expected = Series(
list(range(2, 7)) + list(range(5, 10)),
dtype=any_signed_int_numpy_dtype,
)
tm.assert_series_equal(s, expected)
def test_where_unsafe_float(float_numpy_dtype):
s = Series(np.arange(10), dtype=float_numpy_dtype)
mask = s < 5
s[mask] = range(2, 7)
data = list(range(2, 7)) + list(range(5, 10))
expected = Series(data, dtype=float_numpy_dtype)
| tm.assert_series_equal(s, expected) | pandas._testing.assert_series_equal |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import pickle
import shutil
import sys
import tempfile
import numpy as np
from numpy import arange, nan
import pandas.testing as pdt
from pandas import DataFrame, MultiIndex, Series, to_datetime
# dependencies testing specific
import pytest
import recordlinkage
from recordlinkage.base import BaseCompareFeature
STRING_SIM_ALGORITHMS = [
'jaro', 'q_gram', 'cosine', 'jaro_winkler', 'dameraulevenshtein',
'levenshtein', 'lcs', 'smith_waterman'
]
NUMERIC_SIM_ALGORITHMS = ['step', 'linear', 'squared', 'exp', 'gauss']
FIRST_NAMES = [
u'Ronald', u'Amy', u'Andrew', u'William', u'Frank', u'Jessica', u'Kevin',
u'Tyler', u'Yvonne', nan
]
LAST_NAMES = [
u'Graham', u'Smith', u'Holt', u'Pope', u'Hernandez', u'Gutierrez',
u'Rivera', nan, u'Crane', u'Padilla'
]
STREET = [
u'<NAME>', nan, u'<NAME>', u'<NAME>', u'<NAME>',
u'<NAME>', u'Williams Trail', u'Durham Mountains', u'Anna Circle',
u'<NAME>'
]
JOB = [
u'Designer, multimedia', u'Designer, blown glass/stained glass',
u'Chiropractor', u'Engineer, mining', u'Quantity surveyor',
u'Phytotherapist', u'Teacher, English as a foreign language',
u'Electrical engineer', u'Research officer, government', u'Economist'
]
AGES = [23, 40, 70, 45, 23, 57, 38, nan, 45, 46]
# Run all tests in this file with:
# nosetests tests/test_compare.py
class TestData(object):
@classmethod
def setup_class(cls):
N_A = 100
N_B = 100
cls.A = DataFrame({
'age': np.random.choice(AGES, N_A),
'given_name': np.random.choice(FIRST_NAMES, N_A),
'lastname': np.random.choice(LAST_NAMES, N_A),
'street': np.random.choice(STREET, N_A)
})
cls.B = DataFrame({
'age': np.random.choice(AGES, N_B),
'given_name': np.random.choice(FIRST_NAMES, N_B),
'lastname': np.random.choice(LAST_NAMES, N_B),
'street': np.random.choice(STREET, N_B)
})
cls.A.index.name = 'index_df1'
cls.B.index.name = 'index_df2'
cls.index_AB = MultiIndex.from_arrays(
[arange(len(cls.A)), arange(len(cls.B))],
names=[cls.A.index.name, cls.B.index.name])
# Create a temporary directory
cls.test_dir = tempfile.mkdtemp()
@classmethod
def teardown_class(cls):
# Remove the test directory
shutil.rmtree(cls.test_dir)
class TestCompareApi(TestData):
"""General unittest for the compare API."""
def test_repr(self):
comp = recordlinkage.Compare()
comp.exact('given_name', 'given_name')
comp.string('given_name', 'given_name', method='jaro')
comp.numeric('age', 'age', method='step', offset=3, origin=2)
comp.numeric('age', 'age', method='step', offset=0, origin=2)
c_str = str(comp)
c_repr = repr(comp)
assert c_str == c_repr
start_str = '<{}'.format(comp.__class__.__name__)
assert c_str.startswith(start_str)
def test_instance_linking(self):
comp = recordlinkage.Compare()
comp.exact('given_name', 'given_name')
comp.string('given_name', 'given_name', method='jaro')
comp.numeric('age', 'age', method='step', offset=3, origin=2)
comp.numeric('age', 'age', method='step', offset=0, origin=2)
result = comp.compute(self.index_AB, self.A, self.B)
# returns a Series
assert isinstance(result, DataFrame)
# resulting series has a MultiIndex
assert isinstance(result.index, MultiIndex)
# index names are ok
assert result.index.names == [self.A.index.name, self.B.index.name]
assert len(result) == len(self.index_AB)
def test_instance_dedup(self):
comp = recordlinkage.Compare()
comp.string('given_name', 'given_name', method='jaro')
comp.numeric('age', 'age', method='step', offset=3, origin=2)
comp.numeric('age', 'age', method='step', offset=0, origin=2)
result = comp.compute(self.index_AB, self.A)
# returns a Series
assert isinstance(result, DataFrame)
# resulting series has a MultiIndex
assert isinstance(result.index, MultiIndex)
# index names are ok
assert result.index.names == [self.A.index.name, self.B.index.name]
assert len(result) == len(self.index_AB)
def test_label_linking(self):
comp = recordlinkage.Compare()
comp.compare_vectorized(
lambda s1, s2: np.ones(len(s1), dtype=np.int),
'given_name',
'given_name',
label='my_feature_label')
result = comp.compute(self.index_AB, self.A, self.B)
assert "my_feature_label" in result.columns.tolist()
def test_label_dedup(self):
comp = recordlinkage.Compare()
comp.compare_vectorized(
lambda s1, s2: np.ones(len(s1), dtype=np.int),
'given_name',
'given_name',
label='my_feature_label')
result = comp.compute(self.index_AB, self.A)
assert "my_feature_label" in result.columns.tolist()
def test_multilabel_none_linking(self):
def ones_np_multi(s1, s2):
return np.ones(len(s1)), np.ones((len(s1), 3))
def ones_pd_multi(s1, s2):
return (Series(np.ones(len(s1))), DataFrame(np.ones((len(s1), 3))))
comp = recordlinkage.Compare()
comp.string('given_name', 'given_name', method='jaro')
comp.compare_vectorized(
ones_np_multi,
'given_name',
'given_name')
comp.compare_vectorized(
ones_pd_multi,
'given_name',
'given_name')
result = comp.compute(self.index_AB, self.A, self.B)
assert [0, 1, 2, 3, 4, 5, 6, 7, 8] == \
result.columns.tolist()
def test_multilabel_linking(self):
def ones_np_multi(s1, s2):
return np.ones(len(s1)), np.ones((len(s1), 3))
def ones_pd_multi(s1, s2):
return (Series(np.ones(len(s1))), DataFrame(np.ones((len(s1), 3))))
comp = recordlinkage.Compare()
comp.string('given_name', 'given_name', method='jaro')
comp.compare_vectorized(
ones_np_multi,
'given_name',
'given_name',
label=['a', ['b', 'c', 'd']])
comp.compare_vectorized(
ones_pd_multi,
'given_name',
'given_name',
label=['e', ['f', 'g', 'h']])
result = comp.compute(self.index_AB, self.A, self.B)
assert [0, 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h'] == \
result.columns.tolist()
def test_multilabel_dedup(self):
def ones_np_multi(s1, s2):
return np.ones(len(s1)), np.ones((len(s1), 3))
def ones_pd_multi(s1, s2):
return (Series(np.ones(len(s1))), DataFrame(np.ones((len(s1), 3))))
comp = recordlinkage.Compare()
comp.string('given_name', 'given_name', method='jaro')
comp.compare_vectorized(
ones_np_multi,
'given_name',
'given_name',
label=['a', ['b', 'c', 'd']])
comp.compare_vectorized(
ones_pd_multi,
'given_name',
'given_name',
label=['e', ['f', 'g', 'h']])
result = comp.compute(self.index_AB, self.A)
assert [0, 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h'] == \
result.columns.tolist()
def test_multilabel_none_dedup(self):
def ones_np_multi(s1, s2):
return np.ones(len(s1)), np.ones((len(s1), 3))
def ones_pd_multi(s1, s2):
return (Series(np.ones(len(s1))), DataFrame(np.ones((len(s1), 3))))
comp = recordlinkage.Compare()
comp.string('given_name', 'given_name', method='jaro')
comp.compare_vectorized(
ones_np_multi,
'given_name',
'given_name')
comp.compare_vectorized(
ones_pd_multi,
'given_name',
'given_name')
result = comp.compute(self.index_AB, self.A)
assert [0, 1, 2, 3, 4, 5, 6, 7, 8] == \
result.columns.tolist()
def test_multilabel_error_dedup(self):
def ones(s1, s2):
return np.ones((len(s1), 2))
comp = recordlinkage.Compare()
comp.string('given_name', 'given_name', method='jaro')
comp.compare_vectorized(
ones, 'given_name', 'given_name', label=['a', 'b', 'c'])
with pytest.raises(ValueError):
comp.compute(self.index_AB, self.A)
def test_incorrect_collabels_linking(self):
comp = recordlinkage.Compare()
comp.string('given_name', 'given_name', method='jaro')
comp.compare_vectorized(lambda s1, s2: np.ones(len(s1), dtype=np.int),
"given_name", "not_existing_label")
with pytest.raises(KeyError):
comp.compute(self.index_AB, self.A, self.B)
def test_incorrect_collabels_dedup(self):
comp = recordlinkage.Compare()
comp.compare_vectorized(lambda s1, s2: np.ones(len(s1), dtype=np.int),
"given_name", "not_existing_label")
with pytest.raises(KeyError):
comp.compute(self.index_AB, self.A)
def test_compare_custom_vectorized_linking(self):
A = DataFrame({'col': ['abc', 'abc', 'abc', 'abc', 'abc']})
B = DataFrame({'col': ['abc', 'abd', 'abc', 'abc', '123']})
ix = MultiIndex.from_arrays([A.index.values, B.index.values])
# test without label
comp = recordlinkage.Compare()
comp.compare_vectorized(lambda s1, s2: np.ones(len(s1), dtype=np.int),
'col', 'col')
result = comp.compute(ix, A, B)
expected = DataFrame([1, 1, 1, 1, 1], index=ix)
pdt.assert_frame_equal(result, expected)
# test with label
comp = recordlinkage.Compare()
comp.compare_vectorized(
lambda s1, s2: np.ones(len(s1), dtype=np.int),
'col',
'col',
label='my_feature_label')
result = comp.compute(ix, A, B)
expected = DataFrame(
[1, 1, 1, 1, 1], index=ix, columns=['my_feature_label'])
pdt.assert_frame_equal(result, expected)
# def test_compare_custom_nonvectorized_linking(self):
# A = DataFrame({'col': [1, 2, 3, 4, 5]})
# B = DataFrame({'col': [1, 2, 3, 4, 5]})
# ix = MultiIndex.from_arrays([A.index.values, B.index.values])
# def custom_func(a, b):
# return np.int64(1)
# # test without label
# comp = recordlinkage.Compare()
# comp.compare_single(
# custom_func,
# 'col',
# 'col'
# )
# result = comp.compute(ix, A, B)
# expected = DataFrame([1, 1, 1, 1, 1], index=ix)
# pdt.assert_frame_equal(result, expected)
# # test with label
# comp = recordlinkage.Compare()
# comp.compare_single(
# custom_func,
# 'col',
# 'col',
# label='test'
# )
# result = comp.compute(ix, A, B)
# expected = DataFrame([1, 1, 1, 1, 1], index=ix, columns=['test'])
# pdt.assert_frame_equal(result, expected)
def test_compare_custom_instance_type(self):
A = DataFrame({'col': ['abc', 'abc', 'abc', 'abc', 'abc']})
B = DataFrame({'col': ['abc', 'abd', 'abc', 'abc', '123']})
ix = MultiIndex.from_arrays([A.index.values, B.index.values])
def call(s1, s2):
# this should raise on incorrect types
assert isinstance(s1, np.ndarray)
assert isinstance(s2, np.ndarray)
return np.ones(len(s1), dtype=np.int)
comp = recordlinkage.Compare()
comp.compare_vectorized(lambda s1, s2: np.ones(len(s1), dtype=np.int),
'col', 'col')
result = comp.compute(ix, A, B)
expected = DataFrame([1, 1, 1, 1, 1], index=ix)
pdt.assert_frame_equal(result, expected)
def test_compare_custom_vectorized_arguments_linking(self):
A = DataFrame({'col': ['abc', 'abc', 'abc', 'abc', 'abc']})
B = DataFrame({'col': ['abc', 'abd', 'abc', 'abc', '123']})
ix = MultiIndex.from_arrays([A.index.values, B.index.values])
# test without label
comp = recordlinkage.Compare()
comp.compare_vectorized(
lambda s1, s2, x: np.ones(len(s1), dtype=np.int) * x, 'col', 'col',
5)
result = comp.compute(ix, A, B)
expected = DataFrame([5, 5, 5, 5, 5], index=ix)
pdt.assert_frame_equal(result, expected)
# test with label
comp = recordlinkage.Compare()
comp.compare_vectorized(
lambda s1, s2, x: np.ones(len(s1), dtype=np.int) * x,
'col',
'col',
5,
label='test')
result = comp.compute(ix, A, B)
expected = DataFrame([5, 5, 5, 5, 5], index=ix, columns=['test'])
pdt.assert_frame_equal(result, expected)
# test with kwarg
comp = recordlinkage.Compare()
comp.compare_vectorized(
lambda s1, s2, x: np.ones(len(s1), dtype=np.int) * x,
'col',
'col',
x=5,
label='test')
result = comp.compute(ix, A, B)
expected = DataFrame([5, 5, 5, 5, 5], index=ix, columns=['test'])
pdt.assert_frame_equal(result, expected)
def test_compare_custom_vectorized_dedup(self):
A = DataFrame({'col': ['abc', 'abc', 'abc', 'abc', 'abc']})
ix = MultiIndex.from_arrays([[0, 1, 2, 3, 4], [1, 2, 3, 4, 0]])
# test without label
comp = recordlinkage.Compare()
comp.compare_vectorized(lambda s1, s2: np.ones(len(s1), dtype=np.int),
'col', 'col')
result = comp.compute(ix, A)
expected = DataFrame([1, 1, 1, 1, 1], index=ix)
pdt.assert_frame_equal(result, expected)
# test with label
comp = recordlinkage.Compare()
comp.compare_vectorized(
lambda s1, s2: np.ones(len(s1), dtype=np.int),
'col',
'col',
label='test')
result = comp.compute(ix, A)
expected = DataFrame([1, 1, 1, 1, 1], index=ix, columns=['test'])
pdt.assert_frame_equal(result, expected)
def test_compare_custom_vectorized_arguments_dedup(self):
A = DataFrame({'col': ['abc', 'abc', 'abc', 'abc', 'abc']})
ix = MultiIndex.from_arrays([[0, 1, 2, 3, 4], [1, 2, 3, 4, 0]])
# test without label
comp = recordlinkage.Compare()
comp.compare_vectorized(
lambda s1, s2, x: np.ones(len(s1), dtype=np.int) * x, 'col', 'col',
5)
result = comp.compute(ix, A)
expected = DataFrame([5, 5, 5, 5, 5], index=ix)
pdt.assert_frame_equal(result, expected)
# test with label
comp = recordlinkage.Compare()
comp.compare_vectorized(
lambda s1, s2, x: np.ones(len(s1), dtype=np.int) * x,
'col',
'col',
5,
label='test')
result = comp.compute(ix, A)
expected = DataFrame([5, 5, 5, 5, 5], index=ix, columns=['test'])
pdt.assert_frame_equal(result, expected)
def test_parallel_comparing_api(self):
# use single job
comp = recordlinkage.Compare(n_jobs=1)
comp.exact('given_name', 'given_name', label='my_feature_label')
result_single = comp.compute(self.index_AB, self.A, self.B)
result_single.sort_index(inplace=True)
# use two jobs
comp = recordlinkage.Compare(n_jobs=2)
comp.exact('given_name', 'given_name', label='my_feature_label')
result_2processes = comp.compute(self.index_AB, self.A, self.B)
result_2processes.sort_index(inplace=True)
# compare results
pdt.assert_frame_equal(result_single, result_2processes)
def test_parallel_comparing(self):
# use single job
comp = recordlinkage.Compare(n_jobs=1)
comp.exact('given_name', 'given_name', label='my_feature_label')
result_single = comp.compute(self.index_AB, self.A, self.B)
result_single.sort_index(inplace=True)
# use two jobs
comp = recordlinkage.Compare(n_jobs=2)
comp.exact('given_name', 'given_name', label='my_feature_label')
result_2processes = comp.compute(self.index_AB, self.A, self.B)
result_2processes.sort_index(inplace=True)
# use two jobs
comp = recordlinkage.Compare(n_jobs=4)
comp.exact('given_name', 'given_name', label='my_feature_label')
result_4processes = comp.compute(self.index_AB, self.A, self.B)
result_4processes.sort_index(inplace=True)
# compare results
pdt.assert_frame_equal(result_single, result_2processes)
pdt.assert_frame_equal(result_single, result_4processes)
def test_pickle(self):
# test if it is possible to pickle the Compare class
comp = recordlinkage.Compare()
comp.string('given_name', 'given_name')
comp.numeric('number', 'number')
comp.geo('lat', 'lng', 'lat', 'lng')
comp.date('before', 'after')
# do the test
pickle_path = os.path.join(self.test_dir, 'pickle_compare_obj.pickle')
pickle.dump(comp, open(pickle_path, 'wb'))
def test_manual_parallel_joblib(self):
# test if it is possible to pickle the Compare class
# This is only available for python 3. For python 2, it is not
# possible to pickle instancemethods. A workaround can be found at
# https://stackoverflow.com/a/29873604/8727928
if sys.version.startswith("3"):
# import joblib dependencies
from joblib import Parallel, delayed
# split the data into smaller parts
len_index = int(len(self.index_AB) / 2)
df_chunks = [self.index_AB[0:len_index], self.index_AB[len_index:]]
comp = recordlinkage.Compare()
comp.string('given_name', 'given_name')
comp.string('lastname', 'lastname')
comp.exact('street', 'street')
# do in parallel
Parallel(n_jobs=2)(
delayed(comp.compute)(df_chunks[i], self.A, self.B)
for i in [0, 1])
def test_indexing_types(self):
# test the two types of indexing
# this test needs improvement
A = DataFrame({'col': ['abc', 'abc', 'abc', 'abc', 'abc']})
B = DataFrame({'col': ['abc', 'abc', 'abc', 'abc', 'abc']})
B_reversed = B[::-1].copy()
ix = MultiIndex.from_arrays([np.arange(5), np.arange(5)])
# test with label indexing type
comp_label = recordlinkage.Compare(indexing_type='label')
comp_label.exact('col', 'col')
result_label = comp_label.compute(ix, A, B_reversed)
# test with position indexing type
comp_position = recordlinkage.Compare(indexing_type='position')
comp_position.exact('col', 'col')
result_position = comp_position.compute(ix, A, B_reversed)
assert (result_position.values == 1).all(axis=0)
pdt.assert_frame_equal(result_label, result_position)
def test_pass_list_of_features(self):
from recordlinkage.compare import FrequencyA, VariableA, VariableB
# setup datasets and record pairs
A = DataFrame({'col': ['abc', 'abc', 'abc', 'abc', 'abc']})
B = DataFrame({'col': ['abc', 'abc', 'abc', 'abc', 'abc']})
ix = MultiIndex.from_arrays([np.arange(5), np.arange(5)])
# test with label indexing type
features = [
VariableA('col', label='y1'),
VariableB('col', label='y2'),
FrequencyA('col', label='y3')
]
comp_label = recordlinkage.Compare(features=features)
result_label = comp_label.compute(ix, A, B)
assert list(result_label) == ["y1", "y2", "y3"]
class TestCompareFeatures(TestData):
def test_feature(self):
# test using classes and the base class
A = DataFrame({'col': ['abc', 'abc', 'abc', 'abc', 'abc']})
B = DataFrame({'col': ['abc', 'abd', 'abc', 'abc', '123']})
ix = MultiIndex.from_arrays([A.index.values, B.index.values])
feature = BaseCompareFeature('col', 'col')
feature._f_compare_vectorized = lambda s1, s2: np.ones(len(s1))
feature.compute(ix, A, B)
def test_feature_multicolumn_return(self):
# test using classes and the base class
A = DataFrame({'col': ['abc', 'abc', 'abc', 'abc', 'abc']})
B = DataFrame({'col': ['abc', 'abd', 'abc', 'abc', '123']})
ix = MultiIndex.from_arrays([A.index.values, B.index.values])
def ones(s1, s2):
return DataFrame(np.ones((len(s1), 3)))
feature = BaseCompareFeature('col', 'col')
feature._f_compare_vectorized = ones
result = feature.compute(ix, A, B)
assert result.shape == (5, 3)
def test_feature_multicolumn_input(self):
# test using classes and the base class
A = DataFrame({
'col1': ['abc', 'abc', 'abc', 'abc', 'abc'],
'col2': ['abc', 'abc', 'abc', 'abc', 'abc']
})
B = DataFrame({
'col1': ['abc', 'abd', 'abc', 'abc', '123'],
'col2': ['abc', 'abd', 'abc', 'abc', '123']
})
ix = MultiIndex.from_arrays([A.index.values, B.index.values])
feature = BaseCompareFeature(['col1', 'col2'], ['col1', 'col2'])
feature._f_compare_vectorized = \
lambda s1_1, s1_2, s2_1, s2_2: np.ones(len(s1_1))
feature.compute(ix, A, B)
class TestCompareExact(TestData):
"""Test the exact comparison method."""
def test_exact_str_type(self):
A = DataFrame({'col': ['abc', 'abc', 'abc', 'abc', 'abc']})
B = DataFrame({'col': ['abc', 'abd', 'abc', 'abc', '123']})
ix = MultiIndex.from_arrays([A.index.values, B.index.values])
expected = DataFrame([1, 0, 1, 1, 0], index=ix)
comp = recordlinkage.Compare()
comp.exact('col', 'col')
result = comp.compute(ix, A, B)
pdt.assert_frame_equal(result, expected)
def test_exact_num_type(self):
A = DataFrame({'col': [42, 42, 41, 43, nan]})
B = DataFrame({'col': [42, 42, 42, 42, 42]})
ix = MultiIndex.from_arrays([A.index.values, B.index.values])
expected = DataFrame([1, 1, 0, 0, 0], index=ix)
comp = recordlinkage.Compare()
comp.exact('col', 'col')
result = comp.compute(ix, A, B)
        pdt.assert_frame_equal(result, expected)
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon May 25 23:16:15 2020
@author: Eli
"""
from sklearn.model_selection import cross_validate
from sklearn.model_selection import GroupKFold
from sklearn.model_selection import cross_val_predict
from sklearn.neighbors import KNeighborsClassifier
from sklearn.ensemble import ExtraTreesClassifier
from sklearn.metrics import confusion_matrix
from sklearn import svm
from joblib import dump
import os
import pandas as pd
import numpy as np
class Classifier:
"""
Simple class for keeping track of data and running
crossvalidation on sklearn classifiers
"""
def __init__(self, model):
self.data = []
self.set_model(model)
def set_model(self, model):
"""
Sets the model for classification
Parameters
----------
model : sklearn classifier
Returns
-------
None.
"""
self.model = model
def load_data(self, data, cols = None):
"""
Loads data and appends it to the internal dataset
Parameters
----------
data : pd dataframe or path to load one
feature matrix, where the column 'label' has the class
cols : list, optional
list of columns to keep. If none is given then keeps all
Returns
-------
None.
"""
if isinstance(data, str):
data = pd.read_csv(data)
data_to_append = data.copy()
#get column subset if needed
if cols is not None:
cols_ = cols[:]
            if 'user' not in cols:
                cols_.append('user')
if 'label' not in cols:
cols_.append('label')
if 'dataset' not in cols:
cols_.append('dataset')
data_to_append = data_to_append[cols_]
self.data.append(data_to_append)
def crossval(self, split_col = 'user', cols = None, col_score_split=['user','label'],
n_jobs = 1):
"""
Creates a crossvalidated classifier
Parameters
----------
split_col : str , optional
column to perform the crossvalidation over
cols : list, optional
list of columns to use in the classifier. If none is given then keeps all
col_score_split: list of str
list of columns to calculate the score breakdown on
n_jobs: int
number of cores to give to sklearn. Colin set to 2, eli and kai
with tiny computers set to 1
Returns
-------
a dictionary of accuracy breakdowns by different categories
"""
#concatenate all of the data together
if len(self.data) > 1:
all_data = pd.concat(self.data, axis=0, ignore_index=True, copy=False)
elif len(self.data) == 1:
all_data = self.data[0]
else:
raise ValueError("I gots no data :'(")
#select columns
y = all_data['label'].values
groups = all_data[split_col].values
cv = GroupKFold(n_splits=len(np.unique(groups)))
if cols is None:
cols_ = [c for c in all_data.columns if c not in ['label','dataset','user']]
else:
cols_ = cols
X = all_data[cols_].to_numpy()
print("Beginning model evaluation...")
# scores = cross_validate(estimator = self.model,
# X = X, y = y, groups=groups,
# cv=cv,
# return_train_score=False,
# return_estimator=True, n_jobs=2)
preds = cross_val_predict(estimator=self.model,
X=X, y=y, groups=groups,
cv=cv, n_jobs=n_jobs)
# scores are in the order of the groups, so the first row out is the
# result of training on the other groups, and testing on the first group
#self.scores = scores
self.preds = preds
self.y_true = y
#do a score breakdown by unique value
scores = {}
for col in col_score_split:
unique_vals = np.unique(all_data[col])
accuracy = np.zeros(len(unique_vals))
for i,val in enumerate(unique_vals):
entries = all_data[col] == val
accuracy[i] = np.sum(self.preds[entries] == y[entries])/np.sum(entries)
scores[col] = pd.DataFrame({col:unique_vals,'accuracy':accuracy})
        # keep a reference so save_crossval_model() has something to dump
        self.scores = scores
        return scores
def save_crossval_model(self, save_path):
dump(self.scores, save_path)
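# Illustrative usage sketch for the Classifier above (not called anywhere).
# "features.csv" is a hypothetical path; the file is assumed to contain the
# 'user', 'label' and 'dataset' columns that crossval() expects.
def _example_classifier_usage():
    from sklearn.ensemble import ExtraTreesClassifier
    clf = Classifier(ExtraTreesClassifier(n_estimators=100))
    clf.load_data("features.csv")
    scores = clf.crossval(split_col='user', n_jobs=1)
    print(scores['label'])   # per-class accuracy breakdown
    return scores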
def wrapper(path_in, split_col, savePath, col_score_split=['user','label'],
n_jobs = 1):
"""
    wrapper for the cross-validated classifier: applies each model in the list
    to the acceleration and gyroscope feature data
Parameters
----------
path : string or list of str, dataframe or list of dataframe
path to the csv of interest for running the classifiers.
split_col: string
        which column to use for cross-validation
savePath: string
where to save the csv
col_score_split: list of str
list of columns to calculate the score breakdown on
n_jobs: int
number of cores to give to sklearn. Colin set to 2, eli and kai
with tiny computers set to 1
Returns
-------
1) list of score breakdown dataframes
2) list of predictions for each model
"""
if isinstance(path_in, str):
data = [pd.read_csv(path_in)]
elif isinstance(path_in, pd.core.frame.DataFrame):
data = [path_in]
elif isinstance(path_in, list):
if isinstance(path_in[0], str):
data = [pd.read_csv(p) for p in path_in]
else:
data = [p for p in path_in]
modelList = []
model = KNeighborsClassifier(n_neighbors=30)
modelList.append(model)
model = ExtraTreesClassifier(n_estimators=100)
modelList.append(model)
# model = svm.SVC()
# modelList.append(model)
# modelNames=['extra-Trees']
modelNames = ['k-NN', 'extra-Trees']#, 'SVC']
scoreDf_list = [pd.DataFrame() for x in col_score_split]
preds_list = []
y_true_list = []
for i_col, col in enumerate(col_score_split):
unique_vals = []
for d_set in data:
unique_vals = unique_vals + list(np.unique(d_set[col]))
scoreDf_list[i_col][col] = list(np.unique(unique_vals))+['mean','stdev']
for idx, model in enumerate(modelList):
clf = Classifier(model)
for d_set in data:
all_feats = d_set.columns
# acc_feats = sorted([f for f in all_feats if 'a_' in f]) #or 'yaw_' in f or 'pitch_' in f or 'roll_' in f]
# clf.load_data(d_set, all_feats)
clf.load_data(d_set)
scores = clf.crossval(split_col=split_col,col_score_split=col_score_split)
preds_list.append(clf.preds)
y_true_list.append(clf.y_true)
for i_col, col in enumerate(col_score_split):
accuracy = scores[col]['accuracy'].values
scoreDf_list[i_col][modelNames[idx]] = list(accuracy)+[np.mean(accuracy), np.std(accuracy)]
for i in range(len(scoreDf_list)):
scoreDf_list[i].to_csv(savePath+"_accuracy_"+col_score_split[i]+'.csv', index = False)
for i,m_name in enumerate(modelNames):
np.savetxt(savePath+"_pred_"+m_name+'.csv',preds_list[i],fmt="%s")
labels=np.unique(y_true_list[i])
#rows are truth columns are predicted
confusion_mat = confusion_matrix(y_true_list[i], preds_list[i],labels =labels)
        confusion_mat = pd.DataFrame(data=confusion_mat, index=labels, columns=labels)
import os
import argparse
import numpy as np
import pandas as pd
import nibabel as nib
from ukbb_cardiac.common.cardiac_utils import get_frames
from ukbb_cardiac.common.image_utils import np_categorical_dice
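# Minimal sketch of the overlap measure reported below, assuming
# np_categorical_dice(seg_a, seg_b, k) computes the usual Dice score
# 2*|A∩B| / (|A| + |B|) over the voxels labelled k. Illustration only;
# the script itself keeps using the imported implementation.
def _dice_sketch(seg_a, seg_b, k):
    a = (seg_a == k)
    b = (seg_b == k)
    denom = a.sum() + b.sum()
    return 2.0 * np.logical_and(a, b).sum() / denom if denom > 0 else 0.0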
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--output_csv', metavar='csv_name', default='DM_table.csv', required=True)
args = parser.parse_args()
print('Creating accuracy spreadsheet file ...')
if os.path.exists(args.output_csv):
os.remove(args.output_csv)
# Record ED ES frames to csv
init = {'Data': [],
'EDLV': [],
'EDLVM': [],
'EDRV': [],
'ESLV': [],
'ESLVM': [],
'ESRV': [],
}
df = pd.DataFrame(init)
root = './demo_image'
folder_list = sorted(os.listdir(root))
for folder in folder_list:
folder_dir = os.path.join(root, folder)
        # require all three files: ED/ES model segmentations and the ground truth
        if (os.path.exists('{0}/{1}_seg_sa_ED.nii.gz'.format(folder_dir, folder))
                and os.path.exists('{0}/{1}_seg_sa_ES.nii.gz'.format(folder_dir, folder))
                and os.path.exists('{0}/{1}_sa_gt.nii.gz'.format(folder_dir, folder))):
seg_sa_ED = '{0}/{1}_seg_sa_ED.nii.gz'.format(folder_dir, folder)
seg_sa_ES = '{0}/{1}_seg_sa_ES.nii.gz'.format(folder_dir, folder)
seg_sa_ground_truth = '{0}/{1}_sa_gt.nii.gz'.format(folder_dir, folder)
##seg_sa_ED ='{0}/{1}_sa_gt.nii.gz'.format(folder_dir, folder) # To see Dice metric between same segmentations is 1
seg_gt = nib.load(seg_sa_ground_truth).get_fdata()
fr = get_frames(seg_gt, 'sa')
seg_ED_gt = seg_gt[:, :, :, fr['ED']]
seg_ES_gt = seg_gt[:, :, :, fr['ES']]
dice_arr = np.zeros(6)
ind = 0
frames = ['ED','ES']
segm = ['LV','LV Myocardium','RV']
for fr in frames:
print('\nFor image {0}, Comparison between: {1} \n'.format(folder, fr))
seg_model = nib.load(seg_sa_ED).get_fdata() if fr == 'ED' else nib.load(seg_sa_ES).get_fdata()
##if fr == 'ED' : seg_model = seg_model[:,:,:,0] # To see Dice metric between same segmentations is 1
for i in range(1,4): # Loop for all segmentations
print('Calculate Dice metric for ',segm[i - 1])
total_seg_ED = np.sum(seg_ED_gt == i, axis=(0, 1, 2))
print('Seg num (', segm[i-1],') in ground truth ED: ',np.max(total_seg_ED))
total_seg_ES = np.sum(seg_ES_gt == i, axis=(0, 1, 2))
print('Seg num (', segm[i-1],') in ground truth ES: ',np.max(total_seg_ES))
total_seg = np.sum(seg_model == i, axis=(0, 1, 2))
print('Seg num in model: ', np.max(total_seg))
#denom = seg_ED_gt.shape[0]* seg_ED_gt.shape[1]* seg_ED_gt.shape[2]
if fr == 'ED':
dice_metric = np_categorical_dice(seg_model, seg_ED_gt, i) if (total_seg + total_seg_ED > 0) else 0
else:
dice_metric = np_categorical_dice(seg_model, seg_ES_gt, i) if (total_seg + total_seg_ES > 0) else 0
print("Dice metric for {0}: %".format(fr) , dice_metric * 100,'\n')
dice_arr[ind] = dice_metric * 100
ind += 1
print('{0} finished'.format(folder))
frames_dict = {'Data': [folder],
'EDLV': [dice_arr[0]],
'EDLVM': [dice_arr[1]],
'EDRV': [dice_arr[2]],
'ESLV': [dice_arr[3]],
'ESLVM': [dice_arr[4]],
'ESRV': [dice_arr[5]],
}
            df1 = pd.DataFrame(frames_dict)
from bs4 import BeautifulSoup
import requests
import re
import pandas as pd
import os
from pathlib import Path
import numpy as np
import asyncio
import aiohttp
# TODO: crawl by dates range ex) crawl news from when to when
# TODO: Use asyncio to speed up crawler
class Naver_Crawler:
"""
For Korean Stocks
"""
def __init__(self, company_code: str):
self.base_url = 'https://finance.naver.com'
self.company_code = company_code
# assert type(self.company_code) == str
root_dir = os.path.dirname(__file__)
# print(Path(__file__).resolve().parent.parent)
base_dir = os.path.join(root_dir, "crawled_result")
self.company_dir_path = os.path.join(base_dir, self.company_code)
# print(self.company_dir_path)
@staticmethod
async def fetch(session, url):
async with session.get(url) as response:
return await response.text()
async def crawl_price_history(self, maxpage):
async with aiohttp.ClientSession() as session:
url = 'https://finance.naver.com/item/sise_day.nhn?code={code}&page={page}'
futures = [asyncio.ensure_future(self.fetch(session, url.format(code=self.company_code, page=i)))
for i in range(1, maxpage)]
res = await asyncio.gather(*futures)
return res
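    # Illustrative sketch only: one way to drive the async price-history
    # crawl above from synchronous code. The page count of 5 is arbitrary.
    def _example_crawl_price_history(self, maxpage=5):
        # asyncio.run creates an event loop, awaits the coroutine and
        # returns the list of raw HTML pages.
        return asyncio.run(self.crawl_price_history(maxpage))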
def crawl_news(self, maxpage=None, page_to_csv=False, full_pages_to_csv=True):
"""
Example URL:
https://finance.naver.com/item/news.nhn?code=095570&page=2
:param maxpage: (int or None) Crawl to `maxpage`page
:param page_to_csv: (Bool) Set True if you want csv for separate pages, otherwise False
:param full_pages_to_csv: (Bool) Set True if you want all pages' result into one csv, otherwise False
:return: (pd.DataFrame) crawled result
"""
# Path Handling
news_dir_path = os.path.join(self.company_dir_path, 'News')
if full_pages_to_csv:
fullPage_dir_path = Path(os.path.join(news_dir_path, 'fullPage'))
fullPage_dir_path.mkdir(parents=True, exist_ok=True)
page = 1
# Tracking first and last page for file name
firstpage = page
last_read_page = None
# Get Last page number
url = self.base_url + '/item/news_news.nhn?code=' + self.company_code
page_nav = BeautifulSoup(requests.get(url).text, 'html.parser').select('.pgRR')[0]
last_page = page_nav.find('a')['href']
match = re.search(r'page=', last_page)
last_page = int(last_page[match.end():])
del match
maxpage = last_page if maxpage is None else maxpage
assert type(maxpage) == int and maxpage <= last_page
result_df = None
while page <= maxpage:
print(f'News Current page: {page}')
url = self.base_url + '/item/news_news.nhn?code=' + self.company_code + '&page=' + str(page)
html_text = requests.get(url).text
html = BeautifulSoup(html_text, "html.parser")
# 1. ==Date==
dates = html.select('.date')
date_result = [date.get_text() for date in dates]
# 2. ==Source==
sources = html.select('.info')
source_result = [source.get_text() for source in sources]
# 3. ==Title==
titles = html.select('.title')
title_result = []
for title in titles:
title = title.get_text()
title = re.sub('\n', '', title)
title_result.append(title)
# 4. ==Link==
links = html.select('.title')
link_result = []
article_body_result = []
for link in links:
article_url = self.base_url + link.find('a')['href']
link_result.append(article_url)
# 5. ==Body==
article_html_text = requests.get(article_url).text
article_html = BeautifulSoup(article_html_text, "html.parser")
body = article_html.find('div', id='news_read')
# print(body)
body = body.text # type --> string
body = body.replace("\n", "").replace("\t", "")
                # TODO: Reminder! Strip special characters from the body text -> do it when feeding the text to the model
article_body_result.append(body)
# 6. TODO: ==Reaction==
# reaction_space = article_html.find('ul', class_='u_likeit_layer _faceLayer')
#
# good_reaction_count = int(reaction_space.find('li', class_='u_likeit_list good') \
# .find('span', class_='u_likeit_list_count _count').text)
#
# warm_reaction_count = int(reaction_space.find('li', class_='u_likeit_list warm') \
# .find('span', class_='u_likeit_list_count _count').text)
#
# sad_reaction_count = int(reaction_space.find('li', class_='u_likeit_list sad') \
# .find('span', class_='u_likeit_list_count _count').text)
#
# angry_reaction_count = int(reaction_space.find('li', class_='u_likeit_list angry') \
# .find('span', class_='u_likeit_list_count _count').text)
#
# want_reaction_count = int(reaction_space.find('li', class_='u_likeit_list want') \
# .find('span', class_='u_likeit_list_count _count').text)
# print(reaction_space)
# print("="*20)
# 7. TODO: ==Commentary==
# comments = article_html.find_all(
# lambda tag: tag.name == 'span' and tag.get('class') == 'u_cbox_contents')
# print(comments)
# To Dataframe and To CSV (optional)
page_result = {
"Date": date_result, "Source": source_result, "Title": title_result,
"Link": link_result, "Body": article_body_result,
# "good_count": good_reaction_count,
# "warm_count": warm_reaction_count,
# "sad_count": sad_reaction_count,
# "angry_count": angry_reaction_count,
# "want_count": want_reaction_count
}
            page_df = pd.DataFrame(page_result)
import numpy as np
import pandas as pd
from pandas import DataFrame, MultiIndex, Index, Series, isnull
from pandas.compat import lrange
from pandas.util.testing import assert_frame_equal, assert_series_equal
from .common import MixIn
class TestNth(MixIn):
def test_first_last_nth(self):
# tests for first / last / nth
grouped = self.df.groupby('A')
first = grouped.first()
expected = self.df.loc[[1, 0], ['B', 'C', 'D']]
expected.index = Index(['bar', 'foo'], name='A')
expected = expected.sort_index()
assert_frame_equal(first, expected)
nth = grouped.nth(0)
assert_frame_equal(nth, expected)
last = grouped.last()
expected = self.df.loc[[5, 7], ['B', 'C', 'D']]
expected.index = Index(['bar', 'foo'], name='A')
assert_frame_equal(last, expected)
nth = grouped.nth(-1)
assert_frame_equal(nth, expected)
nth = grouped.nth(1)
expected = self.df.loc[[2, 3], ['B', 'C', 'D']].copy()
expected.index = Index(['foo', 'bar'], name='A')
expected = expected.sort_index()
assert_frame_equal(nth, expected)
# it works!
grouped['B'].first()
grouped['B'].last()
grouped['B'].nth(0)
self.df.loc[self.df['A'] == 'foo', 'B'] = np.nan
assert isnull(grouped['B'].first()['foo'])
assert isnull(grouped['B'].last()['foo'])
assert isnull(grouped['B'].nth(0)['foo'])
# v0.14.0 whatsnew
df = DataFrame([[1, np.nan], [1, 4], [5, 6]], columns=['A', 'B'])
g = df.groupby('A')
result = g.first()
expected = df.iloc[[1, 2]].set_index('A')
assert_frame_equal(result, expected)
expected = df.iloc[[1, 2]].set_index('A')
result = g.nth(0, dropna='any')
assert_frame_equal(result, expected)
def test_first_last_nth_dtypes(self):
df = self.df_mixed_floats.copy()
df['E'] = True
df['F'] = 1
# tests for first / last / nth
grouped = df.groupby('A')
first = grouped.first()
expected = df.loc[[1, 0], ['B', 'C', 'D', 'E', 'F']]
expected.index = Index(['bar', 'foo'], name='A')
expected = expected.sort_index()
assert_frame_equal(first, expected)
last = grouped.last()
expected = df.loc[[5, 7], ['B', 'C', 'D', 'E', 'F']]
expected.index = Index(['bar', 'foo'], name='A')
expected = expected.sort_index()
assert_frame_equal(last, expected)
nth = grouped.nth(1)
expected = df.loc[[3, 2], ['B', 'C', 'D', 'E', 'F']]
expected.index = Index(['bar', 'foo'], name='A')
expected = expected.sort_index()
assert_frame_equal(nth, expected)
# GH 2763, first/last shifting dtypes
idx = lrange(10)
idx.append(9)
s = Series(data=lrange(11), index=idx, name='IntCol')
assert s.dtype == 'int64'
f = s.groupby(level=0).first()
assert f.dtype == 'int64'
def test_nth(self):
df = DataFrame([[1, np.nan], [1, 4], [5, 6]], columns=['A', 'B'])
g = df.groupby('A')
assert_frame_equal(g.nth(0), df.iloc[[0, 2]].set_index('A'))
assert_frame_equal(g.nth(1), df.iloc[[1]].set_index('A'))
assert_frame_equal(g.nth(2), df.loc[[]].set_index('A'))
assert_frame_equal(g.nth(-1), df.iloc[[1, 2]].set_index('A'))
assert_frame_equal(g.nth(-2), df.iloc[[0]].set_index('A'))
assert_frame_equal(g.nth(-3), df.loc[[]].set_index('A'))
assert_series_equal(g.B.nth(0), df.set_index('A').B.iloc[[0, 2]])
assert_series_equal(g.B.nth(1), df.set_index('A').B.iloc[[1]])
assert_frame_equal(g[['B']].nth(0),
df.loc[[0, 2], ['A', 'B']].set_index('A'))
exp = df.set_index('A')
assert_frame_equal(g.nth(0, dropna='any'), exp.iloc[[1, 2]])
assert_frame_equal(g.nth(-1, dropna='any'), exp.iloc[[1, 2]])
exp['B'] = np.nan
assert_frame_equal(g.nth(7, dropna='any'), exp.iloc[[1, 2]])
assert_frame_equal(g.nth(2, dropna='any'), exp.iloc[[1, 2]])
# out of bounds, regression from 0.13.1
# GH 6621
df = DataFrame({'color': {0: 'green',
1: 'green',
2: 'red',
3: 'red',
4: 'red'},
'food': {0: 'ham',
1: 'eggs',
2: 'eggs',
3: 'ham',
4: 'pork'},
'two': {0: 1.5456590000000001,
1: -0.070345000000000005,
2: -2.4004539999999999,
3: 0.46206000000000003,
4: 0.52350799999999997},
'one': {0: 0.56573799999999996,
1: -0.9742360000000001,
2: 1.033801,
3: -0.78543499999999999,
4: 0.70422799999999997}}).set_index(['color',
'food'])
result = df.groupby(level=0, as_index=False).nth(2)
expected = df.iloc[[-1]]
assert_frame_equal(result, expected)
result = df.groupby(level=0, as_index=False).nth(3)
expected = df.loc[[]]
assert_frame_equal(result, expected)
# GH 7559
# from the vbench
df = DataFrame(np.random.randint(1, 10, (100, 2)), dtype='int64')
s = df[1]
g = df[0]
expected = s.groupby(g).first()
expected2 = s.groupby(g).apply(lambda x: x.iloc[0])
assert_series_equal(expected2, expected, check_names=False)
        assert expected.name == 1
# validate first
v = s[g == 1].iloc[0]
assert expected.iloc[0] == v
assert expected2.iloc[0] == v
# this is NOT the same as .first (as sorted is default!)
# as it keeps the order in the series (and not the group order)
# related GH 7287
expected = s.groupby(g, sort=False).first()
result = s.groupby(g, sort=False).nth(0, dropna='all')
assert_series_equal(result, expected)
# doc example
df = | DataFrame([[1, np.nan], [1, 4], [5, 6]], columns=['A', 'B']) | pandas.DataFrame |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Jun 10 17:22:51 2019
Work flow: to obtain the TD products for use with ZWD (after download):
1)use fill_fix_all_10mins_IMS_stations() after copying the downloaded TD
2)use IMS_interpolating_to_GNSS_stations_israel(dt=None, start_year=2019(latest))
3)use resample_GNSS_TD(path=ims_path) to resample all TD
@author: ziskin
"""
from PW_paths import work_yuval
from pathlib import Path
ims_path = work_yuval / 'IMS_T'
gis_path = work_yuval / 'gis'
ims_10mins_path = ims_path / '10mins'
awd_path = work_yuval/'AW3D30'
axis_path = work_yuval/'axis'
cwd = Path().cwd()
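# Sketch of the three-step TD workflow described in the module docstring.
# fill_fix_all_10mins_IMS_stations() and resample_GNSS_TD() are assumed to be
# defined elsewhere in this module/package; the arguments follow the docstring
# and the signatures below, and may need adjusting for a real run.
def _example_td_workflow():
    # 1) fill/fix all of the downloaded 10-min TD station files
    fill_fix_all_10mins_IMS_stations()
    # 2) interpolate the IMS TD field to the GNSS station locations
    IMS_interpolating_to_GNSS_stations_israel(dt=None, start_year='2019')
    # 3) resample all of the interpolated TD series
    resample_GNSS_TD(path=ims_path)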
# fill missing data:
# some_missing = ds.tmin.sel(time=ds['time.day'] > 15).reindex_like(ds)
# filled = some_missing.groupby('time.month').fillna(climatology.tmin)
# both = xr.Dataset({'some_missing': some_missing, 'filled': filled})
# kabr, nzrt, katz, elro, klhv, yrcm, slom have ims stations not close to them!
gnss_ims_dict = {
'alon': 'ASHQELON-PORT', 'bshm': 'HAIFA-TECHNION', 'csar': 'HADERA-PORT',
'tela': 'TEL-AVIV-COAST', 'slom': 'BESOR-FARM', 'kabr': 'SHAVE-ZIYYON',
'nzrt': 'DEIR-HANNA', 'katz': 'GAMLA', 'elro': 'MEROM-GOLAN-PICMAN',
'mrav': 'MAALE-GILBOA', 'yosh': 'ARIEL', 'jslm': 'JERUSALEM-GIVAT-RAM',
'drag': 'METZOKE-DRAGOT', 'dsea': 'SEDOM', 'ramo': 'MIZPE-RAMON-20120927',
'nrif': 'NEOT-SMADAR', 'elat': 'ELAT', 'klhv': 'SHANI',
'yrcm': 'ZOMET-HANEGEV', 'spir': 'PARAN-20060124', 'nizn': 'EZUZ'}
ims_units_dict = {
'BP': 'hPa',
'NIP': 'W/m^2',
'Rain': 'mm',
'TD': 'deg_C',
'WD': 'deg',
'WS': 'm/s',
'U': 'm/s',
'V': 'm/s',
'G': ''}
def save_daily_IMS_params_at_GNSS_loc(ims_path=ims_path,
param_name='WS', stations=[x for x in gnss_ims_dict.keys()]):
import xarray as xr
from aux_gps import save_ncfile
param = xr.open_dataset(
ims_path / 'IMS_{}_israeli_10mins.nc'.format(param_name))
ims_stns = [gnss_ims_dict.get(x) for x in stations]
param = param[ims_stns]
param = param.resample(time='D', keep_attrs=True).mean(keep_attrs=True)
inv_dict = {v: k for k, v in gnss_ims_dict.items()}
for da in param:
param = param.rename({da: inv_dict.get(da)})
filename = 'GNSS_{}_daily.nc'.format(param_name)
save_ncfile(param, ims_path, filename)
return param
def produce_bet_dagan_long_term_pressure(path=ims_path, rate='1H',
savepath=None, fill_from_jerusalem=True):
import xarray as xr
from aux_gps import xr_reindex_with_date_range
from aux_gps import get_unique_index
from aux_gps import save_ncfile
from aux_gps import anomalize_xr
# load manual old measurements and new 3 hr ones:
bd_man = xr.open_dataset(
path / 'IMS_hourly_03hr.nc')['BET-DAGAN-MAN_2520_ps']
bd_auto = xr.open_dataset(path / 'IMS_hourly_03hr.nc')['BET-DAGAN_2523_ps']
bd = xr.concat(
[bd_man.dropna('time'), bd_auto.dropna('time')], 'time', join='inner')
bd = get_unique_index(bd)
bd = bd.sortby('time')
bd = xr_reindex_with_date_range(bd, freq='1H')
# remove dayofyear mean, interpolate and reconstruct signal to fill it with climatology:
climatology = bd.groupby('time.dayofyear').mean(keep_attrs=True)
bd_anoms = anomalize_xr(bd, freq='DOY')
bd_inter = bd_anoms.interpolate_na(
'time', method='cubic', max_gap='24H', keep_attrs=True)
# bd_inter = bd.interpolate_na('time', max_gap='3H', method='cubic')
bd_inter = bd_inter.groupby('time.dayofyear') + climatology
bd_inter = bd_inter.reset_coords(drop=True)
# load 10-mins new measurements:
bd_10 = xr.open_dataset(path / 'IMS_BP_israeli_hourly.nc')['BET-DAGAN']
bd_10 = bd_10.dropna('time').sel(
time=slice(
'2019-06-30T00:00:00',
None)).resample(
time='1H').mean()
bd_inter = xr.concat([bd_inter, bd_10], 'time', join='inner')
bd_inter = get_unique_index(bd_inter)
bd_inter = bd_inter.sortby('time')
bd_inter.name = 'bet-dagan'
bd_inter.attrs['action'] = 'interpolated from 3H'
if fill_from_jerusalem:
print('filling missing gaps from 2018 with jerusalem')
jr_10 = xr.load_dataset(
path / 'IMS_BP_israeli_hourly.nc')['JERUSALEM-CENTRE']
climatology = bd_inter.groupby('time.dayofyear').mean(keep_attrs=True)
jr_10_anoms = anomalize_xr(jr_10, 'DOY')
bd_anoms = anomalize_xr(bd_inter, 'DOY')
bd_anoms = xr.concat(
[bd_anoms.dropna('time'), jr_10_anoms.dropna('time')], 'time', join='inner')
bd_anoms = get_unique_index(bd_anoms)
bd_anoms = bd_anoms.sortby('time')
bd_anoms = xr_reindex_with_date_range(bd_anoms, freq='5T')
bd_anoms = bd_anoms.interpolate_na(
'time', method='cubic', max_gap='2H')
bd_anoms.name = 'bet-dagan'
bd_anoms.attrs['action'] = 'interpolated from 3H'
bd_anoms.attrs['filled'] = 'using Jerusalem-centre'
bd_anoms.attrs['long_name'] = 'Pressure Anomalies'
bd_anoms.attrs['units'] = 'hPa'
bd_inter = bd_anoms.groupby('time.dayofyear') + climatology
bd_inter = bd_inter.resample(
time='1H', keep_attrs=True).mean(keep_attrs=True)
# if savepath is not None:
# yr_min = bd_anoms.time.min().dt.year.item()
# yr_max = bd_anoms.time.max().dt.year.item()
# filename = 'IMS_BD_anoms_5min_ps_{}-{}.nc'.format(
# yr_min, yr_max)
# save_ncfile(bd_anoms, savepath, filename)
# return bd_anoms
if savepath is not None:
# filename = 'IMS_BD_hourly_ps.nc'
yr_min = bd_inter.time.min().dt.year.item()
yr_max = bd_inter.time.max().dt.year.item()
filename = 'IMS_BD_hourly_ps_{}-{}.nc'.format(yr_min, yr_max)
save_ncfile(bd_inter, savepath, filename)
bd_anoms = anomalize_xr(bd_inter, 'DOY', units='std')
filename = 'IMS_BD_hourly_anoms_std_ps_{}-{}.nc'.format(yr_min, yr_max)
save_ncfile(bd_anoms, savepath, filename)
bd_anoms = anomalize_xr(bd_inter, 'DOY')
filename = 'IMS_BD_hourly_anoms_ps_{}-{}.nc'.format(yr_min, yr_max)
save_ncfile(bd_anoms, savepath, filename)
return bd_inter
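# Minimal sketch of the gap-filling pattern used above: subtract the
# day-of-year climatology, interpolate the anomalies, then add the
# climatology back. Works on any xarray DataArray with a 'time' dimension.
def _example_climatology_fill(da, max_gap='24H'):
    clim = da.groupby('time.dayofyear').mean(keep_attrs=True)
    anoms = da.groupby('time.dayofyear') - clim
    anoms = anoms.interpolate_na('time', method='cubic', max_gap=max_gap)
    return anoms.groupby('time.dayofyear') + clim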
def transform_wind_speed_direction_to_u_v(path=ims_path, savepath=ims_path):
import xarray as xr
import numpy as np
WS = xr.load_dataset(path / 'IMS_WS_israeli_10mins.nc')
WD = xr.load_dataset(path / 'IMS_WD_israeli_10mins.nc')
# change angles to math:
WD = 270 - WD
U = WS * np.cos(np.deg2rad(WD))
V = WS * np.sin(np.deg2rad(WD))
print('updating attrs...')
for station in WS:
attrs = WS[station].attrs
attrs.update(channel_name='U')
attrs.update(units='m/s')
attrs.update(field_name='zonal velocity')
U[station].attrs = attrs
attrs.update(channel_name='V')
attrs.update(field_name='meridional velocity')
V[station].attrs = attrs
if savepath is not None:
filename = 'IMS_U_israeli_10mins.nc'
comp = dict(zlib=True, complevel=9) # best compression
encoding = {var: comp for var in U.data_vars}
U.to_netcdf(savepath / filename, 'w', encoding=encoding)
filename = 'IMS_V_israeli_10mins.nc'
comp = dict(zlib=True, complevel=9) # best compression
encoding = {var: comp for var in V.data_vars}
V.to_netcdf(savepath / filename, 'w', encoding=encoding)
print('Done!')
return
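# Quick sanity check for the WS/WD -> (U, V) conversion above: wind reported
# FROM the west (WD = 270 deg) should yield a purely eastward (positive U)
# flow. Illustration only.
def _example_uv_conversion(ws=5.0, wd=270.0):
    import numpy as np
    wd_math = 270.0 - wd
    u = ws * np.cos(np.deg2rad(wd_math))   # -> +5.0 m/s (eastward)
    v = ws * np.sin(np.deg2rad(wd_math))   # -> 0.0 m/s
    return u, v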
def perform_harmonic_analysis_all_IMS(path=ims_path, var='BP', n=4,
savepath=ims_path):
import xarray as xr
from aux_gps import harmonic_analysis_xr
from aux_gps import keep_iqr
ims = xr.load_dataset(path / 'IMS_{}_israeli_10mins.nc'.format(var))
sites = [x for x in gnss_ims_dict.values()]
ims_actual_sites = [x for x in ims if x in sites]
ims = ims[ims_actual_sites]
if var == 'NIP':
ims = xr.merge([keep_iqr(ims[x]) for x in ims])
max_nip = ims.to_array('site').max()
ims /= max_nip
dss_list = []
for site in ims:
da = ims[site]
da = keep_iqr(da)
print('performing harmonic analysis for IMS {} field at {} site:'.format(var, site))
dss = harmonic_analysis_xr(da, n=n, anomalize=True, normalize=False)
dss_list.append(dss)
dss_all = xr.merge(dss_list)
dss_all.attrs['field'] = var
dss_all.attrs['units'] = ims_units_dict[var]
if savepath is not None:
filename = 'IMS_{}_harmonics_diurnal.nc'.format(var)
comp = dict(zlib=True, complevel=9) # best compression
encoding = {var: comp for var in dss_all.data_vars}
dss_all.to_netcdf(savepath / filename, 'w', encoding=encoding)
print('Done!')
return dss_all
def align_10mins_ims_to_gnss_and_save(ims_path=ims_path, field='G7',
gnss_ims_dict=gnss_ims_dict,
savepath=work_yuval):
import xarray as xr
d = dict(zip(gnss_ims_dict.values(), gnss_ims_dict.keys()))
gnss_list = []
for station, gnss_site in d.items():
print('loading IMS station {}'.format(station))
ims_field = xr.load_dataset(
ims_path / 'IMS_{}_israeli_10mins.nc'.format(field))[station]
gnss = ims_field.load()
gnss.name = gnss_site
gnss.attrs['IMS_station'] = station
gnss_list.append(gnss)
gnss_sites = xr.merge(gnss_list)
if savepath is not None:
filename = 'GNSS_IMS_{}_israeli_10mins.nc'.format(field)
print('saving {} to {}'.format(filename, savepath))
comp = dict(zlib=True, complevel=9) # best compression
encoding = {var: comp for var in gnss_sites.data_vars}
gnss_sites.to_netcdf(savepath / filename, 'w', encoding=encoding)
print('Done!')
return gnss_sites
def produce_10mins_gustiness(path=ims_path, rolling=5):
import xarray as xr
from aux_gps import keep_iqr
from aux_gps import xr_reindex_with_date_range
ws = xr.load_dataset(path / 'IMS_WS_israeli_10mins.nc')
stations = [x for x in ws.data_vars]
g_list = []
for station in stations:
print('proccesing station {}'.format(station))
attrs = ws[station].attrs
g = ws[station].rolling(time=rolling, center=True).std(
) / ws[station].rolling(time=rolling, center=True).mean()
g = keep_iqr(g)
g = xr_reindex_with_date_range(g, freq='10min')
g.name = station
g.attrs = attrs
g_list.append(g)
G = xr.merge(g_list)
filename = 'IMS_G{}_israeli_10mins.nc'.format(rolling)
print('saving {} to {}'.format(filename, path))
comp = dict(zlib=True, complevel=9) # best compression
encoding = {var: comp for var in G.data_vars}
G.to_netcdf(path / filename, 'w', encoding=encoding)
print('Done resampling!')
return G
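# Sketch of the gustiness definition used above: a centred rolling
# coefficient of variation (std / mean) of the 10-min wind speed.
# Synthetic numbers only.
def _example_gustiness(rolling=5):
    import numpy as np
    import pandas as pd
    import xarray as xr
    t = pd.date_range('2020-01-01', periods=50, freq='10min')
    ws = xr.DataArray(5 + np.random.randn(50), dims='time', coords={'time': t})
    return (ws.rolling(time=rolling, center=True).std() /
            ws.rolling(time=rolling, center=True).mean())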
def produce_10mins_absolute_humidity(path=ims_path):
from sounding_procedures import wrap_xr_metpy_mixing_ratio
from aux_gps import dim_intersection
import xarray as xr
P = xr.load_dataset(path / 'IMS_BP_israeli_10mins.nc')
stations = [x for x in P.data_vars]
T = xr.open_dataset(path / 'IMS_TD_israeli_10mins.nc')
T = T[stations].load()
RH = xr.open_dataset(path / 'IMS_RH_israeli_10mins.nc')
RH = RH[stations].load()
mr_list = []
for station in stations:
print('proccesing station {}'.format(station))
p = P[station]
t = T[station]
rh = RH[station]
new_time = dim_intersection([p, t, rh])
p = p.sel(time=new_time)
rh = rh.sel(time=new_time)
t = t.sel(time=new_time)
mr = wrap_xr_metpy_mixing_ratio(p, t, rh, verbose=True)
mr_list.append(mr)
MR = xr.merge(mr_list)
filename = 'IMS_MR_israeli_10mins.nc'
print('saving {} to {}'.format(filename, path))
comp = dict(zlib=True, complevel=9) # best compression
encoding = {var: comp for var in MR.data_vars}
MR.to_netcdf(path / filename, 'w', encoding=encoding)
print('Done resampling!')
return MR
def produce_wind_frequency_gustiness(path=ims_path,
station='TEL-AVIV-COAST',
season='DJF', plot=True):
import xarray as xr
import matplotlib.pyplot as plt
import numpy as np
from aux_gps import keep_iqr
ws = xr.open_dataset(path / 'IMS_WS_israeli_10mins.nc')[station]
ws.load()
ws = ws.sel(time=ws['time.season'] == season)
gustiness = ws.rolling(time=5).std() / ws.rolling(time=5).mean()
gustiness = keep_iqr(gustiness)
gustiness_anoms = gustiness.groupby(
'time.month') - gustiness.groupby('time.month').mean('time')
gustiness_anoms = gustiness_anoms.reset_coords(drop=True)
G = gustiness_anoms.groupby('time.hour').mean('time')
wd = xr.open_dataset(path / 'IMS_WD_israeli_10mins.nc')[station]
wd.load()
wd.name = 'WD'
wd = wd.sel(time=wd['time.season'] == season)
all_Q = wd.groupby('time.hour').count()
Q1 = wd.where((wd >= 0) & (wd < 90)).dropna('time')
Q2 = wd.where((wd >= 90) & (wd < 180)).dropna('time')
Q3 = wd.where((wd >= 180.1) & (wd < 270)).dropna('time')
Q4 = wd.where((wd >= 270) & (wd < 360)).dropna('time')
Q = xr.concat([Q1, Q2, Q3, Q4], 'Q')
Q['Q'] = [x + 1 for x in range(4)]
Q_freq = 100.0 * (Q.groupby('time.hour').count() / all_Q)
if plot:
fig, ax = plt.subplots(figsize=(16, 8))
for q in Q_freq['Q']:
Q_freq.sel(Q=q).plot(ax=ax)
ax.set_title(
'Relative wind direction frequency in {} IMS station in {} season'.format(
station, season))
ax.set_ylabel('Relative frequency [%]')
ax.set_xlabel('Time of day [UTC]')
ax.set_xticks(np.arange(0, 24, step=1))
ax.legend([r'0$\degree$-90$\degree$', r'90$\degree$-180$\degree$',
r'180$\degree$-270$\degree$', r'270$\degree$-360$\degree$'], loc='upper left')
ax.grid()
ax2 = ax.twinx()
G.plot.line(ax=ax2, color='k', marker='o')
ax2.axhline(0, color='k', linestyle='--')
ax2.legend(['{} Gustiness anomalies'.format(station)],
loc='upper right')
ax2.set_ylabel('Gustiness anomalies')
return
def produce_gustiness(path=ims_path,
station='TEL-AVIV-COAST',
season='DJF', pw_station='tela', temp=False,
ax=None):
import xarray as xr
import matplotlib.pyplot as plt
import numpy as np
from aux_gps import keep_iqr
from aux_gps import groupby_date_xr
from matplotlib.ticker import FixedLocator
def align_yaxis(ax1, v1, ax2, v2):
"""adjust ax2 ylimit so that v2 in ax2 is aligned to v1 in ax1"""
_, y1 = ax1.transData.transform((0, v1))
_, y2 = ax2.transData.transform((0, v2))
adjust_yaxis(ax2, (y1-y2)/2, v2)
adjust_yaxis(ax1, (y2-y1)/2, v1)
def adjust_yaxis(ax, ydif, v):
"""shift axis ax by ydiff, maintaining point v at the same location"""
inv = ax.transData.inverted()
_, dy = inv.transform((0, 0)) - inv.transform((0, ydif))
miny, maxy = ax.get_ylim()
miny, maxy = miny - v, maxy - v
if -miny > maxy or (-miny == maxy and dy > 0):
nminy = miny
nmaxy = miny*(maxy+dy)/(miny+dy)
else:
nmaxy = maxy
nminy = maxy*(miny+dy)/(maxy+dy)
ax.set_ylim(nminy+v, nmaxy+v)
def make_patch_spines_invisible(ax):
ax.set_frame_on(True)
ax.patch.set_visible(False)
for sp in ax.spines.values():
sp.set_visible(False)
print('loading {} IMS station...'.format(station))
g = xr.open_dataset(path / 'IMS_G_israeli_10mins.nc')[station]
g.load()
g = g.sel(time=g['time.season'] == season)
date = groupby_date_xr(g)
# g_anoms = g.groupby('time.month') - g.groupby('time.month').mean('time')
g_anoms = g.groupby(date) - g.groupby(date).mean('time')
g_anoms = g_anoms.reset_coords(drop=True)
G = g_anoms.groupby('time.hour').mean('time')
if ax is None:
fig, ax = plt.subplots(figsize=(16, 8))
G.plot(ax=ax, color='b', marker='o')
ax.set_title(
'Gustiness {} IMS station in {} season'.format(
station, season))
ax.axhline(0, color='b', linestyle='--')
ax.set_ylabel('Gustiness anomalies [dimensionless]', color='b')
ax.set_xlabel('Time of day [UTC]')
ax.set_xticks(np.arange(0, 24, step=1))
ax.yaxis.label.set_color('b')
ax.tick_params(axis='y', colors='b')
ax.grid()
if pw_station is not None:
pw = xr.open_dataset(
work_yuval /
'GNSS_PW_thresh_50_homogenized.nc')[pw_station]
pw.load().dropna('time')
pw = pw.sel(time=pw['time.season'] == season)
date = groupby_date_xr(pw)
pw = pw.groupby(date) - pw.groupby(date).mean('time')
pw = pw.reset_coords(drop=True)
pw = pw.groupby('time.hour').mean()
axpw = ax.twinx()
pw.plot.line(ax=axpw, color='k', marker='o')
axpw.axhline(0, color='k', linestyle='--')
axpw.legend(['{} PW anomalies'.format(
pw_station.upper())], loc='upper right')
axpw.set_ylabel('PW anomalies [mm]')
align_yaxis(ax, 0, axpw, 0)
if temp:
axt = ax.twinx()
axt.spines["right"].set_position(("axes", 1.05))
# Having been created by twinx, par2 has its frame off, so the line of its
# detached spine is invisible. First, activate the frame but make the patch
# and spines invisible.
make_patch_spines_invisible(axt)
# Second, show the right spine.
axt.spines["right"].set_visible(True)
p3, = T.plot.line(ax=axt, marker='s', color='m',
label="Temperature")
axt.yaxis.label.set_color(p3.get_color())
axt.tick_params(axis='y', colors=p3.get_color())
axt.set_ylabel('Temperature anomalies [$C\degree$]')
return G
def produce_relative_frequency_wind_direction(path=ims_path,
station='TEL-AVIV-COAST',
season='DJF', with_weights=False,
pw_station='tela', temp=False,
plot=True):
import xarray as xr
import matplotlib.pyplot as plt
import numpy as np
def make_patch_spines_invisible(ax):
ax.set_frame_on(True)
ax.patch.set_visible(False)
for sp in ax.spines.values():
sp.set_visible(False)
wd = xr.open_dataset(path / 'IMS_WD_israeli_10mins.nc')[station]
wd.load()
wd.name = 'WD'
wd = wd.sel(time=wd['time.season'] == season)
all_Q = wd.groupby('time.hour').count()
Q1 = wd.where((wd >= 0) & (wd < 90)).dropna('time')
Q2 = wd.where((wd >= 90) & (wd < 180)).dropna('time')
Q3 = wd.where((wd >= 180.1) & (wd < 270)).dropna('time')
Q4 = wd.where((wd >= 270) & (wd < 360)).dropna('time')
Q = xr.concat([Q1, Q2, Q3, Q4], 'Q')
Q['Q'] = [x + 1 for x in range(4)]
Q_freq = 100.0 * (Q.groupby('time.hour').count() / all_Q)
T = xr.open_dataset(path / 'IMS_TD_israeli_10mins.nc')[station]
T.load()
T = T.groupby('time.month') - T.groupby('time.month').mean('time')
T = T.reset_coords(drop=True)
T = T.sel(time=T['time.season'] == season)
T = T.groupby('time.hour').mean('time')
if with_weights:
ws = xr.open_dataset(path / 'IMS_WS_israeli_10mins.nc')[station]
ws.load()
ws = ws.sel(time=ws['time.season'] == season)
ws.name = 'WS'
wind = xr.merge([ws, wd])
wind = wind.dropna('time')
all_Q = wind['WD'].groupby('time.hour').count()
Q1 = wind['WS'].where(
(wind['WD'] >= 0) & (wind['WD'] < 90)).dropna('time')
Q2 = wind['WS'].where(
(wind['WD'] >= 90) & (wind['WD'] < 180)).dropna('time')
Q3 = wind['WS'].where(
(wind['WD'] >= 180) & (wind['WD'] < 270)).dropna('time')
Q4 = wind['WS'].where(
(wind['WD'] >= 270) & (wind['WD'] < 360)).dropna('time')
Q = xr.concat([Q1, Q2, Q3, Q4], 'Q')
Q['Q'] = [x + 1 for x in range(4)]
Q_ratio = (Q.groupby('time.hour').count() / all_Q)
Q_mean = Q.groupby('time.hour').mean() / Q.groupby('time.hour').max()
Q_freq = 100 * ((Q_mean * Q_ratio) / (Q_mean * Q_ratio).sum('Q'))
if plot:
fig, ax = plt.subplots(figsize=(16, 8))
for q in Q_freq['Q']:
Q_freq.sel(Q=q).plot(ax=ax)
ax.set_title(
'Relative wind direction frequency in {} IMS station in {} season'.format(
station, season))
ax.set_ylabel('Relative frequency [%]')
ax.set_xlabel('Time of day [UTC]')
ax.legend([r'0$\degree$-90$\degree$', r'90$\degree$-180$\degree$',
r'180$\degree$-270$\degree$', r'270$\degree$-360$\degree$'], loc='upper left')
ax.set_xticks(np.arange(0, 24, step=1))
ax.grid()
if pw_station is not None:
pw = xr.open_dataset(
work_yuval /
'GNSS_PW_thresh_50_homogenized.nc')[pw_station]
pw.load().dropna('time')
pw = pw.groupby('time.month') - \
pw.groupby('time.month').mean('time')
pw = pw.reset_coords(drop=True)
pw = pw.sel(time=pw['time.season'] == season)
pw = pw.groupby('time.hour').mean()
axpw = ax.twinx()
pw.plot.line(ax=axpw, color='k', marker='o')
axpw.axhline(0, color='k', linestyle='--')
axpw.legend(['{} PW anomalies'.format(
pw_station.upper())], loc='upper right')
axpw.set_ylabel('PW anomalies [mm]')
if temp:
axt = ax.twinx()
axt.spines["right"].set_position(("axes", 1.05))
# Having been created by twinx, par2 has its frame off, so the line of its
# detached spine is invisible. First, activate the frame but make the patch
# and spines invisible.
make_patch_spines_invisible(axt)
# Second, show the right spine.
axt.spines["right"].set_visible(True)
p3, = T.plot.line(ax=axt, marker='s',
color='m', label="Temperature")
axt.yaxis.label.set_color(p3.get_color())
axt.tick_params(axis='y', colors=p3.get_color())
axt.set_ylabel('Temperature anomalies [$C\degree$]')
return Q_freq
def plot_closest_line_from_point_to_israeli_coast(point, ax=None, epsg=None,
path=gis_path, color='k',
ls='-', lw=1.0):
import matplotlib.pyplot as plt
from shapely.geometry import LineString
from pyproj import Geod
"""returns the distance in kms"""
coast_gdf = get_israeli_coast_line(path=path, epsg=epsg)
coast_pts = coast_gdf.geometry.unary_union
point_in_coast = get_closest_point_from_a_line_to_a_point(point, coast_pts)
AB = LineString([point_in_coast, point])
if ax is None:
fig, ax = plt.subplots()
ax.plot(*AB.xy, color='k', linestyle=ls, linewidth=lw)
geod = Geod(ellps="WGS84")
distance = geod.geometry_length(AB) / 1000.0
return distance
def get_closest_point_from_a_line_to_a_point(point, line):
from shapely.ops import nearest_points
p1, p2 = nearest_points(point, line)
return p2
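# Illustrative example of the helper above combined with pyproj: snap an
# arbitrary lon/lat point onto a line and measure the geodesic separation
# in km. The coordinates are made-up values, not real station locations.
def _example_point_to_line_distance_km():
    from shapely.geometry import Point, LineString
    from pyproj import Geod
    line = LineString([(34.0, 31.0), (34.5, 33.0)])
    point = Point(35.0, 32.0)
    closest = get_closest_point_from_a_line_to_a_point(point, line)
    geod = Geod(ellps="WGS84")
    return geod.geometry_length(LineString([closest, point])) / 1000.0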
def get_israeli_coast_line(path=gis_path, minx=34.0, miny=30.0, maxx=36.0,
maxy=34.0, epsg=None):
"""use epsg=2039 to return in meters"""
from shapely.geometry import box
import geopandas as gpd
# create bounding box using shapely:
bbox = box(minx, miny, maxx, maxy)
# read world coast lines:
coast = gpd.read_file(gis_path / 'ne_10m_coastline.shp')
# clip:
gdf = gpd.clip(coast, bbox)
if epsg is not None:
gdf = gdf.to_crs('epsg:{}'.format(epsg))
return gdf
def clip_raster(fp=awd_path/'Israel_Area.tif',
out_tif=awd_path/'israel_dem.tif',
minx=34.0, miny=29.0, maxx=36.5, maxy=34.0):
def getFeatures(gdf):
"""Function to parse features from GeoDataFrame in such a manner that
rasterio wants them"""
import json
return [json.loads(gdf.to_json())['features'][0]['geometry']]
import rasterio
from rasterio.plot import show
from rasterio.plot import show_hist
from rasterio.mask import mask
from shapely.geometry import box
import geopandas as gpd
from fiona.crs import from_epsg
import pycrs
print('reading {}'.format(fp))
data = rasterio.open(fp)
# create bounding box using shapely:
bbox = box(minx, miny, maxx, maxy)
# insert the bbox into a geodataframe:
geo = gpd.GeoDataFrame({'geometry': bbox}, index=[0], crs=from_epsg(4326))
# re-project with the same projection as the data:
geo = geo.to_crs(crs=data.crs.data)
# get the geometry coords:
coords = getFeatures(geo)
# clipping is done with mask:
out_img, out_transform = mask(dataset=data, shapes=coords, crop=True)
# copy meta data:
out_meta = data.meta.copy()
# parse the epsg code:
epsg_code = int(data.crs.data['init'][5:])
# update the meta data:
out_meta.update({"driver": "GTiff",
"height": out_img.shape[1],
"width": out_img.shape[2],
"transform": out_transform,
"crs": pycrs.parse.from_epsg_code(epsg_code).to_proj4()})
# save to disk:
print('saving {} to disk.'.format(out_tif))
with rasterio.open(out_tif, "w", **out_meta) as dest:
dest.write(out_img)
print('Done!')
return
def create_israel_area_dem(path):
"""merge the raw DSM tif files from AW3D30 model of Israel area togather"""
from aux_gps import path_glob
import rasterio
from rasterio.merge import merge
src_files_to_mosaic = []
files = path_glob(path, '*DSM*.tif')
for fp in files:
src = rasterio.open(fp)
src_files_to_mosaic.append(src)
mosaic, out_trans = merge(src_files_to_mosaic)
out_meta = src.meta.copy()
out_meta.update({"driver": "GTiff",
"height": mosaic.shape[1],
"width": mosaic.shape[2],
"transform": out_trans,
"crs": src.crs
}
)
with rasterio.open(path/'Israel_Area.tif', "w", **out_meta) as dest:
dest.write(mosaic)
return
def parse_cv_results(grid_search_cv):
from aux_gps import process_gridsearch_results
"""parse cv_results from GridsearchCV object"""
# only supports neg-abs-mean-error with leaveoneout
from sklearn.model_selection import LeaveOneOut
if (isinstance(grid_search_cv.cv, LeaveOneOut)
and grid_search_cv.scoring == 'neg_mean_absolute_error'):
cds = process_gridsearch_results(grid_search_cv)
cds = - cds
return cds
def IMS_interpolating_to_GNSS_stations_israel(dt='2013-10-19T22:00:00',
stations=None,
lapse_rate='auto',
method='okrig',
variogram='spherical',
n_neighbors=3,
start_year='1996',
cut_days_ago=3,
plot=False,
verbose=False,
savepath=ims_path,
network='soi-apn',
axis_path=axis_path,
ds_td=None):
"""interpolate the IMS 10 mins field(e.g., TD) to the location
of the GNSS sites in ISRAEL(use dt=None for this). other dt is treated
as datetime str and will give the "snapshot" for the field for just this
datetime"""
from pykrige.rk import Krige
import pandas as pd
from aux_gps import path_glob
import xarray as xr
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
import geopandas as gpd
from sklearn.neighbors import KNeighborsRegressor
from axis_process import read_axis_stations
# import time
def pick_model(method, variogram, n_neighbors):
if method == 'okrig':
if variogram is not None:
model = Krige(method='ordinary', variogram_model=variogram,
verbose=verbose)
else:
model = Krige(method='ordinary', variogram_model='linear',
verbose=verbose)
elif method == 'knn':
if n_neighbors is None:
model = KNeighborsRegressor(n_neighbors=5, weights='distance')
else:
model = KNeighborsRegressor(
n_neighbors=n_neighbors, weights='distance')
else:
raise Exception('{} is not supported yet...'.format(method))
return model
def prepare_Xy(ts_lr_neutral, T_lats, T_lons):
import numpy as np
df = ts_lr_neutral.to_frame()
df['lat'] = T_lats
df['lon'] = T_lons
# df = df.dropna(axis=0)
c = np.linspace(
df['lat'].min(),
df['lat'].max(),
df['lat'].shape[0])
r = np.linspace(
df['lon'].min(),
df['lon'].max(),
df['lon'].shape[0])
rr, cc = np.meshgrid(r, c)
vals = ~np.isnan(ts_lr_neutral)
X = np.column_stack([rr[vals, vals], cc[vals, vals]])
# rr_cc_as_cols = np.column_stack([rr.flatten(), cc.flatten()])
# y = da_scaled.values[vals]
y = ts_lr_neutral[vals]
return X, y
def neutrilize_t(ts_vs_alt, lapse_rate):
ts_lr_neutral = (ts_vs_alt +
lapse_rate *
ts_vs_alt.index /
1000.0)
return ts_lr_neutral
def choose_dt_and_lapse_rate(tdf, dt, T_alts, lapse_rate):
ts = tdf.loc[dt, :]
# dt_col = dt.strftime('%Y-%m-%d %H:%M')
# ts.name = dt_col
# Tloc_df = Tloc_df.join(ts, how='right')
# Tloc_df = Tloc_df.dropna(axis=0)
ts_vs_alt = pd.Series(ts.values, index=T_alts)
ts_vs_alt_for_fit = ts_vs_alt.dropna()
# try:
[a, b] = np.polyfit(ts_vs_alt_for_fit.index.values,
ts_vs_alt_for_fit.values, 1)
# except TypeError as e:
# print('{}, dt: {}'.format(e, dt))
# print(ts_vs_alt)
# return
if lapse_rate == 'auto':
lapse_rate = np.abs(a) * 1000
if lapse_rate < 5.0:
lapse_rate = 5.0
elif lapse_rate > 10.0:
lapse_rate = 10.0
return ts_vs_alt, lapse_rate
# import time
    dt = pd.to_datetime(dt)
import numpy as np
import torch
import torch.nn.functional as F
import torch.nn as nn
import torch.optim as optim
import copy
from utils import get_dataloader
from defense import RFA, Krum, WeightDiffClippingDefense, AddNoise
from models.vgg import get_vgg_model, logger
import pandas as pd
from torch.nn.utils import parameters_to_vector, vector_to_parameters
import datasets
'''
Neural Network Architecture
'''
class Net(nn.Module):
def __init__(self, num_classes):
super(Net, self).__init__()
self.conv1 = nn.Conv2d(1, 32, 3, 1)
self.conv2 = nn.Conv2d(32, 64, 3, 1)
self.dropout1 = nn.Dropout2d(0.25)
self.dropout2 = nn.Dropout2d(0.5)
self.fc1 = nn.Linear(9216, 128)
self.fc2 = nn.Linear(128, num_classes)
def forward(self, x):
x = self.conv1(x)
x = F.relu(x)
x = self.conv2(x)
x = F.relu(x)
x = F.max_pool2d(x, 2)
x = self.dropout1(x)
x = torch.flatten(x, 1)
x = self.fc1(x)
x = F.relu(x)
x = self.dropout2(x)
output = self.fc2(x)
#output = F.log_softmax(x, dim=1)
return output
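'''
Shape sanity check for the Net above (illustration only, not used by the
trainer): a single-channel 28x28 input, as in MNIST/EMNIST, should yield one
logit vector per sample.
'''
def _example_net_forward(num_classes=10):
    net = Net(num_classes=num_classes)
    x = torch.randn(2, 1, 28, 28)      # batch of 2 grayscale 28x28 images
    logits = net(x)
    return logits.shape                # -> torch.Size([2, num_classes])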
'''
Function to get the file name
'''
def get_results_filename(poison_type, attack_method, model_replacement, project_frequency, defense_method, norm_bound, prox_attack, attacker_pool_size, fixed_pool=False, model_arch="vgg9"):
filename = "{}_{}_{}".format(poison_type, model_arch, attack_method)
if fixed_pool:
filename += "_fixed_pool"
if model_replacement:
filename += "_with_replacement"
else:
filename += "_without_replacement"
if attack_method == "pgd":
filename += "_1_{}".format(project_frequency)
if prox_attack:
filename += "_prox_attack"
if defense_method in ("norm-clipping", "norm-clipping-adaptive", "weak-dp"):
filename += "_{}_m_{}".format(defense_method, norm_bound)
elif defense_method in ("krum", "multi-krum", "rfa"):
filename += "_{}".format(defense_method)
filename += 'attack_pool_size_' + str(attacker_pool_size)
filename += "_acc_results.csv"
return filename
'''
Calculates L2 norm between gs_model and vanilla_model.
'''
def calc_norm_diff(gs_model, vanilla_model):
norm_diff = 0
for p_index, _ in enumerate(gs_model.parameters()):
norm_diff += torch.norm(list(gs_model.parameters())[p_index] - list(vanilla_model.parameters())[p_index]) ** 2
norm_diff = torch.sqrt(norm_diff).item()
return norm_diff
'''
XXX - aggregation of updates received from all the clients.
XXX - What other federated aggregation algorithms can be implemented?
'''
def fed_avg_aggregator(net_list, net_freq, device, model="lenet"):
#net_avg = VGG('VGG11').to(device)
if model == "lenet":
net_avg = Net(num_classes=10).to(device)
elif model in ("vgg9", "vgg11", "vgg13", "vgg16"):
net_avg = get_vgg_model(model).to(device)
whole_aggregator = []
for p_index, p in enumerate(net_list[0].parameters()):
# initial
params_aggregator = torch.zeros(p.size()).to(device)
for net_index, net in enumerate(net_list):
# we assume the adv model always comes to the beginning
params_aggregator = params_aggregator + net_freq[net_index] * list(net.parameters())[p_index].data
whole_aggregator.append(params_aggregator)
for param_index, p in enumerate(net_avg.parameters()):
p.data = whole_aggregator[param_index]
return net_avg
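'''
Minimal sketch of the weighted FedAvg rule implemented above, on plain tensors
instead of full models: the aggregate is sum_i freq_i * w_i. Illustration only.
'''
def _example_fedavg_weights():
    client_weights = [torch.tensor([1.0, 2.0]), torch.tensor([3.0, 4.0])]
    net_freq = [0.25, 0.75]
    agg = sum(f * w for f, w in zip(net_freq, client_weights))
    return agg                         # -> tensor([2.5000, 3.5000])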
'''
1. Does some training: but is it benign or adversarial (most likely adversarial) (XXX)?
'''
def estimate_wg(model, device, train_loader, optimizer, epoch, log_interval, criterion):
logger.info("Prox-attack: Estimating wg_hat")
model.train()
for batch_idx, (data, target) in enumerate(train_loader):
data, target = data.to(device), target.to(device)
optimizer.zero_grad()
output = model(data)
loss = criterion(output, target)
loss.backward()
optimizer.step()
if batch_idx % log_interval == 0:
logger.info('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format(
epoch, batch_idx * len(data), len(train_loader.dataset),
100. * batch_idx / len(train_loader), loss.item()))
'''
train function for both honest nodes and adversary.
NOTE: this trains only for one epoch
NOTE: this function is not stateless as it changes the value of variable model.
'''
def train(model, device, train_loader, optimizer, epoch, log_interval, criterion, pgd_attack=False, eps=5e-4, model_original=None,
proj="l_2", project_frequency=1, adv_optimizer=None, prox_attack=False, wg_hat=None):
"""
train function for both honest nodes and adversary.
NOTE: this trains only for one epoch
"""
model.train()
# get learning rate
for param_group in optimizer.param_groups:
eta = param_group['lr']
for batch_idx, (data, target) in enumerate(train_loader):
data, target = data.to(device), target.to(device)
optimizer.zero_grad()
if pgd_attack:
adv_optimizer.zero_grad()
output = model(data)
#loss = F.nll_loss(output, target)
loss = criterion(output, target)
if prox_attack:
wg_hat_vec = parameters_to_vector(list(wg_hat.parameters()))
model_vec = parameters_to_vector(list(model.parameters()))
prox_term = torch.norm(wg_hat_vec - model_vec)**2
loss = loss + prox_term
loss.backward()
if not pgd_attack:
optimizer.step()
else:
if proj == "l_inf":
w = list(model.parameters())
n_layers = len(w)
# adversarial learning rate
eta = 0.001
for i in range(len(w)):
# uncomment below line to restrict proj to some layers
if True:#i == 6 or i == 8 or i == 10 or i == 0 or i == 18:
w[i].data = w[i].data - eta * w[i].grad.data
# projection step
m1 = torch.lt(torch.sub(w[i], model_original[i]), -eps)
m2 = torch.gt(torch.sub(w[i], model_original[i]), eps)
w1 = (model_original[i] - eps) * m1
w2 = (model_original[i] + eps) * m2
w3 = (w[i]) * (~(m1+m2))
wf = w1+w2+w3
w[i].data = wf.data
else:
# do l2_projection
adv_optimizer.step()
w = list(model.parameters())
w_vec = parameters_to_vector(w)
model_original_vec = parameters_to_vector(model_original)
# make sure you project on last iteration otherwise, high LR pushes you really far
if (batch_idx%project_frequency == 0 or batch_idx == len(train_loader)-1) and (torch.norm(w_vec - model_original_vec) > eps):
# project back into norm ball
w_proj_vec = eps*(w_vec - model_original_vec)/torch.norm(
w_vec-model_original_vec) + model_original_vec
# plug w_proj back into model
vector_to_parameters(w_proj_vec, w)
# for i in range(n_layers):
# # uncomment below line to restrict proj to some layers
# if True:#i == 16 or i == 17:
# w[i].data = w[i].data - eta * w[i].grad.data
# if torch.norm(w[i] - model_original[i]) > eps/n_layers:
# # project back to norm ball
# w_proj= (eps/n_layers)*(w[i]-model_original[i])/torch.norm(
# w[i]-model_original[i]) + model_original[i]
# w[i].data = w_proj
if batch_idx % log_interval == 0:
logger.info('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format(
epoch, batch_idx * len(data), len(train_loader.dataset),
100. * batch_idx / len(train_loader), loss.item()))
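'''
Sketch of the l_2 projection step used by the PGD attack above: if the current
parameter vector drifts farther than eps from the reference w0, pull it back
onto the surface of the eps-ball around w0. Standalone illustration.
'''
def _example_l2_projection(w_vec, w0_vec, eps):
    diff = w_vec - w0_vec
    norm = torch.norm(diff)
    if norm > eps:
        return w0_vec + eps * diff / norm
    return w_vec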
'''
Tests the accuracy of the model on test dataset and tast dataset.
'''
def test(model, device, test_loader, test_batch_size, criterion, mode="raw-task", dataset="cifar10", poison_type="fashion"):
class_correct = list(0. for i in range(10))
class_total = list(0. for i in range(10))
if dataset in ("mnist", "emnist"):
target_class = 7
if mode == "raw-task":
classes = [str(i) for i in range(10)]
elif mode == "targetted-task":
if poison_type == 'ardis':
classes = [str(i) for i in range(10)]
else:
classes = ["T-shirt/top",
"Trouser",
"Pullover",
"Dress",
"Coat",
"Sandal",
"Shirt",
"Sneaker",
"Bag",
"Ankle boot"]
elif dataset == "cifar10":
classes = ('plane', 'car', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck')
# target_class = 2 for greencar, 9 for southwest
if poison_type in ("howto", "greencar-neo"):
target_class = 2
else:
target_class = 9
model.eval()
test_loss = 0
correct = 0
backdoor_correct = 0
backdoor_tot = 0
final_acc = 0
task_acc = None
with torch.no_grad():
for data, target in test_loader:
data, target = data.to(device), target.to(device)
output = model(data)
_, predicted = torch.max(output, 1)
c = (predicted == target).squeeze()
#test_loss += F.nll_loss(output, target, reduction='sum').item() # sum up batch loss
test_loss += criterion(output, target).item()
pred = output.argmax(dim=1, keepdim=True) # get the index of the max log-probability
correct += pred.eq(target.view_as(pred)).sum().item()
# check backdoor accuracy
if poison_type == 'ardis':
backdoor_index = torch.where(target == target_class)
target_backdoor = torch.ones_like(target[backdoor_index])
predicted_backdoor = predicted[backdoor_index]
backdoor_correct += (predicted_backdoor == target_backdoor).sum().item()
backdoor_tot = backdoor_index[0].shape[0]
# logger.info("Target: {}".format(target_backdoor))
# logger.info("Predicted: {}".format(predicted_backdoor))
#for image_index in range(test_batch_size):
for image_index in range(len(target)):
label = target[image_index]
class_correct[label] += c[image_index].item()
class_total[label] += 1
test_loss /= len(test_loader.dataset)
if mode == "raw-task":
for i in range(10):
logger.info('Accuracy of %5s : %.2f %%' % (
classes[i], 100 * class_correct[i] / class_total[i]))
if i == target_class:
task_acc = 100 * class_correct[i] / class_total[i]
logger.info('\nTest set: Average loss: {:.4f}, Accuracy: {}/{} ({:.2f}%)\n'.format(
test_loss, correct, len(test_loader.dataset),
100. * correct / len(test_loader.dataset)))
final_acc = 100. * correct / len(test_loader.dataset)
elif mode == "targetted-task":
if dataset in ("mnist", "emnist"):
for i in range(10):
logger.info('Accuracy of %5s : %.2f %%' % (
classes[i], 100 * class_correct[i] / class_total[i]))
if poison_type == 'ardis':
# ensure 7 is being classified as 1
logger.info('Backdoor Accuracy of %.2f : %.2f %%' % (
target_class, 100 * backdoor_correct / backdoor_tot))
final_acc = 100 * backdoor_correct / backdoor_tot
else:
# trouser acc
final_acc = 100 * class_correct[1] / class_total[1]
elif dataset == "cifar10":
logger.info('#### Targetted Accuracy of %5s : %.2f %%' % (classes[target_class], 100 * class_correct[target_class] / class_total[target_class]))
final_acc = 100 * class_correct[target_class] / class_total[target_class]
return final_acc, task_acc
'''
Base Class for two different FL trainer classes
'''
class FederatedLearningTrainer:
def __init__(self, *args, **kwargs):
self.hyper_params = None
def run(self, client_model, *args, **kwargs):
raise NotImplementedError()
'''
Main Class for FL: Frequency Federated Learning
'''
class FrequencyFederatedLearningTrainer(FederatedLearningTrainer):
def __init__(self, arguments=None, *args, **kwargs):
#self.poisoned_emnist_dataset = arguments['poisoned_emnist_dataset']
self.vanilla_model = arguments['vanilla_model']
self.net_avg = arguments['net_avg']
self.net_dataidx_map = arguments['net_dataidx_map']
self.num_nets = arguments['num_nets']
self.part_nets_per_round = arguments['part_nets_per_round']
self.fl_round = arguments['fl_round']
self.local_training_period = arguments['local_training_period']
self.adversarial_local_training_period = arguments['adversarial_local_training_period']
self.args_lr = arguments['args_lr']
self.args_gamma = arguments['args_gamma']
self.attacking_fl_rounds = arguments['attacking_fl_rounds']
self.poisoned_emnist_train_loader = arguments['poisoned_emnist_train_loader']
self.clean_train_loader = arguments['clean_train_loader']
self.vanilla_emnist_test_loader = arguments['vanilla_emnist_test_loader']
self.targetted_task_test_loader = arguments['targetted_task_test_loader']
self.batch_size = arguments['batch_size']
self.test_batch_size = arguments['test_batch_size']
self.log_interval = arguments['log_interval']
self.device = arguments['device']
self.num_dps_poisoned_dataset = arguments['num_dps_poisoned_dataset']
self.defense_technique = arguments["defense_technique"]
self.norm_bound = arguments["norm_bound"]
self.attack_method = arguments["attack_method"]
self.dataset = arguments["dataset"]
self.model = arguments["model"]
self.criterion = nn.CrossEntropyLoss()
self.eps = arguments['eps']
self.poison_type = arguments['poison_type']
self.model_replacement = arguments['model_replacement']
self.project_frequency = arguments['project_frequency']
self.adv_lr = arguments['adv_lr']
self.prox_attack = arguments['prox_attack']
self.attack_case = arguments['attack_case']
self.stddev = arguments['stddev']
self.attacker_pool_size = arguments['attacker_pool_size']
logger.info("Posion type! {}".format(self.poison_type))
if self.poison_type == 'ardis':
self.ardis_dataset = datasets.get_ardis_dataset()
# exclude first 66 points because they are part of the adversary
if self.attack_case == 'normal-case':
self.ardis_dataset.data = self.ardis_dataset.data[66:]
elif self.attack_case == 'almost-edge-case':
self.ardis_dataset.data = self.ardis_dataset.data[66:132]
elif self.poison_type == 'southwest':
self.ardis_dataset = datasets.get_southwest_dataset(attack_case=self.attack_case)
else:
self.ardis_dataset=None
if self.attack_method == "pgd":
self.pgd_attack = True
else:
self.pgd_attack = False
if arguments["defense_technique"] == "no-defense":
self._defender = None
elif arguments["defense_technique"] == "norm-clipping" or arguments["defense_technique"] == "norm-clipping-adaptive":
self._defender = WeightDiffClippingDefense(norm_bound=arguments['norm_bound'])
elif arguments["defense_technique"] == "weak-dp":
# doesn't really add noise. just clips
# XXX: check the algorithm for weak dp.
self._defender = WeakDPDefense(norm_bound=arguments['norm_bound'])
elif arguments["defense_technique"] == "krum":
self._defender = Krum(mode='krum', num_workers=self.part_nets_per_round, num_adv=1)
elif arguments["defense_technique"] == "multi-krum":
self._defender = Krum(mode='multi-krum', num_workers=self.part_nets_per_round, num_adv=1)
elif arguments["defense_technique"] == "rfa":
self._defender = RFA()
else:
NotImplementedError("Unsupported defense method !")
'''
steps:
server: picks a defense method, e.g. norm clipping with a value drawn from a distribution.
clients:
if client is honest:
does normal training
else:
does adversarial training
server:
collect weight updates from all the nodes.
apply defense technique.
'''
def run_modified(self):
# init the variables
main_task_acc = []
raw_task_acc = []
backdoor_task_acc = []
fl_iter_list = []
adv_norm_diff_list = []
wg_norm_list = []
model_to_begin = None
# iterate over all rounds
for flr in range(1, self.fl_round+1):
# 1. where is the model sent by the server?
# 2. sample the clients
g_user_indices = []
selected_node_indices = np.random.choice(self.num_nets, size=self.part_nets_per_round-1, replace=False)
num_data_points = [len(self.net_dataidx_map[i]) for i in selected_node_indices] # No of data points at each client.
total_num_dps_per_round = sum(num_data_points) + self.num_dps_poisoned_dataset # XXX
logger.info("FL round: {}, total num data points: {}, num dps poisoned: {}".format(flr, num_data_points, self.num_dps_poisoned_dataset))
net_freq = [self.num_dps_poisoned_dataset/ total_num_dps_per_round] + [num_data_points[i]/total_num_dps_per_round for i in range(self.part_nets_per_round-1)]
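# net_freq holds the FedAvg aggregation weights for this round: the adversary's share (its poisoned
# dataset size) comes first, followed by each sampled honest client's share of the round's total data points.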
logger.info("Net freq: {}, FL round: {} with adversary".format(net_freq, flr))
# we need to reconstruct the net list at the beginning
net_list = [copy.deepcopy(self.net_avg) for _ in range(self.part_nets_per_round)]
logger.info("################## Starting fl round: {}".format(flr))
model_original = list(self.net_avg.parameters())
wg_server_clone = copy.deepcopy(self.net_avg)
wg_hat = None
v0 = torch.nn.utils.parameters_to_vector(model_original)
wg_norm_list.append(torch.norm(v0).item())
# start the FL process
# step 1: do the training.
for net_idx, net in enumerate(net_list):
is_adversarial = net_idx == 0
if is_adversarial:
global_user_idx = -1 # we assign "-1" as the indices of the attacker in global user indices
logger.info("@@@@@@@@ Working on client: {}, which is Attacker".format(net_idx))
else:
global_user_idx = selected_node_indices[net_idx-1]
dataidxs = self.net_dataidx_map[global_user_idx]
if self.attack_case == "edge-case":
train_dl_local, _ = get_dataloader(self.dataset, './data', self.batch_size,
self.test_batch_size, dataidxs) # also get the data loader
else:
NotImplementedError("Unsupported attack case ...")
logger.info("@@@@@@@@ Working on client: {}, which is Global user: {}".format(net_idx, global_user_idx))
g_user_indices.append(global_user_idx)
criterion = nn.CrossEntropyLoss()
optimizer = optim.SGD(net.parameters(), lr=self.args_lr*self.args_gamma**(flr-1), momentum=0.9, weight_decay=1e-4) # epoch, net, train_loader, optimizer, criterion
adv_optimizer = optim.SGD(net.parameters(), lr=self.adv_lr*self.args_gamma**(flr-1), momentum=0.9, weight_decay=1e-4) # looks like adversary needs same lr to hide with others
prox_optimizer = optim.SGD(wg_server_clone.parameters(), lr=self.args_lr*self.args_gamma**(flr-1), momentum=0.9, weight_decay=1e-4)
for param_group in optimizer.param_groups:
logger.info("Effective lr in FL round: {} is {}".format(flr, param_group['lr']))
if is_adversarial:
for e in range(1, self.adversarial_local_training_period+1):
train(net,
self.device,
self.poisoned_emnist_train_loader,
optimizer,
e,
log_interval=self.log_interval,
criterion=self.criterion,
pgd_attack=self.pgd_attack,
eps=self.eps,
model_original=model_original,
project_frequency=self.project_frequency,
adv_optimizer=adv_optimizer,
prox_attack=self.prox_attack,
wg_hat=wg_hat)
# XXX: The return values of these test calls are not being used.
# final_acc_1, task_acc_1 = test(net,
# self.device,
# self.vanilla_emnist_test_loader,
# test_batch_size=self.test_batch_size,
# criterion=self.criterion,
# mode="raw-task",
# dataset=self.dataset,
# poison_type=self.poison_type)
#
# final_acc_2, task_acc_2 = test(net,
# self.device,
# self.targetted_task_test_loader,
# test_batch_size=self.test_batch_size,
# criterion=self.criterion,
# mode="targetted-task",
# dataset=self.dataset,
# poison_type=self.poison_type)
# at here we can check the distance between w_bad and w_g i.e. `\|w_bad - w_g\|_2`
# we can print the norm diff out for debugging
adv_norm_diff = calc_norm_diff(gs_model=net, vanilla_model=self.net_avg)
adv_norm_diff_list.append(adv_norm_diff)
else:
for e in range(1, self.local_training_period+1):
train(net, self.device, train_dl_local, optimizer, e, log_interval=self.log_interval, criterion=self.criterion)
honest_norm_diff = calc_norm_diff(gs_model=net, vanilla_model=self.net_avg)
# step 2: aggregation at the server.
# step 2: the server applies the defense. Norm-clipping does not reject any input; it needs to change for the new approach.
if self.defense_technique == "no-defense":
pass
elif self.defense_technique == "norm-clipping":
for net_idx, net in enumerate(net_list):
self._defender.exec(client_model=net, global_model=self.net_avg)
elif self.defense_technique == "weak-dp":
# weak-dp here only clips the norm; no noise is added at this point. XXX: questionable code.
for net_idx, net in enumerate(net_list):
self._defender.exec(client_model=net,
global_model=self.net_avg,)
elif self.defense_technique == "krum":
net_list, net_freq = self._defender.exec(client_models=net_list,
num_dps=[self.num_dps_poisoned_dataset]+num_data_points,
g_user_indices=g_user_indices,
device=self.device)
elif self.defense_technique == "multi-krum":
net_list, net_freq = self._defender.exec(client_models=net_list,
num_dps=[self.num_dps_poisoned_dataset]+num_data_points,
g_user_indices=g_user_indices,
device=self.device)
elif self.defense_technique == "rfa":
net_list, net_freq = self._defender.exec(client_models=net_list,
net_freq=net_freq,
maxiter=500,
eps=1e-5,
ftol=1e-7,
device=self.device)
else:
NotImplementedError("Unsupported defense method !")
# step 3: aggregate the contributions from each client node.
# after local training periods
self.net_avg = fed_avg_aggregator(net_list, net_freq, device=self.device, model=self.model)
if self.defense_technique == "weak-dp":
# add noise to self.net_avg
# XXX: I think noise is to be added in clients directly.
noise_adder = AddNoise(stddev=self.stddev)
noise_adder.exec(client_model=self.net_avg,
device=self.device)
v = torch.nn.utils.parameters_to_vector(self.net_avg.parameters())
logger.info("############ Averaged Model : Norm {}".format(torch.norm(v)))
logger.info("Measuring the accuracy of the averaged global model, FL round: {} ...".format(flr))
overall_acc, raw_acc = test(self.net_avg, self.device, self.vanilla_emnist_test_loader, test_batch_size=self.test_batch_size, criterion=self.criterion, mode="raw-task", dataset=self.dataset, poison_type=self.poison_type)
backdoor_acc, _ = test(self.net_avg, self.device, self.targetted_task_test_loader, test_batch_size=self.test_batch_size, criterion=self.criterion, mode="targetted-task", dataset=self.dataset, poison_type=self.poison_type)
fl_iter_list.append(flr)
main_task_acc.append(overall_acc)
raw_task_acc.append(raw_acc)
backdoor_task_acc.append(backdoor_acc)
df = pd.DataFrame({'fl_iter': fl_iter_list,
'main_task_acc': main_task_acc,
'backdoor_acc': backdoor_task_acc,
'raw_task_acc':raw_task_acc,
'adv_norm_diff': adv_norm_diff_list,
'wg_norm': wg_norm_list
})
if self.poison_type == 'ardis':
# add a row showing initial accuracies
df1 = pd.DataFrame({'fl_iter': [0], 'main_task_acc': [88], 'backdoor_acc': [11], 'raw_task_acc': [0], 'adv_norm_diff': [0], 'wg_norm': [0]})
df = | pd.concat([df1, df]) | pandas.concat |
# ---
# jupyter:
# jupytext:
# formats: ipynb,py:light
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.3.0
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
from google.cloud import bigquery
client = bigquery.Client()
# %load_ext google.cloud.bigquery
# %reload_ext google.cloud.bigquery
# +
#######################################
print('Setting everything up...')
#######################################
import warnings
warnings.filterwarnings('ignore')
import pandas_gbq
import pandas as pd
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
import matplotlib.lines as mlines
from matplotlib.lines import Line2D
import matplotlib.ticker as ticker
import matplotlib.cm as cm
import matplotlib as mpl
import matplotlib.pyplot as plt
# %matplotlib inline
import os
import sys
from datetime import datetime
from datetime import date
from datetime import time
from datetime import timedelta
import time
DATASET = ''
plt.style.use('ggplot')
pd.options.display.max_rows = 999
pd.options.display.max_columns = 999
pd.options.display.max_colwidth = 999
from IPython.display import HTML as html_print
def cstr(s, color='black'):
return "<text style=color:{}>{}</text>".format(color, s)
print('done.')
# -
cwd = os.getcwd()
cwd = str(cwd)
print(cwd)
# +
dic = {
'src_hpo_id': [
"saou_uab_selma", "saou_uab_hunt", "saou_tul", "pitt_temple",
"saou_lsu", "trans_am_meyers", "trans_am_essentia", "saou_ummc",
"seec_miami", "seec_morehouse", "seec_emory", "uamc_banner", "pitt",
"nyc_cu", "ipmc_uic", "trans_am_spectrum", "tach_hfhs", "nec_bmc",
"cpmc_uci", "nec_phs", "nyc_cornell", "ipmc_nu", "nyc_hh",
"ipmc_uchicago", "aouw_mcri", "syhc", "cpmc_ceders", "seec_ufl",
"saou_uab", "trans_am_baylor", "cpmc_ucsd", "ecchc", "chci", "aouw_uwh",
"cpmc_usc", "hrhc", "ipmc_northshore", "chs", "cpmc_ucsf", "jhchc",
"aouw_mcw", "cpmc_ucd", "ipmc_rush", "va", "saou_umc"
],
'HPO': [
"UAB Selma", "UAB Huntsville", "Tulane University", "Temple University",
"Louisiana State University",
"Reliant Medical Group (Meyers Primary Care)",
"Essentia Health Superior Clinic", "University of Mississippi",
"SouthEast Enrollment Center Miami",
"SouthEast Enrollment Center Morehouse",
"SouthEast Enrollment Center Emory", "Banner Health",
"University of Pittsburgh", "Columbia University Medical Center",
"University of Illinois Chicago", "Spectrum Health",
"Henry Ford Health System", "Boston Medical Center", "UC Irvine",
"Partners HealthCare", "Weill Cornell Medical Center",
"Northwestern Memorial Hospital", "Harlem Hospital",
"University of Chicago", "Marshfield Clinic",
"San Ysidro Health Center", "Cedars-Sinai", "University of Florida",
"University of Alabama at Birmingham", "Baylor", "UC San Diego",
"Eau Claire Cooperative Health Center", "Community Health Center, Inc.",
"UW Health (University of Wisconsin Madison)",
"University of Southern California", "HRHCare",
"NorthShore University Health System", "Cherokee Health Systems",
"UC San Francisco", "Jackson-Hinds CHC", "Medical College of Wisconsin",
"UC Davis", "Rush University",
"United States Department of Veterans Affairs - Boston",
"University Medical Center (UA Tuscaloosa)"
]
}
site_df = pd.DataFrame(data=dic)
site_df
# +
######################################
print('Getting the data from the database...')
######################################
site_map = pd.io.gbq.read_gbq('''
select distinct * from (
SELECT
DISTINCT(src_hpo_id) as src_hpo_id
FROM
`{}._mapping_visit_occurrence`
UNION ALL
SELECT
DISTINCT(src_hpo_id) as src_hpo_id
FROM
`{}._mapping_care_site`
UNION ALL
SELECT
DISTINCT(src_hpo_id) as src_hpo_id
FROM
`{}._mapping_condition_occurrence`
UNION ALL
SELECT
DISTINCT(src_hpo_id) as src_hpo_id
FROM
`{}._mapping_device_exposure`
UNION ALL
SELECT
DISTINCT(src_hpo_id) as src_hpo_id
FROM
`{}._mapping_drug_exposure`
UNION ALL
SELECT
DISTINCT(src_hpo_id) as src_hpo_id
FROM
`{}._mapping_location`
UNION ALL
SELECT
DISTINCT(src_hpo_id) as src_hpo_id
FROM
`{}._mapping_measurement`
UNION ALL
SELECT
DISTINCT(src_hpo_id) as src_hpo_id
FROM
`{}._mapping_note`
UNION ALL
SELECT
DISTINCT(src_hpo_id) as src_hpo_id
FROM
`{}._mapping_observation`
UNION ALL
SELECT
DISTINCT(src_hpo_id) as src_hpo_id
FROM
`{}._mapping_person`
UNION ALL
SELECT
DISTINCT(src_hpo_id) as src_hpo_id
FROM
`{}._mapping_procedure_occurrence`
UNION ALL
SELECT
DISTINCT(src_hpo_id) as src_hpo_id
FROM
`{}._mapping_provider`
UNION ALL
SELECT
DISTINCT(src_hpo_id) as src_hpo_id
FROM
`{}._mapping_specimen`
UNION ALL
SELECT
DISTINCT(src_hpo_id) as src_hpo_id
FROM
`{}._mapping_visit_occurrence`
)
'''.format(DATASET, DATASET, DATASET, DATASET, DATASET, DATASET, DATASET,
DATASET, DATASET, DATASET, DATASET, DATASET, DATASET, DATASET,
DATASET, DATASET, DATASET, DATASET, DATASET, DATASET, DATASET,
DATASET, DATASET, DATASET, DATASET, DATASET),
dialect='standard')
print(site_map.shape[0], 'records received.')
# -
site_df = pd.merge(site_map, site_df, how='outer', on='src_hpo_id')
site_df
Lipid = (40782589, 40795800, 40772572)
CBC = (40789356, 40789120, 40789179, 40772748, 40782735, 40789182, 40786033,
40779159)
CBCwDiff = (40785788, 40785796, 40779195, 40795733, 40795725, 40772531,
40779190, 40785793, 40779191, 40782561, 40789266)
CMP = (3049187, 3053283, 40775801, 40779224, 40782562, 40782579, 40785850,
40785861, 40785869, 40789180, 40789190, 40789527, 40791227, 40792413,
40792440, 40795730, 40795740, 40795754)
Physical_Measurement = (40654163, 40655804, 40654162, 40655805, 40654167,
40654164)
measurement_codes = Lipid + CBC + CBCwDiff + CMP + Physical_Measurement
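# These appear to be OMOP measurement concept IDs grouped by lab panel (lipid panel, CBC,
# CBC with differential, CMP) plus physical measurements; measurement_codes pools them for later queries.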
# # Integration of Units for All Measurements:
#
# #### Getting the numbers for all of the unit concept IDs by site
# +
unit_concept_ids_by_site_query = """
CREATE TABLE `{DATASET}.sites_unit_counts`
OPTIONS (
expiration_timestamp=TIMESTAMP_ADD(CURRENT_TIMESTAMP(), INTERVAL 3 MINUTE)
)
AS
SELECT
DISTINCT
mm.src_hpo_id, COUNT(m.measurement_id) as number_total_units
FROM
`{DATASET}.unioned_ehr_measurement` m
JOIN
`{DATASET}._mapping_measurement` mm
ON
m.measurement_id = mm.measurement_id
GROUP BY 1
ORDER BY number_total_units DESC
""".format(DATASET = DATASET)
unit_concept_ids_by_site = pd.io.gbq.read_gbq(unit_concept_ids_by_site_query, dialect='standard')
# +
unit_concept_ids_by_site_query = """
SELECT
*
FROM
`{DATASET}.sites_unit_counts`
""".format(DATASET = DATASET)
unit_concept_ids_by_site = | pd.io.gbq.read_gbq(unit_concept_ids_by_site_query, dialect='standard') | pandas.io.gbq.read_gbq |
# SETUP
#
# Refs
# https://github.com/UKPLab/sentence-transformers
# https://towardsdatascience.com/nlp-extract-contextualized-word-embeddings-from-bert-keras-tf-67ef29f60a7b
# https://towardsdatascience.com/bert-for-dummies-step-by-step-tutorial-fb90890ffe03
# Standard includes
import csv
import pickle
import pandas as pd
import string
from tkinter import Tk, filedialog
# import nltk
# nltk.download('stopwords')
# nltk.download('wordnet')
from nltk.corpus import stopwords
from nltk.stem.wordnet import WordNetLemmatizer
from pandas import DataFrame
from sklearn import svm
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.svm import SVC
from sklearn.ensemble import RandomForestClassifier
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.ensemble import VotingClassifier
from sklearn.model_selection import GridSearchCV
import numpy as np
from sklearn.metrics import average_precision_score
from sklearn.metrics import plot_precision_recall_curve
import matplotlib.pyplot as plt
import seaborn as sn
from sentence_transformers import SentenceTransformer
from sklearn.metrics import precision_recall_fscore_support, classification_report
# Select File
root = Tk()
root.filename = filedialog.askopenfilename()
file = root.filename
root.withdraw()
# (NLTK) Helper Settings
stop = set(stopwords.words('english'))
exclude = set(string.punctuation)
lemma = WordNetLemmatizer()
# (NLTK) Helper Functions
def clean(doc):
stop_free = " ".join([i for i in doc.split() if i not in stop])
punc_free = ''.join(ch for ch in stop_free if ch not in exclude)
normalized = " ".join(lemma.lemmatize(word) for word in punc_free.split())
return normalized
# Settings & Variables
# TODO: Populate key_dict dynamically based on what the Label is...
#key_dict = {0: 'Social Relationships', 1: 'Health, Fatigue, or Physical Pain', 2: 'Emotional Turmoil', 3: 'Work',
# 4: 'Family Issues', 5: 'Everday Decision Making', 6: 'School', 7: 'Other', 8: 'Financial Problem'}
#key_dict = {1: '1', 2: '2', 3: '3', 4: '4', 5: '5', 6: '6', 7: '7', 8: '8', 9: '9', 10: '10'}
Input_File = file
print(Input_File)
output_model_filename = 'finalized_model.sav'
output_probs = "output_probs.csv"
Label = "Multi-class"
Features = "BERT"
Algorithm = "SVC"
Sweep = False
# ----------------------------------------
# SCRIPT PROCESSING
# This is where the main processing takes place
# Read data from converted/compiled CSV (Assumes data is sorted by 'Set' column ascending)
# TODO: See how the TF_IDF features parse the list and use that here instead of relying on the ordering of 'Set'
df = pd.read_csv(Input_File)
TargetNamesStrings = [str(x) for x in df[Label].unique().tolist()]
TargetNames = df[Label].unique().tolist()
TargetNames.sort()
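# NOTE (assumption): key_dict is referenced in the data-distribution loop below but is only defined
# in the commented-out examples above; build a generic label -> name mapping here so the script runs.
# Replace the string values with real class names if they are known.
key_dict = {label: str(label) for label in TargetNames}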
dataset = (df['Set'] == 0).sum()
class_report = open('scikit_report.txt', 'w')
class_report.write(str(Input_File) + '\n')
# Preview the first 5 lines of the loaded data
print(df.head())
class_report.write(str(df.head()))
class_report.write('\n')
# Cast labels
df[Label] = df[Label].astype(int)
# Read each document and clean it.
df["Sentence"] = df["Sentence"].apply(clean)
# Let's do some quick counts
# TODO: Make this dynamic so we don't have to interact with the code here to change # of labels above
CategoryLabels = list(df[Label])
label_sum = float(len(CategoryLabels))
print(" ")
class_report.write(" \n")
print("===============")
class_report.write("===============\n")
print("Data Distribution:")
class_report.write("Data Distribution:\n")
for label in TargetNames:
print(str(label) + ' ' + str(key_dict[label]) + ' contains:', CategoryLabels.count(label), round(float(CategoryLabels.count(label) / label_sum), 2))
class_report.write(str(label) + ' ' + str(key_dict[label]) + ' contains:' + ' ' + str(CategoryLabels.count(label)) + ' ' + str(round(float(CategoryLabels.count(label) / label_sum), 2)))
class_report.write('\n')
# Beginning to calculate features include BERT and TF-IDF; this process can be a bit of bottleneck
# TODO: Consider writing these variables to a file to "pre-compute" them if experiments are taking awhile
print(" ")
class_report.write(" \n")
print("===============")
class_report.write("===============\n")
print("Fitting Features: ")
class_report.write("Fitting Features: \n")
print(" ")
class_report.write('\n')
bert_dimension = 0
if Features == "All" or Features == "BERT":
# Create BERT Features and add to data frame
print('Fitting BERT Features')
class_report.write('Fitting BERT Features')
model = SentenceTransformer('bert-base-nli-mean-tokens')
sentences = df['Sentence'].tolist()
sentence_embeddings = model.encode(sentences)
encoded_values = pd.DataFrame(np.row_stack(sentence_embeddings))
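# sentence_embeddings holds one fixed-size vector per sentence (768 dimensions for bert-base models);
# wrapping them in a DataFrame lets them be concatenated with the other feature columns later.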
FeatureNames = []
bert_dimension = encoded_values.shape[1]
for x in range(0, bert_dimension):
FeatureNames.append("BERT_" + str(x))
training_corpus = encoded_values.head(dataset)
test_corpus = encoded_values.tail((df['Set'] == 1).sum())
tf_dimension = 0
if Features == "All" or Features == "TF":
# Create TF-IDF Features and add to data frame
print('Fitting TF-IDF Features')
tf_train, tf_test = df[df['Set'] != 1], df[df['Set'] == 1]
tf_training_corpus = tf_train['Sentence'].values
tf_training_labels = tf_train[Label].values
tf_test_corpus = tf_test['Sentence'].values
tf_test_labels = tf_test[Label].values
tf_idf_vectorizer = TfidfVectorizer(max_df=0.95, min_df=2, max_features=5000, stop_words='english')
tfidf = tf_idf_vectorizer.fit_transform(tf_training_corpus)
X = tf_idf_vectorizer.fit_transform(tf_training_corpus).todense()
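# Note: `tfidf` above is not reused; X re-fits the vectorizer on the same training corpus and
# densifies the matrix so each row can be unpacked into a per-document feature vector below.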
featurized_training_data = []
for x in range(0, len(X)):
tfid_Features = np.array(X[x][0]).reshape(-1, )
featurized_training_data.append(tfid_Features)
FeatureNames = []
tf_dimension = X.shape[1]
for x in range(0, tf_dimension):
FeatureNames.append("TFIDF_" + str(x))
X = tf_idf_vectorizer.transform(tf_test_corpus).todense()
featurized_test_data = []
for x in range(0, len(X)):
tfid_Features = np.array(X[x][0]).reshape(-1, )
featurized_test_data.append(tfid_Features)
# Merge the feature data if 'All' or get the TF-IDF Features if 'TF'
if Features == 'All':
featurized_training_data_df = DataFrame(featurized_training_data, columns=FeatureNames)
training_corpus = pd.concat([training_corpus, featurized_training_data_df], axis=1)
test_corpus = test_corpus.reset_index()
test_corpus = test_corpus.drop(['index'], axis=1)
featurized_test_data_df = DataFrame(featurized_test_data, columns=FeatureNames)
test_corpus = pd.concat([test_corpus, featurized_test_data_df], axis=1)
elif Features == 'TF':
featurized_training_data_df = DataFrame(featurized_training_data, columns=FeatureNames)
training_corpus = featurized_training_data_df
featurized_test_data_df = DataFrame(featurized_test_data, columns=FeatureNames)
test_corpus = featurized_test_data_df
# Get the labels from the original data frame
temp1 = df.head(dataset)
temp2 = df.tail((df['Set'] == 1).sum())
training_labels = temp1[Label].values
test_labels = temp2[Label].values
training_labels = training_labels.astype(int)
test_labels = test_labels.astype(int)
# Create final dataset for Testing & Training by joining Labels
train = pd.DataFrame(training_corpus)
test = pd.DataFrame(test_corpus)
mapping = dict(zip(np.unique(training_labels), np.arange(len(TargetNames))))
mapping_2 = dict(zip(np.unique(test_labels), np.arange(len(TargetNames))))
train[Label] = pd.Categorical.from_codes(pd.Series(training_labels).map(mapping), TargetNames)
test[Label] = pd.Categorical.from_codes(pd.Series(test_labels).map(mapping_2), TargetNames)
# Show the number of observations for the test and training data frames
print(" ")
class_report.write('\n')
print("===============")
class_report.write("===============\n")
print("Fold Information: ")
class_report.write("Fold Information: \n")
print('Number of observations in the training data:', len(train))
class_report.write('Number of observations in the training data: ' + str(len(train)) + '\n')
print('Number of observations in the test data:', len(test))
class_report.write('Number of observations in the test data: ' + str(len(test)) + '\n')
print('Number of features generated:', str(tf_dimension + bert_dimension))
class_report.write('Number of features generated: ' + str(tf_dimension + bert_dimension) + '\n')
# Create a list of the feature column's names
features = train.columns[:(tf_dimension + bert_dimension)]
# Create a classifier. By convention, clf means 'classifier'
if Algorithm == "SVC":
clf = SVC(kernel='rbf', class_weight='balanced', probability=True)
if Algorithm == "SVC-Sweep":
clf = SVC(kernel='poly', class_weight='balanced', C=1, decision_function_shape='ovo', gamma=0.0001,
probability=True)
if Algorithm == "LSVC":
clf = svm.LinearSVC()
if Algorithm == "RF":
clf = RandomForestClassifier(n_jobs=-1, class_weight="balanced")
if Algorithm == "GBT":
clf = GradientBoostingClassifier(n_estimators=100, learning_rate=1.0, max_depth=1, random_state=0)
if Algorithm == "VT":
clf1 = SVC(kernel='rbf', class_weight="balanced", probability=True)
clf2 = RandomForestClassifier(n_jobs=-1, class_weight="balanced")
clf3 = GradientBoostingClassifier(n_estimators=100, learning_rate=1.0, max_depth=1, random_state=0)
clf = VotingClassifier(estimators=[('svc', clf1), ('rf', clf2), ('gbt', clf3)], voting='soft', weights=[1, 1, 1])
# Train the classifier to take the training features and learn how they relate
clf.fit(train[features], train[Label])
# Apply the classifier we trained to the test data (which, remember, it has never seen before)
preds = clf.predict(test[features])
if Algorithm == "SVC" or Algorithm == "SVC-Sweep":
# Output the probabilities for the SVC; it's possible this could be extended to other algorithms
# TODO: Investigate
# Below this is some legacy code which will allow you to filter the output and see it reflected
# in the stats below by swapping in y_pred but this can have a whacky interaction with other classifiers
preds_proba = clf.predict_proba(test[features])
y_pred = (clf.predict_proba(test[features])[:, 1] >= 0.695).astype(bool)
# View the PREDICTED classes for the first five observations
print(" ")
class_report.write('\n')
print("===============")
class_report.write("===============\n")
print("Example Prediction: ")
class_report.write("Example Prediction: \n")
print(preds[0:5])
class_report.write(str(preds[0:5]))
if Algorithm == "SVC" or Algorithm == "SVC-Sweep":
with open(output_probs, 'w', newline='') as my_csv:
csvWriter = csv.writer(my_csv, delimiter=',')
csvWriter.writerows(preds_proba)
# View the ACTUAL classes for the first five observations
print(" ")
class_report.write('\n')
print("===============")
class_report.write("===============\n")
print("Actual: ")
class_report.write("Actual: \n")
print(str(test[Label].head()))
class_report.write(str(test[Label].head()) + '\n')
# Create confusion matrix
print(" ")
class_report.write('\n')
print("===============")
class_report.write("===============\n")
print("Confusion Matrix: ")
class_report.write("Confusion Matrix: \n")
print(" ")
class_report.write('\n')
confusion_matrix = | pd.crosstab(test[Label], preds, rownames=['Actual Categories'], colnames=['Predicted Categories']) | pandas.crosstab |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Apr 20 09:50:47 2020
@author:
"""
##################################################
### Other libraries of interest
##################################################
import numpy as np
import matplotlib.pyplot as plt
from skimage import io
from matplotlib.collections import LineCollection
from PIL import Image
import urllib
from Bio import SeqIO
from Bio.Seq import Seq
from Bio.Alphabet import IUPAC
from Bio import Entrez
from Bio.SeqUtils import GC
from Bio.SeqUtils.ProtParam import ProteinAnalysis
from Bio import Phylo
from astropy.coordinates import SkyCoord
from astropy.coordinates import Angle
from astroquery.sdss import SDSS
from astropy.table import Table
from astropy import units as u
from astropy.table import join
from astropy.io import fits
from astropy.modeling import models, fitting
from astropy.modeling.models import custom_model
from astropy.modeling import Fittable1DModel, Parameter
from astroquery.sdss import SDSS
##################################################
### The biopython library
##################################################
from Bio import SeqIO
from Bio.Seq import Seq
paginaD = 'Datos/'
archivo = 'SARS-CoV2Col.fasta'
SARSCoV2 = list(SeqIO.parse(paginaD+archivo, "fasta"))
SARSCoV2[0].id
SARSCoV2[0].seq
from Bio.Alphabet import IUPAC
SARSCoV2 = list(SeqIO.parse(paginaD+archivo, "fasta",\
IUPAC.ambiguous_dna))
archivo = 'SARS-CoV2Ant.gb'
SARSCoV2Ant = list(SeqIO.parse(paginaD+archivo, "genbank"))[0]
SARSCoV2Ant.id
SARSCoV2Ant.description
print(SARSCoV2Ant.features[2])
SARSCoV2Ant_ARN = (SARSCoV2[1].seq[211:13429]).transcribe()
SARSCoV2Ant_Prot = (SARSCoV2[1].seq[211:13429]).translate()
SARSCoV2Ant_QIS30052_2 = SARSCoV2Ant.features[2].\
extract(SARSCoV2Ant.seq)
SARSCoV2Ant_ARN.transcribe() # Produces an error
SARSCoV2Ant_ARN.translate()
SARSCoV2Ant_ARN.back_transcribe()
SARSCoV2Ant_QIS30052_2.complement()
SARSCoV2Ant_QIS30052_2.reverse_complement()
##################################################
### Access to the `Entrez` repository
##################################################
from Bio import Entrez
Entrez.email = "<EMAIL>"
busqueda = Entrez.esearch(db="nucleotide",\
term="sickle cell AND human NOT chromosome",
retmax =100)
resultados = Entrez.read(busqueda)
busqueda = Entrez.efetch(db="nucleotide", id=resultados['IdList'],
retmax="100", rettype="fasta",
retmode="xml")
resultados = Entrez.read(busqueda)
resultados[0]['TSeq_sequence']
busqueda = Entrez.efetch(db="nucleotide", id="179408", \
rettype="fasta", retmode="xml")
HBB_gen = Entrez.read(busqueda)[0]
["AB026282", "AF076044","AF076051", "AF076052","AB026277",
"AB026278", "AB066603", "AF076089", "AF076090","AB026269",
"AB026271", "AB026270"]
busqueda = Entrez.efetch(db="nucleotide", id=["AB026282",\
"AF076044", "AF076051", "AF076052","AB026277",\
"AB026278", "AB066603", "AF076089", "AF076090",\
"AB026269", "AB026271", "AB026270"],\
rettype="fasta", retmode="xml")
CitocromoB = Entrez.read(busqueda)
CitocromoB_sec = [Seq(x['TSeq_sequence'],IUPAC.ambiguous_dna)\
for x in CitocromoB]
CitocromoB_sec = []
for x in CitocromoB:
CitocromoB_sec.append(Seq(x['TSeq_sequence'],\
IUPAC.ambiguous_dna))
from Bio.SeqUtils import GC
CitocromoB_GC = [GC(x) for x in CitocromoB_sec]
from Bio.SeqUtils.ProtParam import ProteinAnalysis
CitocromoB_trad = [str(x.translate(to_stop=True)) \
for x in CitocromoB_sec]
CitocromoB_analizada = [ProteinAnalysis(x) for x in\
CitocromoB_trad]
CitocromoB_pesoM = [x.molecular_weight() for x in \
CitocromoB_analizada]
pinguinos = | pd.read_csv(paginaD+'pinguinos.csv', index_col=[0]) | pandas.read_csv |
import pandas
import datetime
Incentives = {1: 400, 2: 500, 3: 700, 4: 900, 5: 1100, 6: 1300, 7: 1500, 8: 1800, 9: 2100, 10: 2400}
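# Module-level default tier amounts; Calculate_Car_Incentive below shadows this dict with
# values read from Cars.csv, so these numbers only serve as a reference.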
def Calculate_Car_Incentive(cars):
df = pandas.read_csv("E:\\000\\Cars.csv")
Incentives = {1: df.loc[0, 'first'], 2: df.loc[0, 'second'], 3: df.loc[0, 'third'], 4: df.loc[0, 'fourth'],
5: df.loc[0, 'fifth'], 6: df.loc[0, 'sixth'], 7: df.loc[0, 'seventh'], 8: df.loc[0, 'eighth'],
9: df.loc[0, 'ninth'], 10: df.loc[0, 'tenth']}
Car_Amount = 0
if cars > 10:
Car_Amount = (cars - 10) * 2400
for i in Incentives:
if i > cars:
break
Car_Amount += Incentives[i]
del df
return Car_Amount
def Calculate_MGA_Incentive(mga):
df1 = pandas.read_csv("E:\\000\\Ranges.csv")
highest = df1.loc[0, 'Highest']
second = df1.loc[0, 'Second']
third = df1.loc[0, 'Third']
fourth = df1.loc[0, 'Fourth']
fifth = df1.loc[0, 'Fifth']
sixth = df1.loc[0, 'Sixth']
seventh = df1.loc[0, 'Seventh']
eighth = df1.loc[0, 'Eighth']
ninth = df1.loc[0, 'Nineth']
tenth = df1.loc[0, 'Tenth']
lowest = df1.loc[0, 'Lowest']
Mga_Amount = 0
if mga >= highest:
Mga_Amount = (8/100) * mga
elif third <= mga <= second:
Mga_Amount = (7/100) * mga
elif fifth <= mga <= fourth:
Mga_Amount = (6/100) * mga
elif seventh <= mga <= sixth:
Mga_Amount = (5/100) * mga
elif ninth <= mga <= eighth:
Mga_Amount = (4/100) * mga
elif lowest <= mga <= tenth:
Mga_Amount = (3/100) * mga
elif mga < 30000:
return 0
return Mga_Amount
def Calculate_Warranty_Incentive(warranty):
df1 = pandas.read_csv("E:\\000\\Data.csv")
Warranty_Amount = warranty * df1.loc[0, 'Warranty']
del df1
return Warranty_Amount
def Brezza_set(new):
df1 = pandas.read_csv("E:\\000\\Data.csv")
df1['Brezza'] = new
df1.to_csv('E:\\000\\Data.csv', sep=',', mode='w')
del df1
def Dzire_set(new):
df1 = | pandas.read_csv("E:\\000\\Data.csv") | pandas.read_csv |
import builtins
from io import StringIO
import numpy as np
import pytest
from pandas.errors import UnsupportedFunctionCall
import pandas as pd
from pandas import DataFrame, Index, MultiIndex, Series, Timestamp, date_range, isna
import pandas._testing as tm
import pandas.core.nanops as nanops
from pandas.util import _test_decorators as td
@pytest.fixture(
params=[np.int32, np.int64, np.float32, np.float64],
ids=["np.int32", "np.int64", "np.float32", "np.float64"],
)
def numpy_dtypes_for_minmax(request):
"""
Fixture of numpy dtypes with min and max values used for testing
cummin and cummax
"""
dtype = request.param
min_val = (
np.iinfo(dtype).min if np.dtype(dtype).kind == "i" else np.finfo(dtype).min
)
max_val = (
np.iinfo(dtype).max if np.dtype(dtype).kind == "i" else np.finfo(dtype).max
)
return (dtype, min_val, max_val)
@pytest.mark.parametrize("agg_func", ["any", "all"])
@pytest.mark.parametrize("skipna", [True, False])
@pytest.mark.parametrize(
"vals",
[
["foo", "bar", "baz"],
["foo", "", ""],
["", "", ""],
[1, 2, 3],
[1, 0, 0],
[0, 0, 0],
[1.0, 2.0, 3.0],
[1.0, 0.0, 0.0],
[0.0, 0.0, 0.0],
[True, True, True],
[True, False, False],
[False, False, False],
[np.nan, np.nan, np.nan],
],
)
def test_groupby_bool_aggs(agg_func, skipna, vals):
df = DataFrame({"key": ["a"] * 3 + ["b"] * 3, "val": vals * 2})
# Figure out expectation using Python builtin
exp = getattr(builtins, agg_func)(vals)
# edge case for missing data with skipna and 'any'
if skipna and all(isna(vals)) and agg_func == "any":
exp = False
exp_df = DataFrame([exp] * 2, columns=["val"], index=Index(["a", "b"], name="key"))
result = getattr(df.groupby("key"), agg_func)(skipna=skipna)
tm.assert_frame_equal(result, exp_df)
def test_max_min_non_numeric():
# #2700
aa = DataFrame({"nn": [11, 11, 22, 22], "ii": [1, 2, 3, 4], "ss": 4 * ["mama"]})
result = aa.groupby("nn").max()
assert "ss" in result
result = aa.groupby("nn").max(numeric_only=False)
assert "ss" in result
result = aa.groupby("nn").min()
assert "ss" in result
result = aa.groupby("nn").min(numeric_only=False)
assert "ss" in result
def test_min_date_with_nans():
# GH26321
dates = pd.to_datetime(
pd.Series(["2019-05-09", "2019-05-09", "2019-05-09"]), format="%Y-%m-%d"
).dt.date
df = pd.DataFrame({"a": [np.nan, "1", np.nan], "b": [0, 1, 1], "c": dates})
result = df.groupby("b", as_index=False)["c"].min()["c"]
expected = pd.to_datetime(
pd.Series(["2019-05-09", "2019-05-09"], name="c"), format="%Y-%m-%d"
).dt.date
tm.assert_series_equal(result, expected)
result = df.groupby("b")["c"].min()
expected.index.name = "b"
tm.assert_series_equal(result, expected)
def test_intercept_builtin_sum():
s = Series([1.0, 2.0, np.nan, 3.0])
grouped = s.groupby([0, 1, 2, 2])
result = grouped.agg(builtins.sum)
result2 = grouped.apply(builtins.sum)
expected = grouped.sum()
tm.assert_series_equal(result, expected)
tm.assert_series_equal(result2, expected)
# @pytest.mark.parametrize("f", [max, min, sum])
# def test_builtins_apply(f):
@pytest.mark.parametrize("f", [max, min, sum])
@pytest.mark.parametrize("keys", ["jim", ["jim", "joe"]]) # Single key # Multi-key
def test_builtins_apply(keys, f):
# see gh-8155
df = pd.DataFrame(np.random.randint(1, 50, (1000, 2)), columns=["jim", "joe"])
df["jolie"] = np.random.randn(1000)
fname = f.__name__
result = df.groupby(keys).apply(f)
ngroups = len(df.drop_duplicates(subset=keys))
assert_msg = f"invalid frame shape: {result.shape} (expected ({ngroups}, 3))"
assert result.shape == (ngroups, 3), assert_msg
tm.assert_frame_equal(
result, # numpy's equivalent function
df.groupby(keys).apply(getattr(np, fname)),
)
if f != sum:
expected = df.groupby(keys).agg(fname).reset_index()
expected.set_index(keys, inplace=True, drop=False)
tm.assert_frame_equal(result, expected, check_dtype=False)
tm.assert_series_equal(getattr(result, fname)(), getattr(df, fname)())
def test_arg_passthru():
# make sure that we are passing thru kwargs
# to our agg functions
# GH3668
# GH5724
df = pd.DataFrame(
{
"group": [1, 1, 2],
"int": [1, 2, 3],
"float": [4.0, 5.0, 6.0],
"string": list("abc"),
"category_string": pd.Series(list("abc")).astype("category"),
"category_int": [7, 8, 9],
"datetime": pd.date_range("20130101", periods=3),
"datetimetz": pd.date_range("20130101", periods=3, tz="US/Eastern"),
"timedelta": pd.timedelta_range("1 s", periods=3, freq="s"),
},
columns=[
"group",
"int",
"float",
"string",
"category_string",
"category_int",
"datetime",
"datetimetz",
"timedelta",
],
)
expected_columns_numeric = Index(["int", "float", "category_int"])
# mean / median
expected = pd.DataFrame(
{
"category_int": [7.5, 9],
"float": [4.5, 6.0],
"timedelta": [pd.Timedelta("1.5s"), pd.Timedelta("3s")],
"int": [1.5, 3],
"datetime": [
pd.Timestamp("2013-01-01 12:00:00"),
pd.Timestamp("2013-01-03 00:00:00"),
],
"datetimetz": [
pd.Timestamp("2013-01-01 12:00:00", tz="US/Eastern"),
pd.Timestamp("2013-01-03 00:00:00", tz="US/Eastern"),
],
},
index=Index([1, 2], name="group"),
columns=["int", "float", "category_int", "datetime", "datetimetz", "timedelta"],
)
for attr in ["mean", "median"]:
result = getattr(df.groupby("group"), attr)()
tm.assert_index_equal(result.columns, expected_columns_numeric)
result = getattr(df.groupby("group"), attr)(numeric_only=False)
tm.assert_frame_equal(result.reindex_like(expected), expected)
# TODO: min, max *should* handle
# categorical (ordered) dtype
expected_columns = Index(
[
"int",
"float",
"string",
"category_int",
"datetime",
"datetimetz",
"timedelta",
]
)
for attr in ["min", "max"]:
result = getattr(df.groupby("group"), attr)()
tm.assert_index_equal(result.columns, expected_columns)
result = getattr(df.groupby("group"), attr)(numeric_only=False)
tm.assert_index_equal(result.columns, expected_columns)
expected_columns = Index(
[
"int",
"float",
"string",
"category_string",
"category_int",
"datetime",
"datetimetz",
"timedelta",
]
)
for attr in ["first", "last"]:
result = getattr(df.groupby("group"), attr)()
tm.assert_index_equal(result.columns, expected_columns)
result = getattr(df.groupby("group"), attr)(numeric_only=False)
tm.assert_index_equal(result.columns, expected_columns)
expected_columns = Index(["int", "float", "string", "category_int", "timedelta"])
result = df.groupby("group").sum()
tm.assert_index_equal(result.columns, expected_columns_numeric)
result = df.groupby("group").sum(numeric_only=False)
tm.assert_index_equal(result.columns, expected_columns)
expected_columns = Index(["int", "float", "category_int"])
for attr in ["prod", "cumprod"]:
result = getattr(df.groupby("group"), attr)()
tm.assert_index_equal(result.columns, expected_columns_numeric)
result = getattr(df.groupby("group"), attr)(numeric_only=False)
tm.assert_index_equal(result.columns, expected_columns)
# like min, max, but don't include strings
expected_columns = Index(
["int", "float", "category_int", "datetime", "datetimetz", "timedelta"]
)
for attr in ["cummin", "cummax"]:
result = getattr(df.groupby("group"), attr)()
# GH 15561: numeric_only=False set by default like min/max
tm.assert_index_equal(result.columns, expected_columns)
result = getattr(df.groupby("group"), attr)(numeric_only=False)
tm.assert_index_equal(result.columns, expected_columns)
expected_columns = Index(["int", "float", "category_int", "timedelta"])
result = getattr(df.groupby("group"), "cumsum")()
| tm.assert_index_equal(result.columns, expected_columns_numeric) | pandas._testing.assert_index_equal |
#!/usr/bin/env python
# coding: utf-8
# "With whom do users initiate?" Mlogit Modeling
# ===
#
# This version is the script version.
import os
import re
import pandas as pd
import numpy as np
from collections import Counter, defaultdict
import sqlite3
from tqdm import tqdm
import random
import pickle
from datetime import datetime
import bisect
import matplotlib.pyplot as plt
import matplotlib.dates as md
import matplotlib
import pylab as pl
from IPython.core.display import display, HTML
import networkx as nx
import sys
# if set to True, will generate in the test data range
# Otherwise, will generate in the train data range
# This is definited by model_start_timestamp below
should_generate_test_data = False
working_dir = "/home/lana/shared/caringbridge/data/projects/sna-social-support/author_initiations"
assert os.path.exists(working_dir)
start_date = datetime.fromisoformat('2005-01-01')
start_timestamp = int(start_date.timestamp() * 1000)
end_date = datetime.fromisoformat('2016-06-01')
end_timestamp = int(end_date.timestamp() * 1000)
subset_start_date = datetime.fromisoformat('2014-01-01')
subset_start_timestamp = int(subset_start_date.timestamp() * 1000)
##### Data reading
# load the list of valid users
data_selection_working_dir = "/home/lana/shared/caringbridge/data/projects/sna-social-support/data_selection"
valid_user_ids = set()
with open(os.path.join(data_selection_working_dir, "valid_user_ids.txt"), 'r') as infile:
for line in infile:
user_id = line.strip()
if user_id == "":
continue
else:
valid_user_ids.add(int(user_id))
# load the list of valid sites
data_selection_working_dir = "/home/lana/shared/caringbridge/data/projects/sna-social-support/data_selection"
valid_site_ids = set()
with open(os.path.join(data_selection_working_dir, "valid_site_ids.txt"), 'r') as infile:
for line in infile:
site_id = line.strip()
if site_id == "":
continue
else:
valid_site_ids.add(int(site_id))
# read the journal metadata with author type info added
s = datetime.now()
author_type_dir = "/home/lana/shared/caringbridge/data/projects/sna-social-support/author_type"
journal_metadata_filepath = os.path.join(author_type_dir, "journal_metadata_with_author_type.df")
journal_df = pd.read_feather(journal_metadata_filepath)
print("Journal metadata:", datetime.now() - s)
# as a quick fix for invalid dates in journals, when created_at is 0 we use the updated_at instead
# note that only 41 updates have this issue
invalid_created_at = journal_df.created_at <= 0
journal_df.loc[invalid_created_at, 'created_at'] = journal_df.loc[invalid_created_at, 'updated_at']
health_cond_filepath = os.path.join("/home/lana/shared/caringbridge/data/projects/sna-social-support/user_metadata", "assigned_health_conditions.feather")
user_health_conds_df = pd.read_feather(health_cond_filepath)
# read the user author type dataframe
author_type_dir = "/home/lana/shared/caringbridge/data/projects/sna-social-support/author_type"
user_patient_proportions_filepath = os.path.join(author_type_dir, 'user_patient_proportions.df')
user_df = pd.read_feather(user_patient_proportions_filepath)
# read the user->user interactions dataframe
metadata_dir = "/home/lana/shared/caringbridge/data/projects/sna-social-support/user_metadata"
u2u_df = pd.read_feather(os.path.join(metadata_dir,"u2u_df.feather"))
# read the site-level metadata
site_metadata_working_dir = "/home/lana/shared/caringbridge/data/derived/site_metadata"
site_metadata_filepath = os.path.join(site_metadata_working_dir, "site_metadata.feather")
site_metadata_df = pd.read_feather(site_metadata_filepath)
print("Data loaded.")
# ## Compute and merge the features
# In[15]:
user_df = user_df[user_df.user_id.isin(valid_user_ids)]
user_df['is_multisite_author'] = user_df.num_sites > 1
is_mixedsite_author_dict = {}
site_author_sets = journal_df[journal_df.user_id.isin(valid_user_ids)].groupby('site_id').agg({'user_id': lambda user_ids: set(user_ids)})
for site_id, user_ids in zip(site_author_sets.index, site_author_sets.user_id):
if len(user_ids) > 1:
for user_id in user_ids:
is_mixedsite_author_dict[user_id] = True
is_mixedsite_author = [user_id in is_mixedsite_author_dict for user_id in user_df.user_id]
user_df['is_mixedsite_author'] = is_mixedsite_author
# merge in the health condition data
user_health_cond_dict = {user_id: assigned_health_cond for user_id, assigned_health_cond in zip(user_health_conds_df.user_id, user_health_conds_df.assigned_health_cond)}
health_condition = [user_health_cond_dict[user_id] for user_id in user_df.user_id]
user_df['health_condition'] = health_condition
# number of journal updates, first update, last update
user_updates_df = journal_df[journal_df.user_id.isin(valid_user_ids)].groupby('user_id').agg({
'journal_oid': lambda group: len(group),
'created_at': lambda created_ats: (np.min(created_ats), np.max(created_ats))
}).reset_index() # note that columns are not renamed appropriately, but are reused immediately
user_update_count_dict = {
user_id: count for user_id, count
in zip(user_updates_df.user_id, user_updates_df.journal_oid)}
user_first_update_dict = {
user_id: created_at[0] for user_id, created_at
in zip(user_updates_df.user_id, user_updates_df.created_at)}
user_last_update_dict = {
user_id: created_at[1] for user_id, created_at
in zip(user_updates_df.user_id, user_updates_df.created_at)}
update_count = [user_update_count_dict[user_id] for user_id in user_df.user_id]
first_update = [user_first_update_dict[user_id] for user_id in user_df.user_id]
last_update = [user_last_update_dict[user_id] for user_id in user_df.user_id]
user_df['update_count'] = update_count
user_df['first_update'] = first_update
user_df['last_update'] = last_update
user_df['author_tenure'] = user_df.last_update - user_df.first_update
assert np.all(user_df.author_tenure > 0)
# posting frequency (updates per month, across all sites)
tenure_in_months = user_df.author_tenure / (1000 * 60 * 60 * 24 * 30)
user_df['update_frequency'] = user_df.update_count / tenure_in_months
# is_interacted_with
# computed from the user->user interaction data
interacted_with_user_ids = set(u2u_df.to_user_id)
is_interacted_with = [user_id in interacted_with_user_ids for user_id in user_df.user_id]
user_df['is_interacted_with'] = is_interacted_with
# is this user an initiator at any point
initiating_user_ids = set(u2u_df.from_user_id)
is_initiator = [user_id in initiating_user_ids for user_id in user_df.user_id]
user_df['is_initiator'] = is_initiator
# #### Compute the dictionary for user->(created_at)
user_updates_dict = journal_df.sort_values(by='created_at', ascending=True).groupby('user_id').agg({
'created_at': lambda created_at: created_at.tolist()
}).created_at.to_dict()
# #### Compute the visits of the most-visited site authored by a user
# construct user->site dictionary
# contains all sites that authors have updated at least one journal update on
user_site_dict = defaultdict(set)
for row in tqdm(journal_df.itertuples(), total=len(journal_df)):
user_site_dict[row.user_id].add(row.site_id)
# construct site->visits dictionary
site_visits_dict = {site_id: visits for site_id, visits in zip(site_metadata_df.site_id, site_metadata_df.visits)}
# construct user->visits dictionary
# pools across multiple sites by taking the site with the maximum number of visits
user_visits_dict = {user_id: max(site_visits_dict[site_id] for site_id in user_site_dict[user_id] if site_id in site_visits_dict)
for user_id in user_df.user_id}
# ### Filter the u2u links
valid_u2u_df = u2u_df[(u2u_df.from_user_id.isin(valid_user_ids))&(u2u_df.to_user_id.isin(valid_user_ids))]
inits_df = valid_u2u_df.sort_values(by='created_at', ascending=True).drop_duplicates(subset=['from_user_id', 'to_user_id'], keep='first')
model_start_date = datetime.fromisoformat('2014-01-01')
model_start_timestamp = int(model_start_date.timestamp() * 1000)
model_end_date = datetime.fromisoformat('2016-01-01')
model_end_timestamp = int(model_end_date.timestamp() * 1000)
if should_generate_test_data:
model_start_date = datetime.fromisoformat('2016-01-01')
model_start_timestamp = int(model_start_date.timestamp() * 1000)
model_end_date = datetime.fromisoformat('2016-06-01')
model_end_timestamp = int(model_end_date.timestamp() * 1000)
# ### Implementation of high-level graph code
class WccGraph:
def __init__(self, node_uids):
self.node_uids = node_uids
self.node_dict = {} # maps node_uid to component_uid
self.component_dict = {} # maps component_uid to a set of node_uids
for component_uid, node_uid in enumerate(self.node_uids):
self.node_dict[node_uid] = component_uid
self.component_dict[component_uid] = set((node_uid,))
self.edge_count = 0
def add_edge(self, from_node_uid, to_node_uid):
self.edge_count += 1
from_component_uid = self.node_dict[from_node_uid]
to_component_uid = self.node_dict[to_node_uid]
if from_component_uid == to_component_uid:
# these nodes are already weakly connected
is_intra_component_edge = True
from_component_size, to_component_size = 0, 0
else: # two different components are being merged with this edge
is_intra_component_edge = False
from_component_nodes = self.component_dict[from_component_uid]
to_component_nodes = self.component_dict[to_component_uid]
from_component_size = len(from_component_nodes)
to_component_size = len(to_component_nodes)
if from_component_size >= to_component_size:
# merge To component into From component, deleting the To component
from_component_nodes.update(to_component_nodes)
del self.component_dict[to_component_uid]
for node_uid in to_component_nodes:
# update the merged in component ids
self.node_dict[node_uid] = from_component_uid
else:
# merge From component into To component, deleting the From component
to_component_nodes.update(from_component_nodes)
del self.component_dict[from_component_uid]
for node_uid in from_component_nodes:
# update the merged in component ids
self.node_dict[node_uid] = to_component_uid
return is_intra_component_edge, from_component_size, to_component_size
def are_weakly_connected(self, user_id1, user_id2):
# two nodes are weakly connected if they exist in the same WCC
return self.node_dict[user_id1] == self.node_dict[user_id2]
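# Usage sketch (hypothetical ids): WccGraph is effectively a union-find over user ids, so
# weak-connectivity queries are O(1) dictionary lookups once edges have been merged in, e.g.
# wcc = WccGraph({1, 2, 3}); wcc.add_edge(1, 2)
# wcc.are_weakly_connected(1, 2) -> True; wcc.are_weakly_connected(1, 3) -> False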
def compute_is_friend_of_friend(G, user_id1, user_id2):
if len(G[user_id1]) == 0 or len(G[user_id2]) == 0:
# if there are zero outbound edges from one of the nodes, they can't be strongly connected
return False
return are_fof_connected(G, user_id1, user_id2) and are_fof_connected(G, user_id2, user_id1)
def are_fof_connected(G, source, target):
# must be a direct connection from either source -> target, or from source -> neighbor -> target
if target in G[source]:
return True
for neighbor in G[source]:
if target in G[neighbor]:
return True
return False
# ### Build the initial graph subset
inits_subset = inits_df[inits_df.created_at < model_start_timestamp]
s = datetime.now()
base_graph = nx.DiGraph()
nodes = set(inits_subset.from_user_id) | set(inits_subset.to_user_id)
edges = [tuple(row) for row in inits_subset[["from_user_id", "to_user_id"]].values]
base_graph.add_nodes_from(nodes)
base_graph.add_edges_from(edges)
print(f"Base graph constructed: {datetime.now() - s}")
# In[142]:
# this second graph tracks only weakly connected component info
s = datetime.now()
user_set = set(inits_df.from_user_id) | set(inits_df.to_user_id)
wcc_graph = WccGraph(user_set)
for from_user_id, to_user_id in inits_subset[["from_user_id", "to_user_id"]].values:
wcc_graph.add_edge(from_user_id, to_user_id)
print(f"WCC graph: {datetime.now() - s}")
G = base_graph.copy()
s = 24
# use s negative samples
# valid candidate users are ALL valid authors who have posted their first update at this time
inits_subset = inits_df[(inits_df.created_at >= model_start_timestamp)&(inits_df.created_at <= model_end_timestamp)]
inits_subset = inits_subset.sort_values(by='created_at', ascending=True)
user_df['time_to_first_update'] = user_df.first_update - model_start_timestamp
# if first update is positive, it is still in the future
# if first update is <= 0, then it should already be an eligible node
# however, it might not be in the network, since the base network only contains connected nodes
active_user_ids = user_df.loc[user_df.time_to_first_update <= 0, 'user_id']
# create data structures storing all of the edges that do not yet but will exist in the model
# these will be added incrementally as computation continues
model_subset = inits_df[(inits_df.created_at >= model_start_timestamp)&(inits_df.created_at <= model_end_timestamp)]
all_edges = [(created_at, tuple(row))
for created_at, row
in zip(model_subset.created_at, model_subset[["from_user_id", "to_user_id"]].values)]
edge_df = pd.DataFrame(all_edges, columns=['created_at', 'edge'])
edge_df['time_to_existence'] = edge_df.created_at - model_start_timestamp
# if time_to_existence <= 0, it should exist in the network
assert np.all(edge_df.time_to_existence > 0)
prev_timestep = model_start_timestamp
active_user_ids = user_df.loc[user_df.time_to_first_update <= 0, 'user_id']
sampled_initiations = []
for from_user_id, to_user_id, created_at in tqdm(zip(inits_subset.from_user_id, inits_subset.to_user_id, inits_subset.created_at), total=len(inits_subset)):
curr_timestep = created_at
elapsed_time = curr_timestep - prev_timestep
if elapsed_time > 0: # When the next iteration causes time to advance
# update the active users set
user_df.time_to_first_update -= elapsed_time
active_user_ids = user_df.loc[user_df.time_to_first_update <= 0, 'user_id']
# update the graph with all initiations between previous timestep and now
edge_df.time_to_existence -= elapsed_time
new_edge_mask = edge_df.time_to_existence < 0 # edges that exist AT zero happen at the current timestep, including the edge from_user_id, to_user_id
new_edges = edge_df[new_edge_mask]
edge_df = edge_df[~new_edge_mask] # TODO Use loc for assignment?
#assert np.all(edge_df[edge_df.time_to_existence==0].created_at == created_at)
G.add_edges_from(new_edges.edge)
# also add edges to the WCC graph
for from_user_id, to_user_id in new_edges.edge:
wcc_graph.add_edge(from_user_id, to_user_id)
# candidate users are all active users...
candidate_user_ids = set(active_user_ids)
# ... minus the true initiation target...
candidate_user_ids.discard(to_user_id)
# ... minus users already initiated to by this user
if from_user_id in G:
candidate_user_ids -= set(G[from_user_id].keys())
# we only sample s of the candidate users
negative_sampled_users = list(random.sample(candidate_user_ids, s))
# now, extract ids for the target user and all of the negative sampled users
indegree_list = []
outdegree_list = []
is_reciprocal_list = []
is_weakly_connected_list = []
is_friend_of_friend_list = []
for user_id in [to_user_id] + negative_sampled_users:
is_friend_of_friend = False
if user_id in G:
indegree = G.in_degree(user_id)
outdegree = G.out_degree(user_id)
is_reciprocal = from_user_id in G[user_id]
is_weakly_connected = wcc_graph.are_weakly_connected(from_user_id, user_id)
if is_weakly_connected:
is_friend_of_friend = compute_is_friend_of_friend(G, from_user_id, user_id)
else:
indegree = 0
outdegree = 0
is_reciprocal = False
is_weakly_connected = False
indegree_list.append(indegree)
outdegree_list.append(outdegree)
is_reciprocal_list.append(is_reciprocal)
is_weakly_connected_list.append(is_weakly_connected)
is_friend_of_friend_list.append(is_friend_of_friend)
d = {
'initiator_user_id': from_user_id,
'target_user_id': to_user_id,
'negative_user_ids': negative_sampled_users,
'created_at': created_at,
'indegree_list': indegree_list,
'outdegree_list': outdegree_list,
'is_reciprocal_list': is_reciprocal_list,
'is_weakly_connected_list': is_weakly_connected_list,
'is_friend_of_friend_list': is_friend_of_friend_list
}
sampled_initiations.append(d)
prev_timestep = curr_timestep
sampled_inits_df = | pd.DataFrame(sampled_initiations) | pandas.DataFrame |
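# The loop above calls compute_is_friend_of_friend(G, from_user_id, user_id), which is defined
# outside this excerpt. A minimal sketch of what such a helper could look like, assuming G is a
# networkx.DiGraph and "friend of friend" means a directed two-hop path through any user the
# initiator already follows (the name, signature, and semantics here are illustrative only):
def compute_is_friend_of_friend(G, from_user_id, to_user_id):
    # both endpoints must be present in the graph for a two-hop path to exist
    if from_user_id not in G or to_user_id not in G:
        return False
    # True if any account the initiator follows already points at the candidate target
    return any(to_user_id in G[intermediate] for intermediate in G[from_user_id])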
# -*- coding: utf-8 -*-
try:
import json
except ImportError:
import simplejson as json
import math
import pytz
import locale
import pytest
import time
import datetime
import calendar
import re
import decimal
import dateutil
from functools import partial
from pandas.compat import range, StringIO, u
from pandas._libs.tslib import Timestamp
import pandas._libs.json as ujson
import pandas.compat as compat
import numpy as np
from pandas import DataFrame, Series, Index, NaT, DatetimeIndex, date_range
import pandas.util.testing as tm
json_unicode = (json.dumps if compat.PY3
else partial(json.dumps, encoding="utf-8"))
def _clean_dict(d):
"""
Sanitize dictionary for JSON by converting all keys to strings.
Parameters
----------
d : dict
The dictionary to convert.
Returns
-------
cleaned_dict : dict
"""
return {str(k): v for k, v in compat.iteritems(d)}
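# For example, non-string keys are stringified so json.dumps can serialize the result:
# _clean_dict({1: "a", 2.5: "b"}) -> {"1": "a", "2.5": "b"}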
@pytest.fixture(params=[
None, # Column indexed by default.
"split",
"records",
"values",
"index"])
def orient(request):
return request.param
@pytest.fixture(params=[None, True])
def numpy(request):
return request.param
class TestUltraJSONTests(object):
@pytest.mark.skipif( | compat.is_platform_32bit() | pandas.compat.is_platform_32bit |
import numpy as np
import pytest
from pandas import Series
import pandas._testing as tm
def test_mask():
# compare with tested results in test_where
s = Series(np.random.randn(5))
cond = s > 0
rs = s.where(~cond, np.nan)
tm.assert_series_equal(rs, s.mask(cond))
rs = s.where(~cond)
rs2 = s.mask(cond)
tm.assert_series_equal(rs, rs2)
rs = s.where(~cond, -s)
rs2 = s.mask(cond, -s)
tm.assert_series_equal(rs, rs2)
cond = Series([True, False, False, True, False], index=s.index)
s2 = -(s.abs())
rs = s2.where(~cond[:3])
rs2 = s2.mask(cond[:3])
tm.assert_series_equal(rs, rs2)
rs = s2.where(~cond[:3], -s2)
rs2 = s2.mask(cond[:3], -s2)
tm.assert_series_equal(rs, rs2)
msg = "Array conditional must be same shape as self"
with pytest.raises(ValueError, match=msg):
s.mask(1)
with pytest.raises(ValueError, match=msg):
s.mask(cond[:3].values, -s)
# dtype changes
s = | Series([1, 2, 3, 4]) | pandas.Series |
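# The invariant exercised above, in short: Series.mask(cond) replaces values where cond is True,
# so it is equivalent to Series.where(~cond). For s = Series([1.0, -2.0, 3.0]) and cond = s > 0,
# both s.mask(cond) and s.where(~cond) yield [NaN, -2.0, NaN].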
import matplotlib.pyplot as plt
import pandas as pd
from .legend_picker import *
from .helpers import *
def show_memory_access(metricsParser, options, onePlotFigureSize, fontSize):
memoryEntries = metricsParser.get_memory_entries()
data = | pd.DataFrame(memoryEntries, columns=['realTime', 'virtualTime', 'operation']) | pandas.DataFrame |
import os
if not os.path.exists("temp"):
os.mkdir("temp")
def add_pi_obj_func_test():
import os
import pyemu
pst = os.path.join("utils","dewater_pest.pst")
pst = pyemu.optimization.add_pi_obj_func(pst,out_pst_name=os.path.join("temp","dewater_pest.piobj.pst"))
print(pst.prior_information.loc["pi_obj_func","equation"])
#pst._update_control_section()
assert pst.control_data.nprior == 1
def fac2real_test():
import os
import numpy as np
import pyemu
# pp_file = os.path.join("utils","points1.dat")
# factors_file = os.path.join("utils","factors1.dat")
# pyemu.utils.gw_utils.fac2real(pp_file,factors_file,
# out_file=os.path.join("utils","test.ref"))
pp_file = os.path.join("utils", "points2.dat")
factors_file = os.path.join("utils", "factors2.dat")
pyemu.geostats.fac2real(pp_file, factors_file,
out_file=os.path.join("temp", "test.ref"))
arr1 = np.loadtxt(os.path.join("utils","fac2real_points2.ref"))
arr2 = np.loadtxt(os.path.join("temp","test.ref"))
#print(np.nansum(np.abs(arr1-arr2)))
#print(np.nanmax(np.abs(arr1-arr2)))
nmax = np.nanmax(np.abs(arr1-arr2))
assert nmax < 0.01
# import matplotlib.pyplot as plt
# diff = (arr1-arr2)/arr1 * 100.0
# diff[np.isnan(arr1)] = np.nan
# p = plt.imshow(diff,interpolation='n')
# plt.colorbar(p)
# plt.show()
def vario_test():
import numpy as np
import pyemu
contribution = 0.1
a = 2.0
for const in [pyemu.utils.geostats.ExpVario,pyemu.utils.geostats.GauVario,
pyemu.utils.geostats.SphVario]:
v = const(contribution,a)
h = v._h_function(np.array([0.0]))
assert h == contribution
h = v._h_function(np.array([a*1000]))
assert h == 0.0
v2 = const(contribution,a,anisotropy=2.0,bearing=90.0)
print(v2._h_function(np.array([a])))
def aniso_test():
import pyemu
contribution = 0.1
a = 2.0
for const in [pyemu.utils.geostats.ExpVario,pyemu.utils.geostats.GauVario,
pyemu.utils.geostats.SphVario]:
v = const(contribution,a)
v2 = const(contribution,a,anisotropy=2.0,bearing=90.0)
v3 = const(contribution,a,anisotropy=2.0,bearing=0.0)
pt0 = (0,0)
pt1 = (1,0)
assert v.covariance(pt0,pt1) == v2.covariance(pt0,pt1)
pt0 = (0,0)
pt1 = (0,1)
assert v.covariance(pt0,pt1) == v3.covariance(pt0,pt1)
def geostruct_test():
import pyemu
v1 = pyemu.utils.geostats.ExpVario(0.1,2.0)
v2 = pyemu.utils.geostats.GauVario(0.1,2.0)
v3 = pyemu.utils.geostats.SphVario(0.1,2.0)
g = pyemu.utils.geostats.GeoStruct(0.2,[v1,v2,v3])
pt0 = (0,0)
pt1 = (0,0)
print(g.covariance(pt0,pt1))
assert g.covariance(pt0,pt1) == 0.5
pt0 = (0,0)
pt1 = (1.0e+10,0)
assert g.covariance(pt0,pt1) == 0.2
def struct_file_test():
import os
import pyemu
structs = pyemu.utils.geostats.read_struct_file(
os.path.join("utils","struct.dat"))
#print(structs[0])
pt0 = (0,0)
pt1 = (0,0)
for s in structs:
assert s.covariance(pt0,pt1) == s.nugget + \
s.variograms[0].contribution
with open(os.path.join("utils","struct_out.dat"),'w') as f:
for s in structs:
s.to_struct_file(f)
structs1 = pyemu.utils.geostats.read_struct_file(
os.path.join("utils","struct_out.dat"))
for s in structs1:
assert s.covariance(pt0,pt1) == s.nugget + \
s.variograms[0].contribution
def covariance_matrix_test():
import os
import pandas as pd
import pyemu
pts = pd.read_csv(os.path.join("utils","points1.dat"),delim_whitespace=True,
header=None,names=["name","x","y"],usecols=[0,1,2])
struct = pyemu.utils.geostats.read_struct_file(
os.path.join("utils","struct.dat"))[0]
struct.variograms[0].covariance_matrix(pts.x,pts.y,names=pts.name)
print(struct.covariance_matrix(pts.x,pts.y,names=pts.name).x)
def setup_ppcov_simple():
import os
import platform
exe_file = os.path.join("utils","ppcov.exe")
print(platform.platform())
if not os.path.exists(exe_file) or not platform.platform().lower().startswith("win"):
print("can't run ppcov setup")
return
pts_file = os.path.join("utils","points1_test.dat")
str_file = os.path.join("utils","struct_test.dat")
args1 = [pts_file,'0.0',str_file,"struct1",os.path.join("utils","ppcov.struct1.out"),'','']
args2 = [pts_file,'0.0',str_file,"struct2",os.path.join("utils","ppcov.struct2.out"),'','']
args3 = [pts_file,'0.0',str_file,"struct3",os.path.join("utils","ppcov.struct3.out"),'','']
for args in [args1,args2,args3]:
in_file = os.path.join("utils","ppcov.in")
with open(in_file,'w') as f:
f.write('\n'.join(args))
os.system(exe_file + '<' + in_file)
def ppcov_simple_test():
import os
import numpy as np
import pandas as pd
import pyemu
pts_file = os.path.join("utils","points1_test.dat")
str_file = os.path.join("utils","struct_test.dat")
mat1_file = os.path.join("utils","ppcov.struct1.out")
mat2_file = os.path.join("utils","ppcov.struct2.out")
mat3_file = os.path.join("utils","ppcov.struct3.out")
ppc_mat1 = pyemu.Cov.from_ascii(mat1_file)
ppc_mat2 = pyemu.Cov.from_ascii(mat2_file)
ppc_mat3 = pyemu.Cov.from_ascii(mat3_file)
pts = pd.read_csv(pts_file,header=None,names=["name","x","y"],usecols=[0,1,2],
delim_whitespace=True)
struct1,struct2,struct3 = pyemu.utils.geostats.read_struct_file(str_file)
print(struct1)
print(struct2)
print(struct3)
for mat,struct in zip([ppc_mat1,ppc_mat2,ppc_mat3],[struct1,struct2,struct3]):
str_mat = struct.covariance_matrix(x=pts.x,y=pts.y,names=pts.name)
print(str_mat.row_names)
delt = mat.x - str_mat.x
assert np.abs(delt).max() < 1.0e-7
def setup_ppcov_complex():
import os
import platform
exe_file = os.path.join("utils","ppcov.exe")
print(platform.platform())
if not os.path.exists(exe_file) or not platform.platform().lower().startswith("win"):
print("can't run ppcov setup")
return
pts_file = os.path.join("utils","points1_test.dat")
str_file = os.path.join("utils","struct_complex.dat")
args1 = [pts_file,'0.0',str_file,"struct1",os.path.join("utils","ppcov.complex.struct1.out"),'','']
args2 = [pts_file,'0.0',str_file,"struct2",os.path.join("utils","ppcov.complex.struct2.out"),'','']
for args in [args1,args2]:
in_file = os.path.join("utils","ppcov.in")
with open(in_file,'w') as f:
f.write('\n'.join(args))
os.system(exe_file + '<' + in_file)
def ppcov_complex_test():
import os
import numpy as np
import pandas as pd
import pyemu
pts_file = os.path.join("utils","points1_test.dat")
str_file = os.path.join("utils","struct_complex.dat")
mat1_file = os.path.join("utils","ppcov.complex.struct1.out")
mat2_file = os.path.join("utils","ppcov.complex.struct2.out")
ppc_mat1 = pyemu.Cov.from_ascii(mat1_file)
ppc_mat2 = pyemu.Cov.from_ascii(mat2_file)
pts = pd.read_csv(pts_file,header=None,names=["name","x","y"],usecols=[0,1,2],
delim_whitespace=True)
struct1,struct2 = pyemu.utils.geostats.read_struct_file(str_file)
print(struct1)
print(struct2)
for mat,struct in zip([ppc_mat1,ppc_mat2],[struct1,struct2]):
str_mat = struct.covariance_matrix(x=pts.x,y=pts.y,names=pts.name)
delt = mat.x - str_mat.x
print(mat.x[:,0])
print(str_mat.x[:,0])
print(np.abs(delt).max())
assert np.abs(delt).max() < 1.0e-7
#break
def pp_to_tpl_test():
import os
import pyemu
pp_file = os.path.join("utils","points1.dat")
pp_df = pyemu.pp_utils.pilot_points_to_tpl(pp_file,name_prefix="test_")
print(pp_df.columns)
def tpl_to_dataframe_test():
import os
import pyemu
pp_file = os.path.join("utils","points1.dat")
pp_df = pyemu.pp_utils.pilot_points_to_tpl(pp_file,name_prefix="test_")
df_tpl = pyemu.pp_utils.pp_tpl_to_dataframe(pp_file+".tpl")
assert df_tpl.shape[0] == pp_df.shape[0]
# def to_mps_test():
# import os
# import pyemu
# jco_file = os.path.join("utils","dewater_pest.jcb")
# jco = pyemu.Jco.from_binary(jco_file)
# #print(jco.x)
# pst = pyemu.Pst(jco_file.replace(".jcb",".pst"))
# #print(pst.nnz_obs_names)
# oc_dict = {oc:"l" for oc in pst.nnz_obs_names}
# obj_func = {name:1.0 for name in pst.par_names}
#
# #pyemu.optimization.to_mps(jco=jco_file)
# #pyemu.optimization.to_mps(jco=jco_file,obs_constraint_sense=oc_dict)
# #pyemu.optimization.to_mps(jco=jco_file,obj_func="h00_00")
# decision_var_names = pst.parameter_data.loc[pst.parameter_data.pargp=="q","parnme"].tolist()
# pyemu.optimization.to_mps(jco=jco_file,obj_func=obj_func,decision_var_names=decision_var_names,
# risk=0.975)
def setup_pp_test():
import os
import pyemu
try:
import flopy
except:
return
model_ws = os.path.join("..","examples","Freyberg","extra_crispy")
ml = flopy.modflow.Modflow.load("freyberg.nam",model_ws=model_ws,check=False)
pp_dir = os.path.join("utils")
#ml.export(os.path.join("temp","test_unrot_grid.shp"))
sr = pyemu.helpers.SpatialReference().from_namfile(
os.path.join(ml.model_ws, ml.namefile),
delc=ml.dis.delc, delr=ml.dis.delr)
sr.rotation = 0.
par_info_unrot = pyemu.pp_utils.setup_pilotpoints_grid(sr=sr, prefix_dict={0: "hk1",1:"hk2"},
every_n_cell=2, pp_dir=pp_dir, tpl_dir=pp_dir,
shapename=os.path.join("temp", "test_unrot.shp"),
)
#print(par_info_unrot.parnme.value_counts())
gs = pyemu.geostats.GeoStruct(variograms=pyemu.geostats.ExpVario(a=1000,contribution=1.0))
ok = pyemu.geostats.OrdinaryKrige(gs,par_info_unrot)
ok.calc_factors_grid(sr)
sr2 = pyemu.helpers.SpatialReference.from_gridspec(
os.path.join(ml.model_ws, "test.spc"), lenuni=2)
par_info_drot = pyemu.pp_utils.setup_pilotpoints_grid(sr=sr2, prefix_dict={0: ["hk1_", "sy1_", "rch_"]},
every_n_cell=2, pp_dir=pp_dir, tpl_dir=pp_dir,
shapename=os.path.join("temp", "test_unrot.shp"),
)
ok = pyemu.geostats.OrdinaryKrige(gs, par_info_unrot)
ok.calc_factors_grid(sr2)
par_info_mrot = pyemu.pp_utils.setup_pilotpoints_grid(ml,prefix_dict={0:["hk1_","sy1_","rch_"]},
every_n_cell=2,pp_dir=pp_dir,tpl_dir=pp_dir,
shapename=os.path.join("temp","test_unrot.shp"))
ok = pyemu.geostats.OrdinaryKrige(gs, par_info_unrot)
ok.calc_factors_grid(ml.sr)
sr.rotation = 15
#ml.export(os.path.join("temp","test_rot_grid.shp"))
#pyemu.gw_utils.setup_pilotpoints_grid(ml)
par_info_rot = pyemu.pp_utils.setup_pilotpoints_grid(sr=sr,every_n_cell=2, pp_dir=pp_dir, tpl_dir=pp_dir,
shapename=os.path.join("temp", "test_rot.shp"))
ok = pyemu.geostats.OrdinaryKrige(gs, par_info_unrot)
ok.calc_factors_grid(sr)
print(par_info_unrot.x)
print(par_info_drot.x)
print(par_info_mrot.x)
print(par_info_rot.x)
def read_hob_test():
import os
import pyemu
hob_file = os.path.join("utils","HOB.txt")
df = pyemu.gw_utils.modflow_hob_to_instruction_file(hob_file)
print(df.obsnme)
def read_pval_test():
import os
import pyemu
pval_file = os.path.join("utils", "meras_trEnhance.pval")
pyemu.gw_utils.modflow_pval_to_template_file(pval_file)
def pp_to_shapefile_test():
import os
import pyemu
try:
import shapefile
except:
print("no pyshp")
return
pp_file = os.path.join("utils","points1.dat")
shp_file = os.path.join("temp","points1.dat.shp")
pyemu.pp_utils.write_pp_shapfile(pp_file)
def write_tpl_test():
import os
import pyemu
tpl_file = os.path.join("utils","test_write.tpl")
in_file = os.path.join("temp","tpl_test.dat")
par_vals = {"q{0}".format(i+1):12345678.90123456 for i in range(7)}
pyemu.pst_utils.write_to_template(par_vals,tpl_file,in_file)
def read_pestpp_runstorage_file_test():
import os
import pyemu
rnj_file = os.path.join("utils","freyberg.rnj")
#rnj_file = os.path.join("..", "..", "verification", "10par_xsec", "master_opt1","pest.rnj")
p1,o1 = pyemu.helpers.read_pestpp_runstorage(rnj_file)
p2,o2 = pyemu.helpers.read_pestpp_runstorage(rnj_file,9)
diff = p1 - p2
diff.sort_values("parval1",inplace=True)
def smp_to_ins_test():
import os
import pyemu
smp = os.path.join("utils","TWDB_wells.smp")
ins = os.path.join('temp',"test.ins")
try:
pyemu.pst_utils.smp_to_ins(smp,ins)
except:
pass
else:
raise Exception("should have failed")
pyemu.smp_utils.smp_to_ins(smp,ins,True)
def master_and_workers():
import shutil
import pyemu
worker_dir = os.path.join("..","verification","10par_xsec","template_mac")
master_dir = os.path.join("temp","master")
if not os.path.exists(master_dir):
os.mkdir(master_dir)
assert os.path.exists(worker_dir)
pyemu.helpers.start_workers(worker_dir,"pestpp","pest.pst",1,
worker_root="temp",master_dir=master_dir)
#now try it from within the master dir
base_cwd = os.getcwd()
os.chdir(master_dir)
pyemu.helpers.start_workers(os.path.join("..","..",worker_dir),
"pestpp","pest.pst",3,
master_dir='.')
os.chdir(base_cwd)
def first_order_pearson_regul_test():
import os
from pyemu import Schur
from pyemu.utils.helpers import first_order_pearson_tikhonov,zero_order_tikhonov
w_dir = "la"
sc = Schur(jco=os.path.join(w_dir,"pest.jcb"))
pt = sc.posterior_parameter
zero_order_tikhonov(sc.pst)
first_order_pearson_tikhonov(sc.pst,pt,reset=False)
print(sc.pst.prior_information)
sc.pst.rectify_pi()
assert sc.pst.control_data.pestmode == "regularization"
sc.pst.write(os.path.join('temp','test.pst'))
def zero_order_regul_test():
import os
import pyemu
pst = pyemu.Pst(os.path.join("pst","inctest.pst"))
pyemu.helpers.zero_order_tikhonov(pst)
print(pst.prior_information)
assert pst.control_data.pestmode == "regularization"
pst.write(os.path.join('temp','test.pst'))
pyemu.helpers.zero_order_tikhonov(pst,reset=False)
assert pst.prior_information.shape[0] == pst.npar_adj * 2
def kl_test():
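    # Round-trip check for the Karhunen-Loeve helpers: project the "true" hk array onto a
    # truncated KL basis, reconstruct it both directly (basis @ coefficients) and through
    # fac2real() with the generated factors file, and require the two reconstructions to
    # agree to within ~1e-5.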
import os
import numpy as np
import pandas as pd
import pyemu
import matplotlib.pyplot as plt
try:
import flopy
except:
print("flopy not imported...")
return
model_ws = os.path.join("..","verification","Freyberg","extra_crispy")
ml = flopy.modflow.Modflow.load("freyberg.nam",model_ws=model_ws,check=False)
str_file = os.path.join("..","verification","Freyberg","structure.dat")
arr_tru = np.loadtxt(os.path.join("..","verification",
"Freyberg","extra_crispy",
"hk.truth.ref")) + 20
basis_file = os.path.join("utils","basis.jco")
tpl_file = os.path.join("utils","test.tpl")
factors_file = os.path.join("temp","factors.dat")
num_eig = 100
prefixes = ["hk1"]
df = pyemu.utils.helpers.kl_setup(num_eig=num_eig, sr=ml.sr,
struct=str_file,
factors_file=factors_file,
basis_file=basis_file,
prefixes=prefixes,islog=False)
basis = pyemu.Matrix.from_binary(basis_file)
basis = basis[:,:num_eig]
arr_tru = np.atleast_2d(arr_tru.flatten()).transpose()
proj = np.dot(basis.T.x,arr_tru)[:num_eig]
#proj.autoalign = False
back = np.dot(basis.x, proj)
back = back.reshape(ml.nrow,ml.ncol)
df.parval1 = proj
arr = pyemu.geostats.fac2real(df,factors_file,out_file=None)
fig = plt.figure(figsize=(10, 10))
ax1, ax2 = plt.subplot(121),plt.subplot(122)
mn,mx = arr_tru.min(),arr_tru.max()
print(arr.max(), arr.min())
print(back.max(),back.min())
diff = np.abs(back - arr)
print(diff.max())
assert diff.max() < 1.0e-5
def ok_test():
import os
import pandas as pd
import pyemu
str_file = os.path.join("utils","struct_test.dat")
pts_data = pd.DataFrame({"x":[1.0,2.0,3.0],"y":[0.,0.,0.],"name":["p1","p2","p3"]})
gs = pyemu.utils.geostats.read_struct_file(str_file)[0]
ok = pyemu.utils.geostats.OrdinaryKrige(gs,pts_data)
interp_points = pts_data.copy()
kf = ok.calc_factors(interp_points.x,interp_points.y)
#for ptname in pts_data.name:
for i in kf.index:
assert len(kf.loc[i,"inames"])== 1
assert kf.loc[i,"ifacts"][0] == 1.0
assert sum(kf.loc[i,"ifacts"]) == 1.0
print(kf)
def ok_grid_test():
try:
import flopy
except:
return
import numpy as np
import pandas as pd
import pyemu
nrow,ncol = 10,5
delr = np.ones((ncol)) * 1.0/float(ncol)
delc = np.ones((nrow)) * 1.0/float(nrow)
num_pts = 0
ptx = np.random.random(num_pts)
pty = np.random.random(num_pts)
ptname = ["p{0}".format(i) for i in range(num_pts)]
pts_data = pd.DataFrame({"x":ptx,"y":pty,"name":ptname})
pts_data.index = pts_data.name
pts_data = pts_data.loc[:,["x","y","name"]]
sr = flopy.utils.SpatialReference(delr=delr,delc=delc)
pts_data.loc["i0j0", :] = [sr.xcentergrid[0,0],sr.ycentergrid[0,0],"i0j0"]
pts_data.loc["imxjmx", :] = [sr.xcentergrid[-1, -1], sr.ycentergrid[-1, -1], "imxjmx"]
str_file = os.path.join("utils","struct_test.dat")
gs = pyemu.utils.geostats.read_struct_file(str_file)[0]
ok = pyemu.utils.geostats.OrdinaryKrige(gs,pts_data)
kf = ok.calc_factors_grid(sr,verbose=False,var_filename=os.path.join("temp","test_var.ref"),minpts_interp=1)
ok.to_grid_factors_file(os.path.join("temp","test.fac"))
def ok_grid_zone_test():
try:
import flopy
except:
return
import numpy as np
import pandas as pd
import pyemu
nrow,ncol = 10,5
delr = np.ones((ncol)) * 1.0/float(ncol)
delc = np.ones((nrow)) * 1.0/float(nrow)
num_pts = 0
ptx = np.random.random(num_pts)
pty = np.random.random(num_pts)
ptname = ["p{0}".format(i) for i in range(num_pts)]
pts_data = pd.DataFrame({"x":ptx,"y":pty,"name":ptname})
pts_data.index = pts_data.name
pts_data = pts_data.loc[:,["x","y","name"]]
sr = flopy.utils.SpatialReference(delr=delr,delc=delc)
pts_data.loc["i0j0", :] = [sr.xcentergrid[0,0],sr.ycentergrid[0,0],"i0j0"]
pts_data.loc["imxjmx", :] = [sr.xcentergrid[-1, -1], sr.ycentergrid[-1, -1], "imxjmx"]
pts_data.loc[:,"zone"] = 1
    pts_data.loc[pts_data.index[1], "zone"] = 2  # avoid chained-indexing assignment, which may not modify pts_data
print(pts_data.zone.unique())
str_file = os.path.join("utils","struct_test.dat")
gs = pyemu.utils.geostats.read_struct_file(str_file)[0]
ok = pyemu.utils.geostats.OrdinaryKrige(gs,pts_data)
zone_array = np.ones((nrow,ncol))
zone_array[0,0] = 2
kf = ok.calc_factors_grid(sr,verbose=False,
var_filename=os.path.join("temp","test_var.ref"),
minpts_interp=1,zone_array=zone_array)
ok.to_grid_factors_file(os.path.join("temp","test.fac"))
def ppk2fac_verf_test():
import os
import numpy as np
import pyemu
try:
import flopy
except:
return
ws = os.path.join("..","verification","Freyberg")
gspc_file = os.path.join(ws,"grid.spc")
pp_file = os.path.join(ws,"pp_00_pp.dat")
str_file = os.path.join(ws,"structure.complex.dat")
ppk2fac_facfile = os.path.join(ws,"ppk2fac_fac.dat")
pyemu_facfile = os.path.join("temp","pyemu_facfile.dat")
sr = flopy.utils.SpatialReference.from_gridspec(gspc_file)
ok = pyemu.utils.OrdinaryKrige(str_file,pp_file)
ok.calc_factors_grid(sr,maxpts_interp=10)
ok.to_grid_factors_file(pyemu_facfile)
zone_arr = np.loadtxt(os.path.join(ws,"extra_crispy","ref","ibound.ref"))
pyemu_arr = pyemu.utils.fac2real(pp_file,pyemu_facfile,out_file=None)
ppk2fac_arr = pyemu.utils.fac2real(pp_file,ppk2fac_facfile,out_file=None)
pyemu_arr[zone_arr == 0] = np.NaN
pyemu_arr[zone_arr == -1] = np.NaN
ppk2fac_arr[zone_arr == 0] = np.NaN
ppk2fac_arr[zone_arr == -1] = np.NaN
diff = np.abs(pyemu_arr - ppk2fac_arr)
print(diff)
assert np.nansum(diff) < 1.0e-6,np.nansum(diff)
# def opt_obs_worth():
# import os
# import pyemu
# wdir = os.path.join("utils")
# os.chdir(wdir)
# pst = pyemu.Pst(os.path.join("supply2_pest.fosm.pst"))
# zero_weight_names = [n for n,w in zip(pst.observation_data.obsnme,pst.observation_data.weight) if w == 0.0]
# #print(zero_weight_names)
# #for attr in ["base_jacobian","hotstart_resfile"]:
# # pst.pestpp_options[attr] = os.path.join(wdir,pst.pestpp_options[attr])
# #pst.template_files = [os.path.join(wdir,f) for f in pst.template_files]
# #pst.instruction_files = [os.path.join(wdir,f) for f in pst.instruction_files]
# #print(pst.template_files)
# df = pyemu.optimization.get_added_obs_importance(pst,obslist_dict={"zeros":zero_weight_names})
# os.chdir("..")
# print(df)
def mflist_budget_test():
import pyemu
import os
import pandas as pd
try:
import flopy
except:
print("no flopy...")
return
model_ws = os.path.join("..","examples","Freyberg_transient")
ml = flopy.modflow.Modflow.load("freyberg.nam",model_ws=model_ws,check=False,load_only=[])
list_filename = os.path.join(model_ws,"freyberg.list")
assert os.path.exists(list_filename)
df = pyemu.gw_utils.setup_mflist_budget_obs(list_filename,start_datetime=ml.start_datetime)
print(df)
times = df.loc[df.index.str.startswith('vol_wells')].index.str.split(
'_', expand=True).get_level_values(2)[::100]
times = pd.to_datetime(times, yearfirst=True)
df = pyemu.gw_utils.setup_mflist_budget_obs(
list_filename, start_datetime=ml.start_datetime, specify_times=times)
flx, vol = pyemu.gw_utils.apply_mflist_budget_obs(
list_filename, 'flux.dat', 'vol.dat', start_datetime=ml.start_datetime,
times='budget_times.config'
)
assert (flx.index == vol.index).all()
assert (flx.index == times).all()
def mtlist_budget_test():
import pyemu
import pandas as pd
import os
try:
import flopy
except:
print("no flopy...")
return
list_filename = os.path.join("utils","mt3d.list")
assert os.path.exists(list_filename)
frun_line,ins_files, df = pyemu.gw_utils.setup_mtlist_budget_obs(
list_filename,start_datetime='1-1-1970')
assert len(ins_files) == 2
frun_line,ins_files, df = pyemu.gw_utils.setup_mtlist_budget_obs(
list_filename,start_datetime='1-1-1970', gw_prefix='')
assert len(ins_files) == 2
frun_line, ins_files, df = pyemu.gw_utils.setup_mtlist_budget_obs(
list_filename, start_datetime=None)
assert len(ins_files) == 2
list_filename = os.path.join("utils", "mt3d_imm_sor.lst")
assert os.path.exists(list_filename)
frun_line, ins_files, df = pyemu.gw_utils.setup_mtlist_budget_obs(
list_filename, start_datetime='1-1-1970')
def geostat_prior_builder_test():
import os
import numpy as np
import pyemu
pst_file = os.path.join("pst","pest.pst")
pst = pyemu.Pst(pst_file)
# print(pst.parameter_data)
tpl_file = os.path.join("utils", "pp_locs.tpl")
str_file = os.path.join("utils", "structure.dat")
cov = pyemu.helpers.geostatistical_prior_builder(pst_file,{str_file:tpl_file})
d1 = np.diag(cov.x)
df = pyemu.pp_utils.pp_tpl_to_dataframe(tpl_file)
df.loc[:,"zone"] = np.arange(df.shape[0])
gs = pyemu.geostats.read_struct_file(str_file)
cov = pyemu.helpers.geostatistical_prior_builder(pst_file,{gs:df},
sigma_range=4)
nnz = np.count_nonzero(cov.x)
assert nnz == pst.npar_adj
d2 = np.diag(cov.x)
assert np.array_equiv(d1, d2)
pst.parameter_data.loc[pst.par_names[1:10], "partrans"] = "tied"
pst.parameter_data.loc[pst.par_names[1:10], "partied"] = pst.par_names[0]
cov = pyemu.helpers.geostatistical_prior_builder(pst, {gs: df},
sigma_range=4)
nnz = np.count_nonzero(cov.x)
assert nnz == pst.npar_adj
ttpl_file = os.path.join("temp", "temp.dat.tpl")
with open(ttpl_file, 'w') as f:
f.write("ptf ~\n ~ temp1 ~\n")
pst.add_parameters(ttpl_file, ttpl_file.replace(".tpl", ""))
pst.parameter_data.loc["temp1", "parubnd"] = 1.1
pst.parameter_data.loc["temp1", "parlbnd"] = 0.9
cov = pyemu.helpers.geostatistical_prior_builder(pst, {str_file: tpl_file})
assert cov.shape[0] == pst.npar_adj
def geostat_draws_test():
import os
import numpy as np
import pyemu
pst_file = os.path.join("pst","pest.pst")
pst = pyemu.Pst(pst_file)
print(pst.parameter_data)
tpl_file = os.path.join("utils", "pp_locs.tpl")
str_file = os.path.join("utils", "structure.dat")
pe = pyemu.helpers.geostatistical_draws(pst_file,{str_file:tpl_file})
assert (pe.shape == pe.dropna().shape)
pst.parameter_data.loc[pst.par_names[1:10], "partrans"] = "tied"
pst.parameter_data.loc[pst.par_names[1:10], "partied"] = pst.par_names[0]
pe = pyemu.helpers.geostatistical_draws(pst, {str_file: tpl_file})
assert (pe.shape == pe.dropna().shape)
df = pyemu.pp_utils.pp_tpl_to_dataframe(tpl_file)
df.loc[:,"zone"] = np.arange(df.shape[0])
gs = pyemu.geostats.read_struct_file(str_file)
pe = pyemu.helpers.geostatistical_draws(pst_file,{gs:df},
sigma_range=4)
ttpl_file = os.path.join("temp", "temp.dat.tpl")
with open(ttpl_file, 'w') as f:
f.write("ptf ~\n ~ temp1 ~\n")
pst.add_parameters(ttpl_file, ttpl_file.replace(".tpl", ""))
pst.parameter_data.loc["temp1", "parubnd"] = 1.1
pst.parameter_data.loc["temp1", "parlbnd"] = 0.9
pst.parameter_data.loc[pst.par_names[1:10],"partrans"] = "tied"
pst.parameter_data.loc[pst.par_names[1:10], "partied"] = pst.par_names[0]
pe = pyemu.helpers.geostatistical_draws(pst, {str_file: tpl_file})
assert (pe.shape == pe.dropna().shape)
# def linearuniversal_krige_test():
# try:
# import flopy
# except:
# return
#
# import numpy as np
# import pandas as pd
# import pyemu
# nrow,ncol = 10,5
# delr = np.ones((ncol)) * 1.0/float(ncol)
# delc = np.ones((nrow)) * 1.0/float(nrow)
#
# num_pts = 0
# ptx = np.random.random(num_pts)
# pty = np.random.random(num_pts)
# ptname = ["p{0}".format(i) for i in range(num_pts)]
# pts_data = pd.DataFrame({"x":ptx,"y":pty,"name":ptname})
# pts_data.index = pts_data.name
# pts_data = pts_data.loc[:,["x","y","name"]]
#
#
# sr = flopy.utils.SpatialReference(delr=delr,delc=delc)
# pts_data.loc["i0j0", :] = [sr.xcentergrid[0,0],sr.ycentergrid[0,0],"i0j0"]
# pts_data.loc["imxjmx", :] = [sr.xcentergrid[-1, -1], sr.ycentergrid[-1, -1], "imxjmx"]
# pts_data.loc["i0j0","value"] = 1.0
# pts_data.loc["imxjmx","value"] = 0.0
#
# str_file = os.path.join("utils","struct_test.dat")
# gs = pyemu.utils.geostats.read_struct_file(str_file)[0]
# luk = pyemu.utils.geostats.LinearUniversalKrige(gs,pts_data)
# df = luk.estimate_grid(sr,verbose=True,
# var_filename=os.path.join("utils","test_var.ref"),
# minpts_interp=1)
def gslib_2_dataframe_test():
import os
import pyemu
gslib_file = os.path.join("utils","ch91pt.shp.gslib")
df = pyemu.geostats.gslib_2_dataframe(gslib_file)
print(df)
def sgems_to_geostruct_test():
import os
import pyemu
xml_file = os.path.join("utils", "ch00")
gs = pyemu.geostats.read_sgems_variogram_xml(xml_file)
def load_sgems_expvar_test():
import os
import numpy as np
#import matplotlib.pyplot as plt
import pyemu
dfs = pyemu.geostats.load_sgems_exp_var(os.path.join("utils","ch00_expvar"))
xmn,xmx = 1.0e+10,-1.0e+10
for d,df in dfs.items():
xmn = min(xmn,df.x.min())
xmx = max(xmx,df.x.max())
xml_file = os.path.join("utils", "ch00")
gs = pyemu.geostats.read_sgems_variogram_xml(xml_file)
v = gs.variograms[0]
#ax = gs.plot(ls="--")
#plt.show()
#x = np.linspace(xmn,xmx,100)
#y = v.inv_h(x)
#
#plt.plot(x,y)
#plt.show()
def read_hydmod_test():
import os
import numpy as np
import pandas as pd
import pyemu
try:
import flopy
except:
return
df, outfile = pyemu.gw_utils.modflow_read_hydmod_file(os.path.join('utils','freyberg.hyd.bin'),
os.path.join('temp','freyberg.hyd.bin.dat'))
df = pd.read_csv(os.path.join('temp', 'freyberg.hyd.bin.dat'), delim_whitespace=True)
dftrue = pd.read_csv(os.path.join('utils', 'freyberg.hyd.bin.dat.true'), delim_whitespace=True)
assert np.allclose(df.obsval.values, dftrue.obsval.values)
def make_hydmod_insfile_test():
import os
import shutil
import pyemu
try:
import flopy
except:
return
shutil.copy2(os.path.join('utils','freyberg.hyd.bin'),os.path.join('temp','freyberg.hyd.bin'))
pyemu.gw_utils.modflow_hydmod_to_instruction_file(os.path.join('temp','freyberg.hyd.bin'))
#assert open(os.path.join('utils','freyberg.hyd.bin.dat.ins'),'r').read() == open('freyberg.hyd.dat.ins', 'r').read()
assert os.path.exists(os.path.join('temp','freyberg.hyd.bin.dat.ins'))
def plot_summary_test():
import os
import pandas as pd
import pyemu
try:
import matplotlib.pyplot as plt
except:
return
par_df = pd.read_csv(os.path.join("utils","freyberg_pp.par.usum.csv"),
index_col=0)
idx = list(par_df.index.map(lambda x: x.startswith("HK")))
par_df = par_df.loc[idx,:]
ax = pyemu.plot_utils.plot_summary_distributions(par_df,label_post=True)
plt.savefig(os.path.join("temp","hk_par.png"))
plt.close()
df = os.path.join("utils","freyberg_pp.pred.usum.csv")
figs,axes = pyemu.plot_utils.plot_summary_distributions(df,subplots=True)
#plt.show()
for i,fig in enumerate(figs):
plt.figure(fig.number)
plt.savefig(os.path.join("temp","test_pred_{0}.png".format(i)))
plt.close(fig)
df = os.path.join("utils","freyberg_pp.par.usum.csv")
figs, axes = pyemu.plot_utils.plot_summary_distributions(df,subplots=True)
for i,fig in enumerate(figs):
plt.figure(fig.number)
plt.savefig(os.path.join("temp","test_par_{0}.png".format(i)))
plt.close(fig)
def hds_timeseries_test():
import os
import shutil
import numpy as np
import pandas as pd
try:
import flopy
except:
return
import pyemu
model_ws =os.path.join("..","examples","Freyberg_transient")
org_hds_file = os.path.join(model_ws, "freyberg.hds")
hds_file = os.path.join("temp", "freyberg.hds")
org_cbc_file = org_hds_file.replace(".hds",".cbc")
cbc_file = hds_file.replace(".hds", ".cbc")
shutil.copy2(org_hds_file, hds_file)
shutil.copy2(org_cbc_file, cbc_file)
m = flopy.modflow.Modflow.load("freyberg.nam", model_ws=model_ws, check=False)
kij_dict = {"test1": [0, 0, 0], "test2": (1, 1, 1), "test": (0, 10, 14)}
pyemu.gw_utils.setup_hds_timeseries(hds_file, kij_dict, include_path=True)
# m.change_model_ws("temp",reset_external=True)
# m.write_input()
# pyemu.os_utils.run("mfnwt freyberg.nam",cwd="temp")
cmd, df1 = pyemu.gw_utils.setup_hds_timeseries(cbc_file, kij_dict, include_path=True, prefix="stor",
text="storage", fill=0.0)
cmd,df2 = pyemu.gw_utils.setup_hds_timeseries(cbc_file, kij_dict, model=m, include_path=True, prefix="stor",
text="storage",fill=0.0)
print(df1)
d = np.abs(df1.obsval.values - df2.obsval.values)
print(d.max())
assert d.max() == 0.0,d
try:
pyemu.gw_utils.setup_hds_timeseries(cbc_file, kij_dict, model=m, include_path=True, prefix="consthead",
text="constant head")
except:
pass
else:
raise Exception("should have failed")
try:
pyemu.gw_utils.setup_hds_timeseries(cbc_file, kij_dict, model=m, include_path=True, prefix="consthead",
text="JUNK")
except:
pass
else:
raise Exception("should have failed")
pyemu.gw_utils.setup_hds_timeseries(hds_file, kij_dict, include_path=True,prefix="hds")
m = flopy.modflow.Modflow.load("freyberg.nam",model_ws=model_ws,load_only=[],check=False)
pyemu.gw_utils.setup_hds_timeseries(hds_file, kij_dict,model=m,include_path=True)
pyemu.gw_utils.setup_hds_timeseries(hds_file, kij_dict, model=m, include_path=True,prefix="hds")
org_hds_file = os.path.join("utils", "MT3D001.UCN")
hds_file = os.path.join("temp", "MT3D001.UCN")
shutil.copy2(org_hds_file, hds_file)
kij_dict = {"test1": [0, 0, 0], "test2": (1, 1, 1)}
pyemu.gw_utils.setup_hds_timeseries(hds_file, kij_dict, include_path=True)
pyemu.gw_utils.setup_hds_timeseries(hds_file, kij_dict, include_path=True, prefix="hds")
m = flopy.modflow.Modflow.load("freyberg.nam", model_ws=model_ws, load_only=[], check=False)
pyemu.gw_utils.setup_hds_timeseries(hds_file, kij_dict, model=m, include_path=True)
pyemu.gw_utils.setup_hds_timeseries(hds_file, kij_dict, model=m, include_path=True, prefix="hds")
# df1 = pd.read_csv(out_file, delim_whitespace=True)
# pyemu.gw_utils.apply_hds_obs(hds_file)
# df2 = pd.read_csv(out_file, delim_whitespace=True)
# diff = df1.obsval - df2.obsval
def grid_obs_test():
import os
import shutil
import numpy as np
import pandas as pd
try:
import flopy
except:
return
import pyemu
m_ws = os.path.join("..", "examples", "freyberg_sfr_update")
org_hds_file = os.path.join("..","examples","Freyberg_Truth","freyberg.hds")
org_multlay_hds_file = os.path.join(m_ws, "freyberg.hds") # 3 layer version
org_ucn_file = os.path.join(m_ws, "MT3D001.UCN") # mt example
hds_file = os.path.join("temp","freyberg.hds")
multlay_hds_file = os.path.join("temp", "freyberg_3lay.hds")
ucn_file = os.path.join("temp", "MT3D001.UCN")
out_file = hds_file+".dat"
multlay_out_file = multlay_hds_file+".dat"
ucn_out_file = ucn_file+".dat"
shutil.copy2(org_hds_file,hds_file)
shutil.copy2(org_multlay_hds_file, multlay_hds_file)
shutil.copy2(org_ucn_file, ucn_file)
pyemu.gw_utils.setup_hds_obs(hds_file)
df1 = pd.read_csv(out_file,delim_whitespace=True)
pyemu.gw_utils.apply_hds_obs(hds_file)
df2 = pd.read_csv(out_file,delim_whitespace=True)
diff = df1.obsval - df2.obsval
assert abs(diff.max()) < 1.0e-6, abs(diff.max())
pyemu.gw_utils.setup_hds_obs(multlay_hds_file)
df1 = pd.read_csv(multlay_out_file,delim_whitespace=True)
assert len(df1) == 3*len(df2), "{} != 3*{}".format(len(df1), len(df2))
pyemu.gw_utils.apply_hds_obs(multlay_hds_file)
df2 = pd.read_csv(multlay_out_file,delim_whitespace=True)
diff = df1.obsval - df2.obsval
assert np.allclose(df1.obsval,df2.obsval), abs(diff.max())
pyemu.gw_utils.setup_hds_obs(hds_file,skip=-999)
df1 = pd.read_csv(out_file,delim_whitespace=True)
pyemu.gw_utils.apply_hds_obs(hds_file)
df2 = pd.read_csv(out_file,delim_whitespace=True)
diff = df1.obsval - df2.obsval
assert diff.max() < 1.0e-6
pyemu.gw_utils.setup_hds_obs(ucn_file, skip=1.e30, prefix='ucn')
df1 = pd.read_csv(ucn_out_file, delim_whitespace=True)
pyemu.gw_utils.apply_hds_obs(ucn_file)
df2 = pd.read_csv(ucn_out_file, delim_whitespace=True)
diff = df1.obsval - df2.obsval
assert np.allclose(df1.obsval, df2.obsval), abs(diff.max())
# skip = lambda x : x < -888.0
skip = lambda x: x if x > -888.0 else np.NaN
pyemu.gw_utils.setup_hds_obs(hds_file,skip=skip)
df1 = pd.read_csv(out_file,delim_whitespace=True)
pyemu.gw_utils.apply_hds_obs(hds_file)
df2 = pd.read_csv(out_file,delim_whitespace=True)
diff = df1.obsval - df2.obsval
assert diff.max() < 1.0e-6
kperk_pairs = (0,0)
pyemu.gw_utils.setup_hds_obs(hds_file,kperk_pairs=kperk_pairs,
skip=skip)
df1 = pd.read_csv(out_file,delim_whitespace=True)
pyemu.gw_utils.apply_hds_obs(hds_file)
df2 = pd.read_csv(out_file,delim_whitespace=True)
diff = df1.obsval - df2.obsval
assert diff.max() < 1.0e-6
kperk_pairs = [(0, 0), (0, 1), (0, 2)]
pyemu.gw_utils.setup_hds_obs(multlay_hds_file, kperk_pairs=kperk_pairs,
skip=skip)
df1 = pd.read_csv(multlay_out_file, delim_whitespace=True)
assert len(df1) == 3*len(df2), "{} != 3*{}".format(len(df1), len(df2))
pyemu.gw_utils.apply_hds_obs(multlay_hds_file)
df2 = pd.read_csv(multlay_out_file, delim_whitespace=True)
diff = df1.obsval - df2.obsval
assert np.allclose(df1.obsval, df2.obsval), abs(diff.max())
kperk_pairs = [(0, 0), (0, 1), (0, 2), (2, 0), (2, 1), (2, 2)]
pyemu.gw_utils.setup_hds_obs(multlay_hds_file, kperk_pairs=kperk_pairs,
skip=skip)
df1 = pd.read_csv(multlay_out_file, delim_whitespace=True)
assert len(df1) == 2 * len(df2), "{} != 2*{}".format(len(df1), len(df2))
pyemu.gw_utils.apply_hds_obs(multlay_hds_file)
df2 = pd.read_csv(multlay_out_file, delim_whitespace=True)
diff = df1.obsval - df2.obsval
assert np.allclose(df1.obsval, df2.obsval), abs(diff.max())
m = flopy.modflow.Modflow.load("freyberg.nam", model_ws=m_ws, load_only=["BAS6"],forgive=False,verbose=True)
kperk_pairs = [(0, 0), (0, 1), (0, 2)]
skipmask = m.bas6.ibound.array
pyemu.gw_utils.setup_hds_obs(multlay_hds_file, kperk_pairs=kperk_pairs,
skip=skipmask)
df1 = pd.read_csv(multlay_out_file, delim_whitespace=True)
pyemu.gw_utils.apply_hds_obs(multlay_hds_file)
df2 = pd.read_csv(multlay_out_file, delim_whitespace=True)
assert len(df1) == len(df2) == np.abs(skipmask).sum(), \
"array skip failing, expecting {0} obs but returned {1}".format(np.abs(skipmask).sum(), len(df1))
diff = df1.obsval - df2.obsval
assert np.allclose(df1.obsval, df2.obsval), abs(diff.max())
kperk_pairs = [(0, 0), (0, 1), (0, 2), (2, 0), (2, 1), (2, 2)]
skipmask = m.bas6.ibound.array[0]
pyemu.gw_utils.setup_hds_obs(multlay_hds_file, kperk_pairs=kperk_pairs,
skip=skipmask)
df1 = pd.read_csv(multlay_out_file, delim_whitespace=True)
pyemu.gw_utils.apply_hds_obs(multlay_hds_file)
df2 = pd.read_csv(multlay_out_file, delim_whitespace=True)
assert len(df1) == len(df2) == 2 * m.nlay * np.abs(skipmask).sum(), "array skip failing"
diff = df1.obsval - df2.obsval
assert np.allclose(df1.obsval, df2.obsval), abs(diff.max())
kperk_pairs = [(0, 0), (0, 1), (0, 2), (2, 0), (2, 1), (2, 2)]
skipmask = m.bas6.ibound.array
pyemu.gw_utils.setup_hds_obs(multlay_hds_file, kperk_pairs=kperk_pairs,
skip=skipmask)
df1 = pd.read_csv(multlay_out_file, delim_whitespace=True)
pyemu.gw_utils.apply_hds_obs(multlay_hds_file)
df2 = pd.read_csv(multlay_out_file, delim_whitespace=True)
assert len(df1) == len(df2) == 2 * np.abs(skipmask).sum(), "array skip failing"
diff = df1.obsval - df2.obsval
assert np.allclose(df1.obsval, df2.obsval), abs(diff.max())
def postprocess_inactive_conc_test():
import os
import shutil
import numpy as np
import pandas as pd
try:
import flopy
except:
return
import pyemu
bd = os.getcwd()
model_ws = os.path.join("..", "examples", "Freyberg_transient")
org_hds_file = os.path.join("utils", "MT3D001.UCN")
hds_file = os.path.join("temp", "MT3D001.UCN")
shutil.copy2(org_hds_file, hds_file)
kij_dict = {"test1": [0, 0, 0], "test2": (1, 1, 1), "inact": [0, 81, 35]}
m = flopy.modflow.Modflow.load("freyberg.nam", model_ws=model_ws, load_only=[], check=False)
frun_line, df = pyemu.gw_utils.setup_hds_timeseries(hds_file, kij_dict, model=m, include_path=True, prefix="hds",
postprocess_inact=1E30)
os.chdir("temp")
df0 = pd.read_csv("{0}_timeseries.processed".format(os.path.split(hds_file)[-1]), delim_whitespace=True).T
df1 = pd.read_csv("{0}_timeseries.post_processed".format(os.path.split(hds_file)[-1]), delim_whitespace=True).T
eval(frun_line)
df2 = pd.read_csv("{0}_timeseries.processed".format(os.path.split(hds_file)[-1]), delim_whitespace=True).T
df3 = pd.read_csv("{0}_timeseries.post_processed".format(os.path.split(hds_file)[-1]), delim_whitespace=True).T
assert np.allclose(df0, df2)
assert np.allclose(df2.test1, df3.test1)
assert np.allclose(df2.test2, df3.test2)
assert np.allclose(df3, df1)
os.chdir(bd)
def gw_sft_ins_test():
import os
import pyemu
sft_outfile = os.path.join("utils","test_sft.out")
#pyemu.gw_utils.setup_sft_obs(sft_outfile)
#pyemu.gw_utils.setup_sft_obs(sft_outfile,start_datetime="1-1-1970")
df = pyemu.gw_utils.setup_sft_obs(sft_outfile, start_datetime="1-1-1970",times=[10950.00])
#print(df)
def sfr_helper_test():
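    # Round-trip the SFR segment-parameter helpers: write the multiplier template/config files,
    # edit the multiplier values on disk, re-apply them with apply_sfr_seg_parameters(), and
    # confirm the rewritten SFR package reflects the multipliers (e.g. flow scaled by 10).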
import os
import shutil
import pandas as pd
import pyemu
import flopy
#setup the process
m = flopy.modflow.Modflow.load("supply2.nam",model_ws="utils",check=False,verbose=True,forgive=False,load_only=["dis","sfr"])
sd = m.sfr.segment_data[0].copy()
sd["flow"] = 1.0
sd["pptsw"] = 1.0
m.sfr.segment_data = {k:sd.copy() for k in range(m.nper)}
df_sfr = pyemu.gw_utils.setup_sfr_seg_parameters(
m, include_temporal_pars=['hcond1', 'flow'])
print(df_sfr)
os.chdir("utils")
# change the name of the sfr file that will be created
pars = {}
with open("sfr_seg_pars.config") as f:
for line in f:
line = line.strip().split()
pars[line[0]] = line[1]
pars["sfr_filename"] = "test.sfr"
with open("sfr_seg_pars.config", 'w') as f:
for k, v in pars.items():
f.write("{0} {1}\n".format(k, v))
# change some hcond1 values
df = pd.read_csv("sfr_seg_temporal_pars.dat", delim_whitespace=False, index_col=0)
df.loc[:, "flow"] = 10.0
df.to_csv("sfr_seg_temporal_pars.dat", sep=',')
sd1 = pyemu.gw_utils.apply_sfr_seg_parameters().segment_data
m1 = flopy.modflow.Modflow.load("supply2.nam", load_only=["sfr"], check=False)
os.chdir("..")
for kper,sd in m1.sfr.segment_data.items():
#print(sd["flow"],sd1[kper]["flow"])
for i1,i2 in zip(sd["flow"],sd1[kper]["flow"]):
assert i1 * 10 == i2,"{0},{1}".format(i1,i2)
df_sfr = pyemu.gw_utils.setup_sfr_seg_parameters("supply2.nam", model_ws="utils", include_temporal_pars=True)
os.chdir("utils")
# change the name of the sfr file that will be created
pars = {}
with open("sfr_seg_pars.config") as f:
for line in f:
line = line.strip().split()
pars[line[0]] = line[1]
pars["sfr_filename"] = "test.sfr"
with open("sfr_seg_pars.config", 'w') as f:
for k, v in pars.items():
f.write("{0} {1}\n".format(k, v))
# change some hcond1 values
df = pd.read_csv("sfr_seg_pars.dat", delim_whitespace=False,index_col=0)
df.loc[:, "hcond1"] = 1.0
df.to_csv("sfr_seg_pars.dat", sep=',')
# make sure the hcond1 mult worked...
sd1 = pyemu.gw_utils.apply_sfr_seg_parameters().segment_data[0]
m1 = flopy.modflow.Modflow.load("supply2.nam", load_only=["sfr"], check=False)
sd2 = m1.sfr.segment_data[0]
sd1 = pd.DataFrame.from_records(sd1)
sd2 = pd.DataFrame.from_records(sd2)
# print(sd1.hcond1)
# print(sd2.hcond2)
assert sd1.hcond1.sum() == sd2.hcond1.sum()
# change some hcond1 values
df = pd.read_csv("sfr_seg_pars.dat",delim_whitespace=False,index_col=0)
df.loc[:,"hcond1"] = 0.5
df.to_csv("sfr_seg_pars.dat",sep=',')
#change the name of the sfr file that will be created
pars = {}
with open("sfr_seg_pars.config") as f:
for line in f:
line = line.strip().split()
pars[line[0]] = line[1]
pars["sfr_filename"] = "test.sfr"
with open("sfr_seg_pars.config",'w') as f:
for k,v in pars.items():
f.write("{0} {1}\n".format(k,v))
#make sure the hcond1 mult worked...
sd1 = pyemu.gw_utils.apply_sfr_seg_parameters().segment_data[0]
m1 = flopy.modflow.Modflow.load("supply2.nam",load_only=["sfr"],check=False)
sd2 = m1.sfr.segment_data[0]
sd1 = | pd.DataFrame.from_records(sd1) | pandas.DataFrame.from_records |
from dotenv import load_dotenv
import pandas as pd
import re
import regex
import pickle
from bs4 import BeautifulSoup
from io import StringIO
from contextlib import redirect_stdout, redirect_stderr
import traceback
from sqlalchemy import text
from fuzzywuzzy import fuzz
import json
import numpy as np
from berdi.Database_Connection_Files.connect_to_database import connect_to_db
import berdi.Section_03_Table_and_Figure_Title_Extraction.constants as constants
# Load environment variables (from .env file) for the database
load_dotenv(
dotenv_path=constants.ROOT_PATH / "berdi/Database_Connection_Files" / ".env", override=True
)
engine = connect_to_db()
# take a project and assign all Figure titles from toc to a pdf id and page
def project_figure_titles(project):
buf = StringIO()
with redirect_stdout(buf), redirect_stderr(buf):
try:
# get all fig titles for this project from db
with engine.connect() as conn:
params = {"project": project}
stmt = text("SELECT toc.titleTOC, toc.page_name, toc.toc_page_num, toc.toc_pdfId, toc.toc_title_order "
"FROM toc LEFT JOIN pdfs ON toc.toc_pdfId = pdfs.pdfId "
"WHERE title_type='Figure' and short_name = :project "
"ORDER BY pdfs.short_name, toc.toc_pdfId, toc.toc_page_num, toc.toc_title_order;")
df_figs = pd.read_sql_query(stmt, conn, params=params)
stmt = text("SELECT pdfId FROM pdfs WHERE short_name = :project;")
project_df = pd.read_sql_query(stmt, conn, params=params)
prev_id = 0
project_ids = project_df['pdfId'].tolist()
for index, row in df_figs.iterrows():
title = row['titleTOC']
c = title.count(' ')
if c >= 2:
word1, word2, s2 = title.split(' ', 2)
else:
word1, word2 = title.split(' ', 1)
s2 = ''
word2 = re.sub('[^a-zA-Z0-9]$', '', word2)
word2 = re.sub('[^a-zA-Z0-9]', '[^a-zA-Z0-9]', word2)
word1_rex = re.compile(r'(?i)\b' + word1 + r'\s')
word2_rex = re.compile(r'(?i)\b' + word2)
s2 = re.sub(constants.punctuation, ' ', s2) # remove punctuation
s2 = re.sub(constants.whitespace, ' ', s2) # remove whitespace
toc_id = row['toc_pdfId']
toc_page = row['toc_page_num']
page_str = row['page_name']
title_order = row['toc_title_order']
page_rex = re.sub('[^a-zA-Z0-9]', '[^a-zA-Z0-9]', word2)
                page_rex = re.compile(r'(?i)\b' + page_rex + r'\b')  # raw string: a plain '\b' is a backspace character, not a word boundary
docs_check = [prev_id, toc_id]
after = [i for i in project_ids if i > toc_id]
after.sort()
before = [i for i in project_ids if i < toc_id]
before.sort(reverse=True)
docs_check.extend(after)
docs_check.extend(before)
count = 0
for doc_id in docs_check:
if doc_id > 0:
arg = (doc_id, toc_id, toc_page, title_order, word1_rex, word2_rex, s2.lower(), page_rex, title)
count = figure_checker(arg)
if count > 0:
prev_id = doc_id
break
if count == 0:
# write 0 count to db
with engine.connect() as conn:
stmt = text(
"UPDATE toc SET assigned_count = 0, loc_pdfId = null, loc_page_list = null "
"WHERE (toc_pdfId = :pdf_id) and (toc_page_num = :page_num) and (toc_title_order = :title_order);")
params = {"pdf_id": toc_id, "page_num": toc_page, "title_order": title_order}
result = conn.execute(stmt, params)
if result.rowcount != 1:
print('could not assign 0 count to: ', toc_id, toc_page, title_order)
return True, buf.getvalue()
except Exception as e:
traceback.print_tb(e.__traceback__)
return False, buf.getvalue()
# For a Figure TOC title, find a pdf id and page where that figure lives
def figure_checker(args):
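    # Scan candidate pages of doc_id for the figure named by `title`: image-heavy pages are tried
    # first, then table pages with no assigned title; a page qualifies only if the figure-number
    # pattern (word2_rex) appears, roughly 70% of the long title words fuzzy-match the page text,
    # and a sliding-window fuzz ratio reaches at least 60. The best-matching pages are written
    # back to the toc table as the figure's location.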
conn = engine.connect()
try:
doc_id, toc_id, toc_page, toc_order, word1_rex, word2_rex, s2, page_rex, title = args
# get pages where we have images for this document (from db)
stmt = text("SELECT page_num, block_order, type as 'images', bbox_area_image, bbox_area FROM blocks "
"WHERE (pdfId = :pdf_id) and (bbox_x0 >= 0) and (bbox_y0 >= 0) and (bbox_x1 >= 0) and (bbox_y1 >= 0);")
params = {"pdf_id": doc_id}
df = pd.read_sql_query(stmt, conn, params=params)
df_pages = df[['page_num', 'bbox_area_image', 'bbox_area', 'images']].groupby('page_num', as_index=False).sum()
df_pages['imageProportion'] = df_pages['bbox_area_image'] / df_pages['bbox_area']
min_proportion = df_pages['imageProportion'].mean() if df_pages['imageProportion'].mean() < 0.1 else 0.1
min_images = df_pages['images'].mean()
df_pages = df_pages[(df_pages['imageProportion'] > min_proportion) | (df_pages['images'] > min_images)]
image_pages = df_pages['page_num'].unique().tolist()
# df['Real Order'] = df.groupby(['page'])['tableNumber'].rank()
stmt = text("SELECT page FROM csvs WHERE (pdfId = :pdf_id) "
"and (titleFinal = '' or titleFinal is null) GROUP BY page;")
params = {"pdf_id": doc_id}
extra_pages_df = pd.read_sql_query(stmt, conn, params=params)
extra_pages = [p for p in extra_pages_df['page'].tolist() if p not in image_pages] # list of extra pages to check
# get text
if len(extra_pages) > 0 and len(image_pages) > 0:
params = {"pdf_id": doc_id, "image_list": image_pages, "extra_list": extra_pages}
stmt = text("SELECT page_num, clean_content FROM pages_normal_txt "
"WHERE (pdfId = :pdf_id) and (page_num in :image_list or page_num in :extra_list);")
stmt_rotated = text("SELECT page_num, clean_content FROM pages_rotated90_txt "
"WHERE (pdfId = :pdf_id) and (page_num in :image_list or page_num in :extra_list);")
text_df = pd.read_sql_query(stmt, conn, params=params, index_col='page_num')
text_rotated_df = pd.read_sql_query(stmt_rotated, conn, params=params, index_col='page_num')
elif len(image_pages) > 0:
params = {"pdf_id": doc_id, "image_list": image_pages}
stmt = text("SELECT page_num, clean_content FROM pages_normal_txt "
"WHERE (pdfId = :pdf_id) and (page_num in :image_list);")
stmt_rotated = text("SELECT page_num, clean_content FROM pages_rotated90_txt "
"WHERE (pdfId = :pdf_id) and (page_num in :image_list);")
text_df = pd.read_sql_query(stmt, conn, params=params, index_col='page_num')
text_rotated_df = pd.read_sql_query(stmt_rotated, conn, params=params, index_col='page_num')
elif len(extra_pages) > 0:
params = {"pdf_id": doc_id, "extra_list": extra_pages}
stmt = text("SELECT page_num, clean_content FROM pages_normal_txt "
"WHERE (pdfId = :pdf_id) and (page_num in :extra_list);")
stmt_rotated = text("SELECT page_num, clean_content FROM pages_rotated90_txt "
"WHERE (pdfId = :pdf_id) and (page_num in :extra_list);")
text_df = pd.read_sql_query(stmt, conn, params=params, index_col='page_num')
text_rotated_df = pd.read_sql_query(stmt_rotated, conn, params=params, index_col='page_num')
else:
text_df = None
text_rotated_df = None
p_list = []
sim_list = []
ratio_list = []
word2_list = []
for check_list in [image_pages, extra_pages]:
for page_num in check_list:
if (doc_id != toc_id) or (page_num != toc_page): # if not toc page
text_ws = text_df.loc[page_num, 'clean_content']
text_clean = re.sub(constants.punctuation, ' ', text_ws)
text_clean = re.sub(constants.whitespace, ' ', text_clean)
text_clean = text_clean.lower()
text_rotated_ws = text_rotated_df.loc[page_num, 'clean_content']
text_rotated_clean = re.sub(constants.punctuation, ' ', text_rotated_ws)
text_rotated_clean = re.sub(constants.whitespace, ' ', text_rotated_clean)
text_rotated_clean = text_rotated_clean.lower()
if re.search(word2_rex, text_ws) or re.search(word2_rex, text_rotated_ws): # check fig number
words = [word for word in s2.split() if (len(word) > 3) and (word != 'project')]
match = [word for word in words if (regex.search(r'(?i)\b' + word + r'{e<=1}\b', text_clean))
or (regex.search(r'(?i)\b' + word + r'{e<=1}\b', text_rotated_clean))]
if len(words) > 0:
sim = len(match) / len(words)
else:
sim = 0
if sim >= 0.7: # check that enough words exists
l = len(s2)
ratio = 0
for i in range(len(text_clean) - l + 1):
r = fuzz.ratio(s2, text_clean[i:i + l])
if r > ratio:
ratio = r
                                if ratio == 100:  # fuzz.ratio is on a 0-100 scale; stop early only on a perfect match
                                    break
for i in range(len(text_rotated_clean) - l + 1):
                                if ratio == 100:  # a perfect match was already found in the normal-orientation text
                                    break
r = fuzz.ratio(s2, text_rotated_clean[i:i + l])
if r > ratio:
ratio = r
# ratio = max(fuzz.partial_ratio(s2, text_clean),
# fuzz.partial_ratio(s2, text_rotated_clean))
if ratio >= 60: # check that the fuzzy match is close enough
p_list.append(page_num)
sim_list.append(sim)
ratio_list.append(ratio)
if len(sim_list) > 0: # if found in image pages don't check extra pages
break
# only keep those with largest sim
if len(sim_list) > 0:
max_sim = max(sim_list)
p_list2 = []
ratio_list2 = []
for i, sim in enumerate(sim_list):
if sim >= max_sim:
p_list2.append(p_list[i])
ratio_list2.append(ratio_list[i])
max_ratio = max(ratio_list2)
# now only keep those with largest ratio
final_list = [{'page_num':int(p_list2[i]), 'sim':max_sim, 'ratio':ratio} for i, ratio in enumerate(ratio_list2) if ratio >= max_ratio]
else:
final_list = []
count = len(final_list)
if count > 0:
stmt = text("UPDATE toc SET assigned_count = :count, loc_pdfId = :loc_id, loc_page_list = :loc_pages "
"WHERE (toc_pdfId = :pdf_id) and (toc_page_num = :page_num) and (toc_title_order = :title_order);")
params = {"count": count, "loc_id": doc_id, "loc_pages": json.dumps(final_list), "pdf_id": toc_id, "page_num": toc_page, "title_order": toc_order}
result = conn.execute(stmt, params)
if result.rowcount != 1:
print('could not assign TOC count to: ', toc_id, toc_page, toc_order)
conn.close()
return count
except Exception as e:
conn.close()
print('Error in', doc_id)
print(traceback.print_tb(e.__traceback__))
return 0
# determine category of table tag title (if continued title --> 1, if regular title --> 2, if just text --> 0)
def get_category(title):
category = False
# title_clean = re.sub(extra_chars, '', title) # get rid of some extra characters
title_clean = re.sub(constants.punctuation, '', title) # remove punctuation
title_clean = re.sub(constants.small_word, '', title_clean) # delete any 1 or 2 letter words without digits
title_clean = re.sub(constants.whitespace, ' ', title_clean).strip() # replace whitespace with single space
num_words = title_clean.count(' ') + 1
    if num_words <= 3:
        if 'cont' in title_clean.lower():  # if any word starts with cont
            category = 1
        else:
            category = 2
    else:
        # split only when there are at least 4 words, so the 4-way unpack cannot raise ValueError
        _, _, third, _ = title_clean.split(' ', 3)
        if third.lower().startswith('cont') or third[0].isdigit() or third[0].isupper():
            category = 1
        else:
            category = 0
return category
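# In short: titles of up to three cleaned words get category 1 when they contain "cont" and
# category 2 otherwise; longer strings get category 1 when the third cleaned word starts with
# "cont", a digit, or a capital letter, and category 0 (plain text) otherwise.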
def find_tag_title_table(data_id):
buf = StringIO()
conn = engine.connect()
with redirect_stdout(buf), redirect_stderr(buf):
try:
# get tables for this document
stmt = text("SELECT page, tableNumber FROM csvs "
"WHERE (hasContent = 1) and (csvColumns > 1) and (whitespace < 78) "
"and (pdfId = :pdf_id);")
params = {"pdf_id": data_id}
df = pd.read_sql_query(stmt, conn, params=params)
if df.empty:
df['Real Order'] = None
else:
df['Real Order'] = df.groupby(['page'])['tableNumber'].rank()
table_pages = df['page'].unique().tolist()
if len(table_pages) > 0:
params = {"pdf_id": data_id, "table_list": table_pages}
stmt = text("SELECT page_num, content FROM pages_normal_txt "
"WHERE (pdfId = :pdf_id) and (page_num in :table_list);")
stmt_rotated = text("SELECT page_num, content FROM pages_rotated90_txt "
"WHERE (pdfId = :pdf_id) and (page_num in :table_list);")
text_df = | pd.read_sql_query(stmt, conn, params=params, index_col='page_num') | pandas.read_sql_query |
#!/usr/bin/env python
import enum
from enum import Enum
import logging
import pandas as pd
from pandas.api.types import CategoricalDtype
from .left_right import LeftRight
logger = logging.getLogger(__name__)
@enum.unique
class TraumaCategory(Enum):
NOT_PRESENT = -1 # noqa: E221,E222
PARTIAL_BONE = 0.5 # noqa: E221,E222
NORMAL = 1 # noqa: E221,E222
INFECTION = 2 # noqa: E221,E222
FRACTURE = 3 # noqa: E221,E222
UNHEALED_FRACTURE = 4 # noqa: E221,E222
CRIBA = 5 # noqa: E221,E222
BLUNT_FORCE_TRAUMA = 6 # noqa: E221,E222
SHARP_FORCE_TRAUMA = 7 # noqa: E221,E222
TREPONATION = 8 # noqa: E221,E222
UNFUSED = 9 # noqa: E221,E222
BONY_GROWTH = 10 # noqa: E221,E222
FUSED = 11 # noqa: E221,E222
OSTEOCHONDRITIS_DESSICANS = 12 # noqa: E221,E222
@staticmethod
def parse(value):
if value is None:
return None
if type(value) == TraumaCategory: # pylint: disable=C0123
return value
if isinstance(value, (float, int)):
value = str(value)
if not isinstance(value, str):
raise ValueError(f'Failed to parse TraumaCategory: "{value}"')
value = value.upper()
for condition in TraumaCategory:
if value == condition.name:
return condition
if value == str(condition.value):
return condition
if value in ('NA', 'N'):
return TraumaCategory.NOT_PRESENT
raise ValueError(f'Failed to parse TraumaCategory: "{value}"')
@staticmethod
def avg(left, right):
if left in (None, TraumaCategory.NOT_PRESENT):
return right
if right in (None, TraumaCategory.NOT_PRESENT):
return left
if left == right:
return left
raise NotImplementedError()
def __repr__(self):
return f'{self.__class__.__name__}: {self}'
def __str__(self):
return self.name
@staticmethod
def dtype():
return CategoricalDtype(categories=[s.name for s in TraumaCategory], ordered=False)
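# For example, TraumaCategory.parse matches case-insensitively on either the member name or its
# numeric value: parse("fracture") and parse(3) both give TraumaCategory.FRACTURE, while
# parse("NA") and parse("N") map to TraumaCategory.NOT_PRESENT.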
class Trauma(object): # pylint: disable=R0902
"""docstring for Trauma"""
    def __init__(self, facial_bones: TraumaCategory, clavicle: LeftRight[TraumaCategory],
                 scapula: LeftRight[TraumaCategory], humerus: LeftRight[TraumaCategory],
                 ulna: LeftRight[TraumaCategory], radius: LeftRight[TraumaCategory],
                 femur: LeftRight[TraumaCategory], tibia: LeftRight[TraumaCategory],
                 fibula: LeftRight[TraumaCategory], ribs: TraumaCategory, vertabrae: TraumaCategory):
self.facial_bones = facial_bones
self.clavicle = clavicle
self.scapula = scapula
self.humerus = humerus
self.ulna = ulna
self.radius = radius
self.femur = femur
self.tibia = tibia
self.fibula = fibula
self.ribs = ribs
self.vertabrae = vertabrae
@staticmethod
def empty():
categories = [TraumaCategory.NOT_PRESENT] * 1
categories += [LeftRight(TraumaCategory.NOT_PRESENT, TraumaCategory.NOT_PRESENT)] * 8
categories += [TraumaCategory.NOT_PRESENT] * 2
return Trauma(*categories)
def to_pd_data_frame(self, index):
d = {
'id': | pd.Series([index]) | pandas.Series |
from sanic import Sanic
from jinja2 import Template
import asyncio
import json
from sanic import response
import collections
import pandas as pd
import datetime
import aiohttp
from aiohttp import ClientConnectionError
import math
import random
import pkgutil
import os
BENCHMARK_TICKER = {'HSI': 'HK.800000', 'SPX': 'HK.800000'}
class WebApp:
def __init__(self, max_curve_rows=10000):
self.app = Sanic('Dashboard')
self.app_add_route(app=self.app)
self.hook_ip = None
self.algo_ips = dict()
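        # Per-algo summary/position/pending frames and curve histories, keyed by algo name and then by table type.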
self.algo_data = collections.defaultdict(lambda: collections.defaultdict(lambda: pd.DataFrame()))
self.algo_curves = collections.defaultdict(lambda: collections.defaultdict(lambda: pd.DataFrame()))
self.failed_algo = dict()
self.benchmark_df = collections.defaultdict(lambda: pd.DataFrame())
self.last_update_time = None
self.max_curve_rows = max_curve_rows
self.port = None
async def update_summary(self, algo_name):
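        # Fetch /summary, append it to the cached table, and dedupe on 'name' (drop_duplicates keeps the first occurrence).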
try:
async with aiohttp.ClientSession() as session:
async with session.get(self.algo_ips[algo_name] + '/summary') as resp:
result = await resp.json()
resp_df = pd.DataFrame(result['return']['content'], index=[0])
self.algo_data[algo_name]['summary'] = self.algo_data[algo_name]['summary'].append(resp_df).drop_duplicates(
['name'])
except ClientConnectionError:
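            # Unreachable algo: record it in failed_algo, drop its endpoint, clear its cached data, and re-raise.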
self.failed_algo[algo_name] = self.algo_ips[algo_name]
del self.algo_ips[algo_name]
self.algo_data[algo_name] = collections.defaultdict(lambda: pd.DataFrame())
raise
async def update_curves(self, algo_name):
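        # Query /curves from the earliest cached timestamp (or 2000-01-01 when empty), dedupe on x, and cap the history length.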
if self.algo_curves[algo_name]['PV'].shape[0] == 0:
start_date = '2000-01-01'
else:
start_date = min(self.algo_curves[algo_name]['PV']['x']).strftime('%Y-%m-%d')
try:
async with aiohttp.ClientSession() as session:
async with session.get(self.algo_ips[algo_name] + '/curves', params={'start_date': start_date}) as resp:
result = await resp.json()
result = result['return']['content']
for curve_type in result.keys():
tmp_df = pd.DataFrame(result[curve_type], index=[0]) if len(result[curve_type]) == 1 else pd.DataFrame(
result[curve_type])
tmp_df['x'] = pd.to_datetime(tmp_df['x'])
self.algo_curves[algo_name][curve_type] = self.algo_curves[algo_name][curve_type].append(tmp_df)
self.algo_curves[algo_name][curve_type] = self.algo_curves[algo_name][curve_type].drop_duplicates(['x'])
if self.algo_curves[algo_name][curve_type].shape[0] >= self.max_curve_rows:
self.algo_curves[algo_name][curve_type] = self.algo_curves[algo_name][curve_type].iloc[
-self.max_curve_rows:]
except ClientConnectionError:
self.failed_algo[algo_name] = self.algo_ips[algo_name]
del self.algo_ips[algo_name]
self.algo_curves[algo_name] = collections.defaultdict(lambda: pd.DataFrame())
raise
async def update_positions(self, algo_name):
try:
async with aiohttp.ClientSession() as session:
async with session.get(self.algo_ips[algo_name] + '/positions') as resp:
result = await resp.json()
resp_df = pd.DataFrame(result['return']['content']['positions'], index=[0]) if len(
result) == 1 else pd.DataFrame(
result['return']['content']['positions'])
self.algo_data[algo_name]['positions'] = resp_df
except ClientConnectionError:
self.failed_algo[algo_name] = self.algo_ips[algo_name]
del self.algo_ips[algo_name]
self.algo_data[algo_name] = collections.defaultdict(lambda: pd.DataFrame())
raise
async def update_settings(self, algo_name):
try:
async with aiohttp.ClientSession() as session:
async with session.get(self.algo_ips[algo_name] + '/attributes') as resp:
result = await resp.json()
self.algo_data[algo_name]['settings'] = result['return']['content']
except ClientConnectionError:
self.failed_algo[algo_name] = self.algo_ips[algo_name]
del self.algo_ips[algo_name]
self.algo_data[algo_name] = collections.defaultdict(lambda: pd.DataFrame())
raise
async def update_pending(self, algo_name):
try:
async with aiohttp.ClientSession() as session:
async with session.get(self.algo_ips[algo_name] + '/pending') as resp:
result = await resp.json()
resp_df = pd.DataFrame(result['return']['content']['pending_orders'], index=[0]) if len(
result) == 1 else pd.DataFrame(
result['return']['content']['pending_orders'])
self.algo_data[algo_name]['pending'] = resp_df
except ClientConnectionError:
self.failed_algo[algo_name] = self.algo_ips[algo_name]
del self.algo_ips[algo_name]
self.algo_data[algo_name] = collections.defaultdict(lambda: | pd.DataFrame() | pandas.DataFrame |
# coding=utf-8
# pylint: disable-msg=E1101,W0612
from datetime import datetime, timedelta
import operator
from itertools import product, starmap
from numpy import nan, inf
import numpy as np
import pandas as pd
from pandas import (Index, Series, DataFrame, isnull, bdate_range,
NaT, date_range, timedelta_range,
_np_version_under1p8)
from pandas.tseries.index import Timestamp
from pandas.tseries.tdi import Timedelta
import pandas.core.nanops as nanops
from pandas.compat import range, zip
from pandas import compat
from pandas.util.testing import assert_series_equal, assert_almost_equal
import pandas.util.testing as tm
from .common import TestData
class TestSeriesOperators(TestData, tm.TestCase):
_multiprocess_can_split_ = True
def test_comparisons(self):
left = np.random.randn(10)
right = np.random.randn(10)
left[:3] = np.nan
result = nanops.nangt(left, right)
with np.errstate(invalid='ignore'):
expected = (left > right).astype('O')
expected[:3] = np.nan
assert_almost_equal(result, expected)
s = Series(['a', 'b', 'c'])
s2 = Series([False, True, False])
# it works!
exp = Series([False, False, False])
tm.assert_series_equal(s == s2, exp)
tm.assert_series_equal(s2 == s, exp)
def test_op_method(self):
def check(series, other, check_reverse=False):
simple_ops = ['add', 'sub', 'mul', 'floordiv', 'truediv', 'pow']
if not compat.PY3:
simple_ops.append('div')
for opname in simple_ops:
op = getattr(Series, opname)
if op == 'div':
alt = operator.truediv
else:
alt = getattr(operator, opname)
result = op(series, other)
expected = alt(series, other)
tm.assert_almost_equal(result, expected)
if check_reverse:
rop = getattr(Series, "r" + opname)
result = rop(series, other)
expected = alt(other, series)
tm.assert_almost_equal(result, expected)
check(self.ts, self.ts * 2)
check(self.ts, self.ts[::2])
check(self.ts, 5, check_reverse=True)
check(tm.makeFloatSeries(), tm.makeFloatSeries(), check_reverse=True)
def test_neg(self):
assert_series_equal(-self.series, -1 * self.series)
def test_invert(self):
assert_series_equal(-(self.series < 0), ~(self.series < 0))
def test_div(self):
with np.errstate(all='ignore'):
# no longer do integer div for any ops, but deal with the 0's
p = DataFrame({'first': [3, 4, 5, 8], 'second': [0, 0, 0, 3]})
result = p['first'] / p['second']
expected = Series(
p['first'].values.astype(float) / p['second'].values,
dtype='float64')
expected.iloc[0:3] = np.inf
assert_series_equal(result, expected)
result = p['first'] / 0
expected = Series(np.inf, index=p.index, name='first')
assert_series_equal(result, expected)
p = p.astype('float64')
result = p['first'] / p['second']
expected = Series(p['first'].values / p['second'].values)
assert_series_equal(result, expected)
p = DataFrame({'first': [3, 4, 5, 8], 'second': [1, 1, 1, 1]})
result = p['first'] / p['second']
assert_series_equal(result, p['first'].astype('float64'),
check_names=False)
self.assertTrue(result.name is None)
self.assertFalse(np.array_equal(result, p['second'] / p['first']))
# inf signing
s = Series([np.nan, 1., -1.])
result = s / 0
expected = Series([np.nan, np.inf, -np.inf])
assert_series_equal(result, expected)
# float/integer issue
# GH 7785
p = DataFrame({'first': (1, 0), 'second': (-0.01, -0.02)})
expected = Series([-0.01, -np.inf])
result = p['second'].div(p['first'])
assert_series_equal(result, expected, check_names=False)
result = p['second'] / p['first']
assert_series_equal(result, expected)
# GH 9144
s = Series([-1, 0, 1])
result = 0 / s
expected = Series([0.0, nan, 0.0])
assert_series_equal(result, expected)
result = s / 0
expected = Series([-inf, nan, inf])
assert_series_equal(result, expected)
result = s // 0
expected = Series([-inf, nan, inf])
assert_series_equal(result, expected)
def test_operators(self):
def _check_op(series, other, op, pos_only=False,
check_dtype=True):
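            # The direct operator call (cython/numpy path) must match a pure-Python Series.combine of the same op.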
left = np.abs(series) if pos_only else series
right = np.abs(other) if pos_only else other
cython_or_numpy = op(left, right)
python = left.combine(right, op)
tm.assert_series_equal(cython_or_numpy, python,
check_dtype=check_dtype)
def check(series, other):
simple_ops = ['add', 'sub', 'mul', 'truediv', 'floordiv', 'mod']
for opname in simple_ops:
_check_op(series, other, getattr(operator, opname))
_check_op(series, other, operator.pow, pos_only=True)
_check_op(series, other, lambda x, y: operator.add(y, x))
_check_op(series, other, lambda x, y: operator.sub(y, x))
_check_op(series, other, lambda x, y: operator.truediv(y, x))
_check_op(series, other, lambda x, y: operator.floordiv(y, x))
_check_op(series, other, lambda x, y: operator.mul(y, x))
_check_op(series, other, lambda x, y: operator.pow(y, x),
pos_only=True)
_check_op(series, other, lambda x, y: operator.mod(y, x))
check(self.ts, self.ts * 2)
check(self.ts, self.ts * 0)
check(self.ts, self.ts[::2])
check(self.ts, 5)
def check_comparators(series, other, check_dtype=True):
_check_op(series, other, operator.gt, check_dtype=check_dtype)
_check_op(series, other, operator.ge, check_dtype=check_dtype)
_check_op(series, other, operator.eq, check_dtype=check_dtype)
_check_op(series, other, operator.lt, check_dtype=check_dtype)
_check_op(series, other, operator.le, check_dtype=check_dtype)
check_comparators(self.ts, 5)
check_comparators(self.ts, self.ts + 1, check_dtype=False)
def test_operators_empty_int_corner(self):
s1 = Series([], [], dtype=np.int32)
s2 = Series({'x': 0.})
tm.assert_series_equal(s1 * s2, Series([np.nan], index=['x']))
def test_operators_timedelta64(self):
# invalid ops
self.assertRaises(Exception, self.objSeries.__add__, 1)
self.assertRaises(Exception, self.objSeries.__add__,
np.array(1, dtype=np.int64))
self.assertRaises(Exception, self.objSeries.__sub__, 1)
self.assertRaises(Exception, self.objSeries.__sub__,
np.array(1, dtype=np.int64))
        # series ops
v1 = date_range('2012-1-1', periods=3, freq='D')
v2 = date_range('2012-1-2', periods=3, freq='D')
rs = Series(v2) - Series(v1)
xp = Series(1e9 * 3600 * 24,
rs.index).astype('int64').astype('timedelta64[ns]')
assert_series_equal(rs, xp)
self.assertEqual(rs.dtype, 'timedelta64[ns]')
df = DataFrame(dict(A=v1))
td = Series([timedelta(days=i) for i in range(3)])
self.assertEqual(td.dtype, 'timedelta64[ns]')
# series on the rhs
result = df['A'] - df['A'].shift()
self.assertEqual(result.dtype, 'timedelta64[ns]')
result = df['A'] + td
self.assertEqual(result.dtype, 'M8[ns]')
# scalar Timestamp on rhs
maxa = df['A'].max()
tm.assertIsInstance(maxa, Timestamp)
resultb = df['A'] - df['A'].max()
self.assertEqual(resultb.dtype, 'timedelta64[ns]')
# timestamp on lhs
result = resultb + df['A']
values = [Timestamp('20111230'), Timestamp('20120101'),
Timestamp('20120103')]
expected = Series(values, name='A')
assert_series_equal(result, expected)
# datetimes on rhs
result = df['A'] - datetime(2001, 1, 1)
expected = Series(
[timedelta(days=4017 + i) for i in range(3)], name='A')
assert_series_equal(result, expected)
self.assertEqual(result.dtype, 'm8[ns]')
d = datetime(2001, 1, 1, 3, 4)
resulta = df['A'] - d
self.assertEqual(resulta.dtype, 'm8[ns]')
# roundtrip
resultb = resulta + d
assert_series_equal(df['A'], resultb)
# timedeltas on rhs
td = timedelta(days=1)
resulta = df['A'] + td
resultb = resulta - td
assert_series_equal(resultb, df['A'])
self.assertEqual(resultb.dtype, 'M8[ns]')
# roundtrip
td = timedelta(minutes=5, seconds=3)
resulta = df['A'] + td
resultb = resulta - td
assert_series_equal(df['A'], resultb)
self.assertEqual(resultb.dtype, 'M8[ns]')
# inplace
value = rs[2] + np.timedelta64(timedelta(minutes=5, seconds=1))
rs[2] += np.timedelta64(timedelta(minutes=5, seconds=1))
self.assertEqual(rs[2], value)
def test_operator_series_comparison_zerorank(self):
# GH 13006
result = np.float64(0) > pd.Series([1, 2, 3])
expected = 0.0 > pd.Series([1, 2, 3])
self.assert_series_equal(result, expected)
result = pd.Series([1, 2, 3]) < np.float64(0)
expected = pd.Series([1, 2, 3]) < 0.0
self.assert_series_equal(result, expected)
result = np.array([0, 1, 2])[0] > pd.Series([0, 1, 2])
expected = 0.0 > pd.Series([1, 2, 3])
self.assert_series_equal(result, expected)
def test_timedeltas_with_DateOffset(self):
# GH 4532
# operate with pd.offsets
s = Series([Timestamp('20130101 9:01'), Timestamp('20130101 9:02')])
result = s + pd.offsets.Second(5)
result2 = pd.offsets.Second(5) + s
expected = Series([Timestamp('20130101 9:01:05'), Timestamp(
'20130101 9:02:05')])
assert_series_equal(result, expected)
assert_series_equal(result2, expected)
result = s - pd.offsets.Second(5)
result2 = -pd.offsets.Second(5) + s
expected = Series([Timestamp('20130101 9:00:55'), Timestamp(
'20130101 9:01:55')])
assert_series_equal(result, expected)
assert_series_equal(result2, expected)
result = s + pd.offsets.Milli(5)
result2 = pd.offsets.Milli(5) + s
expected = Series([Timestamp('20130101 9:01:00.005'), Timestamp(
'20130101 9:02:00.005')])
assert_series_equal(result, expected)
assert_series_equal(result2, expected)
result = s + pd.offsets.Minute(5) + pd.offsets.Milli(5)
expected = Series([Timestamp('20130101 9:06:00.005'), Timestamp(
'20130101 9:07:00.005')])
assert_series_equal(result, expected)
# operate with np.timedelta64 correctly
result = s + np.timedelta64(1, 's')
result2 = np.timedelta64(1, 's') + s
expected = Series([ | Timestamp('20130101 9:01:01') | pandas.tseries.index.Timestamp |