prompt
stringlengths 19
1.03M
| completion
stringlengths 4
2.12k
| api
stringlengths 8
90
|
---|---|---|
import seaborn as sns
import matplotlib.py as plt
import pandas as pd
s = pd.Series([0,2,4,18,32,50])
t= pd.Series([1,2,3,4,5,6])
motion_graph(specify="p-t",s=s,t=t,color="#1a2b3f");
v = pd.Series([0,2,4,6,8,8,8,6,4,2,0])
t= pd.Series([1,2,3,4,5,6,7,8,9,10,11])
motion_graph(specify="v-t",v=v,t=t);
a = pd.Series([0,2,4,6,8,8,8,6,4,2,0])
t= | pd.Series([1,2,3,4,5,6,7,8,9,10,11]) | pandas.Series |
import numpy as np
import pytest
import pandas as pd
from pandas import (
CategoricalDtype,
CategoricalIndex,
DataFrame,
Index,
IntervalIndex,
MultiIndex,
Series,
Timestamp,
)
import pandas._testing as tm
class TestDataFrameSortIndex:
def test_sort_index_and_reconstruction_doc_example(self):
# doc example
df = DataFrame(
{"value": [1, 2, 3, 4]},
index=MultiIndex(
levels=[["a", "b"], ["bb", "aa"]], codes=[[0, 0, 1, 1], [0, 1, 0, 1]]
),
)
assert df.index.is_lexsorted()
assert not df.index.is_monotonic
# sort it
expected = DataFrame(
{"value": [2, 1, 4, 3]},
index=MultiIndex(
levels=[["a", "b"], ["aa", "bb"]], codes=[[0, 0, 1, 1], [0, 1, 0, 1]]
),
)
result = df.sort_index()
assert result.index.is_lexsorted()
assert result.index.is_monotonic
tm.assert_frame_equal(result, expected)
# reconstruct
result = df.sort_index().copy()
result.index = result.index._sort_levels_monotonic()
assert result.index.is_lexsorted()
assert result.index.is_monotonic
tm.assert_frame_equal(result, expected)
def test_sort_index_non_existent_label_multiindex(self):
# GH#12261
df = DataFrame(0, columns=[], index=MultiIndex.from_product([[], []]))
df.loc["b", "2"] = 1
df.loc["a", "3"] = 1
result = df.sort_index().index.is_monotonic
assert result is True
def test_sort_index_reorder_on_ops(self):
# GH#15687
df = DataFrame(
np.random.randn(8, 2),
index=MultiIndex.from_product(
[["a", "b"], ["big", "small"], ["red", "blu"]],
names=["letter", "size", "color"],
),
columns=["near", "far"],
)
df = df.sort_index()
def my_func(group):
group.index = ["newz", "newa"]
return group
result = df.groupby(level=["letter", "size"]).apply(my_func).sort_index()
expected = MultiIndex.from_product(
[["a", "b"], ["big", "small"], ["newa", "newz"]],
names=["letter", "size", None],
)
tm.assert_index_equal(result.index, expected)
def test_sort_index_nan_multiindex(self):
# GH#14784
# incorrect sorting w.r.t. nans
tuples = [[12, 13], [np.nan, np.nan], [np.nan, 3], [1, 2]]
mi = MultiIndex.from_tuples(tuples)
df = DataFrame(np.arange(16).reshape(4, 4), index=mi, columns=list("ABCD"))
s = Series(np.arange(4), index=mi)
df2 = DataFrame(
{
"date": pd.DatetimeIndex(
[
"20121002",
"20121007",
"20130130",
"20130202",
"20130305",
"20121002",
"20121207",
"20130130",
"20130202",
"20130305",
"20130202",
"20130305",
]
),
"user_id": [1, 1, 1, 1, 1, 3, 3, 3, 5, 5, 5, 5],
"whole_cost": [
1790,
np.nan,
280,
259,
np.nan,
623,
90,
312,
np.nan,
301,
359,
801,
],
"cost": [12, 15, 10, 24, 39, 1, 0, np.nan, 45, 34, 1, 12],
}
).set_index(["date", "user_id"])
# sorting frame, default nan position is last
result = df.sort_index()
expected = df.iloc[[3, 0, 2, 1], :]
tm.assert_frame_equal(result, expected)
# sorting frame, nan position last
result = df.sort_index(na_position="last")
expected = df.iloc[[3, 0, 2, 1], :]
tm.assert_frame_equal(result, expected)
# sorting frame, nan position first
result = df.sort_index(na_position="first")
expected = df.iloc[[1, 2, 3, 0], :]
tm.assert_frame_equal(result, expected)
# sorting frame with removed rows
result = df2.dropna().sort_index()
expected = df2.sort_index().dropna()
tm.assert_frame_equal(result, expected)
# sorting series, default nan position is last
result = s.sort_index()
expected = s.iloc[[3, 0, 2, 1]]
tm.assert_series_equal(result, expected)
# sorting series, nan position last
result = s.sort_index(na_position="last")
expected = s.iloc[[3, 0, 2, 1]]
tm.assert_series_equal(result, expected)
# sorting series, nan position first
result = s.sort_index(na_position="first")
expected = s.iloc[[1, 2, 3, 0]]
tm.assert_series_equal(result, expected)
def test_sort_index_nan(self):
# GH#3917
# Test DataFrame with nan label
df = DataFrame(
{"A": [1, 2, np.nan, 1, 6, 8, 4], "B": [9, np.nan, 5, 2, 5, 4, 5]},
index=[1, 2, 3, 4, 5, 6, np.nan],
)
# NaN label, ascending=True, na_position='last'
sorted_df = df.sort_index(kind="quicksort", ascending=True, na_position="last")
expected = DataFrame(
{"A": [1, 2, np.nan, 1, 6, 8, 4], "B": [9, np.nan, 5, 2, 5, 4, 5]},
index=[1, 2, 3, 4, 5, 6, np.nan],
)
tm.assert_frame_equal(sorted_df, expected)
# NaN label, ascending=True, na_position='first'
sorted_df = df.sort_index(na_position="first")
expected = DataFrame(
{"A": [4, 1, 2, np.nan, 1, 6, 8], "B": [5, 9, np.nan, 5, 2, 5, 4]},
index=[np.nan, 1, 2, 3, 4, 5, 6],
)
tm.assert_frame_equal(sorted_df, expected)
# NaN label, ascending=False, na_position='last'
sorted_df = df.sort_index(kind="quicksort", ascending=False)
expected = DataFrame(
{"A": [8, 6, 1, np.nan, 2, 1, 4], "B": [4, 5, 2, 5, np.nan, 9, 5]},
index=[6, 5, 4, 3, 2, 1, np.nan],
)
tm.assert_frame_equal(sorted_df, expected)
# NaN label, ascending=False, na_position='first'
sorted_df = df.sort_index(
kind="quicksort", ascending=False, na_position="first"
)
expected = DataFrame(
{"A": [4, 8, 6, 1, np.nan, 2, 1], "B": [5, 4, 5, 2, 5, np.nan, 9]},
index=[np.nan, 6, 5, 4, 3, 2, 1],
)
tm.assert_frame_equal(sorted_df, expected)
def test_sort_index_multi_index(self):
# GH#25775, testing that sorting by index works with a multi-index.
df = DataFrame(
{"a": [3, 1, 2], "b": [0, 0, 0], "c": [0, 1, 2], "d": list("abc")}
)
result = df.set_index(list("abc")).sort_index(level=list("ba"))
expected = DataFrame(
{"a": [1, 2, 3], "b": [0, 0, 0], "c": [1, 2, 0], "d": list("bca")}
)
expected = expected.set_index(list("abc"))
tm.assert_frame_equal(result, expected)
def test_sort_index_inplace(self):
frame = DataFrame(
np.random.randn(4, 4), index=[1, 2, 3, 4], columns=["A", "B", "C", "D"]
)
# axis=0
unordered = frame.loc[[3, 2, 4, 1]]
a_id = id(unordered["A"])
df = unordered.copy()
return_value = df.sort_index(inplace=True)
assert return_value is None
expected = frame
tm.assert_frame_equal(df, expected)
assert a_id != id(df["A"])
df = unordered.copy()
return_value = df.sort_index(ascending=False, inplace=True)
assert return_value is None
expected = frame[::-1]
tm.assert_frame_equal(df, expected)
# axis=1
unordered = frame.loc[:, ["D", "B", "C", "A"]]
df = unordered.copy()
return_value = df.sort_index(axis=1, inplace=True)
assert return_value is None
expected = frame
tm.assert_frame_equal(df, expected)
df = unordered.copy()
return_value = df.sort_index(axis=1, ascending=False, inplace=True)
assert return_value is None
expected = frame.iloc[:, ::-1]
tm.assert_frame_equal(df, expected)
def test_sort_index_different_sortorder(self):
A = np.arange(20).repeat(5)
B = np.tile(np.arange(5), 20)
indexer = np.random.permutation(100)
A = A.take(indexer)
B = B.take(indexer)
df = DataFrame({"A": A, "B": B, "C": np.random.randn(100)})
ex_indexer = np.lexsort((df.B.max() - df.B, df.A))
expected = df.take(ex_indexer)
# test with multiindex, too
idf = df.set_index(["A", "B"])
result = idf.sort_index(ascending=[1, 0])
expected = idf.take(ex_indexer)
tm.assert_frame_equal(result, expected)
# also, Series!
result = idf["C"].sort_index(ascending=[1, 0])
tm.assert_series_equal(result, expected["C"])
def test_sort_index_level(self):
mi = MultiIndex.from_tuples([[1, 1, 3], [1, 1, 1]], names=list("ABC"))
df = DataFrame([[1, 2], [3, 4]], mi)
result = df.sort_index(level="A", sort_remaining=False)
expected = df
tm.assert_frame_equal(result, expected)
result = df.sort_index(level=["A", "B"], sort_remaining=False)
expected = df
tm.assert_frame_equal(result, expected)
# Error thrown by sort_index when
# first index is sorted last (GH#26053)
result = df.sort_index(level=["C", "B", "A"])
expected = df.iloc[[1, 0]]
tm.assert_frame_equal(result, expected)
result = df.sort_index(level=["B", "C", "A"])
expected = df.iloc[[1, 0]]
tm.assert_frame_equal(result, expected)
result = df.sort_index(level=["C", "A"])
expected = df.iloc[[1, 0]]
tm.assert_frame_equal(result, expected)
def test_sort_index_categorical_index(self):
df = DataFrame(
{
"A": np.arange(6, dtype="int64"),
"B": Series(list("aabbca")).astype(CategoricalDtype(list("cab"))),
}
).set_index("B")
result = df.sort_index()
expected = df.iloc[[4, 0, 1, 5, 2, 3]]
tm.assert_frame_equal(result, expected)
result = df.sort_index(ascending=False)
expected = df.iloc[[2, 3, 0, 1, 5, 4]]
tm.assert_frame_equal(result, expected)
def test_sort_index(self):
# GH#13496
frame = DataFrame(
np.arange(16).reshape(4, 4),
index=[1, 2, 3, 4],
columns=["A", "B", "C", "D"],
)
# axis=0 : sort rows by index labels
unordered = frame.loc[[3, 2, 4, 1]]
result = unordered.sort_index(axis=0)
expected = frame
tm.assert_frame_equal(result, expected)
result = unordered.sort_index(ascending=False)
expected = frame[::-1]
tm.assert_frame_equal(result, expected)
# axis=1 : sort columns by column names
unordered = frame.iloc[:, [2, 1, 3, 0]]
result = unordered.sort_index(axis=1)
tm.assert_frame_equal(result, frame)
result = unordered.sort_index(axis=1, ascending=False)
expected = frame.iloc[:, ::-1]
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("level", ["A", 0]) # GH#21052
def test_sort_index_multiindex(self, level):
# GH#13496
# sort rows by specified level of multi-index
mi = MultiIndex.from_tuples(
[[2, 1, 3], [2, 1, 2], [1, 1, 1]], names=list("ABC")
)
df = DataFrame([[1, 2], [3, 4], [5, 6]], index=mi)
expected_mi = MultiIndex.from_tuples(
[[1, 1, 1], [2, 1, 2], [2, 1, 3]], names=list("ABC")
)
expected = DataFrame([[5, 6], [3, 4], [1, 2]], index=expected_mi)
result = df.sort_index(level=level)
tm.assert_frame_equal(result, expected)
# sort_remaining=False
expected_mi = MultiIndex.from_tuples(
[[1, 1, 1], [2, 1, 3], [2, 1, 2]], names=list("ABC")
)
expected = DataFrame([[5, 6], [1, 2], [3, 4]], index=expected_mi)
result = df.sort_index(level=level, sort_remaining=False)
tm.assert_frame_equal(result, expected)
def test_sort_index_intervalindex(self):
# this is a de-facto sort via unstack
# confirming that we sort in the order of the bins
y = Series(np.random.randn(100))
x1 = Series(np.sign(np.random.randn(100)))
x2 = pd.cut(Series(np.random.randn(100)), bins=[-3, -0.5, 0, 0.5, 3])
model = pd.concat([y, x1, x2], axis=1, keys=["Y", "X1", "X2"])
result = model.groupby(["X1", "X2"], observed=True).mean().unstack()
expected = IntervalIndex.from_tuples(
[(-3.0, -0.5), (-0.5, 0.0), (0.0, 0.5), (0.5, 3.0)], closed="right"
)
result = result.columns.levels[1].categories
tm.assert_index_equal(result, expected)
@pytest.mark.parametrize("inplace", [True, False])
@pytest.mark.parametrize(
"original_dict, sorted_dict, ascending, ignore_index, output_index",
[
({"A": [1, 2, 3]}, {"A": [2, 3, 1]}, False, True, [0, 1, 2]),
({"A": [1, 2, 3]}, {"A": [1, 3, 2]}, True, True, [0, 1, 2]),
({"A": [1, 2, 3]}, {"A": [2, 3, 1]}, False, False, [5, 3, 2]),
({"A": [1, 2, 3]}, {"A": [1, 3, 2]}, True, False, [2, 3, 5]),
],
)
def test_sort_index_ignore_index(
self, inplace, original_dict, sorted_dict, ascending, ignore_index, output_index
):
# GH 30114
original_index = [2, 5, 3]
df = DataFrame(original_dict, index=original_index)
expected_df = DataFrame(sorted_dict, index=output_index)
kwargs = {
"ascending": ascending,
"ignore_index": ignore_index,
"inplace": inplace,
}
if inplace:
result_df = df.copy()
result_df.sort_index(**kwargs)
else:
result_df = df.sort_index(**kwargs)
tm.assert_frame_equal(result_df, expected_df)
tm.assert_frame_equal(df, DataFrame(original_dict, index=original_index))
@pytest.mark.parametrize("inplace", [True, False])
@pytest.mark.parametrize(
"original_dict, sorted_dict, ascending, ignore_index, output_index",
[
(
{"M1": [1, 2], "M2": [3, 4]},
{"M1": [1, 2], "M2": [3, 4]},
True,
True,
[0, 1],
),
(
{"M1": [1, 2], "M2": [3, 4]},
{"M1": [2, 1], "M2": [4, 3]},
False,
True,
[0, 1],
),
(
{"M1": [1, 2], "M2": [3, 4]},
{"M1": [1, 2], "M2": [3, 4]},
True,
False,
MultiIndex.from_tuples([[2, 1], [3, 4]], names=list("AB")),
),
(
{"M1": [1, 2], "M2": [3, 4]},
{"M1": [2, 1], "M2": [4, 3]},
False,
False,
MultiIndex.from_tuples([[3, 4], [2, 1]], names=list("AB")),
),
],
)
def test_sort_index_ignore_index_multi_index(
self, inplace, original_dict, sorted_dict, ascending, ignore_index, output_index
):
# GH 30114, this is to test ignore_index on MulitIndex of index
mi = MultiIndex.from_tuples([[2, 1], [3, 4]], names=list("AB"))
df = DataFrame(original_dict, index=mi)
expected_df = | DataFrame(sorted_dict, index=output_index) | pandas.DataFrame |
# Authors: <NAME> (<EMAIL>), <NAME> (<EMAIL>), <NAME> (<EMAIL>)
import pandas as pd
import numpy as np
from scipy.integrate import solve_ivp
from scipy.optimize import minimize
from datetime import datetime, timedelta
from DELPHI_utils import (
DELPHIDataCreator, DELPHIAggregations, DELPHIDataSaver, get_initial_conditions, mape
)
from DELPHI_params import (date_MATHEMATICA, default_parameter_list, default_bounds_params,
validcases_threshold, IncubeD, RecoverID, RecoverHD, DetectD,
VentilatedD, default_maxT, p_v, p_d, p_h, max_iter)
import os
import yaml
with open("config.yml", "r") as ymlfile:
CONFIG = yaml.load(ymlfile, Loader=yaml.BaseLoader)
CONFIG_FILEPATHS = CONFIG["filepaths"]
USER_RUNNING = "michael"
yesterday = "".join(str(datetime.now().date() - timedelta(days=1)).split("-"))
PATH_TO_FOLDER_DANGER_MAP = CONFIG_FILEPATHS["danger_map"][USER_RUNNING]
PATH_TO_WEBSITE_PREDICTED = CONFIG_FILEPATHS["website"][USER_RUNNING]
popcountries = pd.read_csv(
PATH_TO_FOLDER_DANGER_MAP + f"processed/Global/Population_Global.csv"
)
try:
pastparameters = pd.read_csv(
PATH_TO_FOLDER_DANGER_MAP + f"predicted/Parameters_Global_{yesterday}.csv"
)
except:
pastparameters = None
if pd.to_datetime(yesterday) < pd.to_datetime(date_MATHEMATICA):
param_MATHEMATICA = True
else:
param_MATHEMATICA = False
# Initalizing lists of the different dataframes that will be concatenated in the end
list_df_global_predictions_since_today = []
list_df_global_predictions_since_100_cases = []
list_df_global_parameters = []
obj_value = 0
for continent, country, province in zip(
popcountries.Continent.tolist(),
popcountries.Country.tolist(),
popcountries.Province.tolist(),
):
country_sub = country.replace(" ", "_")
province_sub = province.replace(" ", "_")
if os.path.exists(PATH_TO_FOLDER_DANGER_MAP + f"processed/Global/Cases_{country_sub}_{province_sub}.csv"):
totalcases = pd.read_csv(
PATH_TO_FOLDER_DANGER_MAP + f"processed/Global/Cases_{country_sub}_{province_sub}.csv"
)
if totalcases.day_since100.max() < 0:
print(f"Not enough cases for Continent={continent}, Country={country} and Province={province}")
continue
print(country + ", " + province)
if pastparameters is not None:
parameter_list_total = pastparameters[
(pastparameters.Country == country) &
(pastparameters.Province == province)
].reset_index(drop=True)
if len(parameter_list_total) > 0:
parameter_list_line = parameter_list_total.iloc[-1, :].values.tolist()
if param_MATHEMATICA:
parameter_list = parameter_list_line[4:]
parameter_list[3] = np.log(2) / parameter_list[3]
else:
parameter_list = parameter_list_line[5:]
# Allowing a 5% drift for states with past predictions, starting in the 5th position are the parameters
param_list_lower = [x - 0.1 * abs(x) for x in parameter_list]
param_list_upper = [x + 0.1 * abs(x) for x in parameter_list]
bounds_params = tuple(
[(lower, upper)
for lower, upper in zip(param_list_lower, param_list_upper)]
)
date_day_since100 = pd.to_datetime(parameter_list_line[3])
validcases = totalcases[
(totalcases.day_since100 >= 0) &
(totalcases.date <= str((pd.to_datetime(yesterday) + timedelta(days=1)).date()))
][["day_since100", "case_cnt", "death_cnt"]].reset_index(drop=True)
else:
# Otherwise use established lower/upper bounds
parameter_list = default_parameter_list
bounds_params = default_bounds_params
date_day_since100 = pd.to_datetime(totalcases.loc[totalcases.day_since100 == 0, "date"].iloc[-1])
validcases = totalcases[
(totalcases.day_since100 >= 0) &
(totalcases.date <= str((pd.to_datetime(yesterday) + timedelta(days=1)).date()))
][["day_since100", "case_cnt", "death_cnt"]].reset_index(drop=True)
else:
# Otherwise use established lower/upper bounds
parameter_list = default_parameter_list
bounds_params = default_bounds_params
date_day_since100 = pd.to_datetime(totalcases.loc[totalcases.day_since100 == 0, "date"].iloc[-1])
validcases = totalcases[
(totalcases.day_since100 >= 0) &
(totalcases.date <= str((pd.to_datetime(yesterday) + timedelta(days=1)).date()))
][["day_since100", "case_cnt", "death_cnt"]].reset_index(drop=True)
# Now we start the modeling part:
if len(validcases) > validcases_threshold:
PopulationT = popcountries[
(popcountries.Country == country) & (popcountries.Province == province)
].pop2016.iloc[-1]
# We do not scale
N = PopulationT
PopulationI = validcases.loc[0, "case_cnt"]
PopulationR = validcases.loc[0, "death_cnt"] * 5
PopulationD = validcases.loc[0, "death_cnt"]
PopulationCI = PopulationI - PopulationD - PopulationR
"""
Fixed Parameters based on meta-analysis:
p_h: Hospitalization Percentage
RecoverHD: Average Days till Recovery
VentilationD: Number of Days on Ventilation for Ventilated Patients
maxT: Maximum # of Days Modeled
p_d: Percentage of True Cases Detected
p_v: Percentage of Hospitalized Patients Ventilated,
balance: Ratio of Fitting between cases and deaths
"""
# Currently fit on alpha, a and b, r_dth,
# & initial condition of exposed state and infected state
# Maximum timespan of prediction, defaulted to go to 15/06/2020
maxT = (default_maxT - date_day_since100).days + 1
""" Fit on Total Cases """
t_cases = validcases["day_since100"].tolist() - validcases.loc[0, "day_since100"]
validcases_nondeath = validcases["case_cnt"].tolist()
validcases_death = validcases["death_cnt"].tolist()
balance = validcases_nondeath[-1] / max(validcases_death[-1], 10) / 3
fitcasesnd = validcases_nondeath
fitcasesd = validcases_death
GLOBAL_PARAMS_FIXED = (
N, PopulationCI, PopulationR, PopulationD, PopulationI, p_d, p_h, p_v
)
def model_covid(
t, x, alpha, days, r_s, r_dth, p_dth, k1, k2
):
"""
SEIR + Undetected, Deaths, Hospitalized, corrected with ArcTan response curve
alpha: Infection rate
days: Median day of action
r_s: Median rate of action
p_dth: Mortality rate
k1: Internal parameter 1
k2: Internal parameter 2
y = [0 S, 1 E, 2 I, 3 AR, 4 DHR, 5 DQR, 6 AD,
7 DHD, 8 DQD, 9 R, 10 D, 11 TH, 12 DVR,13 DVD, 14 DD, 15 DT]
"""
r_i = np.log(2) / IncubeD # Rate of infection leaving incubation phase
r_d = np.log(2) / DetectD # Rate of detection
r_ri = np.log(2) / RecoverID # Rate of recovery not under infection
r_rh = np.log(2) / RecoverHD # Rate of recovery under hospitalization
r_rv = np.log(2) / VentilatedD # Rate of recovery under ventilation
gamma_t = (2 / np.pi) * np.arctan(-(t - days) / 20 * r_s) + 1
assert len(x) == 16, f"Too many input variables, got {len(x)}, expected 16"
S, E, I, AR, DHR, DQR, AD, DHD, DQD, R, D, TH, DVR, DVD, DD, DT = x
# Equations on main variables
dSdt = -alpha * gamma_t * S * I / N
dEdt = alpha * gamma_t * S * I / N - r_i * E
dIdt = r_i * E - r_d * I
dARdt = r_d * (1 - p_dth) * (1 - p_d) * I - r_ri * AR
dDHRdt = r_d * (1 - p_dth) * p_d * p_h * I - r_rh * DHR
dDQRdt = r_d * (1 - p_dth) * p_d * (1 - p_h) * I - r_ri * DQR
dADdt = r_d * p_dth * (1 - p_d) * I - r_dth * AD
dDHDdt = r_d * p_dth * p_d * p_h * I - r_dth * DHD
dDQDdt = r_d * p_dth * p_d * (1 - p_h) * I - r_dth * DQD
dRdt = r_ri * (AR + DQR) + r_rh * DHR
dDdt = r_dth * (AD + DQD + DHD)
# Helper states (usually important for some kind of output)
dTHdt = r_d * p_d * p_h * I
dDVRdt = r_d * (1 - p_dth) * p_d * p_h * p_v * I - r_rv * DVR
dDVDdt = r_d * p_dth * p_d * p_h * p_v * I - r_dth * DVD
dDDdt = r_dth * (DHD + DQD)
dDTdt = r_d * p_d * I
return [
dSdt, dEdt, dIdt, dARdt, dDHRdt, dDQRdt, dADdt, dDHDdt, dDQDdt,
dRdt, dDdt, dTHdt, dDVRdt, dDVDdt, dDDdt, dDTdt
]
def residuals_totalcases(params):
"""
Wanted to start with solve_ivp because figures will be faster to debug
params: (alpha, days, r_s, r_dth, p_dth, k1, k2), fitted parameters of the model
"""
# Variables Initialization for the ODE system
alpha, days, r_s, r_dth, p_dth, k1, k2 = params
params = max(alpha, 0), days, max(r_s, 0), max(r_dth, 0), max(min(p_dth, 1), 0), max(k1, 0), max(k2, 0)
x_0_cases = get_initial_conditions(
params_fitted=params,
global_params_fixed=GLOBAL_PARAMS_FIXED
)
x_sol = solve_ivp(
fun=model_covid,
y0=x_0_cases,
t_span=[t_cases[0], t_cases[-1]],
t_eval=t_cases,
args=tuple(params),
).y
weights = list(range(1, len(fitcasesnd) + 1))
# focus on last 5 days
weights[-5:] =[x + 50 for x in weights[-5:]]
residuals_value = sum(
np.multiply((x_sol[15, :] - fitcasesnd) ** 2, weights)
+ balance * balance * np.multiply((x_sol[14, :] - fitcasesd) ** 2, weights)
)
return residuals_value
output = minimize(
residuals_totalcases,
parameter_list,
method='trust-constr', # Can't use Nelder-Mead if I want to put bounds on the params
bounds=bounds_params,
options={'maxiter': max_iter, 'verbose': 0}
)
best_params = output.x
obj_value = obj_value + output.fun
t_predictions = [i for i in range(maxT)]
def solve_best_params_and_predict(optimal_params):
# Variables Initialization for the ODE system
x_0_cases = get_initial_conditions(
params_fitted=optimal_params,
global_params_fixed=GLOBAL_PARAMS_FIXED
)
x_sol_best = solve_ivp(
fun=model_covid,
y0=x_0_cases,
t_span=[t_predictions[0], t_predictions[-1]],
t_eval=t_predictions,
args=tuple(optimal_params),
).y
return x_sol_best
x_sol_final = solve_best_params_and_predict(best_params)
data_creator = DELPHIDataCreator(
x_sol_final=x_sol_final, date_day_since100=date_day_since100, best_params=best_params,
continent=continent, country=country, province=province, testing_data_included=False
)
# Creating the parameters dataset for this (Continent, Country, Province)
# mape_data = (
# mape(fitcasesnd, x_sol_final[15, :len(fitcasesnd)]) +
# mape(fitcasesd, x_sol_final[14, :len(fitcasesd)])
# ) / 2
if len(fitcasesnd)> 15:
mape_data_2 = (
mape(fitcasesnd[-15:], x_sol_final[15, len(fitcasesnd)-15:len(fitcasesnd)]) +
mape(fitcasesd[-15:], x_sol_final[14, len(fitcasesnd)-15:len(fitcasesd)])
) / 2
print(mape_data_2)
df_parameters_cont_country_prov = data_creator.create_dataset_parameters(mape_data_2)
list_df_global_parameters.append(df_parameters_cont_country_prov)
# Creating the datasets for predictions of this (Continent, Country, Province)
df_predictions_since_today_cont_country_prov, df_predictions_since_100_cont_country_prov = (
data_creator.create_datasets_predictions()
)
list_df_global_predictions_since_today.append(df_predictions_since_today_cont_country_prov)
list_df_global_predictions_since_100_cases.append(df_predictions_since_100_cont_country_prov)
print(f"Finished predicting for Continent={continent}, Country={country} and Province={province}")
else: # len(validcases) <= 7
print(f"Not enough historical data (less than a week)" +
f"for Continent={continent}, Country={country} and Province={province}")
continue
else: # file for that tuple (country, province) doesn't exist in processed files
continue
# Appending parameters, aggregations per country, per continent, and for the world
# for predictions today & since 100
today_date_str = "".join(str(datetime.now().date()).split("-"))
df_global_parameters = pd.concat(list_df_global_parameters)
df_global_predictions_since_today = | pd.concat(list_df_global_predictions_since_today) | pandas.concat |
import os
import pandas as pd
import geopandas as gpd
files = ['prop_urban_2000_2010.csv',
'pop_women_2010.csv',
'pop_men_2010.csv',
'idhm_2000_2010.csv',
'estimativas_pop.csv',
'interest_real.csv',
'num_people_age_gender_AP_2010.csv',
'qualification_APs_2010.csv',
'firms_by_APs2010_t0_full.csv',
'firms_by_APs2010_t1_full.csv',
'average_num_members_families_2010.csv'
]
def read_data(path, sep=';'):
return | pd.read_csv(path, sep=sep) | pandas.read_csv |
"""
Copyright (c) 2021 <NAME> as part of Airlab Amsterdam
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
https://github.com/elephaint/pgbm/blob/main/LICENSE
"""
#%% Import packages
import pandas as pd
import numpy as np
import time
import lightgbm as lgb
#%% Load data
data = pd.read_hdf('datasets/m5/m5_dataset_products.h5', key='data')
# Remove last 28 days for now...
data = data[data.date <= '22-05-2016']
data = data[data.weeks_on_sale > 0]
data = data[data.date >= '2014-01-01']
data = data.reset_index(drop=True)
# Choose single store
store_id = 0
subset = data[data.store_id_enc == store_id]
subset = subset.reset_index(drop=True)
#%% Preprocessing for forecast
cols_unknown = ['sales_lag1', 'sales_lag2',
'sales_lag3', 'sales_lag4', 'sales_lag5', 'sales_lag6', 'sales_lag7',
'sales_lag1_mavg7', 'sales_lag1_mavg28', 'sales_lag1_mavg56',
'sales_lag7_mavg7', 'sales_lag7_mavg28', 'sales_lag7_mavg56',
'sales_short_trend', 'sales_long_trend', 'sales_year_trend',
'sales_item_long_trend', 'sales_item_year_trend']
cols_known = ['date','item_id_enc', 'dept_id_enc', 'cat_id_enc',
'snap_CA',
'snap_TX', 'snap_WI', 'event_name_1_enc', 'event_type_1_enc',
'event_name_2_enc', 'event_type_2_enc', 'sell_price',
'sell_price_change', 'sell_price_norm_item', 'sell_price_norm_dept',
'weeks_on_sale', 'dayofweek_sin', 'dayofweek_cos', 'dayofmonth_sin',
'dayofmonth_cos', 'weekofyear_sin', 'weekofyear_cos', 'monthofyear_sin',
'monthofyear_cos', 'sales_lag364', 'sales_lag28_mavg7',
'sales_lag28_mavg28', 'sales_lag28_mavg56', 'sales_lywow_trend',
'sales_lag28', 'sales_lag56']
def create_forecastset(data, cols_unknown, cols_known, forecast_day):
X_unknown = data.groupby(['store_id_enc','item_id_enc'])[cols_unknown].shift(forecast_day)
X_known = data[cols_known]
X = pd.concat((X_known, X_unknown), axis=1)
y = data[['date','sales']]
return X, y
#%% Set training parameters
params = {'min_split_gain':0,
'min_data_in_leaf':1,
'max_depth':-1,
'max_bin':1024,
'max_leaves':16,
'learning_rate':0.1,
'n_estimators':1000,
'verbose':2,
'feature_fraction':0.7,
'bagging_fraction':0.7,
'bagging_freq':1,
'seed':1,
'lambda':1,
'objective':'rmse',
'metric':'rmse',
'device':'cpu'}
#%% Validation loop
forecast_day = 0
X, y = create_forecastset(subset, cols_unknown, cols_known, forecast_day)
train_last_date = '2016-03-27'
val_first_date = '2016-03-28'
val_last_date = '2016-04-24'
X_train, y_train = X[X.date <= train_last_date], y[y.date <= train_last_date]
X_train, y_train = X_train.drop(columns='date'), y_train.drop(columns='date')
X_val, y_val = X[(X.date >= val_first_date) & (X.date <= val_last_date)], y[(y.date >= val_first_date) & (y.date <= val_last_date)]
X_val, y_val = X_val.drop(columns='date'), y_val.drop(columns='date')
# Train
start = time.perf_counter()
params['bin_construct_sample_cnt'] = len(X_train)
train_set = lgb.Dataset(X_train, y_train)
valid_set = lgb.Dataset(X_val, y_val)
model = lgb.train(params, train_set, valid_sets=[train_set, valid_set], early_stopping_rounds=20)
end = time.perf_counter()
print(f'Training time: {end - start:.2f}s')
#%% Test loop
forecast_day = 0
X, y = create_forecastset(subset, cols_unknown, cols_known, forecast_day)
train_last_date = '2016-04-24'
test_first_date = '2016-04-25'
X_train, y_train = X[X.date <= train_last_date], y[y.date <= train_last_date]
X_train, y_train = X_train.drop(columns='date'), y_train.drop(columns='date')
X_test, y_test = X[X.date >= test_first_date], y[y.date >= test_first_date]
iteminfo = X_test[['date','item_id_enc', 'dept_id_enc', 'cat_id_enc']]
X_test, y_test = X_test.drop(columns='date'), y_test.drop(columns='date')
# Train
train_set = lgb.Dataset(X_train, y_train)
valid_set = lgb.Dataset(X_test, y_test)
params['n_estimators'] = model.best_iteration + 1
start = time.perf_counter()
params['bin_construct_sample_cnt'] = len(X_train)
model = lgb.train(params, train_set)
end = time.perf_counter()
print(f'Training time: {end - start:.2f}s')
# Save model
model.save_model('experiments/02_hierarchical_time_series/lgbm_mse.model')
# Predict
start = time.perf_counter()
yhat = model.predict(X_test)
end = time.perf_counter()
print(f'Prediction time: {end - start:.2f}s')
#%% RMSE
def rmseloss_metric(yhat, y):
loss = np.sqrt(np.mean((yhat - y)**2))
return loss
y = y_test.values.squeeze()
error = rmseloss_metric(yhat, y)
#%% Save
df = | pd.DataFrame({'y':y, 'yhat_lgb':yhat}) | pandas.DataFrame |
# -*- coding: utf-8 -*-
"""
Created on Wed Jun 20 10:47:30 2018
@author: SilverDoe
"""
#============ Selecting a column ==============================================
import pandas as pd
d = {'one' : pd.Series([1, 2, 3], index=['a', 'b', 'c']),
'two' : pd.Series([1, 2, 3, 4], index=['a', 'b', 'c', 'd'])}
df = pd.DataFrame(d)
print(df['one'])
#=========== Adding a column ==================================================
import pandas as pd
d = {'one' : pd.Series([1, 2, 3], index=['a', 'b', 'c']),
'two' : pd.Series([1, 2, 3, 4], index=['a', 'b', 'c', 'd'])}
df = pd.DataFrame(d)
# Adding a new column to an existing DataFrame object with column label by passing new series
print ("Adding a new column by passing as Series:")
df['three']=pd.Series([10,20,30],index=['a','b','c'])
print(df)
print ("Adding a new column using the existing columns in DataFrame:")
df['four']=df['one']+df['three']
# adding multiple columns using the new assign function
df = df.assign(five=(df['four']+df['three']),six=(df['one']+df['four']))
print(df)
#=========== Deleting a column ================================================
# Using the previous DataFrame, we will delete a column
# using del function
import pandas as pd
d = {'one' : | pd.Series([1, 2, 3], index=['a', 'b', 'c']) | pandas.Series |
import pandas as pd
import numpy as np
import os
def import_schedules(file_path, file_name):
"""
Given the file path and file name (of the schedule file that is inputted into the Maccor Cycler), this
function will import and clean the schedule file and return it as a df.
Parameters
-----------
file_path : string
File path
file_name : string
Filename
Returns
--------
df : pandas dataframe
A cleaned schedule df
Notes
------
The schedule file should be input as a csv.
Examples
---------
>>> import maccorcyclingdata.schedules as schedules
>>> schedule_df = schedules.import_schedules('example_data/','schedule.csv')
>>> schedule_df.head(5)
"""
if not isinstance(file_path, str):
raise TypeError('file path must be a string')
if not isinstance(file_name, str):
raise TypeError('file name must be a string')
if not os.path.exists(file_path+file_name):
raise NotADirectoryError("The path " + str(file_path + file_name) + " not found")
df = pd.read_csv(file_path + file_name)
df = df.dropna(how='all') #delete the rows that are completely blank
df.columns = ['step', 'step_type', 'step_mode', 'step_mode_value', 'step_limit', 'step_limit_value', 'step_end_type', 'step_end_type_op', 'step_end_type_value', 'goto_step', 'report_type', 'report_type_value', 'options', 'step_note'] #rename the column headers
df = df.reset_index(drop=True) #reset the df index
#this section of the function creates an array that has the indices of the row where the multi-row step starts
arr = []
for ind in df.index:
if pd.isnull(df['step'][ind]):
arr.append((ind-1)) #array contains all indices of when the step is "nan" - 1 (basically, the logic is that the row before the row where the step is nan is when the multi-line step begins)
for x in range((len(arr) - 1), -1, -1): #iterates through the array backwards (however, since the multi-line steps are not just two lines long and can be three/four/etc lines long, if the values are consecutive it means it is still a part of the previous multi-line group so it needs to be deleted)
if (arr[x]) == (arr[x - 1] + 1):
del arr[x]
for x in arr:
#the only columns that have multi-line steps are end_type, op, value, and goto, so make an array for each of those columns (the arrays hold the value of the first line of multi-line step group)
end_type = [df['step_end_type'][x]]
op = [df['step_end_type_op'][x]]
value = [df['step_end_type_value'][x]]
goto = [df['goto_step'][x]]
ind = x + 1
while | pd.isnull(df['step'][ind]) | pandas.isnull |
# coding: utf-8
import numpy as np
import pandas as pd
import scipy
import scipy.sparse as sp
import os
import time
import multiprocessing
import matplotlib.pyplot as plt
from sklearn.linear_model import LinearRegression, Ridge
from sklearn.model_selection import cross_val_predict
from sklearn.metrics import mean_squared_error, mutual_info_score, normalized_mutual_info_score
from utils import check_and_make_path, get_sp_adj_mat, sigmoid
from scipy.spatial.distance import pdist, squareform
# Generate data used for structural similarity prediction
class DataGenerator(object):
base_path: str
input_base_path: str
output_base_path: str
file_sep: str
full_node_list: list
node_num: int
alpha: float
iter_num: int
def __init__(self, base_path, input_folder, output_folder, node_file, file_sep='\t', alpha=0.5, iter_num=100):
self.base_path = base_path
self.input_base_path = os.path.abspath(os.path.join(base_path, input_folder))
self.output_base_path = os.path.abspath(os.path.join(base_path, output_folder))
self.file_sep = file_sep
node_file_path = os.path.abspath(os.path.join(base_path, node_file))
nodes_set = pd.read_csv(node_file_path, names=['node'])
self.full_node_list = nodes_set['node'].tolist()
self.node_num = len(self.full_node_list)
self.alpha = alpha
self.iter_num = iter_num
assert 0 < self.alpha < 1
check_and_make_path(self.input_base_path)
check_and_make_path(self.output_base_path)
def generate_node_similarity(self, file):
"""the implement of Vertex similarity in networks"""
# Vertex similarity in networks(https://arxiv.org/abs/physics/0510143)
print('file = ', file)
file_path = os.path.join(self.input_base_path, file)
date = file.split('.')[0]
output_file_path = os.path.join(self.output_base_path, date + '_similarity.npz')
A = get_sp_adj_mat(file_path, self.full_node_list, sep=self.file_sep)
A = A.tocsr()
lambda_1 = scipy.sparse.linalg.eigsh(A, k=1, which='LM', return_eigenvectors=False)[0]
print('lambda 1: ', lambda_1)
rows, cols = A.nonzero()
edge_num = rows.shape[0]
n = A.shape[0]
d = np.array(A.sum(1)).flatten()
d_inv = np.zeros(n) # dtype is float
indices = np.where(d > 0)[0]
d_inv[indices] = 1. / d[indices]
d_inv = np.diag(d_inv)
# dsd = np.random.normal(0, 1 / np.sqrt(n), (n, n))
dsd = np.zeros((n, n))
I = np.eye(n)
for i in range(self.iter_num):
# if i == 0:
# dsd = self.alpha / lambda_1 * A
# else:
# dsd = self.alpha / lambda_1 * A + self.alpha / lambda_1 * dsd.dot(A)
dsd = self.alpha / lambda_1 * A.dot(dsd) + I
if i % 10 == 0:
print('VS', i, '/', self.iter_num)
# coeff = 2 * edge_num * lambda_1
# S = d_inv.dot(dsd).dot(d_inv)
S = dsd
S = (S + S.T) / 2
S = S - I
S = (S - S.min()) / (S.max() - S.min())
print(type(S))
print('S max: ', S.max(), ', min: ', S.min())
eps = 1e-6
S[S < eps] = 0
# S[S > 1] = 1
S = sp.coo_matrix(S)
sp.save_npz(output_file_path, S)
# exit(0)
def generate_node_similarity_all_time(self, worker=-1):
f_list = sorted(os.listdir(self.input_base_path))
length = len(f_list)
if worker <= 0:
for i, file in enumerate(f_list):
self.generate_node_similarity(file)
else:
worker = min(worker, length, os.cpu_count())
pool = multiprocessing.Pool(processes=worker)
print("start " + str(worker) + " worker(s)")
for i, file in enumerate(f_list):
pool.apply_async(self.generate_node_similarity, (file, ))
pool.close()
pool.join()
# Centrality predictor class
class SimilarityPredictor(object):
base_path: str
origin_base_path: str
embedding_base_path: str
similarity_base_path: str
output_base_path: str
file_sep: str
full_node_list: list
def __init__(self, base_path, origin_folder, embedding_folder, similarity_folder, output_folder, node_file, file_sep='\t'):
self.base_path = base_path
self.origin_base_path = os.path.abspath(os.path.join(base_path, origin_folder))
self.embedding_base_path = os.path.abspath(os.path.join(base_path, embedding_folder))
self.similarity_base_path = os.path.abspath(os.path.join(base_path, similarity_folder))
self.output_base_path = os.path.abspath(os.path.join(base_path, output_folder))
self.file_sep = file_sep
node_file_path = os.path.abspath(os.path.join(base_path, node_file))
nodes_set = pd.read_csv(node_file_path, names=['node'])
self.full_node_list = nodes_set['node'].tolist()
check_and_make_path(self.embedding_base_path)
check_and_make_path(self.origin_base_path)
check_and_make_path(self.output_base_path)
def get_prediction_error(self, method, node_sim_mat, embedding_mat, date):
mse_list = [date]
# node_sim_mat = node_sim_mat / node_sim_mat.sum()
# node_sim = pd.Series(node_sim_mat.flatten())
# node_sim_mat = sigmoid(node_sim_mat)
#pred_sim_mat = pred_sim_mat / pred_sim_mat.sum()
#print('node sim max:', node_sim_mat.max(), 'pred sim max: ', pred_sim_mat.max())
#print('node sim min: ', node_sim_mat.min(), 'pred sim min: ', pred_sim_mat.min())
#print('node sim avg: ', node_sim_mat.mean(), 'pred sim avg: ', pred_sim_mat.mean())
# pred_sim_mat = pred_sim_mat / pred_sim_mat.sum()
#embedding_mat = (embedding_mat - embedding_mat.min()) / (embedding_mat.max() - embedding_mat.min())
# print('pred sim: ', pred_sim_mat.max(), pred_sim_mat.min())
pred_sim_mat = embedding_mat.dot(embedding_mat.T)
# node_sim_mat = (node_sim_mat - node_sim_mat.min()) / (node_sim_mat.max() - node_sim_mat.min())
# pred_sim_mat = (pred_sim_mat - pred_sim_mat.min()) / (pred_sim_mat.max() - pred_sim_mat.min())
#pred_sim_mat = pred_sim_mat / pred_sim_mat.sum()
# pred_sim = pd.Series(pred_sim_mat.flatten())
# pred_sim_mat = sigmoid(pred_sim_mat)
#print(node_sim_mat.mean(), pred_sim_mat.mean())
# exit(0)
# np.savetxt(os.path.join(self.base_path, date + 'pred_sim.txt'), pred_sim_mat)
# error = mean_squared_error(node_sim_mat, pred_sim_mat)
eps = 1e-6
column_list = []
n = pred_sim_mat.shape[0]
for i in range(n):
if node_sim_mat[i].sum() < eps: # node is single node whose degree is 0
continue
column_list.append(i)
real_sim_mat = node_sim_mat[column_list, :][:, column_list]
real_sim_mat = (real_sim_mat - real_sim_mat.min()) / (real_sim_mat.max() - real_sim_mat.min())
real_sim_mat = real_sim_mat / real_sim_mat.sum()
real_sim = pd.Series(real_sim_mat.flatten())
pred_sim_mat = pred_sim_mat[column_list, :][:, column_list]
pred_sim_mat = (pred_sim_mat - pred_sim_mat.min()) / (pred_sim_mat.max() - pred_sim_mat.min())
pred_sim_mat = pred_sim_mat / pred_sim_mat.sum()
pred_sim = pd.Series(pred_sim_mat.flatten())
print('real sim min: ', real_sim.min(), 'max: ', real_sim.max(), 'avg: ', real_sim.mean())
print('pred sim min: ', pred_sim.min(), 'max: ', pred_sim.max(), 'avg: ', pred_sim.mean())
# import ot
from scipy.stats import entropy
#connected_node_num = len(column_list)
#print('connected node number: ', connected_node_num)
mse_list.append(real_sim.corr(pred_sim, method='spearman'))
# mse_list.append(mutual_info_score(node_sim, pred_sim))
return mse_list
def similarity_prediction_all_time(self, method):
print('method = ', method)
f_list = sorted(os.listdir(self.origin_base_path))
all_mse_list = []
for i, f_name in enumerate(f_list):
print('Current date is: {}'.format(f_name))
date = f_name.split('.')[0]
node_sim_mat = np.loadtxt(os.path.join(self.similarity_base_path, date + '_similarity.csv'))
cur_embedding_path = os.path.join(self.embedding_base_path, method, f_name)
if not os.path.exists(cur_embedding_path):
continue
df_embedding = | pd.read_csv(cur_embedding_path, sep=self.file_sep, index_col=0) | pandas.read_csv |
from collections import Counter, defaultdict
from pprint import pprint
import pandas as pd
from util import data_io
def to_datetime(df, key):
df[key] = | pd.to_datetime(df[key]) | pandas.to_datetime |
"""
.. module: security_monkey.views.GuardDutyEventMapPointsList
:platform: Unix
.. version:: $$VERSION$$
.. moduleauthor:: <NAME> <<EMAIL>> @nuagedm
"""
import datetime
from flask import jsonify, request
from security_monkey import db, rbac
from security_monkey.views import AuthenticatedService
from security_monkey.datastore import (
GuardDutyEvent,
Item,
ItemAudit,
Account,
AccountType,
Technology,
AuditorSettings,
Datastore,
ItemRevision)
# Severity Levels for GuardDuty Findings
# https://docs.aws.amazon.com/guardduty/latest/ug/guardduty_findings.html#guardduty_findings-severity
def sev_name(val):
if 0.1 <= val <= 3.9:
return 'Low'
if 4.0 <= val <= 6.9:
return 'Medium'
if 7.0 <= val <= 8.9:
return 'High'
# Returns a list of Map Circle Marker Points List
class GuardDutyEventMapPointsList(AuthenticatedService):
decorators = [rbac.allow(['View'], ["GET"])]
def get(self):
"""
.. http:get:: /api/1/worldmapguarddutydata
Get a list of World Map Data points matching the given criteria.
**Example Request**:
.. sourcecode:: http
GET /api/1/worldmapguarddutydata HTTP/1.1
Host: example.com
Accept: application/json
**Example Response**:
.. sourcecode:: http
HTTP/1.1 200 OK
Vary: Accept
Content-Type: application/json
{
"auth": {
"authenticated": true,
"roles": [
{
"name": "Admin"
},
{
"name": "Justify"
},
{
"name": "Comment"
},
{
"name": "View"
}
],
"user": "<EMAIL>"
},
"items": [
{
"cityName": "Mar del Plata",
"count": 1,
"countryName": "Argentina",
"lat": -38.0,
"localPort": 22.0,
"localPortName": "SSH",
"lon": -57.55,
"remoteIpV4": "192.168.3.11",
"remoteOrg": "Telefonica de Argentina",
"remoteOrgASN": 22927.0,
"remoteOrgASNOrg": "Telefonica de Argentina",
"remoteOrgISP": "Telefonica de Argentina"
}
],
"page": 1,
"total": 197
}
:statuscode 200: no error
:statuscode 401: Authentication Error. Please Login.
"""
# Reference query as provided by Rick
# select
# g.item_id,
# g.config -> 'detail' -> 'service' -> 'action' -> 'portProbeAction' -> 'portProbeDetails' as "guarddutyjson"
# from item i
# inner join itemaudit ia on i.id = ia.item_id
# inner join guarddutyevent g ON i.id = g.item_id
# where coalesce(justified, FALSE) = FALSE
# and coalesce(fixed, FALSE) = FALSE
# and g.config -> 'detail' -> 'service' -> 'action' -> 'portProbeAction' -> 'portProbeDetails' is not NULL;
# """
self.reqparse.add_argument('accounts', type=str, default=None, location='args')
args = self.reqparse.parse_args()
for k, v in args.items():
if not v:
del args[k]
# @pritam: 25 July, 2018
# With implementation of GuardDuty Data Injection using Custom Watcher, changing the source of GuardDutyEvent
# data for this query to ItemRevision Table
# inner join itemrevision g ON i.id = g.item_id
# select g.item_id,
# g.config -> 'Service' -> 'Action' -> 'PortProbeAction' -> 'PortProbeDetails' as "guarddutyjson"
# from item i
# inner join itemaudit ia on i.id = ia.item_id
# inner join itemrevision g ON i.latest_revision_id = g.id
# where coalesce(justified, FALSE) = FALSE
# and coalesce(fixed, FALSE) = FALSE
# and g.config -> 'Service' -> 'Action' -> 'PortProbeAction' -> 'PortProbeDetails' is not NULL;
# Adding following additonal output data fields for display details modal popup of Map
# g.config -> 'Description' as "description",
# g.config -> 'Severity' as "severity",
# g.config -> 'Region' as "region",
# g.config -> 'Service' -> 'Count' as "count",
# g.config -> 'AccountId' as "accountid"
# Read more about filtering:
# https://docs.sqlalchemy.org/en/latest/orm/query.html
from sqlalchemy.sql.functions import coalesce
query = ItemRevision.query.with_entities(
ItemRevision.item_id,
ItemRevision.config[('Service', 'Action', 'PortProbeAction','PortProbeDetails')].label('portprobedetails'),
ItemRevision.config[('Description')].label('description'),
ItemRevision.config[('Severity')].label('severity'),
ItemRevision.config[('Region')].label('region'),
ItemRevision.config[('Service')].label('service'),
ItemRevision.config[('Resource')].label('resource'),
ItemRevision.config[('AccountId')].label('accountid'),
) \
.join((Item, Item.latest_revision_id == ItemRevision.id), (ItemAudit, Item.id == ItemAudit.item_id)) \
.filter((coalesce(ItemAudit.justified, False) == False), (coalesce(ItemAudit.fixed, False) == False),
(ItemRevision.config[
('Service', 'Action', 'PortProbeAction', 'PortProbeDetails')] != None))
if 'accounts' in args:
accounts = args['accounts'].split(',')
query = query.join((Account, Account.id == Item.account_id))
query = query.filter(Account.name.in_(accounts))
records = query.all()
items = []
def flatten_structure( rec):
result = dict(rec.__dict__)
if result.has_key('service'):
result.pop('service')
if result.has_key('resource'):
result.pop('resource')
if result.has_key('portprobedetails'):
result.pop('portprobedetails')
result.update(flatten_json(rec.portprobedetails[0]))
result['probe_count'] = rec.service['Count']
result['first_seen'] = rec.service['EventFirstSeen']
result['last_seen'] = rec.service['EventLastSeen']
result['resource_type'] = rec.resource['ResourceType']
result['instance_id'] = rec.resource['InstanceDetails']['InstanceId']
instance_tag_name = [k['Value'] for k in rec.resource['InstanceDetails']['Tags'] if k['Key']=='Name' ]
if instance_tag_name:
result['instance_name'] = instance_tag_name[0]
else:
result['instance_name'] = 'NA'
if result.has_key('_labels'):
result.pop('_labels')
# Convert Severity from float to Text
result['severity'] = sev_name(result['severity'])
return result
if len(records) > 0:
import pandas as pd
from ..flatten import flatten_json
flatten_records = (flatten_structure(record) for record in records)
fulldata_dataFrame = pd.DataFrame(flatten_records).rename(
columns={'RemoteIpDetails_GeoLocation_Lat': 'lat',
'RemoteIpDetails_GeoLocation_Lon': 'lon',
'LocalPortDetails_Port': 'localPort',
'LocalPortDetails_portName': 'localPortName',
'RemoteIpDetails_City_CityName': 'cityName',
'RemoteIpDetails_Country_CountryName': 'countryName',
'RemoteIpDetails_IpAddressV4': 'remoteIpV4',
'RemoteIpDetails_Organization_Asn': 'remoteOrgASN',
'RemoteIpDetails_Organization_AsnOrg': 'remoteOrgASNOrg',
'RemoteIpDetails_Organization_Isp': 'remoteOrgISP',
'RemoteIpDetails_Organization_Org': 'remoteOrg',
'counts': 'count'})
# Removing drop duplicates as each Probe will probably have different info to be displayed in popup
mapdata_dataframe = fulldata_dataFrame.groupby(['lat', 'lon']).size().reset_index(name='count').merge(
fulldata_dataFrame, on=['lat', 'lon'], how='left')
items = mapdata_dataframe.to_dict('records')
marshaled_dict = {
'page': 1,
'total': len(items),
'auth': self.auth_dict,
'items': items
}
return marshaled_dict, 200
# Returns a list of Top 10 Countries by number of probe events received to display in Bar Chart
class GuardDutyEventTop10Countries(AuthenticatedService):
decorators = [rbac.allow(['View'], ["GET"])]
def get(self):
"""
.. http:get:: /api/1/top10countryguarddutydata
Get a list of Top 10 Countries by number of probe events received to display in Bar Chart
**Example Request**:
.. sourcecode:: http
GET /api/1/worldmapguarddutydata HTTP/1.1
Host: example.com
Accept: application/json
**Example Response**:
.. sourcecode:: http
HTTP/1.1 200 OK
Vary: Accept
Content-Type: application/json
{
"auth": {
"authenticated": true,
"roles": [
{
"name": "Admin"
},
{
"name": "Justify"
},
{
"name": "Comment"
},
{
"name": "View"
}
],
"user": "<EMAIL>"
},
"items": [
{
"count": 1527,
"countryName": "China"
},
{
"count": 456,
"countryName": "United States"
},
{
"count": 116,
"countryName": "Russia"
},
],
"page": 1,
"total": 197
}
:statuscode 200: no error
:statuscode 401: Authentication Error. Please Login.
"""
self.reqparse.add_argument('accounts', type=str, default=None, location='args')
args = self.reqparse.parse_args()
for k, v in args.items():
if not v:
del args[k]
# Reference query as provided by Rick
# select
# g.item_id,
# g.config -> 'detail' -> 'service' -> 'action' -> 'portProbeAction' -> 'portProbeDetails' as "guarddutyjson"
# from item i
# inner join itemaudit ia on i.id = ia.item_id
# inner join guarddutyevent g ON i.id = g.item_id
# where coalesce(justified, FALSE) = FALSE
# and coalesce(fixed, FALSE) = FALSE
# and g.config -> 'detail' -> 'service' -> 'action' -> 'portProbeAction' -> 'portProbeDetails' is not NULL;
# """
# @pritam: 25 July, 2018
# With implementation of GuardDuty Data Injection using Custom Watcher, changing the source of GuardDutyEvent
# data for this query to ItemRevision Table
# select g.item_id,
# g.config -> 'Service' -> 'Action' -> 'PortProbeAction' -> 'PortProbeDetails' as "guarddutyjson"
# from item i
# inner join itemaudit ia on i.id = ia.item_id
# inner join itemrevision g ON i.latest_revision_id = g.id
# where coalesce(justified, FALSE) = FALSE
# and coalesce(fixed, FALSE) = FALSE
# and g.config -> 'Service' -> 'Action' -> 'PortProbeAction' -> 'PortProbeDetails' is not NULL;
# Read more about filtering:
# https://docs.sqlalchemy.org/en/latest/orm/query.html
from sqlalchemy.sql.functions import coalesce
query = ItemRevision.query.with_entities(
ItemRevision.item_id, ItemRevision.config[('Service', 'Action', 'PortProbeAction',
'PortProbeDetails')]) \
.join((Item, Item.latest_revision_id == ItemRevision.id), (ItemAudit, Item.id == ItemAudit.item_id)) \
.filter((coalesce(ItemAudit.justified, False) == False), (coalesce(ItemAudit.fixed, False) == False),
(ItemRevision.config[
('Service', 'Action', 'PortProbeAction', 'PortProbeDetails')] != None))
if 'accounts' in args:
accounts = args['accounts'].split(',')
query = query.join((Account, Account.id == Item.account_id))
query = query.filter(Account.name.in_(accounts))
records = query.all()
items = []
if len(records) > 0:
import pandas as pd
from ..flatten import flatten_json
flatten_records = (flatten_json(record[1][0]) for record in records)
fulldata_dataFrame = | pd.DataFrame(flatten_records) | pandas.DataFrame |
import os
import unittest
import random
import sys
import site # so that ai4water directory is in path
ai4_dir = os.path.dirname(os.path.dirname(os.path.abspath(sys.argv[0])))
site.addsitedir(ai4_dir)
import scipy
import numpy as np
import pandas as pd
from sklearn.model_selection import train_test_split
from ai4water import Model
from ai4water.preprocessing import DataHandler, SiteDistributedDataHandler
from ai4water.preprocessing.datahandler import MultiLocDataHandler
from ai4water.datasets import load_u1, arg_beach
os.environ['PYTHONHASHSEED'] = '313'
random.seed(313)
np.random.seed(313)
# todo, check last dimension of x,y
# todo test with 3d y
def _check_xy_equal_len(x, prev_y, y, lookback, num_ins, num_outs, num_examples, data_type='training'):
feat_dim = 1
if lookback > 1:
assert x.shape[1] == lookback
feat_dim = 2
assert x.shape[
feat_dim] == num_ins, f"for {data_type} x's shape is {x.shape} while num_ins of dataloader are {num_ins}"
if y is not None:
assert y.shape[1] == num_outs, f"for {data_type} y's shape is {y.shape} while num_outs of dataloader are {num_outs}"
else:
assert num_outs == 0
y = x # just for next statement to run
if prev_y is None:
prev_y = x # just for next statement to run
assert x.shape[0] == y.shape[0] == prev_y.shape[
0], f"for {data_type} xshape: {x.shape}, yshape: {y.shape}, prevyshape: {prev_y.shape}"
if num_examples:
assert x.shape[
0] == num_examples, f'for {data_type} x contains {x.shape[0]} samples while expected samples are {num_examples}'
return
def assert_xy_equal_len(x, prev_y, y, data_loader, num_examples=None, data_type='training'):
if isinstance(x, np.ndarray):
_check_xy_equal_len(x, prev_y, y, data_loader.lookback, data_loader.num_ins, data_loader.num_outs, num_examples,
data_type=data_type)
elif isinstance(x, list):
while len(y)<len(x):
y.append(None)
for idx, i in enumerate(x):
_check_xy_equal_len(i, prev_y[idx], y[idx], data_loader.lookback[idx], data_loader.num_ins[idx],
data_loader.num_outs[idx], num_examples, data_type=data_type
)
elif isinstance(x, dict):
for key, i in x.items():
_check_xy_equal_len(i, prev_y.get(key, None), y.get(key, None), data_loader.lookback[key], data_loader.num_ins[key],
data_loader.num_outs[key], num_examples, data_type=data_type
)
elif x is None: # all should be None
assert all(v is None for v in [x, prev_y, y])
else:
raise ValueError
def _check_num_examples(train_x, val_x, test_x, val_ex, test_ex, tot_obs):
val_examples = 0
if val_ex:
val_examples = val_x.shape[0]
test_examples = 0
if test_ex:
test_examples = test_x.shape[0]
xyz_samples = train_x.shape[0] + val_examples + test_examples
# todo, whould be equal
assert xyz_samples == tot_obs, f"""
data_loader has {tot_obs} examples while sum of train/val/test examples are {xyz_samples}."""
def check_num_examples(train_x, val_x, test_x, val_ex, test_ex, data_loader):
if isinstance(train_x, np.ndarray):
_check_num_examples(train_x, val_x, test_x, val_ex, test_ex, data_loader.tot_obs_for_one_df())
elif isinstance(train_x, list):
for idx in range(len(train_x)):
_check_num_examples(train_x[idx], val_x[idx], test_x[idx], val_ex, test_ex,
data_loader.tot_obs_for_one_df()[idx])
return
def check_inverse_transformation(data, data_loader, y, cols, key):
if cols is None:
# not output columns, so not checking
return
# check that after inverse transformation, we get correct y.
if data_loader.source_is_df:
train_y_ = data_loader.inverse_transform(data=pd.DataFrame(y.reshape(-1, len(cols)), columns=cols), key=key)
train_y_, index = data_loader.deindexify(train_y_, key=key)
compare_individual_item(data, key, cols, train_y_, data_loader)
elif data_loader.source_is_list:
#for idx in range(data_loader.num_sources):
# y_ = y[idx].reshape(-1, len(cols[idx]))
train_y_ = data_loader.inverse_transform(data=y, key=key)
train_y_, _ = data_loader.deindexify(train_y_, key=key)
for idx, y in enumerate(train_y_):
compare_individual_item(data[idx], f'{key}_{idx}', cols[idx], y, data_loader)
elif data_loader.source_is_dict:
train_y_ = data_loader.inverse_transform(data=y, key=key)
train_y_, _ = data_loader.deindexify(train_y_, key=key)
for src_name, val in train_y_.items():
compare_individual_item(data[src_name], f'{key}_{src_name}', cols[src_name], val, data_loader)
def compare_individual_item(data, key, cols, y, data_loader):
if y is None:
return
train_index = data_loader.indexes[key]
if y.__class__.__name__ in ['DataFrame']:
y = y.values
for i, v in zip(train_index, y):
if len(cols) == 1:
if isinstance(train_index, pd.DatetimeIndex):
# if true value in data is None, y's value should also be None
if np.isnan(data[cols].loc[i]).item():
assert np.isnan(v).item()
else:
_t = round(data[cols].loc[i].item(), 0)
_p = round(v.item(), 0)
if not np.allclose(data[cols].loc[i].item(), v.item()):
print(f'true: {_t}, : pred: {_p}, index: {i}, col: {cols}')
else:
if isinstance(v, np.ndarray):
v = round(v.item(), 3)
_true = round(data[cols].loc[i], 3).item()
_p = round(v, 3)
if _true != _p:
print(f'true: {_true}, : pred: {_p}, index: {i}, col: {cols}')
else:
if isinstance(train_index, pd.DatetimeIndex):
assert abs(data[cols].loc[i].sum() - np.nansum(v)) <= 0.00001, f'{data[cols].loc[i].sum()},: {v}'
else:
assert abs(data[cols].iloc[i].sum() - v.sum()) <= 0.00001
def check_kfold_splits(data_handler):
if data_handler.source_is_df:
splits = data_handler.KFold_splits()
for (train_x, train_y), (test_x, test_y) in splits:
... # print(train_x.shape, train_y.shape, test_x.shape, test_y.shape)
return
def assert_uniquenes(train_y, val_y, test_y, out_cols, data_loader):
if isinstance(train_y, list):
assert isinstance(val_y, list)
assert isinstance(test_y, list)
train_y = train_y[0]
val_y = val_y[0]
test_y = test_y[0]
if isinstance(train_y, dict):
train_y = list(train_y.values())[0]
assert isinstance(val_y, dict)
isinstance(test_y, dict)
val_y = list(val_y.values())[0]
test_y = list(test_y.values())[0]
if out_cols is not None:
b = train_y.reshape(-1, )
if val_y is None:
a = test_y.reshape(-1, )
else:
a = val_y.reshape(-1, )
if not len(np.intersect1d(a, b)) == 0:
raise ValueError(f'train and val have overlapping values')
if data_loader.val_data != 'same' and out_cols is not None and val_y is not None and test_y is not None:
a = test_y.reshape(-1,)
b = val_y.reshape(-1,)
assert len(np.intersect1d(a, b)) == 0, 'test and val have overlapping values'
return
def build_and_test_loader(data, config, out_cols, train_ex=None, val_ex=None, test_ex=None, save=True,
assert_uniqueness=True, check_examples=True,
true_train_y=None, true_val_y=None, true_test_y=None):
config['teacher_forcing'] = True # todo
if 'val_fraction' not in config:
config['val_fraction'] = 0.3
if 'test_fraction' not in config:
config['test_fraction'] = 0.3
data_loader = DataHandler(data=data, save=save, verbosity=0, **config)
#dl = DataLoader.from_h5('data.h5')
train_x, prev_y, train_y = data_loader.training_data(key='train')
assert_xy_equal_len(train_x, prev_y, train_y, data_loader, train_ex)
val_x, prev_y, val_y = data_loader.validation_data(key='val')
assert_xy_equal_len(val_x, prev_y, val_y, data_loader, val_ex, data_type='validation')
test_x, prev_y, test_y = data_loader.test_data(key='test')
assert_xy_equal_len(test_x, prev_y, test_y, data_loader, test_ex, data_type='test')
if check_examples:
check_num_examples(train_x, val_x, test_x, val_ex, test_ex, data_loader)
if isinstance(data, str):
data = data_loader.data
check_inverse_transformation(data, data_loader, train_y, out_cols, 'train')
if val_ex:
check_inverse_transformation(data, data_loader, val_y, out_cols, 'val')
if test_ex:
check_inverse_transformation(data, data_loader, test_y, out_cols, 'test')
check_kfold_splits(data_loader)
if assert_uniqueness:
assert_uniquenes(train_y, val_y, test_y, out_cols, data_loader)
if true_train_y is not None:
assert np.allclose(train_y, true_train_y)
if true_val_y is not None:
assert np.allclose(val_y, true_val_y)
if true_test_y is not None:
assert np.allclose(test_y, true_test_y)
return data_loader
class TestAllCases(object):
def __init__(self, input_features, output_features, lookback=3, allow_nan_labels=0, save=True):
self.input_features = input_features
self.output_features = output_features
self.lookback = lookback
self.allow_nan_labels=allow_nan_labels
self.save=save
self.run_all()
def run_all(self):
all_methods = [m for m in dir(self) if callable(getattr(self, m)) and not m.startswith('_') and m not in ['run_all']]
for m in all_methods:
getattr(self, m)()
return
def test_basic(self):
examples = 100
data = np.arange(int(examples * 3), dtype=np.int32).reshape(-1, examples).transpose()
data = pd.DataFrame(data, columns=['a', 'b', 'c'])
config = {'input_features':self.input_features,
'output_features': self.output_features,
'lookback': self.lookback}
tr_examples = 49 - (self.lookback - 2) if self.lookback>1 else 49
val_examples = 22 - (self.lookback - 2) if self.lookback>1 else 22
test_examples = 30 - (self.lookback - 2) if self.lookback>1 else 30
if self.output_features == ['c']:
tty = np.arange(202, 250).reshape(-1, 1, 1)
tvy = np.arange(250, 271).reshape(-1, 1, 1)
ttesty = np.arange(271, 300).reshape(-1, 1, 1)
else:
tty, tvy, ttesty = None, None, None
loader = build_and_test_loader(data, config, self.output_features,
tr_examples, val_examples, test_examples,
save=self.save,
true_train_y=tty,
true_val_y=tvy,
true_test_y=ttesty,
check_examples=True,
)
assert loader.source_is_df
return
def test_with_random(self):
examples = 100
data = np.arange(int(examples * 3), dtype=np.int32).reshape(-1, examples).transpose()
data = pd.DataFrame(data, columns=['a', 'b', 'c'])
config = {'input_features':self.input_features,
'output_features': self.output_features,
'lookback': self.lookback,
'train_data': 'random'}
tr_examples = 49 - (self.lookback - 2) if self.lookback>1 else 49
loader = build_and_test_loader(data, config, self.output_features,
tr_examples, 20, 30,
save=self.save,
)
assert loader.source_is_df
return
def test_drop_remainder(self):
examples = 100
data = np.arange(int(examples * 3), dtype=np.int32).reshape(-1, examples).transpose()
data = pd.DataFrame(data, columns=['a', 'b', 'c'])
config = {'input_features':self.input_features,
'output_features': self.output_features,
'lookback': self.lookback,
'batch_size': 8,
'drop_remainder': True,
'train_data': 'random'}
loader = build_and_test_loader(data, config, self.output_features,
48, 16, 24,
check_examples=False,
save=self.save,
)
assert loader.source_is_df
return
def test_with_same_val_data(self):
# val_data is "same" as and train_data is make based upon fractions.
examples = 100
data = np.arange(int(examples * 3), dtype=np.int32).reshape(-1, examples).transpose()
data = pd.DataFrame(data, columns=['a', 'b', 'c'])
config = {'input_features':self.input_features,
'output_features': self.output_features,
'lookback': self.lookback,
'val_data': 'same'}
if self.output_features == ['c']:
tty = np.arange(202, 271).reshape(-1, 1, 1)
tvy = np.arange(271, 300).reshape(-1, 1, 1)
ttesty = np.arange(271, 300).reshape(-1, 1, 1)
else:
tty, tvy, ttesty = None, None, None
tr_examples = 71 - (self.lookback - 1) if self.lookback > 1 else 71
loader = build_and_test_loader(data, config, self.output_features,
tr_examples, 29, 29,
true_train_y=tty,
true_val_y=tvy,
true_test_y=ttesty,
save=self.save,
check_examples=False
)
assert loader.source_is_df
return
def test_with_same_val_data_and_random(self):
examples = 100
data = np.arange(int(examples * 3), dtype=np.int32).reshape(-1, examples).transpose()
data = pd.DataFrame(data, columns=['a', 'b', 'c'])
config = {'input_features':self.input_features,
'output_features': self.output_features,
'lookback': self.lookback,
'train_data': 'random',
'val_data': 'same'}
tr_examples = 70 - (self.lookback - 1) if self.lookback > 1 else 70
loader = build_and_test_loader(data, config, self.output_features,
tr_examples, 30, 30,
check_examples=False,
save=self.save
)
assert loader.source_is_df
return
def test_with_no_val_data(self):
# we dont' want to have any validation_data
examples = 100
data = np.arange(int(examples * 3), dtype=np.int32).reshape(-1, examples).transpose()
data = pd.DataFrame(data, columns=['a', 'b', 'c'])
config = {'input_features':self.input_features,
'output_features': self.output_features,
'lookback': self.lookback,
'val_fraction': 0.0}
if self.output_features == ['c']:
tty = np.arange(202, 271).reshape(-1, 1, 1)
ttesty = np.arange(271, 300).reshape(-1, 1, 1)
else:
tty, tvy, ttesty = None, None, None
tr_examples = 71 - (self.lookback - 1) if self.lookback > 1 else 71
loader = build_and_test_loader(data, config, self.output_features,
tr_examples, 0, 29,
true_train_y=tty,
true_test_y=ttesty,
save=self.save)
assert loader.source_is_df
return
def test_with_no_val_data_with_random(self):
# we dont' want to have any validation_data
examples = 100
data = np.arange(int(examples * 3), dtype=np.int32).reshape(-1, examples).transpose()
data = pd.DataFrame(data, columns=['a', 'b', 'c'])
config = {'input_features':self.input_features,
'output_features': self.output_features,
'lookback': self.lookback,
'train_data': 'random',
'val_fraction': 0.0}
tr_examples = 70 - (self.lookback - 1) if self.lookback > 1 else 70
loader = build_and_test_loader(data, config, self.output_features,
tr_examples, 0, 30,
save=self.save
)
assert loader.source_is_df
return
def test_with_no_test_data(self):
# we don't want any test_data
examples = 100
data = np.arange(int(examples * 3), dtype=np.int32).reshape(-1, examples).transpose()
data = pd.DataFrame(data, columns=['a', 'b', 'c'])
config = {'input_features':self.input_features,
'output_features': self.output_features,
'lookback': self.lookback,
'test_fraction': 0.0}
if self.output_features == ['c']:
tty = np.arange(202, 271).reshape(-1, 1, 1)
tvy = np.arange(271, 300).reshape(-1, 1, 1)
else:
tty, tvy, ttesty = None, None, None
tr_examples = 71 - (self.lookback - 1) if self.lookback > 1 else 71
loader = build_and_test_loader(data, config, self.output_features, tr_examples, 29, 0,
true_train_y=tty,
true_val_y=tvy,
save=self.save
)
assert loader.source_is_df
return
def test_with_no_test_data_with_random(self):
# we don't want any test_data
examples = 20
data = np.arange(int(examples * 3), dtype=np.int32).reshape(-1, examples).transpose()
data = pd.DataFrame(data, columns=['a', 'b', 'c'])
config = {'input_features':self.input_features,
'output_features': self.output_features,
'lookback': self.lookback,
'train_data': 'random',
'test_fraction': 0.0,
'transformation': 'minmax'}
tr_examples = 15- (self.lookback - 1) if self.lookback > 1 else 15
loader = build_and_test_loader(data, config, self.output_features, tr_examples, 5, 0,
save=self.save)
assert loader.source_is_df
return
def test_with_dt_index(self):
# we don't want any test_data
#print('testing test_with_dt_index', self.lookback)
examples = 20
data = np.arange(int(examples * 3), dtype=np.int32).reshape(-1, examples).transpose()
data = pd.DataFrame(data, columns=['a', 'b', 'c'],
index=pd.date_range('20110101', periods=20, freq='D'))
config = {'input_features': self.input_features,
'output_features': self.output_features,
'lookback': self.lookback,
'train_data': 'random',
'test_fraction': 0.0,
'transformation': 'minmax'}
tr_examples = 15 - (self.lookback - 1) if self.lookback > 1 else 15
loader = build_and_test_loader(data, config, self.output_features, tr_examples, 5, 0,
save=self.save)
assert loader.source_is_df
return
def test_with_intervals(self):
#print('testing test_with_intervals', self.lookback)
examples = 35
data = np.arange(int(examples * 3), dtype=np.int32).reshape(-1, examples).transpose()
data = pd.DataFrame(data, columns=['a', 'b', 'c'],
index=pd.date_range('20110101', periods=35, freq='D'))
config = {'input_features':self.input_features,
'output_features': self.output_features,
'lookback': self.lookback,
'train_data': 'random',
'transformation': 'minmax',
'intervals': [(0, 10), (20, 35)]
}
tr_examples = 12 - (self.lookback - 1) if self.lookback > 1 else 12
loader = build_and_test_loader(data, config, self.output_features, tr_examples, 4, 7,
save=self.save
)
assert loader.source_is_df
return
def test_with_dt_intervals(self):
# check whether indices of intervals can be datetime?
examples = 35
data = np.arange(int(examples * 3), dtype=np.int32).reshape(-1, examples).transpose()
data = pd.DataFrame(data, columns=['a', 'b', 'c'],
index=pd.date_range('20110101', periods=35, freq='D'))
config = {'input_features':self.input_features,
'output_features': self.output_features,
'lookback': self.lookback,
'train_data': 'random',
'transformation': 'minmax',
'intervals': [('20110101', '20110110'), ('20110121', '20110204')]
}
tr_examples = 12 - (self.lookback - 1) if self.lookback > 1 else 12
val_examples = 7 - (self.lookback - 2) if self.lookback > 1 else 7
test_examples = 7 - (self.lookback - 2) if self.lookback > 1 else 7
loader = build_and_test_loader(data, config, self.output_features, tr_examples, 4, 7,
save=self.save)
assert loader.source_is_df
return
def test_with_custom_train_indices(self):
#print('testing test_with_custom_train_indices')
examples = 20
data = np.arange(int(examples * 3), dtype=np.int32).reshape(-1, examples).transpose()
data = pd.DataFrame(data, columns=['a', 'b', 'c'],
index=pd.date_range('20110101', periods=20, freq='D'))
config = {'input_features':self.input_features,
'output_features': self.output_features,
'lookback': self.lookback,
'train_data': [1,2,3,4,5,6,7,8,9,10,11,12],
'transformation': 'minmax',
}
tr_examples = 9 - (self.lookback - 2) if self.lookback > 1 else 9
val_examples = 6 - (self.lookback - 1) if self.lookback > 1 else 6
test_examples = 8 - (self.lookback - 1) if self.lookback > 1 else 8
loader = build_and_test_loader(data, config, self.output_features, tr_examples, val_examples, test_examples,
save=self.save)
assert loader.source_is_df
return
def test_with_custom_train_indices_no_val_data(self):
examples = 20
data = np.arange(int(examples * 3), dtype=np.int32).reshape(-1, examples).transpose()
data = pd.DataFrame(data, columns=['a', 'b', 'c'],
index=pd.date_range('20110101', periods=20, freq='D'))
config = {'input_features':self.input_features,
'output_features': self.output_features,
'lookback': self.lookback,
'train_data': [1,2,3,4,5,6,7,8,9,10,11,12],
'transformation': 'minmax',
'val_fraction': 0.0,
}
test_examples = 8 - (self.lookback - 1) if self.lookback > 1 else 8
loader = build_and_test_loader(data, config, self.output_features, 12, 0, test_examples,
save=self.save)
assert loader.source_is_df
return
def test_with_custom_train_indices_same_val_data(self):
#print('testing test_with_custom_train_indices_same_val_data')
examples = 20
data = np.arange(int(examples * 3), dtype=np.int32).reshape(-1, examples).transpose()
data = pd.DataFrame(data, columns=['a', 'b', 'c'],
index=pd.date_range('20110101', periods=20, freq='D'))
config = {'input_features':self.input_features,
'output_features': self.output_features,
'lookback': self.lookback,
'train_data': [1,2,3,4,5,6,7,8,9,10,11,12],
'transformation': 'minmax',
'val_data': 'same',
}
test_examples = 8 - (self.lookback - 1) if self.lookback > 1 else 8
loader = build_and_test_loader(data, config, self.output_features, 12, 0, test_examples,
save=self.save)
assert loader.source_is_df
return
def test_with_custom_train_and_val_indices(self):
examples = 20
data = np.arange(int(examples * 3), dtype=np.int32).reshape(-1, examples).transpose()
data = pd.DataFrame(data, columns=['a', 'b', 'c'],
index=pd.date_range('20110101', periods=20, freq='D'))
config = {'input_features':self.input_features,
'output_features': self.output_features,
'lookback': self.lookback,
'train_data': [1,2,3,4,5,6,7,8,9,10,11,12],
'transformation': 'minmax',
'val_data': [0, 12, 14, 16, 5],
'val_fraction': 0.0,
}
test_examples = 8 - (self.lookback - 1) if self.lookback > 1 else 8
loader = build_and_test_loader(data, config, self.output_features, 12, 5, test_examples,
assert_uniqueness=False,
save=self.save,
check_examples=False
)
assert loader.source_is_df
return
# def test_with_train_and_val_and_test_indices(self):
# # todo, does it make sense to define test_data by indices
# return
def test_with_custom_train_indices_and_intervals(self):
#print('testing test_with_custom_train_indices_and_intervals', self.lookback)
examples = 30
data = np.arange(int(examples * 3), dtype=np.int32).reshape(-1, examples).transpose()
data = pd.DataFrame(data, columns=['a', 'b', 'c'],
index=pd.date_range('20110101', periods=30, freq='D'))
config = {'input_features':self.input_features,
'output_features': self.output_features,
'lookback': self.lookback,
'train_data': [1,2,3,4,5,6,7,8,9,10,11,12],
#'transformation': 'minmax',
'intervals': [(0, 10), (20, 30)]
}
if self.output_features == ['c']:
tty = np.array([63., 64., 65., 66., 67., 68., 69., 82.]).reshape(-1, 1, 1)
tvy = np.arange(83, 87).reshape(-1, 1, 1)
ttesty = np.array([62., 87., 88., 89.]).reshape(-1, 1, 1)
else:
tty, tvy, ttesty = None, None, None
tr_examples = 10 - (self.lookback - 1) if self.lookback > 1 else 10
val_examples = 6 - (self.lookback - 1) if self.lookback > 1 else 6
test_examples = 6 - (self.lookback - 1) if self.lookback > 1 else 6
loader = build_and_test_loader(data, config, self.output_features, tr_examples, val_examples, test_examples,
true_train_y=tty,
true_val_y=tvy,
true_test_y=ttesty,
save=self.save)
assert loader.source_is_df
return
def test_with_one_feature_transformation(self):
examples = 20
data = np.arange(int(examples * 3), dtype=np.int32).reshape(-1, examples).transpose()
data = pd.DataFrame(data, columns=['a', 'b', 'c'])
config = {'input_features':self.input_features,
'output_features': self.output_features,
'lookback': self.lookback,
'transformation': [{'method': 'minmax', 'features': ['a']}],
}
if self.output_features == ['c']:
tty = np.arange(42, 51).reshape(-1, 1, 1)
tvy = np.arange(51, 55).reshape(-1, 1, 1)
ttesty = np.arange(55, 60).reshape(-1, 1, 1)
else:
tty, tvy, ttesty = None, None, None
tr_examples = 11 - (self.lookback - 1) if self.lookback > 1 else 11
val_examples = 6 - (self.lookback - 1) if self.lookback > 1 else 6
loader = build_and_test_loader(data, config, self.output_features, tr_examples, val_examples, 5,
true_train_y=tty,
true_val_y=tvy,
true_test_y=ttesty,
save=self.save)
assert loader.source_is_df
return
def test_with_one_feature_multi_transformation(self):
examples = 20
data = np.arange(int(examples * 3), dtype=np.int32).reshape(-1, examples).transpose()
data = pd.DataFrame(data, columns=['a', 'b', 'c'])
config = {'input_features':self.input_features,
'output_features': self.output_features,
'lookback': self.lookback,
'transformation': [{'method': 'minmax', 'features': ['a']}, {'method': 'zscore', 'features': ['a']}],
}
if self.output_features == ['c']:
tty = np.arange(42, 51).reshape(-1, 1, 1)
tvy = np.arange(51, 55).reshape(-1, 1, 1)
ttesty = np.arange(55, 60).reshape(-1, 1, 1)
else:
tty, tvy, ttesty = None, None, None
tr_examples = 11 - (self.lookback - 1) if self.lookback > 1 else 11
val_examples = 6 - (self.lookback - 1) if self.lookback > 1 else 6
loader = build_and_test_loader(data, config, self.output_features, tr_examples, val_examples, 5,
true_train_y=tty,
true_val_y=tvy,
true_test_y=ttesty,
save=self.save)
assert loader.source_is_df
return
def test_with_one_feature_multi_transformation_on_diff_features(self):
examples = 20
data = np.arange(int(examples * 3), dtype=np.int32).reshape(-1, examples).transpose()
data = pd.DataFrame(data, columns=['a', 'b', 'c'])
config = {'input_features':self.input_features,
'output_features': self.output_features,
'lookback': self.lookback,
'transformation': [{'method': 'minmax', 'features': ['a', 'b', 'c']}, {'method': 'zscore', 'features': ['c']}],
}
tr_examples = 11 - (self.lookback - 1) if self.lookback > 1 else 11
val_examples = 6 - (self.lookback - 1) if self.lookback > 1 else 6
loader = build_and_test_loader(data, config, self.output_features, tr_examples, val_examples, 5,
save=self.save)
assert loader.source_is_df
return
def test_with_input_transformation(self):
examples = 20
data = np.arange(int(examples * 3), dtype=np.int32).reshape(-1, examples).transpose()
data = pd.DataFrame(data, columns=['a', 'b', 'c'])
config = {'input_features':self.input_features,
'output_features': self.output_features,
'lookback': self.lookback,
'transformation': [{'method': 'minmax', 'features': ['a', 'b']}],
}
if self.output_features == ['c']:
tty = np.arange(42, 51).reshape(-1, 1, 1)
tvy = np.arange(51, 55).reshape(-1, 1, 1)
ttesty = np.arange(55, 60).reshape(-1, 1, 1)
else:
tty, tvy, ttesty = None, None, None
tr_examples = 11 - (self.lookback - 1) if self.lookback > 1 else 11
val_examples = 6 - (self.lookback - 1) if self.lookback > 1 else 6
loader = build_and_test_loader(data, config, self.output_features, tr_examples, val_examples, 5,
true_train_y=tty,
true_val_y=tvy,
true_test_y=ttesty,
save=self.save)
assert loader.source_is_df
return
def test_with_input_transformation_as_dict(self):
examples = 20
data = np.arange(int(examples * 3), dtype=np.int32).reshape(-1, examples).transpose()
data = pd.DataFrame(data, columns=['a', 'b', 'c'])
config = {'input_features':self.input_features,
'output_features': self.output_features,
'lookback': self.lookback,
'transformation': {'method': 'minmax', 'features': ['a', 'b']},
}
if self.output_features == ['c']:
tty = np.arange(42, 51).reshape(-1, 1, 1)
tvy = np.arange(51, 55).reshape(-1, 1, 1)
ttesty = np.arange(55, 60).reshape(-1, 1, 1)
else:
tty, tvy, ttesty = None, None, None
tr_examples = 11 - (self.lookback - 1) if self.lookback > 1 else 11
val_examples = 6 - (self.lookback - 1) if self.lookback > 1 else 6
loader = build_and_test_loader(data, config, self.output_features, tr_examples, val_examples, 5,
true_train_y=tty,
true_val_y=tvy,
true_test_y=ttesty,
save=self.save)
assert loader.source_is_df
return
def test_with_output_transformation(self):
examples = 20
data = np.arange(int(examples * 3), dtype=np.int32).reshape(-1, examples).transpose()
data = pd.DataFrame(data, columns=['a', 'b', 'c'])
config = {'input_features':self.input_features,
'output_features': self.output_features,
'lookback': self.lookback,
'transformation': {'method': 'minmax', 'features': ['c']},
}
tr_examples = 11 - (self.lookback - 1) if self.lookback > 1 else 11
val_examples = 6 - (self.lookback - 1) if self.lookback > 1 else 6
loader = build_and_test_loader(data, config, self.output_features, tr_examples, val_examples, 5,
save=self.save)
assert loader.source_is_df
return
def test_with_indices_and_intervals(self):
examples = 30
data = np.arange(int(examples * 3), dtype=np.int32).reshape(-1, examples).transpose()
data = pd.DataFrame(data, columns=['a', 'b', 'c'],
index=pd.date_range('20110101', periods=30, freq='D'))
config = {'input_features':self.input_features,
'output_features': self.output_features,
'lookback': self.lookback, 'train_data': 'random',
'transformation': 'minmax',
'intervals': [(0, 10), (20, 30)]
}
tr_examples = 10 - (self.lookback - 1) if self.lookback > 1 else 10
val_examples = 5 - (self.lookback - 1) if self.lookback > 1 else 5
loader = build_and_test_loader(data, config, self.output_features, tr_examples, val_examples, 5,
save=self.save)
assert loader.source_is_df
return
def test_with_indices_and_intervals_same_val_data(self):
examples = 30
data = np.arange(int(examples * 3), dtype=np.int32).reshape(-1, examples).transpose()
data = pd.DataFrame(data, columns=['a', 'b', 'c'],
index=pd.date_range('20110101', periods=30, freq='D'))
config = {'input_features':self.input_features,
'output_features': self.output_features,
'lookback': self.lookback, 'train_data': 'random', 'val_data': 'same',
'transformation': 'minmax',
'intervals': [(0, 10), (20, 30)]
}
tr_examples = 13 - (self.lookback - 1) if self.lookback > 1 else 13
loader = build_and_test_loader(data, config, self.output_features, tr_examples, 5, 5,
check_examples=False,
save=self.save)
assert loader.source_is_df
return
def test_with_indices_and_intervals_no_val_data(self):
examples = 30
data = np.arange(int(examples * 3), dtype=np.int32).reshape(-1, examples).transpose()
data = pd.DataFrame(data, columns=['a', 'b', 'c'],
index=pd.date_range('20110101', periods=30, freq='D'))
config = {'input_features':self.input_features,
'output_features': self.output_features,
'lookback': self.lookback,
'train_data': 'random', 'val_fraction': 0.0,
'transformation': 'minmax',
'intervals': [(0, 10), (20, 30)]
}
tr_examples = 13 - (self.lookback - 1) if self.lookback > 1 else 13
loader = build_and_test_loader(data, config, self.output_features, tr_examples, 0, 5,
save=self.save)
assert loader.source_is_df
return
def test_with_indices_and_nans(self):
examples = 30
data = np.arange(int(examples * 3), dtype=np.int32).reshape(-1, examples).transpose()
data = pd.DataFrame(data, columns=['a', 'b', 'c'],
index=pd.date_range('20110101', periods=30, freq='D'))
if self.output_features is not None:
data['c'].iloc[10:20] = np.nan
config = {'input_features':self.input_features,
'output_features': self.output_features,
'lookback': self.lookback,
'train_data': 'random',
}
tr_examples = 10 - (self.lookback - 1) if self.lookback > 1 else 10
loader = build_and_test_loader(data, config, self.output_features, tr_examples, 4, 6,
save=self.save)
assert loader.source_is_df
config['allow_nan_labels'] = 2 if len(self.output_features) == 1 else 1
tr_examples = 15 - (self.lookback - 1) if self.lookback > 1 else 15
loader = build_and_test_loader(data, config, self.output_features, tr_examples, 6, 9,
save=self.save)
assert loader.source_is_df
return
def test_with_indices_and_nans_interpolate(self):
examples = 30
data = np.arange(int(examples * 3), dtype=np.int32).reshape(-1, examples).transpose()
data = pd.DataFrame(data, columns=['a', 'b', 'c'],
index=pd.date_range('20110101', periods=30, freq='D'))
if self.output_features is not None:
data['b'].iloc[10:20] = np.nan
config = {'input_features': self.input_features,
'output_features': self.output_features,
'lookback': self.lookback,
'nan_filler': {'method': 'KNNImputer', 'features': self.input_features},
'train_data': 'random',
}
if self.input_features == ['a']:
tr_examples = 10 - (self.lookback - 1) if self.lookback > 1 else 10
val_examples = 6 - (self.lookback - 1) if self.lookback > 1 else 6
test_examples = 6
else:
tr_examples = 15 - (self.lookback - 1) if self.lookback > 1 else 15
val_examples = 6
test_examples = 9
build_and_test_loader(data, config, self.output_features,
tr_examples, val_examples, test_examples,
save=self.save)
data['c'].iloc[10:20] = np.nan
if 'b' not in self.output_features:
config = {'input_features': self.input_features,
'output_features': self.output_features,
'lookback': self.lookback,
'nan_filler': {'method': 'KNNImputer', 'features': ['b']},
'train_data': 'random',
}
tr_examples = 10 - (self.lookback - 1) if self.lookback > 1 else 10
build_and_test_loader(data, config, self.output_features, tr_examples, 4, 6,
save=self.save)
config = {'input_features': self.input_features,
'output_features': self.output_features,
'lookback': self.lookback,
'nan_filler': {'method': 'KNNImputer', 'features': ['b'], 'imputer_args': {'n_neighbors': 4}},
'train_data': 'random',
}
tr_examples = 10 - (self.lookback - 1) if self.lookback > 1 else 10
build_and_test_loader(data, config, self.output_features, tr_examples, 4, 6,
save=self.save)
return
def test_with_indices_and_nans_at_irregular_intervals(self):
if self.output_features is not None and len(self.output_features)>1:
examples = 40
data = np.arange(int(examples * 3), dtype=np.int32).reshape(-1, examples).transpose()
data = pd.DataFrame(data, columns=['a', 'b', 'c'],
index=pd.date_range('20110101', periods=40, freq='D'))
data['b'].iloc[20:30] = np.nan
data['c'].iloc[10:20] = np.nan
config = {'input_features':self.input_features,
'output_features': self.output_features,
'lookback': self.lookback,
'train_data': 'random',
}
tr_examples = 10 - (self.lookback - 1) if self.lookback > 1 else 10
loader = build_and_test_loader(data, config, self.output_features, tr_examples, 4, 6,
save=self.save)
assert loader.source_is_df
config['allow_nan_labels'] = self.allow_nan_labels
loader = build_and_test_loader(data, config, self.output_features, 18, 8, 12,
save=self.save)
assert loader.source_is_df
return
def test_with_intervals_and_nans(self):
# if data contains nans and we also have intervals
if self.output_features is not None:
examples = 40
data = np.arange(int(examples * 3), dtype=np.int32).reshape(-1, examples).transpose()
data = pd.DataFrame(data, columns=['a', 'b', 'c'],
index=pd.date_range('20110101', periods=40, freq='D'))
data['c'].iloc[20:30] = np.nan
config = {'input_features':self.input_features,
'output_features': self.output_features,
'lookback': self.lookback,
'intervals': [(0, 10), (20, 40)]
}
tr_examples = 11 - (self.lookback - 1) if self.lookback > 1 else 11
val_examples = 6 - (self.lookback - 1) if self.lookback > 1 else 6
loader = build_and_test_loader(data, config, self.output_features, tr_examples, val_examples, 5,
check_examples=False, # todo
save=self.save)
assert loader.source_is_df
config['allow_nan_labels'] = 2 if len(self.output_features) == 1 else 1
tr_examples = 15 - (self.lookback - 1) if self.lookback > 1 else 15
val_examples = 7 - (self.lookback - 1) if self.lookback > 1 else 7
loader = build_and_test_loader(data, config, self.output_features, tr_examples, val_examples, 8,
save=self.save)
assert loader.source_is_df
return
def test_with_intervals_and_nans_at_irregular_intervals(self):
# if data contains nans and we also have intervals
if self.output_features is not None and len(self.output_features) > 1:
examples = 50
data = np.arange(int(examples * 3), dtype=np.int32).reshape(-1, examples).transpose()
data = pd.DataFrame(data, columns=['a', 'b', 'c'],
index=pd.date_range('20110101', periods=50, freq='D'))
data['b'].iloc[20:30] = np.nan
data['c'].iloc[40:50] = np.nan
config = {'input_features':self.input_features,
'output_features': self.output_features,
'lookback': self.lookback,
'intervals': [(0, 10), (20, 50)]
}
loader = build_and_test_loader(data, config, self.output_features, 9, 4, 5,
check_examples=False,
save=self.save)
assert loader.source_is_df
config['allow_nan_labels'] = self.allow_nan_labels
loader = build_and_test_loader(data, config, self.output_features, 18, 7, 11,
save=self.save)
assert loader.source_is_df
return
def test_with_intervals_and_nans_same_val_data(self):
# if data contains nans and we also have intervals and val_data is same
if self.output_features is not None:
examples = 40
data = np.arange(int(examples * 3), dtype=np.int32).reshape(-1, examples).transpose()
data = pd.DataFrame(data, columns=['a', 'b', 'c'],
index=pd.date_range('20110101', periods=40, freq='D'))
data['c'].iloc[20:30] = np.nan
config = {'input_features':self.input_features,
'output_features': self.output_features,
'lookback': self.lookback,
'val_data': 'same',
'intervals': [(0, 10), (20, 40)]
}
tr_examples = 15 - (self.lookback - 1) if self.lookback > 1 else 15
loader = build_and_test_loader(data, config, self.output_features, tr_examples, 5, 5,
check_examples=False,
save=self.save)
assert loader.source_is_df
config['allow_nan_labels'] = self.allow_nan_labels
tr_examples = 20 - (self.lookback - 1) if self.lookback > 1 else 20
loader = build_and_test_loader(data, config, self.output_features, tr_examples, 8, 8,
check_examples=False,
save=self.save)
assert loader.source_is_df
return
def test_with_intervals_and_nans_at_irregular_intervals_and_same_val_data(self):
# if data contains nans and we also have intervals and val_data is same
if self.output_features is not None and len(self.output_features) > 1:
examples = 50
data = np.arange(int(examples * 3), dtype=np.int32).reshape(-1, examples).transpose()
data = pd.DataFrame(data, columns=['a', 'b', 'c'],
index=pd.date_range('20110101', periods=50, freq='D'))
data['b'].iloc[20:30] = np.nan
data['c'].iloc[40:50] = np.nan
config = {'input_features':self.input_features,
'output_features': self.output_features,
'lookback': self.lookback,
'val_data': 'same',
'intervals': [(0, 10), (20, 50)]
}
loader = build_and_test_loader(data, config, self.output_features, 13, 5, 5,
check_examples=False,
save=self.save)
assert loader.source_is_df
config['allow_nan_labels'] = self.allow_nan_labels
loader = build_and_test_loader(data, config, self.output_features, 25, 11, 11,
check_examples=False,
save=self.save)
assert loader.source_is_df
return
def test_with_intervals_and_nans_no_val_data(self):
# if data contains nans and we also have intervals and val_data is same
if self.output_features is not None:
examples = 40
data = np.arange(int(examples * 3), dtype=np.int32).reshape(-1, examples).transpose()
data = pd.DataFrame(data, columns=['a', 'b', 'c'],
index=pd.date_range('20110101', periods=40, freq='D'))
data['c'].iloc[20:30] = np.nan
config = {'input_features':self.input_features,
'output_features': self.output_features,
'lookback': self.lookback,
'val_fraction': 0.0,
'intervals': [(0, 10), (20, 40)]
}
tr_examples = 15 - (self.lookback - 1) if self.lookback > 1 else 15
loader = build_and_test_loader(data, config, self.output_features, tr_examples, 0, 5,
check_examples=False,
save=self.save)
assert loader.source_is_df
config['allow_nan_labels'] = self.allow_nan_labels
tr_examples = 20 - (self.lookback - 1) if self.lookback > 1 else 20
loader = build_and_test_loader(data, config, self.output_features, tr_examples, 0, 8,
save=self.save)
assert loader.source_is_df
return
def test_with_intervals_and_nans_at_irreg_intervals_and_no_val_data(self):
# if data contains nans and we also have intervals and val_data is same
if self.output_features is not None and len(self.output_features) > 1:
examples = 50
data = np.arange(int(examples * 3), dtype=np.int32).reshape(-1, examples).transpose()
data = pd.DataFrame(data, columns=['a', 'b', 'c'],
index=pd.date_range('20110101', periods=50, freq='D'))
data['b'].iloc[20:30] = np.nan
data['c'].iloc[40:50] = np.nan
config = {'input_features':self.input_features,
'output_features': self.output_features,
'lookback': self.lookback,
'val_fraction': 0.0,
'intervals': [(0, 10), (20, 50)]
}
loader = build_and_test_loader(data, config, self.output_features, 13, 0, 5,
check_examples=False,
save=self.save)
assert loader.source_is_df
config['allow_nan_labels'] = self.allow_nan_labels
loader = build_and_test_loader(data, config, self.output_features, 25, 0, 11,
save=self.save)
assert loader.source_is_df
return
def test_with_indices_intervals_and_nans(self):
# if data contains nans and we also have intervals
if self.output_features is not None:
examples = 40
data = np.arange(int(examples * 3), dtype=np.int32).reshape(-1, examples).transpose()
data = pd.DataFrame(data, columns=['a', 'b', 'c'],
index=pd.date_range('20110101', periods=40, freq='D'))
data['c'].iloc[20:30] = np.nan
config = {'input_features':self.input_features,
'output_features': self.output_features,
'lookback': self.lookback,
'train_data': 'random',
'intervals': [(0, 10), (20, 40)]
}
tr_examples = 10 - (self.lookback - 1) if self.lookback > 1 else 10
loader = build_and_test_loader(data, config, self.output_features, tr_examples, 3, 5,
save=self.save)
assert loader.source_is_df
config['allow_nan_labels'] = 2 if len(self.output_features) == 1 else 1
tr_examples = 15 - (self.lookback - 1) if self.lookback > 1 else 15
loader = build_and_test_loader(data, config, self.output_features, tr_examples, 5, 8, save=self.save)
assert loader.source_is_df
return
def test_with_indices_intervals_and_nans_with_same_val_data(self):
# if data contains nans and we also have intervals
if self.output_features is not None:
examples = 40
data = np.arange(int(examples * 3), dtype=np.int32).reshape(-1, examples).transpose()
data = pd.DataFrame(data, columns=['a', 'b', 'c'],
index=pd.date_range('20110101', periods=40, freq='D'))
data['c'].iloc[20:30] = np.nan
config = {'input_features':self.input_features,
'output_features': self.output_features,
'lookback': self.lookback,
'train_data': 'random',
'val_data': 'same',
'intervals': [(0, 10), (20, 40)]
}
tr_examples = 13 - (self.lookback - 1) if self.lookback > 1 else 13
loader = build_and_test_loader(data, config, self.output_features, tr_examples, 5, 5,
check_examples=False,
save=self.save)
assert loader.source_is_df
config['allow_nan_labels'] = 2 if len(self.output_features) == 1 else 1
tr_examples = 20 - (self.lookback - 1) if self.lookback > 1 else 20
loader = build_and_test_loader(data, config, self.output_features, tr_examples, 8, 8,
check_examples=False,
save=self.save)
assert loader.source_is_df
return
def test_with_indices_intervals_and_nans_with_no_val_data(self):
# if data contains nans and we also have intervals
if self.output_features is not None:
examples = 40
data = np.arange(int(examples * 3), dtype=np.int32).reshape(-1, examples).transpose()
data = pd.DataFrame(data, columns=['a', 'b', 'c'],
index=pd.date_range('20110101', periods=40, freq='D'))
data['c'].iloc[20:30] = np.nan
config = {'input_features':self.input_features,
'output_features': self.output_features,
'lookback': self.lookback,
'train_data': 'random',
'val_fraction': 0.0,
'intervals': [(0, 10), (20, 40)]
}
tr_examples = 13 - (self.lookback - 1) if self.lookback > 1 else 13
loader = build_and_test_loader(data, config, self.output_features, tr_examples, 0, 5,
save=self.save)
assert loader.source_is_df
config['allow_nan_labels'] = 2 if len(self.output_features) == 1 else 1
tr_examples = 20 - (self.lookback - 1) if self.lookback > 1 else 20
loader = build_and_test_loader(data, config, self.output_features, tr_examples, 0, 8,
save=self.save)
assert loader.source_is_df
return
def test_with_indices_intervals_and_nans_with_no_test_data(self):
# if data contains nans and we also have intervals
if self.output_features is not None:
examples = 40
data = np.arange(int(examples * 3), dtype=np.int32).reshape(-1, examples).transpose()
data = pd.DataFrame(data, columns=['a', 'b', 'c'],
index=pd.date_range('20110101', periods=40, freq='D'))
data['c'].iloc[20:30] = np.nan
config = {'input_features':self.input_features,
'output_features': self.output_features,
'lookback': self.lookback, 'train_data': 'random', 'test_fraction': 0.0,
'intervals': [(0, 10), (20, 40)]
}
tr_examples = 13 - (self.lookback - 1) if self.lookback > 1 else 13
loader = build_and_test_loader(data, config, self.output_features, tr_examples, 5, 0,
save=self.save)
assert loader.source_is_df
config['allow_nan_labels'] = 2 if len(self.output_features) == 1 else 1
tr_examples = 20 - (self.lookback - 1) if self.lookback > 1 else 20
loader = build_and_test_loader(data, config, self.output_features, tr_examples, 8, 0,
save=self.save)
assert loader.source_is_df
return
def test_with_custom_indices_intervals_and_nans(self):
# if data contains nans and we also have intervals
if self.output_features is not None:
examples = 40
data = np.arange(int(examples * 3), dtype=np.int32).reshape(-1, examples).transpose()
data = pd.DataFrame(data, columns=['a', 'b', 'c'],
index=pd.date_range('20110101', periods=40, freq='D'))
data['c'].iloc[20:30] = np.nan
config = {'input_features':self.input_features,
'output_features': self.output_features,
'lookback': self.lookback,
'train_data': [1,2,3,4,5,6,7,8,9,10,11,12],
'intervals': [(0, 10), (20, 40)]
}
tr_examples = 10 - (self.lookback - 1) if self.lookback > 1 else 10
val_examples = 6 - (self.lookback - 1) if self.lookback > 1 else 6
test_examples = 6 - (self.lookback - 1) if self.lookback > 1 else 6
loader = build_and_test_loader(data, config, self.output_features, tr_examples, val_examples, test_examples,
save=self.save)
assert loader.source_is_df
config['allow_nan_labels'] = 2 if len(self.output_features) == 1 else 1
tr_examples = 10 - (self.lookback - 1) if self.lookback > 1 else 10
val_examples = 6 - (self.lookback - 1) if self.lookback > 1 else 6
test_examples = 16 - (self.lookback - 1) if self.lookback > 1 else 16
loader = build_and_test_loader(data, config, self.output_features, tr_examples, val_examples,
test_examples, save=self.save)
assert loader.source_is_df
return
def test_with_random_with_transformation_of_features():
examples = 100
data = np.arange(int(examples * 3), dtype=np.int32).reshape(-1, examples).transpose()
data = pd.DataFrame(data, columns=['a', 'b', 'c'], index=pd.date_range('20110101', periods=len(data), freq='D'))
data['date'] = data.index
config = {'input_features':['b'],
'output_features': ['c'],
'lookback': 5,
'train_data': 'random'}
dh = DataHandler(data, verbosity=0, **config)
x,y = dh.training_data()
return
def test_random_with_intervals():
data = np.random.randint(0, 1000, (40560, 14))
input_features = [f'input_{i}' for i in range(13)]
output_features = ['NDX']
data = pd.DataFrame(data, columns=input_features+output_features)
out = data["NDX"]
# put four chunks of missing intervals
intervals = [(100, 200), (1000, 8000), (10000, 31000)]
for interval in intervals:
st, en = interval[0], interval[1]
out[st:en] = np.nan
data["NDX"] = out
config = {
'input_features': input_features,
'output_features': output_features,
'lookback': 5,
'train_data': 'random',
'intervals': [(0, 99), (200, 999), (8000, 9999), (31000, 40560)],
}
build_and_test_loader(data, config, out_cols=output_features,
train_ex=6096, val_ex=2612, test_ex=3733,
assert_uniqueness=False,
save=False)
return
def make_cross_validator(cv, **kwargs):
model = Model(
model={'randomforestregressor': {}},
data=arg_beach(),
cross_validator=cv,
val_metric="mse",
verbosity=0,
**kwargs
)
return model
class TestCVs(object):
def test_kfold(self):
model = make_cross_validator(cv={'TimeSeriesSplit': {'n_splits': 5}})
model.cross_val_score()
model.dh.plot_TimeSeriesSplit_splits(show=False)
return
def test_loocv(self):
model = make_cross_validator(cv={'KFold': {'n_splits': 5}})
model.cross_val_score()
model.dh.plot_KFold_splits(show=False)
return
def test_tscv(self):
model = make_cross_validator(cv={'LeaveOneOut': {}}, test_fraction=0.6)
model.cross_val_score()
model.dh.plot_LeaveOneOut_splits(show=False)
return
#
# class TestDataLoader(unittest.TestCase):
#
# def test_OndDF(self):
# TestAllCases(
# input_features = ['a', 'b'],
# output_features=['c'], allow_nan_labels=2)
# return
#
# def test_OneDFTwoOut(self):
# TestAllCases(input_features = ['a'],
# output_features=['b', 'c'])
# return
#
# def test_MultiSources(self):
# test_multisource_basic()
# return
#
# def test_MultiUnequalSources(self):
# return
def test_AI4WaterDataSets():
config = {'intervals': [("20000101", "20011231")],
'input_features': ['precipitation_AWAP',
'evap_pan_SILO'],
'output_features': ['streamflow_MLd_inclInfilled'],
'dataset_args': {'stations': 1}
}
build_and_test_loader('CAMELS_AUS', config=config,
out_cols=['streamflow_MLd_inclInfilled'],
train_ex=358, val_ex=154, test_ex=219,
assert_uniqueness=False,
save=False)
return
def test_multisource_basic():
examples = 40
data = np.arange(int(examples * 4), dtype=np.int32).reshape(-1, examples).transpose()
df1 = pd.DataFrame(data, columns=['a', 'b', 'c', 'd'],
index=pd.date_range('20110101', periods=40, freq='D'))
df2 = pd.DataFrame(np.array([5,6]).repeat(40, axis=0).reshape(40, -1), columns=['len', 'dep'],
index=pd.date_range('20110101', periods=40, freq='D'))
input_features = [['a', 'b'], ['len', 'dep']]
output_features = [['c', 'd'], []]
lookback = 4
config = {'input_features': input_features,
'output_features': output_features,
'lookback': lookback}
build_and_test_loader(data=[df1, df2], config=config, out_cols=output_features,
train_ex=18, val_ex=8, test_ex=11,
save=True)
# #testing data as a dictionary
config['input_features'] = {'cont_data': ['a', 'b'], 'static_data': ['len', 'dep']}
config['output_features'] = {'cont_data': ['c', 'd'], 'static_data': []}
build_and_test_loader(data={'cont_data': df1, 'static_data': df2},
config=config, out_cols=config['output_features'],
train_ex=18, val_ex=8, test_ex=11,
save=True)
# #test when output_features for one data is not provided?
config['input_features'] = {'cont_data': ['a', 'b'], 'static_data': ['len', 'dep']}
config['output_features'] = {'cont_data': ['c', 'd']}
build_and_test_loader(data={'cont_data': df1, 'static_data': df2},
config=config, out_cols=config['output_features'],
train_ex=18, val_ex=8, test_ex=11,
save=False)
# # #testing with transformation
config['input_features'] = {'cont_data': ['a', 'b'], 'static_data': ['len', 'dep']}
config['transformation'] = {'cont_data': 'minmax', 'static_data': 'zscore'}
config['output_features'] = {'cont_data': ['c', 'd'], 'static_data': []}
build_and_test_loader(data={'cont_data': df1, 'static_data': df2},
config=config, out_cols=config['output_features'],
train_ex=18, val_ex=8, test_ex=11,
save=True)
# # testing with `same` `val_data`
config['val_data'] = 'same'
build_and_test_loader(data={'cont_data': df1, 'static_data': df2},
config=config, out_cols=config['output_features'],
train_ex=26, val_ex=11, test_ex=11,
save=True)
# # testing with random train indices
config['val_data'] = 'same'
config['train_data'] = random.sample(list(np.arange(37)), 25)
config['input_features'] = {'cont_data': ['a', 'b'], 'static_data': ['len', 'dep']}
config['output_features'] = {'cont_data': ['c', 'd'], 'static_data': []}
build_and_test_loader(data={'cont_data': df1, 'static_data': df2},
config=config, out_cols=config['output_features'],
train_ex=25, val_ex=12, test_ex=12,
save=True)
return
def test_multisource_basic2():
examples = 40
data = np.arange(int(examples * 4), dtype=np.int32).reshape(-1, examples).transpose()
df1 = pd.DataFrame(data, columns=['a', 'b', 'c', 'd'],
index=pd.date_range('20110101', periods=40, freq='D'))
df2 = pd.DataFrame(np.array([[5],[6], [7]]).repeat(40, axis=1).transpose(), columns=['len', 'dep', 'y'],
index=pd.date_range('20110101', periods=40, freq='D'))
input_features = [['a', 'b'], ['len', 'dep']]
output_features = [['c', 'd'], ['y']]
lookback = 4
config = {'input_features': input_features,
'output_features': output_features,
'lookback': lookback}
build_and_test_loader(data=[df1, df2], config=config, out_cols=output_features,
train_ex=18, val_ex=8, test_ex=11,
save=True)
config['input_features'] = {'cont_data': ['a', 'b'], 'static_data': ['len', 'dep']}
config['output_features'] = {'cont_data': ['c', 'd'], 'static_data': ['y']}
build_and_test_loader(data={'cont_data': df1, 'static_data': df2},
config=config,
out_cols=config['output_features'],
train_ex=18, val_ex=8, test_ex=11,
save=True)
return
def test_multisource_basic3():
examples = 40
data = np.arange(int(examples * 5), dtype=np.int32).reshape(-1, examples).transpose()
y_df = pd.DataFrame(data[:, -1], columns=['y'])
y_df.loc[y_df.sample(frac=0.5).index] = np.nan
cont_df = pd.DataFrame(data[:, 0:4], columns=['a', 'b', 'c', 'd'],
index=pd.date_range('20110101', periods=40, freq='D'))
static_df = pd.DataFrame(np.array([[5],[6], [7]]).repeat(40, axis=1).transpose(), columns=['len', 'dep', 'y'],
index=pd.date_range('20110101', periods=40, freq='D'))
disc_df = pd.DataFrame(np.random.randint(0, 10, (40, 4)), columns=['cl', 'o', 'do', 'bod'],
index=pd.date_range('20110101', periods=40, freq='D'))
cont_df['y'] = y_df.values
static_df['y'] = y_df.values
disc_df['y'] = y_df.values
input_features = [['len', 'dep'], ['a', 'b'], ['cl', 'o', 'do', 'bod']]
output_features = [['y'], ['c', 'y'], ['y']]
lookback = [1, 4, 1]
config = {'input_features': input_features,
'output_features': output_features,
'test_fraction': 0.3,
'val_fraction': 0.3,
'lookback': lookback}
# build_and_test_loader(data=[static_df, cont_df, disc_df], config=config, out_cols=output_features, train_ex=6,
# val_ex=4,
# test_ex=6, save=True)
data_handler = DataHandler(data=[static_df, cont_df, disc_df], verbosity=0, **config)
data_handler.training_data()
data_handler.validation_data()
data_handler.test_data()
return
def test_multisource_multi_loc():
examples = 40
data = np.arange(int(examples * 3), dtype=np.int32).reshape(-1, examples).transpose()
training_data = pd.DataFrame(data, columns=['a', 'b', 'c'],
index=pd.date_range('20110101', periods=40, freq='D'))
val_data = pd.DataFrame(data+1000.0, columns=['a', 'b', 'c'],
index=pd.date_range('20110101', periods=40, freq='D'))
test_data = pd.DataFrame(data+2000, columns=['a', 'b', 'c'],
index= | pd.date_range('20110101', periods=40, freq='D') | pandas.date_range |
from __future__ import absolute_import
from __future__ import print_function
import h5py
import argparse
import logging
import re
import numpy as np
import pandas as pd
import os
import apache_beam as beam
from apache_beam.io import ReadFromText
from apache_beam.io import WriteToText
from apache_beam.metrics import Metrics
from apache_beam.metrics.metric import MetricsFilter
from apache_beam.options.pipeline_options import PipelineOptions
from apache_beam.options.pipeline_options import SetupOptions
import tensorflow as tf
import tensorflow_transform as tft
import tensorflow_transform.beam as tft_beam
import keras
from keras import models, layers
from kerastuner import HyperModel
from kerastuner.tuners import Hyperband
from sklearn import preprocessing
def find_feature_type_in_tuple(tuple_arr,feature_name):
for tuple in tuple_arr:
(feature,feature_type) = tuple
if(feature == feature_name):
return feature_type
return -1
def extract_column_names(datatype_array,label_name):
category = []
numeric = []
string_arr = []
for i in datatype_array:
(column_name,columntype) = i
if(column_name != label_name):
if(columntype == 'Boolean' or columntype == 'Categorical'):
category.append(column_name)
elif(columntype == 'String'):
string_arr.append(column_name)
else:
numeric.append(column_name)
else:
label = (column_name,columntype)
return category,numeric,string_arr,label
def column_operation(datatype_filepath, known_args):
datatype_arr_temp = []
char_list = ["'","(",")"]
with open(datatype_filepath, "rb") as fp:
for i in fp.readlines():
tmp = i.decode().strip().split(",")
tmp[0] = str(tmp[0][2:-1])
tmp[1] = str(tmp[1][2:-2])
datatype_arr_temp.append((tmp[0], tmp[1]))
# Get label column name and column name array
raw_header = | pd.read_csv(known_args.input,header=None,nrows=1) | pandas.read_csv |
"""A module providing the `Model` class representing the global model and tying together
all the other classes defined in the `pygmol` package (concrete subclasses of
`Chemistry`, `PlasmaParameters` and `Equations`.)
"""
from typing import Union, Mapping
import numpy as np
import pandas
import pandas as pd
from numpy import ndarray
from scipy.integrate import solve_ivp
from .abc import Chemistry, PlasmaParameters
from .chemistry import chemistry_from_dict, validate_chemistry
from .equations import ElectronEnergyEquations
from .plasma_parameters import (
plasma_parameters_from_dict,
validate_plasma_parameters,
PlasmaParametersValidationError,
)
class ModelSolutionError(Exception):
"""Custom exception signaling problems with the global model solution."""
pass
class Model:
"""The Global Model class.
Takes instances of `Chemistry` (or dict following the correct interface), and
`PlasmaParameters` (or a dict following the correct interface), and instantiates
a concrete `Equations` subclass.
The model with consistent chemistry and plasma parameters inputs can be run with
the `run` method, and success checked with the `success` method. Other methods are
implemented to access the full final solution, reaction rates or wall fluxes, all
either as function of time, or the final values. Finally, the `diagnose` method
is provided to extract any partial results defined in the `Equations` subclass
(see the docstring.)
"""
def __init__(
self,
chemistry: Union[Chemistry, dict],
plasma_params: Union[PlasmaParameters, dict],
):
"""The global model initializer.
The model instance solves for the equations defined by the
`ElectronEnergyEquations` class.
Parameters
----------
chemistry : Chemistry
plasma_params : PlasmaParameters
Raises
------
ChemistryValidationError
Signals inconsistent chemistry passed.
PlasmaParametersValidationError
Signals inconsistent plasma parameters passed.
"""
if isinstance(chemistry, dict):
chemistry = chemistry_from_dict(chemistry)
if isinstance(plasma_params, dict):
plasma_params = plasma_parameters_from_dict(plasma_params)
self.chemistry = chemistry
self.plasma_params = plasma_params
self._validate_chemistry_and_plasma_parameters()
# placeholder for the equations employed by the model:
self.equations = None
# placeholder for whatever the selected low-level solver returns:
self.solution_raw = None
# placeholder for the array of time samples [sec]:
self.t = None
# 2D array of state vectors `y` for all time samples:
self.solution_primary = None
# `pandas.DataFrame` of all the final solution values:
self.solution = None
def _validate_chemistry_and_plasma_parameters(self):
"""Method running the validation on both chemistry and plasma parameters,
and checking if they are both consistent with each other.
Raises
------
ChemistryValidationError
PlasmaParametersValidationError
"""
validate_chemistry(self.chemistry)
validate_plasma_parameters(self.plasma_params)
if not set(self.plasma_params.feeds).issubset(self.chemistry.species_ids):
raise PlasmaParametersValidationError(
"Feed gas species defined in the plasma parameters are inconsistent "
"with the chemistry species ids!"
)
def _initialize_equations(self):
"""Populates the equations instance attribute."""
self.equations = ElectronEnergyEquations(self.chemistry, self.plasma_params)
def _solve(self, y0: ndarray = None, method: str = "BDF"):
"""Runs the low-level solver (`scipy.integrate.solve_ivp`).
The solver solves for the state vector *y* (see `Equations` documentation) from
the initial value `y0`. The raw solution from the solver is stored under the
`solution_raw` instance attribute. The equations must have been initialized
already!
Parameters
----------
y0 : ndarray, optional
The optional initial guess for the state vector (see the `Equations` docs).
If not passed, it's built using the `Equations.get_y0_default` method.
method : str, optional
The optional solver method forwarded to the low-level solver (see
`scipy.integrate.solve_ivp`). Defaults to ``"BDF"``.
Raises
------
ModelSolutionError
If the `solve_ivp` solver encounters an error, or if it is in any way
unsuccessful.
"""
if self.equations is None:
raise ModelSolutionError("The equations have not yet been initialized!")
if y0 is None:
y0 = self.equations.get_y0_default()
func = self.equations.ode_system_rhs
try:
self.solution_raw = solve_ivp(
func, (0, self.plasma_params.t_end), y0, method=method, t_eval=None
)
except ValueError as e:
raise ModelSolutionError(f"solve_ivp raised a ValueError: {e}")
def _build_solution(self):
"""Populates the `solution` instance attribute.
The `solution_raw` attribute must have already been populated by the `solve`
method! This method (`build_solution`) will take the raw rows of state vectors
*y* in time (see `Equations` docs) and turn them into the final solution values
by the appropriate methods supplied by `Equations` class.
The final solution will be saved as the `solution` instance attribute and will
take form of a pandas.DataFrame with columns consisting of ``"t"`` (the first
column of sampled time in [s]), followed by the
`equations.final_solution_labels`.
Raises
------
ModelSolutionError
If the `solution_raw` is None (not populated yet).
"""
if self.solution_raw is None:
raise ModelSolutionError("The solver has not yet been run!")
self.t = self.solution_raw.t
self.solution_primary = self.solution_raw.y.T
# build the final solution dataframe:
final_columns = ["t"] + list(self.equations.final_solution_labels)
final_values = np.stack(
[
np.r_[t_i, self.equations.get_final_solution_values(t_i, y_i)]
for t_i, y_i in zip(self.t, self.solution_primary)
]
)
self.solution = | pd.DataFrame(final_values, columns=final_columns) | pandas.DataFrame |
# -*- coding: utf-8 -*-
"""Compute statistical description of datasets"""
import multiprocessing
import itertools
from functools import partial
import numpy as np
import pandas as pd
import matplotlib
from pkg_resources import resource_filename
import pandas_profiling.formatters as formatters
import pandas_profiling.base as base
from pandas_profiling.plot import histogram, mini_histogram
def describe_numeric_1d(series, **kwargs):
"""Compute summary statistics of a numerical (`TYPE_NUM`) variable (a Series).
Also create histograms (mini an full) of its distribution.
Parameters
----------
series : Series
The variable to describe.
Returns
-------
Series
The description of the variable as a Series with index being stats keys.
"""
# Format a number as a percentage. For example 0.25 will be turned to 25%.
_percentile_format = "{:.0%}"
stats = dict()
stats['type'] = base.TYPE_NUM
stats['mean'] = series.mean()
stats['std'] = series.std()
stats['variance'] = series.var()
stats['min'] = series.min()
stats['max'] = series.max()
stats['range'] = stats['max'] - stats['min']
# To avoid to compute it several times
_series_no_na = series.dropna()
for percentile in np.array([0.05, 0.25, 0.5, 0.75, 0.95]):
# The dropna() is a workaround for https://github.com/pydata/pandas/issues/13098
stats[_percentile_format.format(percentile)] = _series_no_na.quantile(percentile)
stats['iqr'] = stats['75%'] - stats['25%']
# stats['kurtosis'] = series.kurt()
# stats['skewness'] = series.skew()
stats['sum'] = series.sum()
stats['mad'] = series.mad()
stats['cv'] = stats['std'] / stats['mean'] if stats['mean'] else np.NaN
stats['n_zeros'] = (len(series) - np.count_nonzero(series))
stats['p_zeros'] = stats['n_zeros'] * 1.0 / len(series)
# Histograms
stats['histogram'] = histogram(series, **kwargs)
stats['mini_histogram'] = mini_histogram(series, **kwargs)
return pd.Series(stats, name=series.name)
def describe_date_1d(series):
"""Compute summary statistics of a date (`TYPE_DATE`) variable (a Series).
Also create histograms (mini an full) of its distribution.
Parameters
----------
series : Series
The variable to describe.
Returns
-------
Series
The description of the variable as a Series with index being stats keys.
"""
stats = dict()
stats['type'] = base.TYPE_DATE
stats['min'] = series.min()
stats['max'] = series.max()
stats['range'] = stats['max'] - stats['min']
# Histograms
stats['histogram'] = histogram(series)
stats['mini_histogram'] = mini_histogram(series)
return pd.Series(stats, name=series.name)
def describe_categorical_1d(series):
"""Compute summary statistics of a categorical (`TYPE_CAT`) variable (a Series).
Parameters
----------
series : Series
The variable to describe.
Returns
-------
Series
The description of the variable as a Series with index being stats keys.
"""
# Only run if at least 1 non-missing value
value_counts, distinct_count = base.get_groupby_statistic(series)
top, freq = value_counts.index[0], value_counts.iloc[0]
names = []
result = []
if base.get_vartype(series) == base.TYPE_CAT:
names += ['top', 'freq', 'type']
result += [top, freq, base.TYPE_CAT]
return pd.Series(result, index=names, name=series.name)
def describe_boolean_1d(series):
"""Compute summary statistics of a boolean (`TYPE_BOOL`) variable (a Series).
Parameters
----------
series : Series
The variable to describe.
Returns
-------
Series
The description of the variable as a Series with index being stats keys.
"""
value_counts, distinct_count = base.get_groupby_statistic(series)
top, freq = value_counts.index[0], value_counts.iloc[0]
# The mean of boolean is an interesting information
mean = series.mean()
names = []
result = []
names += ['top', 'freq', 'type', 'mean']
result += [top, freq, base.TYPE_BOOL, mean]
return pd.Series(result, index=names, name=series.name)
def describe_constant_1d(series):
"""Compute summary statistics of a constant (`S_TYPE_CONST`) variable (a Series).
Parameters
----------
series : Series
The variable to describe.
Returns
-------
Series
The description of the variable as a Series with index being stats keys.
"""
return pd.Series([base.S_TYPE_CONST], index=['type'], name=series.name)
def describe_unique_1d(series):
"""Compute summary statistics of a unique (`S_TYPE_UNIQUE`) variable (a Series).
Parameters
----------
series : Series
The variable to describe.
Returns
-------
Series
The description of the variable as a Series with index being stats keys.
"""
return pd.Series([base.S_TYPE_UNIQUE], index=['type'], name=series.name)
def describe_supported(series, **kwargs):
"""Compute summary statistics of a supported variable (a Series).
Parameters
----------
series : Series
The variable to describe.
Returns
-------
Series
The description of the variable as a Series with index being stats keys.
"""
leng = len(series) # number of observations in the Series
count = series.count() # number of non-NaN observations in the Series
n_infinite = count - series.count() # number of infinte observations in the Series
value_counts, distinct_count = base.get_groupby_statistic(series)
if count > distinct_count > 1:
mode = series.mode().iloc[0]
else:
mode = series[0]
results_data = {'count': count,
'distinct_count': distinct_count,
'p_missing': 1 - count * 1.0 / leng,
'n_missing': (leng - count) if (leng - count) > 1e-8 else 0,
'p_infinite': n_infinite * 1.0 / leng,
'n_infinite': n_infinite,
'is_unique': distinct_count == leng,
'mode': mode,
'p_unique': distinct_count * 1.0 / leng}
try:
# pandas 0.17 onwards
results_data['memorysize'] = series.memory_usage()
except:
results_data['memorysize'] = 0
return pd.Series(results_data, name=series.name)
def describe_unsupported(series, **kwargs):
"""Compute summary statistics of a unsupported (`S_TYPE_UNSUPPORTED`) variable (a Series).
Parameters
----------
series : Series
The variable to describe.
Returns
-------
Series
The description of the variable as a Series with index being stats keys.
"""
leng = len(series) # number of observations in the Series
count = series.count() # number of non-NaN observations in the Series
n_infinite = count - series.count() # number of infinte observations in the Series
results_data = {'count': count,
'p_missing': 1 - count * 1.0 / leng,
'n_missing': (leng - count) if (leng - count) > 1e-8 else 0,
'p_infinite': n_infinite * 1.0 / leng,
'n_infinite': n_infinite,
'type': base.S_TYPE_UNSUPPORTED}
try:
# pandas 0.17 onwards
results_data['memorysize'] = series.memory_usage()
except:
results_data['memorysize'] = 0
return pd.Series(results_data, name=series.name)
def describe_1d(data, **kwargs):
"""Compute summary statistics of a variable (a Series).
The description is different according to the type of the variable.
However a set of common stats is also computed.
Parameters
----------
series : Series
The variable to describe.
Returns
-------
Series
The description of the variable as a Series with index being stats keys.
"""
# Replace infinite values with NaNs to avoid issues with
# histograms later.
data.replace(to_replace=[np.inf, np.NINF, np.PINF], value=np.nan, inplace=True)
result = pd.Series({}, name=data.name)
vartype = base.get_vartype(data)
if vartype == base.S_TYPE_UNSUPPORTED:
result = result.append(describe_unsupported(data))
else:
result = result.append(describe_supported(data))
if vartype == base.S_TYPE_CONST:
result = result.append(describe_constant_1d(data))
elif vartype == base.TYPE_BOOL:
result = result.append(describe_boolean_1d(data))
elif vartype == base.TYPE_NUM:
result = result.append(describe_numeric_1d(data, **kwargs))
elif vartype == base.TYPE_DATE:
result = result.append(describe_date_1d(data))
elif vartype == base.S_TYPE_UNIQUE:
result = result.append(describe_unique_1d(data))
else:
# TYPE_CAT
result = result.append(describe_categorical_1d(data))
return result
def multiprocess_func(x, **kwargs):
return x[0], describe_1d(x[1], **kwargs)
def describe(df, bins=10, check_correlation=True, correlation_threshold=0.9,
correlation_overrides=None, check_recoded=False, pool_size=multiprocessing.cpu_count(), **kwargs):
"""Generates a dict containing summary statistics for a given dataset stored as a pandas `DataFrame`.
Used has is it will output its content as an HTML report in a Jupyter notebook.
Parameters
----------
df : DataFrame
Data to be analyzed
bins : int
Number of bins in histogram.
The default is 10.
check_correlation : boolean
Whether or not to check correlation.
It's `True` by default.
correlation_threshold: float
Threshold to determine if the variable pair is correlated.
The default is 0.9.
correlation_overrides : list
Variable names not to be rejected because they are correlated.
There is no variable in the list (`None`) by default.
check_recoded : boolean
Whether or not to check recoded correlation (memory heavy feature).
Since it's an expensive computation it can be activated for small datasets.
`check_correlation` must be true to disable this check.
It's `False` by default.
pool_size : int
Number of workers in thread pool
The default is equal to the number of CPU.
Returns
-------
dict
Containing the following keys:
* table: general statistics on the dataset
* variables: summary statistics for each variable
* freq: frequency table
Notes:
------
* The section dedicated to check the correlation should be externalized
"""
if not isinstance(df, pd.DataFrame):
raise TypeError("df must be of type pandas.DataFrame")
if df.empty:
raise ValueError("df can not be empty")
try:
# reset matplotlib style before use
# Fails in matplotlib 1.4.x so plot might look bad
matplotlib.style.use("default")
except:
pass
matplotlib.style.use(resource_filename(__name__, "pandas_profiling.mplstyle"))
# Clearing the cache before computing stats
base.clear_cache()
if not pd.Index(np.arange(0, len(df))).equals(df.index):
# Treat index as any other column
df = df.reset_index()
kwargs.update({'bins': bins})
# Describe all variables in a univariate way
if pool_size == 1:
local_multiprocess_func = partial(multiprocess_func, **kwargs)
ldesc = {col: s for col, s in map(local_multiprocess_func, df.iteritems())}
else:
pool = multiprocessing.Pool(pool_size)
local_multiprocess_func = partial(multiprocess_func, **kwargs)
ldesc = {col: s for col, s in pool.map(local_multiprocess_func, df.iteritems())}
pool.close()
# Get correlations
# dfcorrPear = df.corr(method="pearson")
# dfcorrSpear = df.corr(method="spearman")
# Check correlations between variable
if check_correlation is True:
''' TODO: corr(x,y) > 0.9 and corr(y,z) > 0.9 does not imply corr(x,z) > 0.9
If x~y and y~z but not x~z, it would be better to delete only y
Better way would be to find out which variable causes the highest increase in multicollinearity.
'''
corr = dfcorrPear.copy()
for x, corr_x in corr.iterrows():
if correlation_overrides and x in correlation_overrides:
continue
for y, corr in corr_x.iteritems():
if x == y: break
if corr > correlation_threshold:
ldesc[x] = pd.Series(['CORR', y, corr], index=['type', 'correlation_var', 'correlation'])
if check_recoded:
categorical_variables = [(name, data) for (name, data) in df.iteritems() if base.get_vartype(data)=='CAT']
for (name1, data1), (name2, data2) in itertools.combinations(categorical_variables, 2):
if correlation_overrides and name1 in correlation_overrides:
continue
confusion_matrix=pd.crosstab(data1,data2)
if confusion_matrix.values.diagonal().sum() == len(df):
ldesc[name1] = pd.Series(['RECODED', name2], index=['type', 'correlation_var'])
# Convert ldesc to a DataFrame
names = []
ldesc_indexes = sorted([x.index for x in ldesc.values()], key=len)
for idxnames in ldesc_indexes:
for name in idxnames:
if name not in names:
names.append(name)
variable_stats = pd.concat(ldesc, join_axes= | pd.Index([names]) | pandas.Index |
#!/usr/bin/env python
# coding: utf-8
import datetime
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import pytz
import random
# Date and Time
# =============
print(datetime.datetime(2000, 1, 1))
print(datetime.datetime.strptime("2000/1/1", "%Y/%m/%d"))
print(datetime.datetime(2000, 1, 1, 0, 0).strftime("%Y%m%d"))
# to_datetime
# ===========
print(pd.to_datetime("4th of July"))
print(pd.to_datetime("13.01.2000"))
print(pd.to_datetime("7/8/2000"))
print(pd.to_datetime("7/8/2000", dayfirst=True))
print(issubclass(pd.Timestamp, datetime.datetime))
ts = pd.to_datetime(946684800000000000)
print(ts.year, ts.month, ts.day, ts.weekday())
index = [pd.Timestamp("2000-01-01"),
pd.Timestamp("2000-01-02"),
pd.Timestamp("2000-01-03")]
ts = pd.Series(np.random.randn(len(index)), index=index)
print(ts)
print(ts.index)
ts = pd.Series(np.random.randn(len(index)),
index=["2000-01-01", "2000-01-02", "2000-01-03"])
print(ts.index)
index = pd.to_datetime(["2000-01-01", "2000-01-02", "2000-01-03"])
ts = pd.Series(np.random.randn(len(index)), index=index)
print(ts.index)
print(pd.date_range(start="2000-01-01", periods=3, freq='H'))
print(pd.date_range(start="2000-01-01", periods=3, freq='T'))
print(pd.date_range(start="2000-01-01", periods=3, freq='S'))
print(pd.date_range(start="2000-01-01", periods=3, freq='B'))
print(pd.date_range(start="2000-01-01", periods=5, freq='1D1h1min10s'))
print(pd.date_range(start="2000-01-01", periods=5, freq='12BH'))
bh = pd.tseries.offsets.BusinessHour(start='07:00', end='22:00')
print(bh)
print(pd.date_range(start="2000-01-01", periods=5, freq=12 * bh))
print(pd.date_range(start="2000-01-01", periods=5, freq='W-FRI'))
print(pd.date_range(start="2000-01-01", periods=5, freq='WOM-2TUE'))
s = pd.date_range(start="2000-01-01", periods=10, freq='BAS-JAN')
t = pd.date_range(start="2000-01-01", periods=10, freq='A-FEB')
s.union(t)
index = pd.date_range(start='2000-01-01', periods=200, freq='B')
print(index)
ts = pd.Series(np.random.randn(len(index)), index=index)
walk = ts.cumsum()
walk.plot()
plt.savefig('random_walk.png')
print(ts.head())
print(ts[0])
print(ts[1:3])
print(ts['2000-01-03'])
print(ts[datetime.datetime(2000, 1, 3)])
print(ts['2000-01-03':'2000-01-05'])
print(ts['2000-01-03':datetime.datetime(2000, 1, 5)])
print(ts['2000-01-03':datetime.date(2000, 1, 5)])
print(ts['2000-02'])
print(ts['2000-03':'2000-05'])
small_ts = ts['2000-02-01':'2000-02-05']
print(small_ts)
print(small_ts.shift(2))
print(small_ts.shift(-2))
# Downsampling
# ============
rng = pd.date_range('4/29/2015 8:00', periods=600, freq='T')
ts = pd.Series(np.random.randint(0, 100, len(rng)), index=rng)
print(ts.head())
print(ts.resample('10min').head())
print(ts.resample('10min', how='sum').head())
print(ts.resample('1h', how='sum').head())
print(ts.resample('1h', how='max').head())
print(ts.resample('1h', how=lambda m: random.choice(m)).head())
print(ts.resample('1h', how='ohlc').head())
# Upsampling
# ==========
rng = pd.date_range('4/29/2015 8:00', periods=10, freq='H')
ts = pd.Series(np.random.randint(0, 100, len(rng)), index=rng)
print(ts.head())
print(ts.resample('15min'))
print(ts.head())
print(ts.resample('15min', fill_method='ffill').head())
print(ts.resample('15min', fill_method='bfill').head())
print(ts.resample('15min', fill_method='ffill', limit=2).head())
print(ts.resample('15min', fill_method='ffill', limit=2, loffset='5min').head())
tsx = ts.resample('15min')
print(tsx.interpolate().head())
# Time zone handling
# ==================
t = pd.Timestamp('2000-01-01')
print(t.tz is None)
t = pd.Timestamp('2000-01-01', tz='Europe/Berlin')
print(t.tz)
rng = pd.date_range('1/1/2000 00:00', periods=10, freq='D', tz='Europe/London')
print(rng)
tz = pytz.timezone('Europe/London')
rng = pd.date_range('1/1/2000 00:00', periods=10, freq='D', tz=tz)
print(rng)
rng = pd.date_range('1/1/2000 00:00', periods=10, freq='D')
ts = pd.Series(np.random.randn(len(rng)), rng)
print(ts.index.tz is None)
ts_utc = ts.tz_localize('UTC')
print(ts_utc.index.tz)
print(ts_utc.tz_convert('Europe/Berlin').index.tz)
print(ts_utc.tz_convert(None).index.tz is None)
print(ts_utc.tz_localize(None).index.tz is None)
# Time deltas
# ===========
print(pd.Timedelta('1 days'))
print( | pd.Timedelta('-1 days 2 min 10s 3us') | pandas.Timedelta |
import numpy as np;
import pandas as pd;
import os
raw_data_path = os.path.join(os.path.pardir,'data','raw')
train_file_path = os.path.join(raw_data_path,'train.csv')
test_file_path = os.path.join(raw_data_path,'test.csv')
#read data as dataframe
train_df = pd.read_csv(train_file_path,index_col='PassengerId')
test_df = pd.read_csv(test_file_path,index_col='PassengerId')
#print(train_df.info())
#print(test_df.info())
#we need to predict the survived possibility value for passenger in test dataset.
#Let's create the Survived column in test df with dummy value -888
test_df['Survived'] = -888
# print(test_df.info())
#concat the test and train data
df = | pd.concat((train_df,test_df),axis=0) | pandas.concat |
from typing import Tuple, Union
import datetime
import os
from xlrd import XLRDError
import pandas as pd
def load_df(url: str, sheet_name: Union[int, str] = 0) -> Tuple[pd.DataFrame, bool]:
from_html = os.path.splitext(url)[1] in ['.htm', '.html']
# Read from input file
if from_html:
try:
sheets = pd.read_html(url, encoding='iso8859_8') # TODO: get encoding as parameter
except ValueError:
print(f'Failed parsing {url}')
raise
assert sheets
df = sheets[0]
# Make the first row a column name, and drop it
df.columns = df.iloc[0]
df = df.reindex(df.index.drop(0))
df.reset_index(inplace=True, drop=True)
else:
try:
df = pd.read_excel(url, sheet_name=sheet_name)
except XLRDError:
print('Should be parsed as HTML?')
raise
assert not df.empty
return df, from_html
def parse_input_df(
df: pd.DataFrame,
from_html: bool,
num_header_rows: int,
columns_name: str,
drop_first_header: bool = False,
num_last_rows_to_discard: int = None,
num_columns_to_keep: int = None,
column_translator: dict = None,
convert_to_numeric: bool = True
) -> Tuple[pd.DataFrame, pd.DataFrame]:
if num_columns_to_keep is not None:
df.drop(df.columns[range(num_columns_to_keep, df.shape[1])], axis=1, inplace=True)
assert not df.empty
column_translator_internal = {
'שעה': 'Time',
'תאריך': 'Date'
}
if column_translator:
column_translator_internal.update(column_translator)
# Get headers and set it as dataframe columns
df_headers = df.iloc[0:num_header_rows - 1].fillna('').transpose().reset_index(drop=True)
assert not df_headers.empty, 'No headers'
# Translate all Hebrew columns to English
df_headers[0].replace(column_translator_internal, inplace=True)
if drop_first_header:
# Drop the first header, not before saving 'Date' and 'Time' header names
# This is due to the header dataframe being in the following form:
# 0 1
# 0 Flares HHPFlare
# 1 Flares NEWFF
# 2 Flares OLDFF
# 3 CAOL Flares Flare-PP-185
# 4 CAOL Flares Flare-PP-180
# 5 CAOL Flares Flare-Monomers
# 6 Time
# 7 Date
df_headers[1] = df_headers.apply(lambda row: row[1] or row[0], axis=1)
df_headers.drop(df_headers.columns[0], axis='columns', inplace=True)
# Join multiple-line headers to a single line
columns = df_headers.apply(lambda row: row.map(str).str.cat(sep=' ').strip(), axis=1)
# Update dataframe with manipulated headers
df.columns = columns
df.columns.name = columns_name
# Move units to a separate dataframe
df_units = df.iloc[num_header_rows-1:num_header_rows].reset_index(drop=True)
df_units.columns = columns
df_units.drop(columns=['Date', 'Time'], axis=1, inplace=True)
# Discard headers and units
df.drop(df.head(num_header_rows).index, inplace=True)
# Drop last garbage rows
if num_last_rows_to_discard:
df.drop(df.tail(num_last_rows_to_discard).index, inplace=True)
# Fix bad input where midnight is '01/01/1900 0:00:00'
# Convert the time to midnight, and increment day to the next day
midnight_invalid = [datetime.datetime(1900, 1, 1, 0, 0, 0), '24:00']
midnight_valid = datetime.time()
for i in df[df['Time'].isin(midnight_invalid)].index:
df.loc[i, 'Time'] = midnight_valid
df.loc[i, 'Date'] = pd.to_datetime(df.loc[i, 'Date'], dayfirst=True) + datetime.timedelta(days=1)
df.to_csv('after_fix_midnight.csv')
# Make sure that Date and Time contain datetime values
# (it is expected to be string when using read_html instead of read_excel)
# TODO: make sure this does not corrupt dataframe read using read_html
if from_html:
df['Date'] = pd.to_datetime(df['Date'], dayfirst=True)
df.to_csv('after_to_datetime.csv')
def normalize_time(x):
if isinstance(x, str):
return pd.Timestamp(x).to_pydatetime().time()
return x # TODO: consider converting pd.Timestamp to datetime.time
# elif isinstance(x, pd.Timestamp):
# return x
# else:
# return x
df['Time'] = df['Time'].apply(normalize_time)
# Create combined 'DateTime' with both date and time
df['DateTime'] = df.apply(lambda x: datetime.datetime.combine(x['Date'].date(), x['Time']), axis=1)
df.to_csv('after_combine.csv')
# Create a DatetimeIndex and assign it to the dataframe.
df.index = | pd.DatetimeIndex(df['DateTime']) | pandas.DatetimeIndex |
import numpy as np
import pandas as pd
import datetime as dt
import matplotlib.pyplot as plt
import matplotlib.dates as date
import seaborn as sns
import urllib
sns.set_context('talk')
data_crime_raw = pd.read_csv('.\\NYPD_Complaint_Data_Historic.csv',
usecols=['CMPLNT_FR_DT', 'OFNS_DESC', 'LAW_CAT_CD', 'Latitude', 'Longitude'],
dtype={'OFNS_DESC':'category', 'LAW_CAT_CD':'category', 'Latitude':float, 'Longitude':float})
data_crime_raw['CMPLNT_FR_DT'] = pd.to_datetime(data_crime_raw['CMPLNT_FR_DT'], format='%m/%d/%Y', errors='coerce')
data_311_raw = pd.read_csv('.\\311_Service_Requests_from_2010_to_Present.csv',
usecols=['Created Date', 'Complaint Type', 'Descriptor', 'Latitude', 'Longitude'],
dtype={'Complaint Type':'category', 'Descriptor':'category', 'Latitude':float, 'Longitude':float})
data_311_raw['created_date'] = pd.to_datetime(data_311_raw['Created Date'], format='%m/%d/%Y %I:%M:%S %p', errors='coerce')
data_crime = data_crime_raw[data_crime_raw.CMPLNT_FR_DT > pd.to_datetime(dt.date(2010,1,1))].dropna()
data_311 = data_311_raw[data_311_raw.created_date < pd.to_datetime(dt.date(2016,1,1))].dropna()
minlat = data_crime.Latitude.min()
maxlat = data_crime.Latitude.max()
minlon = data_crime.Longitude.min()
maxlon = data_crime.Longitude.max()
latrange = np.arange(minlat, maxlat+0.02, 0.02)
lonrange = np.arange(minlon, maxlon+0.02, 0.02)
data_crime = data_crime[data_crime.LAW_CAT_CD != 'VIOLATION']
d_c_grouped = data_crime.groupby(
by=[pd.cut(data_crime['Latitude'], latrange),
pd.cut(data_crime['Longitude'], lonrange),
pd.TimeGrouper(key='CMPLNT_FR_DT',freq='M')])
d_311_grouped = data_311.groupby(
by=[pd.cut(data_311['Latitude'], latrange),
| pd.cut(data_311['Longitude'], lonrange) | pandas.cut |
# pylint: disable-msg=W0612,E1101,W0141
import nose
from numpy.random import randn
import numpy as np
from pandas.core.index import Index, MultiIndex
from pandas import Panel, DataFrame, Series, notnull, isnull
from pandas.util.testing import (assert_almost_equal,
assert_series_equal,
assert_frame_equal,
assertRaisesRegexp)
import pandas.core.common as com
import pandas.util.testing as tm
from pandas.compat import (range, lrange, StringIO, lzip, u, cPickle,
product as cart_product, zip)
import pandas as pd
import pandas.index as _index
class TestMultiLevel(tm.TestCase):
_multiprocess_can_split_ = True
def setUp(self):
import warnings
warnings.filterwarnings(action='ignore', category=FutureWarning)
index = MultiIndex(levels=[['foo', 'bar', 'baz', 'qux'],
['one', 'two', 'three']],
labels=[[0, 0, 0, 1, 1, 2, 2, 3, 3, 3],
[0, 1, 2, 0, 1, 1, 2, 0, 1, 2]],
names=['first', 'second'])
self.frame = DataFrame(np.random.randn(10, 3), index=index,
columns=Index(['A', 'B', 'C'], name='exp'))
self.single_level = MultiIndex(levels=[['foo', 'bar', 'baz', 'qux']],
labels=[[0, 1, 2, 3]],
names=['first'])
# create test series object
arrays = [['bar', 'bar', 'baz', 'baz', 'qux', 'qux', 'foo', 'foo'],
['one', 'two', 'one', 'two', 'one', 'two', 'one', 'two']]
tuples = lzip(*arrays)
index = MultiIndex.from_tuples(tuples)
s = Series(randn(8), index=index)
s[3] = np.NaN
self.series = s
tm.N = 100
self.tdf = tm.makeTimeDataFrame()
self.ymd = self.tdf.groupby([lambda x: x.year, lambda x: x.month,
lambda x: x.day]).sum()
# use Int64Index, to make sure things work
self.ymd.index.set_levels([lev.astype('i8')
for lev in self.ymd.index.levels],
inplace=True)
self.ymd.index.set_names(['year', 'month', 'day'],
inplace=True)
def test_append(self):
a, b = self.frame[:5], self.frame[5:]
result = a.append(b)
tm.assert_frame_equal(result, self.frame)
result = a['A'].append(b['A'])
tm.assert_series_equal(result, self.frame['A'])
def test_dataframe_constructor(self):
multi = DataFrame(np.random.randn(4, 4),
index=[np.array(['a', 'a', 'b', 'b']),
np.array(['x', 'y', 'x', 'y'])])
tm.assert_isinstance(multi.index, MultiIndex)
self.assertNotIsInstance(multi.columns, MultiIndex)
multi = DataFrame(np.random.randn(4, 4),
columns=[['a', 'a', 'b', 'b'],
['x', 'y', 'x', 'y']])
tm.assert_isinstance(multi.columns, MultiIndex)
def test_series_constructor(self):
multi = Series(1., index=[np.array(['a', 'a', 'b', 'b']),
np.array(['x', 'y', 'x', 'y'])])
tm.assert_isinstance(multi.index, MultiIndex)
multi = Series(1., index=[['a', 'a', 'b', 'b'],
['x', 'y', 'x', 'y']])
tm.assert_isinstance(multi.index, MultiIndex)
multi = Series(lrange(4), index=[['a', 'a', 'b', 'b'],
['x', 'y', 'x', 'y']])
tm.assert_isinstance(multi.index, MultiIndex)
def test_reindex_level(self):
# axis=0
month_sums = self.ymd.sum(level='month')
result = month_sums.reindex(self.ymd.index, level=1)
expected = self.ymd.groupby(level='month').transform(np.sum)
assert_frame_equal(result, expected)
# Series
result = month_sums['A'].reindex(self.ymd.index, level=1)
expected = self.ymd['A'].groupby(level='month').transform(np.sum)
assert_series_equal(result, expected)
# axis=1
month_sums = self.ymd.T.sum(axis=1, level='month')
result = month_sums.reindex(columns=self.ymd.index, level=1)
expected = self.ymd.groupby(level='month').transform(np.sum).T
assert_frame_equal(result, expected)
def test_binops_level(self):
def _check_op(opname):
op = getattr(DataFrame, opname)
month_sums = self.ymd.sum(level='month')
result = op(self.ymd, month_sums, level='month')
broadcasted = self.ymd.groupby(level='month').transform(np.sum)
expected = op(self.ymd, broadcasted)
assert_frame_equal(result, expected)
# Series
op = getattr(Series, opname)
result = op(self.ymd['A'], month_sums['A'], level='month')
broadcasted = self.ymd['A'].groupby(
level='month').transform(np.sum)
expected = op(self.ymd['A'], broadcasted)
assert_series_equal(result, expected)
_check_op('sub')
_check_op('add')
_check_op('mul')
_check_op('div')
def test_pickle(self):
def _test_roundtrip(frame):
pickled = cPickle.dumps(frame)
unpickled = cPickle.loads(pickled)
assert_frame_equal(frame, unpickled)
_test_roundtrip(self.frame)
_test_roundtrip(self.frame.T)
_test_roundtrip(self.ymd)
_test_roundtrip(self.ymd.T)
def test_reindex(self):
reindexed = self.frame.ix[[('foo', 'one'), ('bar', 'one')]]
expected = self.frame.ix[[0, 3]]
assert_frame_equal(reindexed, expected)
def test_reindex_preserve_levels(self):
new_index = self.ymd.index[::10]
chunk = self.ymd.reindex(new_index)
self.assertIs(chunk.index, new_index)
chunk = self.ymd.ix[new_index]
self.assertIs(chunk.index, new_index)
ymdT = self.ymd.T
chunk = ymdT.reindex(columns=new_index)
self.assertIs(chunk.columns, new_index)
chunk = ymdT.ix[:, new_index]
self.assertIs(chunk.columns, new_index)
def test_sort_index_preserve_levels(self):
result = self.frame.sort_index()
self.assertEquals(result.index.names, self.frame.index.names)
def test_repr_to_string(self):
repr(self.frame)
repr(self.ymd)
repr(self.frame.T)
repr(self.ymd.T)
buf = StringIO()
self.frame.to_string(buf=buf)
self.ymd.to_string(buf=buf)
self.frame.T.to_string(buf=buf)
self.ymd.T.to_string(buf=buf)
def test_repr_name_coincide(self):
index = MultiIndex.from_tuples([('a', 0, 'foo'), ('b', 1, 'bar')],
names=['a', 'b', 'c'])
df = DataFrame({'value': [0, 1]}, index=index)
lines = repr(df).split('\n')
self.assert_(lines[2].startswith('a 0 foo'))
def test_getitem_simple(self):
df = self.frame.T
col = df['foo', 'one']
assert_almost_equal(col.values, df.values[:, 0])
self.assertRaises(KeyError, df.__getitem__, ('foo', 'four'))
self.assertRaises(KeyError, df.__getitem__, 'foobar')
def test_series_getitem(self):
s = self.ymd['A']
result = s[2000, 3]
result2 = s.ix[2000, 3]
expected = s.reindex(s.index[42:65])
expected.index = expected.index.droplevel(0).droplevel(0)
assert_series_equal(result, expected)
result = s[2000, 3, 10]
expected = s[49]
self.assertEquals(result, expected)
# fancy
result = s.ix[[(2000, 3, 10), (2000, 3, 13)]]
expected = s.reindex(s.index[49:51])
assert_series_equal(result, expected)
# key error
self.assertRaises(KeyError, s.__getitem__, (2000, 3, 4))
def test_series_getitem_corner(self):
s = self.ymd['A']
# don't segfault, GH #495
# out of bounds access
self.assertRaises(IndexError, s.__getitem__, len(self.ymd))
# generator
result = s[(x > 0 for x in s)]
expected = s[s > 0]
assert_series_equal(result, expected)
def test_series_setitem(self):
s = self.ymd['A']
s[2000, 3] = np.nan
self.assert_(isnull(s.values[42:65]).all())
self.assert_(notnull(s.values[:42]).all())
self.assert_(notnull(s.values[65:]).all())
s[2000, 3, 10] = np.nan
self.assert_(isnull(s[49]))
def test_series_slice_partial(self):
pass
def test_frame_getitem_setitem_boolean(self):
df = self.frame.T.copy()
values = df.values
result = df[df > 0]
expected = df.where(df > 0)
assert_frame_equal(result, expected)
df[df > 0] = 5
values[values > 0] = 5
assert_almost_equal(df.values, values)
df[df == 5] = 0
values[values == 5] = 0
assert_almost_equal(df.values, values)
# a df that needs alignment first
df[df[:-1] < 0] = 2
np.putmask(values[:-1], values[:-1] < 0, 2)
assert_almost_equal(df.values, values)
with assertRaisesRegexp(TypeError, 'boolean values only'):
df[df * 0] = 2
def test_frame_getitem_setitem_slice(self):
# getitem
result = self.frame.ix[:4]
expected = self.frame[:4]
assert_frame_equal(result, expected)
# setitem
cp = self.frame.copy()
cp.ix[:4] = 0
self.assert_((cp.values[:4] == 0).all())
self.assert_((cp.values[4:] != 0).all())
def test_frame_getitem_setitem_multislice(self):
levels = [['t1', 't2'], ['a', 'b', 'c']]
labels = [[0, 0, 0, 1, 1], [0, 1, 2, 0, 1]]
midx = MultiIndex(labels=labels, levels=levels, names=[None, 'id'])
df = DataFrame({'value': [1, 2, 3, 7, 8]}, index=midx)
result = df.ix[:, 'value']
assert_series_equal(df['value'], result)
result = df.ix[1:3, 'value']
assert_series_equal(df['value'][1:3], result)
result = df.ix[:, :]
assert_frame_equal(df, result)
result = df
df.ix[:, 'value'] = 10
result['value'] = 10
assert_frame_equal(df, result)
df.ix[:, :] = 10
assert_frame_equal(df, result)
def test_frame_getitem_multicolumn_empty_level(self):
f = DataFrame({'a': ['1', '2', '3'],
'b': ['2', '3', '4']})
f.columns = [['level1 item1', 'level1 item2'],
['', 'level2 item2'],
['level3 item1', 'level3 item2']]
result = f['level1 item1']
expected = DataFrame([['1'], ['2'], ['3']], index=f.index,
columns=['level3 item1'])
assert_frame_equal(result, expected)
def test_frame_setitem_multi_column(self):
df = DataFrame(randn(10, 4), columns=[['a', 'a', 'b', 'b'],
[0, 1, 0, 1]])
cp = df.copy()
cp['a'] = cp['b']
assert_frame_equal(cp['a'], cp['b'])
# set with ndarray
cp = df.copy()
cp['a'] = cp['b'].values
assert_frame_equal(cp['a'], cp['b'])
#----------------------------------------
# #1803
columns = MultiIndex.from_tuples([('A', '1'), ('A', '2'), ('B', '1')])
df = DataFrame(index=[1, 3, 5], columns=columns)
# Works, but adds a column instead of updating the two existing ones
df['A'] = 0.0 # Doesn't work
self.assertTrue((df['A'].values == 0).all())
# it broadcasts
df['B', '1'] = [1, 2, 3]
df['A'] = df['B', '1']
assert_series_equal(df['A', '1'], df['B', '1'])
assert_series_equal(df['A', '2'], df['B', '1'])
def test_getitem_tuple_plus_slice(self):
# GH #671
df = DataFrame({'a': lrange(10),
'b': lrange(10),
'c': np.random.randn(10),
'd': np.random.randn(10)})
idf = df.set_index(['a', 'b'])
result = idf.ix[(0, 0), :]
expected = idf.ix[0, 0]
expected2 = idf.xs((0, 0))
assert_series_equal(result, expected)
assert_series_equal(result, expected2)
def test_getitem_setitem_tuple_plus_columns(self):
# GH #1013
df = self.ymd[:5]
result = df.ix[(2000, 1, 6), ['A', 'B', 'C']]
expected = df.ix[2000, 1, 6][['A', 'B', 'C']]
assert_series_equal(result, expected)
def test_getitem_multilevel_index_tuple_unsorted(self):
index_columns = list("abc")
df = DataFrame([[0, 1, 0, "x"], [0, 0, 1, "y"]],
columns=index_columns + ["data"])
df = df.set_index(index_columns)
query_index = df.index[:1]
rs = df.ix[query_index, "data"]
xp = Series(['x'], index=MultiIndex.from_tuples([(0, 1, 0)]))
assert_series_equal(rs, xp)
def test_xs(self):
xs = self.frame.xs(('bar', 'two'))
xs2 = self.frame.ix[('bar', 'two')]
assert_series_equal(xs, xs2)
assert_almost_equal(xs.values, self.frame.values[4])
# GH 6574
# missing values in returned index should be preserrved
acc = [
('a','abcde',1),
('b','bbcde',2),
('y','yzcde',25),
('z','xbcde',24),
('z',None,26),
('z','zbcde',25),
('z','ybcde',26),
]
df = DataFrame(acc, columns=['a1','a2','cnt']).set_index(['a1','a2'])
expected = DataFrame({ 'cnt' : [24,26,25,26] }, index=Index(['xbcde',np.nan,'zbcde','ybcde'],name='a2'))
result = df.xs('z',level='a1')
assert_frame_equal(result, expected)
def test_xs_partial(self):
result = self.frame.xs('foo')
result2 = self.frame.ix['foo']
expected = self.frame.T['foo'].T
assert_frame_equal(result, expected)
assert_frame_equal(result, result2)
result = self.ymd.xs((2000, 4))
expected = self.ymd.ix[2000, 4]
assert_frame_equal(result, expected)
# ex from #1796
index = MultiIndex(levels=[['foo', 'bar'], ['one', 'two'], [-1, 1]],
labels=[[0, 0, 0, 0, 1, 1, 1, 1],
[0, 0, 1, 1, 0, 0, 1, 1],
[0, 1, 0, 1, 0, 1, 0, 1]])
df = DataFrame(np.random.randn(8, 4), index=index,
columns=list('abcd'))
result = df.xs(['foo', 'one'])
expected = df.ix['foo', 'one']
assert_frame_equal(result, expected)
def test_xs_level(self):
result = self.frame.xs('two', level='second')
expected = self.frame[self.frame.index.get_level_values(1) == 'two']
expected.index = expected.index.droplevel(1)
assert_frame_equal(result, expected)
index = MultiIndex.from_tuples([('x', 'y', 'z'), ('a', 'b', 'c'),
('p', 'q', 'r')])
df = DataFrame(np.random.randn(3, 5), index=index)
result = df.xs('c', level=2)
expected = df[1:2]
expected.index = expected.index.droplevel(2)
assert_frame_equal(result, expected)
# this is a copy in 0.14
result = self.frame.xs('two', level='second')
# setting this will give a SettingWithCopyError
# as we are trying to write a view
def f(x):
x[:] = 10
self.assertRaises(com.SettingWithCopyError, f, result)
def test_xs_level_multiple(self):
from pandas import read_table
text = """ A B C D E
one two three four
a b 10.0032 5 -0.5109 -2.3358 -0.4645 0.05076 0.3640
a q 20 4 0.4473 1.4152 0.2834 1.00661 0.1744
x q 30 3 -0.6662 -0.5243 -0.3580 0.89145 2.5838"""
df = read_table(StringIO(text), sep='\s+', engine='python')
result = df.xs(('a', 4), level=['one', 'four'])
expected = df.xs('a').xs(4, level='four')
assert_frame_equal(result, expected)
# this is a copy in 0.14
result = df.xs(('a', 4), level=['one', 'four'])
# setting this will give a SettingWithCopyError
# as we are trying to write a view
def f(x):
x[:] = 10
self.assertRaises(com.SettingWithCopyError, f, result)
# GH2107
dates = lrange(20111201, 20111205)
ids = 'abcde'
idx = MultiIndex.from_tuples([x for x in cart_product(dates, ids)])
idx.names = ['date', 'secid']
df = DataFrame(np.random.randn(len(idx), 3), idx, ['X', 'Y', 'Z'])
rs = df.xs(20111201, level='date')
xp = df.ix[20111201, :]
assert_frame_equal(rs, xp)
def test_xs_level0(self):
from pandas import read_table
text = """ A B C D E
one two three four
a b 10.0032 5 -0.5109 -2.3358 -0.4645 0.05076 0.3640
a q 20 4 0.4473 1.4152 0.2834 1.00661 0.1744
x q 30 3 -0.6662 -0.5243 -0.3580 0.89145 2.5838"""
df = read_table(StringIO(text), sep='\s+', engine='python')
result = df.xs('a', level=0)
expected = df.xs('a')
self.assertEqual(len(result), 2)
assert_frame_equal(result, expected)
def test_xs_level_series(self):
s = self.frame['A']
result = s[:, 'two']
expected = self.frame.xs('two', level=1)['A']
assert_series_equal(result, expected)
s = self.ymd['A']
result = s[2000, 5]
expected = self.ymd.ix[2000, 5]['A']
assert_series_equal(result, expected)
# not implementing this for now
self.assertRaises(TypeError, s.__getitem__, (2000, slice(3, 4)))
# result = s[2000, 3:4]
# lv =s.index.get_level_values(1)
# expected = s[(lv == 3) | (lv == 4)]
# expected.index = expected.index.droplevel(0)
# assert_series_equal(result, expected)
# can do this though
def test_get_loc_single_level(self):
s = Series(np.random.randn(len(self.single_level)),
index=self.single_level)
for k in self.single_level.values:
s[k]
def test_getitem_toplevel(self):
df = self.frame.T
result = df['foo']
expected = df.reindex(columns=df.columns[:3])
expected.columns = expected.columns.droplevel(0)
assert_frame_equal(result, expected)
result = df['bar']
result2 = df.ix[:, 'bar']
expected = df.reindex(columns=df.columns[3:5])
expected.columns = expected.columns.droplevel(0)
| assert_frame_equal(result, expected) | pandas.util.testing.assert_frame_equal |
#!/usr/bin/python
# https://media.readthedocs.org/pdf/pynag/latest/pynag.pdf
# first try to export hosts, will be expanded over the time.
from pynag.Model import Parsers
import os
from tempfile import mkstemp
from shutil import move
from os import remove, close
import re
import time
import pandas as pd
from pandas.io.json import json_normalize
nagios_config = '/usr/local/nagios/etc/nagios.cfg'
nagios_sock = '/usr/local/nagios/var/rw/live.sock'
# Implemented wait for nagios socket
wcount = 0
while not os.path.exists(nagios_sock):
time.sleep(1)
wcount +=1
if wcount >= 60:
raise SystemExit("Wait timeout exceed {} for socket: {}".format(wcount, nagios_sock))
p = Parsers.Livestatus(livestatus_socket_path=nagios_sock, nagios_cfg_file=nagios_config)
filename = "nagios_export.csv"
if not os.path.isfile(nagios_config):
raise SystemExit("file: {} does not exist".format(nagios_config))
hosts = p.get_hosts()
jhosts = | json_normalize(hosts) | pandas.io.json.json_normalize |
import pygame
import math
import numpy as np
import networkx as nx
import itertools as it
import pandas as pd
import colorsys
import generateTreeWithPrior as generateTree
import generatePartitionGivenTreeWithPrior as generatePartition
class SampleNodesFeatureMeans():
def __init__(self, allFeatureMeans):
self.allFeatureMeans = allFeatureMeans
def __call__(self, tree):
nonRootNodes = [n for n,d in dict(tree.in_degree()).items() if d!=0]
featureIndex = self.allFeatureMeans.columns.values
featureName = featureIndex.copy()
np.random.shuffle(featureName)
nonRootNodesDepthes = [tree.node[treeNode]['depth'] for treeNode in nonRootNodes]
changeFeatures = [featureName[changeFeatureIndex - 1] for changeFeatureIndex in nonRootNodesDepthes]
nodesPossibleChangeFeatureMeans = [self.allFeatureMeans[changeFeature] for changeFeature in changeFeatures]
for node in nonRootNodes:
parentNode = list(tree.predecessors(node))
parentFeatureMeans = tree.node[parentNode[0]]['featureMeans'][:].copy()
possibleChangeFeatureMeans = nodesPossibleChangeFeatureMeans[nonRootNodes.index(node)]
parentFeatureMeans[changeFeatures[nonRootNodes.index(node)]] = possibleChangeFeatureMeans[np.random.randint(len(possibleChangeFeatureMeans))]
#print(parentFeatureMeans, tree.node[node]['partition'], nonRootNodes, nonRootNodesDepthes)
tree.node[node]['featureMeans'] = parentFeatureMeans.copy()
return tree
def makeLeafNodeParametersDataFrameWithPartitionAndFeatureMean(tree):
leafNodes = [n for n,d in dict(tree.out_degree()).items() if d==0]
featureMeansLeafPartion = | pd.concat([tree.node[leafNode]['featureMeans'] for leafNode in leafNodes]) | pandas.concat |
import pandas as pd
import numpy as np
from sklearn.ensemble import RandomForestClassifier
from sklearn.ensemble import AdaBoostClassifier
from sklearn.tree import DecisionTreeClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import f1_score
from sklearn.preprocessing import LabelEncoder
from sklearn.metrics import confusion_matrix
TIME_INTERVAL = "60s"
data = | pd.read_csv("/home/matilda/PycharmProjects/FailurePrediction/4_analysis/clog/data/NOVA/resources/"+TIME_INTERVAL+"/classification_data/classification_TFIDF_"+ TIME_INTERVAL +"_.csv") | pandas.read_csv |
# -*- coding: utf-8 -*-
"""
Tests that comments are properly handled during parsing
for all of the parsers defined in parsers.py
"""
import numpy as np
import pandas.util.testing as tm
from pandas import DataFrame
from pandas.compat import StringIO
class CommentTests(object):
def test_comment(self):
data = """A,B,C
1,2.,4.#hello world
5.,NaN,10.0
"""
expected = [[1., 2., 4.],
[5., np.nan, 10.]]
df = self.read_csv(StringIO(data), comment='#')
tm.assert_almost_equal(df.values, expected)
df = self.read_table( | StringIO(data) | pandas.compat.StringIO |
# -*- coding: utf-8 -*-
"""
Created on Wed Jun 3 10:54:45 2020
@author: Janusz
"""
import logging
import tkinter as tk
import tkinter.font as tkFont
from collections import namedtuple
import easyocr
import pandas as pd
from cv2 import cv2 as cv
import dss
from windowcapture import WindowCapture
# REMEMBER TO SET GAME TO WINDOW MODE!!!!!!!!!!!!!!!!!!!!
# !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
# IF you want to test GUI without game then change LOAD_IMAGE in dss.py file
logging.basicConfig(level=logging.DEBUG)
VARIABLE_PRINT_MODE = 0
# VARIABLE_PRINT_MODE = 1
IMAGE_DEBUG_MODE_FULLSCREEN = 0
reader = easyocr.Reader(["en"])
# drawing rectangles
line_color = (255, 0, 255)
marker_color = (255, 0, 255)
rgb_colours_list = [
(255, 0, 255),
(0, 255, 255),
(0, 255, 255),
(0, 255, 255),
(0, 255, 0),
]
# GUI
UPSIDE = 0 # champion pool
DOWNSIDE = 16 # champions to buy
SHIFT_BETWEEN_ORIGINS = 6
ORIGIN_LABEL_POSITION_COLUMN = 1
CHAMPIONS_TO_BUY_VISIBLE = 0
# CHAMPIONS_TO_BUY_VISIBLE = 1
# TEST_BUTTON_VISIBLE = 0
TEST_BUTTON_VISIBLE = 1
Champion = namedtuple(
"Champion",
[
"name",
"name_ocr",
"index_ocr",
"ChampCounter",
"origin_prim",
"origin_sec",
"class_prim",
"class_sec",
"OriginPrimCounter",
"OriginSecCounter",
"ClassPrimCounter",
"ClassSecCounter",
],
)
# WINDOW THINGS
if CHAMPIONS_TO_BUY_VISIBLE:
MainWindow = tk.Tk()
MainWindow.geometry("1900x800+0+0")
MainWindow.title("TFTDSS")
else:
MainWindow = tk.Tk()
MainWindow.geometry("1900x450+0+0")
MainWindow.title("TFTDSS")
BOLDED_FONT = tkFont.Font(family="Arial", size=10, weight=tkFont.BOLD)
df = | pd.read_csv("champions_data_scaled.csv") | pandas.read_csv |
import pandas as pd
from sodapy import Socrata
import datetime
import definitions
# global variables for main data:
hhs_data, test_data, nyt_data_us, nyt_data_state, max_hosp_date = [],[],[],[],[]
"""
get_data()
Fetches data from API, filters, cleans, and combines with provisional.
After running, global variables are filled for use in subsequent functions
"""
def get_data():
global nyt_data_us
global nyt_data_state
global test_data
global hhs_data
global max_hosp_date
nyt_data_us = pd.read_csv("https://raw.githubusercontent.com/nytimes/covid-19-data/master/rolling-averages/us.csv")
nyt_data_state = pd.read_csv("https://raw.githubusercontent.com/nytimes/covid-19-data/master/rolling-averages/us-states.csv")
client = Socrata("healthdata.gov", None)
results = client.get("g62h-syeh", limit=2000000)
test_results = client.get("j8mb-icvb", limit=2000000)
print("LOG: Fetched all raw data")
# Filter data to get columns of interest
hhs_data = pd.DataFrame.from_records(results)[['state', 'date', 'inpatient_beds_used_covid']]
hhs_data.inpatient_beds_used_covid = hhs_data.inpatient_beds_used_covid.fillna(0)
hhs_data = hhs_data.astype({'inpatient_beds_used_covid': 'int32'})
test_data = pd.DataFrame.from_records(test_results)[['state', 'date', 'overall_outcome', 'new_results_reported']]
test_data.new_results_reported = test_data.new_results_reported.fillna(0)
test_data = test_data.astype({'new_results_reported': 'int32'})
print("LOG: Filtered Data")
# For provisional data, gets days since most recent update of HHS time series
max_date = hhs_data.date.max()
max_hosp_date = max_date
provisional = client.get("4cnb-m4rz", limit=2000000, where=f"update_date > '{max_date}'")
hhs_provisional = pd.DataFrame.from_records(provisional)[['update_date', 'archive_link']]
hhs_provisional.update_date = hhs_provisional.update_date.apply(lambda x: x[:10])
hhs_provisional.update_date = pd.to_datetime(hhs_provisional.update_date)
# Gets last archive of every day
group = hhs_provisional.groupby(['update_date'])
hhs_provisional = group.last()
# Add provisional data to HHS data
frames = []
for a in hhs_provisional.iterrows():
date = a[0]
url = a[1].item()['url']
df = pd.read_csv(url)[['state', 'inpatient_beds_used_covid']]
df['date']=date
if date > pd.Timestamp(max_date): # Avoids double counting if provisional update came after real update
frames.append(df)
frames.append(hhs_data)
hhs_data = (pd.concat(frames))
print("LOG: Added HHS Provisional data")
# Make date columns in proper format
# hhs_data.date = hhs_data.date.apply(lambda x: x[:10])
hhs_data.date= pd.to_datetime(hhs_data.date)
# hhs_data.to_csv("../data/hospitalizations.csv")
print("LOG: Wrote HHS data to CSV")
test_data.date = test_data.date.apply(lambda x: x[:10])
test_data.date = pd.to_datetime(test_data.date)
nyt_data_us.date = pd.to_datetime(nyt_data_us.date)
nyt_data_state.date = pd.to_datetime(nyt_data_state.date)
print("LOG: Done getting data")
"""
get_state_cases
Creates dataframe of time series date and cases for given state
inputs:
state_codes: List of 2-letter codes of states to query
start_date (pd.Timestamp): starting date, defaults to 1-1-2020
end_date (pd.Timestamp): ending date, defaults to today
returns:
df with 'date' and 'test_positivity'
"""
def get_state_cases(state_codes, start_date = pd.Timestamp(2020,1,1), end_date = pd.Timestamp.today(), normalize=True):
curr_date = start_date
input_states = [definitions.states[s] for s in state_codes]
state_data = nyt_data_state[nyt_data_state.state.isin(input_states)][:]
max_date = state_data.date.max()
states_population = sum([definitions.populations[s] for s in input_states])
lst = []
while(curr_date <= end_date and curr_date <= max_date):
day_data = state_data[state_data.date == str(curr_date)]
if normalize:
case_sum = day_data.cases.sum() / states_population * 1000000
else:
case_sum = day_data.cases.sum()
newRow = {'date': curr_date, 'cases': case_sum}
lst.append(newRow)
curr_date += datetime.timedelta(1)
return pd.DataFrame(lst)
def get_us_cases(start_date = pd.Timestamp(2020,1,1), end_date = | pd.Timestamp.today() | pandas.Timestamp.today |
# --------------
import pandas as pd
from sklearn.model_selection import train_test_split
#path - Path of file
df= | pd.read_csv(path) | pandas.read_csv |
import sys
import os
from time import time, sleep
import shutil
import datetime
import csv
import json
import tempfile
from ast import literal_eval
import re
import unittest2 as unittest
from mock import Mock, patch
import os.path
import numpy as np
import pandas as pd
from tsfresh import extract_features, extract_relevant_features, select_features
from tsfresh import __version__ as tsfresh_version
baseline_dir = os.path.dirname(os.path.realpath(__file__))
tests_dir = os.path.dirname(baseline_dir)
parent_dir = os.path.dirname(tests_dir)
skyline_dir = parent_dir + '/skyline'
sys.path.append(skyline_dir)
root_dir = os.path.dirname(parent_dir)
if True:
import settings
from tsfresh_feature_names import TSFRESH_FEATURES, TSFRESH_BASELINE_VERSION
# TODO: reconsider including a config with no feature names just declaring the
# current baseline version as there is not difference in the baselines between
# 0.3.0 and 0.3.1, a version should not require a baseline if no changes were
# made, it should just use the last known baseline or just use a specific file
# name for current baseline file and deprecate the old baselines prefixed with
# tsfresh-x.y.z and if __version__ < TSFRESH_BASELINE_VERSION use ah... if each
# one does not have a baseline, which is the last baseline, listdir sort... :)
# Needs thought.
TSFRESH_BASELINE_VERSION = str(tsfresh_version)
if TSFRESH_BASELINE_VERSION == '0.1.1.post0.dev62+ng0f1b4c7':
# #109 was fixed in 0.3.1, just here for local testing purposes, for the
# various local version.
TSFRESH_BASELINE_VERSION = '0.3.0'
if 'post' in TSFRESH_BASELINE_VERSION:
travis_tsfresh_version = re.sub('\.post.*', '', TSFRESH_BASELINE_VERSION)
TSFRESH_BASELINE_VERSION = travis_tsfresh_version
# Directly declared every version hardcoded
TSFRESH_BASELINE_VERSION = '0.17.9'
python_version = int(sys.version_info[0])
baseline_dir = os.path.dirname(os.path.realpath(__file__))
tests_dir = os.path.dirname(baseline_dir)
baseline_ts_json_file = 'data.json'
baseline_ts_json_baseline = '%s/tsfresh-%s.py%s.%s.features.transposed.csv' % (
baseline_dir, TSFRESH_BASELINE_VERSION, str(python_version),
baseline_ts_json_file)
t_fname_out_fail = '%s/tsfresh-unknown-version.py%s.data.json.features.transposed.csv.bak' % (baseline_dir, str(python_version))
baseline_ts_json = '%s/utils/%s' % (parent_dir, baseline_ts_json_file)
# Baselines
baseline_dir = '%s/tests' % parent_dir
anomaly_json_baseline = '%s/tsfresh-%s.%s.features.transposed.csv' % (
baseline_dir, TSFRESH_BASELINE_VERSION, baseline_ts_json_file)
statsd_csv_file = 'stats.statsd.bad_lines_seen.20161110.csv'
statsd_csv = '%s/%s' % (
baseline_dir, statsd_csv_file)
statsd_baseline = '%s/tsfresh-%s.%s.features.transposed.csv' % (
baseline_dir, TSFRESH_BASELINE_VERSION, statsd_csv_file)
original_baseline_ts_json_baseline = str(baseline_ts_json_baseline)
baseline_ts_json_baseline = os.getenv('USE_TSFRESH_BASELINE', original_baseline_ts_json_baseline)
class TestTsfreshBaseline(unittest.TestCase):
"""
Test all the features and their calculated values with a 60 data point
sample of a simple anomalous timeseries data set and compare that the feature
names and calculated values match the baselines calcualated for the specific
version of tsfresh.
.. warning:: the Python 2 and 3 calculate different results in terms of
float precision therefore baseline transposed features csv files are
required for both py2 and py3.
Running the test
================
.. code-block:: bash
cd "<YOUR_SKYLINE_DIR>"
python -m pytest tests/baseline/tsfresh_features_test.py
Test the test fails
===================
To test that the test fails as desired and as does what it is supposed to do
there are 2 methods to achieve this:
- Modify the first value in your local skyline/utils/data.json (not
recommended in a repo) and run the test, then correct the modified local
data.json file.
- Modify a feature name or value in your local tests/baseline/tsfresh-<TSFRESH_BASELINE_VERSION>.py<PYTHON_VERSION>.data.json.features.transposed.csv file,
run the test and either pull it again or revert the change
"""
def setUp(self):
self.test_path = tempfile.mkdtemp()
self.fname_in = '%s/%s' % (self.test_path, baseline_ts_json_file)
tmp_csv = '%s.tmp.csv' % (self.fname_in)
t_fname_out = '%s.features.transposed.csv' % self.fname_in
if original_baseline_ts_json_baseline != baseline_ts_json_baseline:
print('Using tsfresh baseline json as passed with ENVIRONMENT variable USE_TSFRESH_BASELINE: %s' % baseline_ts_json_baseline)
self.assertTrue(os.path.isfile(baseline_ts_json))
timeseries_json = None
if os.path.isfile(baseline_ts_json):
with open(baseline_ts_json, 'r') as f:
timeseries_json = json.loads(f.read())
if python_version == 2:
timeseries_str = str(timeseries_json).replace('{u\'results\': ', '').replace('}', '')
if python_version == 3:
timeseries_str = str(timeseries_json).replace('{\'results\': ', '').replace('}', '')
full_timeseries = literal_eval(timeseries_str)
timeseries = full_timeseries[:60]
self.assertEqual(int(timeseries[0][0]), 1369677886)
self.assertEqual(len(timeseries), 60)
for ts, value in timeseries:
metric = 'tsfresh_features_test'
timestamp = int(ts)
value = str(float(value))
utc_ts_line = '%s,%s,%s\n' % (metric, str(timestamp), value)
with open(tmp_csv, 'a') as fh:
fh.write(utc_ts_line)
self.assertTrue(os.path.isfile(tmp_csv))
df_features = None
df = | pd.read_csv(tmp_csv, delimiter=',', header=None, names=['metric', 'timestamp', 'value']) | pandas.read_csv |
from sqlite3.dbapi2 import Timestamp
import sqlalchemy
import pandas as pd
from sqlalchemy.orm import sessionmaker
import requests
import json
from datetime import datetime
import datetime
import sqlite3
DATABASE_LOCATION = "sqlite:///my_played_tracks.sqlite"
USER_ID = "21cxorcxlyiwautslytprkgmq" # your Spotify username
TOKEN = "<KEY>" # your Spotify API token
if __name__ == '__main__':
headers = {
"Accept" : "application/json",
"Content-Type" : "application/json",
"Authorization" : "Bearer {token}".format(token=TOKEN)
}
# Convert time to Unix timestamp in miliseconds
today = datetime.datetime.now()
yesterday = today - datetime.timedelta(days=1)
yesterday_unix_timestamp = int(yesterday.timestamp()) * 1000
r = requests.get("https://api.spotify.com/v1/me/player/recently-played", headers=headers)
print(r)
data = r.json()
song_names = []
artist_names = []
played_at_list = []
timestamps = []
for song in data["items"]:
song_names.append(song["track"]["name"])
artist_names.append(song["track"]["album"]["artists"][0]["name"])
played_at_list.append(song["played_at"])
timestamps.append(song["played_at"][0:10])
# Prepare a dictionary in order to turn it into a pandas dataframe below
song_dict = {
"song_name" : song_names,
"artist_name": artist_names,
"played_at" : played_at_list,
"timestamp" : timestamps
}
song_df = | pd.DataFrame(song_dict, columns = ["song_name", "artist_name", "played_at", "timestamp"]) | pandas.DataFrame |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon May 31 16:59:28 2021
@author: liang
"""
import random
import pandas as pd
from make_random_date import make_random_time
from tqdm import tqdm
import pickle
import os
import numpy as np
from multiprocessing import Pool
import time
fea_config = pickle.load(open('../fea_config.pkl', 'rb'))
sparse_features = fea_config['sparse_features']
dense_features = fea_config['dense_features']
target = fea_config['target']
task = fea_config['task']
target_name = fea_config['target_name']
item_fea_map = fea_config['item_fea_map']
user_fea_map = fea_config['user_fea_map']
interaction_fea_map = fea_config['interaction_fea_map']
userid_df = pd.read_csv('../data/user_id.csv')[['USER_ID', 'U_ID']]
user_id = list(userid_df['USER_ID'].unique())
item_df = pd.read_csv('../data/item_id.csv')['resource']
item_id = list(item_df.unique())
# user_id = ['user_{}'.format(str(no)) for no in range(50000)]
# item_id = ['item_{}'.format(str(no)) for no in range(300)]
def Merge(dict1, dict2):
res = {**dict1, **dict2}
return res
def left_joinFunc2(df1,df2,colname1,colname2):
return df1.join(df2, df1[colname1] == df2[colname2],'left').drop(colname2, axis=1)
def left_joinFunc(df1,df2,colname):
return pd.merge(df1, df2,how='left', on=colname)#
def findlatestandleftjoin(df1,df2,colname1,colname2):
new_df = pd.DataFrame()
for index, item in df1.iterrows():
choose_list = df2[df2[colname1] == item[colname1] and df2[colname2] >= item[colname2]]
latest_one = choose_list.sort_values(by=colname2)[0]
join_df = left_joinFunc(item, latest_one, colname1)
new_df = new_df.append(join_df)
return new_df
def random_int(num, decimal):
array = []
for i in range(num):
array.append(random.randint(0, decimal))
return array
def random_float(num,decimal):
array = []
for i in range(num):
array.append(random.uniform(0, decimal))
return array
def random_float_1(num):
array = []
for i in range(num):
array.append(random.uniform(0, 1))
return array
######make user fea
multi = 10
enum_list = ['KYC等级', 'KYC国家']
KYC_level = ['A','B','C']
kyc_country = ['USA', 'JAPAN', 'ITELY', 'UK']
if_test = [True, False]
rank = 600
user_df = pd.DataFrame()
user_df['用户ID'] = user_id
user_df = left_joinFunc2(user_df, userid_df, '用户ID', 'USER_ID')
all_num = len(user_id)
label = 'KYC等级'
array = [random.choice(KYC_level) for _ in range(all_num)]
user_df[label] = array
label = 'KYC国家'
array = [random.choice(kyc_country) for _ in range(all_num)]
user_df[label] = array
label = '测试用户'
array = [random.choice(if_test) for _ in range(all_num)]
user_df[label] = array
user_df.to_csv('../data/user_base_info.csv', index=False)
user_df = pd.read_csv('../data/user_base_info.csv')
#
for label,value in user_fea_map.items():
if value[1] == 'float01':
array = random_float_1(all_num)
user_df[label] = array
elif value[1] == 'float':
array = random_float(all_num,1000000)
user_df[label] = array
elif value[1] == 'int':
array = random_int(all_num, rank)
user_df[label] = array
#
a1=(2021,5,1,0,0,0,0,0,0) #设置开始日期时间元组(2021-05-01 00:00:00)
a2=(2021,6,30,23,59,59,0,0,0) #设置结束日期时间元组(2021-12-31 23:59:59)
user_df['user_create_time'.upper()] = make_random_time(all_num, a1, a2, False)
user_df.to_csv('../data/user_fea.csv', mode='w', index=False)
eng = user_df.rename(columns=fea_config['map_eng_name'], inplace=False)
if os.path.exists('../data/user_eng_fea.csv'):
eng.to_csv('../data/user_eng_fea.csv', mode ='a',header=False, index= False)
else:
eng.to_csv('../data/user_eng_fea.csv', mode='w', index=False)
#
#######make item fea
multi = 1000
item_df = pd.DataFrame()
item_df['物品ID'] = item_id * multi
all_num = len(item_id) * multi
for label,value in item_fea_map.items():
if value[1] == 'float01':
array = random_float_1(all_num)
item_df[label] = array
elif value[1] == 'float':
array = random_float(all_num,1000000)
item_df[label] = array
elif value[1] == 'int':
array = random_int(all_num, rank)
item_df[label] = array
elif value[1] == 'string_type':
array = [random.choice(['101','100','111','102']) for _ in range(all_num)]
item_df[label] = array
item_df = item_df.drop('物品类型', axis=1, inplace=False)
item_df['ITEM_CATEGORY'] = [random.choice(['Currency','InvestPortfolio','Plate']) for _ in range(all_num)]
a1=(2021,5,1,0,0,0,0,0,0) #设置开始日期时间元组(2021-05-01 00:00:00)
a2=(2021,6,30,23,59,59,0,0,0) #设置结束日期时间元组(2021-12-31 23:59:59)
item_df['item_create_time'.upper()] = make_random_time(all_num, a1, a2, False)
item_df.to_csv('../data/item_fea.csv', mode='w', index=False)
eng = item_df.rename(columns=fea_config['map_eng_name'], inplace=False)
if os.path.exists('../data/item_eng_fea.csv'):
eng.to_csv('../data/item_eng_fea.csv', mode ='a',header=False, index= False)
else:
eng.to_csv('../data/item_eng_fea.csv', mode='w', index=False)
######make interaction fea
multi = 100
interation_df = pd.DataFrame()
#interation_df['物品ID'] = item_id * multi
#all_num = len(item_id) * multi
#interation_df['用户ID'] = [random.choice(user_id) for _ in range(all_num)]
user_id_array = []
item_id_array = []
for user in tqdm(user_id):
item_id_array.extend(item_id)
user_id_array.extend([user] * len(item_id))
interation_df['用户ID'] = user_id_array
interation_df['物品ID'] = item_id_array
all_num = len(item_id_array)
for label,value in tqdm(interaction_fea_map.items()):
if value[1] == 'float01':
array = random_float_1(all_num)
interation_df[label] = array
elif value[1] == 'float':
array = random_float(all_num,1000000)
interation_df[label] = array
elif 'int' in value[1]:
array = random_int(all_num, int(value[1][-2:]))
interation_df[label] = array
elif value[1] == 'string_type':
target = list(target_name.keys())
array = [random.choice(target) for _ in range(all_num)]
interation_df[label] = array
a1=(2021,5,1,0,0,1,0,0,0) #设置开始日期时间元组(2021-05-01 00:00:00)
a2=(2021,6,30,23,59,59,0,0,0) #设置结束日期时间元组(2021-12-31 23:59:59)
interation_df['interaction_create_time'.upper()] = make_random_time(all_num, a1, a2, False)
interation_df = interation_df[['用户ID','物品ID','交互类型','物品所在顺序','历史点击次数','历史购买次数','interaction_create_time'.upper()]]
interation_df.to_csv('../data/interaction_fea.csv', mode='w', index=False)
eng = interation_df.rename(columns=fea_config['map_eng_name'], inplace=False)
if os.path.exists('../data/interaction_eng_fea.csv'):
eng.to_csv('../data/interaction_eng_fea.csv', mode ='a',header=False, index= False)
else:
eng.to_csv('../data/interaction_eng_fea.csv', mode='w', index=False)
#
#
#
#user_df = pd.read_csv('../data/user_fea.csv')
#userbase_df = pd.read_csv('../data/user_id.csv')[['user_id'.upper(), 'u_id'.upper()]]
#user_df = left_joinFunc2(user_df, userbase_df, '用户ID', 'u_id'.upper())
item_df = pd.read_csv('../data/item_fea.csv')
interation_df = pd.read_csv('../data/interaction_fea.csv')
train_df = left_joinFunc(interation_df, user_df, '用户ID').sample(10000)
#train_df = left_joinFunc(interation_df, user_df)
colname1 = '物品ID'
colname2 = 'item_create_time'.upper()
t1= time.time()
#new_df = pd.DataFrame()
#for index, item in tqdm(train_df.iterrows()):
# choose_list = item_df[item_df[colname1] == item[colname1]]
# choose_list = choose_list[choose_list['item_create_time'] <= item['interaction_create_time']]
#
# if choose_list.shape[0] > 0:
# latest_one = choose_list.sort_values(by=colname2).head(1)
#
# join_dict = Merge(item.to_dict(), latest_one.to_dict())
#
# new_df = new_df.append(pd.DataFrame.from_dict(join_dict))
#t2 = time.time()
#print("Serial time =",t2-t1)
#print(new_df.head())
#
#new_df.to_csv('../data/train_fea.csv', mode ='w',index= False)
#new_df.rename(columns=fea_config['map_eng_name'], inplace=True)
#if os.path.exists('../data/train_eng_fea.csv'):
# new_df.to_csv('../data/train_eng_fea.csv', mode ='a',header=False, index= False)
#else:
# new_df.to_csv('../data/train_eng_fea.csv', mode ='w', index= False)
def find_func(dist_df):
new_df = pd.DataFrame()
for index, item in tqdm(dist_df.iterrows()):
choose_list = item_df[item_df[colname1] == item[colname1]]
choose_list = choose_list[choose_list['item_create_time'.upper()] <= item['interaction_create_time'.upper()]]
if choose_list.shape[0] > 0:
latest_one = choose_list.sort_values(by=colname2).head(1)
join_dict = Merge(item.to_dict(), latest_one.to_dict())
new_df = new_df.append(pd.DataFrame.from_dict(join_dict))
return new_df
df_parts=np.array_split(train_df,20)
print(len(df_parts),type(df_parts[0]))
# with Pool(processes=8,initializer=init_process,initargs=(a,)) as pool:
with Pool(processes=4) as pool:
result_parts = pool.map(find_func,df_parts)
# pool.map(MainRange,df_parts)
result_parallel= | pd.concat(result_parts) | pandas.concat |
"""
Collection of function to pre-process the master curve and perform the Prony
series parameter identification.
"""
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from scipy.optimize import minimize, nnls
from . import shift
"""
--------------------------------------------------------------------------------
Prony series - Domain independent functions
--------------------------------------------------------------------------------
"""
def discretize(df_master, window='round', nprony=0):
"""
Discretizes relaxation times over time or frequency axis.
Discrete relaxation times are required for Prony parameter curve fitting
routine. This function spaces the relaxation times over the experimental characterization window.
Parameters
----------
df_master : pandas.DataFrame
Contains the master curve data.
window : {'round', 'exact', 'min'}
Defines the location of the discretization of the relaxation times.
- 'exact' : Use whole window of the experimental data and logarithmically
space the relaxation times inbetween.
- 'round' : Round the minimum and maximum values of the experimental data
to the nearest base 10 number and logarithmically space the
remaining relaxation times inbetween the rounded numbers
- 'min' : Position of relaxation times is optimized during minimization
routine to reduce the number of Prony terms.
nprony : numeric, default = 0
Number of Prony terms to be used for the discretization. The number
of Prony terms and the number of relaxation times is equal. If no number
or 0 is specified, the default behavior of one Prony term per decade is
used to automatically calculate the number of Prony terms.
Returns
-------
df_dis : pandas.DataFrame
Contains discrete point, equal to the relaxation times, of the
master curve data (df_master).
References
----------
Kraus, <NAME>., and <NAME>. "Generalized collocation method using
Stiffness matrices in the context of the Theory of Linear viscoelasticity
(GUSTL)." Technische Mechanik-European Journal of Engineering Mechanics
37.1 (2017): 82-106.
"""
modul = df_master.modul
stor = '{}_stor'.format(modul)
loss = '{}_loss'.format(modul)
relax = '{}_relax'.format(modul)
stor_filt = '{}_stor_filt'.format(modul)
loss_filt = '{}_loss_filt'.format(modul)
relax_filt = '{}_relax_filt'.format(modul)
#Get relaxation times
a = 1 #[Tschoegl 1989]
#omega = (1/(a*tau)) #[Kraus 2017, Eq. 25]
_tau = 1/(a*df_master['omega'])
#Window Time Domain
if df_master.domain == 'freq':
exp_inf = int(np.floor(np.log10(_tau.iloc[0]))) #highest time domain exponent
exp_0 = int(np.ceil(np.log10(_tau.iloc[-1]))) #lowest time domain exponent
val_inf = _tau.iloc[0]
val_0 = _tau.iloc[-1]
elif df_master.domain == 'time':
exp_inf = int(np.floor(np.log10(_tau.iloc[-1]))) #highest time domain exponent
exp_0 = int(np.ceil(np.log10(_tau.iloc[0]))) #lowest time domain exponent
val_inf = _tau.iloc[-1]
val_0 = _tau.iloc[0]
decades = exp_inf - exp_0
#Space evenly on a log scale in time domain
if nprony == 0:
nprony = exp_inf - exp_0 + 1 #One prony term per decade
if window == 'round':
tau = np.flip(np.geomspace(float(10**exp_0), float(10**exp_inf), nprony))
elif window == 'exact':
tau = np.flip(np.geomspace(val_0, val_inf, nprony))
elif window == 'min':
tau = np.flip(np.geomspace(val_0, val_inf, nprony+2))[1:-1]
#Get dataframe with discretized values
omega_dis = (1/(a*tau)) #[Kraus 2017, Eq. 25]
freq_dis = omega_dis/(2*np.pi) #Convert to cycles per second [Hz]
t_dis = 1/freq_dis
if df_master.domain == 'freq':
#Interpolate E_stor and E_loss at discretization poins
E_stor_dis = np.interp(freq_dis, df_master['f'], df_master[stor_filt])
E_loss_dis = np.interp(freq_dis, df_master['f'], df_master[loss_filt])
#Estimate instantenous (E_0) and equilibrium (E_inf) modulus
E_0 = df_master[stor_filt].iloc[-1]
E_inf = df_master[stor_filt].iloc[0]
#Assembly data frame
df_dis = pd.DataFrame([freq_dis, E_stor_dis, E_loss_dis, omega_dis, tau]).T
df_dis.columns = ['f', stor, loss, 'omega', 'tau_i']
elif df_master.domain == 'time':
#Interpolate E_stor and E_loss at discretization poins
E_relax_dis = np.interp(t_dis, df_master['t'], df_master[relax_filt])
#Estimate instantenous (E_0) and equilibrium (E_inf) modulus
E_0 = df_master[relax_filt].iloc[0]
E_inf = df_master[relax_filt].iloc[-1]
#Assembly data frame
df_dis = pd.DataFrame([tau, t_dis, E_relax_dis, omega_dis, freq_dis]).T
df_dis.columns = ['tau_i', 't', relax, 'omega', 'f']
#Add df attributes
df_dis.index += 1
df_dis.nprony = nprony
df_dis.E_0 = E_0
df_dis.E_inf = E_inf
df_dis.RefT = df_master.RefT
df_dis.f_min = df_master['f'].min()
df_dis.f_max = df_master['f'].max()
df_dis.decades = decades
df_dis.domain = df_master.domain
df_dis.modul = df_master.modul
return df_dis
def plot_dis(df_master, df_dis, units):
"""
Plot relaxation times on top of master curve.
Parameters
----------
df_master : pandas.DataFrame
Contains the master curve data.
df_dis : pandas.DataFrame
Contains the discrete relaxation times and corresponding data.
units : dict of {str : str}
Contains the names of the physical quantities as key and
the corresponding names of the units as item.
Returns
-------
fig : matplotlib.pyplot.figure
Plot showing the relaxation times on top of the master curve.
"""
modul = df_master.modul
stor = '{}_stor'.format(modul)
loss = '{}_loss'.format(modul)
relax = '{}_relax'.format(modul)
if df_master.domain == 'freq':
fig, ax1 = plt.subplots()
df_master.plot(x='f', y=[stor, loss],
ax=ax1, logx=True, color=['C0', 'C1'], alpha=0.5)
df_dis.plot(x='f', y=[stor, loss], label=['tau_i', 'tau_i'], ax=ax1,
logx=True, ls='', marker='o', color=['C0', 'C1'])
ax1.set_xlabel('Frequency ({})'.format(units['f']))
ax1.set_ylabel('Storage and loss modulus ({})'.format(units[stor]))
ax1.legend()
fig.show()
return fig
elif df_master.domain == 'time':
fig, ax1 = plt.subplots()
df_master.plot(x='t', y=[relax], ax=ax1, logx=True, color=['k'])
df_dis.plot(x='t', y=[relax], label = ['tau_i'],
ax=ax1, logx=True, ls='', marker='o', color=['red'])
ax1.set_xlabel('Time ({})'.format(units['t']))
ax1.set_ylabel('Relaxation modulus ({})'.format(units[relax]))
ax1.legend()
fig.show()
return fig
def ls_res(func):
"""
Wrapper function that calculates the least squares residual.
Parameters
----------
func : function
Time domain: prony.E_relax_norm
Frequency domain: prony.E_freq_norm
Returns
-------
residual : function
Calculates least squares residual for specified domain.
"""
def residual(alpha_i, tau_i, E_meas_norm, tf_meas):
"""
Calculate least squares resdiual.
Parameters
----------
alpha_i : array-like
Normalized relaxation moduli (unitless).
tau_i : array-like
relaxation times in s.
E_meas_norm : array-like
Normalized modulus from experimental measurement data.
tf_meas : array-like
Time domain: time data of measurements in s.
Frequency domain: frequency data of measurements in Hz.
Returns
-------
numeric
Least squares residual of measurement data and curve fit data.
"""
return np.sum((E_meas_norm - func(tf_meas, alpha_i, tau_i))**2)
return residual
def split_x0(func):
"""
Wrapper that splits array x0 of the minimization routine into two arrays.
Splits the the first argument x0 into two arrays alpha_i and tau_i and
forwards both arrays to the called function. A single array x0 is necessary
to optimize both alpha_i and tau_i at the same time. However, typically,
only alpha_i is optimized and tau_i is kept constant. This wrapper allows
to use the same function in both scenarios.
Parameters
----------
func : function
Function that calculates least squares residual.
Returns
-------
split : function
See also
--------
prony.ls_res : Function to be wrapped during minimization of Prony terms.
"""
def split(*args):
alpha_i = args[0][0:int(args[0].shape[0]/2)]
tau_i = args[0][int(args[0].shape[0]/2):]
return func(alpha_i, tau_i, args[1], args[2])
return split
"""
--------------------------------------------------------------------------------
Prony series - Time domain
--------------------------------------------------------------------------------
"""
def E_relax_norm(time, alpha_i, tau_i):
"""
Calculate normalized relaxation modulus values.
Parameters
----------
time : array-like
Time in s.
alpha_i : array-like
Normalized relaxation moduli (unitless).
tau_i : array-like
relaxation times in s.
Returns
-------
numpy.ndarray
Relaxation modulus values.
"""
#Loop implementation
#-------------------
#y = np.zeros(time.shape[0])
#for i, t in enumerate(time):
# y[i] = E_0 * (1 - np.sum(alpha_i*(1-np.exp(-t/tau_i))))
#return y
#-----------------------------
#Linear algebra implementation
return 1-np.sum(alpha_i) + np.dot(alpha_i, np.exp(-time/tau_i[:,None]))
def fit_time(df_dis, df_master, opt=False):
"""
Fit Prony series parameter in time domain.
A least-squares minimization is performed using the L-BFGS-B method from
the scipy package. The implementation is similar to the optimization problem described by [1] for a homogenous distribution of discrete times.
Parameters
----------
df_dis : pandas.DataFrame
Contains the discrete relaxation times and corresponding data.
df_master : pandas.DataFrame
Contains the master curve data.
opt : bool, default = False
Flag indicates wether the Prony term minimization routine should be
executed or not.
Returns
-------
prony : dict
Contains the Prony series parameters of the fit.
References
----------
[1] <NAME>., <NAME>., <NAME>. et al. Optimal discrete-time
Prony series fitting method for viscoelastic materials. Mech Time-Depend
Mater 23, 193-206 (2019). https://doi.org/10.1007/s11043-018-9394-z
"""
m = df_dis.modul
#Initial guess: alpha_i = 1
alpha_i = np.ones(df_dis['tau_i'].values.shape)
tau_i = df_dis['tau_i'].values
#Get measurement data and normalize modul
E_meas_norm = df_master['{}_relax_filt'.format(m)].values / df_dis.E_0
time_meas = df_master['t'].values
#Define bounds
bnd_a = ((0,1),)*alpha_i.shape[0]
#Perform minimization to obtain alpha_i
res = minimize(ls_res(E_relax_norm), alpha_i,
args=(tau_i, E_meas_norm, time_meas), method='L-BFGS-B', bounds=bnd_a)
alpha_i = res.x
#Use initial fit and try to optimize both alpha_i and tau_i
if opt:
#Stack alpha_i and tau_i into single array
x0 = np.hstack((alpha_i, tau_i))
#Define bounds
tau_max = 1/(2*np.pi*df_dis.f_min)
tau_min = 1/(2*np.pi*df_dis.f_max)
bnd_t = ((tau_min, tau_max),)*alpha_i.shape[0]
bnd = bnd_a + bnd_t
#Find optimal Prony terms
res = minimize(split_x0(ls_res(E_relax_norm)), x0,
args=(E_meas_norm, time_meas), method='L-BFGS-B' , bounds=bnd)
#Print success of optimization
if res.success:
msg = 'Prony series fit N = {:02d}: Convergence criterion reached!'
print(msg.format(alpha_i.shape[0]))
else:
msg = 'Prony series fit N = {:02d}: Convergence criterion not reached!'
print(msg.format(alpha_i.shape[0]))
#Store Prony terms in dataframe
alpha_i = res.x[0:int(res.x.shape[0]/2)]
df_dis['tau_i'] = res.x[int(res.x.shape[0]/2):]
#Ensure that Sum(alpha_i) < 1 (otherwise can lead to numerical difficulties in FEM)
if alpha_i.sum() >= 1:
df_dis['alpha_i'] = 0.999/alpha_i.sum()*alpha_i #normalize to 0.999
else:
df_dis['alpha_i'] = alpha_i
#Store Prony terms in dataframe
df_prony = df_dis[['tau_i', 'alpha_i']].copy()
df_prony = df_prony.iloc[::-1].reset_index(drop=True)
df_prony.index += 1
df_prony['{}_0'.format(m)] = df_dis.E_0
df_prony['{}_i'.format(m)] = df_dis.E_0 * df_prony['alpha_i']
df_prony.RefT = df_dis.RefT
#Store Prony parameters in dictionary
prony = {'E_0':df_dis.E_0, 'df_terms':df_prony, 'f_min':df_dis.f_min,
'f_max':df_dis.f_max, 'label':'equi.', 'err' : res.fun,
'decades':df_dis.decades, 'modul':m}
return prony
"""
--------------------------------------------------------------------------------
Prony series - Frequency domain
--------------------------------------------------------------------------------
"""
def E_freq_norm(omega, alpha_i, tau_i):
"""
Calculate normalized storage and loss modulus values.
Parameters
----------
omega : array-like
Angular frequency in rad/s.
alpha_i : array-like
Normalized relaxation moduli (unitless).
tau_i : array-like
relaxation times in s.
Returns
-------
numpy.ndarray
Concatenated array of normalized storage and loss modulus values.
"""
A = (omega*tau_i[:,None])
A2 = A**2
E_stor = 1-np.sum(alpha_i) + np.dot(alpha_i, A2/(A2+1))
E_loss = np.dot(alpha_i, A/(A2+1))
return np.concatenate((E_stor, E_loss))
def fit_freq(df_dis, df_master=None, opt=False):
"""
Fit Prony series parameter in frequency domain.
A generalized collocation method using stiffness matrices is used [1].
This methods utilizes both the storage and loss modulus master curves to
estimate the Prony series parameters.
Parameters
----------
df_dis : pandas.DataFrame
Contains the discrete relaxation times and corresponding data.
df_master : pandas.DataFrame, default = None
Contains the master curve data. Only required for Prone term
minimization routine (opt = True).
opt : bool, default = False
Flag indicates wether the Prony term minimization routine should be
executed or not.
Returns
-------
prony : dict
Contains the Prony series parameters of the fit.
References
----------
[1] Kraus, <NAME>., and <NAME>. "Generalized collocation method using
Stiffness matrices in the context of the Theory of Linear viscoelasticity
(GUSTL)." Technische Mechanik-European Journal of Engineering Mechanics
37.1 (2017): 82-106.
"""
modul = df_dis.modul
stor = '{}_stor'.format(modul)
loss = '{}_loss'.format(modul)
inst_mod = '{}_0'.format(modul)
rel_mod = '{}_i'.format(modul)
#Assembly 'K_global' matrix [Kraus 2017, Eq. 22]
N = df_dis.nprony
K_stor = np.tril(np.ones((N,N)), -1) + np.diag([0.5] * N)
K_loss = (np.diag([0.5] * N)
+ np.diag([0.1] * (N-1), 1) + np.diag([0.1] * (N-1), -1)
+ np.diag([0.01] * (N-2), 2) + np.diag([0.01] * (N-2), -2)
+ np.diag([0.001] * (N-3), 3) + np.diag([0.001] * (N-3), -3))
K_global = np.vstack([K_stor, K_loss, np.ones((1,N))])
#Estimate instantenous (E_0) and equilibrium (E_inf) modulus
E_0 = df_dis.E_0
E_inf = df_dis.E_inf
#Assembly right-hand vector
E = np.concatenate((df_dis[stor]/(E_0-E_inf),
df_dis[loss]/(E_0-E_inf),
np.array([1])))
#Solve equation system
alpha_i, err = nnls(K_global, E)
#Use initial fit and try to optimize both alpha_i and tau_i
if opt:
#Get measurement data
E_freq_meas = np.concatenate((df_master[stor]/E_0,
df_master[loss]/E_0))
omega_meas = df_master['omega'].values
#Get Prony series
tau_i = df_dis['tau_i']
x0 = np.hstack((alpha_i, tau_i))
#Define bounds
tau_max = 1/(2*np.pi*df_dis.f_min)
tau_min = 1/(2*np.pi*df_dis.f_max)
bnd_t = ((tau_min, tau_max),)*alpha_i.shape[0]
bnd_a = ((0,1),)*alpha_i.shape[0]
bnd = bnd_a + bnd_t
#Find optimal Prony terms
res = minimize(split_x0(ls_res(E_freq_norm)), x0,
args=(E_freq_meas, omega_meas), bounds=bnd, method='L-BFGS-B',
options={'maxls' : 200})
#Store Prony terms in dataframe
alpha_i = res.x[0:int(res.x.shape[0]/2)]
df_dis['tau_i'] = res.x[int(res.x.shape[0]/2):]
err = res.fun
#Print success of optimization
if res.success:
_msg = 'Prony series N = {:02d}: Convergence criterion reached!'
print(_msg.format(alpha_i.shape[0]))
else:
_msg = 'Prony series N = {:02d}: Convergence criterion not reached!'
print(_msg.format(alpha_i.shape[0]))
#Ensure that Sum(alpha_i) < 1 (otherwise can lead to numerical difficulties in FEM)
if alpha_i.sum() >= 1:
df_dis['alpha_i'] = 0.999/alpha_i.sum()*alpha_i #normalize to 0.999
else:
df_dis['alpha_i'] = alpha_i
#Store Prony terms in dataframe
df_prony = df_dis[['tau_i', 'alpha_i']].copy()
df_prony = df_prony.iloc[::-1].reset_index(drop=True)
df_prony.index += 1
df_prony[inst_mod] = E_0
df_prony[rel_mod] = E_0 * df_prony['alpha_i']
df_prony.RefT = df_dis.RefT
#Store Prony parameters in dictionary
prony = {'E_0':E_0, 'df_terms':df_prony, 'f_min':df_dis.f_min,
'f_max':df_dis.f_max, 'label':'equi.', 'err' : err,
'decades':df_dis.decades, 'modul':modul}
return prony
"""
--------------------------------------------------------------------------------
Generalized Maxwell model
--------------------------------------------------------------------------------
"""
def calc_GMaxw(E_0, df_terms, f_min, f_max, decades, modul, **kwargs):
"""
Calculate the Generalized Maxwell model data from the Prony series parameter.
Parameters
----------
E_0 : numeric
Instantaneous storage modulus. Same variable name is used for either
tensile (E_0) or shear (G_0) loading.
df_terms : pandas.DataFrame
Contains the Prony series parameters tau_i and alpha_i.
f_min : numeric
Lower bound frequency for calculation of physical quanitities.
f_max : numeric
Upper bound frequency for calculation of physical quanitities.
decades : integer
Number of decades spanning the frequency window. Is used to calculate
the necessary number of data points spanning the frequency range for
an appropriate resolution.
modul : {'E', 'G'}
Indicates wether tensile ('E') or shear ('G') modulus data are provided.
Returns
-------
df_GMaxw : pandas.DataFrame
Contains the calculated Generalized Maxwell model data for the fitted
Prony series parameters with the specified boundaries and parameters.
"""
stor = '{}_stor'.format(modul)
loss = '{}_loss'.format(modul)
comp = '{}_comp'.format(modul)
relax = '{}_relax'.format(modul)
alpha_i = df_terms['alpha_i'].values
tau_i = df_terms['tau_i'].values
#Define angular frequency range for plotting
omega_min = 2*np.pi*f_min
omega_max = 2*np.pi*f_max
omega_len = 10*decades #number of datapoints along x-axis (10 per decade)
#Define dataframe
df_GMaxw = pd.DataFrame(np.zeros((omega_len, 8)),
columns=(['f', 'omega', stor, loss, comp, 'tan_del', 't', relax]))
#Fill frequency and time axis
df_GMaxw['omega'] = np.geomspace(omega_min, omega_max, omega_len)
df_GMaxw['f'] = df_GMaxw['omega']/(2*np.pi)
df_GMaxw['t'] = 1/df_GMaxw['f']
E_inf = E_0*(1-np.sum(alpha_i))
A = (df_GMaxw['omega'].values*tau_i[:,None])
A2 = (df_GMaxw['omega'].values*tau_i[:,None])**2
df_GMaxw[stor] = E_inf + np.dot(E_0*alpha_i, A2/(A2+1))
df_GMaxw[loss] = np.dot(E_0*alpha_i, A/(A2+1))
df_GMaxw[comp] = (df_GMaxw[stor]**2 + df_GMaxw[loss]**2)**0.5
df_GMaxw['tan_del'] = df_GMaxw[loss]/df_GMaxw[stor]
#Calculate time domain
df_GMaxw[relax] = E_0 * E_relax_norm(df_GMaxw['t'].values, alpha_i, tau_i)
#Define attributes
df_GMaxw.modul = modul
return df_GMaxw
def GMaxw_temp(shift_func, df_GMaxw, df_coeff, df_aT, freq = [1E-8, 1E-4, 1E0, 1E4]):
"""
Calculate Gen. Maxwell model for different loading frequencies and temperatures.
This function showcases the temperature and rate-dependence of the visco-
elastic material. The specified shift function is used to calculate
the material response at different temperatures and different loading
rates.
Parameters
----------
shift_func : {'WLF', 'D4', 'D3', 'D2', 'D1'}
Specifies the shift function to be used for calculations.
df_GMaxw : pandas.DataFrame
Contains the Generalized Maxwell model data for the reference
temperature at different loading rates.
df_coeff : pandas.DataFrame
Contains the coefficients and parameters for the specified shift
function.
df_aT : pandas.DataFrame
Contains the shift factors. The shift factors are used to identify
the Temperature range for the calculation.
freq : array-like, default = [1E-8, 1E-4, 1E0, 1E4]
Loading frequencies for which the calculations are performed.
Returns
-------
df_GMaxw_temp
Contains the Generalized Maxwell model data for a wide range of
temperatures at the specified frequencies.
See also
--------
shift.fit_WLF : Returns WLF shift functions.
shift.fit_poly : Returns polynomial shift functions of degree 1 to 4.
"""
modul = df_GMaxw.modul
stor = '{}_stor'.format(modul)
loss = '{}_loss'.format(modul)
relax = '{}_relax'.format(modul)
df_GMaxw_temp = pd.DataFrame()
T_min = int(df_aT['T'].min())
T_max = int(df_aT['T'].max())
for f in freq:
for T in range(T_min, T_max+1):
try:
if shift_func == 'WLF':
coeff_WLF = df_coeff.values[0].tolist()
aT = 10**(-shift.WLF(T, *coeff_WLF))
elif shift_func == 'D4':
coeff_D4 = df_coeff['P4 (C)'].tolist()
aT = 10**(-shift.poly4(T, *coeff_D4))
elif shift_func == 'D3':
coeff_D3 = df_coeff['P3 (C)'].iloc[0:4].tolist()
aT = 10**(-shift.poly3(T, *coeff_D3))
elif shift_func == 'D2':
coeff_D2 = df_coeff['P2 (C)'].iloc[0:3].tolist()
aT = 10**(-shift.poly2(T, *coeff_D2))
elif shift_func == 'D1':
coeff_D1 = df_coeff['P1 (C)'].iloc[0:2].tolist()
aT = 10**(-shift.poly1(T, *coeff_D1))
f_shift = aT * df_GMaxw['f']
except OverflowError:
continue
if any(f_shift<=f) and not all(f_shift<=f):
E_stor = np.interp(f, f_shift, df_GMaxw[stor])
E_loss = np.interp(f, f_shift, df_GMaxw[loss])
E_relax = np.interp(f, f_shift, df_GMaxw[relax])
tan_del = np.interp(f, f_shift, df_GMaxw['tan_del'])
df = pd.DataFrame([[f, T, E_stor, E_loss, tan_del, E_relax]],
columns=['f', 'T', stor, loss, 'tan_del', relax])
df_GMaxw_temp = pd.concat([df_GMaxw_temp, df])
else:
continue
df_GMaxw_temp = df_GMaxw_temp.reset_index(drop=True)
df_GMaxw_temp.modul = modul
return df_GMaxw_temp
def plot_GMaxw(df_GMaxw, units):
"""
Plot Generalized Maxwell model data for the reference temperature.
Parameters
----------
df_GMaxw : pandas.DataFrame
Contains the Generalized Maxwell model data for the reference
temperature at different loading rates.
units : dict of {str : str}
Contains the names of the physical quantities as key and
the corresponding names of the units as item.
Returns
-------
fig : matplotlib.pyplot.figure
Plot of calculated storage, loss, and relaxation modulus.
"""
modul = df_GMaxw.modul
stor = '{}_stor'.format(modul)
loss = '{}_loss'.format(modul)
relax = '{}_relax'.format(modul)
fig1, ax1 = plt.subplots()
df_GMaxw.plot(x='f', y=[stor], ax=ax1, logx=True, ls='-', lw=2, color=['C0'])
df_GMaxw.plot(x='f', y=[loss], ax=ax1, logx=True, ls=':', lw=2, color=['C1'])
df_GMaxw.plot(x='f', y=[relax], ax=ax1, logx=True, ls='--', lw=2, color=['C2'])
ax1.set_xlabel('Frequency ({})'.format(units['f']))
ax1.set_ylabel('Relaxation, storage and \n loss modulus ({})'.format(units[stor]))
fig1.show()
return fig1
def plot_GMaxw_temp(df_GMaxw_temp, units):
"""
Plot Generalized Maxwell model data for varies temperature and frequencies.
Parameters
----------
df_GMaxw_temp : pandas.DataFrame
Contains the Generalized Maxwell model data for various
temperatures and different loading rates.
units : dict of {str : str}
Contains the names of the physical quantities as key and
the corresponding names of the units as item.
Returns
-------
fig : matplotlib.pyplot.figure
Plot of showing the temperature and rate dependence of the storage,
loss, and relaxation modulus.
"""
modul = df_GMaxw_temp.modul
stor = '{}_stor'.format(modul)
loss = '{}_loss'.format(modul)
relax = '{}_relax'.format(modul)
fig, ax1 = plt.subplots()
for i, (f, df) in enumerate(df_GMaxw_temp.groupby('f')):
df.plot(y=stor, x='T', ls='-', ax=ax1, label='f = {:.0e} Hz'.format(f),
c='C{}'.format(i))
df.plot(y=loss, x='T', ls=':', ax=ax1, label='', c='C{}'.format(i))
df.plot(y=relax, x='T', ls='--', ax=ax1, c='C{}'.format(i), label='')
ax1.set_xlabel('Temperature ({})'.format(units['T']))
ax1.set_ylabel('Relaxation, storage and \n loss modulus ({})'.format(units[stor]))
ax1.legend()
fig.show()
return fig
def plot_param(prony_list, labels=None):
"""
Plot illustrating the Prony series parameters of one or more fits.
Parameters
----------
prony_list : list
List of `prony` dictionaries containing the Prony series parameters.
labels : list of str
List of strings to be used as legend label names.
Returns
-------
fig : matplotlib.pyplot.figure
Plot showing the relaxation moduli over the relaxation times.
See also
--------
prony.fit : Returns the prony dictionary to be used in prony_list.
"""
df_list = []
for i, prony in enumerate(prony_list):
df = prony['df_terms'][['tau_i', 'alpha_i']].copy()
df = df.set_index('tau_i')
if labels:
df.columns = [labels[i]]
else:
df.columns = [prony['label']]
df_list.append(df)
df_bar = | pd.concat(df_list, axis=1) | pandas.concat |
# -*- coding: utf-8 -*-
from datetime import datetime, timedelta, date, time
import numpy as np
import pandas as pd
import pandas.lib as lib
import pandas.util.testing as tm
from pandas import Index
from pandas.compat import long, u, PY2
class TestInference(tm.TestCase):
def test_infer_dtype_bytes(self):
compare = 'string' if PY2 else 'bytes'
# string array of bytes
arr = np.array(list('abc'), dtype='S1')
self.assertEqual(pd.lib.infer_dtype(arr), compare)
# object array of bytes
arr = arr.astype(object)
self.assertEqual(pd.lib.infer_dtype(arr), compare)
def test_isinf_scalar(self):
# GH 11352
self.assertTrue(lib.isposinf_scalar(float('inf')))
self.assertTrue(lib.isposinf_scalar(np.inf))
self.assertFalse(lib.isposinf_scalar(-np.inf))
self.assertFalse(lib.isposinf_scalar(1))
self.assertFalse(lib.isposinf_scalar('a'))
self.assertTrue(lib.isneginf_scalar(float('-inf')))
self.assertTrue(lib.isneginf_scalar(-np.inf))
self.assertFalse(lib.isneginf_scalar(np.inf))
self.assertFalse(lib.isneginf_scalar(1))
self.assertFalse(lib.isneginf_scalar('a'))
def test_maybe_convert_numeric_infinities(self):
# see gh-13274
infinities = ['inf', 'inF', 'iNf', 'Inf',
'iNF', 'InF', 'INf', 'INF']
na_values = set(['', 'NULL', 'nan'])
pos = np.array(['inf'], dtype=np.float64)
neg = np.array(['-inf'], dtype=np.float64)
msg = "Unable to parse string"
for infinity in infinities:
for maybe_int in (True, False):
out = lib.maybe_convert_numeric(
np.array([infinity], dtype=object),
na_values, maybe_int)
tm.assert_numpy_array_equal(out, pos)
out = lib.maybe_convert_numeric(
np.array(['-' + infinity], dtype=object),
na_values, maybe_int)
tm.assert_numpy_array_equal(out, neg)
out = lib.maybe_convert_numeric(
np.array([u(infinity)], dtype=object),
na_values, maybe_int)
tm.assert_numpy_array_equal(out, pos)
out = lib.maybe_convert_numeric(
np.array(['+' + infinity], dtype=object),
na_values, maybe_int)
tm.assert_numpy_array_equal(out, pos)
# too many characters
with tm.assertRaisesRegexp(ValueError, msg):
lib.maybe_convert_numeric(
np.array(['foo_' + infinity], dtype=object),
na_values, maybe_int)
def test_maybe_convert_numeric_post_floatify_nan(self):
# see gh-13314
data = np.array(['1.200', '-999.000', '4.500'], dtype=object)
expected = np.array([1.2, np.nan, 4.5], dtype=np.float64)
nan_values = set([-999, -999.0])
for coerce_type in (True, False):
out = lib.maybe_convert_numeric(data, nan_values, coerce_type)
tm.assert_numpy_array_equal(out, expected)
def test_convert_infs(self):
arr = np.array(['inf', 'inf', 'inf'], dtype='O')
result = lib.maybe_convert_numeric(arr, set(), False)
self.assertTrue(result.dtype == np.float64)
arr = np.array(['-inf', '-inf', '-inf'], dtype='O')
result = lib.maybe_convert_numeric(arr, set(), False)
self.assertTrue(result.dtype == np.float64)
def test_scientific_no_exponent(self):
# See PR 12215
arr = np.array(['42E', '2E', '99e', '6e'], dtype='O')
result = lib.maybe_convert_numeric(arr, set(), False, True)
self.assertTrue(np.all(np.isnan(result)))
def test_convert_non_hashable(self):
# GH13324
# make sure that we are handing non-hashables
arr = np.array([[10.0, 2], 1.0, 'apple'])
result = lib.maybe_convert_numeric(arr, set(), False, True)
tm.assert_numpy_array_equal(result, np.array([np.nan, 1.0, np.nan]))
class TestTypeInference(tm.TestCase):
_multiprocess_can_split_ = True
def test_length_zero(self):
result = lib.infer_dtype(np.array([], dtype='i4'))
self.assertEqual(result, 'integer')
result = lib.infer_dtype([])
self.assertEqual(result, 'empty')
def test_integers(self):
arr = np.array([1, 2, 3, np.int64(4), np.int32(5)], dtype='O')
result = lib.infer_dtype(arr)
self.assertEqual(result, 'integer')
arr = np.array([1, 2, 3, np.int64(4), np.int32(5), 'foo'], dtype='O')
result = lib.infer_dtype(arr)
self.assertEqual(result, 'mixed-integer')
arr = np.array([1, 2, 3, 4, 5], dtype='i4')
result = lib.infer_dtype(arr)
self.assertEqual(result, 'integer')
def test_bools(self):
arr = np.array([True, False, True, True, True], dtype='O')
result = lib.infer_dtype(arr)
self.assertEqual(result, 'boolean')
arr = np.array([np.bool_(True), np.bool_(False)], dtype='O')
result = lib.infer_dtype(arr)
self.assertEqual(result, 'boolean')
arr = np.array([True, False, True, 'foo'], dtype='O')
result = lib.infer_dtype(arr)
self.assertEqual(result, 'mixed')
arr = np.array([True, False, True], dtype=bool)
result = lib.infer_dtype(arr)
self.assertEqual(result, 'boolean')
def test_floats(self):
arr = np.array([1., 2., 3., np.float64(4), np.float32(5)], dtype='O')
result = lib.infer_dtype(arr)
self.assertEqual(result, 'floating')
arr = np.array([1, 2, 3, np.float64(4), np.float32(5), 'foo'],
dtype='O')
result = lib.infer_dtype(arr)
self.assertEqual(result, 'mixed-integer')
arr = np.array([1, 2, 3, 4, 5], dtype='f4')
result = lib.infer_dtype(arr)
self.assertEqual(result, 'floating')
arr = np.array([1, 2, 3, 4, 5], dtype='f8')
result = lib.infer_dtype(arr)
self.assertEqual(result, 'floating')
def test_string(self):
pass
def test_unicode(self):
pass
def test_datetime(self):
dates = [datetime(2012, 1, x) for x in range(1, 20)]
index = Index(dates)
self.assertEqual(index.inferred_type, 'datetime64')
def test_date(self):
dates = [date(2012, 1, x) for x in range(1, 20)]
index = Index(dates)
self.assertEqual(index.inferred_type, 'date')
def test_to_object_array_tuples(self):
r = (5, 6)
values = [r]
result = lib.to_object_array_tuples(values)
try:
# make sure record array works
from collections import namedtuple
record = namedtuple('record', 'x y')
r = record(5, 6)
values = [r]
result = lib.to_object_array_tuples(values) # noqa
except ImportError:
pass
def test_to_object_array_width(self):
# see gh-13320
rows = [[1, 2, 3], [4, 5, 6]]
expected = np.array(rows, dtype=object)
out = lib.to_object_array(rows)
tm.assert_numpy_array_equal(out, expected)
expected = np.array(rows, dtype=object)
out = lib.to_object_array(rows, min_width=1)
tm.assert_numpy_array_equal(out, expected)
expected = np.array([[1, 2, 3, None, None],
[4, 5, 6, None, None]], dtype=object)
out = lib.to_object_array(rows, min_width=5)
tm.assert_numpy_array_equal(out, expected)
def test_object(self):
# GH 7431
# cannot infer more than this as only a single element
arr = np.array([None], dtype='O')
result = lib.infer_dtype(arr)
self.assertEqual(result, 'mixed')
def test_categorical(self):
# GH 8974
from pandas import Categorical, Series
arr = Categorical(list('abc'))
result = lib.infer_dtype(arr)
self.assertEqual(result, 'categorical')
result = lib.infer_dtype(Series(arr))
self.assertEqual(result, 'categorical')
arr = Categorical(list('abc'), categories=['cegfab'], ordered=True)
result = lib.infer_dtype(arr)
self.assertEqual(result, 'categorical')
result = lib.infer_dtype(Series(arr))
self.assertEqual(result, 'categorical')
class TestConvert(tm.TestCase):
def test_convert_objects(self):
arr = np.array(['a', 'b', np.nan, np.nan, 'd', 'e', 'f'], dtype='O')
result = lib.maybe_convert_objects(arr)
self.assertTrue(result.dtype == np.object_)
def test_convert_objects_ints(self):
# test that we can detect many kinds of integers
dtypes = ['i1', 'i2', 'i4', 'i8', 'u1', 'u2', 'u4', 'u8']
for dtype_str in dtypes:
arr = np.array(list(np.arange(20, dtype=dtype_str)), dtype='O')
self.assertTrue(arr[0].dtype == np.dtype(dtype_str))
result = lib.maybe_convert_objects(arr)
self.assertTrue(issubclass(result.dtype.type, np.integer))
def test_convert_objects_complex_number(self):
for dtype in np.sctypes['complex']:
arr = np.array(list(1j * np.arange(20, dtype=dtype)), dtype='O')
self.assertTrue(arr[0].dtype == np.dtype(dtype))
result = lib.maybe_convert_objects(arr)
self.assertTrue(issubclass(result.dtype.type, np.complexfloating))
class Testisscalar(tm.TestCase):
def test_isscalar_builtin_scalars(self):
self.assertTrue(lib.isscalar(None))
self.assertTrue(lib.isscalar(True))
self.assertTrue(lib.isscalar(False))
self.assertTrue(lib.isscalar(0.))
self.assertTrue(lib.isscalar(np.nan))
self.assertTrue(lib.isscalar('foobar'))
self.assertTrue(lib.isscalar(b'foobar'))
self.assertTrue(lib.isscalar(u('efoobar')))
self.assertTrue(lib.isscalar(datetime(2014, 1, 1)))
self.assertTrue(lib.isscalar(date(2014, 1, 1)))
self.assertTrue(lib.isscalar(time(12, 0)))
self.assertTrue(lib.isscalar(timedelta(hours=1)))
self.assertTrue(lib.isscalar(pd.NaT))
def test_isscalar_builtin_nonscalars(self):
self.assertFalse(lib.isscalar({}))
self.assertFalse(lib.isscalar([]))
self.assertFalse(lib.isscalar([1]))
self.assertFalse(lib.isscalar(()))
self.assertFalse(lib.isscalar((1, )))
self.assertFalse(lib.isscalar(slice(None)))
self.assertFalse(lib.isscalar(Ellipsis))
def test_isscalar_numpy_array_scalars(self):
self.assertTrue(lib.isscalar(np.int64(1)))
self.assertTrue(lib.isscalar(np.float64(1.)))
self.assertTrue(lib.isscalar(np.int32(1)))
self.assertTrue(lib.isscalar(np.object_('foobar')))
self.assertTrue(lib.isscalar(np.str_('foobar')))
self.assertTrue(lib.isscalar(np.unicode_(u('foobar'))))
self.assertTrue(lib.isscalar(np.bytes_(b'foobar')))
self.assertTrue(lib.isscalar(np.datetime64('2014-01-01')))
self.assertTrue(lib.isscalar(np.timedelta64(1, 'h')))
def test_isscalar_numpy_zerodim_arrays(self):
for zerodim in [np.array(1), np.array('foobar'),
np.array(np.datetime64('2014-01-01')),
np.array(np.timedelta64(1, 'h')),
np.array(np.datetime64('NaT'))]:
self.assertFalse(lib.isscalar(zerodim))
self.assertTrue(lib.isscalar(lib.item_from_zerodim(zerodim)))
def test_isscalar_numpy_arrays(self):
self.assertFalse(lib.isscalar(np.array([])))
self.assertFalse(lib.isscalar(np.array([[]])))
self.assertFalse(lib.isscalar(np.matrix('1; 2')))
def test_isscalar_pandas_scalars(self):
self.assertTrue(lib.isscalar(pd.Timestamp('2014-01-01')))
self.assertTrue(lib.isscalar(pd.Timedelta(hours=1)))
self.assertTrue(lib.isscalar(pd.Period('2014-01-01')))
def test_lisscalar_pandas_containers(self):
self.assertFalse(lib.isscalar(pd.Series()))
self.assertFalse(lib.isscalar(pd.Series([1])))
self.assertFalse(lib.isscalar(pd.DataFrame()))
self.assertFalse(lib.isscalar(pd.DataFrame([[1]])))
self.assertFalse(lib.isscalar(pd.Panel()))
self.assertFalse(lib.isscalar(pd.Panel([[[1]]])))
self.assertFalse(lib.isscalar(pd.Index([])))
self.assertFalse(lib.isscalar(pd.Index([1])))
class TestParseSQL(tm.TestCase):
def test_convert_sql_column_floats(self):
arr = np.array([1.5, None, 3, 4.2], dtype=object)
result = lib.convert_sql_column(arr)
expected = np.array([1.5, np.nan, 3, 4.2], dtype='f8')
self.assert_numpy_array_equal(result, expected)
def test_convert_sql_column_strings(self):
arr = np.array(['1.5', None, '3', '4.2'], dtype=object)
result = lib.convert_sql_column(arr)
expected = np.array(['1.5', np.nan, '3', '4.2'], dtype=object)
self.assert_numpy_array_equal(result, expected)
def test_convert_sql_column_unicode(self):
arr = np.array([u('1.5'), None, u('3'), u('4.2')],
dtype=object)
result = lib.convert_sql_column(arr)
expected = np.array([u('1.5'), np.nan, u('3'), u('4.2')],
dtype=object)
self.assert_numpy_array_equal(result, expected)
def test_convert_sql_column_ints(self):
arr = np.array([1, 2, 3, 4], dtype='O')
arr2 = np.array([1, 2, 3, 4], dtype='i4').astype('O')
result = lib.convert_sql_column(arr)
result2 = lib.convert_sql_column(arr2)
expected = np.array([1, 2, 3, 4], dtype='i8')
self.assert_numpy_array_equal(result, expected)
self.assert_numpy_array_equal(result2, expected)
arr = np.array([1, 2, 3, None, 4], dtype='O')
result = lib.convert_sql_column(arr)
expected = np.array([1, 2, 3, np.nan, 4], dtype='f8')
self.assert_numpy_array_equal(result, expected)
def test_convert_sql_column_longs(self):
arr = np.array([long(1), long(2), long(3), long(4)], dtype='O')
result = lib.convert_sql_column(arr)
expected = np.array([1, 2, 3, 4], dtype='i8')
self.assert_numpy_array_equal(result, expected)
arr = np.array([long(1), long(2), long(3), None, long(4)], dtype='O')
result = lib.convert_sql_column(arr)
expected = np.array([1, 2, 3, np.nan, 4], dtype='f8')
self.assert_numpy_array_equal(result, expected)
def test_convert_sql_column_bools(self):
arr = np.array([True, False, True, False], dtype='O')
result = | lib.convert_sql_column(arr) | pandas.lib.convert_sql_column |
import json
import pandas as pd
from web import *
class Stock(object):
@staticmethod
def get_realtime_stock(symbol):
output = {'symbol': symbol}
url = yahoo_api_v8_template.replace('{symbol}', symbol)
url = url.replace('{interval}', '1d')
url = url.replace('{range}', '1d')
ret, res = send_request(url)
if ret != ERR_SUCCESS:
return ret
quote = Stock.__extract_quote(json.loads(res))
output['openP'] = quote['quote'][0]['open'][len(quote['quote'][0]['open']) - 1]
output['highP'] = quote['quote'][0]['high'][len(quote['quote'][0]['high']) - 1]
output['lowP'] = quote['quote'][0]['low'][len(quote['quote'][0]['low']) - 1]
output['closeP'] = quote['quote'][0]['close'][len(quote['quote'][0]['close']) - 1]
output['volume'] = quote['quote'][0]['volume'][len(quote['quote'][0]['volume']) - 1]
output['changeP'] = (output['closeP'] - quote['chartPreviousClose']) / quote['chartPreviousClose']
output['openP'] = "{:.2f}".format(output['openP'])
output['highP'] = "{:.2f}".format(output['highP'])
output['lowP'] = "{:.2f}".format(output['lowP'])
output['closeP'] = "{:.2f}".format(output['closeP'])
output['volume'] = Stock.__human_format(output['volume'])
output['changeP'] = "{:.2%}".format(output['changeP'])
return ret, output
@staticmethod
def get_stock(symbol):
ret1, output = Stock.get_realtime_stock(symbol)
ret2 = Stock.__get_pv_avg_3mo(symbol, output)
ret3 = Stock.__get_p_range_1y(symbol, output)
if ret1 != ERR_SUCCESS or ret2 != ERR_SUCCESS or ret3 != ERR_SUCCESS:
ret = ERR_GET_STOCK_ERROR
else:
ret = ERR_SUCCESS
output['avg3mP'] = "{:.2f}".format(output['avg3mP'])
output['avg3mV'] = Stock.__human_format(output['avg3mV'])
output['strikeP1Y'] = "{0:.2f} - {1:.2f}".format(output['strikeP1Y'][0], output['strikeP1Y'][1])
return ret, output
@staticmethod
def get_notification_data(symbol):
ret1, output = Stock.get_realtime_stock(symbol)
ret2 = Stock.__get_3mo_data(symbol, output)
if ret1 != ERR_SUCCESS or ret2 != ERR_SUCCESS:
ret = ERR_GET_STOCK_ERROR
else:
ret = ERR_SUCCESS
return ret, output
@staticmethod
def __get_3mo_data(symbol, output):
url = yahoo_api_v8_template.replace('{symbol}', symbol)
url = url.replace('{interval}', '1d')
url = url.replace('{range}', '3mo')
ret, res = send_request(url)
if ret != ERR_SUCCESS:
return ret
output['data_3mo'] = Stock.__extract_quote(json.loads(res))
return ERR_SUCCESS
@staticmethod
def __get_pv_avg_3mo(symbol, output):
url = yahoo_api_v8_template.replace('{symbol}', symbol)
url = url.replace('{interval}', '1d')
url = url.replace('{range}', '3mo')
ret, res = send_request(url)
if ret != ERR_SUCCESS:
return ret
quote = Stock.__extract_quote(json.loads(res))
output['avg3mP'] = pd.Series(quote['quote'][0]['close']).mean()
output['avg3mV'] = | pd.Series(quote['quote'][0]['volume']) | pandas.Series |
import numpy as np
from numpy.testing import assert_array_equal
from pandas.testing import assert_frame_equal
import pandas as pd
from pdpbox.pdp_calc_utils import _calc_ice_lines, _calc_ice_lines_inter, _prepare_pdp_count_data
import pytest
class TestCalcICELinesBinary(object):
def test_ice_binary(self, titanic_data, titanic_model, titanic_features):
# binary feature
grid_results, _data = _calc_ice_lines(
feature_grid=0, data=titanic_data, model=titanic_model, model_features=titanic_features,
n_classes=2, feature='Sex', feature_type='binary', predict_kwds={}, data_transformer=None, unit_test=True)
expected = pd.DataFrame({0: {0: 0.527877688407898, 150: 0.8060303926467896, 300: 0.7201764583587646,
450: 0.9118255376815796, 600: 0.8612496852874756, 750: 0.9331579208374023}})
assert_frame_equal(grid_results.iloc[[0, 150, 300, 450, 600, 750]], expected, check_like=True,
check_dtype=False)
assert_array_equal(_data['Sex'].unique(), np.array([0]))
def test_ice_numeric(self, titanic_data, titanic_model, titanic_features):
# numeric feature
grid_results, _data = _calc_ice_lines(
feature_grid=10, data=titanic_data, model=titanic_model, model_features=titanic_features,
n_classes=2, feature='Fare', feature_type='numeric', predict_kwds={}, data_transformer=None, unit_test=True)
expected = pd.DataFrame({10: {0: 0.10624270886182785, 150: 0.09951823949813843, 300: 0.6190056204795837,
450: 0.16398519277572632, 600: 0.7467048764228821, 750: 0.868721067905426}})
assert_frame_equal(grid_results.iloc[[0, 150, 300, 450, 600, 750]], expected, check_like=True,
check_dtype=False)
assert_array_equal(_data['Fare'].unique(), np.array([10]))
def test_ice_onehot(self, titanic_data, titanic_model, titanic_features):
# onehot encoding feature
grid_results, _data = _calc_ice_lines(
feature_grid='Embarked_C', data=titanic_data, model=titanic_model, model_features=titanic_features,
n_classes=2, feature=['Embarked_C', 'Embarked_S', 'Embarked_Q'], feature_type='onehot',
predict_kwds={}, data_transformer=None, unit_test=True)
expected = pd.DataFrame(
{'Embarked_C': {0: 0.19760717451572418, 150: 0.11059149354696274, 300: 0.7139607667922974,
450: 0.2575017809867859, 600: 0.9045996069908142, 750: 0.9531968832015991}})
assert_frame_equal(grid_results.iloc[[0, 150, 300, 450, 600, 750]], expected, check_like=True,
check_dtype=False)
assert_array_equal(_data[['Embarked_C', 'Embarked_S', 'Embarked_Q']].mean().values, np.array([1, 0, 0]))
def test_ice_predict_kwds(self, titanic_data, titanic_model, titanic_features):
# with predict_kwds
grid_results, _ = _calc_ice_lines(
feature_grid=0, data=titanic_data, model=titanic_model, model_features=titanic_features, n_classes=2,
feature='Sex', feature_type='binary', predict_kwds={'ntree_limit': 10}, data_transformer=None,
unit_test=True)
expected = pd.DataFrame({0: {0: 0.5039686560630798, 150: 0.6007370352745056, 300: 0.5556174516677856,
450: 0.643494725227356, 600: 0.643494725227356, 750: 0.65798020362854}})
assert_frame_equal(grid_results.iloc[[0, 150, 300, 450, 600, 750]], expected, check_like=True,
check_dtype=False)
def test_ice_data_transformer(self, titanic_data, titanic_model, titanic_features):
# with data_transformer
def embark_change(df):
df.loc[df['Embarked_C'] == 1, 'Fare'] = 10
df.loc[df['Embarked_S'] == 1, 'Fare'] = 20
df.loc[df['Embarked_Q'] == 1, 'Fare'] = 30
return df
grid_results, _data = _calc_ice_lines(
feature_grid='Embarked_C', data=titanic_data, model=titanic_model, model_features=titanic_features,
n_classes=2, feature=['Embarked_C', 'Embarked_S', 'Embarked_Q'], feature_type='onehot',
predict_kwds={}, data_transformer=embark_change, unit_test=True)
expected = pd.DataFrame(
{'Embarked_C': {0: 0.20869030058383942, 150: 0.10480280220508575, 300: 0.6179739832878113,
450: 0.18637187778949738, 600: 0.8106594085693359, 750: 0.8973860740661621}})
assert_frame_equal(grid_results.iloc[[0, 150, 300, 450, 600, 750]], expected, check_like=True,
check_dtype=False)
assert_array_equal(_data['Fare'].unique(), np.array([10.]))
def test_calc_ice_lines_regression(ross_data, ross_model, ross_features):
grid_results, _data = _calc_ice_lines(
feature_grid=1, data=ross_data, model=ross_model, model_features=ross_features, n_classes=0,
feature='SchoolHoliday', feature_type='binary', predict_kwds={}, data_transformer=None, unit_test=True)
assert_array_equal(_data['SchoolHoliday'].unique(), np.array([1]))
expected = pd.DataFrame({1: {0: 8802.910080560769, 100000: 8293.287914628107, 200000: 5352.321273982288,
300000: 5412.1717528683475, 400000: 7933.070072150073, 500000: 7520.956055932758,
600000: 5493.134809064146, 700000: 5528.43699339258, 800000: 4877.434213535265}})
assert_frame_equal(grid_results.iloc[[0, 100000, 200000, 300000, 400000, 500000, 600000, 700000, 800000]],
expected, check_like=True, check_dtype=False)
def test_calc_ice_lines_multiclass(otto_data, otto_model, otto_features):
grid_results, _data = _calc_ice_lines(
feature_grid=1, data=otto_data, model=otto_model, model_features=otto_features, n_classes=9,
feature='feat_67', feature_type='numeric', predict_kwds={}, data_transformer=None, unit_test=True)
assert len(grid_results) == 9
assert_array_equal(_data['feat_67'].unique(), np.array([1]))
expected_target_0 = pd.DataFrame({1: {0: 0.56, 10000: 0.0, 20000: 0.0, 30000: 0.01, 40000: 0.01,
50000: 0.03, 60000: 0.04}})
assert_frame_equal(grid_results[0].iloc[[0, 10000, 20000, 30000, 40000, 50000, 60000]],
expected_target_0, check_like=True, check_dtype=False)
expected_target_3 = pd.DataFrame({1: {0: 0.0, 10000: 0.04, 20000: 0.03, 30000: 0.02, 40000: 0.0,
50000: 0.0, 60000: 0.0}})
assert_frame_equal(grid_results[3].iloc[[0, 10000, 20000, 30000, 40000, 50000, 60000]],
expected_target_3, check_like=True, check_dtype=False)
expected_target_7 = pd.DataFrame({1: {0: 0.0, 10000: 0.03, 20000: 0.01, 30000: 0.02, 40000: 0.03,
50000: 0.9, 60000: 0.02}})
assert_frame_equal(grid_results[7].iloc[[0, 10000, 20000, 30000, 40000, 50000, 60000]],
expected_target_7, check_like=True, check_dtype=False)
class TestCalcICELinesInterBinary(object):
def test_ice_inter_binary_numeric(self, titanic_data, titanic_model, titanic_features):
# binary and numeric
grid_results, _data = _calc_ice_lines_inter(
feature_grids_combo=[0, 10], data=titanic_data, model=titanic_model, model_features=titanic_features,
n_classes=2, feature_list=['Sex', 'Fare'], predict_kwds={}, data_transformer=None, unit_test=True)
assert_array_equal(np.unique(_data[['Sex', 'Fare']].values, axis=0), np.array([[0, 10]]))
expected = pd.DataFrame({'Fare': {0: 10, 150: 10, 300: 10, 450: 10, 600: 10, 750: 10},
'Sex': {0: 0, 150: 0, 300: 0, 450: 0, 600: 0, 750: 0},
'preds': {0: 0.37542039155960083, 150: 0.7539840340614319, 300: 0.6190056204795837,
450: 0.8457906246185303, 600: 0.7467048764228821, 750: 0.868721067905426}})
assert_frame_equal(grid_results.iloc[[0, 150, 300, 450, 600, 750]], expected, check_like=True,
check_dtype=False)
def test_ice_inter_binary_onehot(self, titanic_data, titanic_model, titanic_features):
# binary and onehot
grid_results, _data = _calc_ice_lines_inter(
feature_grids_combo=[1, 0, 1, 0], data=titanic_data, model=titanic_model, model_features=titanic_features,
n_classes=2, feature_list=['Sex', 'Embarked_C', 'Embarked_S', 'Embarked_Q'], predict_kwds={},
data_transformer=None, unit_test=True)
assert_array_equal(np.unique(_data[['Sex', 'Embarked_C', 'Embarked_S', 'Embarked_Q']].values, axis=0),
np.array([[1, 0, 1, 0]]))
expected = pd.DataFrame({'Embarked_C': {0: 0, 150: 0, 300: 0, 450: 0, 600: 0, 750: 0},
'Embarked_Q': {0: 0, 150: 0, 300: 0, 450: 0, 600: 0, 750: 0},
'Embarked_S': {0: 1, 150: 1, 300: 1, 450: 1, 600: 1, 750: 1},
'Sex': {0: 1, 150: 1, 300: 1, 450: 1, 600: 1, 750: 1},
'preds': {0: 0.1041787713766098, 150: 0.09937939792871475, 300: 0.09760041534900665,
450: 0.22834216058254242, 600: 0.14207805693149567,
750: 0.8904628753662109}})
assert_frame_equal(grid_results.iloc[[0, 150, 300, 450, 600, 750]], expected, check_like=True,
check_dtype=False)
def test_ice_inter_onehot_numeric(self, titanic_data, titanic_model, titanic_features):
# onehot and numeric
grid_results, _data = _calc_ice_lines_inter(
feature_grids_combo=[0, 0, 1, 10], data=titanic_data, model=titanic_model, model_features=titanic_features,
n_classes=2, feature_list=['Embarked_C', 'Embarked_S', 'Embarked_Q', 'Fare'], predict_kwds={},
data_transformer=None, unit_test=True)
assert_array_equal(np.unique(_data[['Embarked_C', 'Embarked_S', 'Embarked_Q', 'Fare']].values, axis=0),
np.array([[0, 0, 1, 10]]))
expected = pd.DataFrame({'Embarked_C': {0: 0, 150: 0, 300: 0, 450: 0, 600: 0, 750: 0},
'Embarked_Q': {0: 1, 150: 1, 300: 1, 450: 1, 600: 1, 750: 1},
'Embarked_S': {0: 0, 150: 0, 300: 0, 450: 0, 600: 0, 750: 0},
'Fare': {0: 10, 150: 10, 300: 10, 450: 10, 600: 10, 750: 10},
'preds': {0: 0.14132696390151978, 150: 0.09227359294891357, 300: 0.6190056204795837,
450: 0.13888351619243622, 600: 0.7998642325401306, 750: 0.8927221894264221}})
assert_frame_equal(grid_results.iloc[[0, 150, 300, 450, 600, 750]], expected, check_like=True,
check_dtype=False)
def test_ice_inter_predict_kwds(self, titanic_data, titanic_model, titanic_features):
# with predict_kwds
grid_results, _data = _calc_ice_lines_inter(
feature_grids_combo=[0, 10], data=titanic_data, model=titanic_model, model_features=titanic_features,
n_classes=2,
feature_list=['Sex', 'Fare'], predict_kwds={'ntree_limit': 10}, data_transformer=None, unit_test=True)
expected = pd.DataFrame({'Fare': {0: 10, 150: 10, 300: 10, 450: 10, 600: 10, 750: 10},
'Sex': {0: 0, 150: 0, 300: 0, 450: 0, 600: 0, 750: 0},
'preds': {0: 0.48284032940864563, 150: 0.5588331818580627, 300: 0.5346577763557434,
450: 0.5927444100379944, 600: 0.5927444100379944, 750: 0.6094712615013123}})
assert_frame_equal(grid_results.iloc[[0, 150, 300, 450, 600, 750]], expected, check_like=True,
check_dtype=False)
def test_ice_inter_data_transformer(self, titanic_data, titanic_model, titanic_features):
# with data_transformer
def embark_change(df):
df.loc[df['Embarked_C'] == 1, 'Fare'] = 10
df.loc[df['Embarked_S'] == 1, 'Fare'] = 20
df.loc[df['Embarked_Q'] == 1, 'Fare'] = 30
return df
grid_results, _data = _calc_ice_lines_inter(
feature_grids_combo=[1, 0, 1, 0], data=titanic_data, model=titanic_model, model_features=titanic_features,
n_classes=2, feature_list=['Sex', 'Embarked_C', 'Embarked_S', 'Embarked_Q'], predict_kwds={},
data_transformer=embark_change, unit_test=True)
assert_array_equal(_data['Fare'].unique(), np.array([20]))
expected = pd.DataFrame({'Embarked_C': {0: 0, 150: 0, 300: 0, 450: 0, 600: 0, 750: 0},
'Embarked_Q': {0: 0, 150: 0, 300: 0, 450: 0, 600: 0, 750: 0},
'Embarked_S': {0: 1, 150: 1, 300: 1, 450: 1, 600: 1, 750: 1},
'Sex': {0: 1, 150: 1, 300: 1, 450: 1, 600: 1, 750: 1},
'preds': {0: 0.11097385734319687, 150: 0.10733838379383087, 300: 0.12938366830348969,
450: 0.18971671164035797, 600: 0.11736717820167542,
750: 0.9063724279403687}})
assert_frame_equal(grid_results.iloc[[0, 150, 300, 450, 600, 750]], expected, check_like=True,
check_dtype=False)
def test_calc_ice_lines_inter_regression(ross_data, ross_model, ross_features):
grid_results, _data = _calc_ice_lines_inter(
feature_grids_combo=[1, 1, 0, 0, 0], data=ross_data, model=ross_model, model_features=ross_features,
n_classes=0, feature_list=['SchoolHoliday', 'StoreType_a', 'StoreType_b', 'StoreType_c', 'StoreType_d'],
predict_kwds={}, data_transformer=None, unit_test=True)
assert_array_equal(np.unique(_data[['SchoolHoliday', 'StoreType_a', 'StoreType_b', 'StoreType_c',
'StoreType_d']].values, axis=0), np.array([[1, 1, 0, 0, 0]]))
expected = pd.DataFrame({'SchoolHoliday': {0: 1, 100000: 1, 200000: 1, 300000: 1, 400000: 1,
500000: 1, 600000: 1, 700000: 1, 800000: 1},
'StoreType_a': {0: 1, 100000: 1, 200000: 1, 300000: 1, 400000: 1,
500000: 1, 600000: 1, 700000: 1, 800000: 1},
'StoreType_b': {0: 0, 100000: 0, 200000: 0, 300000: 0, 400000: 0,
500000: 0, 600000: 0, 700000: 0, 800000: 0},
'StoreType_c': {0: 0, 100000: 0, 200000: 0, 300000: 0, 400000: 0,
500000: 0, 600000: 0, 700000: 0, 800000: 0},
'StoreType_d': {0: 0, 100000: 0, 200000: 0, 300000: 0, 400000: 0,
500000: 0, 600000: 0, 700000: 0, 800000: 0},
'preds': {0: 8252.710201150694, 100000: 8293.287914628107, 200000: 5352.321273982288,
300000: 5707.464798080147, 400000: 5307.592789748846, 500000: 7520.956055932758,
600000: 5493.134809064146, 700000: 7040.829184296985, 800000: 4877.434213535266}})
assert_frame_equal(grid_results.iloc[[0, 100000, 200000, 300000, 400000, 500000, 600000, 700000, 800000]],
expected, check_like=True, check_dtype=False)
# @pytest.mark.skip(reason="slow")
def test_calc_ice_lines_inter_multiclass(otto_data, otto_model, otto_features):
grid_results, _data = _calc_ice_lines_inter(
feature_grids_combo=[1, 1], data=otto_data, model=otto_model, model_features=otto_features,
n_classes=9, feature_list=['feat_67', 'feat_32'], predict_kwds={}, data_transformer=None, unit_test=True)
assert_array_equal(np.unique(_data[['feat_67', 'feat_32']].values, axis=0), np.array([[1, 1]]))
expected = pd.DataFrame({'class_0_preds': {0: 0.55, 10000: 0.01, 20000: 0.0, 30000: 0.01,
40000: 0.01, 50000: 0.03, 60000: 0.04},
'class_1_preds': {0: 0.0, 10000: 0.72, 20000: 0.19, 30000: 0.04,
40000: 0.0, 50000: 0.0, 60000: 0.04},
'class_2_preds': {0: 0.0, 10000: 0.12, 20000: 0.7, 30000: 0.01,
40000: 0.0, 50000: 0.0, 60000: 0.02},
'class_3_preds': {0: 0.0, 10000: 0.05, 20000: 0.03, 30000: 0.02,
40000: 0.0, 50000: 0.0, 60000: 0.0},
'class_4_preds': {0: 0.0, 10000: 0.01, 20000: 0.0, 30000: 0.84,
40000: 0.0, 50000: 0.0, 60000: 0.0},
'class_5_preds': {0: 0.44, 10000: 0.03, 20000: 0.04, 30000: 0.02,
40000: 0.88, 50000: 0.0, 60000: 0.02},
'class_6_preds': {0: 0.01, 10000: 0.0, 20000: 0.03, 30000: 0.03,
40000: 0.01, 50000: 0.07, 60000: 0.01},
'class_7_preds': {0: 0.0, 10000: 0.02, 20000: 0.01, 30000: 0.02,
40000: 0.03, 50000: 0.9, 60000: 0.02},
'class_8_preds': {0: 0.0, 10000: 0.04, 20000: 0.0, 30000: 0.01,
40000: 0.07, 50000: 0.0, 60000: 0.85},
'feat_32': {0: 1, 10000: 1, 20000: 1, 30000: 1, 40000: 1, 50000: 1, 60000: 1},
'feat_67': {0: 1, 10000: 1, 20000: 1, 30000: 1, 40000: 1, 50000: 1, 60000: 1}})
assert_frame_equal(grid_results.iloc[[0, 10000, 20000, 30000, 40000, 50000, 60000]], expected,
check_like=True, check_dtype=False)
class TestPreparePDPCountData(object):
def test_count_data_binary(self, titanic_data):
# binary feature
count_data = _prepare_pdp_count_data(feature='Sex', feature_type='binary', data=titanic_data,
feature_grids=[0, 1])
expected = pd.DataFrame({'count': {0: 314, 1: 577}, 'x': {0: 0, 1: 1},
'count_norm': {0: 0.35241301907968575, 1: 0.6475869809203143}})
assert_frame_equal(count_data, expected, check_like=True, check_dtype=False)
def test_count_data_onehot(self, titanic_data):
# onehot feature
count_data = _prepare_pdp_count_data(feature=['Embarked_C', 'Embarked_S', 'Embarked_Q'], feature_type='onehot',
data=titanic_data,
feature_grids=['Embarked_C', 'Embarked_S', 'Embarked_Q'])
expected = pd.DataFrame({'count': {0: 168, 1: 646, 2: 77},
'count_norm': {0: 0.18855218855218855, 1: 0.7250280583613917, 2: 0.08641975308641975},
'index': {0: 'Embarked_C', 1: 'Embarked_S', 2: 'Embarked_Q'}, 'x': {0: 0, 1: 1, 2: 2}})
assert_frame_equal(count_data, expected, check_like=True, check_dtype=False)
def test_count_data_numeric(self, titanic_data):
# numeric feature
count_data = _prepare_pdp_count_data(
feature='Fare', feature_type='numeric', data=titanic_data,
feature_grids=np.array([0., 7.73284444, 7.8958, 8.6625, 13., 16.7, 26., 35.11111111, 73.5, 512.3292]))
expected = pd.DataFrame({'count': {0: 99, 1: 86, 2: 110, 3: 91, 4: 108, 5: 71, 6: 128, 7: 96, 8: 102},
'count_norm': {0: 0.1111111111111111, 1: 0.09652076318742986, 2: 0.12345679012345678,
3: 0.10213243546576879, 4: 0.12121212121212122, 5: 0.07968574635241302,
6: 0.143658810325477, 7: 0.10774410774410774, 8: 0.11447811447811448},
'x': {0: 0, 1: 1, 2: 2, 3: 3, 4: 4, 5: 5, 6: 6, 7: 7, 8: 8},
'xticklabels': {0: '[0, 7.73)', 1: '[7.73, 7.9)', 2: '[7.9, 8.66)', 3: '[8.66, 13)',
4: '[13, 16.7)', 5: '[16.7, 26)', 6: '[26, 35.11)', 7: '[35.11, 73.5)',
8: '[73.5, 512.33]'}})
assert_frame_equal(count_data, expected, check_like=True, check_dtype=False)
def test_count_data_numeric_outlier(self, titanic_data):
# numeric feature with outlier values
count_data = _prepare_pdp_count_data(
feature='Fare', feature_type='numeric', data=titanic_data,
feature_grids=np.array([7.225, 7.75, 7.9104, 9., 13., 16.1, 26., 31., 56.4958, 112.07915]))
expected = pd.DataFrame(
{'count': {0: 43, 1: 63, 2: 117, 3: 88, 4: 75, 5: 99, 6: 80, 7: 101, 8: 89, 9: 91, 10: 45},
'count_norm': {0: 0.04826038159371493, 1: 0.0707070707070707, 2: 0.13131313131313133,
3: 0.09876543209876543, 4: 0.08417508417508418, 5: 0.1111111111111111,
6: 0.08978675645342311, 7: 0.11335578002244669, 8: 0.09988776655443322,
9: 0.10213243546576879, 10: 0.050505050505050504},
'x': {0: -1, 1: 0, 2: 1, 3: 2, 4: 3, 5: 4, 6: 5, 7: 6, 8: 7, 9: 8, 10: 9},
'xticklabels': {0: '[0, 7.22)', 1: '[7.22, 7.75)', 2: '[7.75, 7.91)', 3: '[7.91, 9)',
4: '[9, 13)', 5: '[13, 16.1)', 6: '[16.1, 26)', 7: '[26, 31)',
8: '[31, 56.5)', 9: '[56.5, 112.08)', 10: '[112.08, 512.33]'}})
| assert_frame_equal(count_data, expected, check_like=True, check_dtype=False) | pandas.testing.assert_frame_equal |
from datetime import datetime
from pickle import dump, load
import numpy as np
import pandas as pd
from sklearn.kernel_ridge import KernelRidge
from sklearn.linear_model import Ridge, LinearRegression
from sklearn.preprocessing import OneHotEncoder
import seaborn as sns
import matplotlib.pyplot as plt
from tensorflow.keras.models import load_model
from vbz_training import fit_neural_network, fit_regression_model, preprocess_df
import warnings
warnings.filterwarnings("ignore")
sns.set(style="ticks", color_codes=True)
def get_data_github():
filenames_suffix = [
"aa",
"ab",
"ac",
"ad",
"ae",
"af",
"ag",
"ah",
"ai",
"aj",
"ak",
"al",
"am",
]
reisende_raws = []
for suffix in filenames_suffix:
url = f"https://raw.githubusercontent.com/marinom27/VersusCorona/master/data/vbz_fahrgastzahlen/REISENDE_PART{suffix}.csv"
reisende_raws.append(pd.read_csv(url, sep=";", header=None, low_memory=False))
url = f"https://raw.githubusercontent.com/marinom27/VersusCorona/master/data/vbz_fahrgastzahlen/LINIE.csv"
linie = | pd.read_csv(url, sep=";") | pandas.read_csv |
import pandas as pd
import numpy as np
import lightgbm as lgb
from sklearn.model_selection import KFold
from catboost import CatBoostRegressor
from utils import *
import argparse
from sklearn import preprocessing
import wordbatch
from wordbatch.extractors import WordBag
from wordbatch.models import FM_FTRL
class TargetEncoder:
# Adapted from https://www.kaggle.com/ogrellier/python-target-encoding-for-categorical-features
def __repr__(self):
return 'TargetEncoder'
def __init__(self, cols, smoothing=1, min_samples_leaf=1, noise_level=0, keep_original=False):
self.cols = cols
self.smoothing = smoothing
self.min_samples_leaf = min_samples_leaf
self.noise_level = noise_level
self.keep_original = keep_original
@staticmethod
def add_noise(series, noise_level):
return series * (1 + noise_level * np.random.randn(len(series)))
def encode(self, train, test, target):
for col in self.cols:
if self.keep_original:
train[col + '_te'], test[col + '_te'] = self.encode_column(train[col], test[col], target)
else:
train[col], test[col] = self.encode_column(train[col], test[col], target)
return train, test
def encode_column(self, trn_series, tst_series, target):
temp = | pd.concat([trn_series, target], axis=1) | pandas.concat |
#coding:utf-8
from typing import Set
from scipy.optimize.optimize import main
from basic_config import *
import seaborn as sns
import pandas as pd
def hist_attr(data, attr_names, logs, outpath, col=2, indexed=True):
indexes = 'abcdefghijklmn'
attr_num = len(attr_names)
if attr_num == 0:
logging.error('No attrname stated.')
return None
if attr_num != len(logs):
logging.error('log scale list do not has same length as attr_names.')
return None
if attr_num == 1:
indexed = False
row = attr_num // col
fig, axes = plt.subplots(row, col, figsize=(col * 4.5, row * 3.5))
for i, attr_name in enumerate(attr_names):
r = i // col
c = i % col
ax = axes[r][c]
log = logs[i]
hist_one_attr(data, attr_name, ax, log=log)
xlabel = attr_name
if indexed:
xlabel += '\n(' + indexes[i] + ')'
ax.set_xlabel(xlabel)
plt.tight_layout()
plt.savefig(outpath, dpi=400)
logging.info(f'fig saved to {outpath}')
#一个属性的分布
def hist_one_attr(data, attr_name, ax, log=True):
sns.histplot(data,
x=attr_name,
ax=ax,
log_scale=log,
kde=True,
stat='probability')
def hist_indicators():
# read data
data = pd.read_csv('data/author_topic_indicators.txt')
sns.set_theme(style='ticks')
# 每位作者所有了论文的unique 主题数量
data['NUNT'] = data['UNT'] / data['productivity']
prod_list = data['productivity']
hindex_list = data['hindex']
tnc_list = data['TNC']
anc_list = data['ANC']
utn_list = data['UNT']
attr1VsALL(utn_list, 'UNT', 'unique number of topics', prod_list,
hindex_list, tnc_list, anc_list)
nutn_list = data['NUNT']
attr1VsALL(nutn_list,
'NUNT',
'normalized unique number of topics',
prod_list,
hindex_list,
tnc_list,
anc_list,
is_con=True)
data['MAX PNUOT N'] = data['MAX PNUOT'] / data['productivity']
maxPNUOT = data['MAX PNUOT N']
attr1VsALL(maxPNUOT,
'MAX PNUOT N',
'normed max(PNUOT)',
prod_list,
hindex_list,
tnc_list,
anc_list,
is_con=True)
diversity_list = data['diversity']
attr1VsALL(diversity_list,
'diversity',
'diversity',
prod_list,
hindex_list,
tnc_list,
anc_list,
is_con=True)
persistance_list = data['persistance']
attr1VsALL(persistance_list,
'persistance',
'persistance',
prod_list,
hindex_list,
tnc_list,
anc_list,
is_con=True)
def attr1VsALL(utn_list,
attrName,
attrLabel,
prod_list,
hindex_list,
tnc_list,
anc_list=None,
is_con=False):
#MAX PNUOT 与个属性之间的关系
attr1_dis(utn_list, attrName, attrLabel, is_continous=is_con)
attr1_vs_attr2(utn_list,
prod_list,
attrLabel,
'number of papers',
f'{attrName}_productivity',
set([5, 6, 8, 10, 15, 20]),
is_con=is_con)
# unique主题数量与hindex的关系
attr1_vs_attr2(utn_list,
hindex_list,
attrLabel,
'hindex',
f'{attrName}_hindex',
logX=False,
sample_set=set([5, 10, 15, 20]),
is_con=is_con)
# unique主题数量与total nummber of citations的关系
attr1_vs_attr2(utn_list,
tnc_list,
attrLabel,
'TNC',
f'{attrName}_TNC',
logX=True,
sample_set=set([5, 10, 20, 50, 100]),
is_con=is_con)
# unique主题数量与平均值的关系
attr1_vs_attr2(utn_list,
anc_list,
attrLabel,
'ANC',
f'{attrName}_ANC',
logX=True,
sample_set=set([5, 7, 8, 10, 20]),
is_con=is_con)
def histXY(utn_list, continuous):
if not continuous:
xs = []
ys = []
unique_topic_numbers_counter = Counter(utn_list)
for utn in sorted(unique_topic_numbers_counter.keys()):
xs.append(utn)
ys.append(unique_topic_numbers_counter[utn])
ys = np.array(ys) / float(np.sum(ys))
else:
ys, edges = np.histogram(utn_list, bins=7)
xs = (edges[:-1] + edges[1:]) / 2
return xs, ys
def attr1_dis(utn_list, attr_name, attr_label, logX=False, is_continous=False):
mean = np.mean(utn_list)
median = np.median(utn_list)
xs, ys = histXY(utn_list, is_continous)
plt.figure(figsize=(5, 4))
plt.plot(xs, ys, '-o')
plt.plot([mean] * 10,
np.linspace(np.min(ys) * 1.5,
np.max(ys) * 1.2, 10),
'--',
label='mean')
plt.plot([median] * 10,
np.linspace(np.min(ys) * 1.5,
np.max(ys) * 1.2, 10),
'-.',
label='median')
plt.xlabel(attr_label)
plt.ylabel('number of authors')
if logX:
plt.xscale('log')
plt.legend()
plt.tight_layout()
plt.savefig(f'fig/{attr_name}_dis.png', dpi=400)
def attr1_vs_attr2(utn_list,
prod_list,
xlabel,
ylabel,
saveName,
sample_set=None,
logX=True,
is_con=False):
xlabelS = xlabel.replace(' ', "_")
ylabelS = ylabel.replace(" ", "_")
prod_utns = defaultdict(list)
for i, utn in enumerate(utn_list):
prod = prod_list[i]
prod_utns[prod].append(utn)
prods = []
mean_Utns = []
median_utns = []
plt.figure(figsize=(5, 4))
for prod in sorted(prod_utns.keys()):
prods.append(prod)
mean_Utns.append(np.mean(np.array(prod_utns[prod])))
median_utns.append(np.median(np.array(prod_utns[prod])))
if sample_set and prod in set([5, 6, 8, 10, 15, 20]):
xs, ys = histXY(prod_utns[prod], is_con)
ys = np.array(ys) / float(np.sum(ys))
plt.plot(xs, ys, '-o', label=f"{ylabel}={prod}")
if sample_set:
plt.legend()
plt.tight_layout()
plt.savefig(f'fig/{saveName}_FACETs.png', dpi=400)
## 随着作者文章总数量的变化
plt.figure(figsize=(5, 4))
plt.plot(prods, mean_Utns, '-o', label='mean')
plt.plot(prods, median_utns, '-^', label='median')
plt.xlabel(f'{ylabel}')
plt.ylabel(f'{xlabel}')
if logX:
plt.xscale('log')
plt.legend()
plt.tight_layout()
plt.savefig(f'fig/{saveName}_compare.png', dpi=400)
def dynamic_attrs():
author_dynamics = json.loads(open('data/trans_dynamic_data.json').read())
poses = []
directions = []
t_intervals = defaultdict(list)
t_poses = defaultdict(list)
for author in author_dynamics.keys():
pos, direction, t = author_dynamics[author]
# for i, d in enumerate(direction):
# if d <= 1:
# directions.append(d)
# poses.append(pos[i])
directions.extend(direction)
poses.extend(pos)
if t > 15:
t = 15
t_poses[t].extend(pos)
last_i = 0
for i, p in enumerate(pos):
interval = p - last_i
t_intervals[t].append(interval)
last_i = p
print(len(poses), len(directions))
data = pd.DataFrame.from_dict({'POS': poses, 'Direction': directions})
plt.figure(figsize=(5, 4))
# plt.plot(xs, ys)
sns.histplot(data, x='POS', bins=20, kde=True)
plt.xlabel('selection position')
plt.savefig('fig/dynamic_pos.png', dpi=400)
plt.figure(figsize=(5, 4))
sns.histplot(data, x='Direction', bins=20, kde=True)
plt.tight_layout()
plt.savefig('fig/dynamic_direction.png', dpi=400)
bins_directions = defaultdict(list)
for i, pos in enumerate(poses):
direction = directions[i]
posbin = pos_bin(pos)
bins_directions[posbin].append(direction)
xs = []
ys = []
for pos in sorted(bins_directions.keys()):
xs.append(pos)
ys.append(np.mean(bins_directions[pos]))
plt.figure(figsize=(5, 4))
plt.plot(xs, ys)
plt.tight_layout()
plt.savefig('fig/bin_directions.png', dpi=400)
for t in [5, 6, 7, 8, 10, 11]:
poses = t_poses[t]
plt.figure(figsize=(5, 4))
data = pd.DataFrame.from_dict({'POS': poses})
sns.histplot(data, x='POS', kde=True)
plt.tight_layout()
plt.savefig(f'fig/{t}_pos.png', dpi=400)
for t in [5, 6, 7, 8, 10, 11, 12, 15]:
poses = t_intervals[t]
plt.figure(figsize=(5, 4))
data = | pd.DataFrame.from_dict({'interval': poses}) | pandas.DataFrame.from_dict |
from io import BytesIO
import os
from typing import Optional, Tuple
import pandas as pd
from .loader_base import MovieLensBase
class MovieLens100kDataManager(MovieLensBase):
"""The Data manager for MovieLens 100k dataset."""
@property
def DOWNLOAD_URL(self) -> str:
"http://files.grouplens.org/datasets/movielens/ml-100k.zip"
@property
def DEFAULT_PATH(self) -> str:
return os.path.expanduser("~/.ml-100k.zip")
def _read_interaction(self, byte_stream: bytes) -> pd.DataFrame:
with BytesIO(byte_stream) as ifs:
data = pd.read_csv(
ifs,
sep="\t",
header=None,
names=["user_id", "movie_id", "rating", "timestamp"],
)
data["timestamp"] = pd.to_datetime(data["timestamp"], unit="s")
return data
def load_rating_all(self) -> pd.DataFrame:
"""Load the entire rating dataset.
Returns
-------
pd.DataFrame
all the available ratings.
"""
return self._read_interaction(self.zf.read("ml-100k/u.data"))
def load_rating_predefined_split(
self,
fold: int,
) -> Tuple[pd.DataFrame, pd.DataFrame]:
"""Read the pre-defined train/test split.
Fold index ranges from 1 to 5.
Parameters
----------
fold : int
specifies the fold index.
Returns
-------
Tuple[pd.DataFrame, pd.DataFrame]
train and test dataframes.
"""
assert fold >= 1 and fold <= 5
train_path = "ml-100k/u{}.base".format(fold)
test_path = "ml-100k/u{}.test".format(fold)
df_train = self._read_interaction(self.zf.read(train_path))
df_test = self._read_interaction(self.zf.read(test_path))
return df_train, df_test
def load_user_info(self) -> pd.DataFrame:
"""load user meta information.
Returns
-------
pd.DataFrame
user infomation
"""
user_info_bytes = self.zf.read("ml-100k/u.user")
with BytesIO(user_info_bytes) as ifs:
return pd.read_csv(
ifs,
sep="|",
header=None,
names=["user_id", "age", "gender", "occupation", "zipcode"],
)
def load_movie_info(self) -> Tuple[pd.DataFrame, pd.DataFrame]:
"""load movie meta information.
Returns
-------
Tuple[pd.DataFrame, pd.DataFrame]
meta-information (id, title, release_date, url) and genre (many to many)
"""
MOVIE_COLUMNS = ["movie_id", "title", "release_date", "unk", "url"]
with BytesIO(self.zf.read("ml-100k/u.genre")) as ifs:
genres = list(pd.read_csv(ifs, sep="|", header=None)[0])
with BytesIO(self.zf.read("ml-100k/u.item")) as ifs:
df_mov = pd.read_csv(
ifs,
sep="|",
encoding="latin-1",
header=None,
)
df_mov.columns = MOVIE_COLUMNS + genres
df_mov["release_date"] = | pd.to_datetime(df_mov.release_date) | pandas.to_datetime |
from __future__ import print_function, division
# MIMIC IIIv14 on postgres 9.4
import os, psycopg2, re, sys, time, numpy as np, pandas as pd
from sklearn import metrics
from datetime import datetime
from datetime import timedelta
from os.path import isfile, isdir, splitext
import argparse
import pickle as cPickle
import numpy.random as npr
import spacy
# TODO(mmd): Upgrade to python 3 and use scispacy (requires python 3.6)
import scispacy
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
from datapackage_io_util import (
load_datapackage_schema,
load_sanitized_df_from_csv,
save_sanitized_df_to_csv,
sanitize_df,
)
from heuristic_sentence_splitter import sent_tokenize_rules
from mimic_querier import *
CURRENT_DIR = os.path.dirname(os.path.abspath(__file__))
SQL_DIR = os.path.join(CURRENT_DIR, 'SQL_Queries')
STATICS_QUERY_PATH = os.path.join(SQL_DIR, 'statics.sql')
CODES_QUERY_PATH = os.path.join(SQL_DIR, 'codes.sql')
NOTES_QUERY_PATH = os.path.join(SQL_DIR, 'notes.sql')
# Output filenames
static_filename = 'static_data.csv'
static_columns_filename = 'static_colnames.txt'
dynamic_filename = 'vitals_hourly_data.csv'
columns_filename = 'vitals_colnames.txt'
subjects_filename = 'subjects.npy'
times_filename = 'fenceposts.npy'
dynamic_hd5_filename = 'vitals_hourly_data.h5'
dynamic_hd5_filt_filename = 'all_hourly_data.h5'
codes_hd5_filename = 'C.h5'
notes_hd5_filename = 'notes.hdf' # N.h5
idx_hd5_filename = 'C_idx.h5'
outcome_filename = 'outcomes_hourly_data.csv'
outcome_hd5_filename = 'outcomes_hourly_data.h5'
outcome_columns_filename = 'outcomes_colnames.txt'
# SQL command params
ID_COLS = ['subject_id', 'hadm_id', 'icustay_id']
ITEM_COLS = ['itemid', 'label', 'LEVEL1', 'LEVEL2']
def add_outcome_indicators(out_gb):
subject_id = out_gb['subject_id'].unique()[0]
hadm_id = out_gb['hadm_id'].unique()[0]
icustay_id = out_gb['icustay_id'].unique()[0]
max_hrs = out_gb['max_hours'].unique()[0]
on_hrs = set()
for index, row in out_gb.iterrows():
on_hrs.update(range(row['starttime'], row['endtime'] + 1))
off_hrs = set(range(max_hrs + 1)) - on_hrs
on_vals = [0]*len(off_hrs) + [1]*len(on_hrs)
hours = list(off_hrs) + list(on_hrs)
return pd.DataFrame({'subject_id': subject_id, 'hadm_id':hadm_id,
'hours_in':hours, 'on':on_vals}) #icustay_id': icustay_id})
def add_blank_indicators(out_gb):
subject_id = out_gb['subject_id'].unique()[0]
hadm_id = out_gb['hadm_id'].unique()[0]
#icustay_id = out_gb['icustay_id'].unique()[0]
max_hrs = out_gb['max_hours'].unique()[0]
hrs = range(max_hrs + 1)
vals = list([0]*len(hrs))
return pd.DataFrame({'subject_id': subject_id, 'hadm_id':hadm_id,
'hours_in':hrs, 'on':vals})#'icustay_id': icustay_id,
def continuous_outcome_processing(out_data, data, icustay_timediff):
"""
Args
----
out_data : pd.DataFrame
index=None
Contains subset of icustay_id corresp to specific sessions where outcome observed.
data : pd.DataFrame
index=icustay_id
Contains full population of static demographic data
Returns
-------
out_data : pd.DataFrame
"""
out_data['intime'] = out_data['icustay_id'].map(data['intime'].to_dict())
out_data['outtime'] = out_data['icustay_id'].map(data['outtime'].to_dict())
out_data['max_hours'] = out_data['icustay_id'].map(icustay_timediff)
out_data['starttime'] = out_data['starttime'] - out_data['intime']
out_data['starttime'] = out_data.starttime.apply(lambda x: x.days*24 + x.seconds//3600)
out_data['endtime'] = out_data['endtime'] - out_data['intime']
out_data['endtime'] = out_data.endtime.apply(lambda x: x.days*24 + x.seconds//3600)
out_data = out_data.groupby(['icustay_id'])
return out_data
#
def fill_missing_times(df_by_sid_hid_itemid):
max_hour = df_by_sid_hid_itemid.index.get_level_values(max_hours)[0]
missing_hours = list(set(range(max_hour+1)) - set(df_by_sid_hid_itemid['hours_in'].unique()))
# Add rows
sid = df_by_sid_hid_itemid.subject_id.unique()[0]
hid = df_by_sid_hid_itemid.hadm_id.unique()[0]
icustay_id = df_by_sid_hid_itemid.icustay_id.unique()[0]
itemid = df_by_sid_hid_itemid.itemid.unique()[0]
filler = pd.DataFrame({'subject_id':[sid]*len(missing_hours),
'hadm_id':[hid]*len(missing_hours),
'icustay_id':[icustay_id]*len(missing_hours),
'itemid':[itemid]*len(missing_hours),
'hours_in':missing_hours,
'value':[np.nan]*len(missing_hours),
'max_hours': [max_hour]*len(missing_hours)})
return pd.concat([df_by_sid_hid_itemid, filler], axis=0)
def save_pop(
data_df, outPath, static_filename, pop_size_int,
static_data_schema, host=None
):
# Connect to local postgres version of mimic
# Serialize to disk
csv_fpath = os.path.join(outPath, static_filename)
save_sanitized_df_to_csv(csv_fpath, data_df, static_data_schema)
return data_df
# From Dave's approach!
def get_variable_mapping(mimic_mapping_filename):
# Read in the second level mapping of the itemids
var_map = pd.read_csv(mimic_mapping_filename, index_col=None)
var_map = var_map.ix[(var_map['LEVEL2'] != '') & (var_map['COUNT']>0)]
var_map = var_map.ix[(var_map['STATUS'] == 'ready')]
var_map['ITEMID'] = var_map['ITEMID'].astype(int)
return var_map
def get_variable_ranges(range_filename):
# Read in the second level mapping of the itemid, and take those values out
columns = [ 'LEVEL2', 'OUTLIER LOW', 'VALID LOW', 'IMPUTE', 'VALID HIGH', 'OUTLIER HIGH' ]
to_rename = dict(zip(columns, [ c.replace(' ', '_') for c in columns ]))
to_rename['LEVEL2'] = 'VARIABLE'
var_ranges = pd.read_csv(range_filename, index_col=None)
var_ranges = var_ranges[columns]
var_ranges.rename(columns=to_rename, inplace=True)
var_ranges = var_ranges.drop_duplicates(subset='VARIABLE', keep='first')
var_ranges['VARIABLE'] = var_ranges['VARIABLE'].str.lower()
var_ranges.set_index('VARIABLE', inplace=True)
var_ranges = var_ranges.loc[var_ranges.notnull().all(axis=1)]
return var_ranges
UNIT_CONVERSIONS = [
('weight', 'oz', None, lambda x: x/16.*0.45359237),
('weight', 'lbs', None, lambda x: x*0.45359237),
('fraction inspired oxygen', None, lambda x: x > 1, lambda x: x/100.),
('oxygen saturation', None, lambda x: x <= 1, lambda x: x*100.),
('temperature', 'f', lambda x: x > 79, lambda x: (x - 32) * 5./9),
('height', 'in', None, lambda x: x*2.54),
]
def standardize_units(X, name_col='itemid', unit_col='valueuom', value_col='value', inplace=True):
if not inplace: X = X.copy()
name_col_vals = get_values_by_name_from_df_column_or_index(X, name_col)
unit_col_vals = get_values_by_name_from_df_column_or_index(X, unit_col)
try:
name_col_vals = name_col_vals.str
unit_col_vals = unit_col_vals.str
except:
print("Can't call *.str")
print(name_col_vals)
print(unit_col_vals)
raise
#name_filter, unit_filter = [
# (lambda n: col.contains(n, case=False, na=False)) for col in (name_col_vals, unit_col_vals)
#]
# TODO(mmd): Why does the above not work, but the below does?
name_filter = lambda n: name_col_vals.contains(n, case=False, na=False)
unit_filter = lambda n: unit_col_vals.contains(n, case=False, na=False)
for name, unit, rng_check_fn, convert_fn in UNIT_CONVERSIONS:
name_filter_idx = name_filter(name)
needs_conversion_filter_idx = name_filter_idx & False
if unit is not None: needs_conversion_filter_idx |= name_filter(unit) | unit_filter(unit)
if rng_check_fn is not None: needs_conversion_filter_idx |= rng_check_fn(X[value_col])
idx = name_filter_idx & needs_conversion_filter_idx
X.loc[idx, value_col] = convert_fn(X[value_col][idx])
return X
def range_unnest(df, col, out_col_name=None, reset_index=False):
assert len(df.index.names) == 1, "Does not support multi-index."
if out_col_name is None: out_col_name = col
col_flat = pd.DataFrame(
[[i, x] for i, y in df[col].iteritems() for x in range(y+1)],
columns=[df.index.names[0], out_col_name]
)
if not reset_index: col_flat = col_flat.set_index(df.index.names[0])
return col_flat
# TODO(mmd): improve args
def save_numerics(
data, X, I, var_map, var_ranges, outPath, dynamic_filename, columns_filename, subjects_filename,
times_filename, dynamic_hd5_filename, group_by_level2, apply_var_limit, min_percent
):
assert len(data) > 0 and len(X) > 0, "Must provide some input data to process."
var_map = var_map[
['LEVEL2', 'ITEMID', 'LEVEL1']
].rename_axis(
{'LEVEL2': 'LEVEL2', 'LEVEL1': 'LEVEL1', 'ITEMID': 'itemid'}, axis=1
).set_index('itemid')
X['value'] = pd.to_numeric(X['value'], 'coerce')
X.astype({k: int for k in ID_COLS}, inplace=True)
to_hours = lambda x: max(0, x.days*24 + x.seconds // 3600)
X = X.set_index('icustay_id').join(data[['intime']])
X['hours_in'] = (X['charttime'] - X['intime']).apply(to_hours)
X.drop(columns=['charttime', 'intime'], inplace=True)
X.set_index('itemid', append=True, inplace=True)
# Pandas has a bug with the below for small X
#X = X.join([var_map, I]).set_index(['label', 'LEVEL1', 'LEVEL2'], append=True)
X = X.join(var_map).join(I).set_index(['label', 'LEVEL1', 'LEVEL2'], append=True)
standardize_units(X, name_col='LEVEL1', inplace=True)
if apply_var_limit > 0:
X = apply_variable_limits(X, var_ranges, 'LEVEL2')
group_item_cols = ['LEVEL2'] if group_by_level2 else ITEM_COLS
X = X.groupby(ID_COLS + group_item_cols + ['hours_in']).agg(['mean', 'std', 'count'])
X.columns = X.columns.droplevel(0)
X.columns.names = ['Aggregation Function']
data['max_hours'] = (data['outtime'] - data['intime']).apply(to_hours)
# TODO(mmd): Maybe can just create the index directly?
missing_hours_fill = range_unnest(data, 'max_hours', out_col_name='hours_in', reset_index=True)
missing_hours_fill['tmp'] = np.NaN
# TODO(mmd): The below is a bit wasteful.
#itemids = var_map.join(I['label']).reset_index()[group_item_cols].drop_duplicates()
#itemids['tmp'] = np.NaN
#missing_hours_fill = missing_hours_fill.merge(itemids, on='tmp', how='outer')
fill_df = data.reset_index()[ID_COLS].join(missing_hours_fill.set_index('icustay_id'), on='icustay_id')
fill_df.set_index(ID_COLS + ['hours_in'], inplace=True)
# Pivot table droups NaN columns so you lose any uniformly NaN.
X = X.unstack(level = group_item_cols)
X.columns = X.columns.reorder_levels(order=group_item_cols + ['Aggregation Function'])
#X = X.reset_index().pivot_table(index=ID_COLS + ['hours_in'], columns=group_item_cols, values=X.columns)
X = X.reindex(fill_df.index)
#X.columns = X.columns.droplevel(0).reorder_levels(order=[1, 0])
#if group_by_level2:
# X.columns.names = ['LEVEL2', 'Aggregation Function'] # Won't work with ungrouped!
#else:
# X.columns.names = ['itemid', 'Aggregation Function']
# X.columms = X.MultiIndex.from_frame(X[ITEM_COLS])
X = X.sort_index(axis=0).sort_index(axis=1)
print("Shape of X : ", X.shape)
# Turn back into columns
if columns_filename is not None:
col_names = [str(x) for x in X.columns.values]
with open(os.path.join(outPath, columns_filename), 'w') as f: f.write('\n'.join(col_names))
# Get the max time for each of the subjects so we can reconstruct!
if subjects_filename is not None:
np.save(os.path.join(outPath, subjects_filename), data['subject_id'].as_matrix())
if times_filename is not None:
np.save(os.path.join(outPath, times_filename), data['max_hours'].as_matrix())
#fix nan in count to be zero
idx = pd.IndexSlice
if group_by_level2:
X.loc[:, idx[:, 'count']] = X.loc[:, idx[:, 'count']].fillna(0)
else:
X.loc[:, idx[:,:,:,:, 'count']] = X.loc[:, idx[:,:,:,:, 'count']].fillna(0)
# Drop columns that have very few recordings
n = round((1-min_percent/100.0)*X.shape[0])
drop_col = []
for k in X.columns:
if k[-1] == 'mean':
if X[k].isnull().sum() > n:
drop_col.append(k[:-1])
X = X.drop(columns = drop_col)
########
if dynamic_filename is not None: np.save(os.path.join(outPath, dynamic_filename), X.as_matrix())
if dynamic_hd5_filename is not None: X.to_hdf(os.path.join(outPath, dynamic_hd5_filename), 'X')
return X
def save_notes(notes, outPath=None, notes_h5_filename=None):
notes_id_cols = list(set(ID_COLS).intersection(notes.columns))# + ['row_id'] TODO: what is row_id?
notes_metadata_cols = ['chartdate', 'charttime', 'category', 'description']
notes.set_index(notes_id_cols + notes_metadata_cols, inplace=True)
# preprocessing!!
# TODO(Scispacy)
# TODO(improve)
# TODO(spell checking)
# TODO(CUIs)
# TODO This takes forever. At the very least add a progress bar.
def sbd_component(doc):
for i, token in enumerate(doc[:-2]):
# define sentence start if period + titlecase token
if token.text == '.' and doc[i+1].is_title:
doc[i+1].sent_start = True
if token.text == '-' and doc[i+1].text != '-':
doc[i+1].sent_start = True
return doc
#convert de-identification text into one token
def fix_deid_tokens(text, processed_text):
deid_regex = r"\[\*\*.{0,15}.*?\*\*\]"
indexes = [m.span() for m in re.finditer(deid_regex,text,flags=re.IGNORECASE)]
for start,end in indexes:
processed_text.merge(start_idx=start,end_idx=end)
return processed_text
nlp = spacy.load('en_core_web_sm') # Maybe try lg model?
nlp.add_pipe(sbd_component, before='parser') # insert before the parser
disabled = nlp.disable_pipes('ner')
def process_sections_helper(section, note, processed_sections):
processed_section = nlp(section['sections'])
processed_section = fix_deid_tokens(section['sections'], processed_section)
processed_sections.append(processed_section)
def process_note_willie_spacy(note):
note_sections = sent_tokenize_rules(note)
processed_sections = []
section_frame = pd.DataFrame({'sections':note_sections})
section_frame.apply(process_sections_helper, args=(note,processed_sections,), axis=1)
return processed_sections
def text_process(sent, note):
sent_text = sent['sents'].text
if len(sent_text) > 0 and sent_text.strip() != '\n':
if '\n'in sent_text:
sent_text = sent_text.replace('\n', ' ')
note['text'] += sent_text + '\n'
def get_sentences(processed_section, note):
sent_frame = pd.DataFrame({'sents': list(processed_section['sections'].sents)})
sent_frame.apply(text_process, args=(note,), axis=1)
def process_frame_text(note):
try:
note_text = str(note['text'])
note['text'] = ''
processed_sections = process_note_willie_spacy(note_text)
ps = {'sections': processed_sections}
ps = pd.DataFrame(ps)
ps.apply(get_sentences, args=(note,), axis=1)
return note
except Exception as e:
print('error', e)
#raise e
notes = notes.apply(process_frame_text, axis=1)
if outPath is not None and notes_h5_filename is not None:
notes.to_hdf(os.path.join(outPath, notes_h5_filename), 'notes')
return notes
def save_icd9_codes(codes, outPath, codes_h5_filename):
codes.set_index(ID_COLS, inplace=True)
codes.to_hdf(os.path.join(outPath, codes_h5_filename), 'C')
return codes
def save_outcome(
data, querier, outPath, outcome_filename, outcome_hd5_filename,
outcome_columns_filename, outcome_schema, host=None
):
""" Retrieve outcomes from DB and save to disk
Vent and vaso are both there already - so pull the start and stop times from there! :)
Returns
-------
Y : Pandas dataframe
Obeys the outcomes data spec
"""
icuids_to_keep = get_values_by_name_from_df_column_or_index(data, 'icustay_id')
icuids_to_keep = set([str(s) for s in icuids_to_keep])
# Add a new column called intime so that we can easily subtract it off
data = data.reset_index()
data = data.set_index('icustay_id')
data['intime'] = pd.to_datetime(data['intime']) #, format="%m/%d/%Y"))
data['outtime'] = | pd.to_datetime(data['outtime']) | pandas.to_datetime |
import pandas as pd
import numpy as np
from io import StringIO
from sklearn.preprocessing import Imputer
from sklearn.preprocessing import LabelEncoder
from sklearn.preprocessing import OneHotEncoder
from sklearn.preprocessing import MinMaxScaler
from sklearn.preprocessing import StandardScaler
from sklearn.linear_model import LogisticRegression
from sklearn.neighbors import KNeighborsClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.base import clone
from sklearn.metrics import accuracy_score
from itertools import combinations
import matplotlib.pyplot as plt
# for sklearn 0.18's alternative syntax
from distutils.version import LooseVersion as Version
from sklearn import __version__ as sklearn_version
if Version(sklearn_version) < '0.18':
from sklearn.grid_search import train_test_split
else:
from sklearn.model_selection import train_test_split
#############################################################################
print(50 * '=')
print('Section: Dealing with missing data')
print(50 * '-')
csv_data = '''A,B,C,D
1.0,2.0,3.0,4.0
5.0,6.0,,8.0
10.0,11.0,12.0,'''
# If you are using Python 2.7, you need
# to convert the string to unicode:
# csv_data = unicode(csv_data)
df = pd.read_csv(StringIO(csv_data))
print(df)
print('\n\nExecuting df.isnull().sum():')
print(df.isnull().sum())
#############################################################################
print(50 * '=')
print('Section: Eliminating samples or features with missing values')
print(50 * '-')
print('\n\nExecuting df.dropna()')
print(df.dropna())
print('\n\nExecuting df.dropna(axis=1)')
print(df.dropna(axis=1))
print("\n\nExecuting df.dropna(thresh=4)")
print("(drop rows that have not at least 4 non-NaN values)")
print(df.dropna(thresh=4))
print("\n\nExecuting df.dropna(how='all')")
print("(only drop rows where all columns are NaN)")
print(df.dropna(how='all'))
print("\n\nExecuting df.dropna(subset=['C'])")
print("(only drop rows where NaN appear in specific columns (here: 'C'))")
print(df.dropna(subset=['C']))
#############################################################################
print(50 * '=')
print('Section: Imputing missing values')
print(50 * '-')
imr = Imputer(missing_values='NaN', strategy='mean', axis=0)
imr = imr.fit(df)
imputed_data = imr.transform(df.values)
print('Input Array:\n', df.values)
print('Imputed Data:\n', imputed_data)
#############################################################################
print(50 * '=')
print('Section: Handling categorical data')
print(50 * '-')
df = pd.DataFrame([['green', 'M', 10.1, 'class1'],
['red', 'L', 13.5, 'class2'],
['blue', 'XL', 15.3, 'class1']])
df.columns = ['color', 'size', 'price', 'classlabel']
print('Input Array:\n', df)
#############################################################################
print(50 * '=')
print('Section: Mapping ordinal features')
print(50 * '-')
size_mapping = {'XL': 3,
'L': 2,
'M': 1}
df['size'] = df['size'].map(size_mapping)
print('Mapping:\n', df)
inv_size_mapping = {v: k for k, v in size_mapping.items()}
df_inv = df['size'].map(inv_size_mapping)
print('\nInverse mapping:\n', df_inv)
#############################################################################
print(50 * '=')
print('Section: Encoding class labels')
print(50 * '-')
class_mapping = {label: idx for idx, label
in enumerate(np.unique(df['classlabel']))}
print('\nClass mapping:\n', class_mapping)
df['classlabel'] = df['classlabel'].map(class_mapping)
print('Mapping:\n', df)
inv_class_mapping = {v: k for k, v in class_mapping.items()}
df_inv = df['classlabel'] = df['classlabel'].map(inv_class_mapping)
print('\nInverse mapping:\n', df_inv)
class_le = LabelEncoder()
y = class_le.fit_transform(df['classlabel'].values)
print('Label encoder tansform:\n', y)
y_inv = class_le.inverse_transform(y)
print('Label encoder inverse tansform:\n', y_inv)
#############################################################################
print(50 * '=')
print('Section: Performing one hot encoding on nominal features')
print(50 * '-')
X = df[['color', 'size', 'price']].values
color_le = LabelEncoder()
X[:, 0] = color_le.fit_transform(X[:, 0])
print("Input array:\n", X)
ohe = OneHotEncoder(categorical_features=[0])
X_onehot = ohe.fit_transform(X).toarray()
print("Encoded array:\n", X_onehot)
df_dummies = | pd.get_dummies(df[['price', 'color', 'size']]) | pandas.get_dummies |
import pandas as pd
import pytest
from powersimdata.input.tests.test_helpers import check_dataframe_matches
from powersimdata.tests.mock_scenario import MockScenario
from pytest import approx
from postreise.analyze.generation.capacity import (
calculate_net_load_peak,
calculate_NLDC,
get_capacity_by_resources,
get_capacity_factor_time_series,
get_storage_capacity,
sum_capacity_by_type_zone,
)
mock_plant = {
"plant_id": [101, 102, 103],
"type": ["solar", "wind", "wind"],
"Pmax": [9000, 5000, 4000],
"zone_name": ["Washington", "Washington", "Oregon"],
"zone_id": [201, 201, 202],
}
mock_bus = {
"bus_id": [1, 2, 3, 4],
"zone_id": [201, 201, 202, 202],
}
mock_storage = {
"bus_id": [1, 2, 3],
"Pmax": [10, 10, 10],
}
mock_demand = pd.DataFrame(
{
"201": [
133335,
133630,
131964,
133614,
134298,
136032,
136260,
133757,
129943,
133440,
135238,
135242,
133018,
132799,
133861,
133275,
130403,
]
}
)
mock_pg = pd.DataFrame(
{
101: [
6459,
4084,
1015,
8004,
7373,
6161,
3999,
909,
40,
7332,
6112,
3725,
795,
7188,
6082,
3786,
838,
],
102: [
2205,
2757,
3190,
603,
1402,
1838,
1948,
2478,
3186,
1559,
1752,
2033,
2400,
1352,
2160,
2472,
3217,
],
103: [
2206,
2758,
3191,
603,
1402,
1838,
1949,
2478,
3187,
1560,
1752,
2034,
2401,
1352,
2160,
2472,
3217,
],
}
)
scenario = MockScenario(
grid_attrs={"plant": mock_plant, "bus": mock_bus, "storage_gen": mock_storage},
demand=mock_demand,
pg=mock_pg,
)
scenario.info["start_date"] = "2016-01-01 00:00:00"
scenario.info["end_date"] = "2016-01-01 10:00:00"
scenario.state.grid.zone2id = {
"Washington": 201,
"Oregon": 202,
}
def test_NLDC_calculation_wind_str(): # noqa: N802
assert calculate_NLDC(scenario, "wind", 10) == approx(3496.1)
def test_NLDC_calculation_wind_set(): # noqa: N802
assert calculate_NLDC(scenario, {"wind"}, 10) == approx(3496.1)
def test_NLDC_calculation_wind_tuple(): # noqa: N802
assert calculate_NLDC(scenario, ("wind",), 10) == approx(3496.1)
def test_NLDC_calculation_wind_list(): # noqa: N802
assert calculate_NLDC(scenario, ["wind"], 10) == approx(3496.1)
def test_NLDC_calculation_wind_5_hour(): # noqa: N802
assert calculate_NLDC(scenario, {"wind"}, hours=5) == approx(3343)
def test_NLDC_calculation_solar(): # noqa: N802
assert calculate_NLDC(scenario, {"solar"}, 10) == approx(3720)
def test_NLDC_calculation_wind_solar(): # noqa: N802
assert calculate_NLDC(scenario, ["wind", "solar"], 10) == approx(8478.9)
def test_NLDC_calculation_solar_wind(): # noqa: N802
assert calculate_NLDC(scenario, ["solar", "wind"], 10) == approx(8478.9)
def test_calculate_net_load_peak_solar():
assert calculate_net_load_peak(scenario, {"solar"}, 10) == approx(2535.2)
def test_calculate_net_load_peak_solar_5():
assert calculate_net_load_peak(scenario, {"solar"}, 5) == approx(2088.6)
def test_calculate_net_load_peak_wind():
assert calculate_net_load_peak(scenario, {"wind"}, 10) == approx(3370.8)
def test_calculate_net_load_peak_wind_5():
assert calculate_net_load_peak(scenario, {"wind"}, 5) == approx(3017.4)
def test_calculate_net_load_peak_solar_wind():
capacity_value = calculate_net_load_peak(scenario, {"solar", "wind"}, 10)
assert capacity_value == approx(8211.5)
def test_calculate_net_load_peak_solar_wind_5():
capacity_value = calculate_net_load_peak(scenario, {"solar", "wind"}, 5)
assert capacity_value == approx(7397.2)
def test_failure_scenario_type():
with pytest.raises(TypeError):
calculate_net_load_peak("scenario", ["solar", "wind"], hours=10)
def test_failure_resources_type_dict():
with pytest.raises(TypeError):
calculate_net_load_peak(scenario, {"solar": "wind"}, hours=10)
def test_failure_hours_type():
with pytest.raises(TypeError):
calculate_net_load_peak(scenario, ["solar", "wind"], hours=10.0)
def test_failure_no_resources_present():
with pytest.raises(ValueError):
calculate_net_load_peak(scenario, ["geothermal"], hours=10)
def test_failure_one_resource_not_present():
with pytest.raises(ValueError):
calculate_net_load_peak(scenario, ["wind", "geothermal"], 10)
def test_failure_no_resources():
with pytest.raises(ValueError):
calculate_net_load_peak(scenario, [], 10)
def test_failure_zero_hours():
with pytest.raises(ValueError):
calculate_net_load_peak(scenario, ["solar"], hours=0)
def test_failure_too_many_hours():
with pytest.raises(ValueError):
calculate_net_load_peak(scenario, ["solar"], hours=100)
def test_get_capacity_by_resources():
arg = [(scenario, "Oregon", "wind"), (scenario, "all", "wind")]
expected = [4000, 9000]
for a, e in zip(arg, expected):
assert get_capacity_by_resources(*a).values == e
def test_get_storage_capacity():
arg = [(scenario, "Washington"), (scenario, "all")]
expected = [20, 30]
for a, e in zip(arg, expected):
assert get_storage_capacity(*a) == e
def test_sum_capacity_by_type_zone():
expected_df = pd.DataFrame(
{201: [9000, 5000], 202: [0, 4000]},
index=["solar", "wind"],
)
check_dataframe_matches(expected_df, sum_capacity_by_type_zone(scenario))
def test_get_capacity_factor_time_series():
expected_df = | pd.DataFrame({101: mock_pg[101] / 9000}) | pandas.DataFrame |
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
# custom
import data_processing as dp
dfs, sh_int, fin_sh = dp.load_stocks(stocks=None, TAs=False, finra_shorts=False, short_interest=False, earliest_date=None)
# full_df = pd.concat([dfs[s] for s in dfs.keys()])
stocks = ['LNG', 'CHK', 'AMD']
small_df = pd.concat(dfs[s] for s in stocks)
abbrev_df = small_df[['Ticker', 'Adj_Close']]
table = abbrev_df.pivot(columns='Ticker')
table_monthly = table.resample('MS').first()#, closed='left')
# daily returns of stocks
returns_daily = table.pct_change()
# calculate monthly returns of the stocks
returns_monthly = table_monthly.pct_change()
# calculate monthly moving average of stocks
ewma_daily = returns_daily.ewm(span=30).mean()
ewma_monthly = ewma_daily.resample('MS').first()
# daily covariance of stocks (for each monthly period)
covariances = {}
for i in returns_monthly.index:
rtd_idx = returns_daily.index
mask = (rtd_idx.month == i.month) & (rtd_idx.year == i.year)
covariances[i] = returns_daily[mask].cov()
# empty dictionaries to store returns, volatility and weights of imiginary portfolios
port_returns = {}
port_volatility = {}
stock_weights = {}
sharpe_ratio = {}
max_sharpe = {}
# set the number of combinations for imaginary portfolios
num_assets = len(stocks)
num_portfolios = 5000
# get portfolio performances at each month
# populate the empty lists with each portfolios returns,risk and weights
for date in covariances.keys():
print(date)
cov = covariances[date]
for single_portfolio in range(num_portfolios):
weights = np.random.random(num_assets)
weights /= np.sum(weights)
returns = np.dot(weights, returns_monthly.loc[date])
volatility = np.sqrt(np.dot(weights.T, np.dot(cov, weights)))
port_returns.setdefault(date, []).append(returns)
port_volatility.setdefault(date, []).append(volatility)
stock_weights.setdefault(date, []).append(weights)
sharpe = returns / volatility
sharpe_ratio.setdefault(date, []).append(sharpe)
max_sharpe[date] = np.argmax(sharpe_ratio[date])
# make features and targets
targets = []
features = []
for date in covariances.keys():
best_idx = max_sharpe[date]
targets.append(stock_weights[date][best_idx])
features.append(ewma_monthly.loc[date].values)
targets = np.array(targets)
features = np.array(features)
feat_dict = {'feature_' + str(i): features[:, i] for i in range(features.shape[1])}
targ_dict = {'target_' + str(i): targets[:, i] for i in range(targets.shape[1])}
feat_targ_df = | pd.DataFrame({**feat_dict, **targ_dict}) | pandas.DataFrame |
import importlib
import os
from pathlib import Path
import pandas as pd
from keras.callbacks import EarlyStopping, ModelCheckpoint, ReduceLROnPlateau
from livelossplot.inputs.keras import PlotLossesCallback
from sklearn.model_selection import KFold
from tensorflow.keras.layers import Dropout
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from tensorflow.python.keras.layers import Conv2D, MaxPool2D, Flatten
from tensorflow.python.keras.models import Sequential
MODELS_FILE_DIR = Path(__file__).resolve().parent
MODELS_JSON_FILE_PATH = os.path.join(MODELS_FILE_DIR, 'models.json')
def get_vgg_block(num_blocks=1, input_shape=(150, 150, 3),
padding='same'):
assert 4 > num_blocks > 0, f'Number of blocks should be in range 1 and 3'
model = Sequential()
dropout_list = [.2, .2, .3, .3]
filter_list = [32, 64, 128, 128]
def dropout_value(index):
return dropout_list[index] if index <= len(dropout_list) else dropout_list[-1]
def filter_value(index):
return filter_list[index] if index <= len(filter_list) else filter_list[-1]
for i in range(num_blocks):
if i == 0:
model.add(Conv2D(filters=filter_value(i), kernel_size=3,
activation='relu', input_shape=input_shape,
padding=padding)
)
else:
model.add(Conv2D(filters=filter_value(i),
kernel_size=3, activation='relu', padding=padding))
# model.add(Conv2D(filters=filter_value(i), kernel_size=3,
# activation='relu', padding=padding))
model.add(MaxPool2D(pool_size=2, strides=2))
model.add(Dropout(dropout_value(i)))
model.add(Flatten())
return model
class CNNModel:
def __init__(self, model_name, weights='imagenet', input_shape=(224, 224, 3),
optimizer=Adam(), loss='categorical_crossentropy', metrics=None):
"""
Constructor method
:param model_name: Base model name
:param weights: Weights of the model, initialized to imagenet
"""
if metrics is None:
metrics = ['accuracy']
self.model_name = model_name
self.metrics = metrics
self.weights = weights
self.input_shape = input_shape
self.model = None
self.loss = loss
self.optimizer = optimizer
self.preprocessing_function = None
def _get_base_module(self, model_name):
"""
Get the base model based on the base model name
:param model_name: Base model name
:return: Base models' library
"""
import json
with open(MODELS_JSON_FILE_PATH) as model_json_file:
models = json.load(model_json_file)
if model_name not in models.keys():
raise Exception(f"Invalid model name, should have one of the value {models.keys()}")
self.base_model_name = models[model_name]['model_name']
model_package = models[model_name]['model_package']
print(f"{model_package}.{self.base_model_name}")
self.base_module = importlib.import_module(model_package)
def build(self):
"""
Build the CNN model for Neural Image Assessment
"""
# Load pre trained model
base_cnn = getattr(self.base_module, self.base_model_name)
self.preprocessing_function = getattr(self.base_module, 'preprocess_input')
self.model_name = base_cnn(input_shape=self.input_shape, weights=self.weights,
pooling='avg', include_top=False)
return self.model
def compile(self):
"""
Compile the Model
"""
self.model.compile(optimizer=self.optimizer, loss=self.loss, metrics=self.metrics)
def summary(self):
self.model.summary()
def get_preprocess_input(self):
return self.preprocessing_function
def train_model_from_dataframe(self, df, img_directory, model, x_col, y_col, monitor='val_accuracy',
weight_prefix=None, weights_dir=None, class_mode='category',
batch_size=32, epochs=25, verbose=0):
# Assign current directory if no directory passed to save weights
if weights_dir is None:
weights_dir = os.path.join(os.getcwd(), 'weights')
# create directory if not exists
if not os.path.isdir(weights_dir):
os.mkdir(weights_dir)
else:
assert (os.path.isdir(weights_dir), 'Invalid directory ' + weights_dir)
train_result_df = []
target_size = (self.input_shape[0], self.input_shape[1])
# Take a 5 fold cross validation
cv = KFold(n_splits=5, shuffle=True, random_state=1024)
fold = 1
# Loop for each fold
for train_index, val_index in cv.split(df[x_col]):
train_df, val_df = df.iloc[train_index], df.iloc[val_index]
# Define Generators
train_datagen = ImageDataGenerator(rescale=1.0 / 255, horizontal_flip=True,
vertical_flip=True)
train_gen = train_datagen.flow_from_dataframe(train_df, directory=img_directory,
x_col=x_col, y_col=y_col,
batch_size=batch_size, class_mode=class_mode,
target_size=target_size,
preprocessing_function=self.preprocessing_function)
valid_gen = train_datagen.flow_from_dataframe(val_df, directory=img_directory,
x_col=x_col, y_col=y_col,
batch_size=batch_size, class_mode=class_mode,
target_size=target_size,
preprocessing_function=self.preprocessing_function)
# compile model
model.compile(optimizer=self.optimizer, loss=self.loss, metrics=self.metrics)
# Define the callbacks
es = EarlyStopping(monitor=monitor, patience=4)
weight_prefix = weight_prefix if weight_prefix is not None else self.base_model_name
weight_filepath = os.path.join(weights_dir, f'{weight_prefix}_weight_best_fold_{fold}.hdf5')
print(f'\tModel Weight file : {weight_filepath}')
mckpt = ModelCheckpoint(
filepath=weight_filepath,
save_weights_only=True,
monitor=monitor,
mode="max",
save_best_only=True,
)
lr = ReduceLROnPlateau(monitor='val_loss', patience=2, verbose=1)
plot_loss = PlotLossesCallback()
# start training
history = model.fit(train_gen, validation_data=valid_gen,
epochs=epochs, callbacks=[es, mckpt, lr, plot_loss],
verbose=verbose)
result_df = pd.DataFrame(history.history)
result_df['fold'] = fold
train_result_df.append(result_df)
fold += 1
return | pd.concat(train_result_df) | pandas.concat |
import pandas as pd
import pytest
from evalml.exceptions import MethodPropertyNotFoundError
from evalml.pipelines.components import (
ComponentBase,
FeatureSelector,
RFClassifierSelectFromModel,
RFRegressorSelectFromModel
)
def make_rf_feature_selectors():
rf_classifier = RFClassifierSelectFromModel(
number_features=5,
n_estimators=10,
max_depth=7,
percent_features=0.5,
threshold=0,
)
rf_regressor = RFRegressorSelectFromModel(
number_features=5,
n_estimators=10,
max_depth=7,
percent_features=0.5,
threshold=0,
)
return rf_classifier, rf_regressor
def test_init():
rf_classifier, rf_regressor = make_rf_feature_selectors()
assert rf_classifier.name == "RF Classifier Select From Model"
assert rf_regressor.name == "RF Regressor Select From Model"
def test_component_fit(X_y_binary, X_y_multi, X_y_regression):
X_binary, y_binary = X_y_binary
X_multi, y_multi = X_y_multi
X_reg, y_reg = X_y_regression
rf_classifier, rf_regressor = make_rf_feature_selectors()
assert isinstance(rf_classifier.fit(X_binary, y_binary), ComponentBase)
assert isinstance(rf_classifier.fit(X_multi, y_multi), ComponentBase)
assert isinstance(rf_regressor.fit(X_reg, y_reg), ComponentBase)
def test_feature_selector_missing_component_obj():
class MockFeatureSelector(FeatureSelector):
name = "Mock Feature Selector"
def fit(self, X, y):
return self
mock_feature_selector = MockFeatureSelector()
mock_feature_selector.fit(pd.DataFrame(), pd.Series())
with pytest.raises(MethodPropertyNotFoundError, match="Feature selector requires a transform method or a component_obj that implements transform"):
mock_feature_selector.transform(pd.DataFrame())
with pytest.raises(MethodPropertyNotFoundError, match="Feature selector requires a transform method or a component_obj that implements transform"):
mock_feature_selector.fit_transform(pd.DataFrame())
def test_feature_selector_component_obj_missing_transform():
class MockFeatureSelector(FeatureSelector):
name = "Mock Feature Selector"
def __init__(self):
self._component_obj = None
def fit(self, X, y):
return self
mock_feature_selector = MockFeatureSelector()
mock_feature_selector.fit( | pd.DataFrame() | pandas.DataFrame |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import pandas
from pandas.compat import string_types
from pandas.core.dtypes.cast import find_common_type
from pandas.core.dtypes.common import (
is_list_like,
is_numeric_dtype,
is_datetime_or_timedelta_dtype,
is_bool_dtype,
)
from pandas.core.index import _ensure_index
from pandas.core.base import DataError
from modin.error_message import ErrorMessage
from modin.engines.base.block_partitions import BaseBlockPartitions
class PandasQueryCompiler(object):
"""This class implements the logic necessary for operating on partitions
with a Pandas backend. This logic is specific to Pandas."""
def __init__(
self,
block_partitions_object: BaseBlockPartitions,
index: pandas.Index,
columns: pandas.Index,
dtypes=None,
):
assert isinstance(block_partitions_object, BaseBlockPartitions)
self.data = block_partitions_object
self.index = index
self.columns = columns
if dtypes is not None:
self._dtype_cache = dtypes
def __constructor__(self, block_paritions_object, index, columns, dtypes=None):
"""By default, constructor method will invoke an init"""
return type(self)(block_paritions_object, index, columns, dtypes)
# Index, columns and dtypes objects
_dtype_cache = None
def _get_dtype(self):
if self._dtype_cache is None:
map_func = self._prepare_method(lambda df: df.dtypes)
def dtype_builder(df):
return df.apply(lambda row: find_common_type(row.values), axis=0)
self._dtype_cache = self.data.full_reduce(map_func, dtype_builder, 0)
self._dtype_cache.index = self.columns
elif not self._dtype_cache.index.equals(self.columns):
self._dtype_cache.index = self.columns
return self._dtype_cache
def _set_dtype(self, dtypes):
self._dtype_cache = dtypes
dtypes = property(_get_dtype, _set_dtype)
# These objects are currently not distributed.
_index_cache = None
_columns_cache = None
def _get_index(self):
return self._index_cache
def _get_columns(self):
return self._columns_cache
def _validate_set_axis(self, new_labels, old_labels):
new_labels = _ensure_index(new_labels)
old_len = len(old_labels)
new_len = len(new_labels)
if old_len != new_len:
raise ValueError(
"Length mismatch: Expected axis has %d elements, "
"new values have %d elements" % (old_len, new_len)
)
return new_labels
def _set_index(self, new_index):
if self._index_cache is None:
self._index_cache = _ensure_index(new_index)
else:
new_index = self._validate_set_axis(new_index, self._index_cache)
self._index_cache = new_index
def _set_columns(self, new_columns):
if self._columns_cache is None:
self._columns_cache = _ensure_index(new_columns)
else:
new_columns = self._validate_set_axis(new_columns, self._columns_cache)
self._columns_cache = new_columns
columns = property(_get_columns, _set_columns)
index = property(_get_index, _set_index)
# END Index, columns, and dtypes objects
def compute_index(self, axis, data_object, compute_diff=True):
"""Computes the index after a number of rows have been removed.
Note: In order for this to be used properly, the indexes must not be
changed before you compute this.
Args:
axis: The axis to extract the index from.
data_object: The new data object to extract the index from.
compute_diff: True to use `self` to compute the index from self
rather than data_object. This is used when the dimension of the
index may have changed, but the deleted rows/columns are
unknown.
Returns:
A new pandas.Index object.
"""
def pandas_index_extraction(df, axis):
if not axis:
return df.index
else:
try:
return df.columns
except AttributeError:
return pandas.Index([])
index_obj = self.index if not axis else self.columns
old_blocks = self.data if compute_diff else None
new_indices = data_object.get_indices(
axis=axis,
index_func=lambda df: pandas_index_extraction(df, axis),
old_blocks=old_blocks,
)
return index_obj[new_indices] if compute_diff else new_indices
# END Index and columns objects
# Internal methods
# These methods are for building the correct answer in a modular way.
# Please be careful when changing these!
def _prepare_method(self, pandas_func, **kwargs):
"""Prepares methods given various metadata.
Args:
pandas_func: The function to prepare.
Returns
Helper function which handles potential transpose.
"""
if self._is_transposed:
def helper(df, internal_indices=[]):
return pandas_func(df.T, **kwargs)
else:
def helper(df, internal_indices=[]):
return pandas_func(df, **kwargs)
return helper
def numeric_columns(self, include_bool=True):
"""Returns the numeric columns of the Manager.
Returns:
List of index names.
"""
columns = []
for col, dtype in zip(self.columns, self.dtypes):
if is_numeric_dtype(dtype) and (
include_bool or (not include_bool and dtype != np.bool_)
):
columns.append(col)
return columns
def numeric_function_clean_dataframe(self, axis):
"""Preprocesses numeric functions to clean dataframe and pick numeric indices.
Args:
axis: '0' if columns and '1' if rows.
Returns:
Tuple with return value(if any), indices to apply func to & cleaned Manager.
"""
result = None
query_compiler = self
# If no numeric columns and over columns, then return empty Series
if not axis and len(self.index) == 0:
result = pandas.Series(dtype=np.int64)
nonnumeric = [
col
for col, dtype in zip(self.columns, self.dtypes)
if not is_numeric_dtype(dtype)
]
if len(nonnumeric) == len(self.columns):
# If over rows and no numeric columns, return this
if axis:
result = pandas.Series([np.nan for _ in self.index])
else:
result = pandas.Series([0 for _ in self.index])
else:
query_compiler = self.drop(columns=nonnumeric)
return result, query_compiler
# END Internal methods
# Metadata modification methods
def add_prefix(self, prefix):
new_column_names = self.columns.map(lambda x: str(prefix) + str(x))
new_dtype_cache = self._dtype_cache.copy()
if new_dtype_cache is not None:
new_dtype_cache.index = new_column_names
return self.__constructor__(
self.data, self.index, new_column_names, new_dtype_cache
)
def add_suffix(self, suffix):
new_column_names = self.columns.map(lambda x: str(x) + str(suffix))
new_dtype_cache = self._dtype_cache.copy()
if new_dtype_cache is not None:
new_dtype_cache.index = new_column_names
return self.__constructor__(
self.data, self.index, new_column_names, new_dtype_cache
)
# END Metadata modification methods
# Copy
# For copy, we don't want a situation where we modify the metadata of the
# copies if we end up modifying something here. We copy all of the metadata
# to prevent that.
def copy(self):
return self.__constructor__(
self.data.copy(), self.index.copy(), self.columns.copy(), self._dtype_cache
)
# Append/Concat/Join (Not Merge)
# The append/concat/join operations should ideally never trigger remote
# compute. These operations should only ever be manipulations of the
# metadata of the resulting object. It should just be a simple matter of
# appending the other object's blocks and adding np.nan columns for the new
# columns, if needed. If new columns are added, some compute may be
# required, though it can be delayed.
#
# Currently this computation is not delayed, and it may make a copy of the
# DataFrame in memory. This can be problematic and should be fixed in the
# future. TODO (devin-petersohn): Delay reindexing
def _join_index_objects(self, axis, other_index, how, sort=True):
"""Joins a pair of index objects (columns or rows) by a given strategy.
Args:
axis: The axis index object to join (0 for columns, 1 for index).
other_index: The other_index to join on.
how: The type of join to join to make (e.g. right, left).
Returns:
Joined indices.
"""
if isinstance(other_index, list):
joined_obj = self.columns if not axis else self.index
# TODO: revisit for performance
for obj in other_index:
joined_obj = joined_obj.join(obj, how=how)
return joined_obj
if not axis:
return self.columns.join(other_index, how=how, sort=sort)
else:
return self.index.join(other_index, how=how, sort=sort)
def join(self, other, **kwargs):
"""Joins a list or two objects together
Args:
other: The other object(s) to join on.
Returns:
Joined objects.
"""
if isinstance(other, list):
return self._join_list_of_managers(other, **kwargs)
else:
return self._join_query_compiler(other, **kwargs)
def concat(self, axis, other, **kwargs):
"""Concatenates two objects together.
Args:
axis: The axis index object to join (0 for columns, 1 for index).
other: The other_index to concat with.
Returns:
Concatenated objects.
"""
return self._append_list_of_managers(other, axis, **kwargs)
def _append_list_of_managers(self, others, axis, **kwargs):
if not isinstance(others, list):
others = [others]
assert all(
isinstance(other, type(self)) for other in others
), "Different Manager objects are being used. This is not allowed"
sort = kwargs.get("sort", None)
join = kwargs.get("join", "outer")
ignore_index = kwargs.get("ignore_index", False)
# Concatenating two managers requires aligning their indices. After the
# indices are aligned, it should just be a simple concatenation of the
# `BaseBlockPartitions` objects. This should not require remote compute.
joined_axis = self._join_index_objects(
axis,
[other.columns if axis == 0 else other.index for other in others],
join,
sort=sort,
)
# Since we are concatenating a list of managers, we will align all of
# the indices based on the `joined_axis` computed above.
to_append = [other.reindex(axis ^ 1, joined_axis).data for other in others]
new_self = self.reindex(axis ^ 1, joined_axis).data
new_data = new_self.concat(axis, to_append)
if axis == 0:
# The indices will be appended to form the final index.
# If `ignore_index` is true, we create a RangeIndex that is the
# length of all of the index objects combined. This is the same
# behavior as pandas.
new_index = (
self.index.append([other.index for other in others])
if not ignore_index
else pandas.RangeIndex(
len(self.index) + sum(len(other.index) for other in others)
)
)
return self.__constructor__(new_data, new_index, joined_axis)
else:
# The columns will be appended to form the final columns.
new_columns = self.columns.append([other.columns for other in others])
return self.__constructor__(new_data, joined_axis, new_columns)
def _join_query_compiler(self, other, **kwargs):
assert isinstance(
other, type(self)
), "This method is for data manager objects only"
# Uses join's default value (though should not revert to default)
how = kwargs.get("how", "left")
sort = kwargs.get("sort", False)
lsuffix = kwargs.get("lsuffix", "")
rsuffix = kwargs.get("rsuffix", "")
joined_index = self._join_index_objects(1, other.index, how, sort=sort)
to_join = other.reindex(0, joined_index).data
new_self = self.reindex(0, joined_index).data
new_data = new_self.concat(1, to_join)
# We are using proxy DataFrame objects to build the columns based on
# the `lsuffix` and `rsuffix`.
self_proxy = pandas.DataFrame(columns=self.columns)
other_proxy = pandas.DataFrame(columns=other.columns)
new_columns = self_proxy.join(
other_proxy, lsuffix=lsuffix, rsuffix=rsuffix
).columns
return self.__constructor__(new_data, joined_index, new_columns)
def _join_list_of_managers(self, others, **kwargs):
assert isinstance(
others, list
), "This method is for lists of DataManager objects only"
assert all(
isinstance(other, type(self)) for other in others
), "Different Manager objects are being used. This is not allowed"
# Uses join's default value (though should not revert to default)
how = kwargs.get("how", "left")
sort = kwargs.get("sort", False)
lsuffix = kwargs.get("lsuffix", "")
rsuffix = kwargs.get("rsuffix", "")
joined_index = self._join_index_objects(
1, [other.index for other in others], how, sort=sort
)
to_join = [other.reindex(0, joined_index).data for other in others]
new_self = self.reindex(0, joined_index).data
new_data = new_self.concat(1, to_join)
# This stage is to efficiently get the resulting columns, including the
# suffixes.
self_proxy = pandas.DataFrame(columns=self.columns)
others_proxy = [pandas.DataFrame(columns=other.columns) for other in others]
new_columns = self_proxy.join(
others_proxy, lsuffix=lsuffix, rsuffix=rsuffix
).columns
return self.__constructor__(new_data, joined_index, new_columns)
# END Append/Concat/Join
# Inter-Data operations (e.g. add, sub)
# These operations require two DataFrames and will change the shape of the
# data if the index objects don't match. An outer join + op is performed,
# such that columns/rows that don't have an index on the other DataFrame
# result in NaN values.
def inter_manager_operations(self, other, how_to_join, func):
"""Inter-data operations (e.g. add, sub).
Args:
other: The other Manager for the operation.
how_to_join: The type of join to join to make (e.g. right, outer).
Returns:
New DataManager with new data and index.
"""
assert isinstance(
other, type(self)
), "Must have the same DataManager subclass to perform this operation"
joined_index = self._join_index_objects(1, other.index, how_to_join, sort=False)
new_columns = self._join_index_objects(
0, other.columns, how_to_join, sort=False
)
reindexed_other = other.reindex(0, joined_index).data
reindexed_self = self.reindex(0, joined_index).data
# THere is an interesting serialization anomaly that happens if we do
# not use the columns in `inter_data_op_builder` from here (e.g. if we
# pass them in). Passing them in can cause problems, so we will just
# use them from here.
self_cols = self.columns
other_cols = other.columns
def inter_data_op_builder(left, right, self_cols, other_cols, func):
left.columns = self_cols
right.columns = other_cols
result = func(left, right)
result.columns = pandas.RangeIndex(len(result.columns))
return result
new_data = reindexed_self.inter_data_operation(
1,
lambda l, r: inter_data_op_builder(l, r, self_cols, other_cols, func),
reindexed_other,
)
return self.__constructor__(new_data, joined_index, new_columns)
def _inter_df_op_handler(self, func, other, **kwargs):
"""Helper method for inter-manager and scalar operations.
Args:
func: The function to use on the Manager/scalar.
other: The other Manager/scalar.
Returns:
New DataManager with new data and index.
"""
axis = | pandas.DataFrame() | pandas.DataFrame |
# -*- coding: utf-8 -*-
"""
Tests dtype specification during parsing
for all of the parsers defined in parsers.py
"""
import pytest
import numpy as np
import pandas as pd
import pandas.util.testing as tm
from pandas import DataFrame, Series, Index, MultiIndex, Categorical
from pandas.compat import StringIO
from pandas.core.dtypes.dtypes import CategoricalDtype
from pandas.errors import ParserWarning
class DtypeTests(object):
def test_passing_dtype(self):
# see gh-6607
df = DataFrame(np.random.rand(5, 2).round(4), columns=list(
'AB'), index=['1A', '1B', '1C', '1D', '1E'])
with tm.ensure_clean('__passing_str_as_dtype__.csv') as path:
df.to_csv(path)
# see gh-3795: passing 'str' as the dtype
result = self.read_csv(path, dtype=str, index_col=0)
expected = df.astype(str)
tm.assert_frame_equal(result, expected)
# for parsing, interpret object as str
result = self.read_csv(path, dtype=object, index_col=0)
tm.assert_frame_equal(result, expected)
# we expect all object columns, so need to
# convert to test for equivalence
result = result.astype(float)
tm.assert_frame_equal(result, df)
# invalid dtype
pytest.raises(TypeError, self.read_csv, path,
dtype={'A': 'foo', 'B': 'float64'},
index_col=0)
# see gh-12048: empty frame
actual = self.read_csv(StringIO('A,B'), dtype=str)
expected = DataFrame({'A': [], 'B': []}, index=[], dtype=str)
tm.assert_frame_equal(actual, expected)
def test_pass_dtype(self):
data = """\
one,two
1,2.5
2,3.5
3,4.5
4,5.5"""
result = self.read_csv(StringIO(data), dtype={'one': 'u1', 1: 'S1'})
assert result['one'].dtype == 'u1'
assert result['two'].dtype == 'object'
def test_categorical_dtype(self):
# GH 10153
data = """a,b,c
1,a,3.4
1,a,3.4
2,b,4.5"""
expected = pd.DataFrame({'a': Categorical(['1', '1', '2']),
'b': Categorical(['a', 'a', 'b']),
'c': Categorical(['3.4', '3.4', '4.5'])})
actual = self.read_csv(StringIO(data), dtype='category')
tm.assert_frame_equal(actual, expected)
actual = self.read_csv(StringIO(data), dtype=CategoricalDtype())
tm.assert_frame_equal(actual, expected)
actual = self.read_csv(StringIO(data), dtype={'a': 'category',
'b': 'category',
'c': CategoricalDtype()})
tm.assert_frame_equal(actual, expected)
actual = self.read_csv(StringIO(data), dtype={'b': 'category'})
expected = pd.DataFrame({'a': [1, 1, 2],
'b': Categorical(['a', 'a', 'b']),
'c': [3.4, 3.4, 4.5]})
tm.assert_frame_equal(actual, expected)
actual = self.read_csv(StringIO(data), dtype={1: 'category'})
tm.assert_frame_equal(actual, expected)
# unsorted
data = """a,b,c
1,b,3.4
1,b,3.4
2,a,4.5"""
expected = pd.DataFrame({'a': Categorical(['1', '1', '2']),
'b': Categorical(['b', 'b', 'a']),
'c': Categorical(['3.4', '3.4', '4.5'])})
actual = self.read_csv(StringIO(data), dtype='category')
tm.assert_frame_equal(actual, expected)
# missing
data = """a,b,c
1,b,3.4
1,nan,3.4
2,a,4.5"""
expected = pd.DataFrame({'a': Categorical(['1', '1', '2']),
'b': Categorical(['b', np.nan, 'a']),
'c': Categorical(['3.4', '3.4', '4.5'])})
actual = self.read_csv(StringIO(data), dtype='category')
tm.assert_frame_equal(actual, expected)
@pytest.mark.slow
def test_categorical_dtype_high_cardinality_numeric(self):
# GH 18186
data = np.sort([str(i) for i in range(524289)])
expected = DataFrame({'a': Categorical(data, ordered=True)})
actual = self.read_csv(StringIO('a\n' + '\n'.join(data)),
dtype='category')
actual["a"] = actual["a"].cat.reorder_categories(
np.sort(actual.a.cat.categories), ordered=True)
tm.assert_frame_equal(actual, expected)
def test_categorical_dtype_encoding(self):
# GH 10153
pth = tm.get_data_path('unicode_series.csv')
encoding = 'latin-1'
expected = self.read_csv(pth, header=None, encoding=encoding)
expected[1] = Categorical(expected[1])
actual = self.read_csv(pth, header=None, encoding=encoding,
dtype={1: 'category'})
tm.assert_frame_equal(actual, expected)
pth = tm.get_data_path('utf16_ex.txt')
encoding = 'utf-16'
expected = self.read_table(pth, encoding=encoding)
expected = expected.apply(Categorical)
actual = self.read_table(pth, encoding=encoding, dtype='category')
tm.assert_frame_equal(actual, expected)
def test_categorical_dtype_chunksize(self):
# GH 10153
data = """a,b
1,a
1,b
1,b
2,c"""
expecteds = [pd.DataFrame({'a': [1, 1],
'b': Categorical(['a', 'b'])}),
pd.DataFrame({'a': [1, 2],
'b': Categorical(['b', 'c'])},
index=[2, 3])]
actuals = self.read_csv(StringIO(data), dtype={'b': 'category'},
chunksize=2)
for actual, expected in zip(actuals, expecteds):
tm.assert_frame_equal(actual, expected)
@pytest.mark.parametrize('ordered', [False, True])
@pytest.mark.parametrize('categories', [
['a', 'b', 'c'],
['a', 'c', 'b'],
['a', 'b', 'c', 'd'],
['c', 'b', 'a'],
])
def test_categorical_categoricaldtype(self, categories, ordered):
data = """a,b
1,a
1,b
1,b
2,c"""
expected = pd.DataFrame({
"a": [1, 1, 1, 2],
"b": Categorical(['a', 'b', 'b', 'c'],
categories=categories,
ordered=ordered)
})
dtype = {"b": CategoricalDtype(categories=categories,
ordered=ordered)}
result = self.read_csv(StringIO(data), dtype=dtype)
tm.assert_frame_equal(result, expected)
def test_categorical_categoricaldtype_unsorted(self):
data = """a,b
1,a
1,b
1,b
2,c"""
dtype = CategoricalDtype(['c', 'b', 'a'])
expected = pd.DataFrame({
'a': [1, 1, 1, 2],
'b': Categorical(['a', 'b', 'b', 'c'], categories=['c', 'b', 'a'])
})
result = self.read_csv(StringIO(data), dtype={'b': dtype})
| tm.assert_frame_equal(result, expected) | pandas.util.testing.assert_frame_equal |
from openpyxl import load_workbook
import numpy as np
import datetime as dt
import matplotlib.pyplot as plt
import pandas as pd
import matplotlib.pyplot as plt
import matplotlib.dates as dates
import plotly.graph_objs as go
import plotly.io as pio
from tabulate import tabulate
def border_msg(msg):
row = len(msg)
h = ''.join(['+'] + ['-' *row] + ['+'])
result= h + '\n'"|"+msg+"|"'\n' + h
print(result)
def striplist(l):
return([x.strip() for x in l])
def transpose(dataframe,condition): #FUNCTION THAT TRANSPOSES THE DATAFRMES CONDITION=1 (ONLY FOR TOP AGES DATAFRAME)
if condition==1:
dataframe = dataframe.iloc[:,1:].copy()
else:
dataframe = dataframe.iloc[:,:].copy()
dataframe = dataframe.T
return dataframe
def print_dataframes_education(regions,dataframe):
border_msg('EDUCATIONAL STATS')
with pd.option_context('display.max_rows', None, 'display.max_columns', None):
regions_labels_list = list(regions['Regions']) #LIST WITH THE REGIONS THAT EXIST IN FB PAGE
print(regions_labels_list)
region_df = pd.DataFrame() #NEW DF TO STORE THE FORMATED DATA
region_df['Region']=dataframe['Region']
region_df['Gender']=dataframe['Gender']
region_df['Total']=dataframe['Total']
#print(dataframe)
#HIGHER EDUCATION = SUM OF PEOPLE THAT ARE IN PHD+MASTERS+BACHERLOR+ATEI LEVEL
#MIDDLE EDUCATION = SUM OF PEOPLE THAT ARE IN UPRS+IEK+HIGH SCHOOL+ VUPS+VPS LEVEL
#LOWER EDUCATION = SUM OF PEOPLE THAT ARE IN MIDDLE SCHOOL + ELEMENTARY SCHOOL + ABANDONED +ILITERATE LEVEL
higher_edu = (dataframe['PhD']+dataframe['Masters']+dataframe['Bachelor']+dataframe['Technological Educational Institute'])
middle_edu = (dataframe['Upper Vocational Private Schools']+dataframe['Institute Vocational Training - IEK']+dataframe['High School']+dataframe['Vocational upper secondary schools']+dataframe['Vocational Private Schools'])
lower_edu = (dataframe['Middle School\n']+dataframe['Elementary School']+dataframe['Abandoned Elementary - Can Read Write']+dataframe['Illeterate'])
#CALCULATE THE PERCENTS OF EACH CATEGORY
percen_higher = ((higher_edu/dataframe['Total'])*100).round(2)
percen_middle = ((middle_edu/dataframe['Total'])*100).round(2)
percen_lower = ((lower_edu/dataframe['Total'])*100).round(2)
#FORMATION IN DF : VALUE--[PERCENT OF VALUE%]
region_df['Higher Education']= (higher_edu.map(str)+"--["+percen_higher.map(str)+ "%]")
region_df['Middle Education']= (middle_edu.map(str)+"--["+percen_middle.map(str)+ "%]")
region_df['Lower Education']= (lower_edu.map(str)+"--["+percen_lower.map(str)+ "%]")
#PERCENTAGES WILL BE USED IN FANS PERCENTAGE CORELLATION
region_df['Higher Education Percentage[%]']= percen_higher
region_df['Middle Education Percentage[%]']= percen_middle
region_df['Lower Education Percentage[%]']= percen_lower
#ALL THE REGIONS IN GREECE THAT FB CAN USE. UNUSED
#print(regions_labels_list)
#print(region_df)
#print(region_df.loc[region_df['Region']=='Eastern Macedonia and Thrace, Greece',['Gender','Total','Higher Education','Middle Education','Lower Education']])
for item in regions_labels_list: #item is a region of the region_list
print("\n\n")
print(50*"--")
#PRINT REGION
print(item)
#PRINT THE STATS OF ELSTAT FOR THIS REGION
print(region_df.loc[region_df['Region']==item,['Gender','Total','Higher Education','Middle Education','Lower Education']])
print("\nFacebook Page Correlation:\n")
#ADD A LEADING WHITESPACE TO THE NAME INORDER TO BE MATCHED WITH THE INDEXES OF THE REGIONS DF
#region = ""+item
value = regions.loc[regions['Regions']==item]['Fans'].item()
#PASS THE VALUES OF MALES AND FEMALES FOR HIGHER, MID, LOWER EDUCATION
higher_percent_male = region_df.loc[region_df['Region']==item,['Higher Education Percentage[%]']].values[0]
higher_percent_female = region_df.loc[region_df['Region']==item,['Higher Education Percentage[%]']].values[1]
middle_percent_male = region_df.loc[region_df['Region']==item,['Middle Education Percentage[%]']].values[0]
middle_percent_female = region_df.loc[region_df['Region']==item,['Middle Education Percentage[%]']].values[1]
lower_percent_male = region_df.loc[region_df['Region']==item,['Lower Education Percentage[%]']].values[0]
lower_percent_female = region_df.loc[region_df['Region']==item,['Lower Education Percentage[%]']].values[1]
#PRETTY PRINTING
print("Higher Edu: Male={a}\t Female={b}\t".format(
a=(value*higher_percent_male/100).round(2),
b=(value*higher_percent_female/100).round(2),
))
print("Middle Edu: Male={a}\t Female={b}\t".format(
a=(value*middle_percent_male/100).round(2),
b=(value*middle_percent_female/100).round(2),
))
print("Lower Edu: Male={a}\t Female={b}\t".format(
a=(value*lower_percent_male/100).round(2),
b=(value*lower_percent_female/100).round(2),
))
border_msg('END OF EDUCATIONAL STATS')
def print_dataframe_age_occupation(regions,dataframe):
border_msg('AGE AND OCCUPATIONAL STATS')
#LOAD AGE DATAFRAME
age_dataframe = | pd.read_excel('excels/lite/lite-Ages-Gender.xlsx') | pandas.read_excel |
import json
import os
import sqlite3
import pyAesCrypt
import pandas
from os import stat
from datetime import datetime
import time
import numpy
# Global variables for use by this file
bufferSize = 64*1024
password = os.environ.get('ENCRYPTIONPASSWORD')
# py -c 'import databaseAccess; databaseAccess.reset()'
def reset():
resetActivities()
resetSplits()
# py -c 'import databaseAccess; databaseAccess.resetActivities()'
def resetActivities():
decryptDatabase()
conn = sqlite3.connect('strava_temp.sqlite')
conn.row_factory = sqlite3.Row
cur = conn.cursor()
cur.execute('DROP TABLE IF EXISTS activities;')
conn.commit()
conn.close()
encryptDatabase()
# py -c 'import databaseAccess; databaseAccess.resetSplits()'
def resetSplits():
decryptDatabase()
conn = sqlite3.connect('strava_temp.sqlite')
conn.row_factory = sqlite3.Row
cur = conn.cursor()
cur.execute('DROP TABLE IF EXISTS splits;')
conn.commit()
conn.close()
encryptDatabase()
def getLastDate():
decryptDatabase()
lastActivityDate = '1970-01-01T00:00:00Z'
conn = sqlite3.connect('strava_temp.sqlite')
conn.row_factory = sqlite3.Row
cur = conn.cursor()
cur.execute("SELECT name FROM sqlite_master WHERE type='table' AND name='activities';")
result = cur.fetchone()
if result is not None:
# There is data, so let's grab the max datetime
cur.execute("SELECT MAX(start_date_local) FROM activities;")
result = cur.fetchone()
if result is not None:
# Found a max date
lastActivityDate, = result
conn.commit()
conn.close()
encryptDatabase()
return lastActivityDate
def setConfig(strava_tokens):
decryptDatabase()
print('Lets put the tokens into the database')
conn = sqlite3.connect('strava_temp.sqlite')
conn.row_factory = sqlite3.Row
cur = conn.cursor()
cur.execute('DROP TABLE IF EXISTS config;')
cur.execute('CREATE TABLE config (token_type VARCHAR, access_token VARCHAR, expires_at BIGINT, expires_in INT, refresh_token VARCHAR);')
cur.execute('INSERT INTO config (token_type, access_token, expires_at, expires_in, refresh_token) values (?, ?, ?, ?, ?);', (strava_tokens['token_type'], strava_tokens['access_token'], strava_tokens['expires_at'], strava_tokens['expires_in'], strava_tokens['refresh_token']))
conn.commit()
conn.close()
encryptDatabase()
def getConfig():
decryptDatabase()
conn = sqlite3.connect('strava_temp.sqlite')
conn.row_factory = sqlite3.Row
cur = conn.cursor()
cur.execute('SELECT * FROM config')
rows = cur.fetchall()
conn.commit()
conn.close()
encryptDatabase()
return json.loads(json.dumps( [dict(ix) for ix in rows] ))[0]
# Must be called to access the database, otherwise it can't be read
# py -c 'import databaseAccess; databaseAccess.decryptDatabase()'
def decryptDatabase():
if os.path.exists('strava_temp.sqlite'):
print('Database already decrypted! Skipping. . .')
else:
if os.path.exists('strava.sqlite'):
encFileSize = stat('strava.sqlite').st_size
with open('strava.sqlite', 'rb') as fIn:
with open('strava_temp.sqlite', 'wb') as fOut:
pyAesCrypt.decryptStream(fIn, fOut, password, bufferSize, encFileSize)
else:
print('Unable to find database to decrypt! Skipping. . .')
# Always call this after you touch the database to re-encrypt it
def encryptDatabase():
if os.path.exists('strava_temp.sqlite'):
if os.path.exists('strava.sqlite'):
os.remove('strava.sqlite')
with open('strava_temp.sqlite', 'rb') as fIn:
with open('strava.sqlite', 'wb') as fOut:
pyAesCrypt.encryptStream(fIn, fOut, password, bufferSize)
if os.path.exists('strava_temp.sqlite'):
os.remove('strava_temp.sqlite')
else:
print('Unable to find database to encrypt, skipping...')
def setActvities(activities):
decryptDatabase()
conn = sqlite3.connect('strava_temp.sqlite')
conn.row_factory = sqlite3.Row
cur = conn.cursor()
cur.execute('CREATE TABLE IF NOT EXISTS activities (id BIGINT, name NVARCHAR, upload_id BIGINT, type VARCHAR, distance NUMERIC, moving_time INT, average_speed NUMERIC, max_speed NUMERIC, total_elevation_gain NUMERIC, start_date_local DATETIME, average_cadence NUMERIC, average_watts NUMERIC, average_heartrate NUMERIC, UNIQUE(id));')
conn.commit()
for _, currentActivity in activities.iterrows():
acitivityName = currentActivity['name']
activityId = currentActivity['id']
print(f'Insert activity id [{activityId}], [{acitivityName}] to database')
cur.execute('INSERT OR IGNORE INTO activities (id, name, upload_id, type, distance, moving_time, average_speed, max_speed, total_elevation_gain, start_date_local, average_cadence, average_watts, average_heartrate) values (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?);', (activityId, acitivityName, currentActivity['upload_id'], currentActivity['type'], currentActivity['distance'], currentActivity['moving_time'], currentActivity['average_speed'], currentActivity['max_speed'], currentActivity['total_elevation_gain'], currentActivity['start_date_local'], currentActivity['average_cadence'], currentActivity['average_watts'], currentActivity['average_heartrate']))
conn.commit()
print(f'[{acitivityName}] done. . .')
conn.close()
encryptDatabase()
def setSplits(splits):
decryptDatabase()
conn = sqlite3.connect('strava_temp.sqlite')
conn.row_factory = sqlite3.Row
cur = conn.cursor()
cur.execute('CREATE TABLE IF NOT EXISTS splits (split_id INT, activity_id BIGINT, activity_date DATETIME, average_speed NUMERIC, distance NUMERIC, elapsed_time INT, elevation_difference NUMERIC, moving_time INT, pace_zone INT, split INT, average_grade_adjusted_speed NUMERIC, average_heartrate NUMERIC, UNIQUE(split_id, activity_id));')
conn.commit()
for index, row in splits.iterrows():
cur.execute('INSERT OR IGNORE INTO splits (split_id, activity_id, activity_date, average_speed, distance, elapsed_time, elevation_difference, moving_time, pace_zone, split, average_grade_adjusted_speed, average_heartrate) values (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?);', (index, row['id'], row['date'], row['average_speed'], row['distance'], row['elapsed_time'], row['elevation_difference'], row['moving_time'], row['pace_zone'], row['split'], row['average_grade_adjusted_speed'], row['average_heartrate']))
conn.commit()
conn.close()
encryptDatabase()
def getActvitiesMissingSplits():
decryptDatabase()
conn = sqlite3.connect('strava_temp.sqlite')
conn.row_factory = sqlite3.Row
cur = conn.cursor()
cur.execute("SELECT name FROM sqlite_master WHERE type='table' AND name='splits';")
result = cur.fetchone()
storedActivities = pandas.DataFrame()
if result is not None:
storedActivities = pandas.read_sql_query('SELECT * FROM activities WHERE id NOT IN (SELECT activity_id FROM splits)', conn)
else:
cur = conn.cursor()
cur.execute("SELECT name FROM sqlite_master WHERE type='table' AND name='splits';")
result = cur.fetchone()
if result is not None:
storedActivities = pandas.read_sql_query('SELECT * FROM activities', conn)
conn.commit()
conn.close()
encryptDatabase()
return storedActivities
def deleteActvitiesMissingSplits():
decryptDatabase()
conn = sqlite3.connect('strava_temp.sqlite')
conn.row_factory = sqlite3.Row
cur = conn.cursor()
cur.execute("SELECT name FROM sqlite_master WHERE type='table' AND name='splits';")
result = cur.fetchone()
if result is not None:
cur = conn.cursor()
cur.execute('DELETE FROM activities WHERE id NOT IN (SELECT activity_id FROM splits)')
conn.commit()
conn.close()
encryptDatabase()
def getSplits():
decryptDatabase()
conn = sqlite3.connect('strava_temp.sqlite')
conn.row_factory = sqlite3.Row
cur = conn.cursor()
cur.execute("SELECT name FROM sqlite_master WHERE type='table' AND name='splits';")
result = cur.fetchone()
storedSplits = pandas.DataFrame()
if result is not None:
storedSplits = pandas.read_sql_query('SELECT s.split_id, s.activity_id, s.activity_date, s.average_speed, s.distance, s.elapsed_time, s.elevation_difference, s.moving_time, s.pace_zone, s.split, s.average_grade_adjusted_speed, s.average_heartrate, a.name, a.upload_id, a.type, a.distance AS total_distance, a.moving_time AS total_moving_time, a.average_speed AS total_average_speed, a.max_speed, a.total_elevation_gain, a.start_date_local, a.average_cadence FROM splits s INNER JOIN activities a ON a.id = s.activity_id', conn)
conn.commit()
conn.close()
encryptDatabase()
return storedSplits
def getMonthSplits():
decryptDatabase()
conn = sqlite3.connect('strava_temp.sqlite')
conn.row_factory = sqlite3.Row
cur = conn.cursor()
cur.execute("SELECT name FROM sqlite_master WHERE type='table' AND name='splits';")
result = cur.fetchone()
storedSplits = pandas.DataFrame()
if result is not None:
storedSplits = pandas.read_sql_query('SELECT split_id, activity_id, STRFTIME("%Y-%m", activity_date) AS activity_month, activity_date, average_speed, distance, elapsed_time, elevation_difference, moving_time, pace_zone, split, average_grade_adjusted_speed, average_heartrate FROM splits', conn)
conn.commit()
conn.close()
encryptDatabase()
return storedSplits
def getActivityDistances():
decryptDatabase()
conn = sqlite3.connect('strava_temp.sqlite')
conn.row_factory = sqlite3.Row
cur = conn.cursor()
cur.execute("SELECT name FROM sqlite_master WHERE type='table' AND name='activities';")
result = cur.fetchone()
activityCount = pandas.DataFrame()
if result is not None:
activityCount = pandas.read_sql_query("SELECT COUNT(*) AS cnt, CAST(CAST(nearest_5miles AS INT) AS VARCHAR(1000)) || ' < ' || CAST(CAST(nearest_5miles + 5 AS INT) AS VARCHAR(1000)) AS nearest_5miles FROM (SELECT id, ROUND((distance* 0.000621371)/5,0)*5 AS nearest_5miles FROM activities) a GROUP BY nearest_5miles", conn)
conn.commit()
conn.close()
encryptDatabase()
return activityCount
def getActivityRideDistances():
decryptDatabase()
conn = sqlite3.connect('strava_temp.sqlite')
conn.row_factory = sqlite3.Row
cur = conn.cursor()
cur.execute("SELECT name FROM sqlite_master WHERE type='table' AND name='activities';")
result = cur.fetchone()
activityCount = pandas.DataFrame()
if result is not None:
activityCount = pandas.read_sql_query("SELECT COUNT(*) AS cnt,distance, CAST(CAST(nearest_5miles AS INT) AS VARCHAR(1000)) || ' < ' || CAST(CAST(nearest_5miles + 5 AS INT) AS VARCHAR(1000)) AS nearest_5miles FROM (SELECT id,distance,type, ROUND((distance* 0.000621371)/5,0)*5 AS nearest_5miles FROM activities WHERE type = 'Ride') a GROUP BY nearest_5miles,type ORDER BY distance", conn)
conn.commit()
conn.close()
encryptDatabase()
return activityCount
# py -c 'import databaseAccess; databaseAccess.getLastRun()'
def getlastActivity():
decryptDatabase()
conn = sqlite3.connect('strava_temp.sqlite')
conn.row_factory = sqlite3.Row
cur = conn.cursor()
cur.execute("SELECT name FROM sqlite_master WHERE type='table' AND name='activities';")
result = cur.fetchone()
startDateTime = datetime.strptime('Jan 1 1970', '%b %d %Y')
if result is not None:
cur = conn.cursor()
cur.execute("SELECT start_date_local FROM activities ORDER BY start_date_local DESC LIMIT 1;")
row = cur.fetchone()
cur.execute("SELECT start_date_local,name,type,id FROM activities ORDER BY start_date_local DESC LIMIT 1;")
lastActivity = cur.fetchone()
conn.commit()
conn.close()
encryptDatabase()
return lastActivity
def getActivities():
decryptDatabase()
print('Getting Activities')
conn = sqlite3.connect('strava_temp.sqlite')
conn.row_factory = sqlite3.Row
cur = conn.cursor()
cur.execute("SELECT name FROM sqlite_master WHERE type='table' AND name='activities';")
result = cur.fetchone()
storedActivities = pandas.DataFrame()
if result is not None:
storedActivities = pandas.read_sql_query('SELECT * FROM activities;', conn)
else:
storedActivities = ''
conn.commit()
conn.close()
encryptDatabase()
#print('Adding some calulated columns')
storedActivities['start_date_local'] = | pandas.to_datetime(storedActivities['start_date_local'],errors='coerce') | pandas.to_datetime |
'''
Title: QuickView
Purpose: Provides a Glance at the dataset with one line of code!
GitHub: http://github.com/avannaldas/QuickView
Author: <NAME> (Twitter @avannaldas)
'''
import pandas as _pd
import matplotlib.pyplot as _plt
from matplotlib.pyplot import cm
'''Number of rows in the dataframe'''
row_count = -1
'''Number of columns in the dataframe'''
column_count = -1
''' List of numeric column names'''
numeric_column_names = []
'''List of text column names'''
text_column_names = []
'''List of categorical column names'''
categorical_column_names = []
'''Dict of Column names and distinct categrical values'''
categorical_column_values = dict()
''' Dict of Column names and number of distinct categrical values'''
categorical_column_values_count = dict()
'''Pandas dataframe object containing rows with at least one null/na values'''
rows_with_nulls = _pd.DataFrame()
'''Dict of Column names and number of null/na values'''
columnwise_null_values_count = dict()
'''Pandas dataframe object with min, max, mean and std of all numeric columns'''
min_max_mean_std = | _pd.DataFrame() | pandas.DataFrame |
# coding: utf8
import collections
import argparse
import pprint
import json
from pathlib import Path
from .score import subtaskA, subtaskB, compute_metrics
from .utils import Collection
def evaluate_scenario(submit, gold, scenario):
submit_input = submit / ("output_scenario%i.txt" % scenario)
if not submit_input.exists():
submit_input = submit / ("input_scenario%i.txt" % scenario)
if not submit_input.exists():
raise ValueError("Input file not found in '%s'" % submit)
submit = Collection().load(submit_input)
resultA = subtaskA(gold, submit)
resultB = subtaskB(gold, submit, resultA)
results = {}
for k,v in list(resultA.items()) + list(resultB.items()):
results[k] = len(v)
metrics = compute_metrics(dict(resultA, **resultB), skipA=scenario==3, skipB=scenario==2)
results.update(metrics)
return results
def evaluate_one(submit: Path, scenario1_gold, scenario2_gold, scenario3_gold):
scenario1_submit = submit / "scenario1-main"
scenario2_submit = submit / "scenario2-taskA"
scenario3_submit = submit / "scenario3-taskB"
scenario1 = dict(evaluate_scenario(scenario1_submit, scenario1_gold, 1), submit=submit.name)
scenario2 = dict(evaluate_scenario(scenario2_submit, scenario2_gold, 2), submit=submit.name)
scenario3 = dict(evaluate_scenario(scenario3_submit, scenario3_gold, 3), submit=submit.name)
return dict(submit=submit.name,
scenario1=scenario1,
scenario2=scenario2,
scenario3=scenario3)
def main(submits:Path, gold:Path, best=False, single=False, csv=False, pretty=False, final=False):
users = collections.defaultdict(list)
if csv and not best:
raise ValueError("Error: --csv implies --best")
if final and (not csv or not best):
raise ValueError("Error: --final implies --csv and --best")
scenario1_gold = Collection().load(gold / "scenario1-main" / "input_scenario1.txt")
scenario2_gold = Collection().load(gold / "scenario2-taskA" / "input_scenario2.txt")
scenario3_gold = Collection().load(gold / "scenario3-taskB" / "input_scenario3.txt")
if single:
for subfolder in submits.iterdir():
users[submits.name].append(evaluate_one(subfolder, scenario1_gold, scenario2_gold, scenario3_gold))
else:
for userfolder in submits.iterdir():
for subfolder in userfolder.iterdir():
users[userfolder.name].append(evaluate_one(subfolder, scenario1_gold, scenario2_gold, scenario3_gold))
results = dict(users)
if best:
results = filter_best(results)
if csv:
import pandas as pd
items = []
for user, data in results.items():
userdata = dict(name=user)
for k, metrics in data.items():
userdata.update({"%s-%s"%(k,m):v for m,v in metrics.items()})
items.append(userdata)
df = | pd.DataFrame(items) | pandas.DataFrame |
from datetime import time
from decimal import Decimal
import numpy as np
import pandas as pd
import pytest
from multipledispatch.conflict import ambiguities
from pandas.api.types import CategoricalDtype, DatetimeTZDtype
import ibis
import ibis.expr.datatypes as dt
import ibis.expr.schema as sch
import ibis.expr.types as ir
def test_no_infer_ambiguities():
assert not ambiguities(dt.infer.funcs)
@pytest.mark.parametrize(
('value', 'expected_dtype'),
[
# numpy types
(np.int8(5), dt.int8),
(np.int16(-1), dt.int16),
(np.int32(2), dt.int32),
(np.int64(-5), dt.int64),
(np.uint8(5), dt.uint8),
(np.uint16(50), dt.uint16),
(np.uint32(500), dt.uint32),
(np.uint64(5000), dt.uint64),
(np.float32(5.5), dt.float32),
(np.float32(5.5), dt.float),
(np.float64(5.55), dt.float64),
(np.float64(5.55), dt.double),
(np.bool_(True), dt.boolean),
(np.bool_(False), dt.boolean),
# pandas types
(
pd.Timestamp('2015-01-01 12:00:00', tz='US/Eastern'),
dt.Timestamp('US/Eastern'),
),
],
)
def test_infer_dtype(value, expected_dtype):
assert dt.infer(value) == expected_dtype
@pytest.mark.parametrize(
('value', 'expected_dtypes'),
[
# Explicitly-defined dtype
(np.array([1, 2, 3], dtype='int8'), (dt.Array(dt.int8),)),
(np.array([1, 2, 3], dtype='int16'), (dt.Array(dt.int16),)),
(np.array([1, 2, 3], dtype='int32'), (dt.Array(dt.int32),)),
(np.array([1, 2, 3], dtype='int64'), (dt.Array(dt.int64),)),
(np.array([1, 2, 3], dtype='uint8'), (dt.Array(dt.uint8),)),
(np.array([1, 2, 3], dtype='uint16'), (dt.Array(dt.uint16),)),
(np.array([1, 2, 3], dtype='uint32'), (dt.Array(dt.uint32),)),
(np.array([1, 2, 3], dtype='uint64'), (dt.Array(dt.uint64),)),
(np.array([1.0, 2.0, 3.0], dtype='float32'), (dt.Array(dt.float32),)),
(np.array([1.0, 2.0, 3.0], dtype='float64'), (dt.Array(dt.float64),)),
(np.array([True, False, True], dtype='bool'), (dt.Array(dt.boolean),)),
# Implicit dtype
# Integer array could be inferred to int64 or int32 depending on system
(np.array([1, 2, 3]), (dt.Array(dt.int64), dt.Array(dt.int32))),
(np.array([1.0, 2.0, 3.0]), (dt.Array(dt.float64),)),
(np.array([np.nan, np.nan, np.nan]), (dt.Array(dt.float64),)),
(np.array([True, False, True]), (dt.Array(dt.boolean),)),
(np.array(['1', '2', '3']), (dt.Array(dt.string),)),
(
np.array(
[
pd.Timestamp('2015-01-01 12:00:00'),
pd.Timestamp('2015-01-02 12:00:00'),
pd.Timestamp('2015-01-03 12:00:00'),
]
),
(dt.Array(dt.timestamp),),
),
# Implied from object dtype
(np.array([1, 2, 3], dtype=object), (dt.Array(dt.int64),)),
(np.array([1.0, 2.0, 3.0], dtype=object), (dt.Array(dt.float64),)),
(np.array([True, False, True], dtype=object), (dt.Array(dt.boolean),)),
(np.array(['1', '2', '3'], dtype=object), (dt.Array(dt.string),)),
(
np.array(
[
pd.Timestamp('2015-01-01 12:00:00'),
pd.Timestamp('2015-01-02 12:00:00'),
pd.Timestamp('2015-01-03 12:00:00'),
],
dtype=object,
),
(dt.Array(dt.timestamp),),
),
],
)
def test_infer_np_array(value, expected_dtypes):
assert dt.infer(value) in expected_dtypes
@pytest.mark.parametrize(
('numpy_dtype', 'ibis_dtype'),
[
(np.bool_, dt.boolean),
(np.int8, dt.int8),
(np.int16, dt.int16),
(np.int32, dt.int32),
(np.int64, dt.int64),
(np.uint8, dt.uint8),
(np.uint16, dt.uint16),
(np.uint32, dt.uint32),
(np.uint64, dt.uint64),
(np.float16, dt.float16),
(np.float32, dt.float32),
(np.float64, dt.float64),
(np.double, dt.double),
(np.str_, dt.string),
(np.datetime64, dt.timestamp),
(np.timedelta64, dt.interval),
],
)
def test_numpy_dtype(numpy_dtype, ibis_dtype):
assert dt.dtype(np.dtype(numpy_dtype)) == ibis_dtype
@pytest.mark.parametrize(
('pandas_dtype', 'ibis_dtype'),
[
(
DatetimeTZDtype(tz='US/Eastern', unit='ns'),
dt.Timestamp('US/Eastern'),
),
(CategoricalDtype(), dt.Category()),
],
)
def test_pandas_dtype(pandas_dtype, ibis_dtype):
assert dt.dtype(pandas_dtype) == ibis_dtype
def test_series_to_ibis_literal():
values = [1, 2, 3, 4]
s = pd.Series(values)
expr = ir.as_value_expr(s)
expected = ir.sequence(list(s))
assert expr.equals(expected)
@pytest.mark.parametrize(
('col_data', 'schema_type'),
[
([True, False, False], 'bool'),
(np.int8([-3, 9, 17]), 'int8'),
(np.int16([-5, 0, 12]), 'int16'),
(np.int32([-12, 3, 25000]), 'int32'),
(np.int64([102, 67228734, -0]), 'int64'),
(np.float32([45e-3, -0.4, 99.0]), 'float'),
(np.float64([-3e43, 43.0, 10000000.0]), 'double'),
(np.uint8([3, 0, 16]), 'uint8'),
(np.uint16([5569, 1, 33]), 'uint16'),
(np.uint32([100, 0, 6]), 'uint32'),
(np.uint64([666, 2, 3]), 'uint64'),
(
[
pd.Timestamp('2010-11-01 00:01:00'),
pd.Timestamp('2010-11-01 00:02:00.1000'),
pd.Timestamp('2010-11-01 00:03:00.300000'),
],
'timestamp',
),
(
[
pd.Timedelta('1 days'),
pd.Timedelta('-1 days 2 min 3us'),
pd.Timedelta('-2 days +23:57:59.999997'),
],
"interval('ns')",
),
(['foo', 'bar', 'hello'], "string"),
(pd.Series(['a', 'b', 'c', 'a']).astype('category'), dt.Category()),
(pd.Series([b'1', b'2', b'3']), dt.string),
(pd.Series([1, 2, '3']), dt.binary),
(pd.Series([1, 2, 3.0]), dt.float64),
(
pd.Series([Decimal('1.0'), Decimal('2.0'), Decimal('3.0')]),
dt.binary,
),
(pd.Series([1 + 1j, 1 + 2j, 1 + 3j], dtype=object), dt.binary),
(
pd.Series(
[
pd.to_datetime('2010-11-01'),
pd.to_datetime('2010-11-02'),
pd.to_datetime('2010-11-03'),
]
),
dt.timestamp,
),
(pd.Series([time(1), time(2), time(3)]), dt.time),
(
pd.Series(
[
pd.Period('2011-01'),
pd.Period('2011-02'),
| pd.Period('2011-03') | pandas.Period |
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import glob
import pandas as pd
import matplotlib
import os
import math
import random
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from scipy import optimize, signal
import numpy as np
import matplotlib.pyplot as plt
from scipy import optimize
import scipy.integrate as integrate
from lmfit import models
from lmfit import Model, Parameter, report_fit
import math
import numpy as np
input_folder = 'Experiment_1-description/python_results'
output_folder = f"{input_folder}/GaussianFits" ### modify for each experiment
if not os.path.exists(output_folder):
os.makedirs(output_folder)
filename = f'{input_folder}/Cleaned_FRET_histogram_data.csv'
compiled_df = pd.read_csv(filename, header="infer")
############
############
############
############ Hsp70 low-FRET peak not constrained here
def fit_gauss_dif_constrained_nativespont(df, treatment, mu_1, sigma_1, amplitude_1, gamma_1, mu_2, sigma_2, amplitude_2, mu_3, sigma_3, amplitude_3, gamma_3):
filt_df = df[df['treatment_name'] == treatment]
bins = np.arange(-0.21, 1.1, 0.025)
inds = np.digitize(filt_df['FRET'].astype(float), bins)
xdata, ydata = np.unique(inds, return_counts=True)
ydata = ydata[1:-1] #### trim off outside range bins at the end
xdata = [np.mean(bins[x : x + 2]) for x in range(len(bins)- 1)] ##### convert bin edges to bin centres, therefore end up with one less bin
sns.lineplot(xdata, ydata)
model_1 = models.SkewedGaussianModel(prefix='m1_')
model_2 = models.GaussianModel(prefix='m2_')
model_3 = models.SkewedGaussianModel(prefix='m3_')
model = model_1 + model_2 + model_3
model_1.set_param_hint('m1_center', vary=False)
model_2.set_param_hint('m2_sigma', vary=False)
model_2.set_param_hint('m2_center', vary=False)
model_3.set_param_hint('m3_gamma', vary=False)
model_3.set_param_hint('m3_sigma', vary=False)
model_3.set_param_hint('m3_center', vary=False)
params_1 = model_1.make_params(center = mu_1, sigma = sigma_1, amplitude = amplitude_1, gamma = gamma_1, min = 0)
params_2 = model_2.make_params(center = mu_2, sigma = sigma_2, amplitude = amplitude_2, min = 0)
params_3 = model_3.make_params(center = mu_3, sigma = sigma_3, amplitude = amplitude_3, gamma = gamma_3, min = 0)
params = params_1.update(params_2)
params = params.update(params_3)
output = model.fit((ydata/np.max(ydata)), params, x=xdata)
fig = output.plot(data_kws={'markersize': 3})
paramaters = {name:output.params[name].value for name in output.params.keys()}
fitx = np.arange(-0.2, 1.2, 0.025)
fit1 = model_1.eval(x = fitx, center = paramaters['m1_center'], amplitude = abs(paramaters['m1_amplitude']), sigma = paramaters['m1_sigma'], gamma = paramaters['m1_gamma'])
fit2 = model_2.eval(x = fitx, center = paramaters['m2_center'], amplitude = abs(paramaters['m2_amplitude']), sigma = paramaters['m2_sigma'], fwhm = paramaters['m2_fwhm'])
fit3 = model_3.eval(x = fitx, center = paramaters['m3_center'], amplitude = abs(paramaters['m3_amplitude']), sigma = paramaters['m3_sigma'], gamma = paramaters['m3_gamma'])
sns.lineplot(fitx, fit1)
sns.lineplot(fitx, fit2)
sns.lineplot(fitx, fit3)
plt.show()
# Calculate area under the curve for each gaussian
aoc_m1 = paramaters['m1_amplitude']
aoc_m2 = paramaters['m2_amplitude']
aoc_m3 = paramaters['m3_amplitude']
# aoc_m1 = (paramaters['m1_amplitude']*paramaters['m1_sigma'])/0.3989
# aoc_m2 = (paramaters['m2_amplitude']*paramaters['m2_sigma'])/0.3989
# aoc_m3 = (paramaters['m3_amplitude']*paramaters['m3_sigma'])/0.3989
sum_aoc = aoc_m1 + aoc_m2 + aoc_m3
aoc_m1_percent_of_total = (aoc_m1/sum_aoc)*100
aoc_m2_percent_of_total = (aoc_m2/sum_aoc)*100
aoc_m3_percent_of_total = (aoc_m3/sum_aoc)*100
list_of_gaus_proportion = [aoc_m1_percent_of_total, aoc_m2_percent_of_total, aoc_m3_percent_of_total]
labels_of_gaus_proportion = ['m1', 'm2', 'm3']
proportion_df = pd.DataFrame([labels_of_gaus_proportion, list_of_gaus_proportion])
proportion_df.columns = proportion_df.iloc[0]
proportion_df = proportion_df.drop(0)
proportion_df['treatment'] = treatment
proportion_df.to_csv(f'{output_folder}/gaussian_proportions_for_{treatment}.csv')
return proportion_df
gaussian_kj_skew_con_nat = fit_gauss_dif_constrained_nativespont(compiled_df, 'KJ', 0.05, .1, 1, 10, .63, .1, .05, .95, .22, .03, -2.7)
gaussian_high_skew_con_na2 = fit_gauss_dif_constrained_nativespont(compiled_df, 'high', 0, .1, .1, 10, .63, .13, .5, .95, .2, 1, -2.7)
gaussian_medium_skew_con2 = fit_gauss_dif_constrained_nativespont(compiled_df, 'medium', 0.00, .1, .5, 10, .63, .13, 1, .95, .2, .5, -2.7)
gaussian_low_skew_con2 = fit_gauss_dif_constrained_nativespont(compiled_df, 'low', 0.02, .1, .5, 10, .63, .1, 1, .95, .2, .5, -2.7)
collated = pd.concat([gaussian_kj_skew_con_nat,gaussian_low_skew_con2, gaussian_medium_skew_con2, gaussian_high_skew_con_na2 ])
collated.to_csv(f'{output_folder}/histogram_proportions.csv', index = False)
###### - Do not change - these conditions are now set (if you want to play around just duplicate the function)
def fit_gauss_dif_constrained_allpeaks(df, treatment, mu_1, sigma_1, amplitude_1, gamma_1, mu_2, sigma_2, amplitude_2, mu_3, sigma_3, amplitude_3, gamma_3):
"""Set paramaters and fit histogram data to a 3-gaussian model.
Args:
df (dataframe): dataframe containing cleaned FRET values used to plot histogram
treatment (str): determines what treatment you want to look at within the dataset
mu_1 (float): set the mean of the first gaussian
sigma_1 (float): set the value of the width of the first gaussian
amplitude_1 (float): estimate for the height of the first gaussian
gamma_1 (float): sets the skew parameter - positive values result in skew to right and negative values result in skew to the left
mu_2 (float): set the mean of the second gaussian
sigma_2 (float): estimate for the width of the second gaussian
amplitude_2 (float): estimate for the height of the second gaussian
mu_3 (float): estimate for the mean of the third gaussian
sigma_3 (float): set the width of the third gaussian
amplitude_3 (float): estimate for the height of the third gaussian
gamma_3 (float): set the skew parameter - positive values result in skew to right and negative values result in skew to the left
Returns:
dataframe, plots: returns the proportional area of each gausssian relative to the sum of all three gaussians. Also shows what the fit of each gaussian looks like.
"""
filt_df = df[df['treatment_name'] == treatment]
bins = np.arange(-0.21, 1.1, 0.025)
inds = np.digitize(filt_df['FRET'].astype(float), bins)
xdata, ydata = np.unique(inds, return_counts=True)
ydata = ydata[1:-1] #### trim off outside range bins at the end
xdata = [np.mean(bins[x : x + 2]) for x in range(len(bins)- 1)] ##### convert bin edges to bin centres, therefore end up with one less bin
sns.lineplot(xdata, ydata)
model_1 = models.SkewedGaussianModel(prefix='m1_')
model_2 = models.GaussianModel(prefix='m2_')
model_3 = models.SkewedGaussianModel(prefix='m3_')
model = model_1 + model_2 + model_3
model_1.set_param_hint('m1_gamma', vary=False)
model_1.set_param_hint('m1_sigma', vary=False)
model_1.set_param_hint('m1_center', vary=False)
model_2.set_param_hint('m2_sigma', vary=False)
model_2.set_param_hint('m2_center', vary=False)
model_3.set_param_hint('m3_gamma', vary=False)
model_3.set_param_hint('m3_sigma', vary=False)
params_1 = model_1.make_params(center = mu_1, sigma = sigma_1, amplitude = amplitude_1, gamma = gamma_1, min = 0)
params_2 = model_2.make_params(center = mu_2, sigma = sigma_2, amplitude = amplitude_2, min = 0)
params_3 = model_3.make_params(center = mu_3, sigma = sigma_3, amplitude = amplitude_3, gamma = gamma_3, min = 0)
params = params_1.update(params_2)
params = params.update(params_3)
output = model.fit((ydata/np.max(ydata)), params, x=xdata)
fig = sns.set_style('darkgrid')
fig = output.plot(data_kws={'markersize': 3})
paramaters = {name:output.params[name].value for name in output.params.keys()}
fitx = np.arange(-0.2, 1.2, 0.025)
fit1 = model_1.eval(x = fitx, center = paramaters['m1_center'], amplitude = abs(paramaters['m1_amplitude']), sigma = paramaters['m1_sigma'], gamma = paramaters['m1_gamma'])
fit2 = model_2.eval(x = fitx, center = paramaters['m2_center'], amplitude = abs(paramaters['m2_amplitude']), sigma = paramaters['m2_sigma'], fwhm = paramaters['m2_fwhm'])
fit3 = model_3.eval(x = fitx, center = paramaters['m3_center'], amplitude = abs(paramaters['m3_amplitude']), sigma = paramaters['m3_sigma'], gamma = paramaters['m3_gamma'])
sns.lineplot(fitx, fit1)
sns.lineplot(fitx, fit2)
sns.lineplot(fitx, fit3)
fig.savefig(f'{output_folder}/{treatment}_gaussfit.svg', dpi = 600)
plt.show()
# Calculate area under the curve for each gaussian
aoc_m1 = paramaters['m1_amplitude']
aoc_m2 = paramaters['m2_amplitude']
aoc_m3 = paramaters['m3_amplitude']
# aoc_m1 = (paramaters['m1_amplitude']*paramaters['m1_sigma'])/0.3989
# aoc_m2 = (paramaters['m2_amplitude']*paramaters['m2_sigma'])/0.3989
# aoc_m3 = (paramaters['m3_amplitude']*paramaters['m3_sigma'])/0.3989
sum_aoc = aoc_m1 + aoc_m2 + aoc_m3
aoc_m1_percent_of_total = (aoc_m1/sum_aoc)*100
aoc_m2_percent_of_total = (aoc_m2/sum_aoc)*100
aoc_m3_percent_of_total = (aoc_m3/sum_aoc)*100
list_of_gaus_proportion = [aoc_m1_percent_of_total, aoc_m2_percent_of_total, aoc_m3_percent_of_total]
labels_of_gaus_proportion = ['m1', 'm2', 'm3']
proportion_df = pd.DataFrame([labels_of_gaus_proportion, list_of_gaus_proportion])
proportion_df.columns = proportion_df.iloc[0]
proportion_df = proportion_df.drop(0)
proportion_df['treatment'] = treatment
proportion_df.to_csv(f'{output_folder}/gaussian_proportions_for_{treatment}.csv')
return proportion_df
gaussian_medium_constrainall = fit_gauss_dif_constrained_allpeaks(compiled_df, 'medium', 0.02, .1, .5, 3, .63, .12, 1, .9, .22, .5, -2.7)
gaussian_col_high = fit_gauss_dif_constrained_allpeaks(compiled_df, 'high', 0.0, .20, .1, 10, .64, .1, .6, .9, .18, 1, -2.7)
gaussian_low_constrainall = fit_gauss_dif_constrained_allpeaks(compiled_df, 'low', 0.00, .1, .8, 2, .63, .11, .5, .95, .22, .4, -2.7)
collated_allconstrained = pd.concat([gaussian_kj_skew_con_nat,gaussian_low_constrainall, gaussian_medium_constrainall, gaussian_col_high ])
collated_allconstrained.to_csv(f'{output_folder}/histogram_proportions_constrained.csv', index = False)
##########
########## Theoretical demonstration of skew
##########
def gaussian(x, mu, sig):
return np.exp(-np.power(x - mu, 2.) / (2 * np.power(sig, 2.)))
x_values = np.linspace(0, 1, 100)
for mu, sig in [(.8, .1)]:
plt.plot(x_values, gaussian(x_values, mu, sig))
plt.show()
pair_distance = np.linspace(0, 1, 100)
forster_radius = 0.51
FRET = 1/(1 + np.power((pair_distance/forster_radius), 6.))
sns.scatterplot(x = pair_distance, y = FRET)
plt.show()
Forster_values = np.linspace(0.5, 1, 6)
for mu, sig in [(.404, .05)]:
plt.plot(FRET, gaussian(pair_distance, mu, sig))
plt.xlabel('FRET')
FRET_peaks = np.linspace(0, 1, 9)
FRET_peaks_df = pd.DataFrame(FRET_peaks)
FRET_peaks_df.columns = ['FRET_peak']
for value in FRET_peaks:
FRET_2 = 1/(1 + np.power((pair_distance/forster_radius), 6.))
for mu, sig in [(value, .1)]:
plt.plot(FRET_2, gaussian(pair_distance, mu, sig))
plt.xlabel('FRET')
plt.show()
plot1 = plt.figure()
plt.rcParams['svg.fonttype'] = 'none'
for mu, sig in [(.8, .05)]:
plt.plot(x_values, gaussian(x_values, mu, sig))
for mu, sig in [(.404, .05)]:
plt.plot(FRET, gaussian(pair_distance, mu, sig))
sns.scatterplot(x = FRET, y = pair_distance)
# plot1.legend(['Pair-distance', 'FRET transformed pair-distance', 'Pair-distance/FRET relationship'])
plot1.savefig(f'{output_folder}/comparison-with-fret-on-xaxis.svg', dpi = 600)
plt.show
pair_distance = np.linspace(0, 1, 100)
forster_radius = 0.51
FRET_peaks = np.linspace(0, 1, 9)
FRET_peaks_df = pd.DataFrame(FRET_peaks)
FRET_peaks_df.columns = ['FRET_peak']
def plot_skew(df, sigma, pair_distance = pair_distance, forster_radius = 0.51):
y_data = []
for value, dfs in df.groupby('FRET_peak'):
FRET_2 = 1/(1 + np.power((pair_distance/forster_radius), 6.))
for mu, sig in [(value, sigma)]:
y = gaussian(pair_distance, mu, sig)
y_df = | pd.DataFrame(y) | pandas.DataFrame |
# -*- coding: utf-8 -*-
# pylint: disable-msg=E1101,W0612
from datetime import datetime, timedelta
import pytest
import re
from numpy import nan as NA
import numpy as np
from numpy.random import randint
from pandas.compat import range, u
import pandas.compat as compat
from pandas import Index, Series, DataFrame, isna, MultiIndex, notna
from pandas.util.testing import assert_series_equal
import pandas.util.testing as tm
import pandas.core.strings as strings
class TestStringMethods(object):
def test_api(self):
# GH 6106, GH 9322
assert Series.str is strings.StringMethods
assert isinstance(Series(['']).str, strings.StringMethods)
# GH 9184
invalid = Series([1])
with tm.assert_raises_regex(AttributeError,
"only use .str accessor"):
invalid.str
assert not hasattr(invalid, 'str')
def test_iter(self):
# GH3638
strs = 'google', 'wikimedia', 'wikipedia', 'wikitravel'
ds = Series(strs)
for s in ds.str:
# iter must yield a Series
assert isinstance(s, Series)
# indices of each yielded Series should be equal to the index of
# the original Series
tm.assert_index_equal(s.index, ds.index)
for el in s:
# each element of the series is either a basestring/str or nan
assert isinstance(el, compat.string_types) or isna(el)
# desired behavior is to iterate until everything would be nan on the
# next iter so make sure the last element of the iterator was 'l' in
# this case since 'wikitravel' is the longest string
assert s.dropna().values.item() == 'l'
def test_iter_empty(self):
ds = Series([], dtype=object)
i, s = 100, 1
for i, s in enumerate(ds.str):
pass
# nothing to iterate over so nothing defined values should remain
# unchanged
assert i == 100
assert s == 1
def test_iter_single_element(self):
ds = Series(['a'])
for i, s in enumerate(ds.str):
pass
assert not i
assert_series_equal(ds, s)
def test_iter_object_try_string(self):
ds = Series([slice(None, randint(10), randint(10, 20)) for _ in range(
4)])
i, s = 100, 'h'
for i, s in enumerate(ds.str):
pass
assert i == 100
assert s == 'h'
def test_cat(self):
one = np.array(['a', 'a', 'b', 'b', 'c', NA], dtype=np.object_)
two = np.array(['a', NA, 'b', 'd', 'foo', NA], dtype=np.object_)
# single array
result = strings.str_cat(one)
exp = 'aabbc'
assert result == exp
result = strings.str_cat(one, na_rep='NA')
exp = 'aabbcNA'
assert result == exp
result = strings.str_cat(one, na_rep='-')
exp = 'aabbc-'
assert result == exp
result = strings.str_cat(one, sep='_', na_rep='NA')
exp = 'a_a_b_b_c_NA'
assert result == exp
result = strings.str_cat(two, sep='-')
exp = 'a-b-d-foo'
assert result == exp
# Multiple arrays
result = strings.str_cat(one, [two], na_rep='NA')
exp = np.array(['aa', 'aNA', 'bb', 'bd', 'cfoo', 'NANA'],
dtype=np.object_)
tm.assert_numpy_array_equal(result, exp)
result = strings.str_cat(one, two)
exp = np.array(['aa', NA, 'bb', 'bd', 'cfoo', NA], dtype=np.object_)
tm.assert_almost_equal(result, exp)
def test_count(self):
values = np.array(['foo', 'foofoo', NA, 'foooofooofommmfoo'],
dtype=np.object_)
result = strings.str_count(values, 'f[o]+')
exp = np.array([1, 2, NA, 4])
tm.assert_numpy_array_equal(result, exp)
result = Series(values).str.count('f[o]+')
exp = Series([1, 2, NA, 4])
assert isinstance(result, Series)
tm.assert_series_equal(result, exp)
# mixed
mixed = ['a', NA, 'b', True, datetime.today(), 'foo', None, 1, 2.]
rs = strings.str_count(mixed, 'a')
xp = np.array([1, NA, 0, NA, NA, 0, NA, NA, NA])
tm.assert_numpy_array_equal(rs, xp)
rs = Series(mixed).str.count('a')
xp = Series([1, NA, 0, NA, NA, 0, NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_series_equal(rs, xp)
# unicode
values = [u('foo'), u('foofoo'), NA, u('foooofooofommmfoo')]
result = strings.str_count(values, 'f[o]+')
exp = np.array([1, 2, NA, 4])
tm.assert_numpy_array_equal(result, exp)
result = Series(values).str.count('f[o]+')
exp = Series([1, 2, NA, 4])
assert isinstance(result, Series)
tm.assert_series_equal(result, exp)
def test_contains(self):
values = np.array(['foo', NA, 'fooommm__foo',
'mmm_', 'foommm[_]+bar'], dtype=np.object_)
pat = 'mmm[_]+'
result = strings.str_contains(values, pat)
expected = np.array([False, NA, True, True, False], dtype=np.object_)
tm.assert_numpy_array_equal(result, expected)
result = strings.str_contains(values, pat, regex=False)
expected = np.array([False, NA, False, False, True], dtype=np.object_)
tm.assert_numpy_array_equal(result, expected)
values = ['foo', 'xyz', 'fooommm__foo', 'mmm_']
result = strings.str_contains(values, pat)
expected = np.array([False, False, True, True])
assert result.dtype == np.bool_
tm.assert_numpy_array_equal(result, expected)
# case insensitive using regex
values = ['Foo', 'xYz', 'fOOomMm__fOo', 'MMM_']
result = strings.str_contains(values, 'FOO|mmm', case=False)
expected = np.array([True, False, True, True])
tm.assert_numpy_array_equal(result, expected)
# case insensitive without regex
result = strings.str_contains(values, 'foo', regex=False, case=False)
expected = np.array([True, False, True, False])
tm.assert_numpy_array_equal(result, expected)
# mixed
mixed = ['a', NA, 'b', True, datetime.today(), 'foo', None, 1, 2.]
rs = strings.str_contains(mixed, 'o')
xp = np.array([False, NA, False, NA, NA, True, NA, NA, NA],
dtype=np.object_)
tm.assert_numpy_array_equal(rs, xp)
rs = Series(mixed).str.contains('o')
xp = Series([False, NA, False, NA, NA, True, NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_series_equal(rs, xp)
# unicode
values = np.array([u'foo', NA, u'fooommm__foo', u'mmm_'],
dtype=np.object_)
pat = 'mmm[_]+'
result = strings.str_contains(values, pat)
expected = np.array([False, np.nan, True, True], dtype=np.object_)
tm.assert_numpy_array_equal(result, expected)
result = strings.str_contains(values, pat, na=False)
expected = np.array([False, False, True, True])
tm.assert_numpy_array_equal(result, expected)
values = np.array(['foo', 'xyz', 'fooommm__foo', 'mmm_'],
dtype=np.object_)
result = strings.str_contains(values, pat)
expected = np.array([False, False, True, True])
assert result.dtype == np.bool_
tm.assert_numpy_array_equal(result, expected)
# na
values = Series(['om', 'foo', np.nan])
res = values.str.contains('foo', na="foo")
assert res.loc[2] == "foo"
def test_startswith(self):
values = Series(['om', NA, 'foo_nom', 'nom', 'bar_foo', NA, 'foo'])
result = values.str.startswith('foo')
exp = Series([False, NA, True, False, False, NA, True])
tm.assert_series_equal(result, exp)
# mixed
mixed = np.array(['a', NA, 'b', True, datetime.today(),
'foo', None, 1, 2.], dtype=np.object_)
rs = strings.str_startswith(mixed, 'f')
xp = np.array([False, NA, False, NA, NA, True, NA, NA, NA],
dtype=np.object_)
tm.assert_numpy_array_equal(rs, xp)
rs = Series(mixed).str.startswith('f')
assert isinstance(rs, Series)
xp = Series([False, NA, False, NA, NA, True, NA, NA, NA])
tm.assert_series_equal(rs, xp)
# unicode
values = Series([u('om'), NA, u('foo_nom'), u('nom'), u('bar_foo'), NA,
u('foo')])
result = values.str.startswith('foo')
exp = Series([False, NA, True, False, False, NA, True])
tm.assert_series_equal(result, exp)
result = values.str.startswith('foo', na=True)
tm.assert_series_equal(result, exp.fillna(True).astype(bool))
def test_endswith(self):
values = Series(['om', NA, 'foo_nom', 'nom', 'bar_foo', NA, 'foo'])
result = values.str.endswith('foo')
exp = Series([False, NA, False, False, True, NA, True])
tm.assert_series_equal(result, exp)
# mixed
mixed = ['a', NA, 'b', True, datetime.today(), 'foo', None, 1, 2.]
rs = strings.str_endswith(mixed, 'f')
xp = np.array([False, NA, False, NA, NA, False, NA, NA, NA],
dtype=np.object_)
tm.assert_numpy_array_equal(rs, xp)
rs = Series(mixed).str.endswith('f')
xp = Series([False, NA, False, NA, NA, False, NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_series_equal(rs, xp)
# unicode
values = Series([u('om'), NA, u('foo_nom'), u('nom'), u('bar_foo'), NA,
u('foo')])
result = values.str.endswith('foo')
exp = Series([False, NA, False, False, True, NA, True])
tm.assert_series_equal(result, exp)
result = values.str.endswith('foo', na=False)
tm.assert_series_equal(result, exp.fillna(False).astype(bool))
def test_title(self):
values = Series(["FOO", "BAR", NA, "Blah", "blurg"])
result = values.str.title()
exp = Series(["Foo", "Bar", NA, "Blah", "Blurg"])
tm.assert_series_equal(result, exp)
# mixed
mixed = Series(["FOO", NA, "bar", True, datetime.today(), "blah", None,
1, 2.])
mixed = mixed.str.title()
exp = Series(["Foo", NA, "Bar", NA, NA, "Blah", NA, NA, NA])
tm.assert_almost_equal(mixed, exp)
# unicode
values = Series([u("FOO"), NA, u("bar"), u("Blurg")])
results = values.str.title()
exp = Series([u("Foo"), NA, u("Bar"), u("Blurg")])
tm.assert_series_equal(results, exp)
def test_lower_upper(self):
values = Series(['om', NA, 'nom', 'nom'])
result = values.str.upper()
exp = Series(['OM', NA, 'NOM', 'NOM'])
tm.assert_series_equal(result, exp)
result = result.str.lower()
tm.assert_series_equal(result, values)
# mixed
mixed = Series(['a', NA, 'b', True, datetime.today(), 'foo', None, 1,
2.])
mixed = mixed.str.upper()
rs = Series(mixed).str.lower()
xp = Series(['a', NA, 'b', NA, NA, 'foo', NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_series_equal(rs, xp)
# unicode
values = Series([u('om'), NA, u('nom'), u('nom')])
result = values.str.upper()
exp = Series([u('OM'), NA, u('NOM'), u('NOM')])
tm.assert_series_equal(result, exp)
result = result.str.lower()
tm.assert_series_equal(result, values)
def test_capitalize(self):
values = Series(["FOO", "BAR", NA, "Blah", "blurg"])
result = values.str.capitalize()
exp = Series(["Foo", "Bar", NA, "Blah", "Blurg"])
tm.assert_series_equal(result, exp)
# mixed
mixed = Series(["FOO", NA, "bar", True, datetime.today(), "blah", None,
1, 2.])
mixed = mixed.str.capitalize()
exp = Series(["Foo", NA, "Bar", NA, NA, "Blah", NA, NA, NA])
tm.assert_almost_equal(mixed, exp)
# unicode
values = Series([u("FOO"), NA, u("bar"), u("Blurg")])
results = values.str.capitalize()
exp = Series([u("Foo"), NA, u("Bar"), u("Blurg")])
tm.assert_series_equal(results, exp)
def test_swapcase(self):
values = Series(["FOO", "BAR", NA, "Blah", "blurg"])
result = values.str.swapcase()
exp = Series(["foo", "bar", NA, "bLAH", "BLURG"])
tm.assert_series_equal(result, exp)
# mixed
mixed = Series(["FOO", NA, "bar", True, datetime.today(), "Blah", None,
1, 2.])
mixed = mixed.str.swapcase()
exp = Series(["foo", NA, "BAR", NA, NA, "bLAH", NA, NA, NA])
tm.assert_almost_equal(mixed, exp)
# unicode
values = Series([u("FOO"), NA, u("bar"), u("Blurg")])
results = values.str.swapcase()
exp = Series([u("foo"), NA, u("BAR"), u("bLURG")])
tm.assert_series_equal(results, exp)
def test_casemethods(self):
values = ['aaa', 'bbb', 'CCC', 'Dddd', 'eEEE']
s = Series(values)
assert s.str.lower().tolist() == [v.lower() for v in values]
assert s.str.upper().tolist() == [v.upper() for v in values]
assert s.str.title().tolist() == [v.title() for v in values]
assert s.str.capitalize().tolist() == [v.capitalize() for v in values]
assert s.str.swapcase().tolist() == [v.swapcase() for v in values]
def test_replace(self):
values = Series(['fooBAD__barBAD', NA])
result = values.str.replace('BAD[_]*', '')
exp = Series(['foobar', NA])
tm.assert_series_equal(result, exp)
result = values.str.replace('BAD[_]*', '', n=1)
exp = Series(['foobarBAD', NA])
tm.assert_series_equal(result, exp)
# mixed
mixed = Series(['aBAD', NA, 'bBAD', True, datetime.today(), 'fooBAD',
None, 1, 2.])
rs = Series(mixed).str.replace('BAD[_]*', '')
xp = Series(['a', NA, 'b', NA, NA, 'foo', NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_almost_equal(rs, xp)
# unicode
values = Series([u('fooBAD__barBAD'), NA])
result = values.str.replace('BAD[_]*', '')
exp = Series([u('foobar'), NA])
tm.assert_series_equal(result, exp)
result = values.str.replace('BAD[_]*', '', n=1)
exp = Series([u('foobarBAD'), NA])
tm.assert_series_equal(result, exp)
# flags + unicode
values = Series([b"abcd,\xc3\xa0".decode("utf-8")])
exp = Series([b"abcd, \xc3\xa0".decode("utf-8")])
result = values.str.replace(r"(?<=\w),(?=\w)", ", ", flags=re.UNICODE)
tm.assert_series_equal(result, exp)
# GH 13438
for klass in (Series, Index):
for repl in (None, 3, {'a': 'b'}):
for data in (['a', 'b', None], ['a', 'b', 'c', 'ad']):
values = klass(data)
pytest.raises(TypeError, values.str.replace, 'a', repl)
def test_replace_callable(self):
# GH 15055
values = Series(['fooBAD__barBAD', NA])
# test with callable
repl = lambda m: m.group(0).swapcase()
result = values.str.replace('[a-z][A-Z]{2}', repl, n=2)
exp = Series(['foObaD__baRbaD', NA])
tm.assert_series_equal(result, exp)
# test with wrong number of arguments, raising an error
if compat.PY2:
p_err = r'takes (no|(exactly|at (least|most)) ?\d+) arguments?'
else:
p_err = (r'((takes)|(missing)) (?(2)from \d+ to )?\d+ '
r'(?(3)required )positional arguments?')
repl = lambda: None
with tm.assert_raises_regex(TypeError, p_err):
values.str.replace('a', repl)
repl = lambda m, x: None
with tm.assert_raises_regex(TypeError, p_err):
values.str.replace('a', repl)
repl = lambda m, x, y=None: None
with tm.assert_raises_regex(TypeError, p_err):
values.str.replace('a', repl)
# test regex named groups
values = Series(['Foo Bar Baz', NA])
pat = r"(?P<first>\w+) (?P<middle>\w+) (?P<last>\w+)"
repl = lambda m: m.group('middle').swapcase()
result = values.str.replace(pat, repl)
exp = Series(['bAR', NA])
tm.assert_series_equal(result, exp)
def test_replace_compiled_regex(self):
# GH 15446
values = Series(['fooBAD__barBAD', NA])
# test with compiled regex
pat = re.compile(r'BAD[_]*')
result = values.str.replace(pat, '')
exp = Series(['foobar', NA])
tm.assert_series_equal(result, exp)
# mixed
mixed = Series(['aBAD', NA, 'bBAD', True, datetime.today(), 'fooBAD',
None, 1, 2.])
rs = Series(mixed).str.replace(pat, '')
xp = Series(['a', NA, 'b', NA, NA, 'foo', NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_almost_equal(rs, xp)
# unicode
values = Series([u('fooBAD__barBAD'), NA])
result = values.str.replace(pat, '')
exp = Series([u('foobar'), NA])
tm.assert_series_equal(result, exp)
result = values.str.replace(pat, '', n=1)
exp = Series([u('foobarBAD'), NA])
tm.assert_series_equal(result, exp)
# flags + unicode
values = Series([b"abcd,\xc3\xa0".decode("utf-8")])
exp = Series([b"abcd, \xc3\xa0".decode("utf-8")])
pat = re.compile(r"(?<=\w),(?=\w)", flags=re.UNICODE)
result = values.str.replace(pat, ", ")
tm.assert_series_equal(result, exp)
# case and flags provided to str.replace will have no effect
# and will produce warnings
values = Series(['fooBAD__barBAD__bad', NA])
pat = re.compile(r'BAD[_]*')
with tm.assert_raises_regex(ValueError,
"case and flags cannot be"):
result = values.str.replace(pat, '', flags=re.IGNORECASE)
with tm.assert_raises_regex(ValueError,
"case and flags cannot be"):
result = values.str.replace(pat, '', case=False)
with tm.assert_raises_regex(ValueError,
"case and flags cannot be"):
result = values.str.replace(pat, '', case=True)
# test with callable
values = Series(['fooBAD__barBAD', NA])
repl = lambda m: m.group(0).swapcase()
pat = re.compile('[a-z][A-Z]{2}')
result = values.str.replace(pat, repl, n=2)
exp = Series(['foObaD__baRbaD', NA])
tm.assert_series_equal(result, exp)
def test_repeat(self):
values = Series(['a', 'b', NA, 'c', NA, 'd'])
result = values.str.repeat(3)
exp = Series(['aaa', 'bbb', NA, 'ccc', NA, 'ddd'])
tm.assert_series_equal(result, exp)
result = values.str.repeat([1, 2, 3, 4, 5, 6])
exp = Series(['a', 'bb', NA, 'cccc', NA, 'dddddd'])
tm.assert_series_equal(result, exp)
# mixed
mixed = Series(['a', NA, 'b', True, datetime.today(), 'foo', None, 1,
2.])
rs = Series(mixed).str.repeat(3)
xp = Series(['aaa', NA, 'bbb', NA, NA, 'foofoofoo', NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_series_equal(rs, xp)
# unicode
values = Series([u('a'), u('b'), NA, u('c'), NA, u('d')])
result = values.str.repeat(3)
exp = Series([u('aaa'), u('bbb'), NA, u('ccc'), NA, u('ddd')])
tm.assert_series_equal(result, exp)
result = values.str.repeat([1, 2, 3, 4, 5, 6])
exp = Series([u('a'), u('bb'), NA, u('cccc'), NA, u('dddddd')])
tm.assert_series_equal(result, exp)
def test_match(self):
# New match behavior introduced in 0.13
values = Series(['fooBAD__barBAD', NA, 'foo'])
result = values.str.match('.*(BAD[_]+).*(BAD)')
exp = Series([True, NA, False])
tm.assert_series_equal(result, exp)
values = Series(['fooBAD__barBAD', NA, 'foo'])
result = values.str.match('.*BAD[_]+.*BAD')
exp = Series([True, NA, False])
tm.assert_series_equal(result, exp)
# test passing as_indexer still works but is ignored
values = Series(['fooBAD__barBAD', NA, 'foo'])
exp = Series([True, NA, False])
with tm.assert_produces_warning(FutureWarning):
result = values.str.match('.*BAD[_]+.*BAD', as_indexer=True)
tm.assert_series_equal(result, exp)
with tm.assert_produces_warning(FutureWarning):
result = values.str.match('.*BAD[_]+.*BAD', as_indexer=False)
tm.assert_series_equal(result, exp)
with tm.assert_produces_warning(FutureWarning):
result = values.str.match('.*(BAD[_]+).*(BAD)', as_indexer=True)
tm.assert_series_equal(result, exp)
pytest.raises(ValueError, values.str.match, '.*(BAD[_]+).*(BAD)',
as_indexer=False)
# mixed
mixed = Series(['aBAD_BAD', NA, 'BAD_b_BAD', True, datetime.today(),
'foo', None, 1, 2.])
rs = Series(mixed).str.match('.*(BAD[_]+).*(BAD)')
xp = Series([True, NA, True, NA, NA, False, NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_series_equal(rs, xp)
# unicode
values = Series([u('fooBAD__barBAD'), NA, u('foo')])
result = values.str.match('.*(BAD[_]+).*(BAD)')
exp = Series([True, NA, False])
tm.assert_series_equal(result, exp)
# na GH #6609
res = Series(['a', 0, np.nan]).str.match('a', na=False)
exp = Series([True, False, False])
assert_series_equal(exp, res)
res = Series(['a', 0, np.nan]).str.match('a')
exp = Series([True, np.nan, np.nan])
assert_series_equal(exp, res)
def test_extract_expand_None(self):
values = Series(['fooBAD__barBAD', NA, 'foo'])
with tm.assert_produces_warning(FutureWarning):
values.str.extract('.*(BAD[_]+).*(BAD)', expand=None)
def test_extract_expand_unspecified(self):
values = Series(['fooBAD__barBAD', NA, 'foo'])
with tm.assert_produces_warning(FutureWarning):
values.str.extract('.*(BAD[_]+).*(BAD)')
def test_extract_expand_False(self):
# Contains tests like those in test_match and some others.
values = Series(['fooBAD__barBAD', NA, 'foo'])
er = [NA, NA] # empty row
result = values.str.extract('.*(BAD[_]+).*(BAD)', expand=False)
exp = DataFrame([['BAD__', 'BAD'], er, er])
tm.assert_frame_equal(result, exp)
# mixed
mixed = Series(['aBAD_BAD', NA, 'BAD_b_BAD', True, datetime.today(),
'foo', None, 1, 2.])
rs = Series(mixed).str.extract('.*(BAD[_]+).*(BAD)', expand=False)
exp = DataFrame([['BAD_', 'BAD'], er, ['BAD_', 'BAD'], er, er, er, er,
er, er])
tm.assert_frame_equal(rs, exp)
# unicode
values = Series([u('fooBAD__barBAD'), NA, u('foo')])
result = values.str.extract('.*(BAD[_]+).*(BAD)', expand=False)
exp = DataFrame([[u('BAD__'), u('BAD')], er, er])
tm.assert_frame_equal(result, exp)
# GH9980
# Index only works with one regex group since
# multi-group would expand to a frame
idx = Index(['A1', 'A2', 'A3', 'A4', 'B5'])
with tm.assert_raises_regex(ValueError, "supported"):
idx.str.extract('([AB])([123])', expand=False)
# these should work for both Series and Index
for klass in [Series, Index]:
# no groups
s_or_idx = klass(['A1', 'B2', 'C3'])
f = lambda: s_or_idx.str.extract('[ABC][123]', expand=False)
pytest.raises(ValueError, f)
# only non-capturing groups
f = lambda: s_or_idx.str.extract('(?:[AB]).*', expand=False)
pytest.raises(ValueError, f)
# single group renames series/index properly
s_or_idx = klass(['A1', 'A2'])
result = s_or_idx.str.extract(r'(?P<uno>A)\d', expand=False)
assert result.name == 'uno'
exp = klass(['A', 'A'], name='uno')
if klass == Series:
tm.assert_series_equal(result, exp)
else:
tm.assert_index_equal(result, exp)
s = Series(['A1', 'B2', 'C3'])
# one group, no matches
result = s.str.extract('(_)', expand=False)
exp = Series([NA, NA, NA], dtype=object)
tm.assert_series_equal(result, exp)
# two groups, no matches
result = s.str.extract('(_)(_)', expand=False)
exp = DataFrame([[NA, NA], [NA, NA], [NA, NA]], dtype=object)
tm.assert_frame_equal(result, exp)
# one group, some matches
result = s.str.extract('([AB])[123]', expand=False)
exp = Series(['A', 'B', NA])
tm.assert_series_equal(result, exp)
# two groups, some matches
result = s.str.extract('([AB])([123])', expand=False)
exp = DataFrame([['A', '1'], ['B', '2'], [NA, NA]])
tm.assert_frame_equal(result, exp)
# one named group
result = s.str.extract('(?P<letter>[AB])', expand=False)
exp = Series(['A', 'B', NA], name='letter')
tm.assert_series_equal(result, exp)
# two named groups
result = s.str.extract('(?P<letter>[AB])(?P<number>[123])',
expand=False)
exp = DataFrame([['A', '1'], ['B', '2'], [NA, NA]],
columns=['letter', 'number'])
tm.assert_frame_equal(result, exp)
# mix named and unnamed groups
result = s.str.extract('([AB])(?P<number>[123])', expand=False)
exp = DataFrame([['A', '1'], ['B', '2'], [NA, NA]],
columns=[0, 'number'])
tm.assert_frame_equal(result, exp)
# one normal group, one non-capturing group
result = s.str.extract('([AB])(?:[123])', expand=False)
exp = Series(['A', 'B', NA])
tm.assert_series_equal(result, exp)
# two normal groups, one non-capturing group
result = Series(['A11', 'B22', 'C33']).str.extract(
'([AB])([123])(?:[123])', expand=False)
exp = DataFrame([['A', '1'], ['B', '2'], [NA, NA]])
tm.assert_frame_equal(result, exp)
# one optional group followed by one normal group
result = Series(['A1', 'B2', '3']).str.extract(
'(?P<letter>[AB])?(?P<number>[123])', expand=False)
exp = DataFrame([['A', '1'], ['B', '2'], [NA, '3']],
columns=['letter', 'number'])
tm.assert_frame_equal(result, exp)
# one normal group followed by one optional group
result = Series(['A1', 'B2', 'C']).str.extract(
'(?P<letter>[ABC])(?P<number>[123])?', expand=False)
exp = DataFrame([['A', '1'], ['B', '2'], ['C', NA]],
columns=['letter', 'number'])
tm.assert_frame_equal(result, exp)
# GH6348
# not passing index to the extractor
def check_index(index):
data = ['A1', 'B2', 'C']
index = index[:len(data)]
s = Series(data, index=index)
result = s.str.extract(r'(\d)', expand=False)
exp = Series(['1', '2', NA], index=index)
tm.assert_series_equal(result, exp)
result = Series(data, index=index).str.extract(
r'(?P<letter>\D)(?P<number>\d)?', expand=False)
e_list = [
['A', '1'],
['B', '2'],
['C', NA]
]
exp = DataFrame(e_list, columns=['letter', 'number'], index=index)
tm.assert_frame_equal(result, exp)
i_funs = [
tm.makeStringIndex, tm.makeUnicodeIndex, tm.makeIntIndex,
tm.makeDateIndex, tm.makePeriodIndex, tm.makeRangeIndex
]
for index in i_funs:
check_index(index())
# single_series_name_is_preserved.
s = Series(['a3', 'b3', 'c2'], name='bob')
r = s.str.extract(r'(?P<sue>[a-z])', expand=False)
e = Series(['a', 'b', 'c'], name='sue')
tm.assert_series_equal(r, e)
assert r.name == e.name
def test_extract_expand_True(self):
# Contains tests like those in test_match and some others.
values = Series(['fooBAD__barBAD', NA, 'foo'])
er = [NA, NA] # empty row
result = values.str.extract('.*(BAD[_]+).*(BAD)', expand=True)
exp = DataFrame([['BAD__', 'BAD'], er, er])
tm.assert_frame_equal(result, exp)
# mixed
mixed = Series(['aBAD_BAD', NA, 'BAD_b_BAD', True, datetime.today(),
'foo', None, 1, 2.])
rs = Series(mixed).str.extract('.*(BAD[_]+).*(BAD)', expand=True)
exp = DataFrame([['BAD_', 'BAD'], er, ['BAD_', 'BAD'], er, er,
er, er, er, er])
tm.assert_frame_equal(rs, exp)
# unicode
values = Series([u('fooBAD__barBAD'), NA, u('foo')])
result = values.str.extract('.*(BAD[_]+).*(BAD)', expand=True)
exp = DataFrame([[u('BAD__'), u('BAD')], er, er])
tm.assert_frame_equal(result, exp)
# these should work for both Series and Index
for klass in [Series, Index]:
# no groups
s_or_idx = klass(['A1', 'B2', 'C3'])
f = lambda: s_or_idx.str.extract('[ABC][123]', expand=True)
pytest.raises(ValueError, f)
# only non-capturing groups
f = lambda: s_or_idx.str.extract('(?:[AB]).*', expand=True)
pytest.raises(ValueError, f)
# single group renames series/index properly
s_or_idx = klass(['A1', 'A2'])
result_df = s_or_idx.str.extract(r'(?P<uno>A)\d', expand=True)
assert isinstance(result_df, DataFrame)
result_series = result_df['uno']
assert_series_equal(result_series, Series(['A', 'A'], name='uno'))
def test_extract_series(self):
# extract should give the same result whether or not the
# series has a name.
for series_name in None, "series_name":
s = Series(['A1', 'B2', 'C3'], name=series_name)
# one group, no matches
result = s.str.extract('(_)', expand=True)
exp = DataFrame([NA, NA, NA], dtype=object)
tm.assert_frame_equal(result, exp)
# two groups, no matches
result = s.str.extract('(_)(_)', expand=True)
exp = DataFrame([[NA, NA], [NA, NA], [NA, NA]], dtype=object)
tm.assert_frame_equal(result, exp)
# one group, some matches
result = s.str.extract('([AB])[123]', expand=True)
exp = DataFrame(['A', 'B', NA])
tm.assert_frame_equal(result, exp)
# two groups, some matches
result = s.str.extract('([AB])([123])', expand=True)
exp = DataFrame([['A', '1'], ['B', '2'], [NA, NA]])
tm.assert_frame_equal(result, exp)
# one named group
result = s.str.extract('(?P<letter>[AB])', expand=True)
exp = DataFrame({"letter": ['A', 'B', NA]})
tm.assert_frame_equal(result, exp)
# two named groups
result = s.str.extract(
'(?P<letter>[AB])(?P<number>[123])',
expand=True)
e_list = [
['A', '1'],
['B', '2'],
[NA, NA]
]
exp = DataFrame(e_list, columns=['letter', 'number'])
tm.assert_frame_equal(result, exp)
# mix named and unnamed groups
result = s.str.extract('([AB])(?P<number>[123])', expand=True)
exp = DataFrame(e_list, columns=[0, 'number'])
tm.assert_frame_equal(result, exp)
# one normal group, one non-capturing group
result = s.str.extract('([AB])(?:[123])', expand=True)
exp = DataFrame(['A', 'B', NA])
tm.assert_frame_equal(result, exp)
def test_extract_optional_groups(self):
# two normal groups, one non-capturing group
result = Series(['A11', 'B22', 'C33']).str.extract(
'([AB])([123])(?:[123])', expand=True)
exp = DataFrame([['A', '1'], ['B', '2'], [NA, NA]])
tm.assert_frame_equal(result, exp)
# one optional group followed by one normal group
result = Series(['A1', 'B2', '3']).str.extract(
'(?P<letter>[AB])?(?P<number>[123])', expand=True)
e_list = [
['A', '1'],
['B', '2'],
[NA, '3']
]
exp = DataFrame(e_list, columns=['letter', 'number'])
tm.assert_frame_equal(result, exp)
# one normal group followed by one optional group
result = Series(['A1', 'B2', 'C']).str.extract(
'(?P<letter>[ABC])(?P<number>[123])?', expand=True)
e_list = [
['A', '1'],
['B', '2'],
['C', NA]
]
exp = DataFrame(e_list, columns=['letter', 'number'])
tm.assert_frame_equal(result, exp)
# GH6348
# not passing index to the extractor
def check_index(index):
data = ['A1', 'B2', 'C']
index = index[:len(data)]
result = Series(data, index=index).str.extract(
r'(\d)', expand=True)
exp = DataFrame(['1', '2', NA], index=index)
tm.assert_frame_equal(result, exp)
result = Series(data, index=index).str.extract(
r'(?P<letter>\D)(?P<number>\d)?', expand=True)
e_list = [
['A', '1'],
['B', '2'],
['C', NA]
]
exp = DataFrame(e_list, columns=['letter', 'number'], index=index)
tm.assert_frame_equal(result, exp)
i_funs = [
tm.makeStringIndex, tm.makeUnicodeIndex, tm.makeIntIndex,
tm.makeDateIndex, tm.makePeriodIndex, tm.makeRangeIndex
]
for index in i_funs:
check_index(index())
def test_extract_single_group_returns_frame(self):
# GH11386 extract should always return DataFrame, even when
# there is only one group. Prior to v0.18.0, extract returned
# Series when there was only one group in the regex.
s = Series(['a3', 'b3', 'c2'], name='series_name')
r = s.str.extract(r'(?P<letter>[a-z])', expand=True)
e = DataFrame({"letter": ['a', 'b', 'c']})
tm.assert_frame_equal(r, e)
def test_extractall(self):
subject_list = [
'<EMAIL>',
'<EMAIL>',
'<EMAIL>',
'<EMAIL> some text <EMAIL>',
'<EMAIL> some text c@d.<EMAIL> and <EMAIL>',
np.nan,
"",
]
expected_tuples = [
("dave", "google", "com"),
("tdhock5", "gmail", "com"),
("maudelaperriere", "gmail", "com"),
("rob", "gmail", "com"), ("steve", "gmail", "com"),
("a", "b", "com"), ("c", "d", "com"), ("e", "f", "com"),
]
named_pattern = r"""
(?P<user>[a-z0-9]+)
@
(?P<domain>[a-z]+)
\.
(?P<tld>[a-z]{2,4})
"""
expected_columns = ["user", "domain", "tld"]
S = Series(subject_list)
# extractall should return a DataFrame with one row for each
# match, indexed by the subject from which the match came.
expected_index = MultiIndex.from_tuples([
(0, 0),
(1, 0),
(2, 0),
(3, 0),
(3, 1),
(4, 0),
(4, 1),
(4, 2),
], names=(None, "match"))
expected_df = DataFrame(
expected_tuples, expected_index, expected_columns)
computed_df = S.str.extractall(named_pattern, re.VERBOSE)
tm.assert_frame_equal(computed_df, expected_df)
# The index of the input Series should be used to construct
# the index of the output DataFrame:
series_index = MultiIndex.from_tuples([
("single", "Dave"),
("single", "Toby"),
("single", "Maude"),
("multiple", "robAndSteve"),
("multiple", "abcdef"),
("none", "missing"),
("none", "empty"),
])
Si = Series(subject_list, series_index)
expected_index = MultiIndex.from_tuples([
("single", "Dave", 0),
("single", "Toby", 0),
("single", "Maude", 0),
("multiple", "robAndSteve", 0),
("multiple", "robAndSteve", 1),
("multiple", "abcdef", 0),
("multiple", "abcdef", 1),
("multiple", "abcdef", 2),
], names=(None, None, "match"))
expected_df = DataFrame(
expected_tuples, expected_index, expected_columns)
computed_df = Si.str.extractall(named_pattern, re.VERBOSE)
tm.assert_frame_equal(computed_df, expected_df)
# MultiIndexed subject with names.
Sn = Series(subject_list, series_index)
Sn.index.names = ("matches", "description")
expected_index.names = ("matches", "description", "match")
expected_df = DataFrame(
expected_tuples, expected_index, expected_columns)
computed_df = Sn.str.extractall(named_pattern, re.VERBOSE)
tm.assert_frame_equal(computed_df, expected_df)
# optional groups.
subject_list = ['', 'A1', '32']
named_pattern = '(?P<letter>[AB])?(?P<number>[123])'
computed_df = Series(subject_list).str.extractall(named_pattern)
expected_index = MultiIndex.from_tuples([
(1, 0),
(2, 0),
(2, 1),
], names=(None, "match"))
expected_df = DataFrame([
('A', '1'),
(NA, '3'),
(NA, '2'),
], expected_index, columns=['letter', 'number'])
tm.assert_frame_equal(computed_df, expected_df)
# only one of two groups has a name.
pattern = '([AB])?(?P<number>[123])'
computed_df = Series(subject_list).str.extractall(pattern)
expected_df = DataFrame([
('A', '1'),
(NA, '3'),
(NA, '2'),
], expected_index, columns=[0, 'number'])
tm.assert_frame_equal(computed_df, expected_df)
def test_extractall_single_group(self):
# extractall(one named group) returns DataFrame with one named
# column.
s = Series(['a3', 'b3', 'd4c2'], name='series_name')
r = s.str.extractall(r'(?P<letter>[a-z])')
i = MultiIndex.from_tuples([
(0, 0),
(1, 0),
(2, 0),
(2, 1),
], names=(None, "match"))
e = DataFrame({"letter": ['a', 'b', 'd', 'c']}, i)
tm.assert_frame_equal(r, e)
# extractall(one un-named group) returns DataFrame with one
# un-named column.
r = s.str.extractall(r'([a-z])')
e = DataFrame(['a', 'b', 'd', 'c'], i)
tm.assert_frame_equal(r, e)
def test_extractall_single_group_with_quantifier(self):
# extractall(one un-named group with quantifier) returns
# DataFrame with one un-named column (GH13382).
s = Series(['ab3', 'abc3', 'd4cd2'], name='series_name')
r = s.str.extractall(r'([a-z]+)')
i = MultiIndex.from_tuples([
(0, 0),
(1, 0),
(2, 0),
(2, 1),
], names=(None, "match"))
e = DataFrame(['ab', 'abc', 'd', 'cd'], i)
tm.assert_frame_equal(r, e)
def test_extractall_no_matches(self):
s = Series(['a3', 'b3', 'd4c2'], name='series_name')
# one un-named group.
r = s.str.extractall('(z)')
e = DataFrame(columns=[0])
tm.assert_frame_equal(r, e)
# two un-named groups.
r = s.str.extractall('(z)(z)')
e = DataFrame(columns=[0, 1])
tm.assert_frame_equal(r, e)
# one named group.
r = s.str.extractall('(?P<first>z)')
e = DataFrame(columns=["first"])
tm.assert_frame_equal(r, e)
# two named groups.
r = s.str.extractall('(?P<first>z)(?P<second>z)')
e = DataFrame(columns=["first", "second"])
tm.assert_frame_equal(r, e)
# one named, one un-named.
r = s.str.extractall('(z)(?P<second>z)')
e = DataFrame(columns=[0,
"second"])
tm.assert_frame_equal(r, e)
def test_extractall_stringindex(self):
s = Series(["a1a2", "b1", "c1"], name='xxx')
res = s.str.extractall(r"[ab](?P<digit>\d)")
exp_idx = MultiIndex.from_tuples([(0, 0), (0, 1), (1, 0)],
names=[None, 'match'])
exp = DataFrame({'digit': ["1", "2", "1"]}, index=exp_idx)
tm.assert_frame_equal(res, exp)
# index should return the same result as the default index without name
# thus index.name doesn't affect to the result
for idx in [Index(["a1a2", "b1", "c1"]),
Index(["a1a2", "b1", "c1"], name='xxx')]:
res = idx.str.extractall(r"[ab](?P<digit>\d)")
tm.assert_frame_equal(res, exp)
s = Series(["a1a2", "b1", "c1"], name='s_name',
index=Index(["XX", "yy", "zz"], name='idx_name'))
res = s.str.extractall(r"[ab](?P<digit>\d)")
exp_idx = MultiIndex.from_tuples([("XX", 0), ("XX", 1), ("yy", 0)],
names=["idx_name", 'match'])
exp = DataFrame({'digit': ["1", "2", "1"]}, index=exp_idx)
tm.assert_frame_equal(res, exp)
def test_extractall_errors(self):
# Does not make sense to use extractall with a regex that has
# no capture groups. (it returns DataFrame with one column for
# each capture group)
s = Series(['a3', 'b3', 'd4c2'], name='series_name')
with tm.assert_raises_regex(ValueError, "no capture groups"):
s.str.extractall(r'[a-z]')
def test_extract_index_one_two_groups(self):
s = Series(['a3', 'b3', 'd4c2'], index=["A3", "B3", "D4"],
name='series_name')
r = s.index.str.extract(r'([A-Z])', expand=True)
e = DataFrame(['A', "B", "D"])
tm.assert_frame_equal(r, e)
# Prior to v0.18.0, index.str.extract(regex with one group)
# returned Index. With more than one group, extract raised an
# error (GH9980). Now extract always returns DataFrame.
r = s.index.str.extract(
r'(?P<letter>[A-Z])(?P<digit>[0-9])', expand=True)
e_list = [
("A", "3"),
("B", "3"),
("D", "4"),
]
e = DataFrame(e_list, columns=["letter", "digit"])
tm.assert_frame_equal(r, e)
def test_extractall_same_as_extract(self):
s = Series(['a3', 'b3', 'c2'], name='series_name')
pattern_two_noname = r'([a-z])([0-9])'
extract_two_noname = s.str.extract(pattern_two_noname, expand=True)
has_multi_index = s.str.extractall(pattern_two_noname)
no_multi_index = has_multi_index.xs(0, level="match")
tm.assert_frame_equal(extract_two_noname, no_multi_index)
pattern_two_named = r'(?P<letter>[a-z])(?P<digit>[0-9])'
extract_two_named = s.str.extract(pattern_two_named, expand=True)
has_multi_index = s.str.extractall(pattern_two_named)
no_multi_index = has_multi_index.xs(0, level="match")
tm.assert_frame_equal(extract_two_named, no_multi_index)
pattern_one_named = r'(?P<group_name>[a-z])'
extract_one_named = s.str.extract(pattern_one_named, expand=True)
has_multi_index = s.str.extractall(pattern_one_named)
no_multi_index = has_multi_index.xs(0, level="match")
tm.assert_frame_equal(extract_one_named, no_multi_index)
pattern_one_noname = r'([a-z])'
extract_one_noname = s.str.extract(pattern_one_noname, expand=True)
has_multi_index = s.str.extractall(pattern_one_noname)
no_multi_index = has_multi_index.xs(0, level="match")
tm.assert_frame_equal(extract_one_noname, no_multi_index)
def test_extractall_same_as_extract_subject_index(self):
# same as above tests, but s has an MultiIndex.
i = MultiIndex.from_tuples([
("A", "first"),
("B", "second"),
("C", "third"),
], names=("capital", "ordinal"))
s = Series(['a3', 'b3', 'c2'], i, name='series_name')
pattern_two_noname = r'([a-z])([0-9])'
extract_two_noname = s.str.extract(pattern_two_noname, expand=True)
has_match_index = s.str.extractall(pattern_two_noname)
no_match_index = has_match_index.xs(0, level="match")
tm.assert_frame_equal(extract_two_noname, no_match_index)
pattern_two_named = r'(?P<letter>[a-z])(?P<digit>[0-9])'
extract_two_named = s.str.extract(pattern_two_named, expand=True)
has_match_index = s.str.extractall(pattern_two_named)
no_match_index = has_match_index.xs(0, level="match")
tm.assert_frame_equal(extract_two_named, no_match_index)
pattern_one_named = r'(?P<group_name>[a-z])'
extract_one_named = s.str.extract(pattern_one_named, expand=True)
has_match_index = s.str.extractall(pattern_one_named)
no_match_index = has_match_index.xs(0, level="match")
tm.assert_frame_equal(extract_one_named, no_match_index)
pattern_one_noname = r'([a-z])'
extract_one_noname = s.str.extract(pattern_one_noname, expand=True)
has_match_index = s.str.extractall(pattern_one_noname)
no_match_index = has_match_index.xs(0, level="match")
tm.assert_frame_equal(extract_one_noname, no_match_index)
def test_empty_str_methods(self):
empty_str = empty = Series(dtype=object)
empty_int = Series(dtype=int)
empty_bool = Series(dtype=bool)
empty_bytes = Series(dtype=object)
# GH7241
# (extract) on empty series
tm.assert_series_equal(empty_str, empty.str.cat(empty))
assert '' == empty.str.cat()
tm.assert_series_equal(empty_str, empty.str.title())
tm.assert_series_equal(empty_int, empty.str.count('a'))
tm.assert_series_equal(empty_bool, empty.str.contains('a'))
tm.assert_series_equal(empty_bool, empty.str.startswith('a'))
tm.assert_series_equal(empty_bool, empty.str.endswith('a'))
tm.assert_series_equal(empty_str, empty.str.lower())
tm.assert_series_equal(empty_str, empty.str.upper())
tm.assert_series_equal(empty_str, empty.str.replace('a', 'b'))
tm.assert_series_equal(empty_str, empty.str.repeat(3))
tm.assert_series_equal(empty_bool, empty.str.match('^a'))
tm.assert_frame_equal(
DataFrame(columns=[0], dtype=str),
empty.str.extract('()', expand=True))
tm.assert_frame_equal(
DataFrame(columns=[0, 1], dtype=str),
empty.str.extract('()()', expand=True))
tm.assert_series_equal(
empty_str,
empty.str.extract('()', expand=False))
tm.assert_frame_equal(
DataFrame(columns=[0, 1], dtype=str),
empty.str.extract('()()', expand=False))
tm.assert_frame_equal(DataFrame(dtype=str), empty.str.get_dummies())
tm.assert_series_equal(empty_str, empty_str.str.join(''))
tm.assert_series_equal(empty_int, empty.str.len())
tm.assert_series_equal(empty_str, empty_str.str.findall('a'))
tm.assert_series_equal(empty_int, empty.str.find('a'))
tm.assert_series_equal(empty_int, empty.str.rfind('a'))
tm.assert_series_equal(empty_str, empty.str.pad(42))
tm.assert_series_equal(empty_str, empty.str.center(42))
tm.assert_series_equal(empty_str, empty.str.split('a'))
tm.assert_series_equal(empty_str, empty.str.rsplit('a'))
tm.assert_series_equal(empty_str,
empty.str.partition('a', expand=False))
tm.assert_series_equal(empty_str,
empty.str.rpartition('a', expand=False))
tm.assert_series_equal(empty_str, empty.str.slice(stop=1))
tm.assert_series_equal(empty_str, empty.str.slice(step=1))
tm.assert_series_equal(empty_str, empty.str.strip())
tm.assert_series_equal(empty_str, empty.str.lstrip())
tm.assert_series_equal(empty_str, empty.str.rstrip())
tm.assert_series_equal(empty_str, empty.str.wrap(42))
tm.assert_series_equal(empty_str, empty.str.get(0))
tm.assert_series_equal(empty_str, empty_bytes.str.decode('ascii'))
tm.assert_series_equal(empty_bytes, empty.str.encode('ascii'))
tm.assert_series_equal(empty_str, empty.str.isalnum())
tm.assert_series_equal(empty_str, empty.str.isalpha())
tm.assert_series_equal(empty_str, empty.str.isdigit())
tm.assert_series_equal(empty_str, empty.str.isspace())
tm.assert_series_equal(empty_str, empty.str.islower())
tm.assert_series_equal(empty_str, empty.str.isupper())
tm.assert_series_equal(empty_str, empty.str.istitle())
tm.assert_series_equal(empty_str, empty.str.isnumeric())
tm.assert_series_equal(empty_str, empty.str.isdecimal())
tm.assert_series_equal(empty_str, empty.str.capitalize())
tm.assert_series_equal(empty_str, empty.str.swapcase())
tm.assert_series_equal(empty_str, empty.str.normalize('NFC'))
if compat.PY3:
table = str.maketrans('a', 'b')
else:
import string
table = string.maketrans('a', 'b')
tm.assert_series_equal(empty_str, empty.str.translate(table))
def test_empty_str_methods_to_frame(self):
empty = Series(dtype=str)
empty_df = DataFrame([])
tm.assert_frame_equal(empty_df, empty.str.partition('a'))
tm.assert_frame_equal(empty_df, empty.str.rpartition('a'))
def test_ismethods(self):
values = ['A', 'b', 'Xy', '4', '3A', '', 'TT', '55', '-', ' ']
str_s = Series(values)
alnum_e = [True, True, True, True, True, False, True, True, False,
False]
alpha_e = [True, True, True, False, False, False, True, False, False,
False]
digit_e = [False, False, False, True, False, False, False, True, False,
False]
# TODO: unused
num_e = [False, False, False, True, False, False, # noqa
False, True, False, False]
space_e = [False, False, False, False, False, False, False, False,
False, True]
lower_e = [False, True, False, False, False, False, False, False,
False, False]
upper_e = [True, False, False, False, True, False, True, False, False,
False]
title_e = [True, False, True, False, True, False, False, False, False,
False]
tm.assert_series_equal(str_s.str.isalnum(), Series(alnum_e))
tm.assert_series_equal(str_s.str.isalpha(), Series(alpha_e))
tm.assert_series_equal(str_s.str.isdigit(), Series(digit_e))
tm.assert_series_equal(str_s.str.isspace(), Series(space_e))
tm.assert_series_equal(str_s.str.islower(), Series(lower_e))
tm.assert_series_equal(str_s.str.isupper(), Series(upper_e))
tm.assert_series_equal(str_s.str.istitle(), Series(title_e))
assert str_s.str.isalnum().tolist() == [v.isalnum() for v in values]
assert str_s.str.isalpha().tolist() == [v.isalpha() for v in values]
assert str_s.str.isdigit().tolist() == [v.isdigit() for v in values]
assert str_s.str.isspace().tolist() == [v.isspace() for v in values]
assert str_s.str.islower().tolist() == [v.islower() for v in values]
assert str_s.str.isupper().tolist() == [v.isupper() for v in values]
assert str_s.str.istitle().tolist() == [v.istitle() for v in values]
def test_isnumeric(self):
# 0x00bc: ¼ VULGAR FRACTION ONE QUARTER
# 0x2605: ★ not number
# 0x1378: ፸ ETHIOPIC NUMBER SEVENTY
# 0xFF13: 3 Em 3
values = ['A', '3', u'¼', u'★', u'፸', u'3', 'four']
s = Series(values)
numeric_e = [False, True, True, False, True, True, False]
decimal_e = [False, True, False, False, False, True, False]
tm.assert_series_equal(s.str.isnumeric(), Series(numeric_e))
tm.assert_series_equal(s.str.isdecimal(), Series(decimal_e))
unicodes = [u'A', u'3', u'¼', u'★', u'፸', u'3', u'four']
assert s.str.isnumeric().tolist() == [v.isnumeric() for v in unicodes]
assert s.str.isdecimal().tolist() == [v.isdecimal() for v in unicodes]
values = ['A', np.nan, u'¼', u'★', np.nan, u'3', 'four']
s = Series(values)
numeric_e = [False, np.nan, True, False, np.nan, True, False]
decimal_e = [False, np.nan, False, False, np.nan, True, False]
tm.assert_series_equal(s.str.isnumeric(), Series(numeric_e))
tm.assert_series_equal(s.str.isdecimal(), Series(decimal_e))
def test_get_dummies(self):
s = Series(['a|b', 'a|c', np.nan])
result = s.str.get_dummies('|')
expected = DataFrame([[1, 1, 0], [1, 0, 1], [0, 0, 0]],
columns=list('abc'))
tm.assert_frame_equal(result, expected)
s = Series(['a;b', 'a', 7])
result = s.str.get_dummies(';')
expected = DataFrame([[0, 1, 1], [0, 1, 0], [1, 0, 0]],
columns=list('7ab'))
tm.assert_frame_equal(result, expected)
# GH9980, GH8028
idx = Index(['a|b', 'a|c', 'b|c'])
result = idx.str.get_dummies('|')
expected = MultiIndex.from_tuples([(1, 1, 0), (1, 0, 1),
(0, 1, 1)], names=('a', 'b', 'c'))
tm.assert_index_equal(result, expected)
def test_get_dummies_with_name_dummy(self):
# GH 12180
# Dummies named 'name' should work as expected
s = Series(['a', 'b,name', 'b'])
result = s.str.get_dummies(',')
expected = DataFrame([[1, 0, 0], [0, 1, 1], [0, 1, 0]],
columns=['a', 'b', 'name'])
tm.assert_frame_equal(result, expected)
idx = Index(['a|b', 'name|c', 'b|name'])
result = idx.str.get_dummies('|')
expected = MultiIndex.from_tuples([(1, 1, 0, 0), (0, 0, 1, 1),
(0, 1, 0, 1)],
names=('a', 'b', 'c', 'name'))
tm.assert_index_equal(result, expected)
def test_join(self):
values = Series(['a_b_c', 'c_d_e', np.nan, 'f_g_h'])
result = values.str.split('_').str.join('_')
tm.assert_series_equal(values, result)
# mixed
mixed = Series(['a_b', NA, 'asdf_cas_asdf', True, datetime.today(),
'foo', None, 1, 2.])
rs = Series(mixed).str.split('_').str.join('_')
xp = Series(['a_b', NA, 'asdf_cas_asdf', NA, NA, 'foo', NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_almost_equal(rs, xp)
# unicode
values = Series([u('a_b_c'), u('c_d_e'), np.nan, u('f_g_h')])
result = values.str.split('_').str.join('_')
tm.assert_series_equal(values, result)
def test_len(self):
values = Series(['foo', 'fooo', 'fooooo', np.nan, 'fooooooo'])
result = values.str.len()
exp = values.map(lambda x: len(x) if notna(x) else NA)
tm.assert_series_equal(result, exp)
# mixed
mixed = Series(['a_b', NA, 'asdf_cas_asdf', True, datetime.today(),
'foo', None, 1, 2.])
rs = Series(mixed).str.len()
xp = Series([3, NA, 13, NA, NA, 3, NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_almost_equal(rs, xp)
# unicode
values = Series([u('foo'), u('fooo'), u('fooooo'), np.nan, u(
'fooooooo')])
result = values.str.len()
exp = values.map(lambda x: len(x) if notna(x) else NA)
tm.assert_series_equal(result, exp)
def test_findall(self):
values = Series(['fooBAD__barBAD', NA, 'foo', 'BAD'])
result = values.str.findall('BAD[_]*')
exp = Series([['BAD__', 'BAD'], NA, [], ['BAD']])
tm.assert_almost_equal(result, exp)
# mixed
mixed = Series(['fooBAD__barBAD', NA, 'foo', True, datetime.today(),
'BAD', None, 1, 2.])
rs = Series(mixed).str.findall('BAD[_]*')
xp = Series([['BAD__', 'BAD'], NA, [], NA, NA, ['BAD'], NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_almost_equal(rs, xp)
# unicode
values = Series([u('fooBAD__barBAD'), NA, u('foo'), u('BAD')])
result = values.str.findall('BAD[_]*')
exp = Series([[u('BAD__'), u('BAD')], NA, [], [u('BAD')]])
tm.assert_almost_equal(result, exp)
def test_find(self):
values = Series(['ABCDEFG', 'BCDEFEF', 'DEFGHIJEF', 'EFGHEF', 'XXXX'])
result = values.str.find('EF')
tm.assert_series_equal(result, Series([4, 3, 1, 0, -1]))
expected = np.array([v.find('EF') for v in values.values],
dtype=np.int64)
tm.assert_numpy_array_equal(result.values, expected)
result = values.str.rfind('EF')
tm.assert_series_equal(result, Series([4, 5, 7, 4, -1]))
expected = np.array([v.rfind('EF') for v in values.values],
dtype=np.int64)
tm.assert_numpy_array_equal(result.values, expected)
result = values.str.find('EF', 3)
tm.assert_series_equal(result, Series([4, 3, 7, 4, -1]))
expected = np.array([v.find('EF', 3) for v in values.values],
dtype=np.int64)
tm.assert_numpy_array_equal(result.values, expected)
result = values.str.rfind('EF', 3)
tm.assert_series_equal(result, Series([4, 5, 7, 4, -1]))
expected = np.array([v.rfind('EF', 3) for v in values.values],
dtype=np.int64)
tm.assert_numpy_array_equal(result.values, expected)
result = values.str.find('EF', 3, 6)
tm.assert_series_equal(result, Series([4, 3, -1, 4, -1]))
expected = np.array([v.find('EF', 3, 6) for v in values.values],
dtype=np.int64)
tm.assert_numpy_array_equal(result.values, expected)
result = values.str.rfind('EF', 3, 6)
tm.assert_series_equal(result, Series([4, 3, -1, 4, -1]))
expected = np.array([v.rfind('EF', 3, 6) for v in values.values],
dtype=np.int64)
tm.assert_numpy_array_equal(result.values, expected)
with tm.assert_raises_regex(TypeError,
"expected a string object, not int"):
result = values.str.find(0)
with tm.assert_raises_regex(TypeError,
"expected a string object, not int"):
result = values.str.rfind(0)
def test_find_nan(self):
values = Series(['ABCDEFG', np.nan, 'DEFGHIJEF', np.nan, 'XXXX'])
result = values.str.find('EF')
tm.assert_series_equal(result, Series([4, np.nan, 1, np.nan, -1]))
result = values.str.rfind('EF')
tm.assert_series_equal(result, Series([4, np.nan, 7, np.nan, -1]))
result = values.str.find('EF', 3)
tm.assert_series_equal(result, Series([4, np.nan, 7, np.nan, -1]))
result = values.str.rfind('EF', 3)
tm.assert_series_equal(result, Series([4, np.nan, 7, np.nan, -1]))
result = values.str.find('EF', 3, 6)
tm.assert_series_equal(result, Series([4, np.nan, -1, np.nan, -1]))
result = values.str.rfind('EF', 3, 6)
tm.assert_series_equal(result, Series([4, np.nan, -1, np.nan, -1]))
def test_index(self):
def _check(result, expected):
if isinstance(result, Series):
tm.assert_series_equal(result, expected)
else:
tm.assert_index_equal(result, expected)
for klass in [Series, Index]:
s = klass(['ABCDEFG', 'BCDEFEF', 'DEFGHIJEF', 'EFGHEF'])
result = s.str.index('EF')
_check(result, klass([4, 3, 1, 0]))
expected = np.array([v.index('EF') for v in s.values],
dtype=np.int64)
tm.assert_numpy_array_equal(result.values, expected)
result = s.str.rindex('EF')
_check(result, klass([4, 5, 7, 4]))
expected = np.array([v.rindex('EF') for v in s.values],
dtype=np.int64)
tm.assert_numpy_array_equal(result.values, expected)
result = s.str.index('EF', 3)
_check(result, klass([4, 3, 7, 4]))
expected = np.array([v.index('EF', 3) for v in s.values],
dtype=np.int64)
tm.assert_numpy_array_equal(result.values, expected)
result = s.str.rindex('EF', 3)
_check(result, klass([4, 5, 7, 4]))
expected = np.array([v.rindex('EF', 3) for v in s.values],
dtype=np.int64)
tm.assert_numpy_array_equal(result.values, expected)
result = s.str.index('E', 4, 8)
_check(result, klass([4, 5, 7, 4]))
expected = np.array([v.index('E', 4, 8) for v in s.values],
dtype=np.int64)
tm.assert_numpy_array_equal(result.values, expected)
result = s.str.rindex('E', 0, 5)
_check(result, klass([4, 3, 1, 4]))
expected = np.array([v.rindex('E', 0, 5) for v in s.values],
dtype=np.int64)
tm.assert_numpy_array_equal(result.values, expected)
with tm.assert_raises_regex(ValueError,
"substring not found"):
result = s.str.index('DE')
with tm.assert_raises_regex(TypeError,
"expected a string "
"object, not int"):
result = s.str.index(0)
# test with nan
s = Series(['abcb', 'ab', 'bcbe', np.nan])
result = s.str.index('b')
tm.assert_series_equal(result, Series([1, 1, 0, np.nan]))
result = s.str.rindex('b')
tm.assert_series_equal(result, Series([3, 1, 2, np.nan]))
def test_pad(self):
values = Series(['a', 'b', NA, 'c', NA, 'eeeeee'])
result = values.str.pad(5, side='left')
exp = Series([' a', ' b', NA, ' c', NA, 'eeeeee'])
tm.assert_almost_equal(result, exp)
result = values.str.pad(5, side='right')
exp = Series(['a ', 'b ', NA, 'c ', NA, 'eeeeee'])
tm.assert_almost_equal(result, exp)
result = values.str.pad(5, side='both')
exp = Series([' a ', ' b ', NA, ' c ', NA, 'eeeeee'])
tm.assert_almost_equal(result, exp)
# mixed
mixed = Series(['a', NA, 'b', True, datetime.today(), 'ee', None, 1, 2.
])
rs = Series(mixed).str.pad(5, side='left')
xp = Series([' a', NA, ' b', NA, NA, ' ee', NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_almost_equal(rs, xp)
mixed = Series(['a', NA, 'b', True, datetime.today(), 'ee', None, 1, 2.
])
rs = Series(mixed).str.pad(5, side='right')
xp = Series(['a ', NA, 'b ', NA, NA, 'ee ', NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_almost_equal(rs, xp)
mixed = Series(['a', NA, 'b', True, datetime.today(), 'ee', None, 1, 2.
])
rs = Series(mixed).str.pad(5, side='both')
xp = Series([' a ', NA, ' b ', NA, NA, ' ee ', NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_almost_equal(rs, xp)
# unicode
values = Series([u('a'), u('b'), NA, u('c'), NA, u('eeeeee')])
result = values.str.pad(5, side='left')
exp = Series([u(' a'), u(' b'), NA, u(' c'), NA, u('eeeeee')])
tm.assert_almost_equal(result, exp)
result = values.str.pad(5, side='right')
exp = Series([u('a '), u('b '), NA, u('c '), NA, u('eeeeee')])
tm.assert_almost_equal(result, exp)
result = values.str.pad(5, side='both')
exp = Series([u(' a '), u(' b '), NA, u(' c '), NA, u('eeeeee')])
tm.assert_almost_equal(result, exp)
def test_pad_fillchar(self):
values = Series(['a', 'b', NA, 'c', NA, 'eeeeee'])
result = values.str.pad(5, side='left', fillchar='X')
exp = Series(['XXXXa', 'XXXXb', NA, 'XXXXc', NA, 'eeeeee'])
tm.assert_almost_equal(result, exp)
result = values.str.pad(5, side='right', fillchar='X')
exp = Series(['aXXXX', 'bXXXX', NA, 'cXXXX', NA, 'eeeeee'])
tm.assert_almost_equal(result, exp)
result = values.str.pad(5, side='both', fillchar='X')
exp = Series(['XXaXX', 'XXbXX', NA, 'XXcXX', NA, 'eeeeee'])
tm.assert_almost_equal(result, exp)
with tm.assert_raises_regex(TypeError,
"fillchar must be a "
"character, not str"):
result = values.str.pad(5, fillchar='XY')
with tm.assert_raises_regex(TypeError,
"fillchar must be a "
"character, not int"):
result = values.str.pad(5, fillchar=5)
def test_pad_width(self):
# GH 13598
s = Series(['1', '22', 'a', 'bb'])
for f in ['center', 'ljust', 'rjust', 'zfill', 'pad']:
with tm.assert_raises_regex(TypeError,
"width must be of "
"integer type, not*"):
getattr(s.str, f)('f')
def test_translate(self):
def _check(result, expected):
if isinstance(result, Series):
tm.assert_series_equal(result, expected)
else:
tm.assert_index_equal(result, expected)
for klass in [Series, Index]:
s = klass(['abcdefg', 'abcc', 'cdddfg', 'cdefggg'])
if not compat.PY3:
import string
table = string.maketrans('abc', 'cde')
else:
table = str.maketrans('abc', 'cde')
result = s.str.translate(table)
expected = klass(['cdedefg', 'cdee', 'edddfg', 'edefggg'])
_check(result, expected)
# use of deletechars is python 2 only
if not compat.PY3:
result = s.str.translate(table, deletechars='fg')
expected = klass(['cdede', 'cdee', 'eddd', 'ede'])
_check(result, expected)
result = s.str.translate(None, deletechars='fg')
expected = klass(['abcde', 'abcc', 'cddd', 'cde'])
_check(result, expected)
else:
with tm.assert_raises_regex(
ValueError, "deletechars is not a valid argument"):
result = s.str.translate(table, deletechars='fg')
# Series with non-string values
s = Series(['a', 'b', 'c', 1.2])
expected = Series(['c', 'd', 'e', np.nan])
result = s.str.translate(table)
tm.assert_series_equal(result, expected)
def test_center_ljust_rjust(self):
values = Series(['a', 'b', NA, 'c', NA, 'eeeeee'])
result = values.str.center(5)
exp = Series([' a ', ' b ', NA, ' c ', NA, 'eeeeee'])
tm.assert_almost_equal(result, exp)
result = values.str.ljust(5)
exp = Series(['a ', 'b ', NA, 'c ', NA, 'eeeeee'])
tm.assert_almost_equal(result, exp)
result = values.str.rjust(5)
exp = Series([' a', ' b', NA, ' c', NA, 'eeeeee'])
tm.assert_almost_equal(result, exp)
# mixed
mixed = Series(['a', NA, 'b', True, datetime.today(), 'c', 'eee', None,
1, 2.])
rs = Series(mixed).str.center(5)
xp = Series([' a ', NA, ' b ', NA, NA, ' c ', ' eee ', NA, NA, NA
])
assert isinstance(rs, Series)
tm.assert_almost_equal(rs, xp)
rs = Series(mixed).str.ljust(5)
xp = Series(['a ', NA, 'b ', NA, NA, 'c ', 'eee ', NA, NA, NA
])
assert isinstance(rs, Series)
tm.assert_almost_equal(rs, xp)
rs = Series(mixed).str.rjust(5)
xp = Series([' a', NA, ' b', NA, NA, ' c', ' eee', NA, NA, NA
])
assert isinstance(rs, Series)
tm.assert_almost_equal(rs, xp)
# unicode
values = Series([u('a'), u('b'), NA, u('c'), NA, u('eeeeee')])
result = values.str.center(5)
exp = Series([u(' a '), u(' b '), NA, u(' c '), NA, u('eeeeee')])
tm.assert_almost_equal(result, exp)
result = values.str.ljust(5)
exp = Series([u('a '), u('b '), NA, u('c '), NA, u('eeeeee')])
tm.assert_almost_equal(result, exp)
result = values.str.rjust(5)
exp = Series([u(' a'), u(' b'), NA, u(' c'), NA, u('eeeeee')])
tm.assert_almost_equal(result, exp)
def test_center_ljust_rjust_fillchar(self):
values = Series(['a', 'bb', 'cccc', 'ddddd', 'eeeeee'])
result = values.str.center(5, fillchar='X')
expected = Series(['XXaXX', 'XXbbX', 'Xcccc', 'ddddd', 'eeeeee'])
tm.assert_series_equal(result, expected)
expected = np.array([v.center(5, 'X') for v in values.values],
dtype=np.object_)
tm.assert_numpy_array_equal(result.values, expected)
result = values.str.ljust(5, fillchar='X')
expected = Series(['aXXXX', 'bbXXX', 'ccccX', 'ddddd', 'eeeeee'])
tm.assert_series_equal(result, expected)
expected = np.array([v.ljust(5, 'X') for v in values.values],
dtype=np.object_)
tm.assert_numpy_array_equal(result.values, expected)
result = values.str.rjust(5, fillchar='X')
expected = Series(['XXXXa', 'XXXbb', 'Xcccc', 'ddddd', 'eeeeee'])
tm.assert_series_equal(result, expected)
expected = np.array([v.rjust(5, 'X') for v in values.values],
dtype=np.object_)
tm.assert_numpy_array_equal(result.values, expected)
# If fillchar is not a charatter, normal str raises TypeError
# 'aaa'.ljust(5, 'XY')
# TypeError: must be char, not str
with tm.assert_raises_regex(TypeError,
"fillchar must be a "
"character, not str"):
result = values.str.center(5, fillchar='XY')
with tm.assert_raises_regex(TypeError,
"fillchar must be a "
"character, not str"):
result = values.str.ljust(5, fillchar='XY')
with tm.assert_raises_regex(TypeError,
"fillchar must be a "
"character, not str"):
result = values.str.rjust(5, fillchar='XY')
with tm.assert_raises_regex(TypeError,
"fillchar must be a "
"character, not int"):
result = values.str.center(5, fillchar=1)
with tm.assert_raises_regex(TypeError,
"fillchar must be a "
"character, not int"):
result = values.str.ljust(5, fillchar=1)
with tm.assert_raises_regex(TypeError,
"fillchar must be a "
"character, not int"):
result = values.str.rjust(5, fillchar=1)
def test_zfill(self):
values = Series(['1', '22', 'aaa', '333', '45678'])
result = values.str.zfill(5)
expected = Series(['00001', '00022', '00aaa', '00333', '45678'])
tm.assert_series_equal(result, expected)
expected = np.array([v.zfill(5) for v in values.values],
dtype=np.object_)
tm.assert_numpy_array_equal(result.values, expected)
result = values.str.zfill(3)
expected = Series(['001', '022', 'aaa', '333', '45678'])
tm.assert_series_equal(result, expected)
expected = np.array([v.zfill(3) for v in values.values],
dtype=np.object_)
tm.assert_numpy_array_equal(result.values, expected)
values = Series(['1', np.nan, 'aaa', np.nan, '45678'])
result = values.str.zfill(5)
expected = Series(['00001', np.nan, '00aaa', np.nan, '45678'])
tm.assert_series_equal(result, expected)
def test_split(self):
values = Series(['a_b_c', 'c_d_e', NA, 'f_g_h'])
result = values.str.split('_')
exp = Series([['a', 'b', 'c'], ['c', 'd', 'e'], NA, ['f', 'g', 'h']])
tm.assert_series_equal(result, exp)
# more than one char
values = Series(['a__b__c', 'c__d__e', NA, 'f__g__h'])
result = values.str.split('__')
tm.assert_series_equal(result, exp)
result = values.str.split('__', expand=False)
tm.assert_series_equal(result, exp)
# mixed
mixed = Series(['a_b_c', NA, 'd_e_f', True, datetime.today(), None, 1,
2.])
result = mixed.str.split('_')
exp = Series([['a', 'b', 'c'], NA, ['d', 'e', 'f'], NA, NA, NA, NA, NA
])
assert isinstance(result, Series)
tm.assert_almost_equal(result, exp)
result = mixed.str.split('_', expand=False)
assert isinstance(result, Series)
tm.assert_almost_equal(result, exp)
# unicode
values = Series([u('a_b_c'), u('c_d_e'), NA, u('f_g_h')])
result = values.str.split('_')
exp = Series([[u('a'), u('b'), u('c')], [u('c'), u('d'), u('e')], NA,
[u('f'), u('g'), u('h')]])
tm.assert_series_equal(result, exp)
result = values.str.split('_', expand=False)
tm.assert_series_equal(result, exp)
# regex split
values = Series([u('a,b_c'), u('c_d,e'), NA, u('f,g,h')])
result = values.str.split('[,_]')
exp = Series([[u('a'), u('b'), u('c')], [u('c'), u('d'), u('e')], NA,
[u('f'), u('g'), u('h')]])
tm.assert_series_equal(result, exp)
def test_rsplit(self):
values = Series(['a_b_c', 'c_d_e', NA, 'f_g_h'])
result = values.str.rsplit('_')
exp = Series([['a', 'b', 'c'], ['c', 'd', 'e'], NA, ['f', 'g', 'h']])
tm.assert_series_equal(result, exp)
# more than one char
values = Series(['a__b__c', 'c__d__e', NA, 'f__g__h'])
result = values.str.rsplit('__')
tm.assert_series_equal(result, exp)
result = values.str.rsplit('__', expand=False)
tm.assert_series_equal(result, exp)
# mixed
mixed = Series(['a_b_c', NA, 'd_e_f', True, datetime.today(), None, 1,
2.])
result = mixed.str.rsplit('_')
exp = Series([['a', 'b', 'c'], NA, ['d', 'e', 'f'], NA, NA, NA, NA, NA
])
assert isinstance(result, Series)
tm.assert_almost_equal(result, exp)
result = mixed.str.rsplit('_', expand=False)
assert isinstance(result, Series)
tm.assert_almost_equal(result, exp)
# unicode
values = Series([u('a_b_c'), u('c_d_e'), NA, u('f_g_h')])
result = values.str.rsplit('_')
exp = Series([[u('a'), u('b'), u('c')], [u('c'), u('d'), u('e')], NA,
[u('f'), u('g'), u('h')]])
tm.assert_series_equal(result, exp)
result = values.str.rsplit('_', expand=False)
tm.assert_series_equal(result, exp)
# regex split is not supported by rsplit
values = Series([u('a,b_c'), u('c_d,e'), NA, u('f,g,h')])
result = values.str.rsplit('[,_]')
exp = Series([[u('a,b_c')], [u('c_d,e')], NA, [u('f,g,h')]])
tm.assert_series_equal(result, exp)
# setting max number of splits, make sure it's from reverse
values = Series(['a_b_c', 'c_d_e', NA, 'f_g_h'])
result = values.str.rsplit('_', n=1)
exp = Series([['a_b', 'c'], ['c_d', 'e'], NA, ['f_g', 'h']])
| tm.assert_series_equal(result, exp) | pandas.util.testing.assert_series_equal |
#--------------------------------------------------
import pandas as pd
import numpy as np
import Auxiliary.auxiliary_functions as aux_fun
#--------------------------------------------------
def read_and_rename():
'''
Function that reads the original data from the VH DB and renames the
columns to the names provided by VH in the auxiliary dictionary.
'''
# Obtaining the raw data.
df = pd.read_csv("./Temp/UIC.csv")
# Merging two columns that reference the same variable, introducing all the values
# only in of them.
for i,row in df.iterrows():
val = np.nan
if not pd.isnull(row.columna9):
val = row.columna9
elif not pd.isnull(row.columna10):
val = row.columna10
df.at[i,"columna10"] = val
# Droping the useless column.
df.drop(columns = "columna10", inplace = True)
# Deleting the previous temporary files
del_csvs = ["UIC"]
aux_fun.delete_csvs(del_csvs,"./Temp/")
# Reading the original column names from an auxiliary file.
column_names = open("./Auxiliary/column_names.txt", "r")
rename_columns = column_names.read().split(',')
# Modifiying the dataframe column names with the ones previously read.
df.columns = rename_columns
return df
#--------------------------------------------------
def identify_visit_type(df):
'''
In the raw dataframe, each row corresponds to an specific visit, eventhough this is not
explicitly declared. This function identifies each visit type by consulting which
columns for each row contain values. The columns with values depending on the visit
type are the following:
· Generic patient info: Only filled in the first visita basal. [3,4]
· Visita basal: [6,194]
· Visita seguiment: [195,320]
· Events: [321,329]
· Ultima visita: [330,448]
· Titulacio: [449,474]
· Questionari basal: [475,479]
· Questionari final: [480,484]
'''
for i, row in df.iterrows():
# VISITA BASAL
if any(not pd.isnull(x) for x in row.values[6:195].tolist()):
df.at[i,"redcap_repeat_instrument"]= "visita_basal"
# VISITA SEGUIMENT
elif any(not pd.isnull(x) for x in row.values[195:321].tolist()):
df.at[i,"redcap_repeat_instrument"]= "visita_seguiment"
# EVENTS
elif any(not pd.isnull(x) for x in row.values[321:330].tolist()):
df.at[i,"redcap_repeat_instrument"]= "events"
# ULTIMA VISITA
elif any(not | pd.isnull(x) | pandas.isnull |
import numpy as np
import pytest
import pandas as pd
from pandas import (
DataFrame,
Index,
Series,
concat,
date_range,
)
import pandas._testing as tm
class TestEmptyConcat:
def test_handle_empty_objects(self, sort):
df = DataFrame(np.random.randn(10, 4), columns=list("abcd"))
baz = df[:5].copy()
baz["foo"] = "bar"
empty = df[5:5]
frames = [baz, empty, empty, df[5:]]
concatted = concat(frames, axis=0, sort=sort)
expected = df.reindex(columns=["a", "b", "c", "d", "foo"])
expected["foo"] = expected["foo"].astype("O")
expected.loc[0:4, "foo"] = "bar"
tm.assert_frame_equal(concatted, expected)
# empty as first element with time series
# GH3259
df = DataFrame(
{"A": range(10000)}, index=date_range("20130101", periods=10000, freq="s")
)
empty = DataFrame()
result = concat([df, empty], axis=1)
tm.assert_frame_equal(result, df)
result = concat([empty, df], axis=1)
tm.assert_frame_equal(result, df)
result = concat([df, empty])
tm.assert_frame_equal(result, df)
result = concat([empty, df])
tm.assert_frame_equal(result, df)
def test_concat_empty_series(self):
# GH 11082
s1 = Series([1, 2, 3], name="x")
s2 = Series(name="y", dtype="float64")
res = concat([s1, s2], axis=1)
exp = DataFrame(
{"x": [1, 2, 3], "y": [np.nan, np.nan, np.nan]},
index=Index([0, 1, 2], dtype="O"),
)
tm.assert_frame_equal(res, exp)
s1 = Series([1, 2, 3], name="x")
s2 = Series(name="y", dtype="float64")
res = concat([s1, s2], axis=0)
# name will be reset
exp = Series([1, 2, 3])
tm.assert_series_equal(res, exp)
# empty Series with no name
s1 = Series([1, 2, 3], name="x")
s2 = Series(name=None, dtype="float64")
res = concat([s1, s2], axis=1)
exp = DataFrame(
{"x": [1, 2, 3], 0: [np.nan, np.nan, np.nan]},
columns=["x", 0],
index=Index([0, 1, 2], dtype="O"),
)
tm.assert_frame_equal(res, exp)
@pytest.mark.parametrize("tz", [None, "UTC"])
@pytest.mark.parametrize("values", [[], [1, 2, 3]])
def test_concat_empty_series_timelike(self, tz, values):
# GH 18447
first = Series([], dtype="M8[ns]").dt.tz_localize(tz)
dtype = None if values else np.float64
second = Series(values, dtype=dtype)
expected = DataFrame(
{
0: Series([pd.NaT] * len(values), dtype="M8[ns]").dt.tz_localize(tz),
1: values,
}
)
result = concat([first, second], axis=1)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize(
"left,right,expected",
[
# booleans
(np.bool_, np.int32, np.int32),
(np.bool_, np.float32, np.object_),
# datetime-like
("m8[ns]", np.bool_, np.object_),
("m8[ns]", np.int64, np.object_),
("M8[ns]", np.bool_, np.object_),
("M8[ns]", np.int64, np.object_),
# categorical
("category", "category", "category"),
("category", "object", "object"),
],
)
def test_concat_empty_series_dtypes(self, left, right, expected):
result = concat([Series(dtype=left), Series(dtype=right)])
assert result.dtype == expected
@pytest.mark.parametrize(
"dtype", ["float64", "int8", "uint8", "bool", "m8[ns]", "M8[ns]"]
)
def test_concat_empty_series_dtypes_match_roundtrips(self, dtype):
dtype = np.dtype(dtype)
result = concat([Series(dtype=dtype)])
assert result.dtype == dtype
result = concat([Series(dtype=dtype), Series(dtype=dtype)])
assert result.dtype == dtype
def test_concat_empty_series_dtypes_roundtrips(self):
# round-tripping with self & like self
dtypes = map(np.dtype, ["float64", "int8", "uint8", "bool", "m8[ns]", "M8[ns]"])
def int_result_type(dtype, dtype2):
typs = {dtype.kind, dtype2.kind}
if not len(typs - {"i", "u", "b"}) and (
dtype.kind == "i" or dtype2.kind == "i"
):
return "i"
elif not len(typs - {"u", "b"}) and (
dtype.kind == "u" or dtype2.kind == "u"
):
return "u"
return None
def float_result_type(dtype, dtype2):
typs = {dtype.kind, dtype2.kind}
if not len(typs - {"f", "i", "u"}) and (
dtype.kind == "f" or dtype2.kind == "f"
):
return "f"
return None
def get_result_type(dtype, dtype2):
result = float_result_type(dtype, dtype2)
if result is not None:
return result
result = int_result_type(dtype, dtype2)
if result is not None:
return result
return "O"
for dtype in dtypes:
for dtype2 in dtypes:
if dtype == dtype2:
continue
expected = get_result_type(dtype, dtype2)
result = concat([Series(dtype=dtype), Series(dtype=dtype2)]).dtype
assert result.kind == expected
def test_concat_empty_series_dtypes_triple(self):
assert (
concat(
[Series(dtype="M8[ns]"), Series(dtype=np.bool_), Series(dtype=np.int64)]
).dtype
== np.object_
)
def test_concat_empty_series_dtype_category_with_array(self):
# GH#18515
assert (
concat(
[Series(np.array([]), dtype="category"), Series(dtype="float64")]
).dtype
== "float64"
)
def test_concat_empty_series_dtypes_sparse(self):
result = concat(
[
Series(dtype="float64").astype("Sparse"),
Series(dtype="float64").astype("Sparse"),
]
)
assert result.dtype == "Sparse[float64]"
result = concat(
[Series(dtype="float64").astype("Sparse"), Series(dtype="float64")]
)
expected = pd.SparseDtype(np.float64)
assert result.dtype == expected
result = concat(
[ | Series(dtype="float64") | pandas.Series |
import pandas as pd
import re
import numpy as np
import matplotlib.pyplot as plt
"""
References:
Title: matplotlib
Author: matplotlib Team
Availability: https://github.com/matplotlib/matplotlib
Version: 3.4.2
Title: numpy
Author: numpy Team
Availability: https://github.com/numpy/numpy
Version: 1.19.5
Title: pandas
Author: pandas Team
Availability: https://github.com/pandas-dev/pandas
Version: 1.2.4
"""
desired_width = 320
pd.set_option('display.width',desired_width)
np.set_printoptions(linewidth=desired_width)
pd.set_option('display.max_columns',10)
df = pd.read_csv('practiceallfighterinfo.csv')
# convert career avg stats into floats
df[['SLpM', 'Str. Acc.', 'SApM', 'Str. Def', 'TD Avg.','TD Acc.',
'TD Def.', 'Sub. Avg.']] = df[['SLpM', 'Str. Acc.', 'SApM', 'Str. Def', 'TD Avg.','TD Acc.','TD Def.', 'Sub. Avg.']].apply(pd.to_numeric, errors = 'coerce')
#convert record to wins,loss columns
#wins
df['Wins'] = df.Record.str.extract('(\d+)')
df['Losses'] = df.Record.str.extract('(-\d+)')
df['Losses'] = df['Losses'].map(lambda x: x.lstrip('-'))
#reformat df
df.drop(['Record'], 1, inplace= True)
titles = list(df.columns)
df = df[titles[0:4]+[titles[-2]]+[titles[-1]]+titles[4:15]]
df[['Wins','Losses']] = df[['Wins','Losses']].apply(pd.to_numeric, errors = 'coerce')
# convert height from feet inches string to cm int
df['Feet'] = df.Height.str.extract('(\d)')
df['Inches'] = df.Height.str.extract("('\s\d)")
df['Inches'] = df.Inches.str.extract("(\d)")
df[['Feet','Inches']] = df[['Feet','Inches']].apply(pd.to_numeric, errors = 'coerce')
df['Height_cm'] = (df['Feet']*30.48)+(df['Inches']*2.54)
#reformat df
del df['Height']
del df['Feet']
del df['Inches']
titles = list(df.columns)
df = df[titles[0:3]+[titles[-1]]+titles[3:16]]
#convert reach into int datatype
df['Reach'] = df.Reach.str.extract('(\d{2})')
df['Reach'] = df['Reach'].apply(pd.to_numeric, errors = 'coerce')
#convert DOB into Year of B int
df['DOB'] = df.DOB.str.extract('(\d{4})')
df['DOB'] = df['DOB'].apply(pd.to_numeric, errors = 'coerce')
# remove any records with 0 for all stats
df= df.loc[((df['SLpM']!= 0)|(df['Str. Acc.']!=0)| (df['SApM']!= 0) | (df['Str. Def']!=0) | (df['TD Avg.']!= 0) | (df['TD Acc.']!=0) | (df['TD Def.']!= 0)| (df['Sub. Avg.']!=0))]
# deleted weight and set name as index
df.drop(['Weight','Fighter_iD'], 1,inplace = True)
df.set_index('Name', inplace = True)
# incorporate catgories with a hanful of rows into larger categories and name the the missing values 'unk' = unknown
df['STANCE'].replace({'Open Stance':'Orthodox', 'Sideways':'Orthodox'}, inplace= True)
df['STANCE'].fillna('unk',inplace=True)
#impute missing numeric values
#1% of records were missing height value - just removed those rows
df.dropna(axis = 0 , subset = ['Height_cm'], inplace = True)
#dealing with outliers
max_threshold = df['SLpM'].quantile(0.995)
df = df[(df.SLpM<max_threshold)]
df = df[(df['Str. Acc.']<df['Str. Acc.'].quantile(0.985)) & (df['Str. Acc.']>df['Str. Acc.'].quantile(0.03))]
df = df[(df['SApM']<df['SApM'].quantile(0.995)) & (df['SApM']>df['SApM'].quantile(0.005))]
df = df[(df['Str. Def']<df['Str. Def'].quantile(0.9955)) & (df['Str. Def']>df['Str. Def'].quantile(0.005))]
df = df[(df['TD Avg.']<=df['TD Avg.'].quantile(0.98))]
df = df[(df['TD Acc.']<df['TD Acc.'].quantile(0.93))]
df = df[(df['Sub. Avg.']<df['Sub. Avg.'].quantile(0.995))]
df = df[(df['Wins']<df['Wins'].quantile(0.99))]
df = df[(df['Losses']<df['Losses'].quantile(0.99))]
#Now the matchup info
matches_frame = | pd.read_csv('newfightinfo.csv') | pandas.read_csv |
# Import libraries
import os
import sys
import anemoi as an
import pandas as pd
import numpy as np
import pyodbc
from datetime import datetime
import requests
import collections
import json
import urllib3
def return_between_date_query_string(start_date, end_date):
if start_date != None and end_date != None:
start_end_str = '''AND [TimeStampLocal] >= '%s' AND [TimeStampLocal] < '%s' ''' %(start_date, end_date)
elif start_date != None and end_date == None:
start_end_str = '''AND [TimeStampLocal] >= '%s' ''' %(start_date)
elif start_date == None and end_date != None:
start_end_str = '''AND [TimeStampLocal] < '%s' ''' %(end_date)
else:
start_end_str = ''
return start_end_str
def sql_or_string_from_mvs_ids(mvs_ids):
or_string = ' OR '.join(['mvs_id = {}'.format(mvs_id) for mvs_id in mvs_ids])
return or_string
def sql_list_from_mvs_ids(mvs_ids):
if not isinstance(mvs_ids, list):
mvs_ids = [mvs_ids]
mvs_ids_list = ','.join([f"({mvs_id}_1)" for mvs_id in mvs_ids])
return mvs_ids_list
def rename_mvs_id_column(col, names, types):
name = names[int(col.split('_')[0])]
data_type = types[col.split('_')[1]]
return f'{name}_{data_type}'
# Define DataBase class
class M2D2(object):
'''Class to connect to RAG M2D2 PRD database
'''
def __init__(self):
'''Data structure for connecting to and downloading data from M2D2. Convention is::
import anemoi as an
m2d2 = an.io.database.M2D2()
:Parameters:
:Returns:
out: an.M2D2 object connected to M2D2
'''
self.database = 'M2D2'
server = '10.1.15.53' # PRD
#server = 'SDHQRAGDBDEV01\RAGSQLDBSTG' #STG
db = 'M2D2_DB_BE'
conn_str = 'DRIVER={SQL Server}; SERVER=%s; DATABASE=%s; Trusted_Connection=yes' %(server, db)
self.conn_str = conn_str #Assign connection string
try:
self.conn = pyodbc.connect(self.conn_str) #Apply connection string to connect to database
except:
print('Database connection error: you either don\'t have permission to the database or aren\'t signed onto the VPN')
def connection_check(self, database):
return self.database == database
def masts(self):
'''
:Returns:
out: DataFrame of all met masts with measured data in M2D2
Example::
import anemoi as an
m2d2 = an.io.database.M2D2()
m2d2.masts()
'''
if not self.connection_check('M2D2'):
raise ValueError('Need to connect to M2D2 to retrieve met masts. Use anemoi.DataBase(database="M2D2")')
sql_query_masts = '''
SELECT [Project]
,[AssetID]
,[wmm_id]
,[mvs_id]
,[Name]
,[Type]
,[StartDate]
,[StopDate]
FROM [M2D2_DB_BE].[dbo].[ViewProjectAssetSensors] WITH (NOLOCK)
'''
sql_query_coordinates='''
SELECT [wmm_id]
,[WMM_Latitude]
,[WMM_Longitude]
,[WMM_Elevation]
FROM [M2D2_DB_BE].[dbo].[ViewWindDataSet]'''
masts = pd.read_sql(sql_query_masts, self.conn, parse_dates=['StartDate', 'StopDate'])
coordinates = pd.read_sql(sql_query_coordinates, self.conn)
masts = masts.merge(coordinates, left_on='wmm_id', right_on='wmm_id')
masts.set_index(['Project', 'wmm_id', 'WMM_Latitude', 'WMM_Longitude', 'Type'], inplace=True)
masts.sort_index(inplace=True)
return masts
def mvs_ids(self):
masts = self.masts()
mvs_ids = masts.mvs_id.values.tolist()
return mvs_ids
def valid_signal_labels(self):
signal_type_query = '''
SELECT [MDVT_ID]
,[MDVT_Name]
FROM [M2D2_DB_BE].[dbo].[MDataValueType]'''
signal_types = pd.read_sql(signal_type_query, self.conn, index_col='MDVT_Name').MDVT_ID
return signal_types
def column_labels_for_masts(self):
masts = self.masts()
mvs_ids = masts.mvs_id.unique().tolist()
or_string = ' OR '.join(['mvs_id = {}'.format(mvs_id) for mvs_id in mvs_ids])
column_label_sql_query = '''
SELECT [column_id]
,[label]
FROM [M2D2_DB_BE].[dbo].[ViewWindogMetaData]
WITH (NOLOCK)
WHERE {}'''.format(or_string)
column_labels = pd.read_sql(column_label_sql_query, self.conn)
column_labels = column_labels.set_index('column_id')
return column_labels
def column_labels_for_data_from_mvs_ids(self, data):
masts = self.masts()
names_map = pd.Series(index=masts.mvs_id.values, data=masts.Name.values).to_dict()
types = self.valid_signal_labels()
types.loc['FLAG'] = 'Flag'
types_map = pd.Series(index=types.values.astype(str), data=types.index.values).to_dict()
data = data.rename(lambda x: rename_mvs_id_column(x, names=names_map, types=types_map), axis=1)
return data
def column_labels_for_wmm_id(self, wmm_id):
masts = self.masts()
mvs_ids = masts.loc[pd.IndexSlice[:,wmm_id],:].mvs_id.unique().tolist()
or_string = ' OR '.join(['mvs_id = {}'.format(mvs_id) for mvs_id in mvs_ids])
column_label_sql_query = '''
SELECT [column_id]
,[label]
FROM [M2D2_DB_BE].[dbo].[ViewWindogMetaData]
WITH (NOLOCK)
WHERE {}'''.format(or_string)
column_labels = pd.read_sql(column_label_sql_query, self.conn)
column_labels = column_labels.set_index('column_id')
return column_labels
def data_from_sensors_mvs_ids(self, mvs_ids, signal_type='AVG'):
'''Download sensor data from M2D2
:Parameters:
mvs_ids: int or list
Virtual sensor IDs (mvs_ids) in M2D2, can be singular
signal_type: str, default 'AVG' - NOT SUPPORTED AT THIS TIME
Signal type for download
For example: 'AVG', 'SD', 'MIN', 'MAX', 'GUST'
:Returns:
out: DataFrame with signal data from virtual sensor
'''
if not isinstance(mvs_ids, list):
mvs_ids = [mvs_ids]
valid_mvs_ids = self.mvs_ids()
assert all([mvs_id in valid_mvs_ids for mvs_id in mvs_ids]), f'One of the following is not a valid mvs_id: {mvs_ids}'
mvs_ids_list = sql_list_from_mvs_ids(mvs_ids)
sql_query= f"""
SET NOCOUNT ON
DECLARE @ColumnListID NVARCHAR(4000)
,@startDate DATETIME2
,@endDate DATETIME2
SET @ColumnListID= '{mvs_ids_list}'
SET @startDate = NULL
SET @endDate = NULL
EXECUTE [dbo].[proc_DataExport_GetDataByColumnList]
@ColumnListID
,@startDate
,@endDate
"""
data = pd.read_sql(sql_query, self.conn, index_col='CorrectedTimestamp')
data.index.name = 'stamp'
data.columns.name = 'sensor'
data = self.column_labels_for_data_from_mvs_ids(data)
return data
def data_from_mast_wmm_id(self, wmm_id):
'''Download data from all sensors on a mast from M2D2
:Parameters:
wmm_id: int
Mast ID (wmm_id) in M2D2
:Returns:
out: DataFrame with signal data from each virtual sensor on the mast
'''
masts = self.masts()
wmm_ids = masts.index.get_level_values('wmm_id').sort_values().unique().tolist()
assert wmm_id in wmm_ids, f'the following is not a valid wmm_id: {wmm_id}'
mvs_ids = masts.loc[pd.IndexSlice[:,wmm_id],:].mvs_id.values.tolist()
data = self.data_from_sensors_mvs_ids(mvs_ids)
return data
def metadata_from_mast_wmm_id(self, wmm_id):
'''Download mast metadata from M2D2
:Parameters:
wmm_id: int
Mast ID (wmm_id) in M2D2
:Returns:
out: DataFrame with mast metadata
'''
sql_query= '''
SELECT [WMM_Latitude]
,[WMM_Longitude]
,[WMM_Elevation]
FROM [M2D2_DB_BE].[dbo].[ViewWindDataSet]
WHERE wmm_id = {}
'''.format(wmm_id)
mast_metadata = pd.read_sql(sql_query, self.conn)
return mast_metadata
def mast_from_wmm_id(self, wmm_id):
'''Download an.MetMast from M2D2
:Parameters:
wmm_id: int
Mast ID (wmm_id) in M2D2
:Returns:
out: an.MetMast with data and metadata from M2D2
'''
print(f'Downloading Mast {wmm_id} from M2D2')
data = self.data_from_mast_wmm_id(wmm_id=wmm_id)
metadata = self.metadata_from_mast_wmm_id(wmm_id=wmm_id)
mast = an.MetMast(data=data,
name=wmm_id,
lat=metadata.WMM_Latitude[0],
lon=metadata.WMM_Longitude[0],
elev=metadata.WMM_Elevation[0])
return mast
def masts_from_project(self, project):
'''Download an.MetMasts from M2D2 for a given project
:Parameters:
project_name: str
Project name in M2D2
:Returns:
out: List of an.MetMasts with data and metadata from M2D2 for a given project
'''
masts = self.masts()
projects = masts.index.get_level_values('Project').unique().tolist()
assert project in projects, f'Project {project} not found in M2D2'.format(project)
wmm_ids = masts.loc[project,:].index.get_level_values('wmm_id').sort_values().unique().tolist()
masts = [self.mast_from_wmm_id(wmm_id) for wmm_id in wmm_ids]
return masts
# Define Turbine class
class Turbine(object):
'''Class to connect to EDF Wind Turbine database
'''
def __init__(self):
'''Data structure for connecting to and downloading data from M2D2. Convention is:
import anemoi as an
turb_db = an.io.database.Turbine()
:Parameters:
:Returns:
out: an.Turbine object connected to Turbine database
'''
self.database = 'Turbine'
server = '10.1.15.53'
db = 'Turbine_DB_BE'
conn_str = 'DRIVER={SQL Server}; SERVER=%s; DATABASE=%s; Trusted_Connection=yes' %(server, db)
self.conn_str = conn_str #Assign connection string
try:
self.conn = pyodbc.connect(self.conn_str) #Apply connection string to connect to database
except:
print('Database connection error: you either don\'t have permission to the database or aren\'t signed onto the VPN')
def is_connected(self, database):
return self.database == database
def metadata(self):
'''Get turbine model metadata'''
assert self.is_connected('Turbine'), 'Trying to query the Turbine DB without being connected.'
sql_query_turbines = '''
SELECT [TUR_Manufacturer]
,[TUR_RatedOutputkW]
,[TPC_MaxOutput]
,[TUR_RotorDia]
,[TUR_Model]
,[AllHubHeights]
,[TPC_DocumentDate]
,[TUR_ID]
,[IECClass]
,[TPG_ID]
,[TPG_Name]
,[TPC_ID]
,[TVR_VersionName]
,[TPC_dbalevel]
,[TPC_TIScenario]
,[TPC_BinType]
,[TTC_ID]
,[TRPMC_ID]
,[P_ID]
,[P_Name]
FROM [Turbine_DB_BE].[NodeEstimate].[AllPowerCurves]
WHERE TPC_Type = 'Manufacturer General Spec'
'''
turbines = pd.read_sql(sql_query_turbines, self.conn)
return turbines
def power_curve_from_tpc_id(self, tpc_id):
'''Get turbine model metadata'''
assert self.is_connected('Turbine'), 'Trying to query the Turbine DB without being connected.'
sql_query_thrust_curve = '''
SELECT TPCD_AirDensity,
TPCD_WindSpeedBin,
TPCD_OutputKW
FROM TPCDETAILS
WHERE TPC_id = {} AND TPCD_IsDeleted = 0;
'''.format(tpc_id)
thrust_curve = pd.read_sql(sql_query_thrust_curve, self.conn)
return thrust_curve
def trust_curve_from_ttc_id(self, ttc_id):
'''Get turbine model metadata'''
assert self.is_connected('Turbine'), 'Trying to query the Turbine DB without being connected.'
sql_query_thrust_curve = '''
SELECT TTCD_AirDensity,
TTCD_WindSpeedBin,
TTCD_ThrustValue
FROM TTCDETAILS
WHERE TTC_id = {} AND TTCD_IsDeleted = 0;
'''.format(ttc_id)
thrust_curve = pd.read_sql(sql_query_thrust_curve, self.conn)
return thrust_curve
# Define Padre class
class Padre(object):
'''Class to connect to PRE Padre database
'''
def __init__(self, database='PADREScada', conn_str=None, conn=None, domino=False):
'''Data structure with both database name and connection string.
:Parameters:
database: string, default None
Name of the padre database to connect to
conn_str: string, default None
SQL connection string needed to connect to the database
conn: object, default None
SQL connection object to database
'''
self.database = database
if self.database == 'PADREScada':
server = '10.1.106.44'
db = 'PADREScada'
elif self.database == 'PadrePI':
server = '10.1.106.44'
db = 'PADREScada'
conn_str = 'DRIVER={SQL Server}; SERVER=%s; DATABASE=%s; Trusted_Connection=yes' %(server, db)
self.conn_str = conn_str
try:
self.conn = pyodbc.connect(self.conn_str)
except:
print('Database connection error: you either don\'t have permission to the database or aren\'t signed onto the VPN')
def is_connected(self, database):
return self.database == database
def assets(self, project=None, turbines_only=False):
'''Returns:
DataFrame of all turbines within Padre
'''
if not self.is_connected('PADREScada'):
raise ValueError('Need to connect to Padre to retrieve turbines. Use anemoi.DataBase(database="Padre")')
sql_query_assets = '''
SELECT [AssetKey]
,Projects.[ProjectName]
,[AssetType]
,[AssetName]
,Turbines.[Latitude]
,Turbines.[Longitude]
,[elevation_mt]
FROM [PADREScada].[dbo].[Asset] as Turbines
WITH (NOLOCK)
INNER JOIN [PADREScada].[dbo].[Project] as Projects on Turbines.ProjectKey = Projects.ProjectKey
'''
assets = | pd.read_sql(sql_query_assets, self.conn) | pandas.read_sql |
# -*- coding: utf-8 -*-
"""
Created on Wed Aug 29 11:36:45 2018
@author: suvod
"""
from __future__ import division
from . import git_access
import json
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import math
import networkx as nx
class git_api_access(object):
def __init__(self,token,repo_owner,source_type,git_url,api_base_url,repo_name):
self.access_token = token
self.repo_owner = repo_owner
self.source_type = source_type
self.git_url = git_url
self.api_base_url = api_base_url
self.repo_name = repo_name
#self.client = self.get_git_client()
self.get_git_client()
def get_git_client(self):
self.client = git_access.GitHubClient({'access_token': self.access_token,
'repo_owner': self.repo_owner,
'source_type': self.source_type,
'git_url': self.git_url,
'api_base_url': self.api_base_url,
'repo_name': self.repo_name})
def create_base_url(self, url_type):
self.url_type = url_type
self.base_url = self.api_base_url + '/repos/' + self.repo_owner + '/' + self.repo_name + '/' + self.url_type
def create_search_url(self):
return self.api_base_url + '/search/repositories?'
def create_search_query(self,language,forks=0,stars=0):
query = 'q=language:' + language + '+forks:' + str(forks) + '..' + str(50+forks)# + '+stars:>=' + str(stars)
return self.create_search_url() + query
def create_advanced_url(self, url_details = ''):
if url_details != '':
self.advanced_url = self.base_url + '/' + url_details
else:
self.advanced_url = self.base_url
def get_comments(self,url_type,url_details = ''):
self.create_base_url(url_type)
self.create_advanced_url(url_details)
x = [0]*100
page_number = 1
comments_details = []
while len(x) >= 100 and page_number<=400:
paged_url = self.advanced_url + '?page=' + str(page_number) + '&per_page=100'
page_number += 1
print(paged_url)
res = self.client.get(paged_url)
x = json.loads(res.content)
for i in range(len(x)):
issue_number = x[i]['issue_url'][len(self.base_url)+2:]
user_logon = x[i]['user']['login']
author_association = x[i]['author_association']
body = x[i]['body']
created_at = x[i]['created_at']
comments_details.append([issue_number,user_logon,author_association,body,created_at])
self.set_uniq_users(comments_details)
return comments_details
def get_releases(self,url_type,url_details = ''):
self.create_base_url(url_type)
self.create_advanced_url(url_details)
x = [0]*100
page_number = 1
release_details = []
while len(x) >= 100 and page_number<=400:
paged_url = self.advanced_url + '?page=' + str(page_number) + '&per_page=100'
page_number += 1
print(paged_url)
res = self.client.get(paged_url)
x = json.loads(res.content)
for i in range(len(x)):
release_id = x[i]['id']
author_logon = x[i]['author']['login']
tag_name = x[i]['tag_name']
created_at = x[i]['created_at']
description = x[i]['body']
release_details.append([release_id,author_logon,tag_name,created_at,description])
return release_details
def get_tagged_release(self,url_type,url_details = ''):
self.create_base_url(url_type)
self.create_advanced_url(url_details)
x = [0]*100
page_number = 1
release_details = []
while len(x) >= 100 and page_number<=400:
paged_url = self.advanced_url + '?page=' + str(page_number) + '&per_page=100'
page_number += 1
print(paged_url)
res = self.client.get(paged_url)
x = json.loads(res.content)
print(len(x))
for i in range(len(x)):
sha_url = x[i]['commit']['url']
print(sha_url)
tag_data = self.client.get(sha_url)
tag_data = json.loads(tag_data.content)
release_id = tag_data['node_id']
author_logon = tag_data['commit']['author']['email']
tag_name = x[i]['name']
tag_message = tag_data['commit']['message']
created_at = tag_data['commit']['author']['date'].split('T')[0]
description = "None"
release_details.append([release_id,author_logon,tag_name,created_at,description])
return release_details
def set_uniq_users(self,comment_details):
comment_df = | pd.DataFrame(comment_details, columns = ['issue_number','user_logon','author_association','body','created_at']) | pandas.DataFrame |
import warnings
from pandas import DataFrame, to_datetime, read_csv, notnull
from pyramm.helpers import _map_json
from pyramm.geometry import transform, loads
DEFAULT_DATE_COLUMNS = ["added_on", "chgd_on"]
class BaseTable:
table_name = None
index_name = None
get_geometry = False
date_columns = []
def __init__(self, ramm, road_id=None, latest=False):
self.df = DataFrame()
if ramm is None:
return
self._get_data(ramm, road_id, latest)
self._convert_dates()
self._replace_nan()
if self.index_name:
self.df.set_index(self.index_name, drop=True, inplace=True)
def _get_data(self, ramm, road_id, latest):
self.df = ramm.get_data(
self.table_name, road_id, latest, self.get_geometry
).copy()
if "wkt" in self.df.columns:
with warnings.catch_warnings():
warnings.filterwarnings("ignore")
self.df["geometry"] = [transform(loads(ww)) for ww in self.df["wkt"]]
def _convert_dates(self):
date_columns = set(self.date_columns + DEFAULT_DATE_COLUMNS)
for cc in date_columns:
try:
self.df[cc] = to_datetime(self.df[cc])
except KeyError:
pass
def _replace_nan(self):
self.df = self.df.where( | notnull(self.df) | pandas.notnull |
import numpy as np
import xml.etree.ElementTree as ET
import gzip
import pandas as pd
import os
import gavia
def loadlog(projectdir):
'''
Load the gps log file for the specified project, given by the projectdir
parameter
Parameters
----------
projectdir : string
path to the gavia project
Returns
-------
Pandas DataFrame containing gps logs
'''
PROCESS = True
ROOT = False
# check for processed log file in directory
dirfiles = os.listdir(projectdir)
# projectdir may not be given as the root folder
if 'log_processed' in dirfiles:
if 'gps.csv' in os.listdir(os.path.join(projectdir,'log_processed')):
PROCESS = False
elif 'files'in dirfiles:
ROOT = True
dirfiles = os.listdir(os.path.join(projectdir,'files'))
# check if folders/files are contained within a 'files' directory
if 'log_processed' in dirfiles:
if 'gps.csv' in os.listdir(os.path.join(projectdir,'files','log_processed')):
PROCESS = False
if ROOT:
# change the projectdir to the files\ directory
projectdir = os.path.join(projectdir,'files')
if PROCESS:
print('Processing gps logs...')
logdir = os.path.join(projectdir,'log')
loglist = gavia.log.getlogs(logdir,'gps')
if len(loglist) == 0: raise ValueError('no gps logs found in project')
nlogs = len(loglist)
# combine log DataFrame
df = | pd.DataFrame() | pandas.DataFrame |
import traceback
from typing import Union
import sys
import numpy as np
import pandas as pd
from importers.base import BaseImporter
from .state_decorator import ImporterStatus, Status
from .attr_range_decorator import update_attribute_ranges
sys.path.append("../..")
from settings import GetConfig
@GetConfig("TfL_BikePoints", 'api_endpoints', 'tfl_bike_points')
class TfL_BikePoints(BaseImporter):
"""
TFL Bike points importer
The importer gets extended from BaseImporter and doesn't have any bespoke code apart from defining the
structure of the api, like sensor, attributes, data tables and values
"""
importer_status = ImporterStatus.get_importer_status()
def __init__(self) -> None:
"""
Get Import Config
Instantiate BaseImporter
"""
super().__init__(self.API_NAME, self.BASE_URL, self.REFRESH_TIME,
self.API_KEY, self.API_CLASS, self.TOKEN_EXPIRY)
@update_attribute_ranges
def _create_datasource(self, headers: Union[str, None] = None) -> None:
"""
Create DataSource
:param headers: Request Headers
"""
try:
super()._create_datasource(headers)
self.df = self.create_dataframe(ignore_object_tags=['$type'], object_separator='id')
self.df.dropna(inplace=True)
self.df = self.df[self.df.key == 'NbEmptyDocks']
self.df['modified'] = self.df.modified.apply(lambda x: | pd.to_datetime(x) | pandas.to_datetime |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import pandas as pd
import cv2
#データ読み込みクラス
class Class_File_Data_Reader():
'''
各種データ形式をpandasに読み込むクラス
日本語データに対応 (Shift-JIS前提 xlsx⇒csvした日本語では使える)
'''
def __init__(self, ext="ALL"):
'''
コンストラクタ
(読み込み対象とする拡張子を引数extで指定可能)
'''
if ext !="ALL":
self.target_ext = [ ext ]
else:
self.target_ext = [".csv",".tsv",".xls",".xlsx",".json"]
return
def read_file_in_folder(self,folder_path):
'''
入力フォルダパス直下にあるファイルを読み込む
各ファイルの読み込み結果(pandas dataframe)とファイル名のリストを出力
'''
filelist = self.get_file_path_list_in_folder(folder_path)
result = []
for file in filelist:
dfdata = self.read_file_autohandle(file)
result.append(dfdata)
return result,filelist
def read_file_in_folder_recrussive(self,folder_path):
'''
入力フォルダパスの配下(サブフォルダ含む)にあるファイルを読み込む
各ファイルの読み込み結果(pandas dataframe)とファイル名のリストを出力
'''
filelist = self.get_file_path_list_in_folder_recrussive(folder_path)
result = []
for file in filelist:
dfdata = self.read_file_autohandle(file)
result.append(dfdata)
return result,filelist
def get_file_path_list_in_folder(self,folder_path):
'''
入力フォルダパス直下にあるファイルのリストを作成する
'''
if folder_path[-1] != "\\":
folder_path = folder_path + "\\"
file_full_path_list = []
# Search_Current_Directry
for file in os.listdir(folder_path):
root, ext = os.path.splitext(file)
for indx2 in range(len(self.target_ext)):
if ext == self.target_ext[indx2]:
file_fullpath = folder_path + file
file_full_path_list.append(file_fullpath)
return file_full_path_list
def get_file_path_list_in_folder_recrussive(self,folder_path):
'''
入力フォルダパスの配下(サブフォルダ含む)にあるファイルのリストを作成する
'''
if folder_path[-1] != "\\":
folder_path = folder_path + "\\"
file_full_path_list = []
for folder, subfolders, files in os.walk(folder_path):
for indx in range(len(files)):
root, ext = os.path.splitext(files[indx])
for indx2 in range (len(self.target_ext)):
if ext == self.target_ext[indx2]:
file_fullpath = folder+'/'+ files[indx]
file_full_path_list.append(file_fullpath)
return file_full_path_list
def read_file_autohandle(self,input_path):
'''
入力ファイルから拡張子を自動判定してpandasに読み込む
'''
root, ext = os.path.splitext(input_path) #拡張子の取得
#拡張子毎に読み込みを実施
if ext == ".csv":
result = self.read_csv_data(input_path)
elif ext == ".tsv":
result = self.read_tsv_data(input_path)
elif (ext == ".xls") or (ext == ".xlsx"):
result = self.read_xls_data_firstseat(input_path)
elif ext == ".json":
result = self.read_json_data(input_path)
return result
def read_csv_data(self,filename):
'''
csvデータを読みとる
'''
result = pd.read_csv(filename, encoding="shift-jis") #日本語データ(Shift-Jis)を含む場合を想定
return result
def read_tsv_data(self,filename):
'''
tsvデータを読みとる
'''
result = pd.read_table(filename, encoding="shift-jis") #日本語データ(Shift-Jis)を含む場合を想定
return result
def read_xls_data_firstseat(self,filename):
'''
xlsx or xlsファイルの1枚目のシートを読みとる
'''
result = pd.read_excel(filename, encoding="shift-jis") #日本語データ(Shift-Jis)を含む場合を想定
return result
def read_xls_data_allsheat(self,filename):
'''
xlsx or xlsファイルの全てのシートを読み込む
'''
result = pd.read_excel(filename, encoding="shift-jis", sheetname =None) #日本語データ(Shift-Jis)を含む場合を想定
return result
def read_json_data(self,filename):
'''
jsonファイルを読み込む
'''
result = pd.read_json(filename, encoding="shift-jis") #日本語データ(Shift-Jis)を含む場合を想定
return result
def merge_data_simlpe(self,input_pd_list):
'''
最もシンプルなデータのマージ (pandas concatを使った縦方向の結合)
'''
result = input_pd_list[0]
for dnameindx in range(len(input_pd_list)-1) :
result = | pd.concat([result,input_pd_list[dnameindx+1]]) | pandas.concat |
#
# Copyright (C) 2019 Databricks, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import unittest
import numpy as np
import pandas as pd
import pyspark
from pyspark.sql import Column
from databricks.koala.testing.utils import ReusedSQLTestCase, TestUtils
class DataFrameTest(ReusedSQLTestCase, TestUtils):
@property
def full(self):
return pd.DataFrame({
'a': [1, 2, 3, 4, 5, 6, 7, 8, 9],
'b': [4, 5, 6, 3, 2, 1, 0, 0, 0],
}, index=[0, 1, 3, 5, 6, 8, 9, 9, 9])
@property
def df(self):
return self.spark.from_pandas(self.full)
def test_Dataframe(self):
d = self.df
full = self.full
expected = pd.Series([2, 3, 4, 5, 6, 7, 8, 9, 10],
index=[0, 1, 3, 5, 6, 8, 9, 9, 9],
name='(a + 1)') # TODO: name='a'
self.assert_eq(d['a'] + 1, expected)
self.assert_eq(d.columns, pd.Index(['a', 'b']))
self.assert_eq(d[d['b'] > 2], full[full['b'] > 2])
# TODO: self.assert_eq(d[['a', 'b']], full[['a', 'b']])
self.assert_eq(d.a, full.a)
# TODO: assert d.b.mean().compute() == full.b.mean()
# TODO: assert np.allclose(d.b.var().compute(), full.b.var())
# TODO: assert np.allclose(d.b.std().compute(), full.b.std())
assert repr(d)
def test_head_tail(self):
d = self.df
full = self.full
self.assert_eq(d.head(2), full.head(2))
self.assert_eq(d.head(3), full.head(3))
self.assert_eq(d['a'].head(2), full['a'].head(2))
self.assert_eq(d['a'].head(3), full['a'].head(3))
# TODO: self.assert_eq(d.tail(2), full.tail(2))
# TODO: self.assert_eq(d.tail(3), full.tail(3))
# TODO: self.assert_eq(d['a'].tail(2), full['a'].tail(2))
# TODO: self.assert_eq(d['a'].tail(3), full['a'].tail(3))
@unittest.skip('TODO: support index')
def test_index_head(self):
d = self.df
full = self.full
self.assert_eq(d.index[:2], full.index[:2])
self.assert_eq(d.index[:3], full.index[:3])
def test_Series(self):
d = self.df
full = self.full
self.assertTrue(isinstance(d.a, Column))
self.assertTrue(isinstance(d.a + 1, Column))
# TODO: self.assert_eq(d + 1, full + 1)
@unittest.skip('TODO: support index')
def test_Index(self):
for case in [pd.DataFrame(np.random.randn(10, 5), index=list('abcdefghij')),
pd.DataFrame(np.random.randn(10, 5),
index=pd.date_range('2011-01-01', freq='D',
periods=10))]:
ddf = self.spark.from_pandas(case)
self.assert_eq(ddf.index, case.index)
def test_attributes(self):
d = self.df
self.assertIn('a', dir(d))
self.assertNotIn('foo', dir(d))
self.assertRaises(AttributeError, lambda: d.foo)
df = self.spark.from_pandas(pd.DataFrame({'a b c': [1, 2, 3]}))
self.assertNotIn('a b c', dir(df))
df = self.spark.from_pandas(pd.DataFrame({'a': [1, 2], 5: [1, 2]}))
self.assertIn('a', dir(df))
self.assertNotIn(5, dir(df))
def test_column_names(self):
d = self.df
self.assert_eq(d.columns, pd.Index(['a', 'b']))
# TODO: self.assert_eq(d[['b', 'a']].columns, pd.Index(['b', 'a']))
self.assertEqual(d['a'].name, 'a')
self.assertEqual((d['a'] + 1).name, '(a + 1)') # TODO: 'a'
self.assertEqual((d['a'] + d['b']).name, '(a + b)') # TODO: None
@unittest.skip('TODO: support index')
def test_index_names(self):
d = self.df
self.assertIsNone(d.index.name)
idx = | pd.Index([0, 1, 2, 3, 4, 5, 6, 7, 8, 9], name='x') | pandas.Index |
# -*- coding: utf-8 -*-
# """@author: Elie"""
# run locally on python 3.8.5('dec1st_py38_xgboostetal':conda)
# =============================================================================
# %% Libraries
# =============================================================================
import pandas as pd
import numpy as np
import datetime
from functools import partial, reduce
from joblib import load, dump
import os
import sys
#plotting
from matplotlib import pyplot as plt
import matplotlib.lines as mlines
from matplotlib import cm
plt.rcParams["font.size"] = "4"
import seaborn as sns
import matplotlib as mpl
#ML/Stats
from sklearn.model_selection import train_test_split, GridSearchCV, KFold, cross_val_score, StratifiedKFold
from sklearn.metrics import roc_curve, auc,precision_recall_curve, f1_score
from sklearn.metrics import roc_curve, precision_recall_curve, auc, make_scorer, recall_score, accuracy_score, precision_score, confusion_matrix
import shap
import xgboost
from xgboost import XGBClassifier
pd.options.mode.chained_assignment = None
# import matplotlib as mpl
# mpl.matplotlib_fname()
# plt.matplotlib_fname()
mpl.rcParams['savefig.transparent'] = "False"
mpl.rcParams['axes.facecolor'] = "white"
mpl.rcParams['figure.facecolor'] = "white"
mpl.rcParams['font.size'] = "5"
plt.rcParams['savefig.transparent'] = "False"
plt.rcParams['axes.facecolor'] = "white"
plt.rcParams['figure.facecolor'] = "white"
# =============================================================================
# %% define these feature/headers here in case the headers
# are out of order in input files (often the case)
# =============================================================================
snv_categories = ["sample",
"A[C>A]A", "A[C>A]C", "A[C>A]G", "A[C>A]T",
"C[C>A]A", "C[C>A]C", "C[C>A]G", "C[C>A]T",
"G[C>A]A", "G[C>A]C", "G[C>A]G", "G[C>A]T",
"T[C>A]A", "T[C>A]C", "T[C>A]G", "T[C>A]T",
"A[C>G]A", "A[C>G]C", "A[C>G]G", "A[C>G]T",
"C[C>G]A", "C[C>G]C", "C[C>G]G", "C[C>G]T",
"G[C>G]A", "G[C>G]C", "G[C>G]G", "G[C>G]T",
"T[C>G]A", "T[C>G]C", "T[C>G]G", "T[C>G]T",
"A[C>T]A", "A[C>T]C", "A[C>T]G", "A[C>T]T",
"C[C>T]A", "C[C>T]C", "C[C>T]G", "C[C>T]T",
"G[C>T]A", "G[C>T]C", "G[C>T]G", "G[C>T]T",
"T[C>T]A", "T[C>T]C", "T[C>T]G", "T[C>T]T",
"A[T>A]A", "A[T>A]C", "A[T>A]G", "A[T>A]T",
"C[T>A]A", "C[T>A]C", "C[T>A]G", "C[T>A]T",
"G[T>A]A", "G[T>A]C", "G[T>A]G", "G[T>A]T",
"T[T>A]A", "T[T>A]C", "T[T>A]G", "T[T>A]T",
"A[T>C]A", "A[T>C]C", "A[T>C]G", "A[T>C]T",
"C[T>C]A", "C[T>C]C", "C[T>C]G", "C[T>C]T",
"G[T>C]A", "G[T>C]C", "G[T>C]G", "G[T>C]T",
"T[T>C]A", "T[T>C]C", "T[T>C]G", "T[T>C]T",
"A[T>G]A", "A[T>G]C", "A[T>G]G", "A[T>G]T",
"C[T>G]A", "C[T>G]C", "C[T>G]G", "C[T>G]T",
"G[T>G]A", "G[T>G]C", "G[T>G]G", "G[T>G]T",
"T[T>G]A", "T[T>G]C", "T[T>G]G", "T[T>G]T"]
indel_categories = ["sample",
"1:Del:C:0", "1:Del:C:1", "1:Del:C:2", "1:Del:C:3", "1:Del:C:4", "1:Del:C:5",
"1:Del:T:0", "1:Del:T:1", "1:Del:T:2", "1:Del:T:3", "1:Del:T:4", "1:Del:T:5",
"1:Ins:C:0", "1:Ins:C:1", "1:Ins:C:2", "1:Ins:C:3", "1:Ins:C:4", "1:Ins:C:5",
"1:Ins:T:0", "1:Ins:T:1", "1:Ins:T:2", "1:Ins:T:3", "1:Ins:T:4", "1:Ins:T:5",
"2:Del:R:0", "2:Del:R:1", "2:Del:R:2", "2:Del:R:3", "2:Del:R:4", "2:Del:R:5",
"3:Del:R:0", "3:Del:R:1", "3:Del:R:2", "3:Del:R:3", "3:Del:R:4", "3:Del:R:5",
"4:Del:R:0", "4:Del:R:1", "4:Del:R:2", "4:Del:R:3", "4:Del:R:4", "4:Del:R:5",
"5:Del:R:0", "5:Del:R:1", "5:Del:R:2", "5:Del:R:3", "5:Del:R:4", "5:Del:R:5",
"2:Ins:R:0", "2:Ins:R:1", "2:Ins:R:2", "2:Ins:R:3", "2:Ins:R:4", "2:Ins:R:5",
"3:Ins:R:0", "3:Ins:R:1", "3:Ins:R:2", "3:Ins:R:3", "3:Ins:R:4", "3:Ins:R:5",
"4:Ins:R:0", "4:Ins:R:1", "4:Ins:R:2", "4:Ins:R:3", "4:Ins:R:4", "4:Ins:R:5",
"5:Ins:R:0", "5:Ins:R:1", "5:Ins:R:2", "5:Ins:R:3", "5:Ins:R:4", "5:Ins:R:5",
"2:Del:M:1", "3:Del:M:1", "3:Del:M:2", "4:Del:M:1", "4:Del:M:2", "4:Del:M:3",
"5:Del:M:1", "5:Del:M:2", "5:Del:M:3", "5:Del:M:4", "5:Del:M:5"]
cnv_categories = ["sample",
"BCper10mb_0", "BCper10mb_1", "BCper10mb_2", "BCper10mb_3",
"CN_0", "CN_1", "CN_2", "CN_3", "CN_4", "CN_5", "CN_6", "CN_7", "CN_8",
"CNCP_0", "CNCP_1", "CNCP_2", "CNCP_3", "CNCP_4", "CNCP_5", "CNCP_6", "CNCP_7",
"BCperCA_0", "BCperCA_1", "BCperCA_2", "BCperCA_3", "BCperCA_4", "BCperCA_5",
"SegSize_0", "SegSize_1", "SegSize_2", "SegSize_3", "SegSize_4", "SegSize_5",
"SegSize_6", "SegSize_7", "SegSize_8", "SegSize_9", "SegSize_10",
"CopyFraction_0", "CopyFraction_1", "CopyFraction_2", "CopyFraction_3", "CopyFraction_4",
"CopyFraction_5", "CopyFraction_6"]
### ==========================================================
# make concat sig dataframe
# ============================================================
"""load the 3 data frames and merge to one df"""
def load_data(snv_counts_path, indel_counts_path, cnv_counts_path):
df_snv = pd.read_csv(snv_counts_path, sep='\t', low_memory=False)
df_snv = df_snv[snv_categories]
df_snv["sample"] = df_snv["sample"].astype(str)
df_indel = pd.read_csv(indel_counts_path, sep='\t', low_memory=False)
df_indel = df_indel[indel_categories]
df_indel["sample"] = df_indel["sample"].astype(str)
df_cnv = pd.read_csv(cnv_counts_path, sep='\t', low_memory=False)
df_cnv = df_cnv[cnv_categories]
df_cnv["sample"] = df_cnv["sample"].astype(str)
df_sigs = pd.merge(df_snv, df_indel, on="sample", how='left').fillna(0)
df_sigs = pd.merge(df_sigs, df_cnv, on="sample", how='left').reset_index(drop=True)
return df_sigs
def get_data_and_labels_from_df(df, gene_name):
#first encode gene lable as binary
combined_matrix_for_gene = df.copy(deep=True)
gene_name = str(gene_name)
combined_matrix_for_gene.loc[(combined_matrix_for_gene["primary_label"] == gene_name), 'primary_label'] = 1
combined_matrix_for_gene.loc[(combined_matrix_for_gene["primary_label"] != 1), 'primary_label'] = 0
#amazingly stupid, if dont specify astype int, the 1/0 remain an object and dont work with gridsearchcv
combined_matrix_for_gene["primary_label"] = combined_matrix_for_gene["primary_label"].astype('int')
#now extract 2d matrix of feature values and 1d matrix of labels
features_list = snv_categories[1:] + indel_categories[1:] + cnv_categories[1:]
X_data = combined_matrix_for_gene[features_list]
X_data.columns = X_data.columns.str.replace("[", "mm").str.replace("]", "nn").str.replace(">", "rr")
Y_labels = combined_matrix_for_gene["primary_label"]
return X_data, Y_labels
"""Can use this function on the server with many cores, takes long time without many cores"""
def do_grid_search_for_best_params(xtrain, ytrain, xtest, ytest, paramgrid):
estimator = XGBClassifier(objective='binary:logistic', nthread=1, seed=42)
grid_search = GridSearchCV(estimator=estimator, param_grid=paramgrid, scoring = 'roc_auc', n_jobs = 60, cv = 10, verbose=True)
fit_params={"eval_metric" : ['auc', 'error', 'logloss'], "eval_set" : [[xtest, ytest]]}
fitted_model = grid_search.fit(xtrain, ytrain, **fit_params)
cv_results = pd.DataFrame(fitted_model.cv_results_)
return fitted_model.best_score_, fitted_model.best_params_, fitted_model.best_estimator_, cv_results
def model_with_params(trainX, trainY, testX, testY, params, max_rounds):
estimator = XGBClassifier(n_estimators=max_rounds, nthread=10, **params)
fitted_model = estimator.fit(trainX, trainY, verbose=True)
prediction_binary_test = fitted_model.predict(testX, ntree_limit=max_rounds)
prediction_probability_test = fitted_model.predict_proba(testX, ntree_limit=max_rounds)
prediction_prob_of_true_test = prediction_probability_test[:,1]
prediction_binary_train = fitted_model.predict(trainX, ntree_limit=max_rounds)
prediction_probability_train = fitted_model.predict_proba(trainX, ntree_limit=max_rounds)
prediction_prob_of_true_train = prediction_probability_train[:,1]
return fitted_model, prediction_binary_test, prediction_prob_of_true_test, prediction_binary_train, prediction_prob_of_true_train
def kfold_cv(Knumber, Xdata, Ylabels, model):
kfold = KFold(n_splits=Knumber)
results = cross_val_score(model, Xdata, Ylabels, cv=kfold)
return results
def shapely_values(model, Xdata, Nvalues):
import inspect
print(os.path.abspath(inspect.getfile(shap.summary_plot)))
X = Xdata.copy(deep=True)
shap_values = shap.TreeExplainer(model, feature_perturbation='tree_path_dependent').shap_values(X, check_additivity=False)
X.columns = X.columns.str.replace("mm", "[").str.replace("nn", "]").str.replace("rr", ">")
fig, ax = plt.subplots(figsize=(7,4))
shap.summary_plot(shap_values, X, plot_type="dot", max_display=Nvalues, show=False, plot_size=(6,3), alpha=0.7)
plt.subplots_adjust(left=0.3, right=0.94, top=0.9, bottom=0.1)
ax = plt.gca()
fig = plt.gcf()
mpl.rcParams['savefig.transparent'] = "False"
mpl.rcParams['axes.facecolor'] = "white"
mpl.rcParams['figure.facecolor'] = "white"
mpl.rcParams['font.size'] = "5"
plt.rcParams['savefig.transparent'] = "False"
plt.rcParams['axes.facecolor'] = "white"
plt.rcParams['figure.facecolor'] = "white"
return fig, ax
def my_roc(data, prob_of_true):
fpr, tpr, thresholds = roc_curve(data, prob_of_true)
roc_auc = auc(fpr, tpr)
fig, ax = plt.subplots(figsize=(1.3,1.4))
lw = 1
ax.plot(fpr, tpr, color='darkorange', lw=lw, label='ROC curve (area = %0.2f)' % roc_auc)
ax.plot([0, 1], [0, 1], color='navy', lw=lw, linestyle='--')
ax.set_xlim([-0.02, 1.0])
ax.set_ylim([0.0, 1.02])
ax.set_xlabel('False Positive Rate', fontsize=4, labelpad=0.75)
ax.set_ylabel('True Positive Rate', fontsize=4, labelpad=0.75)
#ax.set_title('ROC curve', fontsize=6, pad=1)
ax.legend(loc="lower right", fontsize=4)
tick_numbers = [round(x,1) for x in np.arange(0, 1.1, 0.2)]
ax.set_xticks(tick_numbers)
ax.tick_params(axis='both', which="major", length=2, labelsize=4, pad=0.5, reset=False)
fig.subplots_adjust(left=0.15, right=0.965, top=0.98, bottom=0.12)
sns.despine(ax=ax, top=True, right=True, left=False, bottom=False)
return fig, ax
def precision_recall(data, prob_of_true):
precision, recall, thresholds = precision_recall_curve(data, prob_of_true)
fig, ax = plt.subplots(figsize=(1.3,1.4))
lw = 1
# ax.plot([0, 1], [0, 1], color='navy', lw=lw, linestyle='--')
ax.plot(recall, precision, color='darkorange', lw=lw, label='PR curve')
ax.set_xlim([-0.02, 1.0])
ax.set_ylim([0.5, 1.05])
# axis labels
ax.set_xlabel('Recall', fontsize=4, labelpad=0.75)
ax.set_ylabel('Precision', fontsize=4, labelpad=0.75)
ax.legend(loc="lower left", fontsize=4)
tick_numbers = [round(x,1) for x in np.arange(0, 1.1, 0.2)]
ax.set_xticks(tick_numbers)
ax.tick_params(axis='both', which="major", length=2, labelsize=4, pad=0.5, reset=False)
fig.subplots_adjust(left=0.15, right=0.965, top=0.98, bottom=0.12)
sns.despine(ax=ax, top=True, right=True, left=False, bottom=False)
return fig, ax
def plot_precision_recall_vs_threshold(data, prob_of_true):
"""Modified from: Hands-On Machine learning with Scikit-Learn
and TensorFlow; p.89
"""
#first generate and find fscores for all possible thresholds:
def to_labels(pos_probs, threshold):
return (pos_probs >= threshold).astype('int')
#evaluate each threshold
thresholds = np.arange(0, 1, 0.001)
scores = [f1_score(data, to_labels(prob_of_true, t)) for t in thresholds]
ix = np.argmax(scores)
print('Threshold=%.3f, F-Score=%.5f' % (thresholds[ix], scores[ix]))
best_threshold = thresholds[ix]
Fscore = scores[ix]
#now plot precision recall as a function of threshold
precisions, recalls, thresholds = precision_recall_curve(data, prob_of_true)
fig, ax = plt.subplots(figsize=(1.3,1.4))
lw = 1
#plt.title("Precision and Recall Scores as a function of the decision threshold")
ax.plot(thresholds, precisions[:-1], color="#CD5C5C", label="Precision", lw=lw)
ax.plot(thresholds, recalls[:-1], "#197419", label="Recall", lw=lw)
ax.axvline(x=best_threshold, color="b",linestyle="--", label=f'Threshold={best_threshold:.2f},\nF-Score={Fscore:.2f}')
ax.set_ylabel("Score", fontsize=4, labelpad=0.75)
ax.set_xlabel("Decision Threshold", fontsize=4, labelpad=0.75)
ax.legend(loc="lower center", fontsize=4)
tick_numbers = [round(x,1) for x in np.arange(0, 1.1, 0.2)]
ax.set_xticks(tick_numbers)
ax.tick_params(axis='both', which="major", length=2, labelsize=4, pad=0.5, reset=False)
fig.subplots_adjust(left=0.15, right=0.965, top=0.98, bottom=0.12)
sns.despine(ax=ax, top=True, right=True, left=False, bottom=False)
return fig, ax, best_threshold, Fscore
def makepredictions(loadedmodel, dfgood, xdata, ylabels):
prediction_probability = loadedmodel.predict_proba(xdata)
pred_prob = prediction_probability[:,1]
allpredprob_df = pd.DataFrame(data={"labels":ylabels.values, "prob_of_true": pred_prob})
all_data_with_preds = pd.merge(dfgood, allpredprob_df, left_index=True, right_index=True)
pred_data = all_data_with_preds[["sample", "primary_label", "prob_of_true"]]
pred_data["primary_label"] = pred_data["primary_label"].fillna("DRp")
all_data_with_preds = all_data_with_preds.drop(columns=snv_categories[1:]).drop(columns=indel_categories[1:]).drop(columns=cnv_categories[1:])
return pred_prob, pred_data
def least_sub_rank1_model_params(cv_results_path):
rank1_cv_results = pd.read_csv(cv_results_path, sep="\t").query('(rank_test_score < 2)').query('(param_colsample_bylevel > 0.3) and (param_colsample_bynode > 0.3) and (param_colsample_bytree > 0.3) and (param_subsample > 0.3)')
rank1_cv_results["total_subsample"] = rank1_cv_results['param_colsample_bylevel'] * rank1_cv_results['param_colsample_bynode'] * rank1_cv_results['param_colsample_bytree'] * rank1_cv_results['param_subsample']
rank1_cv_results = rank1_cv_results.sort_values(by="total_subsample", ascending=False).head(n=1)
params = rank1_cv_results["params"].iloc[0]
params_dict = eval(params)
return params_dict
def probability_bar_graph(gene_oi, pos_color, neg_color, legend_d, legend_p, all_data_with_preds):
all_prob_table = all_data_with_preds.copy(deep=True)
pos = all_prob_table.query('(primary_label == @gene_oi)').sort_values(f"{gene_oi}_prob_of_true", ascending=False)
pos["color"] = pos_color
neg = all_prob_table.query('(primary_label != @gene_oi)').sort_values(f"{gene_oi}_prob_of_true", ascending=False)
neg["color"] = neg_color
bargraph = pd.concat([pos, neg]).reset_index(drop=True)
def fig_aesthetic(ax, df):
ax.set_ylim(0,1)
ax.set_xlim(df.index[0]-0.5,df.index[-1]+0.5)
ax.grid(b=False, which='both', axis='y', color='0.4', linewidth=0.9, linestyle='dotted', zorder=0)
ax.tick_params(axis='both', which="major", length=3, labelsize=5, pad=1, reset=False)
ax.set_xticks([])
# ax[0].set_ylabel("Signature Weights", fontsize=8, horizontalalignment="center", labelpad=0.5)
ax.set_yticks([0.25, 0.50, 0.75])
ax.set_xlabel("")
ax.set_ylabel("Probability", fontsize=5, horizontalalignment="center", labelpad=0.6)
ax.yaxis.set_label_coords(-0.08, 0.5)
sns.despine(ax=ax, top=True, right=True, left=False, bottom=False)
return ax
fig, ax = plt.subplots(figsize=(3.2,1.5))
ax.bar(x=bargraph.index, height=bargraph[f"{gene_oi}_prob_of_true"], width=0.8, edgecolor=None, linewidth=0, color=bargraph["color"], zorder=10)
ax = fig_aesthetic(ax, bargraph)
handles = []
handles.append(mlines.Line2D([], [], color=pos_color, markeredgecolor=pos_color, marker='s', lw=0, markersize=8, label=legend_d))
handles.append(mlines.Line2D([], [], color=neg_color, markeredgecolor=neg_color, marker='s', lw=0, markersize=8, label=legend_p))
ax.legend(handles=handles,loc='upper left', edgecolor='0.5', frameon=False, ncol=2, fontsize=5, handletextpad=0.001, bbox_to_anchor=(0.45, 0.72), borderpad=0, columnspacing=0.9)
fig.subplots_adjust(left=0.1, right=0.995, top=0.99, bottom=0.03)
return fig, ax
def conf_matrix(df, label, threshold):
table = df.copy(deep=True) #df is all data_with_preds
label = str(label) #label is primary label column
threshold = float(threshold)
prob_column = f"{label}_prob_of_true"
table["TP"] = 0
table.loc[(table['primary_label'] == label) & (table[prob_column] >= threshold), 'TP'] = 1
table["FP"] = 0
table.loc[(table['primary_label'] != label) & (table[prob_column] >= threshold), 'FP'] = 1
table["FN"] = 0
table.loc[(table['primary_label'] == label) & (table[prob_column] <= threshold), 'FN'] = 1
table["TN"] = 0
table.loc[(table['primary_label'] != label) & (table[prob_column] <= threshold), 'TN'] = 1
TP = table["TP"].sum()
FP = table["FP"].sum()
FN = table["FN"].sum()
TN = table["TN"].sum()
return np.array([[TP, FP], [FN, TN]])
def accuracy(TP, TN, FP, FN):
return ((TP+TN)/(TP + TN + FP + FN))
def precision(TP, TN, FP, FN):
return ((TP)/(TP + FP))
def recall(TP, TN, FP, FN):
return ((TP)/(TP + FN))
def plot_matrix(cm_array):
fig, ax = plt.subplots(figsize=(3, 3))
group_names = ['True Pos', 'False Pos', 'False Neg', 'True Neg']
group_counts = cm_array.flatten()
labels = [f"{name}\n{count}" for name, count in zip(group_names,group_counts)]
labels = np.asarray(labels).reshape(2,2)
sns.heatmap(cm_array, annot=labels, annot_kws={"size":8}, fmt='', cmap='Blues', ax=ax)
ax.set_xlabel("Published labels", fontsize=8)
ax.xaxis.set_label_position('top')
ax.xaxis.tick_top()
ax.set_ylabel("Predicted labels", fontsize=8)
ax.set_xticklabels(["yes", "no"])
ax.set_yticklabels(["yes", "no"])
ax.tick_params(axis = 'both', which="major", length=0, pad=0, labelsize=8, reset=False)
cbar = ax.collections[0].colorbar
# here set the labelsize by 20
cbar.ax.tick_params(labelsize=8)
return fig, ax
### ==========================================================
# get paths, load data and make df with each file merged
# ============================================================
#files from paths relative to this script
rootdir = os.path.dirname(os.path.dirname(os.path.dirname(__file__)))
datadir = os.path.join(rootdir, "data")
cohort_data = os.path.join(datadir, "cohort.tsv")
snv_features = os.path.join(datadir, "tns_features.tsv")
ndl_features = os.path.join(datadir, "ndl_features.tsv")
cnv_features = os.path.join(datadir, "cnv_features.tsv")
outputdir = os.path.dirname(__file__)
cv_results_dir = os.path.dirname(__file__)
print('Loading data at '+str(datetime.datetime.now()))
sigs = load_data(snv_features, ndl_features, cnv_features)
sample_labels = pd.read_csv(cohort_data, sep='\t', low_memory=False)
df = pd.merge(sample_labels, sigs, how='left', on='sample').query('(cancer == "PC")').reset_index(drop=True)
print('Finished loading data at '+str(datetime.datetime.now()))
all_probabilites_list = []
# color list for bargraphs
color_list = list(sns.color_palette().as_hex())
blue = color_list[0] #drp
orange = color_list[1] #atm
green = color_list[2] #cdk12
red = color_list[3] #brca2
purple = color_list[4] #mmr
# %%
# model BRCA2
# =============================================================================
goi = "BRCA2d"
goi = str(goi)
print('Loading data at '+str(datetime.datetime.now()))
sigs = load_data(snv_features, ndl_features, cnv_features)
sample_labels = pd.read_csv(cohort_data, sep='\t', low_memory=False)
df = pd.merge(sample_labels, sigs, how='left', on='sample').query('(cancer == "PC")').reset_index(drop=True)
print('Finished loading data at '+str(datetime.datetime.now()))
print(f"start splitting data for {goi} at {str(datetime.datetime.now())}")
X_data, Y_labels = get_data_and_labels_from_df(df_good, goi)
X_train, X_test, Y_train, Y_test = train_test_split(X_data, Y_labels, test_size=0.4, random_state=42, stratify=Y_labels)
# model_path = "c:/Users/ElieRitch/Desktop/signatures_aug2021/gridsearch_models6/BRCA2_gridparams_refitmodel.joblib.model.dat"
# modelpath = os.path.expanduser(model_path)
# model = load(modelpath)
# PredProbs, PredData = makepredictions(model, df_good, X_data, Y_labels)
print(f"start making model for {goi} at {str(datetime.datetime.now())}")
max_rounds = 1000000
# cv_grid_path = f"{cv_results_dir}/{goi}_cv_results.tsv"
# best_params_ = least_sub_rank1_model_params(cv_grid_path)
best_params_ = {'colsample_bylevel': 0.3, 'colsample_bynode': 0.3, 'colsample_bytree': 0.3, 'eta': 0.001, 'max_depth': 3, 'seed': 32, 'subsample': 0.4}
fitted_model, prediction_binary_test, prediction_prob_of_true_test, prediction_binary_train, prediction_prob_of_true_train = model_with_params(X_train, Y_train, X_test, Y_test, best_params_, max_rounds)
test_df = pd.DataFrame(data={"labels":Y_test.values, "prob_of_true": prediction_prob_of_true_test, "pred_binary":prediction_binary_test})
test_df.index = Y_test.index
train_df = pd.DataFrame(data={"labels":Y_train.values, "prob_of_true": prediction_prob_of_true_train, "pred_binary":prediction_binary_train})
train_df.index = Y_train.index
all_preds_df = pd.concat([test_df, train_df])
all_data_with_preds = pd.merge(df_good, all_preds_df, left_index=True, right_index=True)
all_data_with_preds = all_data_with_preds.drop(columns=snv_categories[1:]).drop(columns=indel_categories[1:]).drop(columns=cnv_categories[1:])
all_data_with_preds = all_data_with_preds.drop(columns="labels").rename(columns={"prob_of_true": goi+"_prob_of_true", "pred_binary": goi+"_pred_binary"})
all_probabilites_list.append(all_data_with_preds)
all_data_with_preds.to_csv(outputdir+"/"+goi+"_predictions.tsv",sep='\t', index=False)
fitted_model.save_model(fr"{model_output_dir}\{goi}.xgb_py37_xgboost_ml.model.txt")
all_data = pd.concat([Y_test, Y_train])
all_prob_of_true = np.concatenate([prediction_prob_of_true_test, prediction_prob_of_true_train])
print(f"finished making model for {goi} at {str(datetime.datetime.now())}")
#####ROC for all data and for test ##############
print(f"start graphing model for {goi} at {str(datetime.datetime.now())}")
fig, ax = my_roc(all_data, all_prob_of_true)
plt.savefig(outputdir+"/"+goi+"_ROC.png", dpi=500)
plt.close()
fig, ax = my_roc(Y_test, prediction_prob_of_true_test)
plt.savefig(outputdir+"/"+goi+"_test_ROC.png", dpi=500)
plt.close()
fig, ax = precision_recall(all_data, all_prob_of_true)
plt.savefig(outputdir+"/"+goi+"_PreRec.png", dpi=500)
fig, ax = precision_recall(all_data, all_prob_of_true)
plt.savefig(outputdir+"/"+goi+"_PreRec.png", dpi=500)
plt.close()
# plt.savefig(outputdir+"/"+goi+"_PreRec.pdf", dpi=500)
plt.close()
fig, ax, best_threshold, Fscore = plot_precision_recall_vs_threshold(all_data, all_prob_of_true)
plt.savefig(outputdir+"/"+goi+"_PreRec_vs_Thresh.png", dpi=500)
# plt.savefig(outputdir+"/"+goi+"_PreRec_vs_Thresh.pdf", dpi=500)
plt.close()
print(f"start graphing shap for {goi} at {str(datetime.datetime.now())}")
fig, ax = shapely_values(fitted_model, X_data, 15)
ax.set_xticks([-0.5, 0,0.5,1])
plt.savefig(outputdir+"/"+goi+"_shap15.png", dpi=500)
# plt.savefig(outputdir+"/"+goi+"_shap15.pdf", dpi=500)
plt.close()
print(f"start graphing bars for {goi} at {str(datetime.datetime.now())}")
fig, ax = probability_bar_graph(goi, red, blue, f"{goi}d", f"{goi}p", all_data_with_preds)
plt.savefig(f"{outputdir}/{goi}_prob_of_class.png", dpi=500, transparent=False, facecolor="w")
plt.close()
print(f"finished graphing model for {goi} at {str(datetime.datetime.now())}")
print(f"Confusion metric and graph for {goi} at {str(datetime.datetime.now())}")
confusion_matrix = conf_matrix(all_data_with_preds, goi, best_threshold)
TruePos = confusion_matrix.flatten()[0]
FalsePos = confusion_matrix.flatten()[1]
FalseNeg = confusion_matrix.flatten()[2]
TrueNeg = confusion_matrix.flatten()[3]
accuracy_of_model = accuracy(TruePos, TrueNeg, FalsePos, FalseNeg)
precision_of_model = precision(TruePos, TrueNeg, FalsePos, FalseNeg)
recall_of_model = recall(TruePos, TrueNeg, FalsePos, FalseNeg)
print(confusion_matrix)
print(f"{goi} model accuracy = {accuracy_of_model}")
print(f"{goi} model precision = {precision_of_model}")
print(f"{goi} model recall = {recall_of_model}")
fig, ax = plot_matrix(confusion_matrix)
plt.savefig(f"{outputdir}/{goi}_confusion_matrix.png", dpi=500, transparent=False, facecolor="w")
plt.close()
# %%
# model CDK12
# =============================================================================
goi = "CDK12"
goi = str(goi)
print('Loading data at '+str(datetime.datetime.now()))
sigs = load_data(snv_features, ndl_features, cnv_features)
sample_labels = | pd.read_csv(cohort_data, sep='\t', low_memory=False) | pandas.read_csv |
# coding=utf-8
import os
import os.path
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from loganalysis.const import *
class Log(object):
''' 调度模块Log分析接口类
主要提供如下3类功能:
a) 信息呈现
b)问题发现
c)问题定位
要求所有文件命名符合EI命名格式:子系统_时间.csv
'''
def __init__(self, directory, time_interval=None, product_type='Micro'):
'''初始化Log实例,把所有Log按照类型分类
Args:
directory: Log所在目录
time_interval: 时间范围[start, end],格式为yyyymmddhhmmss
product_type:产品类型['Macro', 'Micro'],默认为micro
'''
self._directory = directory
self._product_type = product_type
self._logfiles={}
self._time_interval = time_interval
@property
def product_type(self):
return self._product_type
@property
def directory(self):
return self._directory
def _filenames_of_type(self, filetype):
'''获取指定文件类型的所有文件名
Args:
filetype:文件类型
time_interval: 时间范围[start, end],格式为yyyymmddhhmmss
Returns:
文件名列表
'''
names_of_filetype = []
for name in np.sort(os.listdir(self._directory)):
if not name.endswith(r'.csv'):
continue
if -1 == name.find(filetype):
continue
if self._time_interval:
time = np.uint64(name.rsplit(r'.')[0].rsplit(r'_')[-1])
if time < self._time_interval[0] or time > self._time_interval[1]:
continue
names_of_filetype.append(name)
return names_of_filetype
def describle(self):
'''当前目录下相关Log文件总体描述,每类Log文件合并为一个文件
输出文件名,大小,行数,时间范围,airtime范围等,每个Log文件一列
'''
df = pd.DataFrame()
for type, logfile in self._logfiles.items():
df.at[type, 'size'] = logfile.size
df.at[type, 'num_of_files'] = len(logfile.files)
df.at[type, 'num_of_lines'] = logfile.lines
df.at[type, 'pctime_start'] = logfile.pctimes[0]
df.at[type, 'pctime_end'] = logfile.pctimes[1]
df.at[type, 'airtime_start'] = logfile.airtimes[0]
df.at[type, 'airtime_end'] = logfile.airtimes[1]
df.index.name = 'filename'
return df
class LogFile(object):
'''Log文件接口类'''
def __init__(self, type, directory, files, id_filter=None):
'''初始化Log实例,把所有Log按照类型分类
Args:
file: 文件名
type: log类型
'''
self._files = files
self._type = type
self._directory = directory
self._id_filter = id_filter
self._time_filter = None
self._size = sum([os.path.getsize(os.path.join(directory, file)) for file in files])
self._pctimes = [-1, -1]
self._airtimes = [-1, -1]
self._lines = 0
cols = ['LocalTime', 'AirTime']
for data in self.gen_of_cols(cols):
if len(data.index) == 0:
self._lines = 0
return
self._lines = self._lines + data.index.max()
if self._pctimes[0] == -1:
self._pctimes[0] = data.iat[0, 0]
self._pctimes[1] = data.iat[-1, 0]
if self._airtimes[0] == -1:
self._airtimes[0] = data.iat[0, 1]
self._airtimes[1] = data.iat[-1, 1]
@property
def type(self):
return self._type
@property
def files(self):
return self._files
@property
def size(self):
return self._size
@property
def id_filter(self):
return self._id_filter
@property
def lines(self):
'''获取文件总行数'''
return self._lines
@property
def pctimes(self):
'''PC时间范围'''
return tuple(self._pctimes)
@property
def airtimes(self):
'''AirTime时间范围'''
return tuple(self._airtimes)
@staticmethod
def addtime(time1, time2):
time1 = np.uint32(time1)
time2 = np.uint32(time2)
frm = time1 // 16 + time2 // 16
subfrm = time1 % 16 + time2 % 16
if subfrm >= 10:
subfrm -= 10
frm += 1
return frm % 0x10000000 * 16 + subfrm
@staticmethod
def difftime(time1, time2):
time1 = np.uint32(time1)
time2 = np.uint32(time2)
subfrm1 = time1 % 16
subfrm2 = time2 % 16
frm = time1 // 16 + 0x10000000 - time2 // 16
if subfrm1 >= subfrm2:
subfrm = subfrm1 - subfrm2
else:
subfrm = subfrm1 + 10 - subfrm2
frm = frm - 1
frm = frm % 0x10000000
return frm * 16 + subfrm
@staticmethod
def dectime(hextime):
hextime = np.uint32(hextime)
return hextime // 16 * 10 + hextime % 16
@staticmethod
def hextime(dectime):
dectime = np.uint32(dectime)
return dectime // 10 * 16 + dectime % 10
def gen_of_cols(self, cols=None, val_filter=None):
'''获取指定列的生成器
Args:
cols: 列名列表,如果为None,表示获取全部列
col_val_filter: 过滤条件,字典格式{'colname': [val1,]}
Yields:
生成器格式
'''
filters = {}
if val_filter:
filters.update(val_filter)
if self._id_filter:
filters.update(self._id_filter)
totcols = cols
aircol = 'AirTime'
if self._time_filter and aircol not in totcols :
totcols.append(aircol)
if cols is not None:
totcols = list(set.union(set(filters), set(cols)))
for file in self._files:
filename = os.path.join(self._directory, file)
data = pd.read_csv(filename, na_values='-', usecols=totcols)
if self._time_filter:
start, end = self._time_filter
data = data[(start<= data[aircol]) & (data[aircol]<=end)]
if not filters:
yield data
continue
mask = data[list(filters.keys())].isin(filters).all(1)
if cols is not None:
yield data[mask][cols]
else:
yield data[mask]
def get_filename_by_airtime(self, airtime):
'''根据指定时间获取文件名
Args:
airtime:
Returns:
文件名
'''
col = ['AirTime']
for file in self._files:
filename = os.path.join(self._directory, file)
data = | pd.read_csv(filename, na_values='-', usecols=col) | pandas.read_csv |
"""
This script is interesting but has abug that needs fixing.
"""
import seaborn as sns
from tqdm import tqdm
import matplotlib.pyplot as plt
import ast
from sklearn.model_selection import StratifiedKFold
import os
import warnings
from datetime import datetime
from collections import Counter
import gc
from pathlib import Path
from dataclasses import dataclass, field
from typing import Optional
from itertools import chain
from functools import partial
from ast import literal_eval
import torch.nn as f
import torch
from sklearn.metrics import precision_recall_fscore_support
# import plotly.express as px
# import plotly.offline as pyo
# pyo.init_notebook_mode()
import pandas as pd
import numpy as np
# from datasets import load_dataset, Dataset
import torch
from torch.utils.data import Dataset, DataLoader
from transformers import (
AutoConfig,
AutoTokenizer,
DataCollatorForTokenClassification,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
logging,
)
from transformers.modeling_outputs import TokenClassifierOutput
# environment
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
warnings.filterwarnings("ignore")
logging.set_verbosity(logging.WARNING)
TOKENIZERS_PARALLELISM = True
# data
df = pd.read_csv("data/features.csv")
print(df.info())
print(df.head())
notes = pd.read_csv('data/patient_notes.csv')
print(notes.info())
print(notes.head(3))
train = | pd.read_csv("data/train.csv") | pandas.read_csv |
from __future__ import annotations
import numpy as np
import pandas as pd
from sklearn import datasets
from IMLearn.metrics import mean_square_error
from IMLearn.utils import split_train_test
from IMLearn.model_selection import cross_validate
from IMLearn.learners.regressors import PolynomialFitting, LinearRegression, RidgeRegression
from sklearn.linear_model import Lasso
from utils import *
import plotly.graph_objects as go
from plotly.subplots import make_subplots
def select_polynomial_degree(n_samples: int = 100, noise: float = 5):
"""
Simulate data from a polynomial model and use cross-validation to select the best fitting degree
Parameters
----------
n_samples: int, default=100
Number of samples to generate
noise: float, default = 5
Noise level to simulate in responses
"""
# Question 1 - Generate dataset for model f(x)=(x+3)(x+2)(x+1)(x-1)(x-2) + eps for eps Gaussian noise
# and split into training- and testing portions
X = np.linspace(-1.2, 2, n_samples)
epsilon = np.random.normal(0, noise, n_samples)
polynom = lambda x: (x + 3) * (x + 2) * (x + 1) * (x - 1) * (x - 2)
noiseless_y = polynom(X)
y = noiseless_y + epsilon
train_X, train_y, test_X, test_y = split_train_test(pd.DataFrame(X), | pd.Series(y) | pandas.Series |
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
url = "https://raw.githubusercontent.com/CSSEGISandData/COVID-19/master/" + \
"csse_covid_19_data/csse_covid_19_time_series/time_series_covid19_{}_global.csv"
deaths = pd.read_csv(url.format('deaths'), index_col=1)
cases = pd.read_csv(url.format('confirmed'), index_col=1)
def get_country_data(country):
c = cases.loc[country]
if c.ndim > 1:
c = c.sum()
c = c.iloc[3:]
c.index= | pd.to_datetime(c.index, errors="coerce", format="%m/%d/%y") | pandas.to_datetime |
import streamlit as st
import pandas as pd
from utils import *
from modules import *
import os
import numpy as np
import altair as alt
import plotly.graph_objects as go
absolute_path = os.path.abspath(__file__)
path = os.path.dirname(absolute_path)
ipl_ball = pd.read_csv(path+'/2008_2021_updated_ball.csv')
ipl_match = pd.read_csv(path+'/2008_2021_data_matches.csv')
season_list = ['2007/08','2009','2009/10','2011','2012','2013','2014','2015','2016','2017','2018','2019','2020/21','2021']
season_dict = {2008:'2007/08',2009:'2009',2010:'2009/10',2011:'2011',2012:'2012',2013:'2013',2014:'2014',2015:'2015',2016:'2016',2017:'2017',2018:'2018',2019:'2019',2020:'2020/21',2021:'2021'}
team_dict = { 'Delhi Capitals':'Delhi Daredevils' , 'Punjab Kings':'Kings XI Punjab' }
GRID = True
WIDTH = 0
def formatt(df):
temp = []
for i in df.columns:
if i in ['SR', 'Avg', 'Eco','Win Percent']:
temp.append(i)
return df.style.format(subset=temp, formatter="{:.2f}")
def player_career():
st.title('Player Career')
player = st.sidebar.selectbox('Player', get_player_name(ipl_ball))
bat = pd.DataFrame(get_run( ipl_ball, [player] ))
bat = bat.drop(['batsman'], axis = 1)
bat['M'] = 'IPL'
bat = bat.set_index('M')
bowl = pd.DataFrame(get_wicket( ipl_ball, [player] ))
bowl = bowl.drop(['bowler'], axis = 1)
bowl['M'] = 'IPL'
bowl = bowl.set_index('M')
st.subheader('Batting Career')
bat['Runs'] = bat.apply(lambda x: "{:,}".format(x['Runs']), axis=1)
st.table(formatt(bat))
st.subheader('Bowling Career')
st.table(formatt(bowl))
result = pd.DataFrame()
for i in season_list:
match = ipl_match[ipl_match['season'] == i]
id = list(match['id'].unique())
ball = ipl_ball[ipl_ball['id'].isin(id)]
temp = get_run(ball, batsman = [player], choice = ['Innings','Runs','HS'])
temp['year'] = i
result = pd.concat([result,pd.DataFrame(temp)])
st.subheader('Yearly Performance')
result = result.drop(['batsman'], axis = 1)
c = alt.Chart(result).mark_trail().encode(
x='year:T',
y='Runs:Q',
size = 'Runs:Q',
tooltip=['Runs:Q']
).configure_axis(
grid= GRID
).configure_view(
strokeWidth= WIDTH
).interactive()
st.altair_chart(c, use_container_width=True)
result_bat = result.set_index('year')
result = pd.DataFrame()
for i in season_list:
match = ipl_match[ipl_match['season'] == i]
id = list(match['id'].unique())
ball = ipl_ball[ipl_ball['id'].isin(id)]
temp = get_wicket(ball, bowler= [player], choice = ['Innings','Wickets','BBI'])
temp['year'] = i
result = pd.concat([result,pd.DataFrame(temp)])
result = result.drop(['bowler'], axis = 1)
c = alt.Chart(result).mark_trail().encode(
x='year:T',
y='Wickets:Q',
size = 'Wickets:Q',
tooltip = ['Wickets:Q'],
color=alt.value("#FFAA00")
).configure_axis(
grid= GRID
).configure_view(
strokeWidth= WIDTH
).interactive()
st.altair_chart(c, use_container_width=True)
result_bowl = result.set_index('year')
result = pd.merge(result_bat, result_bowl, how = 'outer', left_on = ['year'], right_on = ['year'])
result = result[ ~ ((result['Innings_x'] == 0) & (result['Innings_y'] == 0))]
result = result.rename(columns = {'Innings_x':'Innings Bat' ,'Innings_y':'Innings Bowl' })
st.table(formatt(result))
def sesonal_stat():
st.title('Sesonal Stats')
result = | pd.DataFrame() | pandas.DataFrame |
import pandas as pd
import tkinter as tk
from tkinter import filedialog
from tkinter import messagebox
import os
import joblib
import json, codecs
import numpy as np
from sklearn.cross_decomposition import PLSRegression
from datetime import date
import Classes.Configurations as cfg
from Classes import Configurations
def import_excel_data():
try:
root = tk.Tk()
root.withdraw()
messagebox.showinfo("Import data (Excel file)", "Choose your matrix of inputs.")
file_path = filedialog.askopenfilename()
file_name = os.path.basename(file_path)
labels = pd.read_excel(file_path, 0)
label_df = pd.DataFrame(labels)
xdata = pd.read_excel(file_path, 1)
x_df = pd.DataFrame(xdata)
ydata = pd.read_excel(file_path, 2)
y_df = pd.DataFrame(ydata)
return label_df, x_df, y_df, file_name
except:
print("An exception occurred while importing excel file.")
def import_excel_data_single():
try:
root = tk.Tk()
root.withdraw()
messagebox.showinfo("Import data (Excel file)", "Choose your matrix of inputs.")
file_path = filedialog.askopenfilename()
file_name = os.path.basename(file_path)
data = pd.read_excel(file_path, 0)
data_df = pd.DataFrame(data)
return data_df
except:
print("An exception occurred while importing excel file.")
def import_excel_data_epo():
try:
root = tk.Tk()
root.withdraw()
messagebox.showinfo("Import data (Excel file)", "Choose your matrix of inputs.")
file_path = filedialog.askopenfilename()
file_name = os.path.basename(file_path)
labels = pd.read_excel(file_path, 0)
label_df = pd.DataFrame(labels)
xdatau = pd.read_excel(file_path, 1)
x_df_u = pd.DataFrame(xdatau)
xdatas = pd.read_excel(file_path, 2)
x_df_s = pd.DataFrame(xdatas)
ydata = pd.read_excel(file_path, 3)
y = pd.DataFrame(ydata)
return label_df, x_df_u, x_df_s, y, file_name
except:
print("An exception occurred while importing excel file.")
def data_to_excel(file_name, all_df, outliers_df):
path = os.path.expanduser("~/Desktop") + '/foodscienceml' + '/' + file_name.replace('.xlsx', '').replace('.xls', '')
if not os.path.exists(path):
os.makedirs(path)
writer = pd.ExcelWriter(path + '\Mahalanobis_report.xlsx')
all_data = pd.DataFrame(all_df)
all_data.to_excel(writer, 'Selected data')
stats_all = pd.DataFrame(all_data.describe())
stats_all.to_excel(writer, 'Stats selected data')
outliers_data = | pd.DataFrame(outliers_df) | pandas.DataFrame |
# -*- coding: utf-8 -*-
import warnings
from datetime import datetime, timedelta
import pytest
import numpy as np
import pandas as pd
import pandas.util.testing as tm
from pandas.errors import PerformanceWarning
from pandas import (Timestamp, Timedelta, Series,
DatetimeIndex, TimedeltaIndex,
date_range)
@pytest.fixture(params=[None, 'UTC', 'Asia/Tokyo',
'US/Eastern', 'dateutil/Asia/Singapore',
'dateutil/US/Pacific'])
def tz(request):
return request.param
@pytest.fixture(params=[pd.offsets.Hour(2), timedelta(hours=2),
np.timedelta64(2, 'h'), Timedelta(hours=2)],
ids=str)
def delta(request):
# Several ways of representing two hours
return request.param
@pytest.fixture(
params=[
datetime(2011, 1, 1),
DatetimeIndex(['2011-01-01', '2011-01-02']),
DatetimeIndex(['2011-01-01', '2011-01-02']).tz_localize('US/Eastern'),
np.datetime64('2011-01-01'),
Timestamp('2011-01-01')],
ids=lambda x: type(x).__name__)
def addend(request):
return request.param
class TestDatetimeIndexArithmetic(object):
def test_dti_add_timestamp_raises(self):
idx = DatetimeIndex(['2011-01-01', '2011-01-02'])
msg = "cannot add DatetimeIndex and Timestamp"
with tm.assert_raises_regex(TypeError, msg):
idx + Timestamp('2011-01-01')
def test_dti_radd_timestamp_raises(self):
idx = DatetimeIndex(['2011-01-01', '2011-01-02'])
msg = "cannot add DatetimeIndex and Timestamp"
with tm.assert_raises_regex(TypeError, msg):
Timestamp('2011-01-01') + idx
# -------------------------------------------------------------
# Binary operations DatetimeIndex and int
def test_dti_add_int(self, tz, one):
# Variants of `one` for #19012
rng = pd.date_range('2000-01-01 09:00', freq='H',
periods=10, tz=tz)
result = rng + one
expected = pd.date_range('2000-01-01 10:00', freq='H',
periods=10, tz=tz)
tm.assert_index_equal(result, expected)
def test_dti_iadd_int(self, tz, one):
rng = pd.date_range('2000-01-01 09:00', freq='H',
periods=10, tz=tz)
expected = pd.date_range('2000-01-01 10:00', freq='H',
periods=10, tz=tz)
rng += one
tm.assert_index_equal(rng, expected)
def test_dti_sub_int(self, tz, one):
rng = pd.date_range('2000-01-01 09:00', freq='H',
periods=10, tz=tz)
result = rng - one
expected = pd.date_range('2000-01-01 08:00', freq='H',
periods=10, tz=tz)
tm.assert_index_equal(result, expected)
def test_dti_isub_int(self, tz, one):
rng = pd.date_range('2000-01-01 09:00', freq='H',
periods=10, tz=tz)
expected = pd.date_range('2000-01-01 08:00', freq='H',
periods=10, tz=tz)
rng -= one
tm.assert_index_equal(rng, expected)
# -------------------------------------------------------------
# Binary operations DatetimeIndex and timedelta-like
def test_dti_add_timedeltalike(self, tz, delta):
rng = pd.date_range('2000-01-01', '2000-02-01', tz=tz)
result = rng + delta
expected = pd.date_range('2000-01-01 02:00',
'2000-02-01 02:00', tz=tz)
tm.assert_index_equal(result, expected)
def test_dti_iadd_timedeltalike(self, tz, delta):
rng = pd.date_range('2000-01-01', '2000-02-01', tz=tz)
expected = pd.date_range('2000-01-01 02:00',
'2000-02-01 02:00', tz=tz)
rng += delta
tm.assert_index_equal(rng, expected)
def test_dti_sub_timedeltalike(self, tz, delta):
rng = pd.date_range('2000-01-01', '2000-02-01', tz=tz)
expected = pd.date_range('1999-12-31 22:00',
'2000-01-31 22:00', tz=tz)
result = rng - delta
tm.assert_index_equal(result, expected)
def test_dti_isub_timedeltalike(self, tz, delta):
rng = pd.date_range('2000-01-01', '2000-02-01', tz=tz)
expected = pd.date_range('1999-12-31 22:00',
'2000-01-31 22:00', tz=tz)
rng -= delta
tm.assert_index_equal(rng, expected)
# -------------------------------------------------------------
# Binary operations DatetimeIndex and TimedeltaIndex/array
def test_dti_add_tdi(self, tz):
# GH 17558
dti = DatetimeIndex([Timestamp('2017-01-01', tz=tz)] * 10)
tdi = pd.timedelta_range('0 days', periods=10)
expected = pd.date_range('2017-01-01', periods=10, tz=tz)
# add with TimdeltaIndex
result = dti + tdi
tm.assert_index_equal(result, expected)
result = tdi + dti
tm.assert_index_equal(result, expected)
# add with timedelta64 array
result = dti + tdi.values
tm.assert_index_equal(result, expected)
result = tdi.values + dti
tm.assert_index_equal(result, expected)
def test_dti_iadd_tdi(self, tz):
# GH 17558
dti = DatetimeIndex([Timestamp('2017-01-01', tz=tz)] * 10)
tdi = pd.timedelta_range('0 days', periods=10)
expected = pd.date_range('2017-01-01', periods=10, tz=tz)
# iadd with TimdeltaIndex
result = DatetimeIndex([Timestamp('2017-01-01', tz=tz)] * 10)
result += tdi
tm.assert_index_equal(result, expected)
result = pd.timedelta_range('0 days', periods=10)
result += dti
tm.assert_index_equal(result, expected)
# iadd with timedelta64 array
result = DatetimeIndex([Timestamp('2017-01-01', tz=tz)] * 10)
result += tdi.values
tm.assert_index_equal(result, expected)
result = pd.timedelta_range('0 days', periods=10)
result += dti
tm.assert_index_equal(result, expected)
def test_dti_sub_tdi(self, tz):
# GH 17558
dti = DatetimeIndex([Timestamp('2017-01-01', tz=tz)] * 10)
tdi = pd.timedelta_range('0 days', periods=10)
expected = pd.date_range('2017-01-01', periods=10, tz=tz, freq='-1D')
# sub with TimedeltaIndex
result = dti - tdi
tm.assert_index_equal(result, expected)
msg = 'cannot subtract TimedeltaIndex and DatetimeIndex'
with tm.assert_raises_regex(TypeError, msg):
tdi - dti
# sub with timedelta64 array
result = dti - tdi.values
tm.assert_index_equal(result, expected)
msg = 'cannot perform __neg__ with this index type:'
with tm.assert_raises_regex(TypeError, msg):
tdi.values - dti
def test_dti_isub_tdi(self, tz):
# GH 17558
dti = DatetimeIndex([Timestamp('2017-01-01', tz=tz)] * 10)
tdi = pd.timedelta_range('0 days', periods=10)
expected = pd.date_range('2017-01-01', periods=10, tz=tz, freq='-1D')
# isub with TimedeltaIndex
result = DatetimeIndex([Timestamp('2017-01-01', tz=tz)] * 10)
result -= tdi
tm.assert_index_equal(result, expected)
msg = 'cannot subtract TimedeltaIndex and DatetimeIndex'
with tm.assert_raises_regex(TypeError, msg):
tdi -= dti
# isub with timedelta64 array
result = DatetimeIndex([Timestamp('2017-01-01', tz=tz)] * 10)
result -= tdi.values
tm.assert_index_equal(result, expected)
msg = '|'.join(['cannot perform __neg__ with this index type:',
'ufunc subtract cannot use operands with types'])
with tm.assert_raises_regex(TypeError, msg):
tdi.values -= dti
# -------------------------------------------------------------
# Binary Operations DatetimeIndex and datetime-like
# TODO: A couple other tests belong in this section. Move them in
# A PR where there isn't already a giant diff.
def test_add_datetimelike_and_dti(self, addend):
# GH#9631
dti = DatetimeIndex(['2011-01-01', '2011-01-02'])
msg = 'cannot add DatetimeIndex and {0}'.format(
type(addend).__name__)
with tm.assert_raises_regex(TypeError, msg):
dti + addend
with tm.assert_raises_regex(TypeError, msg):
addend + dti
def test_add_datetimelike_and_dti_tz(self, addend):
# GH#9631
dti_tz = DatetimeIndex(['2011-01-01',
'2011-01-02']).tz_localize('US/Eastern')
msg = 'cannot add DatetimeIndex and {0}'.format(
type(addend).__name__)
with tm.assert_raises_regex(TypeError, msg):
dti_tz + addend
with tm.assert_raises_regex(TypeError, msg):
addend + dti_tz
# -------------------------------------------------------------
def test_sub_dti_dti(self):
# previously performed setop (deprecated in 0.16.0), now changed to
# return subtraction -> TimeDeltaIndex (GH ...)
dti = date_range('20130101', periods=3)
dti_tz = date_range('20130101', periods=3).tz_localize('US/Eastern')
dti_tz2 = date_range('20130101', periods=3).tz_localize('UTC')
expected = TimedeltaIndex([0, 0, 0])
result = dti - dti
tm.assert_index_equal(result, expected)
result = dti_tz - dti_tz
tm.assert_index_equal(result, expected)
with pytest.raises(TypeError):
dti_tz - dti
with pytest.raises(TypeError):
dti - dti_tz
with pytest.raises(TypeError):
dti_tz - dti_tz2
# isub
dti -= dti
tm.assert_index_equal(dti, expected)
# different length raises ValueError
dti1 = date_range('20130101', periods=3)
dti2 = date_range('20130101', periods=4)
with pytest.raises(ValueError):
dti1 - dti2
# NaN propagation
dti1 = DatetimeIndex(['2012-01-01', np.nan, '2012-01-03'])
dti2 = DatetimeIndex(['2012-01-02', '2012-01-03', np.nan])
expected = TimedeltaIndex(['1 days', np.nan, np.nan])
result = dti2 - dti1
tm.assert_index_equal(result, expected)
def test_sub_period(self):
# GH 13078
# not supported, check TypeError
p = pd.Period('2011-01-01', freq='D')
for freq in [None, 'D']:
idx = pd.DatetimeIndex(['2011-01-01', '2011-01-02'], freq=freq)
with pytest.raises(TypeError):
idx - p
with pytest.raises(TypeError):
p - idx
def test_ufunc_coercions(self):
idx = date_range('2011-01-01', periods=3, freq='2D', name='x')
delta = np.timedelta64(1, 'D')
for result in [idx + delta, np.add(idx, delta)]:
assert isinstance(result, DatetimeIndex)
exp = date_range('2011-01-02', periods=3, freq='2D', name='x')
tm.assert_index_equal(result, exp)
assert result.freq == '2D'
for result in [idx - delta, np.subtract(idx, delta)]:
assert isinstance(result, DatetimeIndex)
exp = date_range('2010-12-31', periods=3, freq='2D', name='x')
tm.assert_index_equal(result, exp)
assert result.freq == '2D'
delta = np.array([np.timedelta64(1, 'D'), np.timedelta64(2, 'D'),
np.timedelta64(3, 'D')])
for result in [idx + delta, np.add(idx, delta)]:
assert isinstance(result, DatetimeIndex)
exp = DatetimeIndex(['2011-01-02', '2011-01-05', '2011-01-08'],
freq='3D', name='x')
tm.assert_index_equal(result, exp)
assert result.freq == '3D'
for result in [idx - delta, np.subtract(idx, delta)]:
assert isinstance(result, DatetimeIndex)
exp = DatetimeIndex(['2010-12-31', '2011-01-01', '2011-01-02'],
freq='D', name='x')
tm.assert_index_equal(result, exp)
assert result.freq == 'D'
def test_datetimeindex_sub_timestamp_overflow(self):
dtimax = pd.to_datetime(['now', pd.Timestamp.max])
dtimin = pd.to_datetime(['now', pd.Timestamp.min])
tsneg = Timestamp('1950-01-01')
ts_neg_variants = [tsneg,
tsneg.to_pydatetime(),
tsneg.to_datetime64().astype('datetime64[ns]'),
tsneg.to_datetime64().astype('datetime64[D]')]
tspos = Timestamp('1980-01-01')
ts_pos_variants = [tspos,
tspos.to_pydatetime(),
tspos.to_datetime64().astype('datetime64[ns]'),
tspos.to_datetime64().astype('datetime64[D]')]
for variant in ts_neg_variants:
with pytest.raises(OverflowError):
dtimax - variant
expected = pd.Timestamp.max.value - tspos.value
for variant in ts_pos_variants:
res = dtimax - variant
assert res[1].value == expected
expected = pd.Timestamp.min.value - tsneg.value
for variant in ts_neg_variants:
res = dtimin - variant
assert res[1].value == expected
for variant in ts_pos_variants:
with pytest.raises(OverflowError):
dtimin - variant
@pytest.mark.parametrize('box', [np.array, pd.Index])
def test_dti_add_offset_array(self, tz, box):
# GH#18849
dti = pd.date_range('2017-01-01', periods=2, tz=tz)
other = box([pd.offsets.MonthEnd(), pd.offsets.Day(n=2)])
with tm.assert_produces_warning(PerformanceWarning):
res = dti + other
expected = DatetimeIndex([dti[n] + other[n] for n in range(len(dti))],
name=dti.name, freq='infer')
tm.assert_index_equal(res, expected)
with tm.assert_produces_warning(PerformanceWarning):
res2 = other + dti
| tm.assert_index_equal(res2, expected) | pandas.util.testing.assert_index_equal |
import streamlit as st
import numpy as np
import pandas as pd
import plotly.graph_objects as go
from datetime import datetime
import requests
class DataFetcher:
def __init__(self):
self.url_brazil_general = 'https://covid19-brazil-api.now.sh/api/report/v1/brazil/'
self.url_brazil_states = 'https://covid19-brazil-api.now.sh/api/report/v1'
self.url_world_cases = 'https://pomber.github.io/covid19/timeseries.json'
self.brazil_general_json = requests.get(self.url_brazil_general).json()
self.brazil_states_json = requests.get(self.url_brazil_states).json()
self.world_cases_json = requests.get(self.url_world_cases).json()
def get_apis_status_code(self):
brazil_general = requests.get(self.url_brazil_general).status_code
brazil_states = requests.get(self.url_brazil_states).status_code
world_cases = requests.get(self.url_world_cases).status_code
return brazil_general, brazil_states, world_cases
def get_main_counters(self):
brazil_counters = self.brazil_general_json
confirmed = brazil_counters['data']['confirmed']
deaths = brazil_counters['data']['deaths']
recovered = brazil_counters['data']['recovered']
return confirmed, deaths, recovered
def get_update_time(self):
update_time = self.brazil_general_json['data']['updated_at']
update_time_brazil = pd.to_datetime(update_time) - pd.Timedelta(hours=3)
date = str(update_time_brazil.day) + '/' + str(update_time_brazil.month) + '/' + str(update_time_brazil.year)
time = str(update_time_brazil.hour) + 'hrs'
return date, time
def get_cases_timeline(self):
dates = []
confirmed = []
deaths = []
for day in self.world_cases_json['Brazil']:
dates.append(day['date'])
confirmed.append(day['confirmed'])
deaths.append(day['deaths'])
cases_df = pd.DataFrame({'date': dates, 'confirmed': confirmed, 'deaths': deaths})
cases_df['date'] = pd.to_datetime(cases_df['date'])
cases_df = cases_df[cases_df['date'] >= pd.to_datetime('2020-02-15')]
cases_df['daily'] = cases_df['confirmed'] - cases_df['confirmed'].shift(1)
return cases_df
def get_state_cases(self):
state_name = []
states_sigla = []
cases = []
deaths = []
for state in self.brazil_states_json['data']:
state_name.append(state['state'])
states_sigla.append(state['uf'])
cases.append(state['cases'])
deaths.append(state['deaths'])
states_table = pd.DataFrame({'Estado': state_name, 'Casos Confirmados': cases, 'Mortes': deaths})
states_table['Letalidade'] = np.round((states_table['Mortes'] / states_table['Casos Confirmados'])*100, 2)
states_table['Letalidade'] = states_table['Letalidade'].map(lambda x: str(x) + '%')
siglas_df = pd.DataFrame({'uf': states_sigla, 'cases': cases})
return states_table, siglas_df
def get_states_cases_plot(self):
coord_dict = {
'AC': [ -8.77, -70.55]
, 'AL': [ -9.71, -35.73]
, 'AM': [ -3.07, -61.66]
, 'AP': [ 1.41, -51.77]
, 'BA': [-12.96, -38.51]
, 'CE': [ -3.71, -38.54]
, 'DF': [-15.83, -47.86]
, 'ES': [-19.19, -40.34]
, 'GO': [-16.64, -49.31]
, 'MA': [ -2.55, -44.30]
, 'MT': [-12.64, -55.42]
, 'MS': [-20.51, -54.54]
, 'MG': [-18.10, -44.38]
, 'PA': [ -5.53, -52.29]
, 'PB': [ -7.06, -35.55]
, 'PR': [-24.89, -51.55]
, 'PE': [ -8.28, -35.07]
, 'PI': [ -8.28, -43.68]
, 'RJ': [-22.84, -43.15]
, 'RN': [ -5.22, -36.52]
, 'RO': [-11.22, -62.80]
, 'RS': [-30.01, -51.22]
, 'RR': [ 1.89, -61.22]
, 'SC': [-27.33, -49.44]
, 'SE': [-10.90, -37.07]
, 'SP': [-23.55, -46.64]
, 'TO': [-10.25, -48.25]
}
_, siglas_df = self.get_state_cases()
list_states = [state for state in coord_dict.keys()]
lat_coords = [coord[0] for coord in coord_dict.values()]
long_coords = [coord[1] for coord in coord_dict.values()]
coord_df = | pd.DataFrame({'state': list_states, 'lat': lat_coords, 'long': long_coords}) | pandas.DataFrame |
# Copyright 1999-2021 Alibaba Group Holding Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import random
from collections import OrderedDict
import numpy as np
import pandas as pd
import pytest
try:
import pyarrow as pa
except ImportError: # pragma: no cover
pa = None
from ....config import options, option_context
from ....dataframe import DataFrame
from ....tensor import arange, tensor
from ....tensor.random import rand
from ....tests.core import require_cudf
from ....utils import lazy_import
from ... import eval as mars_eval, cut, qcut
from ...datasource.dataframe import from_pandas as from_pandas_df
from ...datasource.series import from_pandas as from_pandas_series
from ...datasource.index import from_pandas as from_pandas_index
from .. import to_gpu, to_cpu
from ..to_numeric import to_numeric
from ..rebalance import DataFrameRebalance
cudf = lazy_import('cudf', globals=globals())
@require_cudf
def test_to_gpu_execution(setup_gpu):
pdf = pd.DataFrame(np.random.rand(20, 30), index=np.arange(20, 0, -1))
df = from_pandas_df(pdf, chunk_size=(13, 21))
cdf = to_gpu(df)
res = cdf.execute().fetch()
assert isinstance(res, cudf.DataFrame)
pd.testing.assert_frame_equal(res.to_pandas(), pdf)
pseries = pdf.iloc[:, 0]
series = from_pandas_series(pseries)
cseries = series.to_gpu()
res = cseries.execute().fetch()
assert isinstance(res, cudf.Series)
pd.testing.assert_series_equal(res.to_pandas(), pseries)
@require_cudf
def test_to_cpu_execution(setup_gpu):
pdf = pd.DataFrame(np.random.rand(20, 30), index=np.arange(20, 0, -1))
df = from_pandas_df(pdf, chunk_size=(13, 21))
cdf = to_gpu(df)
df2 = to_cpu(cdf)
res = df2.execute().fetch()
assert isinstance(res, pd.DataFrame)
pd.testing.assert_frame_equal(res, pdf)
pseries = pdf.iloc[:, 0]
series = from_pandas_series(pseries, chunk_size=(13, 21))
cseries = to_gpu(series)
series2 = to_cpu(cseries)
res = series2.execute().fetch()
assert isinstance(res, pd.Series)
pd.testing.assert_series_equal(res, pseries)
def test_rechunk_execution(setup):
data = pd.DataFrame(np.random.rand(8, 10))
df = from_pandas_df(pd.DataFrame(data), chunk_size=3)
df2 = df.rechunk((3, 4))
res = df2.execute().fetch()
pd.testing.assert_frame_equal(data, res)
data = pd.DataFrame(np.random.rand(10, 10), index=np.random.randint(-100, 100, size=(10,)),
columns=[np.random.bytes(10) for _ in range(10)])
df = from_pandas_df(data)
df2 = df.rechunk(5)
res = df2.execute().fetch()
pd.testing.assert_frame_equal(data, res)
# test Series rechunk execution.
data = pd.Series(np.random.rand(10,))
series = from_pandas_series(data)
series2 = series.rechunk(3)
res = series2.execute().fetch()
pd.testing.assert_series_equal(data, res)
series2 = series.rechunk(1)
res = series2.execute().fetch()
pd.testing.assert_series_equal(data, res)
# test index rechunk execution
data = pd.Index(np.random.rand(10,))
index = from_pandas_index(data)
index2 = index.rechunk(3)
res = index2.execute().fetch()
pd.testing.assert_index_equal(data, res)
index2 = index.rechunk(1)
res = index2.execute().fetch()
pd.testing.assert_index_equal(data, res)
# test rechunk on mixed typed columns
data = pd.DataFrame({0: [1, 2], 1: [3, 4], 'a': [5, 6]})
df = from_pandas_df(data)
df = df.rechunk((2, 2)).rechunk({1: 3})
res = df.execute().fetch()
pd.testing.assert_frame_equal(data, res)
def test_series_map_execution(setup):
raw = pd.Series(np.arange(10))
s = from_pandas_series(raw, chunk_size=7)
with pytest.raises(ValueError):
# cannot infer dtype, the inferred is int,
# but actually it is float
# just due to nan
s.map({5: 10})
r = s.map({5: 10}, dtype=float)
result = r.execute().fetch()
expected = raw.map({5: 10})
pd.testing.assert_series_equal(result, expected)
r = s.map({i: 10 + i for i in range(7)}, dtype=float)
result = r.execute().fetch()
expected = raw.map({i: 10 + i for i in range(7)})
pd.testing.assert_series_equal(result, expected)
r = s.map({5: 10}, dtype=float, na_action='ignore')
result = r.execute().fetch()
expected = raw.map({5: 10}, na_action='ignore')
pd.testing.assert_series_equal(result, expected)
# dtype can be inferred
r = s.map({5: 10.})
result = r.execute().fetch()
expected = raw.map({5: 10.})
pd.testing.assert_series_equal(result, expected)
r = s.map(lambda x: x + 1, dtype=int)
result = r.execute().fetch()
expected = raw.map(lambda x: x + 1)
pd.testing.assert_series_equal(result, expected)
def f(x: int) -> float:
return x + 1.
# dtype can be inferred for function
r = s.map(f)
result = r.execute().fetch()
expected = raw.map(lambda x: x + 1.)
pd.testing.assert_series_equal(result, expected)
def f(x: int):
return x + 1.
# dtype can be inferred for function
r = s.map(f)
result = r.execute().fetch()
expected = raw.map(lambda x: x + 1.)
pd.testing.assert_series_equal(result, expected)
# test arg is a md.Series
raw2 = pd.Series([10], index=[5])
s2 = from_pandas_series(raw2)
r = s.map(s2, dtype=float)
result = r.execute().fetch()
expected = raw.map(raw2)
pd.testing.assert_series_equal(result, expected)
# test arg is a md.Series, and dtype can be inferred
raw2 = pd.Series([10.], index=[5])
s2 = from_pandas_series(raw2)
r = s.map(s2)
result = r.execute().fetch()
expected = raw.map(raw2)
pd.testing.assert_series_equal(result, expected)
# test str
raw = pd.Series(['a', 'b', 'c', 'd'])
s = from_pandas_series(raw, chunk_size=2)
r = s.map({'c': 'e'})
result = r.execute().fetch()
expected = raw.map({'c': 'e'})
pd.testing.assert_series_equal(result, expected)
# test map index
raw = pd.Index(np.random.rand(7))
idx = from_pandas_index(pd.Index(raw), chunk_size=2)
r = idx.map(f)
result = r.execute().fetch()
expected = raw.map(lambda x: x + 1.)
pd.testing.assert_index_equal(result, expected)
def test_describe_execution(setup):
s_raw = pd.Series(np.random.rand(10))
# test one chunk
series = from_pandas_series(s_raw, chunk_size=10)
r = series.describe()
result = r.execute().fetch()
expected = s_raw.describe()
pd.testing.assert_series_equal(result, expected)
r = series.describe(percentiles=[])
result = r.execute().fetch()
expected = s_raw.describe(percentiles=[])
pd.testing.assert_series_equal(result, expected)
# test multi chunks
series = from_pandas_series(s_raw, chunk_size=3)
r = series.describe()
result = r.execute().fetch()
expected = s_raw.describe()
pd.testing.assert_series_equal(result, expected)
r = series.describe(percentiles=[])
result = r.execute().fetch()
expected = s_raw.describe(percentiles=[])
pd.testing.assert_series_equal(result, expected)
rs = np.random.RandomState(5)
df_raw = pd.DataFrame(rs.rand(10, 4), columns=list('abcd'))
df_raw['e'] = rs.randint(100, size=10)
# test one chunk
df = from_pandas_df(df_raw, chunk_size=10)
r = df.describe()
result = r.execute().fetch()
expected = df_raw.describe()
pd.testing.assert_frame_equal(result, expected)
r = series.describe(percentiles=[], include=np.float64)
result = r.execute().fetch()
expected = s_raw.describe(percentiles=[], include=np.float64)
pd.testing.assert_series_equal(result, expected)
# test multi chunks
df = from_pandas_df(df_raw, chunk_size=3)
r = df.describe()
result = r.execute().fetch()
expected = df_raw.describe()
pd.testing.assert_frame_equal(result, expected)
r = df.describe(percentiles=[], include=np.float64)
result = r.execute().fetch()
expected = df_raw.describe(percentiles=[], include=np.float64)
pd.testing.assert_frame_equal(result, expected)
# test skip percentiles
r = df.describe(percentiles=False, include=np.float64)
result = r.execute().fetch()
expected = df_raw.describe(percentiles=[], include=np.float64)
expected.drop(['50%'], axis=0, inplace=True)
pd.testing.assert_frame_equal(result, expected)
with pytest.raises(ValueError):
df.describe(percentiles=[1.1])
with pytest.raises(ValueError):
# duplicated values
df.describe(percentiles=[0.3, 0.5, 0.3])
# test input dataframe which has unknown shape
df = from_pandas_df(df_raw, chunk_size=3)
df2 = df[df['a'] < 0.5]
r = df2.describe()
result = r.execute().fetch()
expected = df_raw[df_raw['a'] < 0.5].describe()
pd.testing.assert_frame_equal(result, expected)
def test_data_frame_apply_execute(setup):
cols = [chr(ord('A') + i) for i in range(10)]
df_raw = pd.DataFrame(dict((c, [i ** 2 for i in range(20)]) for c in cols))
old_chunk_store_limit = options.chunk_store_limit
try:
options.chunk_store_limit = 20
df = from_pandas_df(df_raw, chunk_size=5)
r = df.apply('ffill')
result = r.execute().fetch()
expected = df_raw.apply('ffill')
pd.testing.assert_frame_equal(result, expected)
r = df.apply(['sum', 'max'])
result = r.execute().fetch()
expected = df_raw.apply(['sum', 'max'])
pd.testing.assert_frame_equal(result, expected)
r = df.apply(np.sqrt)
result = r.execute().fetch()
expected = df_raw.apply(np.sqrt)
pd.testing.assert_frame_equal(result, expected)
r = df.apply(lambda x: pd.Series([1, 2]))
result = r.execute().fetch()
expected = df_raw.apply(lambda x: pd.Series([1, 2]))
pd.testing.assert_frame_equal(result, expected)
r = df.apply(np.sum, axis='index')
result = r.execute().fetch()
expected = df_raw.apply(np.sum, axis='index')
pd.testing.assert_series_equal(result, expected)
r = df.apply(np.sum, axis='columns')
result = r.execute().fetch()
expected = df_raw.apply(np.sum, axis='columns')
pd.testing.assert_series_equal(result, expected)
r = df.apply(lambda x: [1, 2], axis=1)
result = r.execute().fetch()
expected = df_raw.apply(lambda x: [1, 2], axis=1)
pd.testing.assert_series_equal(result, expected)
r = df.apply(lambda x: pd.Series([1, 2], index=['foo', 'bar']), axis=1)
result = r.execute().fetch()
expected = df_raw.apply(lambda x: pd.Series([1, 2], index=['foo', 'bar']), axis=1)
pd.testing.assert_frame_equal(result, expected)
r = df.apply(lambda x: [1, 2], axis=1, result_type='expand')
result = r.execute().fetch()
expected = df_raw.apply(lambda x: [1, 2], axis=1, result_type='expand')
pd.testing.assert_frame_equal(result, expected)
r = df.apply(lambda x: list(range(10)), axis=1, result_type='reduce')
result = r.execute().fetch()
expected = df_raw.apply(lambda x: list(range(10)), axis=1, result_type='reduce')
pd.testing.assert_series_equal(result, expected)
r = df.apply(lambda x: list(range(10)), axis=1, result_type='broadcast')
result = r.execute().fetch()
expected = df_raw.apply(lambda x: list(range(10)), axis=1, result_type='broadcast')
pd.testing.assert_frame_equal(result, expected)
finally:
options.chunk_store_limit = old_chunk_store_limit
def test_series_apply_execute(setup):
idxes = [chr(ord('A') + i) for i in range(20)]
s_raw = pd.Series([i ** 2 for i in range(20)], index=idxes)
series = from_pandas_series(s_raw, chunk_size=5)
r = series.apply('add', args=(1,))
result = r.execute().fetch()
expected = s_raw.apply('add', args=(1,))
pd.testing.assert_series_equal(result, expected)
r = series.apply(['sum', 'max'])
result = r.execute().fetch()
expected = s_raw.apply(['sum', 'max'])
pd.testing.assert_series_equal(result, expected)
r = series.apply(np.sqrt)
result = r.execute().fetch()
expected = s_raw.apply(np.sqrt)
pd.testing.assert_series_equal(result, expected)
r = series.apply('sqrt')
result = r.execute().fetch()
expected = s_raw.apply('sqrt')
pd.testing.assert_series_equal(result, expected)
r = series.apply(lambda x: [x, x + 1], convert_dtype=False)
result = r.execute().fetch()
expected = s_raw.apply(lambda x: [x, x + 1], convert_dtype=False)
pd.testing.assert_series_equal(result, expected)
s_raw2 = pd.Series([np.array([1, 2, 3]), np.array([4, 5, 6])])
series = from_pandas_series(s_raw2)
dtypes = pd.Series([np.dtype(float)] * 3)
r = series.apply(pd.Series, output_type='dataframe',
dtypes=dtypes)
result = r.execute().fetch()
expected = s_raw2.apply(pd.Series)
pd.testing.assert_frame_equal(result, expected)
@pytest.mark.skipif(pa is None, reason='pyarrow not installed')
def test_apply_with_arrow_dtype_execution(setup):
df1 = pd.DataFrame({'a': [1, 2, 1],
'b': ['a', 'b', 'a']})
df = from_pandas_df(df1)
df['b'] = df['b'].astype('Arrow[string]')
r = df.apply(lambda row: str(row[0]) + row[1], axis=1)
result = r.execute().fetch()
expected = df1.apply(lambda row: str(row[0]) + row[1], axis=1)
pd.testing.assert_series_equal(result, expected)
s1 = df1['b']
s = from_pandas_series(s1)
s = s.astype('arrow_string')
r = s.apply(lambda x: x + '_suffix')
result = r.execute().fetch()
expected = s1.apply(lambda x: x + '_suffix')
pd.testing.assert_series_equal(result, expected)
def test_transform_execute(setup):
cols = [chr(ord('A') + i) for i in range(10)]
df_raw = pd.DataFrame(dict((c, [i ** 2 for i in range(20)]) for c in cols))
idx_vals = [chr(ord('A') + i) for i in range(20)]
s_raw = pd.Series([i ** 2 for i in range(20)], index=idx_vals)
def rename_fn(f, new_name):
f.__name__ = new_name
return f
old_chunk_store_limit = options.chunk_store_limit
try:
options.chunk_store_limit = 20
# DATAFRAME CASES
df = from_pandas_df(df_raw, chunk_size=5)
# test transform scenarios on data frames
r = df.transform(lambda x: list(range(len(x))))
result = r.execute().fetch()
expected = df_raw.transform(lambda x: list(range(len(x))))
pd.testing.assert_frame_equal(result, expected)
r = df.transform(lambda x: list(range(len(x))), axis=1)
result = r.execute().fetch()
expected = df_raw.transform(lambda x: list(range(len(x))), axis=1)
pd.testing.assert_frame_equal(result, expected)
r = df.transform(['cumsum', 'cummax', lambda x: x + 1])
result = r.execute().fetch()
expected = df_raw.transform(['cumsum', 'cummax', lambda x: x + 1])
pd.testing.assert_frame_equal(result, expected)
fn_dict = OrderedDict([
('A', 'cumsum'),
('D', ['cumsum', 'cummax']),
('F', lambda x: x + 1),
])
r = df.transform(fn_dict)
result = r.execute().fetch()
expected = df_raw.transform(fn_dict)
pd.testing.assert_frame_equal(result, expected)
r = df.transform(lambda x: x.iloc[:-1], _call_agg=True)
result = r.execute().fetch()
expected = df_raw.agg(lambda x: x.iloc[:-1])
pd.testing.assert_frame_equal(result, expected)
r = df.transform(lambda x: x.iloc[:-1], axis=1, _call_agg=True)
result = r.execute().fetch()
expected = df_raw.agg(lambda x: x.iloc[:-1], axis=1)
pd.testing.assert_frame_equal(result, expected)
fn_list = [rename_fn(lambda x: x.iloc[1:].reset_index(drop=True), 'f1'),
lambda x: x.iloc[:-1].reset_index(drop=True)]
r = df.transform(fn_list, _call_agg=True)
result = r.execute().fetch()
expected = df_raw.agg(fn_list)
pd.testing.assert_frame_equal(result, expected)
r = df.transform(lambda x: x.sum(), _call_agg=True)
result = r.execute().fetch()
expected = df_raw.agg(lambda x: x.sum())
pd.testing.assert_series_equal(result, expected)
fn_dict = OrderedDict([
('A', rename_fn(lambda x: x.iloc[1:].reset_index(drop=True), 'f1')),
('D', [rename_fn(lambda x: x.iloc[1:].reset_index(drop=True), 'f1'),
lambda x: x.iloc[:-1].reset_index(drop=True)]),
('F', lambda x: x.iloc[:-1].reset_index(drop=True)),
])
r = df.transform(fn_dict, _call_agg=True)
result = r.execute().fetch()
expected = df_raw.agg(fn_dict)
pd.testing.assert_frame_equal(result, expected)
# SERIES CASES
series = from_pandas_series(s_raw, chunk_size=5)
# test transform scenarios on series
r = series.transform(lambda x: x + 1)
result = r.execute().fetch()
expected = s_raw.transform(lambda x: x + 1)
pd.testing.assert_series_equal(result, expected)
r = series.transform(['cumsum', lambda x: x + 1])
result = r.execute().fetch()
expected = s_raw.transform(['cumsum', lambda x: x + 1])
pd.testing.assert_frame_equal(result, expected)
# test transform on string dtype
df_raw = pd.DataFrame({'col1': ['str'] * 10, 'col2': ['string'] * 10})
df = from_pandas_df(df_raw, chunk_size=3)
r = df['col1'].transform(lambda x: x + '_suffix')
result = r.execute().fetch()
expected = df_raw['col1'].transform(lambda x: x + '_suffix')
pd.testing.assert_series_equal(result, expected)
r = df.transform(lambda x: x + '_suffix')
result = r.execute().fetch()
expected = df_raw.transform(lambda x: x + '_suffix')
pd.testing.assert_frame_equal(result, expected)
r = df['col2'].transform(lambda x: x + '_suffix', dtype=np.dtype('str'))
result = r.execute().fetch()
expected = df_raw['col2'].transform(lambda x: x + '_suffix')
pd.testing.assert_series_equal(result, expected)
finally:
options.chunk_store_limit = old_chunk_store_limit
@pytest.mark.skipif(pa is None, reason='pyarrow not installed')
def test_transform_with_arrow_dtype_execution(setup):
df1 = pd.DataFrame({'a': [1, 2, 1],
'b': ['a', 'b', 'a']})
df = from_pandas_df(df1)
df['b'] = df['b'].astype('Arrow[string]')
r = df.transform({'b': lambda x: x + '_suffix'})
result = r.execute().fetch()
expected = df1.transform({'b': lambda x: x + '_suffix'})
pd.testing.assert_frame_equal(result, expected)
s1 = df1['b']
s = from_pandas_series(s1)
s = s.astype('arrow_string')
r = s.transform(lambda x: x + '_suffix')
result = r.execute().fetch()
expected = s1.transform(lambda x: x + '_suffix')
pd.testing.assert_series_equal(result, expected)
def test_string_method_execution(setup):
s = pd.Series(['s1,s2', 'ef,', 'dd', np.nan])
s2 = pd.concat([s, s, s])
series = from_pandas_series(s, chunk_size=2)
series2 = from_pandas_series(s2, chunk_size=2)
# test getitem
r = series.str[:3]
result = r.execute().fetch()
expected = s.str[:3]
pd.testing.assert_series_equal(result, expected)
# test split, expand=False
r = series.str.split(',', n=2)
result = r.execute().fetch()
expected = s.str.split(',', n=2)
pd.testing.assert_series_equal(result, expected)
# test split, expand=True
r = series.str.split(',', expand=True, n=1)
result = r.execute().fetch()
expected = s.str.split(',', expand=True, n=1)
pd.testing.assert_frame_equal(result, expected)
# test rsplit
r = series.str.rsplit(',', expand=True, n=1)
result = r.execute().fetch()
expected = s.str.rsplit(',', expand=True, n=1)
pd.testing.assert_frame_equal(result, expected)
# test cat all data
r = series2.str.cat(sep='/', na_rep='e')
result = r.execute().fetch()
expected = s2.str.cat(sep='/', na_rep='e')
assert result == expected
# test cat list
r = series.str.cat(['a', 'b', np.nan, 'c'])
result = r.execute().fetch()
expected = s.str.cat(['a', 'b', np.nan, 'c'])
pd.testing.assert_series_equal(result, expected)
# test cat series
r = series.str.cat(series.str.capitalize(), join='outer')
result = r.execute().fetch()
expected = s.str.cat(s.str.capitalize(), join='outer')
pd.testing.assert_series_equal(result, expected)
# test extractall
r = series.str.extractall(r"(?P<letter>[ab])(?P<digit>\d)")
result = r.execute().fetch()
expected = s.str.extractall(r"(?P<letter>[ab])(?P<digit>\d)")
pd.testing.assert_frame_equal(result, expected)
# test extract, expand=False
r = series.str.extract(r'[ab](\d)', expand=False)
result = r.execute().fetch()
expected = s.str.extract(r'[ab](\d)', expand=False)
pd.testing.assert_series_equal(result, expected)
# test extract, expand=True
r = series.str.extract(r'[ab](\d)', expand=True)
result = r.execute().fetch()
expected = s.str.extract(r'[ab](\d)', expand=True)
pd.testing.assert_frame_equal(result, expected)
def test_datetime_method_execution(setup):
# test datetime
s = pd.Series([pd.Timestamp('2020-1-1'),
pd.Timestamp('2020-2-1'),
np.nan])
series = from_pandas_series(s, chunk_size=2)
r = series.dt.year
result = r.execute().fetch()
expected = s.dt.year
pd.testing.assert_series_equal(result, expected)
r = series.dt.strftime('%m-%d-%Y')
result = r.execute().fetch()
expected = s.dt.strftime('%m-%d-%Y')
pd.testing.assert_series_equal(result, expected)
# test timedelta
s = pd.Series([pd.Timedelta('1 days'),
pd.Timedelta('3 days'),
np.nan])
series = from_pandas_series(s, chunk_size=2)
r = series.dt.days
result = r.execute().fetch()
expected = s.dt.days
pd.testing.assert_series_equal(result, expected)
def test_isin_execution(setup):
# one chunk in multiple chunks
a = pd.Series([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
b = pd.Series([2, 1, 9, 3])
sa = from_pandas_series(a, chunk_size=10)
sb = from_pandas_series(b, chunk_size=2)
result = sa.isin(sb).execute().fetch()
expected = a.isin(b)
pd.testing.assert_series_equal(result, expected)
# multiple chunk in one chunks
a = pd.Series([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
b = pd.Series([2, 1, 9, 3])
sa = from_pandas_series(a, chunk_size=2)
sb = from_pandas_series(b, chunk_size=4)
result = sa.isin(sb).execute().fetch()
expected = a.isin(b)
pd.testing.assert_series_equal(result, expected)
# multiple chunk in multiple chunks
a = pd.Series([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
b = pd.Series([2, 1, 9, 3])
sa = from_pandas_series(a, chunk_size=2)
sb = from_pandas_series(b, chunk_size=2)
result = sa.isin(sb).execute().fetch()
expected = a.isin(b)
pd.testing.assert_series_equal(result, expected)
a = pd.Series([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
b = pd.Series([2, 1, 9, 3])
sa = from_pandas_series(a, chunk_size=2)
result = sa.isin(sb).execute().fetch()
expected = a.isin(b)
pd.testing.assert_series_equal(result, expected)
a = pd.Series([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
b = np.array([2, 1, 9, 3])
sa = from_pandas_series(a, chunk_size=2)
sb = tensor(b, chunk_size=3)
result = sa.isin(sb).execute().fetch()
expected = a.isin(b)
pd.testing.assert_series_equal(result, expected)
a = pd.Series([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
b = {2, 1, 9, 3} # set
sa = from_pandas_series(a, chunk_size=2)
result = sa.isin(sb).execute().fetch()
expected = a.isin(b)
pd.testing.assert_series_equal(result, expected)
rs = np.random.RandomState(0)
raw = pd.DataFrame(rs.randint(1000, size=(10, 3)))
df = from_pandas_df(raw, chunk_size=(5, 2))
# set
b = {2, 1, raw[1][0]}
r = df.isin(b)
result = r.execute().fetch()
expected = raw.isin(b)
pd.testing.assert_frame_equal(result, expected)
# mars object
b = tensor([2, 1, raw[1][0]], chunk_size=2)
r = df.isin(b)
result = r.execute().fetch()
expected = raw.isin([2, 1, raw[1][0]])
pd.testing.assert_frame_equal(result, expected)
# dict
b = {1: tensor([2, 1, raw[1][0]], chunk_size=2),
2: [3, 10]}
r = df.isin(b)
result = r.execute().fetch()
expected = raw.isin({1: [2, 1, raw[1][0]], 2: [3, 10]})
pd.testing.assert_frame_equal(result, expected)
def test_cut_execution(setup):
session = setup
rs = np.random.RandomState(0)
raw = rs.random(15) * 1000
s = pd.Series(raw, index=[f'i{i}' for i in range(15)])
bins = [10, 100, 500]
ii = pd.interval_range(10, 500, 3)
labels = ['a', 'b']
t = tensor(raw, chunk_size=4)
series = from_pandas_series(s, chunk_size=4)
iii = from_pandas_index(ii, chunk_size=2)
# cut on Series
r = cut(series, bins)
result = r.execute().fetch()
pd.testing.assert_series_equal(result, pd.cut(s, bins))
r, b = cut(series, bins, retbins=True)
r_result = r.execute().fetch()
b_result = b.execute().fetch()
r_expected, b_expected = pd.cut(s, bins, retbins=True)
pd.testing.assert_series_equal(r_result, r_expected)
np.testing.assert_array_equal(b_result, b_expected)
# cut on tensor
r = cut(t, bins)
# result and expected is array whose dtype is CategoricalDtype
result = r.execute().fetch()
expected = pd.cut(raw, bins)
assert len(result) == len(expected)
for r, e in zip(result, expected):
np.testing.assert_equal(r, e)
# one chunk
r = cut(s, tensor(bins, chunk_size=2), right=False, include_lowest=True)
result = r.execute().fetch()
pd.testing.assert_series_equal(result, pd.cut(s, bins, right=False, include_lowest=True))
# test labels
r = cut(t, bins, labels=labels)
# result and expected is array whose dtype is CategoricalDtype
result = r.execute().fetch()
expected = pd.cut(raw, bins, labels=labels)
assert len(result) == len(expected)
for r, e in zip(result, expected):
np.testing.assert_equal(r, e)
r = cut(t, bins, labels=False)
# result and expected is array whose dtype is CategoricalDtype
result = r.execute().fetch()
expected = pd.cut(raw, bins, labels=False)
np.testing.assert_array_equal(result, expected)
# test labels which is tensor
labels_t = tensor(['a', 'b'], chunk_size=1)
r = cut(raw, bins, labels=labels_t, include_lowest=True)
# result and expected is array whose dtype is CategoricalDtype
result = r.execute().fetch()
expected = pd.cut(raw, bins, labels=labels, include_lowest=True)
assert len(result) == len(expected)
for r, e in zip(result, expected):
np.testing.assert_equal(r, e)
# test labels=False
r, b = cut(raw, ii, labels=False, retbins=True)
# result and expected is array whose dtype is CategoricalDtype
r_result, b_result = session.fetch(*session.execute(r, b))
r_expected, b_expected = pd.cut(raw, ii, labels=False, retbins=True)
for r, e in zip(r_result, r_expected):
np.testing.assert_equal(r, e)
pd.testing.assert_index_equal(b_result, b_expected)
# test bins which is md.IntervalIndex
r, b = cut(series, iii, labels=tensor(labels, chunk_size=1), retbins=True)
r_result = r.execute().fetch()
b_result = b.execute().fetch()
r_expected, b_expected = pd.cut(s, ii, labels=labels, retbins=True)
pd.testing.assert_series_equal(r_result, r_expected)
pd.testing.assert_index_equal(b_result, b_expected)
# test duplicates
bins2 = [0, 2, 4, 6, 10, 10]
r, b = cut(s, bins2, labels=False, retbins=True,
right=False, duplicates='drop')
r_result = r.execute().fetch()
b_result = b.execute().fetch()
r_expected, b_expected = pd.cut(s, bins2, labels=False, retbins=True,
right=False, duplicates='drop')
pd.testing.assert_series_equal(r_result, r_expected)
np.testing.assert_array_equal(b_result, b_expected)
# test integer bins
r = cut(series, 3)
result = r.execute().fetch()
pd.testing.assert_series_equal(result, pd.cut(s, 3))
r, b = cut(series, 3, right=False, retbins=True)
r_result, b_result = session.fetch(*session.execute(r, b))
r_expected, b_expected = pd.cut(s, 3, right=False, retbins=True)
pd.testing.assert_series_equal(r_result, r_expected)
np.testing.assert_array_equal(b_result, b_expected)
# test min max same
s2 = pd.Series([1.1] * 15)
r = cut(s2, 3)
result = r.execute().fetch()
pd.testing.assert_series_equal(result, pd.cut(s2, 3))
# test inf exist
s3 = s2.copy()
s3[-1] = np.inf
with pytest.raises(ValueError):
cut(s3, 3).execute()
def test_transpose_execution(setup):
raw = pd.DataFrame({"a": ['1', '2', '3'], "b": ['5', '-6', '7'], "c": ['1', '2', '3']})
# test 1 chunk
df = from_pandas_df(raw)
result = df.transpose().execute().fetch()
pd.testing.assert_frame_equal(result, raw.transpose())
# test multi chunks
df = from_pandas_df(raw, chunk_size=2)
result = df.transpose().execute().fetch()
pd.testing.assert_frame_equal(result, raw.transpose())
df = from_pandas_df(raw, chunk_size=2)
result = df.T.execute().fetch()
pd.testing.assert_frame_equal(result, raw.transpose())
# dtypes are varied
raw = pd.DataFrame({"a": [1.1, 2.2, 3.3], "b": [5, -6, 7], "c": [1, 2, 3]})
df = from_pandas_df(raw, chunk_size=2)
result = df.transpose().execute().fetch()
pd.testing.assert_frame_equal(result, raw.transpose())
raw = pd.DataFrame({"a": [1.1, 2.2, 3.3], "b": ['5', '-6', '7']})
df = from_pandas_df(raw, chunk_size=2)
result = df.transpose().execute().fetch()
pd.testing.assert_frame_equal(result, raw.transpose())
# Transposing from results of other operands
raw = pd.DataFrame(np.arange(0, 100).reshape(10, 10))
df = DataFrame(arange(0, 100, chunk_size=5).reshape(10, 10))
result = df.transpose().execute().fetch()
pd.testing.assert_frame_equal(result, raw.transpose())
df = DataFrame(rand(100, 100, chunk_size=10))
raw = df.to_pandas()
result = df.transpose().execute().fetch()
pd.testing.assert_frame_equal(result, raw.transpose())
def test_to_numeric_execition(setup):
rs = np.random.RandomState(0)
s = pd.Series(rs.randint(5, size=100))
s[rs.randint(100)] = np.nan
# test 1 chunk
series = from_pandas_series(s)
r = to_numeric(series)
pd.testing.assert_series_equal(r.execute().fetch(),
pd.to_numeric(s))
# test multi chunks
series = from_pandas_series(s, chunk_size=20)
r = to_numeric(series)
pd.testing.assert_series_equal(r.execute().fetch(),
pd.to_numeric(s))
# test object dtype
s = pd.Series(['1.0', 2, -3, '2.0'])
series = from_pandas_series(s)
r = to_numeric(series)
pd.testing.assert_series_equal(r.execute().fetch(),
pd.to_numeric(s))
# test errors and downcast
s = pd.Series(['appple', 2, -3, '2.0'])
series = from_pandas_series(s)
r = to_numeric(series, errors='ignore', downcast='signed')
pd.testing.assert_series_equal(r.execute().fetch(),
pd.to_numeric(s, errors='ignore', downcast='signed'))
# test list data
l = ['1.0', 2, -3, '2.0']
r = to_numeric(l)
np.testing.assert_array_equal(r.execute().fetch(),
pd.to_numeric(l))
def test_q_cut_execution(setup):
rs = np.random.RandomState(0)
raw = rs.random(15) * 1000
s = pd.Series(raw, index=[f'i{i}' for i in range(15)])
series = from_pandas_series(s)
r = qcut(series, 3)
result = r.execute().fetch()
expected = pd.qcut(s, 3)
pd.testing.assert_series_equal(result, expected)
r = qcut(s, 3)
result = r.execute().fetch()
expected = pd.qcut(s, 3)
pd.testing.assert_series_equal(result, expected)
series = from_pandas_series(s)
r = qcut(series, [0.3, 0.5, 0.7])
result = r.execute().fetch()
expected = pd.qcut(s, [0.3, 0.5, 0.7])
pd.testing.assert_series_equal(result, expected)
r = qcut(range(5), 3)
result = r.execute().fetch()
expected = pd.qcut(range(5), 3)
assert isinstance(result, type(expected))
pd.testing.assert_series_equal(pd.Series(result),
pd.Series(expected))
r = qcut(range(5), [0.2, 0.5])
result = r.execute().fetch()
expected = pd.qcut(range(5), [0.2, 0.5])
assert isinstance(result, type(expected))
pd.testing.assert_series_equal(pd.Series(result),
pd.Series(expected))
r = qcut(range(5), tensor([0.2, 0.5]))
result = r.execute().fetch()
expected = pd.qcut(range(5), [0.2, 0.5])
assert isinstance(result, type(expected))
pd.testing.assert_series_equal(pd.Series(result),
pd.Series(expected))
def test_shift_execution(setup):
# test dataframe
rs = np.random.RandomState(0)
raw = pd.DataFrame(rs.randint(1000, size=(10, 8)),
columns=['col' + str(i + 1) for i in range(8)])
df = from_pandas_df(raw, chunk_size=5)
for periods in (2, -2, 6, -6):
for axis in (0, 1):
for fill_value in (None, 0, 1.):
r = df.shift(periods=periods, axis=axis,
fill_value=fill_value)
try:
result = r.execute().fetch()
expected = raw.shift(periods=periods, axis=axis,
fill_value=fill_value)
pd.testing.assert_frame_equal(result, expected, check_dtype=False)
except AssertionError as e: # pragma: no cover
raise AssertionError(
f'Failed when periods: {periods}, axis: {axis}, fill_value: {fill_value}'
) from e
raw2 = raw.copy()
raw2.index = pd.date_range('2020-1-1', periods=10)
raw2.columns = pd.date_range('2020-3-1', periods=8)
df2 = from_pandas_df(raw2, chunk_size=5)
# test freq not None
for periods in (2, -2):
for axis in (0, 1):
for fill_value in (None, 0, 1.):
r = df2.shift(periods=periods, freq='D', axis=axis,
fill_value=fill_value)
try:
result = r.execute().fetch()
expected = raw2.shift(periods=periods, freq='D', axis=axis,
fill_value=fill_value)
pd.testing.assert_frame_equal(result, expected)
except AssertionError as e: # pragma: no cover
raise AssertionError(
f'Failed when periods: {periods}, axis: {axis}, fill_value: {fill_value}') from e
# test tshift
r = df2.tshift(periods=1)
result = r.execute().fetch()
expected = raw2.tshift(periods=1)
pd.testing.assert_frame_equal(result, expected)
with pytest.raises(ValueError):
_ = df.tshift(periods=1)
# test series
s = raw.iloc[:, 0]
series = from_pandas_series(s, chunk_size=5)
for periods in (0, 2, -2, 6, -6):
for fill_value in (None, 0, 1.):
r = series.shift(periods=periods, fill_value=fill_value)
try:
result = r.execute().fetch()
expected = s.shift(periods=periods, fill_value=fill_value)
pd.testing.assert_series_equal(result, expected)
except AssertionError as e: # pragma: no cover
raise AssertionError(
f'Failed when periods: {periods}, fill_value: {fill_value}') from e
s2 = raw2.iloc[:, 0]
# test freq not None
series2 = from_pandas_series(s2, chunk_size=5)
for periods in (2, -2):
for fill_value in (None, 0, 1.):
r = series2.shift(periods=periods, freq='D', fill_value=fill_value)
try:
result = r.execute().fetch()
expected = s2.shift(periods=periods, freq='D', fill_value=fill_value)
pd.testing.assert_series_equal(result, expected)
except AssertionError as e: # pragma: no cover
raise AssertionError(
f'Failed when periods: {periods}, fill_value: {fill_value}') from e
def test_diff_execution(setup):
rs = np.random.RandomState(0)
raw = pd.DataFrame(rs.randint(1000, size=(10, 8)),
columns=['col' + str(i + 1) for i in range(8)])
raw1 = raw.copy()
raw1['col4'] = raw1['col4'] < 400
r = from_pandas_df(raw1, chunk_size=(10, 5)).diff(-1)
pd.testing.assert_frame_equal(r.execute().fetch(),
raw1.diff(-1))
r = from_pandas_df(raw1, chunk_size=5).diff(-1)
pd.testing.assert_frame_equal(r.execute().fetch(),
raw1.diff(-1))
r = from_pandas_df(raw, chunk_size=(5, 8)).diff(1, axis=1)
pd.testing.assert_frame_equal(r.execute().fetch(),
raw.diff(1, axis=1))
r = from_pandas_df(raw, chunk_size=5).diff(1, axis=1)
pd.testing.assert_frame_equal(r.execute().fetch(),
raw.diff(1, axis=1), check_dtype=False)
# test series
s = raw.iloc[:, 0]
s1 = s.copy() < 400
r = from_pandas_series(s, chunk_size=10).diff(-1)
pd.testing.assert_series_equal(r.execute().fetch(),
s.diff(-1))
r = from_pandas_series(s, chunk_size=5).diff(-1)
pd.testing.assert_series_equal(r.execute().fetch(),
s.diff(-1))
r = from_pandas_series(s1, chunk_size=5).diff(1)
pd.testing.assert_series_equal(r.execute().fetch(),
s1.diff(1))
def test_value_counts_execution(setup):
rs = np.random.RandomState(0)
s = pd.Series(rs.randint(5, size=100), name='s')
s[rs.randint(100)] = np.nan
# test 1 chunk
series = from_pandas_series(s, chunk_size=100)
r = series.value_counts()
pd.testing.assert_series_equal(r.execute().fetch(),
s.value_counts())
r = series.value_counts(bins=5, normalize=True)
pd.testing.assert_series_equal(r.execute().fetch(),
s.value_counts(bins=5, normalize=True))
# test multi chunks
series = from_pandas_series(s, chunk_size=30)
r = series.value_counts(method='tree')
pd.testing.assert_series_equal(r.execute().fetch(),
s.value_counts())
r = series.value_counts(method='tree', normalize=True)
pd.testing.assert_series_equal(r.execute().fetch(),
s.value_counts(normalize=True))
# test bins and normalize
r = series.value_counts(method='tree', bins=5, normalize=True)
pd.testing.assert_series_equal(r.execute().fetch(),
s.value_counts(bins=5, normalize=True))
def test_astype(setup):
rs = np.random.RandomState(0)
raw = pd.DataFrame(rs.randint(1000, size=(20, 8)),
columns=['c' + str(i + 1) for i in range(8)])
# single chunk
df = from_pandas_df(raw)
r = df.astype('int32')
result = r.execute().fetch()
expected = raw.astype('int32')
pd.testing.assert_frame_equal(expected, result)
# multiply chunks
df = from_pandas_df(raw, chunk_size=6)
r = df.astype('int32')
result = r.execute().fetch()
expected = raw.astype('int32')
pd.testing.assert_frame_equal(expected, result)
# dict type
df = from_pandas_df(raw, chunk_size=5)
r = df.astype({'c1': 'int32', 'c2': 'float', 'c8': 'str'})
result = r.execute().fetch()
expected = raw.astype({'c1': 'int32', 'c2': 'float', 'c8': 'str'})
pd.testing.assert_frame_equal(expected, result)
# test arrow_string dtype
df = from_pandas_df(raw, chunk_size=8)
r = df.astype({'c1': 'arrow_string'})
result = r.execute().fetch()
expected = raw.astype({'c1': 'arrow_string'})
pd.testing.assert_frame_equal(expected, result)
# test series
s = pd.Series(rs.randint(5, size=20))
series = from_pandas_series(s)
r = series.astype('int32')
result = r.execute().fetch()
expected = s.astype('int32')
pd.testing.assert_series_equal(result, expected)
series = from_pandas_series(s, chunk_size=6)
r = series.astype('arrow_string')
result = r.execute().fetch()
expected = s.astype('arrow_string')
pd.testing.assert_series_equal(result, expected)
# test index
raw = pd.Index(rs.randint(5, size=20))
mix = from_pandas_index(raw)
r = mix.astype('int32')
result = r.execute().fetch()
expected = raw.astype('int32')
pd.testing.assert_index_equal(result, expected)
# multiply chunks
series = from_pandas_series(s, chunk_size=6)
r = series.astype('str')
result = r.execute().fetch()
expected = s.astype('str')
pd.testing.assert_series_equal(result, expected)
# test category
raw = pd.DataFrame(rs.randint(3, size=(20, 8)),
columns=['c' + str(i + 1) for i in range(8)])
df = from_pandas_df(raw)
r = df.astype('category')
result = r.execute().fetch()
expected = raw.astype('category')
pd.testing.assert_frame_equal(expected, result)
df = from_pandas_df(raw)
r = df.astype({'c1': 'category', 'c8': 'int32', 'c4': 'str'})
result = r.execute().fetch()
expected = raw.astype({'c1': 'category', 'c8': 'int32', 'c4': 'str'})
pd.testing.assert_frame_equal(expected, result)
df = from_pandas_df(raw, chunk_size=5)
r = df.astype('category')
result = r.execute().fetch()
expected = raw.astype('category')
pd.testing.assert_frame_equal(expected, result)
df = from_pandas_df(raw, chunk_size=3)
r = df.astype({'c1': 'category', 'c8': 'int32', 'c4': 'str'})
result = r.execute().fetch()
expected = raw.astype({'c1': 'category', 'c8': 'int32', 'c4': 'str'})
pd.testing.assert_frame_equal(expected, result)
df = from_pandas_df(raw, chunk_size=6)
r = df.astype({'c1': 'category', 'c5': 'float', 'c2': 'int32',
'c7': pd.CategoricalDtype([1, 3, 4, 2]),
'c4': pd.CategoricalDtype([1, 3, 2])})
result = r.execute().fetch()
expected = raw.astype({'c1': 'category', 'c5': 'float', 'c2': 'int32',
'c7': pd.CategoricalDtype([1, 3, 4, 2]),
'c4': pd.CategoricalDtype([1, 3, 2])})
pd.testing.assert_frame_equal(expected, result)
df = from_pandas_df(raw, chunk_size=8)
r = df.astype({'c2': 'category'})
result = r.execute().fetch()
expected = raw.astype({'c2': 'category'})
pd.testing.assert_frame_equal(expected, result)
# test series category
raw = pd.Series(np.random.choice(['a', 'b', 'c'], size=(10,)))
series = from_pandas_series(raw, chunk_size=4)
result = series.astype('category').execute().fetch()
expected = raw.astype('category')
pd.testing.assert_series_equal(expected, result)
series = from_pandas_series(raw, chunk_size=3)
result = series.astype(
pd.CategoricalDtype(['a', 'c', 'b']), copy=False).execute().fetch()
expected = raw.astype(pd.CategoricalDtype(['a', 'c', 'b']), copy=False)
pd.testing.assert_series_equal(expected, result)
series = from_pandas_series(raw, chunk_size=6)
result = series.astype(
pd.CategoricalDtype(['a', 'c', 'b', 'd'])).execute().fetch()
expected = raw.astype(pd.CategoricalDtype(['a', 'c', 'b', 'd']))
pd.testing.assert_series_equal(expected, result)
def test_drop(setup):
# test dataframe drop
rs = np.random.RandomState(0)
raw = pd.DataFrame(rs.randint(1000, size=(20, 8)),
columns=['c' + str(i + 1) for i in range(8)])
df = from_pandas_df(raw, chunk_size=3)
columns = ['c2', 'c4', 'c5', 'c6']
index = [3, 6, 7]
r = df.drop(columns=columns, index=index)
pd.testing.assert_frame_equal(r.execute().fetch(),
raw.drop(columns=columns, index=index))
idx_series = from_pandas_series(pd.Series(index))
r = df.drop(idx_series)
pd.testing.assert_frame_equal(r.execute().fetch(),
raw.drop(pd.Series(index)))
df.drop(columns, axis=1, inplace=True)
pd.testing.assert_frame_equal(df.execute().fetch(),
raw.drop(columns, axis=1))
del df['c3']
pd.testing.assert_frame_equal(df.execute().fetch(),
raw.drop(columns + ['c3'], axis=1))
ps = df.pop('c8')
pd.testing.assert_frame_equal(df.execute().fetch(),
raw.drop(columns + ['c3', 'c8'], axis=1))
pd.testing.assert_series_equal(ps.execute().fetch(),
raw['c8'])
# test series drop
raw = pd.Series(rs.randint(1000, size=(20,)))
series = from_pandas_series(raw, chunk_size=3)
r = series.drop(index=index)
pd.testing.assert_series_equal(r.execute().fetch(),
raw.drop(index=index))
# test index drop
ser = pd.Series(range(20))
rs.shuffle(ser)
raw = pd.Index(ser)
idx = from_pandas_index(raw)
r = idx.drop(index)
pd.testing.assert_index_equal(r.execute().fetch(),
raw.drop(index))
def test_melt(setup):
rs = np.random.RandomState(0)
raw = pd.DataFrame(rs.randint(1000, size=(20, 8)),
columns=['c' + str(i + 1) for i in range(8)])
df = from_pandas_df(raw, chunk_size=3)
r = df.melt(id_vars=['c1'], value_vars=['c2', 'c4'])
pd.testing.assert_frame_equal(
r.execute().fetch().sort_values(['c1', 'variable']).reset_index(drop=True),
raw.melt(id_vars=['c1'], value_vars=['c2', 'c4']).sort_values(['c1', 'variable']).reset_index(drop=True)
)
def test_drop_duplicates(setup):
# test dataframe drop
rs = np.random.RandomState(0)
raw = pd.DataFrame(rs.randint(1000, size=(20, 5)),
columns=['c' + str(i + 1) for i in range(5)],
index=['i' + str(j) for j in range(20)])
duplicate_lines = rs.randint(1000, size=5)
for i in [1, 3, 10, 11, 15]:
raw.iloc[i] = duplicate_lines
with option_context({'combine_size': 2}):
# test dataframe
for chunk_size in [(8, 3), (20, 5)]:
df = from_pandas_df(raw, chunk_size=chunk_size)
if chunk_size[0] < len(raw):
methods = ['tree', 'subset_tree', 'shuffle']
else:
# 1 chunk
methods = [None]
for method in methods:
for subset in [None, 'c1', ['c1', 'c2']]:
for keep in ['first', 'last', False]:
for ignore_index in [True, False]:
try:
r = df.drop_duplicates(method=method, subset=subset,
keep=keep, ignore_index=ignore_index)
result = r.execute().fetch()
try:
expected = raw.drop_duplicates(subset=subset,
keep=keep, ignore_index=ignore_index)
except TypeError:
# ignore_index is supported in pandas 1.0
expected = raw.drop_duplicates(subset=subset,
keep=keep)
if ignore_index:
expected.reset_index(drop=True, inplace=True)
pd.testing.assert_frame_equal(result, expected)
except Exception as e: # pragma: no cover
raise AssertionError(
f'failed when method={method}, subset={subset}, '
f'keep={keep}, ignore_index={ignore_index}') from e
# test series and index
s = raw['c3']
ind = pd.Index(s)
for tp, obj in [('series', s), ('index', ind)]:
for chunk_size in [8, 20]:
to_m = from_pandas_series if tp == 'series' else from_pandas_index
mobj = to_m(obj, chunk_size=chunk_size)
if chunk_size < len(obj):
methods = ['tree', 'shuffle']
else:
# 1 chunk
methods = [None]
for method in methods:
for keep in ['first', 'last', False]:
try:
r = mobj.drop_duplicates(method=method, keep=keep)
result = r.execute().fetch()
expected = obj.drop_duplicates(keep=keep)
cmp = pd.testing.assert_series_equal \
if tp == 'series' else pd.testing.assert_index_equal
cmp(result, expected)
except Exception as e: # pragma: no cover
raise AssertionError(f'failed when method={method}, keep={keep}') from e
# test inplace
series = from_pandas_series(s, chunk_size=11)
series.drop_duplicates(inplace=True)
result = series.execute().fetch()
expected = s.drop_duplicates()
pd.testing.assert_series_equal(result, expected)
def test_duplicated(setup):
# test dataframe drop
rs = np.random.RandomState(0)
raw = pd.DataFrame(rs.randint(1000, size=(20, 5)),
columns=['c' + str(i + 1) for i in range(5)],
index=['i' + str(j) for j in range(20)])
duplicate_lines = rs.randint(1000, size=5)
for i in [1, 3, 10, 11, 15]:
raw.iloc[i] = duplicate_lines
with option_context({'combine_size': 2}):
# test dataframe
for chunk_size in [(8, 3), (20, 5)]:
df = from_pandas_df(raw, chunk_size=chunk_size)
if chunk_size[0] < len(raw):
methods = ['tree', 'subset_tree', 'shuffle']
else:
# 1 chunk
methods = [None]
for method in methods:
for subset in [None, 'c1', ['c1', 'c2']]:
for keep in ['first', 'last', False]:
try:
r = df.duplicated(method=method, subset=subset, keep=keep)
result = r.execute().fetch()
expected = raw.duplicated(subset=subset, keep=keep)
pd.testing.assert_series_equal(result, expected)
except Exception as e: # pragma: no cover
raise AssertionError(
f'failed when method={method}, subset={subset}, '
f'keep={keep}') from e
# test series
s = raw['c3']
for tp, obj in [('series', s)]:
for chunk_size in [8, 20]:
to_m = from_pandas_series if tp == 'series' else from_pandas_index
mobj = to_m(obj, chunk_size=chunk_size)
if chunk_size < len(obj):
methods = ['tree', 'shuffle']
else:
# 1 chunk
methods = [None]
for method in methods:
for keep in ['first', 'last', False]:
try:
r = mobj.duplicated(method=method, keep=keep)
result = r.execute().fetch()
expected = obj.duplicated(keep=keep)
cmp = pd.testing.assert_series_equal \
if tp == 'series' else pd.testing.assert_index_equal
cmp(result, expected)
except Exception as e: # pragma: no cover
raise AssertionError(f'failed when method={method}, keep={keep}') from e
def test_memory_usage_execution(setup):
dtypes = ['int64', 'float64', 'complex128', 'object', 'bool']
data = dict([(t, np.ones(shape=500).astype(t))
for t in dtypes])
raw = pd.DataFrame(data)
df = from_pandas_df(raw, chunk_size=(500, 2))
r = df.memory_usage(index=False)
pd.testing.assert_series_equal(r.execute().fetch(),
raw.memory_usage(index=False))
df = from_pandas_df(raw, chunk_size=(500, 2))
r = df.memory_usage(index=True)
pd.testing.assert_series_equal(r.execute().fetch(),
raw.memory_usage(index=True))
df = from_pandas_df(raw, chunk_size=(100, 3))
r = df.memory_usage(index=False)
pd.testing.assert_series_equal(r.execute().fetch(),
raw.memory_usage(index=False))
r = df.memory_usage(index=True)
pd.testing.assert_series_equal(r.execute().fetch(),
raw.memory_usage(index=True))
raw = pd.DataFrame(data, index=np.arange(500).astype('object'))
df = from_pandas_df(raw, chunk_size=(100, 3))
r = df.memory_usage(index=True)
pd.testing.assert_series_equal(r.execute().fetch(),
raw.memory_usage(index=True))
raw = pd.Series(np.ones(shape=500).astype('object'), name='s')
series = from_pandas_series(raw)
r = series.memory_usage(index=True)
assert r.execute().fetch() == raw.memory_usage(index=True)
series = from_pandas_series(raw, chunk_size=100)
r = series.memory_usage(index=False)
assert r.execute().fetch() == raw.memory_usage(index=False)
series = from_pandas_series(raw, chunk_size=100)
r = series.memory_usage(index=True)
assert r.execute().fetch() == raw.memory_usage(index=True)
raw = pd.Series(np.ones(shape=500).astype('object'),
index=np.arange(500).astype('object'), name='s')
series = from_pandas_series(raw, chunk_size=100)
r = series.memory_usage(index=True)
assert r.execute().fetch() == raw.memory_usage(index=True)
raw = pd.Index(np.arange(500), name='s')
index = from_pandas_index(raw)
r = index.memory_usage()
assert r.execute().fetch() == raw.memory_usage()
index = from_pandas_index(raw, chunk_size=100)
r = index.memory_usage()
assert r.execute().fetch() == raw.memory_usage()
def test_select_dtypes_execution(setup):
raw = pd.DataFrame({'a': np.random.rand(10),
'b': np.random.randint(10, size=10)})
df = from_pandas_df(raw, chunk_size=5)
r = df.select_dtypes(include=['float64'])
result = r.execute().fetch()
expected = raw.select_dtypes(include=['float64'])
pd.testing.assert_frame_equal(result, expected)
def test_map_chunk_execution(setup):
raw = pd.DataFrame(np.random.rand(10, 5),
columns=[f'col{i}' for i in range(5)])
df = from_pandas_df(raw, chunk_size=(5, 3))
def f1(pdf):
return pdf + 1
r = df.map_chunk(f1)
result = r.execute().fetch()
expected = raw + 1
| pd.testing.assert_frame_equal(result, expected) | pandas.testing.assert_frame_equal |
import sys
import uncertainty_rfr
import pandas as pd
import numpy as np
from sklearn.ensemble import RandomForestRegressor
import pandas.api.types as ptypes
sys.path.append("../")
df_test = pd.read_csv('./xiaofeng_lasso/unittest_dummy.csv', nrows=5)
X_test, y_test = uncertainty_rfr.descriptors_outputs(df_test, d_start=5,
o=0)
def test_uncertainty_rfr_qfr():
'''
Test function for uncertainty_rfr_qfr. Checks values in actual are 0 when
true_y = False, and that the output df has the correct number of rows.
'''
df_test = pd.read_csv('./xiaofeng_lasso/unittest_dummy.csv')
X = df_test.iloc[range(3)]
err_df_test = \
uncertainty_rfr.uncertainty_rfr_qfr(df_test, X[X.columns[5:]],
Y='none', true_y=False, o=0,
d_start=5)
assert err_df_test['actual'][0] == err_df_test['actual'][1], \
'with true_y = False, all values in "actual" should be equal (0.0)'
assert len(err_df_test) == len(X), \
'length of predicting df should equal length of output df'
def test_descriptors_outputs():
'''
Test function for descriptors_outputs. Checks the shape of X, and checks
that the correct type of value (numeric) is in the columns.
'''
X_test, y_test = uncertainty_rfr.descriptors_outputs(df_test, d_start=5,
o=0)
assert X_test.shape[1] == 5, \
'array shape is incorrect. should be ({}, 7), got ({}, {})'\
.format(X_test.shape[0], X_test.shape[0], X_test.shape[1])
assert all(ptypes.is_numeric_dtype(X_test[col]) for col in
list(X_test[X_test.columns[:]])), \
'data type in columns is of incorrect type, must be numeric'
assert ptypes.is_numeric_dtype(y_test), \
'data type in columns is of incorrect type, must be numeric'
def test_traintest():
'''
Test function for traintest. Checks that the length of X_train and
y_train are the same.
'''
train_idx_test = np.array([0, 1, 2])
test_idx_test = np.array([3, 4])
X_train_test, y_train_test = \
uncertainty_rfr.traintest(X_test, y_test, train_idx_test,
test_idx_test)
assert X_train_test.shape[0] == y_train_test.shape[0], \
'X_train and y_train datapoints do not have the same num of values'
def test_predict_append():
'''
Test function for predict_append. Checks that the func appends one value
at a time, and that the output is a list.
'''
df_test2 = df_test[df_test.columns[:7]]
X_test, y_test = uncertainty_rfr.descriptors_outputs(df_test2, d_start=5,
o=0)
clf_test = RandomForestRegressor(random_state=130)
clf_test.fit(X_test, y_test)
N_arr_test = np.array([[3.98069889, 0.38048415],
[-0.78001682, 0.20058657]])
n_test = 0
preds_test = []
preds_test = uncertainty_rfr.predict_append(clf_test, N_arr_test, n_test,
preds_test)
assert len(preds_test) == 1, \
'preds_test needs to be length 1. Got {}'.format(len(preds_test))
assert isinstance(preds_test, list), \
'preds_test needs to be a list, got {}'.format(type(preds_test))
def test_dft_points():
'''
Test functino for dft_points. Checks that when true_y = True, the output
array is equal to Y_test, adn when true_y = False the output arry is the
same length as N_arr_test.
'''
Y_test = [3, 5]
N_arr_test = np.array([[3.98069889, 0.38048415],
[-0.78001682, 0.20058657]])
Y_arr_test = uncertainty_rfr.dft_points(True, Y_test, N_arr_test)
Y_arr_test2 = uncertainty_rfr.dft_points(False, Y_test, N_arr_test)
assert Y_arr_test[0] == Y_test[0], \
'Y_arr_test got unexpected result. Expected np.array([3,5]), got{}'.\
format(Y_arr_test)
assert len(Y_arr_test2) == N_arr_test.shape[0], \
'length of Y_arr_test2 should be equal to the number of rows of \
N_arr_test. Got Y_arr: {}, N_arr {}'.\
format(len(Y_arr_test2), N_arr_test.shape[0])
def test_uncert_table():
'''
Test function for uncert_table. Checks that the columns in the df are in
the correct place, the length of the output dataframe the correct
length, and that the last three columns in the output df are numeric.
'''
N_test = df_test[df_test.columns[5:]].iloc[[0, 1]]
X = df_test.iloc[[0, 1]]
Y_arr_test = np.array([3, 5])
pred_desc_test = pd.DataFrame(data={'mean': [1, 2], 'std': [3, 4]}).T
err_df = uncertainty_rfr.uncert_table(N_test, X, 1, 2, 3, 4,
Y_arr_test, pred_desc_test)
assert err_df.columns[0] == 'Type', \
'first column got unexpected value {}, should be Type'.\
format(err_df.columns[0])
assert len(err_df) == len(X), \
'arrays must all be the same length'
assert all(ptypes.is_numeric_dtype(err_df[col]) for col in
list(err_df[err_df.columns[4:]])), \
'columns "true val", "mean", and "std" are of wrong type, should be\
numeric values.'
def test_uncertainty_rfr_cv():
'''
Test function for undertainty_rfr_cv. Checks that the prediction df has
as many rows as folds in cv. In the output df it checks that "true val"
values are 0 when true_y = False, and checks that values in "AB" are of
type string.
'''
X = df_test.iloc[[0, 1]]
Y = 'none'
d_start, x_start = 5, 5
o = 0
folds_test = 2
pred_df_test, err_df_test = \
uncertainty_rfr.uncertainty_rfr_cv(df_test, X, Y, o, d_start, x_start,
folds=folds_test)
assert pred_df_test.shape[0] == folds_test, \
'Number of row in pred_df_test array should equal number of folds, \
expected {}, got {}'.format(folds_test, pred_df_test.shape[0])
assert err_df_test[err_df_test.columns[4]][0] == 0.0, \
'Expected 0.0 in "true val" with true_y set to false, instead got a \
different val'
assert isinstance(err_df_test['AB'][1], str), \
'Expected string in column "AB", got {}'.format(type(
err_df_test['AB'][1]))
def test_largest_uncertainty():
'''
test function for largest_uncertainty. checks that that length of the
df is equal to the num of values it was asked to return, and that the
output idx are a list.
'''
df = | pd.DataFrame(data={'err_int': [1, 2, 3], 'std_dev': [4, 5, 6]}) | pandas.DataFrame |
import pandas as pd
from sklearn import linear_model
import statsmodels.api as sm
import numpy as np
from scipy import stats
df_all = pd.read_csv("/mnt/nadavrap-students/STS/data/imputed_data2.csv")
# df_all = pd.read_csv("/tmp/pycharm_project_723/new data sum info surg and Hosp numeric values.csv")
# # print(df_all.columns.tolist())
# # print (df_all.count())
# # print (df_all['Mortalty'].isnull().sum())
# # print (df_all['Mortalty'].value_counts())
# def refactor_categorical_values_to_numeric_values(df, col_names):
# # df = df.filter(col_names, axis=1)
# for col in col_names:
# try:
# df = df.replace({col: {False: 0, True: 1}})
# df = df.replace({col: {"No": 0, "Yes": 1}})
# df = df.replace({col: {"Male": 0, "Female": 1}})
# df = df.replace({col: {"Elective": 0, "Urgent": 1}})
# df = df.replace({col: {"Non-Hispanic": 0, "Hispanic": 1}})
# df = df.replace({col: {"Previous Non-CAB": 0, "Previous CAB": 1}})
# df = df.replace({col: {"None/Trivial/Trace/Mild": 0, "Moderate/Severe": 1}})
# df = df.replace({col: {"Unknown": 1, "Alive": 1, "Dead": 0}})
# df = df.replace({col: {"First cardiovascular surgery": 0, "NA - Not a cardiovascular surgery": 0,
# "First re-op cardiovascular surgery": 0, "Second re-op cardiovascular surgery": 1,
# "Third re-op cardiovascular surgery": 1,
# "Fourth or more re-op cardiovascular surgery": 1}})
# df = df.replace({col: {"Never smoker": 0, "Smoker": 1}})
# df = df.replace({col: {"I/II": 0, "III/IV": 1}})
# df = df.replace({col: {"None": 0, "One": 1, "Two": 2, "Three": 3}})
# except:
# x = "none"
# print(df.shape)
# df.to_csv("/tmp/pycharm_project_723/new data sum info surg and Hosp numeric values.csv")
#
df_all = df_all.replace({'STSRCHOSPD':{False:0, True:1}})
df_all = df_all.replace({'Complics':{False:0, True:1}})
df_all = df_all.replace({'Mortality':{False:0, True:1}})
df_all = df_all.replace({'STSRCMM':{False:0, True:1}})
print (df_all['STSRCMM'].unique())
print (df_all['STSRCMM'].isna().sum())
df_all[:50].to_csv("all 50.csv")# def intersection(lst1, lst2):
# return list(set(lst1) & set(lst2))
#
#
# # list_vals = [ "Reoperation", "BMI", "Age", "Gender", "RaceCaucasian", "RaceBlack", "Ethnicity",
# # "RaceOther", "FHCAD", "Diabetes", "InsulinDiab", "Dyslip", "Dialysis", "Hypertn", "InfEndo",
# # "SmokingStatus", "ChrLungD", "ModSevereLungDis", "ImmSupp", "PVD", "DualAntiPlat", 'RenFail',
# # "CreatLst", 'PreCVAorTIAorCVD', "POCPCI", "PrevMI", "Angina", "UnstableAngina", "HeartFail",
# # "ClassNYHGroup", "Arrhythmia", "ArrhythAtrFibFlutter", "ArrhythOther", "MedACEI", "MedBeta",
# # "MedNitIV", "MedASA", "MedAntiplateltNoASA", "AntiCoag", "MedInotr", "MedSter", "HDEF", "EF<=35%",
# # "NumDisV", 'NumDisV_ordinal', "LeftMain", "VDInsufA", "VDStenA", "VDInsufM", "VDStenM", "VDInsufT",
# # "VDStenT", "Status", 'MedHeparin', 'Mortality', 'PrCVInt']
# # # list_val = ['PrCVInt']
# #
# #
# # # print (intersection(list2,list_vals))
# # test = df_all[:50]
# # refactor_categorical_values_to_numeric_values(test, list_vals)
# # test.rename(columns={"EF<=35%": "EF_less_equal_35"}, inplace=True)
# list2 = [ 'STSRCHOSPD', 'STSRCOM', 'STSRCDSWI', 'STSRCMM', 'STSRCPermStroke', 'STSRCProlvent', 'STSRcRenFail', 'STSRCreop',
# 'PLOS', 'PredMort', 'PredDeep', 'PredReop', 'PredStro', 'PredVent', 'PredRenF', 'PredMM', 'Pred6D', 'Pred14D'
# 'Age', 'Gender', 'RaceCaucasian', 'RaceBlack', 'RaceOther', 'Ethnicity', 'FHCAD', 'Diabetes', 'Hypertn',
# 'Dyslip', 'Dialysis', 'InfEndo', 'ChrLungD', 'ImmSupp', 'PVD', 'CreatLst', 'PrevMI', 'Arrhythmia', 'PrCVInt', 'prcab',
# 'prvalve', 'POCPCI', 'ProthCar', 'MedACEI', 'MedASA', 'MedBeta', 'MedInotr', 'MedNitIV', 'MedSter', 'NumDisV', 'HDEF',
# 'VDInsufA', 'VDStenA', 'VDInsufM', 'VDStenM', 'VDInsufT', 'VDStenT', 'Status', 'PerfusTm', 'XClampTm', 'DistVein', 'NumIMADA',
# 'NumRadDA', 'IABP', 'VentHrsTot', 'Complics', 'COpReBld', 'CPVntLng', 'CRenFail', 'HeartFail', 'Incidenc', 'Reoperation',
# 'SmokingStatus', 'InsulinDiab', 'ModSevereLungDis', 'PreCVAorTIAorCVD', 'RenFail', 'Angina', 'UnstableAngina', 'ClassNYHGroup',
# 'ArrhythAtrFibFlutter', 'ArrhythOther', 'DualAntiPlat', 'MedHeparin', 'AntiCoag', 'MedAntiplateltNoASA', 'NumDisV_ordinal', 'EF<=35%',
# 'CPBUse', 'RadArtUsed', 'IMAGraftUsed', 'DistVeinDone', 'TotalNumberOfGrafts', 'LeftMain', 'CompleteRevas', 'MajorComps', 'PLOS14',
# 'postCVAorTIA', 'IntraPostBloodTrans', 'ICUHrsTotal', 'BMI']
# # list2.to_csv("test for numeric draft model.csv")
# refactor_categorical_values_to_numeric_values(df_all,list2)
# mask_reop = df_all['Reoperation'] == 'Reoperation'
# df_reop = df_all[mask_reop]
# df_all = df_all.replace({'Reoperation':{'First Time':0, 'Reoperation':1}})
mask = df_all['surgyear'] == 2010
df_2010 = df_all[mask]
mask = df_all['surgyear'] == 2011
df_2011 = df_all[mask]
mask = df_all['surgyear'] == 2012
df_2012 = df_all[mask]
mask = df_all['surgyear'] == 2013
df_2013 = df_all[mask]
mask = df_all['surgyear'] == 2014
df_2014 = df_all[mask]
mask = df_all['surgyear'] == 2015
df_2015 = df_all[mask]
mask = df_all['surgyear'] == 2016
df_2016 = df_all[mask]
mask = df_all['surgyear'] == 2017
df_2017 = df_all[mask]
mask = df_all['surgyear'] == 2018
df_2018 = df_all[mask]
mask = df_all['surgyear'] == 2019
df_2019 = df_all[mask]
# hospid_2019 = pd.DataFrame()
# mask = df_all['HospID'] == 100427
# df1 = df_all[mask]
# df1.to_csv('100427.csv')
# df2 = df1.groupby(['HospID','surgyear'])['HospID'].count().reset_index(name='total')
# print (df2.head(6))
def create_2019_df(df):
df1 = df.groupby('HospID')['HospID'].count().reset_index(name='total')
df2 = df.groupby('HospID')['Reoperation'].apply(lambda x: (x == 'Reoperation').sum()).reset_index(name='Reop')
df3 = df.groupby('HospID')['Reoperation'].apply(lambda x: (x == 'First Time').sum()).reset_index(name='FirstOperation')
dfmort = df.groupby('HospID')['MtOpD'].apply(lambda x: (x == 1).sum()).reset_index(name='Mortality_all')
mask_reop = df['Reoperation'] == 'Reoperation'
df_reop = df[mask_reop]
df_op = df[~mask_reop]
dfmortf = df_op.groupby('HospID')['MtOpD'].apply(lambda x: (x == 1).sum()).reset_index(name='Mortality_first')
dfmortr = df_reop.groupby('HospID')['MtOpD'].apply(lambda x: (x == 1).sum()).reset_index(name='Mortality_reop')
df_comp = df.groupby('HospID')['Complics'].apply(lambda x: (x == 1).sum()).reset_index(name='Complics_all')
df_compr = df_reop.groupby('HospID')['Complics'].apply(lambda x: (x == 1).sum()).reset_index(name='Complics_reop')
df_compf = df_op.groupby('HospID')['Complics'].apply(lambda x: (x == 1).sum()).reset_index(name='Complics_FirstOperation')
d1 = pd.merge(df1, df3, on='HospID', how='outer')
d2 = pd.merge(d1, df2, on='HospID', how='outer')
d3 = pd.merge(d2, dfmort, on='HospID', how='outer')
d4 = pd.merge(d3, dfmortf, on='HospID', how='outer')
d5 = pd.merge(d4, dfmortr, on='HospID', how='outer')
d6 = pd.merge(d5, df_comp, on='HospID', how='outer')
d7 = pd.merge(d6, df_compf, on='HospID', how='outer')
d8 = pd.merge(d7, df_compr, on='HospID', how='outer')
#df_sum_all_Years_total = pd.merge(d8, df_19, on='HospID', how='outer')
d8.fillna(0, inplace=True)
d8['mort_rate_All'] = (d8['Mortality_all'] / d8['total'])*100
d8['Mortality_First_rate'] =( d8['Mortality_first'] / d8['FirstOperation'])*100
d8['Mortality_Reop_rate'] = (d8['Mortality_reop'] / d8['Reop'])*100
d8['Complics_rate_All'] = (d8['Complics_all'] / d8['total']) * 100
d8['Complics_First_rate'] = (d8['Complics_FirstOperation'] / d8['FirstOperation']) * 100
d8['Complics_Reop_rate'] = (d8['Complics_reop'] / d8['Reop']) * 100
d8.to_csv("oneyear_hospid.csv")
df_PredMort_op = df_op.groupby('HospID')['PredMort'].mean().reset_index(name='PredMort_First_avg')
df_PredMort_reop= df_reop.groupby('HospID')['PredMort'].mean().reset_index(name='PredMort_Reoperation_avg')
df_PredComp_op = df_op.groupby('HospID')['PredMM'].mean().reset_index(name='PredComp_First_avg')
df_PredComp_reop= df_reop.groupby('HospID')['PredMM'].mean().reset_index(name='PredComp_Reoperation_avg')
d9 = pd.merge(d8, df_PredMort_op, on='HospID', how='outer')
d10 = pd.merge(d9, df_PredMort_reop, on='HospID', how='outer')
d11 = pd.merge(d10, df_PredComp_op, on='HospID', how='outer')
d12 = pd.merge(d11, df_PredComp_reop, on='HospID', how='outer')
d12.fillna(0, inplace=True)
d12['Mort_observe/expected_First'] = (d12['Mortality_First_rate'] / d12['PredMort_First_avg'])
d12['Mort_observe/expected_Reop'] = (d12['Mortality_Reop_rate'] / d12['PredMort_Reoperation_avg'])
d12[['log_First_Mort', 'log_Reoperation_Mort']] = np.log2(
d12[['Mort_observe/expected_First', 'Mort_observe/expected_Reop']].replace(0, np.nan))
d12.fillna(0, inplace=True)
d12['Comp_observe/expected_First'] = (d12['Complics_First_rate'] / d12['PredComp_First_avg'])
d12['Comp_observe/expected_Reop'] = (d12['Complics_Reop_rate'] / d12['PredComp_Reoperation_avg'])
d12[['log_First_Comp', 'log_Reoperation_Comp']] = np.log2(
d12[['Comp_observe/expected_First', 'Comp_observe/expected_Reop']].replace(0, np.nan))
d12.to_csv("oneyear_expec_hospid.csv")
def create_df():
df1 = df_all.groupby(['HospID','surgyear'])['HospID'].count().reset_index(name='total')
df2 = df_all.groupby(['HospID','surgyear'])['Reoperation'].apply(lambda x: (x == 'Reoperation').sum()).reset_index(name='Reop')
df3 = df_all.groupby(['HospID','surgyear'])['Reoperation'].apply(lambda x: (x == 'First Time').sum()).reset_index(name='FirstOperation')
df_aggr = pd.read_csv("aggregate_csv.csv")
mask_reop = df_all['Reoperation'] == 'Reoperation'
df_reop = df_all[mask_reop]
df_op = df_all[~mask_reop]
dfmort = df_all.groupby(['HospID','surgyear'])['STSRCMM'].apply(lambda x: (x == 1).sum()).reset_index(name='Mortality_all')
dfmortf = df_op.groupby(['HospID','surgyear'])['STSRCMM'].apply(lambda x: (x == 1).sum()).reset_index(name='Mortality_first')
dfmortr = df_reop.groupby(['HospID','surgyear'])['STSRCMM'].apply(lambda x: (x == 1).sum()).reset_index(name='Mortality_reop')
df_comp = df_all.groupby(['HospID','surgyear'])['Complics'].apply(lambda x: (x == 1).sum()).reset_index(name='Complics_all')
df_compr = df_reop.groupby(['HospID','surgyear'])['Complics'].apply(lambda x: (x == 1).sum()).reset_index(name='Complics_reop')
df_compf = df_op.groupby(['HospID','surgyear'])['Complics'].apply(lambda x: (x == 1).sum()).reset_index(name='Complics_FirstOperation')
d1 = pd.merge(df1, df3, left_on=['HospID','surgyear'], right_on=['HospID','surgyear'], how='outer')
d2 = pd.merge(d1, df2, left_on=['HospID','surgyear'], right_on=['HospID','surgyear'], how='outer')
df5 = pd.merge(df_aggr, d2, left_on=['HospID', 'surgyear'], right_on=['HospID', 'surgyear'],
how='inner') # how='left', on=['HospID','surgyear'])
del df5["Unnamed: 0"]
d3 = pd.merge(df5, dfmort, left_on=['HospID','surgyear'], right_on=['HospID','surgyear'], how='outer')
d4 = pd.merge(d3, dfmortf,left_on=['HospID','surgyear'], right_on=['HospID','surgyear'],how='outer')
d5 = pd.merge(d4, dfmortr,left_on=['HospID','surgyear'], right_on=['HospID','surgyear'],how='outer')
d6 = pd.merge(d5, df_comp,left_on=['HospID','surgyear'], right_on=['HospID','surgyear'],how='outer')
d7 = pd.merge(d6, df_compf, left_on=['HospID','surgyear'], right_on=['HospID','surgyear'], how='outer')
d8 = pd.merge(d7, df_compr, left_on=['HospID','surgyear'], right_on=['HospID','surgyear'], how='outer')
# df_sum_all_Years_total = pd.merge(d8, df_19, on='HospID', how='outer')
d8.fillna(0, inplace=True)
d8['mort_rate_All'] = (d8['Mortality_all'] / d8['total'])*100
d8['Mortality_First_rate'] =( d8['Mortality_first'] / d8['FirstOperation'])*100
d8['Mortality_Reop_rate'] = (d8['Mortality_reop'] / d8['Reop'])*100
d8['Complics_rate_All'] = (d8['Complics_all'] / d8['total']) * 100
d8['Complics_First_rate'] = (d8['Complics_FirstOperation'] / d8['FirstOperation']) * 100
d8['Complics_Reop_rate'] = (d8['Complics_reop'] / d8['Reop']) * 100
d8.to_csv('hospid_year_allyears.csv')
df_PredMort_all = df_all.groupby(['HospID','surgyear'])['PredMort'].mean().reset_index(name='PredMort_All_avg')
df_PredMort_op = df_op.groupby(['HospID','surgyear'])['PredMort'].mean().reset_index(name='PredMort_First_avg')
df_PredMort_reop = df_reop.groupby(['HospID','surgyear'])['PredMort'].mean().reset_index(name='PredMort_Reoperation_avg')
df_PredComp_all = df_all.groupby(['HospID','surgyear'])['PredMM'].mean().reset_index(name='PredComp_All_avg')
df_PredComp_op = df_op.groupby(['HospID','surgyear'])['PredMM'].mean().reset_index(name='PredComp_First_avg')
df_PredComp_reop = df_reop.groupby(['HospID','surgyear'])['PredMM'].mean().reset_index(name='PredComp_Reoperation_avg')
d19 = pd.merge(d8, df_PredMort_all, left_on=['HospID','surgyear'], right_on=['HospID','surgyear'], how='outer')
d9 = pd.merge(d19, df_PredMort_op, left_on=['HospID','surgyear'], right_on=['HospID','surgyear'], how='outer')
d10 = pd.merge(d9, df_PredMort_reop, left_on=['HospID','surgyear'], right_on=['HospID','surgyear'], how='outer')
d14 = | pd.merge(d10, df_PredComp_all, left_on=['HospID','surgyear'], right_on=['HospID','surgyear'], how='outer') | pandas.merge |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Jul 17 09:11:58 2020
@author: ets
"""
import datetime as dt
import logging
import re
import warnings
from pathlib import Path
from typing import List, Tuple
# import climpred
import numpy as np
import pandas as pd
import xarray as xr
from climpred import HindcastEnsemble
from . import gis_import_error_message
try:
import rioxarray
from clisops.core import subset
except (ImportError, ModuleNotFoundError) as e:
msg = gis_import_error_message.format(Path(__file__).stem)
raise ImportError(msg) from e
from ravenpy.models import get_model
LOGGER = logging.getLogger("PYWPS")
# TODO: Complete docstrings
# This function gets model states after running the model (i.e. states at the end of the run).
def get_raven_states(model, workdir=None, **kwds):
"""Get the RAVEN states file (.rvc file) after a model run.
Parameters
----------
model : {'HMETS', 'GR4JCN', 'MOHYSE', 'HBVEC'}
Model name.
kwds : {}
Model configuration parameters, including the forcing files (ts).
Returns
-------
rvc : {}
Raven model forcing file
"""
# Run the model and get the rvc file for future hotstart.
m = get_model(model)(workdir=workdir)
m(overwrite=True, **kwds)
rvc = m.outputs["solution"]
return rvc
# Do the actual forecasting step
def perform_forecasting_step(rvc, model, workdir=None, **kwds):
"""
Function that might be useful eventually to do a forecast from a model setup.
"""
# kwds includes 'ts', the forecast timeseries data
# Setup the model
m = get_model(model)(workdir=workdir)
# Force the initial conditions
m.resume(rvc)
# Set the parameters, start dates, etc. required to run the model and run
m(overwrite=True, **kwds)
return m.q_sim
def perform_climatology_esp(
model_name, forecast_date, forecast_duration, workdir=None, **kwds
):
"""
This function takes the model setup and name as well as forecast data and duration and returns
an ESP forecast netcdf. The data comes from the climatology data and thus there is a mechanism
to get the correct data from the time series and exclude the current year.
Parameters
----------
model_name : {'HMETS', 'MOHYSE', 'GR4JCN', 'HBVEC'}
Model name to instantiate Raven model.
forecast_date : datetime.datetime
Date of the forecast issue.
forecast_duration : int
Number of days of forecast, forward looking.
kwds : dict
Raven model configuration parameters.
Returns
-------
qsims
Array of streamflow values from the ESP method along with list of member years
"""
# Get the timeseries
tsnc = xr.open_dataset(kwds["ts"])
# Prepare model instance
m = get_model(model_name)(workdir=workdir)
# Now find the periods of time for warm-up and forecast and add to the model keywords as the defaults are failing
# (nanoseconds datetimes do not like the year 0001...)
start_date = pd.to_datetime(tsnc["time"][0].values)
start_date = start_date.to_pydatetime()
kwds["start_date"] = start_date
# Forecasting from Feb 29th is not ideal, we will replace with Feb 28th.
# Should not change much in a climatological forecast.
if forecast_date.month == 2 and forecast_date.day == 29:
forecast_date.replace(day=28)
# Check to make sure forecast date is not in the first year as we need model warm-up.
# We cannot use timedelta because if the dataset happens to start on a leap
# year, then the timedelta=365 days will not be robust. (and we cannot use timedelta(years=1)...)
dateLimit = start_date.replace(year=start_date.year + 1)
if dateLimit > forecast_date:
msg = (
"Forecast date is within the warm-up period. Select another forecast date."
)
warnings.warn(msg)
# initialize the array of forecast variables
qsims = []
# list of unique years in the dataset:
avail_years = list(np.unique(tsnc["time.year"].data))
# Take a copy of the forecast initial date before overwriting in the forecast step.
forecast_date_main = forecast_date
# Remove the year that we are forecasting. Or else it's cheating!
avail_years.remove(forecast_date.year)
# Update the forecast end-date, which will be the day prior to the forecast date.
# So forecasts warm-up will be from day 1 in the dataset to the forecast date.
kwds["end_date"] = forecast_date - dt.timedelta(days=1)
# Get RVC file if it exists, else compute it.
if len(kwds['rvc']) > 0:
rvc=kwds.pop('rvc')
else:
# Run model to get rvc file after warm-up using base meteo
rvc = get_raven_states(model_name, workdir=workdir, **kwds)
# We need to check which years are long enough (ex: wrapping years, 365-day forecast starting in
# September 2015 will need data up to August 2016 at least)
for years in avail_years:
if forecast_date.replace(year=years) + dt.timedelta(
days=forecast_duration - 1
) > pd.to_datetime(tsnc["time"][-1].values):
avail_years.remove(years)
msg = (
f"Year {years} has been removed because it is the last year in the dataset and does not cover the "
f"forecast duration."
)
warnings.warn(msg)
# We will iterate this for all forecast years
for years in avail_years:
# Replace the forecast period start and end dates with the climatological ESP dates for the
# current member (year)
forecast_date = forecast_date.replace(year=years)
kwds["start_date"] = forecast_date
kwds["end_date"] = forecast_date + dt.timedelta(days=forecast_duration - 1)
# Setup the initial states from the warm-up and run the model.
# Note that info on start/end dates and timeseries are in the kwds.
m.resume(rvc)
m(run_name=f"run_{years}", **kwds)
# Add member to the ensemble and retag the dates to the real forecast dates
# (or else we will get dates from the climate dataset that cover all years)
new_member = m.q_sim.copy(deep=True)
new_member["time"] = pd.date_range(
forecast_date_main, periods=forecast_duration
)
qsims.append(new_member)
# Concatenate the members through a new dimension for the members and remove unused dims.
qsims = xr.concat(qsims, dim="member")
qsims = qsims.squeeze()
# Add the number of the forecast year as member ID
qsims["member"] = (["member"], avail_years)
return qsims
def get_hindcast_day(region_coll, date, climate_model="GEPS"):
"""
This function generates a forecast dataset that can be used to run raven.
Data comes from the CASPAR archive and must be aggregated such that each file
contains forecast data for a single day, but for all forecast timesteps and
all members.
The code takes the region shapefile, the forecast date required, and the
climate_model to use, here GEPS by default, but eventually could be GEPS, GDPS, REPS or RDPS.
"""
# Get the file locations and filenames as a function of the climate model and date
[ds, times] = get_CASPAR_dataset(climate_model, date)
return get_subsetted_forecast(region_coll, ds, times, True)
def get_CASPAR_dataset(climate_model, date):
"""Return Caspar Dataset."""
if climate_model == "GEPS":
d = dt.datetime.strftime(date, "%Y%m%d")
file_url = f"https://pavics.ouranos.ca/twitcher/ows/proxy/thredds/dodsC/birdhouse/caspar/daily/GEPS_{d}.nc"
ds = xr.open_dataset(file_url)
# Here we also extract the times at 6-hour intervals as Raven must have
# constant timesteps and GEPS goes to 6 hours
start = | pd.to_datetime(ds.time[0].values) | pandas.to_datetime |
# getFamaFrenchFactors.py
# Author: Vash
# Version 0.0.4
# Last updated: 18 May 2019
"""
This programme gets cleaned versions of factors including:
* Fama French 3 factor (MRP, SMB, HML)
* Momentum (MOM)
* Carhart 4 factors (MRP, SMB, HML, MOM)
* Fama French 5 factors (MRP, SMB, HML, RMW, CMA)
Updates in Version 0.0.4:
Replaces manual URL with scraped URL for initial futureproofing.
Updates in Version 0.0.3:
Adds support for annual data in addition to monthly data.
"""
import pandas as pd
from dateutil.relativedelta import relativedelta
import requests
from bs4 import BeautifulSoup
# Extract URLs to download
url = "http://mba.tuck.dartmouth.edu/pages/faculty/ken.french/data_library.html"
response = requests.get(url)
soup = BeautifulSoup(response.text, 'lxml')
text_to_search = ['Fama/French 3 Factors', 'Momentum Factor (Mom)']
all_factors_text = soup.findAll('b', text=text_to_search)
home_url = "http://mba.tuck.dartmouth.edu/pages/faculty/ken.french/"
all_factor_links = []
for text in all_factors_text:
links_for_factor = [] # Stores all links for a factor
for sib in text.next_siblings: # Find next element
# URLs are stored in bold tags, hence...
if sib.name == 'b':
bold_tags = sib
try:
link = bold_tags.find('a')['href']
links_for_factor.append(link)
except TypeError:
pass
csv_links = [home_url + link for link in links_for_factor if 'csv' in link.lower()]
txt_links = [home_url + link for link in links_for_factor if 'txt' in link.lower()]
factor_dict = {'factor' : text, 'csv_links' : csv_links, 'txt_links' : txt_links}
all_factor_links.append(factor_dict)
ff3factor_dict = dict(all_factor_links[0])
momAndOthers_dict = dict(all_factor_links[1])
def famaFrench3Factor(frequency='m'):
'''
Returns Fama French 3 factors (Market Risk Premium, SMB, HML)
Set frequency as:
'm' for monthly factors
'a' for annual factors
'''
rows_to_skip = 3
ff3_raw_data = ff3factor_dict['csv_links'][0]
ff3_factors = pd.read_csv(ff3_raw_data, skiprows=rows_to_skip)
ff3_factors.rename(columns = {ff3_factors.columns[0] : 'date_ff_factors'},
inplace=True)
# Get index of annual factor returns
annual_factor_index_loc = ff3_factors[
ff3_factors.values == ' Annual Factors: January-December '].index
# Clean annual and monthly versions
if frequency == 'm':
ff3_factors.drop(ff3_factors.index[annual_factor_index_loc[0]:], inplace=True)
# Convert dates to pd datetime objects
ff3_factors['date_ff_factors'] = pd.to_datetime(ff3_factors['date_ff_factors'],
format='%Y%m')
# Shift dates to end of month
ff3_factors['date_ff_factors'] = ff3_factors['date_ff_factors'].apply(
lambda date : date + relativedelta(day = 1, months = +1, days = -1))
elif frequency == 'a':
# Extract annual data only
ff3_factors.drop(ff3_factors.index[:annual_factor_index_loc[0]],
inplace=True)
# Ignore copyright footer & first 2 header rows
ff3_factors = ff3_factors.iloc[2:-1]
ff3_factors.reset_index(inplace=True)
ff3_factors.drop(columns=ff3_factors.columns[0], inplace=True)
# Deal with spacing issues (e.g. ' 1927' instead of '1927')
ff3_factors['date_ff_factors'] = ff3_factors['date_ff_factors'].apply(
lambda x : x.strip())
# Convert dates to datetime objects (note: values will be int64)
ff3_factors['date_ff_factors'] = pd.to_datetime(ff3_factors['date_ff_factors'],
format='%Y').dt.year.values
# Convert all factors to numeric and decimals (%)
for col in ff3_factors.columns[1:]:
ff3_factors[col] = | pd.to_numeric(ff3_factors[col]) | pandas.to_numeric |
import pandas as pd
import numpy as np
import scipy
import seaborn as sns
import matplotlib.pyplot as plt
import os
from functools import reduce
from statsmodels.tsa.stattools import coint
############### 一、pearson_corr begin
sns.set(style='white')
# Retrieve intraday price data and combine them into a DataFrame.
# 1. Load downloaded prices from folder into a list of dataframes.
#folder_path = 'STATICS/PRICE'
folder_path = '../STATICS/S&P500Top20'
curr_sector = 'InformationTechnology'
#curr_sector = 'HealthCare'
folder_path = '../STATICS/S&P500/' + curr_sector
file_names = os.listdir(folder_path)
tickers = [name.split('.')[0] for name in file_names]
#df_list = [pd.read_csv(os.path.join('STATICS/PRICE', name)) for name in file_names]
df_list = [pd.read_csv(os.path.join(folder_path, name)) for name in file_names]
#df_list = df_list[0:50]
# 2. Replace the closing price column name by the ticker.
for i in range(len(df_list)):
df_list[i].rename(columns={'close': tickers[i]}, inplace=True)
# 3. Merge all price dataframes. Extract roughly the first 70% data.
df = reduce(lambda x, y: | pd.merge(x, y, on='date') | pandas.merge |
import os
import pandas as pd
import datetime
import matplotlib.pyplot as plt
df = pd.read_csv(os.path.join('data', 'lake_mendota.csv'))
df['year'] = df['close_year']
df['month'] = df['close_month']
df['day'] = df['close_day']
df['close_date'] = pd.to_datetime(df[['year', 'month', 'day']])
df['year'] = df['open_year']
df['month'] = df['open_month']
df['day'] = df['open_day']
df['open_date'] = | pd.to_datetime(df[['year', 'month', 'day']]) | pandas.to_datetime |
"""This module provides access to the Vicon and biplane fluoroscopy filesystem-based database."""
from pathlib import Path
import itertools
import functools
import numpy as np
import pandas as pd
import quaternion
from lazy import lazy
from typing import Union, Callable, Type, Tuple
from biokinepy.cs import ht_r, change_cs, ht_inv
from ..kinematics.joint_cs import torso_cs_isb, torso_cs_v3d
from ..kinematics.segments import StaticTorsoSegment
from .db_common import TrialDescription, ViconEndpts, SubjectDescription, ViconCSTransform, trial_descriptor_df, MARKERS
from biokinepy.trajectory import PoseTrajectory
from ..misc.python_utils import NestedDescriptor
BIPLANE_FILE_HEADERS = {'frame': np.int32, 'pos_x': np.float64, 'pos_y': np.float64, 'pos_z': np.float64,
'quat_w': np.float64, 'quat_x': np.float64, 'quat_y': np.float64, 'quat_z': np.float64}
TORSO_FILE_HEADERS = {'pos_x': np.float64, 'pos_y': np.float64, 'pos_z': np.float64,
'quat_w': np.float64, 'quat_x': np.float64, 'quat_y': np.float64, 'quat_z': np.float64}
LANDMARKS_FILE_HEADERS = {'Landmark': 'string', 'X': np.float64, 'Y': np.float64, 'Z': np.float64}
TORSO_TRACKING_MARKERS = ['STRN', 'C7', 'T5', 'T10', 'CLAV']
def csv_get_item_method(csv_data: pd.DataFrame, marker_name: str) -> np.ndarray:
"""Return the marker data, (n, 3) numpy array view, associated with marker_name."""
return csv_data.loc[:, marker_name:(marker_name + '.2')].to_numpy()
def landmark_get_item_method(csv_data: pd.DataFrame, landmark_name: str) -> np.ndarray:
"""Return the landmark data, (3,) numpy array view, associated with landmark_name."""
return csv_data.loc[landmark_name, 'X':'Z'].to_numpy()
def csv_get_item_method_squeeze(csv_data: pd.DataFrame, marker_name: str) -> np.ndarray:
"""Return the marker data, (n, 3) numpy array view, associated with marker_name."""
return np.squeeze(csv_get_item_method(csv_data, marker_name))
def insert_nans(func: Callable) -> Callable:
"""Return a new dataframe derived from the original dataframe with appended columns filled with NaNs for missing
markers."""
@functools.wraps(func)
def wrapper(self) -> pd.DataFrame:
orig_data = func(self)
if not self.nan_missing_markers:
return orig_data
new_columns = [marker for marker in MARKERS if marker not in orig_data.columns]
new_columns1 = [col + '.1' for col in new_columns]
new_columns2 = [col + '.2' for col in new_columns]
raw_data = orig_data.to_numpy()
data_with_nan = np.concatenate((raw_data, np.full((orig_data.shape[0], len(new_columns) * 3), np.nan)), 1)
all_columns = itertools.chain(orig_data.columns,
itertools.chain.from_iterable(zip(new_columns, new_columns1, new_columns2)))
return pd.DataFrame(data=data_with_nan, columns=all_columns, dtype=np.float64)
return wrapper
class ViconCsvTrial(TrialDescription, ViconEndpts):
"""A Vicon trial that has been exported to CSV format.
Enables lazy (and cached) access to the labeled and filled Vicon Data.
Attributes
----------
trial_dir_path: pathlib.Path or str
Path to the directory where the Vicon CSV trial data resides.
vicon_csv_file_labeled: pathlib.Path
Path to the labeled marker data for the Vicon CSV trial.
vicon_csv_file_filled: pathlib.Path
Path to the filled marker data for the Vicon CSV trial.
nan_missing_markers: bool
Specifies whether to insert NaNs in the dataset for missing markers
"""
def __init__(self, trial_dir: Union[str, Path], nan_missing_markers: bool = False, **kwargs):
self.nan_missing_markers = nan_missing_markers
self.trial_dir_path = trial_dir if isinstance(trial_dir, Path) else Path(trial_dir)
super().__init__(trial_dir_path=self.trial_dir_path,
endpts_file=lambda: self.trial_dir_path / (self.trial_name + '_vicon_endpts.csv'), **kwargs)
# file paths
self.vicon_csv_file_labeled = self.trial_dir_path / (self.trial_name + '_vicon_labeled.csv')
self.vicon_csv_file_filled = self.trial_dir_path / (self.trial_name + '_vicon_filled.csv')
# make sure the files are actually there
assert (self.vicon_csv_file_labeled.is_file())
assert (self.vicon_csv_file_filled.is_file())
@lazy
@insert_nans
def vicon_csv_data_labeled(self) -> pd.DataFrame:
"""Pandas dataframe with the labeled Vicon CSV data."""
# TODO: this works fine for now and by using the accessor method below we get a view (rather than a copy) of the
# data, however it probably makes sense to using something like structured arrays or xarray. Note that
# multi-level column labels should not be used (i.e. header=[0, 1) because a copy of the data, not a view is
# returned
return pd.read_csv(self.vicon_csv_file_labeled, header=[0], skiprows=[1], dtype=np.float64)
@lazy
@insert_nans
def vicon_csv_data_filled(self) -> pd.DataFrame:
"""Pandas dataframe with the filled Vicon CSV data."""
return pd.read_csv(self.vicon_csv_file_filled, header=[0], skiprows=[1], dtype=np.float64)
@lazy
def labeled(self) -> NestedDescriptor:
"""Descriptor that allows marker indexed ([marker_name]) access to labeled CSV data. The indexed access returns
a (n, 3) numpy array view."""
return NestedDescriptor(self.vicon_csv_data_labeled, csv_get_item_method)
@lazy
def filled(self) -> NestedDescriptor:
"""Descriptor that allows marker indexed ([marker_name]) access to filled CSV data. The indexed access return
a (n, 3) numpy array view."""
return NestedDescriptor(self.vicon_csv_data_filled, csv_get_item_method)
class ViconCsvSubject(SubjectDescription):
"""A subject that contains multiple Vicon CSV trials.
Attributes
----------
subject_dir_path: pathlib.Path
Path to directory containing subject data.
trials: list of biplane_kine.database.biplane_vicon_db.ViconCsvTrial
List of trials for the subject.
"""
def __init__(self, subj_dir: Union[str, Path], **kwargs):
self.subject_dir_path = subj_dir if isinstance(subj_dir, Path) else Path(subj_dir)
super().__init__(subject_dir_path=self.subject_dir_path, **kwargs)
self.trials = [ViconCsvTrial(folder) for folder in self.subject_dir_path.iterdir() if (folder.is_dir() and
folder.stem != 'Static')]
@lazy
def subject_df(self) -> pd.DataFrame:
"""A Pandas dataframe summarizing the Vicon CSV trials belonging to the subject."""
df = trial_descriptor_df(self.subject_name, self.trials)
df['Trial'] = pd.Series(self.trials, dtype=object)
return df
class BiplaneViconTrial(ViconCsvTrial):
"""A trial that contains both biplane and Vicon data.
Attributes
----------
vicon_csv_file_smoothed: pathlib.Path
Path to the smoothed marker data for the Vicon CSV trial.
humerus_biplane_file: pathlib.Path
File path to the raw kinematic trajectory for the humerus as derived from biplane fluoroscopy
scapula_biplane_file: pathlib.Path
File path to the raw kinematic trajectory for the scapula as derived from biplane fluoroscopy
humerus_biplane_file_avg_smooth: pathlib.Path
File path to the smoothed kinematic trajectory for the humerus as derived from biplane fluoroscopy
scapula_biplane_file_avg_smooth: pathlib.Path
File path to the smoothed kinematic trajectory for the scapula as derived from biplane fluoroscopy
torso_vicon_file: pathlib.Path
File path to the kinematic trajectory for the torso (ISB definition) as derived from skin markers
torso_vicon_file_v3d: pathlib.Path
File path to the kinematic trajectory for the torso (V3D definition) as derived from skin markers
subject: biplane_kine.database.vicon_accuracy.BiplaneViconSubject
Pointer to the subject that contains this trial.
"""
def __init__(self, trial_dir: Union[str, Path], subject: 'BiplaneViconSubject', nan_missing_markers: bool = True,
**kwargs):
super().__init__(trial_dir, nan_missing_markers, **kwargs)
self.subject = subject
# file paths
self.vicon_csv_file_smoothed = self.trial_dir_path / (self.trial_name + '_vicon_smoothed.csv')
self.humerus_biplane_file = self.trial_dir_path / (self.trial_name + '_humerus_biplane.csv')
self.humerus_biplane_file_avg_smooth = self.trial_dir_path / (self.trial_name +
'_humerus_biplane_avgSmooth.csv')
self.scapula_biplane_file = self.trial_dir_path / (self.trial_name + '_scapula_biplane.csv')
self.scapula_biplane_file_avg_smooth = self.trial_dir_path / (self.trial_name +
'_scapula_biplane_avgSmooth.csv')
self.torso_vicon_file = self.trial_dir_path / (self.trial_name + '_torso.csv')
self.torso_vicon_file_v3d = self.trial_dir_path / (self.trial_name + '_torso_v3d.csv')
# make sure the files are actually there
assert (self.vicon_csv_file_smoothed.is_file())
assert (self.humerus_biplane_file.is_file())
assert (self.scapula_biplane_file.is_file())
assert (self.humerus_biplane_file_avg_smooth.is_file())
assert (self.scapula_biplane_file_avg_smooth.is_file())
assert (self.torso_vicon_file.is_file())
assert (self.torso_vicon_file_v3d.is_file())
@lazy
@insert_nans
def vicon_csv_data_smoothed(self) -> pd.DataFrame:
"""Pandas dataframe with the smoothed Vicon CSV data."""
return pd.read_csv(self.vicon_csv_file_smoothed, header=[0], skiprows=[1], dtype=np.float64)
@lazy
def smoothed(self) -> NestedDescriptor:
"""Descriptor that allows marker indexed ([marker_name]) access to smoothed CSV data. The indexed access returns
a (n, 3) numpy array view."""
return NestedDescriptor(self.vicon_csv_data_smoothed, csv_get_item_method)
@lazy
def humerus_biplane_data(self) -> pd.DataFrame:
"""Humerus raw biplane data."""
return pd.read_csv(self.humerus_biplane_file, header=0, dtype=BIPLANE_FILE_HEADERS, index_col='frame')
@lazy
def scapula_biplane_data(self) -> pd.DataFrame:
"""Scapula raw biplane data."""
return pd.read_csv(self.scapula_biplane_file, header=0, dtype=BIPLANE_FILE_HEADERS, index_col='frame')
@lazy
def humerus_biplane_data_avg_smooth(self) -> pd.DataFrame:
"""Humerus (average) smoothed biplane data."""
return pd.read_csv(self.humerus_biplane_file_avg_smooth, header=0,
dtype=BIPLANE_FILE_HEADERS, index_col='frame')
@lazy
def scapula_biplane_data_avg_smooth(self) -> pd.DataFrame:
"""Scapula (average) smothed biplane data."""
return pd.read_csv(self.scapula_biplane_file_avg_smooth,
header=0, dtype=BIPLANE_FILE_HEADERS, index_col='frame')
@lazy
def humerus_quat_fluoro(self) -> np.ndarray:
"""Humerus orientation (as a quaternion) expressed in fluoro reference frame."""
return self.humerus_biplane_data.iloc[:, 3:].to_numpy()
@lazy
def humerus_pos_fluoro(self) -> np.ndarray:
"""Humerus position expressed in fluoro reference frame."""
return self.humerus_biplane_data.iloc[:, :3].to_numpy()
@lazy
def humerus_quat_fluoro_avg_smooth(self) -> np.ndarray:
"""Smoothed humerus orientation (as a quaternion) expressed in fluoro reference frame."""
return self.humerus_biplane_data_avg_smooth.iloc[:, 3:].to_numpy()
@lazy
def humerus_pos_fluoro_avg_smooth(self) -> np.ndarray:
"""Smoothed humerus position expressed in fluoro reference frame."""
return self.humerus_biplane_data_avg_smooth.iloc[:, :3].to_numpy()
@lazy
def humerus_frame_nums(self) -> np.ndarray:
"""Frame numbers for which the humerus was tracked in biplane fluoroscopy."""
return self.humerus_biplane_data.index.to_numpy()
@lazy
def scapula_quat_fluoro(self) -> np.ndarray:
"""Scapula orientation (as a quaternion) expressed in fluoro reference frame."""
return self.scapula_biplane_data.iloc[:, 3:].to_numpy()
@lazy
def scapula_pos_fluoro(self) -> np.ndarray:
"""Scapula position expressed in fluoro reference frame."""
return self.scapula_biplane_data.iloc[:, :3].to_numpy()
@lazy
def scapula_quat_fluoro_avg_smooth(self) -> np.ndarray:
"""Smoothed scapula orientation (as a quaternion) expressed in fluoro reference frame."""
return self.scapula_biplane_data_avg_smooth.iloc[:, 3:].to_numpy()
@lazy
def scapula_pos_fluoro_avg_smooth(self) -> np.ndarray:
"""Smoothed scapula position expressed in fluoro reference frame."""
return self.scapula_biplane_data_avg_smooth.iloc[:, :3].to_numpy()
@lazy
def scapula_frame_nums(self) -> np.ndarray:
"""Frame numbers for which the scapula was tracked in biplane fluoroscopy."""
return self.scapula_biplane_data.index.to_numpy()
@lazy
def torso_vicon_data(self) -> pd.DataFrame:
"""Torso trajectory dataframe."""
return pd.read_csv(self.torso_vicon_file, header=0, dtype=TORSO_FILE_HEADERS)
@lazy
def torso_vicon_data_v3d(self) -> pd.DataFrame:
"""V3D torso trajectory dataframe."""
return | pd.read_csv(self.torso_vicon_file_v3d, header=0, dtype=TORSO_FILE_HEADERS) | pandas.read_csv |
# -*- coding: utf-8 -*-
from __future__ import print_function
from datetime import datetime, timedelta
import functools
import itertools
import numpy as np
import numpy.ma as ma
import numpy.ma.mrecords as mrecords
from numpy.random import randn
import pytest
from pandas.compat import (
PY3, PY36, OrderedDict, is_platform_little_endian, lmap, long, lrange,
lzip, range, zip)
from pandas.core.dtypes.cast import construct_1d_object_array_from_listlike
from pandas.core.dtypes.common import is_integer_dtype
import pandas as pd
from pandas import (
Categorical, DataFrame, Index, MultiIndex, Series, Timedelta, Timestamp,
compat, date_range, isna)
from pandas.tests.frame.common import TestData
import pandas.util.testing as tm
MIXED_FLOAT_DTYPES = ['float16', 'float32', 'float64']
MIXED_INT_DTYPES = ['uint8', 'uint16', 'uint32', 'uint64', 'int8', 'int16',
'int32', 'int64']
class TestDataFrameConstructors(TestData):
def test_constructor(self):
df = DataFrame()
assert len(df.index) == 0
df = DataFrame(data={})
assert len(df.index) == 0
def test_constructor_mixed(self):
index, data = tm.getMixedTypeDict()
# TODO(wesm), incomplete test?
indexed_frame = DataFrame(data, index=index) # noqa
unindexed_frame = DataFrame(data) # noqa
assert self.mixed_frame['foo'].dtype == np.object_
def test_constructor_cast_failure(self):
foo = DataFrame({'a': ['a', 'b', 'c']}, dtype=np.float64)
assert foo['a'].dtype == object
# GH 3010, constructing with odd arrays
df = DataFrame(np.ones((4, 2)))
# this is ok
df['foo'] = np.ones((4, 2)).tolist()
# this is not ok
pytest.raises(ValueError, df.__setitem__, tuple(['test']),
np.ones((4, 2)))
# this is ok
df['foo2'] = np.ones((4, 2)).tolist()
def test_constructor_dtype_copy(self):
orig_df = DataFrame({
'col1': [1.],
'col2': [2.],
'col3': [3.]})
new_df = pd.DataFrame(orig_df, dtype=float, copy=True)
new_df['col1'] = 200.
assert orig_df['col1'][0] == 1.
def test_constructor_dtype_nocast_view(self):
df = DataFrame([[1, 2]])
should_be_view = DataFrame(df, dtype=df[0].dtype)
should_be_view[0][0] = 99
assert df.values[0, 0] == 99
should_be_view = DataFrame(df.values, dtype=df[0].dtype)
should_be_view[0][0] = 97
assert df.values[0, 0] == 97
def test_constructor_dtype_list_data(self):
df = DataFrame([[1, '2'],
[None, 'a']], dtype=object)
assert df.loc[1, 0] is None
assert df.loc[0, 1] == '2'
def test_constructor_list_frames(self):
# see gh-3243
result = DataFrame([DataFrame([])])
assert result.shape == (1, 0)
result = DataFrame([DataFrame(dict(A=lrange(5)))])
assert isinstance(result.iloc[0, 0], DataFrame)
def test_constructor_mixed_dtypes(self):
def _make_mixed_dtypes_df(typ, ad=None):
if typ == 'int':
dtypes = MIXED_INT_DTYPES
arrays = [np.array(np.random.rand(10), dtype=d)
for d in dtypes]
elif typ == 'float':
dtypes = MIXED_FLOAT_DTYPES
arrays = [np.array(np.random.randint(
10, size=10), dtype=d) for d in dtypes]
zipper = lzip(dtypes, arrays)
for d, a in zipper:
assert(a.dtype == d)
if ad is None:
ad = dict()
ad.update({d: a for d, a in zipper})
return DataFrame(ad)
def _check_mixed_dtypes(df, dtypes=None):
if dtypes is None:
dtypes = MIXED_FLOAT_DTYPES + MIXED_INT_DTYPES
for d in dtypes:
if d in df:
assert(df.dtypes[d] == d)
# mixed floating and integer coexinst in the same frame
df = _make_mixed_dtypes_df('float')
_check_mixed_dtypes(df)
# add lots of types
df = _make_mixed_dtypes_df('float', dict(A=1, B='foo', C='bar'))
_check_mixed_dtypes(df)
# GH 622
df = _make_mixed_dtypes_df('int')
_check_mixed_dtypes(df)
def test_constructor_complex_dtypes(self):
# GH10952
a = np.random.rand(10).astype(np.complex64)
b = np.random.rand(10).astype(np.complex128)
df = DataFrame({'a': a, 'b': b})
assert a.dtype == df.a.dtype
assert b.dtype == df.b.dtype
def test_constructor_dtype_str_na_values(self, string_dtype):
# https://github.com/pandas-dev/pandas/issues/21083
df = DataFrame({'A': ['x', None]}, dtype=string_dtype)
result = df.isna()
expected = DataFrame({"A": [False, True]})
tm.assert_frame_equal(result, expected)
assert df.iloc[1, 0] is None
df = DataFrame({'A': ['x', np.nan]}, dtype=string_dtype)
assert np.isnan(df.iloc[1, 0])
def test_constructor_rec(self):
rec = self.frame.to_records(index=False)
if PY3:
# unicode error under PY2
rec.dtype.names = list(rec.dtype.names)[::-1]
index = self.frame.index
df = DataFrame(rec)
tm.assert_index_equal(df.columns, pd.Index(rec.dtype.names))
df2 = DataFrame(rec, index=index)
tm.assert_index_equal(df2.columns, pd.Index(rec.dtype.names))
tm.assert_index_equal(df2.index, index)
rng = np.arange(len(rec))[::-1]
df3 = DataFrame(rec, index=rng, columns=['C', 'B'])
expected = DataFrame(rec, index=rng).reindex(columns=['C', 'B'])
tm.assert_frame_equal(df3, expected)
def test_constructor_bool(self):
df = DataFrame({0: np.ones(10, dtype=bool),
1: np.zeros(10, dtype=bool)})
assert df.values.dtype == np.bool_
def test_constructor_overflow_int64(self):
# see gh-14881
values = np.array([2 ** 64 - i for i in range(1, 10)],
dtype=np.uint64)
result = DataFrame({'a': values})
assert result['a'].dtype == np.uint64
# see gh-2355
data_scores = [(6311132704823138710, 273), (2685045978526272070, 23),
(8921811264899370420, 45),
(long(17019687244989530680), 270),
(long(9930107427299601010), 273)]
dtype = [('uid', 'u8'), ('score', 'u8')]
data = np.zeros((len(data_scores),), dtype=dtype)
data[:] = data_scores
df_crawls = DataFrame(data)
assert df_crawls['uid'].dtype == np.uint64
@pytest.mark.parametrize("values", [np.array([2**64], dtype=object),
np.array([2**65]), [2**64 + 1],
np.array([-2**63 - 4], dtype=object),
np.array([-2**64 - 1]), [-2**65 - 2]])
def test_constructor_int_overflow(self, values):
# see gh-18584
value = values[0]
result = DataFrame(values)
assert result[0].dtype == object
assert result[0][0] == value
def test_constructor_ordereddict(self):
import random
nitems = 100
nums = lrange(nitems)
random.shuffle(nums)
expected = ['A%d' % i for i in nums]
df = DataFrame(OrderedDict(zip(expected, [[0]] * nitems)))
assert expected == list(df.columns)
def test_constructor_dict(self):
frame = DataFrame({'col1': self.ts1,
'col2': self.ts2})
# col2 is padded with NaN
assert len(self.ts1) == 30
assert len(self.ts2) == 25
tm.assert_series_equal(self.ts1, frame['col1'], check_names=False)
exp = pd.Series(np.concatenate([[np.nan] * 5, self.ts2.values]),
index=self.ts1.index, name='col2')
tm.assert_series_equal(exp, frame['col2'])
frame = DataFrame({'col1': self.ts1,
'col2': self.ts2},
columns=['col2', 'col3', 'col4'])
assert len(frame) == len(self.ts2)
assert 'col1' not in frame
assert isna(frame['col3']).all()
# Corner cases
assert len(DataFrame({})) == 0
# mix dict and array, wrong size - no spec for which error should raise
# first
with pytest.raises(ValueError):
DataFrame({'A': {'a': 'a', 'b': 'b'}, 'B': ['a', 'b', 'c']})
# Length-one dict micro-optimization
frame = DataFrame({'A': {'1': 1, '2': 2}})
tm.assert_index_equal(frame.index, pd.Index(['1', '2']))
# empty dict plus index
idx = Index([0, 1, 2])
frame = DataFrame({}, index=idx)
assert frame.index is idx
# empty with index and columns
idx = Index([0, 1, 2])
frame = DataFrame({}, index=idx, columns=idx)
assert frame.index is idx
assert frame.columns is idx
assert len(frame._series) == 3
# with dict of empty list and Series
frame = DataFrame({'A': [], 'B': []}, columns=['A', 'B'])
tm.assert_index_equal(frame.index, Index([], dtype=np.int64))
# GH 14381
# Dict with None value
frame_none = DataFrame(dict(a=None), index=[0])
frame_none_list = DataFrame(dict(a=[None]), index=[0])
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
assert frame_none.get_value(0, 'a') is None
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
assert frame_none_list.get_value(0, 'a') is None
tm.assert_frame_equal(frame_none, frame_none_list)
# GH10856
# dict with scalar values should raise error, even if columns passed
msg = 'If using all scalar values, you must pass an index'
with pytest.raises(ValueError, match=msg):
DataFrame({'a': 0.7})
with pytest.raises(ValueError, match=msg):
DataFrame({'a': 0.7}, columns=['a'])
@pytest.mark.parametrize("scalar", [2, np.nan, None, 'D'])
def test_constructor_invalid_items_unused(self, scalar):
# No error if invalid (scalar) value is in fact not used:
result = DataFrame({'a': scalar}, columns=['b'])
expected = DataFrame(columns=['b'])
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("value", [2, np.nan, None, float('nan')])
def test_constructor_dict_nan_key(self, value):
# GH 18455
cols = [1, value, 3]
idx = ['a', value]
values = [[0, 3], [1, 4], [2, 5]]
data = {cols[c]: Series(values[c], index=idx) for c in range(3)}
result = DataFrame(data).sort_values(1).sort_values('a', axis=1)
expected = DataFrame(np.arange(6, dtype='int64').reshape(2, 3),
index=idx, columns=cols)
tm.assert_frame_equal(result, expected)
result = DataFrame(data, index=idx).sort_values('a', axis=1)
tm.assert_frame_equal(result, expected)
result = DataFrame(data, index=idx, columns=cols)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("value", [np.nan, None, float('nan')])
def test_constructor_dict_nan_tuple_key(self, value):
# GH 18455
cols = Index([(11, 21), (value, 22), (13, value)])
idx = Index([('a', value), (value, 2)])
values = [[0, 3], [1, 4], [2, 5]]
data = {cols[c]: Series(values[c], index=idx) for c in range(3)}
result = (DataFrame(data)
.sort_values((11, 21))
.sort_values(('a', value), axis=1))
expected = DataFrame(np.arange(6, dtype='int64').reshape(2, 3),
index=idx, columns=cols)
tm.assert_frame_equal(result, expected)
result = DataFrame(data, index=idx).sort_values(('a', value), axis=1)
tm.assert_frame_equal(result, expected)
result = DataFrame(data, index=idx, columns=cols)
tm.assert_frame_equal(result, expected)
@pytest.mark.skipif(not PY36, reason='Insertion order for Python>=3.6')
def test_constructor_dict_order_insertion(self):
# GH19018
# initialization ordering: by insertion order if python>= 3.6
d = {'b': self.ts2, 'a': self.ts1}
frame = DataFrame(data=d)
expected = DataFrame(data=d, columns=list('ba'))
tm.assert_frame_equal(frame, expected)
@pytest.mark.skipif(PY36, reason='order by value for Python<3.6')
def test_constructor_dict_order_by_values(self):
# GH19018
# initialization ordering: by value if python<3.6
d = {'b': self.ts2, 'a': self.ts1}
frame = DataFrame(data=d)
expected = DataFrame(data=d, columns=list('ab'))
tm.assert_frame_equal(frame, expected)
def test_constructor_multi_index(self):
# GH 4078
# construction error with mi and all-nan frame
tuples = [(2, 3), (3, 3), (3, 3)]
mi = MultiIndex.from_tuples(tuples)
df = DataFrame(index=mi, columns=mi)
assert pd.isna(df).values.ravel().all()
tuples = [(3, 3), (2, 3), (3, 3)]
mi = MultiIndex.from_tuples(tuples)
df = DataFrame(index=mi, columns=mi)
assert pd.isna(df).values.ravel().all()
def test_constructor_error_msgs(self):
msg = "Empty data passed with indices specified."
# passing an empty array with columns specified.
with pytest.raises(ValueError, match=msg):
DataFrame(np.empty(0), columns=list('abc'))
msg = "Mixing dicts with non-Series may lead to ambiguous ordering."
# mix dict and array, wrong size
with pytest.raises(ValueError, match=msg):
DataFrame({'A': {'a': 'a', 'b': 'b'},
'B': ['a', 'b', 'c']})
# wrong size ndarray, GH 3105
msg = r"Shape of passed values is \(3, 4\), indices imply \(3, 3\)"
with pytest.raises(ValueError, match=msg):
DataFrame(np.arange(12).reshape((4, 3)),
columns=['foo', 'bar', 'baz'],
index=pd.date_range('2000-01-01', periods=3))
# higher dim raise exception
with pytest.raises(ValueError, match='Must pass 2-d input'):
DataFrame(np.zeros((3, 3, 3)), columns=['A', 'B', 'C'], index=[1])
# wrong size axis labels
msg = ("Shape of passed values "
r"is \(3, 2\), indices "
r"imply \(3, 1\)")
with pytest.raises(ValueError, match=msg):
DataFrame(np.random.rand(2, 3), columns=['A', 'B', 'C'], index=[1])
msg = ("Shape of passed values "
r"is \(3, 2\), indices "
r"imply \(2, 2\)")
with pytest.raises(ValueError, match=msg):
DataFrame(np.random.rand(2, 3), columns=['A', 'B'], index=[1, 2])
msg = ("If using all scalar "
"values, you must pass "
"an index")
with pytest.raises(ValueError, match=msg):
DataFrame({'a': False, 'b': True})
def test_constructor_with_embedded_frames(self):
# embedded data frames
df1 = DataFrame({'a': [1, 2, 3], 'b': [3, 4, 5]})
df2 = DataFrame([df1, df1 + 10])
df2.dtypes
str(df2)
result = df2.loc[0, 0]
tm.assert_frame_equal(result, df1)
result = df2.loc[1, 0]
tm.assert_frame_equal(result, df1 + 10)
def test_constructor_subclass_dict(self):
# Test for passing dict subclass to constructor
data = {'col1': tm.TestSubDict((x, 10.0 * x) for x in range(10)),
'col2': tm.TestSubDict((x, 20.0 * x) for x in range(10))}
df = DataFrame(data)
refdf = DataFrame({col: dict(compat.iteritems(val))
for col, val in compat.iteritems(data)})
tm.assert_frame_equal(refdf, df)
data = tm.TestSubDict(compat.iteritems(data))
df = DataFrame(data)
tm.assert_frame_equal(refdf, df)
# try with defaultdict
from collections import defaultdict
data = {}
self.frame['B'][:10] = np.nan
for k, v in compat.iteritems(self.frame):
dct = defaultdict(dict)
dct.update(v.to_dict())
data[k] = dct
frame = DataFrame(data)
tm.assert_frame_equal(self.frame.sort_index(), frame)
def test_constructor_dict_block(self):
expected = np.array([[4., 3., 2., 1.]])
df = DataFrame({'d': [4.], 'c': [3.], 'b': [2.], 'a': [1.]},
columns=['d', 'c', 'b', 'a'])
tm.assert_numpy_array_equal(df.values, expected)
def test_constructor_dict_cast(self):
# cast float tests
test_data = {
'A': {'1': 1, '2': 2},
'B': {'1': '1', '2': '2', '3': '3'},
}
frame = DataFrame(test_data, dtype=float)
assert len(frame) == 3
assert frame['B'].dtype == np.float64
assert frame['A'].dtype == np.float64
frame = DataFrame(test_data)
assert len(frame) == 3
assert frame['B'].dtype == np.object_
assert frame['A'].dtype == np.float64
# can't cast to float
test_data = {
'A': dict(zip(range(20), tm.makeStringIndex(20))),
'B': dict(zip(range(15), randn(15)))
}
frame = DataFrame(test_data, dtype=float)
assert len(frame) == 20
assert frame['A'].dtype == np.object_
assert frame['B'].dtype == np.float64
def test_constructor_dict_dont_upcast(self):
d = {'Col1': {'Row1': 'A String', 'Row2': np.nan}}
df = DataFrame(d)
assert isinstance(df['Col1']['Row2'], float)
dm = DataFrame([[1, 2], ['a', 'b']], index=[1, 2], columns=[1, 2])
assert isinstance(dm[1][1], int)
def test_constructor_dict_of_tuples(self):
# GH #1491
data = {'a': (1, 2, 3), 'b': (4, 5, 6)}
result = DataFrame(data)
expected = DataFrame({k: list(v) for k, v in compat.iteritems(data)})
tm.assert_frame_equal(result, expected, check_dtype=False)
def test_constructor_dict_multiindex(self):
def check(result, expected):
return tm.assert_frame_equal(result, expected, check_dtype=True,
check_index_type=True,
check_column_type=True,
check_names=True)
d = {('a', 'a'): {('i', 'i'): 0, ('i', 'j'): 1, ('j', 'i'): 2},
('b', 'a'): {('i', 'i'): 6, ('i', 'j'): 5, ('j', 'i'): 4},
('b', 'c'): {('i', 'i'): 7, ('i', 'j'): 8, ('j', 'i'): 9}}
_d = sorted(d.items())
df = DataFrame(d)
expected = DataFrame(
[x[1] for x in _d],
index=MultiIndex.from_tuples([x[0] for x in _d])).T
expected.index = MultiIndex.from_tuples(expected.index)
check(df, expected)
d['z'] = {'y': 123., ('i', 'i'): 111, ('i', 'j'): 111, ('j', 'i'): 111}
_d.insert(0, ('z', d['z']))
expected = DataFrame(
[x[1] for x in _d],
index=Index([x[0] for x in _d], tupleize_cols=False)).T
expected.index = Index(expected.index, tupleize_cols=False)
df = DataFrame(d)
df = df.reindex(columns=expected.columns, index=expected.index)
check(df, expected)
def test_constructor_dict_datetime64_index(self):
# GH 10160
dates_as_str = ['1984-02-19', '1988-11-06', '1989-12-03', '1990-03-15']
def create_data(constructor):
return {i: {constructor(s): 2 * i}
for i, s in enumerate(dates_as_str)}
data_datetime64 = create_data(np.datetime64)
data_datetime = create_data(lambda x: datetime.strptime(x, '%Y-%m-%d'))
data_Timestamp = create_data(Timestamp)
expected = DataFrame([{0: 0, 1: None, 2: None, 3: None},
{0: None, 1: 2, 2: None, 3: None},
{0: None, 1: None, 2: 4, 3: None},
{0: None, 1: None, 2: None, 3: 6}],
index=[Timestamp(dt) for dt in dates_as_str])
result_datetime64 = DataFrame(data_datetime64)
result_datetime = DataFrame(data_datetime)
result_Timestamp = DataFrame(data_Timestamp)
tm.assert_frame_equal(result_datetime64, expected)
tm.assert_frame_equal(result_datetime, expected)
tm.assert_frame_equal(result_Timestamp, expected)
def test_constructor_dict_timedelta64_index(self):
# GH 10160
td_as_int = [1, 2, 3, 4]
def create_data(constructor):
return {i: {constructor(s): 2 * i}
for i, s in enumerate(td_as_int)}
data_timedelta64 = create_data(lambda x: np.timedelta64(x, 'D'))
data_timedelta = create_data(lambda x: timedelta(days=x))
data_Timedelta = create_data(lambda x: Timedelta(x, 'D'))
expected = DataFrame([{0: 0, 1: None, 2: None, 3: None},
{0: None, 1: 2, 2: None, 3: None},
{0: None, 1: None, 2: 4, 3: None},
{0: None, 1: None, 2: None, 3: 6}],
index=[Timedelta(td, 'D') for td in td_as_int])
result_timedelta64 = DataFrame(data_timedelta64)
result_timedelta = DataFrame(data_timedelta)
result_Timedelta = DataFrame(data_Timedelta)
tm.assert_frame_equal(result_timedelta64, expected)
tm.assert_frame_equal(result_timedelta, expected)
tm.assert_frame_equal(result_Timedelta, expected)
def test_constructor_period(self):
# PeriodIndex
a = pd.PeriodIndex(['2012-01', 'NaT', '2012-04'], freq='M')
b = pd.PeriodIndex(['2012-02-01', '2012-03-01', 'NaT'], freq='D')
df = pd.DataFrame({'a': a, 'b': b})
assert df['a'].dtype == a.dtype
assert df['b'].dtype == b.dtype
# list of periods
df = pd.DataFrame({'a': a.astype(object).tolist(),
'b': b.astype(object).tolist()})
assert df['a'].dtype == a.dtype
assert df['b'].dtype == b.dtype
def test_nested_dict_frame_constructor(self):
rng = pd.period_range('1/1/2000', periods=5)
df = DataFrame(randn(10, 5), columns=rng)
data = {}
for col in df.columns:
for row in df.index:
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
data.setdefault(col, {})[row] = df.get_value(row, col)
result = DataFrame(data, columns=rng)
tm.assert_frame_equal(result, df)
data = {}
for col in df.columns:
for row in df.index:
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
data.setdefault(row, {})[col] = df.get_value(row, col)
result = DataFrame(data, index=rng).T
tm.assert_frame_equal(result, df)
def _check_basic_constructor(self, empty):
# mat: 2d matrix with shape (3, 2) to input. empty - makes sized
# objects
mat = empty((2, 3), dtype=float)
# 2-D input
frame = DataFrame(mat, columns=['A', 'B', 'C'], index=[1, 2])
assert len(frame.index) == 2
assert len(frame.columns) == 3
# 1-D input
frame = DataFrame(empty((3,)), columns=['A'], index=[1, 2, 3])
assert len(frame.index) == 3
assert len(frame.columns) == 1
# cast type
frame = DataFrame(mat, columns=['A', 'B', 'C'],
index=[1, 2], dtype=np.int64)
assert frame.values.dtype == np.int64
# wrong size axis labels
msg = r'Shape of passed values is \(3, 2\), indices imply \(3, 1\)'
with pytest.raises(ValueError, match=msg):
DataFrame(mat, columns=['A', 'B', 'C'], index=[1])
msg = r'Shape of passed values is \(3, 2\), indices imply \(2, 2\)'
with pytest.raises(ValueError, match=msg):
DataFrame(mat, columns=['A', 'B'], index=[1, 2])
# higher dim raise exception
with pytest.raises(ValueError, match='Must pass 2-d input'):
DataFrame(empty((3, 3, 3)), columns=['A', 'B', 'C'],
index=[1])
# automatic labeling
frame = DataFrame(mat)
tm.assert_index_equal(frame.index, pd.Index(lrange(2)))
tm.assert_index_equal(frame.columns, pd.Index(lrange(3)))
frame = DataFrame(mat, index=[1, 2])
tm.assert_index_equal(frame.columns, pd.Index(lrange(3)))
frame = DataFrame(mat, columns=['A', 'B', 'C'])
tm.assert_index_equal(frame.index, pd.Index(lrange(2)))
# 0-length axis
frame = DataFrame(empty((0, 3)))
assert len(frame.index) == 0
frame = DataFrame(empty((3, 0)))
assert len(frame.columns) == 0
def test_constructor_ndarray(self):
self._check_basic_constructor(np.ones)
frame = DataFrame(['foo', 'bar'], index=[0, 1], columns=['A'])
assert len(frame) == 2
def test_constructor_maskedarray(self):
self._check_basic_constructor(ma.masked_all)
# Check non-masked values
mat = ma.masked_all((2, 3), dtype=float)
mat[0, 0] = 1.0
mat[1, 2] = 2.0
frame = DataFrame(mat, columns=['A', 'B', 'C'], index=[1, 2])
assert 1.0 == frame['A'][1]
assert 2.0 == frame['C'][2]
# what is this even checking??
mat = ma.masked_all((2, 3), dtype=float)
frame = DataFrame(mat, columns=['A', 'B', 'C'], index=[1, 2])
assert np.all(~np.asarray(frame == frame))
def test_constructor_maskedarray_nonfloat(self):
# masked int promoted to float
mat = ma.masked_all((2, 3), dtype=int)
# 2-D input
frame = DataFrame(mat, columns=['A', 'B', 'C'], index=[1, 2])
assert len(frame.index) == 2
assert len(frame.columns) == 3
assert np.all(~np.asarray(frame == frame))
# cast type
frame = DataFrame(mat, columns=['A', 'B', 'C'],
index=[1, 2], dtype=np.float64)
assert frame.values.dtype == np.float64
# Check non-masked values
mat2 = ma.copy(mat)
mat2[0, 0] = 1
mat2[1, 2] = 2
frame = DataFrame(mat2, columns=['A', 'B', 'C'], index=[1, 2])
assert 1 == frame['A'][1]
assert 2 == frame['C'][2]
# masked np.datetime64 stays (use NaT as null)
mat = ma.masked_all((2, 3), dtype='M8[ns]')
# 2-D input
frame = DataFrame(mat, columns=['A', 'B', 'C'], index=[1, 2])
assert len(frame.index) == 2
assert len(frame.columns) == 3
assert isna(frame).values.all()
# cast type
frame = DataFrame(mat, columns=['A', 'B', 'C'],
index=[1, 2], dtype=np.int64)
assert frame.values.dtype == np.int64
# Check non-masked values
mat2 = ma.copy(mat)
mat2[0, 0] = 1
mat2[1, 2] = 2
frame = DataFrame(mat2, columns=['A', 'B', 'C'], index=[1, 2])
assert 1 == frame['A'].view('i8')[1]
assert 2 == frame['C'].view('i8')[2]
# masked bool promoted to object
mat = ma.masked_all((2, 3), dtype=bool)
# 2-D input
frame = DataFrame(mat, columns=['A', 'B', 'C'], index=[1, 2])
assert len(frame.index) == 2
assert len(frame.columns) == 3
assert np.all(~np.asarray(frame == frame))
# cast type
frame = DataFrame(mat, columns=['A', 'B', 'C'],
index=[1, 2], dtype=object)
assert frame.values.dtype == object
# Check non-masked values
mat2 = ma.copy(mat)
mat2[0, 0] = True
mat2[1, 2] = False
frame = DataFrame(mat2, columns=['A', 'B', 'C'], index=[1, 2])
assert frame['A'][1] is True
assert frame['C'][2] is False
def test_constructor_mrecarray(self):
# Ensure mrecarray produces frame identical to dict of masked arrays
# from GH3479
assert_fr_equal = functools.partial(tm.assert_frame_equal,
check_index_type=True,
check_column_type=True,
check_frame_type=True)
arrays = [
('float', np.array([1.5, 2.0])),
('int', np.array([1, 2])),
('str', np.array(['abc', 'def'])),
]
for name, arr in arrays[:]:
arrays.append(('masked1_' + name,
np.ma.masked_array(arr, mask=[False, True])))
arrays.append(('masked_all', np.ma.masked_all((2,))))
arrays.append(('masked_none',
np.ma.masked_array([1.0, 2.5], mask=False)))
# call assert_frame_equal for all selections of 3 arrays
for comb in itertools.combinations(arrays, 3):
names, data = zip(*comb)
mrecs = mrecords.fromarrays(data, names=names)
# fill the comb
comb = {k: (v.filled() if hasattr(v, 'filled') else v)
for k, v in comb}
expected = DataFrame(comb, columns=names)
result = DataFrame(mrecs)
assert_fr_equal(result, expected)
# specify columns
expected = DataFrame(comb, columns=names[::-1])
result = DataFrame(mrecs, columns=names[::-1])
assert_fr_equal(result, expected)
# specify index
expected = DataFrame(comb, columns=names, index=[1, 2])
result = DataFrame(mrecs, index=[1, 2])
assert_fr_equal(result, expected)
def test_constructor_corner_shape(self):
df = DataFrame(index=[])
assert df.values.shape == (0, 0)
@pytest.mark.parametrize("data, index, columns, dtype, expected", [
(None, lrange(10), ['a', 'b'], object, np.object_),
(None, None, ['a', 'b'], 'int64', np.dtype('int64')),
(None, lrange(10), ['a', 'b'], int, np.dtype('float64')),
({}, None, ['foo', 'bar'], None, np.object_),
({'b': 1}, lrange(10), list('abc'), int, np.dtype('float64'))
])
def test_constructor_dtype(self, data, index, columns, dtype, expected):
df = DataFrame(data, index, columns, dtype)
assert df.values.dtype == expected
def test_constructor_scalar_inference(self):
data = {'int': 1, 'bool': True,
'float': 3., 'complex': 4j, 'object': 'foo'}
df = DataFrame(data, index=np.arange(10))
assert df['int'].dtype == np.int64
assert df['bool'].dtype == np.bool_
assert df['float'].dtype == np.float64
assert df['complex'].dtype == np.complex128
assert df['object'].dtype == np.object_
def test_constructor_arrays_and_scalars(self):
df = DataFrame({'a': randn(10), 'b': True})
exp = DataFrame({'a': df['a'].values, 'b': [True] * 10})
tm.assert_frame_equal(df, exp)
with pytest.raises(ValueError, match='must pass an index'):
DataFrame({'a': False, 'b': True})
def test_constructor_DataFrame(self):
df = DataFrame(self.frame)
tm.assert_frame_equal(df, self.frame)
df_casted = DataFrame(self.frame, dtype=np.int64)
assert df_casted.values.dtype == np.int64
def test_constructor_more(self):
# used to be in test_matrix.py
arr = randn(10)
dm = DataFrame(arr, columns=['A'], index=np.arange(10))
assert dm.values.ndim == 2
arr = randn(0)
dm = DataFrame(arr)
assert dm.values.ndim == 2
assert dm.values.ndim == 2
# no data specified
dm = DataFrame(columns=['A', 'B'], index=np.arange(10))
assert dm.values.shape == (10, 2)
dm = DataFrame(columns=['A', 'B'])
assert dm.values.shape == (0, 2)
dm = DataFrame(index=np.arange(10))
assert dm.values.shape == (10, 0)
# can't cast
mat = np.array(['foo', 'bar'], dtype=object).reshape(2, 1)
with pytest.raises(ValueError, match='cast'):
DataFrame(mat, index=[0, 1], columns=[0], dtype=float)
dm = DataFrame(DataFrame(self.frame._series))
tm.assert_frame_equal(dm, self.frame)
# int cast
dm = DataFrame({'A': np.ones(10, dtype=int),
'B': np.ones(10, dtype=np.float64)},
index=np.arange(10))
assert len(dm.columns) == 2
assert dm.values.dtype == np.float64
def test_constructor_empty_list(self):
df = DataFrame([], index=[])
expected = DataFrame(index=[])
tm.assert_frame_equal(df, expected)
# GH 9939
df = DataFrame([], columns=['A', 'B'])
expected = DataFrame({}, columns=['A', 'B'])
tm.assert_frame_equal(df, expected)
# Empty generator: list(empty_gen()) == []
def empty_gen():
return
yield
df = DataFrame(empty_gen(), columns=['A', 'B'])
tm.assert_frame_equal(df, expected)
def test_constructor_list_of_lists(self):
# GH #484
df = DataFrame(data=[[1, 'a'], [2, 'b']], columns=["num", "str"])
assert is_integer_dtype(df['num'])
assert df['str'].dtype == np.object_
# GH 4851
# list of 0-dim ndarrays
expected = DataFrame({0: np.arange(10)})
data = [np.array(x) for x in range(10)]
result = DataFrame(data)
tm.assert_frame_equal(result, expected)
def test_constructor_sequence_like(self):
# GH 3783
# collections.Squence like
class DummyContainer(compat.Sequence):
def __init__(self, lst):
self._lst = lst
def __getitem__(self, n):
return self._lst.__getitem__(n)
def __len__(self, n):
return self._lst.__len__()
lst_containers = [DummyContainer([1, 'a']), DummyContainer([2, 'b'])]
columns = ["num", "str"]
result = DataFrame(lst_containers, columns=columns)
expected = DataFrame([[1, 'a'], [2, 'b']], columns=columns)
tm.assert_frame_equal(result, expected, check_dtype=False)
# GH 4297
# support Array
import array
result = DataFrame({'A': array.array('i', range(10))})
expected = DataFrame({'A': list(range(10))})
tm.assert_frame_equal(result, expected, check_dtype=False)
expected = DataFrame([list(range(10)), list(range(10))])
result = DataFrame([array.array('i', range(10)),
array.array('i', range(10))])
tm.assert_frame_equal(result, expected, check_dtype=False)
def test_constructor_iterable(self):
# GH 21987
class Iter():
def __iter__(self):
for i in range(10):
yield [1, 2, 3]
expected = DataFrame([[1, 2, 3]] * 10)
result = DataFrame(Iter())
tm.assert_frame_equal(result, expected)
def test_constructor_iterator(self):
expected = DataFrame([list(range(10)), list(range(10))])
result = DataFrame([range(10), range(10)])
tm.assert_frame_equal(result, expected)
def test_constructor_generator(self):
# related #2305
gen1 = (i for i in range(10))
gen2 = (i for i in range(10))
expected = DataFrame([list(range(10)), list(range(10))])
result = DataFrame([gen1, gen2])
tm.assert_frame_equal(result, expected)
gen = ([i, 'a'] for i in range(10))
result = DataFrame(gen)
expected = DataFrame({0: range(10), 1: 'a'})
tm.assert_frame_equal(result, expected, check_dtype=False)
def test_constructor_list_of_dicts(self):
data = [OrderedDict([['a', 1.5], ['b', 3], ['c', 4], ['d', 6]]),
OrderedDict([['a', 1.5], ['b', 3], ['d', 6]]),
OrderedDict([['a', 1.5], ['d', 6]]),
OrderedDict(),
OrderedDict([['a', 1.5], ['b', 3], ['c', 4]]),
OrderedDict([['b', 3], ['c', 4], ['d', 6]])]
result = DataFrame(data)
expected = DataFrame.from_dict(dict(zip(range(len(data)), data)),
orient='index')
tm.assert_frame_equal(result, expected.reindex(result.index))
result = DataFrame([{}])
expected = DataFrame(index=[0])
tm.assert_frame_equal(result, expected)
def test_constructor_ordered_dict_preserve_order(self):
# see gh-13304
expected = DataFrame([[2, 1]], columns=['b', 'a'])
data = OrderedDict()
data['b'] = [2]
data['a'] = [1]
result = DataFrame(data)
tm.assert_frame_equal(result, expected)
data = OrderedDict()
data['b'] = 2
data['a'] = 1
result = DataFrame([data])
tm.assert_frame_equal(result, expected)
def test_constructor_ordered_dict_conflicting_orders(self):
# the first dict element sets the ordering for the DataFrame,
# even if there are conflicting orders from subsequent ones
row_one = OrderedDict()
row_one['b'] = 2
row_one['a'] = 1
row_two = OrderedDict()
row_two['a'] = 1
row_two['b'] = 2
row_three = {'b': 2, 'a': 1}
expected = DataFrame([[2, 1], [2, 1]], columns=['b', 'a'])
result = DataFrame([row_one, row_two])
tm.assert_frame_equal(result, expected)
expected = DataFrame([[2, 1], [2, 1], [2, 1]], columns=['b', 'a'])
result = DataFrame([row_one, row_two, row_three])
tm.assert_frame_equal(result, expected)
def test_constructor_list_of_series(self):
data = [OrderedDict([['a', 1.5], ['b', 3.0], ['c', 4.0]]),
OrderedDict([['a', 1.5], ['b', 3.0], ['c', 6.0]])]
sdict = OrderedDict(zip(['x', 'y'], data))
idx = Index(['a', 'b', 'c'])
# all named
data2 = [Series([1.5, 3, 4], idx, dtype='O', name='x'),
Series([1.5, 3, 6], idx, name='y')]
result = DataFrame(data2)
expected = DataFrame.from_dict(sdict, orient='index')
tm.assert_frame_equal(result, expected)
# some unnamed
data2 = [Series([1.5, 3, 4], idx, dtype='O', name='x'),
Series([1.5, 3, 6], idx)]
result = DataFrame(data2)
sdict = OrderedDict(zip(['x', 'Unnamed 0'], data))
expected = DataFrame.from_dict(sdict, orient='index')
tm.assert_frame_equal(result.sort_index(), expected)
# none named
data = [OrderedDict([['a', 1.5], ['b', 3], ['c', 4], ['d', 6]]),
OrderedDict([['a', 1.5], ['b', 3], ['d', 6]]),
OrderedDict([['a', 1.5], ['d', 6]]),
OrderedDict(),
OrderedDict([['a', 1.5], ['b', 3], ['c', 4]]),
OrderedDict([['b', 3], ['c', 4], ['d', 6]])]
data = [Series(d) for d in data]
result = DataFrame(data)
sdict = OrderedDict(zip(range(len(data)), data))
expected = DataFrame.from_dict(sdict, orient='index')
tm.assert_frame_equal(result, expected.reindex(result.index))
result2 = DataFrame(data, index=np.arange(6))
tm.assert_frame_equal(result, result2)
result = DataFrame([Series({})])
expected = DataFrame(index=[0])
tm.assert_frame_equal(result, expected)
data = [OrderedDict([['a', 1.5], ['b', 3.0], ['c', 4.0]]),
OrderedDict([['a', 1.5], ['b', 3.0], ['c', 6.0]])]
sdict = OrderedDict(zip(range(len(data)), data))
idx = Index(['a', 'b', 'c'])
data2 = [Series([1.5, 3, 4], idx, dtype='O'),
Series([1.5, 3, 6], idx)]
result = DataFrame(data2)
expected = DataFrame.from_dict(sdict, orient='index')
tm.assert_frame_equal(result, expected)
def test_constructor_list_of_series_aligned_index(self):
series = [pd.Series(i, index=['b', 'a', 'c'], name=str(i))
for i in range(3)]
result = pd.DataFrame(series)
expected = pd.DataFrame({'b': [0, 1, 2],
'a': [0, 1, 2],
'c': [0, 1, 2]},
columns=['b', 'a', 'c'],
index=['0', '1', '2'])
tm.assert_frame_equal(result, expected)
def test_constructor_list_of_derived_dicts(self):
class CustomDict(dict):
pass
d = {'a': 1.5, 'b': 3}
data_custom = [CustomDict(d)]
data = [d]
result_custom = DataFrame(data_custom)
result = DataFrame(data)
tm.assert_frame_equal(result, result_custom)
def test_constructor_ragged(self):
data = {'A': randn(10),
'B': randn(8)}
with pytest.raises(ValueError, match='arrays must all be same length'):
DataFrame(data)
def test_constructor_scalar(self):
idx = Index(lrange(3))
df = DataFrame({"a": 0}, index=idx)
expected = DataFrame({"a": [0, 0, 0]}, index=idx)
tm.assert_frame_equal(df, expected, check_dtype=False)
def test_constructor_Series_copy_bug(self):
df = DataFrame(self.frame['A'], index=self.frame.index, columns=['A'])
df.copy()
def test_constructor_mixed_dict_and_Series(self):
data = {}
data['A'] = {'foo': 1, 'bar': 2, 'baz': 3}
data['B'] = Series([4, 3, 2, 1], index=['bar', 'qux', 'baz', 'foo'])
result = DataFrame(data)
assert result.index.is_monotonic
# ordering ambiguous, raise exception
with pytest.raises(ValueError, match='ambiguous ordering'):
DataFrame({'A': ['a', 'b'], 'B': {'a': 'a', 'b': 'b'}})
# this is OK though
result = DataFrame({'A': ['a', 'b'],
'B': Series(['a', 'b'], index=['a', 'b'])})
expected = DataFrame({'A': ['a', 'b'], 'B': ['a', 'b']},
index=['a', 'b'])
tm.assert_frame_equal(result, expected)
def test_constructor_tuples(self):
result = DataFrame({'A': [(1, 2), (3, 4)]})
expected = DataFrame({'A': Series([(1, 2), (3, 4)])})
tm.assert_frame_equal(result, expected)
def test_constructor_namedtuples(self):
# GH11181
from collections import namedtuple
named_tuple = namedtuple("Pandas", list('ab'))
tuples = [named_tuple(1, 3), named_tuple(2, 4)]
expected = DataFrame({'a': [1, 2], 'b': [3, 4]})
result = DataFrame(tuples)
tm.assert_frame_equal(result, expected)
# with columns
expected = DataFrame({'y': [1, 2], 'z': [3, 4]})
result = DataFrame(tuples, columns=['y', 'z'])
tm.assert_frame_equal(result, expected)
def test_constructor_orient(self):
data_dict = self.mixed_frame.T._series
recons = DataFrame.from_dict(data_dict, orient='index')
expected = self.mixed_frame.sort_index()
tm.assert_frame_equal(recons, expected)
# dict of sequence
a = {'hi': [32, 3, 3],
'there': [3, 5, 3]}
rs = DataFrame.from_dict(a, orient='index')
xp = DataFrame.from_dict(a).T.reindex(list(a.keys()))
tm.assert_frame_equal(rs, xp)
def test_from_dict_columns_parameter(self):
# GH 18529
# Test new columns parameter for from_dict that was added to make
# from_items(..., orient='index', columns=[...]) easier to replicate
result = DataFrame.from_dict(OrderedDict([('A', [1, 2]),
('B', [4, 5])]),
orient='index', columns=['one', 'two'])
expected = DataFrame([[1, 2], [4, 5]], index=['A', 'B'],
columns=['one', 'two'])
tm.assert_frame_equal(result, expected)
msg = "cannot use columns parameter with orient='columns'"
with pytest.raises(ValueError, match=msg):
DataFrame.from_dict(dict([('A', [1, 2]), ('B', [4, 5])]),
orient='columns', columns=['one', 'two'])
with pytest.raises(ValueError, match=msg):
DataFrame.from_dict(dict([('A', [1, 2]), ('B', [4, 5])]),
columns=['one', 'two'])
def test_constructor_Series_named(self):
a = Series([1, 2, 3], index=['a', 'b', 'c'], name='x')
df = DataFrame(a)
assert df.columns[0] == 'x'
tm.assert_index_equal(df.index, a.index)
# ndarray like
arr = np.random.randn(10)
s = Series(arr, name='x')
df = DataFrame(s)
expected = DataFrame(dict(x=s))
tm.assert_frame_equal(df, expected)
s = Series(arr, index=range(3, 13))
df = DataFrame(s)
expected = DataFrame({0: s})
tm.assert_frame_equal(df, expected)
pytest.raises(ValueError, DataFrame, s, columns=[1, 2])
# #2234
a = Series([], name='x')
df = DataFrame(a)
assert df.columns[0] == 'x'
# series with name and w/o
s1 = Series(arr, name='x')
df = DataFrame([s1, arr]).T
expected = DataFrame({'x': s1, 'Unnamed 0': arr},
columns=['x', 'Unnamed 0'])
tm.assert_frame_equal(df, expected)
# this is a bit non-intuitive here; the series collapse down to arrays
df = DataFrame([arr, s1]).T
expected = DataFrame({1: s1, 0: arr}, columns=[0, 1])
tm.assert_frame_equal(df, expected)
def test_constructor_Series_named_and_columns(self):
# GH 9232 validation
s0 = Series(range(5), name=0)
s1 = Series(range(5), name=1)
# matching name and column gives standard frame
tm.assert_frame_equal(pd.DataFrame(s0, columns=[0]),
s0.to_frame())
tm.assert_frame_equal(pd.DataFrame(s1, columns=[1]),
s1.to_frame())
# non-matching produces empty frame
assert pd.DataFrame(s0, columns=[1]).empty
assert pd.DataFrame(s1, columns=[0]).empty
def test_constructor_Series_differently_indexed(self):
# name
s1 = Series([1, 2, 3], index=['a', 'b', 'c'], name='x')
# no name
s2 = Series([1, 2, 3], index=['a', 'b', 'c'])
other_index = Index(['a', 'b'])
df1 = DataFrame(s1, index=other_index)
exp1 = DataFrame(s1.reindex(other_index))
assert df1.columns[0] == 'x'
tm.assert_frame_equal(df1, exp1)
df2 = DataFrame(s2, index=other_index)
exp2 = DataFrame(s2.reindex(other_index))
assert df2.columns[0] == 0
tm.assert_index_equal(df2.index, other_index)
tm.assert_frame_equal(df2, exp2)
def test_constructor_manager_resize(self):
index = list(self.frame.index[:5])
columns = list(self.frame.columns[:3])
result = DataFrame(self.frame._data, index=index,
columns=columns)
tm.assert_index_equal(result.index, Index(index))
tm.assert_index_equal(result.columns, Index(columns))
def test_constructor_from_items(self):
items = [(c, self.frame[c]) for c in self.frame.columns]
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
recons = DataFrame.from_items(items)
tm.assert_frame_equal(recons, self.frame)
# pass some columns
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
recons = DataFrame.from_items(items, columns=['C', 'B', 'A'])
tm.assert_frame_equal(recons, self.frame.loc[:, ['C', 'B', 'A']])
# orient='index'
row_items = [(idx, self.mixed_frame.xs(idx))
for idx in self.mixed_frame.index]
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
recons = DataFrame.from_items(row_items,
columns=self.mixed_frame.columns,
orient='index')
tm.assert_frame_equal(recons, self.mixed_frame)
assert recons['A'].dtype == np.float64
msg = "Must pass columns with orient='index'"
with pytest.raises(TypeError, match=msg):
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
DataFrame.from_items(row_items, orient='index')
# orient='index', but thar be tuples
arr = construct_1d_object_array_from_listlike(
[('bar', 'baz')] * len(self.mixed_frame))
self.mixed_frame['foo'] = arr
row_items = [(idx, list(self.mixed_frame.xs(idx)))
for idx in self.mixed_frame.index]
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
recons = DataFrame.from_items(row_items,
columns=self.mixed_frame.columns,
orient='index')
tm.assert_frame_equal(recons, self.mixed_frame)
assert isinstance(recons['foo'][0], tuple)
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
rs = DataFrame.from_items([('A', [1, 2, 3]), ('B', [4, 5, 6])],
orient='index',
columns=['one', 'two', 'three'])
xp = DataFrame([[1, 2, 3], [4, 5, 6]], index=['A', 'B'],
columns=['one', 'two', 'three'])
tm.assert_frame_equal(rs, xp)
def test_constructor_from_items_scalars(self):
# GH 17312
msg = (r'The value in each \(key, value\) '
'pair must be an array, Series, or dict')
with pytest.raises(ValueError, match=msg):
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
DataFrame.from_items([('A', 1), ('B', 4)])
msg = (r'The value in each \(key, value\) '
'pair must be an array, Series, or dict')
with pytest.raises(ValueError, match=msg):
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
DataFrame.from_items([('A', 1), ('B', 2)], columns=['col1'],
orient='index')
def test_from_items_deprecation(self):
# GH 17320
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
DataFrame.from_items([('A', [1, 2, 3]), ('B', [4, 5, 6])])
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
DataFrame.from_items([('A', [1, 2, 3]), ('B', [4, 5, 6])],
columns=['col1', 'col2', 'col3'],
orient='index')
def test_constructor_mix_series_nonseries(self):
df = DataFrame({'A': self.frame['A'],
'B': list(self.frame['B'])}, columns=['A', 'B'])
tm.assert_frame_equal(df, self.frame.loc[:, ['A', 'B']])
msg = 'does not match index length'
with pytest.raises(ValueError, match=msg):
DataFrame({'A': self.frame['A'], 'B': list(self.frame['B'])[:-2]})
def test_constructor_miscast_na_int_dtype(self):
df = DataFrame([[np.nan, 1], [1, 0]], dtype=np.int64)
expected = DataFrame([[np.nan, 1], [1, 0]])
tm.assert_frame_equal(df, expected)
def test_constructor_column_duplicates(self):
# it works! #2079
df = DataFrame([[8, 5]], columns=['a', 'a'])
edf = DataFrame([[8, 5]])
edf.columns = ['a', 'a']
tm.assert_frame_equal(df, edf)
idf = DataFrame.from_records([(8, 5)],
columns=['a', 'a'])
tm.assert_frame_equal(idf, edf)
pytest.raises(ValueError, DataFrame.from_dict,
OrderedDict([('b', 8), ('a', 5), ('a', 6)]))
def test_constructor_empty_with_string_dtype(self):
# GH 9428
expected = DataFrame(index=[0, 1], columns=[0, 1], dtype=object)
df = DataFrame(index=[0, 1], columns=[0, 1], dtype=str)
tm.assert_frame_equal(df, expected)
df = DataFrame(index=[0, 1], columns=[0, 1], dtype=np.str_)
tm.assert_frame_equal(df, expected)
df = DataFrame(index=[0, 1], columns=[0, 1], dtype=np.unicode_)
tm.assert_frame_equal(df, expected)
df = DataFrame(index=[0, 1], columns=[0, 1], dtype='U5')
tm.assert_frame_equal(df, expected)
def test_constructor_single_value(self):
# expecting single value upcasting here
df = DataFrame(0., index=[1, 2, 3], columns=['a', 'b', 'c'])
tm.assert_frame_equal(df,
DataFrame(np.zeros(df.shape).astype('float64'),
df.index, df.columns))
df = DataFrame(0, index=[1, 2, 3], columns=['a', 'b', 'c'])
tm.assert_frame_equal(df, DataFrame(np.zeros(df.shape).astype('int64'),
df.index, df.columns))
df = DataFrame('a', index=[1, 2], columns=['a', 'c'])
tm.assert_frame_equal(df, DataFrame(np.array([['a', 'a'], ['a', 'a']],
dtype=object),
index=[1, 2], columns=['a', 'c']))
pytest.raises(ValueError, DataFrame, 'a', [1, 2])
pytest.raises(ValueError, DataFrame, 'a', columns=['a', 'c'])
msg = 'incompatible data and dtype'
with pytest.raises(TypeError, match=msg):
DataFrame('a', [1, 2], ['a', 'c'], float)
def test_constructor_with_datetimes(self):
intname = np.dtype(np.int_).name
floatname = np.dtype(np.float_).name
datetime64name = np.dtype('M8[ns]').name
objectname = np.dtype(np.object_).name
# single item
df = DataFrame({'A': 1, 'B': 'foo', 'C': 'bar',
'D': Timestamp("20010101"),
'E': datetime(2001, 1, 2, 0, 0)},
index=np.arange(10))
result = df.get_dtype_counts()
expected = Series({'int64': 1, datetime64name: 2, objectname: 2})
result.sort_index()
expected.sort_index()
tm.assert_series_equal(result, expected)
# check with ndarray construction ndim==0 (e.g. we are passing a ndim 0
# ndarray with a dtype specified)
df = DataFrame({'a': 1., 'b': 2, 'c': 'foo',
floatname: np.array(1., dtype=floatname),
intname: np.array(1, dtype=intname)},
index=np.arange(10))
result = df.get_dtype_counts()
expected = {objectname: 1}
if intname == 'int64':
expected['int64'] = 2
else:
expected['int64'] = 1
expected[intname] = 1
if floatname == 'float64':
expected['float64'] = 2
else:
expected['float64'] = 1
expected[floatname] = 1
result = result.sort_index()
expected = Series(expected).sort_index()
tm.assert_series_equal(result, expected)
# check with ndarray construction ndim>0
df = DataFrame({'a': 1., 'b': 2, 'c': 'foo',
floatname: np.array([1.] * 10, dtype=floatname),
intname: np.array([1] * 10, dtype=intname)},
index=np.arange(10))
result = df.get_dtype_counts()
result = result.sort_index()
tm.assert_series_equal(result, expected)
# GH 2809
ind = date_range(start="2000-01-01", freq="D", periods=10)
datetimes = [ts.to_pydatetime() for ts in ind]
datetime_s = Series(datetimes)
assert datetime_s.dtype == 'M8[ns]'
df = DataFrame({'datetime_s': datetime_s})
result = df.get_dtype_counts()
expected = Series({datetime64name: 1})
result = result.sort_index()
expected = expected.sort_index()
tm.assert_series_equal(result, expected)
# GH 2810
ind = date_range(start="2000-01-01", freq="D", periods=10)
datetimes = [ts.to_pydatetime() for ts in ind]
dates = [ts.date() for ts in ind]
df = DataFrame({'datetimes': datetimes, 'dates': dates})
result = df.get_dtype_counts()
expected = Series({datetime64name: 1, objectname: 1})
result = result.sort_index()
expected = expected.sort_index()
tm.assert_series_equal(result, expected)
# GH 7594
# don't coerce tz-aware
import pytz
tz = pytz.timezone('US/Eastern')
dt = tz.localize(datetime(2012, 1, 1))
df = DataFrame({'End Date': dt}, index=[0])
assert df.iat[0, 0] == dt
tm.assert_series_equal(df.dtypes, Series(
{'End Date': 'datetime64[ns, US/Eastern]'}))
df = DataFrame([{'End Date': dt}])
assert df.iat[0, 0] == dt
tm.assert_series_equal(df.dtypes, Series(
{'End Date': 'datetime64[ns, US/Eastern]'}))
# tz-aware (UTC and other tz's)
# GH 8411
dr = date_range('20130101', periods=3)
df = DataFrame({'value': dr})
assert df.iat[0, 0].tz is None
dr = date_range('20130101', periods=3, tz='UTC')
df = DataFrame({'value': dr})
assert str(df.iat[0, 0].tz) == 'UTC'
dr = date_range('20130101', periods=3, tz='US/Eastern')
df = DataFrame({'value': dr})
assert str(df.iat[0, 0].tz) == 'US/Eastern'
# GH 7822
# preserver an index with a tz on dict construction
i = date_range('1/1/2011', periods=5, freq='10s', tz='US/Eastern')
expected = DataFrame(
{'a': i.to_series(keep_tz=True).reset_index(drop=True)})
df = DataFrame()
df['a'] = i
tm.assert_frame_equal(df, expected)
df = DataFrame({'a': i})
tm.assert_frame_equal(df, expected)
# multiples
i_no_tz = date_range('1/1/2011', periods=5, freq='10s')
df = DataFrame({'a': i, 'b': i_no_tz})
expected = DataFrame({'a': i.to_series(keep_tz=True)
.reset_index(drop=True), 'b': i_no_tz})
tm.assert_frame_equal(df, expected)
def test_constructor_datetimes_with_nulls(self):
# gh-15869
for arr in [np.array([None, None, None, None,
datetime.now(), None]),
np.array([None, None, datetime.now(), None])]:
result = DataFrame(arr).get_dtype_counts()
expected = Series({'datetime64[ns]': 1})
tm.assert_series_equal(result, expected)
def test_constructor_for_list_with_dtypes(self):
# TODO(wesm): unused
intname = np.dtype(np.int_).name # noqa
floatname = np.dtype(np.float_).name # noqa
datetime64name = np.dtype('M8[ns]').name
objectname = np.dtype(np.object_).name
# test list of lists/ndarrays
df = DataFrame([np.arange(5) for x in range(5)])
result = df.get_dtype_counts()
expected = Series({'int64': 5})
df = DataFrame([np.array(np.arange(5), dtype='int32')
for x in range(5)])
result = df.get_dtype_counts()
expected = Series({'int32': 5})
# overflow issue? (we always expecte int64 upcasting here)
df = DataFrame({'a': [2 ** 31, 2 ** 31 + 1]})
result = df.get_dtype_counts()
expected = Series({'int64': 1})
tm.assert_series_equal(result, expected)
# GH #2751 (construction with no index specified), make sure we cast to
# platform values
df = DataFrame([1, 2])
result = df.get_dtype_counts()
expected = Series({'int64': 1})
tm.assert_series_equal(result, expected)
df = DataFrame([1., 2.])
result = df.get_dtype_counts()
expected = Series({'float64': 1})
tm.assert_series_equal(result, expected)
df = DataFrame({'a': [1, 2]})
result = df.get_dtype_counts()
expected = Series({'int64': 1})
tm.assert_series_equal(result, expected)
df = DataFrame({'a': [1., 2.]})
result = df.get_dtype_counts()
expected = Series({'float64': 1})
tm.assert_series_equal(result, expected)
df = DataFrame({'a': 1}, index=lrange(3))
result = df.get_dtype_counts()
expected = Series({'int64': 1})
tm.assert_series_equal(result, expected)
df = DataFrame({'a': 1.}, index=lrange(3))
result = df.get_dtype_counts()
expected = Series({'float64': 1})
tm.assert_series_equal(result, expected)
# with object list
df = DataFrame({'a': [1, 2, 4, 7], 'b': [1.2, 2.3, 5.1, 6.3],
'c': list('abcd'),
'd': [datetime(2000, 1, 1) for i in range(4)],
'e': [1., 2, 4., 7]})
result = df.get_dtype_counts()
expected = Series(
{'int64': 1, 'float64': 2, datetime64name: 1, objectname: 1})
result = result.sort_index()
expected = expected.sort_index()
tm.assert_series_equal(result, expected)
def test_constructor_frame_copy(self):
cop = DataFrame(self.frame, copy=True)
cop['A'] = 5
assert (cop['A'] == 5).all()
assert not (self.frame['A'] == 5).all()
def test_constructor_ndarray_copy(self):
df = DataFrame(self.frame.values)
self.frame.values[5] = 5
assert (df.values[5] == 5).all()
df = DataFrame(self.frame.values, copy=True)
self.frame.values[6] = 6
assert not (df.values[6] == 6).all()
def test_constructor_series_copy(self):
series = self.frame._series
df = DataFrame({'A': series['A']})
df['A'][:] = 5
assert not (series['A'] == 5).all()
def test_constructor_with_nas(self):
# GH 5016
# na's in indices
def check(df):
for i in range(len(df.columns)):
df.iloc[:, i]
indexer = np.arange(len(df.columns))[isna(df.columns)]
# No NaN found -> error
if len(indexer) == 0:
def f():
df.loc[:, np.nan]
pytest.raises(TypeError, f)
# single nan should result in Series
elif len(indexer) == 1:
tm.assert_series_equal(df.iloc[:, indexer[0]],
df.loc[:, np.nan])
# multiple nans should result in DataFrame
else:
tm.assert_frame_equal(df.iloc[:, indexer],
df.loc[:, np.nan])
df = DataFrame([[1, 2, 3], [4, 5, 6]], index=[1, np.nan])
check(df)
df = DataFrame([[1, 2, 3], [4, 5, 6]], columns=[1.1, 2.2, np.nan])
check(df)
df = DataFrame([[0, 1, 2, 3], [4, 5, 6, 7]],
columns=[np.nan, 1.1, 2.2, np.nan])
check(df)
df = DataFrame([[0.0, 1, 2, 3.0], [4, 5, 6, 7]],
columns=[np.nan, 1.1, 2.2, np.nan])
check(df)
# GH 21428 (non-unique columns)
df = DataFrame([[0.0, 1, 2, 3.0], [4, 5, 6, 7]],
columns=[np.nan, 1, 2, 2])
check(df)
def test_constructor_lists_to_object_dtype(self):
# from #1074
d = DataFrame({'a': [np.nan, False]})
assert d['a'].dtype == np.object_
assert not d['a'][1]
def test_constructor_categorical(self):
# GH8626
# dict creation
df = DataFrame({'A': list('abc')}, dtype='category')
expected = Series(list('abc'), dtype='category', name='A')
tm.assert_series_equal(df['A'], expected)
# to_frame
s = Series(list('abc'), dtype='category')
result = s.to_frame()
expected = Series(list('abc'), dtype='category', name=0)
tm.assert_series_equal(result[0], expected)
result = s.to_frame(name='foo')
expected = Series(list('abc'), dtype='category', name='foo')
| tm.assert_series_equal(result['foo'], expected) | pandas.util.testing.assert_series_equal |
# Copyright 2015-2016 ARM Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Parse the results from a Workload Automation run and show it in a
"pretty" table
"""
import os
import collections, csv, re
import pandas as pd
from matplotlib import pyplot as plt
class Result(pd.DataFrame):
"""A DataFrame-like class for storing benchmark results"""
def __init__(self, *args, **kwargs):
super(Result, self).__init__(*args, **kwargs)
self.ax = None
def init_fig(self):
_, self.ax = plt.subplots()
def enlarge_axis(self, data):
"""Make sure that the axis don't clobber some of the data"""
(_, _, plot_y_min, plot_y_max) = plt.axis()
concat_data = pd.concat(data[s] for s in data)
data_min = min(concat_data)
data_max = max(concat_data)
# A good margin can be 10% of the data range
margin = (data_max - data_min) / 10
if margin < 1:
margin = 1
update_axis = False
if data_min <= plot_y_min:
plot_y_min = data_min - margin
update_axis = True
if data_max >= plot_y_max:
plot_y_max = data_max + margin
update_axis = True
if update_axis:
self.ax.set_ylim(plot_y_min, plot_y_max)
def plot_results_benchmark(self, benchmark, title=None):
"""Plot the results of the execution of a given benchmark
A title is added to the plot if title is not supplied
"""
if title is None:
title = benchmark.replace('_', ' ')
title = title.title()
self[benchmark].plot(ax=self.ax, kind="bar", title=title)
plt.legend(bbox_to_anchor=(1.05, .5), loc=6)
def plot_results(self):
for bench in self.columns.levels[0]:
self.plot_results_benchmark(bench)
def get_run_number(metric):
found = False
run_number = None
if re.match("Overall_Score|score|FPS", metric):
found = True
match = re.search(r"(.+)[ _](\d+)", metric)
if match:
run_number = int(match.group(2))
if match.group(1) == "Overall_Score":
run_number -= 1
else:
run_number = 0
return (found, run_number)
def get_results(path=".", name=None):
"""Return a pd.DataFrame with the results
The DataFrame's rows are the scores. The first column is the
benchmark name and the second the id within it. For benchmarks
that have a score result, that's what's used. For benchmarks with
FPS_* result, that's the score. E.g. glbenchmarks "score" is it's
fps.
An optional name argument can be passed. If supplied, it overrides
the name in the results file.
"""
bench_dict = collections.OrderedDict()
if os.path.isdir(path):
path = os.path.join(path, "results.csv")
with open(path) as fin:
results = csv.reader(fin)
for row in results:
(is_result, run_number) = get_run_number(row[3])
if is_result:
if name:
run_id = name
else:
run_id = re.sub(r"_\d+", r"", row[0])
bench = row[1]
try:
result = int(row[4])
except ValueError:
result = float(row[4])
if bench in bench_dict:
if run_id in bench_dict[bench]:
if run_number not in bench_dict[bench][run_id]:
bench_dict[bench][run_id][run_number] = result
else:
bench_dict[bench][run_id] = {run_number: result}
else:
bench_dict[bench] = {run_id: {run_number: result}}
bench_dfrs = {}
for bench, run_id_dict in bench_dict.iteritems():
bench_dfrs[bench] = | pd.DataFrame(run_id_dict) | pandas.DataFrame |
#!/usr/bin/env python
from __future__ import division
from __future__ import print_function
from builtins import zip
from builtins import str
from builtins import range
import pandas as pd
import os
import sys
from collections import Counter
import operator
from itertools import takewhile
import multiprocessing
from functools import partial
import argparse
try:
import matplotlib
except ImportError:
print("Install matplotlib")
else:
a = int(pd.__version__.split(".")[1])
if a >= 21:
print("Version satisfied")
else:
sys.exit('\nERROR: mtR_find requires Pandas version 0.21.0 or higher. Please update Pandas\n')
docstring= """
USAGE:
python mtR_find.py <species_name>
Arguments:
Valid species name
(1) --species_name dre #(for zebrafish)
(2) --species_name hsa #(for humans)
(3) --species_name mmu #(for mouse)
(4) --species_name dme #(for Drosophila)
Valid RNA type:
(1) sRNA
(2) lncRNA
Use --help for more info
DESCRIPTION
Creates a read count of mitochondrial derived sequences and also outputs the annotation information
"""
parser = argparse.ArgumentParser(usage = "\n" +"python %(prog)s species_name [--non_model NON_MODEL] [--FASTA FASTA] [-GTF GTF] [--graphical_output GRAPHICAL_OUTPUT][--output_path path/to/folder] [--input_path path/to/folder] [--files list of files] \n" +"\n" + "Usage examples:\nFor Zebrafish as species name and if the current directory has all FASTQ files\npython mtR_find.py dre\nFor Zebrafish as species name and to specify filenames explicitly\npython mtR_find.py dre --files filename1.fastq filename2.fastq\nFor Zebrafish as species name and to specify path to folder containing FASTQ files\npython mtR_find.py dre --path path/to/folder\nFor Zebrafish as species name and to specify no graphical output\npython mtR_find.py dre --graphical_output no\n" + "\n" + "Description:\nCreate read count file with sequences mapping to mitochondrial genome with their annotation information\n", add_help=False)
required = parser.add_argument_group('required arguments')
required.add_argument("species_name", help = """enter species name:Accepted species name arguments: (1) species name = dre #(for zebrafish) (2) species name = hsa #(for humans)(3) species name = mmu #(for mouse) (4) species name = dme #(for Drosophila) (5) species name = xen #(for xenophus) (6) species name = gal #(for chicken)""")
required.add_argument("RNA", help = """enter sRNA for small non-coding RNA and lncRNA fro long non-coding RNA""")
optional = parser.add_argument_group('optional arguments')
optional.add_argument("--non_model", help="""for non-model organisms or when the mitochondrial genome/annotation file is already downlaoded locally""")
optional.add_argument("--FASTA", help = "specify the absolute path to the mitochondrial genome FASTA file")
optional.add_argument("--GTF", help = "specify the absolute path to the mitochondrial genome GTF file")
optional.add_argument('--input_path', default = os.getcwd(), help= 'paste path to FASTQ files')
optional.add_argument('--output_path', default = os.getcwd(), help= 'paste path to store output files')
optional.add_argument('--files', nargs='*', help= 'enter FASTQ files seperated by space(enter the absolute path and not the relative path)')
optional.add_argument("--graphical_output", default = "no", help="""If you want to specify graphical output specify -grapical_output yes in the command line followed by species name""")
optional.add_argument("--metadata", help= 'enter PATH to metadata file')
optional.add_argument("--condition", help= 'enter the column namefrom the metadata file that consists of teh condition that needs to be compared. Example: Tissue, stage or disease condition')
optional.add_argument("--color", nargs='*', help= 'enter color names entered by space')
optional.add_argument("-h", "--help", action='help', help='print help message')
args = parser.parse_args()
if args.graphical_output == "yes":
try:
import matplotlib
except ImportError:
print("Install matplotlib")
else:
a = matplotlib.__version__
if float(a[:3]) >= 2:
print("Matplotlib version check OK......")
else:
sys.exit('\nERROR: mtR_find requires Matplotlib version 2.0.2 or higher\n')
if args.metadata == None:
sys.exit('\nERROR: Metadata option is mandatory if graphical_output == "yes"\n')
if args.condition == None:
sys.exit('\nERROR: Condition option is mandatory if graphical_output == "yes"\n')
mt_list=['tRNA-Phe', 'mtSSU rRNA', 'tRNA-Val', 'mtLSU rRNA', 'tRNA-Leu', 'ND1', 'tRNA-Ile', 'tRNA-Gln', 'tRNA-Met', 'ND2', 'tRNA-Trp', 'tRNA-Ala', 'tRNA-Asn', 'tRNA-Cys', 'tRNA-Tyr', 'COI', 'tRNA-Ser1', 'tRNA-Asp', 'COII', 'tRNA-Lys', 'ATP8', 'ATP6', 'COIII', 'tRNA-Gly', 'ND3', 'tRNA-Arg', 'ND4L', 'ND4', 'tRNA-His', 'tRNA-Ser2', 'tRNA-Leu2', 'ND5', 'ND6', 'tRNA-Glu', 'CytB', 'tRNA-Thr', 'tRNA-Pro']
#check if bowtie version 1.1.2 exist only of args.index is provided
os.system("bowtie --version > vers.txt")
infile= open("vers.txt", "r")
lines = infile.readlines()
if lines != []:
ver = lines[0].strip().split("bowtie version")[1].split(" ")[1]
else:
ver = "no"
if (ver == "no"):
sys.exit('\nERROR: Bowtie not found. Please add the PATH to bowtie $PATH\n%s')
else:
bowtie = "bowtie"
def worker(f):
infile= open(f)
filename = str(f.split(".")[0].split("_")[0])
fastq_lst = infile.readlines()[1::4]
d = len(fastq_lst)
print(filename + " " + str(d))
fastq_lst = [line.strip() for line in fastq_lst]
b = Counter(fastq_lst)
print("Completed file " + str(f))
return d,filename,b
#defining how to extract MT cordinates from gtf file
def extract_MT(infile):
lines = infile.readlines()[5:]
annotation=[]
i = 0
for line in lines:
line= line.strip().split("\t")
if (args.species_name != "xen"):
if (line[0] == 'MT' and line[2]== 'gene'):
feature=line[8].split(";")
#annot=feature[2].split(" ")[::-1][0].replace('"',"")
biotype=feature[4].split(" ")[::-1][0].replace('"',"")
annotation.append((line[3],line[4],line[6],mt_list[i],biotype))
i = i + 1
else:
if (line[0] == 'MT') and (line[2]== 'gene' or line[2]== 'tRNA' or line[2]== 'rRNA'):
feature=line[8].split(";")
if (line[2] == "gene"):
biotype=feature[4].split("=")[1]
annotation.append((line[3],line[4],line[6],feature[-1].split("=")[1],biotype))
else:
biotype=feature[-1].split("=")[1]
annotation.append((line[3],line[4],line[6],feature[-2].split("=")[1],biotype))
i = i + 1
print("Number of mitochondrial genes is " + str(len(annotation)))
return annotation
#define function to annotate fragments
def mt_annotator(line,kind):
element = []
line= line.strip().split("\t")
subs = line[12].split("MD:Z:")[1]
if (kind != "mt-lncRNA") and len(subs) > 2:
subs = subs
elif (kind == "mt-lncRNA") and (len(subs) > 3):
subs = subs
else:
subs = ""
endposition = (len(line[0])+int(line[3]))-1
i = 0
for x in annotation:
i=i+1
if(i<37):
if (int(x[0]) <= int(line[3]) < int(x[1])) and (len(line[0])+int(x[0])<=int(x[1])):
#seq= line[0][::-1]
#seq=seq.replace("T","%temp%").replace("A","T").replace("%temp%", "A")
#seq=seq.replace("G","%temp%").replace("C","G").replace("%temp%", "C")
if line[1] == "0" and x[2] == "+":
element.extend([line[0],kind,x[4],"H","sense",x[3].split("-")[-1],"within gene boundary",line[3],endposition,x[0],x[1],subs])
return element
if line[1] == "0" and x[2] == "-":
element.extend([line[0],kind,x[4],"H","anti-sense",x[3].split("-")[-1],"within gene boundary",line[3],endposition,x[0],x[1],subs])
return element
if line[1] == "16" and x[2] == "+":
element.extend([line[0],kind,x[4],"L","anti-sense",x[3].split("-")[-1],"within gene boundary",line[3],endposition,x[0],x[1],subs])
return element
if line[1] == "16" and x[2] == "-":
element.extend([line[0],kind,x[4],"L","sense",x[3].split("-")[-1],"within gene boundary",line[3],endposition,x[0],x[1],subs])
return element
if int(x[0]) <= (int(line[3])+len(line[0])-1) <= int(x[1]):
#seq= line[0][::-1]
#print seq
#seq=seq.replace("T","%temp%").replace("A","T").replace("%temp%", "A")
#seq=seq.replace("G","%temp%").replace("C","G").replace("%temp%", "C")
if line[1] == "0" and x[2] == "+":
element.extend([line[0],kind,x[4],"H","sense",x[3].split("-")[-1],"overlap gene boundary",line[3],endposition,x[0],x[1],subs])
return element
if line[1] == "0" and x[2] == "-":
element.extend([line[0],kind,x[4],"H","anti-sense",x[3].split("-")[-1],"overlap gene boundary",line[3],endposition,x[0],x[1],subs])
return element
if line[1] == "16" and x[2] == "+":
element.extend([line[0],kind,x[4],"L","anti-sense",x[3].split("-")[-1],"overlap gene boundary",line[3],endposition,x[0],x[1],subs])
return element
if line[1] == "16" and x[2] == "-":
element.extend([line[0],kind,x[4],"L","sense",x[3].split("-")[-1],"overlap gene boundary",line[3],endposition,x[0],x[1],subs])
return element
else:
if line[1] != "16":
element.extend([line[0],kind,"non-coding","H","sense","non-coding","falls in non-coding region",line[3],endposition,"na","na",subs])
return element
else:
#seq = line[9][::-1]
#print seq
#seq=seq.replace("T","%temp%").replace("A","T").replace("%temp%", "A")
#seq=seq.replace("G","%temp%").replace("C","G").replace("%temp%", "C")
element.extend([line[0],kind,"non-coding","L","anti-sense","non-coding","falls in non-coding region",line[3],endposition,"na","na",subs])
return element
# annotation for long non coding RNA
def mt_annotator2(line,kind):
element = []
line= line.strip().split("\t")
subs = line[12].split("MD:Z:")[1]
if (kind != "mt-lncRNA") and len(subs) > 2:
subs = "-" + subs
elif (kind == "mt-lncRNA") and (len(subs) > 3):
subs = "-" + subs
else:
subs = ""
endposition = (len(line[0])+int(line[3]))-1
i = 0
for x in annotation:
i=i+1
if(i<37):
if (int(x[0]) <= int(line[3]) < int(x[1])):
if line[1] == "0" and x[2] == "+":
element.extend([line[0],kind,x[4],"H","sense",x[3].split("-")[-1],"seq start site inside gene",line[3],endposition,x[0],x[1],subs])
return element
if line[1] == "0" and x[2] == "-":
element.extend([line[0],kind,x[4],"H","anti-sense",x[3].split("-")[-1],"seq start site inside gene",line[3],endposition,x[0],x[1],subs])
return element
if line[1] == "16" and x[2] == "+":
element.extend([line[0],kind,x[4],"L","anti-sense",x[3].split("-")[-1],"seq start site inside gene",line[3],endposition,x[0],x[1],subs])
return element
if line[1] == "16" and x[2] == "-":
element.extend([line[0],kind,x[4],"L","sense",x[3].split("-")[-1],"seq start site inside gene",line[3],endposition,x[0],x[1],subs])
return element
else:
if line[1] != "16":
element.extend([line[0],kind,"non-coding","H","sense","non-coding","seq start site outside gene",line[3],endposition,"na","na",subs])
return element
else:
element.extend([line[0],kind,"non-coding","L","anti-sense","non-coding","seq start site outside gene",line[3],endposition,"na","na",subs])
return element
#download mitochondrial genome of species and create bowtie index and aslo downlaod gtf file and extract MT coordinates
if (args.species_name == "dre") and (args.non_model==None):
if not os.path.exists("zebrafish"):
os.makedirs("zebrafish")
species = "zebrafish"
os.chdir("zebrafish")
if not os.path.exists("MT_genome"):
os.makedirs("MT_genome")
os.chdir("MT_genome")
command = "wget ftp://ftp.ensembl.org/pub/release-92/fasta/danio_rerio/dna/Danio_rerio.GRCz11.dna.chromosome.MT.fa.gz"
print("Downloading Zebrafish Mitochondrial genome....................")
print(command)
os.system(command)
os.system("gunzip -f *.gz")
os.chdir("../")
print("Creating bowtie index for Zebrafish mitochondrial genome")
if not os.path.exists("bowtie-index"):
os.makedirs("bowtie-index")
index = "bowtie-index/zebra_MT_index"
command = "bowtie-build MT_genome/Danio_rerio.GRCz11.dna.chromosome.MT.fa " + index
print(command)
os.system(command)
os.chdir("../")
command = "wget ftp://ftp.ensembl.org/pub/release-92/gtf/danio_rerio/Danio_rerio.GRCz11.92.chr.gtf.gz"
print(command)
os.system(command)
os.system("gunzip -f Danio_rerio.GRCz11.92.chr.gtf.gz")
infile= open("Danio_rerio.GRCz11.92.chr.gtf", "r")
annotation = extract_MT(infile)
#print "No of elements in annotation is " + str(len(annotation))
elif (args.species_name == "hsa") and (args.non_model==None):
if not os.path.exists("human"):
os.makedirs("human")
species = "human"
os.chdir("human")
if not os.path.exists("MT_genome"):
os.makedirs("MT_genome")
os.chdir("MT_genome")
command = "wget ftp://ftp.ensembl.org/pub/release-92/fasta/homo_sapiens/dna/Homo_sapiens.GRCh38.dna.chromosome.MT.fa.gz"
print("Downloading Human Mitochondrial genome....................")
print(command)
os.system(command)
os.system("gunzip -f *.gz")
os.chdir("../")
print("Creating bowtie index for Human mitochondrial genome")
if not os.path.exists("bowtie-index"):
os.makedirs("bowtie-index")
index = "bowtie-index/human_MT_index"
command = "bowtie-build MT_genome/Homo_sapiens.GRCh38.dna.chromosome.MT.fa " + index
os.system(command)
os.chdir("../")
command = "wget ftp://ftp.ensembl.org/pub/release-92/gtf/homo_sapiens/Homo_sapiens.GRCh38.92.chr.gtf.gz"
os.system(command)
os.system("gunzip -f Homo_sapiens.GRCh38.92.chr.gtf.gz")
infile= open("Homo_sapiens.GRCh38.92.chr.gtf", "r")
annotation = extract_MT(infile)
#print annotation
#print "No of elements in annotation is " + str(len(annotation))
elif (args.species_name == "mmu") and (args.non_model==None):
if not os.path.exists("mouse"):
os.makedirs("mouse")
species = "mouse"
os.chdir("mouse")
if not os.path.exists("MT_genome"):
os.makedirs("MT_genome")
os.chdir("MT_genome")
command = "wget ftp://ftp.ensembl.org/pub/release-92/fasta/mus_musculus/dna/Mus_musculus.GRCm38.dna.chromosome.MT.fa.gz"
print("Downloading Mouse Mitochondrial genome....................")
print(command)
os.system(command)
os.system("gunzip -f *.gz")
os.chdir("../")
print("Creating bowtie index for Mouse mitochondrial genome")
if not os.path.exists("bowtie-index"):
os.makedirs("bowtie-index")
index = "bowtie-index/mouse_MT_index"
command = "bowtie-build MT_genome/Mus_musculus.GRCm38.dna.chromosome.MT.fa " + index
os.system(command)
os.chdir("../")
command = "wget ftp://ftp.ensembl.org/pub/release-92/gtf/mus_musculus/Mus_musculus.GRCm38.92.chr.gtf.gz"
os.system(command)
os.system("gunzip -f Mus_musculus.GRCm38.92.chr.gtf.gz")
infile= open("Mus_musculus.GRCm38.92.chr.gtf", "r")
annotation = extract_MT(infile)
#print "No of elements in annotation is " + str(len(annotation))
elif (args.species_name == "rno") and (args.non_model==None):
if not os.path.exists("Rat"):
os.makedirs("Rat")
species = "rat"
os.chdir("Rat")
if not os.path.exists("MT_genome"):
os.makedirs("MT_genome")
os.chdir("MT_genome")
command = "wget ftp://ftp.ensembl.org/pub/release-92/fasta/rattus_norvegicus/dna/Rattus_norvegicus.Rnor_6.0.dna.chromosome.MT.fa.gz"
print("Downloading Rat Mitochondrial genome....................")
print(command)
os.system(command)
os.system("gunzip -f *.gz")
os.chdir("../")
print("Creating bowtie index for Rat mitochondrial genome")
if not os.path.exists("bowtie-index"):
os.makedirs("bowtie-index")
index = "bowtie-index/Rat_MT_index"
command = "bowtie-build MT_genome/Rattus_norvegicus.Rnor_6.0.dna.chromosome.MT.fa " + index
os.system(command)
os.chdir("../")
command = "wget ftp://ftp.ensembl.org/pub/release-92/gtf/rattus_norvegicus/Rattus_norvegicus.Rnor_6.0.92.chr.gtf.gz"
os.system(command)
os.system("gunzip -f Rattus_norvegicus.Rnor_6.0.92.chr.gtf.gz")
infile= open("Rattus_norvegicus.Rnor_6.0.92.chr.gtf", "r")
annotation = extract_MT(infile)
#print "No of elements in annotation is " + str(len(annotation))
elif (args.species_name == "gga") and (args.non_model==None):
if not os.path.exists("chicken"):
os.makedirs("chicken")
species = "chicken"
os.chdir("chicken")
if not os.path.exists("MT_genome"):
os.makedirs("MT_genome")
os.chdir("MT_genome")
command = "wget ftp://ftp.ensembl.org/pub/release-92/fasta/gallus_gallus/dna/Gallus_gallus.Gallus_gallus-5.0.dna.chromosome.MT.fa.gz"
print("Downloading Rat Mitochondrial genome....................")
print(command)
os.system(command)
os.system("gunzip -f *.gz")
os.chdir("../")
print("Creating bowtie index for Rat mitochondrial genome")
if not os.path.exists("bowtie-index"):
os.makedirs("bowtie-index")
index = "bowtie-index/chicken_MT_index"
command = "bowtie-build MT_genome/Gallus_gallus.Gallus_gallus-5.0.dna.chromosome.MT.fa " + index
os.system(command)
os.chdir("../")
command = "wget ftp://ftp.ensembl.org/pub/release-92/gtf/gallus_gallus/Gallus_gallus.Gallus_gallus-5.0.92.chr.gtf.gz"
os.system(command)
os.system("gunzip -f Gallus_gallus.Gallus_gallus-5.0.92.chr.gtf.gz")
infile= open("Gallus_gallus.Gallus_gallus-5.0.92.chr.gtf", "r")
annotation = extract_MT(infile)
#print "No of elements in annotation is " + str(len(annotation))
elif (args.species_name == "xen") and (args.non_model==None):
if not os.path.exists("frog"):
os.makedirs("frog")
species = "frog"
os.chdir("frog")
if not os.path.exists("MT_genome"):
os.makedirs("MT_genome")
os.chdir("MT_genome")
command = "wget ftp://ftp.xenbase.org/pub/Genomics/Sequences/Mitochondrial/Xlaevis_mito_seq.fa"
print("Downloading Xenophus laevis Mitochondrial genome....................")
print(command)
os.system(command)
os.chdir("../")
print("Creating bowtie index for Xenophus laevis mitochondrial genome")
if not os.path.exists("bowtie-index"):
os.makedirs("bowtie-index")
index = "bowtie-index/frog_MT_index"
command = "bowtie-build MT_genome/Xlaevis_mito_seq.fa " + index
os.system(command)
os.chdir("../")
command = "wget http://ftp.xenbase.org/pub/Genomics/JGI/Xenla9.2/XENLA_9.2_GCF.gff3"
os.system(command)
#os.system("tar -xzf XENLA_9.2_Xenbase.gtf")
infile= open("XENLA_9.2_GCF.gff3", "r")
annotation = extract_MT(infile)
#print annotation
#print "No of elements in annotation is " + str(len(annotation))
elif (args.species_name == "non_model"):
species = "non_model"
if not os.path.exists("non_model"):
os.makedirs("non_model")
os.chdir("non_model")
if not os.path.exists("bowtie-index"):
os.makedirs("bowtie-index")
os.chdir("..")
index = "bowtie-index/non_model_index"
print("Creating bowtie index from the specified file")
command = "bowtie-build " + args.FASTA + " " + "non_model/" + index
os.system(command)
infile= open(args.GTF, "r")
annotation = extract_MT(infile)
#print "No of mitochondrial genes is " + str(len(annotation))
else:
sys.exit('\nERROR: Argument is not valid\n%s' %(docstring))
#collapse fastq files
# get the current working directory
if (args.files!=None) and (args.input_path!=None):
sys.exit('\nERROR: --files and --input_path cannot be specified together\n%s'%(docstring))
elif (args.files!=None):
files = arg.files
files = [f for f in files if f.split("/")[-1].endswith(".fastq") or f.split("/")[-1].endswith(".fastq.gz")]
else:
if (args.input_path==None):
cwd = os.getcwd()
else:
cwd= args.input_path
files = os.listdir(cwd)
if len(files) == 0:
sys.exit('\nERROR: No FASTQ files in home directory\nEnsure that mtR_find is executed from the same place as the FATSQ files are present or specify the path to FASTQ files\n%s'%(docstring))
if __name__ == "__main__":
if args.RNA == "sRNA":
pool = multiprocessing.Pool(processes = 30)
result_list = pool.map(worker, [f for f in files if f.endswith(".fastq")])
pool.close()
pool.join()
elif args.RNA == "lncRNA":
result_list =[]
file_list = [f for f in files if f.endswith(".fastq")]
for f in file_list:
infile= open(f)
filename = str(f.split(".")[0].split("_")[0])
fastq_lst = infile.readlines()[1::4]
d = len(fastq_lst)
print(filename + " " + str(d))
fastq_lst = [line.strip() for line in fastq_lst]
b = Counter(fastq_lst)
result_list.append((d, filename, b))
else:
sys.exit('\nERROR: Argument is not valid\n%s' %(docstring))
summed_counter = Counter()
#asan = pd.DataFrame([],columns=['filename','Read#'])
read_stat = []
for x,y,z in result_list:
#for y,z in x.iteritems():
print("Processing filename " + str(y))
read_stat.append((y,x))
summed_counter.update(z)
read_sta = pd.DataFrame(list(read_stat), columns = ["filename", "total-count"])
read_sta.to_csv("total_count_per_file.csv", sep = ",", index=False)
summed_counter = dict(takewhile(lambda x: x[1] >= 200, summed_counter.most_common()))
print("total number of unique sequences is " + str(len(summed_counter)))
#summed_counter = sorted(summed_counter.items(), key=operator.itemgetter(0))
#output = "../master_new_output007.fa"
#print output
#ofile = open(output, "w")
#for x,y in summed_counter:
#ofile.write(">" + str(x) +"\n" + str(y) + "\n")
#ofile.close()
summed_counter = pd.DataFrame.from_dict(summed_counter, orient='index').reset_index()
summed_counter = summed_counter.rename(columns={'index':'read', 'total_count':'count'})
summed_counter = pd.DataFrame(summed_counter)
summed_counter.columns=['read', 'total_count']
for x,y,z in result_list:
#for y,z in x.iteritems():
print("Generating read count for file " + str(y))
df = pd.DataFrame.from_dict(z, orient='index').reset_index()
df = df.rename(columns={'index':'read', 0:y})
summed_counter = pd.merge(summed_counter, df, how='left', on='read')
summed_counter = summed_counter.fillna(0)
summed_counter.sort_values('read', inplace=True)
summed_counter.to_csv("read_count.csv", sep = ",", index=False)
fasta_lst = summed_counter["read"].tolist()
col_list=list(summed_counter)
col_list[0]='sequence'
summed_counter.columns=col_list
ofile = open("master.fa", "w")
for x in fasta_lst:
ofile.write(">" + str(x) +"\n" + str(x) + "\n")
ofile.close()
#map the sequences to mitochondrial genome index using bowtie
command = "bowtie --best -v 1 -p 20 " + species + "/" + index + " -f master.fa -S filtered200_mt.sam"
print(command)
os.system(command)
#seperate mapped and unmapped reads
command = "samtools view -Sh -F 4 filtered200_mt.sam > filtered200_mt_mapped.sam"
#print command
os.system(command)
command = "samtools view -Sh -f 4 filtered200_mt.sam > filtered200_mt_unmapped.sam"
#print command
os.system(command)
#parsing SAM files
command = "egrep -v '@HD|@SQ|@PG' filtered200_mt_mapped.sam > filtered200_mt_mapped.tsv"
#print command
os.system(command)
command = "egrep -v '@HD|@SQ|@PG' filtered200_mt_unmapped.sam > filtered200_mt_unmapped.tsv"
#print command
os.system(command)
lines = open("filtered200_mt_mapped.tsv", "r").readlines()
#sm_RNA = [ line for line in lines if len(line.split("\t")[0]) < 50]
#ln_RNA = [ line for line in lines if len(line.split("\t")[0]) >= 50]
allmtseq = pd.DataFrame([], columns=["sequence","type","bio-type","strand","orientation","annotation","Sequence alignment","Sequence start position(bp)","Sequence end position(bp)","gene-boundary:start(bp)","gene-boundary:end(bp)", "substitutions"])
if (len(lines) != 0) and (args.RNA == "sRNA"):
pool = multiprocessing.Pool()
result_list = pool.map(partial(mt_annotator, kind = "normal_1_mismatch"), [line for line in lines])
pool.close()
pool.join()
if len(result_list)==0:
sys.exit('\nERROR: Problem with FASTQ or gtf file \n')
allmtseq = pd.DataFrame(result_list, columns=["sequence","type","bio-type","strand","orientation","annotation","Sequence alignment","Sequence start position(bp)","Sequence end position(bp)","gene-boundary:start(bp)","gene-boundary:end(bp)", "substitutions"])
print("Number of mtsRNAs mapping to mitochondrial genome with one-mismatch is " + str(len(allmtseq)))
#mask CCA from unmapped reads
lines = open("filtered200_mt_unmapped.tsv", "r").readlines()
lines = [line.strip().split("\t")[0] for line in lines]
selection=list(range(3,-4,-1))
fo = open("CCA_fasta.fa","w")
for k in lines:
if (k[-3:] == 'CCA'):
fo.write(">" + k[:-3] + "\n" + k[:-3] + "\n")
else:
continue
if len(lines) > 0:
#map the sequences to mitochondrial genome index using bowtie
command = "bowtie --best -v 0 -p 20 " + species + "/" + index + " -f CCA_fasta.fa -S filtered200_CCA_mt.sam"
print(command)
os.system(command)
#seperate mapped and unmapped reads
command = "samtools view -Sh -F 4 filtered200_CCA_mt.sam > filtered200_CCA_mt_mapped.sam"
#print command
os.system(command)
#parsing SAM files
command = "egrep -v '@HD|@SQ|@PG' filtered200_CCA_mt_mapped.sam > filtered200_CCA_mt_mapped.tsv"
#print command
os.system(command)
lines2 = open("filtered200_CCA_mt_mapped.tsv", "r").readlines()
#sm_RNA_CCA = [ line for line in lines if len(line.split("\t")[0]) < 47]
if len(lines2) != 0:
pool = multiprocessing.Pool()
result_lst = pool.map(partial(mt_annotator, kind = "CCA_0_mismatch"), [line for line in lines2])
pool.close()
pool.join()
selection=list(range(3,-4,-1))
result_list = []
for line in result_lst:
#line= line.split(",")
#annot=line[4].split(" ")[::-3]
if line[5] != "non-coding":
#print line
if ((int(line[9])-int(line[7])) in selection) or ((int(line[10])-int(line[8])) in selection):
line[0] = line[0] + "CCA"
#print line[0]
result_list.append(line)
else:
continue
CCA = pd.DataFrame(result_list,columns=["sequence","type","bio-type", "strand","orientation","annotation","Sequence alignment","Sequence start position(bp)","Sequence end position(bp)","gene-boundary:start(bp)","gene-boundary:end(bp)", "substitutions"])
if len(result_list) != 0:
print("total number of CCA mitochondrial tRfs is " + str(len(CCA)))
CCA["substitutions"] = "CCA"
CCA.insert(1, "subtype", "")
selection=list(range(3,-4,-1))
for i in range(0,len(CCA),1):
if len(CCA.at[i,'sequence']) - ((int(CCA.at[i,'gene-boundary:end(bp)'])-int(CCA.at[i,'gene-boundary:start(bp)']))/2) in selection:
if (int(CCA.at[i,'gene-boundary:start(bp)'])-int(CCA.at[i,'Sequence start position(bp)'])) in selection:
CCA.at[i,'subtype'] = 'tRNA-half-3'
elif (int(CCA.at[i,'gene-boundary:end(bp)'])-int(CCA.at[i,'Sequence end position(bp)'])) in selection:
CCA.at[i,'subtype']= 'tRNA-half-5'
else:
if (int(CCA.at[i,'gene-boundary:start(bp)'])-int(CCA.at[i,'Sequence start position(bp)'])) in selection:
CCA.at[i,'subtype']= 'tRF-3'
elif (int(CCA.at[i,'gene-boundary:end(bp)'])-int(CCA.at[i,'Sequence end position(bp)'])) in selection:
CCA.at[i,'subtype']= 'tRF-5'
else:
CCA = pd.DataFrame([],columns=["sequence","type","bio-type", "strand","orientation","annotation","Sequence alignment","Sequence start position(bp)","Sequence end position(bp)","ge\
ne-boundary:start(bp)","gene-boundary:end(bp)", "substitutions"])
else:
CCA = pd.DataFrame([],columns=["sequence","type","bio-type", "strand","orientation","annotation","Sequence alignment","Sequence start position(bp)","Sequence end position(bp)","ge\
ne-boundary:start(bp)","gene-boundary:end(bp)", "substitutions"])
if len(allmtseq) != 0:
allmtseq.insert(1, "subtype", "")
tRNA = allmtseq.loc[allmtseq["bio-type"]=="Mt_tRNA"]
tRNA.reset_index(drop=True,inplace=True)
#tRNA.insert(1, "subtype", "")
for i in range(0,len(tRNA),1):
if (tRNA.at[i,'strand'] == "H"):
if len(tRNA.at[i,'sequence']) - ((int(tRNA.at[i,'gene-boundary:end(bp)'])-int(tRNA.at[i,'gene-boundary:start(bp)']))/2) in range(0,2):
#print len(tRNA.at[i,'sequence']),int(tRNA.at[i,'gene-boundary:end(bp)'])-int(tRNA.at[i,'gene-boundary:start(bp)']),tRNA.at[i,'Sequence start position(bp)'],tRNA.at[i,'Sequence end position(bp)'],tRNA.at[i,'gene-boundary:start(bp)'], tRNA.at[i,'gene-boundary:end(bp)']
if (int(tRNA.at[i,'gene-boundary:start(bp)'])-int(tRNA.at[i,'Sequence start position(bp)'])) in selection:
tRNA.at[i,'subtype']='tRNA-half-5'
elif (int(tRNA.at[i,'gene-boundary:end(bp)'])-int(tRNA.at[i,'Sequence end position(bp)'])) in selection:
tRNA.at[i,'subtype']='tRNA-half-3'
elif (((int(tRNA.at[i,'gene-boundary:end(bp)'])-int(tRNA.at[i,'gene-boundary:start(bp)']))/2) + int(tRNA.at[i,'gene-boundary:start(bp)'])) > int(tRNA.at[i,'Sequence start position(bp)']):
tRNA.at[i,'subtype']='i-tRF-5'
elif (((int(tRNA.at[i,'gene-boundary:end(bp)'])-int(tRNA.at[i,'gene-boundary:start(bp)']))/2) + int(tRNA.at[i,'gene-boundary:start(bp)'])) <= int(tRNA.at[i,'Sequence start position(bp)']):
tRNA.at[i,'subtype']='i-tRF-3'
elif (int(tRNA.at[i,'gene-boundary:start(bp)'])-int(tRNA.at[i,'Sequence start position(bp)'])) in selection:
tRNA.at[i,'subtype']='tRF-5'
elif (int(tRNA.at[i,'gene-boundary:end(bp)'])-int(tRNA.at[i,'Sequence end position(bp)'])) in selection:
tRNA.at[i,'subtype']='tRF-3'
elif (((int(tRNA.at[i,'gene-boundary:end(bp)'])-int(tRNA.at[i,'gene-boundary:start(bp)']))/2) + int(tRNA.at[i,'gene-boundary:start(bp)'])) > int(tRNA.at[i,'Sequence start position(bp)']):
tRNA.at[i,'subtype']='i-tRF-5'
elif (((int(tRNA.at[i,'gene-boundary:end(bp)'])-int(tRNA.at[i,'gene-boundary:start(bp)']))/2) + int(tRNA.at[i,'gene-boundary:start(bp)'])) <= int(tRNA.at[i,'Sequence start position(bp)']):
tRNA.at[i,'subtype']='i-tRF-3'
elif tRNA.at[i,'strand'] == "L":
if len(tRNA.at[i,'sequence']) - ((int(tRNA.at[i,'gene-boundary:end(bp)'])-int(tRNA.at[i,'gene-boundary:start(bp)']))/2) in range(0,2):
#print len(tRNA.at[i,'sequence']),int(tRNA.at[i,'gene-boundary:end(bp)'])-int(tRNA.at[i,'gene-boundary:start(bp)']),tRNA.at[i,'Sequence start position(bp)'],tRNA.at[i,'Sequence end position(bp)'],tRNA.at[i,'gene-boundary:start(bp)'], tRNA.at[i,'gene-boundary:end(bp)']
if (int(tRNA.at[i,'gene-boundary:start(bp)'])-int(tRNA.at[i,'Sequence start position(bp)'])) in selection:
tRNA.at[i,'subtype']='tRNA-half-3'
elif (int(tRNA.at[i,'gene-boundary:end(bp)'])-int(tRNA.at[i,'Sequence end position(bp)'])) in selection:
tRNA.at[i,'subtype']='tRNA-half-5'
elif (((int(tRNA.at[i,'gene-boundary:end(bp)'])-int(tRNA.at[i,'gene-boundary:start(bp)']))/2) + int(tRNA.at[i,'gene-boundary:start(bp)'])) > int(tRNA.at[i,'Sequence start position(bp)']):
tRNA.at[i,'subtype']='i-tRF-5'
elif (((int(tRNA.at[i,'gene-boundary:end(bp)'])-int(tRNA.at[i,'gene-boundary:start(bp)']))/2) + int(tRNA.at[i,'gene-boundary:start(bp)'])) <= int(tRNA.at[i,'Sequence start position(bp)']):
tRNA.at[i,'subtype']='i-tRF-3'
elif (int(tRNA.at[i,'gene-boundary:start(bp)'])-int(tRNA.at[i,'Sequence start position(bp)'])) in selection:
tRNA.at[i,'subtype']='tRF-3'
elif (int(tRNA.at[i,'gene-boundary:end(bp)'])-int(tRNA.at[i,'Sequence end position(bp)'])) in selection:
tRNA.at[i,'subtype']='tRF-5'
elif (((int(tRNA.at[i,'gene-boundary:end(bp)'])-int(tRNA.at[i,'gene-boundary:start(bp)']))/2) + int(tRNA.at[i,'gene-boundary:start(bp)'])) > int(tRNA.at[i,'Sequence start position(bp)']):
tRNA.at[i,'subtype']='i-tRF-3'
elif (((int(tRNA.at[i,'gene-boundary:end(bp)'])-int(tRNA.at[i,'gene-boundary:start(bp)']))/2) + int(tRNA.at[i,'gene-boundary:start(bp)'])) <= int(tRNA.at[i,'Sequence start position(bp)']):
tRNA.at[i,'subtype']='i-tRF-5'
na_count = allmtseq.loc[allmtseq['annotation'] == "non-coding"].copy()
na_count["subtype"] = "non-coding"
nontRNA = allmtseq.loc[(allmtseq["bio-type"] == "protein_coding")|(allmtseq["bio-type"] == "Mt_rRNA")]
nontRNA.reset_index(drop=True,inplace=True)
#nontRNA.insert(1, "subtype", "")
for i in range(0,len(nontRNA),1):
if (nontRNA.at[i,'strand'] == "H"):
if len(nontRNA.at[i,'sequence']) - ((int(nontRNA.at[i,'gene-boundary:end(bp)'])-int(nontRNA.at[i,'gene-boundary:start(bp)']))/2) in range(0,2):
#print len(tRNA.at[i,'sequence']),int(tRNA.at[i,'gene-boundary:end(bp)'])-int(tRNA.at[i,'gene-boundary:start(bp)']),tRNA.at[i,'Sequence start position(bp)'],tRNA.at[i,'Sequence end position(bp)'],tRNA.at[i,'gene-boundary:start(bp)'], tRNA.at[i,'gene-boundary:end(bp)']
if (int(nontRNA.at[i,'gene-boundary:start(bp)'])-int(nontRNA.at[i,'Sequence start position(bp)'])) in selection:
nontRNA.at[i,'subtype']="5'-half"
elif (int(nontRNA.at[i,'gene-boundary:end(bp)'])-int(nontRNA.at[i,'Sequence end position(bp)'])) in selection:
nontRNA.at[i,'subtype']="3'-half"
elif (((int(nontRNA.at[i,'gene-boundary:end(bp)'])-int(nontRNA.at[i,'gene-boundary:start(bp)']))/2) + int(nontRNA.at[i,'gene-boundary:start(bp)'])) > int(nontRNA.at[i,'Sequence start position(bp)']):
nontRNA.at[i,'subtype']='i-5prime'
elif (((int(nontRNA.at[i,'gene-boundary:end(bp)'])-int(nontRNA.at[i,'gene-boundary:start(bp)']))/2) + int(nontRNA.at[i,'gene-boundary:start(bp)'])) <= int(nontRNA.at[i,'Sequence start position(bp)']):
nontRNA.at[i,'subtype']='i-3prime'
elif (int(nontRNA.at[i,'gene-boundary:start(bp)'])-int(nontRNA.at[i,'Sequence start position(bp)'])) in selection:
nontRNA.at[i,'subtype']="5prime"
elif (int(nontRNA.at[i,'gene-boundary:end(bp)'])-int(nontRNA.at[i,'Sequence end position(bp)'])) in selection:
nontRNA.at[i,'subtype']="3prime"
elif (((int(nontRNA.at[i,'gene-boundary:end(bp)'])-int(nontRNA.at[i,'gene-boundary:start(bp)']))/2) + int(nontRNA.at[i,'gene-boundary:start(bp)'])) > int(nontRNA.at[i,'Sequence start position(bp)']):
nontRNA.at[i,'subtype']='i-5prime'
elif (((int(nontRNA.at[i,'gene-boundary:end(bp)'])-int(nontRNA.at[i,'gene-boundary:start(bp)']))/2) + int(nontRNA.at[i,'gene-boundary:start(bp)'])) <= int(nontRNA.at[i,'Sequence start position(bp)']):
nontRNA.at[i,'subtype']='i-3prime'
elif (nontRNA.at[i,'strand'] == "L"):
if len(nontRNA.at[i,'sequence']) - ((int(nontRNA.at[i,'gene-boundary:end(bp)'])-int(nontRNA.at[i,'gene-boundary:start(bp)']))/2) in range(0,2):
#print len(tRNA.at[i,'sequence']),int(tRNA.at[i,'gene-boundary:end(bp)'])-int(tRNA.at[i,'gene-boundary:start(bp)']),tRNA.at[i,'Sequence start position(bp)'],tRNA.at[i,'Sequence end position(bp)'],tRNA.at[i,'gene-boundary:start(bp)'], tRNA.at[i,'gene-boundary:end(bp)']
if (int(nontRNA.at[i,'gene-boundary:start(bp)'])-int(nontRNA.at[i,'Sequence start position(bp)'])) in selection:
nontRNA.at[i,'subtype']="3'-half"
elif (int(nontRNA.at[i,'gene-boundary:end(bp)'])-int(nontRNA.at[i,'Sequence end position(bp)'])) in selection:
nontRNA.at[i,'subtype']="5'-half"
elif (((int(nontRNA.at[i,'gene-boundary:end(bp)'])-int(nontRNA.at[i,'gene-boundary:start(bp)']))/2) + int(nontRNA.at[i,'gene-boundary:start(bp)'])) > int(nontRNA.at[i,'Sequence start position(bp)']):
nontRNA.at[i,'subtype']='i-5prime'
elif (((int(nontRNA.at[i,'gene-boundary:end(bp)'])-int(nontRNA.at[i,'gene-boundary:start(bp)']))/2) + int(nontRNA.at[i,'gene-boundary:start(bp)'])) <= int(nontRNA.at[i,'Sequence start position(bp)']):
nontRNA.at[i,'subtype']='i-3prime'
elif (int(nontRNA.at[i,'gene-boundary:start(bp)'])-int(nontRNA.at[i,'Sequence start position(bp)'])) in selection:
nontRNA.at[i,'subtype']="3prime"
elif (int(nontRNA.at[i,'gene-boundary:end(bp)'])-int(nontRNA.at[i,'Sequence end position(bp)'])) in selection:
nontRNA.at[i,'subtype']="5prime"
elif (((int(nontRNA.at[i,'gene-boundary:end(bp)'])-int(nontRNA.at[i,'gene-boundary:start(bp)']))/2) + int(nontRNA.at[i,'gene-boundary:start(bp)'])) > int(nontRNA.at[i,'Sequence start position(bp)']):
nontRNA.at[i,'subtype']='i-3prime'
elif (((int(nontRNA.at[i,'gene-boundary:end(bp)'])-int(nontRNA.at[i,'gene-boundary:start(bp)']))/2) + int(nontRNA.at[i,'gene-boundary:start(bp)'])) <= int(nontRNA.at[i,'Sequence start position(bp)']):
nontRNA.at[i,'subtype']='i-5prime'
#allmtseq=allmtseq.append(CCA)[CCA.columns.tolist()]
#print list(summed_counter)
#print list(allmtseq)
filter_lst = []
for i in range(0,len(annotation)):
if ((annotation[i][4] == "Mt_tRNA") and (annotation[i][2] == "+")) and (annotation[i][2] == annotation[i+1][2]):
filter_lst.append((annotation[i][3],annotation[i+1][3]))
for x,v in filter_lst:
for i in range(0,len(tRNA),1):
if (tRNA.at[i,'subtype'] == 'tRF-5') and (tRNA.at[i,'annotation'] == v) and (tRNA.at[i,'strand'] == "H"):
#z = "tRF-1-" + str(x) + "/" + str(tRNA.at[i,'subtype']) + '-' + str(v)
#z = "tRF-1-" +str(v)
z = "tRF-1"
tRNA.at[i,'subtype']=z
tRNA.at[i,'annotation'] = str(x)
if (tRNA.at[i,'subtype'] == "5'-half") and (tRNA.at[i,'annotation'] == v) and (tRNA.at[i,'strand'] == "H"):
#z = "tRF-1-" + str(x) + "/" + str(tRNA.at[i,'subtype']) + '-' + str(v)
#z = "tRF-1-" +str(v)
z = "tRF-1"
tRNA.at[i,'subtype']=z
tRNA.at[i,'annotation']=str(x)
for x,v in filter_lst:
for i in range(0,len(nontRNA),1):
if (nontRNA.at[i,'subtype'] == '5prime') and (nontRNA.at[i,'annotation'] == v) and (nontRNA.at[i,'strand'] == "H"):
#z = "tRF-1-" + str(x) + "/" + str(nontRNA.at[i,'subtype']) + '-' + str(v)
#z = "tRF-1-" +str(v)
z = "tRF-1"
nontRNA.at[i,'subtype']=z
nontRNA.at[i,'annotation']=str(x)
if len(CCA) != 0:
allmtseq = tRNA.append([CCA, nontRNA, na_count])[tRNA.columns.tolist()]
allmtseq=pd.merge(allmtseq, summed_counter, how='left', on='sequence')
allmtseq.insert(1, "Specific-ID", "")
allmtseq.insert(2, "General-ID", "")
#allmtseq.insert(3, "detailed_name", "")
allmtseq.insert(3, "temp_c", "")
allmtseq.insert(4, "temp_d", "")
allmtseq.insert(5, "temp_e", "")
allmtseq.insert(6, "temp_f", "")
fnames = {"anti-sense":"as","sense": ""}
gnames = {"normal_1_mismatch":"", "CCA_0_mismatch": "-CCA"}
nnames = {'tRNA-half-5': 'tRH-5', 'tRNA-half-3': 'tRH-3', 'i-tRF-5': 'i-tRF-5', 'i-tRF-3': 'i-tRF-3', 'tRF-3': 'tRF-3', 'tRF-5': 'tRF-5', 'tRF-1': 'tRF-1', 'i-3prime':'i-3p', 'i-5prime': 'i-5p', '5prime': '5p', '3prime': '3p', "5'-half": '5H', "3'-half": '3H', "non-coding": 'nc' }
allmtseq["temp_c"] = allmtseq["orientation"].map(fnames)
allmtseq["temp_d"] = allmtseq["type"].map(gnames)
allmtseq["temp_e"] = allmtseq.apply(lambda x: str(len(x["sequence"])), axis =1)
allmtseq["temp_f"] = allmtseq["subtype"].map(nnames)
allmtseq["Specific-ID"]= args.species_name + "-" + "mt-sRNA" + "-" + allmtseq["annotation"]+ "-" + allmtseq["strand"] + "-" + allmtseq["temp_c"] + allmtseq["Sequence start position(bp)"] + "-" + allmtseq["temp_f"] + "-" + allmtseq["temp_e"] + "-" +allmtseq["substitutions"]
allmtseq["General-ID"]= args.species_name + "-" + "mt-sRNA" + "-" + allmtseq["annotation"]+ "-" + allmtseq["strand"] + "-" + allmtseq["temp_c"] + allmtseq["Sequence start position(bp)"] + allmtseq["temp_d"]
#allmtseq["short-ref"] = allmtseq[["annotation","strand","orientation","subtype"]].apply(lambda x: "-".join(x), axis =1)
allmtseq.drop(["temp_c","temp_d","temp_e","temp_f"], inplace = True, axis =1)
allmtseq["Specific-ID"] = allmtseq["Specific-ID"].str.replace("--","-")
if len(allmtseq) > 0:
allmtseq.to_csv("masterreadcount.csv", sep = ",", index=False)
print("Total number of mtsRNAs is " + str(len(allmtseq)))
#long non coding RNA section
if (len(lines) != 0) and (args.RNA == "lncRNA"):
pool = multiprocessing.Pool()
result_list = pool.map(partial(mt_annotator2, kind = "mt-lncRNA"), [line for line in lines])
pool.close()
pool.join()
allmtseq2 = pd.DataFrame(result_list, columns=["sequence","type","bio-type","strand","orientation","annotation","Sequence alignment","Sequence start position(bp)","Sequence end position(bp)","gene-boundary:start(bp)","gene-boundary:end(bp)", "substitutions"])
allmtseq2= | pd.merge(allmtseq2, summed_counter, how='left', on='sequence') | pandas.merge |
import pandas as pd
import os
#
from .... import global_var
from . import transcode, paths
def load(map_code = None):
"""
Loads the load data provided by ENTSO-E.
:param map_code: The delivery zone
:type map_code: string
:return: The load data
:rtype: pd.DataFrame
"""
df_path = paths.fpath_tmp.format(map_code = map_code) + '.csv'
try:
print('Load load/entsoe - ', end = '')
df = pd.read_csv(df_path,
header = [0],
sep = ';',
)
df.loc[:,global_var.load_dt_UTC] = pd.to_datetime(df[global_var.load_dt_UTC])
print('Loaded')
except Exception as e:
print('fail')
print(e)
dikt_load = {}
try:
list_files = sorted(os.listdir(paths.folder_raw))
assert len(list_files) > 0
except Exception as e:
print('Files not found.\n'
'They can be downloaded with the SFTP share proposed by ENTSOE at \n'
'https://transparency.entsoe.eu/content/static_content/Static%20content/knowledge%20base/SFTP-Transparency_Docs.html\n'
'and stored in\n'
'{0}'.format(paths.folder_raw)
)
raise e
for ii, fname in enumerate(list_files):
if os.path.splitext(fname)[1] == '.csv':
print('\r{0:3}/{1:3} - {2:<28}'.format(ii+1,
len(list_files),
fname,
),
end = '',
)
df = pd.read_csv(os.path.join(paths.folder_raw,
fname,
),
encoding = 'UTF-8',
sep = '\t',
decimal = '.',
)
df = df.rename(transcode.columns,
axis = 1,
)
df[global_var.load_dt_UTC] = | pd.to_datetime(df[global_var.load_dt_UTC]) | pandas.to_datetime |
import time
import pandas as pd
import numpy as np
CITY_DATA = { 'chicago': 'chicago.csv',
'new york city': 'new_york_city.csv',
'washington': 'washington.csv' }
MONTHS = ["january","february", "march", "april",\
"may","june","all"]
DAYS = ["monday","tuesday","wednesday","thursday",\
"friday","saturday","sunday","all"]
def get_user_input(input_variable, allowed_inputs):
"""
Asks user to specify an input_variable
among the list of allowed_inputs.
Returns:
(str) variable - value of the variable
"""
# Enter again flag
enter_again = False
keep_asking = True
while keep_asking or enter_again:
print("Enter variable - {}".format(input_variable))
print("Allowed values are - \n\t{}".format("\n\t".join(allowed_inputs)))
# Take user input
variable = input()
# Remove whitespace and change to lower case
variable = variable.strip().lower()
# Check if variable is in allowed_inputs
if variable in allowed_inputs:
enter_again = False
print("You selected: {} = {}".format(input_variable, variable))
choice_enter_again = input("Do you want to continue with this choice? Press y or Y to confirm. Press any other key to enter new value\n").strip().lower()
if choice_enter_again != "" and choice_enter_again[0]=='y':
keep_asking = False
else:
keep_asking = True
enter_again = True
else:
print("Invalid input. Try again.")
return variable
def get_filters():
"""
Asks user to specify a city, month, and day to analyze.
Returns:
(str) city - name of the city to analyze
(str) month - name of the month to filter by, or "all" to apply no month filter
(str) day - name of the day of week to filter by, or "all" to apply no day filter
"""
print('Hello! Let\'s explore some US bikeshare data!')
# TO DO: get user input for city (chicago, new york city, washington). HINT: Use a while loop to handle invalid inputs
city = get_user_input("City", CITY_DATA.keys())
# TO DO: get user input for month (all, january, february, ... , june)
month = get_user_input("Month", MONTHS)
# TO DO: get user input for day of week (all, monday, tuesday, ... sunday)
day = get_user_input("Weekday", DAYS)
print('-'*40)
return city, month, day
def load_data(city, month, day):
"""
Loads data for the specified city and filters by month and day if applicable.
Args:
(str) city - name of the city to analyze
(str) month - name of the month to filter by, or "all" to apply no month filter
(str) day - name of the day of week to filter by, or "all" to apply no day filter
Returns:
df - pandas DataFrame containing city data filtered by month and day
"""
# load data file into a dataframe
df = | pd.read_csv(CITY_DATA[city]) | pandas.read_csv |
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
from sklearn.feature_extraction.text import CountVectorizer, TfidfVectorizer, TfidfTransformer
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import StratifiedKFold
from sklearn import metrics
from utils import preprocessing
from sklearn.svm import LinearSVC
from sklearn.naive_bayes import MultinomialNB
from sklearn.linear_model import LogisticRegression
from xgboost import XGBClassifier
def train():
df = | pd.read_excel('datasets/raw_data.xlsx', engine='openpyxl') | pandas.read_excel |
#!/usr/bin/env python
import os
import pandas as pd
def get_supertwists(qmc_out):
""" read supercell twists from QMCPACK output
Args:
qmc_out (str): QMCPACK output, must contain "Super twist #"
Return:
np.array: an array of twist vectors (ntwist, ndim)
"""
from qharv.reel import ascii_out
mm = ascii_out.read(qmc_out)
idxl = ascii_out.all_lines_with_tag(mm, 'Super twist #')
lines = ascii_out.all_lines_at_idx(mm ,idxl)
data = []
for line in lines:
text = ascii_out.lr_mark(line, '[', ']')
vec = np.array(text.split(), dtype=float)
data.append(vec)
mat = np.array(data)
return mat
def epl_val_err(epl_out):
""" convert epl_out to a pandas DataFrame.
epl_out is expected to be an output of energy.pl from QMCPACK
It simply has to have the format {name:22c}={val:17.3f} +/- {err:26.4f}.
rows with forces will be recognized with 'force_prefix'
Args:
epl_out (str): energy.pl output filename
Returns:
pd.DataFrame: df contains columns ['name','val','err']
"""
tab = pd.read_csv(epl_out, delimiter='=', names=['name', 'text'])
tab = tab.dropna()
def text2val(text):
tokens = text.split('+/-')
if len(tokens) != 2:
raise NotImplementedError('unrecognized value '+text)
val,err = map(float, tokens)
return pd.Series({'val':val, 'err':err})
df = pd.concat([
tab.drop('text', axis=1),
tab['text'].apply(text2val)],
axis=1)
return df
def epldf_to_entry(df):
names = [name.strip() for name in df.name.values]
ymean = ['%s_mean' % name for name in names]
yerror = ['%s_error' % name for name in names]
names1 = np.concatenate([ymean, yerror])
means = df.val.values
errs = df.err.values
entry = pd.Series(np.concatenate([means, errs]), names1)
return entry
def get_forces(df, natom, prefix='force', ndim=3):
yml = []
yel = []
for iatom in range(natom):
for idim in range(ndim):
col = '%s_%d_%d' % (prefix, iatom, idim)
sel = df.name.apply(lambda x: col in x)
y1m = df.loc[sel].val.squeeze()
y1e = df.loc[sel].err.squeeze()
yml.append(y1m)
yel.append(y1e)
return np.array(yml), np.array(yel)
def sk_from_fs_out(fs_out):
""" extract fluctuating S(k) from qmcfinitesize output
returns: kmag,sk,vk,spk,spsk
kmag: magnitude of kvectors, sk: raw fluc. S(k), vk: long-range potential after break-up
spk: kmags for splined S(k), spsk: splined S(k) """
import reader
bint = reader.BlockInterpreter()
sfile = reader.SearchableFile(fs_out)
# read raw data
block_text = sfile.block_text('#SK_RAW_START#','#SK_RAW_STOP#')
kmag,sk,vk = bint.matrix(block_text[block_text.find('\n')+1:]).T
# read splined S(k)
block_text = sfile.block_text('#SK_SPLINE_START#','#SK_SPLINE_STOP#')
spk,spsk = bint.matrix(block_text[block_text.find('\n')+1:]).T
return kmag,sk,vk,spk,spsk
# end def
# =============== complicated functions ===============
import numpy as np
from copy import deepcopy
def read_jastrows(jas_node):
""" 'jas_node' should be an xml node containing bspline jastrows
put coefficients and attributes into a list of dictionaries """
if (jas_node.attrib["type"] != "Two-Body"): # works for one-body! miracle!
pass#raise TypeError("input is not a two-body Jastrow xml node")
elif (jas_node.attrib["function"].lower() != "bspline"):
raise NotImplementedError("can only handle bspline Jastrows for now")
# end if
data = []
for corr in jas_node.xpath('./correlation'):
coeff = corr.xpath('./coefficients')[0]
entry = deepcopy( corr.attrib )
entry.update(coeff.attrib)
entry['coeff'] = np.array(coeff.text.split(),dtype=float)
entry['type'] = jas_node.attrib['type']
data.append(entry)
# end for corr
return data
# end def read_jastrows
from lxml import etree
def extract_jastrows(qmcpack_input,json_name='jas.json',warn=True,force_refresh=False):
""" given a QMCPACK input that contains linear optimization, extract all printed Jastrows and store in a local database
1. parse 'qmcpack_input' for the qmc[@metho="linear"] section
2. for each *.opt.xml, parse if it exists
3. parse each opt.xml and make local database """
failed = False
subdir = os.path.dirname(qmcpack_input)
target_json = os.path.join(subdir,json_name)
if os.path.isfile(target_json) and (not force_refresh):
if warn:
print("skipping %s" % subdir)
# end if
return 0 # skip ths file
# end if
parser = etree.XMLParser(remove_blank_text=True)
# get prefix
xml = etree.parse(qmcpack_input,parser)
proj = xml.xpath("//project")[0]
prefix = proj.attrib['id']
# determine number of optimization loops
all_qmc_sections = xml.xpath('.//qmc[@method="linear"]')
all_iopt = 0 # track multiple 'linear' sections
data = []
for qmc_section in all_qmc_sections:
# for each linear optimization:
# find the number of loops
nopt = 1
loop = qmc_section.getparent()
if loop.tag == 'loop':
nopt = int(loop.attrib['max'])
# end if
# collect all jastrow coefficients
for iopt in range(nopt):
# get optimization file
opt_file = prefix + ".s%s.opt.xml" % str(all_iopt).zfill(3)
opt_xml = os.path.join(subdir,opt_file)
if not os.path.isfile(opt_xml):
if warn:
print("skipping %d in %s" % (all_iopt,subdir))
# end if
continue
# end if
# parse optimization file
opt = etree.parse(opt_xml,parser)
jnodes = opt.xpath('//jastrow')
for jas_node in jnodes:
entries = read_jastrows(jas_node)
for entry in entries:
entry['iopt'] = all_iopt
# end for entry
data.append(entry)
# end for
all_iopt += 1
# end for iopt
# end for qmc_section
if len(data) == 0:
failed = True
else:
df = pd.DataFrame( data )
df.to_json(target_json)
# end if
return failed
# end def extract_jastrows
def extract_best_jastrow_set(opt_input,opt_json='opt_scalar.json',nequil='auto',force_refresh=False):
import nexus_addon as na
subdir = os.path.dirname(opt_input)
# locally create jas.json
extract_jastrows(opt_input,force_refresh=force_refresh)
# locally create opt_scalar.json
scalar_json = os.path.join(subdir,opt_json)
if (not os.path.isfile(scalar_json)) or force_refresh:
# initialize analyzer
from qmca import QBase
options = {"equilibration":nequil}
QBase.options.transfer_from(options)
entry = na.scalars_from_input(opt_input)
pd.DataFrame(entry).to_json(scalar_json)
# end if
# get best jastrow set
best_jas = collect_best_jastrow_set(subdir)
return best_jas
# end def extract_best_jastrow_set
def collect_best_jastrow_set(subdir,jas_json='jas.json',opt_json='opt_scalar.json'
,rval_weight=0.75,rerr_weight=0.25):
""" find best set of jastrows in 'subdir', assume files:
1. jas.json: a database of QMCPACK bspline jastrows with 'iopt' column
2. opt_scalar.json: a database of QMCPACK scalars including 'LocalEnergy_mean', 'LocalEnergy_error', 'Variance_mean', and 'Variance_error' """
from dmc_database_analyzer import div_columns
jfile = os.path.join(subdir,jas_json)
if not os.path.isfile(jfile):
raise RuntimeError('%s not found in %s' % (jfile,subdir))
# end if
ofile = os.path.join(subdir,opt_json)
if not os.path.isfile(ofile):
raise RuntimeError('%s not found in %s' % (ofile,subdir))
# end if
jdf = pd.read_json(jfile) # jastrows
sdf = | pd.read_json(ofile) | pandas.read_json |
import numpy as np
import csv
import sys
import os
import h5py
import pandas as pd
import simplejson as json
import sqlite3
import copy
# structure followed in this file is based on : https://github.com/nhammerla/deepHAR/tree/master/data
# and https://github.com/IRC-SPHERE/sphere-challenge
class data_reader:
def __init__(self, dataset):
if dataset == 'dap':
self.data, self.idToLabel = self.readDaphnet()
self.save_data(dataset)
elif dataset =='opp':
self.data, self.idToLabel = self.readOpportunity()
self.save_data(dataset)
elif dataset == 'pa2':
self.data, self.idToLabel = self.readPamap2()
self.save_data(dataset)
elif dataset == 'sph':
self.data, self.idToLabel = self.readSphere()
self.save_data(dataset)
else:
print('Not supported yet')
sys.exit(0)
def save_data(self,dataset):
if dataset == 'dap':
f = h5py.File('daphnet.h5')
for key in self.data:
f.create_group(key)
for field in self.data[key]:
f[key].create_dataset(field, data=self.data[key][field])
f.close()
print('Done.')
elif dataset == 'opp':
f = h5py.File('opportunity.h5')
for key in self.data:
f.create_group(key)
for field in self.data[key]:
f[key].create_dataset(field, data=self.data[key][field])
f.close()
print('Done.')
elif dataset == 'pa2':
f = h5py.File('pamap2.h5')
for key in self.data:
f.create_group(key)
for field in self.data[key]:
f[key].create_dataset(field, data=self.data[key][field])
f.close()
print('Done.')
elif dataset == "sph":
f = h5py.File('sphere.h5')
for key in self.data:
f.create_group(key)
for field in self.data[key]:
f[key].create_dataset(field, data=self.data[key][field])
f.close()
print('Done.')
else:
print('Not supported yet')
sys.exit(0)
@property
def train(self):
return self.data['train']
@property
def test(self):
return self.data['test']
def readPamap2(self):
files = {
'train': ['subject101.dat', 'subject102.dat','subject103.dat','subject104.dat', 'subject107.dat', 'subject108.dat', 'subject109.dat'],
'test': ['subject106.dat']
}
label_map = [
# (0, 'other'),
(1, 'lying'),
(2, 'sitting'),
(3, 'standing'),
(4, 'walking'),
(5, 'running'),
(6, 'cycling'),
(7, 'Nordic walking'),
(9, 'watching TV'),
(10, 'computer work'),
(11, 'car driving'),
(12, 'ascending stairs'),
(13, 'descending stairs'),
(16, 'vacuum cleaning'),
(17, 'ironing'),
(18, 'folding laundry'),
(19, 'house cleaning'),
(20, 'playing soccer'),
(24, 'rope jumping')
]
labelToId = {str(x[0]): i for i, x in enumerate(label_map)}
# print "label2id=",labelToId
idToLabel = [x[1] for x in label_map]
# print "id2label=",idToLabel
cols = [
1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34,
35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53
]
# print "cols",cols
data = {dataset: self.readPamap2Files(files[dataset], cols, labelToId)
for dataset in ('train', 'test')}
return data, idToLabel
def readPamap2Files(self, filelist, cols, labelToId):
data = []
labels = []
for i, filename in enumerate(filelist):
print('Reading file %d of %d' % (i+1, len(filelist)))
with open('./Protocol/%s' % filename, 'r') as f:
#print "f",f
reader = csv.reader(f, delimiter=' ')
for line in reader:
#print "line=",line
elem = []
#not including the non related activity
if line[1] == "0":
continue
# if line[10] == "0":
# continue
for ind in cols:
#print "ind=",ind
# if ind == 10:
# # print "line[ind]",line[ind]
# if line[ind] == "0":
# continue
elem.append(line[ind])
# print "elem =",elem
# print "elem[:-1] =",elem[:-1]
# print "elem[0] =",elem[0]
if sum([x == 'NaN' for x in elem]) == 0:
data.append([float(x) / 1000 for x in elem[:-1]])
labels.append(labelToId[elem[0]])
# print "[x for x in elem[:-1]]=",[x for x in elem[:-1]]
# print "[float(x) / 1000 for x in elem[:-1]]=",[float(x) / 1000 for x in elem[:-1]]
# print "labelToId[elem[0]]=",labelToId[elem[0]]
# print "labelToId[elem[-1]]",labelToId[elem[-1]]
# sys.exit(0)
return {'inputs': np.asarray(data), 'targets': np.asarray(labels, dtype=int)+1}
def readDaphnet(self):
files = {
'train': ['S01R01.txt', 'S01R02.txt','S03R01.txt','S03R02.txt', 'S03R03.txt', 'S04R01.txt', 'S05R01.txt', 'S05R02.txt','S06R01.txt', 'S06R02.txt', 'S07R01.txt', 'S07R02.txt', 'S08R01.txt','S10R01.txt'],
'test': ['S02R01.txt', 'S02R02.txt']
}
label_map = [
(1, 'No freeze'),
(2, 'freeze')
]
labelToId = {str(x[0]): i for i, x in enumerate(label_map)}
# print "label2id=",labelToId
idToLabel = [x[1] for x in label_map]
# print "id2label=",idToLabel
cols = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
# print "cols",cols
data = {dataset: self.readDaphFiles(files[dataset], cols, labelToId)
for dataset in ('train', 'test')}
return data, idToLabel
def readDaphFiles(self, filelist, cols, labelToId):
data = []
labels = []
for i, filename in enumerate(filelist):
print('Reading file %d of %d' % (i+1, len(filelist)))
with open('./dataset/%s' % filename, 'r') as f:
#print "f",f
reader = csv.reader(f, delimiter=' ')
for line in reader:
#print "line=",line
elem = []
#not including the non related activity
if line[10] == "0":
continue
for ind in cols:
#print "ind=",ind
if ind == 10:
# print "line[ind]",line[ind]
if line[ind] == "0":
continue
elem.append(line[ind])
if sum([x == 'NaN' for x in elem]) == 0:
data.append([float(x) / 1000 for x in elem[:-1]])
labels.append(labelToId[elem[-1]])
return {'inputs': np.asarray(data), 'targets': np.asarray(labels, dtype=int)+1}
def readOpportunity(self):
files = {
'train': ['S1-ADL1.dat','S1-ADL3.dat', 'S1-ADL4.dat', 'S1-ADL5.dat', 'S1-Drill.dat', 'S2-ADL1.dat', 'S2-ADL2.dat', 'S2-ADL5.dat', 'S2-Drill.dat', 'S3-ADL1.dat', 'S3-ADL2.dat', 'S3-ADL5.dat', 'S3-Drill.dat', 'S4-ADL1.dat', 'S4-ADL2.dat', 'S4-ADL3.dat', 'S4-ADL4.dat', 'S4-ADL5.dat', 'S4-Drill.dat'],
'test': ['S2-ADL3.dat', 'S2-ADL4.dat','S3-ADL3.dat', 'S3-ADL4.dat']
}
#names are from label_legend.txt of Opportunity dataset
#except 0-ie Other, which is an additional label
label_map = [
(0, 'Other'),
(406516, 'Open Door 1'),
(406517, 'Open Door 2'),
(404516, 'Close Door 1'),
(404517, 'Close Door 2'),
(406520, 'Open Fridge'),
(404520, 'Close Fridge'),
(406505, 'Open Dishwasher'),
(404505, 'Close Dishwasher'),
(406519, 'Open Drawer 1'),
(404519, 'Close Drawer 1'),
(406511, 'Open Drawer 2'),
(404511, 'Close Drawer 2'),
(406508, 'Open Drawer 3'),
(404508, 'Close Drawer 3'),
(408512, 'Clean Table'),
(407521, 'Drink from Cup'),
(405506, 'Toggle Switch')
]
labelToId = {str(x[0]): i for i, x in enumerate(label_map)}
idToLabel = [x[1] for x in label_map]
cols = [
37, 38, 39, 40, 41, 42, 43, 44, 45, 50, 51, 52, 53, 54, 55, 56, 57, 58,63, 64, 65, 66, 67, 68, 69, 70, 71, 76, 77, 78, 79, 80, 81, 82, 83, 84,
89, 90, 91, 92, 93, 94, 95, 96, 97, 102, 103, 104, 105, 106, 107, 108,109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123,
124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 249
]
data = {dataset: self.readOpportunityFiles(files[dataset], cols, labelToId)
for dataset in ('train', 'test')}
return data, idToLabel
#this is from https://github.com/nhammerla/deepHAR/tree/master/data and it is an opportunity Challenge reader. It is a python translation one
#for the official one provided by the dataset publishers in Matlab.
def readOpportunityFiles(self, filelist, cols, labelToId):
data = []
labels = []
for i, filename in enumerate(filelist):
print('Reading file %d of %d' % (i+1, len(filelist)))
with open('./dataset/%s' % filename, 'r') as f:
reader = csv.reader(f, delimiter=' ')
for line in reader:
elem = []
for ind in cols:
elem.append(line[ind])
if sum([x == 'NaN' for x in elem]) == 0:
data.append([float(x) / 1000 for x in elem[:-1]])
labels.append(labelToId[elem[-1]])
return {'inputs': np.asarray(data), 'targets': np.asarray(labels, dtype=int)+1}
def readSphere(self):
files = {
'train': ['00001','00002', '00003', '00004', '00005', '00006', '00007', '00008'],
'test' : [ '00009', '00010']
# 'test': ['00011','00012','00013','00014','00015','00016','00017','00018','00019','00020','00021','00022','00023','00024','00025','00026','00027',
# '00028','00029','00030','00031','00032','00033','00034','00035','00036','00037','00038','00039','00040','00041','00042','00043','00044','00045',
# '00046','00047','00048','00049','00050','00051','00052','00053','00054','00055','00056','00057','00058','00059','00060','00061','00062','00063',
# '00064','00065','00066','00067','00068','00069','00070','00071','00072','00073','00074','00075','00076','00077','00078','00079','00080','00081',
# '00082','00083','00084','00085','00086','00087','00088','00089','00090','00091','00092','00093','00094','00095','00096','00097','00098','00099',
# '00100','00101','00102','00103','00104','00105','00106','00107','00108','00109','00110','00111','00112','00113','00114','00115','00116','00117',
# '00118','00119','00120','00121','00122','00123','00124','00125','00126','00127','00128','00129','00130','00131','00132','00133','00134','00135',
# '00136','00137','00138','00139','00140','00141','00142','00143','00144','00145','00146','00147','00148','00149','00150','00151','00152','00153',
# '00154','00155','00156','00157','00158','00159','00160','00161','00162','00163','00164','00165','00166','00167','00168','00169','00170','00171',
# '00172','00173','00174','00175','00176','00177','00178','00179','00180','00181','00182','00183','00184','00185','00186','00187','00188','00189',
# '00190','00191','00192','00193','00194','00195','00196','00197','00198','00199','00200','00201','00202','00203','00204','00205','00206','00207',
# '00208','00209','00210','00211','00212','00213','00214','00215','00216','00217','00218','00219','00220','00221','00222','00223','00224','00225',
# '00226','00227','00228','00229','00230','00231','00232','00233','00234','00235','00236','00237','00238','00239','00240','00241','00242','00243',
# '00244','00245','00246','00247','00248','00249','00250','00251','00252','00253','00254','00255','00256','00257','00258','00259','00260','00261',
# '00262','00263','00264','00265','00266','00267','00268','00269','00270','00271','00272','00273','00274','00275','00276','00277','00278','00279',
# '00280','00281','00282','00283','00284','00285','00286','00287','00288','00289','00290','00291','00292','00293','00294','00295','00296','00297',
# '00298','00299','00300','00301','00302','00303','00304','00305','00306','00307','00308','00309','00310','00311','00312','00313','00314','00315',
# '00316','00317','00318','00319','00320','00321','00322','00323','00324','00325','00326','00327','00328','00329','00330','00331','00332','00333',
# '00334','00335','00336','00337','00338','00339','00340','00341','00342','00343','00344','00345','00346','00347','00348','00349','00350','00351',
# '00352','00353','00354','00355','00356','00357','00358','00359','00360','00361','00362','00363','00364','00365','00366','00367','00368','00369',
# '00370','00371','00372','00373','00374','00375','00376','00377','00378','00379','00380','00381','00382','00383','00384','00385','00386','00387',
# '00388','00389','00390','00391','00392','00393','00394','00395','00396','00397','00398','00399','00400','00401','00402','00403','00404','00405',
# '00406','00407','00408','00409','00410','00411','00412','00413','00414','00415','00416','00417','00418','00419','00420','00421','00422','00423',
# '00424','00425','00426','00427','00428','00429','00430','00431','00432','00433','00434','00435','00436','00437','00438','00439','00440','00441',
# '00442','00443','00444','00445','00446','00447','00448','00449','00450','00451','00452','00453','00454','00455','00456','00457','00458','00459',
# '00460','00461','00462','00463','00464','00465','00466','00467','00468','00469','00470','00471','00472','00473','00474','00475','00476','00477',
# '00478','00479','00480','00481','00482','00483','00484','00485','00486','00487','00488','00489','00490','00491','00492','00493','00494','00495',
# '00496','00497','00498','00499','00500','00501','00502','00503','00504','00505','00506','00507','00508','00509','00510','00511','00512','00513',
# '00514','00515','00516','00517','00518','00519','00520','00521','00522','00523','00524','00525','00526','00527','00528','00529','00530','00531',
# '00532','00533','00534','00535','00536','00537','00538','00539','00540','00541','00542','00543','00544','00545','00546','00547','00548','00549',
# '00550','00551','00552','00553','00554','00555','00556','00557','00558','00559','00560','00561','00562','00563','00564','00565','00566','00567',
# '00568','00569','00570','00571','00572','00573','00574','00575','00576','00577','00578','00579','00580','00581','00582','00583','00584','00585',
# '00586','00587','00588','00589','00590','00591','00592','00593','00594','00595','00596','00597','00598','00599','00600','00601','00602','00603',
# '00604','00605','00606','00607','00608','00609','00610','00611','00612','00613','00614','00615','00616','00617','00618','00619','00620','00621',
# '00622','00623','00624','00625','00626','00627','00628','00629','00630','00631','00632','00633','00634','00635','00636','00637','00638','00639',
# '00640','00641','00642','00643','00644','00645','00646','00647','00648','00649','00650','00651','00652','00653','00654','00655','00656','00657',
# '00658','00659','00660','00661','00662','00663','00664','00665','00666','00667','00668','00669','00670','00671','00672','00673','00674','00675',
# '00676','00677','00678','00679','00680','00681','00682','00683','00684','00685','00686','00687','00688','00689','00690','00691','00692','00693',
# '00694','00695','00696','00697','00698','00699','00700','00701','00702','00703','00704','00705','00706','00707','00708','00709','00710','00711',
# '00712','00713','00714','00715','00716','00717','00718','00719','00720','00721','00722','00723','00724','00725','00726','00727','00728','00729',
# '00730','00731','00732','00733','00734','00735','00736','00737','00738','00739','00740','00741','00742','00743','00744','00745','00746','00747',
# '00748','00749','00750','00751','00752','00753','00754','00755','00756','00757','00758','00759','00760','00761','00762','00763','00764','00765',
# '00766','00767','00768','00769','00770','00771','00772','00773','00774','00775','00776','00777','00778','00779','00780','00781','00782','00783',
# '00784','00785','00786','00787','00788','00789','00790','00791','00792','00793','00794','00795','00796','00797','00798','00799','00800','00801',
# '00802','00803','00804','00805','00806','00807','00808','00809','00810','00811','00812','00813','00814','00815','00816','00817','00818','00819',
# '00820','00821','00822','00823','00824','00825','00826','00827','00828','00829','00830','00831','00832','00833','00834','00835','00836','00837',
# '00838','00839','00840','00841','00842','00843','00844','00845','00846','00847','00848','00849','00850','00851','00852','00853','00854','00855',
# '00856','00857','00858','00859','00860','00861','00862','00863','00864','00865','00866','00867','00868','00869','00870','00871','00872','00873',
# '00874','00875','00876','00877','00878','00879','00880','00881','00882']
}
label_map = [
(0, 'a_ascend'),
(1, 'a_descend'),
(2, 'a_jump'),
(3, 'a_loadwalk'),
(4, 'a_walk'),
(5, 'p_bent'),
(6, 'p_kneel'),
(7, 'p_lie'),
(8, 'p_sit'),
(9, 'p_squat'),
(10, 'p_stand'),
(11, 't_bend'),
(12, 't_kneel_stand'),
(13, 't_lie_sit'),
(14, 't_sit_lie'),
(15, 't_sit_stand'),
(16, 't_stand_kneel'),
(17, 't_stand_sit'),
(18, 't_straighten'),
(19, 't_turn')
]
labelToId = {str(x[0]): i for i, x in enumerate(label_map)}
# print ("label2id=",labelToId)
idToLabel = [x[1] for x in label_map]
# print ("idToLabel=",idToLabel)
# colums of the merged file (ie video_hall+video_living+video_kitchen+accelerometer+maxarg_target)
# the strict order is : accelerometer_data, video_hall_data, living_room_data, target_value
cols = [1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,43,44,45,46,47,48,49,50,51,52,53]
# cols_acceleration = [1, 2, 3, 4, 5, 6, 7]
# cols_video = [1,2,3,4,5,6,7,8,9,10,11,12,13,14,15]
data = {dataset: self.readSphereFiles(files[dataset],cols,labelToId,idToLabel)
for dataset in ('train', 'test')}
return data, idToLabel
# partial code was taken from :https://github.com/IRC-SPHERE/sphere-challenge/blob/master/visualise_data.py
def readSphereFiles(self, filelist, cols, labelToId,idToLabel):
data = []
labels = []
mapping = {'a_ascend': 0, 'a_descend': 1, 'a_jump': 2, 'a_loadwalk': 3, 'a_walk': 4, 'p_bent': 5, 'p_kneel': 6, 'p_lie': 7, 'p_sit': 8,
'p_squat': 9, 'p_stand': 10, 't_bend': 11, 't_kneel_stand': 12, 't_lie_sit': 13, 't_sit_lie': 14, 't_sit_stand': 15, 't_stand_kneel': 16,
't_stand_sit': 17,'t_straighten': 18, 't_turn': 19}
# mapping2 = {0:'a_ascend', 1:'a_descend', 2:'a_jump', 3:'a_loadwalk', 4:'a_walk', 5:'p_bent', 6:'p_kneel', 7:'p_lie', 8:'p_sit',
# 9:'p_squat', 10:'p_stand', 11:'t_bend', 12:'t_kneel_stand', 13:'t_lie_sit', 14:'t_sit_lie', 15:'t_sit_stand', 16:'t_stand_kneel',
# 17:'t_stand_sit', 18:'t_straighten', 19: 't_turn'}
for i, filename in enumerate(filelist):
path = './train/%s/'%filename
meta_root = './metadata/'
video_cols = json.load(open(os.path.join(meta_root, 'video_feature_names.json')))
centre_2d = video_cols['centre_2d']
bb_2d = video_cols['bb_2d']
centre_3d = video_cols['centre_3d']
bb_3d = video_cols['bb_3d']
print('Reading file %d of %d'%(i+1,len(filelist)))
meta = json.load(open(os.path.join(path, 'meta.json')))
acceleration_keys = json.load(open(os.path.join(meta_root, 'accelerometer_axes.json')))
rssi_keys = json.load(open(os.path.join(meta_root, 'access_point_names.json')))
video_names = json.load(open(os.path.join(meta_root, 'video_locations.json')))
pir_names = json.load(open(os.path.join(meta_root, 'pir_locations.json')))
location_targets = json.load(open(os.path.join(meta_root, 'rooms.json')))
activity_targets = json.load(open(os.path.join(meta_root, 'annotations.json')))
accel = load_wearable(path,acceleration_keys,rssi_keys)
vid = load_video(path,video_names)
pir = load_environmental(path)
annot = load_annotations(path)
targ = load_targets(path)
# accel = accel.dropna(how='any')
# vid = pd.DataFrame(vid.items())
# vid = pd.DataFrame.from_dict(orient='index',data = vid)
# vid = vid.dropna(how='any')
#we have read the whole train set for the current file
#now we trim off all unlabeled target instances
targ = targ.dropna(how='any')
#we feel the accelerometer NaN values with zero (mean impute would not make much sense)
accel = accel.fillna(0)
# print(i)
# print(filename)
#we get the target label for each instance, which would be the argmax, of the targe probability distribution
targLabel = copy.deepcopy(targ)
targLabel.drop(targLabel.columns[[0, 1]], axis=1, inplace=True)
#we create a target column, with the corresponding argmax targets
targ['target'] = targLabel.idxmax(axis=1)
#delete the probability distribution columns
for activity in idToLabel:
del targ[activity]
# print(targ)
# print("accel")
# print(accel.keys())
# print("vid['hallway']")
# print(vid['hallway'].keys())
# print("theEND")
accel.insert(0, 't', 0)
accel['t'] = accel.index
vid['hallway']['t']= vid['hallway'].index
vid['living_room']['t'] = vid['living_room'].index
vid['kitchen']['t'] = vid['kitchen'].index
merged = pd.merge(accel,vid['hallway'],how='outer',on='t')
merged = pd.merge(merged,vid['living_room'],how='outer',on='t')
merged = pd.merge(merged,vid['kitchen'],how='outer',on='t')
# print ("accel.shape")
# print (accel.shape)
# print ("vid[hallway].shape")
# print (vid['hallway'].shape)
# print ("vid[living_room].shape")
# print (vid['living_room'].shape)
# print ("vid[kitchen].shape")
# print (vid['kitchen'].shape)
# Rename the columns appropriately
merged.columns = ['time', 'x', 'y','z', 'Kitchen_AP', 'Lounge_AP', 'Upstairs_AP',
'Study_AP', 'centre_2d_x_hall', 'centre_2d_y_hall' , 'bb_2d_br_x_hall' ,'bb_2d_br_y_hall',
'bb_2d_tl_x_hall' , 'bb_2d_tl_y_hall' , 'centre_3d_x_hall' , 'centre_3d_y_hall' , 'centre_3d_z_hall',
'bb_3d_brb_x_hall' , 'bb_3d_brb_y_hall' , 'bb_3d_brb_z_hall' , 'bb_3d_flt_x_hall' , 'bb_3d_flt_y_hall',
'bb_3d_flt_z_hall' , 'centre_2d_x_living' , 'centre_2d_y_living' , 'bb_2d_br_x_living' , 'bb_2d_br_y_living',
'bb_2d_tl_x_living' , 'bb_2d_tl_y_living' , 'centre_3d_x_living' , 'centre_3d_y_living' , 'centre_3d_z_living',
'bb_3d_brb_x_living' , 'bb_3d_brb_y_living' ,'bb_3d_brb_z_living' , 'bb_3d_flt_x_living' , 'bb_3d_flt_y_living',
'bb_3d_flt_z_living' , 'centre_2d_x_kitchen' , 'centre_2d_y_kitchen' , 'bb_2d_br_x_kitchen' , 'bb_2d_br_y_kitchen',
'bb_2d_tl_x_kitchen' , 'bb_2d_tl_y_kitchen' , 'centre_3d_x_kitchen' , 'centre_3d_y_kitchen' , 'centre_3d_z_kitchen' , 'bb_3d_brb_x_kitchen',
'bb_3d_brb_y_kitchen' , 'bb_3d_brb_z_kitchen' , 'bb_3d_flt_x_kitchen' , 'bb_3d_flt_y_kitchen' , 'bb_3d_flt_z_kitchen',
]
# pd.set_option('display.max_columns', 500)
# print("merged.keys()")
# print(merged.keys())
# print("merged.shape")
# print(merged.shape)
# print("merged")
# print(merged.ix[:5, :54])
# print()
#concatinate the target file labels and start,end tuples with the accelerometer timeseries.
# print("going for the sql table creation")
conn = sqlite3.connect(':memory:')
targ.to_sql('targ',conn,index=True)
merged.to_sql('merged',conn,index=True)
# vid['hallway'].to_sql('hall',conn,index=True)
# vid['living_room'].to_sql('living',conn,index=True)
# vid['kitchen'].to_sql('kitchen',conn,index=True)
# print("just did the sql table creation")
qry = '''
select
time, x, y,z, Kitchen_AP, Lounge_AP, Upstairs_AP,
Study_AP, centre_2d_x_hall, centre_2d_y_hall , bb_2d_br_x_hall ,bb_2d_br_y_hall,
bb_2d_tl_x_hall , bb_2d_tl_y_hall , centre_3d_x_hall , centre_3d_y_hall , centre_3d_z_hall,
bb_3d_brb_x_hall , bb_3d_brb_y_hall , bb_3d_brb_z_hall , bb_3d_flt_x_hall , bb_3d_flt_y_hall,
bb_3d_flt_z_hall , centre_2d_x_living , centre_2d_y_living , bb_2d_br_x_living , bb_2d_br_y_living,
bb_2d_tl_x_living , bb_2d_tl_y_living , centre_3d_x_living , centre_3d_y_living , centre_3d_z_living,
bb_3d_brb_x_living , bb_3d_brb_y_living ,bb_3d_brb_z_living , bb_3d_flt_x_living , bb_3d_flt_y_living,
bb_3d_flt_z_living , centre_2d_x_kitchen , centre_2d_y_kitchen , bb_2d_br_x_kitchen , bb_2d_br_y_kitchen,
bb_2d_tl_x_kitchen , bb_2d_tl_y_kitchen , centre_3d_x_kitchen , centre_3d_y_kitchen , centre_3d_z_kitchen , bb_3d_brb_x_kitchen,
bb_3d_brb_y_kitchen , bb_3d_brb_z_kitchen , bb_3d_flt_x_kitchen , bb_3d_flt_y_kitchen , bb_3d_flt_z_kitchen, targ.target
from merged join targ on merged.time between targ.start and targ.end
'''
# pd.set_option('display.max_columns', 500)
# print("doing the query")
res = pd.read_sql_query(qry,conn)
# print("query done")
# print("res.shape")
# print(res.shape)
# print("res")
# print(res.ix[:5, :100])
res["target"].replace(mapping, inplace=True)
res = res.fillna(0)
# print("res_after_mapping")
# print(res.ix[:5, :100])
conn.close()
for index, line in res.iterrows():
elem = []
for ind in cols:
elem.append(line[ind])
if sum([x=='NaN' for x in elem]) == 0:
data.append([float(x) / 1000 for x in elem[:-1]])
labels.append(labelToId[str(int(elem[-1]))])
return {'inputs': np.asarray(data), 'targets': np.asarray(labels, dtype=int)+1}
# partial code was taken from :https://github.com/IRC-SPHERE/sphere-challenge/blob/master/visualise_data.py
def load_wearable(path,acceleration_keys,rssi_keys):
accel_rssi = pd.read_csv(os.path.join(path, 'acceleration.csv'), index_col='t')
acceleration = accel_rssi[acceleration_keys]
rssi = | pd.DataFrame(index=acceleration.index) | pandas.DataFrame |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import sys, os
import pandas as pd
from importlib import reload
from bs4 import BeautifulSoup
import requests
from tqdm import tqdm
import numpy as np
import itertools
import shutil
import grimsel_h.auxiliary.timemap as timemap
import grimsel_h.auxiliary.aux_sql_func as aql
from grimsel_h.auxiliary.aux_general import print_full
import PROFILE_READER.profile_reader as profile_reader
reload(profile_reader)
reload(profile_reader)
class TSOReader(profile_reader.ProfileReader):
'''
Some common methods.
'''
def get_fn_list(self, lst_res=['Wind', 'Solar']):
'''
Download the files from constructed urls. Append fn to fn_list
Parameters:
lst_res -- list of strings, names of resources as used to compose
the url
'''
# get complete list of urls
lst_year = range(2005, 2020)
lst_month = range(1, 13)
lst_res = lst_res
url_list = [self.url_base.format(mt=mt, yr=yr, res=res)
for yr, mt, res
in itertools.product(lst_year, lst_month, lst_res)]
self.fn_list = []
url = url_list[0]
for url in url_list:
pt = 'WIN_TOT' if 'wind' in url.lower() else 'SOL_PHO'
s = url
fn = '{}_{}_{}.csv'.format(self._dir.split(os.path.sep)[-1],
pt, s[-5:].replace('-', '_'))
_fn = os.path.join(self._dir, fn)
self.fn_list.append(_fn)
print('Downloading ' + s[:int(60/3 - 3)]
+ '...'
+ s[int(len(s)/2 - 10): int(len(s)/2 + 10)]
+ '...'
+ s[int(len(s) - 60/3 - 3):], end=' --- ')
if not os.path.exists(_fn):
r = requests.get(url)
if r.status_code == 200:
with open(_fn, 'wb') as f:
f.write(r.content)
if not 'no data available' in next(r.iter_lines()).decode('utf-8'):
print('success.')
else:
print('success (no data available)')
else:
print('failed (status code={}).'.format(r.status_code))
else:
print('skipping (file exists).')
class TennetReader(TSOReader):
'''
'''
dict_sql_default = dict(sc='profiles_raw', tb='german_tso_tennet')
data_dir = os.path.normpath('GERMAN_TSO/TENNET')
tb_cols = [('"DateTime"', 'TIMESTAMP'),
('val_type', 'VARCHAR'),
('value', 'DOUBLE PRECISION'),
('hy', 'SMALLINT'),
('tso', 'VARCHAR'),
('pp_id', 'VARCHAR'),
('year', 'SMALLINT')]
tb_pk = ['val_type', 'year', 'hy', 'pp_id']
exclude_substrings=[]
url_base = ('http://www.tennettso.de/site/en/phpbridge?commandpath=Tats'
+ 'aechliche_und_prognostizierte_{res}energieeinspeisung%2Fm'
+ 'onthDataSheetCsv.php&sub=total&querystring=monat%3D'
+ '{yr:02d}-{mt:02d}')
def __init__(self, kw_dict):
super().__init__(**kw_dict)
self.get_fn_list(lst_res=['Wind', 'Solar'])
def read(self, fn):
try:
df_add = pd.read_csv(fn, delimiter=';', skiprows=3, index_col=False)
except pd.errors.EmptyDataError as e:
print(str(e))
return None
df_add = df_add.dropna(how='all', axis=1)
df_add['Date'] = df_add['Date'].fillna(method='ffill')
df_add['Position'] -= 1
df_add['hour'] = np.floor(df_add.Position / 4).apply(int)
df_add['minute'] = (np.floor(df_add.Position % 4) * 15).apply(int)
for idt, dt in enumerate(['year', 'month', 'day']):
df_add[dt] = df_add.Date.apply(lambda x: int(x.split('-')[idt]))
df_add['DateTime'] = pd.to_datetime(df_add[['year', 'month', 'day',
'hour', 'minute']])
lst_datacols = (['Forecast [MW]', 'Actual [MW]']
+ (['Offshore contribution [MW]']
if 'WIN' in fn else []))
df_add = (df_add.set_index('DateTime')[lst_datacols]
.stack().reset_index()
.rename(columns={'level_1': 'val_type', 0: 'value'}))
df_add['val_type'] = (df_add.val_type
.apply(lambda x: x.lower().replace(' [mw]', '')))
df_add['pp_id'] = 'DE_' + '_'.join(fn.split(os.path.sep)[-1].split('_')[1:3])
df_add['tso'] = 'tennet'
df_add = self.time_resample(df_add)
return df_add[['DateTime', 'tso', 'val_type', 'pp_id', 'value']]
def postprocessing_tot(self):
'''
Various operations once the table df_tot has been assembled.
'''
self.tz_localize_convert(tz='UTC')
self.df_tot = self.get_hour_of_the_year(self.df_tot)
self.append_to_sql(self.df_tot.copy())
class AmprionReader(profile_reader.ProfileReader):
'''
'''
dict_sql_default = dict(sc='profiles_raw', tb='german_tso_amprion')
data_dir = os.path.normpath('GERMAN_TSO/AMPRION')
tb_cols = [('"DateTime"', 'TIMESTAMP'),
('val_type', 'VARCHAR'),
('value', 'DOUBLE PRECISION'),
('hy', 'SMALLINT'),
('tso', 'VARCHAR'),
('pp_id', 'VARCHAR'),
('year', 'SMALLINT')]
tb_pk = ['val_type', 'year', 'hy', 'pp_id']
exclude_substrings=[]
def __init__(self, kw_dict):
super().__init__(**kw_dict)
super().get_fn_list()
def read(self, fn):
df_add = | pd.read_csv(fn, delimiter=';') | pandas.read_csv |
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Tue Oct 9 16:34:12 2018
@author: nmei
"""
import pandas as pd
import numpy as np
from utils import (posthoc_multiple_comparison,
post_processing,
posthoc_multiple_comparison_interaction,
resample_ttest,
MCPConverter,
stars)
n_ps = 200
n_permutation = int(1e5)
np.random.seed(12345)
if __name__ == '__main__':
############################## using 3 features ##########################
pos = pd.read_csv('../results/Pos_3_1_features.csv')
att = pd.read_csv('../results/ATT_3_1_features.csv')
# pos
df = pos.copy()
id_vars = ['model',
'score',
'sub',
'window',]
value_vars =[
'correct',
'awareness',
'confidence',
]
df_post = post_processing(df[(df['window'] >0) & (df['window'] < 5)],
id_vars,value_vars)
c = df_post.groupby(['sub','Models','Window','Attributes']).mean().reset_index()
# interaction
level_window = pd.unique(c['Window'])
level_attribute = pd.unique(c['Attributes'])
unique_levels = []
for w in level_window:
for a in level_attribute:
unique_levels.append([w,a])
# results = []
# for model_name,df_sub in c.groupby(['Models']):
# # main effect of window
# factor = 'Window'
# result = posthoc_multiple_comparison(df_sub,
# depvar = 'Values',
# factor = factor,
# n_ps = n_ps,
# n_permutation = n_permutation)
# result['Model'] = model_name
# results.append(result)
# # main effect of attributes
# factor = 'Attributes'
# result = posthoc_multiple_comparison(df_sub,
# depvar = 'Values',
# factor = factor,
# n_ps = n_ps,
# n_permutation = n_permutation)
# result['Model'] = model_name
# results.append(result)
# # interaction
# result = posthoc_multiple_comparison_interaction(
# df_sub,
# depvar = 'Values',
# unique_levels = ["Window","Attributes"],
# n_ps = n_ps,
# n_permutation = n_permutation)
# result['Model'] = model_name
# results.append(result)
#
# results = pd.concat(results)
# results.to_csv('../results/post hoc multiple comparison POS_3_1_features.csv',
# index=False)
# 1 sample t test against baseline
c_test = c.copy()
# normalize random forest feature importance because of reasons ...
# from sklearn.preprocessing import scale
# c_test.loc[c_test['Models'] == "RandomForestClassifier",'Values'] = \
# scale(c_test.loc[c_test['Models'] == "RandomForestClassifier",'Values'].values)
ttest_results = dict(
model_name = [],
window = [],
attribute = [],
ps_mean = [],
ps_std = [],
value_mean = [],
value_std = [],
baseline = [],
)
for (model_name,attribute,window),df_sub in c_test.groupby(['Models','Attributes','Window']):
if model_name == "RandomForestClassifier":
baseline = 1/3.
ps = resample_ttest(df_sub['Values'].values,
baseline = baseline,
n_ps = n_ps,
n_permutation = n_permutation,
one_tail = True,)
elif model_name == "LogisticRegression":
baseline = 1
ps = resample_ttest(df_sub['Values'].values,
baseline = baseline,
n_ps = n_ps,
n_permutation = n_permutation,
one_tail = True,)
ttest_results['model_name'].append(model_name)
ttest_results['window'].append(window)
ttest_results['attribute'].append(attribute)
ttest_results['ps_mean'].append(ps.mean())
ttest_results['ps_std'].append(ps.std())
ttest_results['value_mean'].append(df_sub['Values'].values.mean())
ttest_results['value_std'].append(df_sub['Values'].values.std())
ttest_results['baseline'].append(baseline)
ttest_results = pd.DataFrame(ttest_results)
temp = []
for model_name, df_sub in ttest_results.groupby(['model_name']):
df_sub = df_sub.sort_values(['ps_mean'])
converter = MCPConverter(pvals = df_sub['ps_mean'].values)
d = converter.adjust_many()
df_sub['ps_corrected'] = d['bonferroni'].values
temp.append(df_sub)
ttest_results = pd.concat(temp)
ttest_results = ttest_results.sort_values(['model_name','window','attribute'])
ttest_results['stars'] = ttest_results['ps_corrected'].apply(stars)
ttest_results.to_csv('../results/one sample t test POS_3_1_features.csv',
index = False)
# att
df = att.copy()
id_vars = ['model',
'score',
'sub',
'window',]
value_vars =[
'correct',
'awareness',
'confidence',
]
df_post = post_processing(df[(df['window'] >0) & (df['window'] < 5)],
id_vars,value_vars)
c = df_post.groupby(['sub','Models','Window','Attributes']).mean().reset_index()
# interaction
level_window = | pd.unique(c['Window']) | pandas.unique |
import pandas as pd
import pytest
from sklearn import datasets
from sklearn.metrics import f1_score
from sklearn.model_selection import train_test_split
from skqulacs.circuit.pre_defined import create_qcl_ansatz
from skqulacs.qnn import QNNClassifier
from skqulacs.qnn.solver import Adam, Bfgs, Solver
@pytest.mark.parametrize(
("solver", "maxiter"),
[(Adam(tolerance=1e-2, n_iter_no_change=5), 777), (Bfgs(), 8)],
)
def test_classify_iris(solver: Solver, maxiter: int) -> None:
iris = datasets.load_iris()
df = | pd.DataFrame(iris.data, columns=iris.feature_names) | pandas.DataFrame |
from django.shortcuts import render
from django.http import HttpResponse
from django.views import View
import pytz
import numpy as np
from datetime import datetime, time
import pandas as pd
import os, subprocess, psutil
from django.conf.urls.static import static
from . forms import SubmitTickerSymbolForm
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) #points to static folder
class CommandCenterView(View):
def __init__(self):
self.the_form = SubmitTickerSymbolForm()
self.month_year = datetime.now().strftime('%d | %B | %Y')
def contextRender(self, request,*args,**kwargs):
'''Common context renderer for the CommandCenterView'''
context = {
"title": "Command center",
"form": self.the_form,
"month_year": self.month_year,
"twsRunning": kwargs['msg'],
}
return render(request, "ib/commandCenter.html", context)
def get(self, request, *args, **kwargs):
t_msg = "Keep up the good work :)"
return self.contextRender(request\
,msg=t_msg)
def post(self, request, *args, **kwargs):
form = SubmitTickerSymbolForm(request.POST)
# launch trader work station(TWS)
if request.method == 'POST' and 'launchTws' in request.POST.keys():
if "tws.exe" in (p.name() for p in psutil.process_iter()):
t_msg = "TWS is running..."
return self.contextRender(request\
,msg=t_msg)
else:
subprocess.Popen(['C:\\Jts\\tws.exe'])
t_msg = "Launching TWS..."
return self.contextRender(request\
,msg=t_msg)
#add a ticker to forex list
elif request.method == 'POST' and 'forexQuote0' in request.POST.keys():
fName = "static\\csv\\forexWatchList.csv"
csvPathForex = os.path.join(BASE_DIR, fName )
forex_ticker = form.data['tickerSymbol'].upper()
columns = ['ticker', 'pid', 'clientid']
emptydf = pd.DataFrame(columns=columns)
try:
df = pd.read_csv(csvPathForex)
except:
emptydf.to_csv(csvPathForex, sep=',', index=False)
df = pd.read_csv(csvPathForex)
client_id = [i for i in range(20, 25) if i not in df['clientid'].values ][0]
if forex_ticker in df['ticker'].values:
t_msg = "FAILED! "+forex_ticker+ " is already in the STOCK list"
return self.contextRender(request\
,msg=t_msg)
else:
insertPoint = len(df['ticker'].values)
df.loc[insertPoint, 'ticker'] = forex_ticker # df.loc is the trick to add to eend of row
df.loc[insertPoint, 'clientid'] = client_id
df.to_csv(csvPathForex, sep=',', index=False)
t_msg = " Added " + forex_ticker+ " to FOREX list"
return self.contextRender(request\
,msg=t_msg)
#add a ticker to stock list
elif request.method == 'POST' and 'stockQuote0' in request.POST.keys():
fName = "static\\csv\\stockWatchList.csv"
csvPathStock = os.path.join(BASE_DIR, fName )
stock_ticker = form.data['tickerSymbol'].upper()
columns = ['ticker', 'pid', 'clientid']
emptydf = pd.DataFrame(columns=columns)
try:
df = pd.read_csv(csvPathStock)
except:
emptydf.to_csv(csvPathStock, sep=',', index=False)
df = pd.read_csv(csvPathStock)
# insertPoint = len([i for i in df['ticker'].values if isinstance(i, str)])
client_id = [i for i in range(5, 20) if i not in df['clientid'].values ][0]
if stock_ticker in df['ticker'].values:
t_msg = "FAILED! "+stock_ticker+ " is already in the STOCK list"
return self.contextRender(request\
,msg=t_msg)
else:
#create emty csv to deal with file not found error
fName = "static\\csv\\realtimeData\\" + stock_ticker + "_raw_realtime_ib.csv"
csvPath = os.path.join(BASE_DIR, fName ) # original data
columns = ['Time', 'Open', 'High', 'Low', 'Close']
try:
if datetime.fromtimestamp(os.path.getmtime(csvPath)).date() < \
datetime.now(tz=pytz.timezone('US/Eastern')).date():
emptyDf = pd.DataFrame(columns=columns)
emptyDf.to_csv(csvPath, sep=',', index=False)
except:
emptyDf = pd.DataFrame(columns=columns)
emptyDf.to_csv(csvPath, sep=',', index=False)
insertPoint = len(df['ticker'].values)
df.loc[insertPoint, 'ticker'] = stock_ticker # df.loc is the trick to add to eend of row
df.loc[insertPoint, 'clientid'] = client_id
df.to_csv(csvPathStock, sep=',', index=False)
t_msg = " Added " + stock_ticker+ " to STOCK list"
return self.contextRender(request\
,msg=t_msg)
#remove a ticker from the forex list
elif request.method == 'POST' and 'forexRow' in request.POST.keys():
fName = "static\\csv\\forexWatchList.csv"
csvPathForex = os.path.join(BASE_DIR, fName )
row_number = int(request.POST['forexRow'])
f_ticker = request.POST['forexTicker']
df = pd.read_csv(csvPathForex)
pid_insert_point = df['ticker'].values.tolist().index(f_ticker)
pid = df['pid'].iloc[pid_insert_point].astype(int)
try:
p = psutil.Process(pid)
p.terminate()
try:
fName_rt_raw = "static\\csv\\realtimeData\\"+f_ticker+"_raw_realtime_ib.csv"
fName_rt = "static\\csv\\realtimeData\\"+f_ticker+"_realtime_ib.csv"
csvPathForex_rt_raw = os.path.join(BASE_DIR, fName_rt)
csvPathForex_rt = os.path.join(BASE_DIR, fName_rt_raw)
os.remove(csvPathForex_rt_raw)
os.remove(csvPathForex_rt)
except:
pass
df.drop(df.index[row_number], inplace=True)
df.to_csv(csvPathForex, sep=',', index=False)
t_msg = "Process terminated! \n Successfully removed CSV and "\
+ f_ticker+" from FOREX list"
return self.contextRender(request\
,msg=t_msg)
except:
try:
fName_rt_raw = "static\\csv\\realtimeData\\"+f_ticker+"_raw_realtime_ib.csv"
fName_rt = "static\\csv\\realtimeData\\"+f_ticker+"_realtime_ib.csv"
csvPathForex_rt_raw = os.path.join(BASE_DIR, fName_rt)
csvPathForex_rt = os.path.join(BASE_DIR, fName_rt_raw)
os.remove(csvPathForex_rt_raw)
os.remove(csvPathForex_rt)
except:
pass
df.drop(df.index[row_number], inplace=True)
df.to_csv(csvPathForex, sep=',', index=False)
t_msg = "Successfully removed "\
+ f_ticker+" from FOREX list! \n No active "+ f_ticker+" downloads!"
return self.contextRender(request\
,msg=t_msg)
#remove a ticker from the stock list
elif request.method == 'POST' and 'stockRow' in request.POST.keys():
fName = "static\\csv\\stockWatchList.csv"
csvPathStock = os.path.join(BASE_DIR, fName )
row_number = int(request.POST['stockRow'])
s_ticker = request.POST['stockTicker']
df = pd.read_csv(csvPathStock)
pid_insert_point = df['ticker'].values.tolist().index(s_ticker)
pid = df['pid'].iloc[pid_insert_point].astype(int)
try:
# terminate quote downloads
p = psutil.Process(pid)
p.terminate()
#remove csv files
try:
fName_rt_raw = "static\\csv\\realtimeData\\"+s_ticker+"_raw_realtime_ib.csv"
fName_rt = "static\\csv\\realtimeData\\"+s_ticker+"_realtime_ib.csv"
csvPathForex_rt_raw = os.path.join(BASE_DIR, fName_rt)
csvPathForex_rt = os.path.join(BASE_DIR, fName_rt_raw)
os.remove(csvPathForex_rt_raw)
os.remove(csvPathForex_rt)
except:
pass
# remove from list
df.drop(df.index[row_number], inplace=True)
df.to_csv(csvPathStock, sep=',', index=False)
t_msg = "Process terminated! \n Successfully removed "\
+ s_ticker+" from STOCK list"
return self.contextRender(request\
,msg=t_msg)
except:
try:
fName_rt_raw = "static\\csv\\realtimeData\\"+s_ticker+"_raw_realtime_ib.csv"
fName_rt = "static\\csv\\realtimeData\\"+s_ticker+"_realtime_ib.csv"
csvPathForex_rt_raw = os.path.join(BASE_DIR, fName_rt)
csvPathForex_rt = os.path.join(BASE_DIR, fName_rt_raw)
os.remove(csvPathForex_rt_raw)
os.remove(csvPathForex_rt)
except:
pass
df.drop(df.index[row_number], inplace=True)
df.to_csv(csvPathStock, sep=',', index=False)
t_msg = " Successfully removed "\
+ s_ticker+" from STOCK list! \n No active "+ s_ticker+" downloads!"
return self.contextRender(request\
,msg=t_msg)
# get forex quote for a clicked ticker
elif request.method == 'POST' and 'forexQuote' in request.POST.keys():
fName = "static\\csv\\forexWatchList.csv"
csvPathForex = os.path.join(BASE_DIR, fName )
f_ticker = request.POST['forexQuote']
df = pd.read_csv(csvPathForex)
pid_insert_point = df['ticker'].values.tolist().index(f_ticker)
try:
q = psutil.Process(df['pid'].iloc[pid_insert_point].astype(int))
t_msg = "FAILED to download FOREX "+ f_ticker\
+"!\n Terminate the ongoing download to start again"
return self.contextRender(request\
,msg=t_msg)
except:
client_id_no = df['clientid'].iloc[pid_insert_point].astype(int) # type conversion from np dtype to python datatype
scriptPath= ['E:\\ProgramData\\Anaconda3\\python.exe'\
, 'E:\\ProgramData\\Anaconda3\\Scripts\\mysite\\static\\py\\Forex1RealtimeIB.py']
args = ["-t", f_ticker, "-i", str(client_id_no)]
scriptPath.extend(args)
proc1 = subprocess.Popen(scriptPath)
df['pid'].iloc[pid_insert_point] = proc1.pid
df.to_csv(csvPathForex, sep=',', index=False)
if "tws.exe" in (p.name() for p in psutil.process_iter()):
try:
p = psutil.Process(proc1.pid)
t_msg = "Downloading FOREX "+ f_ticker+" now!"
return self.contextRender(request\
,msg=t_msg)
except:
t_msg = "FAILED to download FOREX "+ f_ticker+" check TWS status"
return self.contextRender(request\
,msg=t_msg)
else:
t_msg = "FAILED to download FOREX "+ f_ticker\
+" Please lauch TWS and try again "
return self.contextRender(request\
,msg=t_msg)
# get stock quote for the clicked ticker
elif request.method == 'POST' and 'stockQuote' in request.POST.keys():
fName = "static\\csv\\stockWatchList.csv"
csvPathStock = os.path.join(BASE_DIR, fName )
s_ticker = request.POST['stockQuote']
df = pd.read_csv(csvPathStock)
pid_insert_point = df['ticker'].values.tolist().index(s_ticker)
if "tws.exe" not in (p.name() for p in psutil.process_iter()):
t_msg = "FAILED to download STOCK "+ s_ticker\
+" Please lauch TWS and try again "
return self.contextRender(request\
,msg=t_msg)
try:
q = psutil.Process(df['pid'].iloc[pid_insert_point].astype(int))
t_msg = "FAILED to download FOREX "+ s_ticker\
+"!\n Terminate the ongoing download to start again"
return self.contextRender(request\
,msg=t_msg)
except:
client_id_no = df['clientid'].iloc[pid_insert_point].astype(int)
scriptPath= ['E:\\ProgramData\\Anaconda3\\python.exe'\
, 'E:\\ProgramData\\Anaconda3\\Scripts\\mysite\\static\\py\\USStock1RealTimeIB.py']
args = ["-t", s_ticker, "-i", str(client_id_no)]
scriptPath.extend(args)
proc1 = subprocess.Popen(scriptPath)
df['pid'].iloc[pid_insert_point] = proc1.pid
df.to_csv(csvPathStock, sep=',', index=False)
if "tws.exe" in (p.name() for p in psutil.process_iter()):
try:
time.sleep(4)
p = psutil.Process(proc1.pid)
t_msg = "Downloading STOCK "+ s_ticker+" now!"
return self.contextRender(request\
,msg=t_msg)
except:
t_msg = "FAILED to download STOCK "+ s_ticker+" check TWS status"
return self.contextRender(request\
,msg=t_msg)
#add to live trading list
elif request.method == 'POST' and 'addtolivetrade' in request.POST.keys():
fNamelive = "static\\csv\\liveTradeList.csv"
csvPathLive = os.path.join(BASE_DIR, fNamelive )
stock_ticker = request.POST['addtolivetrade']
columns = ['ticker', 'pid', 'clientid']
emptydf = pd.DataFrame(columns=columns)
try:
df = pd.read_csv(csvPathLive)
except:
emptydf.to_csv(csvPathLive, sep=',', index=False)
df = pd.read_csv(csvPathLive)
client_id = [i for i in range(20, 25) if i not in df['clientid'].values ][0]
if len(df.index) == 0:
if stock_ticker in df['ticker'].values:
t_msg = "FAILED! "+stock_ticker+ " is already in the LIVE TRADE list"
return self.contextRender(request\
,msg=t_msg)
insertPoint = len(df['ticker'].values)
df.loc[insertPoint, 'ticker'] = stock_ticker # df.loc is the trick to add to eend of row
df.loc[insertPoint, 'clientid'] = client_id
df.to_csv(csvPathLive, sep=',', index=False)
t_msg = " Added " + stock_ticker+ " to LIVE TRADE list"
else:
t_msg = " Failed to add " + stock_ticker+ " to LIVE TRADE list. Ccurrently the no of live trades are restricted to '1'!"
return self.contextRender(request\
,msg=t_msg)
# remove ticker from live trade list
elif request.method == 'POST' and 'livelistRow' in request.POST.keys():
fNamelive = "static\\csv\\liveTradeList.csv"
csvPathLive = os.path.join(BASE_DIR, fNamelive )
row_number = int(request.POST['livelistRow'])
f_ticker = request.POST['liveTicker']
df = pd.read_csv(csvPathLive)
pid_insert_point = df['ticker'].values.tolist().index(f_ticker)
pid = df['pid'].iloc[pid_insert_point].astype(int)
try:
p = psutil.Process(pid)
p.terminate()
df.drop(df.index[row_number], inplace=True)
df.to_csv(csvPathLive, sep=',', index=False)
t_msg = "Process terminated! \n Successfully removed "\
+ f_ticker+" from LIVE trading list"
return self.contextRender(request\
,msg=t_msg)
except:
df.drop(df.index[row_number], inplace=True)
df.to_csv(csvPathLive, sep=',', index=False)
t_msg = "Successfully removed "\
+ f_ticker+" from LIVE trading list!"
return self.contextRender(request\
,msg=t_msg)
#start trading.. launch algorithm
elif request.method == 'POST' and 'startrade' in request.POST.keys():
fNamelive = "static\\csv\\liveTradeList.csv"
csvPathLive = os.path.join(BASE_DIR, fNamelive )
s_ticker = request.POST['startrade']
df = | pd.read_csv(csvPathLive) | pandas.read_csv |
import os
from scipy.io import loadmat
import numpy as np
import pandas as pd
import torch
from sklearn.model_selection import train_test_split
from datasets.SequenceDatasets import dataset
from datasets.sequence_aug import *
from tqdm import tqdm
from itertools import islice
#Digital data was collected at 12,000 samples per second
signal_size = 1024
work_condition=['_20_0.csv','_30_2.csv']
dataname= {0:[os.path.join('bearingset','health'+work_condition[0]),
os.path.join('gearset','Health'+work_condition[0]),
os.path.join('bearingset','ball'+work_condition[0]),
os.path.join('bearingset','outer'+work_condition[0]),
os.path.join('bearingset', 'inner' + work_condition[0]),
os.path.join('bearingset', 'comb' + work_condition[0]),
os.path.join('gearset', 'Chipped' + work_condition[0]),
os.path.join('gearset', 'Miss' + work_condition[0]),
os.path.join('gearset', 'Surface' + work_condition[0]),
os.path.join('gearset', 'Root' + work_condition[0]),
],
1:[os.path.join('bearingset','health'+work_condition[1]),
os.path.join('gearset','Health'+work_condition[1]),
os.path.join('bearingset','ball'+work_condition[1]),
os.path.join('bearingset','outer'+work_condition[1]),
os.path.join('bearingset', 'inner' + work_condition[1]),
os.path.join('bearingset', 'comb' + work_condition[1]),
os.path.join('gearset', 'Chipped' + work_condition[1]),
os.path.join('gearset', 'Miss' + work_condition[1]),
os.path.join('gearset', 'Surface' + work_condition[1]),
os.path.join('gearset', 'Root' + work_condition[1]),
]
}
label = [i for i in range(0, 9)]
def get_files(root, N):
'''
This function is used to generate the final training set and test set.
root:The location of the data set
'''
data = []
lab =[]
for k in range(len(N)):
for n in tqdm(range(len(dataname[N[k]]))):
path1 = os.path.join(root, dataname[N[k]][n])
if n==0:
data1, lab1 = data_load(path1, label=label[n])
else:
data1, lab1 = data_load(path1, label=label[n-1])
data += data1
lab +=lab1
return [data, lab]
def data_load(filename, label):
'''
This function is mainly used to generate test data and training data.
filename:Data location
axisname:Select which channel's data,---->"_DE_time","_FE_time","_BA_time"
'''
#--------------------
f = open(filename, "r", encoding='gb18030', errors='ignore')
fl = []
if "ball_20_0.csv" in filename:
for line in islice(f, 16, None): # Skip the first 16 lines
line = line.rstrip()
word = line.split(",", 8) # Separated by commas
fl.append(eval(word[1])) # Take a vibration signal in the x direction as input
else:
for line in islice(f, 16, None): # Skip the first 16 lines
line = line.rstrip()
word = line.split("\t", 8) # Separated by \t
fl.append(eval(word[1])) # Take a vibration signal in the x direction as input
#--------------------
fl = np.array(fl)
fl = fl.reshape(-1, 1)
# print(fl.shape())
data = []
lab = []
start, end = int(fl.shape[0]/2), int(fl.shape[0]/2)+signal_size
while end <= (int(fl.shape[0]/2)+int(fl.shape[0]/3)):
data.append(fl[start:end])
lab.append(label)
start += signal_size
end += signal_size
return data, lab
#--------------------------------------------------------------------------------------------------------------------
class Md(object):
num_classes = 9
inputchannel = 1
def __init__(self, data_dir, transfer_task, normlizetype="0-1"):
self.data_dir = data_dir
self.source_N = transfer_task[0]
self.target_N = transfer_task[1]
self.normlizetype = normlizetype
self.data_transforms = {
'train': Compose([
Reshape(),
Normalize(self.normlizetype),
# RandomAddGaussian(),
# RandomScale(),
# RandomStretch(),
# RandomCrop(),
Retype(),
# Scale(1)
]),
'val': Compose([
Reshape(),
Normalize(self.normlizetype),
Retype(),
# Scale(1)
])
}
def data_split(self, transfer_learning=True):
if transfer_learning:
# get source train and val
list_data = get_files(self.data_dir, self.source_N)
data_pd = pd.DataFrame({"data": list_data[0], "label": list_data[1]})
train_pd, val_pd = train_test_split(data_pd, test_size=0.2, random_state=40, stratify=data_pd["label"])
source_train = dataset(list_data=train_pd, transform=self.data_transforms['train'])
source_val = dataset(list_data=val_pd, transform=self.data_transforms['val'])
# get target train and val
list_data = get_files(self.data_dir, self.target_N)
data_pd = | pd.DataFrame({"data": list_data[0], "label": list_data[1]}) | pandas.DataFrame |
# Copyright 2015 Cloudera Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import math
import pytest # noqa
from .common import PostgreSQLTests
from ibis.compat import unittest
from ibis import literal as L
import ibis.expr.types as ir
import ibis
import sqlalchemy as sa
import pandas as pd
import pandas.util.testing as tm
class TestPostgreSQLFunctions(PostgreSQLTests, unittest.TestCase):
def test_cast(self):
at = self._to_sqla(self.alltypes)
d = self.alltypes.double_col
s = self.alltypes.string_col
sa_d = at.c.double_col
sa_s = at.c.string_col
cases = [
(d.cast('int8'), sa.cast(sa_d, sa.SMALLINT)),
(d.cast('int16'), sa.cast(sa_d, sa.SMALLINT)),
(s.cast('double'), sa.cast(sa_s, sa.FLOAT)),
(s.cast('float'), sa.cast(sa_s, sa.REAL))
]
self._check_expr_cases(cases)
@pytest.mark.xfail(raises=AssertionError, reason='NYI')
def test_decimal_cast(self):
assert False
def test_timestamp_cast_noop(self):
# See GH #592
at = self._to_sqla(self.alltypes)
tc = self.alltypes.timestamp_col
ic = self.alltypes.int_col
tc_casted = tc.cast('timestamp')
ic_casted = ic.cast('timestamp')
# Logically, it's a timestamp
assert isinstance(tc_casted, ir.TimestampArray)
assert isinstance(ic_casted, ir.TimestampArray)
cases = [
(tc_casted, at.c.timestamp_col),
(ic_casted,
sa.func.timezone('UTC', sa.func.to_timestamp(at.c.int_col)))
]
self._check_expr_cases(cases)
def test_timestamp_functions(self):
from datetime import datetime
v = L('2015-09-01 14:48:05.359').cast('timestamp')
vt = datetime(
year=2015, month=9, day=1,
hour=14, minute=48, second=5, microsecond=359000
)
cases = [
(v.strftime('%Y%m%d'), '20150901'),
(v.year(), 2015),
(v.month(), 9),
(v.day(), 1),
(v.hour(), 14),
(v.minute(), 48),
(v.second(), 5),
(v.millisecond(), 359),
# there could be pathological failure at midnight somewhere, but
# that's okay
(v.strftime('%Y%m%d %H'), vt.strftime('%Y%m%d %H')),
# test quoting behavior
(v.strftime('DD BAR %w FOO "DD"'),
vt.strftime('DD BAR %w FOO "DD"')),
(v.strftime('DD BAR %w FOO "D'),
vt.strftime('DD BAR %w FOO "D')),
(v.strftime('DD BAR "%w" FOO "D'),
vt.strftime('DD BAR "%w" FOO "D')),
(v.strftime('DD BAR "%d" FOO "D'),
vt.strftime('DD BAR "%d" FOO "D')),
(v.strftime('DD BAR "%c" FOO "D'),
vt.strftime('DD BAR "%c" FOO "D')),
(v.strftime('DD BAR "%x" FOO "D'),
vt.strftime('DD BAR "%x" FOO "D')),
(v.strftime('DD BAR "%X" FOO "D'),
vt.strftime('DD BAR "%X" FOO "D'))
]
self._check_e2e_cases(cases)
def test_binary_arithmetic(self):
cases = [
(L(3) + L(4), 7),
(L(3) - L(4), -1),
(L(3) * L(4), 12),
(L(12) / L(4), 3),
# (L(12) ** L(2), 144),
(L(12) % L(5), 2)
]
self._check_e2e_cases(cases)
def test_div_floordiv(self):
cases = [
(L(7) / L(2), 3.5),
(L(7) // L(2), 3),
(L(7).floordiv(2), 3),
(L(2).rfloordiv(7), 3),
]
self._check_e2e_cases(cases)
def test_typeof(self):
cases = [
(L('foo_bar').typeof(), 'text'),
(L(5).typeof(), 'integer'),
(ibis.NA.typeof(), 'null'),
# TODO: this should really be double
(L(1.2345).typeof(), 'numeric'),
]
self._check_e2e_cases(cases)
def test_nullifzero(self):
cases = [
(L(0).nullifzero(), None),
(L(5.5).nullifzero(), 5.5),
]
self._check_e2e_cases(cases)
def test_string_length(self):
cases = [
(L('foo_bar').length(), 7),
(L('').length(), 0),
]
self._check_e2e_cases(cases)
def test_string_substring(self):
cases = [
(L('foo_bar').left(3), 'foo'),
(L('foo_bar').right(3), 'bar'),
(L('foo_bar').substr(0, 3), 'foo'),
(L('foo_bar').substr(4, 3), 'bar'),
(L('foo_bar').substr(1), 'oo_bar'),
]
self._check_e2e_cases(cases)
def test_string_strip(self):
cases = [
(L(' foo ').lstrip(), 'foo '),
(L(' foo ').rstrip(), ' foo'),
(L(' foo ').strip(), 'foo'),
]
self._check_e2e_cases(cases)
def test_string_pad(self):
cases = [
(L('foo').lpad(6, ' '), ' foo'),
(L('foo').rpad(6, ' '), 'foo '),
]
self._check_e2e_cases(cases)
def test_string_reverse(self):
cases = [
(L('foo').reverse(), 'oof'),
]
self._check_e2e_cases(cases)
def test_string_upper_lower(self):
cases = [
(L('foo').upper(), 'FOO'),
(L('FOO').lower(), 'foo'),
]
self._check_e2e_cases(cases)
def test_string_contains(self):
cases = [
(L('foobar').contains('bar'), True),
(L('foobar').contains('foo'), True),
(L('foobar').contains('baz'), False),
(L('100%').contains('%'), True),
(L('a_b_c').contains('_'), True)
]
self._check_e2e_cases(cases)
def test_capitalize(self):
cases = [
(L('foo bar foo').capitalize(), 'Foo Bar Foo'),
(L('foobar Foo').capitalize(), 'Foobar Foo'),
]
self._check_e2e_cases(cases)
def test_repeat(self):
cases = [
(L('bar ').repeat(3), 'bar bar bar '),
]
self._check_e2e_cases(cases)
def test_re_replace(self):
cases = [
(
L('fudge|||chocolate||candy').re_replace('\\|{2,3}', ', '),
'fudge, chocolate, candy'
)
]
self._check_e2e_cases(cases)
def test_translate(self):
cases = [
(L('faab').translate('a', 'b'), 'fbbb'),
]
self._check_e2e_cases(cases)
def test_find_in_set(self):
cases = [
(L('a').find_in_set(list('abc')), 0),
(L('b').find_in_set(list('abc')), 1),
]
self._check_e2e_cases(cases)
def test_find_in_set_null_scalar(self):
cases = [
(L(None).cast('string').find_in_set(['a', 'b', None]), 2),
]
self._check_e2e_cases(cases)
def test_isnull_notnull(self):
cases = [
(L(None).isnull(), True),
(L(1).isnull(), False),
(L(None).notnull(), False),
(L(1).notnull(), True),
]
self._check_e2e_cases(cases)
def test_string_functions(self):
cases = [
(L('foobar').find('bar'), 3),
(L('foobar').find('baz'), -1),
(L('foobar').like('%bar'), True),
(L('foobar').like('foo%'), True),
(L('foobar').like('%baz%'), False),
(L('foobarfoo').replace('foo', 'H'), 'HbarH'),
(L('a').ascii_str(), ord('a'))
]
self._check_e2e_cases(cases)
def test_math_functions(self):
cases = [
(L(-5).abs(), 5),
(L(5).abs(), 5),
(ibis.least(L(5), L(10), L(1)), 1),
(ibis.greatest(L(5), L(10), L(1)), 10),
(L(5.5).round(), 6.0),
(L(5.556).round(2), 5.56),
(L(5.556).ceil(), 6.0),
(L(5.556).floor(), 5.0),
(L(5.556).exp(), math.exp(5.556)),
(L(5.556).sign(), 1),
(L(-5.556).sign(), -1),
(L(0).sign(), 0),
(L(5.556).sqrt(), math.sqrt(5.556)),
(L(5.556).log(2), math.log(5.556, 2)),
(L(5.556).ln(), math.log(5.556)),
(L(5.556).log2(), math.log(5.556, 2)),
(L(5.556).log10(), math.log10(5.556)),
]
self._check_e2e_cases(cases)
def test_regexp(self):
v = L('abcd')
v2 = L('1222')
cases = [
(v.re_search('[a-z]'), True),
(v.re_search('[\d]+'), False),
(v2.re_search('[\d]+'), True),
]
self._check_e2e_cases(cases)
def test_regexp_extract(self):
cases = [
(L('abcd').re_extract('([a-z]+)', 0), 'abcd'),
(L('abcd').re_extract('(ab)(cd)', 1), 'cd'),
# valid group number but no match => empty string
(L('abcd').re_extract('(\d)', 0), ''),
# match but not a valid group number => NULL
(L('abcd').re_extract('abcd', 3), None),
]
self._check_e2e_cases(cases)
def test_fillna_nullif(self):
cases = [
(ibis.NA.fillna(5), 5),
(L(5).fillna(10), 5),
(L(5).nullif(5), None),
(L(10).nullif(5), 10),
]
self._check_e2e_cases(cases)
def test_coalesce(self):
cases = [
(ibis.coalesce(5, None, 4), 5),
(ibis.coalesce(ibis.NA, 4, ibis.NA), 4),
(ibis.coalesce(ibis.NA, ibis.NA, 3.14), 3.14),
]
self._check_e2e_cases(cases)
@pytest.mark.xfail(raises=TypeError, reason='Ambiguous argument types')
def test_coalesce_all_na(self):
NA = ibis.NA
int8_na = ibis.NA.cast('int8')
cases = [
(ibis.coalesce(NA, NA), NA),
(ibis.coalesce(NA, NA, NA.cast('double')), NA),
(ibis.coalesce(int8_na, int8_na, int8_na), int8_na),
]
self._check_e2e_cases(cases)
def test_numeric_builtins_work(self):
t = self.alltypes
d = t.double_col
exprs = [
d.fillna(0),
]
self._execute_projection(t, exprs)
def test_misc_builtins_work(self):
t = self.alltypes
d = t.double_col
exprs = [
(d > 20).ifelse(10, -20),
(d > 20).ifelse(10, -20).abs(),
# tier and histogram
d.bucket([0, 10, 25, 50, 100]),
d.bucket([0, 10, 25, 50], include_over=True),
d.bucket([0, 10, 25, 50], include_over=True, close_extreme=False),
d.bucket([10, 25, 50, 100], include_under=True),
]
self._execute_projection(t, exprs)
def test_category_label(self):
t = self.alltypes
d = t.double_col
bucket = d.bucket([0, 10, 25, 50, 100])
exprs = [
bucket.label(['a', 'b', 'c', 'd'])
]
self._execute_projection(t, exprs)
@pytest.mark.xfail(
raises=sa.exc.ProgrammingError,
reason='union not working yet'
)
def test_union(self):
t = self.alltypes
expr = (t.group_by('string_col')
.aggregate(t.double_col.sum().name('foo'))
.sort_by('string_col'))
t1 = expr.limit(4)
t2 = expr.limit(4, offset=4)
t3 = expr.limit(8)
result = t1.union(t2).execute()
expected = t3.execute()
assert (result.string_col == expected.string_col).all()
def test_aggregations_execute(self):
table = self.alltypes.limit(100)
d = table.double_col
s = table.string_col
cond = table.string_col.isin(['1', '7'])
exprs = [
table.bool_col.count(),
table.bool_col.any(),
table.bool_col.all(),
table.bool_col.notany(),
table.bool_col.notall(),
d.sum(),
d.mean(),
d.min(),
d.max(),
d.var(),
d.std(),
d.var(how='sample'),
d.std(how='pop'),
table.bool_col.count(where=cond),
d.sum(where=cond),
d.mean(where=cond),
d.min(where=cond),
d.max(where=cond),
d.var(where=cond),
d.std(where=cond),
d.var(where=cond, how='sample'),
d.std(where=cond, how='pop'),
s.group_concat(),
]
self._execute_aggregation(table, exprs)
def test_distinct_aggregates(self):
table = self.alltypes.limit(100)
exprs = [
table.double_col.nunique()
]
self._execute_aggregation(table, exprs)
def test_not_exists_works(self):
t = self.alltypes
t2 = t.view()
expr = t[-(t.string_col == t2.string_col).any()]
expr.execute()
def test_interactive_repr_shows_error(self):
# #591. Doing this in PostgreSQL because so many built-in functions are
# not available
import ibis.config as config
expr = self.alltypes.double_col.approx_nunique()
with config.option_context('interactive', True):
result = repr(expr)
assert 'no translator rule' in result.lower()
def test_subquery_invokes_postgresql_compiler(self):
t = self.alltypes
expr = (t.mutate(d=t.double_col.fillna(0))
.limit(1000)
.group_by('string_col')
.size())
expr.execute()
def _execute_aggregation(self, table, exprs):
agg_exprs = [expr.name('e%d' % i)
for i, expr in enumerate(exprs)]
agged_table = table.aggregate(agg_exprs)
agged_table.execute()
def _execute_projection(self, table, exprs):
agg_exprs = [expr.name('e%d' % i)
for i, expr in enumerate(exprs)]
proj = table.projection(agg_exprs)
proj.execute()
def test_simple_window(self):
t = self.alltypes
df = t.execute()
for func in 'mean sum min max'.split():
f = getattr(t.double_col, func)
df_f = getattr(df.double_col, func)
result = t.projection([(t.double_col - f()).name('double_col')]).execute().double_col
expected = df.double_col - df_f()
tm.assert_series_equal(result, expected)
def test_rolling_window(self):
t = self.alltypes
df = t[['double_col', 'timestamp_col']].execute().sort_values('timestamp_col').reset_index(drop=True)
window = ibis.window(
order_by=t.timestamp_col,
preceding=6,
following=0
)
for func in 'mean sum min max'.split():
f = getattr(t.double_col, func)
df_f = getattr(df.double_col.rolling(7, min_periods=0), func)
result = t.projection([f().over(window).name('double_col')]).execute().double_col
expected = df_f()
tm.assert_series_equal(result, expected)
def test_partitioned_window(self):
t = self.alltypes
df = t.execute()
window = ibis.window(
group_by=t.string_col,
order_by=t.timestamp_col,
preceding=6,
following=0,
)
def roller(func):
def rolled(df):
torder = df.sort_values('timestamp_col')
rolling = torder.double_col.rolling(7, min_periods=0)
return getattr(rolling, func)()
return rolled
for func in 'mean sum min max'.split():
f = getattr(t.double_col, func)
expr = f().over(window).name('double_col')
result = t.projection([expr]).execute().double_col
expected = df.groupby('string_col').apply(
roller(func)
).reset_index(drop=True)
tm.assert_series_equal(result, expected)
def test_cumulative_simple_window(self):
t = self.alltypes
df = t.execute()
for func in 'sum min max'.split():
f = getattr(t.double_col, func)
expr = t.projection([(t.double_col - f().over(ibis.cumulative_window())).name('double_col')])
result = expr.execute().double_col
expected = df.double_col - getattr(df.double_col, 'cum%s' % func)()
| tm.assert_series_equal(result, expected) | pandas.util.testing.assert_series_equal |
# Arithmetic tests for DataFrame/Series/Index/Array classes that should
# behave identically.
# Specifically for Period dtype
import operator
import numpy as np
import pytest
from pandas._libs.tslibs.period import IncompatibleFrequency
from pandas.errors import PerformanceWarning
import pandas as pd
from pandas import Period, PeriodIndex, Series, period_range
from pandas.core import ops
from pandas.core.arrays import TimedeltaArray
import pandas.util.testing as tm
from pandas.tseries.frequencies import to_offset
# ------------------------------------------------------------------
# Comparisons
class TestPeriodArrayLikeComparisons:
# Comparison tests for PeriodDtype vectors fully parametrized over
# DataFrame/Series/PeriodIndex/PeriodArray. Ideally all comparison
# tests will eventually end up here.
def test_compare_zerodim(self, box_with_array):
# GH#26689 make sure we unbox zero-dimensional arrays
xbox = box_with_array if box_with_array is not pd.Index else np.ndarray
pi = pd.period_range("2000", periods=4)
other = np.array(pi.to_numpy()[0])
pi = tm.box_expected(pi, box_with_array)
result = pi <= other
expected = np.array([True, False, False, False])
expected = tm.box_expected(expected, xbox)
tm.assert_equal(result, expected)
class TestPeriodIndexComparisons:
# TODO: parameterize over boxes
@pytest.mark.parametrize("other", ["2017", 2017])
def test_eq(self, other):
idx = PeriodIndex(["2017", "2017", "2018"], freq="D")
expected = np.array([True, True, False])
result = idx == other
tm.assert_numpy_array_equal(result, expected)
def test_pi_cmp_period(self):
idx = period_range("2007-01", periods=20, freq="M")
result = idx < idx[10]
exp = idx.values < idx.values[10]
tm.assert_numpy_array_equal(result, exp)
# TODO: moved from test_datetime64; de-duplicate with version below
def test_parr_cmp_period_scalar2(self, box_with_array):
xbox = box_with_array if box_with_array is not pd.Index else np.ndarray
pi = pd.period_range("2000-01-01", periods=10, freq="D")
val = Period("2000-01-04", freq="D")
expected = [x > val for x in pi]
ser = tm.box_expected(pi, box_with_array)
expected = tm.box_expected(expected, xbox)
result = ser > val
tm.assert_equal(result, expected)
val = pi[5]
result = ser > val
expected = [x > val for x in pi]
expected = tm.box_expected(expected, xbox)
tm.assert_equal(result, expected)
@pytest.mark.parametrize("freq", ["M", "2M", "3M"])
def test_parr_cmp_period_scalar(self, freq, box_with_array):
# GH#13200
xbox = np.ndarray if box_with_array is pd.Index else box_with_array
base = PeriodIndex(["2011-01", "2011-02", "2011-03", "2011-04"], freq=freq)
base = tm.box_expected(base, box_with_array)
per = Period("2011-02", freq=freq)
exp = np.array([False, True, False, False])
exp = tm.box_expected(exp, xbox)
tm.assert_equal(base == per, exp)
tm.assert_equal(per == base, exp)
exp = np.array([True, False, True, True])
exp = tm.box_expected(exp, xbox)
tm.assert_equal(base != per, exp)
tm.assert_equal(per != base, exp)
exp = np.array([False, False, True, True])
exp = tm.box_expected(exp, xbox)
tm.assert_equal(base > per, exp)
tm.assert_equal(per < base, exp)
exp = np.array([True, False, False, False])
exp = tm.box_expected(exp, xbox)
tm.assert_equal(base < per, exp)
tm.assert_equal(per > base, exp)
exp = np.array([False, True, True, True])
exp = tm.box_expected(exp, xbox)
tm.assert_equal(base >= per, exp)
tm.assert_equal(per <= base, exp)
exp = np.array([True, True, False, False])
exp = tm.box_expected(exp, xbox)
tm.assert_equal(base <= per, exp)
tm.assert_equal(per >= base, exp)
@pytest.mark.parametrize("freq", ["M", "2M", "3M"])
def test_parr_cmp_pi(self, freq, box_with_array):
# GH#13200
xbox = np.ndarray if box_with_array is pd.Index else box_with_array
base = PeriodIndex(["2011-01", "2011-02", "2011-03", "2011-04"], freq=freq)
base = tm.box_expected(base, box_with_array)
# TODO: could also box idx?
idx = PeriodIndex(["2011-02", "2011-01", "2011-03", "2011-05"], freq=freq)
exp = np.array([False, False, True, False])
exp = tm.box_expected(exp, xbox)
tm.assert_equal(base == idx, exp)
exp = np.array([True, True, False, True])
exp = tm.box_expected(exp, xbox)
tm.assert_equal(base != idx, exp)
exp = np.array([False, True, False, False])
exp = tm.box_expected(exp, xbox)
tm.assert_equal(base > idx, exp)
exp = np.array([True, False, False, True])
exp = tm.box_expected(exp, xbox)
tm.assert_equal(base < idx, exp)
exp = np.array([False, True, True, False])
exp = tm.box_expected(exp, xbox)
tm.assert_equal(base >= idx, exp)
exp = np.array([True, False, True, True])
exp = tm.box_expected(exp, xbox)
tm.assert_equal(base <= idx, exp)
@pytest.mark.parametrize("freq", ["M", "2M", "3M"])
def test_parr_cmp_pi_mismatched_freq_raises(self, freq, box_with_array):
# GH#13200
# different base freq
base = PeriodIndex(["2011-01", "2011-02", "2011-03", "2011-04"], freq=freq)
base = tm.box_expected(base, box_with_array)
msg = "Input has different freq=A-DEC from "
with pytest.raises(IncompatibleFrequency, match=msg):
base <= Period("2011", freq="A")
with pytest.raises(IncompatibleFrequency, match=msg):
Period("2011", freq="A") >= base
# TODO: Could parametrize over boxes for idx?
idx = PeriodIndex(["2011", "2012", "2013", "2014"], freq="A")
rev_msg = (
r"Input has different freq=(M|2M|3M) from " r"PeriodArray\(freq=A-DEC\)"
)
idx_msg = rev_msg if box_with_array is tm.to_array else msg
with pytest.raises(IncompatibleFrequency, match=idx_msg):
base <= idx
# Different frequency
msg = "Input has different freq=4M from "
with pytest.raises(IncompatibleFrequency, match=msg):
base <= Period("2011", freq="4M")
with pytest.raises(IncompatibleFrequency, match=msg):
Period("2011", freq="4M") >= base
idx = PeriodIndex(["2011", "2012", "2013", "2014"], freq="4M")
rev_msg = r"Input has different freq=(M|2M|3M) from " r"PeriodArray\(freq=4M\)"
idx_msg = rev_msg if box_with_array is tm.to_array else msg
with pytest.raises(IncompatibleFrequency, match=idx_msg):
base <= idx
@pytest.mark.parametrize("freq", ["M", "2M", "3M"])
def test_pi_cmp_nat(self, freq):
idx1 = PeriodIndex(["2011-01", "2011-02", "NaT", "2011-05"], freq=freq)
result = idx1 > Period("2011-02", freq=freq)
exp = np.array([False, False, False, True])
tm.assert_numpy_array_equal(result, exp)
result = Period("2011-02", freq=freq) < idx1
tm.assert_numpy_array_equal(result, exp)
result = idx1 == Period("NaT", freq=freq)
exp = np.array([False, False, False, False])
tm.assert_numpy_array_equal(result, exp)
result = Period("NaT", freq=freq) == idx1
tm.assert_numpy_array_equal(result, exp)
result = idx1 != Period("NaT", freq=freq)
exp = np.array([True, True, True, True])
tm.assert_numpy_array_equal(result, exp)
result = Period("NaT", freq=freq) != idx1
tm.assert_numpy_array_equal(result, exp)
idx2 = PeriodIndex(["2011-02", "2011-01", "2011-04", "NaT"], freq=freq)
result = idx1 < idx2
exp = np.array([True, False, False, False])
tm.assert_numpy_array_equal(result, exp)
result = idx1 == idx2
exp = np.array([False, False, False, False])
tm.assert_numpy_array_equal(result, exp)
result = idx1 != idx2
exp = np.array([True, True, True, True])
tm.assert_numpy_array_equal(result, exp)
result = idx1 == idx1
exp = np.array([True, True, False, True])
tm.assert_numpy_array_equal(result, exp)
result = idx1 != idx1
exp = np.array([False, False, True, False])
tm.assert_numpy_array_equal(result, exp)
@pytest.mark.parametrize("freq", ["M", "2M", "3M"])
def test_pi_cmp_nat_mismatched_freq_raises(self, freq):
idx1 = PeriodIndex(["2011-01", "2011-02", "NaT", "2011-05"], freq=freq)
diff = PeriodIndex(["2011-02", "2011-01", "2011-04", "NaT"], freq="4M")
msg = "Input has different freq=4M from Period(Array|Index)"
with pytest.raises(IncompatibleFrequency, match=msg):
idx1 > diff
with pytest.raises(IncompatibleFrequency, match=msg):
idx1 == diff
# TODO: De-duplicate with test_pi_cmp_nat
@pytest.mark.parametrize("dtype", [object, None])
def test_comp_nat(self, dtype):
left = pd.PeriodIndex(
[pd.Period("2011-01-01"), pd.NaT, pd.Period("2011-01-03")]
)
right = pd.PeriodIndex([pd.NaT, pd.NaT, pd.Period("2011-01-03")])
if dtype is not None:
left = left.astype(dtype)
right = right.astype(dtype)
result = left == right
expected = np.array([False, False, True])
tm.assert_numpy_array_equal(result, expected)
result = left != right
expected = np.array([True, True, False])
tm.assert_numpy_array_equal(result, expected)
expected = np.array([False, False, False])
tm.assert_numpy_array_equal(left == pd.NaT, expected)
| tm.assert_numpy_array_equal(pd.NaT == right, expected) | pandas.util.testing.assert_numpy_array_equal |
import sys
import pandas as pd
import numpy as np
from designer_summary import DesignerSummary
pd.set_option('display.expand_frame_repr', False)
pd.set_option('display.max_columns', 10)
class Grailed(object):
def __init__(self, feed_csv_path):
self.df = self.load_df_from_csv(feed_csv_path)
self.groups = self.df.groupby('designer')
designers_to_groups = self.groups.indices
self.non_collabs = [name for name in designers_to_groups if not self.is_collab(name)]
self.collabs = [name for name in designers_to_groups if self.is_collab(name)]
self.designers_with_collabs = { designer : [] for designer in self.non_collabs }
for collab_name in self.collabs:
for non_collab in self.non_collabs:
if non_collab in collab_name:
self.designers_with_collabs[non_collab].append(collab_name)
def load_df_from_csv(self, feed_csv_path):
with open(feed_csv_path, 'r') as f:
df = pd.read_csv(f, names=['title', 'designer', 'size', 'price', 'original_price', 'age', 'bumped'])
# Remove dollar sign and convert to int
df['price'] = df['price'].map(lambda price_str: int(price_str[1:]))
df['original_price'] = df['original_price'].map(lambda price_str: int(price_str[1:]))
df['designer'] = df['designer'].fillna(value='NO DESIGNER').map(lambda designer_str: designer_str.lower())
def string_to_seconds(time_desc):
time_denoms = {
'second' : 1 / 24 / 60 / 60,
'minute': 1 / 24 / 60,
'hour': 1 / 24,
'day' : 1,
'week': 7,
'month': 30,
'year': 365
}
time_denoms['min'] = time_denoms['minute']
desc = time_desc.split(' ')
num, denom = desc[0], desc[1]
if denom[-1] == 's':
denom = denom[:len(denom)-1]
return int(num) * time_denoms[denom]
df['age'] = df['age'].map(string_to_seconds)
df['bumped'] = df['age'] - df['bumped'].map(lambda b: b if | pd.isnull(b) | pandas.isnull |
import ipywidgets
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import pymc3 as pm
import seaborn as sns
import sympy
# sympy symbol definition for confusion matrix (CM) entries
symbol_order = 'TP FN TN FP'.split()
tp, fn, tn, fp = cm_elements = sympy.symbols(symbol_order)
n = sum(cm_elements)
distribution_samples = int(2e4)
default_rope = 0.0
# priors (naming conventions from Alvares2018)
def objective_prior(val):
return pd.Series([val] * len(symbol_order), index=symbol_order)
bayes_laplace_prior = objective_prior(1)
haldane_prior = objective_prior(0)
jeffreys_prior = objective_prior(0.5)
rda_prior = objective_prior(1. / len(symbol_order))
ha_prior = objective_prior(np.sqrt(2) / len(symbol_order))
dcm_priors = {'Bayes-Laplace': bayes_laplace_prior,
'Haldane': haldane_prior,
'Jeffreys': jeffreys_prior,
'RDA': rda_prior,
'HA': ha_prior}
triplebeta_priors = {'Haldane': {'PREVALENCE': [0, 0], 'TPR': [0, 0], 'TNR': [0, 0]},
'Bayes-Laplace': {'PREVALENCE': [1, 1], 'TPR': [1, 1], 'TNR': [1, 1]},
'Jeffreys': {'PREVALENCE': [0.5, 0.5], 'TPR': [0.5, 0.5], 'TNR': [0.5, 0.5]}}
class BetaBinomialDist(object):
def __init__(self, k, j, prior=[0, 0]):
self.k = k
self.j = j
self.n = k + j
self.prior = prior
self.theta_samples = self.sample_theta()
self.theta_uncertainty = self.calc_uncertainty(self.theta_samples)
self.pp_samples = self.posterior_predict_metric()
self.pp_uncertainty = self.calc_uncertainty_list(self.pp_samples)
def sample_theta(self):
alpha = self.k + self.prior[0]
beta = self.n - self.k + self.prior[1]
return pd.Series(np.random.beta(alpha, beta, size=distribution_samples))
def posterior_predict_metric(self):
predicted_k = np.array([np.random.binomial(self.n, x) for x in self.theta_samples.values])
predicted_j = self.n - predicted_k
return | pd.DataFrame({'k': predicted_k, 'j': predicted_j}) | pandas.DataFrame |
import pandas as pd
import numpy as np
import unittest
from dstools.preprocessing.Bucketizer import Bucketizer
class TestBucketizer(unittest.TestCase):
def compare_DataFrame(self, df_transformed, df_transformed_correct):
"""
helper function to compare the values of the transformed DataFrame with the values of a correctly transformed DataFrame
"""
#same number of columns
self.assertEqual(len(df_transformed.columns), len(df_transformed_correct.columns))
#check for every column in correct DataFrame, that all items are equal
for column in df_transformed_correct.columns:
#compare every element
for x, y in zip(df_transformed[column], df_transformed_correct[column]):
#if both values are np.NaN, the assertion fails, although they are equal
#if np.isnan(x)==True and np.isnan(y)==True: --> doesn't work with strings
if pd.isnull(x)==True and pd.isnull(y)==True:
pass
else:
self.assertEqual(x, y)
def test_no_numeric_feature(self):
"""
no transformation should be performed
"""
df=pd.DataFrame({'x':[np.NaN,'b','c']})
df_transformed_correct=pd.DataFrame({'x':[np.NaN,'b','c']})
bucket = Bucketizer(features=['x'])
df_transformed = bucket.fit_transform(df)
self.compare_DataFrame(df_transformed, df_transformed_correct)
def test_one_numeric_feature_no_transformation(self):
"""
no transformation should be performed
"""
df=pd.DataFrame({'x':[1,2,3]})
df_transformed_correct=pd.DataFrame({'x':[1,2,3]})
bucket = Bucketizer()
df_transformed = bucket.fit_transform(df)
self.compare_DataFrame(df_transformed, df_transformed_correct)
def test_one_numeric_feature_with_transformation(self):
"""
transformation should be performed
"""
df= | pd.DataFrame({'x':[1,2,3]}) | pandas.DataFrame |
# -*- coding: utf-8 -*-
from datetime import timedelta
import operator
from string import ascii_lowercase
import warnings
import numpy as np
import pytest
from pandas.compat import lrange
import pandas.util._test_decorators as td
import pandas as pd
from pandas import (
Categorical, DataFrame, MultiIndex, Series, Timestamp, date_range, isna,
notna, to_datetime, to_timedelta)
import pandas.core.algorithms as algorithms
import pandas.core.nanops as nanops
import pandas.util.testing as tm
def assert_stat_op_calc(opname, alternative, frame, has_skipna=True,
check_dtype=True, check_dates=False,
check_less_precise=False, skipna_alternative=None):
"""
Check that operator opname works as advertised on frame
Parameters
----------
opname : string
Name of the operator to test on frame
alternative : function
Function that opname is tested against; i.e. "frame.opname()" should
equal "alternative(frame)".
frame : DataFrame
The object that the tests are executed on
has_skipna : bool, default True
Whether the method "opname" has the kwarg "skip_na"
check_dtype : bool, default True
Whether the dtypes of the result of "frame.opname()" and
"alternative(frame)" should be checked.
check_dates : bool, default false
Whether opname should be tested on a Datetime Series
check_less_precise : bool, default False
Whether results should only be compared approximately;
passed on to tm.assert_series_equal
skipna_alternative : function, default None
NaN-safe version of alternative
"""
f = getattr(frame, opname)
if check_dates:
df = DataFrame({'b': date_range('1/1/2001', periods=2)})
result = getattr(df, opname)()
assert isinstance(result, Series)
df['a'] = lrange(len(df))
result = getattr(df, opname)()
assert isinstance(result, Series)
assert len(result)
if has_skipna:
def wrapper(x):
return alternative(x.values)
skipna_wrapper = tm._make_skipna_wrapper(alternative,
skipna_alternative)
result0 = f(axis=0, skipna=False)
result1 = f(axis=1, skipna=False)
tm.assert_series_equal(result0, frame.apply(wrapper),
check_dtype=check_dtype,
check_less_precise=check_less_precise)
# HACK: win32
tm.assert_series_equal(result1, frame.apply(wrapper, axis=1),
check_dtype=False,
check_less_precise=check_less_precise)
else:
skipna_wrapper = alternative
result0 = f(axis=0)
result1 = f(axis=1)
tm.assert_series_equal(result0, frame.apply(skipna_wrapper),
check_dtype=check_dtype,
check_less_precise=check_less_precise)
if opname in ['sum', 'prod']:
expected = frame.apply(skipna_wrapper, axis=1)
tm.assert_series_equal(result1, expected, check_dtype=False,
check_less_precise=check_less_precise)
# check dtypes
if check_dtype:
lcd_dtype = frame.values.dtype
assert lcd_dtype == result0.dtype
assert lcd_dtype == result1.dtype
# bad axis
with pytest.raises(ValueError, match='No axis named 2'):
f(axis=2)
# all NA case
if has_skipna:
all_na = frame * np.NaN
r0 = getattr(all_na, opname)(axis=0)
r1 = getattr(all_na, opname)(axis=1)
if opname in ['sum', 'prod']:
unit = 1 if opname == 'prod' else 0 # result for empty sum/prod
expected = pd.Series(unit, index=r0.index, dtype=r0.dtype)
tm.assert_series_equal(r0, expected)
expected = pd.Series(unit, index=r1.index, dtype=r1.dtype)
tm.assert_series_equal(r1, expected)
def assert_stat_op_api(opname, float_frame, float_string_frame,
has_numeric_only=False):
"""
Check that API for operator opname works as advertised on frame
Parameters
----------
opname : string
Name of the operator to test on frame
float_frame : DataFrame
DataFrame with columns of type float
float_string_frame : DataFrame
DataFrame with both float and string columns
has_numeric_only : bool, default False
Whether the method "opname" has the kwarg "numeric_only"
"""
# make sure works on mixed-type frame
getattr(float_string_frame, opname)(axis=0)
getattr(float_string_frame, opname)(axis=1)
if has_numeric_only:
getattr(float_string_frame, opname)(axis=0, numeric_only=True)
getattr(float_string_frame, opname)(axis=1, numeric_only=True)
getattr(float_frame, opname)(axis=0, numeric_only=False)
getattr(float_frame, opname)(axis=1, numeric_only=False)
def assert_bool_op_calc(opname, alternative, frame, has_skipna=True):
"""
Check that bool operator opname works as advertised on frame
Parameters
----------
opname : string
Name of the operator to test on frame
alternative : function
Function that opname is tested against; i.e. "frame.opname()" should
equal "alternative(frame)".
frame : DataFrame
The object that the tests are executed on
has_skipna : bool, default True
Whether the method "opname" has the kwarg "skip_na"
"""
f = getattr(frame, opname)
if has_skipna:
def skipna_wrapper(x):
nona = x.dropna().values
return alternative(nona)
def wrapper(x):
return alternative(x.values)
result0 = f(axis=0, skipna=False)
result1 = f(axis=1, skipna=False)
tm.assert_series_equal(result0, frame.apply(wrapper))
tm.assert_series_equal(result1, frame.apply(wrapper, axis=1),
check_dtype=False) # HACK: win32
else:
skipna_wrapper = alternative
wrapper = alternative
result0 = f(axis=0)
result1 = f(axis=1)
tm.assert_series_equal(result0, frame.apply(skipna_wrapper))
tm.assert_series_equal(result1, frame.apply(skipna_wrapper, axis=1),
check_dtype=False)
# bad axis
with pytest.raises(ValueError, match='No axis named 2'):
f(axis=2)
# all NA case
if has_skipna:
all_na = frame * np.NaN
r0 = getattr(all_na, opname)(axis=0)
r1 = getattr(all_na, opname)(axis=1)
if opname == 'any':
assert not r0.any()
assert not r1.any()
else:
assert r0.all()
assert r1.all()
def assert_bool_op_api(opname, bool_frame_with_na, float_string_frame,
has_bool_only=False):
"""
Check that API for boolean operator opname works as advertised on frame
Parameters
----------
opname : string
Name of the operator to test on frame
float_frame : DataFrame
DataFrame with columns of type float
float_string_frame : DataFrame
DataFrame with both float and string columns
has_bool_only : bool, default False
Whether the method "opname" has the kwarg "bool_only"
"""
# make sure op works on mixed-type frame
mixed = float_string_frame
mixed['_bool_'] = np.random.randn(len(mixed)) > 0.5
getattr(mixed, opname)(axis=0)
getattr(mixed, opname)(axis=1)
if has_bool_only:
getattr(mixed, opname)(axis=0, bool_only=True)
getattr(mixed, opname)(axis=1, bool_only=True)
getattr(bool_frame_with_na, opname)(axis=0, bool_only=False)
getattr(bool_frame_with_na, opname)(axis=1, bool_only=False)
class TestDataFrameAnalytics(object):
# ---------------------------------------------------------------------
# Correlation and covariance
@td.skip_if_no_scipy
def test_corr_pearson(self, float_frame):
float_frame['A'][:5] = np.nan
float_frame['B'][5:10] = np.nan
self._check_method(float_frame, 'pearson')
@td.skip_if_no_scipy
def test_corr_kendall(self, float_frame):
float_frame['A'][:5] = np.nan
float_frame['B'][5:10] = np.nan
self._check_method(float_frame, 'kendall')
@td.skip_if_no_scipy
def test_corr_spearman(self, float_frame):
float_frame['A'][:5] = np.nan
float_frame['B'][5:10] = np.nan
self._check_method(float_frame, 'spearman')
def _check_method(self, frame, method='pearson'):
correls = frame.corr(method=method)
expected = frame['A'].corr(frame['C'], method=method)
tm.assert_almost_equal(correls['A']['C'], expected)
@td.skip_if_no_scipy
def test_corr_non_numeric(self, float_frame, float_string_frame):
float_frame['A'][:5] = np.nan
float_frame['B'][5:10] = np.nan
# exclude non-numeric types
result = float_string_frame.corr()
expected = float_string_frame.loc[:, ['A', 'B', 'C', 'D']].corr()
tm.assert_frame_equal(result, expected)
@td.skip_if_no_scipy
@pytest.mark.parametrize('meth', ['pearson', 'kendall', 'spearman'])
def test_corr_nooverlap(self, meth):
# nothing in common
df = DataFrame({'A': [1, 1.5, 1, np.nan, np.nan, np.nan],
'B': [np.nan, np.nan, np.nan, 1, 1.5, 1],
'C': [np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan]})
rs = df.corr(meth)
assert isna(rs.loc['A', 'B'])
assert isna(rs.loc['B', 'A'])
assert rs.loc['A', 'A'] == 1
assert rs.loc['B', 'B'] == 1
assert isna(rs.loc['C', 'C'])
@td.skip_if_no_scipy
@pytest.mark.parametrize('meth', ['pearson', 'spearman'])
def test_corr_constant(self, meth):
# constant --> all NA
df = DataFrame({'A': [1, 1, 1, np.nan, np.nan, np.nan],
'B': [np.nan, np.nan, np.nan, 1, 1, 1]})
rs = df.corr(meth)
assert isna(rs.values).all()
def test_corr_int(self):
# dtypes other than float64 #1761
df3 = DataFrame({"a": [1, 2, 3, 4], "b": [1, 2, 3, 4]})
df3.cov()
df3.corr()
@td.skip_if_no_scipy
def test_corr_int_and_boolean(self):
# when dtypes of pandas series are different
# then ndarray will have dtype=object,
# so it need to be properly handled
df = DataFrame({"a": [True, False], "b": [1, 0]})
expected = DataFrame(np.ones((2, 2)), index=[
'a', 'b'], columns=['a', 'b'])
for meth in ['pearson', 'kendall', 'spearman']:
with warnings.catch_warnings(record=True):
warnings.simplefilter("ignore", RuntimeWarning)
result = df.corr(meth)
tm.assert_frame_equal(result, expected)
def test_corr_cov_independent_index_column(self):
# GH 14617
df = pd.DataFrame(np.random.randn(4 * 10).reshape(10, 4),
columns=list("abcd"))
for method in ['cov', 'corr']:
result = getattr(df, method)()
assert result.index is not result.columns
assert result.index.equals(result.columns)
def test_corr_invalid_method(self):
# GH 22298
df = pd.DataFrame(np.random.normal(size=(10, 2)))
msg = ("method must be either 'pearson', "
"'spearman', 'kendall', or a callable, ")
with pytest.raises(ValueError, match=msg):
df.corr(method="____")
def test_cov(self, float_frame, float_string_frame):
# min_periods no NAs (corner case)
expected = float_frame.cov()
result = float_frame.cov(min_periods=len(float_frame))
tm.assert_frame_equal(expected, result)
result = float_frame.cov(min_periods=len(float_frame) + 1)
assert isna(result.values).all()
# with NAs
frame = float_frame.copy()
frame['A'][:5] = np.nan
frame['B'][5:10] = np.nan
result = float_frame.cov(min_periods=len(float_frame) - 8)
expected = float_frame.cov()
expected.loc['A', 'B'] = np.nan
expected.loc['B', 'A'] = np.nan
# regular
float_frame['A'][:5] = np.nan
float_frame['B'][:10] = np.nan
cov = float_frame.cov()
tm.assert_almost_equal(cov['A']['C'],
float_frame['A'].cov(float_frame['C']))
# exclude non-numeric types
result = float_string_frame.cov()
expected = float_string_frame.loc[:, ['A', 'B', 'C', 'D']].cov()
tm.assert_frame_equal(result, expected)
# Single column frame
df = DataFrame(np.linspace(0.0, 1.0, 10))
result = df.cov()
expected = DataFrame(np.cov(df.values.T).reshape((1, 1)),
index=df.columns, columns=df.columns)
tm.assert_frame_equal(result, expected)
df.loc[0] = np.nan
result = df.cov()
expected = DataFrame(np.cov(df.values[1:].T).reshape((1, 1)),
index=df.columns, columns=df.columns)
tm.assert_frame_equal(result, expected)
def test_corrwith(self, datetime_frame):
a = datetime_frame
noise = Series(np.random.randn(len(a)), index=a.index)
b = datetime_frame.add(noise, axis=0)
# make sure order does not matter
b = b.reindex(columns=b.columns[::-1], index=b.index[::-1][10:])
del b['B']
colcorr = a.corrwith(b, axis=0)
tm.assert_almost_equal(colcorr['A'], a['A'].corr(b['A']))
rowcorr = a.corrwith(b, axis=1)
tm.assert_series_equal(rowcorr, a.T.corrwith(b.T, axis=0))
dropped = a.corrwith(b, axis=0, drop=True)
tm.assert_almost_equal(dropped['A'], a['A'].corr(b['A']))
assert 'B' not in dropped
dropped = a.corrwith(b, axis=1, drop=True)
assert a.index[-1] not in dropped.index
# non time-series data
index = ['a', 'b', 'c', 'd', 'e']
columns = ['one', 'two', 'three', 'four']
df1 = DataFrame(np.random.randn(5, 4), index=index, columns=columns)
df2 = DataFrame(np.random.randn(4, 4),
index=index[:4], columns=columns)
correls = df1.corrwith(df2, axis=1)
for row in index[:4]:
tm.assert_almost_equal(correls[row],
df1.loc[row].corr(df2.loc[row]))
def test_corrwith_with_objects(self):
df1 = tm.makeTimeDataFrame()
df2 = tm.makeTimeDataFrame()
cols = ['A', 'B', 'C', 'D']
df1['obj'] = 'foo'
df2['obj'] = 'bar'
result = df1.corrwith(df2)
expected = df1.loc[:, cols].corrwith(df2.loc[:, cols])
tm.assert_series_equal(result, expected)
result = df1.corrwith(df2, axis=1)
expected = df1.loc[:, cols].corrwith(df2.loc[:, cols], axis=1)
tm.assert_series_equal(result, expected)
def test_corrwith_series(self, datetime_frame):
result = datetime_frame.corrwith(datetime_frame['A'])
expected = datetime_frame.apply(datetime_frame['A'].corr)
tm.assert_series_equal(result, expected)
def test_corrwith_matches_corrcoef(self):
df1 = DataFrame(np.arange(10000), columns=['a'])
df2 = DataFrame(np.arange(10000) ** 2, columns=['a'])
c1 = df1.corrwith(df2)['a']
c2 = np.corrcoef(df1['a'], df2['a'])[0][1]
tm.assert_almost_equal(c1, c2)
assert c1 < 1
def test_corrwith_mixed_dtypes(self):
# GH 18570
df = pd.DataFrame({'a': [1, 4, 3, 2], 'b': [4, 6, 7, 3],
'c': ['a', 'b', 'c', 'd']})
s = pd.Series([0, 6, 7, 3])
result = df.corrwith(s)
corrs = [df['a'].corr(s), df['b'].corr(s)]
expected = pd.Series(data=corrs, index=['a', 'b'])
tm.assert_series_equal(result, expected)
def test_corrwith_index_intersection(self):
df1 = pd.DataFrame(np.random.random(size=(10, 2)),
columns=["a", "b"])
df2 = pd.DataFrame(np.random.random(size=(10, 3)),
columns=["a", "b", "c"])
result = df1.corrwith(df2, drop=True).index.sort_values()
expected = df1.columns.intersection(df2.columns).sort_values()
tm.assert_index_equal(result, expected)
def test_corrwith_index_union(self):
df1 = pd.DataFrame(np.random.random(size=(10, 2)),
columns=["a", "b"])
df2 = pd.DataFrame(np.random.random(size=(10, 3)),
columns=["a", "b", "c"])
result = df1.corrwith(df2, drop=False).index.sort_values()
expected = df1.columns.union(df2.columns).sort_values()
tm.assert_index_equal(result, expected)
def test_corrwith_dup_cols(self):
# GH 21925
df1 = pd.DataFrame(np.vstack([np.arange(10)] * 3).T)
df2 = df1.copy()
df2 = pd.concat((df2, df2[0]), axis=1)
result = df1.corrwith(df2)
expected = pd.Series(np.ones(4), index=[0, 0, 1, 2])
tm.assert_series_equal(result, expected)
@td.skip_if_no_scipy
def test_corrwith_spearman(self):
# GH 21925
df = pd.DataFrame(np.random.random(size=(100, 3)))
result = df.corrwith(df**2, method="spearman")
expected = Series(np.ones(len(result)))
tm.assert_series_equal(result, expected)
@td.skip_if_no_scipy
def test_corrwith_kendall(self):
# GH 21925
df = pd.DataFrame(np.random.random(size=(100, 3)))
result = df.corrwith(df**2, method="kendall")
expected = Series(np.ones(len(result)))
tm.assert_series_equal(result, expected)
# ---------------------------------------------------------------------
# Describe
def test_bool_describe_in_mixed_frame(self):
df = DataFrame({
'string_data': ['a', 'b', 'c', 'd', 'e'],
'bool_data': [True, True, False, False, False],
'int_data': [10, 20, 30, 40, 50],
})
# Integer data are included in .describe() output,
# Boolean and string data are not.
result = df.describe()
expected = DataFrame({'int_data': [5, 30, df.int_data.std(),
10, 20, 30, 40, 50]},
index=['count', 'mean', 'std', 'min', '25%',
'50%', '75%', 'max'])
tm.assert_frame_equal(result, expected)
# Top value is a boolean value that is False
result = df.describe(include=['bool'])
expected = DataFrame({'bool_data': [5, 2, False, 3]},
index=['count', 'unique', 'top', 'freq'])
tm.assert_frame_equal(result, expected)
def test_describe_bool_frame(self):
# GH 13891
df = pd.DataFrame({
'bool_data_1': [False, False, True, True],
'bool_data_2': [False, True, True, True]
})
result = df.describe()
expected = DataFrame({'bool_data_1': [4, 2, True, 2],
'bool_data_2': [4, 2, True, 3]},
index=['count', 'unique', 'top', 'freq'])
tm.assert_frame_equal(result, expected)
df = pd.DataFrame({
'bool_data': [False, False, True, True, False],
'int_data': [0, 1, 2, 3, 4]
})
result = df.describe()
expected = DataFrame({'int_data': [5, 2, df.int_data.std(), 0, 1,
2, 3, 4]},
index=['count', 'mean', 'std', 'min', '25%',
'50%', '75%', 'max'])
tm.assert_frame_equal(result, expected)
df = pd.DataFrame({
'bool_data': [False, False, True, True],
'str_data': ['a', 'b', 'c', 'a']
})
result = df.describe()
expected = DataFrame({'bool_data': [4, 2, True, 2],
'str_data': [4, 3, 'a', 2]},
index=['count', 'unique', 'top', 'freq'])
tm.assert_frame_equal(result, expected)
def test_describe_categorical(self):
df = DataFrame({'value': np.random.randint(0, 10000, 100)})
labels = ["{0} - {1}".format(i, i + 499) for i in range(0, 10000, 500)]
cat_labels = Categorical(labels, labels)
df = df.sort_values(by=['value'], ascending=True)
df['value_group'] = pd.cut(df.value, range(0, 10500, 500),
right=False, labels=cat_labels)
cat = df
# Categoricals should not show up together with numerical columns
result = cat.describe()
assert len(result.columns) == 1
# In a frame, describe() for the cat should be the same as for string
# arrays (count, unique, top, freq)
cat = Categorical(["a", "b", "b", "b"], categories=['a', 'b', 'c'],
ordered=True)
s = Series(cat)
result = s.describe()
expected = Series([4, 2, "b", 3],
index=['count', 'unique', 'top', 'freq'])
tm.assert_series_equal(result, expected)
cat = Series(Categorical(["a", "b", "c", "c"]))
df3 = DataFrame({"cat": cat, "s": ["a", "b", "c", "c"]})
result = df3.describe()
tm.assert_numpy_array_equal(result["cat"].values, result["s"].values)
def test_describe_categorical_columns(self):
# GH 11558
columns = pd.CategoricalIndex(['int1', 'int2', 'obj'],
ordered=True, name='XXX')
df = DataFrame({'int1': [10, 20, 30, 40, 50],
'int2': [10, 20, 30, 40, 50],
'obj': ['A', 0, None, 'X', 1]},
columns=columns)
result = df.describe()
exp_columns = pd.CategoricalIndex(['int1', 'int2'],
categories=['int1', 'int2', 'obj'],
ordered=True, name='XXX')
expected = DataFrame({'int1': [5, 30, df.int1.std(),
10, 20, 30, 40, 50],
'int2': [5, 30, df.int2.std(),
10, 20, 30, 40, 50]},
index=['count', 'mean', 'std', 'min', '25%',
'50%', '75%', 'max'],
columns=exp_columns)
tm.assert_frame_equal(result, expected)
tm.assert_categorical_equal(result.columns.values,
expected.columns.values)
def test_describe_datetime_columns(self):
columns = pd.DatetimeIndex(['2011-01-01', '2011-02-01', '2011-03-01'],
freq='MS', tz='US/Eastern', name='XXX')
df = DataFrame({0: [10, 20, 30, 40, 50],
1: [10, 20, 30, 40, 50],
2: ['A', 0, None, 'X', 1]})
df.columns = columns
result = df.describe()
exp_columns = pd.DatetimeIndex(['2011-01-01', '2011-02-01'],
freq='MS', tz='US/Eastern', name='XXX')
expected = DataFrame({0: [5, 30, df.iloc[:, 0].std(),
10, 20, 30, 40, 50],
1: [5, 30, df.iloc[:, 1].std(),
10, 20, 30, 40, 50]},
index=['count', 'mean', 'std', 'min', '25%',
'50%', '75%', 'max'])
expected.columns = exp_columns
tm.assert_frame_equal(result, expected)
assert result.columns.freq == 'MS'
assert result.columns.tz == expected.columns.tz
def test_describe_timedelta_values(self):
# GH 6145
t1 = pd.timedelta_range('1 days', freq='D', periods=5)
t2 = pd.timedelta_range('1 hours', freq='H', periods=5)
df = pd.DataFrame({'t1': t1, 't2': t2})
expected = DataFrame({'t1': [5, pd.Timedelta('3 days'),
df.iloc[:, 0].std(),
pd.Timedelta('1 days'),
pd.Timedelta('2 days'),
pd.Timedelta('3 days'),
pd.Timedelta('4 days'),
pd.Timedelta('5 days')],
't2': [5, pd.Timedelta('3 hours'),
df.iloc[:, 1].std(),
pd.Timedelta('1 hours'),
pd.Timedelta('2 hours'),
pd.Timedelta('3 hours'),
pd.Timedelta('4 hours'),
pd.Timedelta('5 hours')]},
index=['count', 'mean', 'std', 'min', '25%',
'50%', '75%', 'max'])
result = df.describe()
tm.assert_frame_equal(result, expected)
exp_repr = (" t1 t2\n"
"count 5 5\n"
"mean 3 days 00:00:00 0 days 03:00:00\n"
"std 1 days 13:56:50.394919 0 days 01:34:52.099788\n"
"min 1 days 00:00:00 0 days 01:00:00\n"
"25% 2 days 00:00:00 0 days 02:00:00\n"
"50% 3 days 00:00:00 0 days 03:00:00\n"
"75% 4 days 00:00:00 0 days 04:00:00\n"
"max 5 days 00:00:00 0 days 05:00:00")
assert repr(result) == exp_repr
def test_describe_tz_values(self, tz_naive_fixture):
# GH 21332
tz = tz_naive_fixture
s1 = Series(range(5))
start = Timestamp(2018, 1, 1)
end = Timestamp(2018, 1, 5)
s2 = Series(date_range(start, end, tz=tz))
df = pd.DataFrame({'s1': s1, 's2': s2})
expected = DataFrame({'s1': [5, np.nan, np.nan, np.nan, np.nan, np.nan,
2, 1.581139, 0, 1, 2, 3, 4],
's2': [5, 5, s2.value_counts().index[0], 1,
start.tz_localize(tz),
end.tz_localize(tz), np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan]},
index=['count', 'unique', 'top', 'freq', 'first',
'last', 'mean', 'std', 'min', '25%', '50%',
'75%', 'max']
)
result = df.describe(include='all')
tm.assert_frame_equal(result, expected)
# ---------------------------------------------------------------------
# Reductions
def test_stat_op_api(self, float_frame, float_string_frame):
assert_stat_op_api('count', float_frame, float_string_frame,
has_numeric_only=True)
assert_stat_op_api('sum', float_frame, float_string_frame,
has_numeric_only=True)
assert_stat_op_api('nunique', float_frame, float_string_frame)
assert_stat_op_api('mean', float_frame, float_string_frame)
assert_stat_op_api('product', float_frame, float_string_frame)
assert_stat_op_api('median', float_frame, float_string_frame)
assert_stat_op_api('min', float_frame, float_string_frame)
assert_stat_op_api('max', float_frame, float_string_frame)
assert_stat_op_api('mad', float_frame, float_string_frame)
assert_stat_op_api('var', float_frame, float_string_frame)
assert_stat_op_api('std', float_frame, float_string_frame)
assert_stat_op_api('sem', float_frame, float_string_frame)
assert_stat_op_api('median', float_frame, float_string_frame)
try:
from scipy.stats import skew, kurtosis # noqa:F401
assert_stat_op_api('skew', float_frame, float_string_frame)
assert_stat_op_api('kurt', float_frame, float_string_frame)
except ImportError:
pass
def test_stat_op_calc(self, float_frame_with_na, mixed_float_frame):
def count(s):
return notna(s).sum()
def nunique(s):
return len(algorithms.unique1d(s.dropna()))
def mad(x):
return np.abs(x - x.mean()).mean()
def var(x):
return np.var(x, ddof=1)
def std(x):
return np.std(x, ddof=1)
def sem(x):
return np.std(x, ddof=1) / np.sqrt(len(x))
def skewness(x):
from scipy.stats import skew # noqa:F811
if len(x) < 3:
return np.nan
return skew(x, bias=False)
def kurt(x):
from scipy.stats import kurtosis # noqa:F811
if len(x) < 4:
return np.nan
return kurtosis(x, bias=False)
assert_stat_op_calc('nunique', nunique, float_frame_with_na,
has_skipna=False, check_dtype=False,
check_dates=True)
# mixed types (with upcasting happening)
assert_stat_op_calc('sum', np.sum, mixed_float_frame.astype('float32'),
check_dtype=False, check_less_precise=True)
assert_stat_op_calc('sum', np.sum, float_frame_with_na,
skipna_alternative=np.nansum)
assert_stat_op_calc('mean', np.mean, float_frame_with_na,
check_dates=True)
assert_stat_op_calc('product', np.prod, float_frame_with_na)
assert_stat_op_calc('mad', mad, float_frame_with_na)
assert_stat_op_calc('var', var, float_frame_with_na)
assert_stat_op_calc('std', std, float_frame_with_na)
assert_stat_op_calc('sem', sem, float_frame_with_na)
assert_stat_op_calc('count', count, float_frame_with_na,
has_skipna=False, check_dtype=False,
check_dates=True)
try:
from scipy import skew, kurtosis # noqa:F401
assert_stat_op_calc('skew', skewness, float_frame_with_na)
assert_stat_op_calc('kurt', kurt, float_frame_with_na)
except ImportError:
pass
# TODO: Ensure warning isn't emitted in the first place
@pytest.mark.filterwarnings("ignore:All-NaN:RuntimeWarning")
def test_median(self, float_frame_with_na, int_frame):
def wrapper(x):
if isna(x).any():
return np.nan
return np.median(x)
assert_stat_op_calc('median', wrapper, float_frame_with_na,
check_dates=True)
assert_stat_op_calc('median', wrapper, int_frame, check_dtype=False,
check_dates=True)
@pytest.mark.parametrize('method', ['sum', 'mean', 'prod', 'var',
'std', 'skew', 'min', 'max'])
def test_stat_operators_attempt_obj_array(self, method):
# GH#676
data = {
'a': [-0.00049987540199591344, -0.0016467257772919831,
0.00067695870775883013],
'b': [-0, -0, 0.0],
'c': [0.00031111847529610595, 0.0014902627951905339,
-0.00094099200035979691]
}
df1 = DataFrame(data, index=['foo', 'bar', 'baz'], dtype='O')
df2 = DataFrame({0: [np.nan, 2], 1: [np.nan, 3],
2: [np.nan, 4]}, dtype=object)
for df in [df1, df2]:
assert df.values.dtype == np.object_
result = getattr(df, method)(1)
expected = getattr(df.astype('f8'), method)(1)
if method in ['sum', 'prod']:
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize('op', ['mean', 'std', 'var',
'skew', 'kurt', 'sem'])
def test_mixed_ops(self, op):
# GH#16116
df = DataFrame({'int': [1, 2, 3, 4],
'float': [1., 2., 3., 4.],
'str': ['a', 'b', 'c', 'd']})
result = getattr(df, op)()
assert len(result) == 2
with pd.option_context('use_bottleneck', False):
result = getattr(df, op)()
assert len(result) == 2
def test_reduce_mixed_frame(self):
# GH 6806
df = DataFrame({
'bool_data': [True, True, False, False, False],
'int_data': [10, 20, 30, 40, 50],
'string_data': ['a', 'b', 'c', 'd', 'e'],
})
df.reindex(columns=['bool_data', 'int_data', 'string_data'])
test = df.sum(axis=0)
tm.assert_numpy_array_equal(test.values,
np.array([2, 150, 'abcde'], dtype=object))
tm.assert_series_equal(test, df.T.sum(axis=1))
def test_nunique(self):
df = DataFrame({'A': [1, 1, 1],
'B': [1, 2, 3],
'C': [1, np.nan, 3]})
tm.assert_series_equal(df.nunique(), Series({'A': 1, 'B': 3, 'C': 2}))
tm.assert_series_equal(df.nunique(dropna=False),
Series({'A': 1, 'B': 3, 'C': 3}))
tm.assert_series_equal(df.nunique(axis=1), Series({0: 1, 1: 2, 2: 2}))
tm.assert_series_equal(df.nunique(axis=1, dropna=False),
Series({0: 1, 1: 3, 2: 2}))
@pytest.mark.parametrize('tz', [None, 'UTC'])
def test_mean_mixed_datetime_numeric(self, tz):
# https://github.com/pandas-dev/pandas/issues/24752
df = pd.DataFrame({"A": [1, 1],
"B": [pd.Timestamp('2000', tz=tz)] * 2})
result = df.mean()
expected = pd.Series([1.0], index=['A'])
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize('tz', [None, 'UTC'])
def test_mean_excludeds_datetimes(self, tz):
# https://github.com/pandas-dev/pandas/issues/24752
# Our long-term desired behavior is unclear, but the behavior in
# 0.24.0rc1 was buggy.
df = pd.DataFrame({"A": [pd.Timestamp('2000', tz=tz)] * 2})
result = df.mean()
expected = pd.Series()
tm.assert_series_equal(result, expected)
def test_var_std(self, datetime_frame):
result = datetime_frame.std(ddof=4)
expected = datetime_frame.apply(lambda x: x.std(ddof=4))
tm.assert_almost_equal(result, expected)
result = datetime_frame.var(ddof=4)
expected = datetime_frame.apply(lambda x: x.var(ddof=4))
tm.assert_almost_equal(result, expected)
arr = np.repeat(np.random.random((1, 1000)), 1000, 0)
result = nanops.nanvar(arr, axis=0)
assert not (result < 0).any()
with pd.option_context('use_bottleneck', False):
result = nanops.nanvar(arr, axis=0)
assert not (result < 0).any()
@pytest.mark.parametrize(
"meth", ['sem', 'var', 'std'])
def test_numeric_only_flag(self, meth):
# GH 9201
df1 = DataFrame(np.random.randn(5, 3), columns=['foo', 'bar', 'baz'])
# set one entry to a number in str format
df1.loc[0, 'foo'] = '100'
df2 = DataFrame(np.random.randn(5, 3), columns=['foo', 'bar', 'baz'])
# set one entry to a non-number str
df2.loc[0, 'foo'] = 'a'
result = getattr(df1, meth)(axis=1, numeric_only=True)
expected = getattr(df1[['bar', 'baz']], meth)(axis=1)
tm.assert_series_equal(expected, result)
result = getattr(df2, meth)(axis=1, numeric_only=True)
expected = getattr(df2[['bar', 'baz']], meth)(axis=1)
tm.assert_series_equal(expected, result)
# df1 has all numbers, df2 has a letter inside
msg = r"unsupported operand type\(s\) for -: 'float' and 'str'"
with pytest.raises(TypeError, match=msg):
getattr(df1, meth)(axis=1, numeric_only=False)
msg = "could not convert string to float: 'a'"
with pytest.raises(TypeError, match=msg):
getattr(df2, meth)(axis=1, numeric_only=False)
def test_sem(self, datetime_frame):
result = datetime_frame.sem(ddof=4)
expected = datetime_frame.apply(
lambda x: x.std(ddof=4) / np.sqrt(len(x)))
tm.assert_almost_equal(result, expected)
arr = np.repeat(np.random.random((1, 1000)), 1000, 0)
result = nanops.nansem(arr, axis=0)
assert not (result < 0).any()
with pd.option_context('use_bottleneck', False):
result = nanops.nansem(arr, axis=0)
assert not (result < 0).any()
@td.skip_if_no_scipy
def test_kurt(self):
index = MultiIndex(levels=[['bar'], ['one', 'two', 'three'], [0, 1]],
codes=[[0, 0, 0, 0, 0, 0],
[0, 1, 2, 0, 1, 2],
[0, 1, 0, 1, 0, 1]])
df = DataFrame(np.random.randn(6, 3), index=index)
kurt = df.kurt()
kurt2 = df.kurt(level=0).xs('bar')
tm.assert_series_equal(kurt, kurt2, check_names=False)
assert kurt.name is None
assert kurt2.name == 'bar'
@pytest.mark.parametrize("dropna, expected", [
(True, {'A': [12],
'B': [10.0],
'C': [1.0],
'D': ['a'],
'E': Categorical(['a'], categories=['a']),
'F': to_datetime(['2000-1-2']),
'G': to_timedelta(['1 days'])}),
(False, {'A': [12],
'B': [10.0],
'C': [np.nan],
'D': np.array([np.nan], dtype=object),
'E': Categorical([np.nan], categories=['a']),
'F': [pd.NaT],
'G': to_timedelta([pd.NaT])}),
(True, {'H': [8, 9, np.nan, np.nan],
'I': [8, 9, np.nan, np.nan],
'J': [1, np.nan, np.nan, np.nan],
'K': Categorical(['a', np.nan, np.nan, np.nan],
categories=['a']),
'L': to_datetime(['2000-1-2', 'NaT', 'NaT', 'NaT']),
'M': to_timedelta(['1 days', 'nan', 'nan', 'nan']),
'N': [0, 1, 2, 3]}),
(False, {'H': [8, 9, np.nan, np.nan],
'I': [8, 9, np.nan, np.nan],
'J': [1, np.nan, np.nan, np.nan],
'K': Categorical([np.nan, 'a', np.nan, np.nan],
categories=['a']),
'L': to_datetime(['NaT', '2000-1-2', 'NaT', 'NaT']),
'M': to_timedelta(['nan', '1 days', 'nan', 'nan']),
'N': [0, 1, 2, 3]})
])
def test_mode_dropna(self, dropna, expected):
df = DataFrame({"A": [12, 12, 19, 11],
"B": [10, 10, np.nan, 3],
"C": [1, np.nan, np.nan, np.nan],
"D": [np.nan, np.nan, 'a', np.nan],
"E": Categorical([np.nan, np.nan, 'a', np.nan]),
"F": to_datetime(['NaT', '2000-1-2', 'NaT', 'NaT']),
"G": to_timedelta(['1 days', 'nan', 'nan', 'nan']),
"H": [8, 8, 9, 9],
"I": [9, 9, 8, 8],
"J": [1, 1, np.nan, np.nan],
"K": Categorical(['a', np.nan, 'a', np.nan]),
"L": to_datetime(['2000-1-2', '2000-1-2',
'NaT', 'NaT']),
"M": to_timedelta(['1 days', 'nan',
'1 days', 'nan']),
"N": np.arange(4, dtype='int64')})
result = df[sorted(list(expected.keys()))].mode(dropna=dropna)
expected = DataFrame(expected)
tm.assert_frame_equal(result, expected)
def test_mode_sortwarning(self):
# Check for the warning that is raised when the mode
# results cannot be sorted
df = DataFrame({"A": [np.nan, np.nan, 'a', 'a']})
expected = DataFrame({'A': ['a', np.nan]})
with tm.assert_produces_warning(UserWarning, check_stacklevel=False):
result = df.mode(dropna=False)
result = result.sort_values(by='A').reset_index(drop=True)
tm.assert_frame_equal(result, expected)
def test_operators_timedelta64(self):
df = DataFrame(dict(A=date_range('2012-1-1', periods=3, freq='D'),
B=date_range('2012-1-2', periods=3, freq='D'),
C=Timestamp('20120101') -
timedelta(minutes=5, seconds=5)))
diffs = DataFrame(dict(A=df['A'] - df['C'],
B=df['A'] - df['B']))
# min
result = diffs.min()
assert result[0] == diffs.loc[0, 'A']
assert result[1] == diffs.loc[0, 'B']
result = diffs.min(axis=1)
assert (result == diffs.loc[0, 'B']).all()
# max
result = diffs.max()
assert result[0] == diffs.loc[2, 'A']
assert result[1] == diffs.loc[2, 'B']
result = diffs.max(axis=1)
assert (result == diffs['A']).all()
# abs
result = diffs.abs()
result2 = abs(diffs)
expected = DataFrame(dict(A=df['A'] - df['C'],
B=df['B'] - df['A']))
tm.assert_frame_equal(result, expected)
tm.assert_frame_equal(result2, expected)
# mixed frame
mixed = diffs.copy()
mixed['C'] = 'foo'
mixed['D'] = 1
mixed['E'] = 1.
mixed['F'] = Timestamp('20130101')
# results in an object array
result = mixed.min()
expected = Series([pd.Timedelta(timedelta(seconds=5 * 60 + 5)),
pd.Timedelta(timedelta(days=-1)),
'foo', 1, 1.0,
Timestamp('20130101')],
index=mixed.columns)
tm.assert_series_equal(result, expected)
# excludes numeric
result = mixed.min(axis=1)
expected = Series([1, 1, 1.], index=[0, 1, 2])
tm.assert_series_equal(result, expected)
# works when only those columns are selected
result = mixed[['A', 'B']].min(1)
expected = Series([timedelta(days=-1)] * 3)
tm.assert_series_equal(result, expected)
result = mixed[['A', 'B']].min()
expected = Series([timedelta(seconds=5 * 60 + 5),
timedelta(days=-1)], index=['A', 'B'])
tm.assert_series_equal(result, expected)
# GH 3106
df = DataFrame({'time': date_range('20130102', periods=5),
'time2': date_range('20130105', periods=5)})
df['off1'] = df['time2'] - df['time']
assert df['off1'].dtype == 'timedelta64[ns]'
df['off2'] = df['time'] - df['time2']
df._consolidate_inplace()
assert df['off1'].dtype == 'timedelta64[ns]'
assert df['off2'].dtype == 'timedelta64[ns]'
def test_sum_corner(self):
empty_frame = DataFrame()
axis0 = empty_frame.sum(0)
axis1 = empty_frame.sum(1)
assert isinstance(axis0, Series)
assert isinstance(axis1, Series)
assert len(axis0) == 0
assert len(axis1) == 0
@pytest.mark.parametrize('method, unit', [
('sum', 0),
('prod', 1),
])
def test_sum_prod_nanops(self, method, unit):
idx = ['a', 'b', 'c']
df = pd.DataFrame({"a": [unit, unit],
"b": [unit, np.nan],
"c": [np.nan, np.nan]})
# The default
result = getattr(df, method)
expected = pd.Series([unit, unit, unit], index=idx, dtype='float64')
# min_count=1
result = getattr(df, method)(min_count=1)
expected = pd.Series([unit, unit, np.nan], index=idx)
tm.assert_series_equal(result, expected)
# min_count=0
result = getattr(df, method)(min_count=0)
expected = pd.Series([unit, unit, unit], index=idx, dtype='float64')
tm.assert_series_equal(result, expected)
result = getattr(df.iloc[1:], method)(min_count=1)
expected = pd.Series([unit, np.nan, np.nan], index=idx)
tm.assert_series_equal(result, expected)
# min_count > 1
df = pd.DataFrame({"A": [unit] * 10, "B": [unit] * 5 + [np.nan] * 5})
result = getattr(df, method)(min_count=5)
expected = pd.Series(result, index=['A', 'B'])
tm.assert_series_equal(result, expected)
result = getattr(df, method)(min_count=6)
expected = pd.Series(result, index=['A', 'B'])
tm.assert_series_equal(result, expected)
def test_sum_nanops_timedelta(self):
# prod isn't defined on timedeltas
idx = ['a', 'b', 'c']
df = pd.DataFrame({"a": [0, 0],
"b": [0, np.nan],
"c": [np.nan, np.nan]})
df2 = df.apply(pd.to_timedelta)
# 0 by default
result = df2.sum()
expected = pd.Series([0, 0, 0], dtype='m8[ns]', index=idx)
tm.assert_series_equal(result, expected)
# min_count=0
result = df2.sum(min_count=0)
tm.assert_series_equal(result, expected)
# min_count=1
result = df2.sum(min_count=1)
expected = pd.Series([0, 0, np.nan], dtype='m8[ns]', index=idx)
tm.assert_series_equal(result, expected)
def test_sum_object(self, float_frame):
values = float_frame.values.astype(int)
frame = DataFrame(values, index=float_frame.index,
columns=float_frame.columns)
deltas = frame * timedelta(1)
deltas.sum()
def test_sum_bool(self, float_frame):
# ensure this works, bug report
bools = np.isnan(float_frame)
bools.sum(1)
bools.sum(0)
def test_mean_corner(self, float_frame, float_string_frame):
# unit test when have object data
the_mean = float_string_frame.mean(axis=0)
the_sum = float_string_frame.sum(axis=0, numeric_only=True)
tm.assert_index_equal(the_sum.index, the_mean.index)
assert len(the_mean.index) < len(float_string_frame.columns)
# xs sum mixed type, just want to know it works...
the_mean = float_string_frame.mean(axis=1)
the_sum = float_string_frame.sum(axis=1, numeric_only=True)
tm.assert_index_equal(the_sum.index, the_mean.index)
# take mean of boolean column
float_frame['bool'] = float_frame['A'] > 0
means = float_frame.mean(0)
assert means['bool'] == float_frame['bool'].values.mean()
def test_stats_mixed_type(self, float_string_frame):
# don't blow up
float_string_frame.std(1)
float_string_frame.var(1)
float_string_frame.mean(1)
float_string_frame.skew(1)
def test_sum_bools(self):
df = DataFrame(index=lrange(1), columns=lrange(10))
bools = isna(df)
assert bools.sum(axis=1)[0] == 10
# ---------------------------------------------------------------------
# Cumulative Reductions - cumsum, cummax, ...
def test_cumsum_corner(self):
dm = DataFrame(np.arange(20).reshape(4, 5),
index=lrange(4), columns=lrange(5))
# ?(wesm)
result = dm.cumsum() # noqa
def test_cumsum(self, datetime_frame):
datetime_frame.loc[5:10, 0] = np.nan
datetime_frame.loc[10:15, 1] = np.nan
datetime_frame.loc[15:, 2] = np.nan
# axis = 0
cumsum = datetime_frame.cumsum()
expected = datetime_frame.apply(Series.cumsum)
tm.assert_frame_equal(cumsum, expected)
# axis = 1
cumsum = datetime_frame.cumsum(axis=1)
expected = datetime_frame.apply(Series.cumsum, axis=1)
tm.assert_frame_equal(cumsum, expected)
# works
df = DataFrame({'A': np.arange(20)}, index=np.arange(20))
result = df.cumsum() # noqa
# fix issue
cumsum_xs = datetime_frame.cumsum(axis=1)
assert np.shape(cumsum_xs) == np.shape(datetime_frame)
def test_cumprod(self, datetime_frame):
datetime_frame.loc[5:10, 0] = np.nan
datetime_frame.loc[10:15, 1] = np.nan
datetime_frame.loc[15:, 2] = np.nan
# axis = 0
cumprod = datetime_frame.cumprod()
expected = datetime_frame.apply(Series.cumprod)
tm.assert_frame_equal(cumprod, expected)
# axis = 1
cumprod = datetime_frame.cumprod(axis=1)
expected = datetime_frame.apply(Series.cumprod, axis=1)
tm.assert_frame_equal(cumprod, expected)
# fix issue
cumprod_xs = datetime_frame.cumprod(axis=1)
assert np.shape(cumprod_xs) == np.shape(datetime_frame)
# ints
df = datetime_frame.fillna(0).astype(int)
df.cumprod(0)
df.cumprod(1)
# ints32
df = datetime_frame.fillna(0).astype(np.int32)
df.cumprod(0)
df.cumprod(1)
def test_cummin(self, datetime_frame):
datetime_frame.loc[5:10, 0] = np.nan
datetime_frame.loc[10:15, 1] = np.nan
datetime_frame.loc[15:, 2] = np.nan
# axis = 0
cummin = datetime_frame.cummin()
expected = datetime_frame.apply(Series.cummin)
tm.assert_frame_equal(cummin, expected)
# axis = 1
cummin = datetime_frame.cummin(axis=1)
expected = datetime_frame.apply(Series.cummin, axis=1)
tm.assert_frame_equal(cummin, expected)
# it works
df = DataFrame({'A': np.arange(20)}, index=np.arange(20))
result = df.cummin() # noqa
# fix issue
cummin_xs = datetime_frame.cummin(axis=1)
assert np.shape(cummin_xs) == np.shape(datetime_frame)
def test_cummax(self, datetime_frame):
datetime_frame.loc[5:10, 0] = np.nan
datetime_frame.loc[10:15, 1] = np.nan
datetime_frame.loc[15:, 2] = np.nan
# axis = 0
cummax = datetime_frame.cummax()
expected = datetime_frame.apply(Series.cummax)
tm.assert_frame_equal(cummax, expected)
# axis = 1
cummax = datetime_frame.cummax(axis=1)
expected = datetime_frame.apply(Series.cummax, axis=1)
tm.assert_frame_equal(cummax, expected)
# it works
df = DataFrame({'A': np.arange(20)}, index=np.arange(20))
result = df.cummax() # noqa
# fix issue
cummax_xs = datetime_frame.cummax(axis=1)
assert np.shape(cummax_xs) == np.shape(datetime_frame)
# ---------------------------------------------------------------------
# Miscellanea
def test_count(self):
# corner case
frame = DataFrame()
ct1 = frame.count(1)
assert isinstance(ct1, Series)
ct2 = frame.count(0)
assert isinstance(ct2, Series)
# GH#423
df = DataFrame(index=lrange(10))
result = df.count(1)
expected = Series(0, index=df.index)
tm.assert_series_equal(result, expected)
df = DataFrame(columns=lrange(10))
result = df.count(0)
expected = Series(0, index=df.columns)
tm.assert_series_equal(result, expected)
df = DataFrame()
result = df.count()
expected = Series(0, index=[])
tm.assert_series_equal(result, expected)
def test_count_objects(self, float_string_frame):
dm = DataFrame(float_string_frame._series)
df = DataFrame(float_string_frame._series)
tm.assert_series_equal(dm.count(), df.count())
tm.assert_series_equal(dm.count(1), df.count(1))
def test_pct_change(self):
# GH#11150
pnl = DataFrame([np.arange(0, 40, 10),
np.arange(0, 40, 10),
np.arange(0, 40, 10)]).astype(np.float64)
pnl.iat[1, 0] = np.nan
pnl.iat[1, 1] = np.nan
pnl.iat[2, 3] = 60
for axis in range(2):
expected = pnl.ffill(axis=axis) / pnl.ffill(axis=axis).shift(
axis=axis) - 1
result = pnl.pct_change(axis=axis, fill_method='pad')
tm.assert_frame_equal(result, expected)
# ----------------------------------------------------------------------
# Index of max / min
def test_idxmin(self, float_frame, int_frame):
frame = float_frame
frame.loc[5:10] = np.nan
frame.loc[15:20, -2:] = np.nan
for skipna in [True, False]:
for axis in [0, 1]:
for df in [frame, int_frame]:
result = df.idxmin(axis=axis, skipna=skipna)
expected = df.apply(Series.idxmin, axis=axis,
skipna=skipna)
tm.assert_series_equal(result, expected)
msg = ("No axis named 2 for object type"
" <class 'pandas.core.frame.DataFrame'>")
with pytest.raises(ValueError, match=msg):
frame.idxmin(axis=2)
def test_idxmax(self, float_frame, int_frame):
frame = float_frame
frame.loc[5:10] = np.nan
frame.loc[15:20, -2:] = np.nan
for skipna in [True, False]:
for axis in [0, 1]:
for df in [frame, int_frame]:
result = df.idxmax(axis=axis, skipna=skipna)
expected = df.apply(Series.idxmax, axis=axis,
skipna=skipna)
tm.assert_series_equal(result, expected)
msg = ("No axis named 2 for object type"
" <class 'pandas.core.frame.DataFrame'>")
with pytest.raises(ValueError, match=msg):
frame.idxmax(axis=2)
# ----------------------------------------------------------------------
# Logical reductions
@pytest.mark.parametrize('opname', ['any', 'all'])
def test_any_all(self, opname, bool_frame_with_na, float_string_frame):
assert_bool_op_calc(opname, getattr(np, opname), bool_frame_with_na,
has_skipna=True)
assert_bool_op_api(opname, bool_frame_with_na, float_string_frame,
has_bool_only=True)
def test_any_all_extra(self):
df = DataFrame({
'A': [True, False, False],
'B': [True, True, False],
'C': [True, True, True],
}, index=['a', 'b', 'c'])
result = df[['A', 'B']].any(1)
expected = Series([True, True, False], index=['a', 'b', 'c'])
tm.assert_series_equal(result, expected)
result = df[['A', 'B']].any(1, bool_only=True)
tm.assert_series_equal(result, expected)
result = df.all(1)
expected = Series([True, False, False], index=['a', 'b', 'c'])
tm.assert_series_equal(result, expected)
result = df.all(1, bool_only=True)
tm.assert_series_equal(result, expected)
# Axis is None
result = df.all(axis=None).item()
assert result is False
result = df.any(axis=None).item()
assert result is True
result = df[['C']].all(axis=None).item()
assert result is True
def test_any_datetime(self):
# GH 23070
float_data = [1, np.nan, 3, np.nan]
datetime_data = [pd.Timestamp('1960-02-15'),
pd.Timestamp('1960-02-16'),
pd.NaT,
pd.NaT]
df = DataFrame({
"A": float_data,
"B": datetime_data
})
result = df.any(1)
expected = Series([True, True, True, False])
tm.assert_series_equal(result, expected)
def test_any_all_bool_only(self):
# GH 25101
df = DataFrame({"col1": [1, 2, 3],
"col2": [4, 5, 6],
"col3": [None, None, None]})
result = df.all(bool_only=True)
expected = Series(dtype=np.bool)
tm.assert_series_equal(result, expected)
df = DataFrame({"col1": [1, 2, 3],
"col2": [4, 5, 6],
"col3": [None, None, None],
"col4": [False, False, True]})
result = df.all(bool_only=True)
expected = Series({"col4": False})
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize('func, data, expected', [
(np.any, {}, False),
(np.all, {}, True),
(np.any, {'A': []}, False),
(np.all, {'A': []}, True),
(np.any, {'A': [False, False]}, False),
(np.all, {'A': [False, False]}, False),
(np.any, {'A': [True, False]}, True),
(np.all, {'A': [True, False]}, False),
(np.any, {'A': [True, True]}, True),
(np.all, {'A': [True, True]}, True),
(np.any, {'A': [False], 'B': [False]}, False),
(np.all, {'A': [False], 'B': [False]}, False),
(np.any, {'A': [False, False], 'B': [False, True]}, True),
(np.all, {'A': [False, False], 'B': [False, True]}, False),
# other types
(np.all, {'A': pd.Series([0.0, 1.0], dtype='float')}, False),
(np.any, {'A': pd.Series([0.0, 1.0], dtype='float')}, True),
(np.all, {'A': pd.Series([0, 1], dtype=int)}, False),
(np.any, {'A': pd.Series([0, 1], dtype=int)}, True),
pytest.param(np.all, {'A': pd.Series([0, 1], dtype='M8[ns]')}, False,
marks=[td.skip_if_np_lt_115]),
pytest.param(np.any, {'A': pd.Series([0, 1], dtype='M8[ns]')}, True,
marks=[td.skip_if_np_lt_115]),
pytest.param(np.all, {'A': pd.Series([1, 2], dtype='M8[ns]')}, True,
marks=[td.skip_if_np_lt_115]),
pytest.param(np.any, {'A': pd.Series([1, 2], dtype='M8[ns]')}, True,
marks=[td.skip_if_np_lt_115]),
pytest.param(np.all, {'A': pd.Series([0, 1], dtype='m8[ns]')}, False,
marks=[td.skip_if_np_lt_115]),
pytest.param(np.any, {'A': pd.Series([0, 1], dtype='m8[ns]')}, True,
marks=[td.skip_if_np_lt_115]),
pytest.param(np.all, {'A': pd.Series([1, 2], dtype='m8[ns]')}, True,
marks=[td.skip_if_np_lt_115]),
pytest.param(np.any, {'A': pd.Series([1, 2], dtype='m8[ns]')}, True,
marks=[td.skip_if_np_lt_115]),
(np.all, {'A': pd.Series([0, 1], dtype='category')}, False),
(np.any, {'A': pd.Series([0, 1], dtype='category')}, True),
(np.all, {'A': pd.Series([1, 2], dtype='category')}, True),
(np.any, {'A': pd.Series([1, 2], dtype='category')}, True),
# # Mix
# GH 21484
# (np.all, {'A': pd.Series([10, 20], dtype='M8[ns]'),
# 'B': pd.Series([10, 20], dtype='m8[ns]')}, True),
])
def test_any_all_np_func(self, func, data, expected):
# GH 19976
data = DataFrame(data)
result = func(data)
assert isinstance(result, np.bool_)
assert result.item() is expected
# method version
result = getattr(DataFrame(data), func.__name__)(axis=None)
assert isinstance(result, np.bool_)
assert result.item() is expected
def test_any_all_object(self):
# GH 19976
result = np.all(DataFrame(columns=['a', 'b'])).item()
assert result is True
result = np.any(DataFrame(columns=['a', 'b'])).item()
assert result is False
@pytest.mark.parametrize('method', ['any', 'all'])
def test_any_all_level_axis_none_raises(self, method):
df = DataFrame(
{"A": 1},
index=MultiIndex.from_product([['A', 'B'], ['a', 'b']],
names=['out', 'in'])
)
xpr = "Must specify 'axis' when aggregating by level."
with pytest.raises(ValueError, match=xpr):
getattr(df, method)(axis=None, level='out')
# ----------------------------------------------------------------------
# Isin
def test_isin(self):
# GH 4211
df = DataFrame({'vals': [1, 2, 3, 4], 'ids': ['a', 'b', 'f', 'n'],
'ids2': ['a', 'n', 'c', 'n']},
index=['foo', 'bar', 'baz', 'qux'])
other = ['a', 'b', 'c']
result = df.isin(other)
expected = DataFrame([df.loc[s].isin(other) for s in df.index])
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("empty", [[], Series(), np.array([])])
def test_isin_empty(self, empty):
# GH 16991
df = DataFrame({'A': ['a', 'b', 'c'], 'B': ['a', 'e', 'f']})
expected = DataFrame(False, df.index, df.columns)
result = df.isin(empty)
tm.assert_frame_equal(result, expected)
def test_isin_dict(self):
df = DataFrame({'A': ['a', 'b', 'c'], 'B': ['a', 'e', 'f']})
d = {'A': ['a']}
expected = DataFrame(False, df.index, df.columns)
expected.loc[0, 'A'] = True
result = df.isin(d)
tm.assert_frame_equal(result, expected)
# non unique columns
df = DataFrame({'A': ['a', 'b', 'c'], 'B': ['a', 'e', 'f']})
df.columns = ['A', 'A']
expected = DataFrame(False, df.index, df.columns)
expected.loc[0, 'A'] = True
result = df.isin(d)
tm.assert_frame_equal(result, expected)
def test_isin_with_string_scalar(self):
# GH 4763
df = DataFrame({'vals': [1, 2, 3, 4], 'ids': ['a', 'b', 'f', 'n'],
'ids2': ['a', 'n', 'c', 'n']},
index=['foo', 'bar', 'baz', 'qux'])
with pytest.raises(TypeError):
df.isin('a')
with pytest.raises(TypeError):
df.isin('aaa')
def test_isin_df(self):
df1 = DataFrame({'A': [1, 2, 3, 4], 'B': [2, np.nan, 4, 4]})
df2 = DataFrame({'A': [0, 2, 12, 4], 'B': [2, np.nan, 4, 5]})
expected = DataFrame(False, df1.index, df1.columns)
result = df1.isin(df2)
expected['A'].loc[[1, 3]] = True
expected['B'].loc[[0, 2]] = True
tm.assert_frame_equal(result, expected)
# partial overlapping columns
df2.columns = ['A', 'C']
result = df1.isin(df2)
expected['B'] = False
tm.assert_frame_equal(result, expected)
def test_isin_tuples(self):
# GH 16394
df = pd.DataFrame({'A': [1, 2, 3], 'B': ['a', 'b', 'f']})
df['C'] = list(zip(df['A'], df['B']))
result = df['C'].isin([(1, 'a')])
tm.assert_series_equal(result,
Series([True, False, False], name="C"))
def test_isin_df_dupe_values(self):
df1 = DataFrame({'A': [1, 2, 3, 4], 'B': [2, np.nan, 4, 4]})
# just cols duped
df2 = DataFrame([[0, 2], [12, 4], [2, np.nan], [4, 5]],
columns=['B', 'B'])
with pytest.raises(ValueError):
df1.isin(df2)
# just index duped
df2 = DataFrame([[0, 2], [12, 4], [2, np.nan], [4, 5]],
columns=['A', 'B'], index=[0, 0, 1, 1])
with pytest.raises(ValueError):
df1.isin(df2)
# cols and index:
df2.columns = ['B', 'B']
with pytest.raises(ValueError):
df1.isin(df2)
def test_isin_dupe_self(self):
other = DataFrame({'A': [1, 0, 1, 0], 'B': [1, 1, 0, 0]})
df = DataFrame([[1, 1], [1, 0], [0, 0]], columns=['A', 'A'])
result = df.isin(other)
expected = DataFrame(False, index=df.index, columns=df.columns)
expected.loc[0] = True
expected.iloc[1, 1] = True
tm.assert_frame_equal(result, expected)
def test_isin_against_series(self):
df = pd.DataFrame({'A': [1, 2, 3, 4], 'B': [2, np.nan, 4, 4]},
index=['a', 'b', 'c', 'd'])
s = pd.Series([1, 3, 11, 4], index=['a', 'b', 'c', 'd'])
expected = DataFrame(False, index=df.index, columns=df.columns)
expected['A'].loc['a'] = True
expected.loc['d'] = True
result = df.isin(s)
tm.assert_frame_equal(result, expected)
def test_isin_multiIndex(self):
idx = MultiIndex.from_tuples([(0, 'a', 'foo'), (0, 'a', 'bar'),
(0, 'b', 'bar'), (0, 'b', 'baz'),
(2, 'a', 'foo'), (2, 'a', 'bar'),
(2, 'c', 'bar'), (2, 'c', 'baz'),
(1, 'b', 'foo'), (1, 'b', 'bar'),
(1, 'c', 'bar'), (1, 'c', 'baz')])
df1 = DataFrame({'A': np.ones(12),
'B': np.zeros(12)}, index=idx)
df2 = DataFrame({'A': [1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1],
'B': [1, 1, 0, 1, 1, 0, 0, 1, 1, 1, 1, 1]})
# against regular index
expected = DataFrame(False, index=df1.index, columns=df1.columns)
result = df1.isin(df2)
tm.assert_frame_equal(result, expected)
df2.index = idx
expected = df2.values.astype(np.bool)
expected[:, 1] = ~expected[:, 1]
expected = DataFrame(expected, columns=['A', 'B'], index=idx)
result = df1.isin(df2)
tm.assert_frame_equal(result, expected)
def test_isin_empty_datetimelike(self):
# GH 15473
df1_ts = DataFrame({'date':
pd.to_datetime(['2014-01-01', '2014-01-02'])})
df1_td = DataFrame({'date':
[pd.Timedelta(1, 's'), pd.Timedelta(2, 's')]})
df2 = DataFrame({'date': []})
df3 = DataFrame()
expected = DataFrame({'date': [False, False]})
result = df1_ts.isin(df2)
tm.assert_frame_equal(result, expected)
result = df1_ts.isin(df3)
tm.assert_frame_equal(result, expected)
result = df1_td.isin(df2)
tm.assert_frame_equal(result, expected)
result = df1_td.isin(df3)
tm.assert_frame_equal(result, expected)
# ---------------------------------------------------------------------
# Rounding
def test_round(self):
# GH 2665
# Test that rounding an empty DataFrame does nothing
df = DataFrame()
tm.assert_frame_equal(df, df.round())
# Here's the test frame we'll be working with
df = DataFrame({'col1': [1.123, 2.123, 3.123],
'col2': [1.234, 2.234, 3.234]})
# Default round to integer (i.e. decimals=0)
expected_rounded = DataFrame(
{'col1': [1., 2., 3.], 'col2': [1., 2., 3.]})
tm.assert_frame_equal(df.round(), expected_rounded)
# Round with an integer
decimals = 2
expected_rounded = DataFrame({'col1': [1.12, 2.12, 3.12],
'col2': [1.23, 2.23, 3.23]})
tm.assert_frame_equal(df.round(decimals), expected_rounded)
# This should also work with np.round (since np.round dispatches to
# df.round)
tm.assert_frame_equal(np.round(df, decimals), expected_rounded)
# Round with a list
round_list = [1, 2]
with pytest.raises(TypeError):
df.round(round_list)
# Round with a dictionary
expected_rounded = DataFrame(
{'col1': [1.1, 2.1, 3.1], 'col2': [1.23, 2.23, 3.23]})
round_dict = {'col1': 1, 'col2': 2}
tm.assert_frame_equal(df.round(round_dict), expected_rounded)
# Incomplete dict
expected_partially_rounded = DataFrame(
{'col1': [1.123, 2.123, 3.123], 'col2': [1.2, 2.2, 3.2]})
partial_round_dict = {'col2': 1}
tm.assert_frame_equal(df.round(partial_round_dict),
expected_partially_rounded)
# Dict with unknown elements
wrong_round_dict = {'col3': 2, 'col2': 1}
tm.assert_frame_equal(df.round(wrong_round_dict),
expected_partially_rounded)
# float input to `decimals`
non_int_round_dict = {'col1': 1, 'col2': 0.5}
with pytest.raises(TypeError):
df.round(non_int_round_dict)
# String input
non_int_round_dict = {'col1': 1, 'col2': 'foo'}
with pytest.raises(TypeError):
df.round(non_int_round_dict)
non_int_round_Series = Series(non_int_round_dict)
with pytest.raises(TypeError):
df.round(non_int_round_Series)
# List input
non_int_round_dict = {'col1': 1, 'col2': [1, 2]}
with pytest.raises(TypeError):
df.round(non_int_round_dict)
non_int_round_Series = Series(non_int_round_dict)
with pytest.raises(TypeError):
df.round(non_int_round_Series)
# Non integer Series inputs
non_int_round_Series = Series(non_int_round_dict)
with pytest.raises(TypeError):
df.round(non_int_round_Series)
non_int_round_Series = Series(non_int_round_dict)
with pytest.raises(TypeError):
df.round(non_int_round_Series)
# Negative numbers
negative_round_dict = {'col1': -1, 'col2': -2}
big_df = df * 100
expected_neg_rounded = DataFrame(
{'col1': [110., 210, 310], 'col2': [100., 200, 300]})
tm.assert_frame_equal(big_df.round(negative_round_dict),
expected_neg_rounded)
# nan in Series round
nan_round_Series = Series({'col1': np.nan, 'col2': 1})
# TODO(wesm): unused?
expected_nan_round = DataFrame({ # noqa
'col1': [1.123, 2.123, 3.123],
'col2': [1.2, 2.2, 3.2]})
with pytest.raises(TypeError):
df.round(nan_round_Series)
# Make sure this doesn't break existing Series.round
tm.assert_series_equal(df['col1'].round(1), expected_rounded['col1'])
# named columns
# GH 11986
decimals = 2
expected_rounded = DataFrame(
{'col1': [1.12, 2.12, 3.12], 'col2': [1.23, 2.23, 3.23]})
df.columns.name = "cols"
expected_rounded.columns.name = "cols"
tm.assert_frame_equal(df.round(decimals), expected_rounded)
# interaction of named columns & series
tm.assert_series_equal(df['col1'].round(decimals),
expected_rounded['col1'])
tm.assert_series_equal(df.round(decimals)['col1'],
expected_rounded['col1'])
def test_numpy_round(self):
# GH 12600
df = DataFrame([[1.53, 1.36], [0.06, 7.01]])
out = np.round(df, decimals=0)
expected = DataFrame([[2., 1.], [0., 7.]])
tm.assert_frame_equal(out, expected)
msg = "the 'out' parameter is not supported"
with pytest.raises(ValueError, match=msg):
np.round(df, decimals=0, out=df)
def test_numpy_round_nan(self):
# See gh-14197
df = Series([1.53, np.nan, 0.06]).to_frame()
with tm.assert_produces_warning(None):
result = df.round()
expected = Series([2., np.nan, 0.]).to_frame()
tm.assert_frame_equal(result, expected)
def test_round_mixed_type(self):
# GH 11885
df = DataFrame({'col1': [1.1, 2.2, 3.3, 4.4],
'col2': ['1', 'a', 'c', 'f'],
'col3': date_range('20111111', periods=4)})
round_0 = DataFrame({'col1': [1., 2., 3., 4.],
'col2': ['1', 'a', 'c', 'f'],
'col3': date_range('20111111', periods=4)})
tm.assert_frame_equal(df.round(), round_0)
tm.assert_frame_equal(df.round(1), df)
tm.assert_frame_equal(df.round({'col1': 1}), df)
tm.assert_frame_equal(df.round({'col1': 0}), round_0)
tm.assert_frame_equal(df.round({'col1': 0, 'col2': 1}), round_0)
tm.assert_frame_equal(df.round({'col3': 1}), df)
def test_round_issue(self):
# GH 11611
df = pd.DataFrame(np.random.random([3, 3]), columns=['A', 'B', 'C'],
index=['first', 'second', 'third'])
dfs = pd.concat((df, df), axis=1)
rounded = dfs.round()
tm.assert_index_equal(rounded.index, dfs.index)
decimals = pd.Series([1, 0, 2], index=['A', 'B', 'A'])
msg = "Index of decimals must be unique"
with pytest.raises(ValueError, match=msg):
df.round(decimals)
def test_built_in_round(self):
# GH 11763
# Here's the test frame we'll be working with
df = DataFrame(
{'col1': [1.123, 2.123, 3.123], 'col2': [1.234, 2.234, 3.234]})
# Default round to integer (i.e. decimals=0)
expected_rounded = DataFrame(
{'col1': [1., 2., 3.], 'col2': [1., 2., 3.]})
tm.assert_frame_equal(round(df), expected_rounded)
def test_round_nonunique_categorical(self):
# See GH21809
idx = pd.CategoricalIndex(['low'] * 3 + ['hi'] * 3)
df = pd.DataFrame(np.random.rand(6, 3), columns=list('abc'))
expected = df.round(3)
expected.index = idx
df_categorical = df.copy().set_index(idx)
assert df_categorical.shape == (6, 3)
result = df_categorical.round(3)
assert result.shape == (6, 3)
tm.assert_frame_equal(result, expected)
# ---------------------------------------------------------------------
# Clip
def test_clip(self, float_frame):
median = float_frame.median().median()
original = float_frame.copy()
with tm.assert_produces_warning(FutureWarning):
capped = float_frame.clip_upper(median)
assert not (capped.values > median).any()
with tm.assert_produces_warning(FutureWarning):
floored = float_frame.clip_lower(median)
assert not (floored.values < median).any()
double = float_frame.clip(upper=median, lower=median)
assert not (double.values != median).any()
# Verify that float_frame was not changed inplace
assert (float_frame.values == original.values).all()
def test_inplace_clip(self, float_frame):
# GH 15388
median = float_frame.median().median()
frame_copy = float_frame.copy()
with tm.assert_produces_warning(FutureWarning):
frame_copy.clip_upper(median, inplace=True)
assert not (frame_copy.values > median).any()
frame_copy = float_frame.copy()
with tm.assert_produces_warning(FutureWarning):
frame_copy.clip_lower(median, inplace=True)
assert not (frame_copy.values < median).any()
frame_copy = float_frame.copy()
frame_copy.clip(upper=median, lower=median, inplace=True)
assert not (frame_copy.values != median).any()
def test_dataframe_clip(self):
# GH 2747
df = DataFrame(np.random.randn(1000, 2))
for lb, ub in [(-1, 1), (1, -1)]:
clipped_df = df.clip(lb, ub)
lb, ub = min(lb, ub), max(ub, lb)
lb_mask = df.values <= lb
ub_mask = df.values >= ub
mask = ~lb_mask & ~ub_mask
assert (clipped_df.values[lb_mask] == lb).all()
assert (clipped_df.values[ub_mask] == ub).all()
assert (clipped_df.values[mask] == df.values[mask]).all()
def test_clip_mixed_numeric(self):
# TODO(jreback)
# clip on mixed integer or floats
# with integer clippers coerces to float
df = DataFrame({'A': [1, 2, 3],
'B': [1., np.nan, 3.]})
result = df.clip(1, 2)
expected = DataFrame({'A': [1, 2, 2],
'B': [1., np.nan, 2.]})
tm.assert_frame_equal(result, expected, check_like=True)
# GH 24162, clipping now preserves numeric types per column
df = DataFrame([[1, 2, 3.4], [3, 4, 5.6]],
columns=['foo', 'bar', 'baz'])
expected = df.dtypes
result = df.clip(upper=3).dtypes
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize("inplace", [True, False])
def test_clip_against_series(self, inplace):
# GH 6966
df = DataFrame(np.random.randn(1000, 2))
lb = Series(np.random.randn(1000))
ub = lb + 1
original = df.copy()
clipped_df = df.clip(lb, ub, axis=0, inplace=inplace)
if inplace:
clipped_df = df
for i in range(2):
lb_mask = original.iloc[:, i] <= lb
ub_mask = original.iloc[:, i] >= ub
mask = ~lb_mask & ~ub_mask
result = clipped_df.loc[lb_mask, i]
tm.assert_series_equal(result, lb[lb_mask], check_names=False)
assert result.name == i
result = clipped_df.loc[ub_mask, i]
tm.assert_series_equal(result, ub[ub_mask], check_names=False)
assert result.name == i
tm.assert_series_equal(clipped_df.loc[mask, i], df.loc[mask, i])
@pytest.mark.parametrize("inplace", [True, False])
@pytest.mark.parametrize("lower", [[2, 3, 4], np.asarray([2, 3, 4])])
@pytest.mark.parametrize("axis,res", [
(0, [[2., 2., 3.], [4., 5., 6.], [7., 7., 7.]]),
(1, [[2., 3., 4.], [4., 5., 6.], [5., 6., 7.]])
])
def test_clip_against_list_like(self, simple_frame,
inplace, lower, axis, res):
# GH 15390
original = simple_frame.copy(deep=True)
result = original.clip(lower=lower, upper=[5, 6, 7],
axis=axis, inplace=inplace)
expected = pd.DataFrame(res,
columns=original.columns,
index=original.index)
if inplace:
result = original
tm.assert_frame_equal(result, expected, check_exact=True)
@pytest.mark.parametrize("axis", [0, 1, None])
def test_clip_against_frame(self, axis):
df = DataFrame(np.random.randn(1000, 2))
lb = DataFrame(np.random.randn(1000, 2))
ub = lb + 1
clipped_df = df.clip(lb, ub, axis=axis)
lb_mask = df <= lb
ub_mask = df >= ub
mask = ~lb_mask & ~ub_mask
tm.assert_frame_equal(clipped_df[lb_mask], lb[lb_mask])
tm.assert_frame_equal(clipped_df[ub_mask], ub[ub_mask])
tm.assert_frame_equal(clipped_df[mask], df[mask])
def test_clip_against_unordered_columns(self):
# GH 20911
df1 = DataFrame(np.random.randn(1000, 4), columns=['A', 'B', 'C', 'D'])
df2 = DataFrame(np.random.randn(1000, 4), columns=['D', 'A', 'B', 'C'])
df3 = DataFrame(df2.values - 1, columns=['B', 'D', 'C', 'A'])
result_upper = df1.clip(lower=0, upper=df2)
expected_upper = df1.clip(lower=0, upper=df2[df1.columns])
result_lower = df1.clip(lower=df3, upper=3)
expected_lower = df1.clip(lower=df3[df1.columns], upper=3)
result_lower_upper = df1.clip(lower=df3, upper=df2)
expected_lower_upper = df1.clip(lower=df3[df1.columns],
upper=df2[df1.columns])
tm.assert_frame_equal(result_upper, expected_upper)
tm.assert_frame_equal(result_lower, expected_lower)
tm.assert_frame_equal(result_lower_upper, expected_lower_upper)
def test_clip_with_na_args(self, float_frame):
"""Should process np.nan argument as None """
# GH 17276
tm.assert_frame_equal(float_frame.clip(np.nan), float_frame)
tm.assert_frame_equal(float_frame.clip(upper=np.nan, lower=np.nan),
float_frame)
# GH 19992
df = DataFrame({'col_0': [1, 2, 3], 'col_1': [4, 5, 6],
'col_2': [7, 8, 9]})
result = df.clip(lower=[4, 5, np.nan], axis=0)
expected = DataFrame({'col_0': [4, 5, np.nan], 'col_1': [4, 5, np.nan],
'col_2': [7, 8, np.nan]})
tm.assert_frame_equal(result, expected)
result = df.clip(lower=[4, 5, np.nan], axis=1)
expected = DataFrame({'col_0': [4, 4, 4], 'col_1': [5, 5, 6],
'col_2': [np.nan, np.nan, np.nan]})
tm.assert_frame_equal(result, expected)
# ---------------------------------------------------------------------
# Matrix-like
def test_dot(self):
a = DataFrame(np.random.randn(3, 4), index=['a', 'b', 'c'],
columns=['p', 'q', 'r', 's'])
b = DataFrame(np.random.randn(4, 2), index=['p', 'q', 'r', 's'],
columns=['one', 'two'])
result = a.dot(b)
expected = DataFrame(np.dot(a.values, b.values),
index=['a', 'b', 'c'],
columns=['one', 'two'])
# Check alignment
b1 = b.reindex(index=reversed(b.index))
result = a.dot(b)
tm.assert_frame_equal(result, expected)
# Check series argument
result = a.dot(b['one'])
tm.assert_series_equal(result, expected['one'], check_names=False)
assert result.name is None
result = a.dot(b1['one'])
tm.assert_series_equal(result, expected['one'], check_names=False)
assert result.name is None
# can pass correct-length arrays
row = a.iloc[0].values
result = a.dot(row)
expected = a.dot(a.iloc[0])
tm.assert_series_equal(result, expected)
with pytest.raises(ValueError, match='Dot product shape mismatch'):
a.dot(row[:-1])
a = np.random.rand(1, 5)
b = np.random.rand(5, 1)
A = DataFrame(a)
# TODO(wesm): unused
B = DataFrame(b) # noqa
# it works
result = A.dot(b)
# unaligned
df = DataFrame(np.random.randn(3, 4),
index=[1, 2, 3], columns=lrange(4))
df2 = DataFrame(np.random.randn(5, 3),
index=lrange(5), columns=[1, 2, 3])
with pytest.raises(ValueError, match='aligned'):
df.dot(df2)
def test_matmul(self):
# matmul test is for GH 10259
a = DataFrame(np.random.randn(3, 4), index=['a', 'b', 'c'],
columns=['p', 'q', 'r', 's'])
b = DataFrame(np.random.randn(4, 2), index=['p', 'q', 'r', 's'],
columns=['one', 'two'])
# DataFrame @ DataFrame
result = operator.matmul(a, b)
expected = DataFrame(np.dot(a.values, b.values),
index=['a', 'b', 'c'],
columns=['one', 'two'])
tm.assert_frame_equal(result, expected)
# DataFrame @ Series
result = operator.matmul(a, b.one)
expected = Series(np.dot(a.values, b.one.values),
index=['a', 'b', 'c'])
tm.assert_series_equal(result, expected)
# np.array @ DataFrame
result = operator.matmul(a.values, b)
assert isinstance(result, DataFrame)
assert result.columns.equals(b.columns)
assert result.index.equals(pd.Index(range(3)))
expected = np.dot(a.values, b.values)
tm.assert_almost_equal(result.values, expected)
# nested list @ DataFrame (__rmatmul__)
result = operator.matmul(a.values.tolist(), b)
expected = DataFrame(np.dot(a.values, b.values),
index=['a', 'b', 'c'],
columns=['one', 'two'])
tm.assert_almost_equal(result.values, expected.values)
# mixed dtype DataFrame @ DataFrame
a['q'] = a.q.round().astype(int)
result = operator.matmul(a, b)
expected = DataFrame(np.dot(a.values, b.values),
index=['a', 'b', 'c'],
columns=['one', 'two'])
tm.assert_frame_equal(result, expected)
# different dtypes DataFrame @ DataFrame
a = a.astype(int)
result = operator.matmul(a, b)
expected = DataFrame(np.dot(a.values, b.values),
index=['a', 'b', 'c'],
columns=['one', 'two'])
tm.assert_frame_equal(result, expected)
# unaligned
df = DataFrame(np.random.randn(3, 4),
index=[1, 2, 3], columns=lrange(4))
df2 = DataFrame(np.random.randn(5, 3),
index=lrange(5), columns=[1, 2, 3])
with pytest.raises(ValueError, match='aligned'):
operator.matmul(df, df2)
@pytest.fixture
def df_duplicates():
return pd.DataFrame({'a': [1, 2, 3, 4, 4],
'b': [1, 1, 1, 1, 1],
'c': [0, 1, 2, 5, 4]},
index=[0, 0, 1, 1, 1])
@pytest.fixture
def df_strings():
return pd.DataFrame({'a': np.random.permutation(10),
'b': list(ascii_lowercase[:10]),
'c': np.random.permutation(10).astype('float64')})
@pytest.fixture
def df_main_dtypes():
return pd.DataFrame(
{'group': [1, 1, 2],
'int': [1, 2, 3],
'float': [4., 5., 6.],
'string': list('abc'),
'category_string': pd.Series(list('abc')).astype('category'),
'category_int': [7, 8, 9],
'datetime': pd.date_range('20130101', periods=3),
'datetimetz': pd.date_range('20130101',
periods=3,
tz='US/Eastern'),
'timedelta': pd.timedelta_range('1 s', periods=3, freq='s')},
columns=['group', 'int', 'float', 'string',
'category_string', 'category_int',
'datetime', 'datetimetz',
'timedelta'])
class TestNLargestNSmallest(object):
dtype_error_msg_template = ("Column {column!r} has dtype {dtype}, cannot "
"use method {method!r} with this dtype")
# ----------------------------------------------------------------------
# Top / bottom
@pytest.mark.parametrize('order', [
['a'],
['c'],
['a', 'b'],
['a', 'c'],
['b', 'a'],
['b', 'c'],
['a', 'b', 'c'],
['c', 'a', 'b'],
['c', 'b', 'a'],
['b', 'c', 'a'],
['b', 'a', 'c'],
# dups!
['b', 'c', 'c']])
@pytest.mark.parametrize('n', range(1, 11))
def test_n(self, df_strings, nselect_method, n, order):
# GH 10393
df = df_strings
if 'b' in order:
error_msg = self.dtype_error_msg_template.format(
column='b', method=nselect_method, dtype='object')
with pytest.raises(TypeError, match=error_msg):
getattr(df, nselect_method)(n, order)
else:
ascending = nselect_method == 'nsmallest'
result = getattr(df, nselect_method)(n, order)
expected = df.sort_values(order, ascending=ascending).head(n)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize('columns', [
['group', 'category_string'], ['group', 'string']])
def test_n_error(self, df_main_dtypes, nselect_method, columns):
df = df_main_dtypes
col = columns[1]
error_msg = self.dtype_error_msg_template.format(
column=col, method=nselect_method, dtype=df[col].dtype)
# escape some characters that may be in the repr
error_msg = (error_msg.replace('(', '\\(').replace(")", "\\)")
.replace("[", "\\[").replace("]", "\\]"))
with pytest.raises(TypeError, match=error_msg):
getattr(df, nselect_method)(2, columns)
def test_n_all_dtypes(self, df_main_dtypes):
df = df_main_dtypes
df.nsmallest(2, list(set(df) - {'category_string', 'string'}))
df.nlargest(2, list(set(df) - {'category_string', 'string'}))
@pytest.mark.parametrize('method,expected', [
('nlargest',
pd.DataFrame({'a': [2, 2, 2, 1], 'b': [3, 2, 1, 3]},
index=[2, 1, 0, 3])),
('nsmallest',
pd.DataFrame({'a': [1, 1, 1, 2], 'b': [1, 2, 3, 1]},
index=[5, 4, 3, 0]))])
def test_duplicates_on_starter_columns(self, method, expected):
# regression test for #22752
df = pd.DataFrame({
'a': [2, 2, 2, 1, 1, 1],
'b': [1, 2, 3, 3, 2, 1]
})
result = getattr(df, method)(4, columns=['a', 'b'])
tm.assert_frame_equal(result, expected)
def test_n_identical_values(self):
# GH 15297
df = pd.DataFrame({'a': [1] * 5, 'b': [1, 2, 3, 4, 5]})
result = df.nlargest(3, 'a')
expected = pd.DataFrame(
{'a': [1] * 3, 'b': [1, 2, 3]}, index=[0, 1, 2]
)
tm.assert_frame_equal(result, expected)
result = df.nsmallest(3, 'a')
expected = pd.DataFrame({'a': [1] * 3, 'b': [1, 2, 3]})
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize('order', [
['a', 'b', 'c'],
['c', 'b', 'a'],
['a'],
['b'],
['a', 'b'],
['c', 'b']])
@pytest.mark.parametrize('n', range(1, 6))
def test_n_duplicate_index(self, df_duplicates, n, order):
# GH 13412
df = df_duplicates
result = df.nsmallest(n, order)
expected = df.sort_values(order).head(n)
tm.assert_frame_equal(result, expected)
result = df.nlargest(n, order)
expected = df.sort_values(order, ascending=False).head(n)
tm.assert_frame_equal(result, expected)
def test_duplicate_keep_all_ties(self):
# GH 16818
df = pd.DataFrame({'a': [5, 4, 4, 2, 3, 3, 3, 3],
'b': [10, 9, 8, 7, 5, 50, 10, 20]})
result = df.nlargest(4, 'a', keep='all')
expected = pd.DataFrame({'a': {0: 5, 1: 4, 2: 4, 4: 3,
5: 3, 6: 3, 7: 3},
'b': {0: 10, 1: 9, 2: 8, 4: 5,
5: 50, 6: 10, 7: 20}})
tm.assert_frame_equal(result, expected)
result = df.nsmallest(2, 'a', keep='all')
expected = pd.DataFrame({'a': {3: 2, 4: 3, 5: 3, 6: 3, 7: 3},
'b': {3: 7, 4: 5, 5: 50, 6: 10, 7: 20}})
tm.assert_frame_equal(result, expected)
def test_series_broadcasting(self):
# smoke test for numpy warnings
# GH 16378, GH 16306
df = DataFrame([1.0, 1.0, 1.0])
df_nan = DataFrame({'A': [np.nan, 2.0, np.nan]})
s = | Series([1, 1, 1]) | pandas.Series |
# -*- coding: utf-8 -*-
__author__ = '<NAME>'
from pandas import (
concat,
read_csv,
Series
)
from sklearn.tree import DecisionTreeClassifier
class Titanic(object):
titanic_data = None
def __init__(self, titanic_csv):
self.titanic_data = read_csv(titanic_csv, index_col='PassengerId')
def _percents(self, field):
return self.titanic_data.groupby(field).size().apply(
lambda x: float(x) / self.titanic_data.groupby(field).size().sum() * 100
)
@property
def titanic_sex(self):
return '{male} {female}'.format(**self.titanic_data['Sex'].value_counts())
@property
def survived_percentage(self):
return '{0:.2f}'.format(self._percents('Survived')[1])
@property
def first_class_percentage(self):
return '{0:.2f}'.format(self._percents('Pclass')[1])
@property
def std_and_mean_for_age(self):
ages = self.titanic_data['Age'].dropna()
return '{0:.2f} {1:.2f}'.format(
ages.mean(),
ages.median()
)
@property
def correlation_sibsp_parch(self):
return '{0:.2f}'.format(
self.titanic_data['SibSp'].corr(self.titanic_data['Parch'])
)
def _get_first_name(self, full_name):
try:
return full_name.split('(')[1].replace(')', ' ').split(' ')[0].replace('"', '')
except IndexError:
try:
return full_name.split('Miss. ')[1].split(' ')[0].replace('"', '')
except IndexError:
try:
return full_name.split('Mrs. ')[1].split(' ')[0].replace('"', '')
except IndexError:
return None
@property
def most_popular_female_name(self):
return self.titanic_data.groupby(
self.titanic_data[self.titanic_data.Sex == 'female']['Name'].apply(self._get_first_name)
).count().idxmax().Name
@property
def survival_criteria(self):
values = self.titanic_data[['Pclass', 'Fare', 'Age', 'Sex', 'Survived']].dropna()
values['IsMale'] = values.Sex == 'male'
clf = DecisionTreeClassifier(random_state=241)
clf.fit(values[['Pclass', 'Fare', 'Age', 'IsMale']], values['Survived'])
criteria = concat([
| Series(['Pclass', 'Fare', 'Age', 'Sex']) | pandas.Series |
import csv
import sys
import plotly.express as px
import plotly.graph_objects as go
from plotly.subplots import make_subplots
import pandas as pd
import json
from os import listdir
from os.path import isfile, join
import re
monnomdistances={'C':0,'I':0,'D':1,'J':1,'K':2,'L':1,'M':2,'S':1,'T':2}
markersize=8
linewidth=3
markerstyles = {'MonNom':{'color':'#000000', 'symbol':'x','size':markersize+2},
'Nom':{'color':'#00B050', 'symbol':'cross','size':markersize},
'Proxied Grift':{'color':'#ED7D31', 'symbol':'arrow-up','size':markersize},
'Monotonic Grift':{'color':'#ED7D31', 'symbol':'diamond-open','size':markersize},
'Racket':{'color':'#4472C4', 'symbol':'circle-open','size':markersize},
'C#':{'color':'#264478', 'symbol':'diamond','size':markersize},
'Java':{'color':'#7030A0', 'symbol':'diamond-wide','size':markersize+3},
'NodeJS':{'color':'#9E480E', 'symbol':'circle','size':markersize},
'HiggsCheck':{'color':'#C00000', 'symbol':'arrow-up','size':markersize},
'Reticulated':{'color':'#B21E6F', 'symbol':'circle-open','size':markersize}}
linestyles = {'MonNom':{'color':'#000000', 'width':linewidth},
'Nom':{'color':'#00aa00', 'dash':'dash', 'width':linewidth},
'Proxied Grift':{'color':'#ED7D31', 'dash':'longdash', 'width':linewidth},
'Monotonic Grift':{'color':'#ED7D31', 'dash':'dashdot', 'width':linewidth},
'Racket':{'color':'#4472C4', 'dash':'dot', 'width':linewidth},
'C#':{'color':'#264478', 'dash':'dot', 'width':linewidth},
'Java':{'color':'#7030A0', 'dash':'dot', 'width':linewidth},
'NodeJS':{'color':'#9E480E', 'dash':'dot', 'width':linewidth},
'HiggsCheck':{'color':'#C00000', 'dash':'dot', 'width':linewidth},
'Reticulated':{'color':'#B21E6F', 'dash':'dot', 'width':linewidth}}
def distance_to_fully_typed(config):
ret=0
for c in config:
ret+=monnomdistances[c]
return ret
def combine_funcs(f1,f2):
return lambda x: f2(f1(x))
def cut_dotbm(str):
return str[4:]
def fetch_key(key,data):
try:
if(key.isdigit()):
return data.loc[int(key)][0]
else:
return data.loc[key][0]
except KeyError:
return "REMOVE"
def load_converter(path):
data=pd.read_csv(path,index_col=0)
return lambda k: fetch_key(str(k),data)
def check_key(key,data):
try:
if(key.isdigit()):
return data.loc[int(key)][0] is None
else:
return data.loc[key][0] is None
except KeyError:
return True
def load_skipper(path,results):
data=pd.read_csv(path,index_col=0)
actualdata=pd.read_csv(results,header=None)
return lambda i: check_key(cut_dotbm(actualdata.iat[i,0]),data)
def load_benchmark(path):
config=json.load(open(path+"/plotconfig.json","rt"))
if(config.get("version")!=None):
if(config.get("version")=="v2"):
return load_newbenchmark(path,config)
if(config.get("version")=="v3"):
return load_benchmarkv3(path,config)
if(config.get("version")=="v1"):
return load_oldbenchmark(path,config)
return load_newbenchmark(path,config)
def load_newbenchmark(path,config):
converter=cut_dotbm
skipper=lambda x : False
if(config.get("mapping")!=None):
converter=combine_funcs(cut_dotbm,load_converter(path+"/"+config["mapping"]))
skipper=load_skipper(path+"/"+config["mapping"],path+"/results.csv")
data=pd.read_csv(path+"/results.csv",header=None,converters={0:converter},skiprows=skipper,index_col=0)
datacolumns=len(data.columns)
linesperprog=config["lines"]
resultcolumns=[[] for i in range(0,linesperprog-1)]
timescolumns=[]
if(datacolumns%linesperprog!=0):
raise Exception("Invalid number of columns: "+path)
rightvalues=[]
for i in range(0,linesperprog):
if i!=config["time"]:
rightvalues.append(data.iat[0,i])
for i in range(0,datacolumns):
if i%linesperprog==config["time"]:
timescolumns.append(i)
else:
if i%linesperprog<config["time"]:
resultcolumns[i%linesperprog].append(i)
else:
resultcolumns[(i-1)%linesperprog].append(i)
for i in range(0,linesperprog-1):
if not data.take(resultcolumns[i],axis=1).applymap(lambda x : x==rightvalues[i]).all(axis=None):
print(data.take(resultcolumns[i],axis=1))
raise Exception("not all result values match!")
times=data.take(timescolumns,axis=1)
times=times.rename(columns={0:'Configuration'})
dists=pd.Series(times.index.map(distance_to_fully_typed), name='Distance to Fully Typed/Nominal')
means=times.mean(axis=1,numeric_only=True).rename("Running Time in Seconds")
stdevs=times.std(axis=1,numeric_only=True).rename("Running Time Standard Deviation")
extended=pd.concat([pd.Series(times.index),dists],join="inner",axis=1)
extended=extended.set_index([0])
dtable=pd.DataFrame(extended).join(means).join(stdevs)
return dtable
def load_benchmarkv3(path,config):
fullresultsfiles=[(x.string,x.group()[(x.string.find("-")+1):-9]) for x in [re.search("[a-zA-Z]+\-([0-9\-]+)_finished.csv",f) for f in listdir(path) if re.search("[a-zA-Z]+\-([0-9\-]+)_finished.csv",f)!=None]]
fullresultsfiles.sort(key=lambda x:x[1],reverse=True)
data=pd.read_csv(path+"/"+fullresultsfiles[0][0],header=None,index_col=0)
datacolumns=len(data.columns)
timescolumns=[]
for i in range(0,datacolumns):
timescolumns.append(i)
times=data.take(timescolumns,axis=1)
times=times.rename(columns={0:'Configuration'})
dists=pd.Series(times.index.map(distance_to_fully_typed), name='Distance to Fully Typed/Nominal')
means=times.mean(axis=1,numeric_only=True).rename("Running Time in Seconds")
stdevs=times.std(axis=1,numeric_only=True).rename("Running Time Standard Deviation")
extended=pd.concat([pd.Series(times.index),dists],join="inner",axis=1)
extended=extended.set_index([0])
dtable=pd.DataFrame(extended).join(means).join(stdevs)
return dtable
def load_oldbenchmark(path, config):
fullresultsfiles=[(x.string,x.group()[8:-9]) for x in [re.search("results_([0-9\-]+)_full.csv",f) for f in listdir(path+"/benchmark") if re.search("results_([0-9\-]+)_full.csv",f)!=None]]
fullresultsfiles.sort(key=lambda x:x[1],reverse=True)
converter=load_converter(path+"/"+config["mapping"])
data=pd.read_csv(path+"/benchmark/"+fullresultsfiles[0][0],header=0,converters={"Folder":converter}).pivot(index="Folder",columns="Run",values="Seconds")
means=data.mean(axis=1,numeric_only=True).rename("Running Time in Seconds")
stdevs=data.std(axis=1,numeric_only=True).rename("Running Time Standard Deviation")
dists=pd.Series(data.index.map(distance_to_fully_typed), name='Distance to Fully Typed/Nominal')
extended=pd.concat([pd.Series(data.index),dists],join="inner",axis=1)
extended=extended.set_index(["Folder"])
dtable= | pd.DataFrame(extended) | pandas.DataFrame |
Subsets and Splits