prompt (string, 19-1.03M chars) | completion (string, 4-2.12k chars) | api (string, 8-90 chars)
---|---|---|
#!/usr/bin/env python
# coding: utf-8
# # [Importing the Pandas Library](https://academy.dqlab.id/main/livecode/178/346/1682)
# In[1]:
import pandas as pd
import numpy as np
# # [DataFrame & Series](https://academy.dqlab.id/main/livecode/178/346/1683)
# In[2]:
import pandas as pd
# Series
number_list = pd.Series([1, 2, 3, 4, 5, 6])
print("Series:")
print(number_list)
# DataFrame
matrix = [[1, 2, 3],
['a','b','c'],
[3, 4, 5],
['d',4,6]]
matrix_list = pd.DataFrame(matrix)
print("DataFrame:")
print(matrix_list)
# # [DataFrame & Series Attributes - Part 1](https://academy.dqlab.id/main/livecode/178/346/1684)
# In[3]:
import pandas as pd
# Series
number_list = pd.Series([1,2,3,4,5,6])
# DataFrame
matrix_list = pd.DataFrame([[1,2,3],
['a','b','c'],
[3,4,5],
['d',4,6]])
# [1] attribute .info()
print("[1] attribute .info()")
print(matrix_list.info())
# [2] attribute .shape
print("\n[2] attribute .shape")
print(" Shape of number_list:", number_list.shape)
print(" Shape of matrix_list:", matrix_list.shape)
# [3] attribute .dtypes
print("\n[3] attribute .dtypes")
print(" Data type of number_list:", number_list.dtypes)
print(" Data type of matrix_list:", matrix_list.dtypes)
# [4] attribute .astype()
print("\n[4] attribute .astype()")
print(" Convert number_list to str:", number_list.astype("str"))
print(" Convert matrix_list to str:", matrix_list.astype("str"))
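# Supplementary sketch (not part of the original exercise): .astype() returns a
# new object and leaves the original untouched; a Series of strings reports
# dtype 'object'.
number_str = number_list.astype("str")
print(" dtype of number_list is still:", number_list.dtypes)  # int64
print(" dtype of number_str:", number_str.dtypes)             # object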
# # [DataFrame & Series Attributes - Part 2](https://academy.dqlab.id/main/livecode/178/346/1685)
# In[4]:
import pandas as pd
# Series
number_list = pd.Series([1,2,3,4,5,6])
# DataFrame
matrix_list = pd.DataFrame([[1,2,3],
['a','b','c'],
[3,4,5],
['d',4,6]])
# [5] attribute .copy()
print("[5] attribute .copy()")
num_list = number_list.copy()
print(" Copy of number_list to num_list:", num_list)
mtr_list = matrix_list.copy()
print(" Copy of matrix_list to mtr_list:", mtr_list)
# [6] attribute .to_list()
print("[6] attribute .to_list()")
print(number_list.to_list())
# [7] attribute .unique()
print("[7] attribute .unique()")
print(number_list.unique())
# # [DataFrame & Series Attributes - Part 3](https://academy.dqlab.id/main/livecode/178/346/1686)
# In[5]:
import pandas as pd
# Series
number_list = pd.Series([1,2,3,4,5,6])
# DataFrame
matrix_list = pd.DataFrame([[1,2,3],
['a','b','c'],
[3,4,5],
['d',4,6]])
# [8] attribute .index
print("[8] attribute .index")
print(" Index number_list:", number_list.index)
print(" Index matrix_list:", matrix_list.index)
# [9] attribute .columns
print("[9] attribute .columns")
print(" Column matrix_list:", matrix_list.columns)
# [10] attribute .loc
print("[10] attribute .loc")
print(" .loc[0:1] on number_list:", number_list.loc[0:1])
print(" .loc[0:1] on matrix_list:", matrix_list.loc[0:1])
# [11] attribute .iloc
print("[11] attribute .iloc")
print(" .iloc[0:1] on number_list:", number_list.iloc[0:1])
print(" .iloc[0:1] on matrix_list:", matrix_list.iloc[0:1])
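# Supplementary sketch (not part of the original exercise): .loc slices by label
# and is end-inclusive, while .iloc slices by integer position and is
# end-exclusive, so the two slices above return different numbers of rows.
print(" len of number_list.loc[0:1]:", len(number_list.loc[0:1]))    # 2 -> labels 0 and 1
print(" len of number_list.iloc[0:1]:", len(number_list.iloc[0:1]))  # 1 -> position 0 only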
# # [Creating Series & Dataframe from List](https://academy.dqlab.id/main/livecode/178/346/1688)
# In[6]:
import pandas as pd
# Creating series from list
ex_list = ['a',1,3,5,'c','d']
ex_series = | pd.Series(ex_list) | pandas.Series |
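# A small parsing sketch (an assumption about this dump's layout, not part of the
# dataset itself): each snippet breaks off on a marker line of the form
# "<prompt tail> | <completion> | <api> |", as in the line above, where the
# completion is the masked call and api is its fully qualified name.
def split_marker_line(line: str):
    """Split a '<prompt tail> | <completion> | <api> |' marker line into fields."""
    # assumes the completion itself contains no '|' character
    head, completion, api = line.rstrip().rstrip("|").rsplit("|", 2)
    return head.rstrip(" |"), completion.strip(), api.strip()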
from pathlib import Path
import pandas as pd
import numpy as np
import matplotlib
from matplotlib import pyplot as plt
from PIL import Image
from PIL import ImageDraw
from PIL import ImageFont
import logging
logger = logging.getLogger(__name__)
def provide_ax(func):
from raptgen.visualization import get_ax
def wrapper_provide_ax(*args, **kwargs):
no_ax_in_args = all(not isinstance(
arg, matplotlib.axes.Axes) for arg in args)
if no_ax_in_args and "ax" not in kwargs.keys():
logger.info("ax not provided")
fig, ax = get_ax(return_fig=True)
kwargs["ax"] = ax
kwargs["fig"] = fig
func(*args, **kwargs)
return wrapper_provide_ax
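# A hypothetical usage sketch of provide_ax (plot_hist is not part of raptgen):
# when the caller passes no Axes, the wrapper fetches one from get_ax() and
# injects it as the `ax`/`fig` keyword arguments.
#
#     @provide_ax
#     def plot_hist(values, ax=None, fig=None):
#         ax.hist(values, bins=20)
#
#     plot_hist([1, 2, 2, 3, 5, 8])  # ax and fig supplied by wrapper_provide_ax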
def get_results_df(result_dir: str) -> pd.DataFrame:
"""get results in the dir and make to dataframe with specified naming rule"""
result = list()
result_dir = Path(result_dir)
for filepath in result_dir.glob("*.csv"):
df = | pd.read_csv(filepath) | pandas.read_csv |
from unittest.mock import patch
import pytest
from AWS_AACT_Pipeline.categorize_driver import Driver
from AWS_AACT_Pipeline.mock_db_manager import MockDatabaseManager
from AWS_AACT_Pipeline.categorizer import Categorizer
from AWS_AACT_Pipeline.mock_db import MockDatabase
import pandas as pd
def test_missing_json_file():
categorizer = Categorizer()
pytest.raises(Exception, categorizer.read_file_conditions, "fake_json")
def test_misformatted_json_file():
categorizer = Categorizer()
pytest.raises(Exception, categorizer.read_file_conditions, "misformatted_json")
def test_good_driver_call():
og_df = pd.DataFrame(columns=['color', 'nct_id'], index=['kylie','willy', 'riley', 'ben', 'jonah'])
og_df.loc['kylie'] = pd.Series({'color': "yellow", 'nct_id': 1})
og_df.loc['willy'] = pd.Series({'color': "turquoise", 'nct_id': 2})
og_df.loc['riley'] = pd.Series({'color': "blue", 'nct_id': 3})
og_df.loc['ben'] = pd.Series({'color': "blue", 'nct_id': 4})
og_df.loc['jonah'] = | pd.Series({'color': "blue", 'nct_id': 5}) | pandas.Series |
import os, sys, json, pickle
import datetime
import numpy as np
import pandas as pd
| pd.set_option('display.max_colwidth', 300) | pandas.set_option |
from datetime import datetime
import warnings
import numpy as np
import pytest
from pandas.core.dtypes.generic import ABCDateOffset
import pandas as pd
from pandas import (
DatetimeIndex,
Index,
PeriodIndex,
Series,
Timestamp,
bdate_range,
date_range,
)
from pandas.tests.test_base import Ops
import pandas.util.testing as tm
from pandas.tseries.offsets import BDay, BMonthEnd, CDay, Day, Hour
START, END = datetime(2009, 1, 1), datetime(2010, 1, 1)
class TestDatetimeIndexOps(Ops):
def setup_method(self, method):
super().setup_method(method)
mask = lambda x: (isinstance(x, DatetimeIndex) or isinstance(x, PeriodIndex))
self.is_valid_objs = [o for o in self.objs if mask(o)]
self.not_valid_objs = [o for o in self.objs if not mask(o)]
def test_ops_properties(self):
f = lambda x: isinstance(x, DatetimeIndex)
self.check_ops_properties(DatetimeIndex._field_ops, f)
self.check_ops_properties(DatetimeIndex._object_ops, f)
self.check_ops_properties(DatetimeIndex._bool_ops, f)
def test_ops_properties_basic(self):
# sanity check that the behavior didn't change
# GH#7206
msg = "'Series' object has no attribute '{}'"
for op in ["year", "day", "second", "weekday"]:
with pytest.raises(AttributeError, match=msg.format(op)):
getattr(self.dt_series, op)
# attribute access should still work!
s = Series(dict(year=2000, month=1, day=10))
assert s.year == 2000
assert s.month == 1
assert s.day == 10
msg = "'Series' object has no attribute 'weekday'"
with pytest.raises(AttributeError, match=msg):
s.weekday
def test_repeat_range(self, tz_naive_fixture):
tz = tz_naive_fixture
rng = date_range("1/1/2000", "1/1/2001")
result = rng.repeat(5)
assert result.freq is None
assert len(result) == 5 * len(rng)
index = pd.date_range("2001-01-01", periods=2, freq="D", tz=tz)
exp = pd.DatetimeIndex(
["2001-01-01", "2001-01-01", "2001-01-02", "2001-01-02"], tz=tz
)
for res in [index.repeat(2), np.repeat(index, 2)]:
tm.assert_index_equal(res, exp)
assert res.freq is None
index = pd.date_range("2001-01-01", periods=2, freq="2D", tz=tz)
exp = pd.DatetimeIndex(
["2001-01-01", "2001-01-01", "2001-01-03", "2001-01-03"], tz=tz
)
for res in [index.repeat(2), np.repeat(index, 2)]:
tm.assert_index_equal(res, exp)
assert res.freq is None
index = pd.DatetimeIndex(["2001-01-01", "NaT", "2003-01-01"], tz=tz)
exp = pd.DatetimeIndex(
[
"2001-01-01",
"2001-01-01",
"2001-01-01",
"NaT",
"NaT",
"NaT",
"2003-01-01",
"2003-01-01",
"2003-01-01",
],
tz=tz,
)
for res in [index.repeat(3), np.repeat(index, 3)]:
tm.assert_index_equal(res, exp)
assert res.freq is None
def test_repeat(self, tz_naive_fixture):
tz = tz_naive_fixture
reps = 2
msg = "the 'axis' parameter is not supported"
rng = pd.date_range(start="2016-01-01", periods=2, freq="30Min", tz=tz)
expected_rng = DatetimeIndex(
[
Timestamp("2016-01-01 00:00:00", tz=tz, freq="30T"),
Timestamp("2016-01-01 00:00:00", tz=tz, freq="30T"),
Timestamp("2016-01-01 00:30:00", tz=tz, freq="30T"),
Timestamp("2016-01-01 00:30:00", tz=tz, freq="30T"),
]
)
res = rng.repeat(reps)
tm.assert_index_equal(res, expected_rng)
assert res.freq is None
tm.assert_index_equal(np.repeat(rng, reps), expected_rng)
with pytest.raises(ValueError, match=msg):
np.repeat(rng, reps, axis=1)
def test_resolution(self, tz_naive_fixture):
tz = tz_naive_fixture
for freq, expected in zip(
["A", "Q", "M", "D", "H", "T", "S", "L", "U"],
[
"day",
"day",
"day",
"day",
"hour",
"minute",
"second",
"millisecond",
"microsecond",
],
):
idx = pd.date_range(start="2013-04-01", periods=30, freq=freq, tz=tz)
assert idx.resolution == expected
def test_value_counts_unique(self, tz_naive_fixture):
tz = tz_naive_fixture
# GH 7735
idx = pd.date_range("2011-01-01 09:00", freq="H", periods=10)
# create repeated values, 'n'th element is repeated by n+1 times
idx = DatetimeIndex(np.repeat(idx.values, range(1, len(idx) + 1)), tz=tz)
exp_idx = pd.date_range("2011-01-01 18:00", freq="-1H", periods=10, tz=tz)
expected = Series(range(10, 0, -1), index=exp_idx, dtype="int64")
for obj in [idx, Series(idx)]:
tm.assert_series_equal(obj.value_counts(), expected)
expected = pd.date_range("2011-01-01 09:00", freq="H", periods=10, tz=tz)
tm.assert_index_equal(idx.unique(), expected)
idx = DatetimeIndex(
[
"2013-01-01 09:00",
"2013-01-01 09:00",
"2013-01-01 09:00",
"2013-01-01 08:00",
"2013-01-01 08:00",
pd.NaT,
],
tz=tz,
)
exp_idx = DatetimeIndex(["2013-01-01 09:00", "2013-01-01 08:00"], tz=tz)
expected = Series([3, 2], index=exp_idx)
for obj in [idx, Series(idx)]:
tm.assert_series_equal(obj.value_counts(), expected)
exp_idx = DatetimeIndex(["2013-01-01 09:00", "2013-01-01 08:00", pd.NaT], tz=tz)
expected = Series([3, 2, 1], index=exp_idx)
for obj in [idx, Series(idx)]:
tm.assert_series_equal(obj.value_counts(dropna=False), expected)
tm.assert_index_equal(idx.unique(), exp_idx)
def test_nonunique_contains(self):
# GH 9512
for idx in map(
DatetimeIndex,
(
[0, 1, 0],
[0, 0, -1],
[0, -1, -1],
["2015", "2015", "2016"],
["2015", "2015", "2014"],
),
):
assert idx[0] in idx
@pytest.mark.parametrize(
"idx",
[
DatetimeIndex(
["2011-01-01", "2011-01-02", "2011-01-03"], freq="D", name="idx"
),
DatetimeIndex(
["2011-01-01 09:00", "2011-01-01 10:00", "2011-01-01 11:00"],
freq="H",
name="tzidx",
tz="Asia/Tokyo",
),
],
)
def test_order_with_freq(self, idx):
ordered = idx.sort_values()
tm.assert_index_equal(ordered, idx)
assert ordered.freq == idx.freq
ordered = idx.sort_values(ascending=False)
expected = idx[::-1]
tm.assert_index_equal(ordered, expected)
assert ordered.freq == expected.freq
assert ordered.freq.n == -1
ordered, indexer = idx.sort_values(return_indexer=True)
tm.assert_index_equal(ordered, idx)
tm.assert_numpy_array_equal(indexer, np.array([0, 1, 2]), check_dtype=False)
assert ordered.freq == idx.freq
ordered, indexer = idx.sort_values(return_indexer=True, ascending=False)
expected = idx[::-1]
tm.assert_index_equal(ordered, expected)
tm.assert_numpy_array_equal(indexer, np.array([2, 1, 0]), check_dtype=False)
assert ordered.freq == expected.freq
assert ordered.freq.n == -1
@pytest.mark.parametrize(
"index_dates,expected_dates",
[
(
["2011-01-01", "2011-01-03", "2011-01-05", "2011-01-02", "2011-01-01"],
["2011-01-01", "2011-01-01", "2011-01-02", "2011-01-03", "2011-01-05"],
),
(
["2011-01-01", "2011-01-03", "2011-01-05", "2011-01-02", "2011-01-01"],
["2011-01-01", "2011-01-01", "2011-01-02", "2011-01-03", "2011-01-05"],
),
(
[pd.NaT, "2011-01-03", "2011-01-05", "2011-01-02", pd.NaT],
[pd.NaT, pd.NaT, "2011-01-02", "2011-01-03", "2011-01-05"],
),
],
)
def test_order_without_freq(self, index_dates, expected_dates, tz_naive_fixture):
tz = tz_naive_fixture
# without freq
index = DatetimeIndex(index_dates, tz=tz, name="idx")
expected = DatetimeIndex(expected_dates, tz=tz, name="idx")
ordered = index.sort_values()
| tm.assert_index_equal(ordered, expected) | pandas.util.testing.assert_index_equal |
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import glob
import numpy as np
import pandas as pd
import re
from collections import defaultdict, Counter
import collections
import copy
import os
import sys
import random
import logging
import argparse
def add_label(def_gold):
if def_gold == "yes":
return "entailment", "neutral"
elif def_gold == "unk":
return "neutral", "entailment"
parser = argparse.ArgumentParser(
formatter_class=argparse.RawDescriptionHelpFormatter)
parser.add_argument("--input_dir", nargs='?', type=str, help="input file")
parser.add_argument("--obj", action='store_true', help="object")
ARGS = parser.parse_args()
files = glob.glob(ARGS.input_dir+"/*")
sentences = []
for fi in files:
print(fi)
if re.search("all", fi):
continue
if not re.search("(yes|unk)", fi):
continue
def_gold = re.search("(yes|unk)", fi).group(1)
def_label, rev_label = add_label(def_gold)
pat = re.compile("."+def_gold)
tmp = re.sub(pat, '', os.path.basename(fi))
origenre = re.sub('.txt', '', tmp)
with open(fi, "r") as f:
for line in f:
genre = origenre
s1, s2 = line.split("\t")
if re.search("emptydet", s1):
s1 = re.sub("emptydet ", "several ", s1)
s2 = re.sub("emptydet ", "several ", s2)
genre = genre+".empty"
s1 = s1[0].upper() + s1[1:]
s1 = s1.strip()+"."
s2 = s2[0].upper() + s2[1:]
s2 = s2.strip()+"."
sentences.append([genre, s1, s2, def_label])
sentences.append([genre, s2, s1, rev_label])
df = pd.DataFrame(sentences, columns=['genre', 'sentence1', 'sentence2', 'gold_label'])
df8 = df
train =pd.DataFrame(index=[], columns=['index','promptID','pairID','genre','sentence1_binary_parse','sentence2_binary_parse','sentence1_parse','sentence2_parse','sentence1','sentence2','label1','gold_label'])
train['index'] = df8.index
train['promptID'] = df8.index
train['pairID'] = df8.index
train['gold_label'] = df8["gold_label"]
train['genre'] = df8["genre"]
train['sentence1'] = df8["sentence1"]
train['sentence2'] = df8["sentence2"]
final_train = train.sample(frac=1)
final_train.to_csv(ARGS.input_dir+"/all_formatted.tsv", sep="\t", index=False)
if ARGS.obj:
pass
else:
depth0 = final_train.query('genre.str.contains("depth0")', engine='python')
depth0.to_csv(ARGS.input_dir+"/depth0.tsv", sep="\t", index=False)
depth1 = final_train.query('genre.str.contains("depth1")', engine='python')
depth1.to_csv(ARGS.input_dir+"/depth1.tsv", sep="\t", index=False)
depth2 = final_train.query('genre.str.contains("depth2")', engine='python')
depth2.to_csv(ARGS.input_dir+"/depth2.tsv", sep="\t", index=False)
depth3 = final_train.query('genre.str.contains("depth3")', engine='python')
depth3.to_csv(ARGS.input_dir+"/depth3.tsv", sep="\t", index=False)
depth4 = final_train.query('genre.str.contains("depth4")', engine='python')
depth4.to_csv(ARGS.input_dir+"/depth4.tsv", sep="\t", index=False)
sample_lex1_1 = depth0.query('genre.str.contains("empty")', engine='python')
rest_1 = depth0.query('not genre.str.contains("empty")', engine='python')
sample_lex1_2 = depth0.query('sentence1.str.contains("No ")', engine='python')
rest_2 = depth0.query('not sentence1.str.contains("No ")', engine='python')
allq_lex1_1_l = rest_1.query('genre.str.contains("lex.")', engine='python')
allq_lex1_2_l = rest_2.query('genre.str.contains("lex.")', engine='python')
rest_1_l = rest_1.query('not genre.str.contains("lex.")', engine='python')
rest_2_l = rest_2.query('not genre.str.contains("lex.")', engine='python')
allq_lex1_1_p = rest_1.query('genre.str.contains("pp.")', engine='python')
allq_lex1_2_p = rest_2.query('genre.str.contains("pp.")', engine='python')
rest_1_p = rest_1.query('not genre.str.contains("pp.")', engine='python')
rest_2_p = rest_2.query('not genre.str.contains("pp.")', engine='python')
rest_types = [[rest_1_l,sample_lex1_2,allq_lex1_2_l,sample_lex1_1,allq_lex1_1_l],
[rest_2_l,sample_lex1_2,allq_lex1_2_l,sample_lex1_1,allq_lex1_1_l],
[rest_1_p,sample_lex1_2,allq_lex1_2_p,sample_lex1_1,allq_lex1_1_p],
[rest_2_p,sample_lex1_2,allq_lex1_2_p,sample_lex1_1,allq_lex1_1_p]]
for i, rest_type in enumerate(rest_types):
#sampling lex_1
train = pd.concat([rest_type[1],rest_type[2]]).drop_duplicates().reset_index(drop=True).sample(frac=1)
test = rest_type[0]
train.to_csv(ARGS.input_dir+"/lex_1_"+str(i)+".tsv", sep="\t", index=False)
test.to_csv(ARGS.input_dir+"/dev_matched_lex_1_"+str(i)+".tsv", sep="\t", index=False)
#1.{at least three, at most three}, {less than three, more than three},{a few, few}
#2.{a few, few}, {at least three, at most three}, {less than three, more than three}
at = test.query('sentence1.str.contains("At ")', engine='python')
than = test.query('sentence1.str.contains(" than ")', engine='python')
few = test.query('sentence1.str.contains("ew ")', engine='python')
rest = test.query('not sentence1.str.contains("At ") and not sentence1.str.contains(" than ") and not sentence1.str.contains("ew ")', engine='python')
lex_2 = pd.concat([rest_type[3],rest_type[4], at]).drop_duplicates().reset_index(drop=True).sample(frac=1)
test_lex_2 = pd.concat([than, few, rest]).drop_duplicates().reset_index(drop=True)
lex_2.to_csv(ARGS.input_dir+"/lex_2_"+str(i)+"_1.tsv", sep="\t", index=False)
test_lex_2.to_csv(ARGS.input_dir+"/dev_matched_lex_2_"+str(i)+"_1.tsv", sep="\t", index=False)
lex_3 = | pd.concat([rest_type[3],rest_type[4], at, than]) | pandas.concat |
import pandas as pd
confirmed = 'https://raw.githubusercontent.com/CSSEGISandData/COVID-19/master/csse_covid_19_data' \
'/csse_covid_19_time_series/time_series_covid19_confirmed_global.csv'
recovered = 'https://raw.githubusercontent.com/CSSEGISandData/COVID-19/master/csse_covid_19_data' \
'/csse_covid_19_time_series/time_series_covid19_recovered_global.csv'
deaths = 'https://raw.githubusercontent.com/CSSEGISandData/COVID-19/master/csse_covid_19_data' \
'/csse_covid_19_time_series/time_series_covid19_deaths_global.csv'
deaths = pd.read_csv(deaths)
recovered = pd.read_csv(recovered)
confirmed = pd.read_csv(confirmed)
recovered = recovered.drop(columns=['Province/State'])
deaths = deaths.drop(columns=['Province/State'])
confirmed = confirmed.drop(columns=['Province/State'])
recovered = recovered.rename(columns={'Country/Region': 'Country'})
deaths = deaths.rename(columns={'Country/Region': 'Country'})
confirmed = confirmed.rename(columns={'Country/Region': 'Country'})
class GlobalCases:
def confirmed(self):
df = confirmed.iloc[:, 4:].sum().max()
df = {'Confirmed': int(df)}
return df
def deaths(self):
df = deaths.iloc[:, 4:].sum().max()
df = {'Deaths': int(df)}
return df
def recovered(self):
df = recovered.iloc[:, 4:].sum().max()
df = {'Recovered': int(df)}
return df
def active(self):
df = GlobalCases.confirmed(self)['Confirmed'] - GlobalCases.deaths(self)['Deaths'] \
- GlobalCases.recovered(self)['Recovered']
df = {'Active': int(df)}
return df
def complete_world(self):
df = {
'Confirmed': GlobalCases.confirmed(self),
'Deaths': GlobalCases.deaths(self),
'Recovered': GlobalCases.recovered(self),
'Active': GlobalCases.active(self)
}
return df
def death_rate(self=None):
df = GlobalCases.deaths(self)['Deaths'] / GlobalCases.confirmed(self)['Confirmed'] * 100
df = {'Death Rate': float(df)}
return df
def recovery_rate(self):
df = GlobalCases.recovered(self)['Recovered'] / GlobalCases.confirmed(self)['Confirmed'] * 100
df = {'Recovery Rate': float(df)}
return df
def active_perc(self):
df = GlobalCases.active(self)['Active'] / GlobalCases.confirmed(self)['Confirmed'] * 100
df = {'Active Percantage': float(df)}
return df
def daily_confirmed(self):
df = confirmed.iloc[:, 3:].sum(axis=0)
df.index = pd.to_datetime(df.index)
df = pd.DataFrame(df).reset_index()
df.columns = ['Date', 'Confirmed']
#df["Confirmed"].astype(int)
return df.to_dict()
def daily_deaths(self):
df = deaths.iloc[:, 3:].sum(axis=0)
df.index = pd.to_datetime(df.index)
df = | pd.DataFrame(df) | pandas.DataFrame |
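# Minimal usage sketch of the GlobalCases class above (assumes the raw CSV URLs
# at the top of this module are reachable); every method returns a plain dict,
# so the results can be serialized straight to JSON:
#
#     world = GlobalCases()
#     world.complete_world()  # nested dict with Confirmed/Deaths/Recovered/Active
#     world.death_rate()      # {'Death Rate': <float, in percent>}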
import pytest
import pandas.util._test_decorators as td
import pandas as pd
import pandas._testing as tm
arrays = [pd.array([1, 2, 3, None], dtype=dtype) for dtype in tm.ALL_EA_INT_DTYPES]
arrays += [pd.array([0.1, 0.2, 0.3, None], dtype=dtype) for dtype in tm.FLOAT_EA_DTYPES]
arrays += [pd.array([True, False, True, None], dtype="boolean")]
@pytest.fixture(params=arrays, ids=[a.dtype.name for a in arrays])
def data(request):
return request.param
@td.skip_if_no("pyarrow", min_version="0.15.0")
def test_arrow_array(data):
# protocol added in 0.15.0
import pyarrow as pa
arr = pa.array(data)
expected = pa.array(
data.to_numpy(object, na_value=None),
type=pa.from_numpy_dtype(data.dtype.numpy_dtype),
)
assert arr.equals(expected)
@td.skip_if_no("pyarrow", min_version="0.16.0")
def test_arrow_roundtrip(data):
# roundtrip possible from arrow 0.16.0
import pyarrow as pa
df = pd.DataFrame({"a": data})
table = pa.table(df)
assert table.field("a").type == str(data.dtype.numpy_dtype)
result = table.to_pandas()
assert result["a"].dtype == data.dtype
tm.assert_frame_equal(result, df)
@td.skip_if_no("pyarrow", min_version="0.15.1.dev")
def test_arrow_load_from_zero_chunks(data):
# GH-41040
import pyarrow as pa
df = pd.DataFrame({"a": data[0:0]})
table = pa.table(df)
assert table.field("a").type == str(data.dtype.numpy_dtype)
table = pa.table(
[pa.chunked_array([], type=table.field("a").type)], schema=table.schema
)
result = table.to_pandas()
assert result["a"].dtype == data.dtype
tm.assert_frame_equal(result, df)
@td.skip_if_no("pyarrow", min_version="0.16.0")
def test_arrow_from_arrow_uint():
# https://github.com/pandas-dev/pandas/issues/31896
# possible mismatch in types
import pyarrow as pa
dtype = pd.UInt32Dtype()
result = dtype.__from_arrow__(pa.array([1, 2, 3, 4, None], type="int64"))
expected = pd.array([1, 2, 3, 4, None], dtype="UInt32")
tm.assert_extension_array_equal(result, expected)
@ | td.skip_if_no("pyarrow", min_version="0.16.0") | pandas.util._test_decorators.skip_if_no |
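# A standalone sketch of the roundtrip exercised by the tests above (assumes
# pandas and pyarrow >= 0.16 are installed): a nullable extension dtype survives
# DataFrame -> Arrow table -> DataFrame via the pandas metadata kept in the schema.
import pandas as pd
import pyarrow as pa

df_roundtrip = pd.DataFrame({"a": pd.array([1, 2, None], dtype="Int64")})
restored = pa.table(df_roundtrip).to_pandas()
assert restored["a"].dtype == df_roundtrip["a"].dtype  # Int64 is preserved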
import unittest
import qteasy as qt
import pandas as pd
from pandas import Timestamp
import numpy as np
import math
from numpy import int64
import itertools
import datetime
from qteasy.utilfuncs import list_to_str_format, regulate_date_format, time_str_format, str_to_list
from qteasy.utilfuncs import maybe_trade_day, is_market_trade_day, prev_trade_day, next_trade_day
from qteasy.utilfuncs import next_market_trade_day, unify, mask_to_signal, list_or_slice, labels_to_dict
from qteasy.utilfuncs import weekday_name, prev_market_trade_day, is_number_like, list_truncate, input_to_list
from qteasy.space import Space, Axis, space_around_centre, ResultPool
from qteasy.core import apply_loop
from qteasy.built_in import SelectingFinanceIndicator, TimingDMA, TimingMACD, TimingCDL, TimingTRIX
from qteasy.tsfuncs import income, indicators, name_change, get_bar
from qteasy.tsfuncs import stock_basic, trade_calendar, new_share, get_index
from qteasy.tsfuncs import balance, cashflow, top_list, index_indicators, composite
from qteasy.tsfuncs import future_basic, future_daily, options_basic, options_daily
from qteasy.tsfuncs import fund_basic, fund_net_value, index_basic, stock_company
from qteasy.evaluate import eval_alpha, eval_benchmark, eval_beta, eval_fv
from qteasy.evaluate import eval_info_ratio, eval_max_drawdown, eval_sharp
from qteasy.evaluate import eval_volatility
from qteasy.tafuncs import bbands, dema, ema, ht, kama, ma, mama, mavp, mid_point
from qteasy.tafuncs import mid_price, sar, sarext, sma, t3, tema, trima, wma, adx, adxr
from qteasy.tafuncs import apo, bop, cci, cmo, dx, macd, macdext, aroon, aroonosc
from qteasy.tafuncs import macdfix, mfi, minus_di, minus_dm, mom, plus_di, plus_dm
from qteasy.tafuncs import ppo, roc, rocp, rocr, rocr100, rsi, stoch, stochf, stochrsi
from qteasy.tafuncs import trix, ultosc, willr, ad, adosc, obv, atr, natr, trange
from qteasy.tafuncs import avgprice, medprice, typprice, wclprice, ht_dcperiod
from qteasy.tafuncs import ht_dcphase, ht_phasor, ht_sine, ht_trendmode, cdl2crows
from qteasy.tafuncs import cdl3blackcrows, cdl3inside, cdl3linestrike, cdl3outside
from qteasy.tafuncs import cdl3starsinsouth, cdl3whitesoldiers, cdlabandonedbaby
from qteasy.tafuncs import cdladvanceblock, cdlbelthold, cdlbreakaway, cdlclosingmarubozu
from qteasy.tafuncs import cdlconcealbabyswall, cdlcounterattack, cdldarkcloudcover
from qteasy.tafuncs import cdldoji, cdldojistar, cdldragonflydoji, cdlengulfing
from qteasy.tafuncs import cdleveningdojistar, cdleveningstar, cdlgapsidesidewhite
from qteasy.tafuncs import cdlgravestonedoji, cdlhammer, cdlhangingman, cdlharami
from qteasy.tafuncs import cdlharamicross, cdlhighwave, cdlhikkake, cdlhikkakemod
from qteasy.tafuncs import cdlhomingpigeon, cdlidentical3crows, cdlinneck
from qteasy.tafuncs import cdlinvertedhammer, cdlkicking, cdlkickingbylength
from qteasy.tafuncs import cdlladderbottom, cdllongleggeddoji, cdllongline, cdlmarubozu
from qteasy.tafuncs import cdlmatchinglow, cdlmathold, cdlmorningdojistar, cdlmorningstar
from qteasy.tafuncs import cdlonneck, cdlpiercing, cdlrickshawman, cdlrisefall3methods
from qteasy.tafuncs import cdlseparatinglines, cdlshootingstar, cdlshortline, cdlspinningtop
from qteasy.tafuncs import cdlstalledpattern, cdlsticksandwich, cdltakuri, cdltasukigap
from qteasy.tafuncs import cdlthrusting, cdltristar, cdlunique3river, cdlupsidegap2crows
from qteasy.tafuncs import cdlxsidegap3methods, beta, correl, linearreg, linearreg_angle
from qteasy.tafuncs import linearreg_intercept, linearreg_slope, stddev, tsf, var, acos
from qteasy.tafuncs import asin, atan, ceil, cos, cosh, exp, floor, ln, log10, sin, sinh
from qteasy.tafuncs import sqrt, tan, tanh, add, div, max, maxindex, min, minindex, minmax
from qteasy.tafuncs import minmaxindex, mult, sub, sum
from qteasy.history import get_financial_report_type_raw_data, get_price_type_raw_data
from qteasy.history import stack_dataframes, dataframe_to_hp, HistoryPanel
from qteasy.database import DataSource
from qteasy.strategy import Strategy, SimpleTiming, RollingTiming, SimpleSelecting, FactoralSelecting
from qteasy._arg_validators import _parse_string_kwargs, _valid_qt_kwargs
from qteasy.blender import _exp_to_token, blender_parser, signal_blend
class TestCost(unittest.TestCase):
def setUp(self):
self.amounts = np.array([10000., 20000., 10000.])
self.op = np.array([0., 1., -0.33333333])
self.amounts_to_sell = np.array([0., 0., -3333.3333])
self.cash_to_spend = np.array([0., 20000., 0.])
self.prices = np.array([10., 20., 10.])
self.r = qt.Cost(0.0)
def test_rate_creation(self):
"""Test object creation"""
print('testing rates objects\n')
self.assertIsInstance(self.r, qt.Cost, 'Type should be Rate')
self.assertEqual(self.r.buy_fix, 0)
self.assertEqual(self.r.sell_fix, 0)
def test_rate_operations(self):
"""Test the transaction fee rate object"""
self.assertEqual(self.r['buy_fix'], 0.0, 'Item got is incorrect')
self.assertEqual(self.r['sell_fix'], 0.0, 'Item got is wrong')
self.assertEqual(self.r['buy_rate'], 0.003, 'Item got is incorrect')
self.assertEqual(self.r['sell_rate'], 0.001, 'Item got is incorrect')
self.assertEqual(self.r['buy_min'], 5., 'Item got is incorrect')
self.assertEqual(self.r['sell_min'], 0.0, 'Item got is incorrect')
self.assertEqual(self.r['slipage'], 0.0, 'Item got is incorrect')
self.assertEqual(np.allclose(self.r.calculate(self.amounts),
[0.003, 0.003, 0.003]),
True,
'fee calculation wrong')
def test_rate_fee(self):
"""Test buy/sell transaction fee rates"""
self.r.buy_rate = 0.003
self.r.sell_rate = 0.001
self.r.buy_fix = 0.
self.r.sell_fix = 0.
self.r.buy_min = 0.
self.r.sell_min = 0.
self.r.slipage = 0.
print('\nSell result with fixed rate = 0.001 and moq = 0:')
print(self.r.get_selling_result(self.prices, self.amounts_to_sell))
test_rate_fee_result = self.r.get_selling_result(self.prices, self.amounts_to_sell)
self.assertIs(np.allclose(test_rate_fee_result[0], [0., 0., -3333.3333]), True, 'result incorrect')
self.assertAlmostEqual(test_rate_fee_result[1], 33299.999667, msg='result incorrect')
self.assertAlmostEqual(test_rate_fee_result[2], 33.333332999999996, msg='result incorrect')
print('\nSell result with fixed rate = 0.001 and moq = 1:')
print(self.r.get_selling_result(self.prices, self.amounts_to_sell, 1.))
test_rate_fee_result = self.r.get_selling_result(self.prices, self.amounts_to_sell, 1)
self.assertIs(np.allclose(test_rate_fee_result[0], [0., 0., -3333]), True, 'result incorrect')
self.assertAlmostEqual(test_rate_fee_result[1], 33296.67, msg='result incorrect')
self.assertAlmostEqual(test_rate_fee_result[2], 33.33, msg='result incorrect')
print('\nSell result with fixed rate = 0.001 and moq = 100:')
print(self.r.get_selling_result(self.prices, self.amounts_to_sell, 100))
test_rate_fee_result = self.r.get_selling_result(self.prices, self.amounts_to_sell, 100)
self.assertIs(np.allclose(test_rate_fee_result[0], [0., 0., -3300]), True, 'result incorrect')
self.assertAlmostEqual(test_rate_fee_result[1], 32967.0, msg='result incorrect')
self.assertAlmostEqual(test_rate_fee_result[2], 33, msg='result incorrect')
print('\nPurchase result with fixed rate = 0.003 and moq = 0:')
print(self.r.get_purchase_result(self.prices, self.cash_to_spend, 0))
test_rate_fee_result = self.r.get_purchase_result(self.prices, self.cash_to_spend, 0)
self.assertIs(np.allclose(test_rate_fee_result[0], [0., 997.00897308, 0.]), True, 'result incorrect')
self.assertAlmostEqual(test_rate_fee_result[1], -20000.0, msg='result incorrect')
self.assertAlmostEqual(test_rate_fee_result[2], 59.82053838484547, msg='result incorrect')
print('\nPurchase result with fixed rate = 0.003 and moq = 1:')
print(self.r.get_purchase_result(self.prices, self.cash_to_spend, 1))
test_rate_fee_result = self.r.get_purchase_result(self.prices, self.cash_to_spend, 1)
self.assertIs(np.allclose(test_rate_fee_result[0], [0., 997., 0.]), True, 'result incorrect')
self.assertAlmostEqual(test_rate_fee_result[1], -19999.82, msg='result incorrect')
self.assertAlmostEqual(test_rate_fee_result[2], 59.82, msg='result incorrect')
print('\nPurchase result with fixed rate = 0.003 and moq = 100:')
print(self.r.get_purchase_result(self.prices, self.cash_to_spend, 100))
test_rate_fee_result = self.r.get_purchase_result(self.prices, self.cash_to_spend, 100)
self.assertIs(np.allclose(test_rate_fee_result[0], [0., 900., 0.]), True, 'result incorrect')
self.assertAlmostEqual(test_rate_fee_result[1], -18054., msg='result incorrect')
self.assertAlmostEqual(test_rate_fee_result[2], 54.0, msg='result incorrect')
def test_min_fee(self):
"""Test minimum transaction fees"""
self.r.buy_rate = 0.
self.r.sell_rate = 0.
self.r.buy_fix = 0.
self.r.sell_fix = 0.
self.r.buy_min = 300
self.r.sell_min = 300
self.r.slipage = 0.
print('\npurchase result with fixed cost rate with min fee = 300 and moq = 0:')
print(self.r.get_purchase_result(self.prices, self.cash_to_spend, 0))
test_min_fee_result = self.r.get_purchase_result(self.prices, self.cash_to_spend, 0)
self.assertIs(np.allclose(test_min_fee_result[0], [0., 985, 0.]), True, 'result incorrect')
self.assertAlmostEqual(test_min_fee_result[1], -20000.0, msg='result incorrect')
self.assertAlmostEqual(test_min_fee_result[2], 300.0, msg='result incorrect')
print('\npurchase result with fixed cost rate with min fee = 300 and moq = 10:')
print(self.r.get_purchase_result(self.prices, self.cash_to_spend, 10))
test_min_fee_result = self.r.get_purchase_result(self.prices, self.cash_to_spend, 10)
self.assertIs(np.allclose(test_min_fee_result[0], [0., 980, 0.]), True, 'result incorrect')
self.assertAlmostEqual(test_min_fee_result[1], -19900.0, msg='result incorrect')
self.assertAlmostEqual(test_min_fee_result[2], 300.0, msg='result incorrect')
print('\npurchase result with fixed cost rate with min fee = 300 and moq = 100:')
print(self.r.get_purchase_result(self.prices, self.cash_to_spend, 100))
test_min_fee_result = self.r.get_purchase_result(self.prices, self.cash_to_spend, 100)
self.assertIs(np.allclose(test_min_fee_result[0], [0., 900, 0.]), True, 'result incorrect')
self.assertAlmostEqual(test_min_fee_result[1], -18300.0, msg='result incorrect')
self.assertAlmostEqual(test_min_fee_result[2], 300.0, msg='result incorrect')
print('\nselling result with fixed cost rate with min fee = 300 and moq = 0:')
print(self.r.get_selling_result(self.prices, self.amounts_to_sell))
test_min_fee_result = self.r.get_selling_result(self.prices, self.amounts_to_sell)
self.assertIs(np.allclose(test_min_fee_result[0], [0, 0, -3333.3333]), True, 'result incorrect')
self.assertAlmostEqual(test_min_fee_result[1], 33033.333)
self.assertAlmostEqual(test_min_fee_result[2], 300.0)
print('\nselling result with fixed cost rate with min fee = 300 and moq = 1:')
print(self.r.get_selling_result(self.prices, self.amounts_to_sell, 1))
test_min_fee_result = self.r.get_selling_result(self.prices, self.amounts_to_sell, 1)
self.assertIs(np.allclose(test_min_fee_result[0], [0, 0, -3333]), True, 'result incorrect')
self.assertAlmostEqual(test_min_fee_result[1], 33030)
self.assertAlmostEqual(test_min_fee_result[2], 300.0)
print('\nselling result with fixed cost rate with min fee = 300 and moq = 100:')
print(self.r.get_selling_result(self.prices, self.amounts_to_sell, 100))
test_min_fee_result = self.r.get_selling_result(self.prices, self.amounts_to_sell, 100)
self.assertIs(np.allclose(test_min_fee_result[0], [0, 0, -3300]), True, 'result incorrect')
self.assertAlmostEqual(test_min_fee_result[1], 32700)
self.assertAlmostEqual(test_min_fee_result[2], 300.0)
def test_rate_with_min(self):
"""Test the effect of the minimum fee on the other fee-rate parameters"""
self.r.buy_rate = 0.0153
self.r.sell_rate = 0.01
self.r.buy_fix = 0.
self.r.sell_fix = 0.
self.r.buy_min = 300
self.r.sell_min = 333
self.r.slipage = 0.
print('\npurchase result with fixed cost rate with buy_rate = 0.0153, min fee = 300 and moq = 0:')
print(self.r.get_purchase_result(self.prices, self.cash_to_spend, 0))
test_rate_with_min_result = self.r.get_purchase_result(self.prices, self.cash_to_spend, 0)
self.assertIs(np.allclose(test_rate_with_min_result[0], [0., 984.9305624, 0.]), True, 'result incorrect')
self.assertAlmostEqual(test_rate_with_min_result[1], -20000.0, msg='result incorrect')
self.assertAlmostEqual(test_rate_with_min_result[2], 301.3887520929774, msg='result incorrect')
print('\npurchase result with fixed cost rate with buy_rate = 0.0153, min fee = 300 and moq = 10:')
print(self.r.get_purchase_result(self.prices, self.cash_to_spend, 10))
test_rate_with_min_result = self.r.get_purchase_result(self.prices, self.cash_to_spend, 10)
self.assertIs(np.allclose(test_rate_with_min_result[0], [0., 980, 0.]), True, 'result incorrect')
self.assertAlmostEqual(test_rate_with_min_result[1], -19900.0, msg='result incorrect')
self.assertAlmostEqual(test_rate_with_min_result[2], 300.0, msg='result incorrect')
print('\npurchase result with fixed cost rate with buy_rate = 0.0153, min fee = 300 and moq = 100:')
print(self.r.get_purchase_result(self.prices, self.cash_to_spend, 100))
test_rate_with_min_result = self.r.get_purchase_result(self.prices, self.cash_to_spend, 100)
self.assertIs(np.allclose(test_rate_with_min_result[0], [0., 900, 0.]), True, 'result incorrect')
self.assertAlmostEqual(test_rate_with_min_result[1], -18300.0, msg='result incorrect')
self.assertAlmostEqual(test_rate_with_min_result[2], 300.0, msg='result incorrect')
print('\nselling result with fixed cost rate with sell_rate = 0.01, min fee = 333 and moq = 0:')
print(self.r.get_selling_result(self.prices, self.amounts_to_sell))
test_rate_with_min_result = self.r.get_selling_result(self.prices, self.amounts_to_sell)
self.assertIs(np.allclose(test_rate_with_min_result[0], [0, 0, -3333.3333]), True, 'result incorrect')
self.assertAlmostEqual(test_rate_with_min_result[1], 32999.99967)
self.assertAlmostEqual(test_rate_with_min_result[2], 333.33333)
print('\nselling result with fixed cost rate with sell_rate = 0.01, min fee = 333 and moq = 1:')
print(self.r.get_selling_result(self.prices, self.amounts_to_sell, 1))
test_rate_with_min_result = self.r.get_selling_result(self.prices, self.amounts_to_sell, 1)
self.assertIs(np.allclose(test_rate_with_min_result[0], [0, 0, -3333]), True, 'result incorrect')
self.assertAlmostEqual(test_rate_with_min_result[1], 32996.7)
self.assertAlmostEqual(test_rate_with_min_result[2], 333.3)
print('\nselling result with fixed cost rate with sell_rate = 0.01, min fee = 333 and moq = 100:')
print(self.r.get_selling_result(self.prices, self.amounts_to_sell, 100))
test_rate_with_min_result = self.r.get_selling_result(self.prices, self.amounts_to_sell, 100)
self.assertIs(np.allclose(test_rate_with_min_result[0], [0, 0, -3300]), True, 'result incorrect')
self.assertAlmostEqual(test_rate_with_min_result[1], 32667.0)
self.assertAlmostEqual(test_rate_with_min_result[2], 333.0)
def test_fixed_fee(self):
"""Test fixed transaction fees"""
self.r.buy_rate = 0.
self.r.sell_rate = 0.
self.r.buy_fix = 200
self.r.sell_fix = 150
self.r.buy_min = 0
self.r.sell_min = 0
self.r.slipage = 0
print('\nselling result of fixed cost with fixed fee = 150 and moq=0:')
print(self.r.get_selling_result(self.prices, self.amounts_to_sell, 0))
test_fixed_fee_result = self.r.get_selling_result(self.prices, self.amounts_to_sell)
self.assertIs(np.allclose(test_fixed_fee_result[0], [0, 0, -3333.3333]), True, 'result incorrect')
self.assertAlmostEqual(test_fixed_fee_result[1], 33183.333, msg='result incorrect')
self.assertAlmostEqual(test_fixed_fee_result[2], 150.0, msg='result incorrect')
print('\nselling result of fixed cost with fixed fee = 150 and moq=100:')
print(self.r.get_selling_result(self.prices, self.amounts_to_sell, 100))
test_fixed_fee_result = self.r.get_selling_result(self.prices, self.amounts_to_sell, 100)
self.assertIs(np.allclose(test_fixed_fee_result[0], [0, 0, -3300.]), True,
f'result incorrect, {test_fixed_fee_result[0]} does not equal to [0,0,-3400]')
self.assertAlmostEqual(test_fixed_fee_result[1], 32850., msg='result incorrect')
self.assertAlmostEqual(test_fixed_fee_result[2], 150., msg='result incorrect')
print('\npurchase result of fixed cost with fixed fee = 200:')
print(self.r.get_purchase_result(self.prices, self.cash_to_spend, 0))
test_fixed_fee_result = self.r.get_purchase_result(self.prices, self.cash_to_spend, 0)
self.assertIs(np.allclose(test_fixed_fee_result[0], [0., 990., 0.]), True, 'result incorrect')
self.assertAlmostEqual(test_fixed_fee_result[1], -20000.0, msg='result incorrect')
self.assertAlmostEqual(test_fixed_fee_result[2], 200.0, msg='result incorrect')
print('\npurchase result of fixed cost with fixed fee = 200:')
print(self.r.get_purchase_result(self.prices, self.cash_to_spend, 100))
test_fixed_fee_result = self.r.get_purchase_result(self.prices, self.cash_to_spend, 100)
self.assertIs(np.allclose(test_fixed_fee_result[0], [0., 900., 0.]), True, 'result incorrect')
self.assertAlmostEqual(test_fixed_fee_result[1], -18200.0, msg='result incorrect')
self.assertAlmostEqual(test_fixed_fee_result[2], 200.0, msg='result incorrect')
def test_slipage(self):
"""Test trading slippage"""
self.r.buy_fix = 0
self.r.sell_fix = 0
self.r.buy_min = 0
self.r.sell_min = 0
self.r.buy_rate = 0.003
self.r.sell_rate = 0.001
self.r.slipage = 1E-9
print('\npurchase result of fixed rate = 0.003 and slipage = 1E-10 and moq = 0:')
print(self.r.get_purchase_result(self.prices, self.cash_to_spend, 0))
print('\npurchase result of fixed rate = 0.003 and slipage = 1E-10 and moq = 100:')
print(self.r.get_purchase_result(self.prices, self.cash_to_spend, 100))
print('\nselling result with fixed rate = 0.001 and slipage = 1E-10:')
print(self.r.get_selling_result(self.prices, self.amounts_to_sell))
test_fixed_fee_result = self.r.get_selling_result(self.prices, self.amounts_to_sell)
self.assertIs(np.allclose(test_fixed_fee_result[0], [0, 0, -3333.3333]), True,
f'{test_fixed_fee_result[0]} does not equal to [0, 0, -10000]')
self.assertAlmostEqual(test_fixed_fee_result[1], 33298.88855591,
msg=f'{test_fixed_fee_result[1]} does not equal to 99890.')
self.assertAlmostEqual(test_fixed_fee_result[2], 34.44444409,
msg=f'{test_fixed_fee_result[2]} does not equal to -36.666663.')
test_fixed_fee_result = self.r.get_purchase_result(self.prices, self.cash_to_spend, 0)
self.assertIs(np.allclose(test_fixed_fee_result[0], [0., 996.98909294, 0.]), True, 'result incorrect')
self.assertAlmostEqual(test_fixed_fee_result[1], -20000.0, msg='result incorrect')
self.assertAlmostEqual(test_fixed_fee_result[2], 60.21814121353513, msg='result incorrect')
test_fixed_fee_result = self.r.get_purchase_result(self.prices, self.cash_to_spend, 100)
self.assertIs(np.allclose(test_fixed_fee_result[0], [0., 900., 0.]), True, 'result incorrect')
self.assertAlmostEqual(test_fixed_fee_result[1], -18054.36, msg='result incorrect')
self.assertAlmostEqual(test_fixed_fee_result[2], 54.36, msg='result incorrect')
class TestSpace(unittest.TestCase):
def test_creation(self):
"""
test if creation of space object is fine
"""
# first group of inputs, output Space with two discr axes from [0,10]
print('testing space objects\n')
# pars_list = [[(0, 10), (0, 10)],
# [[0, 10], [0, 10]]]
#
# types_list = ['discr',
# ['discr', 'discr']]
#
# input_pars = itertools.product(pars_list, types_list)
# for p in input_pars:
# # print(p)
# s = qt.Space(*p)
# b = s.boes
# t = s.types
# # print(s, t)
# self.assertIsInstance(s, qt.Space)
# self.assertEqual(b, [(0, 10), (0, 10)], 'boes incorrect!')
# self.assertEqual(t, ['discr', 'discr'], 'types incorrect')
#
pars_list = [[(0, 10), (0, 10)],
[[0, 10], [0, 10]]]
types_list = ['foo, bar',
['foo', 'bar']]
input_pars = itertools.product(pars_list, types_list)
for p in input_pars:
# print(p)
s = Space(*p)
b = s.boes
t = s.types
# print(s, t)
self.assertEqual(b, [(0, 10), (0, 10)], 'boes incorrect!')
self.assertEqual(t, ['enum', 'enum'], 'types incorrect')
pars_list = [[(0, 10), (0, 10)],
[[0, 10], [0, 10]]]
types_list = [['discr', 'foobar']]
input_pars = itertools.product(pars_list, types_list)
for p in input_pars:
# print(p)
s = Space(*p)
b = s.boes
t = s.types
# print(s, t)
self.assertEqual(b, [(0, 10), (0, 10)], 'boes incorrect!')
self.assertEqual(t, ['discr', 'enum'], 'types incorrect')
pars_list = [(0., 10), (0, 10)]
s = Space(pars=pars_list, par_types=None)
self.assertEqual(s.types, ['conti', 'discr'])
self.assertEqual(s.dim, 2)
self.assertEqual(s.size, (10.0, 11))
self.assertEqual(s.shape, (np.inf, 11))
self.assertEqual(s.count, np.inf)
self.assertEqual(s.boes, [(0., 10), (0, 10)])
pars_list = [(0., 10), (0, 10)]
s = Space(pars=pars_list, par_types='conti, enum')
self.assertEqual(s.types, ['conti', 'enum'])
self.assertEqual(s.dim, 2)
self.assertEqual(s.size, (10.0, 2))
self.assertEqual(s.shape, (np.inf, 2))
self.assertEqual(s.count, np.inf)
self.assertEqual(s.boes, [(0., 10), (0, 10)])
pars_list = [(1, 2), (2, 3), (3, 4)]
s = Space(pars=pars_list)
self.assertEqual(s.types, ['discr', 'discr', 'discr'])
self.assertEqual(s.dim, 3)
self.assertEqual(s.size, (2, 2, 2))
self.assertEqual(s.shape, (2, 2, 2))
self.assertEqual(s.count, 8)
self.assertEqual(s.boes, [(1, 2), (2, 3), (3, 4)])
pars_list = [(1, 2, 3), (2, 3, 4), (3, 4, 5)]
s = Space(pars=pars_list)
self.assertEqual(s.types, ['enum', 'enum', 'enum'])
self.assertEqual(s.dim, 3)
self.assertEqual(s.size, (3, 3, 3))
self.assertEqual(s.shape, (3, 3, 3))
self.assertEqual(s.count, 27)
self.assertEqual(s.boes, [(1, 2, 3), (2, 3, 4), (3, 4, 5)])
pars_list = [((1, 2, 3), (2, 3, 4), (3, 4, 5))]
s = Space(pars=pars_list)
self.assertEqual(s.types, ['enum'])
self.assertEqual(s.dim, 1)
self.assertEqual(s.size, (3,))
self.assertEqual(s.shape, (3,))
self.assertEqual(s.count, 3)
pars_list = ((1, 2, 3), (2, 3, 4), (3, 4, 5))
s = Space(pars=pars_list)
self.assertEqual(s.types, ['enum', 'enum', 'enum'])
self.assertEqual(s.dim, 3)
self.assertEqual(s.size, (3, 3, 3))
self.assertEqual(s.shape, (3, 3, 3))
self.assertEqual(s.count, 27)
self.assertEqual(s.boes, [(1, 2, 3), (2, 3, 4), (3, 4, 5)])
def test_extract(self):
"""
:return:
"""
pars_list = [(0, 10), (0, 10)]
types_list = ['discr', 'discr']
s = Space(pars=pars_list, par_types=types_list)
extracted_int, count = s.extract(3, 'interval')
extracted_int_list = list(extracted_int)
print('extracted int\n', extracted_int_list)
self.assertEqual(count, 16, 'extraction count wrong!')
self.assertEqual(extracted_int_list, [(0, 0), (0, 3), (0, 6), (0, 9), (3, 0), (3, 3),
(3, 6), (3, 9), (6, 0), (6, 3), (6, 6), (6, 9),
(9, 0), (9, 3), (9, 6), (9, 9)],
'space extraction wrong!')
extracted_rand, count = s.extract(10, 'rand')
extracted_rand_list = list(extracted_rand)
self.assertEqual(count, 10, 'extraction count wrong!')
print('extracted rand\n', extracted_rand_list)
for point in list(extracted_rand_list):
self.assertEqual(len(point), 2)
self.assertLessEqual(point[0], 10)
self.assertGreaterEqual(point[0], 0)
self.assertLessEqual(point[1], 10)
self.assertGreaterEqual(point[1], 0)
pars_list = [(0., 10), (0, 10)]
s = Space(pars=pars_list, par_types=None)
extracted_int2, count = s.extract(3, 'interval')
self.assertEqual(count, 16, 'extraction count wrong!')
extracted_int_list2 = list(extracted_int2)
self.assertEqual(extracted_int_list2, [(0, 0), (0, 3), (0, 6), (0, 9), (3, 0), (3, 3),
(3, 6), (3, 9), (6, 0), (6, 3), (6, 6), (6, 9),
(9, 0), (9, 3), (9, 6), (9, 9)],
'space extraction wrong!')
print('extracted int list 2\n', extracted_int_list2)
self.assertIsInstance(extracted_int_list2[0][0], float)
self.assertIsInstance(extracted_int_list2[0][1], (int, int64))
extracted_rand2, count = s.extract(10, 'rand')
self.assertEqual(count, 10, 'extraction count wrong!')
extracted_rand_list2 = list(extracted_rand2)
print('extracted rand list 2:\n', extracted_rand_list2)
for point in extracted_rand_list2:
self.assertEqual(len(point), 2)
self.assertIsInstance(point[0], float)
self.assertLessEqual(point[0], 10)
self.assertGreaterEqual(point[0], 0)
self.assertIsInstance(point[1], (int, int64))
self.assertLessEqual(point[1], 10)
self.assertGreaterEqual(point[1], 0)
pars_list = [(0., 10), ('a', 'b')]
s = Space(pars=pars_list, par_types='enum, enum')
extracted_int3, count = s.extract(1, 'interval')
self.assertEqual(count, 4, 'extraction count wrong!')
extracted_int_list3 = list(extracted_int3)
self.assertEqual(extracted_int_list3, [(0., 'a'), (0., 'b'), (10, 'a'), (10, 'b')],
'space extraction wrong!')
print('extracted int list 3\n', extracted_int_list3)
self.assertIsInstance(extracted_int_list3[0][0], float)
self.assertIsInstance(extracted_int_list3[0][1], str)
extracted_rand3, count = s.extract(3, 'rand')
self.assertEqual(count, 3, 'extraction count wrong!')
extracted_rand_list3 = list(extracted_rand3)
print('extracted rand list 3:\n', extracted_rand_list3)
for point in extracted_rand_list3:
self.assertEqual(len(point), 2)
self.assertIsInstance(point[0], (float, int))
self.assertLessEqual(point[0], 10)
self.assertGreaterEqual(point[0], 0)
self.assertIsInstance(point[1], str)
self.assertIn(point[1], ['a', 'b'])
pars_list = [((0, 10), (1, 'c'), ('a', 'b'), (1, 14))]
s = Space(pars=pars_list, par_types='enum')
extracted_int4, count = s.extract(1, 'interval')
self.assertEqual(count, 4, 'extraction count wrong!')
extracted_int_list4 = list(extracted_int4)
it = zip(extracted_int_list4, [(0, 10), (1, 'c'), (0, 'b'), (1, 14)])
for item, item2 in it:
print(item, item2)
self.assertTrue(all([tuple(ext_item) == item for ext_item, item in it]))
print('extracted int list 4\n', extracted_int_list4)
self.assertIsInstance(extracted_int_list4[0], tuple)
extracted_rand4, count = s.extract(3, 'rand')
self.assertEqual(count, 3, 'extraction count wrong!')
extracted_rand_list4 = list(extracted_rand4)
print('extracted rand list 4:\n', extracted_rand_list4)
for point in extracted_rand_list4:
self.assertEqual(len(point), 2)
self.assertIsInstance(point[0], (int, str))
self.assertIn(point[0], [0, 1, 'a'])
self.assertIsInstance(point[1], (int, str))
self.assertIn(point[1], [10, 14, 'b', 'c'])
self.assertIn(point, [(0., 10), (1, 'c'), ('a', 'b'), (1, 14)])
pars_list = [((0, 10), (1, 'c'), ('a', 'b'), (1, 14)), (1, 4)]
s = Space(pars=pars_list, par_types='enum, discr')
extracted_int5, count = s.extract(1, 'interval')
self.assertEqual(count, 16, 'extraction count wrong!')
extracted_int_list5 = list(extracted_int5)
for item, item2 in extracted_int_list5:
print(item, item2)
self.assertTrue(all([tuple(ext_item) == item for ext_item, item in it]))
print('extracted int list 5\n', extracted_int_list5)
self.assertIsInstance(extracted_int_list5[0], tuple)
extracted_rand5, count = s.extract(5, 'rand')
self.assertEqual(count, 5, 'extraction count wrong!')
extracted_rand_list5 = list(extracted_rand5)
print('extracted rand list 5:\n', extracted_rand_list5)
for point in extracted_rand_list5:
self.assertEqual(len(point), 2)
self.assertIsInstance(point[0], tuple)
print(f'type of point[1] is {type(point[1])}')
self.assertIsInstance(point[1], (int, np.int64))
self.assertIn(point[0], [(0., 10), (1, 'c'), ('a', 'b'), (1, 14)])
print(f'test incremental extraction')
pars_list = [(10., 250), (10., 250), (10., 250), (10., 250), (10., 250), (10., 250)]
s = Space(pars_list)
ext, count = s.extract(64, 'interval')
self.assertEqual(count, 4096)
points = list(ext)
# all points have been extracted; build ten subspaces around 10 of them
# check that each subspace is a Space and lies within s; extract a point set with 32 and verify the count is correct
for point in points[1000:1010]:
subspace = s.from_point(point, 64)
self.assertIsInstance(subspace, Space)
self.assertTrue(subspace in s)
self.assertEqual(subspace.dim, 6)
self.assertEqual(subspace.types, ['conti', 'conti', 'conti', 'conti', 'conti', 'conti'])
ext, count = subspace.extract(32)
points = list(ext)
self.assertGreaterEqual(count, 512)
self.assertLessEqual(count, 4096)
print(f'\n---------------------------------'
f'\nthe space created around point <{point}> is'
f'\n{subspace.boes}'
f'\nand extracted {count} points, the first 5 are:'
f'\n{points[:5]}')
def test_axis_extract(self):
# test axis object with conti type
axis = Axis((0., 5))
self.assertIsInstance(axis, Axis)
self.assertEqual(axis.axis_type, 'conti')
self.assertEqual(axis.axis_boe, (0., 5.))
self.assertEqual(axis.count, np.inf)
self.assertEqual(axis.size, 5.0)
self.assertTrue(np.allclose(axis.extract(1, 'int'), [0., 1., 2., 3., 4.]))
self.assertTrue(np.allclose(axis.extract(0.5, 'int'), [0., 0.5, 1., 1.5, 2., 2.5, 3., 3.5, 4., 4.5]))
extracted = axis.extract(8, 'rand')
self.assertEqual(len(extracted), 8)
self.assertTrue(all([(0 <= item <= 5) for item in extracted]))
# test axis object with discrete type
axis = Axis((1, 5))
self.assertIsInstance(axis, Axis)
self.assertEqual(axis.axis_type, 'discr')
self.assertEqual(axis.axis_boe, (1, 5))
self.assertEqual(axis.count, 5)
self.assertEqual(axis.size, 5)
self.assertTrue(np.allclose(axis.extract(1, 'int'), [1, 2, 3, 4, 5]))
self.assertRaises(ValueError, axis.extract, 0.5, 'int')
extracted = axis.extract(8, 'rand')
self.assertEqual(len(extracted), 8)
self.assertTrue(all([(item in [1, 2, 3, 4, 5]) for item in extracted]))
# test axis object with enumerate type
axis = Axis((1, 5, 7, 10, 'A', 'F'))
self.assertIsInstance(axis, Axis)
self.assertEqual(axis.axis_type, 'enum')
self.assertEqual(axis.axis_boe, (1, 5, 7, 10, 'A', 'F'))
self.assertEqual(axis.count, 6)
self.assertEqual(axis.size, 6)
self.assertEqual(axis.extract(1, 'int'), [1, 5, 7, 10, 'A', 'F'])
self.assertRaises(ValueError, axis.extract, 0.5, 'int')
extracted = axis.extract(8, 'rand')
self.assertEqual(len(extracted), 8)
self.assertTrue(all([(item in [1, 5, 7, 10, 'A', 'F']) for item in extracted]))
def test_from_point(self):
"""Test generating a space from a single point"""
# create a space, then specify a point in it and a distance to generate a sub-space
pars_list = [(0., 10), (0, 10)]
s = Space(pars=pars_list, par_types=None)
self.assertEqual(s.types, ['conti', 'discr'])
self.assertEqual(s.dim, 2)
self.assertEqual(s.size, (10., 11))
self.assertEqual(s.shape, (np.inf, 11))
self.assertEqual(s.count, np.inf)
self.assertEqual(s.boes, [(0., 10), (0, 10)])
print('create subspace from a point in space')
p = (3, 3)
distance = 2
subspace = s.from_point(p, distance)
self.assertIsInstance(subspace, Space)
self.assertEqual(subspace.types, ['conti', 'discr'])
self.assertEqual(subspace.dim, 2)
self.assertEqual(subspace.size, (4.0, 5))
self.assertEqual(subspace.shape, (np.inf, 5))
self.assertEqual(subspace.count, np.inf)
self.assertEqual(subspace.boes, [(1, 5), (1, 5)])
print('create subspace from a 6 dimensional discrete space')
s = Space(pars=[(10, 250), (10, 250), (10, 250), (10, 250), (10, 250), (10, 250)])
p = (15, 200, 150, 150, 150, 150)
d = 10
subspace = s.from_point(p, d)
self.assertIsInstance(subspace, Space)
self.assertEqual(subspace.types, ['discr', 'discr', 'discr', 'discr', 'discr', 'discr'])
self.assertEqual(subspace.dim, 6)
self.assertEqual(subspace.volume, 65345616)
self.assertEqual(subspace.size, (16, 21, 21, 21, 21, 21))
self.assertEqual(subspace.shape, (16, 21, 21, 21, 21, 21))
self.assertEqual(subspace.count, 65345616)
self.assertEqual(subspace.boes, [(10, 25), (190, 210), (140, 160), (140, 160), (140, 160), (140, 160)])
print('create subspace from a 6 dimensional continuous space')
s = Space(pars=[(10., 250), (10., 250), (10., 250), (10., 250), (10., 250), (10., 250)])
p = (15, 200, 150, 150, 150, 150)
d = 10
subspace = s.from_point(p, d)
self.assertIsInstance(subspace, Space)
self.assertEqual(subspace.types, ['conti', 'conti', 'conti', 'conti', 'conti', 'conti'])
self.assertEqual(subspace.dim, 6)
self.assertEqual(subspace.volume, 48000000)
self.assertEqual(subspace.size, (15.0, 20.0, 20.0, 20.0, 20.0, 20.0))
self.assertEqual(subspace.shape, (np.inf, np.inf, np.inf, np.inf, np.inf, np.inf))
self.assertEqual(subspace.count, np.inf)
self.assertEqual(subspace.boes, [(10, 25), (190, 210), (140, 160), (140, 160), (140, 160), (140, 160)])
print('create subspace with different distances on each dimension')
s = Space(pars=[(10., 250), (10., 250), (10., 250), (10., 250), (10., 250), (10., 250)])
p = (15, 200, 150, 150, 150, 150)
d = [10, 5, 5, 10, 10, 5]
subspace = s.from_point(p, d)
self.assertIsInstance(subspace, Space)
self.assertEqual(subspace.types, ['conti', 'conti', 'conti', 'conti', 'conti', 'conti'])
self.assertEqual(subspace.dim, 6)
self.assertEqual(subspace.volume, 6000000)
self.assertEqual(subspace.size, (15.0, 10.0, 10.0, 20.0, 20.0, 10.0))
self.assertEqual(subspace.shape, (np.inf, np.inf, np.inf, np.inf, np.inf, np.inf))
self.assertEqual(subspace.count, np.inf)
self.assertEqual(subspace.boes, [(10, 25), (195, 205), (145, 155), (140, 160), (140, 160), (145, 155)])
class TestCashPlan(unittest.TestCase):
def setUp(self):
self.cp1 = qt.CashPlan(['2012-01-01', '2010-01-01'], [10000, 20000], 0.1)
self.cp1.info()
self.cp2 = qt.CashPlan(['20100501'], 10000)
self.cp2.info()
self.cp3 = qt.CashPlan(pd.date_range(start='2019-01-01',
freq='Y',
periods=12),
[i * 1000 + 10000 for i in range(12)],
0.035)
self.cp3.info()
def test_creation(self):
self.assertIsInstance(self.cp1, qt.CashPlan, 'CashPlan object creation wrong')
self.assertIsInstance(self.cp2, qt.CashPlan, 'CashPlan object creation wrong')
self.assertIsInstance(self.cp3, qt.CashPlan, 'CashPlan object creation wrong')
# test __repr__()
print(self.cp1)
print(self.cp2)
print(self.cp3)
# test __str__()
self.cp1.info()
self.cp2.info()
self.cp3.info()
# test assertion errors
self.assertRaises(AssertionError, qt.CashPlan, '2016-01-01', [10000, 10000])
self.assertRaises(KeyError, qt.CashPlan, '2020-20-20', 10000)
def test_properties(self):
self.assertEqual(self.cp1.amounts, [20000, 10000], 'property wrong')
self.assertEqual(self.cp1.first_day, Timestamp('2010-01-01'))
self.assertEqual(self.cp1.last_day, Timestamp('2012-01-01'))
self.assertEqual(self.cp1.investment_count, 2)
self.assertEqual(self.cp1.period, 730)
self.assertEqual(self.cp1.dates, [Timestamp('2010-01-01'), Timestamp('2012-01-01')])
self.assertEqual(self.cp1.ir, 0.1)
self.assertAlmostEqual(self.cp1.closing_value, 34200)
self.assertAlmostEqual(self.cp2.closing_value, 10000)
self.assertAlmostEqual(self.cp3.closing_value, 220385.3483685)
self.assertIsInstance(self.cp1.plan, pd.DataFrame)
self.assertIsInstance(self.cp2.plan, pd.DataFrame)
self.assertIsInstance(self.cp3.plan, pd.DataFrame)
def test_operation(self):
cp_self_add = self.cp1 + self.cp1
cp_add = self.cp1 + self.cp2
cp_add_int = self.cp1 + 10000
cp_mul_int = self.cp1 * 2
cp_mul_float = self.cp2 * 1.5
cp_mul_time = 3 * self.cp2
cp_mul_time2 = 2 * self.cp1
cp_mul_time3 = 2 * self.cp3
cp_mul_float2 = 2. * self.cp3
self.assertIsInstance(cp_self_add, qt.CashPlan)
self.assertEqual(cp_self_add.amounts, [40000, 20000])
self.assertEqual(cp_add.amounts, [20000, 10000, 10000])
self.assertEqual(cp_add_int.amounts, [30000, 20000])
self.assertEqual(cp_mul_int.amounts, [40000, 20000])
self.assertEqual(cp_mul_float.amounts, [15000])
self.assertEqual(cp_mul_float.dates, [Timestamp('2010-05-01')])
self.assertEqual(cp_mul_time.amounts, [10000, 10000, 10000])
self.assertEqual(cp_mul_time.dates, [Timestamp('2010-05-01'),
Timestamp('2011-05-01'),
Timestamp('2012-04-30')])
self.assertEqual(cp_mul_time2.amounts, [20000, 10000, 20000, 10000])
self.assertEqual(cp_mul_time2.dates, [Timestamp('2010-01-01'),
Timestamp('2012-01-01'),
Timestamp('2014-01-01'),
Timestamp('2016-01-01')])
self.assertEqual(cp_mul_time3.dates, [Timestamp('2019-12-31'),
Timestamp('2020-12-31'),
Timestamp('2021-12-31'),
Timestamp('2022-12-31'),
Timestamp('2023-12-31'),
Timestamp('2024-12-31'),
Timestamp('2025-12-31'),
Timestamp('2026-12-31'),
Timestamp('2027-12-31'),
Timestamp('2028-12-31'),
Timestamp('2029-12-31'),
Timestamp('2030-12-31'),
Timestamp('2031-12-29'),
Timestamp('2032-12-29'),
Timestamp('2033-12-29'),
Timestamp('2034-12-29'),
Timestamp('2035-12-29'),
Timestamp('2036-12-29'),
Timestamp('2037-12-29'),
Timestamp('2038-12-29'),
Timestamp('2039-12-29'),
Timestamp('2040-12-29'),
Timestamp('2041-12-29'),
Timestamp('2042-12-29')])
self.assertEqual(cp_mul_float2.dates, [Timestamp('2019-12-31'),
Timestamp('2020-12-31'),
Timestamp('2021-12-31'),
Timestamp('2022-12-31'),
Timestamp('2023-12-31'),
Timestamp('2024-12-31'),
Timestamp('2025-12-31'),
Timestamp('2026-12-31'),
Timestamp('2027-12-31'),
Timestamp('2028-12-31'),
Timestamp('2029-12-31'),
Timestamp('2030-12-31')])
self.assertEqual(cp_mul_float2.amounts, [20000.0,
22000.0,
24000.0,
26000.0,
28000.0,
30000.0,
32000.0,
34000.0,
36000.0,
38000.0,
40000.0,
42000.0])
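# The closing-value expectations above are consistent with compounding each
# investment from its own date up to the date of the last investment. A minimal
# sketch of that assumed formula (the real qt.CashPlan may count periods
# differently, e.g. for the yearly plan in cp3):
import pandas as pd

def expected_closing_value(dates, amounts, ir):
    dates = pd.to_datetime(list(dates))
    last = max(dates)
    # e.g. ['2010-01-01', '2012-01-01'], [20000, 10000], 0.1 -> 34200.0
    return sum(a * (1 + ir) ** ((last - d).days / 365.)
               for d, a in zip(dates, amounts))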
class TestPool(unittest.TestCase):
def setUp(self):
self.p = ResultPool(5)
self.items = ['first', 'second', (1, 2, 3), 'this', 24]
self.perfs = [1, 2, 3, 4, 5]
self.additional_result1 = ('abc', 12)
self.additional_result2 = ([1, 2], -1)
self.additional_result3 = (12, 5)
def test_create(self):
self.assertIsInstance(self.p, ResultPool)
def test_operation(self):
self.p.in_pool(self.additional_result1[0], self.additional_result1[1])
self.p.cut()
self.assertEqual(self.p.item_count, 1)
self.assertEqual(self.p.items, ['abc'])
for item, perf in zip(self.items, self.perfs):
self.p.in_pool(item, perf)
self.assertEqual(self.p.item_count, 6)
self.assertEqual(self.p.items, ['abc', 'first', 'second', (1, 2, 3), 'this', 24])
self.p.cut()
self.assertEqual(self.p.items, ['second', (1, 2, 3), 'this', 24, 'abc'])
self.assertEqual(self.p.perfs, [2, 3, 4, 5, 12])
self.p.in_pool(self.additional_result2[0], self.additional_result2[1])
self.p.in_pool(self.additional_result3[0], self.additional_result3[1])
self.assertEqual(self.p.item_count, 7)
self.p.cut(keep_largest=False)
self.assertEqual(self.p.items, [[1, 2], 'second', (1, 2, 3), 'this', 24])
self.assertEqual(self.p.perfs, [-1, 2, 3, 4, 5])
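# ResultPool.cut() above keeps only the best `capacity` performers (or the worst
# ones when keep_largest=False) and returns them ordered by performance. A minimal
# sketch of that assumed behaviour, matching the expectations checked above:
def cut_pool(items, perfs, capacity=5, keep_largest=True):
    pairs = sorted(zip(perfs, items), key=lambda p: p[0])
    kept = pairs[-capacity:] if keep_largest else pairs[:capacity]
    return [i for _, i in kept], [p for p, _ in kept]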
class TestCoreSubFuncs(unittest.TestCase):
"""Test all functions in core.py"""
def setUp(self):
pass
def test_input_to_list(self):
print('Testing input_to_list() function')
input_str = 'first'
self.assertEqual(qt.utilfuncs.input_to_list(input_str, 3), ['first', 'first', 'first'])
self.assertEqual(qt.utilfuncs.input_to_list(input_str, 4), ['first', 'first', 'first', 'first'])
self.assertEqual(qt.utilfuncs.input_to_list(input_str, 2, None), ['first', 'first'])
input_list = ['first', 'second']
self.assertEqual(qt.utilfuncs.input_to_list(input_list, 3), ['first', 'second', None])
self.assertEqual(qt.utilfuncs.input_to_list(input_list, 4, 'padder'), ['first', 'second', 'padder', 'padder'])
self.assertEqual(qt.utilfuncs.input_to_list(input_list, 1), ['first', 'second'])
self.assertEqual(qt.utilfuncs.input_to_list(input_list, -5), ['first', 'second'])
def test_point_in_space(self):
sp = Space([(0., 10.), (0., 10.), (0., 10.)])
p1 = (5.5, 3.2, 7)
p2 = (-1, 3, 10)
self.assertTrue(p1 in sp)
print(f'point {p1} is in space {sp}')
self.assertFalse(p2 in sp)
print(f'point {p2} is not in space {sp}')
sp = Space([(0., 10.), (0., 10.), range(40, 3, -2)], 'conti, conti, enum')
p1 = (5.5, 3.2, 8)
self.assertTrue(p1 in sp)
print(f'point {p1} is in space {sp}')
def test_space_in_space(self):
print('test if a space is in another space')
sp = Space([(0., 10.), (0., 10.), (0., 10.)])
sp2 = Space([(0., 10.), (0., 10.), (0., 10.)])
self.assertTrue(sp2 in sp)
self.assertTrue(sp in sp2)
print(f'space {sp2} is in space {sp}\n'
f'and space {sp} is in space {sp2}\n'
f'they are equal to each other\n')
sp2 = Space([(0, 5.), (2, 7.), (3., 9.)])
self.assertTrue(sp2 in sp)
self.assertFalse(sp in sp2)
print(f'space {sp2} is in space {sp}\n'
f'and space {sp} is not in space {sp2}\n'
f'{sp2} is a sub space of {sp}\n')
sp2 = Space([(0, 5), (2, 7), (3., 9)])
self.assertFalse(sp2 in sp)
self.assertFalse(sp in sp2)
print(f'space {sp2} is not in space {sp}\n'
f'and space {sp} is not in space {sp2}\n'
f'they have different types of axes\n')
sp = Space([(0., 10.), (0., 10.), range(40, 3, -2)])
self.assertFalse(sp in sp2)
self.assertFalse(sp2 in sp)
print(f'space {sp2} is not in space {sp}\n'
f'and space {sp} is not in space {sp2}\n'
f'they have different types of axes\n')
def test_space_around_centre(self):
sp = Space([(0., 10.), (0., 10.), (0., 10.)])
p1 = (5.5, 3.2, 7)
ssp = space_around_centre(space=sp, centre=p1, radius=1.2)
print(ssp.boes)
print('\ntest multiple diameters:')
self.assertEqual(ssp.boes, [(4.3, 6.7), (2.0, 4.4), (5.8, 8.2)])
ssp = space_around_centre(space=sp, centre=p1, radius=[1, 2, 1])
print(ssp.boes)
self.assertEqual(ssp.boes, [(4.5, 6.5), (1.2000000000000002, 5.2), (6.0, 8.0)])
print('\ntest points on edge:')
p2 = (5.5, 3.2, 10)
ssp = space_around_centre(space=sp, centre=p1, radius=3.9)
print(ssp.boes)
self.assertEqual(ssp.boes, [(1.6, 9.4), (0.0, 7.1), (3.1, 10.0)])
print('\ntest enum spaces')
sp = Space([(0, 100), range(40, 3, -2)], 'discr, enum')
p1 = [34, 12]
ssp = space_around_centre(space=sp, centre=p1, radius=5, ignore_enums=False)
self.assertEqual(ssp.boes, [(29, 39), (22, 20, 18, 16, 14, 12, 10, 8, 6, 4)])
print(ssp.boes)
print('\ntest enum space and ignore enum axis')
ssp = space_around_centre(space=sp, centre=p1, radius=5)
self.assertEqual(ssp.boes, [(29, 39),
(40, 38, 36, 34, 32, 30, 28, 26, 24, 22, 20, 18, 16, 14, 12, 10, 8, 6, 4)])
print(sp.boes)
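# The bounds checked above are consistent with clipping centre +/- radius to each
# axis range; a minimal sketch of that assumed rule for continuous axes (the real
# space_around_centre also handles discrete and enum axes):
def clipped_bounds(boes, centre, radius):
    radii = radius if isinstance(radius, (list, tuple)) else [radius] * len(boes)
    return [(max(lo, c - r), min(hi, c + r))
            for (lo, hi), c, r in zip(boes, centre, radii)]
# clipped_bounds([(0., 10.)] * 3, (5.5, 3.2, 7), 1.2) -> [(4.3, 6.7), (2.0, 4.4), (5.8, 8.2)]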
def test_get_stock_pool(self):
print(f'start test building stock pool function\n')
share_basics = stock_basic(fields='ts_code,symbol,name,area,industry,market,list_date,exchange')
print(f'\nselect all stocks by area')
stock_pool = qt.get_stock_pool(area='上海')
print(f'{len(stock_pool)} shares selected, first 5 are: {stock_pool[0:5]}\n'
f'check if all stock areas are "上海"\n'
f'{share_basics[np.isin(share_basics.ts_code, stock_pool)].head()}')
self.assertTrue(share_basics[np.isin(share_basics.ts_code, stock_pool)]['area'].eq('上海').all())
print(f'\nselect all stocks by multiple areas')
stock_pool = qt.get_stock_pool(area='贵州,北京,天津')
print(f'\n{len(stock_pool)} shares selected, first 5 are: {stock_pool[0:5]}\n'
f'check if all stock areas are in list of ["贵州", "北京", "天津"]\n'
f'{share_basics[np.isin(share_basics.ts_code, stock_pool)].head()}')
self.assertTrue(share_basics[np.isin(share_basics.ts_code, stock_pool)]['area'].isin(['贵州',
'北京',
'天津']).all())
print(f'\nselect all stocks by area and industry')
stock_pool = qt.get_stock_pool(area='四川', industry='银行, 金融')
print(f'\n{len(stock_pool)} shares selected, first 5 are: {stock_pool[0:5]}\n'
f'check if all stock areas are "四川", and industry in ["银行", "金融"]\n'
f'{share_basics[np.isin(share_basics.ts_code, stock_pool)].head()}')
self.assertTrue(share_basics[np.isin(share_basics.ts_code, stock_pool)]['industry'].isin(['银行', '金融']).all())
self.assertTrue(share_basics[np.isin(share_basics.ts_code, stock_pool)]['area'].isin(['四川']).all())
print(f'\nselect all stocks by industry')
stock_pool = qt.get_stock_pool(industry='银行, 金融')
print(f'\n{len(stock_pool)} shares selected, first 5 are: {stock_pool[0:5]}\n'
f'check if all stocks industry in ["银行", "金融"]\n'
f'{share_basics[np.isin(share_basics.ts_code, stock_pool)].head()}')
self.assertTrue(share_basics[np.isin(share_basics.ts_code, stock_pool)]['industry'].isin(['银行', '金融']).all())
print(f'\nselect all stocks by market')
stock_pool = qt.get_stock_pool(market='主板')
print(f'\n{len(stock_pool)} shares selected, first 5 are: {stock_pool[0:5]}\n'
f'check if all stock market is "主板"\n'
f'{share_basics[np.isin(share_basics.ts_code, stock_pool)].head()}')
self.assertTrue(share_basics[np.isin(share_basics.ts_code, stock_pool)]['market'].isin(['主板']).all())
print(f'\nselect all stocks by market and list date')
stock_pool = qt.get_stock_pool(date='2000-01-01', market='主板')
print(f'\n{len(stock_pool)} shares selected, first 5 are: {stock_pool[0:5]}\n'
f'check if all stock market is "主板", and list date after "2000-01-01"\n'
f'{share_basics[np.isin(share_basics.ts_code, stock_pool)].head()}')
self.assertTrue(share_basics[np.isin(share_basics.ts_code, stock_pool)]['market'].isin(['主板']).all())
self.assertTrue(share_basics[np.isin(share_basics.ts_code, stock_pool)]['list_date'].le('2000-01-01').all())
print(f'\nselect all stocks by list date')
stock_pool = qt.get_stock_pool(date='1997-01-01')
print(f'\n{len(stock_pool)} shares selected, first 5 are: {stock_pool[0:5]}\n'
f'check if all list date after "1997-01-01"\n'
f'{share_basics[np.isin(share_basics.ts_code, stock_pool)].head()}')
self.assertTrue(share_basics[np.isin(share_basics.ts_code, stock_pool)]['list_date'].le('1997-01-01').all())
print(f'\nselect all stocks by exchange')
stock_pool = qt.get_stock_pool(exchange='SSE')
print(f'\n{len(stock_pool)} shares selected, first 5 are: {stock_pool[0:5]}\n'
f'check if all exchanges are "SSE"\n'
f'{share_basics[np.isin(share_basics.ts_code, stock_pool)].head()}')
self.assertTrue(share_basics[np.isin(share_basics.ts_code, stock_pool)]['exchange'].eq('SSE').all())
print(f'\nselect all stocks by industry, area and list date')
industry_list = ['银行', '全国地产', '互联网', '环境保护', '区域地产',
'酒店餐饮', '运输设备', '综合类', '建筑工程', '玻璃',
'家用电器', '文教休闲', '其他商业', '元器件', 'IT设备',
'其他建材', '汽车服务', '火力发电', '医药商业', '汽车配件',
'广告包装', '轻工机械', '新型电力', '多元金融', '饲料']
area_list = ['深圳', '北京', '吉林', '江苏', '辽宁', '广东',
'安徽', '四川', '浙江', '湖南', '河北', '新疆',
'山东', '河南', '山西', '江西', '青海', '湖北',
'内蒙', '海南', '重庆', '陕西', '福建', '广西',
'上海']
stock_pool = qt.get_stock_pool(date='19980101',
industry=industry_list,
area=area_list)
print(f'\n{len(stock_pool)} shares selected, first 5 are: {stock_pool[0:5]}\n'
f'check if all exchanges are "SSE"\n'
f'{share_basics[np.isin(share_basics.ts_code, stock_pool)].head()}')
self.assertTrue(share_basics[np.isin(share_basics.ts_code, stock_pool)]['list_date'].le('1998-01-01').all())
self.assertTrue(share_basics[np.isin(share_basics.ts_code, stock_pool)]['industry'].isin(industry_list).all())
self.assertTrue(share_basics[np.isin(share_basics.ts_code, stock_pool)]['area'].isin(area_list).all())
self.assertRaises(KeyError, qt.get_stock_pool, industry=25)
self.assertRaises(KeyError, qt.get_stock_pool, share_name='000300.SH')
self.assertRaises(KeyError, qt.get_stock_pool, markets='SSE')
class TestEvaluations(unittest.TestCase):
"""Test all evaluation functions in core.py"""
# The manually calculated reference results below are kept in an Excel file
def setUp(self):
"""用np.random生成测试用数据,使用cumsum()模拟股票走势"""
self.test_data1 = pd.DataFrame([5.34892759, 5.65768696, 5.79227076, 5.56266871, 5.88189632,
6.24795001, 5.92755558, 6.38748165, 6.31331899, 5.86001665,
5.61048472, 5.30696736, 5.40406792, 5.03180571, 5.37886353,
5.78608307, 6.26540339, 6.59348026, 6.90943801, 6.70911677,
6.33015954, 6.06697417, 5.9752499, 6.45786408, 6.95273763,
6.7691991, 6.70355481, 6.28048969, 6.61344541, 6.24620003,
6.47409983, 6.4522311, 6.8773094, 6.99727832, 6.59262674,
6.59014938, 6.63758237, 6.38331869, 6.09902105, 6.35390109,
6.51993567, 6.87244592, 6.83963485, 7.08797815, 6.88003144,
6.83657323, 6.97819483, 7.01600276, 7.12554256, 7.58941523,
7.61014457, 7.21224091, 7.48174399, 7.66490854, 7.51371968,
7.11586198, 6.97147399, 6.67453301, 6.2042138, 6.33967015,
6.22187938, 5.98426993, 6.37096079, 6.55897161, 6.26422645,
6.69363762, 7.12668015, 6.83232926, 7.30524081, 7.4262041,
7.54031383, 7.17545919, 7.20659257, 7.44886016, 7.37094393,
6.88011022, 7.08142491, 6.74992833, 6.5967097, 6.21336693,
6.35565105, 6.82347596, 6.44773408, 6.84538053, 6.47966466,
6.09699528, 5.63927014, 6.01081024, 6.20585303, 6.60528206,
7.01594726, 7.03684251, 6.76574977, 7.08740846, 6.65336462,
7.07126686, 6.80058956, 6.79241977, 6.47843472, 6.39245474],
columns=['value'])
self.test_data2 = pd.DataFrame([5.09276527, 4.83828592, 4.6000911, 4.63170487, 4.63566451,
4.50546921, 4.96390044, 4.64557907, 4.25787855, 3.76585551,
3.38826334, 3.76243422, 4.06365426, 3.87084726, 3.91400935,
4.13438822, 4.27064542, 4.56776104, 5.03800296, 5.31070529,
5.39902276, 5.21186286, 5.05683114, 4.68842046, 5.11895168,
5.27151571, 5.72294993, 6.09961056, 6.26569635, 6.48806151,
6.16058885, 6.2582459, 6.38934791, 6.57831057, 6.19508831,
5.70155153, 5.20435735, 5.36538825, 5.40450056, 5.2227697,
5.37828693, 5.53058991, 6.02996797, 5.76802181, 5.66166713,
6.07988994, 5.61794367, 5.63218151, 6.10728013, 6.0324168,
6.27164431, 6.27551239, 6.52329665, 7.00470007, 7.34163113,
7.33699083, 7.67661334, 8.09395749, 7.68086668, 7.58341161,
7.46219819, 7.58671899, 7.19348298, 7.40088323, 7.47562005,
7.93342043, 8.2286081, 8.3521632, 8.43590025, 8.34977395,
8.57563095, 8.81586328, 9.08738649, 9.01542031, 8.8653815,
9.21763111, 9.04233017, 8.59533999, 8.47590075, 8.70857222,
8.78890756, 8.92697606, 9.35743773, 9.68280866, 10.15622021,
10.55908549, 10.6337894, 10.55197128, 10.65435176, 10.54611045,
10.19432562, 10.48320884, 10.36176768, 10.03186854, 10.23656092,
10.0062843, 10.13669686, 10.30758958, 9.87904176, 10.05126375],
columns=['value'])
self.test_data3 = pd.DataFrame([5.02851874, 5.20700348, 5.02410709, 5.49836387, 5.06834371,
5.10956737, 5.15314979, 5.02256472, 5.09746382, 5.23909247,
4.93410336, 4.96316186, 5.40026682, 5.7353255, 5.53438319,
5.79092139, 5.67528173, 5.89840855, 5.75379463, 6.10855386,
5.77322365, 5.84538021, 5.6103973, 5.7518655, 5.49729695,
5.13610628, 5.30524121, 5.68093462, 5.73251319, 6.04420783,
6.26929843, 6.59610234, 6.09872345, 6.25475121, 6.72927396,
6.91395783, 7.00693283, 7.36217783, 7.71516676, 7.67580263,
7.62477511, 7.73600568, 7.53457914, 7.46170277, 7.83658014,
8.11481319, 8.03705544, 7.64948845, 7.52043731, 7.67247943,
7.46511982, 7.43541798, 7.58856517, 7.9392717, 8.25406287,
7.77031632, 8.03223447, 7.86799055, 7.57630999, 7.33230519,
7.22378732, 6.85972264, 7.17548456, 7.5387846, 7.2392632,
6.8455644, 6.59557185, 6.6496796, 6.73685623, 7.18598015,
7.13619128, 6.88060157, 7.1399681, 7.30308077, 6.94942434,
7.0247815, 7.37567798, 7.50080197, 7.59719284, 7.14520561,
7.29913484, 7.79551341, 8.15497781, 8.40456095, 8.86516528,
8.53042688, 8.94268762, 8.52048006, 8.80036284, 8.91602364,
9.19953385, 8.70828953, 8.24613093, 8.18770453, 7.79548389,
7.68627967, 7.23205036, 6.98302636, 7.06515819, 6.95068113],
columns=['value'])
self.test_data4 = pd.DataFrame([4.97926539, 5.44016005, 5.45122915, 5.74485615, 5.45600553,
5.44858945, 5.2435413, 5.47315161, 5.58464303, 5.36179749,
5.38236326, 5.29614981, 5.76523508, 5.75102892, 6.15316618,
6.03852528, 6.01442228, 5.70510182, 5.22748133, 5.46762379,
5.78926267, 5.8221362, 5.61236849, 5.30615725, 5.24200611,
5.41042642, 5.59940342, 5.28306781, 4.99451932, 5.08799266,
5.38865647, 5.58229139, 5.33492845, 5.48206276, 5.09721379,
5.39190493, 5.29965087, 5.0374415, 5.50798022, 5.43107577,
5.22759507, 4.991809, 5.43153084, 5.39966868, 5.59916352,
5.66412137, 6.00611838, 5.63564902, 5.66723484, 5.29863863,
4.91115153, 5.3749929, 5.75082334, 6.08308148, 6.58091182,
6.77848803, 7.19588758, 7.64862286, 7.99818347, 7.91824794,
8.30341071, 8.45984973, 7.98700002, 8.18924931, 8.60755649,
8.66233396, 8.91018407, 9.0782739, 9.33515448, 8.95870245,
8.98426422, 8.50340317, 8.64916085, 8.93592407, 8.63145745,
8.65322862, 8.39543204, 8.37969997, 8.23394504, 8.04062872,
7.91259763, 7.57252171, 7.72670114, 7.74486117, 8.06908188,
7.99166889, 7.92155906, 8.39956136, 8.80181323, 8.47464091,
8.06557064, 7.87145573, 8.0237959, 8.39481998, 8.68525692,
8.81185461, 8.98632237, 9.0989835, 8.89787405, 8.86508591],
columns=['value'])
self.test_data5 = pd.DataFrame([4.50258923, 4.35142568, 4.07459514, 3.87791297, 3.73715985,
3.98455684, 4.07587908, 4.00042472, 4.28276612, 4.01362051,
4.13713565, 4.49312372, 4.48633159, 4.4641207, 4.13444605,
3.79107217, 4.22941629, 4.56548511, 4.92472163, 5.27723158,
5.67409193, 6.00176917, 5.88889928, 5.55256103, 5.39308314,
5.2610492, 5.30738908, 5.22222408, 4.90332238, 4.57499908,
4.96097146, 4.81531011, 4.39115442, 4.63200662, 5.04588813,
4.67866025, 5.01705123, 4.83562258, 4.60381702, 4.66187576,
4.41292828, 4.86604507, 4.42280124, 4.07517294, 4.16317319,
4.10316596, 4.42913598, 4.06609666, 3.96725913, 4.15965746,
4.12379564, 4.04054068, 3.84342851, 3.45902867, 3.17649855,
3.09773586, 3.5502119, 3.66396995, 3.66306483, 3.29131401,
2.79558533, 2.88319542, 3.03671098, 3.44645857, 3.88167161,
3.57961874, 3.60180276, 3.96702102, 4.05429995, 4.40056979,
4.05653231, 3.59600456, 3.60792477, 4.09989922, 3.73503663,
4.01892626, 3.94597242, 3.81466605, 3.71417992, 3.93767156,
4.42806557, 4.06988106, 4.03713636, 4.34408673, 4.79810156,
5.18115011, 4.89798406, 5.3960077, 5.72504875, 5.61894017,
5.1958197, 4.85275896, 5.17550207, 4.71548987, 4.62408567,
4.55488535, 4.36532649, 4.26031979, 4.25225607, 4.58627048],
columns=['value'])
self.test_data6 = pd.DataFrame([5.08639513, 5.05761083, 4.76160923, 4.62166504, 4.62923183,
4.25070173, 4.13447513, 3.90890013, 3.76687608, 3.43342482,
3.67648224, 3.6274775, 3.9385404, 4.39771627, 4.03199346,
3.93265288, 3.50059789, 3.3851961, 3.29743973, 3.2544872,
2.93692949, 2.70893003, 2.55461976, 2.20922332, 2.29054475,
2.2144714, 2.03726827, 2.39007617, 2.29866155, 2.40607111,
2.40440444, 2.79374649, 2.66541922, 2.27018079, 2.08505127,
2.55478864, 2.22415625, 2.58517923, 2.58802256, 2.94870959,
2.69301739, 2.19991535, 2.69473146, 2.64704637, 2.62753542,
2.14240825, 2.38565154, 1.94592117, 2.32243877, 2.69337246,
2.51283854, 2.62484451, 2.15559054, 2.35410875, 2.31219177,
1.96018265, 2.34711266, 2.58083322, 2.40290041, 2.20439791,
2.31472425, 2.16228248, 2.16439749, 2.20080737, 1.73293206,
1.9264407, 2.25089861, 2.69269101, 2.59296687, 2.1420998,
1.67819153, 1.98419023, 2.14479494, 1.89055376, 1.96720648,
1.9916694, 2.37227761, 2.14446036, 2.34573903, 1.86162546,
2.1410721, 2.39204939, 2.52529064, 2.47079939, 2.9299031,
3.09452923, 2.93276708, 3.21731309, 3.06248964, 2.90413406,
2.67844632, 2.45621213, 2.41463398, 2.7373913, 3.14917045,
3.4033949, 3.82283446, 4.02285451, 3.7619638, 4.10346795],
columns=['value'])
self.test_data7 = pd.DataFrame([4.75233583, 4.47668283, 4.55894263, 4.61765848, 4.622892,
4.58941116, 4.32535872, 3.88112797, 3.47237806, 3.50898953,
3.82530406, 3.6718017, 3.78918195, 4.1800752, 4.01818557,
4.40822582, 4.65474654, 4.89287256, 4.40879274, 4.65505126,
4.36876403, 4.58418934, 4.75687172, 4.3689799, 4.16126498,
4.0203982, 3.77148242, 3.38198096, 3.07261764, 2.9014741,
2.5049543, 2.756105, 2.28779058, 2.16986991, 1.8415962,
1.83319008, 2.20898291, 2.00128981, 1.75747025, 1.26676663,
1.40316876, 1.11126484, 1.60376367, 1.22523829, 1.58816681,
1.49705679, 1.80244138, 1.55128293, 1.35339409, 1.50985759,
1.0808451, 1.05892796, 1.43414812, 1.43039101, 1.73631655,
1.43940867, 1.82864425, 1.71088265, 2.12015154, 2.45417128,
2.84777618, 2.7925612, 2.90975121, 3.25920745, 3.13801182,
3.52733677, 3.65468491, 3.69395211, 3.49862035, 3.24786017,
3.64463138, 4.00331929, 3.62509565, 3.78013949, 3.4174012,
3.76312271, 3.62054004, 3.67206716, 3.60596058, 3.38636199,
3.42580676, 3.32921095, 3.02976759, 3.28258676, 3.45760838,
3.24917528, 2.94618304, 2.86980011, 2.63191259, 2.39566759,
2.53159917, 2.96273967, 3.25626185, 2.97425402, 3.16412191,
3.58280763, 3.23257727, 3.62353556, 3.12806399, 2.92532313],
columns=['value'])
# Build a test series of 500 data points to exercise the evaluation process when there are more than 250 points
self.long_data = pd.DataFrame([9.879, 9.916, 10.109, 10.214, 10.361, 10.768, 10.594, 10.288,
10.082, 9.994, 10.125, 10.126, 10.384, 10.734, 10.4, 10.87,
11.338, 11.061, 11.415, 11.724, 12.077, 12.196, 12.064, 12.423,
12.19, 11.729, 11.677, 11.448, 11.485, 10.989, 11.242, 11.239,
11.113, 11.075, 11.471, 11.745, 11.754, 11.782, 12.079, 11.97,
12.178, 11.95, 12.438, 12.612, 12.804, 12.952, 12.612, 12.867,
12.832, 12.832, 13.015, 13.315, 13.249, 12.904, 12.776, 12.64,
12.543, 12.287, 12.225, 11.844, 11.985, 11.945, 11.542, 11.871,
12.245, 12.228, 12.362, 11.899, 11.962, 12.374, 12.816, 12.649,
12.252, 12.579, 12.3, 11.988, 12.177, 12.312, 12.744, 12.599,
12.524, 12.82, 12.67, 12.876, 12.986, 13.271, 13.606, 13.82,
14.161, 13.833, 13.831, 14.137, 13.705, 13.414, 13.037, 12.759,
12.642, 12.948, 13.297, 13.483, 13.836, 14.179, 13.709, 13.655,
13.198, 13.508, 13.953, 14.387, 14.043, 13.987, 13.561, 13.391,
12.923, 12.555, 12.503, 12.292, 11.877, 12.34, 12.141, 11.687,
11.992, 12.458, 12.131, 11.75, 11.739, 11.263, 11.762, 11.976,
11.578, 11.854, 12.136, 12.422, 12.311, 12.56, 12.879, 12.861,
12.973, 13.235, 13.53, 13.531, 13.137, 13.166, 13.31, 13.103,
13.007, 12.643, 12.69, 12.216, 12.385, 12.046, 12.321, 11.9,
11.772, 11.816, 11.871, 11.59, 11.518, 11.94, 11.803, 11.924,
12.183, 12.136, 12.361, 12.406, 11.932, 11.684, 11.292, 11.388,
11.874, 12.184, 12.002, 12.16, 11.741, 11.26, 11.123, 11.534,
11.777, 11.407, 11.275, 11.679, 11.62, 11.218, 11.235, 11.352,
11.366, 11.061, 10.661, 10.582, 10.899, 11.352, 11.792, 11.475,
11.263, 11.538, 11.183, 10.936, 11.399, 11.171, 11.214, 10.89,
10.728, 11.191, 11.646, 11.62, 11.195, 11.178, 11.18, 10.956,
11.205, 10.87, 11.098, 10.639, 10.487, 10.507, 10.92, 10.558,
10.119, 9.882, 9.573, 9.515, 9.845, 9.852, 9.495, 9.726,
10.116, 10.452, 10.77, 11.225, 10.92, 10.824, 11.096, 11.542,
11.06, 10.568, 10.585, 10.884, 10.401, 10.068, 9.964, 10.285,
10.239, 10.036, 10.417, 10.132, 9.839, 9.556, 9.084, 9.239,
9.304, 9.067, 8.587, 8.471, 8.007, 8.321, 8.55, 9.008,
9.138, 9.088, 9.434, 9.156, 9.65, 9.431, 9.654, 10.079,
10.411, 10.865, 10.51, 10.205, 10.519, 10.367, 10.855, 10.642,
10.298, 10.622, 10.173, 9.792, 9.995, 9.904, 9.771, 9.597,
9.506, 9.212, 9.688, 10.032, 9.723, 9.839, 9.918, 10.332,
10.236, 9.989, 10.192, 10.685, 10.908, 11.275, 11.72, 12.158,
12.045, 12.244, 12.333, 12.246, 12.552, 12.958, 13.11, 13.53,
13.123, 13.138, 13.57, 13.389, 13.511, 13.759, 13.698, 13.744,
13.467, 13.795, 13.665, 13.377, 13.423, 13.772, 13.295, 13.073,
12.718, 12.388, 12.399, 12.185, 11.941, 11.818, 11.465, 11.811,
12.163, 11.86, 11.935, 11.809, 12.145, 12.624, 12.768, 12.321,
12.277, 11.889, 12.11, 12.606, 12.943, 12.945, 13.112, 13.199,
13.664, 14.051, 14.189, 14.339, 14.611, 14.656, 15.112, 15.086,
15.263, 15.021, 15.346, 15.572, 15.607, 15.983, 16.151, 16.215,
16.096, 16.089, 16.32, 16.59, 16.657, 16.752, 16.583, 16.743,
16.373, 16.662, 16.243, 16.163, 16.491, 16.958, 16.977, 17.225,
17.637, 17.344, 17.684, 17.892, 18.036, 18.182, 17.803, 17.588,
17.101, 17.538, 17.124, 16.787, 17.167, 17.138, 16.955, 17.148,
17.135, 17.635, 17.718, 17.675, 17.622, 17.358, 17.754, 17.729,
17.576, 17.772, 18.239, 18.441, 18.729, 18.319, 18.608, 18.493,
18.069, 18.122, 18.314, 18.423, 18.709, 18.548, 18.384, 18.391,
17.988, 17.986, 17.653, 17.249, 17.298, 17.06, 17.36, 17.108,
17.348, 17.596, 17.46, 17.635, 17.275, 17.291, 16.933, 17.337,
17.231, 17.146, 17.148, 16.751, 16.891, 17.038, 16.735, 16.64,
16.231, 15.957, 15.977, 16.077, 16.054, 15.797, 15.67, 15.911,
16.077, 16.17, 15.722, 15.258, 14.877, 15.138, 15., 14.811,
14.698, 14.407, 14.583, 14.704, 15.153, 15.436, 15.634, 15.453,
15.877, 15.696, 15.563, 15.927, 16.255, 16.696, 16.266, 16.698,
16.365, 16.493, 16.973, 16.71, 16.327, 16.605, 16.486, 16.846,
16.935, 17.21, 17.389, 17.546, 17.773, 17.641, 17.485, 17.794,
17.354, 16.904, 16.675, 16.43, 16.898, 16.819, 16.921, 17.201,
17.617, 17.368, 17.864, 17.484],
columns=['value'])
self.long_bench = pd.DataFrame([9.7, 10.179, 10.321, 9.855, 9.936, 10.096, 10.331, 10.662,
10.59, 11.031, 11.154, 10.945, 10.625, 10.233, 10.284, 10.252,
10.221, 10.352, 10.444, 10.773, 10.904, 11.104, 10.797, 10.55,
10.943, 11.352, 11.641, 11.983, 11.696, 12.138, 12.365, 12.379,
11.969, 12.454, 12.947, 13.119, 13.013, 12.763, 12.632, 13.034,
12.681, 12.561, 12.938, 12.867, 13.202, 13.132, 13.539, 13.91,
13.456, 13.692, 13.771, 13.904, 14.069, 13.728, 13.97, 14.228,
13.84, 14.041, 13.963, 13.689, 13.543, 13.858, 14.118, 13.987,
13.611, 14.028, 14.229, 14.41, 14.74, 15.03, 14.915, 15.207,
15.354, 15.665, 15.877, 15.682, 15.625, 15.175, 15.105, 14.893,
14.86, 15.097, 15.178, 15.293, 15.238, 15., 15.283, 14.994,
14.907, 14.664, 14.888, 15.297, 15.313, 15.368, 14.956, 14.802,
14.506, 14.257, 14.619, 15.019, 15.049, 14.625, 14.894, 14.978,
15.434, 15.578, 16.038, 16.107, 16.277, 16.365, 16.204, 16.465,
16.401, 16.895, 17.057, 16.621, 16.225, 16.075, 15.863, 16.292,
16.551, 16.724, 16.817, 16.81, 17.192, 16.86, 16.745, 16.707,
16.552, 16.133, 16.301, 16.08, 15.81, 15.75, 15.909, 16.127,
16.457, 16.204, 16.329, 16.748, 16.624, 17.011, 16.548, 16.831,
16.653, 16.791, 16.57, 16.778, 16.928, 16.932, 17.22, 16.876,
17.301, 17.422, 17.689, 17.316, 17.547, 17.534, 17.409, 17.669,
17.416, 17.859, 17.477, 17.307, 17.245, 17.352, 17.851, 17.412,
17.144, 17.138, 17.085, 16.926, 16.674, 16.854, 17.064, 16.95,
16.609, 16.957, 16.498, 16.552, 16.175, 15.858, 15.697, 15.781,
15.583, 15.36, 15.558, 16.046, 15.968, 15.905, 16.358, 16.783,
17.048, 16.762, 17.224, 17.363, 17.246, 16.79, 16.608, 16.423,
15.991, 15.527, 15.147, 14.759, 14.792, 15.206, 15.148, 15.046,
15.429, 14.999, 15.407, 15.124, 14.72, 14.713, 15.022, 15.092,
14.982, 15.001, 14.734, 14.713, 14.841, 14.562, 15.005, 15.483,
15.472, 15.277, 15.503, 15.116, 15.12, 15.442, 15.476, 15.789,
15.36, 15.764, 16.218, 16.493, 16.642, 17.088, 16.816, 16.645,
16.336, 16.511, 16.2, 15.994, 15.86, 15.929, 16.316, 16.416,
16.746, 17.173, 17.531, 17.627, 17.407, 17.49, 17.768, 17.509,
17.795, 18.147, 18.63, 18.945, 19.021, 19.518, 19.6, 19.744,
19.63, 19.32, 18.933, 19.297, 19.598, 19.446, 19.236, 19.198,
19.144, 19.159, 19.065, 19.032, 18.586, 18.272, 18.119, 18.3,
17.894, 17.744, 17.5, 17.083, 17.092, 16.864, 16.453, 16.31,
16.681, 16.342, 16.447, 16.715, 17.068, 17.067, 16.822, 16.673,
16.675, 16.592, 16.686, 16.397, 15.902, 15.597, 15.357, 15.162,
15.348, 15.603, 15.283, 15.257, 15.082, 14.621, 14.366, 14.039,
13.957, 14.141, 13.854, 14.243, 14.414, 14.033, 13.93, 14.104,
14.461, 14.249, 14.053, 14.165, 14.035, 14.408, 14.501, 14.019,
14.265, 14.67, 14.797, 14.42, 14.681, 15.16, 14.715, 14.292,
14.411, 14.656, 15.094, 15.366, 15.055, 15.198, 14.762, 14.294,
13.854, 13.811, 13.549, 13.927, 13.897, 13.421, 13.037, 13.32,
13.721, 13.511, 13.999, 13.529, 13.418, 13.881, 14.326, 14.362,
13.987, 14.015, 13.599, 13.343, 13.307, 13.689, 13.851, 13.404,
13.577, 13.395, 13.619, 13.195, 12.904, 12.553, 12.294, 12.649,
12.425, 11.967, 12.062, 11.71, 11.645, 12.058, 12.136, 11.749,
11.953, 12.401, 12.044, 11.901, 11.631, 11.396, 11.036, 11.244,
10.864, 11.207, 11.135, 11.39, 11.723, 12.084, 11.8, 11.471,
11.33, 11.504, 11.295, 11.3, 10.901, 10.494, 10.825, 11.054,
10.866, 10.713, 10.875, 10.846, 10.947, 11.422, 11.158, 10.94,
10.521, 10.36, 10.411, 10.792, 10.472, 10.305, 10.525, 10.853,
10.556, 10.72, 10.54, 10.583, 10.299, 10.061, 10.004, 9.903,
9.796, 9.472, 9.246, 9.54, 9.456, 9.177, 9.484, 9.557,
9.493, 9.968, 9.536, 9.39, 8.922, 8.423, 8.518, 8.686,
8.771, 9.098, 9.281, 8.858, 9.027, 8.553, 8.784, 8.996,
9.379, 9.846, 9.855, 9.502, 9.608, 9.761, 9.409, 9.4,
9.332, 9.34, 9.284, 8.844, 8.722, 8.376, 8.775, 8.293,
8.144, 8.63, 8.831, 8.957, 9.18, 9.601, 9.695, 10.018,
9.841, 9.743, 9.292, 8.85, 9.316, 9.288, 9.519, 9.738,
9.289, 9.785, 9.804, 10.06, 10.188, 10.095, 9.739, 9.881,
9.7, 9.991, 10.391, 10.002],
columns=['value'])
def test_performance_stats(self):
"""test the function performance_statistics()
"""
pass
def test_fv(self):
print(f'test with test data and empty DataFrame')
self.assertAlmostEqual(eval_fv(self.test_data1), 6.39245474)
self.assertAlmostEqual(eval_fv(self.test_data2), 10.05126375)
self.assertAlmostEqual(eval_fv(self.test_data3), 6.95068113)
self.assertAlmostEqual(eval_fv(self.test_data4), 8.86508591)
self.assertAlmostEqual(eval_fv(self.test_data5), 4.58627048)
self.assertAlmostEqual(eval_fv(self.test_data6), 4.10346795)
self.assertAlmostEqual(eval_fv(self.test_data7), 2.92532313)
self.assertAlmostEqual(eval_fv(pd.DataFrame()), -np.inf)
print(f'Error testing')
self.assertRaises(AssertionError, eval_fv, 15)
self.assertRaises(KeyError,
eval_fv,
pd.DataFrame([1, 2, 3], columns=['non_value']))
def test_max_drawdown(self):
print(f'test with test data and empty DataFrame')
self.assertAlmostEqual(eval_max_drawdown(self.test_data1)[0], 0.264274308)
self.assertEqual(eval_max_drawdown(self.test_data1)[1], 53)
self.assertEqual(eval_max_drawdown(self.test_data1)[2], 86)
self.assertTrue(np.isnan(eval_max_drawdown(self.test_data1)[3]))
self.assertAlmostEqual(eval_max_drawdown(self.test_data2)[0], 0.334690849)
self.assertEqual(eval_max_drawdown(self.test_data2)[1], 0)
self.assertEqual(eval_max_drawdown(self.test_data2)[2], 10)
self.assertEqual(eval_max_drawdown(self.test_data2)[3], 19)
self.assertAlmostEqual(eval_max_drawdown(self.test_data3)[0], 0.244452899)
self.assertEqual(eval_max_drawdown(self.test_data3)[1], 90)
self.assertEqual(eval_max_drawdown(self.test_data3)[2], 99)
self.assertTrue(np.isnan(eval_max_drawdown(self.test_data3)[3]))
self.assertAlmostEqual(eval_max_drawdown(self.test_data4)[0], 0.201849684)
self.assertEqual(eval_max_drawdown(self.test_data4)[1], 14)
self.assertEqual(eval_max_drawdown(self.test_data4)[2], 50)
self.assertEqual(eval_max_drawdown(self.test_data4)[3], 54)
self.assertAlmostEqual(eval_max_drawdown(self.test_data5)[0], 0.534206456)
self.assertEqual(eval_max_drawdown(self.test_data5)[1], 21)
self.assertEqual(eval_max_drawdown(self.test_data5)[2], 60)
self.assertTrue(np.isnan(eval_max_drawdown(self.test_data5)[3]))
self.assertAlmostEqual(eval_max_drawdown(self.test_data6)[0], 0.670062689)
self.assertEqual(eval_max_drawdown(self.test_data6)[1], 0)
self.assertEqual(eval_max_drawdown(self.test_data6)[2], 70)
self.assertTrue(np.isnan(eval_max_drawdown(self.test_data6)[3]))
self.assertAlmostEqual(eval_max_drawdown(self.test_data7)[0], 0.783577449)
self.assertEqual(eval_max_drawdown(self.test_data7)[1], 17)
self.assertEqual(eval_max_drawdown(self.test_data7)[2], 51)
self.assertTrue(np.isnan(eval_max_drawdown(self.test_data7)[3]))
self.assertEqual(eval_max_drawdown( | pd.DataFrame() | pandas.DataFrame |
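# eval_max_drawdown above is expected to return (max_drawdown, peak_idx,
# valley_idx, recovery_idx). A minimal pandas sketch of the assumed definition
# behind these checks (the library may locate the indices differently):
import numpy as np
import pandas as pd

def max_drawdown_sketch(values: pd.Series):
    # values would typically be df['value'] from the test frames above
    running_peak = values.cummax()
    drawdown = 1 - values / running_peak
    valley = drawdown.idxmax()
    peak = values.loc[:valley].idxmax()
    recovered = values.loc[valley:] >= values.loc[peak]
    recovery = recovered.idxmax() if recovered.any() else np.nan
    return drawdown.max(), peak, valley, recovery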
import os
os.chdir('seqFISH_AllenVISp/')
import pickle
import numpy as np
import pandas as pd
from sklearn.neighbors import NearestNeighbors
import time as tm
with open ('data/SpaGE_pkl/seqFISH_Cortex.pkl', 'rb') as f:
datadict = pickle.load(f)
seqFISH_data = datadict['seqFISH_data']
seqFISH_data_scaled = datadict['seqFISH_data_scaled']
seqFISH_meta= datadict['seqFISH_meta']
del datadict
with open ('data/SpaGE_pkl/Allen_VISp.pkl', 'rb') as f:
datadict = pickle.load(f)
RNA_data = datadict['RNA_data']
RNA_data_scaled = datadict['RNA_data_scaled']
del datadict
#### Leave One Out Validation ####
Common_data = RNA_data_scaled[np.intersect1d(seqFISH_data_scaled.columns,RNA_data_scaled.columns)]
Imp_Genes = pd.DataFrame(columns=Common_data.columns)
precise_time = []
knn_time = []
for i in Common_data.columns:
print(i)
start = tm.time()
from principal_vectors import PVComputation
n_factors = 50
n_pv = 50
dim_reduction = 'pca'
dim_reduction_target = 'pca'
pv_FISH_RNA = PVComputation(
n_factors = n_factors,
n_pv = n_pv,
dim_reduction = dim_reduction,
dim_reduction_target = dim_reduction_target
)
pv_FISH_RNA.fit(Common_data.drop(i,axis=1),seqFISH_data_scaled[Common_data.columns].drop(i,axis=1))
S = pv_FISH_RNA.source_components_.T
Effective_n_pv = sum(np.diag(pv_FISH_RNA.cosine_similarity_matrix_) > 0.3)
S = S[:,0:Effective_n_pv]
Common_data_t = Common_data.drop(i,axis=1).dot(S)
FISH_exp_t = seqFISH_data_scaled[Common_data.columns].drop(i,axis=1).dot(S)
precise_time.append(tm.time()-start)
start = tm.time()
nbrs = NearestNeighbors(n_neighbors=50, algorithm='auto',metric = 'cosine').fit(Common_data_t)
distances, indices = nbrs.kneighbors(FISH_exp_t)
Imp_Gene = np.zeros(seqFISH_data.shape[0])
for j in range(0,seqFISH_data.shape[0]):
weights = 1-(distances[j,:][distances[j,:]<1])/(np.sum(distances[j,:][distances[j,:]<1]))
weights = weights/(len(weights)-1)
Imp_Gene[j] = np.sum(np.multiply(RNA_data[i][indices[j,:][distances[j,:] < 1]],weights))
Imp_Gene[np.isnan(Imp_Gene)] = 0
Imp_Genes[i] = Imp_Gene
knn_time.append(tm.time()-start)
Imp_Genes.to_csv('Results/SpaGE_LeaveOneOut.csv')
precise_time = | pd.DataFrame(precise_time) | pandas.DataFrame |
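# The weighting loop above turns cosine distances into convex weights: neighbours
# with distance >= 1 are dropped, each kept neighbour gets 1 - d/sum(d), and the
# division by (k - 1) makes the weights sum to 1 (the k terms sum to k - 1).
# A compact sketch of that same step:
import numpy as np

def knn_weights(dist_row):
    d = dist_row[dist_row < 1]
    w = 1 - d / np.sum(d)
    return w / (len(w) - 1)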
# -*- coding: utf-8 -*-
from __future__ import division, print_function
import numpy as np, pandas as pd
from astropy.table import Table
from astropy.coordinates import SkyCoord
from astropy import units as u
from glob import glob
import os
def make_prioritycut_ctl(datadir='/Users/luke/local/TIC/CTL71/',
prioritycut=0.0015,
subcols = ['RA', 'DEC', 'TESSMAG', 'TEFF', 'PRIORITY',
'RADIUS', 'MASS', 'CONTRATIO', 'ECLONG',
'ECLAT', 'DIST', 'TICID', 'SPEC_LIST'],
savpath = '../data/TIC71_prioritycut.csv'):
'''
I downloaded the 2018/07/07 CTL direct from
http://astro.phy.vanderbilt.edu/~oelkerrj/tic7_ctl1_20182606.tar.gz.
It's only 2Gb, but regardless I put in on a storage drive.
From the docs at https://filtergraph.com/tess_ctl:
This portal was updated to reflect the CTL of TIC-7.1 on July 7, 2018.
This Candidate Target List (CTL-7.1) is a compilation of several
catalogs, including 2MASS, Gaia DR1, UCAC-4 & 5, Tycho-2, APASS DR9 and
others. The CTL is the current best effort to identify stars most
suitable for transit detection with TESS. Stars are considered for the
CTL if they are: 1) identified as RPMJ dwarfs with greater than 2-sigma
confidence; and 2) meet one of the following temperature/magnitude
criteria: (TESSmag < 12 and Teff >= 5500K) or (TESSmag < 13 and Teff <
5500K). Alternatively, a star is included in the CTL, regardless of the
conditions above, if the star is a member of the bright star list
(TESSmag < 6) or the specially curated cool dwarf, hot subdwarf, and
known planet lists. Users who are interested only in the top 200K or
400K stars may use a filter on the priority of 0.0017 and 0.0011
respectively. The full TIC & CTL will be available for download at
MAST. The full machine-readable version of this CTL filtergraph portal
is available as a comma-separated file at (above link).
Kwargs:
datadir, extracted should start looking like:
luke@brik:~/local/TIC/CTL71$ tree -L 1
.
├── 00-02.csv
├── 02-04.csv
├── 04-06.csv
├── 06-08.csv
├── 08-10.csv
├── 10-12.csv
├── 12-14.csv
├── 14-16.csv
├── 16-18.csv
├── 18-20.csv
├── 20-22.csv
├── 22-24.csv
└── header.txt
prioritycut: 0.0015 corresponds to top 300k or so.
subcols: to write out in prioritycut csv
'''
with open(datadir+'header.txt') as f:
hdr = f.readlines()[0]
columns = [l.strip('\n') for l in hdr.split(',')]
subcats = np.sort(glob(datadir+'??-??.csv'))
print('making priority cut catalog...')
for ix, subcat in enumerate(subcats):
print(ix)
if os.path.exists(datadir+'temp_{:d}.csv'.format(ix)):
continue
sc = pd.read_csv(subcat, names=columns)
sc = sc[subcols]
sc = sc[sc['PRIORITY']>prioritycut]
sc.to_csv(datadir+'temp_{:d}.csv'.format(ix), index=False)
temps = np.sort(glob(datadir+'temp_*.csv'))
for ix, temp in enumerate(temps):
if ix == 0:
df = | pd.read_csv(temp) | pandas.read_csv |
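# A minimal sketch of how the per-chunk temp files above could be stitched
# together and saved; the original make_prioritycut_ctl presumably finishes with
# something along these lines (pd.concat keeps it simple):
import pandas as pd

def concat_and_save(temp_paths, savpath):
    # read every temp chunk and concatenate into one priority-cut catalog
    df = pd.concat((pd.read_csv(t) for t in temp_paths), ignore_index=True)
    df.to_csv(savpath, index=False)
    return df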
'''
script to join epsilon in the log file with the calculated metrics
(for older version of the data)
'''
import os
import pandas as pd
import argparse
from pathlib import Path
import re
from tqdm import tqdm
PATTERN = re.compile(r"ε = (.*), δ = (.*)\) for α = (.*)")
def get_epsilon(logs, model_path):
epsilons = []
for i, line in enumerate(logs):
if model_path in line:
if 'ε' in logs[i-2]:
e, d, a = PATTERN.search(logs[i-2]).group(1), PATTERN.search(logs[i-2]).group(2), PATTERN.search(logs[i-2]).group(3)
elif 'ε' in logs[i-1]:
e, d, a = PATTERN.search(logs[i-1]).group(1), PATTERN.search(logs[i-1]).group(2), PATTERN.search(logs[i-1]).group(3)
else:
print("no privacy found, must be nodp")
e, d, a = 0, 0, 0
# raise ValueError(f'{model_path}, {line}')
epsilons.append([e,d,a])
assert len(epsilons) == 1, f'{model_path}'
return epsilons[0]
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='PyTorch Wikitext-2 Language Model')
# Model parameters.
parser.add_argument('--checkpoint', '-ckpt', type=str,
help='model checkpoint to use')
parser.add_argument('--log_file', '-log', type=str,
help='log file')
parser.add_argument('--csv_file', '-csv', type=str,
help='csv file')
args = parser.parse_args()
with open(args.log_file, 'r') as fh:
logs = fh.readlines()
df = pd.read_csv(args.csv_file)
records = []
paths = sorted(Path(args.checkpoint).iterdir(), key=os.path.getmtime)
for model_path in tqdm(paths):
model_path = str(model_path)
model_ppl, model_acc, epoch_num = float(model_path.split('ppl-')[-1].split('_')[0]), float(model_path.split('acc-')[-1].split('_')[0]), int(model_path.split('epoch-')[-1])
e, d, a = get_epsilon(logs, model_path)
record = [epoch_num, model_ppl, model_acc, e, d, a, model_path]
records.append(record)
records = pd.DataFrame(records, columns=['epoch', 'model_ppl', 'model_acc', 'epsilon', 'delta', 'alpha', 'model_path'])
# import pdb; pdb.set_trace()
df_new = | pd.merge(df, records, on=['epoch', 'model_ppl', 'model_acc']) | pandas.merge |
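# PATTERN above is meant to capture the three numbers from privacy-accounting log
# lines such as "(ε = 1.23, δ = 1e-05) for α = 4.0"; a quick self-contained check
# of that assumption (the example line is illustrative, not taken from a real log):
import re
_pattern = re.compile(r"ε = (.*), δ = (.*)\) for α = (.*)")
_m = _pattern.search("(ε = 1.23, δ = 1e-05) for α = 4.0")
assert _m is not None and _m.groups() == ('1.23', '1e-05', '4.0')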
# Copyright 1999-2021 Alibaba Group Holding Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import tempfile
import numpy as np
import pandas as pd
import pytest
try:
import pyarrow as pa
except ImportError: # pragma: no cover
pa = None
try:
import fastparquet as fp
except ImportError: # pragma: no cover
fp = None
from .... import dataframe as md
from .... import tensor as mt
from ...datasource.read_csv import DataFrameReadCSV
from ...datasource.read_sql import DataFrameReadSQL
from ...datasource.read_parquet import DataFrameReadParquet
@pytest.mark.parametrize('chunk_size', [2, (2, 3)])
def test_set_index(setup, chunk_size):
df1 = pd.DataFrame([[1, 3, 3], [4, 2, 6], [7, 8, 9]],
index=['a1', 'a2', 'a3'], columns=['x', 'y', 'z'])
df2 = md.DataFrame(df1, chunk_size=chunk_size)
expected = df1.set_index('y', drop=True)
df3 = df2.set_index('y', drop=True)
pd.testing.assert_frame_equal(
expected, df3.execute().fetch())
expected = df1.set_index('y', drop=False)
df4 = df2.set_index('y', drop=False)
pd.testing.assert_frame_equal(
expected, df4.execute().fetch())
expected = df1.set_index('y')
df2.set_index('y', inplace=True)
pd.testing.assert_frame_equal(
expected, df2.execute().fetch())
def test_iloc_getitem(setup):
df1 = pd.DataFrame([[1, 3, 3], [4, 2, 6], [7, 8, 9]],
index=['a1', 'a2', 'a3'], columns=['x', 'y', 'z'])
df2 = md.DataFrame(df1, chunk_size=2)
# plain index
expected = df1.iloc[1]
df3 = df2.iloc[1]
result = df3.execute(extra_config={'check_series_name': False}).fetch()
pd.testing.assert_series_equal(
expected, result)
# plain index on axis 1
expected = df1.iloc[:2, 1]
df4 = df2.iloc[:2, 1]
pd.testing.assert_series_equal(
expected, df4.execute().fetch())
# slice index
expected = df1.iloc[:, 2:4]
df5 = df2.iloc[:, 2:4]
pd.testing.assert_frame_equal(
expected, df5.execute().fetch())
# plain fancy index
expected = df1.iloc[[0], [0, 1, 2]]
df6 = df2.iloc[[0], [0, 1, 2]]
pd.testing.assert_frame_equal(
expected, df6.execute().fetch())
# plain fancy index with shuffled order
expected = df1.iloc[[0], [1, 2, 0]]
df7 = df2.iloc[[0], [1, 2, 0]]
pd.testing.assert_frame_equal(
expected, df7.execute().fetch())
# fancy index
expected = df1.iloc[[1, 2], [0, 1, 2]]
df8 = df2.iloc[[1, 2], [0, 1, 2]]
pd.testing.assert_frame_equal(
expected, df8.execute().fetch())
# fancy index with shuffled order
expected = df1.iloc[[2, 1], [1, 2, 0]]
df9 = df2.iloc[[2, 1], [1, 2, 0]]
pd.testing.assert_frame_equal(
expected, df9.execute().fetch())
# one fancy index
expected = df1.iloc[[2, 1]]
df10 = df2.iloc[[2, 1]]
pd.testing.assert_frame_equal(
expected, df10.execute().fetch())
# plain index
expected = df1.iloc[1, 2]
df11 = df2.iloc[1, 2]
assert expected == df11.execute().fetch()
# bool index array
expected = df1.iloc[[True, False, True], [2, 1]]
df12 = df2.iloc[[True, False, True], [2, 1]]
pd.testing.assert_frame_equal(
expected, df12.execute().fetch())
# bool index array on axis 1
expected = df1.iloc[[2, 1], [True, False, True]]
df14 = df2.iloc[[2, 1], [True, False, True]]
pd.testing.assert_frame_equal(
expected, df14.execute().fetch())
# bool index
expected = df1.iloc[[True, False, True], [2, 1]]
df13 = df2.iloc[md.Series([True, False, True], chunk_size=1), [2, 1]]
pd.testing.assert_frame_equal(
expected, df13.execute().fetch())
# test Series
data = pd.Series(np.arange(10))
series = md.Series(data, chunk_size=3).iloc[:3]
pd.testing.assert_series_equal(
series.execute().fetch(), data.iloc[:3])
series = md.Series(data, chunk_size=3).iloc[4]
assert series.execute().fetch() == data.iloc[4]
series = md.Series(data, chunk_size=3).iloc[[2, 3, 4, 9]]
pd.testing.assert_series_equal(
series.execute().fetch(), data.iloc[[2, 3, 4, 9]])
series = md.Series(data, chunk_size=3).iloc[[4, 3, 9, 2]]
pd.testing.assert_series_equal(
series.execute().fetch(), data.iloc[[4, 3, 9, 2]])
series = md.Series(data).iloc[5:]
pd.testing.assert_series_equal(
series.execute().fetch(), data.iloc[5:])
# bool index array
selection = np.random.RandomState(0).randint(2, size=10, dtype=bool)
series = md.Series(data).iloc[selection]
pd.testing.assert_series_equal(
series.execute().fetch(), data.iloc[selection])
# bool index
series = md.Series(data).iloc[md.Series(selection, chunk_size=4)]
pd.testing.assert_series_equal(
series.execute().fetch(), data.iloc[selection])
# test index
data = pd.Index(np.arange(10))
index = md.Index(data, chunk_size=3)[:3]
pd.testing.assert_index_equal(
index.execute().fetch(), data[:3])
index = md.Index(data, chunk_size=3)[4]
assert index.execute().fetch() == data[4]
index = md.Index(data, chunk_size=3)[[2, 3, 4, 9]]
pd.testing.assert_index_equal(
index.execute().fetch(), data[[2, 3, 4, 9]])
index = md.Index(data, chunk_size=3)[[4, 3, 9, 2]]
pd.testing.assert_index_equal(
index.execute().fetch(), data[[4, 3, 9, 2]])
index = md.Index(data)[5:]
pd.testing.assert_index_equal(
index.execute().fetch(), data[5:])
# bool index array
selection = np.random.RandomState(0).randint(2, size=10, dtype=bool)
index = md.Index(data)[selection]
pd.testing.assert_index_equal(
index.execute().fetch(), data[selection])
index = md.Index(data)[mt.tensor(selection, chunk_size=4)]
pd.testing.assert_index_equal(
index.execute().fetch(), data[selection])
def test_iloc_setitem(setup):
df1 = pd.DataFrame([[1, 3, 3], [4, 2, 6], [7, 8, 9]],
index=['a1', 'a2', 'a3'], columns=['x', 'y', 'z'])
df2 = md.DataFrame(df1, chunk_size=2)
# plain index
expected = df1
expected.iloc[1] = 100
df2.iloc[1] = 100
pd.testing.assert_frame_equal(
expected, df2.execute().fetch())
# slice index
expected.iloc[:, 2:4] = 1111
df2.iloc[:, 2:4] = 1111
pd.testing.assert_frame_equal(
expected, df2.execute().fetch())
# plain fancy index
expected.iloc[[0], [0, 1, 2]] = 2222
df2.iloc[[0], [0, 1, 2]] = 2222
pd.testing.assert_frame_equal(
expected, df2.execute().fetch())
# fancy index
expected.iloc[[1, 2], [0, 1, 2]] = 3333
df2.iloc[[1, 2], [0, 1, 2]] = 3333
pd.testing.assert_frame_equal(
expected, df2.execute().fetch())
# plain index
expected.iloc[1, 2] = 4444
df2.iloc[1, 2] = 4444
pd.testing.assert_frame_equal(
expected, df2.execute().fetch())
# test Series
data = pd.Series(np.arange(10))
series = md.Series(data, chunk_size=3)
series.iloc[:3] = 1
data.iloc[:3] = 1
pd.testing.assert_series_equal(
series.execute().fetch(), data)
series.iloc[4] = 2
data.iloc[4] = 2
pd.testing.assert_series_equal(
series.execute().fetch(), data)
series.iloc[[2, 3, 4, 9]] = 3
data.iloc[[2, 3, 4, 9]] = 3
pd.testing.assert_series_equal(
series.execute().fetch(), data)
series.iloc[5:] = 4
data.iloc[5:] = 4
pd.testing.assert_series_equal(
series.execute().fetch(), data)
# test Index
data = pd.Index(np.arange(10))
index = md.Index(data, chunk_size=3)
with pytest.raises(TypeError):
index[5:] = 4
def test_loc_getitem(setup):
rs = np.random.RandomState(0)
# index and columns are labels
raw1 = pd.DataFrame(rs.randint(10, size=(5, 4)),
index=['a1', 'a2', 'a3', 'a4', 'a5'],
columns=['a', 'b', 'c', 'd'])
# columns are labels
raw2 = raw1.copy()
raw2.reset_index(inplace=True, drop=True)
# columns are non unique and monotonic
raw3 = raw1.copy()
raw3.columns = ['a', 'b', 'b', 'd']
# columns are non unique and non monotonic
raw4 = raw1.copy()
raw4.columns = ['b', 'a', 'b', 'd']
# index that is timestamp
raw5 = raw1.copy()
raw5.index = pd.date_range('2020-1-1', periods=5)
raw6 = raw1[:0]
df1 = md.DataFrame(raw1, chunk_size=2)
df2 = md.DataFrame(raw2, chunk_size=2)
df3 = md.DataFrame(raw3, chunk_size=2)
df4 = md.DataFrame(raw4, chunk_size=2)
df5 = md.DataFrame(raw5, chunk_size=2)
df6 = md.DataFrame(raw6)
df = df2.loc[3, 'b']
result = df.execute().fetch()
expected = raw2.loc[3, 'b']
assert result == expected
df = df1.loc['a3', 'b']
result = df.execute(extra_config={'check_shape': False}).fetch()
expected = raw1.loc['a3', 'b']
assert result == expected
# test empty list
df = df1.loc[[]]
result = df.execute().fetch()
expected = raw1.loc[[]]
pd.testing.assert_frame_equal(result, expected)
df = df2.loc[[]]
result = df.execute().fetch()
expected = raw2.loc[[]]
pd.testing.assert_frame_equal(result, expected)
df = df2.loc[1:4, 'b':'d']
result = df.execute().fetch()
expected = raw2.loc[1:4, 'b': 'd']
pd.testing.assert_frame_equal(result, expected)
df = df2.loc[:4, 'b':]
result = df.execute().fetch()
expected = raw2.loc[:4, 'b':]
pd.testing.assert_frame_equal(result, expected)
# slice on axis index whose index_value does not have value
df = df1.loc['a2':'a4', 'b':]
result = df.execute().fetch()
expected = raw1.loc['a2':'a4', 'b':]
pd.testing.assert_frame_equal(result, expected)
df = df2.loc[:, 'b']
result = df.execute().fetch()
expected = raw2.loc[:, 'b']
pd.testing.assert_series_equal(result, expected)
# 'b' is non-unique
df = df3.loc[:, 'b']
result = df.execute().fetch()
expected = raw3.loc[:, 'b']
| pd.testing.assert_frame_equal(result, expected) | pandas.testing.assert_frame_equal |
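# The check above relies on a pandas behaviour worth remembering: selecting a
# non-unique column label with .loc returns a DataFrame rather than a Series.
# A tiny self-contained illustration:
import pandas as pd
toy = pd.DataFrame([[1, 2, 3]], columns=['a', 'b', 'b'])
assert isinstance(toy.loc[:, 'b'], pd.DataFrame)   # both 'b' columns come back
assert isinstance(toy.loc[:, 'a'], pd.Series)      # unique label -> Series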
# --------------
import pandas as pd
# Code Starts Here
def load_data(path= path):
df= pd.read_csv(path)
df= df[['description', 'variety']]
df= df.iloc[:80000]
print(df.head())
return df
df= load_data()
# Code Ends here
# --------------
from sklearn.preprocessing import LabelEncoder
from sklearn.feature_extraction.text import TfidfVectorizer, CountVectorizer, TfidfTransformer
from nltk.corpus import stopwords
from nltk.tokenize import word_tokenize
from string import punctuation
from nltk.stem import LancasterStemmer
import numpy as np
custom = set(stopwords.words('english')+list(punctuation))
# Code Starts Here
df= load_data()
df= df.groupby('variety').filter(lambda x: len(x)>1000)
def to_lower(x):
return x.lower()
df= df.apply(np.vectorize(to_lower))
df.variety= df.variety.str.replace(" ", "_")
df= df.reset_index()
all_text= pd.DataFrame(df.description)
lancaster= LancasterStemmer()
all_text_list= list(all_text.description)
stemmed_text= list()
for i in range(len(all_text_list)):
stemmed_text.append(lancaster.stem(all_text_list[i]))
all_text= pd.DataFrame({'description':stemmed_text})
# Stemming done above; now remove stopwords and punctuation
def remove_stopwords(x):
clean= [word for word in x.split() if word not in custom]
return " ".join(clean)
all_text= all_text.apply(np.vectorize(remove_stopwords))
# Initialize Tfidf vectorizer and LabelEncoder
tfidf= TfidfVectorizer(stop_words= 'english')
le= LabelEncoder()
tfidf.fit(all_text.description)
X= tfidf.transform(all_text.description).toarray()
y= pd.DataFrame(df.variety)
y= le.fit_transform(y.variety)
# print(type(y))
# Code Ends Here
# --------------
from sklearn.metrics import accuracy_score, classification_report
from sklearn.model_selection import train_test_split
from sklearn.naive_bayes import MultinomialNB
from sklearn.svm import LinearSVC
from sklearn.svm import SVC
# Code Starts here
# Splitting the dataset
X_train, X_test, y_train, y_test= train_test_split(X, y, test_size= .3, random_state= 42)
# Initializing Naive Bayes
nb= MultinomialNB()
nb.fit(X_train, y_train)
y_pred_nb= nb.predict(X_test)
nb_acc= accuracy_score(y_test, y_pred_nb)
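# LinearSVC and SVC are imported above but not yet used; a minimal sketch of the
# comparison this template presumably builds towards (hyperparameters here are
# illustrative guesses, not prescribed values):
svc = LinearSVC(random_state=42)
svc.fit(X_train, y_train)
svc_acc = accuracy_score(y_test, svc.predict(X_test))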
# Code Ends here
# --------------
from collections import Counter
from sklearn.model_selection import train_test_split
import numpy as np
# Code Starts Here
#Load the dataset from path
df= | pd.read_csv(path) | pandas.read_csv |
# -*- coding: utf-8 -*-
"""
Created on Wed Jun 12 13:48:34 2019
@author: vrrodovalho
"""
import os
import sys
import re
import pathlib
import pandas as pd
import numpy as np
import KEGG as kegg
import matplotlib.pyplot as plt
import seaborn as sns
from adjustText import adjust_text
from tabulate import tabulate
'''
Utilities to export functional annotation (KEGG, COG) as GMT gene sets, build
ranked gene lists for g:Profiler, and merge and plot the resulting enrichment
results.
'''
def update_annotation(df, column, replace_dict):
'''
Updates a dataframe column based on a dictionary.
Parameters
----------
df : DataFrame
DataFrame that will be modified.
column : str
Name of the column that will be modified.
replace_dict : dict
Dictionary whose keys will be replaced by their values in the selected
column in df.
Returns
-------
df : DataFrame
The updated DataFrame.
'''
df = df.copy()
df[column] = df[column].replace(replace_dict, regex=True)
return df
def export_gmt(df, cat_col='KEGG_Pathway', cat_sep=',', genes_col='gi',
description_map={}, replace_dict={}, cat_fill_na='?',
ref_org='', drop_unknowns=True, filter_by_size=False,
size_limit=(2,150), forbidden_set_prefixes=['map'],
output_dir='', file_name=''):
'''
Converts a df mapping gene -> categories to a df mapping category -> genes
And creates a GMT file for using with gProfiler
Parameters
----------
df : DataFrame
DESCRIPTION.
cat_col : str, optional
Name of the column with the categories annotation (KEGG, COG...).
The default is 'KEGG_Pathway'.
cat_sep : str, optional
The delimiter that separates multiple categories in a row.
The default is ','.
genes_col : str, optional
Name of the column with the genes. The default is 'gi'.
description_map : dict, optional
A dictionary that gives a description to each category.
That could be COG letters as keys and their meaning as values.
The default is {}.
replace_dict : dict, optional
A dictionary to replace row values, useful to take out obsolete
annotation. The default is {}.
cat_fill_na : str, optional
A value to fill missing values. The default is '?'.
ref_org : str, optional
A KEGG organism code used as a reference for filtering pathways.
The default is ''.
drop_unknowns : bool, optional
Whether to drop the functional category defined as unknown, that
previously had na values. The default is True.
filter_by_size : bool, optional
Whether to filter functional categories by min/max size.
The default is False.
size_limit : tuple, optional
A tuple containing 2 integers, a min and a max size for the sets of
functional categories. The default is (2,150).
forbidden_set_prefixes : list, optional
If some gene sets are forbidden, they can be identified in a prefix
list to be removed from the dataset. The default is ['map'].
output_dir : str, optional
Output directory. The default is ''.
file_name : str, optional
Output file name. The default is ''.
Returns
-------
sub_df : DataFrame
A DataFrame close to the GMT file produced.
'''
# simplify df
sub_df = df.loc[:, [genes_col, cat_col]].copy()
# make needed replacements
if replace_dict:
sub_df = update_annotation(sub_df, column=cat_col,
replace_dict=replace_dict)
# fill na as specified
sub_df[cat_col].fillna(cat_fill_na, inplace=True)
# devide rows with multiple annotation based on delimiter
if cat_sep == '':
sub_df = (sub_df.set_index([genes_col])
.stack()
.apply(lambda x: pd.Series(list(x)))
.stack()
.unstack(-2)
.reset_index(-1, drop=True)
.reset_index()
)
else:
sub_df = (sub_df.set_index([genes_col])
.stack()
.str.split(cat_sep, expand=True)
.stack()
.unstack(-2)
.reset_index(-1, drop=True)
.reset_index()
)
sub_df = ( sub_df.groupby(by=cat_col)[genes_col]
.apply(set)
.reset_index()
)
# filter by set size, to eliminate sets too short or too long
if filter_by_size:
if size_limit:
min_size = min(size_limit)
max_size = max(size_limit)
sub_df['size'] = sub_df[genes_col].apply(len)
sub_df = sub_df.sort_values(by=['size'], ascending=False)
sub_df = sub_df.loc[ ( ( sub_df['size'] > min_size ) & \
( sub_df['size'] < max_size ) ),
[cat_col, genes_col]]
else:
str1 = "If filter_by_size is True, size_limit should be defined "
str2 = "as a tuple containing 2 int: a min and a max limit."
print(str1 + str2)
sub_df = sub_df.set_index(cat_col)
# take out unknown category (privously na)
if drop_unknowns:
sub_df = sub_df.drop([cat_fill_na])
# take out forbidden gene sets
if forbidden_set_prefixes:
for i in forbidden_set_prefixes:
sub_df = sub_df[~sub_df.index.str.startswith(i)]
# Use a KEGG reference organism to drop unrelated pathways
if ref_org:
allowed_ids = search_allowed_pathways_ids(ref_org)
sub_df = sub_df[sub_df.index.isin(allowed_ids)]
# change 1-column set to several columns and name them accordingly
f = lambda x: 'element_{}'.format(x + 1)
sub_df = pd.DataFrame(sub_df[genes_col].values.tolist(),
sub_df.index, dtype=object
).rename(columns=f)
sub_df = sub_df.reset_index()
# Add description to gene sets, if available
if description_map:
description_map[cat_fill_na] = 'Unknown'
sub_df['description'] = sub_df[cat_col].map(description_map)
else:
sub_df['description'] = np.nan
# reorder description column, to be in GMT style
cols = list(sub_df.columns)
cols.remove('description')
cols.insert(1, 'description')
sub_df = sub_df.loc[:,cols]
# save and return
output_file = output_dir / file_name
sub_df.to_csv(output_file, header=False, index=False, sep='\t')
return sub_df
def generate_gprofiler_list(df, id_column='', category_filter={},
ordered_by='', output_dir='', file_name=''):
'''
Returns a list of genes to use in GProfiler.
Parameters
----------
df : DataFrame
The initial DataFrame.
id_column : str
The name of the column that contains gene IDs.
category_filter : dict, optional
A dictionary in which keys are column names (str) and values are
allowed rows in that column (str). The default is {}.
ordered_by : str, optional
The name of the column that will be used to sort gene list.
It could be a expression measure. The default is ''.
output_dir : str
Output directory.
file_name : str
Output file name.
Returns
-------
string : str
A list of genes to be used in GProfiler.
'''
df = df.copy()
# Filter gene list by column values (such as a category)
if category_filter:
for col in category_filter:
value = category_filter[col]
df = df.loc[df[col] == value, :]
# Sort gene list by column values (such as expression)
if ordered_by:
df[ordered_by] = df[ordered_by].abs()
df = df.sort_values(by=ordered_by, ascending=False)
min_value = df.iloc[0, df.columns.get_loc(ordered_by)]
max_value = df.iloc[-1, df.columns.get_loc(ordered_by)]
string = "Ordered in {}, from {} to {}. ".format(ordered_by,
min_value, max_value)
print(string)
# Make final string and files
proteins = df.loc[:, id_column].astype(str).to_list()
string = '\n'.join(proteins)
output_file = output_dir / file_name
with open(output_file, 'w') as output:
output.write(string)
return string
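# A tiny, illustrative call of generate_gprofiler_list with made-up values, only
# meant to show the expected argument shapes (paths and column names below are
# placeholders, not from the original study):
def _gprofiler_list_toy_example(output_dir=pathlib.Path('.')):
    toy = pd.DataFrame({'gi': ['g1', 'g2', 'g3'],
                        'regulation': ['up', 'up', 'down'],
                        'fold_change': [2.5, -1.2, 3.0]})
    # keep only up-regulated proteins and order them by |fold_change|
    return generate_gprofiler_list(toy, id_column='gi',
                                   category_filter={'regulation': 'up'},
                                   ordered_by='fold_change',
                                   output_dir=output_dir,
                                   file_name='gprofiler_list.txt')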
def merge_enrichment_sources(source_files={'name': 'dataframe'},
max_p_val=0.05, v_limit=6, replace_values={},
output_dir='', file_name=''):
'''
Merge enrichment results from different sources (KEGG, COG) into the
same dataframe, corresponding to the same set of proteins.
Parameters
----------
source_files : dict
A dictionary where the keys are string identifiers (KEGG, COG) and the
values are the dataframes corresponding to the enrichment results
corresponding to those strings.
max_p_val : float, optional
The p-value threshold of significance. The default is 0.05.
v_limit : float, optional
Upper limit of -log10(p-value) on the vertical axis. Values exceeding that
threshold are capped to it. The default is 6.
replace_values : dict, optional
A dictionary where the keys are column names and the values are
replacement dictionaries, containing key-value pairs for replacing
values in that column. The default is {}.
output_dir : str, optional
Output directory. The default is ''.
file_name : str, optional
Output file name. The default is ''.
Returns
-------
df : DataFrame
A merged DataFrame.
'''
df = pd.DataFrame()
for item_name in source_files:
item = source_files[item_name]
item['source'] = item_name
df = pd.concat([df, item])
df['log_p_value'] = np.log10(df['adjusted_p_value']) * -1
df['sig'] = np.where(df['adjusted_p_value'] <= max_p_val, 'sig.', 'not sig.')
df = df.sort_values(by=['source', 'log_p_value'], ascending=False)
df['log_p_value_capped'] = np.where(df['log_p_value'] >= v_limit,
v_limit, df['log_p_value'])
if replace_values:
for col in replace_values:
replace_dict = replace_values[col]
df[col] = df[col].replace(replace_dict, regex=True)
# save file
df.to_excel(output_dir/file_name, index=False)
return df
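# --- Added usage sketch (illustrative only): the column names follow the
# gProfiler-style output used above; writing .xlsx needs an Excel engine such
# as openpyxl, and out_dir must be a pathlib.Path ---
def _example_merge_enrichment_sources(out_dir):
    kegg_df = pd.DataFrame({'term_id': ['ko00010'], 'term_name': ['Glycolysis'],
                            'adjusted_p_value': [0.001]})
    cog_df = pd.DataFrame({'term_id': ['COG0001'], 'term_name': ['Some COG'],
                           'adjusted_p_value': [0.2]})
    return merge_enrichment_sources(source_files={'KEGG': kegg_df, 'COG': cog_df},
                                    max_p_val=0.05, v_limit=6,
                                    replace_values={'term_name': {'Some ': ''}},
                                    output_dir=out_dir, file_name='enrichment.xlsx')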
def plot_enrichment(df, data = {'x':'source', 'y':'log_p_value_capped',
'label_col':'term_id', 'label_desc_col':'term_name'},
v_limit=6, max_p_val=0.05,
significancy={'column':'sig','true':'sig.','false':'not sig.'},
jitter_val=0.3, s=4, reg_categories= {'column': 'sig',
'true':'up', 'false':'down', 'true_color':'blue',
'false_color':'red'}, title='Functional enrichment',
save_fig=True,output_dir='',file_name='',file_format='tif',
dpi=300):
'''
Plot enrichment
Parameters
----------
df : DataFrame
A dataframe containing the data to be plotted. Ideally generated by
merge_enrichment_sources function.
data : dict, optional
A dictionary specifying column names in df for x, y and label values.
The default is {'x':'source', 'y':'log_p_value_capped',
'label_col':'term_id', 'label_desc_col':'term_name'}.
max_p_val : float, optional
The p-value threshold of significance. The default is 0.05.
v_limit : float, optional
Upper limit for -log10(p-value). Values exceeding this
threshold are capped to it. The default is 6.
significancy : dict, optional
A dictionary specifying the significance column and which of its
values mark significant ('true') and non-significant ('false') rows.
The default is {'column':'sig','true':'sig.','false':'not sig.'}.
jitter_val : float, optional
Parameter for stripplot. Affects the points distribution.
The default is 0.3.
s : float, optional
The size of the points in the graph. The default is 4.
reg_categories : dict, optional
A dictionary specifying regulation categories (up-regulated,
down-regulated), the column, their values and colors.
The default is {'column':'sig', 'true':'up', 'false':'down',
'true_color':'blue', 'false_color':'red'}.
title : str, optional
A title string to be plotted in the graph.
The default is 'Functional enrichment'.
save_fig : bool, optional
Whether to save the figure or not. The default is True.
output_dir : str, optional
Output directory. The default is ''.
file_name : str, optional
Output file name. The default is ''.
file_format : str, optional
File format. The default is 'tif'.
dpi : int, optional
Resolution. The default is 300.
Returns
-------
dict
A dictionary containing the final DataFrame and a legend string.
'''
df = df.copy()
fig = plt.figure()
ax = plt.axes()
sub_df_sig = df.loc[ df[significancy['column']] == significancy['true'] ]
sub_df_not = df.loc[ df[significancy['column']] == significancy['false'] ]
x = data['x']
y = data['y']
commons = {'ax':ax,'x':x,'y':y,'size':s,'marker':'s','jitter':jitter_val}
# plot non-significant points
sns.stripplot(data=sub_df_not, linewidth=0.1, alpha=0.5, color='grey',
**commons)
# plot significant points
if reg_categories:
palette = {reg_categories['true']:reg_categories['true_color'],
reg_categories['false']:reg_categories['false_color']}
sns.stripplot(data=sub_df_sig,linewidth=0.5,alpha=1.0,palette=palette,
hue=reg_categories['column'],dodge=True, **commons)
else:
sns.stripplot(data=sub_df_sig,linewidth=0.5,alpha=1.0,color='blue',
**commons)
# title?
if title != '':
plt.title(title, loc='center')
# plot lines
ax.set(ylim=(-0.2, v_limit+1))
log_max_p_val = np.log10(max_p_val) * -1
plt.axhline(y=log_max_p_val , color='grey', linewidth=0.5, linestyle='--')
plt.axhline(y=v_limit , color='grey', linewidth=0.5, linestyle='--')
# plot labels
plt.xlabel('', fontsize=12, fontname="sans-serif")
plt.ylabel('Statistical significance [-log10(P-value)]', fontsize=12,
fontname="sans-serif")
# create a df with x-y coordinates only for significatives
df_graph = pd.DataFrame({'x' : [], y : []})
for i in range(len(ax.collections)):
coll = ax.collections[i]
x_values, y_values = np.array(coll.get_offsets()).T
# look for significative y
annotate = False
for i in y_values:
if i >= log_max_p_val:
annotate = True
break
# if found significative y, add to df that will be used to annotate
if annotate:
sub_df = pd.DataFrame({'x':x_values, y:y_values})
df_graph = pd.concat([df_graph, sub_df])
# transfer id col to df_graph in order to have unique identifiers
# and avoid label confusion
unique_id = data['label_col']
unique_id_desc = data['label_desc_col']
df_graph[unique_id] = sub_df_sig[unique_id]
# annotate significant y values
merged = sub_df_sig.merge(df_graph, on=[y, unique_id], how='left')
sig_x = merged['x']
sig_y = merged[y]
labels = merged[unique_id]
coordinates = []
for xi, yi, label in zip(sig_x, sig_y, labels):
element = ax.annotate(label, xy=(xi,yi), xytext=(3,3), size=8,
ha="center", va="top", textcoords="offset points")
coordinates.append(element)
# adjust labels to avoid overlapping
adjust_text(coordinates, autoalign='xy',
arrowprops=dict(arrowstyle='<-, head_length=0.05, head_width=0.05',
color='black', alpha=0.6, linewidth=0.5))
plt.show()
# return a legend string and file
legend_df = sub_df_sig.loc[:,[unique_id, unique_id_desc]]
legend = tabulate(legend_df, showindex=False)
legend_file_name = '.'.join(file_name.split('.')[:-1]) + '.txt'
output_legend = output_dir / legend_file_name
with open(output_legend, 'w') as output:
output.write(legend)
# save
if save_fig:
fig.savefig(output_dir/file_name, format=file_format, dpi=dpi,
bbox_inches="tight")
return {'sub_df_sig':sub_df_sig, 'df_graph':df_graph,
'df':merged, 'legend':legend}
def search_allowed_pathways_ids(ref_org, unknown='?'):
'''
Search in KEGG all the pathways ids for an organism
Parameters
----------
ref_org : str
KEGG organism code.
unknown : str, optional
Placeholder id for unknown categories, appended to the allowed list.
The default is '?'.
Returns
-------
allowed_ids : list
List of allowed ids (with ko prefix).
'''
kegg_data = kegg.get_KEGG_data(org=ref_org, get_pathway_list=True,
get_genes_and_pathways=False, format_conversion=False,
genes_names=False)
org_pathways = kegg.parse_KEGG_pathways_description(kegg_data['pathways'])
allowed_ref_ids = list(org_pathways.keys())
allowed_ids = []
p = '[a-z]+([0-9]+)'
for ref_id in allowed_ref_ids:
general_id = re.match(p,ref_id).groups()[0]
general_id = 'ko' + general_id
allowed_ids.append(general_id)
allowed_ids.append(unknown)
return allowed_ids
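# --- Added illustrative sketch: the regex step above strips the organism
# prefix from KEGG pathway ids and rewrites them as generic 'ko' ids ---
def _example_pathway_id_conversion():
    import re
    org_ids = ['eco00010', 'eco00020']  # hypothetical organism-specific ids
    return ['ko' + re.match('[a-z]+([0-9]+)', i).groups()[0] for i in org_ids]
    # -> ['ko00010', 'ko00020']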
def export_tables(proteomics_df=None, proteomics_id_col='', enrichment_df=None,
enrichment_id_col='', enrichment_src_col='', merge_all=False,
enrichment_desc_col='', split_ch=',', enrichment_filter={},
map_src2annot={}, output_dir='', file_name_prefix=''):
'''
Function to export merge proteomics annotation and functional enrichment
table and filter based on specific rules.
Parameters
----------
proteomics_df : DataFrame
A DataFrame containing proteomics annotation.
proteomics_id_col : str
The name of the column in proteomics_df where the protein ids are.
enrichment_df : DataFrame
A DataFrame containing enrichment results for proteins in proteomics_df.
enrichment_id_col : str
The name of the column where the functional category ids are specified.
enrichment_src_col : str
The name of the column where the source database is specified.
enrichment_desc_col : str
The name of the column where the description of id is specified.
split_ch : str
A character to split a string into a list of items in
enrichment_id_set_col. The default is ','.
merge_all : bool
Whether to merge the elements of all enriched categories into one
single dataframe. Otherwise, they are returned in a dictionary,
separated by category. The default is False.
enrichment_filter : dict, optional
A dictionary describing a filter for enrichment_df in the format
{ col_name : [allowed_values] }. Only rows fulfilling these rules are
accepted.
map_src2annot : dict
A dictionary mapping each enrichment source (e.g. 'KEGG', 'COG') to
the name of the proteomics_df column that holds the corresponding
annotations.
output_dir : str,
The output directory.
file_name_prefix : str
A prefix for every output file name. The default is ''.
Returns
-------
None.
'''
prot = proteomics_df.copy()
enri = enrichment_df.copy()
# get descriptions
desc = dict(zip( enri[enrichment_id_col], enri[enrichment_desc_col]))
# filter enrichment data (significative)
if enrichment_filter:
for col in enrichment_filter:
col_values = enrichment_filter[col]
enri = enri.loc[enri[col].isin(col_values) ,:]
# get dictionary of enriched categories by enrichment source
enri_items = enri.loc[:,[enrichment_src_col, enrichment_id_col]]
enri_items = ( enri_items.groupby(enrichment_src_col)[enrichment_id_col]
.apply(set).to_dict() )
# search items in proteomics_df that correspond to enriched categories
enriched_elements = {}
appended_data = []
prot = prot.fillna('?')
for src in enri_items:
where2look = map_src2annot[src]
cats = enri_items[src]
for cat in cats:
description = desc[cat]
sub_prot = prot.loc[prot[where2look].str.contains(cat) ,:]
n_prot = sub_prot.shape[0]
appended_data.append(sub_prot)
print("{} \t{} \t(n={}) \t{}".format(src, cat, n_prot,
description))
enriched_elements[cat + ' : ' + description] = sub_prot
file_name = '{}_{}_{}_{}.xlsx'.format(file_name_prefix, src, cat,
description)
sub_prot.astype(str).to_excel(output_dir / file_name, index=False)
single_df = pd.concat(appended_data)
single_df = single_df.drop_duplicates()
file_name = '{}_merged.xlsx'.format(file_name_prefix)
single_df.astype(str).to_excel(output_dir / file_name, index=False)
# merge all enriched categories elements
if merge_all:
enriched_elements = single_df
return enriched_elements
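# --- Added illustrative sketch: the groupby/apply(set) step used above,
# shown on a tiny stand-alone frame ---
def _example_group_categories_by_source():
    enri = pd.DataFrame({'source': ['KEGG', 'KEGG', 'COG'],
                         'term_id': ['ko00010', 'ko00020', 'COG0001']})
    # -> {'COG': {'COG0001'}, 'KEGG': {'ko00010', 'ko00020'}}
    return enri.groupby('source')['term_id'].apply(set).to_dict()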
##############################################################################
# DIRECTORY SYSTEM
src_dir = os.path.dirname(os.path.realpath(sys.argv[0]))
main_dir = os.path.dirname(src_dir)
root_dir = os.path.dirname(main_dir)
data_dir = pathlib.Path(main_dir) / 'data'
input_dir = pathlib.Path(data_dir) / 'input'
output_dir = pathlib.Path(data_dir) / 'output'
sys.path.insert(0, root_dir)
# FILE PATHS
proteomics_SEC_and_UC_file = input_dir / 'proteomics_SEC_and_UC_curated.xlsx'
proteomics_UC_file = input_dir / 'proteomics_UC.xlsx'
proteomics_core_file = input_dir / 'proteome_core.xlsx'
proteomics_accessory_file = input_dir / 'proteome_accessory.xlsx'
proteomics_single_file = input_dir / 'proteome_single.xlsx'
proteomics_not_EVs_file = input_dir / 'proteome_not_EVs.xlsx'
cogs_file = input_dir / 'COGs.xlsx'
kegg_ko_storage_file = input_dir / 'kegg_ko.data'
gprofiler_core_kegg_file = input_dir / 'gProfiler_core_kegg.csv'
gprofiler_core_cog_file = input_dir / 'gProfiler_core_cog.csv'
gprofiler_accessory_kegg_file = input_dir / 'gProfiler_accessory_kegg.csv'
gprofiler_accessory_cog_file = input_dir / 'gProfiler_accessory_cog.csv'
gprofiler_single_kegg_file = input_dir / 'gProfiler_single_kegg.csv'
gprofiler_single_cog_file = input_dir / 'gProfiler_single_cog.csv'
# READ FILES
proteomics_SEC_and_UC = pd.read_excel(proteomics_SEC_and_UC_file)
proteomics_UC = pd.read_excel(proteomics_UC_file)
proteomics_core = pd.read_excel(proteomics_core_file)
proteomics_accessory = pd.read_excel(proteomics_accessory_file)
proteomics_single = | pd.read_excel(proteomics_single_file) | pandas.read_excel |
import pandas as pd
from sklearn.cluster import KMeans
import matplotlib.pyplot as plt
import numpy as np
import seaborn as sns
from copy import deepcopy
import random
def dist(a, b, ax=1):
return np.linalg.norm(a - b, axis=ax)
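# --- Added usage sketch: dist() returns the row-wise Euclidean distance
# between two arrays of points ---
def _example_dist():
    a = np.array([[0.0, 0.0], [1.0, 1.0]])
    b = np.array([[3.0, 4.0], [1.0, 1.0]])
    return dist(a, b)  # -> array([5., 0.])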
class AllocationManager:
def __init__(self, data, time_window=60, grid_res=40):
self.time_window = time_window
self.grid_res = grid_res
self.data = data.copy()
self.aps = None
self.edcs = None
df = data.copy()
df['epoch'] = np.round(df['epoch'] / self.time_window)
x_grid = (np.max(df['x']) + 1) / self.grid_res
y_grid = (np.max(df['y']) + 1) / self.grid_res
self.grid_step = np.minimum(x_grid, y_grid)
self.grid = np.zeros(
[int(np.floor(np.max(df['x']) / self.grid_step)), int(np.floor(np.max(df['y']) / self.grid_step))])
for e in df['epoch'].unique():
aux_x = (df[df['epoch'] == e]['x'] / self.grid_step).apply(np.floor).astype(np.int32)
aux_y = (df[df['epoch'] == e]['y'] / self.grid_step).apply(np.floor).astype(np.int32)
d = | pd.DataFrame(data={'aux_x': aux_x, 'aux_y': aux_y}) | pandas.DataFrame |
"""Test the DropTokensByList pipeline stage."""
import pandas as pd
import pdpipe as pdp
def test_drop_tokens_by_list_short():
data = [[4, ["a", "bad", "cat"]], [5, ["bad", "not", "good"]]]
df = pd.DataFrame(data, [1, 2], ["age", "text"])
filter_tokens = pdp.DropTokensByList('text', ['bad'])
res_df = filter_tokens(df)
assert 'age' in res_df.columns
assert 'text' in res_df.columns
assert 'bad' not in res_df.loc[1]['text']
assert 'a' in res_df.loc[1]['text']
assert 'cat' in res_df.loc[1]['text']
assert 'bad' not in res_df.loc[2]['text']
assert 'not' in res_df.loc[2]['text']
assert 'good' in res_df.loc[2]['text']
def test_drop_tokens_by_list_short_no_drop():
data = [[4, ["a", "bad", "cat"]], [5, ["bad", "not", "good"]]]
df = pd.DataFrame(data, [1, 2], ["age", "text"])
filter_tokens = pdp.DropTokensByList('text', ['bad'], drop=False)
res_df = filter_tokens(df)
assert 'age' in res_df.columns
assert 'text' in res_df.columns
assert 'text_filtered' in res_df.columns
assert 'bad' not in res_df.loc[1]['text_filtered']
assert 'a' in res_df.loc[1]['text_filtered']
assert 'cat' in res_df.loc[1]['text_filtered']
assert 'bad' not in res_df.loc[2]['text_filtered']
assert 'not' in res_df.loc[2]['text_filtered']
assert 'good' in res_df.loc[2]['text_filtered']
def test_drop_tokens_by_long_short():
data = [[4, ["a", "bad", "cat"]], [5, ["bad", "not", "good"]]]
df = | pd.DataFrame(data, [1, 2], ["age", "text"]) | pandas.DataFrame |
# -*- coding: utf-8 -*-
"""
Created on Tue Feb 2 09:14:34 2021
Combines all our current features into two big ol' csvs, one with all
numeric data, one with all categorical data. Also generates a list of all
column names, of all numerical columns then all categorical columns.
runtime: a few seconds.
@author: Kirby
"""
import numpy as np
import pandas as pd
from pathlib import Path
from time import time
from datetime import datetime
start = time()
now = str(datetime.now().strftime("%d-%m-%Y_%H-%M-%S"))
file_path = Path(__file__)
feature_path = file_path.parent.parent.joinpath('Features')
cohort_path = file_path.parent.parent.joinpath('Cohort')
#%% Load in data.
ids = pd.read_csv(cohort_path.joinpath('ICU_readmissions_dataset.csv'))
#Numeric data.
static_num = pd.read_csv(feature_path.joinpath('Static','static_features.csv'),
usecols=range(4,20))
labs = pd.read_csv(feature_path.joinpath('Labs','lab_feature_data.csv'),
usecols=range(4,355))
gcs = pd.read_csv(feature_path.joinpath('NurseCharting','GCS_feature.csv'),
usecols=range(1,5))
rass = pd.read_csv(feature_path.joinpath('NurseCharting','rass_feature.csv'),
usecols=range(1,1))
temp = pd.read_csv(feature_path.joinpath('NurseCharting','temp_feature.csv'),
usecols=range(1,1))
urine = pd.read_csv(feature_path.joinpath('IntakeOutput',
'urine_transfusions_features.csv'),
usecols=['last_24hr_urine'])
vent = pd.read_csv(feature_path.joinpath('Ventilation',
'MV_duration.csv'),
usecols=[1])
#Categorical/binary data.
static_cat = pd.read_csv(feature_path.joinpath('Static','static_features.csv'),
usecols=range(20,76))
meds = pd.read_csv(feature_path.joinpath('Medications','AllDrugFeatures.csv'),
usecols=range(4,55))
hist = pd.read_csv(feature_path.joinpath('History','HistoryFeatures.csv'),
usecols=range(4,58))
transf = pd.read_csv(feature_path.joinpath('IntakeOutput',
'urine_transfusions_features.csv'),
usecols=range(7,10))
dial = pd.read_csv(feature_path.joinpath('Dialysis','dialysis_feature.csv'),
usecols=['dialysis'])
elix = pd.read_csv(feature_path.joinpath('Comorbidity',
'Elixhauser_features.csv'),
usecols=range(1,32))
seps = pd.read_csv(feature_path.joinpath('Sepsis','sepsis_and_infection.csv'),
usecols=range(1,6))
#%% Put it all together
num = pd.concat([static_num,labs,gcs,rass,temp,urine],axis=1)
cat = pd.concat([static_cat,meds,hist,vent,transf,dial,elix],axis=1)
num.to_csv('numeric_data.csv',index=False)
cat.to_csv('categorical_data.csv',index=False)
#Column names.
cols1 = num.columns.to_frame(index=False)
cols2 = cat.columns.to_frame(index=False)
all_cols = | pd.concat([cols1,cols2],axis=0) | pandas.concat |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import os
import csv
from collections import defaultdict
import numpy as np
import re
from nltk.stem.wordnet import WordNetLemmatizer
from nltk.tokenize import word_tokenize
from nltk.tokenize.regexp import RegexpTokenizer
import pandas as pd
from funcy import lflatten  # assumed source of lflatten used in make_causal_input
def clean_tokens(tokens, to_replace='[^\w\-\+\&\.\'\"]+'):
lemma = WordNetLemmatizer()
tokens = [re.sub(to_replace, ' ', token) for token in tokens]
tokens = [lemma.lemmatize(token) for token in tokens]
return tokens
def tokenize(mystr):
tokenizer = RegexpTokenizer('[^ ]+')
return tokenizer.tokenize(mystr)
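# --- Added usage sketch: tokenize() splits on whitespace only, and
# clean_tokens() needs the NLTK WordNet data to be available for lemmatization ---
def _example_tokenize_and_clean():
    tokens = tokenize("The cats sat on the mats!")
    # -> ['The', 'cats', 'sat', 'on', 'the', 'mats!']
    return clean_tokens(tokens)  # punctuation replaced by spaces, tokens lemmatized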
def make_causal_input(lod, map_, silent=True):
"""
:param lod: list of dictionaries
:param map_: mapping of tags and values of interest, i.e. [('cause', 'C'), ('effect', 'E')]. The silent tags are by default tagged as 'O'
:return: dict of list of tuples for each sentence
"""
dd = defaultdict(list)
dd_ = []
rx = re.compile(r"(\b[-']\b)|[\W_]")
rxlist = [r'("\\)', r'(\\")']
rx = re.compile('|'.join(rxlist))
for i in range(len(lod)):
line_ = lod[i]['sentence']
line = re.sub(rx, '', line_)
caus = lod[i]['cause']
caus = re.sub(rx, '', caus)
effe = lod[i]['effect']
effe = re.sub(rx, '', effe)
d = defaultdict(list)
index = 0
for idx, w in enumerate(word_tokenize(line)):
index = line.find(w, index)
if not index == -1:
d[idx].append([w, index])
index += len(w)
d_ = defaultdict(list)
for idx in d:
d_[idx].append([tuple([d[idx][0][0], 'O']), d[idx][0][1]])
init_e = line.find(effe)
init_e = 0 if init_e == -1 else init_e
init_c = line.find(caus)
init_c = 0 if init_c == -1 else init_c
for c, cl in enumerate(word_tokenize(caus)):
init_c = line.find(cl, init_c)
stop = line.find(cl, init_c) + len(cl)
word = line[init_c:stop]
for idx in d_:
if int(init_c) == int(d_[idx][0][1]):
und_ = defaultdict(list)
und_[idx].append([tuple([word, 'C']), line.find(word, init_c)])
d_[idx] = und_[idx]
init_c += len(cl)
for e, el in enumerate(word_tokenize(effe)):
init_e = line.find(el, init_e)
stop = line.find(el, init_e) + len(el)
word = line[init_e:stop]
for idx in d_:
if int(init_e) == int(d_[idx][0][1]):
und_ = defaultdict(list)
und_[idx].append([tuple([word, 'E']), line.find(word, init_e)])
d_[idx] = und_[idx]
init_e += len(word)
dd[i].append(d_)
for dict_ in dd:
dd_.append([item[0][0] for sub in [[j for j in i.values()] for i in lflatten(dd[dict_])] for item in sub])
return dd_
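# --- Added illustrative sketch: a minimal input showing the intended C/E/O
# tagging; the exact output depends on tokenization ---
def _example_make_causal_input():
    lod = [{'sentence': 'Heavy rain caused severe floods',
            'cause': 'Heavy rain',
            'effect': 'severe floods'}]
    # expected shape: one list of (token, tag) pairs per sentence, e.g.
    # [('Heavy', 'C'), ('rain', 'C'), ('caused', 'O'), ('severe', 'E'), ('floods', 'E')]
    return make_causal_input(lod, [('cause', 'C'), ('effect', 'E')])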
def s2dict(lines, lot):
d = defaultdict(list)
for line_, tag_ in zip(lines, lot):
d[tag_] = line_
return d
def make_data(df):
lodict_ = []
for rows in df.itertuples():
list_ = [rows[2], rows[3], rows[4]]
map1 = ['sentence', 'cause', 'effect']
dict_ = s2dict(list_, map1)
lodict_.append(dict_)
map_ = [('cause', 'C'), ('effect', 'E')]
return zip(*[tuple(zip(*x)) for x in make_causal_input(lodict_, map_)])
def make_data2(df):
lodict_ = []
for rows in df.itertuples():
list_ = [rows[2], rows[3], rows[4]]
map1 = ['sentence', 'cause', 'effect']
dict_ = s2dict(list_, map1)
lodict_.append(dict_)
map_ = [('cause', 'C'), ('effect', 'E')]
import itertools
return list(itertools.chain(*make_causal_input(lodict_, map_)))
def create_data_files(input_file_path, validation=False):
df = pd.read_csv(input_file_path, delimiter='; ', engine='python', header=0)
# Make train and test sets keeping multiple cause / effects blocks together.
df['IdxSplit'] = df.Index.apply(lambda x: ''.join(x.split(".")[0:2]))
df.set_index('IdxSplit', inplace=True)
np.random.seed(0)
testrows = np.random.choice(df.index.values, int(len(df) / 4))
test_sents = df.loc[testrows].drop_duplicates(subset='Index')
train_sents = df.drop(test_sents.index)
if validation is True:
validrows = np.random.choice(train_sents.index.values, int(len(train_sents) / 4))
valid_sents = train_sents.loc[validrows]
train_sents = df.drop(valid_sents.index)
pairs = make_data2(valid_sents)
pd.DataFrame(pairs).to_csv('valid_data.csv', sep=' ', index=None, header=False)
pairs = make_data2(train_sents)
pd.DataFrame(pairs).to_csv('train_data.csv', sep=' ', index=None, header=False)
pairs = make_data2(test_sents)
pd.DataFrame(pairs).to_csv('test_data.csv', sep=' ', index=None, header=False)
def create_data_files2(input_file_path, validation=False):
def write_list(lst, outfile):
with open(outfile, 'w') as f:
for item in lst:
f.write("%s\n" % item)
df = pd.read_csv(input_file_path, delimiter='; ', engine='python', header=0)
# Make train and test sets keeping multiple cause / effects blocks together.
df['IdxSplit'] = df.Index.apply(lambda x: ''.join(x.split(".")[0:2]))
df.set_index('IdxSplit', inplace=True)
np.random.seed(0)
testrows = np.random.choice(df.index.values, int(len(df) / 4))
test_sents = df.loc[testrows].drop_duplicates(subset='Index')
train_sents = df.drop(test_sents.index)
if validation is True:
validrows = np.random.choice(train_sents.index.values, int(len(train_sents) / 4))
valid_sents = train_sents.loc[validrows]
train_sents = train_sents.drop(valid_sents.index)
sentences, tags = make_data(valid_sents)
write_list(list(map(lambda x: ' '.join(x), sentences)), 'testa.words.txt')
write_list(list(map(lambda x: ' '.join(x), tags)), 'testa.tags.txt')
sentences, tags = make_data(train_sents)
write_list(list(map(lambda x: ' '.join(x), sentences)), 'train.words.txt')
write_list(list(map(lambda x: ' '.join(x), tags)), 'train.tags.txt')
sentences, tags = make_data(test_sents)
write_list(list(map(lambda x: ' '.join(x), sentences)), 'testb.words.txt')
write_list(list(map(lambda x: ' '.join(x), tags)), 'testb.tags.txt')
def create_data_files3(input_file_path, test_file_path, validation=False):
def write_list(lst, outfile):
with open(outfile, 'w') as f:
for item in lst:
f.write("%s\n" % item)
train_sents = pd.read_csv(input_file_path, delimiter='; ', engine='python', header=0)
train_sents['IdxSplit'] = train_sents.Index.apply(lambda x: ''.join(x.split(".")[0:2]))
train_sents.set_index('IdxSplit', inplace=True)
test_sents = pd.read_csv(test_file_path, delimiter='; ', engine='python', header=0)
test_sents['IdxSplit'] = test_sents.Index.apply(lambda x: ''.join(x.split(".")[0:2]))
test_sents.set_index('IdxSplit', inplace=True)
np.random.seed(0)
if validation is True:
validrows = np.random.choice(train_sents.index.values, int(len(train_sents) / 4))
valid_sents = train_sents.loc[validrows]
train_sents = train_sents.drop(valid_sents.index)
sentences, tags = make_data(valid_sents)
write_list(list(map(lambda x: ' '.join(x), sentences)), 'testa.words.txt')
write_list(list(map(lambda x: ' '.join(x), tags)), 'testa.tags.txt')
sentences, tags = make_data(train_sents)
write_list(list(map(lambda x: ' '.join(x), sentences)), 'train.words.txt')
write_list(list(map(lambda x: ' '.join(x), tags)), 'train.tags.txt')
sentences = [' '.join([ word for idx, word in enumerate(word_tokenize(row[2]))]) for row in test_sents.itertuples()]
write_list(sentences, 'testb.words.txt')
# Just temp tags
tags = [' '.join('O' for _ in word_tokenize(row[2])) for row in test_sents.itertuples()]
write_list(tags, 'testb.tags.txt')
def evaluate(test_file_path, modelpath='', args_idx = 1):
pred_file = '/mnt/DATA/python/tf_ner/models/chars_lstm_lstm_crf/results/score/testb.preds.txt'
with open(pred_file, 'r') as f:
predicted = []
sent_data = []
for line in f:
line = line.strip()
if len(line) > 0:
items = line.split(' ')
sent_data.append((items[0], items[1], items[2]))
else:
predicted.append(sent_data)
sent_data = []
if len(sent_data) > 0:
predicted.append(sent_data)
labels = {"C": 1, "E": 2, "O": 0}
predictions = np.array([labels[pred] for sent in predicted for _, _, pred in sent])
truths = np.array([labels[t] for sent in predicted for _, t, _ in sent])
print(np.sum(truths == predictions) / len(truths))
y_test = [[t for _, t, _ in sent] for sent in predicted]
y_pred = [[pred for __, _, pred in sent] for sent in predicted]
tokens_test = [[token for token, _, _ in sent] for sent in predicted]
ll = []
for i, (pred, token) in enumerate(zip(y_pred, tokens_test)):
l = defaultdict(list)
for j, (y, word) in enumerate(zip(pred, token)):
print(y, word)
l[j] = (word, y)
ll.append(l)
nl = []
for line, yt, yp in zip(ll, y_test, y_pred):
d_ = defaultdict(list)
d_["truth"] = yt
d_["pred"] = yp
d_["diverge"] = 0
for k, v in line.items():
d_[v[1]].append(''.join(v[0]))
if d_["truth"] != d_["pred"]:
d_["diverge"] = 1
d_['Cause'] = ' '.join(el for el in d_['C'])
cause_extend = len(d_['Cause']) + 1 # add 1 extra space at start
d_[' Cause'] = d_['Cause'].rjust(cause_extend)
d_['_'] = ' '.join(el for el in d_['_'])
d_['Effect'] = ' '.join(el for el in d_['E'])
effect_extend = len(d_['Effect']) + 1
d_[' Effect'] = d_['Effect'].rjust(effect_extend)
nl.append(d_)
fieldn = sorted(list(set(k for d in nl for k in d)))
with open(os.path.join(modelpath, ("controls_" + str(args_idx)) + ".csv"), "w+", encoding='utf-8') as f:
writer = csv.DictWriter(f, fieldnames=fieldn, delimiter="~")
writer.writeheader()
for line in nl:
writer.writerow(line)
test = pd.read_csv(test_file_path, delimiter='; ', engine='python', header=0)
test['IdxSplit'] = test.Index.apply(lambda x: ''.join(x.split(".")[0:2]))
test.set_index('IdxSplit', inplace=True)
tmp = | pd.DataFrame.from_records(nl) | pandas.DataFrame.from_records |
import unittest
import pandas as pd
from mavedbconvert import validators, constants, exceptions
class TestHGVSPatternsBackend(unittest.TestCase):
def setUp(self):
self.backend = validators.HGVSPatternsBackend()
def test_validate_hgvs_raise_HGVSValidationError(self):
with self.assertRaises(exceptions.HGVSValidationError):
self.backend.validate("p.1102A>G")
with self.assertRaises(exceptions.HGVSValidationError):
self.backend.validate("x.102A>G")
def test_validate_passes_on_special(self):
self.backend.validate(constants.enrich2_wildtype)
self.backend.validate(constants.enrich2_synonymous)
def test_returns_str_variant(self):
self.assertIsInstance(self.backend.validate("c.1A>G"), str)
class TestValidateHGVS(unittest.TestCase):
def test_uses_patterns_backend_as_default(self):
result = validators.validate_variants(["c.[1A>G;2A>G]"], n_jobs=2, verbose=0)
self.assertIsInstance(result[0], str)
def test_can_specify_backend(self):
backend = validators.HGVSPatternsBackend()
result = validators.validate_variants(
["c.[1A>G;2A>G]"], n_jobs=2, verbose=0, validation_backend=backend
)
self.assertIsInstance(result[0], str)
class TestDfValidators(unittest.TestCase):
def test_validate_column_raise_keyerror_column_not_exist(self):
df = pd.DataFrame({"a": [1]})
with self.assertRaises(KeyError):
validators.validate_has_column(df, "b")
def test_validate_column_passes_when_column_exists(self):
df = pd.DataFrame({"a": [1]})
validators.validate_has_column(df, "a")
def test_error_some_values_non_numeric(self):
df = pd.DataFrame({"A": ["a", 1, 2]})
with self.assertRaises(TypeError):
validators.validate_columns_are_numeric(df)
def test_pass_all_numeric(self):
df = pd.DataFrame({"A": [1, 2, 1.0]})
validators.validate_columns_are_numeric(df)
class TestHGVSValidators(unittest.TestCase):
def test_validate_hgvs_uniqueness(self):
df = pd.DataFrame({constants.nt_variant_col: ["a", "b"]})
validators.validate_hgvs_uniqueness(df, constants.nt_variant_col) # Should pass
df = pd.DataFrame({constants.nt_variant_col: ["a", "b", "a"]})
with self.assertRaises(ValueError):
validators.validate_hgvs_uniqueness(df, constants.nt_variant_col)
# test multi-variant formatting
df = pd.DataFrame({constants.nt_variant_col: list("abcdefg" * 2)})
with self.assertRaises(ValueError) as cm:
validators.validate_hgvs_uniqueness(df, constants.nt_variant_col)
self.assertTrue(str(cm.exception).endswith(", ..."))
def test_validate_hgvs_uniqueness_bad_column(self):
df = pd.DataFrame({constants.nt_variant_col: ["a", "b", "a"]})
with self.assertRaises(KeyError):
validators.validate_hgvs_uniqueness(df, constants.pro_variant_col)
def test_validate_hgvs_uniqueness_ignores_none(self):
df = pd.DataFrame({constants.nt_variant_col: ["a", "b", None, None]})
validators.validate_hgvs_uniqueness(df, constants.nt_variant_col) # Should pass
class TestMaveDBCompliance(unittest.TestCase):
def test_error_primary_column_contains_null(self):
df = pd.DataFrame(
{
constants.nt_variant_col: ["c.100A>G", None],
constants.pro_variant_col: ["p.G4L", "p.G5L"],
}
)
with self.assertRaises(ValueError):
validators.validate_mavedb_compliance(df, df_type=None)
def test_error_primary_column_as_pro_contains_null(self):
df = pd.DataFrame(
{
constants.nt_variant_col: [None, None],
constants.pro_variant_col: ["p.G4L", None],
}
)
with self.assertRaises(ValueError):
validators.validate_mavedb_compliance(df, df_type=None)
def test_pass_coding_(self):
df = pd.DataFrame(
{
constants.nt_variant_col: ["c.100A>G", "c.101A>G"],
constants.pro_variant_col: ["p.G4L", "p.G5L"],
}
)
validators.validate_mavedb_compliance(df, df_type=None)
df = pd.DataFrame(
{
constants.nt_variant_col: ["n.100A>G", "n.101A>G"],
constants.pro_variant_col: [None, None],
}
)
validators.validate_mavedb_compliance(df, df_type=None)
def test_error_missing_nt_pro_columns(self):
df = pd.DataFrame({"A": ["c.100A>G", "c.101A>G"], "B": [None, None]})
with self.assertRaises(ValueError):
validators.validate_mavedb_compliance(df, df_type=None)
def test_error_neither_column_defines_variants(self):
df = pd.DataFrame(
{
constants.nt_variant_col: [None, None],
constants.pro_variant_col: [None, None],
}
)
with self.assertRaises(ValueError):
validators.validate_mavedb_compliance(df, df_type=None)
def test_allows_duplicates_in_pro_col(self):
df = pd.DataFrame(
{
constants.nt_variant_col: [None, None],
constants.pro_variant_col: ["p.G4L", "p.G4L"],
}
)
validators.validate_mavedb_compliance(df, df_type=None) # passes
def test_error_duplicates_in_nt_col(self):
df = pd.DataFrame(
{
constants.nt_variant_col: ["c.100A>G", "c.100A>G"],
constants.pro_variant_col: ["p.G4L", "p.G4L"],
}
)
with self.assertRaises(ValueError):
validators.validate_mavedb_compliance(df, df_type=None)
def test_keyerror_missing_score_column_df_type_is_scores(self):
df = pd.DataFrame(
{
constants.pro_variant_col: [None, "pG4L"],
constants.nt_variant_col: ["c.100A>G", "c.101A>G"],
}
)
with self.assertRaises(KeyError):
validators.validate_mavedb_compliance(df, df_type=constants.score_type)
class TestValidateSameVariants(unittest.TestCase):
def test_ve_counts_defines_different_nt_variants(self):
scores = pd.DataFrame(
{
constants.nt_variant_col: ["c.1A>G"],
constants.pro_variant_col: ["p.Leu5Glu"],
}
)
counts = pd.DataFrame(
{
constants.nt_variant_col: ["c.2A>G"],
constants.pro_variant_col: ["p.Leu5Glu"],
}
)
with self.assertRaises(AssertionError):
validators.validate_datasets_define_same_variants(scores, counts)
scores = pd.DataFrame({constants.nt_variant_col: ["n.1A>G"]})
counts = pd.DataFrame({constants.nt_variant_col: ["n.2A>G"]})
with self.assertRaises(AssertionError):
validators.validate_datasets_define_same_variants(scores, counts)
def test_ve_counts_defines_different_pro_variants(self):
scores = pd.DataFrame(
{
constants.nt_variant_col: ["c.1A>G"],
constants.pro_variant_col: ["p.Leu5Glu"],
}
)
counts = pd.DataFrame(
{
constants.nt_variant_col: ["c.1A>G"],
constants.pro_variant_col: ["p.Leu75Glu"],
}
)
with self.assertRaises(AssertionError):
validators.validate_datasets_define_same_variants(scores, counts)
scores = pd.DataFrame({constants.pro_variant_col: ["p.Leu5Glu"]})
counts = pd.DataFrame({constants.pro_variant_col: ["p.Leu75Glu"]})
with self.assertRaises(AssertionError):
validators.validate_datasets_define_same_variants(scores, counts)
def test_passes_when_same_variants_defined(self):
scores = pd.DataFrame(
{
constants.nt_variant_col: ["c.1A>G"],
constants.pro_variant_col: ["p.Leu5Glu"],
}
)
counts = pd.DataFrame(
{
constants.nt_variant_col: ["c.1A>G"],
constants.pro_variant_col: ["p.Leu5Glu"],
}
)
validators.validate_datasets_define_same_variants(scores, counts)
scores = pd.DataFrame({constants.nt_variant_col: ["n.1A>G"]})
counts = pd.DataFrame({constants.nt_variant_col: ["n.1A>G"]})
validators.validate_datasets_define_same_variants(scores, counts)
scores = pd.DataFrame({constants.pro_variant_col: ["p.Leu5Glu"]})
counts = pd.DataFrame({constants.pro_variant_col: ["p.Leu5Glu"]})
validators.validate_datasets_define_same_variants(scores, counts)
def test_error_dfs_define_different_hgvs_columns(self):
scores = pd.DataFrame({constants.nt_variant_col: ["c.1A>G"]})
counts = | pd.DataFrame({constants.pro_variant_col: ["p.Leu75Glu"]}) | pandas.DataFrame |
import glob
import subprocess
import csv
import pandas as pd
import os
if __name__ == '__main__':
# test1.py executed as script
# do something
paths = []#["../data/burma14.tsp", "../data/berlin52.tsp", "../data/eil51.tsp", "../data/att48.tsp", "../data/st70.tsp", "../data/pr76.tsp"]
for name in glob.glob('../data/heuristics/*'):
paths.append(name)
#methods = ["GREEDY", "GREEDY_ITER", "EXTR_MILE", "GRASP", "GRASP_ITER"]
methods = ["2OPT_GREEDY", "2OPT_GREEDY_ITER", "2OPT_EXTR_MIL","2OPT_GRASP", "2OPT_GRASP_ITER"]
time_limit = "600"
csv_filename="constructive_heuristics_2opt_new.csv"
#if file csv do not exist, create it.
if not os.path.exists('../results/'+csv_filename):
df=pd.DataFrame(index=paths,columns=methods)
df.to_csv('../results/'+csv_filename,index=True)
else:
df= | pd.read_csv('../results/'+csv_filename,index_col=0) | pandas.read_csv |
# Copyright 2021 Lawrence Livermore National Security, LLC and other
# Hatchet Project Developers. See the top-level LICENSE file for details.
#
# SPDX-License-Identifier: MIT
import pandas as pd
import os
import caliperreader as cr
import hatchet.graphframe
from hatchet.node import Node
from hatchet.graph import Graph
from hatchet.frame import Frame
from hatchet.util.timer import Timer
class CaliperNativeReader:
"""Read in a native `.cali` file using Caliper's python reader."""
def __init__(self, filename_or_caliperreader):
"""Read in a native cali using Caliper's python reader.
Args:
filename_or_caliperreader (str or CaliperReader): name of a `cali` file OR
a CaliperReader object
"""
self.filename_or_caliperreader = filename_or_caliperreader
self.filename_ext = ""
self.df_nodes = {}
self.metric_cols = []
self.record_data_cols = []
self.node_dicts = []
self.callpath_to_node = {}
self.idx_to_node = {}
self.callpath_to_idx = {}
self.global_nid = 0
self.default_metric = None
self.timer = Timer()
if isinstance(self.filename_or_caliperreader, str):
_, self.filename_ext = os.path.splitext(filename_or_caliperreader)
def read_metrics(self, ctx="path"):
all_metrics = []
records = self.filename_or_caliperreader.records
# read metadata from the caliper reader
for record in records:
node_dict = {}
if ctx in record:
# get the node label and callpath for the record
if isinstance(record[ctx], list):
# specify how to parse cupti records
if "cupti.activity.kind" in record:
if record["cupti.activity.kind"] == "kernel":
node_label = record["cupti.kernel.name"]
node_callpath = tuple(record[ctx] + [node_label])
elif record["cupti.activity.kind"] == "memcpy":
node_label = record["cupti.activity.kind"]
node_callpath = tuple(record[ctx] + [node_label])
else:
node_label = record[ctx][-1]
node_callpath = tuple(record[ctx])
else:
node_label = record[ctx][-1]
node_callpath = tuple([record[ctx]])
# get node nid based on callpath
node_dict["nid"] = self.callpath_to_idx.get(node_callpath)
for item in record.keys():
if item != ctx:
if item not in self.record_data_cols:
self.record_data_cols.append(item)
if (
self.filename_or_caliperreader.attribute(
item
).attribute_type()
== "double"
):
node_dict[item] = float(record[item])
elif (
self.filename_or_caliperreader.attribute(
item
).attribute_type()
== "int"
):
node_dict[item] = int(record[item])
elif item == "function":
if isinstance(record[item], list):
node_dict[item] = record[item][-1]
else:
node_dict[item] = record[item]
else:
node_dict[item] = record[item]
all_metrics.append(node_dict)
# make list of metric columns
for col in self.record_data_cols:
if self.filename_or_caliperreader.attribute(col).is_value():
self.metric_cols.append(col)
df_metrics = pd.DataFrame.from_dict(data=all_metrics)
return df_metrics
def create_graph(self, ctx="path"):
def _create_parent(child_node, parent_callpath):
"""We may encounter a parent node in the callpath before we see it
as a child node. In this case, we need to create a hatchet node for
the parent.
This function recursively creates parent nodes in a callpath
until it reaches the already existing parent in that callpath.
"""
parent_node = self.callpath_to_node.get(parent_callpath)
if parent_node:
# return if arrives at the parent
parent_node.add_child(child_node)
child_node.add_parent(parent_node)
return
else:
# else create the parent and add parent/child
grandparent_callpath = parent_callpath[:-1]
parent_name = parent_callpath[-1]
parent_node = Node(
Frame({"type": "function", "name": parent_name}), None
)
self.callpath_to_node[parent_callpath] = parent_node
self.callpath_to_idx[parent_callpath] = self.global_nid
node_dict = dict(
{"name": parent_name, "node": parent_node, "nid": self.global_nid},
)
self.idx_to_node[self.global_nid] = node_dict
self.global_nid += 1
parent_node.add_child(child_node)
child_node.add_parent(parent_node)
_create_parent(parent_node, grandparent_callpath)
list_roots = []
parent_hnode = None
records = self.filename_or_caliperreader.records
for record in records:
node_label = ""
if ctx in record:
# if it's a list, then it's a callpath
if isinstance(record[ctx], list):
# specify how to parse cupti records
if "cupti.activity.kind" in record:
if record["cupti.activity.kind"] == "kernel":
node_label = record["cupti.kernel.name"]
node_callpath = tuple(record[ctx] + [node_label])
parent_callpath = node_callpath[:-1]
node_type = "kernel"
elif record["cupti.activity.kind"] == "memcpy":
node_label = record["cupti.activity.kind"]
node_callpath = tuple(record[ctx] + [node_label])
parent_callpath = node_callpath[:-1]
node_type = "memcpy"
else:
Exception("Haven't seen this activity kind yet")
else:
node_label = record[ctx][-1]
node_callpath = tuple(record[ctx])
parent_callpath = node_callpath[:-1]
node_type = "function"
hnode = self.callpath_to_node.get(node_callpath)
if not hnode:
frame = Frame({"type": node_type, "name": node_label})
hnode = Node(frame, None)
self.callpath_to_node[node_callpath] = hnode
# get parent from node callpath
parent_hnode = self.callpath_to_node.get(parent_callpath)
# create parent if it doesn't exist
# else if parent already exists, add child-parent
if not parent_hnode:
_create_parent(hnode, parent_callpath)
else:
parent_hnode.add_child(hnode)
hnode.add_parent(parent_hnode)
self.callpath_to_idx[node_callpath] = self.global_nid
node_dict = dict(
{"name": node_label, "node": hnode, "nid": self.global_nid},
)
self.idx_to_node[self.global_nid] = node_dict
self.global_nid += 1
# if it's a string, then it's a root
else:
root_label = record[ctx]
root_callpath = tuple([root_label])
if root_callpath not in self.callpath_to_node:
# create the root since it doesn't exist
frame = Frame({"type": "function", "name": root_label})
graph_root = Node(frame, None)
# store callpaths to identify the root
self.callpath_to_node[root_callpath] = graph_root
self.callpath_to_idx[root_callpath] = self.global_nid
list_roots.append(graph_root)
node_dict = dict(
{
"name": root_label,
"node": graph_root,
"nid": self.global_nid,
}
)
self.idx_to_node[self.global_nid] = node_dict
self.global_nid += 1
return list_roots
def read(self):
"""Read the caliper records to extract the calling context tree."""
if isinstance(self.filename_or_caliperreader, str):
if self.filename_ext != ".cali":
raise ValueError("from_caliperreader() needs a .cali file")
else:
cali_file = self.filename_or_caliperreader
self.filename_or_caliperreader = cr.CaliperReader()
self.filename_or_caliperreader.read(cali_file)
with self.timer.phase("graph construction"):
list_roots = self.create_graph()
self.df_nodes = pd.DataFrame(data=list(self.idx_to_node.values()))
# create a graph object once all the nodes have been added
graph = Graph(list_roots)
graph.enumerate_traverse()
with self.timer.phase("read metrics"):
df_fixed_data = self.read_metrics()
metrics = | pd.DataFrame.from_dict(data=df_fixed_data) | pandas.DataFrame.from_dict |
# coding=utf-8
from collections import defaultdict
import numpy as np
import pandas as pd
import param
import util
from gensim.models import Word2Vec
############################ 加载数据 & 模型 ############################
df_all = | pd.read_csv(param.data_path + '/output/corpus/all_data.csv', encoding='utf8', nrows=param.train_num) | pandas.read_csv |
import unittest
import pandas as pd
from featurefilter import TargetCorrelationFilter
def test_low_continuous_correlation():
train_df = pd.DataFrame({'A': [0, 0, 1, 1], 'Y': [0, 1, 0, 1]})
target_correlation_filter = TargetCorrelationFilter(target_column='Y')
train_df = target_correlation_filter.fit(train_df)
assert target_correlation_filter.columns_to_drop == ['A']
def test_high_negative_continuous_correlation():
train_df = pd.DataFrame({'A': [0, 0], 'B': [1, 0], 'Y': [0, 1]})
test_df = pd.DataFrame({'A': [0, 1], 'B': [1, 1], 'Y': [0, 1]})
target_correlation_filter = TargetCorrelationFilter(target_column='Y')
train_df = target_correlation_filter.fit_transform(train_df)
test_df = target_correlation_filter.transform(test_df)
# Make sure column 'B' is dropped for both train and test set
# Also, column 'A' must not be dropped for the test set even though its
# correlation in the test set is above the threshold
assert train_df.equals(pd.DataFrame({'A': [0, 0], 'Y': [0, 1]}))
assert test_df.equals(pd.DataFrame({'A': [0, 1], 'Y': [0, 1]}))
def test_high_positive_continuous_correlation():
train_df = | pd.DataFrame({'A': [0, 0], 'B': [0, 1], 'Y': [0, 1]}) | pandas.DataFrame |
import boto3
import glob
import gzip
import io
import json
import logging
import os
import sys
import argparse
import pandas as pd
import numpy as np
from progress.bar import IncrementalBar
"""
Parses CloudTrail log files, combines them, and writes them to an XLSX file
Sync the relevant files to your local filesystem
Pass in the path to the .json.gz files as a "globular" express in quotes
Provide a results file name
"""
parser = argparse.ArgumentParser(description='Parse CloudTrail JSON files.')
parser.add_argument('-r', '--resultfile', type=str,
help=('Result File. A ".csv" extension will create'
'a CSV file. An ".xlsx" or no extension will'
' generate an .xlsx file.'))
parser.add_argument('jsfile', metavar="path", type=str,
help='JSON file(s) to be analyzed. Expects a glob: "AWSLogs/*/CloudTrail/*/*/*/*/*gz"')
parser.add_argument('--verbose', '-v', action='count')
args = parser.parse_args()
if args.verbose is not None:
logging.basicConfig(level=logging.DEBUG)
if args.resultfile is not None:
resFileExt = os.path.splitext(args.resultfile)[1]
if resFileExt != ".xlsx" and resFileExt != ".csv":
resultfile = args.resultfile + ".xlsx"
else:
resultfile = args.resultfile
else:
resultfile = 'results.xlsx'
logging.debug('args.jsfile: ' + args.jsfile)
logging.debug('resultfile: ' + resultfile)
files = glob.glob(args.jsfile)
global myDf
myDf = []
if not files:
print('File does not exist: ' + args.jsfile, file=sys.stderr)
print(files)
exit()
bar = IncrementalBar('Processing CloudTrail Files', max=len(files))
for file in files:
bar.next()
if args.verbose is not None:
print('File exists: ' + file)
extension = os.path.splitext(file)[1]
if extension == ".gz":
with gzip.open(file, 'rt', encoding='utf-8') as f:
myLogsJson = json.load(f)
else:
with open(file, 'rt', encoding='utf8') as jsfile:
myLogsJson = json.loads(jsfile.read())
myDf.append(pd.io.json.json_normalize(myLogsJson['Records']))
bar.finish()
logging.debug("myDf List Size: " + str(len(myDf)))
print("Combining records. This may take a minute. {:.1f} KB".format(sys.getsizeof(myDf)/1024))
unsortedDf = | pd.concat(myDf, sort=False) | pandas.concat |
import pandas as pd
import numpy as np
import requests
from fake_useragent import UserAgent
import io
import os
import time
import json
import demjson
from datetime import datetime
import ssl
ssl._create_default_https_context = ssl._create_unverified_context
# Main Economic Indicators: https://alfred.stlouisfed.org/release?rid=205
url = {
"fred_econ": "https://fred.stlouisfed.org/graph/fredgraph.csv?",
"philfed": "https://www.philadelphiafed.org/surveys-and-data/real-time-data-research/",
"chicagofed": "https://www.chicagofed.org/~/media/publications/",
"OECD": "https://stats.oecd.org/sdmx-json/data/DP_LIVE/"
}
def date_transform(df, format_origin, format_after):
return_list = []
for i in range(0, len(df)):
return_list.append(datetime.strptime(df[i], format_origin).strftime(format_after))
return return_list
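# --- Added usage sketch: reformat a list of date strings in one call ---
def _example_date_transform():
    return date_transform(['2020-01-31', '2020-02-29'], '%Y-%m-%d', '%Y%m')
    # -> ['202001', '202002']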
def gdp_quarterly(startdate="1947-01-01", enddate="2021-01-01"):
"""
Full Name: Gross Domestic Product
Description: Billions of Dollars, Quarterly, Seasonally Adjusted Annual Rate
Return: pd.DataFrame
"""
tmp_url = url["fred_econ"]
ua = UserAgent(verify_ssl=False)
request_header = {"User-Agent": ua.random}
request_params = {
"id": "GDP",
"cosd": "{}".format(startdate),
"coed": "{}".format(enddate)
}
r = requests.get(tmp_url, params=request_params, headers=request_header)
data_text = r.content
df = pd.read_csv(io.StringIO(data_text.decode('utf-8')))
df.columns = ["Date", "GDP"]
df["Date"] = pd.to_datetime(df["Date"], format = "%Y-%m-%d")
df["GDP"] = df["GDP"].astype(float)
return df
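# --- Added usage sketch: this downloads data from FRED, so it needs network
# access; the year-over-year column is only an example of post-processing ---
def _example_gdp_quarterly():
    gdp = gdp_quarterly(startdate="2000-01-01", enddate="2020-12-31")
    gdp["GDP_yoy"] = gdp["GDP"].pct_change(4) * 100  # quarterly series -> YoY growth
    return gdp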
def gdpc1_quarterly(startdate="1947-01-01", enddate="2021-01-01"):
"""
Full Name: Real Gross Domestic Product
Description: Billions of Chained 2012 Dollars, Quarterly, Seasonally Adjusted Annual Rate
Return: pd.DataFrame
"""
tmp_url = url["fred_econ"]
ua = UserAgent(verify_ssl=False)
request_header = {"User-Agent": ua.random}
request_params = {
"id": "GDPC1",
"cosd": "{}".format(startdate),
"coed": "{}".format(enddate)
}
r = requests.get(tmp_url, params=request_params, headers=request_header)
data_text = r.content
df = pd.read_csv(io.StringIO(data_text.decode('utf-8')))
return df
def oecd_gdp_monthly(startdate="1947-01-01", enddate="2021-01-01"):
"""
Full Name: OECD GDP reference series for the United States (FRED id: USALORSGPNOSTSAM)
Description: Monthly
Return: pd.DataFrame
"""
tmp_url = url["fred_econ"]
ua = UserAgent(verify_ssl=False)
request_header = {"User-Agent": ua.random}
request_params = {
"id": "USALORSGPNOSTSAM",
"cosd": "{}".format(startdate),
"coed": "{}".format(enddate)
}
r = requests.get(tmp_url, params=request_params, headers=request_header)
data_text = r.content
df = pd.read_csv(io.StringIO(data_text.decode('utf-8')))
return df
def payems_monthly(startdate="1939-01-01", enddate="2021-01-01"):
"""
Full Name: All Employees, Total Nonfarm
Description: Thousands of Persons,Seasonally Adjusted, Monthly
Return: pd.DataFrame
"""
tmp_url = url["fred_econ"]
ua = UserAgent(verify_ssl=False)
request_header = {"User-Agent": ua.random}
request_params = {
"id": "PAYEMS",
"cosd": "{}".format(startdate),
"coed": "{}".format(enddate)
}
r = requests.get(tmp_url, params=request_params, headers=request_header)
data_text = r.content
df = pd.read_csv(io.StringIO(data_text.decode('utf-8')))
df.columns = ["Date", "Payems"]
df["Date"] = pd.to_datetime(df["Date"], format = "%Y-%m-%d")
df["Payems"] = df["Payems"].astype(float)
return df
def ppi():
tmp_url = url["fred_econ"] + "bgcolor=%23e1e9f0&chart_type=line&drp=0&fo=open%20sans&graph_bgcolor=%23ffffff&height=450&mode=fred&recession_bars=on&txtcolor=%23444444&ts=12&tts=12&width=968&nt=0&thu=0&trc=0&show_legend=yes&show_axis_titles=yes&show_tooltip=yes&id=PPIACO,PCUOMFGOMFG&scale=left,left&cosd=1913-01-01,1984-12-01&coed=2021-04-01,2021-04-01&line_color=%234572a7,%23aa4643&link_values=false,false&line_style=solid,solid&mark_type=none,none&mw=3,3&lw=2,2&ost=-99999,-99999&oet=99999,99999&mma=0,0&fml=a,a&fq=Monthly,Monthly&fam=avg,avg&fgst=lin,lin&fgsnd=2020-02-01,2020-02-01&line_index=1,2&transformation=lin,lin&vintage_date=2021-06-10,2021-06-10&revision_date=2021-06-10,2021-06-10&nd=1913-01-01,1984-12-01"
ua = UserAgent(verify_ssl=False)
request_header = {"User-Agent": ua.random}
r = requests.get(tmp_url, headers=request_header)
data_text = r.content
df = pd.read_csv(io.StringIO(data_text.decode('utf-8')))
df["DATE"] = pd.to_datetime(df["DATE"], format="%Y-%m-%d")
#df = df[list(df.columns[1:])].replace(".", np.nan).astype(float)
name_list = {
"PPIACO": "Producer Price Index by Commodity: All Commodities",
"PCUOMFGOMFG": "Producer Price Index by Industry: Total Manufacturing Industries"
}
df.replace(".", np.nan, inplace = True)
df.columns = ["Date", "PPI_C", "PPI_I"]
df["Date"] = pd.to_datetime(df["Date"], format = "%Y-%m-%d")
df[["PPI_C", "PPI_I"]] = df[["PPI_C", "PPI_I"]].astype(float)
return df
def pmi():
t = time.time()
res = requests.get(
f"https://cdn.jin10.com/dc/reports/dc_usa_ism_pmi_all.js?v={str(int(round(t * 1000))), str(int(round(t * 1000)) + 90)}"
)
json_data = json.loads(res.text[res.text.find("{"): res.text.rfind("}") + 1])
date_list = [item["date"] for item in json_data["list"]]
value_list = [item["datas"]["美国ISM制造业PMI报告"] for item in json_data["list"]]
value_df = pd.DataFrame(value_list)
value_df.columns = json_data["kinds"]
value_df.index = pd.to_datetime(date_list)
temp_df = value_df["今值"]
url = "https://datacenter-api.jin10.com/reports/list_v2"
params = {
"max_date": "",
"category": "ec",
"attr_id": "28",
"_": str(int(round(t * 1000))),
}
headers = {
"accept": "*/*",
"accept-encoding": "gzip, deflate, br",
"accept-language": "zh-CN,zh;q=0.9,en;q=0.8",
"cache-control": "no-cache",
"origin": "https://datacenter.jin10.com",
"pragma": "no-cache",
"referer": "https://datacenter.jin10.com/reportType/dc_usa_michigan_consumer_sentiment",
"sec-fetch-dest": "empty",
"sec-fetch-mode": "cors",
"sec-fetch-site": "same-site",
"user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.149 Safari/537.36",
"x-app-id": "rU6QIu7JHe2gOUeR",
"x-csrf-token": "",
"x-version": "1.0.0",
}
r = requests.get(url, params=params, headers=headers)
temp_se = pd.DataFrame(r.json()["data"]["values"]).iloc[:, :2]
temp_se.index = pd.to_datetime(temp_se.iloc[:, 0])
temp_se = temp_se.iloc[:, 1]
temp_df = temp_df.append(temp_se)
temp_df.dropna(inplace=True)
temp_df.sort_index(inplace=True)
temp_df = temp_df.reset_index()
temp_df.drop_duplicates(subset="index", inplace=True)
temp_df.set_index("index", inplace=True)
temp_df = temp_df.squeeze()
temp_df.index.name = None
temp_df.name = "usa_ism_pmi"
temp_df = temp_df.astype("float")
PMI_I = pd.DataFrame()
PMI_I["Date"] = pd.to_datetime(temp_df.index, format = "%Y-%m-%d")
PMI_I["ISM_PMI_I"] = np.array(temp_df).astype(float)
t = time.time()
res = requests.get(
f"https://cdn.jin10.com/dc/reports/dc_usa_ism_non_pmi_all.js?v={str(int(round(t * 1000))), str(int(round(t * 1000)) + 90)}"
)
json_data = json.loads(res.text[res.text.find("{"): res.text.rfind("}") + 1])
date_list = [item["date"] for item in json_data["list"]]
value_list = [item["datas"]["美国ISM非制造业PMI报告"] for item in json_data["list"]]
value_df = pd.DataFrame(value_list)
value_df.columns = json_data["kinds"]
value_df.index = pd.to_datetime(date_list)
temp_df = value_df["今值"]
url = "https://datacenter-api.jin10.com/reports/list_v2"
params = {
"max_date": "",
"category": "ec",
"attr_id": "29",
"_": str(int(round(t * 1000))),
}
headers = {
"accept": "*/*",
"accept-encoding": "gzip, deflate, br",
"accept-language": "zh-CN,zh;q=0.9,en;q=0.8",
"cache-control": "no-cache",
"origin": "https://datacenter.jin10.com",
"pragma": "no-cache",
"referer": "https://datacenter.jin10.com/reportType/dc_usa_michigan_consumer_sentiment",
"sec-fetch-dest": "empty",
"sec-fetch-mode": "cors",
"sec-fetch-site": "same-site",
"user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.149 Safari/537.36",
"x-app-id": "rU6QIu7JHe2gOUeR",
"x-csrf-token": "",
"x-version": "1.0.0",
}
r = requests.get(url, params=params, headers=headers)
temp_se = pd.DataFrame(r.json()["data"]["values"]).iloc[:, :2]
temp_se.index = pd.to_datetime(temp_se.iloc[:, 0])
temp_se = temp_se.iloc[:, 1]
temp_df = temp_df.append(temp_se)
temp_df.dropna(inplace=True)
temp_df.sort_index(inplace=True)
temp_df = temp_df.reset_index()
temp_df.drop_duplicates(subset="index", inplace=True)
temp_df.set_index("index", inplace=True)
temp_df = temp_df.squeeze()
temp_df.index.name = None
temp_df.name = "usa_ism_non_pmi"
temp_df = temp_df.astype("float")
PMI_NI = pd.DataFrame()
PMI_NI["Date"] = pd.to_datetime(temp_df.index, format = "%Y-%m-%d")
PMI_NI["ISM_PMI_NI"] = np.array(temp_df).astype(float)
PMI = pd.merge_asof(PMI_I, PMI_NI, on = "Date")
return PMI
def unrate(startdate="1948-01-01", enddate="2021-01-01"):
"""
Full Name: Unemployment Rate: Aged 15-64: All Persons for the United States
Description: Percent, Seasonally Adjusted, Monthly, Quarterly and Annually
Return: pd.DataFrame
"""
tmp_url = url["fred_econ"]
ua = UserAgent(verify_ssl=False)
request_header = {"User-Agent": ua.random}
request_params = {
"id": "LRUN64TTUSM156S",
"cosd": "{}".format(startdate),
"coed": "{}".format(enddate)
}
r = requests.get(tmp_url, params=request_params, headers=request_header)
data_text = r.content
df_monthly = pd.read_csv(io.StringIO(data_text.decode('utf-8')))
df_monthly["DATE"] = pd.to_datetime(df_monthly["DATE"], format="%Y-%m-%d")
ua = UserAgent(verify_ssl=False)
request_header = {"User-Agent": ua.random}
request_params = {
"id": "LRUN64TTUSQ156S",
"cosd": "{}".format(startdate),
"coed": "{}".format(enddate)
}
r = requests.get(tmp_url, params=request_params, headers=request_header)
data_text = r.content
df_quarterly = pd.read_csv(io.StringIO(data_text.decode('utf-8')))
df_quarterly["DATE"] = pd.to_datetime(
df_quarterly["DATE"], format="%Y-%m-%d")
ua = UserAgent(verify_ssl=False)
request_header = {"User-Agent": ua.random}
request_params = {
"id": "LRUN64TTUSA156S",
"cosd": "{}".format(startdate),
"coed": "{}".format(enddate)
}
r = requests.get(tmp_url, params=request_params, headers=request_header)
data_text = r.content
df_annually = pd.read_csv(io.StringIO(data_text.decode('utf-8')))
df_annually["DATE"] = pd.to_datetime(
df_annually["DATE"], format="%Y-%m-%d")
df = pd.merge_asof(
df_monthly,
df_quarterly,
on="DATE",
direction="backward")
df = pd.merge_asof(df, df_annually, on="DATE", direction="backward")
df.columns = ["Date", "UR_Monthly", "UR_Quarterly", "UR_Annually"]
return df
def erate(startdate="1955-01-01", enddate="2021-01-01"):
"""
Full Name: Employment Rate: Aged 25-54: All Persons for the United States
Description: Percent,Seasonally Adjusted, Monthly, Quarterly and Annually
Return: pd.DataFrame
"""
tmp_url = url["fred_econ"]
ua = UserAgent(verify_ssl=False)
request_header = {"User-Agent": ua.random}
request_params = {
"id": "LREM25TTUSM156S",
"cosd": "{}".format(startdate),
"coed": "{}".format(enddate)
}
r = requests.get(tmp_url, params=request_params, headers=request_header)
data_text = r.content
df_monthly = pd.read_csv(io.StringIO(data_text.decode('utf-8')))
df_monthly["DATE"] = pd.to_datetime(df_monthly["DATE"], format="%Y-%m-%d")
ua = UserAgent(verify_ssl=False)
request_header = {"User-Agent": ua.random}
request_params = {
"id": "LREM25TTUSQ156S",
"cosd": "{}".format(startdate),
"coed": "{}".format(enddate)
}
r = requests.get(tmp_url, params=request_params, headers=request_header)
data_text = r.content
df_quarterly = pd.read_csv(io.StringIO(data_text.decode('utf-8')))
df_quarterly["DATE"] = pd.to_datetime(
df_quarterly["DATE"], format="%Y-%m-%d")
ua = UserAgent(verify_ssl=False)
request_header = {"User-Agent": ua.random}
request_params = {
"id": "LREM25TTUSA156S",
"cosd": "{}".format(startdate),
"coed": "{}".format(enddate)
}
r = requests.get(tmp_url, params=request_params, headers=request_header)
data_text = r.content
df_annually = pd.read_csv(io.StringIO(data_text.decode('utf-8')))
df_annually["DATE"] = pd.to_datetime(
df_annually["DATE"], format="%Y-%m-%d")
df = pd.merge_asof(
df_monthly,
df_quarterly,
on="DATE",
direction="backward")
df = pd.merge_asof(df, df_annually, on="DATE", direction="backward")
df.columns = ["Date", "ER_Monthly", "ER_Quarterly", "ER_Annually"]
def pce_monthly(startdate="1959-01-01", enddate="2021-01-01"):
"""
Full Name: Personal Consumption Expenditures (PCE)
Description: Billions of Dollars, Monthly, Seasonally Adjusted Annual Rate
Return: pd.DataFrame
"""
tmp_url = url["fred_econ"]
ua = UserAgent(verify_ssl=False)
request_header = {"User-Agent": ua.random}
request_params = {
"id": "PCE",
"cosd": "{}".format(startdate),
"coed": "{}".format(enddate)
}
r = requests.get(tmp_url, params=request_params, headers=request_header)
data_text = r.content
df = pd.read_csv(io.StringIO(data_text.decode('utf-8')))
return df
def cpi(startdate="1960-01-01", enddate="2021-01-01"):
"""
Full Name: Consumer Price Index: Total All Items for the United States
Description: Percent, Monthly, Quarterly and Annually, Seasonally Adjusted
Return: pd.DataFrame
"""
tmp_url = url["fred_econ"]
ua = UserAgent(verify_ssl=False)
request_header = {"User-Agent": ua.random}
request_params = {
"id": "CPALTT01USM661S",
"cosd": "{}".format(startdate),
"coed": "{}".format(enddate)
}
r = requests.get(tmp_url, params=request_params, headers=request_header)
data_text = r.content
df_monthly = pd.read_csv(io.StringIO(data_text.decode('utf-8')))
df_monthly["DATE"] = pd.to_datetime(df_monthly["DATE"], format="%Y-%m-%d")
ua = UserAgent(verify_ssl=False)
request_header = {"User-Agent": ua.random}
request_params = {
"id": "CPALTT01USQ661S",
"cosd": "{}".format(startdate),
"coed": "{}".format(enddate)
}
r = requests.get(tmp_url, params=request_params, headers=request_header)
data_text = r.content
df_quarterly = pd.read_csv(io.StringIO(data_text.decode('utf-8')))
df_quarterly["DATE"] = pd.to_datetime(
df_quarterly["DATE"], format="%Y-%m-%d")
ua = UserAgent(verify_ssl=False)
request_header = {"User-Agent": ua.random}
request_params = {
"id": "CPALTT01USA661S",
"cosd": "{}".format(startdate),
"coed": "{}".format(enddate)
}
r = requests.get(tmp_url, params=request_params, headers=request_header)
data_text = r.content
df_annually = pd.read_csv(io.StringIO(data_text.decode('utf-8')))
df_annually["DATE"] = pd.to_datetime(
df_annually["DATE"], format="%Y-%m-%d")
df = pd.merge_asof(
df_monthly,
df_quarterly,
on="DATE",
direction="backward")
df = pd.merge_asof(df, df_annually, on="DATE", direction="backward")
df.columns = ["Date", "CPI_Monthly", "CPI_Quarterly", "CPI_Annually"]
df["Date"] = pd.to_datetime(df["Date"], format = "%Y-%m-%d")
df[["CPI_Monthly", "CPI_Quarterly", "CPI_Annually"]] = df[["CPI_Monthly", "CPI_Quarterly", "CPI_Annually"]].astype(float)
return df
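# Example sketch: year-over-year inflation can be derived from the monthly CPI
# index returned above, e.g.
#   cpi_df = cpi()
#   cpi_df["Inflation_YoY"] = cpi_df["CPI_Monthly"].pct_change(12) * 100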
def m1(startdate="1960-01-01", enddate="2021-01-01"):
"""
    Full Name: M1 for the United States
    Description: Weekly, Monthly, Quarterly and Annually, Seasonally Adjusted
Return: pd.DataFrame
"""
tmp_url = url["fred_econ"]
ua = UserAgent(verify_ssl=False)
request_header = {"User-Agent": ua.random}
request_params = {
"id": "WM1NS",
"cosd": "{}".format(startdate),
"coed": "{}".format(enddate)
}
r = requests.get(tmp_url, params=request_params, headers=request_header)
data_text = r.content
df_weekly = pd.read_csv(io.StringIO(data_text.decode('utf-8')))
df_weekly["DATE"] = pd.to_datetime(df_weekly["DATE"], format="%Y-%m-%d")
ua = UserAgent(verify_ssl=False)
request_header = {"User-Agent": ua.random}
request_params = {
"id": "MANMM101USM657S",
"cosd": "{}".format(startdate),
"coed": "{}".format(enddate)
}
r = requests.get(tmp_url, params=request_params, headers=request_header)
data_text = r.content
df_monthly = pd.read_csv(io.StringIO(data_text.decode('utf-8')))
df_monthly["DATE"] = pd.to_datetime(df_monthly["DATE"], format="%Y-%m-%d")
ua = UserAgent(verify_ssl=False)
request_header = {"User-Agent": ua.random}
request_params = {
"id": "MANMM101USQ657S",
"cosd": "{}".format(startdate),
"coed": "{}".format(enddate)
}
r = requests.get(tmp_url, params=request_params, headers=request_header)
data_text = r.content
df_quarterly = pd.read_csv(io.StringIO(data_text.decode('utf-8')))
df_quarterly["DATE"] = pd.to_datetime(
df_quarterly["DATE"], format="%Y-%m-%d")
ua = UserAgent(verify_ssl=False)
request_header = {"User-Agent": ua.random}
request_params = {
"id": "MANMM101USA657S",
"cosd": "{}".format(startdate),
"coed": "{}".format(enddate)
}
r = requests.get(tmp_url, params=request_params, headers=request_header)
data_text = r.content
df_annually = pd.read_csv(io.StringIO(data_text.decode('utf-8')))
df_annually["DATE"] = pd.to_datetime(
df_annually["DATE"], format="%Y-%m-%d")
df = pd.merge_asof(df_weekly, df_monthly, on="DATE", direction="backward")
df = pd.merge_asof(df, df_quarterly, on="DATE", direction="backward")
df = pd.merge_asof(df, df_annually, on="DATE", direction="backward")
df.columns = [
"Date",
"M1_Weekly",
"M1_Monthly",
"M1_Quarterly",
"M1_Annually"]
return df
def m2(startdate="1960-01-01", enddate="2021-01-01"):
"""
    Full Name: M2 Money Stock
    Description: Weekly and Monthly, Seasonally Adjusted
Return: pd.DataFrame
"""
tmp_url = url["fred_econ"]
ua = UserAgent(verify_ssl=False)
request_header = {"User-Agent": ua.random}
request_params = {
"id": "WM2NS",
"cosd": "{}".format(startdate),
"coed": "{}".format(enddate)
}
r = requests.get(tmp_url, params=request_params, headers=request_header)
data_text = r.content
df_weekly = pd.read_csv(io.StringIO(data_text.decode('utf-8')))
df_weekly["DATE"] = pd.to_datetime(df_weekly["DATE"], format="%Y-%m-%d")
ua = UserAgent(verify_ssl=False)
request_header = {"User-Agent": ua.random}
request_params = {
"id": "M2SL",
"cosd": "{}".format(startdate),
"coed": "{}".format(enddate)
}
r = requests.get(tmp_url, params=request_params, headers=request_header)
data_text = r.content
df_monthly = pd.read_csv(io.StringIO(data_text.decode('utf-8')))
df_monthly["DATE"] = pd.to_datetime(df_monthly["DATE"], format="%Y-%m-%d")
df = pd.merge_asof(df_weekly, df_monthly, on="DATE", direction="backward")
df.columns = ["Date", "M2_Weekly", "M2_Monthly"]
return df
def m3(startdate="1960-01-01", enddate="2021-01-01"):
"""
    Full Name: M3 for the United States
Description: Growth Rate Previous Period, Monthly, Quarterly and Annually, Seasonally Adjusted
Return: pd.DataFrame
"""
tmp_url = url["fred_econ"]
ua = UserAgent(verify_ssl=False)
request_header = {"User-Agent": ua.random}
request_params = {
"id": "MABMM301USM657S",
"cosd": "{}".format(startdate),
"coed": "{}".format(enddate)
}
r = requests.get(tmp_url, params=request_params, headers=request_header)
data_text = r.content
df_monthly = pd.read_csv(io.StringIO(data_text.decode('utf-8')))
df_monthly["DATE"] = pd.to_datetime(df_monthly["DATE"], format="%Y-%m-%d")
ua = UserAgent(verify_ssl=False)
request_header = {"User-Agent": ua.random}
request_params = {
"id": "MABMM301USQ657S",
"cosd": "{}".format(startdate),
"coed": "{}".format(enddate)
}
r = requests.get(tmp_url, params=request_params, headers=request_header)
data_text = r.content
df_quarterly = pd.read_csv(io.StringIO(data_text.decode('utf-8')))
df_quarterly["DATE"] = pd.to_datetime(
df_quarterly["DATE"], format="%Y-%m-%d")
ua = UserAgent(verify_ssl=False)
request_header = {"User-Agent": ua.random}
request_params = {
"id": "MABMM301USA657S",
"cosd": "{}".format(startdate),
"coed": "{}".format(enddate)
}
r = requests.get(tmp_url, params=request_params, headers=request_header)
data_text = r.content
df_annually = pd.read_csv(io.StringIO(data_text.decode('utf-8')))
df_annually["DATE"] = pd.to_datetime(
df_annually["DATE"], format="%Y-%m-%d")
df = pd.merge_asof(
df_monthly,
df_quarterly,
on="DATE",
direction="backward")
df = pd.merge_asof(df, df_annually, on="DATE", direction="backward")
df.columns = ["Date", "M3_Monthly", "M3_Quarterly", "M3_Annually"]
return df
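# Sketch: combine the money-aggregate helpers above into a single frame keyed on
# Date. The helper name below is illustrative (not part of the original module)
# and assumes network access to the FRED CSV endpoint configured in ``url``.
def money_aggregates_example(startdate="1990-01-01", enddate="2021-01-01"):
    frames = [m1(startdate, enddate), m2(startdate, enddate), m3(startdate, enddate)]
    out = frames[0].sort_values("Date")
    for frame in frames[1:]:
        # backward as-of join keeps one row per date of the running frame
        out = pd.merge_asof(out, frame.sort_values("Date"), on="Date", direction="backward")
    return out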
def ltgby_10(startdate="1955-01-01", enddate="2021-01-01"):
"""
Full Name: Long-Term Government Bond Yields: 10-year: Main (Including Benchmark) for the United States
Description: Percent,Not Seasonally Adjusted, Monthly, Quarterly and Annually
Return: pd.DataFrame
"""
tmp_url = url["fred_econ"]
ua = UserAgent(verify_ssl=False)
request_header = {"User-Agent": ua.random}
request_params = {
"id": "IRLTLT01USM156N",
"cosd": "{}".format(startdate),
"coed": "{}".format(enddate)
}
r = requests.get(tmp_url, params=request_params, headers=request_header)
data_text = r.content
df_monthly = pd.read_csv(io.StringIO(data_text.decode('utf-8')))
df_monthly["DATE"] = pd.to_datetime(df_monthly["DATE"], format="%Y-%m-%d")
ua = UserAgent(verify_ssl=False)
request_header = {"User-Agent": ua.random}
request_params = {
"id": "IRLTLT01USQ156N",
"cosd": "{}".format(startdate),
"coed": "{}".format(enddate)
}
r = requests.get(tmp_url, params=request_params, headers=request_header)
data_text = r.content
df_quarterly = pd.read_csv(io.StringIO(data_text.decode('utf-8')))
df_quarterly["DATE"] = pd.to_datetime(
df_quarterly["DATE"], format="%Y-%m-%d")
ua = UserAgent(verify_ssl=False)
request_header = {"User-Agent": ua.random}
request_params = {
"id": "IRLTLT01USA156N",
"cosd": "{}".format(startdate),
"coed": "{}".format(enddate)
}
r = requests.get(tmp_url, params=request_params, headers=request_header)
data_text = r.content
df_annually = pd.read_csv(io.StringIO(data_text.decode('utf-8')))
df_annually["DATE"] = pd.to_datetime(
df_annually["DATE"], format="%Y-%m-%d")
df = pd.merge_asof(
df_monthly,
df_quarterly,
on="DATE",
direction="backward")
df = pd.merge_asof(df, df_annually, on="DATE", direction="backward")
df.columns = ["Date", "ltgby_Monthly", "ltgby_Quarterly", "ltgby_Annually"]
return df
def gdp_ipd(startdate="1955-01-01", enddate="2021-01-01"):
"""
    Full Name: GDP Implicit Price Deflator in United States
    Description: Index, Quarterly and Annually
Return: pd.DataFrame
"""
tmp_url = url["fred_econ"]
ua = UserAgent(verify_ssl=False)
request_header = {"User-Agent": ua.random}
request_params = {
"id": "USAGDPDEFQISMEI",
"cosd": "{}".format(startdate),
"coed": "{}".format(enddate)
}
r = requests.get(tmp_url, params=request_params, headers=request_header)
data_text = r.content
df_quarterly = pd.read_csv(io.StringIO(data_text.decode('utf-8')))
df_quarterly["DATE"] = pd.to_datetime(
df_quarterly["DATE"], format="%Y-%m-%d")
ua = UserAgent(verify_ssl=False)
request_header = {"User-Agent": ua.random}
request_params = {
"id": "USAGDPDEFAISMEI",
"cosd": "{}".format(startdate),
"coed": "{}".format(enddate)
}
r = requests.get(tmp_url, params=request_params, headers=request_header)
data_text = r.content
df_annually = pd.read_csv(io.StringIO(data_text.decode('utf-8')))
df_annually["DATE"] = pd.to_datetime(
df_annually["DATE"], format="%Y-%m-%d")
df = pd.merge_asof(
df_quarterly,
df_annually,
on="DATE",
direction="backward")
df.columns = ["Date", "gdp_ipd_Quarterly", "gdp_ipd_Annually"]
return df
def cci(startdate="1955-01-01", enddate="2021-01-01"):
"""
Full Name: Consumer Opinion Surveys: Confidence Indicators: Composite Indicators: OECD Indicator for the United States
Description: Normalised (Normal=100), Seasonally Adjusted, Monthly
Return: pd.DataFrame
"""
tmp_url = url["fred_econ"]
ua = UserAgent(verify_ssl=False)
request_header = {"User-Agent": ua.random}
request_params = {
"id": "CSCICP03USM665S",
"cosd": "{}".format(startdate),
"coed": "{}".format(enddate)
}
r = requests.get(tmp_url, params=request_params, headers=request_header)
data_text = r.content
df = pd.read_csv(io.StringIO(data_text.decode('utf-8')))
df.columns = ["Date", "CCI_Monthly"]
df["Date"] = pd.to_datetime(df["Date"], format = "%Y-%m-%d")
return df
def bci(startdate="1955-01-01", enddate="2021-01-01"):
"""
Full Name: Business confidence index OECD Indicator for the United States
Description: Normalised (Normal=100), Seasonally Adjusted, Monthly
Return: pd.DataFrame
"""
tmp_url = url["fred_econ"]
ua = UserAgent(verify_ssl=False)
request_header = {"User-Agent": ua.random}
request_params = {
"id": "BSCICP03USM665S",
"cosd": "{}".format(startdate),
"coed": "{}".format(enddate)
}
r = requests.get(tmp_url, params=request_params, headers=request_header)
data_text = r.content
df = pd.read_csv(io.StringIO(data_text.decode('utf-8')))
df.columns = ["Date", "BCI_Annually"]
df["Date"] = pd.to_datetime(df["Date"], format = "%Y-%m-%d")
return df
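# Sketch: the two confidence indicators can be joined on Date for comparison, e.g.
#   conf_df = pd.merge(cci(), bci(), on="Date", how="inner")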
def ibr_3(startdate="1965-01-01", enddate="2021-01-01"):
"""
Full Name: 3-Month or 90-day Rates and Yields: Interbank Rates for the United States
Description: Percent, Not Seasonally Adjusted, Monthly and Quarterly
"""
tmp_url = url["fred_econ"]
ua = UserAgent(verify_ssl=False)
request_header = {"User-Agent": ua.random}
request_params = {
"id": "IR3TIB01USM156N",
"cosd": "{}".format(startdate),
"coed": "{}".format(enddate)
}
r = requests.get(tmp_url, params=request_params, headers=request_header)
data_text = r.content
df_monthly = pd.read_csv(io.StringIO(data_text.decode('utf-8')))
df_monthly["DATE"] = pd.to_datetime(df_monthly["DATE"], format="%Y-%m-%d")
ua = UserAgent(verify_ssl=False)
request_header = {"User-Agent": ua.random}
request_params = {
"id": "IR3TIB01USQ156N",
"cosd": "{}".format(startdate),
"coed": "{}".format(enddate)
}
r = requests.get(tmp_url, params=request_params, headers=request_header)
data_text = r.content
df_quarterly = pd.read_csv(io.StringIO(data_text.decode('utf-8')))
df_quarterly["DATE"] = pd.to_datetime(
df_quarterly["DATE"], format="%Y-%m-%d")
    df = pd.merge_asof(
        df_monthly,
        df_quarterly,
        on="DATE",
        direction="backward")
    df.columns = ["Date", "ibr3_Monthly", "ibr3_Quarterly"]
    return df
def gfcf_3(startdate="1965-01-01", enddate="2021-01-01"):
"""
Full Name: Gross Fixed Capital Formation in United States
Description: United States Dollars,Not Seasonally Adjusted, Quarterly and Annually
"""
tmp_url = url["fred_econ"]
ua = UserAgent(verify_ssl=False)
request_header = {"User-Agent": ua.random}
request_params = {
"id": "USAGFCFQDSMEI",
"cosd": "{}".format(startdate),
"coed": "{}".format(enddate)
}
r = requests.get(tmp_url, params=request_params, headers=request_header)
data_text = r.content
df_quarterly = pd.read_csv(io.StringIO(data_text.decode('utf-8')))
df_quarterly["DATE"] = pd.to_datetime(
df_quarterly["DATE"], format="%Y-%m-%d")
ua = UserAgent(verify_ssl=False)
request_header = {"User-Agent": ua.random}
request_params = {
"id": "USAGFCFADSMEI",
"cosd": "{}".format(startdate),
"coed": "{}".format(enddate)
}
r = requests.get(tmp_url, params=request_params, headers=request_header)
data_text = r.content
df_annually = pd.read_csv(io.StringIO(data_text.decode('utf-8')))
df_annually["DATE"] = pd.to_datetime(
df_annually["DATE"], format="%Y-%m-%d")
    df = pd.merge_asof(
        df_quarterly,
        df_annually,
        on="DATE",
        direction="backward")
    df.columns = ["Date", "GFCF_Quarterly", "GFCF_Annually"]
return df
def pfce(startdate="1955-01-01", enddate="2021-01-01"):
"""
Full Name: Private Final Consumption Expenditure in United States
"""
tmp_url = url["fred_econ"]
ua = UserAgent(verify_ssl=False)
request_header = {"User-Agent": ua.random}
request_params = {
"id": "USAPFCEQDSMEI",
"cosd": "{}".format(startdate),
"coed": "{}".format(enddate)
}
r = requests.get(tmp_url, params=request_params, headers=request_header)
data_text = r.content
df_quarterly = pd.read_csv(io.StringIO(data_text.decode('utf-8')))
df_quarterly["DATE"] = pd.to_datetime(
df_quarterly["DATE"], format="%Y-%m-%d")
ua = UserAgent(verify_ssl=False)
request_header = {"User-Agent": ua.random}
request_params = {
"id": "USAPFCEADSMEI",
"cosd": "{}".format(startdate),
"coed": "{}".format(enddate)
}
r = requests.get(tmp_url, params=request_params, headers=request_header)
data_text = r.content
df_annually = pd.read_csv(io.StringIO(data_text.decode('utf-8')))
df_annually["DATE"] = pd.to_datetime(
df_annually["DATE"], format="%Y-%m-%d")
df = pd.merge_asof(
df_quarterly,
df_annually,
on="DATE",
direction="backward")
df.columns = ["Date", "PFCE_Quarterly", "PFCE_Annually"]
return df
def tlp(startdate="1955-01-01", enddate="2021-01-01"):
"""
Full Name: Early Estimate of Quarterly ULC Indicators: Total Labor Productivity for the United States
Description: Growth Rate Previous Period,Seasonally Adjusted, Quarterly and YoY
Return: pd.DataFrame
"""
tmp_url = url["fred_econ"]
ua = UserAgent(verify_ssl=False)
request_header = {"User-Agent": ua.random}
request_params = {
"id": "ULQELP01USQ657S",
"cosd": "{}".format(startdate),
"coed": "{}".format(enddate)
}
r = requests.get(tmp_url, params=request_params, headers=request_header)
data_text = r.content
df_quarterly = pd.read_csv(io.StringIO(data_text.decode('utf-8')))
df_quarterly["DATE"] = pd.to_datetime(
df_quarterly["DATE"], format="%Y-%m-%d")
ua = UserAgent(verify_ssl=False)
request_header = {"User-Agent": ua.random}
request_params = {
"id": "ULQELP01USQ659S",
"cosd": "{}".format(startdate),
"coed": "{}".format(enddate)
}
r = requests.get(tmp_url, params=request_params, headers=request_header)
data_text = r.content
df_annually = pd.read_csv(io.StringIO(data_text.decode('utf-8')))
df_annually["DATE"] = pd.to_datetime(
df_annually["DATE"], format="%Y-%m-%d")
df = pd.merge_asof(
df_quarterly,
df_annually,
on="DATE",
direction="backward")
df.columns = ["Date", "PFCE_Quarterly", "PFCE_Quarterly_YoY"]
return df
def rt(startdate="1955-01-01", enddate="2021-01-01"):
"""
Full Name:Total Retail Trade in United States
    Description: Monthly and Annually
Return: pd.DataFrame
"""
tmp_url = url["fred_econ"]
ua = UserAgent(verify_ssl=False)
request_header = {"User-Agent": ua.random}
request_params = {
"id": "USASARTMISMEI",
"cosd": "{}".format(startdate),
"coed": "{}".format(enddate)
}
r = requests.get(tmp_url, params=request_params, headers=request_header)
data_text = r.content
df_monthly = pd.read_csv(io.StringIO(data_text.decode('utf-8')))
df_monthly["DATE"] = pd.to_datetime(df_monthly["DATE"], format="%Y-%m-%d")
request_header = {"User-Agent": ua.random}
request_params = {
"id": "USASARTAISMEI",
"cosd": "{}".format(startdate),
"coed": "{}".format(enddate)
}
r = requests.get(tmp_url, params=request_params, headers=request_header)
data_text = r.content
df_annually = pd.read_csv(io.StringIO(data_text.decode('utf-8')))
df_annually["DATE"] = pd.to_datetime(
df_annually["DATE"], format="%Y-%m-%d")
df = pd.merge_asof(
df_monthly,
df_annually,
on="DATE",
direction="backward")
df.columns = ["Date", "RT_Quarterly", "RT_Annually"]
return df
def bir(startdate="2003-01-01", enddate="2021-01-01"):
"""
    Full Name: 5-Year and 10-Year Breakeven Inflation Rate
    Description: Percent, Daily, Not Seasonally Adjusted
Return: pd.DataFrame
"""
tmp_url = url["fred_econ"]
ua = UserAgent(verify_ssl=False)
request_header = {"User-Agent": ua.random}
request_params = {
"id": "T5YIE",
"cosd": "{}".format(startdate),
"coed": "{}".format(enddate)
}
r = requests.get(tmp_url, params=request_params, headers=request_header)
data_text = r.content
df_5y = pd.read_csv(io.StringIO(data_text.decode('utf-8')))
df_5y["DATE"] = pd.to_datetime(df_5y["DATE"], format="%Y-%m-%d")
request_header = {"User-Agent": ua.random}
request_params = {
"id": "T10YIE",
"cosd": "{}".format(startdate),
"coed": "{}".format(enddate)
}
r = requests.get(tmp_url, params=request_params, headers=request_header)
data_text = r.content
df_10y = pd.read_csv(io.StringIO(data_text.decode('utf-8')))
df_10y["DATE"] = pd.to_datetime(df_10y["DATE"], format="%Y-%m-%d")
df = pd.merge_asof(df_5y, df_10y, on="DATE", direction="backward")
df.columns = ["Date", "BIR_5y", "BIR_10y"]
return df
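# Example sketch: the 10y-5y breakeven spread is a rough proxy for the implied
# forward inflation premium, e.g.
#   b = bir()
#   spread = pd.to_numeric(b["BIR_10y"], errors="coerce") - pd.to_numeric(b["BIR_5y"], errors="coerce")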
def adsbci():
"""
An index designed to track real business conditions at high observation frequency
"""
ua = UserAgent(verify_ssl=False)
request_header = {"User-Agent": ua.random}
tmp_url = url["philfed"] + "ads"
r = requests.get(tmp_url, headers=request_header)
file = open("ads_temp.xls", "wb")
file.write(r.content)
file.close()
    df = pd.read_excel("ads_temp.xls")
    return df
# -*- coding: utf-8 -*-
import pandas as pd
import io
import requests
import json
import webbrowser
from Macroeconomia.Argentina.ProductoInternoBruto import ProductoInternoBruto
class IndicadoresDePrecios:
def __init__(self):
"""
        Initialize
"""
self.__PIB = ProductoInternoBruto()
def getPIB(self):
return self.__PIB
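    # Usage sketch (assumes the ProductoInternoBruto dependency can reach its data source):
    #   indicadores = IndicadoresDePrecios()
    #   deflactor = indicadores.getDeflactorBase2004(periodo="Trimestral")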
def getDeflactorBase2004(self, periodo = "Anual"):
"""
        It can be used as a price indicator; it has broader coverage
        than the CPI but does not include intermediate goods.
        Parameters
        ----------
        periodo : str, optional (can be "Anual" or "Trimestral")
            The default is "Anual".
Returns
-------
pd.DataFrame()
"""
return self.__PIB.getIndicePreciosImplicitosBase2004(periodo)
def getIndicePreciosAlConsumidorCordobaBaseJulio2012(self):
"""
        Compiled monthly in most countries, it measures the price changes of a
        basket of goods and services over a given period against a fixed base
        (in this case the year 2016).
        The Cordoba CPI only covers the province of Cordoba.
        It is only available as a monthly series.
Returns
-------
pd.DataFrame()
"""
        # Get the CSV download URL
urlPackage="https://datos.gob.ar/api/3/action/package_show?id=sspm-indice-precios-al-consumidor-provincia-cordoba-base-2014-100"
s=requests.get(urlPackage).content
objJson = json.loads(s)
resultado = objJson['result']['resources']
selector = 0
ultimoResultado = resultado[selector]
urlDescarga = ultimoResultado['url']
descripcion = ultimoResultado['description']
print("Descargando: {}".format(descripcion))
print("Archivo: {}".format(urlDescarga))
        # Download the CSV and build a pandas DataFrame
contenidoCVS = requests.get(urlDescarga).content
flujoCVS = io.StringIO(contenidoCVS.decode('utf-8'))
        df_temp = pd.read_csv(flujoCVS)
# %%
'''
'''
## Import the required libraries
import pandas as pd
import numpy as np
import datetime as dt
from datetime import timedelta
pd.options.display.max_columns = None
pd.options.display.max_rows = None
import glob as glob
import datetime
import re
import jenkspy
import tkinter as tk
root= tk.Tk()
canvas1 = tk.Canvas(root, width = 300, height = 300)
canvas1.pack()
# %%
def profiling():
#### Read Databases
datas=pd.read_csv('C:/Users/scadacat/Desktop/TIGO (Cliente)/Cobranzas/Notebooks/Bds/data_con_drop.csv',sep=';',encoding='utf-8',dtype='str')
salida=pd.read_csv('C:/Users/scadacat/Desktop/TIGO (Cliente)/Cobranzas/Notebooks/Bds/salida_limpia.csv',sep=';',encoding='utf-8',dtype='str')
seguimiento=pd.read_csv('C:/Users/scadacat/Desktop/TIGO (Cliente)/Cobranzas/Notebooks/Bds/seguimiento.csv',sep=';',encoding='utf-8',dtype='str')
virtuales=pd.read_csv('C:/Users/scadacat/Desktop/TIGO (Cliente)/Cobranzas/Notebooks/Bds/virtuales.csv',encoding='utf-8',sep=';')
df=datas.copy()
out=salida.copy()
seg=seguimiento.copy()
vir=virtuales.copy()
out.sort_values(['Identificacion Del Cliente','Fecha_Gestion'],inplace=True)
out=out[out['Repetido CC']=='0']
out=out[~out.duplicated(keep='last')]
## Cleaning
df['Marca Score']=df['Marca Score'].str.strip().fillna('NO REGISTRA')
df['Marca Score'][df['Marca Score']==''] ='NO REGISTRA'
df['Analisis De Habito']=df['Analisis De Habito'].fillna('NO DEFINE')
df['Analisis De Habito'][df['Analisis De Habito']==' '] ='NO DEFINE'
df['Tipo de Cliente'][df['Tipo de Cliente']==' '] ='NO DEFINE'
df['Marca Funcional']=df['Marca Funcional'].str.replace(' ','0')
df['Marca']=df['Marca'].str.replace(' ','0')
df['Antiguedad Cliente'][df['Antiguedad Cliente']==' '] ='NO REGISTRA'
df['Perfil Digital']=df['Perfil Digital'].fillna('Sin perfil')
df['Nivel de riesgo experian']=df['Nivel de riesgo experian'].str.replace(' ','NO REGISTRA')
df['Nivel de Riesgo']=df['Nivel de Riesgo'].str.replace(' ','NO REGISTRA')
df['Nivel Estrategia Cobro']=df['Nivel Estrategia Cobro'].str.replace(' ','NO REGISTRA')
df['Real reportado en central de riesgos']=df['Real reportado en central de riesgos'].str.replace(' ','0')
df['Nivel de Riesgo'][df['Nivel de Riesgo']==' '] ='NO REGISTRA'
df['Estado del Cliente'][df['Estado del Cliente']==' '] ='SIN IDENTIFICAR'
df['Tipificación Cliente'][df['Tipificación Cliente']==' '] ='SIN IDENTIFICAR'
df['Estrategia'][df['Estrategia']==' '] ='SIN ESTRATEGIA'
df['Autopago'][df['Autopago']==' '] ='NO APLICA'
df['Tipo de Cliente']=df['Tipo de Cliente'].fillna('NO DEFINE')
df['Tipo de Reporte a Central de Riesgos'][df['Tipo de Reporte a Central de Riesgos']==' '] ='NO REGISTRA'
df['Codigo edad de mora(para central de riesgos)']=df['Codigo edad de mora(para central de riesgos)'].str.replace(' ','NO REGISTRA')
df['Análisis Vector'][df['Análisis Vector']==' '] ='SIN IDENTIFICAR'
df['Análisis Vector_PAGOS_PARCIAL'] = np.where(df['Análisis Vector'].str.contains("PAGO PARCIAL|PAGOS PARCIAL"),"1",'0')
df['Análisis Vector_PAGO OPORTUNO'] = np.where(df['Análisis Vector'].str.contains("SIN PAGO|FINANCIAR"),"1",'0')
df['Análisis Vector_SIN_IDENTIFICAR'] = np.where(df['Análisis Vector'].str.contains("SIN IDENTIFICAR"),"1",'0')
df['Análisis Vector_SIN_PAGO'] = np.where(df['Análisis Vector'].str.contains("SIN PAGO|FINANCIAR"),"1",'0')
df['Análisis Vector_suspension'] = np.where(df['Análisis Vector'].str.contains("SUSPENSIO"),"1",'0')
df['Análisis Vector_indeterminado'] = np.where(df['Análisis Vector'].str.contains("PAGO OPORTUNO Y NO OPORTUNO"),"1",'0')
df['Análisis Vector_pago_no_oport'] = np.where(df['Análisis Vector'].str.contains("PAGO NO OPORTUNO"),"1",'0')
df['Análisis Vector_otro_caso'] = np.where(df['Análisis Vector'].str.contains("NUEVO|FACTURAS AJUSTADAS|PROBLEMAS RECLAMACION"),"1",'0')
df['Vector Cualitativo # Suscripción'][df['Vector Cualitativo # Suscripción']==' '] = df["Vector Cualitativo # Suscripción"].mode()[0]
df['Fecha Ult Gestion']=pd.to_datetime(df['Fecha Ult Gestion'],format='%Y-%m-%d')
###PARSE DATES AND CREATE NEW FEATURES
df['Fecha de Asignacion']=pd.to_datetime(df['Fecha de Asignacion'],format='%Y-%m-%d %H:%M:%S')
df['Fecha Ult pago']=pd.to_datetime(df['Fecha Ult pago'],format ='%Y-%m-%d %H:%M:%S',errors = "coerce")
df['Fecha de cuenta de cobro mas antigua']=pd.to_datetime(df['Fecha de cuenta de cobro mas antigua'],format ='%Y-%m-%d %H:%M:%S',errors = "coerce")
df["Dias_ult_pago"] = (df['Fecha Ult pago']).dt.day
df["dia_semana_ult_pago"] = (df['Fecha Ult pago']).dt.weekday
df["mes_ult_pago"]=df["Fecha Ult pago"].dt.month
df["semana_ult_pago"]=df["Fecha Ult pago"].dt.week
df["trimestre_ult_pago"] = df["Fecha Ult pago"].dt.quarter
df["año_ult_pago"] = df["Fecha Ult pago"].dt.year
df["DIAS_desde_ult_pago"] = (df["Fecha Ult Gestion"] - df["Fecha Ult pago"]).dt.days
df["Fecha estado corte"]=pd.to_datetime(df["Fecha estado corte"],format ='%Y-%m-%d %H:%M:%S',errors = "coerce")
df["dias_ult_pago_cobro"] = (df["Fecha Ult pago"]-df["Fecha estado corte"]).dt.days
df["dias_ult_pago_fac_ant"] = (df["Fecha Ult pago"]-df["Fecha de cuenta de cobro mas antigua"]).dt.days
df['Fecha de Asignacion_mes']=df["Fecha de Asignacion"].dt.month
df['Fecha de Instalacion']=pd.to_datetime(df['Fecha de Instalacion'],format ='%Y-%m-%d %H:%M:%S',errors = "coerce")
df['antiguedad_mes']=(dt.datetime.now()-df['Fecha de Instalacion']).dt.days/365
df['Fecha Retiro']=pd.to_datetime(df['Fecha Retiro'].str.replace('4732','2020'),format='%Y-%m-%d',errors = "coerce")
df['Fecha Vencimiento Sin Recargo']=pd.to_datetime(df['Fecha Vencimiento Sin Recargo'],format='%Y-%m-%d')
df['dias_desde_ult_gestion']=(dt.datetime.now()-df['Fecha Ult Gestion']).dt.days
## Group labels
df['Descripcion subcategoria']=df['Descripcion subcategoria']\
.str.replace('Consumos EPM Telco|INALAMBRICOS NO JAC|unica|COMERCIAL|ENTERPRISE|MONOPRODUCTO|PYME|------------------------------|LINEA BUZON','NO REGISTRA')\
.str.replace('ESTRATO MEDIO ALTO|MEDIO ALTO','ESTRATO 4')\
.str.replace('ESTRATO ALTO|ALTO','ESTRATO 6')\
.str.replace('ESTRATO MEDIO-BAJO|MEDIO BAJO','ESTRATO 2')\
.str.replace('ESTRATO MEDIO|MEDIO','ESTRATO 3')\
.str.replace('ESTRATO MEDIO-BAJO|MEDIO BAJO','ESTRATO 2')\
.str.replace('BAJO BAJO|ESTRATO BAJO-BAJO|ESTRATO BAJO|BAJO','ESTRATO 1')
    df['Descripcion subcategoria'][df['Descripcion subcategoria']=='-'] ='NO REGISTRA'  # '-' means not recorded
    df['Tipificación Cliente'][df['Tipificación Cliente']==' '] = df["Tipificación Cliente"].mode()[0]  # replace blanks with the mode
df['Dias Suspension'][df['Dias Suspension']==' ']=0
df['Dias Suspension']=df['Dias Suspension'].astype('int')
## Group labels
df['Descripcion producto']=df['Descripcion producto'].str.replace('-','').str.strip().str.upper()\
.str.replace('TELEVISION UNE|TELEVISION INTERACTIVA|TV CABLE|TV INTERACTIVA|UNE TV|TELEVISION SIN SEÃƑ‘AL|TELEVISION SIN SEÃƑ‘AL|TV CABLE SIN SEÑAL','TELEVISION')\
.str.replace('INTERNET BANDA ANCHA|SEGUNDA CONEXION INTERNET|BANDA ANCHA|INTERNET EDATEL|INTERNET INSTANTANEO|CABLE MODEM|INTERNET DEDICADO 11|ADSL BASICO','INTERNET')\
.str.replace('UNE MOVIL|COLOMBIAMOVIL BOGOTA|TIGO|ETB','UNEMOVIL')\
.str.replace('TOIP|TELEFONICA TELECOM|TELECOM|TO_SINVOZ','TELEFONIA')\
.str.replace('LÃƑÂNEA BÃƑ¡SICA','LINEA BASICA')
df['Descripcion categoria']=df['Descripcion categoria'].str.replace("[^a-zA-Z ]+", "NO REGISTRA")
df['Descripcion producto']=df['Descripcion producto'].str.replace('-','').str.strip()\
.str.replace('TELEVISION UNE|Television Interactiva|TV CABLE |TV INTERACTIVA|UNE TV|TELEVISIONSIN SEÑAL','TELEVISION')\
.str.replace('Internet Banda Ancha|Internet EDATEL|CABLE MODEM','INTERNET').str.replace('UNE MOVIL','UNEMOVIL')\
.str.replace('UNE MOVIL|COLOMBIAMOVIL BOGOTA','UNEMOVIL')\
.str.replace('TOIP','TELEFONIA')
df['Descripcion producto']=df['Descripcion producto'].str.strip().str.replace('-','')\
.str.replace('TELEVISION UNE|Television Interactiva|TV CABLE |TV INTERACTIVA|UNE TV','TELEVISION')\
.str.replace('Internet Banda Ancha','INTERNET').str.replace('UNE MOVIL','UNEMOVIL')
conteo3=df['Descripcion producto'].value_counts().iloc[:7].index.tolist()
df['Descripcion producto_resumen']=df.apply(
lambda row: row['Descripcion producto'] if (row['Descripcion producto'] in conteo3)
else 'OTRO PRODUCTO',axis=1)
df['Descripcion producto_resumen']=df['Descripcion producto_resumen'].str.strip()
df['Tipo Contactabilidad'][df['Tipo Contactabilidad']==' '] ='NO REGISTRA'
df['Indicador BI'][df['Indicador BI']==' '] ='NO REGISTRA'
## Create variable
df['antiguedad_mes']=df['antiguedad_mes'].astype(int)
col = 'antiguedad_mes'
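    # Segment customers by tenure (antiguedad_mes): <12 -> SEGMENTO YOUNG,
    # 12-24 -> SEGMENTO MASTER, >24 -> SEGMENTO LEGEND.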
condi = [ df[col] < 12, df[col].between(12, 24, inclusive = True),df[col]>24 ]
seg_ = [ "SEGMENTO YOUNG", 'SEGMENTO MASTER','SEGMENTO LEGEND']
df["Hogar"] = np.select(condi, seg_, default=np.nan)
df['Calificación A Nivel De Suscripción'][df['Calificación A Nivel De Suscripción']==' ']=df['Calificación A Nivel De Suscripción'].mode()[0]
df['Calificación A Nivel De Suscripción']=df['Calificación A Nivel De Suscripción'].astype('int')
df['Califica_suscr_class']=pd.cut(df['Calificación A Nivel De Suscripción'],bins=5,labels=["A","B","C","D","E"]).astype(str)
df['Tipo De Documento'][df['Tipo De Documento']=='13'] ='NO REGISTRA'
df['Tipo De Documento']=df['Tipo De Documento'].fillna('NO REGISTRA')
df['Tipo De Documento'][df['Tipo De Documento']=='1'] ='CC'
df['Tipo De Documento'][df['Tipo De Documento']==' '] ='NO REGISTRA'
df['Tipo De Documento'][df['Tipo De Documento']=='C'] ='NO REGISTRA'
df['Tipo De Documento']=df['Tipo De Documento'].str.replace('3 Cedula Extranjeria|3|1CE','CE')\
.str.replace('1 Cedula','CC')\
.str.replace('2 Nit|2',' Nit')\
.str.replace('4 Tarjeta de Identidad|4',' TI')
#### Create, clean & group variables
df['Banco 1'][df['Banco 1']==' '] ='NO REGISTRA'
df['Banco 2'][df['Banco 2']==' '] ='NO REGISTRA'
df['Banco 1'].fillna('NO REGISTRA',inplace=True)
df['Banco 2'].fillna('NO REGISTRA',inplace=True)
df['Banco 1']=df['Banco 1'].str.upper().str.strip()
df['Banco 2']=df['Banco 2'].str.upper().str.strip()
df['Banco 1']=df['Banco 1'].str.replace('BANCO COLPATRIA','COLPATRIA')\
.str.replace('COLPATRIA ENLINEA','COLPATRIA EN LINEA')\
.str.replace('GANA GANA','GANA')\
.str.replace('GANA GANA','GANA')
df["Banco 1_virtual"] =\
np.where(df["Banco 1"].str.contains("LINEA|PSE|BOTON",regex = True,na = False),"1","0")
df["Banco 2_Virtual"] =\
np.where(df["Banco 2"].str.contains("LINEA|PSE|BOTON",regex = True,na = False),"1","0")
conteo_banco=df['Banco 1'].value_counts().iloc[:10].index.tolist()
df['Banco 1_Cl']=df.apply(
lambda row: row['Banco 1'] if (row['Banco 1'] in conteo_banco)
else 'OTRO BANCO',axis=1)
conteo_banco2=df['Banco 2'].value_counts().iloc[:10].index.tolist()
df['Banco 2_Cl']=df.apply(
lambda row: row['Banco 2'] if (row['Banco 2'] in conteo_banco2)
else 'OTRO BANCO',axis=1)
df['Causal'][df['Causal']==' '] ='NO REGISTRA'
df['Causal_Cl']=df['Causal']\
.str.replace('FACTURA MAYOR A LA CAPACIDAD DE PAGO|CLIENTE SE ACOGE PRODUCTO MINIMO VITAL|PRIORIDAD INGRESOS A LA CANASTA BASICA|INDISPONIBILIDAD DE MEDIOS DE PAGO POR EMERGENCIA SANITARIA|NO TIENE DINERO|INCONVENIENTES ECONOMICOS|INCONVENIENTES ECONOMICOS|CONTINGENCIA COVID-19|DESEMPLEADO|INDEPENDIENTE SIN INGRESOS DURANTE CUARENTENA|DISMINUCIÓN INGRESOS / INCONVENIENTES CON NÓMINA',
'DISMINUCIÓN DE INGRESOS')\
.str.replace('OLVIDO DE PAGO|FUERA DE LA CIUDAD|DEUDOR SE OLVIDO DEL PAGO|OLVIDO DEL PAGO / ESTA DE VIAJE',
'OLVIDO')\
.str.replace('PAGA CADA DOS MESES|PAGO BIMESTRAL','PAGO BIMESTRAL')\
.str.replace('INCONFORMIDAD EN EL VALOR FACTURADO|INCONFORMIDAD POR CAMBIO DE DOMICILIO|INCOMFORMIDAD POR CAMBIO DE DOMICILIO|PQR PENDIENTE|TIENE RECLAMO PENDIENTE','INCONFORMIDAD')\
.str.replace('OTRA PERSONA ES LA ENCARGADA DEL PAGO','OTRA PERSONA ES LA ENCARGADA DEL PAGO').str.strip()\
.str.replace('PROBLEMAS FACTURACIÓN|INCONSISTENCIAS EN CARGOS FACTURADOS|RECLAMACIÓN EN TRÃMITE|NO LE LLEGA LA FACTURA / LLEGO DESPUES DE LA FECHA DE VENCIMIENTO|LLEGO LA FACTURA DESPUES DE LA FECHA DE VENCIMIENTO|NO LLEGO FACTURA',
'FACTURA')\
.str.replace('SE NIEGA A RECIBIR INFORMACION',
'RENUENTE')\
.str.replace('INCONVENIENTES CON CANALES DE PAGO|NO HAY PROGRAMACION DEL PAGO|INCONVENIENTES CON EL CANAL DE RECAUDO|NO HAY PROGRAMACION DEL PAGO|INCONVENIENTES CON LA ENTIDAD BANCARIA',
'INCONVENIENTES CON PAGO')\
.str.replace('REALIZARA RETIRO DEL SERVICIO|REALIZARA RETIRO / CANCELACION SERVICIO',
'REALIZARA RETIRO')
conteo_Causa=df['Causal_Cl'].value_counts().iloc[:12].index.tolist()
df['Causal_Cl']=df.apply(
lambda row: row['Causal_Cl'] if (row['Causal_Cl'] in conteo_Causa)
else 'OTRA CAUSA',axis=1)
conteo_Corte=df['Descripcion estado de corte'].value_counts().iloc[:12].index.tolist()
df['Descripcion estado de corte_Cl']=df.apply(
lambda row: row['Descripcion estado de corte'] if (row['Descripcion estado de corte'] in conteo_Corte)
else 'OTRA MOTIVO',axis=1)
df['Descripcion estado de corte_conexión'] = np.where(df['Descripcion estado de corte'].str.contains("CONEXION"),"1",'0')
df['Descripcion estado de corte_suspención'] = np.where(df['Descripcion estado de corte'].str.contains("SUSPENSION"),"1",'0')
df['Descripcion estado de corte_retiro'] = np.where(df['Descripcion estado de corte'].str.contains("RETIRO"),"1",'0')
df['Valor Total Cobrar']=df['Valor Total Cobrar'].astype('float64')
df['Valor Vencido']=df['Valor Vencido'].astype('float64')
df['Valor Factura']=df['Valor Factura'].astype('float64')
df['Valor Intereses de Mora']=df['Valor Intereses de Mora'].astype('float64')
df['Valor financiado']=df['Valor financiado'].astype('float64')
## DROPING VARIABLES
df.drop(['Causal','Codigo edad de mora(para central de riesgos)','Codigo edad de mora(para central de riesgos)',
'Estado Adminfo','Celular con mejor Contactabilidad','Archivo Convergente','Usuario','Vector de Pago'],axis=1,inplace=True)
anis=['Teléfono última gestión','Email','Telefono con mejor Contactabilidad','Email',
'Ultimo Celular Grabado','Ultimo Telefono Grabado','Ultimo Email Grabado','Celular con mejor Contactabilidad']
df.dropna(subset = ["Direccion de instalacion"], inplace=True)
df['llave']=df['Identificacion']+"_"+df['Direccion de instalacion']
df=df.sort_values('Fecha de Asignacion',ascending=True)
    ## Drop the duplicates produced by the combination of these variables
df=df[~df[['llave','# servicio suscrito/abonado','Fecha de Asignacion','Valor Total Cobrar','Valor Vencido','Descripcion localidad']].duplicated()]
df.sort_values(by=['Identificacion','# servicio suscrito/abonado','Fecha de Asignacion'],ascending=[True,True,True]).drop_duplicates('# servicio suscrito/abonado',keep='last',inplace=True)
    ### Careful with records still pending management
    ## Number of services
cant_serv=df.groupby(['Identificacion']).agg({'Descripcion producto':'nunique','Direccion de instalacion':'nunique'})\
.reset_index().sort_values('Descripcion producto',ascending=False)\
.rename(columns={'Descripcion producto':'cantidad_ser_dir','Direccion de instalacion':'serv_dir'})
df=pd.merge(df,cant_serv,on='Identificacion')
df=df[~df.duplicated()]
    # Create this key to avoid duplicates on the same day
df['llave_2']=df['Identificacion']+"_"+(df['Fecha de Asignacion'].astype('str'))
#
conteo=df.groupby(['Identificacion','Fecha de Asignacion','Fecha de Asignacion_mes']).agg({'Identificacion':'nunique'}).rename(columns={'Identificacion':'cantidad_mes'}).reset_index()
conteo.sort_values('Fecha de Asignacion',ascending=True,inplace=True)
conteo=conteo[~conteo['Identificacion'].duplicated(keep='last')]
conteo['llave_2']=conteo['Identificacion']+"_"+(conteo['Fecha de Asignacion'].astype('str'))
    # Built to identify and keep the key of each record
consolidar=pd.merge(df,conteo['llave_2'],on='llave_2')
    # Create dummy variables to flag the products within the same service count
    cer1=pd.concat([pd.get_dummies(consolidar['Descripcion producto_resumen']),consolidar],axis=1)  # concatenate
cer1['llave_2']=cer1['Identificacion']+"_"+(cer1['Fecha de Asignacion'].astype('str'))
cer=cer1.groupby(['Identificacion']).agg({
'Descripcion producto_resumen':np.array,'Descripcion producto_resumen':'sum',
'TELEFONIA':'sum','INTERNET':'sum','TELEVISION':'sum','UNEMOVIL':'sum',
'LARGA DISTANCIA UNE':'sum','PAQUETE':'sum','OTRO PRODUCTO':'sum','LINEA BASICA':'sum',
"Valor Vencido":"sum","Valor Total Cobrar":"sum",
"Valor financiado":"sum",
"Valor Intereses de Mora":"sum"}).reset_index().\
rename(columns={'Valor Vencido':'valor vencido_sum',
'Valor Factura':'Valor Factura_sum',
'Valor financiado':'Valor financiado_sum',
'Valor Total Cobrar':'Valor Total Cobrar_sum',
'Descripcion producto_resumen':'Total servicio',
'Valor Intereses de Mora':'Valor Intereses de Mora_sum'})
cer.drop(['Total servicio'],axis=1,inplace=True)
data=pd.merge(consolidar,cer,on='Identificacion')
data=data.sort_values(['Fecha de Asignacion','Identificacion'],ascending=[True,True]).drop_duplicates('Identificacion',keep='last')
    ### Outcome (payment result) database
out.sort_values(['Identificacion Del Cliente','Fecha_Gestion'],ascending=[True,True]).drop_duplicates(keep='last',inplace=True)
out.drop(['Unnamed: 19'],axis=1,inplace=True)
    ## Join with the outcome database
full=pd.merge(data,out[['Identificacion Del Cliente','Efectivo Pago','Fecha_Pago']],
left_on='Identificacion',right_on='Identificacion Del Cliente')
full=full[~full.duplicated()]
full=full.sort_values(['Identificacion','Efectivo Pago'],ascending=[True,True]).drop_duplicates(['Identificacion'],keep='first')
full['llave_exp']=full['Identificacion']+full['# servicio suscrito/abonado']
full['valor vencido_sum'][full['valor vencido_sum'] < 0] = 0
full['ratio_vlr_vencido_cobro']=full['valor vencido_sum']/full['Valor Total Cobrar_sum']
full.drop(['llave_2','Direccion de instalacion','Banco 1','Banco 2'],axis=1,inplace=True)
    ### Export and send to the working folder
seg['FECHA DE GESTION']=pd.to_datetime(seg['FECHA DE GESTION'],format='%Y-%m-%d %H:%M:%S')
seg=seg.sort_values(['IDENTIFICACIóN','FECHA DE GESTION']).drop_duplicates('IDENTIFICACIóN',keep='last')
vir['Identificación']=vir['Identificación'].astype('str')
fulll=pd.merge(full,seg[['IDENTIFICACIóN','FECHA DE GESTION','CLASE DE GESTION',
'LINEA/AGENCIA/ABOGADO','CAUSAL','CICLO','OTRA GESTION',
'SE DEJO MENSAJE EN BUZON', 'DEUDOR REALIZA PROMESA DE PAGO TOTAL',
'NO CONTESTAN / OCUPADO', 'DEUDOR REALIZA PROMESA DE PAGO PARCIAL',
'NO HUBO ACUERDO', 'SE ENVIA CUPON DE PAGO','SE DEJO MENSAJE CON TERCERO',
'OTRA GESTION_sum', 'Total_segui','Cantidad_de_cobros_diff_mes', 'Cantidad_recontactos_mes',
'class_Cantidad_de_cobros_diff_mes','class_Cantidad_recontactos_mes']],
left_on='Identificacion',right_on='IDENTIFICACIóN',how='left').\
merge(vir,left_on='Identificacion',right_on='Identificación',how='left')
    # free memory
del cer
del cer1
fulll["Efectivo Pago"] = (fulll["Efectivo Pago"]=="Efectivo").astype(int)
fulll.drop(['Valor financiado_sum','Fecha_Pago','Valor Intereses de Mora_sum','Valor Total Cobrar','Valor Total Cobrar_sum','Valor Intereses de Mora','Agencia B2B Convergente','Codigo Fraude','CAUSAL','LINEA/AGENCIA/ABOGADO',
'Celular','Valor financiado','# servicio suscrito/abonado','Fecha Ult pago','Fecha estado corte','Codigo Departamento','Centrales de riesgos','dias_desde_ult_gestion',
'Valor Honorarios','Dias_ult_pago','dia_semana_ult_pago','mes_ult_pago','semana_ult_pago','Marca','Marca Funcional','Reportado a central de riesgos','Marca Score','Autopago',
'trimestre_ult_pago','año_ult_pago','DIAS_desde_ult_pago','dias_ult_pago_cobro','Primera Mora','CICLO','Codigo Categoria','Subsegmento',
'dias_ult_pago_fac_ant','Fecha de cuenta de cobro mas antigua','Fecha estado corte','Fecha estado corte','Descripcion Gestion Resultado'],axis=1,inplace=True)
dd=fulll.copy()
dd['class_Cantidad_recontactos_mes']=dd['class_Cantidad_recontactos_mes'].fillna('0')
dd['class_Cantidad_de_cobros_diff_mes'].fillna('0',inplace=True)
# dd['Calificación Servicio Suscrito'][dd['Calificación Servicio Suscrito']==' '] = np.nan
# dd['Calificación Servicio Suscrito']=dd['Calificación Servicio Suscrito'].astype(float)
dd['Fecha de Asignacion']=pd.to_datetime(dd['Fecha de Asignacion'],format='%Y-%m-%d')
dd['Fecha Ult Gestion']=pd.to_datetime(dd['Fecha Ult Gestion'],format='%Y-%m-%d')
dd['Fecha Actualizacion']=pd.to_datetime(dd['Fecha Actualizacion'],format='%Y-%m-%d')
dd['Fecha Vencimiento Sin Recargo']=pd.to_datetime(dd['Fecha Vencimiento Sin Recargo'],format='%Y-%m-%d')
# dd['Fecha de cuenta de cobro mas antigua']=pd.to_datetime(dd['Fecha de cuenta de cobro mas antigua'],format='%Y-%m-%d')
dd['FECHA DE GESTION']=pd.to_datetime(dd['FECHA DE GESTION'],format='%Y-%m-%d %H:%M:%S')
dd['Fecha Debido Cobrar']=pd.to_datetime(dd['Fecha Debido Cobrar'],format='%Y-%m-%d %H:%M:%S', errors='coerce')
dd['Score Contactabilidad'][dd['Score Contactabilidad']==' '] =np.nan
dd['Score Contactabilidad']=dd['Score Contactabilidad'].fillna(dd['Score Contactabilidad'].median())
dd['Score Contactabilidad']=dd['Score Contactabilidad'].astype('float')
dd['Tiene Compromiso'] = (dd['Tiene Compromiso']=="S").astype(int)
# dd['Calificación Servicio Suscrito'][dd['Calificación Servicio Suscrito']==' '] =0
# dd['Calificación Servicio Suscrito']=dd['Calificación Servicio Suscrito'].astype(float)
dd['Financiado'] = (dd["Financiado"]=="SI").astype(int)
dd['Obligaciones con celular']= (dd['Obligaciones con celular']=="S").astype(int)
dd['Inscrito Factura Web']= (dd['Inscrito Factura Web']=="S").astype(int)
dd['Real reportado en central de riesgos']= (dd['Real reportado en central de riesgos']=="S").astype(int)
dd['Tipo Habito de Pago'][dd['Tipo Habito de Pago']==' '] ='NO REGISTRA'
dd['Calificación Identificación'][dd['Calificación Identificación']==' '] =dd["Calificación Identificación"].mode()[0]
dd["Calificación Identificación"]=dd["Calificación Identificación"].astype(float)
dd['CLASE DE GESTION'][dd['CLASE DE GESTION']==' ']='NO REGISTRA'
    ### Classifications
dd['Class_Total valor pendiente suscripcion']=pd.qcut(dd['Total valor pendiente suscripcion'].astype(float), 5,
labels=["A", "B", "C","D","E"]).astype('str')
dd['Total valor pendiente suscripcion']=dd['Total valor pendiente suscripcion'].astype(float)
dd['Valor Pendiente']=dd['Valor Pendiente'].astype(float)
dd['# de Dias De Mora']=dd['# de Dias De Mora'].astype(float)
dd['Dias sin Gestion']=dd['Dias sin Gestion'].astype(float)
dd['antiguedad_mes']=dd['antiguedad_mes'].astype(float)
dd['Minimo Cuentas con Saldo Suscripción']=dd['Minimo Cuentas con Saldo Suscripción'].astype(float)
dd['Maximo Cuentas con Saldo Suscripción']=dd['Maximo Cuentas con Saldo Suscripción'].astype(float)
dd['Total_segui']=dd['Total_segui'].astype(float)
    ### OUTLIERS
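    # Winsorization: values above the chosen upper quantile of each heavy-tailed
    # monetary/day-count variable are capped at that quantile rather than dropped.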
qtil9_vlrvencido=dd['valor vencido_sum'].quantile(0.95)
qtil9_vlfac=dd['Valor Factura'].quantile(0.90)
qtil9_total=dd['Total valor pendiente suscripcion'].quantile(0.90)
qtil9_total_ven=dd['Valor Vencido'].quantile(0.90)
qtil_75_dia=dd['# de Dias De Mora'].quantile(0.75)
qtil_75_dia_ges=dd['Dias sin Gestion'].quantile(0.80)
qtil_mes=dd['antiguedad_mes'].quantile(0.95)
qtil_min_cuentas=dd['Minimo Cuentas con Saldo Suscripción'].quantile(0.99)
qtil_max_cuentas=dd['Maximo Cuentas con Saldo Suscripción'].quantile(0.99)
qtil_sus=dd['Dias Suspension'].quantile(0.85)
qtil_segui=dd['Total_segui'].quantile(0.95)
dd['valor vencido_sum']= np.where(dd["valor vencido_sum"] > qtil9_vlrvencido, qtil9_vlrvencido ,dd["valor vencido_sum"])
dd['Valor Factura'] = np.where(dd['Valor Factura'] > qtil9_vlfac, qtil9_vlfac,dd["Valor Factura"])
dd['Valor Factura'] = np.where(dd['Valor Factura'] < 0, dd["Valor Factura"].quantile(0.5),dd["Valor Factura"])
dd['Total valor pendiente suscripcion']=np.where(dd['Total valor pendiente suscripcion'] > qtil9_total, qtil9_total,dd["Total valor pendiente suscripcion"])
dd['Valor Vencido']=np.where(dd['Valor Vencido'] > qtil9_total_ven, qtil9_total_ven,dd["Valor Vencido"])
dd['Valor Vencido']=np.where(dd['Valor Vencido'] < dd['Valor Vencido'].quantile(0.1), dd['Valor Vencido'].quantile(0.3),dd["Valor Vencido"])
dd['# de Dias De Mora']=np.where(dd['# de Dias De Mora'] > qtil_75_dia, qtil_75_dia,dd['# de Dias De Mora'])
dd['Dias sin Gestion']=np.where(dd['Dias sin Gestion'] > qtil_75_dia_ges, qtil_75_dia_ges,dd['Dias sin Gestion'])
dd['ratio_vlr_vencido_cobro'].fillna(dd['ratio_vlr_vencido_cobro'].median(),inplace=True)
dd['Calificación Servicio Suscrito'][dd['Calificación Servicio Suscrito']==' '] = np.nan
dd['Calificación Servicio Suscrito']=dd['Calificación Servicio Suscrito'].fillna(dd['Calificación Servicio Suscrito'].median())
dd['antiguedad_mes']=np.where(dd['antiguedad_mes'] > qtil_mes, qtil_mes,dd['antiguedad_mes'])
dd['Minimo Cuentas con Saldo Suscripción']=np.where(dd['Minimo Cuentas con Saldo Suscripción'] > qtil_min_cuentas, qtil_min_cuentas,dd['Minimo Cuentas con Saldo Suscripción'])
dd['Maximo Cuentas con Saldo Suscripción']=np.where(dd['Maximo Cuentas con Saldo Suscripción'] > qtil_max_cuentas, qtil_max_cuentas,dd['Maximo Cuentas con Saldo Suscripción'])
dd['Dias Suspension']=np.where(dd['Dias Suspension'] > qtil_sus, qtil_sus,dd['Dias Suspension'])
### Drop
dd.drop(['Descripcion Mejor Codigo Gestion Mes','Codigo de Gestion Resultado Visita','Análisis Vector',
'Fecha de Instalacion','DÃa Pago 3','Descripcion localidad',
'Fecha Ingreso Fraude','Maxima fecha Ult Gestion','Usuario Grabador',
'DÃa Pago 1','DÃa Pago 2','Ultimo Codigo de Gestion Agrupado','# de Suscripción',
'fecha de importacion',
'Fecha de Asignacion_mes','Descripcion producto','Fecha Financiacion','Codigo estado de corte','Descripcion estado de corte'],axis=1,inplace=True)
dd.ratio_vlr_vencido_cobro.fillna(dd.ratio_vlr_vencido_cobro.median(),inplace=True)
dd['retiro']=np.where(dd['Fecha Retiro'].isna(),0,1)
dd.drop(['Nivel de riesgo experian','Fecha Retiro','Nivel de Riesgo','Indicador BI','Tipo Contactabilidad',
'Gestion comercial','Estrategia','Usuario Fraudulento','Tipo de Reporte a Central de Riesgos','Banco 2_Cl'],axis=1,inplace=True)
dd.ratio_vlr_vencido_cobro.fillna(dd.ratio_vlr_vencido_cobro.median(),inplace=True)
dd['Efectivo Pago']=dd['Efectivo Pago'].astype(str)
dd['Class_Total valor pendiente suscripcion']=dd['Class_Total valor pendiente suscripcion'].astype('str')
dd['Califica_suscr_class']=dd['Califica_suscr_class'].astype('str')
dd['# de Dias De Mora'].fillna(0,inplace=True)
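    # jenkspy computes Jenks natural-breaks thresholds (minimising within-class
    # variance); pd.cut then labels each value with its break interval.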
breaks3 = jenkspy.jenks_breaks(dd['# de Dias De Mora'], nb_class=8)
dd['class_# de Dias De Mora'] = pd.cut(dd['# de Dias De Mora'] , bins=breaks3, include_lowest=True).astype(str)
breaks2 = jenkspy.jenks_breaks(dd['ratio_vlr_vencido_cobro'], nb_class=5)
dd['class_ratio_vlr_vencido_cobro_class'] = pd.cut(dd['ratio_vlr_vencido_cobro'] , bins=breaks2, include_lowest=True).astype(str)
dd['Total'].fillna(0,inplace=True)
dd['Total_clasificacion_cant_virtuales'] = pd.cut(x=dd['Total'],
bins=[-1,0,1,2,3,6,10,17,30,1000],
labels=["0","1","2","3","4-6","7-10", "11-17","18-30", ">30"]).astype(str).fillna('0')
    ### Split into records with and without follow-up
sin_seg=dd[dd['IDENTIFICACIóN'].isna()]
sin_seg.drop(sin_seg[sin_seg.columns[79:139]].columns,axis=1,inplace=True)
    # with follow-up
dd=dd[~dd['IDENTIFICACIóN'].isna()]
grupo=dd.groupby(['Efectivo Pago','Descripcion departamento', 'sistema origen',
'Vector Cualitativo # Suscripción', 'Tipificación Cliente',
'Perfil Digital', 'Descripcion subcategoria', 'Descripcion categoria', 'Estado del Cliente',
'Tipo Habito de Pago', 'Tipo Producto Servicio Suscrito', 'Analisis De Habito','Hogar',
'Califica_suscr_class', 'Banco 1_Cl','Descripcion estado de corte_Cl','class_Cantidad_de_cobros_diff_mes',
'class_Cantidad_recontactos_mes', 'Class_IVR',
'Class_sms','Class_Total valor pendiente suscripcion','Total_clasificacion_cant_virtuales',
'class_ratio_vlr_vencido_cobro_class','class_# de Dias De Mora']).size().reset_index(name='frecuency')
# dic_reg=pd.crosstab(grupo['Descripcion Regional'],grupo['Efectivo Pago']).apply(lambda r: r/r.sum(), axis=1)['1'].to_dict()
dic_des_dep=pd.crosstab(grupo['Descripcion departamento'],grupo['Efectivo Pago']).apply(lambda r: r/r.sum(), axis=1)['1'].to_dict()
    dic_vec_cua=pd.crosstab(grupo['Vector Cualitativo # Suscripción'],grupo['Efectivo Pago']).apply(lambda r: r/r.sum(), axis=1)['1'].to_dict()
# -*- coding: utf-8 -*-
"""
Created on Thu Jun 7 11:41:44 2018
@author: MichaelEK
"""
import os
import argparse
import types
import pandas as pd
import numpy as np
from pdsql import mssql
from datetime import datetime
import yaml
import itertools
import lowflows as lf
import util
pd.options.display.max_columns = 10
run_time_start = datetime.today().strftime('%Y-%m-%d %H:%M:%S')
print(run_time_start)
try:
#####################################
### Read parameters file
base_dir = os.path.realpath(os.path.dirname(__file__))
with open(os.path.join(base_dir, 'parameters-test.yml')) as param:
param = yaml.safe_load(param)
# parser = argparse.ArgumentParser()
# parser.add_argument('yaml_path')
# args = parser.parse_args()
#
# with open(args.yaml_path) as param:
# param = yaml.safe_load(param)
## Integrety checks
use_types_check = np.in1d(list(param['misc']['use_types_codes'].keys()), param['misc']['use_types_priorities']).all()
if not use_types_check:
raise ValueError('use_type_priorities parameter does not encompass all of the use type categories. Please fix the parameters file.')
#####################################
### Read the hydro log
# max_date_stmt = "select max(RunTimeStart) from " + param.log_table + " where HydroTable='" + param.process_name + "' and RunResult='pass' and ExtSystem='" + param.ext_system + "'"
#
# last_date1 = mssql.rd_sql(server=param.hydro_server, database=param.hydro_database, stmt=max_date_stmt).loc[0][0]
#
# if last_date1 is None:
# last_date1 = '1900-01-01'
# else:
# last_date1 = str(last_date1.date())
#
# print('Last sucessful date is ' + last_date1)
#######################################
### Read in source data and update accela tables in ConsentsReporting db
print('--Reading in source data...')
## Make object to contain the source data
db = types.SimpleNamespace()
for i, p in param['source data'].items():
setattr(db, i, mssql.rd_sql(p['server'], p['database'], p['table'], p['col_names'], rename_cols=p['rename_cols'], username=p['username'], password=p['password']))
if (p['database'] == 'Accela') & (not (p['table'] in ['Ecan.vAct_Water_AssociatedPermits', 'Ecan.vQA_Relationship_Actuals'])):
table1 = 'Accela.' + p['table'].split('Ecan.')[1]
print(table1)
t1 = getattr(db, i).copy().dropna(subset=p['pk'])
t1.drop_duplicates(p['pk'], inplace=True)
print('update in db')
new_ones, _ = mssql.update_from_difference(t1, param['output']['server'], param['output']['database'], table1, on=p['pk'], mod_date_col='ModifiedDate', username=param['output']['username'], password=param['output']['password'])
######################################
### Populate base tables
print('--Update base tables')
## HydroGroup
hf1 = pd.DataFrame(param['misc']['HydroGroup'])
hf1['ModifiedDate'] = run_time_start
hf0 = mssql.rd_sql(param['output']['server'], param['output']['database'], 'HydroGroup', username=param['output']['username'], password=param['output']['password'])
hf_diff1 = hf1[~hf1.HydroGroup.isin(hf0.HydroGroup)]
if not hf_diff1.empty:
mssql.to_mssql(hf_diff1, param['output']['server'], param['output']['database'], 'HydroGroup', username=param['output']['username'], password=param['output']['password'])
hf0 = mssql.rd_sql(param['output']['server'], param['output']['database'], 'HydroGroup', username=param['output']['username'], password=param['output']['password'])
## Activity
act1 = param['misc']['Activities']['ActivityType']
act2 = pd.DataFrame(list(itertools.product(act1, hf0.HydroGroupID.tolist())), columns=['ActivityType', 'HydroGroupID'])
act2['ModifiedDate'] = run_time_start
act0 = mssql.rd_sql(param['output']['server'], param['output']['database'], 'Activity', username=param['output']['username'], password=param['output']['password'])
act_diff1 = act2[~act2[['ActivityType', 'HydroGroupID']].isin(act0[['ActivityType', 'HydroGroupID']]).any(axis=1)]
if not act_diff1.empty:
mssql.to_mssql(act_diff1, param['output']['server'], param['output']['database'], 'Activity', username=param['output']['username'], password=param['output']['password'])
act0 = mssql.rd_sql(param['output']['server'], param['output']['database'], 'Activity', username=param['output']['username'], password=param['output']['password'])
# Combine activity and hydro features
act_types1 = pd.merge(act0[['ActivityID', 'ActivityType', 'HydroGroupID']], hf0[['HydroGroupID', 'HydroGroup']], on='HydroGroupID')
act_types1['ActivityName'] = act_types1['ActivityType'] + ' ' + act_types1['HydroGroup']
## AlloBlock
ab0 = mssql.rd_sql(param['output']['server'], param['output']['database'], 'AlloBlock', username=param['output']['username'], password=param['output']['password'])
sw_blocks1 = pd.Series(db.wap_allo['sw_allo_block'].unique())
gw_blocks1 = pd.Series(db.allocated_volume['allo_block'].unique())
# Fixes
wap_allo1 = db.wap_allo.copy()
wap_allo1['sw_allo_block'] = wap_allo1['sw_allo_block'].str.strip()
wap_allo1.loc[wap_allo1.sw_allo_block == 'Migration: Not Classified', 'sw_allo_block'] = 'A'
allo_vol1 = db.allocated_volume.copy()
allo_vol1['allo_block'] = allo_vol1['allo_block'].str.strip()
allo_vol1.loc[allo_vol1.allo_block == 'Migration: Not Classified', 'allo_block'] = 'A'
# Determine blocks and what needs to be added
sw_blocks1 = set(wap_allo1['sw_allo_block'].unique())
gw_blocks1 = set(allo_vol1['allo_block'].unique())
blocks1 = sw_blocks1.union(gw_blocks1)
ab1 = pd.DataFrame(list(itertools.product(blocks1, hf0.HydroGroupID.tolist())), columns=['AllocationBlock', 'HydroGroupID'])
ab1['ModifiedDate'] = run_time_start
ab0 = mssql.rd_sql(param['output']['server'], param['output']['database'], 'AlloBlock', username=param['output']['username'], password=param['output']['password'])
ab_diff1 = ab1[~ab1[['AllocationBlock', 'HydroGroupID']].isin(ab0[['AllocationBlock', 'HydroGroupID']]).any(axis=1)]
if not ab_diff1.empty:
mssql.to_mssql(ab_diff1, param['output']['server'], param['output']['database'], 'AlloBlock', username=param['output']['username'], password=param['output']['password'])
ab0 = mssql.rd_sql(param['output']['server'], param['output']['database'], 'AlloBlock', username=param['output']['username'], password=param['output']['password'])
# Combine alloblock and hydro features
ab_types1 = pd.merge(ab0[['AlloBlockID', 'AllocationBlock', 'HydroGroupID']], hf0[['HydroGroupID', 'HydroGroup']], on='HydroGroupID').drop('HydroGroupID', axis=1)
## Attributes
att1 = pd.DataFrame(param['misc']['Attributes'])
att1['ModifiedDate'] = run_time_start
att0 = mssql.rd_sql(param['output']['server'], param['output']['database'], 'Attributes', username=param['output']['username'], password=param['output']['password'])
att_diff1 = att1[~att1.Attribute.isin(att0.Attribute)]
if not att_diff1.empty:
mssql.to_mssql(att_diff1, param['output']['server'], param['output']['database'], 'Attributes', username=param['output']['username'], password=param['output']['password'])
att0 = mssql.rd_sql(param['output']['server'], param['output']['database'], 'Attributes', username=param['output']['username'], password=param['output']['password'])
##################################################
### Sites and streamdepletion
print('--Update sites tables')
## takes
wap_allo1['WAP'] = wap_allo1['WAP'].str.strip().str.upper()
    wap_allo1.loc[~wap_allo1.WAP.str.contains(r'[A-Z]+\d\d/\d\d\d\d'), 'WAP'] = np.nan
wap1 = wap_allo1['WAP'].unique()
wap1 = wap1[~pd.isnull(wap1)]
## Diverts
div1 = db.divert.copy()
div1['WAP'] = div1['WAP'].str.strip().str.upper()
    div1.loc[~div1.WAP.str.contains(r'[A-Z]+\d\d/\d\d\d\d'), 'WAP'] = np.nan
wap2 = div1['WAP'].unique()
wap2 = wap2[~pd.isnull(wap2)]
## Combo
waps = np.concatenate((wap1, wap2), axis=None)
## Check that all WAPs exist in the USM sites table
usm_waps1 = db.sites[db.sites.ExtSiteID.isin(waps)].copy()
usm_waps1[['NZTMX', 'NZTMY']] = usm_waps1[['NZTMX', 'NZTMY']].astype(int)
if len(wap1) != len(usm_waps1):
miss_waps = set(wap1).difference(set(usm_waps1.ExtSiteID))
print('Missing {} WAPs in USM'.format(len(miss_waps)))
wap_allo1 = wap_allo1[~wap_allo1.WAP.isin(miss_waps)].copy()
## Update ConsentsSites table
cs1 = usm_waps1[['ExtSiteID', 'SiteName']].copy()
# cs1['SiteType'] = 'WAP'
new_sites, _ = mssql.update_from_difference(cs1, param['output']['server'], param['output']['database'], 'ConsentsSites', on='ExtSiteID', mod_date_col='ModifiedDate', username=param['output']['username'], password=param['output']['password'])
# Log
log1 = util.log(param['output']['server'], param['output']['database'], 'log', run_time_start, '1900-01-01', 'ConsentsSites', 'pass', '{} sites updated'.format(len(new_sites)), username=param['output']['username'], password=param['output']['password'])
cs0 = mssql.rd_sql(param['output']['server'], param['output']['database'], 'ConsentsSites', ['SiteID', 'ExtSiteID'], username=param['output']['username'], password=param['output']['password'])
cs_waps2 = pd.merge(cs0, usm_waps1.drop('SiteName', axis=1), on='ExtSiteID')
cs_waps3 = pd.merge(cs_waps2, db.wap_sd, on='ExtSiteID').drop('ExtSiteID', axis=1).round()
new_waps, _ = mssql.update_from_difference(cs_waps3, param['output']['server'], param['output']['database'], 'SiteStreamDepletion', on='SiteID', mod_date_col='ModifiedDate', username=param['output']['username'], password=param['output']['password'])
# Log
log1 = util.log(param['output']['server'], param['output']['database'], 'log', run_time_start, '1900-01-01', 'WAP', 'pass', '{} sites updated'.format(len(new_waps)), username=param['output']['username'], password=param['output']['password'])
## Read db table
# wap0 = mssql.rd_sql(param['output']['server'], param['output']['database'], 'SiteStreamDepletion')
## Make linked WAP-SiteID table
wap_site = cs0.rename(columns={'ExtSiteID': 'WAP'})
##################################################
### Permit table
print('--Update Permit table')
## Clean data
permits1 = db.permit.copy()
permits1['RecordNumber'] = permits1['RecordNumber'].str.strip().str.upper()
permits1['ConsentStatus'] = permits1['ConsentStatus'].str.strip()
permits1['EcanID'] = permits1['EcanID'].str.strip().str.upper()
permits1['FromDate'] = pd.to_datetime(permits1['FromDate'], infer_datetime_format=True, errors='coerce')
permits1['ToDate'] = pd.to_datetime(permits1['ToDate'], infer_datetime_format=True, errors='coerce')
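# Consents with status 'Issued - s124 Continuance' are given a nominal 30-year term from FromDate
# below (an assumed term length; the recorded ToDate is not used for these consents).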
s124_bool = permits1['ConsentStatus'] == 'Issued - s124 Continuance'
permits1.loc[s124_bool, 'ToDate'] = permits1.loc[s124_bool, 'FromDate'] + pd.DateOffset(years=30)
permits1[['NZTMX', 'NZTMY']] = permits1[['NZTMX', 'NZTMY']].round()
permits1.loc[(permits1['FromDate'] < '1950-01-01'), 'FromDate'] = np.nan
permits1.loc[(permits1['ToDate'] < '1950-01-01'), 'ToDate'] = np.nan
## Filter data
permits2 = permits1.drop_duplicates('RecordNumber')
permits2 = permits2[permits2.ConsentStatus.notnull() & permits2.RecordNumber.notnull() & permits2['EcanID'].notnull()].copy()
# permits2 = permits2[(permits2['FromDate'] > '1950-01-01') & (permits2['ToDate'] > '1950-01-01') & (permits2['ToDate'] > permits2['FromDate']) & permits2.NZTMX.notnull() & permits2.NZTMY.notnull() & permits2.ConsentStatus.notnull() & permits2.RecordNumber.notnull() & permits2['EcanID'].notnull()].copy()
## Convert datetimes to date
permits2['FromDate'] = permits2['FromDate'].dt.date
permits2['ToDate'] = permits2['ToDate'].dt.date
permits2.loc[permits2['FromDate'].isnull(), 'FromDate'] = '1900-01-01'
permits2.loc[permits2['ToDate'].isnull(), 'ToDate'] = '1900-01-01'
## Save results
new_permits, _ = mssql.update_from_difference(permits2, param['output']['server'], param['output']['database'], 'Permit', on='RecordNumber', mod_date_col='ModifiedDate', username=param['output']['username'], password=param['output']['password'])
# Log
log1 = util.log(param['output']['server'], param['output']['database'], 'log', run_time_start, '1900-01-01', 'Permit', 'pass', '{} rows updated'.format(len(new_permits)), username=param['output']['username'], password=param['output']['password'])
## Read db table
permits0 = mssql.rd_sql(param['output']['server'], param['output']['database'], 'Permit', username=param['output']['username'], password=param['output']['password'])
##################################################
### Parent-Child
print('--Update Parent-child table')
## Clean data
pc1 = db.parent_child.copy()
pc1['ParentRecordNumber'] = pc1['ParentRecordNumber'].str.strip().str.upper()
pc1['ChildRecordNumber'] = pc1['ChildRecordNumber'].str.strip().str.upper()
pc1['ParentCategory'] = pc1['ParentCategory'].str.strip()
pc1['ChildCategory'] = pc1['ChildCategory'].str.strip()
## Filter data
pc1 = pc1.drop_duplicates()
pc1 = pc1[pc1['ParentRecordNumber'].notnull() & pc1['ChildRecordNumber'].notnull()]
## Check foreign keys
crc1 = permits0.RecordNumber.unique()
pc2 = pc1[pc1.ParentRecordNumber.isin(crc1) & pc1.ChildRecordNumber.isin(crc1)].copy()
## Save results
new_pc, _ = mssql.update_from_difference(pc2, param['output']['server'], param['output']['database'], 'ParentChild', on=['ParentRecordNumber', 'ChildRecordNumber'], mod_date_col='ModifiedDate', username=param['output']['username'], password=param['output']['password'])
# Log
log1 = util.log(param['output']['server'], param['output']['database'], 'log', run_time_start, '1900-01-01', 'ParentChild', 'pass', '{} rows updated'.format(len(new_pc)), username=param['output']['username'], password=param['output']['password'])
## Read db table
pc0 = mssql.rd_sql(param['output']['server'], param['output']['database'], 'ParentChild', username=param['output']['username'], password=param['output']['password'])
#################################################
### AllocatedRatesVolumes
print('--Update Allocation tables')
attr1 = mssql.rd_sql(param['output']['server'], param['output']['database'], 'Attributes', ['AttributeID', 'Attribute'], username=param['output']['username'], password=param['output']['password'])
## Rates
# Clean data
wa1 = wap_allo1.copy()
wa1['RecordNumber'] = wa1['RecordNumber'].str.strip().str.upper()
wa1['take_type'] = wa1['take_type'].str.strip().str.title()
wa1['FromMonth'] = wa1['FromMonth'].str.strip().str.title()
wa1['ToMonth'] = wa1['ToMonth'].str.strip().str.title()
wa1['IncludeInSwAllocation'] = wa1['IncludeInSwAllocation'].str.strip().str.title()
wa1['AllocatedRate'] = pd.to_numeric(wa1['AllocatedRate'], errors='coerce').round(2)
wa1['WapRate'] = pd.to_numeric(wa1['WapRate'], errors='coerce').round(2)
wa1['VolumeDaily'] = pd.to_numeric(wa1['VolumeDaily'], errors='coerce').astype(int)
wa1['VolumeWeekly'] = pd.to_numeric(wa1['VolumeWeekly'], errors='coerce').astype(int)
wa1['Volume150Day'] = pd.to_numeric(wa1['Volume150Day'], errors='coerce').astype(int)
wa1.loc[wa1['FromMonth'] == 'Migration: Not Classified', 'FromMonth'] = 'Jul'
wa1.loc[wa1['ToMonth'] == 'Migration: Not Classified', 'ToMonth'] = 'Jun'
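# The mapping below renumbers calendar months so that July becomes month 1 and June becomes month 12
# (i.e. a July-June allocation year), which the FromMonth/ToMonth handling further down relies on.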
mon_mapping = {'Jan': 7, 'Feb': 8, 'Mar': 9, 'Apr': 10, 'May': 11, 'Jun': 12, 'Jul': 1, 'Aug': 2, 'Sep': 3, 'Oct': 4, 'Nov': 5, 'Dec': 6}
wa1.replace({'FromMonth': mon_mapping, 'ToMonth': mon_mapping}, inplace=True)
wa1.loc[wa1['IncludeInSwAllocation'] == 'No', 'IncludeInSwAllocation'] = False
wa1.loc[wa1['IncludeInSwAllocation'] == 'Yes', 'IncludeInSwAllocation'] = True
wa1.replace({'sw_allo_block': {'In Waitaki': 'A'}}, inplace=True)
# Check foreign keys
wa4 = wa1[wa1.RecordNumber.isin(crc1)].copy()
# Filters
# wa4 = wa2[(wa2.AllocatedRate > 0)].copy()
# wa3.loc[~wa3['IncludeInSwAllocation'], ['AllocatedRate', 'SD1', 'SD2']] = 0
# wa4 = wa3.drop('IncludeInSwAllocation', axis=1).copy()
# Find the missing WAPs per consent
crc_wap_mis1 = wa4.loc[wa4.WAP.isnull(), 'RecordNumber'].unique()
crc_wap4 = wa4[['RecordNumber', 'WAP']].drop_duplicates()
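# Consents missing a WAP inherit one from their descendants: starting with the consent's children in
# the ParentChild table, follow ChildRecordNumbers down the chain until a descendant with WAP entries
# is found, and use the first WAP listed for it.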
for i in crc_wap_mis1:
crc2 = pc0[np.in1d(pc0.ParentRecordNumber, i)].ChildRecordNumber.values
wap1 = []
while (len(crc2) > 0) & (len(wap1) == 0):
wap1 = crc_wap4.loc[np.in1d(crc_wap4.RecordNumber, crc2), 'WAP'].values
crc2 = pc0[np.in1d(pc0.ParentRecordNumber, crc2)].ChildRecordNumber.values
if len(wap1) > 0:
wa4.loc[wa4.RecordNumber == i, 'WAP'] = wap1[0]
wa4 = wa4[wa4.WAP.notnull()].copy()
wa4.rename(columns={'sw_allo_block': 'AllocationBlock'}, inplace=True)
# Distribute the months
cols1 = wa4.columns.tolist()
from_mon_pos = cols1.index('FromMonth')
to_mon_pos = cols1.index('ToMonth')
allo_rates_list = []
# c1 = 0
for val in wa4.itertuples(False, None):
from_month = int(val[from_mon_pos])
to_month = int(val[to_mon_pos])
if from_month > to_month:
mons = list(range(1, to_month + 1))
# c1 = c1 + 1
else:
mons = range(from_month, to_month + 1)
d1 = [val + (i,) for i in mons]
allo_rates_list.extend(d1)
col_names1 = wa4.columns.tolist()
col_names1.extend(['Month'])
wa5 = pd.DataFrame(allo_rates_list, columns=col_names1).drop(['FromMonth', 'ToMonth'], axis=1)
# Mean of all months
grp1 = wa5.groupby(['RecordNumber', 'take_type', 'AllocationBlock', 'WAP'])
mean1 = grp1[['WapRate', 'AllocatedRate', 'VolumeDaily', 'VolumeWeekly', 'Volume30Day', 'Volume150Day', 'SD1', 'SD2']].mean().round(2)
include1 = grp1['IncludeInSwAllocation'].first()
mon_min = grp1['Month'].min()
mon_min.name = 'FromMonth'
mon_max = grp1['Month'].max()
mon_max.name = 'ToMonth'
wa6 = pd.concat([mean1, mon_min, mon_max, include1], axis=1).reset_index()
# wa6['HydroGroup'] = 'Surface Water'
## Allocated Volume
av1 = allo_vol1.copy()
# clean data
av1['RecordNumber'] = av1['RecordNumber'].str.strip().str.upper()
av1['take_type'] = av1['take_type'].str.strip().str.title()
av1['IncludeInGwAllocation'] = av1['IncludeInGwAllocation'].str.strip().str.title()
av1.loc[av1['IncludeInGwAllocation'] == 'No', 'IncludeInGwAllocation'] = False
av1.loc[av1['IncludeInGwAllocation'] == 'Yes', 'IncludeInGwAllocation'] = True
av1['IncludeInGwAllocation'] = av1['IncludeInGwAllocation'].astype(bool)
# av1['AllocatedAnnualVolume'] = pd.to_numeric(av1['AllocatedAnnualVolume'], errors='coerce').astype(int)
av1['FullAnnualVolume'] = pd.to_numeric(av1['FullAnnualVolume'], errors='coerce').astype(int)
# av1.loc[av1['AllocatedAnnualVolume'] <= 0, 'AllocatedAnnualVolume'] = 0
# av1 = av1.loc[av1['AllocatedAnnualVolume'] > 0]
av1.rename(columns={'allo_block': 'AllocationBlock'}, inplace=True)
av1.drop('AllocatedAnnualVolume', axis=1, inplace=True)
av1.replace({'AllocationBlock': {'In Waitaki': 'A'}}, inplace=True)
av1.drop_duplicates(subset=['RecordNumber', 'take_type', 'AllocationBlock'], inplace=True)
## Combine volumes with rates
wa7 = pd.merge(av1, wa6, on=['RecordNumber', 'take_type', 'AllocationBlock'])
## Distribute the volumes by WapRate
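# Each consent's FullAnnualVolume is shared across its WAPs in proportion to WapRate within the
# (RecordNumber, take_type, AllocationBlock) group - e.g. WapRates of 30 and 10 give 75% and 25% of
# the volume. Where no ratio can be computed (all WapRates zero or missing), the ratio defaults to 1,
# i.e. each WAP keeps the full volume.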
wa8 = wa7.copy()
grp3 = wa8.groupby(['RecordNumber', 'take_type', 'AllocationBlock'])
wa8['WapRateAgg'] = grp3['WapRate'].transform('sum')
wa8['ratio'] = wa8['WapRate'] / wa8['WapRateAgg']
wa8.loc[wa8['ratio'].isnull(), 'ratio'] = 1
wa8['FullAnnualVolume'] = (wa8['FullAnnualVolume'] * wa8['ratio']).round()
wa8.drop(['WapRateAgg', 'ratio', 'VolumeDaily', 'VolumeWeekly', 'Volume30Day', 'Volume150Day', 'SD2', 'WapRate'], axis=1, inplace=True)
wa8 = wa8[wa8.FullAnnualVolume >= 0].copy()
## Add in stream depletion
# wa9 = pd.merge(wa8, db.wap_sd.rename(columns={'ExtSiteID': 'WAP'}), on='WAP').drop(['SD1_NZTMX', 'SD1_NZTMY', 'SD1_30Day', 'SD2_NZTMX', 'SD2_NZTMY', 'SD2_7Day', 'SD2_30Day', 'SD2_150Day', 'SD1', 'SD2'], axis=1)
#
# wa9['SD1_7Day'] = pd.to_numeric(wa9['SD1_7Day'], errors='coerce').round(0)
# wa9['SD1_150Day'] = pd.to_numeric(wa9['SD1_150Day'], errors='coerce').round(0)
## Combine with aquifer test storativity
# aq1 = db.wap_aquifer_test.dropna(subset=['storativity']).copy()
# aq1.rename(columns={'ExtSiteID': 'WAP'}, inplace=True)
# aq2 = aq1.groupby('WAP')['storativity'].mean().dropna().reset_index()
# aq2.storativity = True
#
# wa9 = pd.merge(wa9, aq2, on='WAP', how='left')
# wa9.loc[wa9.storativity.isnull(), 'storativity'] = False
## Distribute the rates and volumes by allocation hydro group
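# Surface water takes keep the full allocated rate and volume as surface water. For groundwater takes,
# the stream depletion rate SD1 is treated as the surface water share and the remainder
# (AllocatedRate - SD1) as groundwater; annual volumes are split in the same proportions.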
wa8['sw_rate'] = 0
wa8['gw_rate'] = 0
wa8['sw_vol'] = 0
wa8['gw_vol'] = 0
wa8.loc[wa8.take_type == 'Take Surface Water', 'sw_rate'] = wa8.loc[wa8.take_type == 'Take Surface Water', 'AllocatedRate']
wa8.loc[wa8.take_type == 'Take Groundwater', 'sw_rate'] = wa8.loc[wa8.take_type == 'Take Groundwater', 'SD1']
wa8.loc[wa8.take_type == 'Take Groundwater', 'gw_rate'] = wa8.loc[wa8.take_type == 'Take Groundwater', 'AllocatedRate'] - wa8.loc[wa8.take_type == 'Take Groundwater', 'SD1']
wa8.loc[wa8.take_type == 'Take Surface Water', 'sw_vol'] = wa8.loc[wa8.take_type == 'Take Surface Water', 'FullAnnualVolume']
wa8.loc[wa8.take_type == 'Take Groundwater', 'sw_vol'] = (wa8.loc[wa8.take_type == 'Take Groundwater', 'SD1']/wa8.loc[wa8.take_type == 'Take Groundwater', 'AllocatedRate']) * wa8.loc[wa8.take_type == 'Take Groundwater', 'FullAnnualVolume']
wa8.loc[wa8.take_type == 'Take Groundwater', 'gw_vol'] = (wa8.loc[wa8.take_type == 'Take Groundwater', 'gw_rate']/wa8.loc[wa8.take_type == 'Take Groundwater', 'AllocatedRate']) * wa8.loc[wa8.take_type == 'Take Groundwater', 'FullAnnualVolume']
allo_list = []
for k, row in wa8.iterrows():
# print(k)
if row['IncludeInSwAllocation']:
sw1 = row[['RecordNumber', 'AllocationBlock', 'WAP', 'FromMonth', 'ToMonth', 'sw_rate', 'sw_vol']].rename({'sw_rate': 'AllocatedRate', 'sw_vol': 'AllocatedAnnualVolume'})
sw1['HydroGroup'] = 'Surface Water'
allo_list.append(sw1.to_frame().T)
if row['IncludeInGwAllocation']:
gw1 = row[['RecordNumber', 'AllocationBlock', 'WAP', 'FromMonth', 'ToMonth', 'gw_rate', 'gw_vol']].rename({'gw_rate': 'AllocatedRate', 'gw_vol': 'AllocatedAnnualVolume'})
gw1['HydroGroup'] = 'Groundwater'
allo_list.append(gw1.to_frame().T)
rv1 = pd.concat(allo_list)
rv1['AllocatedAnnualVolume'] = pd.to_numeric(rv1['AllocatedAnnualVolume'])
rv1['AllocatedRate'] = pd.to_numeric(rv1['AllocatedRate'])
rv1['FromMonth'] = pd.to_numeric(rv1['FromMonth'], downcast='integer')
rv1['ToMonth'] = pd.to_numeric(rv1['ToMonth'], downcast='integer')
rv1.loc[rv1['AllocatedAnnualVolume'].isnull(), 'AllocatedAnnualVolume'] = 0
rv1.loc[rv1['AllocatedAnnualVolume'] == np.inf, 'AllocatedAnnualVolume'] = 0
rv1.loc[rv1['AllocatedRate'].isnull(), 'AllocatedRate'] = 0
rv1.loc[rv1['AllocatedRate'] == np.inf, 'AllocatedRate'] = 0
# Cut out the fat
rv4 = rv1[(rv1['AllocatedAnnualVolume'] > 0) | (rv1['AllocatedRate'] > 0)].copy()
## Calculate missing volumes and rates
ann_bool = rv4.AllocatedAnnualVolume == 0
rv4.loc[ann_bool, 'AllocatedAnnualVolume'] = (rv4.loc[ann_bool, 'AllocatedRate'] * 0.001*60*60*24*30.42* (rv4.loc[ann_bool, 'ToMonth'] - rv4.loc[ann_bool, 'FromMonth'] + 1))
rate_bool = rv4.AllocatedRate == 0
rv4.loc[rate_bool, 'AllocatedRate'] = (rv4.loc[rate_bool, 'AllocatedAnnualVolume'] / 60/60/24/30.42/ (rv4.loc[rate_bool, 'ToMonth'] - rv4.loc[rate_bool, 'FromMonth'] + 1) * 1000)
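# The conversions above assume AllocatedRate is in L/s and the annual volume in m^3:
# rate * 0.001 (L -> m^3) * 60*60*24 (s -> day) * 30.42 (mean days per month) * number of allocation
# months gives the volume, and the second expression is the same relationship inverted.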
## Convert the rates and volumes to integers
rv4['AllocatedAnnualVolume'] = rv4['AllocatedAnnualVolume'].round().astype(int)
rv4['AllocatedRate'] = rv4['AllocatedRate'].round().astype(int)
## Merge tables for IDs
avr5 = pd.merge(rv4, ab_types1, on=['AllocationBlock', 'HydroGroup']).drop(['AllocationBlock', 'HydroGroup'], axis=1).copy()
avr6 = pd.merge(avr5, wap_site, on='WAP').drop('WAP', axis=1)
## Update CrcAlloSite table
crc_allo = avr6[['RecordNumber', 'AlloBlockID', 'SiteID']].copy()
crc_allo['SiteAllo'] = True
crc_allo['SiteType'] = 'WAP'
## Determine which rows should be updated
# old_crc_allo = mssql.rd_sql(param['output']['server'], param['output']['database'], 'CrcAlloSite', where_in={'SiteAllo': [1], 'SiteType': ['WAP']})
#
# diff_dict = mssql.compare_dfs(old_crc_allo.drop(['CrcAlloSiteID', 'ModifiedDate'], axis=1), crc_allo, on=['RecordNumber', 'AlloBlockID', 'SiteID'])
#
# both1 = pd.concat([diff_dict['new'], diff_dict['diff']])
#
# rem1 = diff_dict['remove']
# Save results
new_crc_allo, rem_crc_allo = mssql.update_from_difference(crc_allo, param['output']['server'], param['output']['database'], 'CrcAlloSite', on=['RecordNumber', 'AlloBlockID', 'SiteID'], mod_date_col='ModifiedDate', where_cols=['SiteID', 'SiteType'], username=param['output']['username'], password=param['output']['password'])
# Log
log1 = util.log(param['output']['server'], param['output']['database'], 'log', run_time_start, '1900-01-01', 'CrcAlloSite', 'pass', '{} rows updated'.format(len(new_crc_allo)), username=param['output']['username'], password=param['output']['password'])
# Read db table
allo_site0 = mssql.rd_sql(param['output']['server'], param['output']['database'], 'CrcAlloSite', ['CrcAlloSiteID', 'RecordNumber', 'AlloBlockID', 'SiteID'], username=param['output']['username'], password=param['output']['password'])
# Remove old data if needed
if not rem_crc_allo.empty:
rem_crc_allo1 = pd.merge(allo_site0, rem_crc_allo, on=['RecordNumber', 'AlloBlockID', 'SiteID']).drop(['RecordNumber', 'AlloBlockID', 'SiteID'], axis=1)
mssql.del_table_rows(param['output']['server'], param['output']['database'], 'AllocatedRateVolume', rem_crc_allo1, username=param['output']['username'], password=param['output']['password'])
# mssql.del_table_rows(param['output']['server'], param['output']['database'], 'TSLowFlowRestr', rem_crc_allo1, username=param['output']['username'], password=param['output']['password'])
# mssql.del_table_rows(param['output']['server'], param['output']['database'], 'LowFlowConditions', rem_crc_allo1, username=param['output']['username'], password=param['output']['password'])
# mssql.del_table_rows(param['output']['server'], param['output']['database'], 'CrcAlloSite', rem_crc_allo1, username=param['output']['username'], password=param['output']['password'])
allo_site0 = mssql.rd_sql(param['output']['server'], param['output']['database'], 'CrcAlloSite', ['CrcAlloSiteID', 'RecordNumber', 'AlloBlockID', 'SiteID'], username=param['output']['username'], password=param['output']['password'])
## Update AllocatedRateVolume table
avr7 = pd.merge(allo_site0, avr6, on=['RecordNumber', 'AlloBlockID', 'SiteID']).drop(['RecordNumber', 'AlloBlockID', 'SiteID'], axis=1).drop_duplicates('CrcAlloSiteID')
# Save results
new_avr, _ = mssql.update_from_difference(avr7, param['output']['server'], param['output']['database'], 'AllocatedRateVolume', on='CrcAlloSiteID', mod_date_col='ModifiedDate', username=param['output']['username'], password=param['output']['password'])
# Log
log1 = util.log(param['output']['server'], param['output']['database'], 'log', run_time_start, '1900-01-01', 'AllocatedRateVolume', 'pass', '{} rows updated'.format(len(new_avr)), username=param['output']['username'], password=param['output']['password'])
#################################################
### ConsentedRateVolume
print('--Update Consent tables')
## Clean data
crv1 = db.consented_takes.copy()
crv1['RecordNumber'] = crv1['RecordNumber'].str.strip().str.upper()
crv1['take_type'] = crv1['take_type'].str.strip().str.title()
crv1['LowflowCondition'] = crv1['LowflowCondition'].str.strip().str.upper()
crv1['ConsentedAnnualVolume'] = pd.to_numeric(crv1['ConsentedAnnualVolume'], errors='coerce').round()
crv1['ConsentedMultiDayVolume'] = pd.to_numeric(crv1['ConsentedMultiDayVolume'], errors='coerce').round()
crv1['ConsentedMultiDayPeriod'] = pd.to_numeric(crv1['ConsentedMultiDayPeriod'], errors='coerce').round()
crv1['ConsentedRate'] = pd.to_numeric(crv1['ConsentedRate'], errors='coerce')
crv1.loc[crv1['ConsentedMultiDayVolume'] <= 0, 'ConsentedMultiDayVolume'] = np.nan
crv1.loc[crv1['ConsentedMultiDayPeriod'] <= 0, 'ConsentedMultiDayPeriod'] = np.nan
crv1.loc[crv1['ConsentedRate'] <= 0, 'ConsentedRate'] = np.nan
crv1.loc[crv1['ConsentedAnnualVolume'] <= 0, 'ConsentedAnnualVolume'] = np.nan
crv1.loc[crv1['LowflowCondition'].isnull(), 'LowflowCondition'] = 'NO'
crv1.loc[(crv1['LowflowCondition'] == 'COMPLEX'), 'LowflowCondition'] = 'YES'
crv1.loc[crv1['LowflowCondition'] == 'NO', 'LowflowCondition'] = False
crv1.loc[crv1['LowflowCondition'] == 'YES', 'LowflowCondition'] = True
## Filter data
crv2 = crv1[crv1.ConsentedRate.notnull()]
## Check foreign keys
crv2 = crv2[crv2.RecordNumber.isin(crc1)].copy()
## Aggregate take types for counts and min/max month
grp4 = wa4.groupby(['RecordNumber', 'take_type', 'WAP'])
mon_min = grp4['FromMonth'].min()
mon_min.name = 'FromMonth'
mon_max = grp4['ToMonth'].max()
mon_max.name = 'ToMonth'
mon_min_max = pd.concat([mon_min, mon_max], axis=1)
mon_min_max1 = mon_min_max.reset_index()
grp5 = mon_min_max1.groupby(['RecordNumber', 'take_type'])
mon_min_max1['wap_count'] = grp5['WAP'].transform('count')
## Distribute WAPs to consents
crv3 = pd.merge(crv2, mon_min_max1, on=['RecordNumber', 'take_type'])
crv3[['ConsentedAnnualVolume', 'ConsentedMultiDayVolume']] = crv3[['ConsentedAnnualVolume', 'ConsentedMultiDayVolume']].divide(crv3['wap_count'], 0).round()
crv3['ConsentedRate'] = crv3['ConsentedRate'].divide(crv3['wap_count'], 0).round(2)
## Convert take types to ActivityID
take_types1 = act_types1[act_types1.ActivityType == 'Take'].copy()
crv4 = pd.merge(crv3.drop('wap_count', axis=1), take_types1[['ActivityID', 'ActivityName']], left_on='take_type', right_on='ActivityName').drop(['take_type', 'ActivityName'], axis=1)
## Convert WAPs to SiteIDs
crv5 = pd.merge(crv4, wap_site, on='WAP').drop('WAP', axis=1)
## Create CrcActSite table
crc_act = crv5[['RecordNumber', 'ActivityID', 'SiteID']].copy()
crc_act['SiteActivity'] = True
crc_act['SiteType'] = 'WAP'
# Save results
new_crc_act, rem_crc_act = mssql.update_from_difference(crc_act, param['output']['server'], param['output']['database'], 'CrcActSite', on=['RecordNumber', 'ActivityID', 'SiteID'], mod_date_col='ModifiedDate', where_cols=['RecordNumber', 'ActivityID', 'SiteID', 'SiteType'], username=param['output']['username'], password=param['output']['password'])
# Log
log1 = util.log(param['output']['server'], param['output']['database'], 'log', run_time_start, '1900-01-01', 'CrcActSite', 'pass', '{} rows updated'.format(len(new_crc_act)), username=param['output']['username'], password=param['output']['password'])
# Read db table
act_site0 = mssql.rd_sql(param['output']['server'], param['output']['database'], 'CrcActSite', ['CrcActSiteID', 'RecordNumber', 'ActivityID', 'SiteID'], username=param['output']['username'], password=param['output']['password'])
# Remove old data if needed
if not rem_crc_act.empty:
rem_crc_act1 = pd.merge(act_site0, rem_crc_act, on=['RecordNumber', 'ActivityID', 'SiteID']).drop(['RecordNumber', 'ActivityID', 'SiteID'], axis=1)
del_stmt = "delete from {table} where {col} in ({val})"
# del_stmt1 = del_stmt.format(table='ConsentedAttributes', col='CrcActSiteID', val=', '.join(rem_crc_act1.CrcActSiteID.astype(str).tolist()))
# mssql.del_table_rows(param['output']['server'], param['output']['database'], stmt=del_stmt1, username=param['output']['username'], password=param['output']['password'])
#
# del_stmt2a = del_stmt.format(table='LinkedPermits', col='CrcActSiteID', val=', '.join(rem_crc_act1.CrcActSiteID.astype(str).tolist()))
# mssql.del_table_rows(param['output']['server'], param['output']['database'], stmt=del_stmt2a, username=param['output']['username'], password=param['output']['password'])
#
# del_stmt2b = del_stmt.format(table='LinkedPermits', col='OtherCrcActSiteID', val=', '.join(rem_crc_act1.CrcActSiteID.astype(str).tolist()))
# mssql.del_table_rows(param['output']['server'], param['output']['database'], stmt=del_stmt2b, username=param['output']['username'], password=param['output']['password'])
del_stmt3 = del_stmt.format(table='ConsentedRateVolume', col='CrcActSiteID', val=', '.join(rem_crc_act1.CrcActSiteID.astype(str).tolist()))
mssql.del_table_rows(param['output']['server'], param['output']['database'], stmt=del_stmt3, username=param['output']['username'], password=param['output']['password'])
# del_stmt4 = del_stmt.format(table='CrcActSite', col='CrcActSiteID', val=', '.join(rem_crc_act1.CrcActSiteID.astype(str).tolist()))
# mssql.del_table_rows(param['output']['server'], param['output']['database'], stmt=del_stmt4, username=param['output']['username'], password=param['output']['password'])
act_site0 = mssql.rd_sql(param['output']['server'], param['output']['database'], 'CrcActSite', ['CrcActSiteID', 'RecordNumber', 'ActivityID', 'SiteID'], username=param['output']['username'], password=param['output']['password'])
## Create ConsentedRateVolume table
crv6 = pd.merge(crv5, act_site0, on=['RecordNumber', 'ActivityID', 'SiteID']).drop(['RecordNumber', 'ActivityID', 'SiteID', 'LowflowCondition'], axis=1)
# Save results
new_crv, _ = mssql.update_from_difference(crv6, param['output']['server'], param['output']['database'], 'ConsentedRateVolume', on='CrcActSiteID', mod_date_col='ModifiedDate', username=param['output']['username'], password=param['output']['password'])
# Log
log1 = util.log(param['output']['server'], param['output']['database'], 'log', run_time_start, '1900-01-01', 'ConsentedRateVolume', 'pass', '{} rows updated'.format(len(new_crv)), username=param['output']['username'], password=param['output']['password'])
###########################################
### Diverts
## Clean
div1 = db.divert.copy()
div1['RecordNumber'] = div1['RecordNumber'].str.strip().str.upper()
div1['DivertType'] = div1['DivertType'].str.strip().str.title()
div1['LowflowCondition'] = div1['LowflowCondition'].str.strip().str.upper()
div1['ConsentedMultiDayVolume'] = pd.to_numeric(div1['ConsentedMultiDayVolume'], errors='coerce').round()
div1['ConsentedMultiDayPeriod'] = pd.to_numeric(div1['ConsentedMultiDayPeriod'], errors='coerce').round()
div1['ConsentedRate'] = pd.to_numeric(div1['ConsentedRate'], errors='coerce').round(2)
div1.loc[div1['ConsentedMultiDayVolume'] <= 0, 'ConsentedMultiDayVolume'] = np.nan
div1.loc[div1['ConsentedMultiDayPeriod'] <= 0, 'ConsentedMultiDayPeriod'] = np.nan
div1.loc[div1['ConsentedRate'] <= 0, 'ConsentedRate'] = np.nan
div1.loc[div1['LowflowCondition'].isnull(), 'LowflowCondition'] = 'NO'
div1.loc[(~div1['LowflowCondition'].isin(['NO', 'YES'])), 'LowflowCondition'] = 'YES'
div1.loc[div1['LowflowCondition'] == 'NO', 'LowflowCondition'] = False
div1.loc[div1['LowflowCondition'] == 'YES', 'LowflowCondition'] = True
div1['WAP'] = div1['WAP'].str.strip().str.upper()
div1.loc[~div1.WAP.str.contains(r'[A-Z]+\d\d/\d\d\d\d'), 'WAP'] = np.nan
## Filter
div2 = div1[div1.WAP.notnull()]
## Check foreign keys
div2 = div2[div2.RecordNumber.isin(crc1)].copy()
## Check primary keys
div2 = div2.drop_duplicates(['RecordNumber', 'WAP'])
## Join to get the IDs and filter WAPs
div3 = pd.merge(div2, act_types1[['ActivityID', 'ActivityName']], left_on='DivertType', right_on='ActivityName').drop(['DivertType', 'ActivityName'], axis=1)
div3 = pd.merge(div3, wap_site, on='WAP').drop('WAP', axis=1)
## CrcActSite
crc_act_div = div3[['RecordNumber', 'ActivityID', 'SiteID']].copy()
crc_act_div['SiteActivity'] = True
crc_act_div['SiteType'] = 'WAP'
# Save results
new_crc_div, rem_crc_div = mssql.update_from_difference(crc_act_div, param['output']['server'], param['output']['database'], 'CrcActSite', on=['RecordNumber', 'ActivityID', 'SiteID'], mod_date_col='ModifiedDate', where_cols=['RecordNumber', 'ActivityID', 'SiteID', 'SiteType'], username=param['output']['username'], password=param['output']['password'])
# Log
log1 = util.log(param['output']['server'], param['output']['database'], 'log', run_time_start, '1900-01-01', 'CrcActSite', 'pass', '{} rows updated'.format(len(new_crc_div)), username=param['output']['username'], password=param['output']['password'])
# Read db table
act_site0 = mssql.rd_sql(param['output']['server'], param['output']['database'], 'CrcActSite', ['CrcActSiteID', 'RecordNumber', 'ActivityID', 'SiteID'], username=param['output']['username'], password=param['output']['password'])
## ConsentedRateVolume
crc_div = pd.merge(div3, act_site0, on=['RecordNumber', 'ActivityID', 'SiteID']).drop(['RecordNumber', 'ActivityID', 'SiteID', 'LowflowCondition'], axis=1).dropna(subset=['ConsentedRate', 'ConsentedMultiDayVolume'], how='all')
crc_div['FromMonth'] = 1
crc_div['ToMonth'] = 12
# Save results
new_crc_div, _ = mssql.update_from_difference(crc_div, param['output']['server'], param['output']['database'], 'ConsentedRateVolume', on='CrcActSiteID', mod_date_col='ModifiedDate', username=param['output']['username'], password=param['output']['password'])
# Log
log1 = util.log(param['output']['server'], param['output']['database'], 'log', run_time_start, '1900-01-01', 'ConsentedRateVolume', 'pass', '{} rows updated'.format(len(new_crc_div)), username=param['output']['username'], password=param['output']['password'])
###########################################
### Water use types
wu1 = db.water_use.copy()
## Clean
wu1['RecordNumber'] = wu1['RecordNumber'].str.strip().str.upper()
wu1['UseType'] = wu1['UseType'].str.strip().str.title()
wu1['ConsentedMultiDayVolume'] = pd.to_numeric(wu1['ConsentedMultiDayVolume'], errors='coerce').round()
wu1['ConsentedMultiDayPeriod'] = pd.to_numeric(wu1['ConsentedMultiDayPeriod'], errors='coerce').round()
wu1['ConsentedRate'] = pd.to_numeric(wu1['ConsentedRate'], errors='coerce').round(2)
wu1.loc[wu1['ConsentedMultiDayVolume'] <= 0, 'ConsentedMultiDayVolume'] = np.nan
wu1.loc[wu1['ConsentedMultiDayPeriod'] <= 0, 'ConsentedMultiDayPeriod'] = np.nan
wu1.loc[wu1['ConsentedRate'] <= 0, 'ConsentedRate'] = np.nan
spaces_bool = wu1['UseType'].str[3:5] == ' '
wu1.loc[spaces_bool, 'UseType'] = wu1.loc[spaces_bool, 'UseType'].str[:3] + wu1.loc[spaces_bool, 'UseType'].str[4:]
## Check foreign keys
wu2 = wu1[wu1.RecordNumber.isin(crc1)].copy()
## Split into WAPs by take type equivalent
wu3 = wu2.copy()
wu3['take_type'] = wu3['UseType'].str.replace('Use', 'Take')
wu4 = pd.merge(wu3, mon_min_max1, on=['RecordNumber', 'take_type'])
wu4['ConsentedMultiDayVolume'] = wu4['ConsentedMultiDayVolume'].divide(wu4['wap_count'], 0).round()
wu4['ConsentedRate'] = wu4['ConsentedRate'].divide(wu4['wap_count'], 0).round(2)
wu4.drop(['wap_count', 'take_type'], axis=1, inplace=True)
## Convert Use types to broader categories
types_cat = {}
for key, value in param['misc']['use_types_codes'].items():
for string in value:
types_cat[string] = key
types_check = np.in1d(wu4.WaterUse.unique(), list(types_cat.keys())).all()
if not types_check:
raise ValueError('Some use types are missing in the parameters file. Check the use type table and the parameters file.')
wu4.WaterUse.replace(types_cat, inplace=True)
wu4['WaterUse'] = wu4['WaterUse'].astype('category')
## Join to get the IDs and filter WAPs
wu5 = pd.merge(wu4, act_types1[['ActivityID', 'ActivityName']], left_on='UseType', right_on='ActivityName').drop(['UseType', 'ActivityName'], axis=1)
wu5 = pd.merge(wu5, wap_site, on='WAP').drop('WAP', axis=1)
## Drop duplicate uses
wu5.WaterUse.cat.set_categories(param['misc']['use_types_priorities'], True, inplace=True)
wu5 = wu5.sort_values('WaterUse')
wu6 = wu5.drop_duplicates(['RecordNumber', 'ActivityID', 'SiteID']).copy()
## CrcActSite
crc_act_wu = wu6[['RecordNumber', 'ActivityID', 'SiteID']].copy()
crc_act_wu['SiteActivity'] = True
crc_act_wu['SiteType'] = 'WAP'
# Save results
new_crv_wu, _ = mssql.update_from_difference(crc_act_wu, param['output']['server'], param['output']['database'], 'CrcActSite', on=['RecordNumber', 'ActivityID', 'SiteID'], mod_date_col='ModifiedDate', username=param['output']['username'], password=param['output']['password'])
# Log
log1 = util.log(param['output']['server'], param['output']['database'], 'log', run_time_start, '1900-01-01', 'CrcActSite', 'pass', '{} rows updated'.format(len(new_crv_wu)), username=param['output']['username'], password=param['output']['password'])
# Read db table
act_site0 = mssql.rd_sql(param['output']['server'], param['output']['database'], 'CrcActSite', ['CrcActSiteID', 'RecordNumber', 'ActivityID', 'SiteID'], username=param['output']['username'], password=param['output']['password'])
## ConsentedRateVolume
crv_wu = | pd.merge(wu6, act_site0, on=['RecordNumber', 'ActivityID', 'SiteID']) | pandas.merge |
# @Author: <NAME> <gio>
# @Date: 10-Aug-2021
# @Email: <EMAIL>
# @Project: FeARLesS
# @Filename: 00_xml2csv.py
# @Last modified by: gio
# @Last modified time: 15-Oct-2021
# @License: MIT
import pandas as pd
import xml.etree.ElementTree as et
import tqdm
import os
#####################
### mac gio
# path = "/Volumes/sharpe/data/Vascular_micromass/Opera/TIMELAPSE/" "Timelapse4_041021/"
# folder_raw = os.path.join(path)
### windows nicola
path = os.path.join('data','Vascular_micromass','Opera','TIMELAPSE','Timelapse4_041021')
folder_raw = os.path.join("X:", os.sep, path)
exp_folder = os.path.join(
"gio_Pecam-Sox9_20x-24h_041021__2021-10-04T16_06_44-Measurement_1"
)
# print(folder_raw)
# print(exp_folder)
#####################
xtree = et.parse(os.path.join(folder_raw, exp_folder, "Images", "Index.idx.xml"))
xroot = xtree.getroot()
images = xroot.findall("{http://www.perkinelmer.com/PEHH/HarmonyV5}Images")[0]
print("images --> ", len(images))
df = pd.DataFrame(
{
"filename": [],
"Xpos": [],
"Ypos": [],
"Zpos": [],
"row": [],
"col": [],
"field": [],
"plane": [],
"channel": [],
"chName": [],
"expTime": [],
}
)
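# Each <Image> element exposes its metadata through namespace-qualified child tags, hence the repeated
# fully-qualified find() calls in the loop below. A small helper along these lines (an optional sketch,
# not used by this script) could remove the repetition:
#
# NS = "{http://www.perkinelmer.com/PEHH/HarmonyV5}"
# def tag_text(image, tag):
#     return image.find(NS + tag).text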
for i, image in tqdm.tqdm(enumerate(images.iter("{http://www.perkinelmer.com/PEHH/HarmonyV5}Image"))):
# print(image.tag, image.attrib)
row = {}
x = image.find("{http://www.perkinelmer.com/PEHH/HarmonyV5}URL")
row["filename"] = x.text
x = image.find("{http://www.perkinelmer.com/PEHH/HarmonyV5}PositionX")
row["Xpos"] = float(x.text)
x = image.find("{http://www.perkinelmer.com/PEHH/HarmonyV5}PositionY")
row["Ypos"] = float(x.text)
x = image.find("{http://www.perkinelmer.com/PEHH/HarmonyV5}PositionZ")
row["Zpos"] = float(x.text)
x = image.find("{http://www.perkinelmer.com/PEHH/HarmonyV5}Row")
row["row"] = int(x.text)
x = image.find("{http://www.perkinelmer.com/PEHH/HarmonyV5}Col")
row["col"] = int(x.text)
x = image.find("{http://www.perkinelmer.com/PEHH/HarmonyV5}FieldID")
row["field"] = int(x.text)
x = image.find("{http://www.perkinelmer.com/PEHH/HarmonyV5}PlaneID")
row["plane"] = int(x.text)
x = image.find("{http://www.perkinelmer.com/PEHH/HarmonyV5}ChannelID")
row["channel"] = int(x.text)
x = image.find("{http://www.perkinelmer.com/PEHH/HarmonyV5}TimepointID")
row["timepoint"] = int(x.text)
x = image.find("{http://www.perkinelmer.com/PEHH/HarmonyV5}ChannelName")
row["chName"] = x.text
x = image.find("{http://www.perkinelmer.com/PEHH/HarmonyV5}ExposureTime")
row["expTime"] = float(x.text)
df = df.append( | pd.Series(row) | pandas.Series |
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import logging
from typing import Any, Dict, List, Optional
import pandas as pd
try:
from fbprophet import Prophet
_no_prophet = False
except ImportError:
_no_prophet = True
Prophet = Dict[str, Any] # for Pyre
from kats.consts import Params, TimeSeriesData
from kats.models.model import Model
from kats.utils.parameter_tuning_utils import (
get_default_prophet_parameter_search_space,
)
class ProphetParams(Params):
"""Parameter class for Prophet model
This is the parameter class for prophet model, it contains all necessary
parameters as definied in Prophet implementation:
https://github.com/facebook/prophet/blob/master/python/prophet/forecaster.py
Attributes:
growth: String 'linear' or 'logistic' to specify a linear or logistic
trend.
changepoints: List of dates at which to include potential changepoints. If
not specified, potential changepoints are selected automatically.
n_changepoints: Number of potential changepoints to include. Not used
if input `changepoints` is supplied. If `changepoints` is not supplied,
then n_changepoints potential changepoints are selected uniformly from
the first `changepoint_range` proportion of the history.
changepoint_range: Proportion of history in which trend changepoints will
be estimated. Defaults to 0.8 for the first 80%. Not used if
`changepoints` is specified.
yearly_seasonality: Fit yearly seasonality.
Can be 'auto', True, False, or a number of Fourier terms to generate.
weekly_seasonality: Fit weekly seasonality.
Can be 'auto', True, False, or a number of Fourier terms to generate.
daily_seasonality: Fit daily seasonality.
Can be 'auto', True, False, or a number of Fourier terms to generate.
holidays: pd.DataFrame with columns holiday (string) and ds (date type)
and optionally columns lower_window and upper_window which specify a
range of days around the date to be included as holidays.
lower_window=-2 will include 2 days prior to the date as holidays. Also
optionally can have a column prior_scale specifying the prior scale for
that holiday.
seasonality_mode: 'additive' (default) or 'multiplicative'.
seasonality_prior_scale: Parameter modulating the strength of the
seasonality model. Larger values allow the model to fit larger seasonal
fluctuations, smaller values dampen the seasonality. Can be specified
for individual seasonalities using add_seasonality.
holidays_prior_scale: Parameter modulating the strength of the holiday
components model, unless overridden in the holidays input.
changepoint_prior_scale: Parameter modulating the flexibility of the
automatic changepoint selection. Large values will allow many
changepoints, small values will allow few changepoints.
mcmc_samples: Integer, if greater than 0, will do full Bayesian inference
with the specified number of MCMC samples. If 0, will do MAP
estimation.
interval_width: Float, width of the uncertainty intervals provided
for the forecast. If mcmc_samples=0, this will be only the uncertainty
in the trend using the MAP estimate of the extrapolated generative
model. If mcmc.samples>0, this will be integrated over all model
parameters, which will include uncertainty in seasonality.
uncertainty_samples: Number of simulated draws used to estimate
uncertainty intervals. Settings this value to 0 or False will disable
uncertainty estimation and speed up the calculation.
cap: capacity, provided for logistic growth
floor: floor, the fcst value must be greater than the specified floor
        custom_seasonalities: customized seasonalities, dict with keys
"name", "period", "fourier_order"
extra_regressors: additional regressors used for fitting, each regressor
is a dict with keys "name" and "value"
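
    Example (illustrative only; the values below are arbitrary):

        params = ProphetParams(
            growth="linear",
            seasonality_mode="multiplicative",
            custom_seasonalities=[
                {"name": "monthly", "period": 30.5, "fourier_order": 5}
            ],
        )
        params.validate_params()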
"""
growth: str
changepoints: Optional[List[float]]
n_changepoints: int
changepoint_range: float
yearly_seasonality: str
weekly_seasonality: str
daily_seasonality: str
holidays: Optional[pd.DataFrame]
seasonality_mode: str
seasonality_prior_scale: float
holidays_prior_scale: float
changepoint_prior_scale: float
mcmc_samples: int
interval_width: float
uncertainty_samples: int
cap: Optional[float]
floor: Optional[float]
custom_seasonalities: List[Dict[str, Any]]
extra_regressors: List[Dict[str, Any]]
def __init__(
self,
growth: str = "linear",
changepoints: Optional[List[float]] = None,
n_changepoints: int = 25,
changepoint_range: float = 0.8,
yearly_seasonality: str = "auto",
weekly_seasonality: str = "auto",
daily_seasonality: str = "auto",
holidays: Optional[pd.DataFrame] = None,
seasonality_mode: str = "additive",
seasonality_prior_scale: float = 10.0,
holidays_prior_scale: float = 10.0,
changepoint_prior_scale: float = 0.05,
mcmc_samples: int = 0,
interval_width: float = 0.80,
uncertainty_samples: int = 1000,
cap: Optional[float] = None,
floor: Optional[float] = None,
custom_seasonalities: Optional[List[Dict[str, Any]]] = None,
extra_regressors: Optional[List[Dict[str, Any]]] = None,
) -> None:
if _no_prophet:
raise RuntimeError("requires fbprophet to be installed")
super().__init__()
self.growth = growth
self.changepoints = changepoints
self.n_changepoints = n_changepoints
self.changepoint_range = changepoint_range
self.yearly_seasonality = yearly_seasonality
self.weekly_seasonality = weekly_seasonality
self.daily_seasonality = daily_seasonality
self.holidays = holidays
self.seasonality_mode = seasonality_mode
self.seasonality_prior_scale = seasonality_prior_scale
self.holidays_prior_scale = holidays_prior_scale
self.changepoint_prior_scale = changepoint_prior_scale
self.mcmc_samples = mcmc_samples
self.interval_width = interval_width
self.uncertainty_samples = uncertainty_samples
self.cap = cap
self.floor = floor
self.custom_seasonalities = (
[] if custom_seasonalities is None else custom_seasonalities
)
self.extra_regressors = [] if extra_regressors is None else extra_regressors
logging.debug(
"Initialized Prophet with parameters. "
"growth:{growth},"
"changepoints:{changepoints},"
"n_changepoints:{n_changepoints},"
"changepoint_range:{changepoint_range},"
"yearly_seasonality:{yearly_seasonality},"
"weekly_seasonality:{weekly_seasonality},"
"daily_seasonality:{daily_seasonality},"
"holidays:{holidays},"
"seasonality_mode:{seasonality_mode},"
"seasonality_prior_scale:{seasonality_prior_scale},"
"holidays_prior_scale:{holidays_prior_scale},"
"changepoint_prior_scale:{changepoint_prior_scale},"
"mcmc_samples:{mcmc_samples},"
"interval_width:{interval_width},"
"uncertainty_samples:{uncertainty_samples},"
"cap:{cap},"
"floor:{floor},"
"custom_seasonalities:{custom_seasonalities},"
"extra_regressors:{extra_regressors}".format(
growth=growth,
changepoints=changepoints,
n_changepoints=n_changepoints,
changepoint_range=changepoint_range,
yearly_seasonality=yearly_seasonality,
weekly_seasonality=weekly_seasonality,
daily_seasonality=daily_seasonality,
holidays=holidays,
seasonality_mode=seasonality_mode,
seasonality_prior_scale=seasonality_prior_scale,
holidays_prior_scale=holidays_prior_scale,
changepoint_prior_scale=changepoint_prior_scale,
mcmc_samples=mcmc_samples,
interval_width=interval_width,
uncertainty_samples=uncertainty_samples,
cap=cap,
floor=floor,
custom_seasonalities=custom_seasonalities,
extra_regressors=None
if extra_regressors is None
else [x["name"] for x in extra_regressors],
)
)
def validate_params(self) -> None:
"""validate Prophet parameters
This method validates some key parameters including growth rate
and custom_seasonalities.
"""
# cap must be given when using logistic growth
if (self.growth == "logistic") and (self.cap is None):
msg = "Capacity must be provided for logistic growth"
logging.error(msg)
raise ValueError(msg)
# If custom_seasonalities passed, ensure they contain the required keys.
reqd_seasonality_keys = ["name", "period", "fourier_order"]
if not all(
req_key in seasonality
for req_key in reqd_seasonality_keys
for seasonality in self.custom_seasonalities
):
msg = f"Custom seasonality dicts must contain the following keys:\n{reqd_seasonality_keys}"
logging.error(msg)
raise ValueError(msg)
# If extra_regressors passed, ensure they contain the required keys.
reqd_regressor_keys = ["name", "value"]
if not all(
req_key in regressor
for req_key in reqd_regressor_keys
for regressor in self.extra_regressors
):
msg = f"Extra regressor dicts must contain the following keys:\n{reqd_regressor_keys}"
logging.error(msg)
raise ValueError(msg)
logging.info("Method validate_params() is not fully implemented.")
pass
class ProphetModel(Model[ProphetParams]):
"""Model class for Prophet
This class provides fit, predict, and plot methods for Prophet model
Attributes:
data: the input time series data as in :class:`kats.consts.TimeSeriesData`
        params: the parameter class defined with `ProphetParams`
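
    Example (illustrative sketch; assumes `ts` is a univariate
    :class:`kats.consts.TimeSeriesData` and fbprophet is installed):

        params = ProphetParams(seasonality_mode="multiplicative")
        model = ProphetModel(data=ts, params=params)
        model.fit()
        fcst = model.predict(steps=30, freq="D")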
"""
model: Optional[Prophet] = None
freq: Optional[str] = None
def __init__(self, data: TimeSeriesData, params: ProphetParams) -> None:
super().__init__(data, params)
if _no_prophet:
raise RuntimeError("requires fbprophet to be installed")
if not isinstance(self.data.value, pd.Series):
msg = "Only support univariate time series, but get {type}.".format(
type=type(self.data.value)
)
logging.error(msg)
raise ValueError(msg)
def fit(self, **kwargs: Any) -> None:
"""fit Prophet model
Args:
None.
Returns:
The fitted prophet model object
"""
# prepare dataframe for Prophet.fit()
df = pd.DataFrame({"ds": self.data.time, "y": self.data.value})
logging.debug(
"Call fit() with parameters: "
"growth:{growth},"
"changepoints:{changepoints},"
"n_changepoints:{n_changepoints},"
"changepoint_range:{changepoint_range},"
"yearly_seasonality:{yearly_seasonality},"
"weekly_seasonality:{weekly_seasonality},"
"daily_seasonality:{daily_seasonality},"
"holidays:{holidays},"
"seasonality_mode:{seasonality_mode},"
"seasonality_prior_scale:{seasonality_prior_scale},"
"holidays_prior_scale:{holidays_prior_scale},"
"changepoint_prior_scale:{changepoint_prior_scale},"
"mcmc_samples:{mcmc_samples},"
"interval_width:{interval_width},"
"uncertainty_samples:{uncertainty_samples},"
"cap:{cap},"
"floor:{floor},"
"custom_seasonalities:{custom_seasonalities},"
"extra_regressors:{extra_regressors}".format(
growth=self.params.growth,
changepoints=self.params.changepoints,
n_changepoints=self.params.n_changepoints,
changepoint_range=self.params.changepoint_range,
yearly_seasonality=self.params.yearly_seasonality,
weekly_seasonality=self.params.weekly_seasonality,
daily_seasonality=self.params.daily_seasonality,
holidays=self.params.holidays,
seasonality_mode=self.params.seasonality_mode,
seasonality_prior_scale=self.params.seasonality_prior_scale,
holidays_prior_scale=self.params.holidays_prior_scale,
changepoint_prior_scale=self.params.changepoint_prior_scale,
mcmc_samples=self.params.mcmc_samples,
interval_width=self.params.interval_width,
uncertainty_samples=self.params.uncertainty_samples,
cap=self.params.cap,
floor=self.params.floor,
custom_seasonalities=self.params.custom_seasonalities,
extra_regressors=None
if self.params.extra_regressors is None
else [x["name"] for x in self.params.extra_regressors],
),
)
prophet = Prophet(
growth=self.params.growth,
changepoints=self.params.changepoints,
n_changepoints=self.params.n_changepoints,
changepoint_range=self.params.changepoint_range,
yearly_seasonality=self.params.yearly_seasonality,
weekly_seasonality=self.params.weekly_seasonality,
daily_seasonality=self.params.daily_seasonality,
holidays=self.params.holidays,
seasonality_mode=self.params.seasonality_mode,
seasonality_prior_scale=self.params.seasonality_prior_scale,
holidays_prior_scale=self.params.holidays_prior_scale,
changepoint_prior_scale=self.params.changepoint_prior_scale,
mcmc_samples=self.params.mcmc_samples,
interval_width=self.params.interval_width,
uncertainty_samples=self.params.uncertainty_samples,
)
if self.params.growth == "logistic":
# assign cap to a new col as Prophet required
df["cap"] = self.params.cap
# Adding floor if available
if self.params.floor is not None:
df["floor"] = self.params.floor
# Add any specified custom seasonalities.
for custom_seasonality in self.params.custom_seasonalities:
prophet.add_seasonality(**custom_seasonality)
# Add any extra regressors
if self.params.extra_regressors is not None:
for regressor in self.params.extra_regressors:
prophet.add_regressor(
**{k: v for k, v in regressor.items() if k not in ["value"]}
)
df[regressor["name"]] = pd.Series(regressor["value"], index=df.index)
self.model = prophet.fit(df=df)
logging.info("Fitted Prophet model. ")
def predict(
self, steps: int, *args: Any, include_history: bool = False, **kwargs: Any
) -> pd.DataFrame:
"""predict with fitted Prophet model
Args:
steps: the steps or length of prediction horizon
include_history: if include the historical data, default as False
Returns:
The predicted dataframe with following columns:
`time`, `fcst`, `fcst_lower`, and `fcst_upper`
"""
model = self.model
if model is None:
raise ValueError("Call fit() before predict().")
logging.debug(
"Call predict() with parameters. "
"steps:{steps}, kwargs:{kwargs}".format(steps=steps, kwargs=kwargs)
)
self.freq = kwargs.get("freq", | pd.infer_freq(self.data.time) | pandas.infer_freq |
import torch
import argparse
import pandas as pd
import numpy as np
from vel.rl.env.classic_atari import ClassicAtariEnv
from vel.rl.models.policy_gradient_model import PolicyGradientModelFactory
from vel.rl.models.backbone.nature_cnn import NatureCnnFactory
from vel.openai.baselines.common.atari_wrappers import FrameStack
def evaluate_a2c(checkpoint_file_path, environment, optimization, takes=10):
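    """
    Evaluate a saved A2C policy checkpoint on an Atari environment for `takes` episodes and write
    the collected per-episode rewards to a CSV file.

    Note: `record_take` and `create_filename` are assumed to be defined elsewhere in this module;
    `optimization` is only used when building the output file name.
    """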
model_checkpoint = torch.load(checkpoint_file_path)
device = torch.device('cuda:0')
env = FrameStack(
ClassicAtariEnv(environment).instantiate(preset='raw'), k=4
)
model = PolicyGradientModelFactory(
backbone=NatureCnnFactory(input_width=84, input_height=84, input_channels=4)
).instantiate(action_space=env.action_space)
model.load_state_dict(model_checkpoint)
model = model.to(device)
model.eval()
rewards = []
lengths = []
all_rewards = []
for i in range(takes):
result, eval_rewards = record_take(model, env, device)
rewards.append(result['r'])
lengths.append(result['l'])
print(f'Num rewards in evaluation: {len(eval_rewards)}')
all_rewards.append(eval_rewards)
eval_results = pd.concat([pd.Series(x) for x in all_rewards], axis=1)
filename = create_filename(optimization, environment)
eval_results.to_csv(filename, index=False)
print( | pd.DataFrame({'lengths': lengths, 'rewards': rewards}) | pandas.DataFrame |
# diffexp.py
# This script is for identifying super-enhancer associated genes that are differentially expressed between two stages
from DYSE_main import formatFolder
import pandas as pd
import subprocess
import argparse
def diffexp(deFile, SEgenes):
de_list = [x.rstrip().split('\t') for x in deFile]
col = []
for elem in de_list[0]:
if elem != '':
col.append(elem)
diffexp_df = pd.DataFrame(de_list[1:], columns=col)
geneList = set(diffexp_df['gene_id'].tolist())
st_diffexp_genes = list(geneList & set(SEgenes))
wanted = diffexp_df.loc[diffexp_df['gene'].isin(st_diffexp_genes)]
return wanted
def main():
'''
main run call
'''
usage = '%(prog)s [options] -i [INPUT_FILES] -d [RNA-SEQ_DIFF_EXP_FILE] -o [OUTPUT_FOLDER]'
parser = argparse.ArgumentParser(prog='DYSE_diffexp.py', usage=usage)
# Required flags
parser.add_argument("-i", "--i", dest="input", default=None,
help="Comma separated list of SEgene files")
parser.add_argument("-d", "--diffexp", dest="deFile", default=None,
help="RNA-seq differential expression file that includes stages of interest")
parser.add_argument("-o", "--out", dest="out", default=None,
help="Output folder")
# RETRIEVING FLAGS
options = parser.parse_args()
if not options.input or not options.deFile or not options.out:
print("Hi there\nYour code seems to be missing some arguments")
parser.print_help()
exit()
out_dir = formatFolder(options.out, True)
inputFiles = options.input.split(',')
deFile = open(options.deFile).read().rstrip('\n').split('\n')
for stage in inputFiles:
SEgenes = [item.split('\t')[0] for item in open(stage).read().rstrip('\n').split('\n')]
diffexpSE = diffexp(deFile, SEgenes)
temp = pd.DataFrame(columns=list(diffexpSE))
last_col = []
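    # Keep only the significant genes and label each one as up- or down-regulated in sample_2
    # according to the sign of log2(fold_change).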
for index,row in diffexpSE.iterrows():
if row['significant'] == 'yes' and float(row['log2(fold_change)']) > 0:
last_col.append('upreg in ' + row['sample_2'])
temp = temp.append(row, ignore_index=True)
elif row['significant'] == 'yes' and float(row['log2(fold_change)']) < 0:
last_col.append('downreg in ' + row['sample_2'])
temp = temp.append(row, ignore_index=True)
last_col_df = pd.DataFrame({'description': last_col})
tofile = pd.concat([temp, last_col_df], axis=1, ignore_index=True)
tofile.columns = list(temp)+list(last_col_df)
fname = out_dir+stage.split('/')[-1].split('.')[0]+'_SEgenes_diffexp.xls'
subprocess.call(['touch', fname])
| pd.DataFrame.to_csv(tofile, path_or_buf=fname, sep='\t', header=True, index=False, line_terminator='\n') | pandas.DataFrame.to_csv |
import pandas as pd
import numpy as np
import pickle
import shap
from lightgbm import LGBMClassifier
def get_new_prediction(bus_line, hour, month, day, bus_carrying_cap, city, temp, pressure, bus_age, total_rain):
'''
This function calculates new predictions for a given bus line, hour, month, day, bus carrying capacity, bus age
(years), city, temperature (degrees celcius), pressure (kPA) and rain (mm). Assumes that a file named
final_fitted.pickle is in the results/ml_model directory.
This is solely for use in the interactive report so the user can dynamically generate a graph
as needed by querying results from the model. Arguments are fed to this function via. user
selected input in the report.
Parameters:
bus_line: A str that represents one of the bus lines in the Greater Vancouver area.
hour: An integer 0-23 representing a particular hour of the day.
month: An integer 1-12 representing a particular month of the year.
day: A str (Mon, Tue, Wed, Thu, Fri, Sat, Sun) that represents a particular day
of the week.
bus_carrying_cap: An integer representing the carrying capacity of a bus.
city: A str representing the city of interest.
temp: A float representing the temperature in degrees celsius.
pressure: A float representing the atmospheric pressure in kPa
bus_age: An integer representing the bus age in years.
total_rain: A float representing the total rain in mm.
Returns:
dict
A dictionary with keys shap, predicted, and column_names containing the
SHAP scores (numpy array), predicted 0/1
scores (numpy array), and column names used in the model fit (list).
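
    Example (illustrative argument values only):

        res = get_new_prediction(
            bus_line="99", hour=8, month=1, day="Mon", bus_carrying_cap=120,
            city="Vancouver", temp=4.5, pressure=101.2, bus_age=5, total_rain=2.0)
        res["predicted"], res["shap"], res["column_names"]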
'''
shuttles = ["23", "31", "42", "68", "103", "105", "109", "131", "132", "146",
"147", "148", "157", "169", "170", "171", "172", "173", "174", "175", "180", "181",
"182", "184", "185", "186", "187", "189", "215", "227", "251", "252", "256", "262",
"280", "281", "282", "310", "322", "360", "361", "362", "363", "370", "371", "372",
"373", "412", "413", "414", "416", "560", "561", "562", "563", "564", "609", "614",
"616", "617", "618", "619", "719", "722", "733", "741", "743", "744", "745", "746", "748", "749"]
# The values that are held constant: just use the means/modes
new_data = pd.DataFrame({
'hour': pd.Series(hour, dtype='int'),
'day_of_week': pd.Series(day, dtype='str'),
'bus_age': pd.Series(bus_age, dtype='float'),
'bus_carry_capacity': pd.Series(bus_carrying_cap if bus_carrying_cap != "NA" else np.nan, dtype='float'),
'line_no': pd.Series(bus_line, dtype='str'),
'city': pd.Series(city, dtype='str'),
'pressure': pd.Series(pressure, dtype='float'),
'rel_hum': pd.Series(93, dtype='float'),
'elev': pd.Series(2.5, dtype='float'),
'temp': pd.Series(temp, dtype='float'),
'visib': pd.Series(48.3, dtype='float'),
'wind_dir': pd.Series(0, dtype='float'),
'wind_spd': pd.Series(2, dtype='float'),
'total_precip': | pd.Series(total_rain, dtype='float') | pandas.Series |
import pandas as pd
import numpy as np
from pathlib import Path
from datetime import datetime as dt
def mergeManagers(managers, gameLogs):
#Sum up doubled data
    managers = managers.groupby(['yearID','playerID'], as_index=False)[['Games','Wins','Losses']].sum()
#Get visiting managers
    visitingManagers = gameLogs[['row','Date','Visiting team manager ID']].copy()
visitingManagers['yearID'] = pd.DatetimeIndex(pd.to_datetime(visitingManagers['Date'])).year-1
visitingManagers = pd.merge(visitingManagers, managers, left_on=['yearID','Visiting team manager ID'], right_on=['yearID','playerID'], how="left")
#Get home managers
    homeManagers = gameLogs[['row','Date','Home team manager ID']].copy()
homeManagers['yearID'] = pd.DatetimeIndex(pd.to_datetime(homeManagers['Date'])).year-1
homeManagers = pd.merge(homeManagers, managers, left_on=['yearID','Home team manager ID'], right_on=['yearID','playerID'], how="left")
#Merge managers
homes = homeManagers[['row','Games','Wins','Losses']]
visitings = visitingManagers[['row','Games','Wins','Losses']]
return pd.merge(homes, visitings, on='row', suffixes=(' home manager',' visiting manager'))
def mergePitchings(pitchers, gameLogs):
#Get aggregators for doubled data
aggregators = {}
for column in pitchers.drop(columns=['yearID','playerID']).columns:
if column.find("average")>-1:
aggregators[column] = 'mean'
else:
aggregators[column] = 'sum'
#Aggregate doubled data
pitchers = pitchers.groupby(['yearID','playerID'], as_index=False).agg(aggregators)
#Get visiting pitchers
    visitingPitchers = gameLogs[['row','Date','Visiting starting pitcher ID']].copy()
visitingPitchers['yearID'] = pd.DatetimeIndex(pd.to_datetime(visitingPitchers['Date'])).year-1
visitingPitchers = pd.merge(visitingPitchers, pitchers, left_on=['yearID','Visiting starting pitcher ID'], right_on=['yearID','playerID'], how="left")
#Get home pitchers
    homePitchers = gameLogs[['row','Date','Home starting pitcher ID']].copy()
homePitchers['yearID'] = pd.DatetimeIndex(pd.to_datetime(homePitchers['Date'])).year-1
homePitchers = pd.merge(homePitchers, pitchers, left_on=['yearID','Home starting pitcher ID'], right_on=['yearID','playerID'], how="left")
#Merge pitchers
homes = homePitchers.drop(columns=['yearID','Home starting pitcher ID','playerID','Date'])
visitings = visitingPitchers.drop(columns=['yearID','Visiting starting pitcher ID','playerID','Date'])
return pd.merge(homes, visitings, on='row', suffixes=(' home pitcher',' visiting pitcher'))
def mergePeople(people, gameLogs):
#Encode people
people['bats right'] = (people['bats']=="R") | (people['bats']=="B")
people['bats left'] = (people['bats']=="L") | (people['bats']=="B")
people['throws right'] = people['throws']=="R"
people = people.drop(columns=['bats','throws'])
#Merge people
allPeople = []
for IDColumn in gameLogs.columns:
if IDColumn.find("starting")>-1:
merged = pd.merge(gameLogs[['row','Date',IDColumn]], people, how="left", left_on=[IDColumn], right_on=['playerID'])
merged['age'] = (pd.to_datetime(merged['Date']) - pd.to_datetime(merged['birthdate'])) / np.timedelta64(1, 'Y')
newColumns = {"age":IDColumn.replace(" ID"," "+" age")}
for column in people.drop(columns=['playerID','birthdate']).columns:
newColumns[column] = IDColumn.replace(" ID"," "+str(column))
merged = merged.rename(columns=newColumns)
allPeople.append(merged[['row']+list(newColumns.values())])
mergedPeople = gameLogs['row']
for merSal in allPeople:
mergedPeople = pd.merge(mergedPeople, merSal, how="left", on='row')
return mergedPeople
def mergeTeams(teams, gameLogs):
#Encode team data
teams.loc[(teams['Division winner'] == 'N'), 'Division winner'] = 0
teams.loc[(teams['Division winner'] == 'Y'), 'Division winner'] = 1
teams.loc[(teams['League winner'] == 'N'), 'League winner'] = 0
teams.loc[(teams['League winner'] == 'Y'), 'League winner'] = 1
teams.loc[(teams['World series winner'] == 'N'), 'World series winner'] = 0
teams.loc[(teams['World series winner'] == 'Y'), 'World series winner'] = 1
teams.loc[(teams['Division'] == 'W'), 'Division'] = 0
teams.loc[(teams['Division'] == 'E'), 'Division'] = 1
teams.loc[(teams['Division'] == 'C'), 'Division'] = 2
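    # Pythagorean expectation estimates a team's win fraction from runs scored (RS) and allowed (RA):
    # RS**1.83 / (RS**1.83 + RA**1.83); 1.83 is the commonly used refinement of Bill James' original exponent of 2.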
teams['Pythagorean_expectation'] = (teams['Runs scored'] ** 1.83) / (teams['Runs scored'] ** 1.83 + teams['Opponents runs scored'] ** 1.83)
#Merge teams
mergedTeams = gameLogs[['row','Date','Visiting team','Home team']]
mergedTeams['Date'] = pd.to_datetime(mergedTeams['Date']).dt.year-1
mergedTeams = pd.merge(mergedTeams, teams, left_on=['Date', 'Visiting team'], right_on=['yearID', 'teamID'], how='left')
mergedTeams = pd.merge(mergedTeams, teams, left_on=['Date', 'Home team'], right_on=['yearID', 'teamID'], how='left', suffixes=[' visiting', ' home'])
return mergedTeams[['row', 'Division visiting', 'Rank visiting', 'Games visiting', 'Wins visiting', 'Losses visiting', 'Division winner visiting',
'League winner visiting', 'World series winner visiting', 'Runs scored visiting', 'At bats visiting',
'Hits by batters visiting', 'Doubles visiting', 'Triples visiting', 'Homeruns visiting', 'Walks visiting', 'Strikeouts visiting',
'Stolen bases visiting', 'Cought stealing visiting', 'Batters hit by pitch visiting', 'Sacrifice flies visiting',
'Opponents runs scored visiting', 'Earned runs allowed visiting', 'Earned runs average visiting', 'Shutouts visiting',
'Saves visiting', 'Hits allowed visiting', 'Homeruns allowed visiting', 'Walks allowed visiting',
'Strikeouts allowed visiting', 'Errors visiting', 'Double plays visiting', 'Fielding percentage visiting',
'Pythagorean_expectation visiting', 'Division home', 'Rank home', 'Games home', 'Wins home', 'Losses home',
'Division winner home', 'League winner home', 'World series winner home', 'Runs scored home',
'At bats home', 'Hits by batters home', 'Doubles home', 'Triples home', 'Homeruns home',
'Walks home', 'Strikeouts home', 'Stolen bases home', 'Cought stealing home',
'Batters hit by pitch home', 'Sacrifice flies home', 'Opponents runs scored home',
'Earned runs allowed home', 'Earned runs average home', 'Shutouts home', 'Saves home',
'Hits allowed home', 'Homeruns allowed home', 'Walks allowed home', 'Strikeouts allowed home',
'Errors home', 'Double plays home', 'Fielding percentage home', 'Pythagorean_expectation home']]
def createScorings(gameLogs):
scoreLogs = gameLogs[['row','Visiting team','Home team','Visiting score','Home score']]
scoreLogs['Home team win'] = scoreLogs['Home score']>scoreLogs['Visiting score']
scoreLogs['Home team odd'] = (scoreLogs['Home score'].replace(0,1))/(scoreLogs['Visiting score'].replace(0,1))
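    # replace(0,1) guards against division by zero (and a zero numerator) when a team is shut out,
    # so the "odd" is simply the home/visiting score ratio with scores floored at 1.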
homeTeams = {}
for team in scoreLogs['Home team'].unique():
homeTeams[team] = scoreLogs[scoreLogs['Home team']==team]
vistTeams = {}
for team in scoreLogs['Visiting team'].unique():
vistTeams[team] = scoreLogs[scoreLogs['Visiting team']==team]
homeTVers = {}
for hTeam in homeTeams:
homeTeams[hTeam]['Home win ratio'] = homeTeams[hTeam].loc[:,'Home team win'].rolling(10).mean().shift(1)
homeTeams[hTeam]['Home score ratio'] = homeTeams[hTeam].loc[:,'Home score'].rolling(10).mean().shift(1)
homeTeams[hTeam]['Home odd ratio'] = homeTeams[hTeam].loc[:,'Home team odd'].rolling(10).mean().shift(1)
temp = homeTeams[hTeam]
versus = {}
for team in temp['Visiting team'].unique():
versus[team] = temp[temp['Visiting team']==team]
for vTeam in versus:
versus[vTeam]['Home versus win ratio'] = versus[vTeam].loc[:,'Home team win'].rolling(5).mean().shift(1)
versus[vTeam]['Home versus score ratio'] = versus[vTeam].loc[:,'Home score'].rolling(5).mean().shift(1)
versus[vTeam]['Home versus odd ratio'] = versus[vTeam].loc[:,'Home team odd'].rolling(5).mean().shift(1)
homeTVers[hTeam] = pd.concat(versus)
vistTVers = {}
for vTeam in vistTeams:
vistTeams[vTeam]['Visiting win ratio'] = (1-vistTeams[vTeam].loc[:,'Home team win']).rolling(10).mean().shift(1)
vistTeams[vTeam]['Visiting score ratio'] = vistTeams[vTeam].loc[:,'Visiting score'].rolling(10).mean().shift(1)
vistTeams[vTeam]['Visiting odd ratio'] = (1/vistTeams[vTeam].loc[:,'Home team odd']).rolling(10).mean().shift(1)
temp = vistTeams[vTeam]
versus = {}
for team in temp['Home team'].unique():
versus[team] = temp[temp['Home team']==team]
for hTeam in versus:
versus[hTeam]['Visiting versus win ratio'] = (1-versus[hTeam].loc[:,'Home team win']).rolling(5).mean().shift(1)
versus[hTeam]['Visiting versus score ratio'] = versus[hTeam].loc[:,'Visiting score'].rolling(5).mean().shift(1)
versus[hTeam]['Visiting versus odd ratio'] = (1/versus[hTeam].loc[:,'Home team odd']).rolling(5).mean().shift(1)
vistTVers[vTeam] = pd.concat(versus)
merged = pd.merge(pd.concat(vistTeams)[['row'
,'Visiting win ratio'
,'Visiting score ratio'
,'Visiting odd ratio']]
,pd.concat(homeTVers)[['row'
,'Home versus win ratio'
,'Home versus score ratio'
,'Home versus odd ratio']]
, on='row')
merged = pd.merge(pd.concat(vistTVers)[['row'
,'Visiting versus win ratio'
,'Visiting versus score ratio'
,'Visiting versus odd ratio']]
,merged, on='row')
merged = pd.merge(pd.concat(homeTeams)[['row'
,'Home win ratio'
,'Home score ratio'
,'Home odd ratio']]
,merged, on='row')
return pd.merge(scoreLogs[['row','Visiting score','Home score','Home team win','Home team odd']],merged, on='row').fillna(0)
def mergeFieldings(fieldings, gameLogs):
fieldings = fieldings.groupby(['yearID','playerID'], as_index=False).sum()
gameLogs['yearID'] = pd.DatetimeIndex(pd.to_datetime(gameLogs['Date'])).year-1
allPlayers = []
for playerColumn in gameLogs.columns:
if playerColumn.find("starting")>-1:
merged = pd.merge(gameLogs[['row','yearID',playerColumn]], fieldings, how="left", left_on=[playerColumn,'yearID'], right_on=['playerID','yearID'])
newColumns = {}
for column in fieldings.drop(columns=['playerID','yearID']).columns:
newColumns[column] = playerColumn.replace(" ID"," "+str(column))
merged = merged.rename(columns=newColumns)
allPlayers.append(merged[['row']+list(newColumns.values())])
mergedFieldings = gameLogs['row']
for playerData in allPlayers:
mergedFieldings = pd.merge(mergedFieldings, playerData, how="left", on='row')
return mergedFieldings
def mergeBattings(battings, gameLogs):
battings = battings.groupby(['yearID','playerID'], as_index=False).sum()
gameLogs['yearID'] = pd.DatetimeIndex(pd.to_datetime(gameLogs['Date'])).year-1
allPlayers = []
for playerColumn in gameLogs.columns:
if playerColumn.find("starting")>-1:
merged = pd.merge(gameLogs[['row','yearID',playerColumn]], battings, how="left", left_on=[playerColumn,'yearID'], right_on=['playerID','yearID'])
newColumns = {}
for column in battings.drop(columns=['playerID','yearID']).columns:
newColumns[column] = playerColumn.replace(" ID"," "+str(column))
merged = merged.rename(columns=newColumns)
allPlayers.append(merged[['row']+list(newColumns.values())])
mergedBattings = gameLogs['row']
for playerData in allPlayers:
mergedBattings = pd.merge(mergedBattings, playerData, how="left", on='row')
return mergedBattings
path = Path
gameLogs = pd.read_csv(path+r'\Filtered\_mlb_filtered_GameLogs.csv', index_col=False)
people = pd.read_csv(path+r'\Filtered\_mlb_filtered_People.csv', index_col=False)
teams = pd.read_csv(path+r'\Filtered\_mlb_filtered_Teams.csv', index_col=False)
managers = pd.read_csv(path+r'\Filtered\_mlb_filtered_Managers.csv', index_col=False)
pitchings = pd.read_csv(path+r'\Filtered\_mlb_filtered_Pitching.csv', index_col=False)
battings = pd.read_csv(path+r'\Filtered\_mlb_filtered_Batting.csv', index_col=False)
fieldings = | pd.read_csv(path+r'\Filtered\_mlb_filtered_Fielding.csv', index_col=False) | pandas.read_csv |
#!/usr/bin/env python
# coding: utf-8
# In[14]:
# Load libraries
get_ipython().run_line_magic('matplotlib', 'inline')
import matplotlib.pyplot as plt
import seaborn as sns
import pandas as pd
# In[2]:
# Work on 'members' dataset
MEMBERS_FILEPATH = 'data/members_v3.csv'
members = pd.read_csv(MEMBERS_FILEPATH, header=0)
# In[3]:
members.info()
display(members.head())
display(members.isnull().sum())
# In[4]:
# Cast registration_init_time to datetime
members['registration_init_time'] = pd.to_datetime(
members['registration_init_time'], format='%Y%m%d'
)
# Day should be 'relative' to some 0-coordinate
min_date = members['registration_init_time'].min()
members['registration_init_time'] -= min_date
members['registration_init_time'] = members['registration_init_time'].dt.days
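# e.g. the earliest registration in the data maps to 0, and a user who registered 30 days later maps to 30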
# In[5]:
# Fix 'gender' feature
members['gender'] = members['gender'].fillna('NoGender')
# Encode the genders
members['gender'] = members['gender'].map({
'NoGender': 1, 'male': 2, 'female':3
})
# In[6]:
# Bin "registered_via" feature values
members['registered_via'].replace(
[1, 2, 5, 6, 8, 10, 11, 13, 14, 16, 17, 18, 19, -1],
1,
inplace = True
)
# In[7]:
# Drop redundant features
members = members.drop( ['city', 'bd'], axis=1 )
# In[9]:
display(members.head())
# In[10]:
# Work on 'transactions' dataset
TRANSACTIONS_FILEPATH = 'data/transactions_v2.csv'
transactions = | pd.read_csv(TRANSACTIONS_FILEPATH, header=0) | pandas.read_csv |
############################################################################################
# FileName [ mutational_sig.py ]
# PackageName [ lib/analysis ]
# Synopsis [ Implement mutational signature analysis. ]
# Author [ <NAME> ]
# Copyright [ 2021 9 ]
############################################################################################
from numpy.core.numeric import outer
from ..maf_filter import fast_read_maf
from termcolor import colored
import pandas as pd
import numpy as np
import math
import os
import seaborn as sns
import matplotlib.pyplot as plt
import matplotlib.ticker as ticker
import matplotlib.ticker as mtick
import matplotlib.style
import matplotlib
import sys
from scipy import linalg
COLOR_MAP = ['#266199','#b7d5ea','#acc6aa','#E0CADB','#695D73','#B88655','#DDDDDD','#71a0a5','#841D22','#E08B69']
LABEL_SIZE, TITLE_SIZE = 24,30
#########################################################
# #
# python3 mafAnalysis.py \ #
# -f examples/test_data/maf/ms.maf \ #
# -ms 0 "[SBS1, SBS5, SBS40, SBS87]" \ #
# -o examples/output \ #
# -p examples/pic/ #
# #
# #
# python3 mafAnalysis.py \ #
# -f examples/test_data/maf/ms.maf \ #
# -ms 1 "[2,9,10]" \ #
# -o examples/output \ #
# -p examples/pic/ #
# #
# #
# python3 mafAnalysis.py \ #
# -f examples/test_data/maf/ms.maf \ #
# -ms 2 "[3]" \ #
# -o examples/output \ #
# -p examples/pic/ #
# #
#########################################################
class MutationalSignature:
'''Mutational signature
Arguments:
maf_file {string} -- The input MAF file for all data.
output_folder {string} -- The path for output files.
pic {string} -- The path especially for output figures(.pdf)
rank1, rank2 {int} -- The range for estimate # signature.
epoch {int} -- # estimation running.
sig {int} -- The final factorization rank(# signature)
Parameters:
self.head {string} -- The column names of MAF file.
self.df {pd.DataFrame} -- The data for the MAF file.
self.cosmic {pd.DataFrame} -- The data for 'lib/auxiliary/COSMIC_72.tsv'.
self.contribution {pd.DataFrame} -- The data for signature refitting.
self.reconstructed {pd.DataFrame} -- The data for signature refitting.
self.input {string} -- The input file for plotting.
self.params {list} -- The list for input parameters.
Output files
ms_input.tsv
96_sig.csv
sig_sample.csv
SBS.tsv
Pictures:
Estimation.pdf
SBS_96_plots.pdf
S2S.pdf
SigContribution.pdf
SigSamHeatmap.pdf
Donut_plot.pdf
'''
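    # Typical call order (sketch only; paths and numbers are illustrative, mirroring the CLI examples above):
    #   ms = MutationalSignature('examples/test_data/maf/ms.maf')
    #   ms.get_input_file('examples/output/')                         # build the 96-category count matrix
    #   ms.estimation('examples/output/', 'examples/pic/', 2, 9, 10)  # survey candidate ranks in range(2, 9)
    #   ms.plotting('examples/output/', 'examples/pic/', 3)           # run NMF with 3 signatures and plot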
def __init__(self, maf_file):
print(colored(('\nStart Mutational_Signature....'), 'yellow'))
self.head, self.df = fast_read_maf(maf_file)
self.cosmic = pd.read_csv('lib/auxiliary/COSMIC_72.tsv', sep = '\t', index_col = 0)
self.contribution, self.reconstructed = pd.DataFrame(), pd.DataFrame()
self.input = ""
self.params = list()
def get_input_file(self, output_folder):
output_file = output_folder+'ms_input.tsv'
self.input = output_file
selected_col = self.df[['Tumor_Sample_Barcode','flanking_bps', 'Reference_Allele', 'Tumor_Seq_Allele2']]
selected_col.columns = ['SampleID', 'Three_Allele', 'Ref', 'Mut']
sample_list = selected_col.SampleID.unique()
grouped = selected_col.groupby(selected_col['SampleID'])
df_list = [grouped.get_group(sample).reset_index(drop=True) for sample in sample_list]
final_dict = {}
for d, df in enumerate(df_list):
# order: 'C>A','C>G','C>T','T>A','T>C','T>G'
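            # Substitutions are collapsed onto the pyrimidine (C or T) of the mutated base pair, so e.g.
            # a G>T call is counted as C>A; 6 classes x 16 flanking contexts gives the 96 SBS categories.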
cata_list = [[],[],[],[],[],[]]
for i in range(len(df)):
item = df.loc[i]
if (item['Ref'] == 'C' and item['Mut'] == 'A') or (item['Ref'] == 'G' and item['Mut'] == 'T'):
cata_list[0].append(item)
elif (item['Ref'] == 'C' and item['Mut'] == 'G') or (item['Ref'] == 'G' and item['Mut'] == 'C'):
cata_list[1].append(item)
elif (item['Ref'] == 'C' and item['Mut'] == 'T') or (item['Ref'] == 'G' and item['Mut'] == 'A'):
cata_list[2].append(item)
elif (item['Ref'] == 'T' and item['Mut'] == 'A') or (item['Ref'] == 'A' and item['Mut'] == 'T'):
cata_list[3].append(item)
elif (item['Ref'] == 'T' and item['Mut'] == 'C') or (item['Ref'] == 'A' and item['Mut'] == 'G'):
cata_list[4].append(item)
elif (item['Ref'] == 'T' and item['Mut'] == 'G') or (item['Ref'] == 'A' and item['Mut'] == 'C'):
cata_list[5].append(item)
list_96 = []
for cata in range(len(cata_list)):
cata_sum_list = [int(0)]*16
if cata in [0,1,2]:
three_allele_dict={'ACA':0, 'TGT':0, 'ACC':1, 'GGT':1, 'ACG':2, 'CGT':2, 'ACT':3, 'AGT':3, \
'CCA':4, 'TGG':4, 'CCC':5, 'GGG':5, 'CCG':6, 'CGG':6, 'CCT':7, 'AGG':7, \
'GCA':8, 'TGC':8, 'GCC':9, 'GGC':9, 'GCG':10, 'CGC':10, 'GCT':11, 'AGC':11,\
'TCA':12, 'TGA':12, 'TCC':13, 'GGA':13, 'TCG':14, 'CGA':14, 'TCT':15, 'AGA':15 }
elif cata in [3,4,5]:
three_allele_dict={'ATA':0, 'TAT':0, 'ATC':1, 'GAT':1, 'ATG':2, 'CAT':2, 'ATT':3, 'AAT':3, \
'CTA':4, 'TAG':4, 'CTC':5, 'GAG':5, 'CTG':6, 'CAG':6, 'CTT':7, 'AAG':7, \
'GTA':8, 'TAC':8, 'GTC':9, 'GAC':9, 'GTG':10, 'CAC':10, 'GTT':11, 'AAC':11,\
'TTA':12, 'TAA':12, 'TTC':13, 'GAA':13, 'TTG':14, 'CAA':14, 'TTT':15, 'AAA':15 }
for j in range(len(cata_list[cata])):
if (cata_list[cata][j])['Three_Allele'] in three_allele_dict:
cata_sum_list[three_allele_dict[(cata_list[cata][j])['Three_Allele']]] += 1;
list_96 += cata_sum_list
final_dict[sample_list[d]] = list_96
new_df = pd.DataFrame.from_dict(final_dict)
list_a = ['A.A', 'A.C', 'A.G', 'A.T', 'C.A', 'C.C', 'C.G', 'C.T',\
'G.A', 'G.C', 'G.G', 'G.T', 'T.A', 'T.C', 'T.G', 'T.T']
list_b = ['C>A', 'C>G', 'C>T', 'T>A', 'T>C', 'T>G']
new_row_name = []
for item in list_b:
for allele in list_a:
new_str = allele[0]+'['+item+']'+allele[2]
new_row_name.append(new_str)
new_df.index = new_row_name
new_df.to_csv(output_file, sep = '\t', index = True)
print(colored('=> Generate input file: ', 'green'))
print(colored((' '+output_file), 'green'))
# 0
def sig_refitting(self):
print(colored('# Signature refitting...', 'yellow'))
def lsqnonneg(y, signatures):
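            # Active-set non-negative least squares (a port of MATLAB's lsqnonneg, Lawson-Hanson):
            # finds x >= 0 minimising ||signatures @ x - y||_2, i.e. the non-negative exposure of each
            # COSMIC signature that best reconstructs the sample's 96-category mutation profile.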
def msize(x, dim):
s = x.shape
if dim >= len(s):
return 1
else:
return s[dim]
d, C = y, signatures
(m, n) = C.shape
tol = 10 * sys.float_info.epsilon * linalg.norm(C, ord=2) * (max(n, m)+1)
P, Z, x = np.zeros(n), np.arange(1, n+1), np.zeros(n)
ZZ = Z
resid = d - np.dot(C, x)
w = np.dot(C.T, resid)
outeriter, it = 0, 0
itmax = 3*n
while np.any(Z) and np.any(w[ZZ-1] > tol):
outeriter += 1
t = w[ZZ-1].argmax()
t = ZZ[t]
P[t-1], Z[t-1] = t, 0
PP, ZZ = np.where(P != 0)[0]+1, np.where(Z != 0)[0]+1
CP = np.zeros(C.shape)
CP[:, PP-1] = C.iloc[:, PP-1]
CP[:, ZZ-1] = np.zeros((m, msize(ZZ, 1)))
z = np.dot(np.linalg.pinv(CP), d)
z[ZZ-1] = np.zeros((msize(ZZ,1), msize(ZZ,0)))
while np.any(z[PP-1] <= tol):
it += 1
if it >= itmax:
max_error = z[PP-1].max()
raise Exception('Exiting: Iteration count (=%d) exceeded\n Try raising the tolerance tol. (max_error=%d)' % (it, max_error))
QQ = np.where((z <= tol) & (P != 0))[0]
alpha = min(x[QQ]/(x[QQ] - z[QQ]))
x = x + alpha*(z-x)
ij = np.where((abs(x) < tol) & (P != 0))[0]+1
Z[ij-1] = ij
P[ij-1] = np.zeros(max(ij.shape))
PP, ZZ= np.where(P != 0)[0]+1, np.where(Z != 0)[0]+1
CP[:, PP-1] = C.iloc[:, PP-1]
CP[:, ZZ-1] = np.zeros((m, msize(ZZ, 1)))
z = np.dot(np.linalg.pinv(CP), d)
z[ZZ-1] = np.zeros((msize(ZZ,1), msize(ZZ,0)))
x = z
resid = d - np.dot(C, x)
w = np.dot(C.T, resid)
return(x, sum(resid * resid), resid)
mut_matrix = pd.read_csv(self.input, sep = '\t', index_col = 0)
n_feature, n_samples = mut_matrix.shape[0], mut_matrix.shape[1]
n_signatures = (self.cosmic).shape[1]
lsq_contribution = pd.DataFrame(index=range(n_signatures),columns=range(n_samples))
lsq_reconstructed = pd.DataFrame(index=range(n_feature),columns=range(n_samples))
for i in range(n_samples):
y = mut_matrix.iloc[:,i]
lsq = lsqnonneg(y, self.cosmic)
lsq_contribution.iloc[:, i] = lsq[0]
lsq_reconstructed.iloc[:, i] = np.dot(self.cosmic, lsq[0])
lsq_contribution.columns = mut_matrix.columns
lsq_contribution.index = (self.cosmic).columns
lsq_reconstructed.columns = mut_matrix.columns
lsq_reconstructed.index = (self.cosmic).index
self.contribution = lsq_contribution
self.reconstructed = lsq_reconstructed
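        # Note: the per-sample solve above could equivalently use SciPy's NNLS (a sketch, not what
        # this class currently does):
        #   from scipy.optimize import nnls
        #   exposures, residual_norm = nnls(self.cosmic.to_numpy(), mut_matrix.iloc[:, i].to_numpy())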
# 1
def estimation(self, output_folder, pic, rank1, rank2, epoch):
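        # Writes and runs a small nimfa script that surveys SNMF factorization ranks in
        # range(rank1, rank2) with n_run=epoch, then plots RSS, cophenetic correlation, dispersion,
        # sparseness (basis/mixture) and explained variance; a reasonable rank is typically where
        # the RSS curve flattens and the cophenetic coefficient starts to drop.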
os.system('git clone https://github.com/mims-harvard/nimfa.git\n')
os.chdir('nimfa')
os.system('python3 setup.py install --user')
code = open('nimfa.py', 'w')
code.write("import nimfa\nfrom collections import defaultdict, Counter\nimport urllib\nimport numpy as np\nfrom matplotlib import pyplot as plt\nimport matplotlib.gridspec as gridspec\nfrom sklearn import preprocessing\nimport scipy.cluster.hierarchy as sch\nimport pandas as pd\n")
code.write("df = (pd.read_csv(\"../" + output_folder + "ms_input.tsv\", sep=\"\t\")).T\n")
code.write("data = (df.to_numpy())[1:]\n")
code.write("rank_cands = range("+str(rank1)+","+ str(rank2)+", 1)\n")
code.write("snmf = nimfa.Snmf(data, seed='random_vcol', max_iter=100)\n")
code.write("summary = snmf.estimate_rank(rank_range=rank_cands, n_run="+str(epoch)+", what='all')\n")
code.write("rss = [summary[rank]['rss'] for rank in rank_cands]\n")
code.write("coph = [summary[rank]['cophenetic'] for rank in rank_cands]\n")
code.write("disp = [summary[rank]['dispersion'] for rank in rank_cands]\n")
code.write("spar = [summary[rank]['sparseness'] for rank in rank_cands]\n")
code.write("spar_w, spar_h = zip(*spar)\n")
code.write("evar = [summary[rank]['evar'] for rank in rank_cands]\n")
code.write("fig, axs = plt.subplots(2, 3, figsize=(12,8))\n")
code.write("axs[0,0].plot(rank_cands, rss, 'o-', color='#266199', label='RSS', linewidth=3)\n")
code.write("axs[0,0].set_title('RSS', fontsize=16,fontweight='bold')\n")
code.write("axs[0,0].tick_params(axis='both', labelsize=12)\n")
code.write("axs[0,0].set_xticks(np.arange("+str(rank1)+", "+str(rank2)+", 1))\n")
code.write("axs[0,1].plot(rank_cands, coph, 'o-', color='#695D73', label='Cophenetic correlation', linewidth=3)\n")
code.write("axs[0,1].set_title('Cophenetic', fontsize=16,fontweight='bold')\n")
code.write("axs[0,1].tick_params(axis='both', labelsize=12)\n")
code.write("axs[0,1].set_xticks(np.arange("+str(rank1)+", "+str(rank2)+", 1))\n")
code.write("axs[0,2].plot(rank_cands, disp,'o-', color='#71a0a5', label='Dispersion', linewidth=3)\n")
code.write("axs[0,2].set_title('Dispersion', fontsize=16,fontweight='bold')\n")
code.write("axs[0,2].tick_params(axis='both', labelsize=12)\n")
code.write("axs[0,2].set_xticks(np.arange("+str(rank1)+", "+str(rank2)+", 1))\n")
code.write("axs[1,0].plot(rank_cands, spar_w, 'o-', color='#B88655', label='Sparsity (Basis)', linewidth=3)\n")
code.write("axs[1,0].set_title('Sparsity (Basis)', fontsize=16,fontweight='bold')\n")
code.write("axs[1,0].tick_params(axis='both', labelsize=12)\n")
code.write("axs[1,0].set_xticks(np.arange("+str(rank1)+", "+str(rank2)+", 1))\n")
code.write("axs[1,1].plot(rank_cands, spar_h, 'o-', color='#E08B69', label='Sparsity (Mixture)', linewidth=3)\n")
code.write("axs[1,1].set_title('Sparsity (Mixture)', fontsize=16,fontweight='bold')\n")
code.write("axs[1,1].tick_params(axis='both', labelsize=12)\n")
code.write("axs[1,1].set_xticks(np.arange("+str(rank1)+", "+str(rank2)+", 1))\n")
code.write("axs[1,2].plot(rank_cands, evar, 'o-', color='#841D22', label='Explained variance', linewidth=3)\n")
code.write("axs[1,2].set_title('Explained variance', fontsize=16,fontweight='bold')\n")
code.write("axs[1,2].tick_params(axis='both', labelsize=12)\n")
code.write("axs[1,2].set_xticks(np.arange("+str(rank1)+", "+str(rank2)+", 1))\n")
code.write("fig.tight_layout(pad=1.0)\n")
code.write("plt.savefig(\"../"+pic+"Estimation.pdf\",dpi=300,bbox_inches = 'tight')\n")
code.close()
print(colored(('\nStart Estimation (may need a few minutes)....'), 'yellow'))
p = os.popen('python3 nimfa.py\n')
x = p.read()
print(x)
p.close()
print(colored('=> Generate estimation figure: ', 'green'))
print(colored((' '+pic+'Estimation.pdf\n'), 'green'))
os.chdir('..')
os.system('rm -rf nimfa\n')
def getParams(self, params):
        self.params = params.replace('[', '').replace(']', '').replace(' ', '').split(',')
def SBSplot(self, input, pic):
df = input
if len(self.params) != 0:
df = df[self.params]
fig_x = tuple([ ' '+i[0]+' '+i[6] for i in list(df.index)])
y_pos = np.arange(len(fig_x))
fig_name = list(df.columns)
fig, axes = plt.subplots(df.shape[1], 1, figsize=(12,2*df.shape[1]))#
if df.shape[1] == 1:
return
for r in range(df.shape[1]):
color_set = ['#02bdee', '#010101','#e32925','#cac9c9', '#a1cf63', '#ecc7c4']
color_96 = [ c for c in color_set for i in range(16)]
all_data = df.iloc[:, r]
all_data /= (all_data.sum())
maximum = max(all_data)*1.25
data_list = all_data.tolist()
axes[r].text(0.01, 0.86, fig_name[r], horizontalalignment='left',verticalalignment='center', transform=axes[r].transAxes, fontweight='bold')
axes[r].bar(y_pos, data_list, color=color_96, width=0.4)
axes[r].spines['bottom'].set_color('#cac9c9')
axes[r].spines['top'].set_color('#cac9c9')
axes[r].spines['right'].set_color('#cac9c9')
axes[r].spines['left'].set_color('#cac9c9')
if r != df.shape[1]-1:
axes[r].xaxis.set_visible(False)
axes[r].set_xticklabels([])
axes[r].tick_params(axis='x',length=0)
axes[r].set_xlim([-0.8,len(data_list)-.8])
axes[r].tick_params(axis='y',direction='in', color='#cac9c9', labelsize=10)
axes[r].set_ylabel('Percentage', fontweight='bold')
axes[r].tick_params(axis='y', labelsize=10)
axes[r].set_ylim(top = max(all_data)*1.25)
axes[r].yaxis.set_major_locator(ticker.LinearLocator(5))
axes[r].yaxis.set_major_formatter(mtick.PercentFormatter(xmax=1, decimals=1))
for i in range(6):
axes[r].add_patch(matplotlib.patches.Rectangle((0+16*i ,maximum*0.95), 15.6 , 0.01, color=color_set[i],transform=axes[r].transData))
mut_list = ['C>A','C>G','C>T','T>A','T>C','T>G']
for i in range(6):
plt.text(0.19+0.13*i,0.916-df.shape[1]*0.0029, mut_list[i], horizontalalignment='center',verticalalignment='center',transform=plt.gcf().transFigure, fontweight='bold', fontsize=14)
plt.xticks(y_pos, fig_x, color='#999999',rotation=90, fontsize=9,horizontalalignment='center',verticalalignment='top',fontname='monospace')#verticalalignment='bottom',
space = 0.008075
y_scale = [0.072, 0.084, 0.09, 0.094, 0.097, 0.0987, 0.1, 0.1013, 0.1023]
for i in range(6):
for j in range(16):
if i < 3:
plt.text((0.131+space*16*i)+space*j, y_scale[df.shape[1]-2], 'C',horizontalalignment='center',verticalalignment='center',transform=plt.gcf().transFigure, color=color_set[i], fontsize=9, rotation=90,fontname='monospace', fontweight='bold')
else:
plt.text((0.131+space*16*i)+space*j, y_scale[df.shape[1]-2], 'T',horizontalalignment='center',verticalalignment='center',transform=plt.gcf().transFigure, color=color_set[i], fontsize=9, rotation=90,fontname='monospace', fontweight='bold')
plt.savefig(pic+'SBS_96_plots.pdf',dpi=300, bbox_inches='tight')
print(colored(('=> Generate SBS Plot: '+pic+'SBS_96_plots.pdf'), 'green'))
def CosineSimilarity(self, input, output_folder, pic):
from sklearn.metrics.pairwise import cosine_similarity
# my_file, aux_file = output_folder+'96_sig.csv', 'lib/auxiliary/COSMIC_72.tsv'
my_df, aux_df = input, self.cosmic
my_list, aux_list = my_df.columns, aux_df.columns
X = np.array(my_df.T.to_numpy())
Y = np.array(aux_df.T.to_numpy())
M = cosine_similarity(X, Y, dense_output=True)
Mdf= pd.DataFrame(M)
Mdf.index, Mdf.columns = my_list, aux_list
Mdf.to_csv(output_folder+'SBS.tsv', sep='\t')
print(colored('=> Generate file: ', 'green'))
print(colored((' '+output_folder+'SBS.tsv'), 'green'))
height, length = len(my_list), len(aux_list)
sns.set(font_scale=2)
sns.set_style('white')
grid_kws = {'height_ratios': (.9, .2),'hspace': 0.3}
f, (ax, cbar_ax) = plt.subplots(2,figsize=(20,6), gridspec_kw=grid_kws)
ax = sns.heatmap(M, vmin=0, vmax=1, xticklabels =aux_list, yticklabels = my_list, square=False, linewidth=1, cbar_ax=cbar_ax,ax=ax,
cmap='Blues',cbar_kws={'orientation': 'horizontal','shrink':1, 'aspect':70})
# ax.set_title('Cosine Similarity',fontsize=TITLE_SIZE,weight='bold',pad=0,verticalalignment='bottom')
ax.set_xticklabels(ax.get_xticklabels(),rotation=90, horizontalalignment='center', fontsize=LABEL_SIZE-6, color='#222222')
ax.tick_params(axis='both',length=0)
ax.set_yticklabels(ax.get_yticklabels(), fontsize=LABEL_SIZE-6,color='#222222',verticalalignment='center')
plt.ylim(bottom=0, top=height+0.5)
plt.savefig(pic+'S2S.pdf',dpi=300,bbox_inches='tight')
plt.clf()
print(colored(('=> Generate Cosine Similarity Plot: '+pic+'S2S.pdf'), 'green'))
def SigDistribution(self, input, output_folder, pic):
df = input.loc[self.params,:] if len(self.params) != 0 else input
sample_list, sig_list = list(df.columns),list(df.index)
SUM = (df.sum(axis = 0, skipna = True)).tolist()
df = df/SUM
dft = df.T
# dft.columns = ['sample']+dft.columns
dft.to_csv(output_folder+'SigContribution.tsv',index_label='sample', sep='\t')
print(colored((' '+output_folder+'SigContribution.tsv'), 'green'))
ind = np.arange(df.shape[1])
data = []
for i in range(df.shape[0]):
d = tuple(df.iloc[i].tolist())
data.append(d)
fig = plt.figure(figsize=(10, 5))
ax = fig.add_axes([0,0,1,1])
for i in range(len(data)):
if i == 0:
ax.bar(ind, data[i], 0.8, color = COLOR_MAP[i])
else:
b = np.array(data[0])
for k in range(1,i):
b = b+np.array(data[k])
ax.bar(ind, data[i], 0.8, bottom=b,color = COLOR_MAP[i])
# ax.set_title('Relative Contribution',fontsize=TITLE_SIZE, fontweight='bold')
ax.spines['bottom'].set_color('#cac9c9')
ax.spines['top'].set_color('#FFFFFF')
ax.spines['right'].set_color('#FFFFFF')
ax.spines['left'].set_color('#cac9c9')
ax.set_xlim([-1,len(ind)])
ax.tick_params(axis='y',direction='in', color='#cac9c9', labelsize=LABEL_SIZE-4)
ax.tick_params(axis='x',direction='in', length=0)
ax.xaxis.set_visible(False)
ax.set_yticks(np.arange(0, 1+0.1, 0.25))
ax.legend(title='',labels=sig_list,loc='lower center',ncol=3, fontsize=LABEL_SIZE-4, edgecolor='white',
labelspacing=0.5, bbox_to_anchor=(0.5, (-0.1-(math.ceil(len(sig_list)/3)*0.065))))
plt.savefig(pic+'SigContribution.pdf', dpi=300,bbox_inches='tight')
print(colored(('=> Generate Bar Plot: ' + pic+'SigContribution.pdf'), 'green'))
height, length = len(sig_list), len(sample_list)
h_data = np.array(df.to_numpy())
sns.set(font_scale=2)
f,ax = plt.subplots(figsize=(9+length/20,2+height*0.3))
        ax = sns.heatmap(h_data, vmin=0, vmax=1, yticklabels = sig_list, linewidths=1,
square=False, cmap='Blues',cbar_kws={'orientation': 'horizontal','shrink':1, 'aspect':50})
# ax.set_title('Signature Sample Heatmap', fontsize=TITLE_SIZE,weight='bold',va='bottom')
ax.xaxis.set_visible(False)
ax.set_xticklabels([])
ax.tick_params(axis='both',length=0)
ax.set_yticklabels(ax.get_yticklabels(), fontsize=LABEL_SIZE-4,color='#222222')
plt.savefig(pic+'SigSamHeatmap.pdf',dpi=300,bbox_inches='tight')
print(colored(('=> Generate Heatmap: '+pic+'SigSamHeatmap.pdf'), 'green'))
def DonutPlot(self, input, pic):
df = input.loc[self.params,:] if len(self.params) != 0 else input
raw_data = df.sum(axis=1)/df.shape[1]
SUM = raw_data.sum(axis=0)
raw_data = raw_data/SUM
names, sizes = list(raw_data.index), list(raw_data.iloc[:])
names = [names[i]+': '+'{:.1%}'.format(sizes[i]) for i in range(len(sizes))]
fig, ax = plt.subplots(figsize=(6, 8), subplot_kw=dict(aspect='equal'))
wedges, texts = ax.pie(sizes, colors=COLOR_MAP[:len(names)],wedgeprops=dict(width=0.6,edgecolor='w',linewidth=2), startangle=-40) #,normalize=False
bbox_props = dict(boxstyle='square,pad=0.3', fc='w', ec='k', lw=0)
kw = dict(arrowprops=dict(arrowstyle='-'),bbox=bbox_props, zorder=0, va='center')
for i, p in enumerate(wedges):
ang = (p.theta2 - p.theta1)/2. + p.theta1
y = np.sin(np.deg2rad(ang))
x = np.cos(np.deg2rad(ang))
horizontalalignment = {-1: 'right', 1: 'left'}[int(np.sign(x))]
connectionstyle = 'angle,angleA=0,angleB={}'.format(ang)
kw['arrowprops'].update({'connectionstyle': connectionstyle})
ax.annotate(names[i], xy=(x, y), xytext=(1.35*np.sign(x), 1.4*y),horizontalalignment=horizontalalignment, **kw, fontsize=LABEL_SIZE)
plt.savefig(pic+'Donut_plot.pdf', dpi=300, bbox_inches='tight')
print(colored(('=> Generate Donut Plot: '+pic+'Donut_plot.pdf'), 'green'))
def nmf(self, output_folder, sig):
print(colored(('\nStart NMF....'), 'yellow'))
from sklearn.decomposition import NMF
if not os.path.isfile(output_folder+'ms_input.tsv'):
raise ValueError('[MutScape] Mutational Signature: Step 1 must be done before step 2.')
df = (pd.read_csv(output_folder+'ms_input.tsv', sep='\t')).T
sample_list = df.index[1:]
index_96 = df.to_numpy()[0]
data = (df.to_numpy())[1:]
model = NMF(n_components=int(sig),init='random', random_state=0)
W = model.fit_transform(data)
H = model.components_
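        # NMF factorises data (samples x 96) as W @ H: W (samples x signatures) holds per-sample
        # exposures and H (signatures x 96) holds the signature profiles; both are transposed below
        # before being written to 96_sig.csv and sig_sample.csv.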
Hdf, Wdf = pd.DataFrame(H.T), pd.DataFrame(W.T)
Hdf.columns = ['Signature '+str(i+1) for i in range(int(sig))]
Wdf.columns = sample_list
Hdf.index = index_96
Wdf.index = ['Signature '+str(i+1) for i in range(int(sig))]
Hdf.to_csv(output_folder+'96_sig.csv')
Wdf.to_csv(output_folder+'sig_sample.csv')
print(colored('=> Generate file: ', 'green'))
print(colored((' '+output_folder+'96_sig.csv'), 'green'))
print(colored((' '+output_folder+'sig_sample.csv'), 'green'))
def plotting(self, output_folder, pic, sig):
LABEL_SIZE, TITLE_SIZE = 24,30
print(colored(('\nStart Mutational_Signature Plotting(signature number must be in the range of 2 to 9)....'), 'yellow'))
self.nmf(output_folder, sig)
df = (pd.read_csv(output_folder+'96_sig.csv'))
df = df.set_index(list(df.columns[[0]]))
df1 = | pd.read_csv(output_folder+'sig_sample.csv', index_col=0) | pandas.read_csv |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sat Oct 23 10:40:57 2021
@author: lschiesser
"""
import unittest
import pandas as pd
from code.feature_extraction.binary_features import BinaryFeatureExtractor
class TestBinaryExtractor(unittest.TestCase):
def setUp(self):
self.INPUT_COLUMN = "tweet"
self.OUTPUT_COLUMN = "output"
self.INPUT_COLUMNS = ["photo", "video"]
self.multiple_input_extractor = BinaryFeatureExtractor(self.INPUT_COLUMNS, self.OUTPUT_COLUMN)
self.one_input_extractor = BinaryFeatureExtractor(self.INPUT_COLUMN, self.OUTPUT_COLUMN)
def test_one_binary_feature(self):
url_input = ["https://google.com", "https://ikw.uos.de"]
output = 1
df = | pd.DataFrame() | pandas.DataFrame |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Monday June 7th
@author: enprietop
"""
from DJSFunctions import extract_preprocess_data, ankle_DJS
from plot_dynamics import plot_ankle_DJS
import os
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.colors as mcolors
from scipy import stats
from utilities_QS import multi_idx, create_df, best_hyper, change_labels
import itertools as it
#stats
import researchpy as rp
import statsmodels.api as sm
from statsmodels.formula.api import ols
from statsmodels.stats.multicomp import pairwise_tukeyhsd
from statsmodels.stats.multicomp import MultiComparison
import seaborn as sns
from scipy.stats.mstats import kruskal
import scikit_posthocs as sp
# =============================================================================
# Helper functions
# =============================================================================
def ttest_(ds1, ds2, dep_vars):
"""
Parameters
----------
    ds1 : first group (subset of the DataFrame rows)
    ds2 : second group (subset of the DataFrame rows)
    dep_vars : list of dependent-variable column names to test
    Returns
    -------
    var : dict of Bartlett's-test p-values per variable
    ttest_ : dict of independent t-test p-values per variable (Welch's test when variances differ)
"""
# Assumptions:
# 1. Independent samples
# 2. Large enough sample size or observations come from a normally-distributed
# population
    # 3. Variances are equal; if not, apply Welch's test
    # Do the samples come from a normally distributed population?
    # Bartlett's test has the null hypothesis that the variances are equal. At a 5% significance
    # level, p-values below 0.05 reject the null, i.e. the variances are treated as unequal.
    # Measure and store whether the samples have the same variance
var = {item: stats.bartlett(ds1[item],
ds2[item]).pvalue for item in dep_vars}
    # Perform the t-test; when the variances are unequal, equal_var=False gives Welch's t-test
ttest_ = {item: stats.ttest_ind(ds1[item], ds2[item],
equal_var=var[item] > 0.05).pvalue for item in dep_vars}
return var, ttest_
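# Example usage (sketch; the group subsets and variable names are illustrative, not from this dataset):
#   var_p, ttest_p = ttest_(rows_group_a, rows_group_b, dep_vars=['var1', 'var2'])
#   significant = {v: p < 0.05 for v, p in ttest_p.items()}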
#Testing for normal distributions
#For p-values below 5% the null hypothesis of normality is rejected, i.e. the distribution is non-normal
def shapiro_test(ds, dep_vars, name='No name', df=True):
if df == True:
shapiro_ = {item: stats.shapiro(ds[item]).pvalue > 0.05 for item in dep_vars}
shapiro_df = pd.Series(shapiro_, name=name)
return shapiro_df
else:
shapiro_ = {item: stats.shapiro(ds[item]).pvalue for item in dep_vars}
return shapiro_
# =============================================================================
# Kruskal Wallis test on ranks
# =============================================================================
def kruskal_groups(ds1, ds2, dep_vars, name):
kruskal_deps = pd.Series({item: kruskal(ds1[item].values,
ds2[item].values).pvalue < 0.05 for item in dep_vars})
kruskal_deps.name = name
return kruskal_deps
os.chdir('ConcatDatasets/')
concat_QS = | pd.read_csv('DatasetPaper.csv', index_col=[0]) | pandas.read_csv |
import os
from solaris.eval.base import Evaluator
import solaris
import geopandas as gpd
import pandas as pd
class TestEvaluator(object):
def test_init_from_file(self):
"""Test instantiation of an Evaluator instance from a file."""
base_instance = Evaluator(os.path.join(solaris.data.data_dir,
'gt.geojson'))
gdf = solaris.data.gt_gdf()
assert base_instance.ground_truth_sindex.bounds == gdf.sindex.bounds
assert base_instance.proposal_GDF.equals(gpd.GeoDataFrame([]))
assert base_instance.ground_truth_GDF.equals(
base_instance.ground_truth_GDF_Edit)
def test_init_from_gdf(self):
"""Test instantiation of an Evaluator from a pre-loaded GeoDataFrame."""
gdf = solaris.data.gt_gdf()
base_instance = Evaluator(gdf)
assert base_instance.ground_truth_sindex.bounds == gdf.sindex.bounds
assert base_instance.proposal_GDF.equals(gpd.GeoDataFrame([]))
assert base_instance.ground_truth_GDF.equals(
base_instance.ground_truth_GDF_Edit)
def test_init_empty_geojson(self):
"""Test instantiation of Evaluator with an empty geojson file."""
base_instance = Evaluator(os.path.join(solaris.data.data_dir,
'empty.geojson'))
expected_gdf = gpd.GeoDataFrame({'sindex': [],
'condition': [],
'geometry': []})
assert base_instance.ground_truth_GDF.equals(expected_gdf)
def test_score_proposals(self):
"""Test reading in a proposal GDF from a geojson and scoring it."""
eb = Evaluator(os.path.join(solaris.data.data_dir, 'gt.geojson'))
eb.load_proposal(os.path.join(solaris.data.data_dir, 'pred.geojson'))
pred_gdf = solaris.data.pred_gdf()
assert eb.proposal_GDF.iloc[:, 0:3].sort_index().equals(pred_gdf)
expected_score = [{'class_id': 'all',
'iou_field': 'iou_score_all',
'TruePos': 8,
'FalsePos': 20,
'FalseNeg': 20,
'Precision': 0.2857142857142857,
'Recall': 0.2857142857142857,
'F1Score': 0.2857142857142857}]
scores = eb.eval_iou(calculate_class_scores=False)
assert scores == expected_score
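        # With 8 true positives and 20 false positives/negatives each, precision = recall = 8/28 ~= 0.286,
        # and F1 takes the same value.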
def test_iou_by_building(self):
"""Test output of ground truth table with per-building IoU scores"""
data_folder = solaris.data.data_dir
path_truth = os.path.join(data_folder, 'SN2_sample_truth.csv')
path_pred = os.path.join(data_folder, 'SN2_sample_preds.csv')
path_ious = os.path.join(data_folder, 'SN2_sample_iou_by_building.csv')
path_temp = './temp.pd'
eb = Evaluator(path_truth)
eb.load_proposal(path_pred, conf_field_list=['Confidence'],
proposalCSV=True)
eb.eval_iou_spacenet_csv(miniou=0.5, imageIDField='ImageId',
min_area=20)
output = eb.get_iou_by_building()
result_actual = | pd.DataFrame(output) | pandas.DataFrame |
import numpy as np
import pandas as pd
from scipy.spatial.distance import cosine
import evaluation_utils
class CosineBaseline:
def __init__(self, user_item: pd.DataFrame, test_set: pd.DataFrame, movies_set: pd.DataFrame, k: int, n: int,
sim_matrix_flag=0, sim_matrix_path="./generated_files/item_knn_sim.csv"):
"""
Constructor of the class
:param user_item: user item matrix data frame
:param test_set: test set
:param movies_set: movies set
:param k: number of neighbours
:param n: number of recommendation items to return
:param sim_matrix_flag: 1 to generate the similarity matrix from scratch, 0 to read from file path
:param sim_matrix_path: path to file of the similarity matrix
"""
self.user_item = user_item
self.test_set = test_set
self.movies_set = movies_set
self.sim_matrix_flag = sim_matrix_flag
self.sim_matrix_path = sim_matrix_path
self.k = k
self.n = n
def set_k(self, new_k: int):
"""
        Setter for the number of neighbours k
        :param new_k: new value of k
        :return: None; updates self.k
"""
self.k = new_k
def set_n(self, new_n: int):
"""
        Setter for the number of recommendations n
        :param new_n: new value of n
        :return: None; updates self.n
"""
self.n = new_n
def __get_similarity_matrix(self):
"""
Function that gets or generates the similarity matrix
:return: the similarity matrix with index and column
"""
movies_id = self.movies_set['movie_id'].to_list()
movies_id.sort()
if self.sim_matrix_flag == 1:
np.seterr(all='raise')
item_sim = | pd.DataFrame(0, index=movies_id, columns=movies_id) | pandas.DataFrame |
import numpy as np
import pandas as pd
import geopandas as gpd
import warnings
import osmnx as ox
import requests
import json
from shapely.geometry import Point
def geom_ceil(coordinate, precision=4):
return np.true_divide(np.ceil(coordinate * 10 ** precision), 10 ** precision)
def geom_floor(coordinate, precision=4):
return np.true_divide(np.floor(coordinate * 10 ** precision), 10 ** precision)
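# geom_ceil/geom_floor round a coordinate up/down at ~1e-4 degrees so the derived bounding box
# fully encloses the (buffered) query area.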
class POIdata:
"""
This class creates a query for the investigated area and POI categories.
The query is sent to osm using overpass API and the data is retrieved.
Parameters
----------
area : GeoDataFrame or str
GeoDataFrame must have a single shapely Polygon or MultiPolygon
in geometry column and its CRS must be defined.
str must be a name of a city, or an address of a region
poi_categories : A list of OSM primary map features or 'all'
timeout : int
The TCP connection timeout for the overpass request
verbose : bool
If True, print information while computing
"""
def __init__(self, area, poi_categories, timeout, verbose):
self.area_buffered = None
self.area = area
self.poi_categories = poi_categories
self.timeout = timeout
self.verbose = verbose
@staticmethod
def osm_primary_features():
"""
list of primary OSM features
available at https://wiki.openstreetmap.org/wiki/Map_features
Returns
--------
osm_primary_features_lst : list
"""
osm_primary_features_lst = ['aerialway',
'aeroway',
'amenity',
'barrier',
'boundary',
'building',
'craft',
'emergency',
'geological',
'healthcare',
'highway',
'historic',
'landuse',
'leisure',
'man_made',
'military',
'natural',
'office',
'place',
'power',
'public_transport',
'railway',
'route',
'shop',
'sport',
'telecom',
'tourism',
'water',
'waterway']
return osm_primary_features_lst
def create_overpass_query_string(self):
"""
creates the query string to be passed to overpass
Returns
--------
query_string : str
"""
# make the query area a bit larger
with warnings.catch_warnings():
warnings.simplefilter('ignore')
self.area_buffered = self.area.buffer(0.008).simplify(0.005)
xy = np.array(self.area_buffered.iloc[0].exterior.coords)
# make polygon string for OSM overpass query
# Using the polygon, fewer data are retrieved, and it's faster but request is long can can lead to 414
# poly_str = ''
# for lat, lon in zip(xy[:, 1], xy[:, 0]):
# poly_str = poly_str + str(lat) + ' ' + str(lon) + ' '
# poly_str = poly_str.strip()
# make bounding box for OSM overpass query
        # shapely exterior coords are (x, y) = (lon, lat)
        lon_min = geom_floor(np.min(xy[:, 0]))
        lat_min = geom_floor(np.min(xy[:, 1]))
        lon_max = geom_ceil(np.max(xy[:, 0]))
        lat_max = geom_ceil(np.max(xy[:, 1]))
# if poi not in primary --> error
# todo: is this necessary?
for poi_category in self.poi_categories:
if poi_category not in self.osm_primary_features():
raise ValueError(f'{poi_category} is not a valid POI primary category. See a list of OSM primary '
f'features with Tessellation.osm_primary_features()')
# create query string for overpass
query_string = ''
for element in ['node', 'way']:
for poi_category in self.poi_categories:
# query with polygon
# query_string = query_string + f'{element}[{poi_category}](poly:"{poly_str}");'
# query with bounding box
query_string = query_string + f'{element}[{poi_category}];'
# query_string = f"[out:json][timeout:{self.timeout}];(" + query_string + ');out geom;'
query_string = f"[bbox][out:json][timeout:{self.timeout}];(" \
+ query_string \
+ ');out geom;' \
                       + f'&bbox={lon_min},{lat_min},{lon_max},{lat_max}'
return query_string
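        # For poi_categories=['amenity'] and timeout=60 the returned string looks like
        # (the bbox numbers are illustrative):
        #   [bbox][out:json][timeout:60];(node[amenity];way[amenity];);out geom;&bbox=13.28,52.45,13.46,52.56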
def get_poi_data(self):
"""
sends the query to osm using the overpass API and gets the data
Returns
--------
poi_df : pandas.DataFrame
A dataframe containing the POI, POI type, and coordinates
"""
query_string = self.create_overpass_query_string()
request_header = "https://overpass-api.de/api/interpreter?data="
if self.verbose:
print('Getting data from OSM...')
# sending the request
resp = requests.get(url=request_header + query_string)
if resp.status_code == 429:
raise RuntimeError("429 Too Many Requests:\n"
"You have sent multiple requests from the same "
"IP and passed the passed the fair use policy. "
"Please wait a couple of minutes and then try again.")
elif resp.status_code == 504:
raise RuntimeError("504 Gateway Timeout:\n"
"the server has already so much load that the request cannot be executed."
"Please try again later")
elif resp.status_code != 200:
raise RuntimeError("Bad Request!")
else:
resp = json.loads(resp.text)
if self.verbose:
print('Creating POI DataFrame...')
lst_nodes = []
lst_ways = []
generator = resp['elements']
for item in generator:
for cat in self.poi_categories:
if cat in item['tags'].keys():
item[cat] = True
if item['type'] == 'node':
lst_nodes.append(item)
elif item['type'] == 'way':
item['center_latitude'] = np.mean([point['lat'] for point in item['geometry']])
item['center_longitude'] = np.mean([point['lon'] for point in item['geometry']])
lst_ways.append(item)
else:
continue
if self.verbose:
print('Cleaning POI DataFrame...')
nodes_df = pd.DataFrame(lst_nodes)
ways_df = pd.DataFrame(lst_ways)
nodes_df['geometry'] = nodes_df[['lon', 'lat']].apply(lambda p: [{'lat': p['lat'], 'lon': p['lon']}], axis=1)
nodes_df = nodes_df.rename(columns={'lat': 'center_latitude', 'lon': 'center_longitude'})
nodes_df = nodes_df.drop(columns=['id'])
ways_df = ways_df.drop(columns=['id', 'bounds', 'nodes'])
poi_df = | pd.concat([ways_df, nodes_df]) | pandas.concat |
import pandas as pd
import numpy as np
from statsmodels.formula.api import ols
from swstats import *
from scipy.stats import ttest_ind
import xlsxwriter
from statsmodels.stats.multitest import multipletests
from statsmodels.stats.proportion import proportions_ztest
debugging = False
def pToSign(pval):
if pval < .001:
return "***"
elif pval < .01:
return "**"
elif pval < .05:
return "*"
elif pval < .1:
return "+"
else:
return ""
def analyzeExperiment_ContinuousVar(dta, varName):
order_value_control_group = dta.loc[dta.surveyArm == "arm1_control", varName]
order_value_arm2_group = dta.loc[dta.surveyArm == "arm2_written_techniques", varName]
order_value_arm3_group = dta.loc[dta.surveyArm == "arm3_existingssa", varName]
order_value_arm4_group = dta.loc[dta.surveyArm == "arm4_interactive_training", varName]
# Arm 1
arm1mean = np.mean(order_value_control_group)
arm1sd = np.std(order_value_control_group)
arm1text = "" + "{:.2f}".format(arm1mean) + " (" + "{:.2f}".format(arm1sd) + ")"
# Effect of Arm 2
arm2mean = np.mean(order_value_arm2_group)
arm2sd = np.std(order_value_arm2_group)
tscore, pval2 = ttest_ind(order_value_control_group, order_value_arm2_group)
arm2sign = pToSign(pval2)
arm2text = "" + "{:.2f}".format(arm2mean) + " (" + "{:.2f}".format(arm2sd) + ")" + arm2sign + " p:" + "{:.3f}".format(pval2)
# Effect of Arm 3
arm3mean = np.mean(order_value_arm3_group)
arm3sd = np.std(order_value_arm3_group)
tscore, pval3 = ttest_ind(order_value_control_group, order_value_arm3_group)
arm3sign = pToSign(pval3)
arm3text = "" + "{:.2f}".format(arm3mean) + " (" + "{:.2f}".format(arm3sd) + ")" + arm3sign + " p:" + "{:.3f}".format(pval3)
# Effect of Arm 4
arm4mean = np.mean(order_value_arm4_group)
arm4sd = np.std(order_value_arm4_group)
tscore, pval4 = ttest_ind(order_value_control_group, order_value_arm4_group)
arm4sign = pToSign(pval4)
arm4text = "" + "{:.2f}".format(arm4mean) + " (" + "{:.2f}".format(arm4sd) + ")" + arm4sign + " p:" + "{:.3f}".format(pval4)
# Correct P-values
y = multipletests(pvals=[pval2, pval3, pval4], alpha=0.05, method="holm")
# print(len(y[1][np.where(y[1] < 0.05)])) # y[1] returns corrected P-vals (array)
sigWithCorrection = y[1] < 0.05
if sigWithCorrection[0]:
arm2text = arm2text + ",#"
if sigWithCorrection[1]:
arm3text = arm3text + ",#"
if sigWithCorrection[2]:
arm4text = arm4text + ",#"
# Additional checks
tscore, pval2to4 = ttest_ind(order_value_arm2_group, order_value_arm4_group)
arm2to4sign = pToSign(pval2to4)
arm2to4text = "" + "{:.2f}".format(arm4mean - arm2mean) + " " + arm2to4sign + " p:" + "{:.3f}".format(pval2to4)
tscore, pval3to4 = ttest_ind(order_value_arm3_group, order_value_arm4_group)
arm3to4sign = pToSign(pval3to4)
arm3to4text = "" + "{:.2f}".format(arm4mean - arm3mean) + " " + arm3to4sign + " p:" + "{:.3f}".format(pval3to4)
results = {"Outcome": varName,
"Arm1": arm1text,
"Arm2": arm2text,
"Arm3": arm3text,
"Arm4": arm4text,
"Arm2To4": arm2to4text,
"Arm3To4": arm3to4text,
}
return results
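# Each returned cell is formatted as "mean (sd)" for arm 1 and "mean (sd)<stars> p:<pval>[,#]" for the
# other arms, where the stars follow pToSign() and "#" flags significance after the Holm correction
# across the three arm-vs-control contrasts.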
def analyzeExperiment_BinaryVar(dta, varName):
order_value_control_group = dta.loc[dta.surveyArm == "arm1_control", varName]
order_value_arm2_group = dta.loc[dta.surveyArm == "arm2_written_techniques", varName]
order_value_arm3_group = dta.loc[dta.surveyArm == "arm3_existingssa", varName]
order_value_arm4_group = dta.loc[dta.surveyArm == "arm4_interactive_training", varName]
# Arm 1
arm1Successes = sum(order_value_control_group.isin([True, 1]))
arm1Count = sum(order_value_control_group.isin([True, False, 1, 0]))
arm1PercentSuccess = arm1Successes/arm1Count
arm1text = "" + "{:.2f}".format(arm1PercentSuccess) + " (" + "{:.0f}".format(arm1Successes) + ")"
# Effect of Arm 2
arm2Successes = sum(order_value_arm2_group.isin([True, 1]))
arm2Count = sum(order_value_arm2_group.isin([True, False, 1, 0]))
arm2PercentSuccess = arm2Successes/arm2Count
zstat, pval2 = proportions_ztest(count=[arm1Successes,arm2Successes], nobs=[arm1Count,arm2Count], alternative='two-sided')
arm2sign = pToSign(pval2)
arm2text = "" + "{:.2f}".format(arm2PercentSuccess) + " (" + "{:.0f}".format(arm2Successes) + ")" + arm2sign + " p:" + "{:.3f}".format(pval2)
# Effect of Arm 3
arm3Successes = sum(order_value_arm3_group.isin([True, 1]))
arm3Count = sum(order_value_arm3_group.isin([True, False, 1, 0]))
arm3PercentSuccess = arm3Successes/arm3Count
zstat, pval3 = proportions_ztest(count=[arm1Successes,arm3Successes], nobs=[arm1Count,arm3Count], alternative='two-sided')
arm3sign = pToSign(pval3)
arm3text = "" + "{:.2f}".format(arm3PercentSuccess) + " (" + "{:.0f}".format(arm3Successes) + ")" + arm3sign + " p:" + "{:.3f}".format(pval3)
# Effect of Arm 4
arm4Successes = sum(order_value_arm4_group.isin([True, 1]))
arm4Count = sum(order_value_arm4_group.isin([True, False, 1, 0]))
arm4PercentSuccess = arm4Successes/arm4Count
zstat, pval4 = proportions_ztest(count=[arm1Successes,arm4Successes], nobs=[arm1Count,arm4Count], alternative='two-sided')
arm4sign = pToSign(pval4)
arm4text = "" + "{:.2f}".format(arm4PercentSuccess) + " (" + "{:.0f}".format(arm4Successes) + ")" + arm4sign + " p:" + "{:.3f}".format(pval4)
# Correct P-values
y = multipletests(pvals=[pval2, pval3, pval4], alpha=0.05, method="holm")
# print(len(y[1][np.where(y[1] < 0.05)])) # y[1] returns corrected P-vals (array)
sigWithCorrection = y[1] < 0.05
if sigWithCorrection[0]:
arm2text = arm2text + ",#"
if sigWithCorrection[1]:
arm3text = arm3text + ",#"
if sigWithCorrection[2]:
arm4text = arm4text + ",#"
# Additional checks
zstat, pval2to4 = proportions_ztest(count=[arm2Successes,arm4Successes], nobs=[arm2Count,arm4Count], alternative='two-sided')
arm2to4sign = pToSign(pval2to4)
arm2to4text = "" + "{:.2f}".format(arm4PercentSuccess - arm2PercentSuccess) + " " + arm2to4sign + " p:" + "{:.3f}".format(pval2to4)
zstat, pval3to4 = proportions_ztest(count=[arm3Successes,arm4Successes], nobs=[arm3Count,arm4Count], alternative='two-sided')
arm3to4sign = pToSign(pval3to4)
arm3to4text = "" + "{:.2f}".format(arm4PercentSuccess - arm3PercentSuccess) + " " + arm3to4sign + " p:" + "{:.3f}".format(pval3to4)
results = {"Outcome": varName,
"Arm1": arm1text,
"Arm2": arm2text,
"Arm3": arm3text,
"Arm4": arm4text,
"Arm2To4": arm2to4text,
"Arm3To4": arm3to4text,
}
return results
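# Illustrative sketch (invented counts): the two-sample proportions z-test
# pattern used above, shown on its own to make the call signature explicit.
def _example_proportions_ztest():
    from statsmodels.stats.proportion import proportions_ztest
    successes = [18, 30]   # e.g. control arm vs. treatment arm
    totals = [100, 100]
    zstat, pval = proportions_ztest(count=successes, nobs=totals, alternative='two-sided')
    return zstat, pval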
def analyzeResults(dta, outputFileName, scoringVars, surveyVersion, primaryOnly=True):
if primaryOnly:
dta = dta[dta.IsPrimaryWave].copy()
dataDir = "C:/Dev/src/ssascams/data/"
''' Analyze the answers'''
writer = pd.ExcelWriter(dataDir + 'RESULTS_' + outputFileName + '.xlsx', engine='xlsxwriter')
# ###############
# Export summary stats
# ###############
demographicVars = ['trustScore', 'TotalIncome', 'incomeAmount', 'Race', 'race5', 'employment3', 'educYears', 'Married', 'marriedI', 'Age', 'ageYears', 'Gender', 'genderI']
allSummaryVars = ["percentCorrect", "surveyArm", "Wave", "daysFromTrainingToTest"] + scoringVars + demographicVars
summaryStats = dta[allSummaryVars].describe()
summaryStats.to_excel(writer, sheet_name="summary_FullPop", startrow=0, header=True, index=True)
grouped = dta[allSummaryVars].groupby(["surveyArm"])
summaryStats = grouped.describe().unstack().transpose().reset_index()
summaryStats.rename(columns={'level_0' :'VarName', 'level_1' :'Metric'}, inplace=True)
summaryStats.sort_values(['VarName', 'Metric'], inplace=True)
summaryStats.to_excel(writer, sheet_name="summary_ByArm", startrow=0, header=True, index=False)
    if not primaryOnly:  # bitwise ~ on a Python bool is always truthy, so use logical negation
grouped = dta[allSummaryVars].groupby(["surveyArm", "Wave"])
summaryStats = grouped.describe().unstack().transpose().reset_index()
summaryStats.rename(columns={'level_0' :'VarName', 'level_1' :'Metric'}, inplace=True)
summaryStats.sort_values(['Wave','VarName', 'Metric'], inplace=True)
# grouped.describe().reset_index().pivot(index='name', values='score', columns='level_1')
summaryStats.to_excel(writer, sheet_name="summary_ByArmAndWave", startrow=0, header=True, index=False)
# summaryStats.to_csv(dataDir + "RESULTS_" + outputFileName + '.csv')
# ###############
# RQ1: What is the effect?
# ###############
row1 = analyzeExperiment_ContinuousVar(dta, "numCorrect")
row2 = analyzeExperiment_ContinuousVar(dta, "numFakeLabeledReal")
row3 = analyzeExperiment_ContinuousVar(dta, "numRealLabeledFake")
row4 = analyzeExperiment_ContinuousVar(dta, "percentCorrect")
pd.DataFrame([row1, row2, row3, row4]).to_excel(writer, sheet_name="r1", startrow=1, header=True, index=True)
##############
# RQ1* Robustness check on result: is the experiment randomized correctly?
##############
# NumCorrect Regression
resultTables = ols('numCorrect ~ C(surveyArm) + daysFromTrainingToTest + trustScore + lIncomeAmount + '
'C(employment3) + educYears + marriedI + ageYears + ageYearsSq + genderI + lose_moneyYN + duration_p2_Quantile ', data=dta).fit().summary().tables
pd.DataFrame(resultTables[0]).to_excel(writer, sheet_name="r1_reg", startrow=1, header=False, index=False)
pd.DataFrame(resultTables[1]).to_excel(writer, sheet_name="r1_reg", startrow=1 + len(resultTables[0]) + 2, header=False, index=False)
# ###############
# RQ2: Communication Type
# ###############
row1 = analyzeExperiment_ContinuousVar(dta, "numEmailsCorrect")
row2 = analyzeExperiment_ContinuousVar(dta, "numSMSesCorrect")
row3 = analyzeExperiment_ContinuousVar(dta, "numLettersCorrect")
pd.DataFrame([row1, row2, row3]).to_excel(writer, sheet_name="r2", startrow=1, header=True, index=True)
##############
# RQ2* Robustness check on Emails result: is the experiment randomized correctly?
##############
# NumEmailsCorrect Regression
resultTables = ols('numEmailsCorrect ~ C(surveyArm) + daysFromTrainingToTest + trustScore + lIncomeAmount + '
'C(employment3) + educYears + marriedI + ageYears + ageYearsSq + genderI + lose_moneyYN + duration_p2_Quantile ', data=dta).fit().summary().tables
pd.DataFrame(resultTables[0]).to_excel(writer, sheet_name="r2_reg", startrow=1, header=False, index=False)
pd.DataFrame(resultTables[1]).to_excel(writer, sheet_name="r2_reg", startrow=1 + len(resultTables[0]) + 2, header=False, index=False)
# ###############
# RQ3: Time Delay
# ###############
resultTables = ols('numCorrect ~ C(surveyArm)*Wave + daysFromTrainingToTest', data=dta).fit().summary().tables
pd.DataFrame(resultTables[0]).to_excel(writer, sheet_name="r3a_CorrectWaveAndDay_Simple", startrow=1, header=False, index=False)
pd.DataFrame(resultTables[1]).to_excel(writer, sheet_name="r3a_CorrectWaveAndDay_Simple", startrow=1 + len(resultTables[0]) + 2, header=False, index=False)
resultTables = ols('numEmailsCorrect ~ C(surveyArm)*Wave + daysFromTrainingToTest', data=dta).fit().summary().tables
| pd.DataFrame(resultTables[0]) | pandas.DataFrame |
import wf_core_data.utils
import requests
import pandas as pd
from collections import OrderedDict
# import pickle
# import json
import datetime
import time
import logging
import os
logger = logging.getLogger(__name__)
DEFAULT_DELAY = 0.25
DEFAULT_MAX_REQUESTS = 50
DEFAULT_WRITE_CHUNK_SIZE = 10
SCHOOLS_BASE_ID = 'appJBT9a4f3b7hWQ2'
DATA_DICT_BASE_ID = 'appJBT9a4f3b7hWQ2'
# DATA_DICT_BASE_ID = 'appHMyIWgnHqVJymL'
class AirtableClient:
def __init__(
self,
api_key=None,
url_base='https://api.airtable.com/v0/'
):
self.api_key = api_key
self.url_base = url_base
if self.api_key is None:
self.api_key = os.getenv('AIRTABLE_API_KEY')
def fetch_tl_data(
self,
pull_datetime=None,
params=None,
base_id=SCHOOLS_BASE_ID,
format='dataframe',
delay=DEFAULT_DELAY,
max_requests=DEFAULT_MAX_REQUESTS
):
pull_datetime = wf_core_data.utils.to_datetime(pull_datetime)
if pull_datetime is None:
pull_datetime = datetime.datetime.now(tz=datetime.timezone.utc)
logger.info('Fetching TL data from Airtable')
records = self.bulk_get(
base_id=base_id,
endpoint='TLs',
params=params
)
tl_data=list()
for record in records:
fields = record.get('fields', {})
datum = OrderedDict([
('teacher_id_at', record.get('id')),
('teacher_created_datetime_at', wf_core_data.utils.to_datetime(record.get('createdTime'))),
('pull_datetime', pull_datetime),
('teacher_full_name_at', fields.get('Full Name')),
('teacher_first_name_at', fields.get('First Name')),
('teacher_middle_name_at', fields.get('Middle Name')),
('teacher_last_name_at', fields.get('Last Name')),
('teacher_title_at', fields.get('Title')),
('teacher_ethnicity_at', fields.get('Race & Ethnicity')),
('teacher_ethnicity_other_at', fields.get('Race & Ethnicity - Other')),
('teacher_income_background_at', fields.get('Income Background')),
('teacher_email_at', fields.get('Email')),
('teacher_email_2_at', fields.get('Email 2')),
('teacher_email_3_at', fields.get('Email 3')),
('teacher_phone_at', fields.get('Phone Number')),
('teacher_phone_2_at', fields.get('Phone Number 2')),
('teacher_employer_at', fields.get('Employer')),
('hub_at', fields.get('Hub')),
('pod_at', fields.get('Pod')),
('user_id_tc', fields.get('TC User ID'))
])
tl_data.append(datum)
if format == 'dataframe':
tl_data = convert_tl_data_to_df(tl_data)
elif format == 'list':
pass
else:
raise ValueError('Data format \'{}\' not recognized'.format(format))
return tl_data
def fetch_location_data(
self,
pull_datetime=None,
params=None,
base_id=SCHOOLS_BASE_ID,
format='dataframe',
delay=DEFAULT_DELAY,
max_requests=DEFAULT_MAX_REQUESTS
):
pull_datetime = wf_core_data.utils.to_datetime(pull_datetime)
if pull_datetime is None:
pull_datetime = datetime.datetime.now(tz=datetime.timezone.utc)
logger.info('Fetching location data from Airtable')
records = self.bulk_get(
base_id=base_id,
endpoint='Locations',
params=params
)
location_data=list()
for record in records:
fields = record.get('fields', {})
datum = OrderedDict([
('location_id_at', record.get('id')),
('location_created_datetime_at', wf_core_data.utils.to_datetime(record.get('createdTime'))),
('pull_datetime', pull_datetime),
('location_address_at', fields.get('Address')),
('school_id_at', wf_core_data.utils.to_singleton(fields.get('School Name'))),
('school_location_start_at', wf_core_data.utils.to_date(fields.get('Start of time at location'))),
('school_location_end_at', wf_core_data.utils.to_date(fields.get('End of time at location')))
])
location_data.append(datum)
if format == 'dataframe':
location_data = convert_location_data_to_df(location_data)
elif format == 'list':
pass
else:
raise ValueError('Data format \'{}\' not recognized'.format(format))
return location_data
def fetch_teacher_school_data(
self,
pull_datetime=None,
params=None,
base_id=SCHOOLS_BASE_ID,
format='dataframe',
delay=DEFAULT_DELAY,
max_requests=DEFAULT_MAX_REQUESTS
):
pull_datetime = wf_core_data.utils.to_datetime(pull_datetime)
if pull_datetime is None:
pull_datetime = datetime.datetime.now(tz=datetime.timezone.utc)
logger.info('Fetching teacher school association data from Airtable')
records = self.bulk_get(
base_id=base_id,
endpoint='Teachers x Schools',
params=params
)
teacher_school_data=list()
for record in records:
fields = record.get('fields', {})
datum = OrderedDict([
('teacher_school_id_at', record.get('id')),
('teacher_school_created_datetime_at', wf_core_data.utils.to_datetime(record.get('createdTime'))),
('pull_datetime', pull_datetime),
('teacher_id_at', fields.get('TL')),
('school_id_at', fields.get('School')),
('teacher_school_start_at', wf_core_data.utils.to_date(fields.get('Start Date'))),
('teacher_school_end_at', wf_core_data.utils.to_date(fields.get('End Date'))),
('teacher_school_active_at', wf_core_data.utils.to_boolean(fields.get('Currently Active')))
])
teacher_school_data.append(datum)
if format == 'dataframe':
teacher_school_data = convert_teacher_school_data_to_df(teacher_school_data)
elif format == 'list':
pass
else:
raise ValueError('Data format \'{}\' not recognized'.format(format))
return teacher_school_data
def fetch_school_data(
self,
pull_datetime=None,
params=None,
base_id=SCHOOLS_BASE_ID,
format='dataframe',
delay=DEFAULT_DELAY,
max_requests=DEFAULT_MAX_REQUESTS
):
pull_datetime = wf_core_data.utils.to_datetime(pull_datetime)
if pull_datetime is None:
pull_datetime = datetime.datetime.now(tz=datetime.timezone.utc)
logger.info('Fetching school data from Airtable')
records = self.bulk_get(
base_id=base_id,
endpoint='Schools',
params=params
)
school_data=list()
for record in records:
fields = record.get('fields', {})
datum = OrderedDict([
('school_id_at', record.get('id')),
('school_created_datetime_at', wf_core_data.utils.to_datetime(record.get('createdTime'))),
('pull_datetime', pull_datetime),
('hub_id_at', fields.get('Hub')),
('pod_id_at', fields.get('Pod')),
('school_name_at', fields.get('Name')),
('school_short_name_at', fields.get('Short Name')),
('school_status_at', fields.get('School Status')),
('school_ssj_stage_at', fields.get('School Startup Stage')),
('school_governance_model_at', fields.get('Governance Model')),
('school_ages_served_at', fields.get('Ages served')),
('school_location_ids_at', fields.get('Locations')),
('school_id_tc', fields.get('TC school ID'))
])
school_data.append(datum)
if format == 'dataframe':
school_data = convert_school_data_to_df(school_data)
elif format == 'list':
pass
else:
raise ValueError('Data format \'{}\' not recognized'.format(format))
return school_data
def fetch_hub_data(
self,
pull_datetime=None,
params=None,
base_id=SCHOOLS_BASE_ID,
format='dataframe',
delay=DEFAULT_DELAY,
max_requests=DEFAULT_MAX_REQUESTS
):
pull_datetime = wf_core_data.utils.to_datetime(pull_datetime)
if pull_datetime is None:
pull_datetime = datetime.datetime.now(tz=datetime.timezone.utc)
logger.info('Fetching hub data from Airtable')
records = self.bulk_get(
base_id=base_id,
endpoint='Hubs',
params=params
)
hub_data=list()
for record in records:
fields = record.get('fields', {})
datum = OrderedDict([
('hub_id_at', record.get('id')),
('hub_created_datetime_at', wf_core_data.utils.to_datetime(record.get('createdTime'))),
('pull_datetime', pull_datetime),
('hub_name_at', fields.get('Name'))
])
hub_data.append(datum)
if format == 'dataframe':
hub_data = convert_hub_data_to_df(hub_data)
elif format == 'list':
pass
else:
raise ValueError('Data format \'{}\' not recognized'.format(format))
return hub_data
def fetch_pod_data(
self,
pull_datetime=None,
params=None,
base_id=SCHOOLS_BASE_ID,
format='dataframe',
delay=DEFAULT_DELAY,
max_requests=DEFAULT_MAX_REQUESTS
):
pull_datetime = wf_core_data.utils.to_datetime(pull_datetime)
if pull_datetime is None:
pull_datetime = datetime.datetime.now(tz=datetime.timezone.utc)
logger.info('Fetching pod data from Airtable')
records = self.bulk_get(
base_id=base_id,
endpoint='Pods',
params=params
)
pod_data=list()
for record in records:
fields = record.get('fields', {})
datum = OrderedDict([
('pod_id_at', record.get('id')),
('pod_created_datetime_at', wf_core_data.utils.to_datetime(record.get('createdTime'))),
('pull_datetime', pull_datetime),
('pod_name_at', fields.get('Name'))
])
pod_data.append(datum)
if format == 'dataframe':
pod_data = convert_pod_data_to_df(pod_data)
elif format == 'list':
pass
else:
raise ValueError('Data format \'{}\' not recognized'.format(format))
return pod_data
def fetch_ethnicity_lookup(self):
ethnicity_categories = self.fetch_ethnicity_categories()
ethnicity_mapping = self.fetch_ethnicity_mapping()
ethnicity_lookup = (
ethnicity_mapping
.join(
ethnicity_categories['ethnicity_category'],
how='left',
on='ethnicity_category_id_at'
)
.reindex(columns=[
'ethnicity_category'
])
.sort_index()
)
return ethnicity_lookup
def fetch_gender_lookup(self):
gender_categories = self.fetch_gender_categories()
gender_mapping = self.fetch_gender_mapping()
gender_lookup = (
gender_mapping
.join(
gender_categories['gender_category'],
how='left',
on='gender_category_id_at'
)
.reindex(columns=[
'gender_category'
])
.sort_index()
.sort_values('gender_category')
)
return gender_lookup
def fetch_household_income_lookup(self):
household_income_categories = self.fetch_household_income_categories()
household_income_mapping = self.fetch_household_income_mapping()
household_income_lookup = (
household_income_mapping
.join(
household_income_categories['household_income_category'],
how='left',
on='household_income_category_id_at'
)
.reindex(columns=[
'household_income_category'
])
.sort_index()
.sort_values('household_income_category')
)
return household_income_lookup
def fetch_nps_lookup(self):
nps_categories = self.fetch_nps_categories()
nps_mapping = self.fetch_nps_mapping()
nps_lookup = (
nps_mapping
.join(
nps_categories['nps_category'],
how='left',
on='nps_category_id_at'
)
.reindex(columns=[
'nps_category'
])
.sort_index()
)
return nps_lookup
def fetch_boolean_lookup(self):
boolean_categories = self.fetch_boolean_categories()
boolean_mapping = self.fetch_boolean_mapping()
boolean_lookup = (
boolean_mapping
.join(
boolean_categories['boolean_category'],
how='left',
on='boolean_category_id_at'
)
.reindex(columns=[
'boolean_category'
])
.sort_index()
.sort_values('boolean_category')
)
return boolean_lookup
def fetch_ethnicity_categories(
self,
pull_datetime=None,
params=None,
base_id=DATA_DICT_BASE_ID,
format='dataframe',
delay=DEFAULT_DELAY,
max_requests=DEFAULT_MAX_REQUESTS
):
pull_datetime = wf_core_data.utils.to_datetime(pull_datetime)
if pull_datetime is None:
pull_datetime = datetime.datetime.now(tz=datetime.timezone.utc)
logger.info('Fetching ethnicity categories from Airtable')
records = self.bulk_get(
base_id=base_id,
endpoint='Ethnicity categories',
params=params
)
ethnicity_categories=list()
for record in records:
fields = record.get('fields', {})
datum = OrderedDict([
('ethnicity_category_id_at', record.get('id')),
('ethnicity_category_created_datetime_at', wf_core_data.utils.to_datetime(record.get('createdTime'))),
('pull_datetime', pull_datetime),
('ethnicity_category', fields.get('ethnicity_category')),
('ethnicity_display_name_english', fields.get('ethnicity_display_name_english')),
                ('ethnicity_display_name_spanish', fields.get('ethnicity_display_name_spanish'))
            ])
ethnicity_categories.append(datum)
if format == 'dataframe':
ethnicity_categories = convert_ethnicity_categories_to_df(ethnicity_categories)
elif format == 'list':
pass
else:
raise ValueError('Data format \'{}\' not recognized'.format(format))
return ethnicity_categories
def fetch_gender_categories(
self,
pull_datetime=None,
params=None,
base_id=DATA_DICT_BASE_ID,
format='dataframe',
delay=DEFAULT_DELAY,
max_requests=DEFAULT_MAX_REQUESTS
):
pull_datetime = wf_core_data.utils.to_datetime(pull_datetime)
if pull_datetime is None:
pull_datetime = datetime.datetime.now(tz=datetime.timezone.utc)
logger.info('Fetching gender categories from Airtable')
records = self.bulk_get(
base_id=base_id,
endpoint='Gender categories',
params=params
)
gender_categories=list()
for record in records:
fields = record.get('fields', {})
datum = OrderedDict([
('gender_category_id_at', record.get('id')),
('gender_category_created_datetime_at', wf_core_data.utils.to_datetime(record.get('createdTime'))),
('pull_datetime', pull_datetime),
('gender_category', fields.get('gender_category')),
('gender_display_name_english', fields.get('gender_display_name_english')),
                ('gender_display_name_spanish', fields.get('gender_display_name_spanish'))
            ])
gender_categories.append(datum)
if format == 'dataframe':
gender_categories = convert_gender_categories_to_df(gender_categories)
elif format == 'list':
pass
else:
raise ValueError('Data format \'{}\' not recognized'.format(format))
return gender_categories
def fetch_household_income_categories(
self,
pull_datetime=None,
params=None,
base_id=DATA_DICT_BASE_ID,
format='dataframe',
delay=DEFAULT_DELAY,
max_requests=DEFAULT_MAX_REQUESTS
):
pull_datetime = wf_core_data.utils.to_datetime(pull_datetime)
if pull_datetime is None:
pull_datetime = datetime.datetime.now(tz=datetime.timezone.utc)
logger.info('Fetching household income categories from Airtable')
records = self.bulk_get(
base_id=base_id,
endpoint='Household income categories',
params=params
)
household_income_categories=list()
for record in records:
fields = record.get('fields', {})
datum = OrderedDict([
('household_income_category_id_at', record.get('id')),
('household_income_category_created_datetime_at', wf_core_data.utils.to_datetime(record.get('createdTime'))),
('pull_datetime', pull_datetime),
('household_income_category', fields.get('household_income_category')),
('household_income_display_name_english', fields.get('household_income_display_name_english')),
                ('household_income_display_name_spanish', fields.get('household_income_display_name_spanish'))
            ])
household_income_categories.append(datum)
if format == 'dataframe':
household_income_categories = convert_household_income_categories_to_df(household_income_categories)
elif format == 'list':
pass
else:
raise ValueError('Data format \'{}\' not recognized'.format(format))
return household_income_categories
def fetch_nps_categories(
self,
pull_datetime=None,
params=None,
base_id=DATA_DICT_BASE_ID,
format='dataframe',
delay=DEFAULT_DELAY,
max_requests=DEFAULT_MAX_REQUESTS
):
pull_datetime = wf_core_data.utils.to_datetime(pull_datetime)
if pull_datetime is None:
pull_datetime = datetime.datetime.now(tz=datetime.timezone.utc)
logger.info('Fetching NPS categories from Airtable')
records = self.bulk_get(
base_id=base_id,
endpoint='NPS categories',
params=params
)
nps_categories=list()
for record in records:
fields = record.get('fields', {})
datum = OrderedDict([
('nps_category_id_at', record.get('id')),
('nps_category_created_datetime_at', wf_core_data.utils.to_datetime(record.get('createdTime'))),
('pull_datetime', pull_datetime),
('nps_category', fields.get('nps_category')),
('nps_display_name_english', fields.get('nps_display_name_english')),
                ('nps_display_name_spanish', fields.get('nps_display_name_spanish'))
            ])
nps_categories.append(datum)
if format == 'dataframe':
nps_categories = convert_nps_categories_to_df(nps_categories)
elif format == 'list':
pass
else:
raise ValueError('Data format \'{}\' not recognized'.format(format))
return nps_categories
def fetch_boolean_categories(
self,
pull_datetime=None,
params=None,
base_id=DATA_DICT_BASE_ID,
format='dataframe',
delay=DEFAULT_DELAY,
max_requests=DEFAULT_MAX_REQUESTS
):
pull_datetime = wf_core_data.utils.to_datetime(pull_datetime)
if pull_datetime is None:
pull_datetime = datetime.datetime.now(tz=datetime.timezone.utc)
logger.info('Fetching boolean categories from Airtable')
records = self.bulk_get(
base_id=base_id,
endpoint='Boolean categories',
params=params
)
boolean_categories=list()
for record in records:
fields = record.get('fields', {})
datum = OrderedDict([
('boolean_category_id_at', record.get('id')),
('boolean_category_created_datetime_at', wf_core_data.utils.to_datetime(record.get('createdTime'))),
('pull_datetime', pull_datetime),
('boolean_category', wf_core_data.utils.to_boolean(fields.get('boolean_category'))),
('boolean_display_name_english', fields.get('boolean_display_name_english')),
                ('boolean_display_name_spanish', fields.get('boolean_display_name_spanish'))
            ])
boolean_categories.append(datum)
if format == 'dataframe':
boolean_categories = convert_boolean_categories_to_df(boolean_categories)
elif format == 'list':
pass
else:
raise ValueError('Data format \'{}\' not recognized'.format(format))
return boolean_categories
def fetch_ethnicity_mapping(
self,
pull_datetime=None,
params=None,
base_id=DATA_DICT_BASE_ID,
format='dataframe',
delay=DEFAULT_DELAY,
max_requests=DEFAULT_MAX_REQUESTS
):
pull_datetime = wf_core_data.utils.to_datetime(pull_datetime)
if pull_datetime is None:
pull_datetime = datetime.datetime.now(tz=datetime.timezone.utc)
logger.info('Fetching ethnicity mapping from Airtable')
records = self.bulk_get(
base_id=base_id,
endpoint='Ethnicity mapping',
params=params
)
ethnicity_mapping=list()
for record in records:
fields = record.get('fields', {})
datum = OrderedDict([
('ethnicity_mapping_id_at', record.get('id')),
('ethnicity_mapping_created_datetime_at', wf_core_data.utils.to_datetime(record.get('createdTime'))),
('pull_datetime', pull_datetime),
('ethnicity_response', fields.get('ethnicity_response')),
('ethnicity_category_id_at', fields.get('ethnicity_category'))
])
ethnicity_mapping.append(datum)
if format == 'dataframe':
ethnicity_mapping = convert_ethnicity_mapping_to_df(ethnicity_mapping)
elif format == 'list':
pass
else:
raise ValueError('Data format \'{}\' not recognized'.format(format))
return ethnicity_mapping
def fetch_gender_mapping(
self,
pull_datetime=None,
params=None,
base_id=DATA_DICT_BASE_ID,
format='dataframe',
delay=DEFAULT_DELAY,
max_requests=DEFAULT_MAX_REQUESTS
):
pull_datetime = wf_core_data.utils.to_datetime(pull_datetime)
if pull_datetime is None:
pull_datetime = datetime.datetime.now(tz=datetime.timezone.utc)
logger.info('Fetching gender mapping from Airtable')
records = self.bulk_get(
base_id=base_id,
endpoint='Gender mapping',
params=params
)
gender_mapping=list()
for record in records:
fields = record.get('fields', {})
datum = OrderedDict([
('gender_mapping_id_at', record.get('id')),
('gender_mapping_created_datetime_at', wf_core_data.utils.to_datetime(record.get('createdTime'))),
('pull_datetime', pull_datetime),
('gender_response', fields.get('gender_response')),
('gender_category_id_at', fields.get('gender_category'))
])
gender_mapping.append(datum)
if format == 'dataframe':
gender_mapping = convert_gender_mapping_to_df(gender_mapping)
elif format == 'list':
pass
else:
raise ValueError('Data format \'{}\' not recognized'.format(format))
return gender_mapping
def fetch_household_income_mapping(
self,
pull_datetime=None,
params=None,
base_id=DATA_DICT_BASE_ID,
format='dataframe',
delay=DEFAULT_DELAY,
max_requests=DEFAULT_MAX_REQUESTS
):
pull_datetime = wf_core_data.utils.to_datetime(pull_datetime)
if pull_datetime is None:
pull_datetime = datetime.datetime.now(tz=datetime.timezone.utc)
logger.info('Fetching household income mapping from Airtable')
records = self.bulk_get(
base_id=base_id,
endpoint='Household income mapping',
params=params
)
household_income_mapping=list()
for record in records:
fields = record.get('fields', {})
datum = OrderedDict([
('household_income_mapping_id_at', record.get('id')),
('household_income_mapping_created_datetime_at', wf_core_data.utils.to_datetime(record.get('createdTime'))),
('pull_datetime', pull_datetime),
('household_income_response', fields.get('household_income_response')),
('household_income_category_id_at', fields.get('household_income_category'))
])
household_income_mapping.append(datum)
if format == 'dataframe':
household_income_mapping = convert_household_income_mapping_to_df(household_income_mapping)
elif format == 'list':
pass
else:
raise ValueError('Data format \'{}\' not recognized'.format(format))
return household_income_mapping
def fetch_nps_mapping(
self,
pull_datetime=None,
params=None,
base_id=DATA_DICT_BASE_ID,
format='dataframe',
delay=DEFAULT_DELAY,
max_requests=DEFAULT_MAX_REQUESTS
):
pull_datetime = wf_core_data.utils.to_datetime(pull_datetime)
if pull_datetime is None:
pull_datetime = datetime.datetime.now(tz=datetime.timezone.utc)
logger.info('Fetching NPS mapping from Airtable')
records = self.bulk_get(
base_id=base_id,
endpoint='NPS mapping',
params=params
)
nps_mapping=list()
for record in records:
fields = record.get('fields', {})
datum = OrderedDict([
('nps_mapping_id_at', record.get('id')),
('nps_mapping_created_datetime_at', wf_core_data.utils.to_datetime(record.get('createdTime'))),
('pull_datetime', pull_datetime),
('nps_response', fields.get('nps_response')),
('nps_category_id_at', fields.get('nps_category'))
])
nps_mapping.append(datum)
if format == 'dataframe':
nps_mapping = convert_nps_mapping_to_df(nps_mapping)
elif format == 'list':
pass
else:
raise ValueError('Data format \'{}\' not recognized'.format(format))
return nps_mapping
def fetch_boolean_mapping(
self,
pull_datetime=None,
params=None,
base_id=DATA_DICT_BASE_ID,
format='dataframe',
delay=DEFAULT_DELAY,
max_requests=DEFAULT_MAX_REQUESTS
):
pull_datetime = wf_core_data.utils.to_datetime(pull_datetime)
if pull_datetime is None:
pull_datetime = datetime.datetime.now(tz=datetime.timezone.utc)
logger.info('Fetching boolean mapping from Airtable')
records = self.bulk_get(
base_id=base_id,
endpoint='Boolean mapping',
params=params
)
boolean_mapping=list()
for record in records:
fields = record.get('fields', {})
datum = OrderedDict([
('boolean_mapping_id_at', record.get('id')),
('boolean_mapping_created_datetime_at', wf_core_data.utils.to_datetime(record.get('createdTime'))),
('pull_datetime', pull_datetime),
('boolean_response', fields.get('boolean_response')),
('boolean_category_id_at', fields.get('boolean_category'))
])
boolean_mapping.append(datum)
if format == 'dataframe':
boolean_mapping = convert_boolean_mapping_to_df(boolean_mapping)
elif format == 'list':
pass
else:
raise ValueError('Data format \'{}\' not recognized'.format(format))
return boolean_mapping
def write_dataframe(
self,
df,
base_id,
endpoint,
params=None,
delay=DEFAULT_DELAY,
max_requests=DEFAULT_MAX_REQUESTS,
write_chunk_size=DEFAULT_WRITE_CHUNK_SIZE
):
num_records = len(df)
        num_chunks = -(-num_records // write_chunk_size)  # ceiling division avoids posting an empty trailing chunk
logger.info('Writing {} records in {} chunks'.format(
num_records,
num_chunks
))
for chunk_index in range(num_chunks):
start_row_index = chunk_index*write_chunk_size
end_row_index = min(
(chunk_index + 1)*write_chunk_size,
num_records
)
chunk_df = df.iloc[start_row_index:end_row_index]
chunk_list = chunk_df.to_dict(orient='records')
chunk_dict = {'records': [{'fields': row_dict} for row_dict in chunk_list]}
logger.info('Writing chunk {}: rows {} to {}'.format(
chunk_index,
start_row_index,
end_row_index
))
self.post(
base_id=base_id,
endpoint=endpoint,
data=chunk_dict
)
time.sleep(delay)
def bulk_get(
self,
base_id,
endpoint,
params=None,
delay=DEFAULT_DELAY,
max_requests=DEFAULT_MAX_REQUESTS
):
if params is None:
params = dict()
num_requests = 0
records = list()
while True:
data = self.get(
base_id=base_id,
endpoint=endpoint,
params=params
)
if 'records' in data.keys():
                logger.info('Returned {} records'.format(len(data.get('records'))))
records.extend(data.get('records'))
num_requests += 1
if num_requests >= max_requests:
logger.warning('Reached maximum number of requests ({}). Terminating.'.format(
max_requests
))
break
offset = data.get('offset')
if offset is None:
break
params['offset'] = offset
time.sleep(delay)
return records
def post(
self,
base_id,
endpoint,
data
):
headers = dict()
if self.api_key is not None:
headers['Authorization'] = 'Bearer {}'.format(self.api_key)
r = requests.post(
'{}{}/{}'.format(
self.url_base,
base_id,
endpoint
),
headers=headers,
json=data
)
        if r.status_code != 200:
            error_message = 'Airtable POST request returned status code {}'.format(r.status_code)
            logger.error(error_message)
            r.raise_for_status()
return r.json()
def get(
self,
base_id,
endpoint,
params=None
):
headers = dict()
if self.api_key is not None:
headers['Authorization'] = 'Bearer {}'.format(self.api_key)
r = requests.get(
'{}{}/{}'.format(
self.url_base,
base_id,
endpoint
),
params=params,
headers=headers
)
        if r.status_code != 200:
            error_message = 'Airtable GET request returned status code {}'.format(r.status_code)
            logger.error(error_message)
            r.raise_for_status()
return r.json()
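# Illustrative usage sketch for the client above. The API key placeholder is an
# assumption; only methods defined in this module are called.
def _example_airtable_client_usage():
    client = AirtableClient(api_key='YOUR_AIRTABLE_API_KEY')  # hypothetical key
    schools = client.fetch_school_data(format='dataframe')
    teachers = client.fetch_tl_data(format='dataframe')
    logger.info('Fetched %s schools and %s teachers', len(schools), len(teachers))
    return schools, teachers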
def convert_tl_data_to_df(tl_data):
if len(tl_data) == 0:
return pd.DataFrame()
tl_data_df = pd.DataFrame(
tl_data,
dtype='object'
)
tl_data_df['pull_datetime'] = pd.to_datetime(tl_data_df['pull_datetime'])
tl_data_df['teacher_created_datetime_at'] = pd.to_datetime(tl_data_df['teacher_created_datetime_at'])
# school_data_df['user_id_tc'] = pd.to_numeric(tl_data_df['user_id_tc']).astype('Int64')
tl_data_df = tl_data_df.astype({
        'teacher_full_name_at': 'string',
        'teacher_first_name_at': 'string',
'teacher_middle_name_at': 'string',
'teacher_last_name_at': 'string',
'teacher_title_at': 'string',
'teacher_ethnicity_at': 'string',
'teacher_ethnicity_other_at': 'string',
'teacher_income_background_at': 'string',
'teacher_email_at': 'string',
'teacher_email_2_at': 'string',
'teacher_email_3_at': 'string',
'teacher_phone_at': 'string',
'teacher_phone_2_at': 'string',
'teacher_employer_at': 'string',
'hub_at': 'string',
'pod_at': 'string',
'user_id_tc': 'string'
})
tl_data_df.set_index('teacher_id_at', inplace=True)
return tl_data_df
def convert_location_data_to_df(location_data):
if len(location_data) == 0:
return pd.DataFrame()
location_data_df = pd.DataFrame(
location_data,
dtype='object'
)
location_data_df['pull_datetime'] = pd.to_datetime(location_data_df['pull_datetime'])
location_data_df['location_created_datetime_at'] = pd.to_datetime(location_data_df['location_created_datetime_at'])
location_data_df = location_data_df.astype({
'location_id_at': 'string',
'location_address_at': 'string',
'school_id_at': 'string'
})
location_data_df.set_index('location_id_at', inplace=True)
return location_data_df
def convert_teacher_school_data_to_df(teacher_school_data):
if len(teacher_school_data) == 0:
return pd.DataFrame()
teacher_school_data_df = pd.DataFrame(
teacher_school_data,
dtype='object'
)
teacher_school_data_df['pull_datetime'] = pd.to_datetime(teacher_school_data_df['pull_datetime'])
teacher_school_data_df['teacher_school_created_datetime_at'] = pd.to_datetime(teacher_school_data_df['teacher_school_created_datetime_at'])
teacher_school_data_df = teacher_school_data_df.astype({
'teacher_school_active_at': 'bool'
})
teacher_school_data_df.set_index('teacher_school_id_at', inplace=True)
return teacher_school_data_df
def convert_school_data_to_df(school_data):
if len(school_data) == 0:
return pd.DataFrame()
school_data_df = pd.DataFrame(
school_data,
dtype='object'
)
school_data_df['pull_datetime'] = pd.to_datetime(school_data_df['pull_datetime'])
school_data_df['school_created_datetime_at'] = pd.to_datetime(school_data_df['school_created_datetime_at'])
school_data_df['hub_id_at'] = school_data_df['hub_id_at'].apply(wf_core_data.utils.to_singleton)
school_data_df['pod_id_at'] = school_data_df['pod_id_at'].apply(wf_core_data.utils.to_singleton)
school_data_df['school_id_tc'] = pd.to_numeric(school_data_df['school_id_tc']).astype('Int64')
school_data_df = school_data_df.astype({
'school_id_at': 'string',
'hub_id_at': 'string',
'pod_id_at': 'string',
'school_name_at': 'string',
'school_short_name_at': 'string',
'school_status_at': 'string',
'school_ssj_stage_at': 'string',
'school_governance_model_at': 'string',
})
school_data_df.set_index('school_id_at', inplace=True)
return school_data_df
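# Illustrative sketch (invented values) of why school_id_tc is run through
# pd.to_numeric(...).astype('Int64'): the nullable Int64 dtype keeps missing
# ids as <NA> instead of coercing the whole column to float.
def _example_nullable_int_cast():
    ids = pd.Series(['12', None, '7'])
    return pd.to_numeric(ids).astype('Int64')  # values: 12, <NA>, 7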
def convert_hub_data_to_df(hub_data):
if len(hub_data) == 0:
return pd.DataFrame()
hub_data_df = pd.DataFrame(
hub_data,
dtype='object'
)
hub_data_df['pull_datetime'] = pd.to_datetime(hub_data_df['pull_datetime'])
hub_data_df['hub_created_datetime_at'] = pd.to_datetime(hub_data_df['hub_created_datetime_at'])
hub_data_df = hub_data_df.astype({
'hub_id_at': 'string',
'hub_name_at': 'string'
})
hub_data_df.set_index('hub_id_at', inplace=True)
return hub_data_df
def convert_pod_data_to_df(pod_data):
if len(pod_data) == 0:
return pd.DataFrame()
pod_data_df = pd.DataFrame(
pod_data,
dtype='object'
)
pod_data_df['pull_datetime'] = pd.to_datetime(pod_data_df['pull_datetime'])
pod_data_df['pod_created_datetime_at'] = pd.to_datetime(pod_data_df['pod_created_datetime_at'])
pod_data_df = pod_data_df.astype({
'pod_id_at': 'string',
'pod_name_at': 'string'
})
pod_data_df.set_index('pod_id_at', inplace=True)
return pod_data_df
def convert_ethnicity_categories_to_df(ethnicity_categories):
if len(ethnicity_categories) == 0:
return pd.DataFrame()
ethnicity_categories_df = pd.DataFrame(
ethnicity_categories,
dtype='object'
)
ethnicity_categories_df['pull_datetime'] = pd.to_datetime(ethnicity_categories_df['pull_datetime'])
ethnicity_categories_df['ethnicity_category_created_datetime_at'] = pd.to_datetime(ethnicity_categories_df['ethnicity_category_created_datetime_at'])
ethnicity_categories_df = ethnicity_categories_df.astype({
'ethnicity_category_id_at': 'string',
'ethnicity_category': 'string',
'ethnicity_display_name_english': 'string',
'ethnicity_display_name_spanish': 'string'
})
ethnicity_categories_df.set_index('ethnicity_category_id_at', inplace=True)
return ethnicity_categories_df
def convert_gender_categories_to_df(gender_categories):
if len(gender_categories) == 0:
return pd.DataFrame()
gender_categories_df = pd.DataFrame(
gender_categories,
dtype='object'
)
gender_categories_df['pull_datetime'] = pd.to_datetime(gender_categories_df['pull_datetime'])
gender_categories_df['gender_category_created_datetime_at'] = pd.to_datetime(gender_categories_df['gender_category_created_datetime_at'])
gender_categories_df = gender_categories_df.astype({
'gender_category_id_at': 'string',
'gender_category': 'string',
'gender_display_name_english': 'string',
'gender_display_name_spanish': 'string'
})
gender_categories_df.set_index('gender_category_id_at', inplace=True)
return gender_categories_df
def convert_household_income_categories_to_df(household_income_categories):
if len(household_income_categories) == 0:
return pd.DataFrame()
household_income_categories_df = pd.DataFrame(
household_income_categories,
dtype='object'
)
household_income_categories_df['pull_datetime'] = pd.to_datetime(household_income_categories_df['pull_datetime'])
household_income_categories_df['household_income_category_created_datetime_at'] = pd.to_datetime(household_income_categories_df['household_income_category_created_datetime_at'])
household_income_categories_df = household_income_categories_df.astype({
'household_income_category_id_at': 'string',
'household_income_category': 'string',
'household_income_display_name_english': 'string',
'household_income_display_name_spanish': 'string'
})
household_income_categories_df.set_index('household_income_category_id_at', inplace=True)
return household_income_categories_df
def convert_nps_categories_to_df(nps_categories):
if len(nps_categories) == 0:
return pd.DataFrame()
nps_categories_df = pd.DataFrame(
nps_categories,
dtype='object'
)
nps_categories_df['pull_datetime'] = pd.to_datetime(nps_categories_df['pull_datetime'])
nps_categories_df['nps_category_created_datetime_at'] = pd.to_datetime(nps_categories_df['nps_category_created_datetime_at'])
nps_categories_df = nps_categories_df.astype({
'nps_category_id_at': 'string',
'nps_category': 'string',
'nps_display_name_english': 'string',
'nps_display_name_spanish': 'string'
})
nps_categories_df.set_index('nps_category_id_at', inplace=True)
return nps_categories_df
def convert_boolean_categories_to_df(boolean_categories):
if len(boolean_categories) == 0:
return pd.DataFrame()
boolean_categories_df = pd.DataFrame(
boolean_categories,
dtype='object'
)
boolean_categories_df['pull_datetime'] = pd.to_datetime(boolean_categories_df['pull_datetime'])
boolean_categories_df['boolean_category_created_datetime_at'] = pd.to_datetime(boolean_categories_df['boolean_category_created_datetime_at'])
boolean_categories_df = boolean_categories_df.astype({
'boolean_category_id_at': 'string',
'boolean_category': 'bool',
'boolean_display_name_english': 'string',
'boolean_display_name_spanish': 'string'
})
boolean_categories_df.set_index('boolean_category_id_at', inplace=True)
return boolean_categories_df
def convert_ethnicity_mapping_to_df(ethnicity_mapping):
if len(ethnicity_mapping) == 0:
return pd.DataFrame()
ethnicity_mapping_df = pd.DataFrame(
ethnicity_mapping,
dtype='object'
)
ethnicity_mapping_df['pull_datetime'] = pd.to_datetime(ethnicity_mapping_df['pull_datetime'])
ethnicity_mapping_df['ethnicity_mapping_created_datetime_at'] = pd.to_datetime(ethnicity_mapping_df['ethnicity_mapping_created_datetime_at'])
ethnicity_mapping_df['ethnicity_category_id_at'] = ethnicity_mapping_df['ethnicity_category_id_at'].apply(wf_core_data.utils.to_singleton)
ethnicity_mapping_df = ethnicity_mapping_df.astype({
'ethnicity_mapping_id_at': 'string',
'ethnicity_response': 'string',
'ethnicity_category_id_at': 'string'
})
ethnicity_mapping_df.set_index('ethnicity_response', inplace=True)
return ethnicity_mapping_df
def convert_gender_mapping_to_df(gender_mapping):
if len(gender_mapping) == 0:
return pd.DataFrame()
gender_mapping_df = pd.DataFrame(
gender_mapping,
dtype='object'
)
gender_mapping_df['pull_datetime'] = pd.to_datetime(gender_mapping_df['pull_datetime'])
gender_mapping_df['gender_mapping_created_datetime_at'] = pd.to_datetime(gender_mapping_df['gender_mapping_created_datetime_at'])
gender_mapping_df['gender_category_id_at'] = gender_mapping_df['gender_category_id_at'].apply(wf_core_data.utils.to_singleton)
gender_mapping_df = gender_mapping_df.astype({
'gender_mapping_id_at': 'string',
'gender_response': 'string',
'gender_category_id_at': 'string'
})
gender_mapping_df.set_index('gender_response', inplace=True)
return gender_mapping_df
def convert_household_income_mapping_to_df(household_income_mapping):
if len(household_income_mapping) == 0:
return pd.DataFrame()
household_income_mapping_df = pd.DataFrame(
household_income_mapping,
dtype='object'
)
household_income_mapping_df['pull_datetime'] = pd.to_datetime(household_income_mapping_df['pull_datetime'])
household_income_mapping_df['household_income_mapping_created_datetime_at'] = pd.to_datetime(household_income_mapping_df['household_income_mapping_created_datetime_at'])
household_income_mapping_df['household_income_category_id_at'] = household_income_mapping_df['household_income_category_id_at'].apply(wf_core_data.utils.to_singleton)
household_income_mapping_df = household_income_mapping_df.astype({
'household_income_mapping_id_at': 'string',
'household_income_response': 'string',
'household_income_category_id_at': 'string'
})
household_income_mapping_df.set_index('household_income_response', inplace=True)
return household_income_mapping_df
def convert_nps_mapping_to_df(nps_mapping):
if len(nps_mapping) == 0:
return pd.DataFrame()
nps_mapping_df = pd.DataFrame(
nps_mapping,
dtype='object'
)
nps_mapping_df['pull_datetime'] = pd.to_datetime(nps_mapping_df['pull_datetime'])
nps_mapping_df['nps_mapping_created_datetime_at'] = pd.to_datetime(nps_mapping_df['nps_mapping_created_datetime_at'])
nps_mapping_df['nps_category_id_at'] = nps_mapping_df['nps_category_id_at'].apply(wf_core_data.utils.to_singleton)
nps_mapping_df = nps_mapping_df.astype({
'nps_mapping_id_at': 'string',
'nps_response': 'int',
'nps_category_id_at': 'string'
})
nps_mapping_df.set_index('nps_response', inplace=True)
return nps_mapping_df
def convert_boolean_mapping_to_df(boolean_mapping):
if len(boolean_mapping) == 0:
return pd.DataFrame()
boolean_mapping_df = pd.DataFrame(
boolean_mapping,
dtype='object'
)
boolean_mapping_df['pull_datetime'] = pd.to_datetime(boolean_mapping_df['pull_datetime'])
boolean_mapping_df['boolean_mapping_created_datetime_at'] = | pd.to_datetime(boolean_mapping_df['boolean_mapping_created_datetime_at']) | pandas.to_datetime |
#########################
## ##
## <NAME> ##
## May 10, 2021 ##
## ##
#########################
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import statsmodels.api as sm
from statsmodels.tools.tools import add_constant
from scipy.optimize import least_squares
from scipy.stats import norm, t
alpha = 0.05
dat = pd.read_csv('ChickWeight.csv')
dat = dat.drop(dat.columns[0], axis=1)
dat = dat.drop('Chick', axis=1)
dat['Diet'] = dat['Diet'].astype('category')
dat_dummies = pd.get_dummies(dat['Diet'])
dat_dummies = dat_dummies.rename(columns={1:'Diet1', 2:'Diet2', 3:'Diet3', 4:'Diet4'})
dat = pd.concat([dat, dat_dummies], axis=1)
dat
y = dat['weight']
X = dat[['Time', 'Diet1', 'Diet2', 'Diet3', 'Diet4']]
n = len(y)
p = 12
# Let's stabilize the variance
dat_var = dat[['weight','Diet','Time']].groupby(['Diet','Time']).var().reset_index()
dat_var = dat_var.rename(columns={'weight':'var'})
dat_var['log_var'] = np.log(dat_var['var'])
dat_var
dat_var = | pd.merge(dat, dat_var, how='left', on=['Diet','Time']) | pandas.merge |
from keras.models import Sequential
from keras.optimizers import SGD,adam
from keras.layers import Input, Dense, Convolution2D, MaxPooling2D, AveragePooling2D, ZeroPadding2D, Dropout, Flatten, merge, Reshape, Activation
from sklearn.metrics import log_loss
import numpy as np
import json
import matplotlib.pyplot as plt
import pandas as pd
from natsort import natsorted
import glob
import pathlib
from keras.callbacks import EarlyStopping,ModelCheckpoint, ReduceLROnPlateau, TensorBoard
from keras import regularizers
import tensorflow as tf
import configparser
def vgg16_model(img_rows, img_cols, channel=1, num_classes=None):
ratio = 0.5
model = Sequential()
model.add(ZeroPadding2D((1, 1), input_shape=(img_rows, img_cols, channel)))
model.add(Convolution2D(64, 3, 3, activation='relu'))
model.add(ZeroPadding2D((1, 1)))
model.add(Convolution2D(64, 3, 3, activation='relu'))
model.add(MaxPooling2D((2, 2), strides=(2, 2)))
model.add(ZeroPadding2D((1, 1)))
model.add(Convolution2D(128, 3, 3, activation='relu'))
model.add(ZeroPadding2D((1, 1)))
model.add(Convolution2D(128, 3, 3, activation='relu'))
model.add(MaxPooling2D((2, 2), strides=(2, 2)))
model.add(ZeroPadding2D((1, 1)))
model.add(Convolution2D(256, 3, 3, activation='relu'))
model.add(ZeroPadding2D((1, 1)))
model.add(Convolution2D(256, 3, 3, activation='relu'))
model.add(ZeroPadding2D((1, 1)))
model.add(Convolution2D(256, 3, 3, activation='relu'))
model.add(MaxPooling2D((2, 2), strides=(2, 2)))
model.add(ZeroPadding2D((1, 1)))
model.add(Convolution2D(512, 3, 3, activation='relu'))
model.add(ZeroPadding2D((1, 1)))
model.add(Convolution2D(512, 3, 3, activation='relu'))
model.add(ZeroPadding2D((1, 1)))
model.add(Convolution2D(512, 3, 3, activation='relu'))
model.add(MaxPooling2D((2, 2), strides=(2, 2)))
model.add(ZeroPadding2D((1, 1)))
model.add(Convolution2D(512, 3, 3, activation='relu'))
model.add(ZeroPadding2D((1, 1)))
model.add(Convolution2D(512, 3, 3, activation='relu'))
model.add(ZeroPadding2D((1, 1)))
model.add(Convolution2D(512, 3, 3, activation='relu'))
model.add(MaxPooling2D((2, 2), strides=(2, 2)))
model.add(Flatten())
model.add(Dense(4096, activation='relu',
kernel_initializer='he_normal',
bias_initializer='zeros'))
model.add(Dropout(ratio))
model.add(Dense(4096, activation='relu',
kernel_initializer='he_normal',
bias_initializer='zeros'))
model.add(Dropout(ratio))
model.add(Dense(1000, activation='relu',
kernel_initializer='he_normal',
bias_initializer='zeros'))
model.add(Dropout(ratio))
model.add(Dense(1, activation='sigmoid',
kernel_initializer='he_normal',
bias_initializer='zeros'))
# sgd = SGD(lr=1e-3, decay=1e-6, momentum=0.9, nesterov=True)
# model.compile(optimizer=sgd, loss=custom_loss)
model.compile(optimizer='adam', loss=custom_loss)
return model
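# Illustrative sketch: building the network above for 224x224 RGB crops. The
# input size is an assumption; the actual size is read from config.ini in the
# __main__ block below.
def _example_build_vgg_like_model():
    model = vgg16_model(img_rows=224, img_cols=224, channel=3, num_classes=1)
    model.summary()
    return model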
def custom_loss(y_true, y_pred):
normalize_num = 80000000
y_true = y_true * normalize_num
y_pred = y_pred * normalize_num
out = tf.square(tf.log(y_true + 1) - tf.log(y_pred + 1))
return out
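# Illustrative sketch (invented sample values): a NumPy equivalent of
# custom_loss, making explicit that it computes squared log error on targets
# scaled back up by normalize_num.
def _example_custom_loss_numpy():
    normalize_num = 80000000
    y_true = np.array([0.50, 0.25]) * normalize_num
    y_pred = np.array([0.40, 0.30]) * normalize_num
    return np.square(np.log(y_true + 1) - np.log(y_pred + 1))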
def plot_history_loss(history,axL):
axL.plot(history['loss'],label="loss for training")
axL.plot(history['val_loss'],label="loss for validation")
axL.set_title('model loss')
axL.set_xlabel('epoch')
axL.set_ylabel('loss')
axL.legend(loc='upper right')
def calc_RMSLE(Y_train, Y_pred):
    # Element-wise squared log error. Note that callers average these terms
    # without taking a square root, so the reported "RMSLE" is effectively MSLE.
    RMSLE = np.square(np.log(Y_train + 1) - np.log(Y_pred + 1))
    return RMSLE
def batch_iter(data, labels, batch_size, shuffle=True):
num_batches_per_epoch = int((len(data) - 1) / batch_size) + 1
def data_generator():
data_size = len(data)
while True:
if shuffle:
shuffle_indices = np.random.permutation(np.arange(data_size))
shuffled_data = data[shuffle_indices]
shuffled_labels = labels[shuffle_indices]
else:
shuffled_data = data
shuffled_labels = labels
for batch_num in range(num_batches_per_epoch):
start_index = batch_num * batch_size
end_index = min((batch_num + 1) * batch_size, data_size)
X = shuffled_data[start_index: end_index]
y = shuffled_labels[start_index: end_index]
yield X, y
return num_batches_per_epoch, data_generator()
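# Illustrative sketch (invented shapes) of consuming batch_iter: the generator
# loops forever, so callers rely on the returned steps-per-epoch count rather
# than generator exhaustion.
def _example_batch_iter_usage():
    data = np.zeros((25, 4))
    labels = np.arange(25)
    steps, gen = batch_iter(data, labels, batch_size=10)
    first_X, first_y = next(gen)
    return steps, first_X.shape, first_y.shape  # 3, (10, 4), (10,)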
class MyEncoder(json.JSONEncoder):
def default(self, obj):
if isinstance(obj, np.integer):
return int(obj)
elif isinstance(obj, np.floating):
return float(obj)
elif isinstance(obj, np.ndarray):
return obj.tolist()
else:
return super(MyEncoder, self).default(obj)
if __name__ == '__main__':
channel = 3
num_classes = 1
# SETTING
ini = configparser.ConfigParser()
ini.read('./config.ini', 'UTF-8')
image_size = int(ini['common']['image_size'])
img_rows, img_cols = image_size, image_size
batch_size = int(ini['Train']['batch_size'])
nb_epoch = int(ini['Train']['nb_epoch'])
normalize_num = int(ini['Train']['normalize_num'])
dir_prep = str(ini['Train']['dir_prep'])
dir_result = str(ini['Train']['dir_result_VGG-like'])
dir_data = str(ini['Train']['dir_data'])
dir_tflog = str(ini['Train']['dir_tflog'])
dir_eval_image = str(ini['common']['dir_ori_data']) + str(ini['common']['dir_eval_image'])
    # Load the data
X_train_temp = np.load(dir_prep + 'train_images.npy', allow_pickle=True)/255
Y_train_temp = np.load(dir_prep + 'train_anno.npy', allow_pickle=True)/normalize_num
X_valid_temp = np.load(dir_prep + 'test_images.npy', allow_pickle=True)/255
Y_valid_temp = np.load(dir_prep + 'test_anno.npy', allow_pickle=True)/normalize_num
    # Shuffle the data
all_data = np.concatenate([X_train_temp, X_valid_temp], axis=0)
all_label = np.concatenate([Y_train_temp,Y_valid_temp], axis=0)
num_train = X_train_temp.shape[0]
num_valid = X_valid_temp.shape[0]
num_all = num_train + num_valid
print(num_train, num_valid, num_all, all_data.shape, all_label.shape)
print(Y_train_temp.shape, Y_valid_temp.shape)
del X_train_temp,Y_train_temp,X_valid_temp,Y_valid_temp
id_all = np.random.choice(num_all, num_all, replace=False)
id_train = id_all[:num_train]
id_valid = id_all[num_train:]
X_train = all_data[id_train]
Y_train = all_label[id_train]
X_valid = all_data[id_valid]
Y_valid = all_label[id_valid]
X_eval = np.load(dir_prep + 'eval_images.npy', allow_pickle=True)/255
print("!!!!",X_train.shape,Y_train.shape,X_valid.shape,Y_valid.shape,X_eval.shape)
print("!!!!",all_data[id_train].shape)
    # Build the model
model = vgg16_model(img_rows, img_cols, channel, num_classes)
    # Train the model
es_cb = EarlyStopping(monitor='val_loss', patience=30, verbose=1, mode='min')
cp = ModelCheckpoint(dir_result + "best.hdf5", monitor="val_loss", verbose=1,
save_best_only=True, save_weights_only=True)
tb_cb = TensorBoard(log_dir=dir_tflog, histogram_freq=0)
reduce_lr = ReduceLROnPlateau(monitor='val_loss', factor=0.5, patience=5)
history = model.fit(X_train, Y_train,
batch_size=batch_size,
nb_epoch=nb_epoch,
shuffle=True,
verbose=1,
validation_data=(X_valid, Y_valid),
callbacks=[cp, es_cb, reduce_lr, tb_cb]
)
train_steps, train_batches = batch_iter(X_train, Y_train, batch_size)
valid_steps, valid_batches = batch_iter(X_valid, Y_valid, batch_size)
model.fit_generator(train_batches, train_steps,
epochs=nb_epoch,
validation_data=valid_batches,
validation_steps=valid_steps,
callbacks=[cp, es_cb, reduce_lr, tb_cb]
)
model.save_weights(dir_result + 'param.hdf5')
with open(dir_result + 'history.json', 'w') as f:
json.dump(history.history, f, cls = MyEncoder)
    # Write out the training log (loss curve)
f = open(dir_result + 'history.json', 'r')
history = json.load(f)
f.close()
fig, (axL) = plt.subplots(ncols=1, figsize=(10,4))
plot_history_loss(history, axL)
fig.savefig(dir_result + 'loss.png')
plt.close()
    # Load the trained weights
model.load_weights(dir_result + "best.hdf5")
    # Output predictions on the training data
Y_train = Y_train * normalize_num
train_pred = model.predict(X_train, batch_size=batch_size, verbose=1).reshape(-1) * normalize_num
RMSLE_train_cal = calc_RMSLE(Y_train, train_pred)
train = np.stack([Y_train, train_pred, RMSLE_train_cal])
df_train = pd.DataFrame(train.T, columns=['TRUE', 'MODEL', 'RMSLE_cal'])
df_train.to_csv(dir_result + 'train.csv')
    # Output predictions on the validation data
Y_valid = Y_valid * normalize_num
valids_pred = model.predict(X_valid, batch_size=batch_size, verbose=1).reshape(-1) * normalize_num
RMSLE_cal = calc_RMSLE(Y_valid, valids_pred)
valids = np.stack([Y_valid, valids_pred, RMSLE_cal])
df_valids = pd.DataFrame(valids.T, columns=['TRUE', 'MODEL', 'RMSLE_cal'])
df_valids.to_csv(dir_result + 'valids.csv')
RMSLE = np.sum(df_valids['RMSLE_cal'].values)/len(df_valids)
np.savetxt(dir_result + 'RMSLE.txt', RMSLE.reshape(-1))
print("Val RMSLE : ", RMSLE)
    # Output predictions on the evaluation data
files_eval_images = natsorted(glob.glob(dir_eval_image + "*.jpg"))
file_name=[]
i=0
for file in files_eval_images:
file_name.append(file.replace(dir_eval_image, ""))
i=i+1
predictions = model.predict(X_eval, batch_size=batch_size, verbose=1).reshape(-1) * normalize_num
predictions = (predictions).astype(np.int32)
predictions_arr = np.stack([np.array(file_name), predictions], 1)
df_predictions = | pd.DataFrame(predictions_arr) | pandas.DataFrame |
import numpy as np
import pandas as pd
from queue import Queue
from event import EventHandler
from abc import ABCMeta, abstractmethod
from math import floor
from event import FillEvent, OrderEvent, MarketEvent, SignalEvent
from threading import Thread
from datetime import datetime
class NaivePortfolio(EventHandler):
"""
Simplest strategy, for benchmarking and testing
event - Market event
"""
def __init__(self, symbols, initial_capital=1000):
super(NaivePortfolio,self).__init__()
self.portfolio_queue = Queue()
self.central_queue = None
self.symbol_list = symbols
self.prices = {}
self.start_date = datetime.now().strftime("%m/%d/%Y, %H:%M:%S")
self.initial_capital = initial_capital
self.all_positions = self.construct_all_positions()
self.current_positions = dict( (k,v) for k, v in [(s, 0) for s in self.symbol_list] )
self.all_holdings = self.construct_all_holdings()
self.current_holdings = self.construct_current_holdings()
def eventhandler(self, event):
if event.type == "signal":
self.generate_naive_order(event)
elif event.type == "fill":
self.update_fill(event)
elif event.type == "market":
self.update_prices(event)
self.update_timeindex(event)
# self.create_equity_curve_dataframe()
def update_prices(self, event):
        # Cache the mid price (bid/ask midpoint) for each symbol
        for s in event.symbols:
            self.prices[s] = (event.orderbook[s]['bid'] + event.orderbook[s]['ask']) / 2
def construct_all_positions(self):
"""
Constructs the positions list using the start_date
to determine when the time index will begin.
"""
d = dict( (k,v) for k, v in [(s, 0) for s in self.symbol_list] )
d['datetime'] = self.start_date
return [d]
def construct_all_holdings(self):
"""
Constructs the holdings list using the start_date
to determine when the time index will begin.
"""
d = dict( (k,v) for k, v in [(s, 0.0) for s in self.symbol_list] )
d['datetime'] = self.start_date
d['cash'] = self.initial_capital
d['commission'] = 0.0
d['total'] = self.initial_capital
return [d]
def construct_current_holdings(self):
"""
This constructs the dictionary which will hold the instantaneous
value of the portfolio across all symbols.
"""
d = dict( (k,v) for k, v in [(s, 0.0) for s in self.symbol_list] )
d['cash'] = self.initial_capital
d['commission'] = 0.0
d['total'] = self.initial_capital
return d
def update_timeindex(self, event):
"""
Adds a new record to the positions matrix for the current
market data bar. This reflects the PREVIOUS bar, i.e. all
current market data at this stage is known (OLHCVI).
Makes use of a MarketEvent from the events queue.
"""
# Update positions
dp = dict( (k,v) for k, v in [(s, 0) for s in self.symbol_list] )
dp['datetime'] = event.timestamp
for s in self.symbol_list:
dp[s] = self.current_positions[s]
# Append the current positions
self.all_positions.append(dp)
# Update holdings
dh = dict( (k,v) for k, v in [(s, 0) for s in self.symbol_list] )
dh['datetime'] = event.timestamp
dh['cash'] = self.current_holdings['cash']
dh['commission'] = self.current_holdings['commission']
dh['total'] = self.current_holdings['cash']
for s in self.symbol_list:
# Approximation to the real value
market_value = self.current_positions[s] * self.prices[s]
dh[s] = market_value
dh['total'] += market_value
# Append the current holdings
self.all_holdings.append(dh)
def update_positions_from_fill(self, fill):
"""
Takes a FilltEvent object and updates the position matrix
to reflect the new position.
Parameters:
fill - The FillEvent object to update the positions with.
"""
# Check whether the fill is a buy or sell
fill_dir = 0
if fill.direction == 'BUY':
fill_dir = 1
if fill.direction == 'SELL':
fill_dir = -1
# Update positions list with new quantities
self.current_positions[fill.symbol] += fill_dir*fill.quantity
def update_holdings_from_fill(self, fill):
"""
Takes a FillEvent object and updates the holdings matrix
to reflect the holdings value.
Parameters:
fill - The FillEvent object to update the holdings with.
"""
# Check whether the fill is a buy or sell
fill_dir = 0
if fill.direction == 'BUY':
fill_dir = 1
if fill.direction == 'SELL':
fill_dir = -1
# Update holdings list with new quantities
        fill_cost = self.prices[fill.symbol]  # mid price cached from the latest market event
cost = fill_dir * fill_cost * fill.quantity
self.current_holdings[fill.symbol] += cost
self.current_holdings['commission'] += fill.commission
self.current_holdings['cash'] -= (cost + fill.commission)
self.current_holdings['total'] -= (cost + fill.commission)
def update_fill(self, event):
"""
Updates the portfolio current positions and holdings
from a FillEvent.
"""
self.update_positions_from_fill(event)
self.update_holdings_from_fill(event)
def generate_naive_order(self, signal):
"""
Simply transacts an OrderEvent object as a constant quantity
sizing of the signal object, without risk management or
position sizing considerations.
Parameters:
signal - The SignalEvent signal information.
"""
order = None
symbol = signal.symbol
direction = signal.signal
mkt_quantity = 100
cur_quantity = self.current_positions[symbol]
order_type = 'MKT'
if direction == 'LONG' and cur_quantity == 0:
order = OrderEvent(symbol, order_type, mkt_quantity, 'BUY')
if direction == 'SHORT' and cur_quantity == 0:
order = OrderEvent(symbol, order_type, mkt_quantity, 'SELL')
if direction == 'EXIT' and cur_quantity > 0:
order = OrderEvent(symbol, order_type, abs(cur_quantity), 'SELL')
if direction == 'EXIT' and cur_quantity < 0:
order = OrderEvent(symbol, order_type, abs(cur_quantity), 'BUY')
self.central_queue.put(order)
def create_equity_curve_dataframe(self):
"""
Creates a pandas DataFrame from the all_holdings
list of dictionaries.
"""
curve = | pd.DataFrame(self.all_holdings) | pandas.DataFrame |
import operator
import numpy as np
import pytest
import pandas as pd
import pandas._testing as tm
from pandas.core.arrays import FloatingArray
import pandas.core.ops as ops
# Basic test for the arithmetic array ops
# -----------------------------------------------------------------------------
@pytest.mark.parametrize(
"opname, exp",
[("add", [1, 3, None, None, 9]), ("mul", [0, 2, None, None, 20])],
ids=["add", "mul"],
)
def test_add_mul(dtype, opname, exp):
a = pd.array([0, 1, None, 3, 4], dtype=dtype)
b = pd.array([1, 2, 3, None, 5], dtype=dtype)
# array / array
expected = pd.array(exp, dtype=dtype)
op = getattr(operator, opname)
result = op(a, b)
tm.assert_extension_array_equal(result, expected)
op = getattr(ops, "r" + opname)
result = op(a, b)
tm.assert_extension_array_equal(result, expected)
def test_sub(dtype):
a = pd.array([1, 2, 3, None, 5], dtype=dtype)
b = pd.array([0, 1, None, 3, 4], dtype=dtype)
result = a - b
expected = pd.array([1, 1, None, None, 1], dtype=dtype)
tm.assert_extension_array_equal(result, expected)
def test_div(dtype):
a = pd.array([1, 2, 3, None, 5], dtype=dtype)
b = pd.array([0, 1, None, 3, 4], dtype=dtype)
result = a / b
expected = pd.array([np.inf, 2, None, None, 1.25], dtype="Float64")
tm.assert_extension_array_equal(result, expected)
@pytest.mark.parametrize("zero, negative", [(0, False), (0.0, False), (-0.0, True)])
def test_divide_by_zero(zero, negative):
# https://github.com/pandas-dev/pandas/issues/27398, GH#22793
a = pd.array([0, 1, -1, None], dtype="Int64")
result = a / zero
expected = FloatingArray(
np.array([np.nan, np.inf, -np.inf, 1], dtype="float64"),
np.array([False, False, False, True]),
)
if negative:
expected *= -1
tm.assert_extension_array_equal(result, expected)
def test_floordiv(dtype):
a = pd.array([1, 2, 3, None, 5], dtype=dtype)
b = pd.array([0, 1, None, 3, 4], dtype=dtype)
result = a // b
# Series op sets 1//0 to np.inf, which IntegerArray does not do (yet)
expected = pd.array([0, 2, None, None, 1], dtype=dtype)
tm.assert_extension_array_equal(result, expected)
def test_mod(dtype):
a = pd.array([1, 2, 3, None, 5], dtype=dtype)
b = pd.array([0, 1, None, 3, 4], dtype=dtype)
result = a % b
expected = pd.array([0, 0, None, None, 1], dtype=dtype)
tm.assert_extension_array_equal(result, expected)
def test_pow_scalar():
a = pd.array([-1, 0, 1, None, 2], dtype="Int64")
result = a**0
expected = pd.array([1, 1, 1, 1, 1], dtype="Int64")
tm.assert_extension_array_equal(result, expected)
result = a**1
expected = pd.array([-1, 0, 1, None, 2], dtype="Int64")
tm.assert_extension_array_equal(result, expected)
result = a**pd.NA
expected = pd.array([None, None, 1, None, None], dtype="Int64")
tm.assert_extension_array_equal(result, expected)
result = a**np.nan
expected = FloatingArray(
np.array([np.nan, np.nan, 1, np.nan, np.nan], dtype="float64"),
np.array([False, False, False, True, False]),
)
tm.assert_extension_array_equal(result, expected)
# reversed
a = a[1:] # Can't raise integers to negative powers.
result = 0**a
expected = pd.array([1, 0, None, 0], dtype="Int64")
tm.assert_extension_array_equal(result, expected)
result = 1**a
expected = pd.array([1, 1, 1, 1], dtype="Int64")
tm.assert_extension_array_equal(result, expected)
result = pd.NA**a
expected = pd.array([1, None, None, None], dtype="Int64")
tm.assert_extension_array_equal(result, expected)
result = np.nan**a
expected = FloatingArray(
np.array([1, np.nan, np.nan, np.nan], dtype="float64"),
np.array([False, False, True, False]),
)
tm.assert_extension_array_equal(result, expected)
def test_pow_array():
a = | pd.array([0, 0, 0, 1, 1, 1, None, None, None]) | pandas.array |
# *****************************************************************************
# Copyright (c) 2019-2020, Intel Corporation All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
# OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
# OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
# EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# *****************************************************************************
import itertools
import os
import platform
import string
import unittest
from copy import deepcopy
from itertools import product
import numpy as np
import pandas as pd
from numba.core.errors import TypingError
from sdc.hiframes.rolling import supported_rolling_funcs
from sdc.tests.test_base import TestCase
from sdc.tests.test_series import gen_frand_array
from sdc.tests.test_utils import (count_array_REPs, count_parfor_REPs,
skip_numba_jit, skip_sdc_jit,
test_global_input_data_float64)
LONG_TEST = (int(os.environ['SDC_LONG_ROLLING_TEST']) != 0
if 'SDC_LONG_ROLLING_TEST' in os.environ else False)
test_funcs = ('mean', 'max',)
if LONG_TEST:
# all functions except apply, cov, corr
test_funcs = supported_rolling_funcs[:-3]
def rolling_std_usecase(obj, window, min_periods, ddof):
return obj.rolling(window, min_periods).std(ddof)
def rolling_var_usecase(obj, window, min_periods, ddof):
return obj.rolling(window, min_periods).var(ddof)
class TestRolling(TestCase):
@skip_numba_jit
def test_series_rolling1(self):
def test_impl(S):
return S.rolling(3).sum()
hpat_func = self.jit(test_impl)
S = pd.Series([1.0, 2., 3., 4., 5.])
pd.testing.assert_series_equal(hpat_func(S), test_impl(S))
@skip_numba_jit
def test_fixed1(self):
# test sequentially with manually created dfs
wins = (3,)
if LONG_TEST:
wins = (2, 3, 5)
centers = (False, True)
for func_name in test_funcs:
func_text = "def test_impl(df, w, c):\n return df.rolling(w, center=c).{}()\n".format(func_name)
loc_vars = {}
exec(func_text, {}, loc_vars)
test_impl = loc_vars['test_impl']
hpat_func = self.jit(test_impl)
for args in itertools.product(wins, centers):
df = pd.DataFrame({'B': [0, 1, 2, np.nan, 4]})
pd.testing.assert_frame_equal(hpat_func(df, *args), test_impl(df, *args))
df = pd.DataFrame({'B': [0, 1, 2, -2, 4]})
pd.testing.assert_frame_equal(hpat_func(df, *args), test_impl(df, *args))
@skip_numba_jit
def test_fixed2(self):
# test sequentially with generated dfs
sizes = (121,)
wins = (3,)
if LONG_TEST:
sizes = (1, 2, 10, 11, 121, 1000)
wins = (2, 3, 5)
centers = (False, True)
for func_name in test_funcs:
func_text = "def test_impl(df, w, c):\n return df.rolling(w, center=c).{}()\n".format(func_name)
loc_vars = {}
exec(func_text, {}, loc_vars)
test_impl = loc_vars['test_impl']
hpat_func = self.jit(test_impl)
for n, w, c in itertools.product(sizes, wins, centers):
df = pd.DataFrame({'B': np.arange(n)})
pd.testing.assert_frame_equal(hpat_func(df, w, c), test_impl(df, w, c))
@skip_numba_jit
def test_fixed_apply1(self):
# test sequentially with manually created dfs
def test_impl(df, w, c):
return df.rolling(w, center=c).apply(lambda a: a.sum())
hpat_func = self.jit(test_impl)
wins = (3,)
if LONG_TEST:
wins = (2, 3, 5)
centers = (False, True)
for args in itertools.product(wins, centers):
df = pd.DataFrame({'B': [0, 1, 2, np.nan, 4]})
pd.testing.assert_frame_equal(hpat_func(df, *args), test_impl(df, *args))
df = pd.DataFrame({'B': [0, 1, 2, -2, 4]})
pd.testing.assert_frame_equal(hpat_func(df, *args), test_impl(df, *args))
@skip_numba_jit
def test_fixed_apply2(self):
# test sequentially with generated dfs
def test_impl(df, w, c):
return df.rolling(w, center=c).apply(lambda a: a.sum())
hpat_func = self.jit(test_impl)
sizes = (121,)
wins = (3,)
if LONG_TEST:
sizes = (1, 2, 10, 11, 121, 1000)
wins = (2, 3, 5)
centers = (False, True)
for n, w, c in itertools.product(sizes, wins, centers):
df = pd.DataFrame({'B': np.arange(n)})
pd.testing.assert_frame_equal(hpat_func(df, w, c), test_impl(df, w, c))
@skip_numba_jit
def test_fixed_parallel1(self):
def test_impl(n, w, center):
df = pd.DataFrame({'B': np.arange(n)})
R = df.rolling(w, center=center).sum()
return R.B.sum()
hpat_func = self.jit(test_impl)
sizes = (121,)
wins = (5,)
if LONG_TEST:
sizes = (1, 2, 10, 11, 121, 1000)
wins = (2, 4, 5, 10, 11)
centers = (False, True)
for args in itertools.product(sizes, wins, centers):
self.assertEqual(hpat_func(*args), test_impl(*args),
"rolling fixed window with {}".format(args))
self.assertEqual(count_array_REPs(), 0)
self.assertEqual(count_parfor_REPs(), 0)
@skip_numba_jit
def test_fixed_parallel_apply1(self):
def test_impl(n, w, center):
df = pd.DataFrame({'B': np.arange(n)})
R = df.rolling(w, center=center).apply(lambda a: a.sum())
return R.B.sum()
hpat_func = self.jit(test_impl)
sizes = (121,)
wins = (5,)
if LONG_TEST:
sizes = (1, 2, 10, 11, 121, 1000)
wins = (2, 4, 5, 10, 11)
centers = (False, True)
for args in itertools.product(sizes, wins, centers):
self.assertEqual(hpat_func(*args), test_impl(*args),
"rolling fixed window with {}".format(args))
self.assertEqual(count_array_REPs(), 0)
self.assertEqual(count_parfor_REPs(), 0)
@skip_numba_jit
def test_variable1(self):
# test sequentially with manually created dfs
df1 = pd.DataFrame({'B': [0, 1, 2, np.nan, 4],
'time': [pd.Timestamp('20130101 09:00:00'),
pd.Timestamp('20130101 09:00:02'),
pd.Timestamp('20130101 09:00:03'),
pd.Timestamp('20130101 09:00:05'),
                                     pd.Timestamp('20130101 09:00:06')]})
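        # NOTE: the source is truncated at this point. The remainder below
        # follows the usual shape of this test in SDC (a second frame plus
        # time-based windows) and is an assumption, not the original code.
        df2 = pd.DataFrame({'B': [0, 1, 2, -2, 4],
                            'time': [pd.Timestamp('20130101 09:00:01'),
                                     pd.Timestamp('20130101 09:00:02'),
                                     pd.Timestamp('20130101 09:00:03'),
                                     pd.Timestamp('20130101 09:00:04'),
                                     pd.Timestamp('20130101 09:00:09')]})
        wins = ('2s',)
        if LONG_TEST:
            wins = ('1s', '2s', '3s', '4s')
        for w, func_name in itertools.product(wins, test_funcs):
            func_text = "def test_impl(df):\n  return df.rolling('{}', on='time').{}()\n".format(w, func_name)
            loc_vars = {}
            exec(func_text, {}, loc_vars)
            test_impl = loc_vars['test_impl']
            hpat_func = self.jit(test_impl)
            pd.testing.assert_frame_equal(hpat_func(df1), test_impl(df1))
            pd.testing.assert_frame_equal(hpat_func(df2), test_impl(df2))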
import sys
from multiprocessing import Pool
import os
import pandas as pd
import pyNetLogo
from SALib.sample import saltelli
def initializer(modelfile):
global netlogo
netlogo = pyNetLogo.NetLogoLink(netlogo_home='NetLogo',
netlogo_version='6.2',
gui=False)
netlogo.load_model(modelfile)
def run_simulation(experiment):
for key, value in experiment.items():
netlogo.command(f'set {key} {value}')
netlogo.command('setup')
# fixed parameters:
netlogo.command('set max-timesteps 50')
netlogo.command('set number-of-startind 40')
netlogo.command('set cropland-movement-cost 5')
netlogo.command('set woodland-movement-cost 1')
netlogo.command('set angle-for-viewing-ponds-and-woodland 140')
# reporter:
step_reporter = ['count newts',
'occupied-ponds']
# start with corridors:
netlogo.command('set current-scenario "corridors"')
netlogo.repeat_command('go', 40)
out_corridor = netlogo.repeat_report(step_reporter, 10, go='go')
# then buffer:
netlogo.repeat_command('go', 40)
out_buffer = netlogo.repeat_report(step_reporter, 10, go='go')
out = [netlogo.report('newts-buffer'),
netlogo.report('newts-corridor'),
netlogo.report('occupied-ponds-buffer'),
netlogo.report('occupied-ponds-corridor'),
out_buffer['count newts'].values.mean(),
out_buffer['occupied-ponds'].values.mean(),
out_corridor['count newts'].values.mean(),
out_corridor['occupied-ponds'].values.mean()]
results = pd.Series(out,
index=['newts_buffer', 'newts_corridor',
'ponds_buffer', 'ponds_corridor',
'mean_newts_buffer', 'mean_ponds_buffer',
'mean_newts_corridor', 'mean_ponds_corridor'])
#print(results)
return results
def generate_samples(n):
problem = {
'num_vars': 7,
'names': [
#'number-of-startind', # 15
'capacity', # 20
'mean-juvenile-mortality-prob', # 0.5
'mean-adult-mortality-prob', #0.2
#'cropland-movement-cost', #5
#'woodland-movement-cost', #1
#'angle-for-viewing-ponds-and-woodland', #140
'mortality-decrease-with-buffer', #0.1
'distance-for-viewing-ponds-and-woodland', #2
'movement-energy', #700
'mean-number-of-female-offspring' #5
],
'bounds': [
#[5, 80],
[10, 40],
[0.4, 0.7],
[0.1, 0.3],
#[4, 6],
#[0.5, 2],
#[70, 180],
[0.01, 0.2],
[0.5, 3],
[200, 1000],
[4, 6]
]
}
param_values = saltelli.sample(problem,
n,
calc_second_order=True)
df = pd.DataFrame(param_values,
columns=problem['names'])
return df
if __name__ == '__main__':
modelfile = 'model/crested_newt.nlogo'
#experiments = generate_samples(1024)
#experiments.to_csv('parameter_new.csv')
ind = [i * 256 for i in range(0, 64 + 1)]
parameter_df = pd.read_csv('parameter_new.csv', index_col=0)
#print(len(parameter_df))
#print(ind)
#print(len(ind))
#sys.exit(0)
for i in range(29, 64):
print(ind[i], ind[i+1])
experiments = parameter_df.iloc[ind[i]:ind[i+1]]
results = []
with Pool(initializer=initializer, initargs=(modelfile,), processes=50) as executor:
for entry in executor.map(run_simulation, experiments.to_dict('records')):
results.append(entry)
print('yap!')
results = | pd.DataFrame(results) | pandas.DataFrame |
import mysql.connector, pandas, re
from sklearn.preprocessing import LabelEncoder
from sklearn.model_selection import train_test_split
from keras.utils import to_categorical
import malaya
class DataPreparation():
def __init__(self):
pass
def load_data_from_database(self, sql):
mydb = mysql.connector.connect(host='localhost', database='news_dataset', user='root', password='')
data_db = pandas.read_sql(sql, con=mydb)
return data_db
def clean_data(self, data, loop):
for i in range(loop):
data.loc[i] = ' '.join(data.loc[i].split('-'))
data.loc[i] = re.sub(r'[^\w\s]', ' ', data.loc[i].lower())
data.loc[i] = malaya.stem.sastrawi(data.loc[i])
            # spell out numeric tokens; str.replace() returns a new string, so the
            # result must be reassigned rather than discarded
            words = [malaya.num2word.to_cardinal(int(w)) if w.isdigit() else w
                     for w in data.loc[i].split()]
            data.loc[i] = ' '.join(words)
def create_data_label(self, size):
labels = []
for i in range(size):
labels.append('Fake')
labels.append('Real')
return labels
def encode_label(self, label, num_class):
# label encode the target variable
encoder = LabelEncoder()
label = encoder.fit_transform(label)
encoded_label = to_categorical(label, num_classes=num_class)
return encoded_label
def prepare_data_frame(self):
# load data from database
data = self.load_data_from_database('SELECT fake_news, real_news from news_table2')
# merge fake_news and real_news into single dataframe alternately
data = pandas.concat([data.fake_news, data.real_news]).sort_index(kind='merge')
# reset index bcoz of alternate merging process before
data = data.reset_index(drop=True)
# generate label for data in dataDF
label = self.create_data_label(size=1820)
# prepare dataframe with news and label
dataDF = | pandas.DataFrame() | pandas.DataFrame |
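        # NOTE: assumed continuation -- the source is truncated here. Column
        # names are assumptions; the intent is to pair each news item with its label.
        dataDF['news'] = data
        dataDF['label'] = label
        return dataDF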
import numpy as np
import pandas as pd
import sys
import pickle
import matplotlib.pyplot as plt
from matplotlib.backends.backend_qt5agg import FigureCanvasQTAgg as FigureCanvas
import pyqtgraph
from PyQt5.QtWidgets import *
from PyQt5.QtGui import *
from PyQt5.QtCore import *
from PyQt5.QtTest import *
from Model_module import Model_module
from Data_module import Data_module
# from Sub_widget import another_result_explain
class Worker(QObject):
    # Create the signal 'containers' used to send data to the GUI thread #############
train_value = pyqtSignal(object)
# nor_ab_value = pyqtSignal(object)
procedure_value = pyqtSignal(object)
verif_value = pyqtSignal(object)
timer = pyqtSignal(object)
symptom_db = pyqtSignal(object)
shap = pyqtSignal(object)
plot_db = pyqtSignal(object)
display_ex = pyqtSignal(object, object, object)
another_shap = pyqtSignal(object, object, object)
another_shap_table = pyqtSignal(object)
##########################################
@pyqtSlot(object)
def generate_db(self):
        test_db = input('Enter the scenario to run : ')
        print(f'Running the selected scenario: {test_db}')
        Model_module()  # reset the empty matrices inside the model module
        data_module = Data_module()
        db, check_db = data_module.load_data(file_name=test_db)  # load the selected test_db
        data_module.data_processing()  # Min-Max scaling applied, 2-D shape
liner = []
plot_data = []
normal_data = []
compare_data = {'Normal':[], 'Ab21-01':[], 'Ab21-02':[], 'Ab20-04':[], 'Ab15-07':[], 'Ab15-08':[], 'Ab63-04':[], 'Ab63-02':[], 'Ab21-12':[], 'Ab19-02':[], 'Ab21-11':[], 'Ab23-03':[], 'Ab60-02':[], 'Ab59-02':[], 'Ab23-01':[], 'Ab23-06':[]}
for line in range(np.shape(db)[0]):
QTest.qWait(0.01)
print(np.shape(db)[0], line)
data = np.array([data_module.load_real_data(row=line)])
liner.append(line)
check_data, check_parameter = data_module.load_real_check_data(row=line)
plot_data.append(check_data[0])
try: normal_data.append(normal_db.iloc[line])
except: pass
try: compare_data['Normal'].append(normal_db.iloc[line])
except: pass
try: compare_data['Ab21-01'].append(ab21_01.iloc[line])
except: pass
try: compare_data['Ab21-02'].append(ab21_02.iloc[line])
except: pass
try: compare_data['Ab20-04'].append(ab20_04.iloc[line])
except: pass
try: compare_data['Ab15-07'].append(ab15_07.iloc[line])
except: pass
try: compare_data['Ab15-08'].append(ab15_08.iloc[line])
except: pass
try: compare_data['Ab63-04'].append(ab63_04.iloc[line])
except: pass
try: compare_data['Ab63-02'].append(ab63_02.iloc[line])
except: pass
try: compare_data['Ab21-12'].append(ab21_12.iloc[line])
except: pass
try: compare_data['Ab19-02'].append(ab19_02.iloc[line])
except: pass
try: compare_data['Ab21-11'].append(ab21_11.iloc[line])
except: pass
try: compare_data['Ab23-03'].append(ab23_03.iloc[line])
except: pass
try: compare_data['Ab60-02'].append(ab60_02.iloc[line])
except: pass
try: compare_data['Ab59-02'].append(ab59_02.iloc[line])
except: pass
try: compare_data['Ab23-01'].append(ab23_01.iloc[line])
except: pass
try: compare_data['Ab23-06'].append(ab23_06.iloc[line])
except: pass
if np.shape(data) == (1, 10, 46):
                dim2 = np.array(data_module.load_scaled_data(row=line - 9))  # scaled 2-D input window
# check_data, check_parameter = data_module.load_real_check_data(row=line - 8)
# plot_data.append(check_data[0])
train_untrain_reconstruction_error, train_untrain_error = model_module.train_untrain_classifier(data=data)
# normal_abnormal_reconstruction_error = model_module.normal_abnormal_classifier(data=data)
abnormal_procedure_result, abnormal_procedure_prediction, shap_add_des, shap_value = model_module.abnormal_procedure_classifier(data=dim2)
abnormal_verif_reconstruction_error, verif_threshold, abnormal_verif_error = model_module.abnormal_procedure_verification(data=data)
self.train_value.emit(train_untrain_error)
# self.nor_ab_value.emit(np.argmax(abnormal_procedure_result[line-9], axis=1)[0])
self.procedure_value.emit(np.argmax(abnormal_procedure_prediction, axis=1)[0])
self.verif_value.emit([abnormal_verif_error, verif_threshold])
self.timer.emit([line, check_parameter])
self.symptom_db.emit([np.argmax(abnormal_procedure_prediction, axis=1)[0], check_parameter])
self.shap.emit(shap_add_des)
self.plot_db.emit([liner, plot_data])
self.display_ex.emit(shap_add_des, [liner, plot_data], normal_data)
self.another_shap.emit(shap_value, [liner, plot_data], compare_data)
self.another_shap_table.emit(shap_value)
class AlignDelegate(QStyledItemDelegate):
def initStyleOption(self, option, index):
super(AlignDelegate, self).initStyleOption(option, index)
option.displayAlignment = Qt.AlignCenter
class Mainwindow(QWidget):
def __init__(self):
super().__init__()
self.setWindowTitle("Real-Time Abnormal Diagnosis for NPP")
self.setGeometry(150, 50, 1700, 800)
        # Initial plot configuration
pyqtgraph.setConfigOption("background", "w")
pyqtgraph.setConfigOption("foreground", "k")
#############################################
self.selected_para = pd.read_csv('./DataBase/Final_parameter.csv')
        # GUI part 1 layout (integrated diagnosis panel)
layout_left = QVBoxLayout()
        # Group 0 setup (Time and Power)
        gb_0 = QGroupBox("Training Status")  # set the title of group 0
        layout_left.addWidget(gb_0)  # add group 0 to the overall frame
        gb_0_layout = QBoxLayout(QBoxLayout.LeftToRight)  # layout holding the contents of group 0
        # Group 1 setup
        gb_1 = QGroupBox("Training Status")  # set the title of group 1
        layout_left.addWidget(gb_1)  # add group 1 to the overall frame
        gb_1_layout = QBoxLayout(QBoxLayout.LeftToRight)  # layout holding the contents of group 1
        # Group 2 setup
gb_2 = QGroupBox('NPP Status')
layout_left.addWidget(gb_2)
gb_2_layout = QBoxLayout(QBoxLayout.LeftToRight)
        # Group 3 setup
gb_3 = QGroupBox(self)
layout_left.addWidget(gb_3)
gb_3_layout = QBoxLayout(QBoxLayout.LeftToRight)
        # Group 4 setup
gb_4 = QGroupBox('Predicted Result Verification')
layout_left.addWidget(gb_4)
gb_4_layout = QBoxLayout(QBoxLayout.LeftToRight)
        # Group 5 setup
gb_5 = QGroupBox('Symptom check in scenario')
layout_left.addWidget(gb_5)
gb_5_layout = QBoxLayout(QBoxLayout.TopToBottom)
        # Add a spacer
# layout_part1.addItem(QSpacerItem(20, 40, QSizePolicy.Minimum, QSizePolicy.Expanding))
        # Group 0 contents
self.time_label = QLabel(self)
self.power_label = QPushButton(self)
        # Group 1 contents
# Trained / Untrained condition label
self.trained_label = QPushButton('Trained')
self.Untrained_label = QPushButton('Untrained')
        # Group 2 contents
self.normal_label = QPushButton('Normal')
self.abnormal_label = QPushButton('Abnormal')
        # Group 3 contents
self.name_procedure = QLabel('Number of Procedure: ')
self.num_procedure = QLineEdit(self)
self.num_procedure.setAlignment(Qt.AlignCenter)
self.name_scnario = QLabel('Name of Procedure: ')
self.num_scnario = QLineEdit(self)
self.num_scnario.setAlignment(Qt.AlignCenter)
        # Group 4 contents
self.success_label = QPushButton('Diagnosis Success')
self.failure_label = QPushButton('Diagnosis Failure')
        # Group 5 contents
self.symptom_name = QLabel(self)
self.symptom1 = QCheckBox(self)
self.symptom2 = QCheckBox(self)
self.symptom3 = QCheckBox(self)
self.symptom4 = QCheckBox(self)
self.symptom5 = QCheckBox(self)
self.symptom6 = QCheckBox(self)
        # Populate group 0
gb_0_layout.addWidget(self.time_label)
gb_0_layout.addWidget(self.power_label)
gb_0.setLayout(gb_0_layout)
        # Populate group 1
gb_1_layout.addWidget(self.trained_label)
gb_1_layout.addWidget(self.Untrained_label)
        gb_1.setLayout(gb_1_layout)  # attach layout 1 to the group 1 frame
        # Populate group 2
gb_2_layout.addWidget(self.normal_label)
gb_2_layout.addWidget(self.abnormal_label)
gb_2.setLayout(gb_2_layout)
        # Populate group 3
gb_3_layout.addWidget(self.name_procedure)
gb_3_layout.addWidget(self.num_procedure)
gb_3_layout.addWidget(self.name_scnario)
gb_3_layout.addWidget(self.num_scnario)
gb_3.setLayout(gb_3_layout)
        # Populate group 4
gb_4_layout.addWidget(self.success_label)
gb_4_layout.addWidget(self.failure_label)
gb_4.setLayout(gb_4_layout)
        # Populate group 5
gb_5_layout.addWidget(self.symptom_name)
gb_5_layout.addWidget(self.symptom1)
gb_5_layout.addWidget(self.symptom2)
gb_5_layout.addWidget(self.symptom3)
gb_5_layout.addWidget(self.symptom4)
gb_5_layout.addWidget(self.symptom5)
gb_5_layout.addWidget(self.symptom6)
gb_5.setLayout(gb_5_layout)
        # Place the Start button at the very bottom
self.start_btn = QPushButton('Start')
# layout_part1.addWidget(self.start_btn)
self.tableWidget = QTableWidget(0, 0)
self.tableWidget.setFixedHeight(500)
self.tableWidget.setFixedWidth(800)
        # Plot widgets
self.plot_1 = pyqtgraph.PlotWidget(title=self)
self.plot_2 = pyqtgraph.PlotWidget(title=self)
self.plot_3 = pyqtgraph.PlotWidget(title=self)
self.plot_4 = pyqtgraph.PlotWidget(title=self)
        # Explanation alarm widgets
red_alarm = QGroupBox('Main basis for diagnosis')
red_alarm_layout = QGridLayout()
orange_alarm = QGroupBox('Sub basis for diagnosis')
orange_alarm_layout = QGridLayout()
        # Create the display buttons
self.red1 = QPushButton(self)
self.red2 = QPushButton(self)
self.red3 = QPushButton(self)
self.red4 = QPushButton(self)
self.orange1 = QPushButton(self)
self.orange2 = QPushButton(self)
self.orange3 = QPushButton(self)
self.orange4 = QPushButton(self)
self.orange5 = QPushButton(self)
self.orange6 = QPushButton(self)
self.orange7 = QPushButton(self)
self.orange8 = QPushButton(self)
self.orange9 = QPushButton(self)
self.orange10 = QPushButton(self)
self.orange11 = QPushButton(self)
self.orange12 = QPushButton(self)
        # Insert the widgets into the layouts
red_alarm_layout.addWidget(self.red1, 0, 0)
red_alarm_layout.addWidget(self.red2, 0, 1)
red_alarm_layout.addWidget(self.red3, 1, 0)
red_alarm_layout.addWidget(self.red4, 1, 1)
orange_alarm_layout.addWidget(self.orange1, 0, 0)
orange_alarm_layout.addWidget(self.orange2, 0, 1)
orange_alarm_layout.addWidget(self.orange3, 1, 0)
orange_alarm_layout.addWidget(self.orange4, 1, 1)
orange_alarm_layout.addWidget(self.orange5, 2, 0)
orange_alarm_layout.addWidget(self.orange6, 2, 1)
orange_alarm_layout.addWidget(self.orange7, 3, 0)
orange_alarm_layout.addWidget(self.orange8, 3, 1)
orange_alarm_layout.addWidget(self.orange9, 4, 0)
orange_alarm_layout.addWidget(self.orange10, 4, 1)
orange_alarm_layout.addWidget(self.orange11, 5, 0)
orange_alarm_layout.addWidget(self.orange12, 5, 1)
        # Insert the layouts into the group boxes
red_alarm.setLayout(red_alarm_layout)
orange_alarm.setLayout(orange_alarm_layout)
        # Insert each group box into the parent layout
layout_part1 = QVBoxLayout()
detail_part = QHBoxLayout()
detailed_table = QPushButton('Detail Explanation [Table]')
self.another_classification = QPushButton('Why other scenarios were not chosen')
detail_part.addWidget(detailed_table)
detail_part.addWidget(self.another_classification)
alarm_main = QVBoxLayout()
alarm_main.addWidget(red_alarm)
alarm_main.addWidget(orange_alarm)
layout_part1.addLayout(layout_left)
layout_part1.addLayout(alarm_main)
layout_part1.addLayout(detail_part)
layout_part1.addItem(QSpacerItem(20, 40, QSizePolicy.Minimum, QSizePolicy.Expanding))
        # GUI part 2 layout (XAI panel)
layout_part2 = QVBoxLayout()
layout_part2.addWidget(self.plot_1)
layout_part2.addWidget(self.plot_2)
layout_part2.addWidget(self.plot_3)
layout_part2.addWidget(self.plot_4)
# layout_part2.addItem(QSpacerItem(20, 40, QSizePolicy.Minimum, QSizePolicy.Expanding))
# layout_part2.addWidget(self.tableWidget)
        # Combine GUI part 1 and part 2
layout_base = QHBoxLayout()
layout_base.addLayout(layout_part1)
layout_base.addLayout(layout_part2)
        # Final GUI assembly (keeps the Start button at the bottom)
total_layout = QVBoxLayout()
total_layout.addLayout(layout_base)
total_layout.addWidget(self.start_btn)
        self.setLayout(total_layout)  # setLayout: defines the final GUI screen
# Threading Part##############################################################################################################
        # Run the data-processing part in its own thread
self.worker = Worker()
self.worker_thread = QThread()
        # Connect the worker signals to functions in the main thread
self.worker.train_value.connect(self.Determine_train)
self.worker.procedure_value.connect(self.Determine_abnormal)
self.worker.procedure_value.connect(self.Determine_procedure)
self.worker.verif_value.connect(self.verifit_result)
self.worker.timer.connect(self.time_display)
self.worker.symptom_db.connect(self.procedure_satisfaction)
# self.worker.shap.connect(self.explain_result)
self.worker.plot_db.connect(self.plotting)
self.worker.display_ex.connect(self.display_explain)
        self.worker.moveToThread(self.worker_thread)  # move the Worker instance into the thread
# self.worker_thread.started.connect(lambda: self.worker.generate_db())
        self.start_btn.clicked.connect(lambda: self.worker.generate_db())  # run the processing loop when clicked
self.worker_thread.start()
# Threading Part##############################################################################################################
        # Event handling ----------------------------------------------------------------------------------------------------
detailed_table.clicked.connect(self.show_table)
self.another_classification.clicked.connect(self.show_another_result)
        # Event handling for button clicks
convert_red_btn = {0: self.red1, 1: self.red2, 2: self.red3, 3: self.red4} # Red Button
convert_red_plot = {0: self.red1_plot, 1: self.red2_plot, 2: self.red3_plot, 3: self.red4_plot} #
convert_orange_btn = {0: self.orange1, 1: self.orange2, 2: self.orange3, 3: self.orange4, 4: self.orange5,
5: self.orange6, 6: self.orange7, 7: self.orange8, 8: self.orange9, 9: self.orange10,
10: self.orange11, 11: self.orange12} # Orange Button
convert_orange_plot = {0: self.orange1_plot, 1: self.orange2_plot, 2: self.orange3_plot, 3: self.orange4_plot, 4: self.orange5_plot,
5: self.orange6_plot, 6: self.orange7_plot, 7: self.orange8_plot, 8: self.orange9_plot, 9: self.orange10_plot,
10: self.orange11_plot, 11: self.orange12_plot}
        # Declare the per-button plot widgets up front -> they must be created here so they persist without being interrupted.
# Red Button
[convert_red_btn[i].clicked.connect(convert_red_plot[i]) for i in range(4)]
self.red_plot_1 = pyqtgraph.PlotWidget(title=self)
self.red_plot_2 = pyqtgraph.PlotWidget(title=self)
self.red_plot_3 = pyqtgraph.PlotWidget(title=self)
self.red_plot_4 = pyqtgraph.PlotWidget(title=self)
# Grid setting
self.red_plot_1.showGrid(x=True, y=True, alpha=0.3)
self.red_plot_2.showGrid(x=True, y=True, alpha=0.3)
self.red_plot_3.showGrid(x=True, y=True, alpha=0.3)
self.red_plot_4.showGrid(x=True, y=True, alpha=0.3)
# Orange Button
[convert_orange_btn[i].clicked.connect(convert_orange_plot[i]) for i in range(12)]
self.orange_plot_1 = pyqtgraph.PlotWidget(title=self)
self.orange_plot_2 = pyqtgraph.PlotWidget(title=self)
self.orange_plot_3 = pyqtgraph.PlotWidget(title=self)
self.orange_plot_4 = pyqtgraph.PlotWidget(title=self)
self.orange_plot_5 = pyqtgraph.PlotWidget(title=self)
self.orange_plot_6 = pyqtgraph.PlotWidget(title=self)
self.orange_plot_7 = pyqtgraph.PlotWidget(title=self)
self.orange_plot_8 = pyqtgraph.PlotWidget(title=self)
self.orange_plot_9 = pyqtgraph.PlotWidget(title=self)
self.orange_plot_10 = pyqtgraph.PlotWidget(title=self)
self.orange_plot_11 = pyqtgraph.PlotWidget(title=self)
self.orange_plot_12 = pyqtgraph.PlotWidget(title=self)
# Grid setting
self.orange_plot_1.showGrid(x=True, y=True, alpha=0.3)
self.orange_plot_2.showGrid(x=True, y=True, alpha=0.3)
self.orange_plot_3.showGrid(x=True, y=True, alpha=0.3)
self.orange_plot_4.showGrid(x=True, y=True, alpha=0.3)
self.orange_plot_5.showGrid(x=True, y=True, alpha=0.3)
self.orange_plot_6.showGrid(x=True, y=True, alpha=0.3)
self.orange_plot_7.showGrid(x=True, y=True, alpha=0.3)
self.orange_plot_8.showGrid(x=True, y=True, alpha=0.3)
self.orange_plot_9.showGrid(x=True, y=True, alpha=0.3)
self.orange_plot_10.showGrid(x=True, y=True, alpha=0.3)
self.orange_plot_11.showGrid(x=True, y=True, alpha=0.3)
self.orange_plot_12.showGrid(x=True, y=True, alpha=0.3)
self.show() # UI show command
def time_display(self, display_variable):
# display_variable[0] : time, display_variable[1].iloc[1]
self.time_label.setText(f'<b>Time :<b/> {display_variable[0]} sec')
self.time_label.setFont(QFont('Times new roman', 15))
self.time_label.setAlignment(Qt.AlignCenter)
self.power_label.setText(f'Power : {round(display_variable[1].iloc[1]["QPROREL"]*100, 2)}%')
if round(display_variable[1].iloc[1]["QPROREL"]*100, 2) < 95:
self.power_label.setStyleSheet('color : white;' 'font-weight: bold;' 'background-color: red;')
else:
self.power_label.setStyleSheet('color : black;' 'background-color: light gray;')
def Determine_train(self, train_untrain_reconstruction_error):
if train_untrain_reconstruction_error[0] <= 0.00225299: # Trained Data
self.trained_label.setStyleSheet('color : white;' 'font-weight: bold;' 'background-color: green;')
self.Untrained_label.setStyleSheet('color : black;' 'background-color: light gray;')
        else:  # Untrained Data
self.Untrained_label.setStyleSheet('color : white;' 'font-weight: bold;' 'background-color: red;')
self.trained_label.setStyleSheet('color : black;' 'background-color: light gray;')
def Determine_abnormal(self, abnormal_diagnosis):
        if abnormal_diagnosis == 0:  # normal state
self.normal_label.setStyleSheet('color : white;' 'font-weight: bold;' 'background-color: green;')
self.abnormal_label.setStyleSheet('color : black;' 'background-color: light gray;')
        else:  # abnormal state
self.abnormal_label.setStyleSheet('color : white;' 'font-weight: bold;' 'background-color: red;')
self.normal_label.setStyleSheet('color : black;' 'background-color: light gray;')
def Determine_procedure(self, abnormal_procedure_result):
if abnormal_procedure_result == 0:
self.num_procedure.setText('Normal')
self.num_scnario.setText('Normal')
elif abnormal_procedure_result == 1:
self.num_procedure.setText('Ab21-01')
self.num_scnario.setText('가압기 압력 채널 고장 "고"')
elif abnormal_procedure_result == 2:
self.num_procedure.setText('Ab21-02')
self.num_scnario.setText('가압기 압력 채널 고장 "저"')
elif abnormal_procedure_result == 3:
self.num_procedure.setText('Ab20-04')
self.num_scnario.setText('가압기 수위 채널 고장 "저"')
elif abnormal_procedure_result == 4:
self.num_procedure.setText('Ab15-07')
self.num_scnario.setText('증기발생기 수위 채널 고장 "저"')
elif abnormal_procedure_result == 5:
self.num_procedure.setText('Ab15-08')
self.num_scnario.setText('증기발생기 수위 채널 고장 "고"')
elif abnormal_procedure_result == 6:
self.num_procedure.setText('Ab63-04')
self.num_scnario.setText('제어봉 낙하')
elif abnormal_procedure_result == 7:
self.num_procedure.setText('Ab63-02')
self.num_scnario.setText('제어봉의 계속적인 삽입')
elif abnormal_procedure_result == 8:
self.num_procedure.setText('Ab21-12')
# self.num_scnario.setText('가압기 PORV 열림')
self.num_scnario.setText('Pressurizer PORV opening')
elif abnormal_procedure_result == 9:
self.num_procedure.setText('Ab19-02')
self.num_scnario.setText('가압기 안전밸브 고장')
elif abnormal_procedure_result == 10:
self.num_procedure.setText('Ab21-11')
self.num_scnario.setText('가압기 살수밸브 고장 "열림"')
elif abnormal_procedure_result == 11:
self.num_procedure.setText('Ab23-03')
self.num_scnario.setText('1차기기 냉각수 계통으로 누설 "CVCS->CCW"')
elif abnormal_procedure_result == 12:
self.num_procedure.setText('Ab60-02')
self.num_scnario.setText('재생열교환기 전단부위 파열')
elif abnormal_procedure_result == 13:
self.num_procedure.setText('Ab59-02')
self.num_scnario.setText('충전수 유량조절밸브 후단 누설')
elif abnormal_procedure_result == 14:
self.num_procedure.setText('Ab23-01')
self.num_scnario.setText('1차기기 냉각수 계통으로 누설 "RCS->CCW"')
elif abnormal_procedure_result == 15:
self.num_procedure.setText('Ab23-06')
self.num_scnario.setText('증기발생기 전열관 누설')
def verifit_result(self, verif_value):
        if verif_value[0] <= verif_value[1]:  # diagnosis succeeded
self.success_label.setStyleSheet('color : white;' 'font-weight: bold;' 'background-color: green;')
self.failure_label.setStyleSheet('color : black;' 'background-color: light gray;')
        else:  # diagnosis failed
self.failure_label.setStyleSheet('color : white;' 'font-weight: bold;' 'background-color: red;')
self.success_label.setStyleSheet('color : black;' 'background-color: light gray;')
def procedure_satisfaction(self, symptom_db):
# symptom_db[0] : classification result [0~15]
        # symptom_db[1] : check_db [2,2222] -> used to compare the current and previous time step.
        # symptom_db[1].iloc[0] : previous time step # symptom_db[1].iloc[1] : current time step
        if symptom_db[0] == 0:  # normal state
self.symptom_name.setText('Diagnosis Result : Normal → Symptoms : 0')
self.symptom1.setText('')
self.symptom1.setStyleSheet("QCheckBo" "x::indicator" "{""background-color : white;""}")
self.symptom2.setText('')
self.symptom2.setStyleSheet("QCheckBo" "x::indicator" "{""background-color : white;""}")
self.symptom3.setText('')
self.symptom3.setStyleSheet("QCheckBo" "x::indicator" "{""background-color : white;""}")
self.symptom4.setText('')
self.symptom4.setStyleSheet("QCheckBo" "x::indicator" "{""background-color : white;""}")
self.symptom5.setText('')
self.symptom5.setStyleSheet("QCheckBo" "x::indicator" "{""background-color : white;""}")
self.symptom6.setText('')
self.symptom6.setStyleSheet("QCheckBo" "x::indicator" "{""background-color : white;""}")
elif symptom_db[0] == 1:
self.symptom_name.setText('Diagnosis Result : Ab21-01 Pressurizer pressure channel failure "High" → Symptoms : 6')
self.symptom1.setText("채널 고장으로 인한 가압기 '고' 압력 지시")
if symptom_db[1].iloc[1]['PPRZN'] > symptom_db[1].iloc[1]['CPPRZH']:
self.symptom1.setStyleSheet("QCheckBo" "x::indicator" "{""background-color : red;""}")
else:
self.symptom1.setStyleSheet("QCheckBo" "x::indicator" "{""background-color : white;""}")
self.symptom2.setText("가압기 살수밸브 '열림' 지시")
if symptom_db[1].iloc[1]['BPRZSP'] > 0:
self.symptom2.setStyleSheet("QCheckBo" "x::indicator" "{""background-color : red;""}")
else:
self.symptom2.setStyleSheet("QCheckBo" "x::indicator" "{""background-color : white;""}")
self.symptom3.setText("가압기 비례전열기 꺼짐")
if symptom_db[1].iloc[1]['QPRZP'] == 0:
self.symptom3.setStyleSheet("QCheckBo" "x::indicator" "{""background-color : red;""}")
else:
self.symptom3.setStyleSheet("QCheckBo" "x::indicator" "{""background-color : white;""}")
self.symptom4.setText("가압기 보조전열기 꺼짐")
if symptom_db[1].iloc[1]['QPRZB'] == 0:
self.symptom4.setStyleSheet("QCheckBo" "x::indicator" "{""background-color : red;""}")
else:
self.symptom4.setStyleSheet("QCheckBo" "x::indicator" "{""background-color : white;""}")
self.symptom5.setText("실제 가압기 '저' 압력 지시")
if symptom_db[1].iloc[1]['PPRZ'] < symptom_db[1].iloc[1]['CPPRZL']:
self.symptom5.setStyleSheet("QCheckBo" "x::indicator" "{""background-color : red;""}")
else:
self.symptom5.setStyleSheet("QCheckBo" "x::indicator" "{""background-color : white;""}")
self.symptom6.setText("가압기 PORV 차단밸브 닫힘")
if symptom_db[1].iloc[1]['BHV6'] == 0:
self.symptom6.setStyleSheet("QCheckBo" "x::indicator" "{""background-color : red;""}")
else:
self.symptom6.setStyleSheet("QCheckBo" "x::indicator" "{""background-color : white;""}")
elif symptom_db[0] == 2:
self.symptom_name.setText('진단 : Ab21-02 가압기 압력 채널 고장 "저" → 증상 : 5')
self.symptom1.setText("채널 고장으로 인한 가압기 '저' 압력 지시")
if symptom_db[1].iloc[1]['PPRZN'] < symptom_db[1].iloc[1]['CPPRZL']:
self.symptom1.setStyleSheet("QCheckBo" "x::indicator" "{""background-color : red;""}")
else:
self.symptom1.setStyleSheet("QCheckBo" "x::indicator" "{""background-color : white;""}")
self.symptom2.setText('가압기 저압력으로 인한 보조 전열기 켜짐 지시 및 경보 발생')
if (symptom_db[1].iloc[1]['PPRZN'] < symptom_db[1].iloc[1]['CQPRZB']) and (symptom_db[1].iloc[1]['KBHON'] == 1):
self.symptom2.setStyleSheet("QCheckBo" "x::indicator" "{""background-color : red;""}")
else:
self.symptom2.setStyleSheet("QCheckBo" "x::indicator" "{""background-color : white;""}")
self.symptom3.setText("실제 가압기 '고' 압력 지시")
if symptom_db[1].iloc[1]['PPRZ'] > symptom_db[1].iloc[1]['CPPRZH']:
self.symptom3.setStyleSheet("QCheckBo" "x::indicator" "{""background-color : red;""}")
else:
self.symptom3.setStyleSheet("QCheckBo" "x::indicator" "{""background-color : white;""}")
self.symptom4.setText('가압기 PORV 열림 지시 및 경보 발생')
if symptom_db[1].iloc[1]['BPORV'] > 0:
self.symptom4.setStyleSheet("QCheckBo" "x::indicator" "{""background-color : red;""}")
else:
self.symptom4.setStyleSheet("QCheckBo" "x::indicator" "{""background-color : white;""}")
            self.symptom5.setText('실제 가압기 압력 감소로 가압기 PORV 닫힘')  # TODO: the pressurizer-pressure decrease check still needs work
if symptom_db[1].iloc[1]['BPORV'] == 0 and (symptom_db[1].iloc[0]['PPRZ'] > symptom_db[1].iloc[1]['PPRZ']):
self.symptom5.setStyleSheet("QCheckBo" "x::indicator" "{""background-color : red;""}")
else:
self.symptom5.setStyleSheet("QCheckBo" "x::indicator" "{""background-color : white;""}")
elif symptom_db[0] == 3:
self.symptom_name.setText('진단 : Ab20-04 가압기 수위 채널 고장 "저" → 증상 : 5')
self.symptom1.setText("채널 고장으로 인한 가압기 '저' 수위 지시")
            if symptom_db[1].iloc[1]['ZINST63'] < 17:  # TODO: re-check this threshold later
self.symptom1.setStyleSheet("QCheckBo" "x::indicator" "{""background-color : red;""}")
# else:
# self.symptom1.setStyleSheet("QCheckBo" "x::indicator" "{""background-color : white;""}")
self.symptom2.setText('"LETDN HX OUTLET FLOW LOW" 경보 발생')
if symptom_db[1].iloc[1]['UNRHXUT'] > symptom_db[1].iloc[1]['CULDHX']:
self.symptom2.setStyleSheet("QCheckBo" "x::indicator" "{""background-color : red;""}")
# else:
# self.symptom2.setStyleSheet("QCheckBo" "x::indicator" "{""background-color : white;""}")
self.symptom3.setText('"CHARGING LINE FLOW HI/LO" 경보 발생')
if (symptom_db[1].iloc[1]['WCHGNO'] < symptom_db[1].iloc[1]['CWCHGL']) or (symptom_db[1].iloc[1]['WCHGNO'] > symptom_db[1].iloc[1]['CWCHGH']):
self.symptom3.setStyleSheet("QCheckBo" "x::indicator" "{""background-color : red;""}")
# else:
# self.symptom3.setStyleSheet("QCheckBo" "x::indicator" "{""background-color : white;""}")
self.symptom4.setText('충전 유량 증가')
if symptom_db[1].iloc[0]['WCHGNO'] < symptom_db[1].iloc[1]['WCHGNO']:
self.symptom4.setStyleSheet("QCheckBo" "x::indicator" "{""background-color : red;""}")
# else:
# self.symptom4.setStyleSheet("QCheckBo" "x::indicator" "{""background-color : white;""}")
self.symptom5.setText('건전한 수위지시계의 수위 지시치 증가')
if symptom_db[1].iloc[0]['ZPRZNO'] < symptom_db[1].iloc[1]['ZPRZNO']:
self.symptom5.setStyleSheet("QCheckBo" "x::indicator" "{""background-color : red;""}")
# else:
# self.symptom5.setStyleSheet("QCheckBo" "x::indicator" "{""background-color : white;""}")
elif symptom_db[0] == 4:
self.symptom_name.setText('진단 : Ab15-07 증기발생기 수위 채널 고장 "저" → 증상 : ')
self.symptom1.setText('증기발생기 수위 "저" 경보 발생')
if symptom_db[1].iloc[1]['ZINST78']*0.01 < symptom_db[1].iloc[1]['CZSGW'] or symptom_db[1].iloc[1]['ZINST77']*0.01 < symptom_db[1].iloc[1]['CZSGW'] or symptom_db[1].iloc[1]['ZINST76']*0.01 < symptom_db[1].iloc[1]['CZSGW']:
self.symptom1.setStyleSheet("QCheckBo" "x::indicator" "{""background-color : red;""}")
else:
self.symptom1.setStyleSheet("QCheckBo" "x::indicator" "{""background-color : white;""}")
self.symptom2.setText('해당 SG MFCV 열림 방향으로 진행 및 해당 SG 실제 급수유량 증가')
elif symptom_db[0] == 8:
# self.symptom_name.setText('진단 : Ab21-12 가압기 PORV 열림 → 증상 : 5')
self.symptom_name.setText('Diagnosis result : Ab21-12 Pressurizer PORV opening → Symptoms : 5')
# self.symptom1.setText('가압기 PORV 열림 지시 및 경보 발생')
self.symptom1.setText('Pressurizer PORV open indication and alarm')
if symptom_db[1].iloc[1]['BPORV'] > 0:
self.symptom1.setStyleSheet("QCheckBo" "x::indicator" "{""background-color : red;""}")
else:
self.symptom1.setStyleSheet("QCheckBo" "x::indicator" "{""background-color : white;""}")
# self.symptom2.setText('가압기 저압력으로 인한 보조 전열기 켜짐 지시 및 경보 발생')
self.symptom2.setText('Aux. heater turn on instruction and alarm due to pressurizer low pressure')
if (symptom_db[1].iloc[1]['PPRZN'] < symptom_db[1].iloc[1]['CQPRZB']) and (symptom_db[1].iloc[1]['KBHON'] == 1):
self.symptom2.setStyleSheet("QCheckBo" "x::indicator" "{""background-color : red;""}")
else:
self.symptom2.setStyleSheet("QCheckBo" "x::indicator" "{""background-color : white;""}")
# self.symptom3.setText("가압기 '저' 압력 지시 및 경보 발생")
self.symptom3.setText("pressurizer 'low' pressure indication and alarm")
if symptom_db[1].iloc[1]['PPRZ'] < symptom_db[1].iloc[1]['CPPRZL'] :
self.symptom3.setStyleSheet("QCheckBo" "x::indicator" "{""background-color : red;""}")
else:
self.symptom3.setStyleSheet("QCheckBo" "x::indicator" "{""background-color : white;""}")
# self.symptom4.setText("PRT 고온 지시 및 경보 발생")
self.symptom4.setText("PRT high temperature indication and alarm")
if symptom_db[1].iloc[1]['UPRT'] > symptom_db[1].iloc[1]['CUPRT'] :
self.symptom4.setStyleSheet("QCheckBo" "x::indicator" "{""background-color : red;""}")
else:
self.symptom4.setStyleSheet("QCheckBo" "x::indicator" "{""background-color : white;""}")
# self.symptom5.setText("PRT 고압 지시 및 경보 발생")
self.symptom5.setText("PRT high pressure indication and alarm")
if (symptom_db[1].iloc[1]['PPRT'] - 0.98E5) > symptom_db[1].iloc[1]['CPPRT']:
self.symptom5.setStyleSheet("QCheckBo" "x::indicator" "{""background-color : red;""}")
else:
self.symptom5.setStyleSheet("QCheckBo" "x::indicator" "{""background-color : white;""}")
self.symptom6.setText("Blank")
self.symptom6.setStyleSheet("QCheckBo" "x::indicator" "{""background-color : white;""}")
elif symptom_db[0] == 10:
self.symptom_name.setText("진단 : Ab21-11 가압기 살수밸브 고장 '열림' → 증상 : 4")
self.symptom1.setText("가압기 살수밸브 '열림' 지시 및 상태 표시등 점등")
if symptom_db[1].iloc[1]['BPRZSP'] > 0:
self.symptom1.setStyleSheet("QCheckBo" "x::indicator" "{""background-color : red;""}")
else:
self.symptom1.setStyleSheet("QCheckBo" "x::indicator" "{""background-color : white;""}")
self.symptom2.setText("가압기 보조전열기 켜짐 지시 및 경보 발생")
if (symptom_db[1].iloc[1]['PPRZN'] < symptom_db[1].iloc[1]['CQPRZB']) and (symptom_db[1].iloc[1]['KBHON'] == 1):
self.symptom2.setStyleSheet("QCheckBo" "x::indicator" "{""background-color : red;""}")
else:
self.symptom2.setStyleSheet("QCheckBo" "x::indicator" "{""background-color : white;""}")
self.symptom3.setText("가압기 '저' 압력 지시 및 경보 발생")
if symptom_db[1].iloc[1]['PPRZ'] < symptom_db[1].iloc[1]['CPPRZL']:
self.symptom3.setStyleSheet("QCheckBo" "x::indicator" "{""background-color : red;""}")
else:
self.symptom3.setStyleSheet("QCheckBo" "x::indicator" "{""background-color : white;""}")
self.symptom4.setText("가압기 수위 급격한 증가") # 급격한 증가에 대한 수정은 필요함 -> 추후 수정
if symptom_db[1].iloc[0]['ZINST63'] < symptom_db[1].iloc[1]['ZINST63']:
self.symptom4.setStyleSheet("QCheckBo" "x::indicator" "{""background-color : red;""}")
else:
self.symptom4.setStyleSheet("QCheckBo" "x::indicator" "{""background-color : white;""}")
def explain_result(self, shap_add_des):
'''
        # shap_add_des['index'] : variable name / shap_add_des[0] : shap value
        # shap_add_des['describe'] : description of the variable / shap_add_des['probability'] : shap value converted to a probability
'''
self.tableWidget.setRowCount(len(shap_add_des))
self.tableWidget.setColumnCount(4)
self.tableWidget.setHorizontalHeaderLabels(["value_name", 'probability', 'describe', 'system'])
header = self.tableWidget.horizontalHeader()
header.setSectionResizeMode(QHeaderView.ResizeToContents)
header.setSectionResizeMode(0, QHeaderView.Stretch)
header.setSectionResizeMode(1, QHeaderView.Stretch)
header.setSectionResizeMode(2, QHeaderView.ResizeToContents)
header.setSectionResizeMode(3, QHeaderView.Stretch)
[self.tableWidget.setItem(i, 0, QTableWidgetItem(f"{shap_add_des['index'][i]}")) for i in range(len(shap_add_des['index']))]
[self.tableWidget.setItem(i, 1, QTableWidgetItem(f"{round(shap_add_des['probability'][i],2)}%")) for i in range(len(shap_add_des['probability']))]
[self.tableWidget.setItem(i, 2, QTableWidgetItem(f"{shap_add_des['describe'][i]}")) for i in range(len(shap_add_des['describe']))]
[self.tableWidget.setItem(i, 3, QTableWidgetItem(f"{shap_add_des['system'][i]}")) for i in range(len(shap_add_des['system']))]
delegate = AlignDelegate(self.tableWidget)
self.tableWidget.setItemDelegate(delegate)
def show_table(self):
self.worker.shap.connect(self.explain_result)
        # Because the signal is delivered through the thread on click, there is some buffering (about 2 s); consider pre-loading this at start-up instead.
self.tableWidget.show()
def plotting(self, symptom_db):
# symptom_db[0] : liner : appended time (axis-x) / symptom_db[1].iloc[1] : check_db (:line,2222)[1]
# -- scatter --
# time = []
# value1, value2, value3 = [], [], []
# time.append(symptom_db[0])
# value1.append(round(symptom_db[1].iloc[1]['ZVCT'],2))
# value2.append(round(symptom_db[1].iloc[1]['BPORV'],2))
# value3.append(round(symptom_db[1].iloc[1]['UPRZ'],2))
# self.plotting_1 = self.plot_1.plot(pen=None, symbol='o', symbolBrush='w', symbolPen='w', symbolSize=5)
# self.plotting_2 = self.plot_2.plot(pen=None, symbol='o', symbolBrush='w', symbolPen='w', symbolSize=5)
# self.plotting_3 = self.plot_3.plot(pen=None, symbol='o', symbolBrush='w', symbolPen='w', symbolSize=5)
# -- Line plotting --
# self.plotting_1 = self.plot_1.plot(pen='w')
# self.plotting_2 = self.plot_2.plot(pen='w')
# self.plotting_3 = self.plot_3.plot(pen='w')
# self.plotting_4 = self.plot_4.plot(pen='w')
self.plot_1.showGrid(x=True, y=True, alpha=0.3)
self.plot_2.showGrid(x=True, y=True, alpha=0.3)
self.plot_3.showGrid(x=True, y=True, alpha=0.3)
self.plot_4.showGrid(x=True, y=True, alpha=0.3)
self.plotting_1 = self.plot_1.plot(pen=pyqtgraph.mkPen('k',width=3))
self.plotting_2 = self.plot_2.plot(pen=pyqtgraph.mkPen('k',width=3))
self.plotting_3 = self.plot_3.plot(pen=pyqtgraph.mkPen('k',width=3))
self.plotting_4 = self.plot_4.plot(pen=pyqtgraph.mkPen('k',width=3))
self.plotting_1.setData(x=symptom_db[0], y=pd.DataFrame(symptom_db[1])['BPORV'])
self.plot_1.setTitle('PORV open state')
self.plotting_2.setData(x=symptom_db[0], y=pd.DataFrame(symptom_db[1])['PPRZN'])
self.plot_2.setTitle('Pressurizer pressure')
self.plotting_3.setData(x=symptom_db[0], y=pd.DataFrame(symptom_db[1])['UPRT'])
self.plot_3.setTitle('PRT temperature')
self.plotting_4.setData(x=symptom_db[0], y=pd.DataFrame(symptom_db[1])['PPRT'])
self.plot_4.setTitle('PRT pressure')
        # red_range = display_db[display_db['probability'] >= 10]  # variables with a probability of 10% or more
#
# print(bool(red_range["describe"].iloc[3]))
# try :
# self.plotting_1.setData(x=symptom_db[0], y=pd.DataFrame(symptom_db[1])[red_range['index'].iloc[0]])
# if red_range["describe"].iloc[0] == None:
# self.plot_1.setTitle(self)
# else:
# self.plot_1.setTitle(f'{red_range["describe"].iloc[0]}')
# # self.plot_1.clear()
# except:
# print('plot1 fail')
# try:
# self.plotting_2.setData(x=symptom_db[0], y=pd.DataFrame(symptom_db[1])[red_range['index'].iloc[1]])
# if red_range["describe"].iloc[1] == None:
# self.plot_2.setTitle(self)
# else:
# self.plot_2.setTitle(f'{red_range["describe"].iloc[1]}')
# # self.plot_2.clear()
# except:
# print('plot2 fail')
# try:
# self.plotting_3.setData(x=symptom_db[0], y=pd.DataFrame(symptom_db[1])[red_range['index'].iloc[2]])
# if red_range["describe"].iloc[2] == None:
# self.plot_3.setTitle(self)
# else:
# self.plot_3.setTitle(f'{red_range["describe"].iloc[2]}')
# # self.plot_3.clear()
# except:
# print('plot3 fail')
# try:
# self.plotting_4.setData(x=symptom_db[0], y=pd.DataFrame(symptom_db[1])[red_range['index'].iloc[3]])
# if red_range["describe"].iloc[3] == None:
# self.plot_4.setTitle(self)
# else:
# self.plot_4.setTitle(f'{red_range["describe"].iloc[3]}')
# # self.plot_4.clear()
# except:
# print('plot4 fail')
def display_explain(self, display_db, symptom_db, normal_db):
'''
        # display_db['index'] : variable name / display_db[0] : shap value
        # display_db['describe'] : description of the variable / display_db['probability'] : shap value converted to a probability
        # symptom_db[0] : liner : appended time (axis-x) / symptom_db[1].iloc[1] : check_db (:line,2222)[1]
'''
red_range = display_db[display_db['probability'] >=10]
orange_range = display_db[[display_db['probability'].iloc[i]<10 and display_db['probability'].iloc[i]>1 for i in range(len(display_db['probability']))]]
convert_red = {0: self.red1, 1: self.red2, 2: self.red3, 3: self.red4}
convert_orange = {0: self.orange1, 1: self.orange2, 2: self.orange3, 3: self.orange4, 4: self.orange5, 5: self.orange6, 6: self.orange7, 7: self.orange8, 8: self.orange9, 9: self.orange10, 10: self.orange11, 11: self.orange12}
if 4-len(red_range) == 0:
red_del = []
elif 4-len(red_range) == 1:
red_del = [3]
elif 4-len(red_range) == 2:
red_del = [2,3]
elif 4-len(red_range) == 3:
red_del = [1,2,3]
elif 4-len(red_range) == 4:
red_del = [0,1,2,3]
if 12-len(orange_range) == 0:
orange_del = []
elif 12-len(orange_range) == 1:
orange_del = [11]
elif 12-len(orange_range) == 2:
orange_del = [10,11]
elif 12-len(orange_range) == 3:
orange_del = [9,10,11]
elif 12-len(orange_range) == 4:
orange_del = [8,9,10,11]
elif 12-len(orange_range) == 5:
orange_del = [7,8,9,10,11]
elif 12-len(orange_range) == 6:
orange_del = [6,7,8,9,10,11]
elif 12-len(orange_range) == 7:
orange_del = [5,6,7,8,9,10,11]
elif 12-len(orange_range) == 8:
orange_del = [4,5,6,7,8,9,10,11]
elif 12-len(orange_range) == 9:
orange_del = [3,4,5,6,7,8,9,10,11]
elif 12-len(orange_range) == 10:
orange_del = [2,3,4,5,6,7,8,9,10,11]
elif 12-len(orange_range) == 11:
orange_del = [1,2,3,4,5,6,7,8,9,10,11]
elif 12-len(orange_range) == 12:
orange_del = [0,1,2,3,4,5,6,7,8,9,10,11]
[convert_red[i].setText(f'{red_range["describe"].iloc[i]} \n[{round(red_range["probability"].iloc[i],2)}%]') for i in range(len(red_range))]
[convert_red[i].setText('None\nParameter') for i in red_del]
[convert_red[i].setStyleSheet('color : white;' 'font-weight: bold;' 'background-color: blue;') for i in range(len(red_range))]
[convert_red[i].setStyleSheet('color : black;' 'background-color: light gray;') for i in red_del]
[convert_orange[i].setText(f'{orange_range["describe"].iloc[i]} \n[{round(orange_range["probability"].iloc[i],2)}%]') for i in range(len(orange_range))]
[convert_orange[i].setText('None\nParameter') for i in orange_del]
# [convert_orange[i].setStyleSheet('color : white;' 'font-weight: bold;' 'background-color: orange;') for i in range(len(orange_range))]
# [convert_orange[i].setStyleSheet('color : black;' 'background-color: light gray;') for i in orange_del]
        # Build the plotting data associated with each button
# Red1 Button
if self.red1.text().split()[0] != 'None':
self.red_plot_1.clear()
self.red_plot_1.setTitle(red_range['describe'].iloc[0])
self.red_plot_1.addLegend(offset=(-30,20))
self.red_plot_1.plot(x=symptom_db[0], y=pd.DataFrame(symptom_db[1])[red_range['index'].iloc[0]], pen=pyqtgraph.mkPen('b', width=3), name = 'Real Data')
self.red_plot_1.plot(x=symptom_db[0], y=pd.DataFrame(normal_db)[red_range['index'].iloc[0]], pen=pyqtgraph.mkPen('k', width=3), name = 'Normal Data')
# Red2 Button
if self.red2.text().split()[0] != 'None':
self.red_plot_2.clear()
self.red_plot_2.setTitle(red_range['describe'].iloc[1])
self.red_plot_2.addLegend(offset=(-30, 20))
self.red_plot_2.plot(x=symptom_db[0], y=pd.DataFrame(symptom_db[1])[red_range['index'].iloc[1]], pen=pyqtgraph.mkPen('b', width=3), name='Real Data')
self.red_plot_2.plot(x=symptom_db[0], y=pd.DataFrame(normal_db)[red_range['index'].iloc[1]], pen=pyqtgraph.mkPen('k', width=3), name='Normal Data')
# Red3 Button
if self.red3.text().split()[0] != 'None':
self.red_plot_3.clear()
self.red_plot_3.setTitle(red_range['describe'].iloc[2])
self.red_plot_3.addLegend(offset=(-30, 20))
self.red_plot_3.plot(x=symptom_db[0], y=pd.DataFrame(symptom_db[1])[red_range['index'].iloc[2]], pen=pyqtgraph.mkPen('b', width=3), name='Real Data')
self.red_plot_3.plot(x=symptom_db[0], y=pd.DataFrame(normal_db)[red_range['index'].iloc[2]], pen=pyqtgraph.mkPen('k', width=3), name='Normal Data')
# Red4 Button
if self.red4.text().split()[0] != 'None':
self.red_plot_4.clear()
self.red_plot_4.setTitle(red_range['describe'].iloc[3])
self.red_plot_4.addLegend(offset=(-30, 20))
self.red_plot_4.plot(x=symptom_db[0], y=pd.DataFrame(symptom_db[1])[red_range['index'].iloc[3]], pen=pyqtgraph.mkPen('b', width=3), name='Real Data')
self.red_plot_4.plot(x=symptom_db[0], y=pd.DataFrame(normal_db)[red_range['index'].iloc[3]], pen=pyqtgraph.mkPen('k', width=3), name='Normal Data')
# Orange1 Button
if self.orange1.text().split()[0] != 'None':
self.orange_plot_1.clear()
self.orange_plot_1.setTitle(orange_range['describe'].iloc[0])
self.orange_plot_1.addLegend(offset=(-30, 20))
self.orange_plot_1.plot(x=symptom_db[0], y=pd.DataFrame(symptom_db[1])[orange_range['index'].iloc[0]], pen=pyqtgraph.mkPen('b', width=3), name='Real Data')
self.orange_plot_1.plot(x=symptom_db[0], y=pd.DataFrame(normal_db)[orange_range['index'].iloc[0]], pen=pyqtgraph.mkPen('k', width=3), name='Normal Data')
# Orange2 Button
if self.orange2.text().split()[0] != 'None':
self.orange_plot_2.clear()
self.orange_plot_2.setTitle(orange_range['describe'].iloc[1])
self.orange_plot_2.addLegend(offset=(-30, 20))
self.orange_plot_2.plot(x=symptom_db[0], y=pd.DataFrame(symptom_db[1])[orange_range['index'].iloc[1]], pen=pyqtgraph.mkPen('b', width=3), name='Real Data')
self.orange_plot_2.plot(x=symptom_db[0], y=pd.DataFrame(normal_db)[orange_range['index'].iloc[1]], pen=pyqtgraph.mkPen('k', width=3), name='Normal Data')
# Orange3 Button
if self.orange3.text().split()[0] != 'None':
self.orange_plot_3.clear()
self.orange_plot_3.setTitle(orange_range['describe'].iloc[2])
self.orange_plot_3.addLegend(offset=(-30, 20))
self.orange_plot_3.plot(x=symptom_db[0], y=pd.DataFrame(symptom_db[1])[orange_range['index'].iloc[2]], pen=pyqtgraph.mkPen('b', width=3), name='Real Data')
self.orange_plot_3.plot(x=symptom_db[0], y=pd.DataFrame(normal_db)[orange_range['index'].iloc[2]], pen=pyqtgraph.mkPen('k', width=3), name='Normal Data')
# Orange4 Button
if self.orange4.text().split()[0] != 'None':
self.orange_plot_4.clear()
self.orange_plot_4.setTitle(orange_range['describe'].iloc[3])
self.orange_plot_4.addLegend(offset=(-30, 20))
self.orange_plot_4.plot(x=symptom_db[0], y=pd.DataFrame(symptom_db[1])[orange_range['index'].iloc[3]], pen=pyqtgraph.mkPen('b', width=3), name='Real Data')
self.orange_plot_4.plot(x=symptom_db[0], y=pd.DataFrame(normal_db)[orange_range['index'].iloc[3]], pen=pyqtgraph.mkPen('k', width=3), name='Normal Data')
# Orange5 Button
if self.orange5.text().split()[0] != 'None':
self.orange_plot_5.clear()
self.orange_plot_5.setTitle(orange_range['describe'].iloc[4])
self.orange_plot_5.addLegend(offset=(-30, 20))
self.orange_plot_5.plot(x=symptom_db[0], y=pd.DataFrame(symptom_db[1])[orange_range['index'].iloc[4]], pen=pyqtgraph.mkPen('b', width=3), name='Real Data')
self.orange_plot_5.plot(x=symptom_db[0], y=pd.DataFrame(normal_db)[orange_range['index'].iloc[4]], pen=pyqtgraph.mkPen('k', width=3), name='Normal Data')
# Orange6 Button
if self.orange6.text().split()[0] != 'None':
self.orange_plot_6.clear()
self.orange_plot_6.setTitle(orange_range['describe'].iloc[5])
self.orange_plot_6.addLegend(offset=(-30, 20))
self.orange_plot_6.plot(x=symptom_db[0], y=pd.DataFrame(symptom_db[1])[orange_range['index'].iloc[5]], pen=pyqtgraph.mkPen('b', width=3), name='Real Data')
self.orange_plot_6.plot(x=symptom_db[0], y=pd.DataFrame(normal_db)[orange_range['index'].iloc[5]], pen=pyqtgraph.mkPen('k', width=3), name='Normal Data')
# Orange7 Button
if self.orange7.text().split()[0] != 'None':
self.orange_plot_7.clear()
self.orange_plot_7.setTitle(orange_range['describe'].iloc[6])
self.orange_plot_7.addLegend(offset=(-30, 20))
self.orange_plot_7.plot(x=symptom_db[0], y=pd.DataFrame(symptom_db[1])[orange_range['index'].iloc[6]], pen=pyqtgraph.mkPen('b', width=3), name='Real Data')
self.orange_plot_7.plot(x=symptom_db[0], y=pd.DataFrame(normal_db)[orange_range['index'].iloc[6]], pen=pyqtgraph.mkPen('k', width=3), name='Normal Data')
# Orange8 Button
if self.orange8.text().split()[0] != 'None':
self.orange_plot_8.clear()
self.orange_plot_8.setTitle(orange_range['describe'].iloc[7])
self.orange_plot_8.addLegend(offset=(-30, 20))
self.orange_plot_8.plot(x=symptom_db[0], y=pd.DataFrame(symptom_db[1])[orange_range['index'].iloc[7]], pen=pyqtgraph.mkPen('b', width=3), name='Real Data')
self.orange_plot_8.plot(x=symptom_db[0], y=pd.DataFrame(normal_db)[orange_range['index'].iloc[7]], pen=pyqtgraph.mkPen('k', width=3), name='Normal Data')
# Orange9 Button
if self.orange9.text().split()[0] != 'None':
self.orange_plot_9.clear()
self.orange_plot_9.setTitle(orange_range['describe'].iloc[8])
self.orange_plot_9.addLegend(offset=(-30, 20))
self.orange_plot_9.plot(x=symptom_db[0], y=pd.DataFrame(symptom_db[1])[orange_range['index'].iloc[8]], pen=pyqtgraph.mkPen('b', width=3), name='Real Data')
self.orange_plot_9.plot(x=symptom_db[0], y=pd.DataFrame(normal_db)[orange_range['index'].iloc[8]], pen=pyqtgraph.mkPen('k', width=3), name='Normal Data')
# Orange10 Button
if self.orange10.text().split()[0] != 'None':
self.orange_plot_10.clear()
self.orange_plot_10.setTitle(orange_range['describe'].iloc[9])
self.orange_plot_10.addLegend(offset=(-30, 20))
self.orange_plot_10.plot(x=symptom_db[0], y=pd.DataFrame(symptom_db[1])[orange_range['index'].iloc[9]], pen=pyqtgraph.mkPen('b', width=3), name='Real Data')
self.orange_plot_10.plot(x=symptom_db[0], y=pd.DataFrame(normal_db)[orange_range['index'].iloc[9]], pen=pyqtgraph.mkPen('k', width=3), name='Normal Data')
# Orange11 Button
if self.orange11.text().split()[0] != 'None':
self.orange_plot_11.clear()
self.orange_plot_11.setTitle(orange_range['describe'].iloc[10])
self.orange_plot_11.addLegend(offset=(-30, 20))
self.orange_plot_11.plot(x=symptom_db[0], y=pd.DataFrame(symptom_db[1])[orange_range['index'].iloc[10]], pen=pyqtgraph.mkPen('b', width=3), name='Real Data')
self.orange_plot_11.plot(x=symptom_db[0], y=pd.DataFrame(normal_db)[orange_range['index'].iloc[10]], pen=pyqtgraph.mkPen('k', width=3), name='Normal Data')
# Orange12 Button
if self.orange12.text().split()[0] != 'None':
self.orange_plot_12.clear()
self.orange_plot_12.setTitle(orange_range['describe'].iloc[11])
self.orange_plot_12.addLegend(offset=(-30, 20))
self.orange_plot_12.plot(x=symptom_db[0], y=pd.DataFrame(symptom_db[1])[orange_range['index'].iloc[11]], pen=pyqtgraph.mkPen('b', width=3), name='Real Data')
self.orange_plot_12.plot(x=symptom_db[0], y=pd.DataFrame(normal_db)[orange_range['index'].iloc[11]], pen=pyqtgraph.mkPen('k', width=3), name='Normal Data')
[convert_red[i].setCheckable(True) for i in range(4)]
[convert_orange[i].setCheckable(True) for i in range(12)]
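# Hedged sketch (illustrative only, not wired into the UI): the sixteen per-button
# blocks above repeat the same clear/setTitle/addLegend/plot sequence, so they could
# be funnelled through a single helper. The argument names below are assumptions
# about how a caller would pass in the existing widget and range attributes.
def _plot_symptom_vs_baseline(self, plot_widget, describe, param_index, symptom_db, baseline_db, baseline_name):
    plot_widget.clear()
    plot_widget.setTitle(describe)
    plot_widget.addLegend(offset=(-30, 20))
    # measured plant data in blue, reference trend in black
    plot_widget.plot(x=symptom_db[0], y=pd.DataFrame(symptom_db[1])[param_index],
                     pen=pyqtgraph.mkPen('b', width=3), name='Real Data')
    plot_widget.plot(x=symptom_db[0], y=pd.DataFrame(baseline_db)[param_index],
                     pen=pyqtgraph.mkPen('k', width=3), name=baseline_name)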
def red1_plot(self):
if self.red1.isChecked():
if self.red1.text().split()[0] != 'None':
self.red_plot_1.show()
self.red1.setCheckable(False)
def red2_plot(self):
if self.red2.isChecked():
if self.red2.text().split()[0] != 'None':
self.red_plot_2.show()
self.red2.setCheckable(False)
def red3_plot(self):
if self.red3.isChecked():
if self.red3.text().split()[0] != 'None':
self.red_plot_3.show()
self.red3.setCheckable(False)
def red4_plot(self):
if self.red4.isChecked():
if self.red4.text().split()[0] != 'None':
self.red_plot_4.show()
self.red4.setCheckable(False)
def orange1_plot(self):
if self.orange1.isChecked():
if self.orange1.text().split()[0] != 'None':
self.orange_plot_1.show()
self.orange1.setCheckable(False)
def orange2_plot(self):
if self.orange2.isChecked():
if self.orange2.text().split()[0] != 'None':
self.orange_plot_2.show()
self.orange2.setCheckable(False)
def orange3_plot(self):
if self.orange3.isChecked():
if self.orange3.text().split()[0] != 'None':
self.orange_plot_3.show()
self.orange3.setCheckable(False)
def orange4_plot(self):
if self.orange4.isChecked():
if self.orange4.text().split()[0] != 'None':
self.orange_plot_4.show()
self.orange4.setCheckable(False)
def orange5_plot(self):
if self.orange5.isChecked():
if self.orange5.text().split()[0] != 'None':
self.orange_plot_5.show()
self.orange5.setCheckable(False)
def orange6_plot(self):
if self.orange6.isChecked():
if self.orange6.text().split()[0] != 'None':
self.orange_plot_6.show()
self.orange6.setCheckable(False)
def orange7_plot(self):
if self.orange7.isChecked():
if self.orange7.text().split()[0] != 'None':
self.orange_plot_7.show()
self.orange7.setCheckable(False)
def orange8_plot(self):
if self.orange8.isChecked():
if self.orange8.text().split()[0] != 'None':
self.orange_plot_8.show()
self.orange8.setCheckable(False)
def orange9_plot(self):
if self.orange9.isChecked():
if self.orange9.text().split()[0] != 'None':
self.orange_plot_9.show()
self.orange9.setCheckable(False)
def orange10_plot(self):
if self.orange10.isChecked():
if self.orange10.text().split()[0] != 'None':
self.orange_plot_10.show()
self.orange10.setCheckable(False)
def orange11_plot(self):
if self.orange11.isChecked():
if self.orange11.text().split()[0] != 'None':
self.orange_plot_11.show()
self.orange11.setCheckable(False)
def orange12_plot(self):
if self.orange12.isChecked():
if self.orange12.text().split()[0] != 'None':
self.orange_plot_12.show()
self.orange12.setCheckable(False)
def show_another_result(self):
self.other = another_result_explain()
self.worker.another_shap_table.connect(self.other.show_another_result_table)
self.worker.another_shap.connect(self.other.show_shap)
self.other.show()
class another_result_explain(QWidget):
def __init__(self):
super().__init__()
# Initial settings for the sub-interface
self.setWindowTitle('Another Result Explanation')
self.setGeometry(300, 300, 800, 500)
self.selected_para = pd.read_csv('./DataBase/Final_parameter_200825.csv')
# Compose the layout
combo_layout = QVBoxLayout()
self.title_label = QLabel("<b>Result interpretation for the scenarios that were not selected</b>")
self.title_label.setAlignment(Qt.AlignCenter)
self.blank = QLabel(self)  # blank label used as a line break / spacer
self.show_table = QPushButton("Show Table")
self.cb = QComboBox(self)
self.cb.addItem('Normal')
self.cb.addItem('Ab21-01: Pressurizer pressure channel failure (High)')
self.cb.addItem('Ab21-02: Pressurizer pressure channel failure (Low)')
self.cb.addItem('Ab20-04: Pressurizer level channel failure (Low)')
self.cb.addItem('Ab15-07: Steam generator level channel failure (High)')
self.cb.addItem('Ab15-08: Steam generator level channel failure (Low)')
self.cb.addItem('Ab63-04: Control rod fall')
self.cb.addItem('Ab63-02: Continuous insertion of control rod')
self.cb.addItem('Ab21-12: Pressurizer PORV opening')
self.cb.addItem('Ab19-02: Pressurizer safety valve failure')
self.cb.addItem('Ab21-11: Pressurizer spray valve failed opening')
self.cb.addItem('Ab23-03: Leakage from CVCS to RCS')
self.cb.addItem('Ab60-02: Rupture of the front end of the regenerative heat exchanger')
self.cb.addItem('Ab59-02: Leakage at the rear end of the charging flow control valve')
self.cb.addItem('Ab23-01: Leakage from CVCS to CCW')
self.cb.addItem('Ab23-06: Steam generator u-tube leakage')
# Build the explanation alarm groups
cb_red_alarm = QGroupBox('Main basis for diagnosis')
cb_red_alarm_layout = QGridLayout()
cb_orange_alarm = QGroupBox('Sub basis for diagnosis')
cb_orange_alarm_layout = QGridLayout()
# Create the display buttons
self.cb_red1 = QPushButton(self)
self.cb_red2 = QPushButton(self)
self.cb_red3 = QPushButton(self)
self.cb_red4 = QPushButton(self)
self.cb_orange1 = QPushButton(self)
self.cb_orange2 = QPushButton(self)
self.cb_orange3 = QPushButton(self)
self.cb_orange4 = QPushButton(self)
self.cb_orange5 = QPushButton(self)
self.cb_orange6 = QPushButton(self)
self.cb_orange7 = QPushButton(self)
self.cb_orange8 = QPushButton(self)
self.cb_orange9 = QPushButton(self)
self.cb_orange10 = QPushButton(self)
self.cb_orange11 = QPushButton(self)
self.cb_orange12 = QPushButton(self)
# Insert the widgets into the layouts
cb_red_alarm_layout.addWidget(self.cb_red1, 0, 0)
cb_red_alarm_layout.addWidget(self.cb_red2, 0, 1)
cb_red_alarm_layout.addWidget(self.cb_red3, 1, 0)
cb_red_alarm_layout.addWidget(self.cb_red4, 1, 1)
cb_orange_alarm_layout.addWidget(self.cb_orange1, 0, 0)
cb_orange_alarm_layout.addWidget(self.cb_orange2, 0, 1)
cb_orange_alarm_layout.addWidget(self.cb_orange3, 1, 0)
cb_orange_alarm_layout.addWidget(self.cb_orange4, 1, 1)
cb_orange_alarm_layout.addWidget(self.cb_orange5, 2, 0)
cb_orange_alarm_layout.addWidget(self.cb_orange6, 2, 1)
cb_orange_alarm_layout.addWidget(self.cb_orange7, 3, 0)
cb_orange_alarm_layout.addWidget(self.cb_orange8, 3, 1)
cb_orange_alarm_layout.addWidget(self.cb_orange9, 4, 0)
cb_orange_alarm_layout.addWidget(self.cb_orange10, 4, 1)
cb_orange_alarm_layout.addWidget(self.cb_orange11, 5, 0)
cb_orange_alarm_layout.addWidget(self.cb_orange12, 5, 1)
cb_red_alarm.setLayout(cb_red_alarm_layout)
cb_orange_alarm.setLayout(cb_orange_alarm_layout)
combo_layout.addWidget(self.title_label)
combo_layout.addWidget(self.blank)
combo_layout.addWidget(self.cb)
combo_layout.addWidget(self.blank)
# combo_layout.addItem(QSpacerItem(20, 40, QSizePolicy.Minimum, QSizePolicy.Expanding))
combo_layout.addWidget(cb_red_alarm)
combo_layout.addWidget(cb_orange_alarm)
combo_layout.addWidget(self.blank)
combo_layout.addWidget(self.show_table)
combo_layout.addItem(QSpacerItem(20, 40, QSizePolicy.Minimum, QSizePolicy.Expanding))
self.setLayout(combo_layout)
self.combo_tableWidget = QTableWidget(0, 0)
self.combo_tableWidget.setFixedHeight(500)
self.combo_tableWidget.setFixedWidth(800)
# Event handling ########################################################
self.show_table.clicked.connect(self.show_anoter_table)
self.cb.activated[str].connect(self.show_another_result_table)
self.cb.activated[str].connect(self.show_shap)
##########################################################################
# Wire button clicks to their plot handlers
convert_cb_red_btn = {0: self.cb_red1, 1: self.cb_red2, 2: self.cb_red3, 3: self.cb_red4} # Red Button
convert_cb_red_plot = {0: self.cb_red1_plot, 1: self.cb_red2_plot, 2: self.cb_red3_plot, 3: self.cb_red4_plot}
convert_cb_orange_btn = {0: self.cb_orange1, 1: self.cb_orange2, 2: self.cb_orange3, 3: self.cb_orange4, 4: self.cb_orange5,
5: self.cb_orange6, 6: self.cb_orange7, 7: self.cb_orange8, 8: self.cb_orange9, 9: self.cb_orange10,
10: self.cb_orange11, 11: self.cb_orange12} # Orange Button
convert_cb_orange_plot = {0: self.cb_orange1_plot, 1: self.cb_orange2_plot, 2: self.cb_orange3_plot, 3: self.cb_orange4_plot,
4: self.cb_orange5_plot, 5: self.cb_orange6_plot, 6: self.cb_orange7_plot, 7: self.cb_orange8_plot,
8: self.cb_orange9_plot, 9: self.cb_orange10_plot, 10: self.cb_orange11_plot, 11: self.cb_orange12_plot}
################################################################################################################
# Declare the plot widgets up front -> they must be created here so they persist without being interrupted.
# Red Button
[convert_cb_red_btn[i].clicked.connect(convert_cb_red_plot[i]) for i in range(4)]
self.cb_red_plot_1 = pyqtgraph.PlotWidget(title=self)
self.cb_red_plot_2 = pyqtgraph.PlotWidget(title=self)
self.cb_red_plot_3 = pyqtgraph.PlotWidget(title=self)
self.cb_red_plot_4 = pyqtgraph.PlotWidget(title=self)
# Grid setting
self.cb_red_plot_1.showGrid(x=True, y=True, alpha=0.3)
self.cb_red_plot_2.showGrid(x=True, y=True, alpha=0.3)
self.cb_red_plot_3.showGrid(x=True, y=True, alpha=0.3)
self.cb_red_plot_4.showGrid(x=True, y=True, alpha=0.3)
# Orange Button
[convert_cb_orange_btn[i].clicked.connect(convert_cb_orange_plot[i]) for i in range(12)]
self.cb_orange_plot_1 = pyqtgraph.PlotWidget(title=self)
self.cb_orange_plot_2 = pyqtgraph.PlotWidget(title=self)
self.cb_orange_plot_3 = pyqtgraph.PlotWidget(title=self)
self.cb_orange_plot_4 = pyqtgraph.PlotWidget(title=self)
self.cb_orange_plot_5 = pyqtgraph.PlotWidget(title=self)
self.cb_orange_plot_6 = pyqtgraph.PlotWidget(title=self)
self.cb_orange_plot_7 = pyqtgraph.PlotWidget(title=self)
self.cb_orange_plot_8 = pyqtgraph.PlotWidget(title=self)
self.cb_orange_plot_9 = pyqtgraph.PlotWidget(title=self)
self.cb_orange_plot_10 = pyqtgraph.PlotWidget(title=self)
self.cb_orange_plot_11 = pyqtgraph.PlotWidget(title=self)
self.cb_orange_plot_12 = pyqtgraph.PlotWidget(title=self)
# Grid setting
self.cb_orange_plot_1.showGrid(x=True, y=True, alpha=0.3)
self.cb_orange_plot_2.showGrid(x=True, y=True, alpha=0.3)
self.cb_orange_plot_3.showGrid(x=True, y=True, alpha=0.3)
self.cb_orange_plot_4.showGrid(x=True, y=True, alpha=0.3)
self.cb_orange_plot_5.showGrid(x=True, y=True, alpha=0.3)
self.cb_orange_plot_6.showGrid(x=True, y=True, alpha=0.3)
self.cb_orange_plot_7.showGrid(x=True, y=True, alpha=0.3)
self.cb_orange_plot_8.showGrid(x=True, y=True, alpha=0.3)
self.cb_orange_plot_9.showGrid(x=True, y=True, alpha=0.3)
self.cb_orange_plot_10.showGrid(x=True, y=True, alpha=0.3)
self.cb_orange_plot_11.showGrid(x=True, y=True, alpha=0.3)
self.cb_orange_plot_12.showGrid(x=True, y=True, alpha=0.3)
################################################################################################################
self.show() # Sub UI show command
def show_shap(self, all_shap, symptom_db, compare_data):
# all_shap : SHAP values for every scenario.
# symptom_db[0] : liner : appended time (x-axis) / symptom_db[1].iloc[1] : check_db (:line,2222)[1]
# The combo-box items were added in the same order as the entries of all_shap,
# so the current index selects the matching SHAP table directly.
scenario = self.cb.currentText()
step1 = pd.DataFrame(all_shap[self.cb.currentIndex()], columns=self.selected_para['0'].tolist())
compared_db = compare_data[scenario if scenario == 'Normal' else scenario[:7]]
step2 = step1.sort_values(by=0, ascending=True, axis=1)
step3 = step2[step2.iloc[:] < 0].dropna(axis=1).T
self.step4 = step3.reset_index()
col = self.step4['index']
var = [self.selected_para['0'][self.selected_para['0'] == col_].index for col_ in col]
val_col = [self.selected_para['1'][var_].iloc[0] for var_ in var]
proba = [(self.step4[0][val_num] / sum(self.step4[0])) * 100 for val_num in range(len(self.step4[0]))]
val_system = [self.selected_para['2'][var_].iloc[0] for var_ in var]
self.step4['describe'] = val_col
self.step4['probability'] = proba
self.step4['system'] = val_system
red_range = self.step4[self.step4['probability'] >= 10]
orange_range = self.step4[(self.step4['probability'] > 1) & (self.step4['probability'] < 10)]
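# Worked example (illustrative): if the negative SHAP contributions are
# [-0.5, -0.3, -0.15, -0.05], their shares of the total are 50%, 30%, 15% and 5%,
# so the first three parameters fall into red_range (>= 10%) and the last one
# into orange_range (between 1% and 10%).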
convert_red = {0: self.cb_red1, 1: self.cb_red2, 2: self.cb_red3, 3: self.cb_red4}
convert_orange = {0: self.cb_orange1, 1: self.cb_orange2, 2: self.cb_orange3, 3: self.cb_orange4, 4: self.cb_orange5,
5: self.cb_orange6, 6: self.cb_orange7, 7: self.cb_orange8, 8: self.cb_orange9, 9: self.cb_orange10,
10: self.cb_orange11, 11: self.cb_orange12}
# Buttons beyond the number of ranked parameters are shown as 'None'.
red_del = list(range(len(red_range), 4))
orange_del = list(range(len(orange_range), 12))
[convert_red[i].setText(f'{red_range["describe"].iloc[i]} \n[{round(red_range["probability"].iloc[i], 2)}%]') for i in range(len(red_range))]
[convert_red[i].setText('None\nParameter') for i in red_del]
[convert_red[i].setStyleSheet('color : white;' 'font-weight: bold;' 'background-color: blue;') for i in range(len(red_range))]
[convert_red[i].setStyleSheet('color : black;' 'background-color: light gray;') for i in red_del]
[convert_orange[i].setText(f'{orange_range["describe"].iloc[i]} \n[{round(orange_range["probability"].iloc[i], 2)}%]') for i in range(len(orange_range))]
[convert_orange[i].setText('None\nParameter') for i in orange_del]
#####################################################################################################################################
# Build the plotting data bound to each button
# Red1 Button
if self.cb_red1.text().split()[0] != 'None':
self.cb_red_plot_1.clear()
self.cb_red_plot_1.setTitle(red_range['describe'].iloc[0])
self.cb_red_plot_1.addLegend(offset=(-30,20))
self.cb_red_plot_1.plot(x=symptom_db[0], y=pd.DataFrame(symptom_db[1])[red_range['index'].iloc[0]], pen=pyqtgraph.mkPen('b', width=3), name='Real Data')
self.cb_red_plot_1.plot(x=symptom_db[0], y=pd.DataFrame(compared_db)[red_range['index'].iloc[0]], pen=pyqtgraph.mkPen('k', width=3), name=self.cb.currentText()[:7])
# Red2 Button
if self.cb_red2.text().split()[0] != 'None':
self.cb_red_plot_2.clear()
self.cb_red_plot_2.setTitle(red_range['describe'].iloc[1])
self.cb_red_plot_2.addLegend(offset=(-30, 20))
self.cb_red_plot_2.plot(x=symptom_db[0], y=pd.DataFrame(symptom_db[1])[red_range['index'].iloc[1]], pen=pyqtgraph.mkPen('b', width=3), name='Real Data')
self.cb_red_plot_2.plot(x=symptom_db[0], y=pd.DataFrame(compared_db)[red_range['index'].iloc[1]], pen=pyqtgraph.mkPen('k', width=3), name=self.cb.currentText()[:7])
# Red3 Button
if self.cb_red3.text().split()[0] != 'None':
self.cb_red_plot_3.clear()
self.cb_red_plot_3.setTitle(red_range['describe'].iloc[2])
self.cb_red_plot_3.addLegend(offset=(-30, 20))
self.cb_red_plot_3.plot(x=symptom_db[0], y=pd.DataFrame(symptom_db[1])[red_range['index'].iloc[2]], pen=pyqtgraph.mkPen('b', width=3), name='Real Data')
self.cb_red_plot_3.plot(x=symptom_db[0], y=pd.DataFrame(compared_db)[red_range['index'].iloc[2]], pen=pyqtgraph.mkPen('k', width=3), name=self.cb.currentText()[:7])
# Red4 Button
if self.cb_red4.text().split()[0] != 'None':
self.cb_red_plot_4.clear()
self.cb_red_plot_4.setTitle(red_range['describe'].iloc[3])
self.cb_red_plot_4.addLegend(offset=(-30, 20))
self.cb_red_plot_4.plot(x=symptom_db[0], y=pd.DataFrame(symptom_db[1])[red_range['index'].iloc[3]], pen=pyqtgraph.mkPen('b', width=3), name='Real Data')
self.cb_red_plot_4.plot(x=symptom_db[0], y=pd.DataFrame(compared_db)[red_range['index'].iloc[3]], pen=pyqtgraph.mkPen('k', width=3), name=self.cb.currentText()[:7])
# Orange1 Button
if self.cb_orange1.text().split()[0] != 'None':
self.cb_orange_plot_1.clear()
self.cb_orange_plot_1.setTitle(orange_range['describe'].iloc[0])
self.cb_orange_plot_1.addLegend(offset=(-30, 20))
self.cb_orange_plot_1.plot(x=symptom_db[0], y=pd.DataFrame(symptom_db[1])[orange_range['index'].iloc[0]], pen=pyqtgraph.mkPen('b', width=3), name='Real Data')
self.cb_orange_plot_1.plot(x=symptom_db[0], y=pd.DataFrame(compared_db)[orange_range['index'].iloc[0]], pen=pyqtgraph.mkPen('k', width=3), name=self.cb.currentText()[:7])
# Orange2 Button
if self.cb_orange2.text().split()[0] != 'None':
self.cb_orange_plot_2.clear()
self.cb_orange_plot_2.setTitle(orange_range['describe'].iloc[1])
self.cb_orange_plot_2.addLegend(offset=(-30, 20))
self.cb_orange_plot_2.plot(x=symptom_db[0], y=pd.DataFrame(symptom_db[1])[orange_range['index'].iloc[1]], pen=pyqtgraph.mkPen('b', width=3), name='Real Data')
self.cb_orange_plot_2.plot(x=symptom_db[0], y=pd.DataFrame(compared_db)[orange_range['index'].iloc[1]], pen=pyqtgraph.mkPen('k', width=3), name=self.cb.currentText()[:7])
# Orange3 Button
if self.cb_orange3.text().split()[0] != 'None':
self.cb_orange_plot_3.clear()
self.cb_orange_plot_3.setTitle(orange_range['describe'].iloc[2])
self.cb_orange_plot_3.addLegend(offset=(-30, 20))
self.cb_orange_plot_3.plot(x=symptom_db[0], y=pd.DataFrame(symptom_db[1])[orange_range['index'].iloc[2]], pen=pyqtgraph.mkPen('b', width=3), name='Real Data')
self.cb_orange_plot_3.plot(x=symptom_db[0], y=pd.DataFrame(compared_db)[orange_range['index'].iloc[2]], pen=pyqtgraph.mkPen('k', width=3), name=self.cb.currentText()[:7])
# Orange4 Button
if self.cb_orange4.text().split()[0] != 'None':
self.cb_orange_plot_4.clear()
self.cb_orange_plot_4.setTitle(orange_range['describe'].iloc[3])
self.cb_orange_plot_4.addLegend(offset=(-30, 20))
self.cb_orange_plot_4.plot(x=symptom_db[0], y=pd.DataFrame(symptom_db[1])[orange_range['index'].iloc[3]], pen=pyqtgraph.mkPen('b', width=3), name='Real Data')
self.cb_orange_plot_4.plot(x=symptom_db[0], y= | pd.DataFrame(compared_db) | pandas.DataFrame |
# Copyright 2016 Feather Developers
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import unittest
import numpy as np
from pandas.util.testing import assert_frame_equal
import pandas as pd
from feather.compat import guid
from feather import FeatherReader, FeatherWriter
import feather
def random_path():
return 'feather_{}'.format(guid())
class TestFeatherReader(unittest.TestCase):
def setUp(self):
self.test_files = []
def tearDown(self):
for path in self.test_files:
try:
os.remove(path)
except os.error:
pass
def test_file_not_exist(self):
with self.assertRaises(feather.FeatherError):
FeatherReader('test_invalid_file')
def _check_pandas_roundtrip(self, df, expected=None):
path = random_path()
self.test_files.append(path)
feather.write_dataframe(df, path)
if not os.path.exists(path):
raise Exception('file not written')
result = feather.read_dataframe(path)
if expected is None:
expected = df
assert_frame_equal(result, expected)
def test_num_rows_attr(self):
df = pd.DataFrame({'foo': [1, 2, 3, 4, 5]})
path = random_path()
self.test_files.append(path)
feather.write_dataframe(df, path)
reader = feather.FeatherReader(path)
assert reader.num_rows == len(df)
df = pd.DataFrame({})
path = random_path()
self.test_files.append(path)
feather.write_dataframe(df, path)
reader = feather.FeatherReader(path)
assert reader.num_rows == 0
def test_float_no_nulls(self):
data = {}
numpy_dtypes = ['f4', 'f8']
num_values = 100
for dtype in numpy_dtypes:
values = np.random.randn(num_values)
data[dtype] = values.astype(dtype)
df = pd.DataFrame(data)
self._check_pandas_roundtrip(df)
def test_float_nulls(self):
num_values = 100
path = random_path()
self.test_files.append(path)
writer = FeatherWriter(path)
null_mask = np.random.randint(0, 10, size=num_values) < 3
dtypes = ['f4', 'f8']
expected_cols = []
for name in dtypes:
values = np.random.randn(num_values).astype(name)
writer.write_array(name, values, null_mask)
values[null_mask] = np.nan
expected_cols.append(values)
writer.close()
ex_frame = pd.DataFrame(dict(zip(dtypes, expected_cols)),
columns=dtypes)
result = feather.read_dataframe(path)
assert_frame_equal(result, ex_frame)
def test_integer_no_nulls(self):
data = {}
numpy_dtypes = ['i1', 'i2', 'i4', 'i8', 'u1', 'u2', 'u4', 'u8']
num_values = 100
for dtype in numpy_dtypes:
info = np.iinfo(dtype)
values = np.random.randint(info.min,
min(info.max, np.iinfo('i8').max),
size=num_values)
data[dtype] = values.astype(dtype)
df = pd.DataFrame(data)
self._check_pandas_roundtrip(df)
def test_integer_with_nulls(self):
# pandas requires upcast to float dtype
path = random_path()
self.test_files.append(path)
int_dtypes = ['i1', 'i2', 'i4', 'i8', 'u1', 'u2', 'u4', 'u8']
num_values = 100
writer = FeatherWriter(path)
null_mask = np.random.randint(0, 10, size=num_values) < 3
expected_cols = []
for name in int_dtypes:
values = np.random.randint(0, 100, size=num_values)
writer.write_array(name, values, null_mask)
expected = values.astype('f8')
expected[null_mask] = np.nan
expected_cols.append(expected)
ex_frame = pd.DataFrame(dict(zip(int_dtypes, expected_cols)),
columns=int_dtypes)
writer.close()
result = feather.read_dataframe(path)
assert_frame_equal(result, ex_frame)
def test_boolean_no_nulls(self):
num_values = 100
np.random.seed(0)
df = pd.DataFrame({'bools': np.random.randn(num_values) > 0})
self._check_pandas_roundtrip(df)
def test_boolean_nulls(self):
# pandas requires upcast to object dtype
path = random_path()
self.test_files.append(path)
num_values = 100
np.random.seed(0)
writer = FeatherWriter(path)
mask = np.random.randint(0, 10, size=num_values) < 3
values = np.random.randint(0, 10, size=num_values) < 5
writer.write_array('bools', values, mask)
expected = values.astype(object)
expected[mask] = None
writer.close()
ex_frame = pd.DataFrame({'bools': expected})
result = feather.read_dataframe(path)
assert_frame_equal(result, ex_frame)
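# Illustrative helper (not part of the original suite): the two upcasts relied on
# by the tests above can be reproduced directly in pandas -- integers with missing
# values are promoted to float64, booleans with missing values to object dtype.
def _demo_null_upcasts(self):
    ints = pd.Series([1, 2, None])
    bools = pd.Series([True, False, None])
    assert ints.dtype == np.float64    # integer data gains NaN support via float64
    assert bools.dtype == object       # boolean data with None falls back to object
    return ints, bools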
def test_boolean_object_nulls(self):
arr = np.array([False, None, True] * 100, dtype=object)
df = pd.DataFrame({'bools': arr})
self._check_pandas_roundtrip(df)
def test_strings(self):
repeats = 1000
values = [b'foo', None, u'bar', 'qux', np.nan]
df = pd.DataFrame({'strings': values * repeats})
values = ['foo', None, u'bar', 'qux', None]
expected = pd.DataFrame({'strings': values * repeats})
self._check_pandas_roundtrip(df, expected)
def test_nan_as_null(self):
# Create a nan that is not numpy.nan
values = np.array(['foo', np.nan, np.nan * 2, 'bar'] * 10)
df = | pd.DataFrame({'strings': values}) | pandas.DataFrame |
from __future__ import division
import copy
import bt
from bt.core import Node, StrategyBase, SecurityBase, AlgoStack, Strategy
from bt.core import FixedIncomeStrategy, HedgeSecurity, FixedIncomeSecurity
from bt.core import CouponPayingSecurity, CouponPayingHedgeSecurity
from bt.core import is_zero
import pandas as pd
import numpy as np
from nose.tools import assert_almost_equal as aae
import sys
if sys.version_info < (3, 3):
import mock
else:
from unittest import mock
def test_node_tree1():
# Create a regular strategy
c1 = Node('c1')
c2 = Node('c2')
p = Node('p', children=[c1, c2, 'c3', 'c4'])
assert 'c1' in p.children
assert 'c2' in p.children
assert p['c1'] != c1
assert p['c1'] != c2
c1 = p['c1']
c2 = p['c2']
assert len(p.children) == 2
assert p == c1.parent
assert p == c2.parent
assert p == c1.root
assert p == c2.root
# Create a new parent strategy with a child sub-strategy
m = Node('m', children=[p, c1])
p = m['p']
mc1 = m['c1']
c1 = p['c1']
c2 = p['c2']
assert len(m.children) == 2
assert 'p' in m.children
assert 'c1' in m.children
assert mc1 != c1
assert p.parent == m
assert len(p.children) == 2
assert 'c1' in p.children
assert 'c2' in p.children
assert p == c1.parent
assert p == c2.parent
assert m == p.root
assert m == c1.root
assert m == c2.root
# Add a new node into the strategy
c0 = Node('c0', parent=p)
c0 = p['c0']
assert 'c0' in p.children
assert p == c0.parent
assert m == c0.root
assert len(p.children) == 3
# Add a new sub-strategy into the parent strategy
p2 = Node('p2', children=[c0, c1], parent=m)
p2 = m['p2']
c0 = p2['c0']
c1 = p2['c1']
assert 'p2' in m.children
assert p2.parent == m
assert len(p2.children) == 2
assert 'c0' in p2.children
assert 'c1' in p2.children
assert c0 != p['c0']
assert c1 != p['c1']
assert p2 == c0.parent
assert p2 == c1.parent
assert m == p2.root
assert m == c0.root
assert m == c1.root
def test_node_tree2():
# Just like test_node_tree1, but using the dictionary constructor
c = Node('template')
p = Node('p', children={'c1':c, 'c2':c, 'c3':'', 'c4':''})
assert 'c1' in p.children
assert 'c2' in p.children
assert p['c1'] != c
assert p['c1'] != c
c1 = p['c1']
c2 = p['c2']
assert len(p.children) == 2
assert c1.name == 'c1'
assert c2.name == 'c2'
assert p == c1.parent
assert p == c2.parent
assert p == c1.root
assert p == c2.root
def test_node_tree3():
c1 = Node('c1')
c2 = Node('c1') # Same name!
raised = False
try:
p = Node('p', children=[c1, c2, 'c3', 'c4'])
except ValueError:
raised = True
assert raised
raised = False
try:
p = Node('p', children=['c1', 'c1'])
except ValueError:
raised = True
assert raised
c1 = Node('c1')
c2 = Node('c2')
p = Node('p', children=[c1, c2, 'c3', 'c4'])
raised = False
try:
Node('c1', parent=p)
except ValueError:
raised = True
assert raised
# This does not raise, as it's just providing an implementation of 'c3',
# which had been declared earlier
c3 = Node('c3', parent=p)
assert 'c3' in p.children
def test_integer_positions():
c1 = Node('c1')
c2 = Node('c2')
c1.integer_positions = False
p = Node('p', children=[c1, c2])
c1 = p['c1']
c2 = p['c2']
assert p.integer_positions
assert c1.integer_positions
assert c2.integer_positions
p.use_integer_positions(False)
assert not p.integer_positions
assert not c1.integer_positions
assert not c2.integer_positions
c3 = Node('c3', parent=p)
c3 = p['c3']
assert not c3.integer_positions
p2 = Node('p2', children=[p])
p = p2['p']
c1 = p['c1']
c2 = p['c2']
assert p2.integer_positions
assert p.integer_positions
assert c1.integer_positions
assert c2.integer_positions
def test_strategybase_tree():
s1 = SecurityBase('s1')
s2 = SecurityBase('s2')
s = StrategyBase('p', [s1, s2])
s1 = s['s1']
s2 = s['s2']
assert len(s.children) == 2
assert 's1' in s.children
assert 's2' in s.children
assert s == s1.parent
assert s == s2.parent
def test_node_members():
s1 = SecurityBase('s1')
s2 = SecurityBase('s2')
s = StrategyBase('p', [s1, s2])
s1 = s['s1']
s2 = s['s2']
actual = s.members
assert len(actual) == 3
assert s1 in actual
assert s2 in actual
assert s in actual
actual = s1.members
assert len(actual) == 1
assert s1 in actual
actual = s2.members
assert len(actual) == 1
assert s2 in actual
def test_node_full_name():
s1 = SecurityBase('s1')
s2 = SecurityBase('s2')
s = StrategyBase('p', [s1, s2])
# we cannot access s1 and s2 directly since they are copied
# we must therefore access through s
assert s.full_name == 'p'
assert s['s1'].full_name == 'p>s1'
assert s['s2'].full_name == 'p>s2'
def test_security_setup_prices():
c1 = SecurityBase('c1')
c2 = SecurityBase('c2')
s = StrategyBase('p', [c1, c2])
c1 = s['c1']
c2 = s['c2']
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
data['c1'][dts[0]] = 105
data['c2'][dts[0]] = 95
s.setup(data)
i = 0
s.update(dts[i], data.loc[dts[i]])
assert c1.price == 105
assert len(c1.prices) == 1
assert c1.prices[0] == 105
assert c2.price == 95
assert len(c2.prices) == 1
assert c2.prices[0] == 95
# now with setup
c1 = SecurityBase('c1')
c2 = SecurityBase('c2')
s = StrategyBase('p', [c1, c2])
c1 = s['c1']
c2 = s['c2']
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
data['c1'][dts[0]] = 105
data['c2'][dts[0]] = 95
s.setup(data)
i = 0
s.update(dts[i], data.loc[dts[i]])
assert c1.price == 105
assert len(c1.prices) == 1
assert c1.prices[0] == 105
assert c2.price == 95
assert len(c2.prices) == 1
assert c2.prices[0] == 95
def test_strategybase_tree_setup():
c1 = SecurityBase('c1')
c2 = SecurityBase('c2')
s = StrategyBase('p', [c1, c2])
c1 = s['c1']
c2 = s['c2']
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
data['c1'][dts[1]] = 105
data['c2'][dts[1]] = 95
s.setup(data)
assert len(s.data) == 3
assert len(c1.data) == 3
assert len(c2.data) == 3
assert len(s._prices) == 3
assert len(c1._prices) == 3
assert len(c2._prices) == 3
assert len(s._values) == 3
assert len(c1._values) == 3
assert len(c2._values) == 3
def test_strategybase_tree_adjust():
c1 = SecurityBase('c1')
c2 = SecurityBase('c2')
s = StrategyBase('p', [c1, c2])
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
data['c1'][dts[1]] = 105
data['c2'][dts[1]] = 95
s.setup(data)
s.adjust(1000)
assert s.capital == 1000
assert s.value == 1000
assert c1.value == 0
assert c2.value == 0
assert c1.weight == 0
assert c2.weight == 0
s.update(dts[0])
assert s.flows[ dts[0] ] == 1000
def test_strategybase_tree_update():
c1 = SecurityBase('c1')
c2 = SecurityBase('c2')
s = StrategyBase('p', [c1, c2])
c1 = s['c1']
c2 = s['c2']
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
data['c1'][dts[1]] = 105
data['c2'][dts[1]] = 95
s.setup(data)
i = 0
s.update(dts[i], data.loc[dts[i]])
assert c1.price == 100
assert c2.price == 100
i = 1
s.update(dts[i], data.loc[dts[i]])
assert c1.price == 105
assert c2.price == 95
i = 2
s.update(dts[i], data.loc[dts[i]])
assert c1.price == 100
assert c2.price == 100
def test_update_fails_if_price_is_nan_and_position_open():
c1 = SecurityBase('c1')
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1'], data=100)
data['c1'][dts[1]] = np.nan
c1.setup(data)
i = 0
# mock in position
c1._position = 100
c1.update(dts[i], data.loc[dts[i]])
# test normal case - position & non-nan price
assert c1._value == 100 * 100
i = 1
# this should fail, because we have non-zero position, and price is nan, so
# bt has no way of updating the _value
try:
c1.update(dts[i], data.loc[dts[i]])
assert False
except Exception as e:
assert str(e).startswith('Position is open')
# on the other hand, if position was 0, this should be fine, and update
# value to 0
c1._position = 0
c1.update(dts[i], data.loc[dts[i]])
assert c1._value == 0
def test_strategybase_tree_allocate():
c1 = SecurityBase('c1')
c2 = SecurityBase('c2')
s = StrategyBase('p', [c1, c2])
c1 = s['c1']
c2 = s['c2']
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
data['c1'][dts[1]] = 105
data['c2'][dts[1]] = 95
s.setup(data)
i = 0
s.update(dts[i], data.loc[dts[i]])
s.adjust(1000)
# since children have w == 0 this should stay in s
s.allocate(1000)
assert s.value == 1000
assert s.capital == 1000
assert c1.value == 0
assert c2.value == 0
# now allocate directly to child
c1.allocate(500)
assert c1.position == 5
assert c1.value == 500
assert s.capital == 1000 - 500
assert s.value == 1000
assert c1.weight == 500.0 / 1000
assert c2.weight == 0
def test_strategybase_tree_allocate_child_from_strategy():
c1 = SecurityBase('c1')
c2 = SecurityBase('c2')
s = StrategyBase('p', [c1, c2])
c1 = s['c1']
c2 = s['c2']
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
data['c1'][dts[1]] = 105
data['c2'][dts[1]] = 95
s.setup(data)
i = 0
s.update(dts[i], data.loc[dts[i]])
s.adjust(1000)
# since children have w == 0 this should stay in s
s.allocate(1000)
assert s.value == 1000
assert s.capital == 1000
assert c1.value == 0
assert c2.value == 0
# now allocate to c1
s.allocate(500, 'c1')
assert c1.position == 5
assert c1.value == 500
assert s.capital == 1000 - 500
assert s.value == 1000
assert c1.weight == 500.0 / 1000
assert c2.weight == 0
def test_strategybase_tree_allocate_level2():
c1 = SecurityBase('c1')
c12 = copy.deepcopy(c1)
c2 = SecurityBase('c2')
c22 = copy.deepcopy(c2)
s1 = StrategyBase('s1', [c1, c2])
s2 = StrategyBase('s2', [c12, c22])
m = StrategyBase('m', [s1, s2])
s1 = m['s1']
s2 = m['s2']
c1 = s1['c1']
c2 = s1['c2']
c12 = s2['c1']
c22 = s2['c2']
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
data['c1'][dts[1]] = 105
data['c2'][dts[1]] = 95
m.setup(data)
i = 0
m.update(dts[i], data.loc[dts[i]])
m.adjust(1000)
# since children have w == 0 this should stay in s
m.allocate(1000)
assert m.value == 1000
assert m.capital == 1000
assert s1.value == 0
assert s2.value == 0
assert c1.value == 0
assert c2.value == 0
# now allocate directly to child
s1.allocate(500)
assert s1.value == 500
assert m.capital == 1000 - 500
assert m.value == 1000
assert s1.weight == 500.0 / 1000
assert s2.weight == 0
# now allocate directly to child of child
c1.allocate(200)
assert s1.value == 500
assert s1.capital == 500 - 200
assert c1.value == 200
assert c1.weight == 200.0 / 500
assert c1.position == 2
assert m.capital == 1000 - 500
assert m.value == 1000
assert s1.weight == 500.0 / 1000
assert s2.weight == 0
assert c12.value == 0
def test_strategybase_tree_allocate_long_short():
c1 = SecurityBase('c1')
c2 = SecurityBase('c2')
s = StrategyBase('p', [c1, c2])
c1 = s['c1']
c2 = s['c2']
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
data['c1'][dts[1]] = 105
data['c2'][dts[1]] = 95
s.setup(data)
i = 0
s.update(dts[i], data.loc[dts[i]])
s.adjust(1000)
c1.allocate(500)
assert c1.position == 5
assert c1.value == 500
assert c1.weight == 500.0 / 1000
assert s.capital == 1000 - 500
assert s.value == 1000
c1.allocate(-200)
assert c1.position == 3
assert c1.value == 300
assert c1.weight == 300.0 / 1000
assert s.capital == 1000 - 500 + 200
assert s.value == 1000
c1.allocate(-400)
assert c1.position == -1
assert c1.value == -100
assert c1.weight == -100.0 / 1000
assert s.capital == 1000 - 500 + 200 + 400
assert s.value == 1000
# close up
c1.allocate(-c1.value)
assert c1.position == 0
assert c1.value == 0
assert c1.weight == 0
assert s.capital == 1000 - 500 + 200 + 400 - 100
assert s.value == 1000
def test_strategybase_tree_allocate_update():
c1 = SecurityBase('c1')
c2 = SecurityBase('c2')
s = StrategyBase('p', [c1, c2])
c1 = s['c1']
c2 = s['c2']
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
data['c1'][dts[1]] = 105
data['c2'][dts[1]] = 95
s.setup(data)
i = 0
s.update(dts[i], data.loc[dts[i]])
assert s.price == 100
s.adjust(1000)
assert s.price == 100
assert s.value == 1000
assert s._value == 1000
c1.allocate(500)
assert c1.position == 5
assert c1.value == 500
assert c1.weight == 500.0 / 1000
assert s.capital == 1000 - 500
assert s.value == 1000
assert s.price == 100
i = 1
s.update(dts[i], data.loc[dts[i]])
assert c1.position == 5
assert c1.value == 525
assert c1.weight == 525.0 / 1025
assert s.capital == 1000 - 500
assert s.value == 1025
assert np.allclose(s.price, 102.5)
def test_strategybase_universe():
s = StrategyBase('s')
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
data['c1'][dts[0]] = 105
data['c2'][dts[0]] = 95
s.setup(data)
i = 0
s.update(dts[i])
assert len(s.universe) == 1
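# len() of a DataFrame counts rows, so a single update leaves one row here
# even though both tickers are present as columns (checked just below).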
assert 'c1' in s.universe
assert 'c2' in s.universe
assert s.universe['c1'][dts[i]] == 105
assert s.universe['c2'][dts[i]] == 95
# should not have children unless allocated
assert len(s.children) == 0
def test_strategybase_allocate():
s = StrategyBase('s')
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
data['c1'][dts[0]] = 100
data['c2'][dts[0]] = 95
s.setup(data)
i = 0
s.update(dts[i])
s.adjust(1000)
s.allocate(100, 'c1')
c1 = s['c1']
assert c1.position == 1
assert c1.value == 100
assert s.value == 1000
def test_strategybase_lazy():
# A mix of test_strategybase_universe and test_strategybase_allocate
# to make sure that assets with lazy_add work correctly.
c1 = SecurityBase('c1', multiplier=2, lazy_add=True)
c2 = FixedIncomeSecurity('c2', lazy_add=True)
s = StrategyBase('s', [c1, c2])
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
data['c1'][dts[0]] = 105
data['c2'][dts[0]] = 95
s.setup(data)
i = 0
s.update(dts[i])
assert len(s.universe) == 1
assert 'c1' in s.universe
assert 'c2' in s.universe
assert s.universe['c1'][dts[i]] == 105
assert s.universe['c2'][dts[i]] == 95
# should not have children unless allocated
assert len(s.children) == 0
s.adjust(1000)
s.allocate(100, 'c1')
s.allocate(100, 'c2')
c1 = s['c1']
c2 = s['c2']
assert c1.multiplier == 2
assert isinstance(c2, FixedIncomeSecurity)
def test_strategybase_close():
s = StrategyBase('s')
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
s.setup(data)
i = 0
s.update(dts[i])
s.adjust(1000)
s.allocate(100, 'c1')
c1 = s['c1']
assert c1.position == 1
assert c1.value == 100
assert s.value == 1000
s.close('c1')
assert c1.position == 0
assert c1.value == 0
assert s.value == 1000
def test_strategybase_flatten():
s = StrategyBase('s')
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
s.setup(data)
i = 0
s.update(dts[i])
s.adjust(1000)
s.allocate(100, 'c1')
c1 = s['c1']
s.allocate(100, 'c2')
c2 = s['c2']
assert c1.position == 1
assert c1.value == 100
assert c2.position == 1
assert c2.value == 100
assert s.value == 1000
s.flatten()
assert c1.position == 0
assert c1.value == 0
assert s.value == 1000
def test_strategybase_multiple_calls():
s = StrategyBase('s')
dts = pd.date_range('2010-01-01', periods=5)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
data.c2[dts[0]] = 95
data.c1[dts[1]] = 95
data.c2[dts[2]] = 95
data.c2[dts[3]] = 95
data.c2[dts[4]] = 95
data.c1[dts[4]] = 105
s.setup(data)
# define strategy logic
def algo(target):
# close out any open positions
target.flatten()
# get stock w/ lowest price
c = target.universe.loc[target.now].idxmin()
# allocate all capital to that stock
target.allocate(target.value, c)
# replace run logic
s.run = algo
# start w/ 1000
s.adjust(1000)
# loop through dates manually
i = 0
# update t0
s.update(dts[i])
assert len(s.children) == 0
assert s.value == 1000
# run t0
s.run(s)
assert len(s.children) == 1
assert s.value == 1000
assert s.capital == 50
c2 = s['c2']
assert c2.value == 950
assert c2.weight == 950.0 / 1000
assert c2.price == 95
# update out t0
s.update(dts[i])
c2 = s['c2']
assert len(s.children) == 1
assert s.value == 1000
assert s.capital == 50
assert c2.value == 950
assert c2.weight == 950.0 / 1000
assert c2.price == 95
# update t1
i = 1
s.update(dts[i])
assert s.value == 1050
assert s.capital == 50
assert len(s.children) == 1
assert 'c2' in s.children
c2 = s['c2']
assert c2.value == 1000
assert c2.weight == 1000.0 / 1050.0
assert c2.price == 100
# run t1 - close out c2, open c1
s.run(s)
assert len(s.children) == 2
assert s.value == 1050
assert s.capital == 5
c1 = s['c1']
assert c1.value == 1045
assert c1.weight == 1045.0 / 1050
assert c1.price == 95
assert c2.value == 0
assert c2.weight == 0
assert c2.price == 100
# update out t1
s.update(dts[i])
assert len(s.children) == 2
assert s.value == 1050
assert s.capital == 5
assert c1 == s['c1']
assert c1.value == 1045
assert c1.weight == 1045.0 / 1050
assert c1.price == 95
assert c2.value == 0
assert c2.weight == 0
assert c2.price == 100
# update t2
i = 2
s.update(dts[i])
assert len(s.children) == 2
assert s.value == 1105
assert s.capital == 5
assert c1.value == 1100
assert c1.weight == 1100.0 / 1105
assert c1.price == 100
assert c2.value == 0
assert c2.weight == 0
assert c2.price == 95
# run t2
s.run(s)
assert len(s.children) == 2
assert s.value == 1105
assert s.capital == 60
assert c1.value == 0
assert c1.weight == 0
assert c1.price == 100
assert c2.value == 1045
assert c2.weight == 1045.0 / 1105
assert c2.price == 95
# update out t2
s.update(dts[i])
assert len(s.children) == 2
assert s.value == 1105
assert s.capital == 60
assert c1.value == 0
assert c1.weight == 0
assert c1.price == 100
assert c2.value == 1045
assert c2.weight == 1045.0 / 1105
assert c2.price == 95
# update t3
i = 3
s.update(dts[i])
assert len(s.children) == 2
assert s.value == 1105
assert s.capital == 60
assert c1.value == 0
assert c1.weight == 0
assert c1.price == 100
assert c2.value == 1045
assert c2.weight == 1045.0 / 1105
assert c2.price == 95
# run t3
s.run(s)
assert len(s.children) == 2
assert s.value == 1105
assert s.capital == 60
assert c1.value == 0
assert c1.weight == 0
assert c1.price == 100
assert c2.value == 1045
assert c2.weight == 1045.0 / 1105
assert c2.price == 95
# update out t3
s.update(dts[i])
assert len(s.children) == 2
assert s.value == 1105
assert s.capital == 60
assert c1.value == 0
assert c1.weight == 0
assert c1.price == 100
assert c2.value == 1045
assert c2.weight == 1045.0 / 1105
assert c2.price == 95
# update t4
i = 4
s.update(dts[i])
assert len(s.children) == 2
assert s.value == 1105
assert s.capital == 60
assert c1.value == 0
assert c1.weight == 0
# accessing price should refresh - this child has been idle for a while -
# must make sure we can still get fresh prices
assert c1.price == 105
assert len(c1.prices) == 5
assert c2.value == 1045
assert c2.weight == 1045.0 / 1105
assert c2.price == 95
# run t4
s.run(s)
assert len(s.children) == 2
assert s.value == 1105
assert s.capital == 60
assert c1.value == 0
assert c1.weight == 0
assert c1.price == 105
assert c2.value == 1045
assert c2.weight == 1045.0 / 1105
assert c2.price == 95
# update out t4
s.update(dts[i])
assert len(s.children) == 2
assert s.value == 1105
assert s.capital == 60
assert c1.value == 0
assert c1.weight == 0
assert c1.price == 105
assert c2.value == 1045
assert c2.weight == 1045.0 / 1105
assert c2.price == 95
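# Hedged sketch (not from the original tests): the hand-rolled algo used in
# test_strategybase_multiple_calls above can also be expressed as a regular bt
# algo stack. The helper names here are illustrative; only the bt.algos calls
# (RunDaily, WeighEqually, Rebalance) are standard bt API.
def _select_cheapest(target):
    # pick the ticker with the lowest price on the current date
    target.temp['selected'] = [target.universe.loc[target.now].idxmin()]
    return True

def _cheapest_stock_backtest(data):
    strategy = bt.Strategy('cheapest', [bt.algos.RunDaily(),
                                        _select_cheapest,
                                        bt.algos.WeighEqually(),
                                        bt.algos.Rebalance()])
    return bt.Backtest(strategy, data)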
def test_strategybase_multiple_calls_preset_secs():
c1 = SecurityBase('c1')
c2 = SecurityBase('c2')
s = StrategyBase('s', [c1, c2])
c1 = s['c1']
c2 = s['c2']
dts = pd.date_range('2010-01-01', periods=5)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
data.c2[dts[0]] = 95
data.c1[dts[1]] = 95
data.c2[dts[2]] = 95
data.c2[dts[3]] = 95
data.c2[dts[4]] = 95
data.c1[dts[4]] = 105
s.setup(data)
# define strategy logic
def algo(target):
# close out any open positions
target.flatten()
# get stock w/ lowest price
c = target.universe.loc[target.now].idxmin()
# allocate all capital to that stock
target.allocate(target.value, c)
# replace run logic
s.run = algo
# start w/ 1000
s.adjust(1000)
# loop through dates manually
i = 0
# update t0
s.update(dts[i])
assert len(s.children) == 2
assert s.value == 1000
# run t0
s.run(s)
assert len(s.children) == 2
assert s.value == 1000
assert s.capital == 50
assert c2.value == 950
assert c2.weight == 950.0 / 1000
assert c2.price == 95
# update out t0
s.update(dts[i])
assert len(s.children) == 2
assert s.value == 1000
assert s.capital == 50
assert c2.value == 950
assert c2.weight == 950.0 / 1000
assert c2.price == 95
# update t1
i = 1
s.update(dts[i])
assert s.value == 1050
assert s.capital == 50
assert len(s.children) == 2
assert c2.value == 1000
assert c2.weight == 1000.0 / 1050.
assert c2.price == 100
# run t1 - close out c2, open c1
s.run(s)
assert c1.value == 1045
assert c1.weight == 1045.0 / 1050
assert c1.price == 95
assert c2.value == 0
assert c2.weight == 0
assert c2.price == 100
assert len(s.children) == 2
assert s.value == 1050
assert s.capital == 5
# update out t1
s.update(dts[i])
assert len(s.children) == 2
assert s.value == 1050
assert s.capital == 5
assert c1.value == 1045
assert c1.weight == 1045.0 / 1050
assert c1.price == 95
assert c2.value == 0
assert c2.weight == 0
assert c2.price == 100
# update t2
i = 2
s.update(dts[i])
assert len(s.children) == 2
assert s.value == 1105
assert s.capital == 5
assert c1.value == 1100
assert c1.weight == 1100.0 / 1105
assert c1.price == 100
assert c2.value == 0
assert c2.weight == 0
assert c2.price == 95
# run t2
s.run(s)
assert len(s.children) == 2
assert s.value == 1105
assert s.capital == 60
assert c1.value == 0
assert c1.weight == 0
assert c1.price == 100
assert c2.value == 1045
assert c2.weight == 1045.0 / 1105
assert c2.price == 95
# update out t2
s.update(dts[i])
assert len(s.children) == 2
assert s.value == 1105
assert s.capital == 60
assert c1.value == 0
assert c1.weight == 0
assert c1.price == 100
assert c2.value == 1045
assert c2.weight == 1045.0 / 1105
assert c2.price == 95
# update t3
i = 3
s.update(dts[i])
assert len(s.children) == 2
assert s.value == 1105
assert s.capital == 60
assert c1.value == 0
assert c1.weight == 0
assert c1.price == 100
assert c2.value == 1045
assert c2.weight == 1045.0 / 1105
assert c2.price == 95
# run t3
s.run(s)
assert len(s.children) == 2
assert s.value == 1105
assert s.capital == 60
assert c1.value == 0
assert c1.weight == 0
assert c1.price == 100
assert c2.value == 1045
assert c2.weight == 1045.0 / 1105
assert c2.price == 95
# update out t3
s.update(dts[i])
assert len(s.children) == 2
assert s.value == 1105
assert s.capital == 60
assert c1.value == 0
assert c1.weight == 0
assert c1.price == 100
assert c2.value == 1045
assert c2.weight == 1045.0 / 1105
assert c2.price == 95
# update t4
i = 4
s.update(dts[i])
assert len(s.children) == 2
assert s.value == 1105
assert s.capital == 60
assert c1.value == 0
assert c1.weight == 0
# accessing price should refresh - this child has been idle for a while -
# must make sure we can still get fresh prices
assert c1.price == 105
assert len(c1.prices) == 5
assert c2.value == 1045
assert c2.weight == 1045.0 / 1105
assert c2.price == 95
# run t4
s.run(s)
assert len(s.children) == 2
assert s.value == 1105
assert s.capital == 60
assert c1.value == 0
assert c1.weight == 0
assert c1.price == 105
assert c2.value == 1045
assert c2.weight == 1045.0 / 1105
assert c2.price == 95
# update out t4
s.update(dts[i])
assert len(s.children) == 2
assert s.value == 1105
assert s.capital == 60
assert c1.value == 0
assert c1.weight == 0
assert c1.price == 105
assert c2.value == 1045
assert c2.weight == 1045.0 / 1105
assert c2.price == 95
def test_strategybase_multiple_calls_no_post_update():
s = StrategyBase('s')
s.set_commissions(lambda q, p: 1)
dts = pd.date_range('2010-01-01', periods=5)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
data.c2[dts[0]] = 95
data.c1[dts[1]] = 95
data.c2[dts[2]] = 95
data.c2[dts[3]] = 95
data.c2[dts[4]] = 95
data.c1[dts[4]] = 105
s.setup(data)
# define strategy logic
def algo(target):
# close out any open positions
target.flatten()
# get stock w/ lowest price
c = target.universe.loc[target.now].idxmin()
# allocate all capital to that stock
target.allocate(target.value, c)
# replace run logic
s.run = algo
# start w/ 1000
s.adjust(1000)
# loop through dates manually
i = 0
# update t0
s.update(dts[i])
assert len(s.children) == 0
assert s.value == 1000
# run t0
s.run(s)
assert len(s.children) == 1
assert s.value == 999
assert s.capital == 49
c2 = s['c2']
assert c2.value == 950
assert c2.weight == 950.0 / 999
assert c2.price == 95
# update t1
i = 1
s.update(dts[i])
assert s.value == 1049
assert s.capital == 49
assert len(s.children) == 1
assert 'c2' in s.children
c2 = s['c2']
assert c2.value == 1000
assert c2.weight == 1000.0 / 1049.0
assert c2.price == 100
# run t1 - close out c2, open c1
s.run(s)
assert len(s.children) == 2
assert s.value == 1047
assert s.capital == 2
c1 = s['c1']
assert c1.value == 1045
assert c1.weight == 1045.0 / 1047
assert c1.price == 95
assert c2.value == 0
assert c2.weight == 0
assert c2.price == 100
# update t2
i = 2
s.update(dts[i])
assert len(s.children) == 2
assert s.value == 1102
assert s.capital == 2
assert c1.value == 1100
assert c1.weight == 1100.0 / 1102
assert c1.price == 100
assert c2.value == 0
assert c2.weight == 0
assert c2.price == 95
# run t2
s.run(s)
assert len(s.children) == 2
assert s.value == 1100
assert s.capital == 55
assert c1.value == 0
assert c1.weight == 0
assert c1.price == 100
assert c2.value == 1045
assert c2.weight == 1045.0 / 1100
assert c2.price == 95
# update t3
i = 3
s.update(dts[i])
assert len(s.children) == 2
assert s.value == 1100
assert s.capital == 55
assert c1.value == 0
assert c1.weight == 0
assert c1.price == 100
assert c2.value == 1045
assert c2.weight == 1045.0 / 1100
assert c2.price == 95
# run t3
s.run(s)
assert len(s.children) == 2
assert s.value == 1098
assert s.capital == 53
assert c1.value == 0
assert c1.weight == 0
assert c1.price == 100
assert c2.value == 1045
assert c2.weight == 1045.0 / 1098
assert c2.price == 95
# update t4
i = 4
s.update(dts[i])
assert len(s.children) == 2
assert s.value == 1098
assert s.capital == 53
assert c1.value == 0
assert c1.weight == 0
# accessing price should refresh - this child has been idle for a while -
# must make sure we can still get fresh prices
assert c1.price == 105
assert len(c1.prices) == 5
assert c2.value == 1045
assert c2.weight == 1045.0 / 1098
assert c2.price == 95
# run t4
s.run(s)
assert len(s.children) == 2
assert s.value == 1096
assert s.capital == 51
assert c1.value == 0
assert c1.weight == 0
assert c1.price == 105
assert c2.value == 1045
assert c2.weight == 1045.0 / 1096
assert c2.price == 95
def test_strategybase_prices():
dts = pd.date_range('2010-01-01', periods=21)
rawd = [13.555, 13.75, 14.16, 13.915, 13.655,
13.765, 14.02, 13.465, 13.32, 14.65,
14.59, 14.175, 13.865, 13.865, 13.89,
13.85, 13.565, 13.47, 13.225, 13.385,
12.89]
data = pd.DataFrame(index=dts, data=rawd, columns=['a'])
s = StrategyBase('s')
s.set_commissions(lambda q, p: 1)
s.setup(data)
# buy 100 shares on day 1 - hold until end
# just enough to buy 100 shares + 1$ commission
s.adjust(1356.50)
s.update(dts[0])
# allocate all capital to child a
# a should be dynamically created and should have
# 100 shares allocated. s.capital should be 0
s.allocate(s.value, 'a')
assert s.capital == 0
assert s.value == 1355.50
assert len(s.children) == 1
aae(s.price, 99.92628, 5)
a = s['a']
assert a.position == 100
assert a.value == 1355.50
assert a.weight == 1
assert a.price == 13.555
assert len(a.prices) == 1
# update through all dates and make sure price is ok
s.update(dts[1])
aae(s.price, 101.3638, 4)
s.update(dts[2])
aae(s.price, 104.3863, 4)
s.update(dts[3])
aae(s.price, 102.5802, 4)
# finish updates and make sure ok at end
for i in range(4, 21):
s.update(dts[i])
assert len(s.prices) == 21
aae(s.prices[-1], 95.02396, 5)
aae(s.prices[-2], 98.67306, 5)
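# Illustrative check of the price index asserted above (a sketch of the
# arithmetic, not a statement about StrategyBase internals): with no capital
# flows after the initial adjust, price ~= value / 1356.50 * 100, e.g. on the
# second day the 100 shares are worth 100 * 13.75 = 1375 and
# 1375 / 1356.50 * 100 ~= 101.3638, matching the aae() call for dts[1].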
def test_fail_if_root_value_negative():
s = StrategyBase('s')
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
data['c1'][dts[0]] = 100
data['c2'][dts[0]] = 95
s.setup(data)
s.adjust(-100)
# trigger update
s.update(dts[0])
assert s.bankrupt
# make sure only triggered if root negative
c1 = StrategyBase('c1')
s = StrategyBase('s', children=[c1])
c1 = s['c1']
s.setup(data)
s.adjust(1000)
c1.adjust(-100)
s.update(dts[0])
# now make it trigger
c1.adjust(-1000)
# trigger update
s.update(dts[0])
assert s.bankrupt
def test_fail_if_0_base_in_return_calc():
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
data['c1'][dts[0]] = 100
data['c2'][dts[0]] = 95
# must setup tree because if not negative root error pops up first
c1 = StrategyBase('c1')
s = StrategyBase('s', children=[c1])
c1 = s['c1']
s.setup(data)
s.adjust(1000)
c1.adjust(100)
s.update(dts[0])
c1.adjust(-100)
s.update(dts[1])
try:
c1.adjust(-100)
s.update(dts[1])
assert False
except ZeroDivisionError as e:
if 'Could not update' not in str(e):
assert False
def test_strategybase_tree_rebalance():
c1 = SecurityBase('c1')
c2 = SecurityBase('c2')
s = StrategyBase('p', [c1, c2])
s.set_commissions(lambda q, p: 1)
c1 = s['c1']
c2 = s['c2']
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
data['c1'][dts[1]] = 105
data['c2'][dts[1]] = 95
s.setup(data)
i = 0
s.update(dts[i], data.loc[dts[i]])
s.adjust(1000)
assert s.value == 1000
assert s.capital == 1000
assert c1.value == 0
assert c2.value == 0
# now rebalance c1
s.rebalance(0.5, 'c1', update=True)
assert s.root.stale == True
assert c1.position == 4
assert c1.value == 400
assert s.capital == 1000 - 401
assert s.value == 999
assert c1.weight == 400.0 / 999
assert c2.weight == 0
# Check that rebalance with update=False
# does not mark the node as stale
s.rebalance(0.6, 'c1', update=False)
assert s.root.stale == False
def test_strategybase_tree_decimal_position_rebalance():
c1 = SecurityBase('c1')
c2 = SecurityBase('c2')
s = StrategyBase('p', [c1, c2])
s.use_integer_positions(False)
c1 = s['c1']
c2 = s['c2']
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
s.setup(data)
i = 0
s.update(dts[i], data.loc[dts[i]])
s.adjust(1000.2)
s.rebalance(0.42, 'c1')
s.rebalance(0.58, 'c2')
aae(c1.value, 420.084)
aae(c2.value, 580.116)
aae(c1.value + c2.value, 1000.2)
def test_rebalance_child_not_in_tree():
s = StrategyBase('p')
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
data['c1'][dts[1]] = 105
data['c2'][dts[1]] = 95
s.setup(data)
i = 0
s.update(dts[i])
s.adjust(1000)
# rebalance to 0 w/ child that is not present - should ignore
s.rebalance(0, 'c2')
assert s.value == 1000
assert s.capital == 1000
assert len(s.children) == 0
def test_strategybase_tree_rebalance_to_0():
c1 = SecurityBase('c1')
c2 = SecurityBase('c2')
s = StrategyBase('p', [c1, c2])
c1 = s['c1']
c2 = s['c2']
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
data['c1'][dts[1]] = 105
data['c2'][dts[1]] = 95
s.setup(data)
i = 0
s.update(dts[i], data.loc[dts[i]])
s.adjust(1000)
assert s.value == 1000
assert s.capital == 1000
assert c1.value == 0
assert c2.value == 0
# now rebalance c1
s.rebalance(0.5, 'c1')
assert c1.position == 5
assert c1.value == 500
assert s.capital == 1000 - 500
assert s.value == 1000
assert c1.weight == 500.0 / 1000
assert c2.weight == 0
# now rebalance c1
s.rebalance(0, 'c1')
assert c1.position == 0
assert c1.value == 0
assert s.capital == 1000
assert s.value == 1000
assert c1.weight == 0
assert c2.weight == 0
def test_strategybase_tree_rebalance_level2():
c1 = SecurityBase('c1')
c12 = copy.deepcopy(c1)
c2 = SecurityBase('c2')
c22 = copy.deepcopy(c2)
s1 = StrategyBase('s1', [c1, c2])
s2 = StrategyBase('s2', [c12, c22])
m = StrategyBase('m', [s1, s2])
s1 = m['s1']
s2 = m['s2']
c1 = s1['c1']
c2 = s1['c2']
c12 = s2['c1']
c22 = s2['c2']
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
data['c1'][dts[1]] = 105
data['c2'][dts[1]] = 95
m.setup(data)
i = 0
m.update(dts[i], data.loc[dts[i]])
m.adjust(1000)
assert m.value == 1000
assert m.capital == 1000
assert s1.value == 0
assert s2.value == 0
assert c1.value == 0
assert c2.value == 0
# now rebalance child s1 - since its children are 0, no waterfall alloc
m.rebalance(0.5, 's1')
assert s1.value == 500
assert m.capital == 1000 - 500
assert m.value == 1000
assert s1.weight == 500.0 / 1000
assert s2.weight == 0
# now allocate directly to child of child
s1.rebalance(0.4, 'c1')
assert s1.value == 500
assert s1.capital == 500 - 200
assert c1.value == 200
assert c1.weight == 200.0 / 500
assert c1.position == 2
assert m.capital == 1000 - 500
assert m.value == 1000
assert s1.weight == 500.0 / 1000
assert s2.weight == 0
assert c12.value == 0
# now rebalance child s1 again and make sure c1 also gets proportional
# increase
m.rebalance(0.8, 's1')
assert s1.value == 800
aae(m.capital, 200, 1)
assert m.value == 1000
assert s1.weight == 800 / 1000
assert s2.weight == 0
assert c1.value == 300.0
assert c1.weight == 300.0 / 800
assert c1.position == 3
# now rebalance child s1 to 0 - should close out s1 and c1 as well
m.rebalance(0, 's1')
assert s1.value == 0
assert m.capital == 1000
assert m.value == 1000
assert s1.weight == 0
assert s2.weight == 0
assert c1.weight == 0
def test_strategybase_tree_rebalance_base():
c1 = SecurityBase('c1')
c2 = SecurityBase('c2')
s = StrategyBase('p', [c1, c2])
s.set_commissions(lambda q, p: 1)
c1 = s['c1']
c2 = s['c2']
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
data['c1'][dts[1]] = 105
data['c2'][dts[1]] = 95
s.setup(data)
i = 0
s.update(dts[i], data.loc[dts[i]])
s.adjust(1000)
assert s.value == 1000
assert s.capital == 1000
assert c1.value == 0
assert c2.value == 0
# check that 2 rebalances of equal weight lead to two different allocs
# since value changes after first call
s.rebalance(0.5, 'c1')
assert c1.position == 4
assert c1.value == 400
assert s.capital == 1000 - 401
assert s.value == 999
assert c1.weight == 400.0 / 999
assert c2.weight == 0
s.rebalance(0.5, 'c2')
assert c2.position == 4
assert c2.value == 400
assert s.capital == 1000 - 401 - 401
assert s.value == 998
assert c2.weight == 400.0 / 998
assert c1.weight == 400.0 / 998
# close out everything
s.flatten()
# adjust to get back to 1000
s.adjust(4)
assert s.value == 1000
assert s.capital == 1000
assert c1.value == 0
assert c2.value == 0
# now rebalance but set fixed base
base = s.value
s.rebalance(0.5, 'c1', base=base)
assert c1.position == 4
assert c1.value == 400
assert s.capital == 1000 - 401
assert s.value == 999
assert c1.weight == 400.0 / 999
assert c2.weight == 0
s.rebalance(0.5, 'c2', base=base)
assert c2.position == 4
assert c2.value == 400
assert s.capital == 1000 - 401 - 401
assert s.value == 998
assert c2.weight == 400.0 / 998
assert c1.weight == 400.0 / 998
def test_algo_stack():
a1 = mock.MagicMock(return_value=True)
a2 = mock.MagicMock(return_value=False)
a3 = mock.MagicMock(return_value=True)
# no run_always for now
del a1.run_always
del a2.run_always
del a3.run_always
stack = AlgoStack(a1, a2, a3)
target = mock.MagicMock()
assert not stack(target)
assert a1.called
assert a2.called
assert not a3.called
# now test that run_always marked are run
a1 = mock.MagicMock(return_value=True)
a2 = mock.MagicMock(return_value=False)
a3 = mock.MagicMock(return_value=True)
# a3 will have run_always
del a1.run_always
del a2.run_always
stack = AlgoStack(a1, a2, a3)
target = mock.MagicMock()
assert not stack(target)
assert a1.called
assert a2.called
assert a3.called
def test_set_commissions():
s = StrategyBase('s')
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
s.set_commissions(lambda x, y: 1.0)
s.setup(data)
s.update(dts[0])
s.adjust(1000)
s.allocate(500, 'c1')
assert s.capital == 599
s.set_commissions(lambda x, y: 0.0)
s.allocate(-400, 'c1')
assert s.capital == 999
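# Note: the commission callable receives (quantity, price). A hypothetical
# mixed fixed-plus-proportional model, shown only as a sketch:
# s.set_commissions(lambda q, p: 1.0 + 0.001 * abs(q) * p)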
def test_strategy_tree_proper_return_calcs():
s1 = StrategyBase('s1')
s2 = StrategyBase('s2')
m = StrategyBase('m', [s1, s2])
s1 = m['s1']
s2 = m['s2']
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
data.loc[dts[1], 'c1'] = 105
data.loc[dts[1], 'c2'] = 95
m.setup(data)
i = 0
m.update(dts[i], data.loc[dts[i]])
m.adjust(1000)
# since children have w == 0 this should stay in s
m.allocate(1000)
assert m.value == 1000
assert m.capital == 1000
assert m.price == 100
assert s1.value == 0
assert s2.value == 0
# now allocate directly to child
s1.allocate(500)
assert m.capital == 500
assert m.value == 1000
assert m.price == 100
assert s1.value == 500
assert s1.weight == 500.0 / 1000
assert s1.price == 100
assert s2.weight == 0
# allocate to child2 via parent method
m.allocate(500, 's2')
assert m.capital == 0
assert m.value == 1000
assert m.price == 100
assert s1.value == 500
assert s1.weight == 500.0 / 1000
assert s1.price == 100
assert s2.value == 500
assert s2.weight == 500.0 / 1000
assert s2.price == 100
# now allocate and incur commission fee
s1.allocate(500, 'c1')
assert m.capital == 0
assert m.value == 1000
assert m.price == 100
assert s1.value == 500
assert s1.weight == 500.0 / 1000
assert s1.price == 100
assert s2.value == 500
assert s2.weight == 500.0 / 1000.0
assert s2.price == 100
def test_strategy_tree_proper_universes():
def do_nothing(x):
return True
child1 = Strategy('c1', [do_nothing], ['b', 'c'])
parent = Strategy('m', [do_nothing], [child1, 'a'])
child1 = parent['c1']
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(
{'a': pd.Series(data=1, index=dts, name='a'),
'b': pd.Series(data=2, index=dts, name='b'),
'c': pd.Series(data=3, index=dts, name='c')
import os
import numpy as np
import pandas as pd
import torch
from torch.utils.data import Dataset, DataLoader
# from sklearn.preprocessing import StandardScaler
from utils.tools import StandardScaler
from utils.timefeatures import time_features
import warnings
warnings.filterwarnings('ignore')
class Dataset_ETT_hour(Dataset):
def __init__(self, root_path, flag='train', size=None,
features='S', data_path='ETTh1.csv',
target='OT', scale=True, inverse=False, timeenc=0, freq='h', cols=None):
# size [seq_len, label_len, pred_len]
# info
if size is None:
self.seq_len = 24*4*4
self.label_len = 24*4
self.pred_len = 24*4
else:
self.seq_len = size[0]
self.label_len = size[1]
self.pred_len = size[2]
# init
assert flag in ['train', 'test', 'val']
type_map = {'train':0, 'val':1, 'test':2}
self.set_type = type_map[flag]
self.features = features
self.target = target
self.scale = scale
self.inverse = inverse
self.timeenc = timeenc
self.freq = freq
self.flag = flag
self.root_path = root_path
self.data_path = data_path
self.__read_data__()
def __read_data__(self):
self.scaler = StandardScaler()
df_raw = pd.read_csv(os.path.join(self.root_path,
self.data_path))
self.df_raw = df_raw
# take len() of the dataset and split it: here 15% train, 80% test
num_train = int(len(df_raw)*0.15)
num_test = int(len(df_raw)*0.80)
# 'vali' is the validation split (the remainder)
num_vali = len(df_raw) - num_train - num_test
border1s = [0, num_train-self.seq_len, len(df_raw)-num_test-self.seq_len]
border2s = [num_train, num_train+num_vali, len(df_raw)]
border1 = border1s[self.set_type]
border2 = border2s[self.set_type]
if self.features=='M' or self.features=='MS':
# take the header columns and drop the date column; remaining columns, e.g.: Index(['open', 'close', 'TT'], dtype='object')
cols_data = df_raw.columns[1:]
# filter out the date column
df_data = df_raw[cols_data]
elif self.features=='S':
df_data = df_raw[[self.target]]
if self.scale:
# data used for training
train_data = df_data[border1s[0]:border2s[0]]
# compute mean and std in preparation for scaling
self.scaler.fit(train_data.values)
# scale the data
data = self.scaler.transform(df_data.values)
else:
data = df_data.values
df_stamp = df_raw[['date']][border1:border2]
df_stamp['date'] = pd.to_datetime(df_stamp.date)
import numpy as np
import numpy.testing as npt
import pandas as pd
import pandas.testing as pdt
import pytest
import datetime
from pandas.api.types import is_numeric_dtype
import timeserio.ini as ini
from timeserio.data.mock import mock_fit_data
from timeserio.preprocessing import PandasDateTimeFeaturizer
from timeserio.preprocessing.datetime import (
get_fractional_day_from_series, get_fractional_hour_from_series,
get_fractional_year_from_series, truncate_series,
get_zero_indexed_month_from_series, get_time_is_in_interval_from_series,
get_is_holiday_from_series
)
datetime_column = ini.Columns.datetime
seq_column = f'seq_{ini.Columns.datetime}'
usage_column = ini.Columns.target
@pytest.fixture
def df():
return mock_fit_data(start_date=datetime.datetime(2017, 1, 1, 1, 0))
@pytest.fixture
def featurizer():
return PandasDateTimeFeaturizer()
def test_get_fractional_hour_from_series():
series = pd.Series(
pd.date_range(start='2000-01-01', freq='0.5H', periods=48)
)
fractionalhour = get_fractional_hour_from_series(series)
expected = pd.Series(np.linspace(0, 23.5, 48))
pdt.assert_series_equal(fractionalhour, expected)
def test_get_fractional_day_from_series():
series = pd.Series(pd.date_range(start='2000-01-01', freq='6H', periods=5))
fractional_day = get_fractional_day_from_series(series)
expected = pd.Series([0, 0.25, 0.5, 0.75, 0])
pdt.assert_series_equal(fractional_day, expected)
def test_get_fractional_year_from_series():
series = pd.Series(
pd.date_range(start='2000-01-01', freq='31D', periods=5)
)
fractional_year = get_fractional_year_from_series(series)
expected = pd.Series([0, 1, 2, 3, 4]) * 31 / 365.
pdt.assert_series_equal(fractional_year, expected)
def test_get_is_holiday_from_series():
series = pd.Series(pd.date_range(start='2000-01-01', freq='D', periods=5))
is_holiday = get_is_holiday_from_series(series)
expected = pd.Series([1, 1, 1, 1, 0])
pdt.assert_series_equal(is_holiday, expected)
@pytest.mark.parametrize(
"country, expected",
[("England", [1, 0, 0, 1]), ("Scotland", [1, 1, 1, 0])]
)
def test_get_is_holiday_from_series_with_country(country, expected):
dates = ["2020-01-01", "2020-01-02", "2020-08-03", "2020-08-31"]
series = pd.to_datetime(pd.Series(dates))
is_holiday = get_is_holiday_from_series(series, country=country)
pdt.assert_series_equal(is_holiday, pd.Series(expected))
def test_get_zero_indexed_month_from_series():
series = pd.Series(
pd.date_range(start='2000-01-01', freq='1M', periods=12)
)
month0 = get_zero_indexed_month_from_series(series)
expected = pd.Series(range(12))
pdt.assert_series_equal(month0, expected)
@pytest.mark.parametrize(
'series_data, truncation_period, expected_data',
[
([pd.Timestamp(2019, 1, 1, 1, 9)], 'H', [pd.Timestamp(2019, 1, 1, 1)]),
([pd.Timestamp(2019, 1, 2, 1)], 'd', [pd.Timestamp(2019, 1, 2)]),
([pd.Timestamp(2019, 1, 1)], 'W', [pd.Timestamp(2018, 12, 31)]),
([pd.Timestamp(2019, 1, 1)], 'W-FRI', [pd.Timestamp(2018, 12, 29)]),
([pd.Timestamp(2019, 1, 1)], 'W-TUE', [pd.Timestamp(2018, 12, 26)]),
([pd.Timestamp(2019, 2, 8)], 'm', [pd.Timestamp(2019, 2, 1)]),
([pd.Timestamp(2019, 3, 4)], 'Y', [pd.Timestamp(2019, 1, 1)]),
(
[pd.Timestamp(2019, 1, 1, 1, 30), pd.Timestamp(2019, 1, 1, 2, 30)],
'H',
[pd.Timestamp(2019, 1, 1, 1), pd.Timestamp(2019, 1, 1, 2)],
),
]
)
def test_truncate_series(series_data, truncation_period, expected_data):
out = truncate_series(pd.Series(series_data), truncation_period)
expected = pd.Series(expected_data)
pdt.assert_series_equal(out, expected)
# coding: utf-8
# In[1]:
"""Running basic code:
Importing packages, setting working directory,
printing out date"""
from IPython.display import HTML
import pandas as pd
import numpy as np
from matplotlib import pyplot as plt
import seaborn as sns
from IPython.display import YouTubeVideo
from scipy.spatial.distance import pdist, squareform
from scipy.cluster.hierarchy import linkage, dendrogram
from matplotlib.colors import ListedColormap
import networkx as nx
import urllib
import os
import itertools
from bokeh.io import show, output_file
from bokeh.models import Plot, Range1d, MultiLine, Circle, HoverTool, TapTool, BoxSelectTool, BoxZoomTool, ResetTool, PanTool, WheelZoomTool
import bokeh.models.graphs as graphs
#from bokeh.model.graphs import from_networkx, NodesAndLinkedEdges, EdgesAndLinkedNodes
from bokeh.palettes import Spectral4
plt.rcParams['figure.figsize'] = (16, 9)
plt.rcParams['font.size'] = 9
plt.rcParams['font.family'] = 'Times New Roman'
plt.rcParams['axes.labelsize'] = plt.rcParams['font.size']
plt.rcParams['axes.titlesize'] = 1.5*plt.rcParams['font.size']
plt.rcParams['legend.fontsize'] = plt.rcParams['font.size']
plt.rcParams['xtick.labelsize'] = plt.rcParams['font.size']
plt.rcParams['ytick.labelsize'] = plt.rcParams['font.size']
plt.rcParams['savefig.dpi'] = 600
plt.rcParams['xtick.major.size'] = 3
plt.rcParams['xtick.minor.size'] = 3
plt.rcParams['xtick.major.width'] = 1
plt.rcParams['xtick.minor.width'] = 1
plt.rcParams['ytick.major.size'] = 3
plt.rcParams['ytick.minor.size'] = 3
plt.rcParams['ytick.major.width'] = 1
plt.rcParams['ytick.minor.width'] = 1
plt.rcParams['legend.frameon'] = False
plt.rcParams['legend.loc'] = 'center left'
plt.rcParams['axes.linewidth'] = 1
plt.gca().spines['right'].set_color('none')
plt.gca().spines['top'].set_color('none')
plt.gca().xaxis.set_ticks_position('bottom')
plt.gca().yaxis.set_ticks_position('left')
sns.set_style('white')
plt.close()
#############################################################################################
#############################################################################################
def plot_unipartite_network (title,network, network_name, layout_func):
"""Creating positions of the nodes"""
if layout_func == 'fruchterman_reingold':
layout = nx.fruchterman_reingold_layout(network, scale=2 )#k = 0.05, iterations=500
elif layout_func =='spring':
layout = nx.spring_layout(network, k = 0.05, scale=2)
elif layout_func =='circular':
layout = nx.circular_layout(network, scale=1, center=None, dim=2)
elif layout_func == 'kamada':
layout = nx.kamada_kawai_layout(network, scale=1, center=None, dim=2)
elif layout_func == 'spectral':
layout = nx.spectral_layout(network, scale=1, center=None, dim=2)
else:
layout = nx.fruchterman_reingold_layout(network, scale=2 )#k = 0.05, iterations=500
from bokeh.models import ColumnDataSource
from bokeh.plotting import show, figure , output_file
from bokeh.io import output_notebook
from bokeh.models import HoverTool
output_notebook()
nodes, nodes_coordinates = zip(*layout.items())
nodes_xs, nodes_ys = list(zip(*nodes_coordinates))
#nodes_source = ColumnDataSource(dict(x=nodes_xs, y=nodes_ys,
# name=nodes,))
node_data = dict(x=nodes_xs, y=nodes_ys, name=nodes)
nd = pd.DataFrame.from_dict(node_data).dropna()
#hostc = '#377eb8'
nodes_source = ColumnDataSource(dict(x=nd.x.tolist(), y=nd.y.tolist(),
name = nd.name.tolist()))
"""
Generate the figure
1. Create tools
2. Set plot size and tools
"""
#hover = HoverTool(tooltips=[('', '@name')])
#hover = HoverTool(names=["name"])
plot = figure(title=title,
plot_width=800, plot_height=800,
tools=['pan','wheel_zoom', 'reset','box_zoom','tap' ])
"""
plot main circles
1. Plot only nodes according to their positions
"""
r_circles = plot.circle('x', 'y', source=nodes_source, size=10,
color= '#377eb8', alpha=0.5, level = 'overlay',name='name')
"""
Function
Get data for generation of edges
"""
def get_edges_specs(_network, _layout):
c = dict(xs=[], ys=[], alphas=[])
#print d
weights = [d['weight'] for u, v, d in _network.edges(data=True)]
max_weight = max(weights)
calc_alpha = lambda h: 0.1 + 0.5 * (h / max_weight)
# example: { ..., ('user47', 'da_bjoerni', {'weight': 3}), ... }
for u, v, data in _network.edges(data=True):
c['xs'].append([_layout[u][0], _layout[v][0]])
c['ys'].append([_layout[u][1], _layout[v][1]])
c['alphas'].append(calc_alpha(data['weight']))
return c
"""
get the data for edges
"""
lines_source = ColumnDataSource(get_edges_specs(network, layout))
"""
plot edge lines
"""
r_lines = plot.multi_line('xs', 'ys', line_width=1.5,
alpha=1 , color='#b3b6b7',
source=lines_source, )#name = 'edge'
"""Centrality """
centrality = nx.algorithms.centrality.betweenness_centrality(network)
""" first element are nodes again """
_, nodes_centrality = zip(*centrality.items())
max_centraliy = max(nodes_centrality)
nodes_source.add([7 + 15 * t / max_centraliy
for t in nodes_centrality],
'centrality')
"""Communities"""
from community import community_louvain
partition = community_louvain.best_partition(network)
p_, nodes_community = zip(*partition.items())
nodes_source.add(nodes_community, 'community')
community_colors = ['#e41a1c','#377eb8','#4daf4a','#984ea3','#ff7f00','#ffff33','#a65628',
'#b3cde3','#ccebc5','#decbe4','#fed9a6','#ffffcc','#e5d8bd','#fddaec',
'#1b9e77','#d95f02','#7570b3','#e7298a','#66a61e','#e6ab02','#a6761d',
'#666666']
nodes_source.add([community_colors[t % len(community_colors)]
for t in nodes_community],'community_color')
"""Host Type colour"""
"""Update the plot with communities and Centrality"""
r_circles.glyph.size = 'centrality'
r_circles.glyph.fill_color = 'community_color'
hover = HoverTool(tooltips=[('', '@name')], renderers=[r_circles])
plot.add_tools(hover)
output_file(network_name+"_unipartite.html")
show(plot)
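# Usage sketch (illustrative, not part of the analysis pipeline): any weighted
# networkx graph whose edges carry a 'weight' attribute can be plotted, e.g.
# G = nx.karate_club_graph()
# nx.set_edge_attributes(G, 1, 'weight')
# plot_unipartite_network('Karate club', G, 'karate_demo', 'kamada')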
#############################################################################################
#############################################################################################
def construct_bipartite_host_virus_network(dataframe, network_name, plot= False, filter_file= False,
taxonomic_filter = None):
#if data_filename:
# """Importing all the data
# data: """
# if ".pickle" in data_filename:
# data = pd.read_pickle(data_filename,)
# else:
# data = pd.read_csv(data_filename, encoding='ISO-8859-1', low_memory=False)
data = dataframe
""" filter data according to viral family """
if taxonomic_filter:
data = data[data.viral_family == taxonomic_filter]
"""hosttaxa: creating dataframe of unique hosts and their characteristics to generate nodes"""
hosttaxa = data.groupby(['ScientificName']).size().reset_index().rename(columns={0:'count'})
"""vlist: creating list of unique viruses to generate nodes"""
vlist = data.virus_name.dropna().unique().tolist()
"""Construction of network"""
from networkx.algorithms import bipartite
DG=nx.Graph()
"""Initiating host nodes"""
for index, row in hosttaxa.iterrows():
DG.add_node(row['ScientificName'], type="host",
speciesname = row['ScientificName'], bipartite = 0 )
"""Initiating virus nodes"""
for virus in vlist:
DG.add_node(virus, type="virus", virusname = virus, bipartite = 1)
"""Iterating through the raw data to add Edges if a virus is found in a host"""
"""Iterating through the raw data to add Edges if a virus is found in a host"""
if filter_file:
for index, row in data.iterrows():
if row.ConfirmationResult == 'Positive':
DG.add_edge(row['ScientificName'], row['virus_name'], AnimalID = 'AnimalID', weight = 1)
else:
for index, row in data.iterrows():
DG.add_edge(row['ScientificName'], row['virus_name'], weight = 1)
"""Creating positions of the nodes"""
#layout = nx.spring_layout(DG, k = 0.05, scale=2) #
layout = nx.fruchterman_reingold_layout(DG, k = 0.05, iterations=50)
"""write graph """
nx.write_graphml(DG, network_name + "_bipartite.graphml")
"""
Plotting
"""
if plot:
from bokeh.models import ColumnDataSource
nodes, nodes_coordinates = zip(*layout.items())
nodes_xs, nodes_ys = list(zip(*nodes_coordinates))
node_data = dict(x=nodes_xs, y=nodes_ys, name=nodes)
nd = pd.DataFrame.from_dict(node_data)
def addNodeType(c):
if c.name in vlist:
return 'Virus'
else:
return 'Host'
#nd['node_type'] = nd.apply(addNodeType, axis=1)
virusc = '#ef8a62' # ,'#e05354'
hostc = '#67a9cf'
nt = []
nodecolors = []
for i in range (nd.shape[0]):
if nd.name[i] in vlist:
nt.append('virus')
nodecolors.append(virusc)
else:
nt.append('host')
nodecolors.append(hostc)
nd['node_type'] = nt
nd['colors'] = nodecolors
#nodes_source = ColumnDataSource(nd.to_dict())
nodes_source = ColumnDataSource(dict(x=nd.x.tolist(), y=nd.y.tolist(),
name = nd.name.tolist(),
node_type = nd.node_type.tolist(), colors = nd.colors.tolist()))
from bokeh.plotting import show, figure , output_file
from bokeh.io import output_notebook
from bokeh.models import HoverTool
output_notebook()
"""
Generate the figure
1. Create tools
2. Set plot size and tools
"""
#hover = HoverTool(tooltips=[('name', '@name'),('type', '@node_type')])
plot = figure(title=network_name+": Host virus bipartite network",
plot_width=1200, plot_height=1200,
tools=['pan','wheel_zoom','reset','box_zoom','tap' ])
"""
plot main circles
1. Plot only nodes according to their positions
"""
r_circles = plot.circle('x', 'y', source=nodes_source, size=10,
color= "colors", alpha=0.5, level = 'overlay',)
"""
Function
Get data for generation of edges
"""
def get_edges_specs(_network, _layout):
c = dict(xs=[], ys=[], alphas=[])
#print d
weights = [d['weight'] for u, v, d in _network.edges(data=True)]
max_weight = max(weights)
calc_alpha = lambda h: 0.1 + 0.6 * (h / max_weight)
# example: { ..., ('user47', 'da_bjoerni', {'weight': 3}), ... }
for u, v, data in _network.edges(data=True):
c['xs'].append([_layout[u][0], _layout[v][0]])
c['ys'].append([_layout[u][1], _layout[v][1]])
c['alphas'].append(calc_alpha(data['weight']))
return c
"""
get the data for edges
"""
lines_source = ColumnDataSource(get_edges_specs(DG, layout))
"""
plot edge lines
"""
r_lines = plot.multi_line('xs', 'ys', line_width=1.5,
alpha=1 , color='#b3b6b7',
source=lines_source)
"""Centrality """
centrality = nx.algorithms.centrality.betweenness_centrality(DG)
""" first element are nodes again """
_, nodes_centrality = zip(*centrality.items())
max_centraliy = max(nodes_centrality)
nodes_source.add([7 + 15 * t / max_centraliy
for t in nodes_centrality],
'centrality')
"""Communities"""
import community  # python-louvain
partition = community.best_partition(DG)
p_, nodes_community = zip(*partition.items())
nodes_source.add(nodes_community, 'community')
community_colors = ['#e41a1c','#377eb8','#4daf4a','#984ea3','#ff7f00','#ffff33','#a65628',
'#b3cde3','#ccebc5','#decbe4','#fed9a6','#ffffcc','#e5d8bd','#fddaec',
'#1b9e77','#d95f02','#7570b3','#e7298a','#66a61e','#e6ab02','#a6761d',
'#666666']
nodes_source.add([community_colors[t % len(community_colors)]
for t in nodes_community],'community_color')
"""Host Type colour"""
"""Update the plot with communities and Centrality"""
r_circles.glyph.size = 'centrality'
hover = HoverTool(tooltips=[('', '@name')], renderers=[r_circles])
plot.add_tools(hover)
output_file(network_name+"_bipartite.html")
show(plot)
return DG
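# Usage sketch (illustrative): the frame must provide the 'ScientificName' and
# 'virus_name' columns this function reads; the values below are made up.
# records = pd.DataFrame({'ScientificName': ['Homo sapiens', 'Rousettus aegyptiacus'],
#                         'virus_name': ['Marburg virus', 'Marburg virus']})
# demo_net = construct_bipartite_host_virus_network(records, 'demo', plot=False)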
#############################################################################################
#############################################################################################
def construct_unipartite_virus_virus_network(dataframe, network_name,
layout_func = 'fruchterman_reingold',
plot= False, filter_file= False,
taxonomic_filter = None,
return_df = False):
"""first construct bipartite network"""
if filter_file:
BPnx = construct_bipartite_host_virus_network(dataframe = dataframe, network_name= network_name,
plot=False, filter_file= True, taxonomic_filter = taxonomic_filter)
else:
BPnx = construct_bipartite_host_virus_network(dataframe = dataframe, network_name= network_name,
plot=False, filter_file= False, taxonomic_filter = taxonomic_filter)
#if data_filename:
# """Importing all the data
# data: """
# if ".pickle" in data_filename:
# data = pd.read_pickle(data_filename,)
# else:
# data = pd.read_csv(data_filename, encoding='ISO-8859-1', low_memory=False)
data = dataframe
data['ScientificName'] = data['ScientificName'].str.replace('[^\x00-\x7F]', '', regex=True)
if taxonomic_filter:
data = data[data.viral_family == taxonomic_filter]
"""hosttaxa: creating dataframe of unique hosts and their characteristics to generate nodes"""
hosttaxa = data.groupby(['ScientificName']).size().reset_index().rename(columns={0:'count'})
"""vlist: creating list of unique viruses to generate nodes"""
virus_dataframe = data.groupby(['virus_name', 'viral_family']).size().reset_index().rename(columns={0:'count'})
vlist = data.virus_name.dropna().unique().tolist()
"""Here we will copllapse the Bipartite network to monopartite
Nodes will be viruses
Edges will be hosts they share the virus with"""
df = pd.DataFrame(list(itertools.combinations(vlist, 2)))
df.columns = ['Virus1', 'Virus2']
def get_n_shared_hosts(c):
return len(list(nx.common_neighbors(BPnx, c['Virus1'],c['Virus2'])))
df['n_shared_hosts'] = df.apply(get_n_shared_hosts, axis=1)
#"""removing pairs with 0 shared hosts"""
#df.drop(df[df.n_shared_hosts == 0].index, inplace=True)
def addsharedhosts (c):
return sorted(nx.common_neighbors(BPnx, c['Virus1'],c['Virus2']))
df["shared_hosts"] = df.apply(addsharedhosts, axis=1)
print ('we have '+str(df.shape[0])+' virus pairs in our model')
"""Creating the a network now using the df
EDGES will be weighted according to number of shared hosts"""
VS_unx = nx.Graph()
"""Initiating virus nodes"""
for index, row in virus_dataframe.iterrows():
VS_unx.add_node(row['virus_name'], type="virus",
ViralFamily = str(row['viral_family']), bipartite = 1)
#for virus in pd.unique(df[['Virus1', 'Virus2']].values.ravel()).tolist():
# VS_unx.add_node(virus, type="virus", virusname = virus, bipartite = 1)
"""Iterating through the raw data to add Edges if a virus is found in a host"""
for index, row in df.iterrows():
if row['n_shared_hosts'] > 0:
VS_unx.add_edge(row['Virus1'], row['Virus2'], weight = row['n_shared_hosts'], hosts = ','.join(row['shared_hosts']))
"""Creating positions of the nodes"""
if layout_func == 'fruchterman_reingold':
layout = nx.fruchterman_reingold_layout(VS_unx, scale=2 )#k = 0.05, iterations=500
elif layout_func =='spring':
layout = nx.spring_layout(VS_unx, k = 0.05, scale=2)
elif layout_func =='circular':
layout = nx.circular_layout(VS_unx, scale=1, center=None, dim=2)
elif layout_func == 'kamada':
layout = nx.kamada_kawai_layout(VS_unx, scale=1, center=None, dim=2)
elif layout_func == 'spectral':
layout = nx.spectral_layout(VS_unx, scale=1, center=None, dim=2)
else:
layout = nx.fruchterman_reingold_layout(VS_unx, scale=2 )#k = 0.05, iterations=500
"""write graph """
#nx.write_graphml(VS_unx, network_name+"unipartite.graphml")
if plot:
plot_unipartite_network(title = network_name,network = VS_unx, network_name = network_name, layout_func = layout_func)
if return_df:
return df, VS_unx
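# Usage sketch (illustrative): the frame additionally needs a 'viral_family'
# column for the virus nodes; 'records' is the hypothetical frame from above.
# pairs_df, vv_net = construct_unipartite_virus_virus_network(
#     records, 'demo_unipartite', layout_func='kamada', plot=False, return_df=True)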
#######################################################################################################
#######################################################################################################
def calculate_features(data_frame, network, Species_file_name, data_path, virus_df, long = False):
print('calculate_features function is in function file 1st function')
print ('calculating topological network features')
################################################################################################################################
################################################################################################################################
################################################################################################################################
################################################################################################################################
print ('calculating Jaccard coefficients')
def jaccard (c):
return sorted(nx.jaccard_coefficient(network, [(c['Virus1'],c['Virus2'])]))[0][2]
data_frame["jaccard"] = data_frame.apply(jaccard, axis=1)
################################################################################################################################
################################################################################################################################
def hasShortestPath (c):
return nx.has_path(network, c['Virus1'], c['Virus2'])
data_frame["hasPath"] = data_frame.apply(hasShortestPath, axis=1)
print ('calculating shortest path length')
def ShortPathLen(c):
if c["hasPath"]:
return nx.shortest_path_length(network, c['Virus1'], c['Virus2'])
else:
return np.nan
data_frame["ShortPathLen"] = data_frame.apply(ShortPathLen, axis=1)
################################################################################################################################
################################################################################################################################
print ('calculating adamic/adar index')
def adar (c):
return sorted(nx.adamic_adar_index(network, [(c['Virus1'],c['Virus2'])]))[0][2]
data_frame["adamic_adar"] = data_frame.apply(adar, axis=1)
################################################################################################################################
################################################################################################################################
print ('calculating Resource coefficients')
def resource (c):
return sorted(nx.resource_allocation_index(network, [(c['Virus1'],c['Virus2'])]))[0][2]
data_frame["resource"] = data_frame.apply(resource, axis=1)
################################################################################################################################
################################################################################################################################
print ('calculating preferential attachment coefficients')
def preferential (c):
return sorted(nx.preferential_attachment(network, [(c['Virus1'],c['Virus2'])]))[0][2]
data_frame["preferential_attach"] = data_frame.apply(preferential, axis=1)
################################################################################################################################
################################################################################################################################
if long:
################################################################################################################################
################################################################################################################################
print ('listing neighbors')
def neighbors (c):
l = sorted(nx.common_neighbors(network, c['Virus1'],c['Virus2']))
return str(l)[1:-1]
data_frame["neighbors"] = data_frame.apply(neighbors, axis=1)
################################################################################################################################
################################################################################################################################
print ('calculating number of neighbors')
def neighbors_n (c):
return len(sorted(nx.common_neighbors(network, c['Virus1'],c['Virus2'])))
data_frame["neighbors_n"] = data_frame.apply(neighbors_n, axis=1)
################################################################################################################################
################################################################################################################################
print ('calculating difference in betweenness centrality')
btw = nx.betweenness_centrality(network, 25)
def betweenDiff(c):
return abs(btw[c['Virus1']] - btw[c['Virus2']])
data_frame["betweeness_diff"] = data_frame.apply(betweenDiff, axis=1)
################################################################################################################################
################################################################################################################################
print ('calculating node clusters')
from community import community_louvain
partition = community_louvain.best_partition(network)
################################################################################################################################
################################################################################################################################
def virus1_cluster(c):
return partition[c['Virus1']]
data_frame['VirusCluster1'] = data_frame.apply(virus1_cluster, axis=1)
def virus2_cluster(c):
return partition[c['Virus2']]
data_frame['VirusCluster2'] = data_frame.apply(virus2_cluster, axis=1)
################################################################################################################################
################################################################################################################################
print ('calculating if nodes are in a same cluster')
def in_same_cluster(c):
if(partition[c['Virus1']] == partition[c['Virus2']]):
return True
else:
return False
data_frame["in_same_cluster"] = data_frame.apply(in_same_cluster, axis=1)
################################################################################################################################
################################################################################################################################
print ('calculating difference in degree')
degree = nx.degree(network)
def degreeDiff(c):
return abs(degree[c['Virus1']] - degree[c['Virus2']])
data_frame["degree_diff"] = data_frame.apply(degreeDiff, axis=1)
################################################################################################################################
################################################################################################################################
if long:
IUCN = pd.read_csv(data_path+ Species_file_name)
IUCN["ScientificName"] = IUCN["Genus"].map(str) +' '+IUCN["Species"]
IUCN.loc[IUCN.ScientificName== 'Homo sapiens', 'Order'] = 'Humans'
################################################################################################################################
################################################################################################################################
print ('getting Order and Family values for shared hosts')
def getOrders (c):
orderlist = []
if len(c.shared_hosts) > 0:
for h in (c.shared_hosts):
try:
orderlist.append(IUCN.loc[IUCN['ScientificName'] == h, 'Order'].iloc[0])
except:
orderlist.append('MatchNotFound')
return orderlist
data_frame['orders'] = data_frame.apply(getOrders, axis=1)
################################################################################################################################
################################################################################################################################
def getFamily (c):
orderlist = []
if len(c.shared_hosts) > 0:
for h in (c.shared_hosts):
try:
orderlist.append(IUCN.loc[IUCN['ScientificName'] == h, 'Family'].iloc[0])
except:
orderlist.append('MatchNotFound')
return orderlist
data_frame['families'] = data_frame.apply(getFamily, axis=1)
################################################################################################################################
################################################################################################################################
def OrderRichness (c):
return len(set(c.orders))
def FamilyRichness (c):
return len(set(c.families))
data_frame['OrderRichness'] = data_frame.apply(OrderRichness, axis=1)
data_frame['FamilyRichness'] = data_frame.apply(FamilyRichness, axis=1)
print ('richness calculations complete')
################################################################################################################################
################################################################################################################################
print ('calculating ShannonH index of diversity for shared Orders and Familes of taxa')
def shannon_order(c):
total = len(c.orders)
counts = pd.Series(c.orders).value_counts().tolist()
h = sum(map(lambda x:abs(np.log(x/float(total)))*(x/float(total)), counts))
return h
data_frame['Order_H'] = data_frame.apply(shannon_order, axis=1)
################################################################################################################################
################################################################################################################################
def shannon_family(c):
total = len(c.families)
counts = pd.Series(c.families).value_counts().tolist()
h = sum(map(lambda x:abs(np.log(x/float(total)))*(x/float(total)), counts))
return h
data_frame['Familiy_H'] = data_frame.apply(shannon_family, axis=1)
################################################################################################################################
################################################################################################################################
print ('Matching Virus Families')
data_frame = pd.merge(data_frame,virus_df[['virus_name','viral_family','PubMed_Search_ln']], left_on='Virus1', right_on='virus_name', how='left')
data_frame = pd.merge(data_frame,virus_df[['virus_name','viral_family', 'PubMed_Search_ln']], left_on='Virus2', right_on='virus_name', how='left')
data_frame['ViralFamily1'] = data_frame['viral_family_x']
data_frame['ViralFamily2'] = data_frame['viral_family_y']
data_frame['PubMed_Search_ln1'] = data_frame['PubMed_Search_ln_x']
data_frame['PubMed_Search_ln2'] = data_frame['PubMed_Search_ln_y']
del data_frame['viral_family_y']
del data_frame['viral_family_x']
del data_frame['PubMed_Search_ln_x']
del data_frame['PubMed_Search_ln_y']
del data_frame['virus_name_x']
del data_frame['virus_name_y']
def MatchFamily(c):
if c.ViralFamily1 == c.ViralFamily2:
return 'True'
else:
return 'False'
data_frame['FamilyMatch'] = data_frame.apply(MatchFamily, axis=1)
################################################################################################################################
################################################################################################################################
print ('difference in PubMed hits')
def PubMed_hits(c):
return abs(c.PubMed_Search_ln1 - c.PubMed_Search_ln2)
data_frame['PubMed_diff'] = data_frame.apply(PubMed_hits, axis=1)
################################################################################################################################
################################################################################################################################
data_frame['hasPath'] = np.where(data_frame['hasPath']== True, 1, 0)
data_frame['in_same_cluster'] =np.where(data_frame['in_same_cluster']== True, 1, 0)
data_frame['FamilyMatch'] =np.where(data_frame['FamilyMatch']== 'True', 1, 0)
data_frame['ShortPathLen'].fillna(0, inplace = True)
data_frame['Link'] =np.where(data_frame['n_shared_hosts']>= 1, 1, 0)
print (data_frame.shape)
return data_frame
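# Quick toy illustration of the link-prediction scores computed above (hedged,
# independent of the surveillance data): for a graph where virus v1 infects
# host h1 and virus v2 infects hosts h1 and h2,
# G = nx.Graph([('v1', 'h1'), ('v2', 'h1'), ('v2', 'h2')])
# next(nx.jaccard_coefficient(G, [('v1', 'v2')]))  # -> ('v1', 'v2', 0.5)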
#######################################################################################################
#######################################################################################################
def interactive_plot(network, network_name, layout_func = 'fruchterman_reingold'):
plot = Plot(plot_width=800, plot_height=800,
x_range=Range1d(-1.1,1.1), y_range=Range1d(-1.1,1.1))
plot.title.text = network_name
plot.add_tools(HoverTool( tooltips=[('','@index')]),TapTool(),
BoxSelectTool(), BoxZoomTool(),
ResetTool(), PanTool(), WheelZoomTool())
if layout_func == 'fruchterman_reingold':
graph_renderer = graphs.from_networkx(network, nx.fruchterman_reingold_layout, scale=1, center=(0,0))
elif layout_func =='spring':
graph_renderer = graphs.from_networkx(network, nx.spring_layout, scale=1, center=(0,0))
elif layout_func =='circular':
graph_renderer = graphs.from_networkx(network, nx.circular_layout, scale=1, center=(0,0))
elif layout_func == 'kamada':
graph_renderer = graphs.from_networkx(network, nx.kamada_kawai_layout, scale=1, center=(0,0))
elif layout_func == 'spectral':
graph_renderer = graphs.from_networkx(network, nx.spectral_layout, scale=1, center=(0,0))
else:
graph_renderer = graphs.from_networkx(network, nx.fruchterman_reingold_layout, scale=1, center=(0,0))
centrality = nx.algorithms.centrality.betweenness_centrality(network)
""" first element are nodes again """
_, nodes_centrality = zip(*centrality.items())
max_centraliy = max(nodes_centrality)
c_centrality = [7 + 15 * t / max_centraliy
for t in nodes_centrality]
import community #python-louvain
partition = community.best_partition(network)
p_, nodes_community = zip(*partition.items())
community_colors = ['#e41a1c','#377eb8','#4daf4a','#984ea3','#ff7f00','#ffff33','#a65628',
'#b3cde3','#ccebc5','#decbe4','#fed9a6','#ffffcc','#e5d8bd','#fddaec',
'#1b9e77','#d95f02','#7570b3','#e7298a','#66a61e','#e6ab02','#a6761d',
'#666666']
colors = [community_colors[t % len(community_colors)] for t in nodes_community]
graph_renderer.node_renderer.data_source.add(c_centrality, 'centrality')
graph_renderer.node_renderer.data_source.add(colors, 'colors')
graph_renderer.node_renderer.glyph = Circle(size='centrality', fill_color='colors')
graph_renderer.node_renderer.selection_glyph = Circle(size='centrality', fill_color=Spectral4[2])
graph_renderer.node_renderer.hover_glyph = Circle(size=15, fill_color=Spectral4[1])
graph_renderer.edge_renderer.glyph = MultiLine(line_color="#757474", line_alpha=0.2, line_width=2)
graph_renderer.edge_renderer.selection_glyph = MultiLine(line_color=Spectral4[2], line_width=3)
graph_renderer.edge_renderer.hover_glyph = MultiLine(line_color=Spectral4[1], line_width=1)
graph_renderer.selection_policy = graphs.NodesAndLinkedEdges()
graph_renderer.inspection_policy = graphs.NodesOnly()
#graph_renderer.inspection_policy = graphs.EdgesAndLinkedNodes()
plot.renderers.append(graph_renderer)
#output_file("interactive_graphs.html")
return plot
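# Usage sketch (illustrative; 'vv_net' stands for any networkx graph such as
# the virus-virus network built earlier):
# p = interactive_plot(vv_net, 'Virus sharing network', layout_func='kamada')
# output_file('virus_network_interactive.html')
# show(p)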
#######################################################################################################
#######################################################################################################
def get_observed_network_data(Gc, BPnx, i, data_path, virus_df, Species_file_name):
IUCN = pd.read_csv(data_path + Species_file_name)
#! /usr/bin/env python3
import os
import sys
import json
import numpy as np
import pandas as pd
from datetime import datetime, timedelta
from dateutil import tz
if __name__ == '__main__':
import argparse
import matplotlib.pyplot as plt
parser = argparse.ArgumentParser()
parser.add_argument('-i', '--input', type=str, required=True)
parser.add_argument('-s', '--show', action='store_true')
parser.add_argument('-fs', '--fine_sampling', type=int, default=600)
parser.add_argument('-in', '--interpolation', choices=['lin', 'no'], default='lin')
parser.add_argument('-a', '--aggr', choices=['rec', 'uniq'], default='uniq')
args = parser.parse_args()
fine_freq = f'{args.fine_sampling}s'
filein = args.input
base = filein[:filein.rfind('.')]
if not os.path.exists(base): os.mkdir(base)
tok = filein[:filein.rfind('.')].split('_')
dt_fmt = '%Y%m%d-%H%M%S'
try:
start = datetime.strptime(tok[-2], dt_fmt)
stop = datetime.strptime(tok[-1], dt_fmt)
except:
start = datetime.strptime(tok[-3], dt_fmt)
stop = datetime.strptime(tok[-2], dt_fmt)
try:
df = pd.read_csv(filein, sep=';', usecols=['mac-address', 'date_time', 'station_name', 'kind'], parse_dates=['date_time'], index_col='date_time', engine='c')
df = df.rename(columns={'mac-address':'mac_address'})
old_format = True
except:
# new format support
old_format = False
df = pd.read_csv(filein, sep=';', usecols=['mac_address', 'date_time', 'station_name', 'kind'], parse_dates=['date_time'], index_col='date_time', engine='c')
df['wday'] = [ t.strftime('%a') for t in df.index ]
df['date'] = df.index.date
df['time'] = df.index.time
if 1 and old_format:
print(f'**** WARNING FILTERING FERRARA STATIONS')
df = df[ df.station_name.str.startswith('Ferrara-') ]
try:
df['station_id'] = df.station_name.str.split('-', expand=True)[1]
except:
#old format support
df['station_id'] = df.station_name.str.extract(r'.*\((\d)\)')
#print(df)
print(df[['wday', 'date', 'station_name', 'station_id']])
"""
Perform device id counting with fine temporal scale
"""
tnow = datetime.now()
stats = pd.DataFrame(index=pd.date_range("00:00", "23:59:59", freq=fine_freq).time)
for (station, date), dfg in df.groupby(['station_id', 'date']):
#print(station, date)
if args.aggr == 'uniq':
s = pd.Series(dfg['mac_address'], index=dfg.index)
#!python3
"""
Module for creating the Excel reports from gdoc and local data
"""
import numpy as np
import pandas as pd
import os
from datetime import datetime
from modules.filework import safe2int
# ----------------------------------------------------------------------
# Some helper functions for Excel writing
def safe_write(ws, r, c, val, f=None, n_a="", make_float=False):
"""calls the write method of worksheet after first screening for NaN"""
if not pd.isnull(val):
if make_float:
try:
val = float(val)
except:
pass
if f:
ws.write(r, c, val, f)
else:
ws.write(r, c, val)
elif n_a:
if f:
ws.write(r, c, n_a, f)
else:
ws.write(r, c, n_a)
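# Usage sketch (illustrative; 'row', 'col', 'student' and 'formats' are
# hypothetical names): write a possibly-missing value with a fallback label
# instead of leaving the cell blank.
# safe_write(ws, row, col, student.get('efc'), f=formats['dollar_fmt'], n_a='TBD')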
def write_array(ws, r, c, val, f=None):
"""speciality function to write an array. Assumed non-null"""
if f:
ws.write_formula(r, c, val, f)
else:
ws.write_formula(r, c, val)
def create_formats(wb, cfg_fmt, f_db={}):
"""Takes a workbook and (likely empty) database to fill with formats"""
for name, db in cfg_fmt.items():
f_db[name] = wb.add_format(db)
return f_db
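# Usage sketch (illustrative; the cfg_fmt mapping is assumed to hold plain
# xlsxwriter add_format() dicts, keyed by the names used in this module):
# fmt_cfg = {'p_header': {'bold': True, 'text_wrap': True},
#            'dollar_fmt': {'num_format': '$#,##0.00'}}
# format_db = create_formats(wb, fmt_cfg)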
def make_excel_indices():
"""returns an array of Excel header columns from A through ZZ"""
import string # We're not currently using this function, so leaving import here so as not to forget
alphabet = string.ascii_uppercase
master = list(alphabet)
for i in range(len(alphabet)):
master.extend([alphabet[i] + x for x in alphabet])
return master
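# Illustrative output: make_excel_indices()[:3] == ['A', 'B', 'C'], the last
# entry is 'ZZ', and the list holds 26 + 26 * 26 = 702 column labels.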
def _do_simple_sheet(writer, df, sheet_name, na_rep, index=True, f=None):
"""Helper function to write cells and bypass the Pandas write"""
wb = writer.book
ws = wb.add_worksheet(sheet_name)
if index:
safe_write(ws, 0, 0, df.index.name, f=f, n_a=na_rep)
for col, label in enumerate(df.columns):
safe_write(ws, 0, col + 1 * index, label, f=f, n_a=na_rep)
row = 1
for i, data in df.iterrows():
if index:
safe_write(ws, row, 0, i, f=None, n_a=na_rep)
for col_num, col_name in enumerate(df.columns):
safe_write(ws, row, col_num, data[col_name], f=None, n_a=na_rep)
row += 1
return (wb, ws, sheet_name, len(df) + 1)
def _do_initial_output(writer, df, sheet_name, na_rep, index=True):
"""Helper function to push data to xlsx and return formatting handles"""
df.to_excel(writer, sheet_name=sheet_name, na_rep=na_rep, index=index)
wb = writer.book
ws = writer.sheets[sheet_name]
max_row = len(df) + 1
return (wb, ws, sheet_name, max_row)
def create_summary_tab(writer, config, format_db, do_campus, do_counselor=False):
"""
Adds the Summary tab to the output. If summarizing by counselor,
do_counselor will be a list of counselor names
"""
wb = writer.book
sum_label = "Counselor_Summary" if do_counselor else "Summary"
ws = wb.add_worksheet(sum_label)
for c, column in enumerate(config["columns"]):
for label, fmt in column.items():
ws.write(0, c, label, format_db[fmt])
# Select summary options--campus (for whole network) or strategy/counselor
if do_campus:
row_labels = config["campuses"]
s_name = "Campus"
elif do_counselor:
row_labels = do_counselor
s_name = "Counselors"
else:
row_labels = config["strats"]
s_name = "Strats"
for r, label in enumerate(row_labels, start=1):
rx = r + 1 # Excel reference is 1-indexed
ws.write(r, 0, label) # field to summarize by
ws.write(r, 1, f"=COUNTIF({s_name},A{rx})") # student column
ws.write(r, 2, f'=IF(B{rx}>0,SUMIF({s_name},A{rx},MGRs)/B{rx},"")') # TGR
ws.write(r, 3, f'=IF(A{rx}>0,SUMIFS(Schol4YR,{s_name},$A{rx}),"")') # Total 4yr
ws.write(r, 4, f'=IF(B{rx}>0,D{rx}/B{rx},"")') # Avg 4yr
ws.write(r, 5, f'=IF(B{rx}>0,COUNTIFS(PGR,"<>TBD",{s_name},$A{rx})/$B{rx},"")') # % decided
ws.write( # % of awards collected
r,
6,
f'=IF(AND(B{rx}>0,SUMIFS(Accepts,{s_name},$A{rx})),SUMIFS(UAwards,{s_name},$A{rx})/SUMIFS(Accepts,{s_name},$A{rx}),"")',
)
ws.write( # PGR
r,
7,
f'=IF(AND(B{rx}>0,F{rx}>0),SUMIF({s_name},A{rx},PGR)/COUNTIFS(PGR,"<>TBD",{s_name},$A{rx}),"")',
)
ws.write( # PGR-TGR
r,
8,
f'=IF(AND(B{rx}>0,F{rx}>0),SUMIF({s_name},A{rx},PGRTGR)/COUNTIFS(PGR,"<>TBD",{s_name},$A{rx}),"")',
)
ws.write( # % of students w/in 10% of TGR
r,
9,
f'=IF(AND(B{rx}>0,F{rx}>0),COUNTIFS({s_name},A{rx},PGRin10,"Yes")/COUNTIFS(PGRin10,"<>TBD",{s_name},$A{rx}),"")',
)
ws.write( # % of students w/ award at choice
r,
10,
f'=IF(AND(B{rx}>0,F{rx}>0),COUNTIFS({s_name},A{rx}, OOP,"<>TBD")/COUNTIFS(PGR,"<>TBD",{s_name},$A{rx}),"")',
)
ws.write( # Avg unmet need at choice college
r,
11,
f'=IF(COUNTIFS({s_name},A{rx}, UMN,"<>TBD")>0,SUMIFS(UMN,{s_name},A{rx}, UMN,"<>TBD")/COUNTIFS({s_name},A{rx}, UMN,"<>TBD"),"")',
)
# Summary row
fr = 2
lr = len(row_labels) + 1 # This is a little tricky--it's the write location and last value row to sum
lrx = lr + 1
ws.write(lr, 1, f'=SUM(B{fr}:B{lr})', format_db["sum_centered_integer"])
ws.write(lr, 2, f'=SUMPRODUCT(B{fr}:B{lr},C{fr}:C{lr})/B{lrx}', format_db["sum_percent"])
ws.write(lr, 3, f'=SUM(D{fr}:D{lr})', format_db["sum_dollar"])
ws.write(lr, 4, f'=IF(B{lrx}>0,D{lrx}/B{lrx},"")', format_db["sum_dollar"])
ws.write(lr, 5, f'=SUMPRODUCT(B{fr}:B{lr},F{fr}:F{lr})/B{lrx}', format_db["sum_percent"]) # % decided
ws.write(lr, 6, f'=SUM(UAwards)/SUM(Accepts)', format_db["sum_percent"])
ws.write(lr, 7, f'=SUMPRODUCT(B{fr}:B{lr},H{fr}:H{lr})/B{lrx}', format_db["sum_percent"])
ws.write(lr, 8, f'=SUMPRODUCT(B{fr}:B{lr},I{fr}:I{lr})/B{lrx}', format_db["sum_percent"])
ws.write(lr, 9, f'=SUMPRODUCT(B{fr}:B{lr},J{fr}:J{lr})/B{lrx}', format_db["sum_percent"])
ws.write(lr,10, f'=SUMPRODUCT(B{fr}:B{lr},K{fr}:K{lr})/B{lrx}', format_db["sum_percent"])
ws.write(lr,11, f'=SUMPRODUCT(B{fr}:B{lr},L{fr}:L{lr})/B{lrx}', format_db["sum_dollar"])
# Final formatting
ws.set_column("A:A", 8.09, format_db["left_normal_text"])
ws.set_column("B:B", 8.09, format_db["centered_integer"])
ws.set_column("C:C", 9.55, format_db["single_percent_centered"])
ws.set_column("D:D", 13.73, format_db["dollar_no_cents_fmt"])
ws.set_column("E:E", 12.73, format_db["dollar_no_cents_fmt"])
ws.set_column("F:F", 9.73, format_db["single_percent_centered"])
ws.set_column("G:G", 8.09, format_db["single_percent_centered"])
ws.set_column("H:I", 6.36, format_db["single_percent_centered"])
ws.set_column("J:K", 8.09, format_db["single_percent_centered"])
ws.set_column("L:L", 10.91, format_db["dollar_no_cents_fmt"])
if not do_counselor:
ws.activate()
def create_awards_tab(writer, df, format_db):
"""Adds the Awards tab to the output"""
df.drop(columns=["Unique", "Award", "MoneyCode"], inplace=True)
wb, ws, sn, max_row = _do_simple_sheet(writer, df, "AwardData", "", index=False)
ws.set_column("A:B", 8, None, {"hidden": 1})
ws.set_row(0, 75, format_db["p_header"])
# Add the calculated columns:
ws.write(0, 17, "Unique")
ws.write(0, 18, "Award")
for r in range(1, max_row):
ws.write(
r,
17,
f"=IF(OR(A{r+1}<>A{r},B{r+1}<>B{r}),1,0)",
format_db["centered_integer"],
)
ws.write(
r,
18,
f'=IF(OR(AND(R{r+1}=1,ISNUMBER(M{r+1})),AND(R{r+1}=0,ISNUMBER(M{r+1}),M{r}=""),AND(R{r+1}=1,ISNUMBER(N{r+1})),AND(R{r+1}=0,ISNUMBER(N{r+1}),N{r}=""),AND(R{r+1}=1,ISNUMBER(O{r+1})),AND(R{r+1}=0,ISNUMBER(O{r+1}),O{r}="")),1,0)',
format_db["centered_integer"],
)
names = {
"Students": "A",
"NCESs": "B",
"Names": "G",
"Results": "H",
"DataA": "K",
"DataB": "L",
"DataC": "M",
"DataD": "N",
"DataF": "O",
"DataW": "P",
"Unique": "R",
"Award": "S",
}
for name, col in names.items():
wb.define_name(name, "=" + sn + "!$" + col + "$2:$" + col + "$" + str(max_row))
max_col = max(names.values())
ws.autofilter("A1:" + max_col + "1")
ws.freeze_panes(1, 3)
def create_students_tab(writer, df, format_db, hide_campus=False):
"""Adds the Students tab to the output"""
# wb, ws, sn, max_row = _do_initial_output(writer, df, "Students", "N/A", index=False)
wb, ws, sn, max_row = _do_simple_sheet(
writer, df.iloc[:, :12], "Students", "N/A", index=False, f=format_db["p_header"]
)
# Add the calculated columns:
ws.write(0, 12, "Acceptances", format_db["p_header_y"])
ws.write(0, 13, "Unique Awards", format_db["p_header_y"])
ws.write(0, 14, "% of awards collected", format_db["p_header_y"])
ws.write(0, 15, "Total grants & scholarships (1 yr value)", format_db["p_header_y"])
ws.write(0, 16, "Total grants & scholarships (4 yr value)", format_db["p_header_y"])
ws.write(0, 17, "College Choice", format_db["p_header_o"])
ws.write(0, 18, "Ambitious Postsecondary Pathway choice", format_db["p_header_o"])
ws.write(0, 19, "Other College Choice", format_db["p_header_o"])
ws.write(0, 20, "PGR for choice school", format_db["p_header_y"])
ws.write(0, 21, "PGR-TGR", format_db["p_header_y"])
ws.write(0, 22, "PGR within 10% of TGR?", format_db["p_header_y"])
ws.write(0, 23, "Reason for not meeting TGR", format_db["p_header_o"])
ws.write(0, 24, "Out of Pocket at Choice", format_db["p_header_o"])
ws.write(0, 25, "Unmet need", format_db["p_header_o"])
ws.write(
0, 26, "Exceeds Goal? (no more than 3000 over EFC)", format_db["p_header_o"]
)
ws.write(
0,
27,
"Comments (use for undermatching and affordability concerns)",
format_db["p_header_o"],
)
for r in range(1, max_row):
ws.write(
r,
12,
f'=COUNTIFS(Students,B{r+1},Results,"Accepted!",Unique,1)+COUNTIFS(Students,B{r+1},Results,"Choice!",Unique,1)',
format_db["centered_integer"],
)
ws.write(
r, 13, f"=COUNTIFS(Students,B{r+1},Award,1)", format_db["centered_integer"]
)
ws.write(
r,
14,
f"=IF(M{r+1}>0,N{r+1}/M{r+1},0)",
format_db["single_percent_centered"],
)
ws.write(
r, 15, f"=SUMIFS(DataC,Students,B{r+1},Award,1)", format_db["dollar_fmt"]
)
ws.write(r, 16, f"=P{r+1}*4", format_db["dollar_fmt"])
safe_write(ws, r, 17, df["College Choice"].iloc[r - 1])
safe_write(ws, r, 18, df["Ambitious Postsecondary Pathway choice"].iloc[r - 1])
safe_write(ws, r, 19, df["Other College Choice"].iloc[r - 1])
safe_write(
ws,
r,
20,
df["PGR for choice school"].iloc[r - 1],
n_a="TBD",
f=format_db["single_percent_centered"],
make_float=True,
)
safe_write(
ws,
r,
21,
df["PGR-TGR"].iloc[r - 1],
n_a="TBD",
f=format_db["single_percent_centered"],
make_float=True,
)
safe_write(
ws,
r,
22,
df["PGR within 10% of TGR?"].iloc[r - 1],
n_a="TBD",
f=format_db["centered"],
)
safe_write(ws, r, 23, df["Reason for not meeting TGR"].iloc[r - 1])
safe_write(
ws,
r,
24,
df["Out of Pocket at Choice (pulls from Award data tab weekly)"].iloc[
r - 1
],
n_a="TBD",
f=format_db["dollar_no_cents_fmt"],
make_float=True,
)
safe_write(
ws,
r,
25,
f'=IF(AND(ISNUMBER(Y{r+1}),ISNUMBER(D{r+1})),MAX(Y{r+1}-D{r+1},0),"TBD")',
n_a="TBD",
)
safe_write(
ws,
r,
26,
df["Exceeds Goal? (no more than 3000 over EFC)"].iloc[r - 1],
n_a="TBD",
)
safe_write(
ws,
r,
27,
df["Comments (use for undermatching and affordability concerns)"].iloc[
r - 1
],
)
# format data columns
ws.set_column("A:A", 9, format_db["left_normal_text"]) # , {"hidden", 1})
ws.set_column("B:B", 9)
ws.set_column("C:C", 34)
ws.set_column("E:E", 9, format_db["single_percent_centered"])
# ws.set_column("D:L", 9)
ws.set_column("P:Q", 13)
ws.set_column("R:R", 35)
ws.set_column("S:T", 22)
ws.set_column("U:U", 9)
ws.set_column("V:V", 7)
ws.set_column("W:W", 10)
ws.set_column("X:X", 23)
ws.set_column("Y:Y", 9)
ws.set_column("Z:Z", 8)
ws.set_column("AA:AA", 14)
ws.set_column("AB:AB", 33)
ws.set_row(0, 60)
names = {
"Campus": "A",
"SIDs": "B",
"LastFirst": "C",
"EFCs": "D",
"MGRs": "E",
"GPAs": "F",
"SATs": "G",
"Counselors": "H",
"Advisors": "I",
"Strats": "J",
"Accepts": "M",
"UAwards": "N",
"Schol4Yr": "Q",
"CollegeChoice": "R",
"PGR": "U",
"PGRTGR": "V",
"PGRin10": "W",
"OOP": "Y",
"UMN": "Z",
"Affordable": "AA",
}
for name, col in names.items():
wb.define_name(name, "=" + sn + "!$" + col + "$2:$" + col + "$" + str(max_row))
ws.autofilter("A1:AB" + "1")
ws.freeze_panes(1, 3)
def create_college_money_tab(writer, df, format_db):
"""Creates AllColleges from static file"""
wb, ws, sn, max_row = _do_initial_output(writer, df, "CollegeMoneyData", "N/A")
ws.set_column("D:E", 7, format_db["single_percent_centered"])
ws.set_column("B:B", 40)
ws.set_column("C:C", 22)
ws.set_column("F:L", 7)
names = {
"AllCollegeNCES": "A",
"AllCollegeMoneyCode": "H",
"AllCollegeLocation": "M",
}
for name, col in names.items():
wb.define_name(name, "=" + sn + "!$" + col + "$2:$" + col + "$" + str(max_row))
max_col = max(names.values())
ws.autofilter("A1:" + max_col + "1")
ws.hide()
# ----------------------------------------------------------------------------
def create_report_tables(dfs, campus, config, debug):
# First, create a dataframe for the "Award data" tab
dfs["award_report"] = build_award_df(dfs, campus, config, debug)
# Second, create a dataframe for the "Students" tab
# This one will have extra columns if the Decisions tab exists
dfs["student_report"] = build_student_df(dfs, campus, config, debug)
def create_excel(dfs, campus, config, debug):
"""Will create Excel reports for sharing details from Google Docs"""
if debug:
print("Creating Excel report for {}".format(campus), flush=True)
dfs["award_report"].to_csv("award_table_for_excel.csv", index=False)
dfs["student_report"].to_csv("student_table_for_excel.csv", index=False)
# Create the excel:
date_string = datetime.now().strftime("%m_%d_%Y")
fn = (
config["report_filename"].replace("CAMPUS", campus).replace("DATE", date_string)
)
writer = pd.ExcelWriter(
os.path.join(config["report_folder"], fn), engine="xlsxwriter"
)
wb = writer.book
formats = create_formats(wb, config["excel_formats"])
# Award data tab
create_awards_tab(writer, dfs["award_report"], formats)
# Students tab
create_students_tab(
writer, dfs["student_report"], formats, hide_campus=(campus == "All")
)
# Summary tab
create_summary_tab(
writer, config["summary_settings"], formats, do_campus=(campus == "All")
)
# Summary tab with counselor summaries
counselor_list = dfs["student_report"]["Counselor"].unique()
counselor_list = sorted(counselor_list) if len(counselor_list) else ["TBD",]
print(f"Counselors are {counselor_list}")
create_summary_tab(
writer, config["summary_settings"], formats, do_campus=False,
do_counselor=counselor_list
)
# Hidden college lookup
create_college_money_tab(writer, dfs["college"], formats)
# OptionsReport (maybe don't create in Excel?)
writer.save()
def build_student_df(dfs, campus, config, debug):
"""Builds a dataframe for the student fields"""
report_student_fields = config["report_student_fields"]
report_student_sorts = config["report_student_sorts"]
all_student_fields = []
live_student_fields = [] # to hold the excel names
live_student_targets = [] # to hold the live names
# live_decision_fields = [] # to hold excel names for decision tabl
# live_decision_targets = [] # to hold the live names
complex_student_fields = []
for column in report_student_fields:
# Each column will be a dict with a single element
# The key will be the Excel column name and the value the source
# from the live (EFC) table or other (lookup) table
this_key = list(column.keys())[0]
this_value = list(column.values())[0]
all_student_fields.append(this_key)
if ":" in this_value:
complex_student_fields.append((this_key, this_value))
else:
live_student_fields.append(this_key)
live_student_targets.append(this_value)
if live_student_targets: # fields here will be straight pulls from live df
# These 2 lines are necessary to handle single campus reports
if "Campus" not in dfs["live_efc"].columns:
dfs["live_efc"]["Campus"] = campus
student_df = dfs["live_efc"][live_student_targets]
student_df = student_df.rename(
columns=dict(zip(live_student_targets, live_student_fields))
)
else:
print("Probably an error: no report columns pulling from live data")
# Second, pull columns that are lookups from other tables and append
# We skip the "special" ones for now because they might calculate off lookups
for column, target in (
f for f in complex_student_fields if not f[1].startswith("SPECIAL")
):
# parse the target and then call the appropriate function
# to add a column to award_df
# if debug:
# print(f"{column} w spec({target})")
tokens = target.split(sep=":")
if tokens[0] == "INDEX":
student_df[column] = dfs["live_efc"].index
elif tokens[0] == "ROSTER":
student_df[column] = dfs["live_efc"].index.map(
lambda x: dfs["ros"].loc[x, tokens[1]]
)
elif tokens[0] == "DECISION":
if "live_decision" in dfs:
student_df[column] = dfs["live_efc"].index.map(
lambda x: dfs["live_decision"].loc[x, tokens[1]]
)
for column, target in (
f for f in complex_student_fields if f[1].startswith("SPECIAL")
):
# if debug:
# print(f"{column} w spec({target})")
tokens = target.split(sep=":")
student_df[column] = student_df.apply(
_do_special_award, args=(column, tokens[1:]), axis=1
)
student_df = student_df[[x for x in all_student_fields if not x.startswith("x")]]
# These generators work on a list of single pair dicts
sort_terms = [list(item.keys())[0] for item in report_student_sorts]
sort_order = [list(item.values())[0] for item in report_student_sorts]
# recast the EFC as numbers where possible:
student_df.EFC = student_df.EFC.apply(lambda x: pd.to_numeric(x, errors="ignore"))
return student_df.sort_values(by=sort_terms, ascending=sort_order)
def build_award_df(dfs, campus, config, debug):
"""Builds a dataframe for the award fields"""
# First, start the df for the items that are straight pulls from live_data
report_award_fields = config["report_award_fields"]
report_award_sorts = config["report_award_sorts"]
all_award_fields = []
live_award_fields = [] # to hold the excel names
live_award_targets = [] # to hold the live names
complex_award_fields = []
for column in report_award_fields:
# Each column will be a dict with a single element
# The key will be the Excel column name and the value the source
# from the live table or other (lookup) table
this_key = list(column.keys())[0]
this_value = list(column.values())[0]
all_award_fields.append(this_key)
if ":" in this_value:
complex_award_fields.append((this_key, this_value))
else:
live_award_fields.append(this_key)
live_award_targets.append(this_value)
if live_award_targets: # fields here will be straight pulls from live df
award_df = dfs["live_award"][live_award_targets]
award_df = award_df.rename(
columns=dict(zip(live_award_targets, live_award_fields))
)
else:
print("Probably an error: no report columns pulling from live data")
# Quick detour: make a calculated index for app table lookups:
award_df["xAppIndex"] = (
award_df["NCESid"].astype(str) + ":" + award_df["SID"].astype(str)
)
# Second, pull columns that are lookups from other tables and append
# We skip the "special" ones for now because they might calculate off lookups
for column, target in (
f for f in complex_award_fields if not f[1].startswith("SPECIAL")
):
# parse the target and then call the appropriate function
# to add a column to award_df
if debug:
print(f"{column} w spec({target})")
tokens = target.split(sep=":")
if tokens[0] == "ROSTER":
award_df[column] = dfs["live_award"][tokens[1]].apply(
lambda x: dfs["ros"].loc[x, tokens[2]]
)
elif tokens[0] == "COLLEGE":
award_df[column] = dfs["live_award"][tokens[1]].apply(
lambda x: np.nan
if | pd.isnull(x) | pandas.isnull |
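# Note: the Students/Summary tab code above calls a `safe_write` helper that is not shown in
# this excerpt. The following is only a minimal sketch of what such a helper might look like,
# inferred from its call sites (the n_a, f and make_float keyword arguments); it is an
# assumption, not the original implementation.
def safe_write(ws, row, col, value, n_a="", f=None, make_float=False):
    """Write a value to the worksheet, falling back to n_a for missing data."""
    if value is None or (isinstance(value, float) and np.isnan(value)):
        value = n_a  # placeholder for missing cells
    elif make_float:
        try:
            value = float(value)  # coerce numeric strings so Excel number formats apply
        except (TypeError, ValueError):
            pass
    if f is None:
        ws.write(row, col, value)
    else:
        ws.write(row, col, value, f)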
import numpy as np
import pandas as pd
from typing import Union, List
from scipy.special import binom
from scipy.spatial import ConvexHull
from tqdm import tqdm
from ._containment import _is_in_simplex
from ._helper import *
__all__ = ['_pointwisedepth', '_samplepointwisedepth']
def _pointwisedepth(
data: pd.DataFrame,
to_compute: Union[list, pd.Index]=None,
containment='simplex',
quiet=True
) -> pd.Series:
"""
Compute pointwise depth for n points in R^d, where data is an n x d matrix of points. If to_compute is not None,
only compute depth for the given points (should be a subset of data.index).
Parameters:
----------
data: pd.DataFrame
n x d DataFrame, where we have n points in d dimensional space.
to_compute: list, pd.Index
The particular points (indices) we would like to calculate depth for. If None, we calculate depth for all points.
containment: str
Definition of containment
Returns:
----------
pd.Series: Depth values for the given points with respect to the data. Index of Series are indices of points in the original data, and the values are the depths
"""
n, d = data.shape
depths = []
if to_compute is None:
to_compute = data.index
if containment == 'simplex':
for time in tqdm(to_compute, disable=quiet):
S_nj = 0
point = data.loc[time, :]
subseq = _subsequences(list(data.drop(time, axis=0).index), d + 1)
for seq in subseq:
S_nj += _is_in_simplex(simplex_points=
np.array(data.loc[seq, :]), point=np.array(point))
depths.append(S_nj / binom(n, d + 1))
elif containment == 'l1':
return _L1_depth(data=data, to_compute=to_compute)
elif containment == 'mahalanobis':
return _mahalanobis_depth(data=data, to_compute=to_compute)
elif containment == 'oja':
return _oja_depth(data=data, to_compute=to_compute)
else: # Probably will be more in the future
raise ValueError(f'{containment} is not a valid containment measure. ')
return | pd.Series(index=to_compute, data=depths) | pandas.Series |
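# A minimal usage sketch (not part of the source module): simplicial depth for a few points
# of a small random sample in R^2. Variable names here are illustrative only.
rng = np.random.default_rng(0)
toy = pd.DataFrame(rng.normal(size=(20, 2)), columns=["x", "y"])
toy_depths = _pointwisedepth(toy, to_compute=toy.index[:5], containment="simplex")
print(toy_depths)  # pd.Series indexed by the first five row labels, values in [0, 1]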
"""The module provides classes and functions responsible for loading and storing spectroscopy data.
Class Region contains the data for one region.
Class RegionCollection stores a number of Region objects
"""
import os
import ntpath
import logging
import copy
import csv
import pandas as pd
import numpy as np
from specqp import helpers
datahandler_logger = logging.getLogger("specqp.datahandler") # Creating child logger
DATA_FILE_TYPES = (
"scienta",
"specs",
"csv"
)
def load_calibration_curves(filenames, columnx='Press_03_value', columny='Press_05_value'):
"""Reads file or files using provided name(s). Checks for file existance etc.
:param filenames: str or sequence: filepath(s)
:param columns: str or sequence: columns to plot on y-axis
:return:
"""
calibration_data = {}
if type(filenames) == str or (not type(filenames) == str and not helpers.is_iterable(filenames)):
filenames = [filenames]
if type(columnx) == str or (not type(columnx) == str and not helpers.is_iterable(columnx)):
columnx = [columnx]
if type(columny) == str or (not type(columny) == str and not helpers.is_iterable(columny)):
columny = [columny]
if len(columnx) != len(filenames):
columnx = [columnx[0]] * len(filenames)
if len(columny) != len(filenames):
columny = [columny[0]] * len(filenames)
for i, filename in enumerate(filenames):
if os.path.isfile(filename):
try:
with open(filename, 'r') as f:
df = | pd.read_csv(f, sep='\t') | pandas.read_csv |
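# Illustrative call (file names are assumptions) showing how the loader above is intended to
# be used once the body is complete -- this excerpt cuts off inside the reading loop. A single
# columnx/columny name is broadcast to all files.
calibration_curves = load_calibration_curves(
    ["calibration_run1.dat", "calibration_run2.dat"],
    columnx="Press_03_value",
    columny="Press_05_value",
)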
import time
import threading
import argparse
import tushare as ts
import numpy as np
import pandas as pd
from pandas import datetime as dt
from tqdm import tqdm
from utils import *
with open('../../tushare_token.txt', 'r') as f:
token = f.readline()
ts.set_token(token)
tushare_api = ts.pro_api()
# 概念分类表
df_all = tushare_api.concept(src='ts')
# 概念股明细表
df = | pd.DataFrame() | pandas.DataFrame |
##################################################
### import ###
##################################################
# basic lib
from ast import literal_eval
import itertools
import json
import numpy as np
import os
import pandas as pd
from pandarallel import pandarallel
pandarallel.initialize(use_memory_fs=False)
from scipy import ndimage
from scipy.stats import entropy
import sys
from googletrans import Translator
# logging lib
import logging
import src.log as log
# time lib
from time import time
# multiprocess lib
import multiprocessing as mp
PROCESS_NUM = mp.cpu_count()-2
# custom lib
import src.utils as utils
import src.aggregator as aggregator
def cal_score(aggregated_df, gold_df):
ans_df = aggregated_df.loc[aggregated_df['ans'] == True][['id', 'candidates', 'prob']] # answered candidates
fil_df = aggregated_df.loc[aggregated_df['ans'] == False][['id', 'candidates', 'prob']] # filtered candidates
n_ans = len(ans_df)
n_aggregated = len(aggregated_df)
n_gold = len(gold_df)
if fil_df.empty:
FN_df = pd.DataFrame(columns=aggregated_df.columns)
TN_df = pd.DataFrame(columns=aggregated_df.columns)
n_TN = 0
else:
FN_df = fil_df.loc[fil_df['id'].isin(gold_df['id'])] # false negative (filtered out answers)
TN_df = fil_df.loc[~fil_df['id'].isin(gold_df['id'])] # true negative (correctly filtered)
n_TN = len(TN_df)
if ans_df.empty:
FP_df = pd.DataFrame(columns=ans_df.columns)
TP_df = pd.DataFrame(columns=ans_df.columns)
FA_df = pd.DataFrame(columns=ans_df.columns)
n_TP = 0
fil_p, fil_r, fil_f1, align_p, align_r, align_f1 = 0, 0, 0, 0, 0, 0
else:
FP_df = ans_df.loc[~ans_df['id'].isin(gold_df['id'])] # false positive (answers which are not in gold)
hit_df = ans_df.loc[ans_df['id'].isin(gold_df['id'])] # answers which are included in gold
n_hit = len(hit_df)
if n_hit == 0:
fil_p = 0
fil_r = 0
fil_f1 = 0
else:
fil_p = n_hit/n_ans
fil_r = n_hit/n_gold
fil_f1 = f1(fil_p, fil_r)
merge_df = | pd.merge(gold_df, hit_df, left_on='id', right_on='id') | pandas.merge |
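# `f1` is called above but not defined in this excerpt; presumably it is the usual harmonic
# mean of precision and recall. A minimal stand-in consistent with that usage:
def f1(precision, recall):
    """Harmonic mean of precision and recall; returns 0 when both are 0."""
    if precision + recall == 0:
        return 0.0
    return 2 * precision * recall / (precision + recall)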
import pandas as pd
import numpy as np2
def build(args):
# Get medians
def get_medians(df_p, last):
df_res = df_p.iloc[-last:].groupby(["param"]).median().reset_index()["median"][0]
return df_res
def medians_params(df_list, age_group, last):
params_def = ["age", "beta", "IFR", "RecPeriod", "alpha", "sigma"]
params_val = [
age_group,
get_medians(df_list[0], last),
get_medians(df_list[1], last),
get_medians(df_list[2], last),
get_medians(df_list[3], last),
get_medians(df_list[4], last),
]
res = dict(zip(params_def, params_val))
return res
params_data_BOG = pd.read_csv(args.params_data_path, encoding="unicode_escape", delimiter=",")
# Ages 0-19
young_ages_params = pd.DataFrame(params_data_BOG[params_data_BOG["age_group"] == "0-19"])
young_ages_beta = pd.DataFrame(young_ages_params[young_ages_params["param"] == "contact_rate"])
young_ages_IFR = pd.DataFrame(young_ages_params[young_ages_params["param"] == "IFR"])
young_ages_RecPeriod = pd.DataFrame(young_ages_params[young_ages_params["param"] == "recovery_period"])
young_ages_alpha = pd.DataFrame(young_ages_params[young_ages_params["param"] == "report_rate"])
young_ages_sigma = pd.DataFrame(young_ages_params[young_ages_params["param"] == "relative_asymp_transmission"])
young_params = [young_ages_beta, young_ages_IFR, young_ages_RecPeriod, young_ages_alpha, young_ages_sigma]
# Ages 20-39
youngAdults_ages_params = pd.DataFrame(params_data_BOG[params_data_BOG["age_group"] == "20-39"])
youngAdults_ages_beta = pd.DataFrame(youngAdults_ages_params[youngAdults_ages_params["param"] == "contact_rate"])
youngAdults_ages_IFR = pd.DataFrame(youngAdults_ages_params[youngAdults_ages_params["param"] == "IFR"])
youngAdults_ages_RecPeriod = pd.DataFrame(
youngAdults_ages_params[youngAdults_ages_params["param"] == "recovery_period"]
)
youngAdults_ages_alpha = pd.DataFrame(youngAdults_ages_params[youngAdults_ages_params["param"] == "report_rate"])
youngAdults_ages_sigma = pd.DataFrame(
youngAdults_ages_params[youngAdults_ages_params["param"] == "relative_asymp_transmission"]
)
youngAdults_params = [
youngAdults_ages_beta,
youngAdults_ages_IFR,
youngAdults_ages_RecPeriod,
youngAdults_ages_alpha,
youngAdults_ages_sigma,
]
# Ages 40-49
adults_ages_params = pd.DataFrame(params_data_BOG[params_data_BOG["age_group"] == "40-49"])
adults_ages_beta = pd.DataFrame(adults_ages_params[adults_ages_params["param"] == "contact_rate"])
adults_ages_IFR = pd.DataFrame(adults_ages_params[adults_ages_params["param"] == "IFR"])
adults_ages_RecPeriod = pd.DataFrame(adults_ages_params[adults_ages_params["param"] == "recovery_period"])
adults_ages_alpha = pd.DataFrame(adults_ages_params[adults_ages_params["param"] == "report_rate"])
adults_ages_sigma = pd.DataFrame(adults_ages_params[adults_ages_params["param"] == "relative_asymp_transmission"])
adults_params = [adults_ages_beta, adults_ages_IFR, adults_ages_RecPeriod, adults_ages_alpha, adults_ages_sigma]
# Ages 50-59
seniorAdults_ages_params = pd.DataFrame(params_data_BOG[params_data_BOG["age_group"] == "50-59"])
seniorAdults_ages_beta = pd.DataFrame(seniorAdults_ages_params[seniorAdults_ages_params["param"] == "contact_rate"])
seniorAdults_ages_IFR = pd.DataFrame(seniorAdults_ages_params[seniorAdults_ages_params["param"] == "IFR"])
seniorAdults_ages_RecPeriod = pd.DataFrame(
seniorAdults_ages_params[seniorAdults_ages_params["param"] == "recovery_period"]
)
seniorAdults_ages_alpha = pd.DataFrame(seniorAdults_ages_params[seniorAdults_ages_params["param"] == "report_rate"])
seniorAdults_ages_sigma = pd.DataFrame(
seniorAdults_ages_params[seniorAdults_ages_params["param"] == "relative_asymp_transmission"]
)
seniorAdults_params = [
seniorAdults_ages_beta,
seniorAdults_ages_IFR,
seniorAdults_ages_RecPeriod,
seniorAdults_ages_alpha,
seniorAdults_ages_sigma,
]
# Ages 60-69
senior_ages_params = pd.DataFrame(params_data_BOG[params_data_BOG["age_group"] == "60-69"])
senior_ages_beta = pd.DataFrame(senior_ages_params[senior_ages_params["param"] == "contact_rate"])
senior_ages_IFR = pd.DataFrame(senior_ages_params[senior_ages_params["param"] == "IFR"])
senior_ages_RecPeriod = pd.DataFrame(senior_ages_params[senior_ages_params["param"] == "recovery_period"])
senior_ages_alpha = pd.DataFrame(senior_ages_params[senior_ages_params["param"] == "report_rate"])
senior_ages_sigma = pd.DataFrame(senior_ages_params[senior_ages_params["param"] == "relative_asymp_transmission"])
senior_params = [senior_ages_beta, senior_ages_IFR, senior_ages_RecPeriod, senior_ages_alpha, senior_ages_sigma]
# Ages 70+
elderly_ages_params = pd.DataFrame(params_data_BOG[params_data_BOG["age_group"] == "70-90+"])
elderly_ages_beta = pd.DataFrame(elderly_ages_params[elderly_ages_params["param"] == "contact_rate"])
elderly_ages_IFR = pd.DataFrame(elderly_ages_params[elderly_ages_params["param"] == "IFR"])
elderly_ages_RecPeriod = pd.DataFrame(elderly_ages_params[elderly_ages_params["param"] == "recovery_period"])
elderly_ages_alpha = | pd.DataFrame(elderly_ages_params[elderly_ages_params["param"] == "report_rate"]) | pandas.DataFrame |
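# Illustrative continuation (not from the source): with the per-age parameter frames collected
# above, the helpers defined at the top of build() collapse each age band into a single
# dictionary of median parameter values. `last=1000` (how many of the final posterior draws to
# take medians over) is an assumed value.
young_median_params = medians_params(young_params, age_group="0-19", last=1000)
# -> {"age": "0-19", "beta": ..., "IFR": ..., "RecPeriod": ..., "alpha": ..., "sigma": ...}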
# Preppin' Data 2021 Week 28
import pandas as pd
import numpy as np
import re
# Load data
world_cup = pd.read_excel('unprepped_data\\PD 2021 Wk 28 Input - InternationalPenalties.xlsx', sheet_name='WorldCup')
euros = pd.read_excel('unprepped_data\\PD 2021 Wk 28 Input - InternationalPenalties.xlsx', sheet_name='Euros')
# Determine what competition each penalty was taken in
world_cup['Event'] = 'World Cup ' + world_cup['Round'] + ' ' + world_cup['Event Year'].astype(str)
euros['Event'] = 'Euros ' + euros['Round'] + ' ' + euros['Event Year'].astype(str)
# trim whitespace
world_cup.columns = world_cup.columns.str.strip()
euros.columns = euros.columns.str.strip()
# lowercase columns
world_cup.columns = world_cup.columns.str.lower()
euros.columns = euros.columns.str.lower()
# stack data frames
penalty_df = pd.concat([world_cup,euros])
# Clean any fields, correctly format the date the penalty was taken, & group the two German countries (eg, West Germany & Germany)
penalty_df['event year'] = penalty_df['event year'].str.replace(',', '', regex=True)
penalty_df['event'] = penalty_df['event'].str.replace(',', '', regex=True)
penalty_df['date'] = pd.to_datetime(penalty_df['date'])
penalty_df['winner'] = penalty_df['winner'].str.strip()
penalty_df['loser'] = penalty_df['loser'].str.strip()
penalty_df['winner'] = penalty_df['winner'].str.replace('^(West|East) ', '', regex=True)
penalty_df['loser'] = penalty_df['loser'].str.replace('^(West|East) ', '', regex=True)
# Rank the countries on the following:
# - Shootout win % (exclude teams who have never won a shootout)
# - Penalties scored %
# What is the most and least successful time to take a penalty? (What penalty number are you most likely to score or miss?)
# determine which penalties were scored, missed or not taken (result already determined)
penalty_df['winning team scored'] = penalty_df['winning team taker'].str.contains(' scored')
penalty_df['losing team scored'] = penalty_df['losing team taker'].str.contains(' scored')
penalty_df['winning team penalty score'] = np.where(penalty_df['winning team scored'] == True, 1, np.where(penalty_df['winning team scored'] == False,0,None))
penalty_df['losing team penalty score'] = np.where(penalty_df['losing team scored'] == True, 1, np.where(penalty_df['losing team scored'] == False,0,None))
# create data frame of shootout results
penalty_winners = penalty_df[['event','winner']]
penalty_losers = penalty_df[['event','loser']]
penalty_winners = penalty_winners.drop_duplicates()
penalty_losers = penalty_losers.drop_duplicates()
penalty_winners.columns = ['event','team']
penalty_losers.columns = ['event','team']
penalty_winners['result'] = 1
penalty_losers['result'] = 0
shootout_df = pd.concat([penalty_winners,penalty_losers])
shootout_df['played'] = 1
# from shootout_df calculate Shootout win % (exclude teams who have never won a shootout)
# total shoot out results by team, filter non-winners
percent_shootout = shootout_df.groupby(['team']).agg({'result':'sum','played':'sum'}).reset_index()
percent_shootout = percent_shootout.loc[percent_shootout['result'] > 0]
# calculate columns for output
percent_shootout['Shootout Win %'] = percent_shootout['result'] / percent_shootout['played']
percent_shootout['Total Shootouts'] = percent_shootout['played']
percent_shootout['Shootouts'] = percent_shootout['result']
percent_shootout['Team'] = percent_shootout['team']
# calculate rank, sort data frame, reduce and reorder columns
percent_shootout['Win % Rank'] = percent_shootout['Shootout Win %'].rank(method='dense',ascending=False).astype(int)
percent_shootout = percent_shootout.sort_values(by='Win % Rank', ascending=True).reset_index()
percent_shootout = percent_shootout[['Win % Rank','Shootout Win %','Total Shootouts','Shootouts','Team']]
# create data frame of penalties
penalty_win_details = penalty_df[['event','winner','penalty number','winning team penalty score']]
penalty_lose_details = penalty_df[['event','loser','penalty number','losing team penalty score']]
penalty_win_details.columns = ['event','team','penalty number','penalty score']
penalty_lose_details.columns = ['event','team','penalty number','penalty score']
penalty_details = | pd.concat([penalty_win_details,penalty_lose_details]) | pandas.concat |
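# The task statement above also asks for a "penalties scored %" ranking. A sketch of how it
# could be derived from the stacked penalty_details frame just built (the ranking mirrors the
# percent_shootout logic; this is an illustration, not the original solution):
percent_scored = (
    penalty_details.dropna(subset=["penalty score"])
    .astype({"penalty score": "int"})
    .groupby("team")["penalty score"]
    .agg(scored="sum", taken="count")
    .reset_index()
)
percent_scored["Penalties Scored %"] = percent_scored["scored"] / percent_scored["taken"]
percent_scored["Scored % Rank"] = percent_scored["Penalties Scored %"].rank(method="dense", ascending=False).astype(int)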
"""
Copyright 2021 Biomedical Computer Vision Group, Heidelberg University.
Author: <NAME> (<EMAIL>)
Distributed under the MIT license.
See file LICENSE for detail or copy at https://opensource.org/licenses/MIT
"""
import argparse
import numpy as np
import pandas as pd
import skimage.util
def disk_mask(imsz, ir, ic, nbpx):
ys, xs = np.ogrid[-nbpx:nbpx + 1, -nbpx:nbpx + 1]
se = xs ** 2 + ys ** 2 <= nbpx ** 2
mask = np.zeros(imsz, dtype=int)
if ir - nbpx < 0 or ic - nbpx < 0 or ir + nbpx + 1 > imsz[0] or ic + nbpx + 1 > imsz[1]:
mask = skimage.util.pad(mask, nbpx)
mask[ir:ir + 2 * nbpx + 1, ic:ic + 2 * nbpx + 1] = se
mask = skimage.util.crop(mask, nbpx)
else:
mask[ir - nbpx:ir + nbpx + 1, ic - nbpx:ic + nbpx + 1] = se
return mask
def find_nn(cim, icy, icx, nim, nbpx):
mask = disk_mask(cim.shape, icy, icx, nbpx)
iys_nim, ixs_nim = np.where(nim * mask)
if iys_nim.size == 0:
return np.NaN, np.NaN
d2 = (icy - iys_nim) ** 2 + (icx - ixs_nim) ** 2
I1 = np.argsort(d2)
iy_nim = iys_nim[I1[0]]
ix_nim = ixs_nim[I1[0]]
mask = disk_mask(cim.shape, iy_nim, ix_nim, nbpx)
iys_cim, ixs_cim = np.where(cim * mask)
d2 = (iy_nim - iys_cim) ** 2 + (ix_nim - ixs_cim) ** 2
I2 = np.argsort(d2)
if not iys_cim[I2[0]] == icy or not ixs_cim[I2[0]] == icx:
return np.NaN, np.NaN
return iy_nim, ix_nim
def points_linking(fn_in, fn_out, nbpx=6, th=25, minlen=50):
data = pd.read_csv(fn_in, delimiter="\t")
all_data = np.array(data)
assert all_data.shape[1] in [3, 4], 'unknown column(s) in input data!'
coords = all_data[:, :3].astype('int64')
frame_1st = np.min(coords[:, 0])
frame_end = np.max(coords[:, 0])
assert set([i for i in range(frame_1st, frame_end + 1)]).issubset(set(coords[:, 0].tolist())), "spots missing at some time point!"
nSlices = frame_end
stack_h = np.max(coords[:, 2]) + nbpx
stack_w = np.max(coords[:, 1]) + nbpx
stack = np.zeros((stack_h, stack_w, nSlices), dtype='int8')
stack_r = np.zeros((stack_h, stack_w, nSlices), dtype='float64')
for i in range(all_data.shape[0]):
iyxz = tuple(coords[i, ::-1] - 1)
stack[iyxz] = 1
if all_data.shape[1] == 4:
stack_r[iyxz] = all_data[i, -1]
else:
stack_r[iyxz] = 1
tracks_all = np.array([], dtype=float).reshape(0, nSlices, 4)
maxv = np.max(stack_r)
br_max = maxv
idx_max = np.argmax(stack_r)
while 1:
iyxz = np.unravel_index(idx_max, stack.shape)
spot_br = np.empty((nSlices, 1))
track = np.empty((nSlices, 3))
for i in range(nSlices):
spot_br[i] = np.NaN
track[i, :] = np.array((np.NaN, np.NaN, np.NaN))
spot_br[iyxz[2]] = maxv
track[iyxz[2], :] = np.array(iyxz[::-1]) + 1
# forward
icy = iyxz[0]
icx = iyxz[1]
for inz in range(iyxz[2] + 1, nSlices):
iny, inx = find_nn(stack[:, :, inz - 1], icy, icx, stack[:, :, inz], nbpx)
if np.isnan(iny) and not inz == nSlices - 1:
iny, inx = find_nn(stack[:, :, inz - 1], icy, icx, stack[:, :, inz + 1], nbpx)
if np.isnan(iny):
break
else:
iny = icy
inx = icx
stack[iny, inx, inz] = 1
stack_r[iny, inx, inz] = stack_r[iny, inx, inz - 1]
elif np.isnan(iny) and inz == nSlices - 1:
break
track[inz, :] = np.array((inz, inx, iny)) + 1
spot_br[inz] = stack_r[iny, inx, inz]
icy = iny
icx = inx
# backward
icy = iyxz[0]
icx = iyxz[1]
for inz in range(iyxz[2] - 1, -1, -1):
iny, inx = find_nn(stack[:, :, inz + 1], icy, icx, stack[:, :, inz], nbpx)
if np.isnan(iny) and not inz == 0:
iny, inx = find_nn(stack[:, :, inz + 1], icy, icx, stack[:, :, inz - 1], nbpx)
if np.isnan(iny):
break
else:
iny = icy
inx = icx
stack[iny, inx, inz] = 1
stack_r[iny, inx, inz] = stack_r[iny, inx, inz + 1]
elif np.isnan(iny) and inz == 0:
break
track[inz, :] = np.array((inz, inx, iny)) + 1
spot_br[inz] = stack_r[iny, inx, inz]
icy = iny
icx = inx
for iz in range(nSlices):
if not np.isnan(track[iz, 0]):
stack[track[iz, 2].astype(int) - 1, track[iz, 1].astype(int) - 1, iz] = 0
stack_r[track[iz, 2].astype(int) - 1, track[iz, 1].astype(int) - 1, iz] = 0
# discard short trajectories
if np.count_nonzero(~np.isnan(spot_br)) > np.max((1, minlen * (frame_end - frame_1st) / 100)):
tmp = np.concatenate((track, spot_br), axis=1)
tracks_all = np.concatenate((tracks_all, tmp.reshape(1, -1, 4)), axis=0)
maxv = np.max(stack_r)
idx_max = np.argmax(stack_r)
if maxv < th * br_max / 100 or maxv == 0:
break
with pd.ExcelWriter(fn_out) as writer:
if tracks_all.shape[0] == 0:
df = pd.DataFrame()
df['No tracks found'] = np.NaN
df.to_excel(writer, index=False, float_format='%.2f')
else:
for i in range(tracks_all.shape[0]):
df = | pd.DataFrame() | pandas.DataFrame |
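# Illustrative invocation (file names are assumptions): link detected spots from a
# tab-separated table of frame/x/y(/intensity) rows into trajectories and write the result to
# an Excel workbook, keeping only tracks longer than 50% of the covered time span.
points_linking("detected_spots.tsv", "linked_tracks.xlsx", nbpx=6, th=25, minlen=50)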
#!/usr/bin/env python
# coding: utf-8
# # COVID-19 Deaths Per Capita
# > Comparing death rates adjusting for population size.
#
# - comments: true
# - author: <NAME> & <NAME>
# - categories: [growth, compare, interactive]
# - hide: false
# - image: images/covid-permillion-trajectories.png
# - permalink: /covid-compare-permillion/
# In[1]:
#hide
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import altair as alt
get_ipython().run_line_magic('config', "InlineBackend.figure_format = 'retina'")
chart_width = 550
chart_height= 400
# ## Deaths Per Million Of Inhabitants
# Since reaching at least 1 death per million
#
# > Tip: Click (Shift+ for multiple) on countries in the legend to filter the visualization.
# In[2]:
#hide
data = pd.read_csv("https://raw.githubusercontent.com/CSSEGISandData/COVID-19/master/csse_covid_19_data/csse_covid_19_time_series/time_series_19-covid-Deaths.csv", error_bad_lines=False)
data = data.drop(columns=["Lat", "Long"])
data = data.melt(id_vars= ["Province/State", "Country/Region"])
data = pd.DataFrame(data.groupby(['Country/Region', "variable"]).sum())
data.reset_index(inplace=True)
data = data.rename(columns={"Country/Region": "location", "variable": "date", "value": "total_cases"})
data['date'] =pd.to_datetime(data.date)
data = data.sort_values(by = "date")
data.loc[data.location == "US","location"] = "United States"
data.loc[data.location == "Korea, South","location"] = "South Korea"
data_pwt = pd.read_stata("https://www.rug.nl/ggdc/docs/pwt91.dta")
filter1 = data_pwt["year"] == 2017
data_pop = data_pwt[filter1]
data_pop = data_pop[["country","pop"]]
data_pop.loc[data_pop.country == "Republic of Korea","country"] = "South Korea"
data_pop.loc[data_pop.country == "Iran (Islamic Republic of)","country"] = "Iran"
# per habitant
data_pc = data.copy()
countries = ["China", "Italy", "Spain", "France", "United Kingdom", "Germany",
"Portugal", "United States", "Singapore","South Korea", "Japan",
"Brazil","Iran"]
data_countries = []
data_countries_pc = []
# compute per habitant
for i in countries:
data_pc.loc[data_pc.location == i,"total_cases"] = data_pc.loc[data_pc.location == i,"total_cases"]/float(data_pop.loc[data_pop.country == i, "pop"])
# get each country time series
filter1 = data_pc["total_cases"] > 1
for i in countries:
filter_country = data_pc["location"]== i
data_countries_pc.append(data_pc[filter_country & filter1])
# In[3]:
#hide_input
# Stack data to get it to Altair dataframe format
data_countries_pc2 = data_countries_pc.copy()
for i in range(0,len(countries)):
data_countries_pc2[i] = data_countries_pc2[i].reset_index()
data_countries_pc2[i]['n_days'] = data_countries_pc2[i].index
data_countries_pc2[i]['log_cases'] = np.log(data_countries_pc2[i]["total_cases"])
data_plot = data_countries_pc2[0]
for i in range(1, len(countries)):
data_plot = | pd.concat([data_plot, data_countries_pc2[i]], axis=0) | pandas.concat |
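#hide_input
# A sketch (not taken from the original notebook) of the kind of Altair layer the stacked
# `data_plot` frame feeds: one line per country, days since the 1-death-per-million threshold
# on the x-axis, deaths per million on the y-axis, with the clickable legend filter described
# in the tip above.
selection = alt.selection_multi(fields=["location"], bind="legend")
chart = (
    alt.Chart(data_plot)
    .mark_line()
    .encode(
        x=alt.X("n_days:Q", title="Days since 1 death per million"),
        y=alt.Y("total_cases:Q", title="Deaths per million"),
        color="location:N",
        opacity=alt.condition(selection, alt.value(1), alt.value(0.1)),
    )
    .add_selection(selection)
    .properties(width=chart_width, height=chart_height)
)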
# encoding: utf-8
# (c) 2017-2021 Open Risk (https://www.openriskmanagement.com)
#
# TransitionMatrix is licensed under the Apache 2.0 license a copy of which is included
# in the source distribution of TransitionMatrix. This is notwithstanding any licenses of
# third-party software included in this distribution. You may not use this file except in
# compliance with the License.
#
# Unless required by applicable law or agreed to in writing, software distributed under
# the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
# either express or implied. See the License for the specific language governing permissions and
# limitations under the License.
import pprint as pp
import pandas as pd
from transitionMatrix.utils.converters import frame_to_array, datetime_to_float
from transitionMatrix.utils.preprocessing import transitions_summary, validate_absorbing_state
""" Examples of using transitionMatrix to prepare data sets (data cleansing). The functionality is primarily based on pandas, with transition data specific procedures supported by the utils sub-package. For some operations (and large datasets) it might be advisable to work with numpy arrays
"""
# Load the raw data into a pandas frame
raw_data = pd.read_csv('../../datasets/rating_data_raw.csv')
# Print a generic summary based on pandas describe() method
print(raw_data.describe())
# Bring the column names to a standard convention
raw_data.rename(columns={"RatingNum": "State", "Date": "Time", "CustomerId": "ID"}, inplace=True)
print(raw_data.head())
# Print a summary of transition statistics
pp.pprint(transitions_summary(raw_data))
# Drop redundant column
raw_data = raw_data.drop(columns=['Rating'])
# Move the NR column to the end
reorder_dict = {
0: 8,
1: 0,
2: 1,
3: 2,
4: 3,
5: 4,
6: 5,
7: 6,
8: 7
}
raw_data = raw_data.replace({"State": reorder_dict})
print(raw_data.head(10))
# Convert date strings to floats
[start_date, end_date, total_days], converted_data = datetime_to_float(raw_data, time_column='Time')
print([start_date, end_date, total_days])
# NB: In the below the D = 7, NR = 8 special states are hardwired
# remove an initial observation for an entity if it is classified as D
# Reason: an initial defaulted observation is unusual / non-sensical
rows = []
entity_id, event_time, entity_state = frame_to_array(converted_data)
for i in range(len(entity_id)):
if entity_id[i - 1] != entity_id[i] and entity_state[i] == 7:
pass
else:
rows.append((entity_id[i], event_time[i], entity_state[i]))
clean_data0 = pd.DataFrame(rows, columns=['ID', 'Time', 'State'])
# remove an initial observation for an entity if it is classified as NR
# Reason: left truncation of observations must be handled consistently
rows = []
entity_id, event_time, entity_state = frame_to_array(clean_data0)
for i in range(len(entity_id)):
if entity_id[i - 1] != entity_id[i] and entity_state[i] == 8:
pass
else:
rows.append((entity_id[i], event_time[i], entity_state[i]))
clean_data1 = pd.DataFrame(rows, columns=['ID', 'Time', 'State'])
# remove an intermediate observation for an entity if it is classified as NR
# Reason: it is non-informative and it complicates the handling of NR state (non-absorbing)
rows = []
entity_id, event_time, entity_state = frame_to_array(clean_data1)
for i in range(len(entity_id) - 1):
if entity_id[i + 1] == entity_id[i] and entity_state[i] == 8 and entity_state[i + 1] != 8:
pass
else:
rows.append((entity_id[i], event_time[i], entity_state[i]))
clean_data2 = | pd.DataFrame(rows, columns=['ID', 'Time', 'State']) | pandas.DataFrame |
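# Quick sanity check (not in the original script): re-run the transitions summary on the
# cleaned frame to verify that no entity now starts in the D (7) or NR (8) state and that the
# intermediate NR observations are gone.
pp.pprint(transitions_summary(clean_data2))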
import numpy as np
import glob
import logging
import os
from time import time
import SimpleITK as sitk
import numpy as np
import pandas as pd
import yaml
from sklearn.model_selection import KFold
from src.utils.Utils_io import ensure_dir
from src.visualization.Visualize import plot_value_histogram
def copy_meta_and_save(new_image, reference_sitk_img, full_filename=None, override_spacing=None, copy_direction=True):
"""
Copy metadata, UID and structural information from one image to another
Works also for different dimensions, returns new_image with copied structural info
:param new_image: sitk.Image
:param reference_sitk_img: sitk.Image
:param full_filename: full file path as str
:param override_spacing: tuple, overrides the spacing of the saved image if given
:param copy_direction: bool, copy the direction matrix from the reference image if True
:return:
"""
t1 = time()
try:
# make sure this method works with nda and sitk images
if isinstance(new_image, np.ndarray):
if len(new_image.shape) == 4:
# 4D needs to be built from a series
new_image = [sitk.GetImageFromArray(img) for img in new_image]
new_image = sitk.JoinSeries(new_image)
else:
new_image = sitk.GetImageFromArray(new_image)
ensure_dir(os.path.dirname(os.path.abspath(full_filename)))
if reference_sitk_img is not None:
assert (isinstance(reference_sitk_img, sitk.Image)), 'no reference image given'
assert (isinstance(new_image, sitk.Image)), 'only np.ndarrays and sitk images could be stored'
# copy metadata
for key in reference_sitk_img.GetMetaDataKeys():
new_image.SetMetaData(key, get_metadata_maybe(reference_sitk_img, key))
logging.debug('Metadata_copied: {:0.3f}s'.format(time() - t1))
# copy structural informations to image with same dimension and size
if (reference_sitk_img.GetDimension() == new_image.GetDimension()) and (
reference_sitk_img.GetSize() == new_image.GetSize()):
new_image.CopyInformation(reference_sitk_img)
# same dimension (e.g. 4) but different size per dimension
elif (reference_sitk_img.GetDimension() == new_image.GetDimension()):
# copy spacing, origin and rotation but keep size as it is
if copy_direction:
new_image.SetDirection(reference_sitk_img.GetDirection())
new_image.SetOrigin(reference_sitk_img.GetOrigin())
new_image.SetSpacing(reference_sitk_img.GetSpacing())
# copy structural information to smaller images e.g. 4D to 3D
elif reference_sitk_img.GetDimension() > new_image.GetDimension():
shape_ = len(new_image.GetSize())
reference_shape = len(reference_sitk_img.GetSize())
# copy direction to smaller images
# 1. extract the direction, 2. create a matrix, 3. slice by the new shape, 4. flatten
if copy_direction:
direction = np.array(reference_sitk_img.GetDirection())
dir_ = direction.reshape(reference_shape, reference_shape)
direction = dir_[:shape_, :shape_].flatten()
new_image.SetDirection(direction)
new_image.SetOrigin(reference_sitk_img.GetOrigin()[:shape_])
new_image.SetSpacing(reference_sitk_img.GetSpacing()[:shape_])
# copy structural information to bigger images e.g. 3D to 4D, fill with 1.0 spacing
else:
ones = [1.0] * (new_image.GetDimension() - reference_sitk_img.GetDimension())
new_image.SetOrigin((*reference_sitk_img.GetOrigin(), *ones))
new_image.SetSpacing((*reference_sitk_img.GetSpacing(), *ones))
# we cant copy the direction from smaller images to bigger ones
logging.debug('spatial data_copied: {:0.3f}s'.format(time() - t1))
if override_spacing:
new_image.SetSpacing(override_spacing)
if full_filename != None:
# copy uid
writer = sitk.ImageFileWriter()
# writer.KeepOriginalImageUIDOn()
writer.SetFileName(full_filename)
writer.Execute(new_image)
logging.debug('image saved: {:0.3f}s'.format(time() - t1))
return True
except Exception as e:
logging.error('Error with saving file: {} - {}'.format(full_filename, str(e)))
return False
else:
return new_image
def create_4d_volumes_from_4d_files(img_f, mask_f, full_path='data/raw/GCN/3D/', slice_threshold=2):
"""
Expects an 4d-image and -mask file name and a target path
filter mask and image volumes by contoured time-steps
copy all metadata
save them to the destination path
:param img_f: 4D image filepath as str
:param mask_f: 4D mask filepath as str
:param full_path: export path as str
:param slice_threshold: minimal masks per timestep as int
:return:
"""
logging.info('process file: {}'.format(img_f))
# get sitk images
mask_4d_sitk = sitk.ReadImage(mask_f)
img_4d_sitk = sitk.ReadImage(img_f)
# filter 4d image nda according to given mask nda
mask_4d_nda, masked_t = filter_4d_vol(mask_4d_sitk, slice_threshold=slice_threshold)
img_4d_nda = sitk.GetArrayFromImage(img_4d_sitk)[masked_t]
# write filtered 4d image to disk
patient_name = os.path.basename(img_f).split('.')[0].replace('volume_clean', '')
img_file = '{}_{}{}'.format(patient_name, 'img', '.nrrd')
mask_file = '{}_{}{}'.format(patient_name, 'msk', '.nrrd')
copy_meta_and_save(img_4d_nda, img_4d_sitk, os.path.join(full_path, img_file))
copy_meta_and_save(mask_4d_nda, img_4d_sitk, os.path.join(full_path, mask_file))
return [masked_t, list(img_4d_nda.shape)]
def extract_spacing(matlabfile=None, is_sax=True):
"""
extract the spacing from a medvisio export matlabfile
of a CMR image, either SAX or 4CH
returns (1,1,1,6) for (z,t,x,y) if none spacing could be found
:param matlabfile (np.ndarray) matlabfile opened via scipy.io.loadmat(example.mat)
:param is_sax (bool) toggle between sax or 4ch spacing
:return: (tuple) spacing in the following order (z,t,x,y)
"""
assert (matlabfile is not None), 'no matlab file given, please provide *.mat file as np.ndarray'
try:
values = dict([(keys.lower(), value) for keys, value in
zip(matlabfile['setstruct'][0].dtype.names, matlabfile['setstruct'][0][int(is_sax)])])
except Exception as e:
print(str(e))
values = dict()
spacing_x = float(values.get('resolutionx', 1))
spacing_y = float(values.get('resolutiony', 1))
spacing_t = float(1)
spacing_z = float(values.get('slicethickness', 6))
return (spacing_z, spacing_t, spacing_x, spacing_y)
def create_3d_volumes_from_4d_files(img_f, mask_f, full_path='data/raw/tetra/3D/', slice_treshhold=2):
"""
Expects an 4d-image and -mask file name and a target path
filter mask and image volumes with segmentation
copy all metadata
save them to the destination path
:param img_f:
:param mask_f:
:param full_path:
:return:
"""
logging.info('process file: {}'.format(img_f))
# get sitk images
mask_4d_sitk = sitk.ReadImage(mask_f)
img_4d_sitk = sitk.ReadImage(img_f)
# filter 4d image nda according to given mask nda
mask_4d_nda, masked_t = filter_4d_vol(mask_4d_sitk, slice_threshold=slice_treshhold)
img_4d_nda = sitk.GetArrayFromImage(img_4d_sitk)[masked_t]
# create t 3d volumes
for img_3d, mask_3d, t in zip(img_4d_nda, mask_4d_nda, masked_t):
# write 3d image
patient_name = os.path.basename(img_f).split('.')[0].replace('volume_clean', '')
img_file = '{}_t{}_{}{}'.format(patient_name, str(t), 'img', '.nrrd')
mask_file = '{}_t{}_{}{}'.format(patient_name, str(t), 'msk', '.nrrd')
copy_meta_and_save(img_3d, img_4d_sitk, os.path.join(full_path, img_file))
copy_meta_and_save(mask_3d, img_4d_sitk, os.path.join(full_path, mask_file))
return [masked_t, list(img_4d_nda.shape)]
def create_2d_slices_from_4d_volume_files(img_f, mask_f, export_path, filter_by_mask=True, slice_threshold=2):
"""
Expects an 4d-image and -mask file name and a target path
filter mask and image volumes with segmentation
copy all metadata
save them to the destination path
:param img_f:
:param mask_f:
:param export_path: str
:param filter_by_mask: bool
:param slice_threshold: int
:return:
"""
logging.info('process file: {}'.format(img_f))
# get sitk images
mask_4d_sitk = sitk.ReadImage(mask_f)
img_4d_sitk = sitk.ReadImage(img_f)
# filter 4d image nda according to given mask nda
if filter_by_mask:
mask_4d_nda, masked_t = filter_4d_vol(mask_4d_sitk, slice_threshold=slice_threshold)
img_4d_nda = sitk.GetArrayFromImage(img_4d_sitk)[masked_t]
else:
img_4d_nda = sitk.GetArrayFromImage(img_4d_sitk)
masked_t = list(range(img_4d_nda.shape[0]))
mask_4d_nda = sitk.GetArrayFromImage(mask_4d_sitk)
# create t x 3d volumes
for img_3d, mask_3d, t in zip(img_4d_nda, mask_4d_nda, masked_t):
# get patient_name
patient_name = os.path.basename(img_f).split('.')[0].replace('volume_clean', '')
# create z x 2d slices
for z, slice_2d in enumerate(zip(img_3d, mask_3d)):
# create filenames with reference to t and z position
img_file = '{}_t{}_z{}_{}{}'.format(patient_name, str(t), str(z), 'img', '.nrrd')
mask_file = '{}_t{}_z{}_{}{}'.format(patient_name, str(t), str(z), 'msk', '.nrrd')
# save nrrd file with metadata
copy_meta_and_save(slice_2d[0], img_4d_sitk, os.path.join(export_path, img_file), copy_direction=False)
copy_meta_and_save(slice_2d[1], img_4d_sitk, os.path.join(export_path, mask_file), copy_direction=False)
return [masked_t, list(img_4d_nda.shape)]
def create_2d_slices_from_3d_volume_files_any_filename(img_f, mask_f, export_path):
"""
Helper to split a GCN 3D dicom file into z x 2D slices
Expects an 3d-image and -mask file name and a target path
filter mask and image volumes with segmentation
copy all metadata
save them to the destination path
:param img_f:
:param mask_f:
:param export_path:
:return:
"""
import re
logging.info('process file: {}'.format(img_f))
# get sitk images
mask_3d_sitk = sitk.ReadImage(mask_f)
img_3d_sitk = sitk.ReadImage(img_f)
# filter 4d image nda according to given mask nda
mask_3d = sitk.GetArrayFromImage(mask_3d_sitk)
img_3d = sitk.GetArrayFromImage(img_3d_sitk)
# get file names
_, img_f = os.path.split(img_f)
_, mask_f = os.path.split(mask_f)
def get_new_name(f_name, z):
match = ''
# check if image or mask
m = re.search('_img|_msk', f_name)
if m:
match = m.group(0)
# extend filename at the very last position before 'img' or 'msk'
return re.sub('{}.nrrd'.format(match), '_{}{}.nrrd'.format(z, match), f_name)
# create z x 2d slices
for z, slice_2d in enumerate(zip(img_3d, mask_3d)):
# create filenames with reference to t and z position
# extend the filename
img_file = get_new_name(img_f, z)
mask_file = get_new_name(mask_f, z)
# save nrrd file with metadata
copy_meta_and_save(slice_2d[0], img_3d_sitk, os.path.join(export_path, img_file))
copy_meta_and_save(slice_2d[1], img_3d_sitk, os.path.join(export_path, mask_file))
return list(img_3d.shape)
def create_2d_slices_from_3d_volume_files(img_f, mask_f, export_path):
"""
Helper for ACDC data
Expects an 3d-image and -mask file name and a target path
filter mask and image volumes with segmentation
copy all metadata
save them to the destination path
:param img_f:
:param mask_f:
:param export_path:
:return:
"""
logging.info('process file: {}'.format(img_f))
# get sitk images
mask_3d_sitk = sitk.ReadImage(mask_f)
img_3d_sitk = sitk.ReadImage(img_f)
# filter 4d image nda according to given mask nda
mask_3d = sitk.GetArrayFromImage(mask_3d_sitk)
img_3d = sitk.GetArrayFromImage(img_3d_sitk)
# get patient_name
patient_name = os.path.basename(img_f).split('_')[0]
frame = os.path.basename(img_f).split('frame')[1][:2]
# create z x 2d slices
for z, slice_2d in enumerate(zip(img_3d, mask_3d)):
# create filenames with reference to t and z position
img_file = '{}__t{}_z{}_{}{}'.format(patient_name, str(frame), str(z), 'img', '.nrrd')
mask_file = '{}__t{}_z{}_{}{}'.format(patient_name, str(frame), str(z), 'msk', '.nrrd')
# save nrrd file with metadata
copy_meta_and_save(slice_2d[0], img_3d_sitk, os.path.join(export_path, img_file))
copy_meta_and_save(slice_2d[1], img_3d_sitk, os.path.join(export_path, mask_file))
return [frame, list(img_3d.shape)]
def get_patient(filename_to_2d_nrrd_file):
"""
Split the nrrd filename and returns the patient id
split the filename by '_' returns the first two elements of that list
If the filename contains '__' it returns the part before
"""
import re
m = re.search('__', filename_to_2d_nrrd_file)
if m: # nrrd filename with '__'
return os.path.basename(filename_to_2d_nrrd_file).split('__')[0]
if os.path.basename(filename_to_2d_nrrd_file).startswith('patient'): # acdc file
return os.path.basename(filename_to_2d_nrrd_file).split('_')[0]
else: # gcn filename
return '_'.join(os.path.basename(filename_to_2d_nrrd_file).split('_')[:2])
def get_trainings_files(data_path, fold=0, path_to_folds_df='data/raw/gcn_05_2020_ax_sax_86/folds.csv'):
"""
Load training and test files from a directory according to a given folds dataframe
:param data_path:
:param fold:
:param path_to_folds_df:
:return: x_train, y_train, x_test, y_test
"""
img_suffix = '*img.nrrd'
mask_suffix = '*msk.nrrd'
# load the nrrd files with given pattern from the data path
x = sorted(glob.glob(os.path.join(data_path, img_suffix)))
y = sorted(glob.glob(os.path.join(data_path, mask_suffix)))
if len(x) == 0:
logging.info('no files found, try to load with acdc file pattern')
x, y = load_acdc_files(data_path)
df = pd.read_csv(path_to_folds_df)
patients = df[df.fold.isin([fold])]
# make sure we count each patient only once
patients_train = patients[patients['modality'] == 'train']['patient'].unique()
patients_test = patients[patients['modality'] == 'test']['patient'].unique()
logging.info('Found {} images/masks in {}'.format(len(x), data_path))
logging.info('Patients train: {}'.format(len(patients_train)))
def filter_files_for_fold(list_of_filenames, list_of_patients):
"""Helper to filter one list by a list of substrings"""
from src.data.Dataset import get_patient
return [str for str in list_of_filenames
if get_patient(str) in list_of_patients]
x_train = sorted(filter_files_for_fold(x, patients_train))
y_train = sorted(filter_files_for_fold(y, patients_train))
x_test = sorted(filter_files_for_fold(x, patients_test))
y_test = sorted(filter_files_for_fold(y, patients_test))
assert (len(x_train) == len(y_train)), 'len(x_train != len(y_train))'
logging.info('Selected {} of {} files with {} of {} patients for training fold {}'.format(len(x_train), len(x),
len(patients_train),
len(df.patient.unique()),
fold))
return x_train, y_train, x_test, y_test
def get_kfolded_data(kfolds=4, path_to_data='data/raw/tetra/2D/', extract_patient_id=get_patient):
"""
filter all image files by patient names defined in fold n
functions expects subfolders, collects all image, mask files
and creates a list of unique patient ids
create k folds of this patient ids
filter the filenames containing the patient ids from each kfold split
returns
:param kfolds: number of splits
:param path_to_data: path to root of split data e.g. 'data/raw/tetra/2D/'
:param extract_patient_id: function which returns the patient id for each filename in path_to_data
:return: a dataframe with the following columns:
fold (kfolds-1),
x_path (full filename to image as nrrd),
y_path (full filename to mask as nrrd),
modality(train or test)
patient (patient id)
"""
img_pattern = '*img.nrrd'
columns = ['fold', 'x_path', 'y_path', 'modality', 'patient']
modality_train = 'train'
modality_test = 'test'
seed = 42
# get all images, masks from given directory
# get all img files in all subdirs
x = sorted(glob.glob(os.path.join(path_to_data, '**/*{}'.format(img_pattern))))
# if no subdirs given, search in root
if len(x) == 0:
x = sorted(glob.glob(os.path.join(path_to_data, '*{}'.format(img_pattern))))
logging.info('found: {} files'.format(len(x)))
# create a unique list of patient ids
patients = sorted(list(set([extract_patient_id(f) for f in x])))
logging.info('found: {} patients'.format(len(patients)))
# create a k-fold instance with k = kfolds
kfold = KFold(n_splits=kfolds, shuffle=True, random_state=seed)
def filter_x_by_patient_ids_(x, patient_ids, modality, columns, f):
# create a dataframe from x (list of filenames) filter by patient ids
# returns a dataframe
df = pd.DataFrame(columns=columns)
df['x_path'] = [elem for elem in x if extract_patient_id(elem) in patient_ids]
df['y_path'] = [elem.replace('img', 'msk') for elem in df['x_path']]
df['fold'] = [f] * len(df['x_path'])
df['modality'] = [modality] * len(df['x_path'])
df['patient'] = [extract_patient_id(elem) for elem in df['x_path']]
logging.debug(len(df['x_path']))
logging.debug(patient_ids)
logging.debug(len(x))
logging.debug(extract_patient_id(x[0]))
return df
# split patients k times
# use the indexes to get the patient ids from x
# use the patient ids to filter train/test from the complete list of files
df_folds = pd.DataFrame(columns=columns)
for f, idx in enumerate(
kfold.split(patients)): # f = fold, idx = tuple with all indexes to split the patients in this fold
train_idx, test_idx = idx
# create a list of train and test indexes
logging.debug("TRAIN: {}, TEST: {}".format(train_idx, test_idx))
# slice the filenames by the given indexes
patients_train, patients_test = [patients[i] for i in train_idx], [patients[i] for i in test_idx]
df_train = filter_x_by_patient_ids_(x, patients_train, modality_train, columns, f)
df_test = filter_x_by_patient_ids_(x, patients_test, modality_test, columns, f)
# merge train and test
df_fold = pd.concat([df_train, df_test])
# merge fold into folds dataset
df_folds = pd.concat([df_fold, df_folds])
return df_folds
def filter_x_by_patient_ids(x, patient_ids, modality='test',
columns=['x_path', 'y_path', 'fold', 'modality', 'patient', 'pathology'], fold=0,
pathology=None, filter=True):
"""
Create a df from a given list of files
and a list of patient which are used to filter the file names
:param x:
:param patient_ids:
:param modality:
:param columns:
:param f:
:param pathology:
:return:
"""
# create a dataframe from x (list of filenames) filter by patient ids
# returns a dataframe
df = pd.DataFrame(columns=columns)
if filter:
df['x_path'] = [elem for elem in x if get_patient(elem) in patient_ids]
else:
df['x_path'] = [elem for elem in x]
df['y_path'] = [elem.replace('img', 'msk') for elem in df['x_path']]
df['fold'] = [fold] * len(df['x_path'])
df['modality'] = [modality] * len(df['x_path'])
df['patient'] = [get_patient(elem) for elem in df['x_path']]
df['pathology'] = [pathology] * len(df['x_path'])
return df
def get_n_patients(df, n=1):
"""
Select n random patients
Filter the data frame by these patients
Use the Fold 0 split as default
Override the modality for all random selected patients to "train"
return filtered df
:param df: folds dataframe, e.g. as created by get_kfolded_data
:param n: number of random patients to select
:return:
"""
# fold is not important,
# because we return patients from train and test modality
fold = 0
# make random.choice idempotent
np.random.seed(42)
# select random patients
patients = np.random.choice(sorted(df['patient'].unique()), size=n)
logging.info('Added patients: {} from the GCN dataset'.format(patients))
# filter data frame by fold and by random selected patients ids, make sure to make a copy to avoid side effects
df_temp = df[(df['fold'] == fold) & (df['patient'].isin(patients))].copy()
# make sure all selected images will be used during training, change modality to train for this images
# train_kfolded will only use images with modality == train, override the modality for all selected patients/rows
df_temp.loc[:, 'modality'] = 'train'
df_temp.reset_index(inplace=True)
return df_temp
def get_train_data_from_df(first_df='reports/kfolds_data/2D/acdc/df_kfold.csv', second_df=None,
n_second_df=0, n_first_df=None, fold=0, ):
"""
load one df and select n patients, default: use all
load a second df, if given
select n patients from second df,
merge first df into second df
return x_train, y_train, x_val, y_val as list of files
:param first_df: full file/pathname of the first df (str)
:param second_df: full file/pathname of the second df (str)
:param n_second_df: number of patients from the second df that should be merged
:param n_first_df: int - number of patients to load from the first dataframe
:param fold: select a fold from df
:return:
"""
extend = dict()
extend['GCN_PATIENTS'] = list()
extend['GCN_IMAGES'] = 0
df = pd.read_csv(first_df)
# take only n patients from the first dataframe
if n_first_df:
df = get_n_patients(df, n_first_df)
# if second dataframe given, load df, select m patients, and concat this dataframe with the first one
if second_df: # extend dataframe with n patients from second dataframe
df_second = pd.read_csv(second_df)
df_second = get_n_patients(df_second, n_second_df)
df = | pd.concat([df, df_second], sort=False) | pandas.concat |
import pandas as pd
from fbprophet import Prophet
from preprocess_ts_data import concatenate_features
# Make a dataframe of center id, meal id and the two concatenated into one string
center_meal_combo_id = concatenate_features(df_time_series, 'center_id', 'meal_id')
def make_time_series_predictions(full_raw_time_series, forecast_period):
"""
Use Prophet: A Time Series Forecasting technique developed and open-sourced by Facebook
"""
time_series_period = len(full_raw_time_series['week'].value_counts().index.to_list())
total_predictions=pd.DataFrame()
for combo in list(center_meal_combo_id['centre_id_meal_id']):
combo_time_series = full_raw_time_series.loc[full_raw_time_series['center_meal_combo_id'] == combo,['ds','y']]
# Instantiate a Prophet object and fit it to our time series
m = Prophet(daily_seasonality=False, weekly_seasonality=True, yearly_seasonality=True)
m.fit(combo_time_series)
# Make a dataframe of future dates
future = m.make_future_dataframe(freq='W', periods=forecast_period)
# Make predictions on future dates
forecast = m.predict(future)
mini_predictions = forecast.loc[:,['ds','yhat']]
combo_series = | pd.DataFrame([combo]*(time_series_period + forecast_period), columns=['centre_meal_combo_id']) | pandas.DataFrame |
import pandas
import requests
from bs4 import BeautifulSoup
import time, datetime, os
city = "San-Leandro"
realtor_base_url = "https://www.realtor.com/realestateandhomes-search/%s_CA" % city
scraperapi_Key = "<KEY>"
header = {"user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/74.0.3729.169 Safari/537.36" ,'referer':'https://www.google.com/'}
# Will be used to store Property results
results_list = []
def parseContent(webSoup, find_Number_of_Properties = False):
"""
    Parses a BeautifulSoup page of property listings; if find_Number_of_Properties is True, also looks up and prints the total number of properties listed.
"""
if not isinstance(webSoup, BeautifulSoup):
raise ValueError("webSoup must be of type 'BeatifulSoup'")
elif not isinstance(find_Number_of_Properties, bool):
raise ValueError("find_Number_of_Properties must be of type 'bool'")
else:
# If find_Number_of_Properties is True, look for total number of houses
if find_Number_of_Properties:
try:
home_count_Container = webSoup.find("span", class_="result-count")
print("Total: %s" % home_count_Container.text)
except Exception as ex:
print("Error determing property count")
raise ex
# Get all property cards and parse through all
try:
properties_Container = webSoup.find_all("section", class_="srp-content")[0].find_all("li", attrs={"data-testid":"result-card"}, class_="component_property-card")
print("Number of properties on this page: %s" % len(properties_Container))
except Exception as ex:
print("%s Error occured initializing properties_container: %s" % (type(ex),ex))
# Return False to retry URL
return False
for prop in properties_Container:
home = {}
# Attempt to pull type
try:
home["Type"] = prop.find("div", class_="property-type", attrs={"data-label":"pc-type"}).text
except AttributeError:
home["Type"] = None
pass
except Exception as ex:
print("%s Error occured in Type: %s" % (type(ex),ex))
home["Type"] = None
# Attempt to pull price and estimated payment
try:
priceWrapper = prop.find_all("div", attrs={"data-label":"pc-price-wrapper"}, class_="price")
except Exception as ex:
print("%s Error occured initializing priceWrapper: %s" % (type(ex),ex))
return
for wrap in priceWrapper:
# pull price
try:
home["Price"] = wrap.find("span", attrs={"data-label":"pc-price"}).text
except AttributeError:
home["Price"] = None
pass
except Exception as ex:
print("%s Error occured in Price: %s" % (type(ex),ex))
home["Price"] = None
# TODO: pull estimated payment
# try:
# home["Estimate Payment"] = wrap.find("button", attrs={"estimate-payment-button"}, class_="estimate-payment-button").text
# except Exception as ex:
# print("Error occured in Estimate Payment: %s" % ex)
# home["Estimate Payment"] = None
# Find Home Details
try:
detailWrapper = prop.find("div", attrs={"data-testid":"property-meta-container"})
            except Exception as ex:
                print("%s Error occurred initializing detailWrapper: %s" % (type(ex), ex))
return
# Attempt to pull Bed
try:
home["Beds"] = detailWrapper.find("li", attrs={"data-label":"pc-meta-beds"}).find("span", attrs={"data-label":"meta-value"}).text
except AttributeError:
home["Beds"] = None
pass
except Exception as ex:
print("%s Error occured in Bed: %s" % (type(ex),ex))
home["Beds"] = None
# Attempt to pull baths
try:
home["Baths"] = detailWrapper.find("li", attrs={"data-label":"pc-meta-baths"}).find("span", attrs={"data-label":"meta-value"}).text
except AttributeError:
home["Baths"] = None
pass
except Exception as ex:
print("%s Error occured in Baths: %s" % (type(ex), ex))
home["Baths"] = None
# Attempt to pull Home SqFt
try:
home["Home Square Footage"] = detailWrapper.find("li", attrs={"data-label":"pc-meta-sqft"}).find("span", attrs={"data-label":"meta-value"}).text
except AttributeError:
home["Home Square Footage"] = None
pass
except Exception as ex:
print("%s Error occured in Home Square Footage: %s" % (type(ex), ex))
home["Home Square Footage"] = None
# Attempt to pull Lot SqFt
try:
home["Lot Square Footage"] = detailWrapper.find("li", attrs={"data-label":"pc-meta-sqftlot"}).find("span", attrs={"data-label":"meta-value"}).text
except AttributeError:
home["Lot Square Footage"] = None
pass
except Exception as ex:
print("%s Error occured in Lot Square Footage: %s" % (type(ex), ex))
home["Lot Square Footage"] = None
# Attempt to pull address
try:
home["Address"] = prop.find("div", class_="address", attrs={"data-label":"pc-address"}).text
except AttributeError:
home["Address"] = None
pass
except Exception as ex:
print("%s Error occured in Address: %s" % (type(ex),ex))
home["Address"] = None
# Add Date Created
try:
home["Created"] = datetime.datetime.now().strftime("%b-%d-%Y %I:%M:%S")
except Exception as ex:
print("%s Error occured in Created: %s" % (type(ex),ex))
home["Created"] = "Unavailable"
results_list.append(home)
return True
def requestNewURL(url):
"""
    Fetches the given URL through ScraperAPI, parses the response into BeautifulSoup and hands it to parseContent
"""
print("Looking at webpage: %s" % url)
payload = { "api_key": scraperapi_Key, "url": url }
req = requests.get("http://api.scraperapi.com",params=payload,headers=header)
if req.status_code != 200:
print("STATUS CODE FOR %s : %s" % (url, req.status_code))
if req.status_code == 500:
print("Request error 500. Trying again")
req = requests.get("http://api.scraperapi.com",params=payload,headers=header)
contents = req.text.strip()
soup = BeautifulSoup(contents, "html.parser")
if not parseContent(soup, False):
# something went wrong getting property container. Try URL again.
print("Trying URL Again")
requestNewURL(url)
# Start off parsing 1st page which will have some results
payload = { "api_key": scraperapi_Key, "url": realtor_base_url }
req = requests.get("http://api.scraperapi.com",params=payload,headers=header)
contents = req.text.strip()
soup = BeautifulSoup(contents, "html.parser")
# To see web content results, uncomment the following line
# print(soup.prettify())
# Parse through first page
parseContent(soup, True)
# Determine number of pages
try:
max_pages = soup.find_all("li", {"class":"pagination-number"})[-1].text
print("Total Number of pages %s" % max_pages)
# if more than 1 pages loop through all
if int(max_pages) >= 2:
# Since range is exclusive, we need to add one to the end
for page in range(2,int(max_pages)+1):
time.sleep(2)
requestNewURL(realtor_base_url + "/pg-" +str(page))
except IndexError:
print("Index Error occurred, Data may be missing.")
pass
print("Total number of successful pulled Properties: %s" % len(results_list))
# Create csv file with all results
dataFrame = | pandas.DataFrame(results_list) | pandas.DataFrame |
import math
import os
import gym
import re
from gym import spaces
from sklearn.feature_extraction.text import TfidfVectorizer
import random
import pandas as pd
import numpy as np
class weigher(object):
def __init__(self, ret_np=False):
self.ret_np = ret_np
self.model = TfidfVectorizer()
def fit(self, input_text):
if isinstance(input_text, str):
with open(input_text) as f:
self.model.fit(f)
else:
self.model.fit(input_text)
self.vocab = self.model.vocabulary_
self.prepr = self.model.build_preprocessor()
self.toker = self.model.build_tokenizer()
def tokenize(self, string):
return self.toker(self.prepr(string))
def tfidf(self, St):
sparse_wv = self.model.transform([St])
st = []
for w in self.tokenize(St):
try:
widx = self.vocab[w]
st.append(sparse_wv[0, widx])
except KeyError:
st.append(0.0)
return np.array(st) if self.ret_np else st
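# Illustrative usage sketch (not part of the original module): fit the TF-IDF
# weigher on a small in-memory corpus and weight one context window. The toy
# sentences are made up for demonstration only; fit() also accepts a file name.
def _demo_weigher():
    corpus = ["the quick brown fox jumps over the lazy dog",
              "a quick brown dog barks at the fox"]
    w = weigher(ret_np=True)
    w.fit(corpus)
    # returns one TF-IDF weight per token of the context window
    return w.tfidf("quick brown fox")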
class textEnv(gym.Env):
"""Custom text environment that follows gym interface"""
metadata = {'render.modes': ['human']}
def __init__(self, input_file_name, wsize=7, beta_rwd=1.5, gamma = 0.8,
sample_size=20, traject_length=100,
reward_smooth=True, n_trajects=10):
super(textEnv, self).__init__()
self.weiger = weigher(ret_np=True)
self.weiger.fit(input_file_name)
self.total_bytes = os.stat(input_file_name).st_size
self.file = open(input_file_name)
        assert wsize > 3 # context windows must be longer than 3 tokens
self.w_size = wsize
self.tlegth = traject_length
self.tcount = 0
        self.reward_smooth = reward_smooth
self.gamma = gamma
self.beta = beta_rwd
self.rand_byte = random.randint(0, self.total_bytes)
self.current_step = 0
self.sample_size = sample_size
self.n_trajects = n_trajects
try:
self.n_gram_model = TfidfVectorizer(analyzer='char_wb',
ngram_range=(1,3))
except TypeError:
self.n_gram_model = TfidfVectorizer(analyzer='char',
ngram_range=(1,3))
token_pattern = re.compile('\\w+')
self.tokenize = lambda s: token_pattern.findall(s)
self.char_prep = self.n_gram_model.build_preprocessor()
self.char_analyzer = self.n_gram_model.build_analyzer()
def char_tokenizer(self, s):
"""Gives character n-gram tokens.
args: s: string (a context window in string form).
rets: a list of strings, each being an n-gram.
"""
return [ng for ng in self.char_analyzer(" ".join(self.tokenize(
self.char_prep(s))))
if not ng in ['', ' ']]
def reset(self):
self.I_XZgY = []
self.I_XZ = []
self.tcount = 0
self.sample_semanticity = []
self.horizon_semanticity = 0.0
self.cum_rewards = []
self.current_byte = random.randint(0, self.total_bytes)
self.file.seek(self.current_byte)
self.current_step = 0
return self.next_observation()
def _check_EOF(self):
self.current_byte = self.file.tell()
if self.current_byte == self.total_bytes:
return True
else:
return False
def _read_next_context(self):
#if self._check_EOF():
# """If end of file is reached, then go to random line"""
# self.file.seek(0)
# self.current_byte = random.randint(0, self.total_bytes)
# self.file.seek(self.current_byte)
# self.file.readline() # skip this line to clear the partial line
self.lline = []
while len(self.lline) < self.w_size:
"""Do not deliver a text line if it is shorter than the allowed
window size"""
if self._check_EOF():
self.file.seek(0)
self.current_byte = random.randint(0, self.total_bytes)
self.file.seek(self.current_byte)
self.file.readline() # skip this line to clear the partial line
self._read_next_context()
else:
self.lline = self.weiger.tokenize(self.file.readline())
#self.current_byte = self.file.tell()
"""
Update the current file position, pick up a random context from
the current line at sc (start context, measured in tokens), and
return it as a string."""
if len(self.lline) - self.w_size > 0:
self.sc = random.randint(0, len(self.lline) - self.w_size)
else:
self.sc = 0
ctxt = " ".join(self.lline[self.sc:self.sc + self.w_size])
#print(ctxt)
return ctxt
def next_observation(self):
#def _next_state(self):
"""This method reads |D_k| contexts to form a D_k sample in a step, and
returns a matrix whose rows are \psi information signals of each context.
args: no arguments
rets: tuple(list of string contexts S_t, numpy matrix of \psi signals)
"""
D_k = []
#S_k = []
for _ in range(self.sample_size):
context = self._read_next_context()
D_k.append((context, self.weiger.tfidf(context)))
#S_k.append(self.weiger.tfidf(context))
return D_k #, S_k
def conditioned_MI(self, X, Y, Z):
"""Compute conditioned mutual information with respect to the hypothesis
of that Y = y is the head for each triplet of the action (sample) step.
args: X: list of strings corresponding to n-grams for the hypothesis of
that X = x for each triplet of the current action step.
Y: list of strings corresponding to n-grams for the hypothesis of
that Y = y for each triplet of the current action step.
Z: list of strings corresponding to n-grams for the hypothesis of
that Z = z for each triplet of the current action step.
rets: float: The method returns the CMI.
"""
Tn = set(X).intersection(Y).intersection(Z)
Tu = set(X).union(Y).union(Z)
XnY = set(X).intersection(Y)
ZnY = set(Z).intersection(Y)
P_XYZ = len(Tn)/len(Tu)
P_XZgY = len(Tn)/len(Y)
P_XgY = len(XnY) / len(Y)
P_ZgY = len(ZnY) / len(Y)
I_XZgY = P_XYZ * np.log(P_XZgY/(P_XgY * P_ZgY))
return I_XZgY
def mutual_info(self, X, Y, Z):
"""Compute mutual information between the hypotheses of that X = x and
Z = z for each triplet of the action (sample) step. The method needs the
whole triplets (X, Y, Z) to compute X, Y probabilities within the sample.
args: X: list of strings corresponding to n-grams for the hypothesis of
that X = x for each triplet of the current action step.
Y: list of strings corresponding to n-grams for the hypothesis of
that Y = y for each triplet of the current action step.
Z: list of strings corresponding to n-grams for the hypothesis of
that Z = z for each triplet of the current action step.
rets: float: The method returns the CMI.
"""
Tu = set(X).union(Y).union(Z)
XnZ = set(X).intersection(Z)
P_XZ = len(XnZ)/len(Tu)
P_X = len(X)/len(Tu)
P_Z = len(Z)/len(Tu)
I_XZ = P_XZ * np.log(P_XZ/(P_X * P_Z))
return I_XZ
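    # Worked example (illustrative, not from the original code): with
    # X = ['a', 'b', 'c'], Y = ['b', 'c', 'd'] and Z = ['c', 'd', 'e'] the sets give
    # |X∩Y∩Z| = 1, |X∪Y∪Z| = 5, |X∩Y| = |Z∩Y| = 2 and |Y| = 3, so
    # conditioned_MI: 0.2 * log((1/3) / (2/3 * 2/3)) = 0.2 * log(0.75) ≈ -0.058
    # mutual_info:    0.2 * log(0.2 / (0.6 * 0.6))   = 0.2 * log(5/9)  ≈ -0.118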
def _interpret_action(self, action):
"""Actions 'a' from a sample constitute a step 'action', where
args: action: list of dicts [a1, a2,...]\equiv [{Y: list(w1, w2,...),
X: list(w1, w2,...),
Z: list(w1, w2,...)}, ]
rets: float: semanticity, and updating of reward domain via
self.I_XZgY and self.I_XZ
"""
self.Ak = | pd.DataFrame(action) | pandas.DataFrame |
# ----------------------------------------------------------------------------
# Copyright (c) 2016-2020, QIIME 2 development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file LICENSE, distributed with this software.
# ----------------------------------------------------------------------------
import os.path
import unittest
import pandas as pd
import pandas.io.common
import biom
import skbio
import qiime2
from pandas.util.testing import assert_frame_equal, assert_series_equal
from q2_types.feature_table import BIOMV210Format
from q2_types.feature_data import (
TaxonomyFormat, HeaderlessTSVTaxonomyFormat, TSVTaxonomyFormat,
DNAFASTAFormat, DNAIterator, PairedDNAIterator,
PairedDNASequencesDirectoryFormat, AlignedDNAFASTAFormat,
DifferentialFormat, AlignedDNAIterator
)
from q2_types.feature_data._transformer import (
_taxonomy_formats_to_dataframe, _dataframe_to_tsv_taxonomy_format)
from qiime2.plugin.testing import TestPluginBase
# NOTE: these tests are fairly high-level and mainly test the transformer
# interfaces for the three taxonomy file formats. More in-depth testing for
# border cases, errors, etc. are in `TestTaxonomyFormatsToDataFrame` and
# `TestDataFrameToTSVTaxonomyFormat` below, which test the lower-level helper
# functions utilized by the transformers.
class TestTaxonomyFormatTransformers(TestPluginBase):
package = 'q2_types.feature_data.tests'
def test_taxonomy_format_to_dataframe_with_header(self):
index = pd.Index(['seq1', 'seq2'], name='Feature ID', dtype=object)
exp = pd.DataFrame([['k__Foo; p__Bar', '-1.0'],
['k__Foo; p__Baz', '-42.0']], index=index,
columns=['Taxon', 'Confidence'], dtype=object)
_, obs = self.transform_format(
TaxonomyFormat, pd.DataFrame,
filename=os.path.join('taxonomy', '3-column.tsv'))
assert_frame_equal(obs, exp)
def test_taxonomy_format_to_dataframe_without_header(self):
# Bug identified in https://github.com/qiime2/q2-types/issues/107
index = pd.Index(['seq1', 'seq2'], name='Feature ID', dtype=object)
columns = ['Taxon', 'Unnamed Column 1', 'Unnamed Column 2']
exp = pd.DataFrame([['k__Foo; p__Bar', 'some', 'another'],
['k__Foo; p__Baz', 'column', 'column!']],
index=index, columns=columns, dtype=object)
_, obs = self.transform_format(
TaxonomyFormat, pd.DataFrame,
filename=os.path.join('taxonomy', 'headerless.tsv'))
assert_frame_equal(obs, exp)
def test_taxonomy_format_to_series_with_header(self):
index = pd.Index(['seq1', 'seq2'], name='Feature ID', dtype=object)
exp = pd.Series(['k__Foo; p__Bar', 'k__Foo; p__Baz'], index=index,
name='Taxon', dtype=object)
_, obs = self.transform_format(
TaxonomyFormat, pd.Series,
filename=os.path.join('taxonomy', '3-column.tsv'))
assert_series_equal(obs, exp)
def test_taxonomy_format_to_series_without_header(self):
# Bug identified in https://github.com/qiime2/q2-types/issues/107
index = pd.Index(['seq1', 'seq2'], name='Feature ID', dtype=object)
exp = pd.Series(['k__Foo; p__Bar', 'k__Foo; p__Baz'], index=index,
name='Taxon', dtype=object)
_, obs = self.transform_format(
TaxonomyFormat, pd.Series,
filename=os.path.join('taxonomy', 'headerless.tsv'))
assert_series_equal(obs, exp)
def test_headerless_tsv_taxonomy_format_to_tsv_taxonomy_format(self):
exp = (
'Feature ID\tTaxon\tUnnamed Column 1\tUnnamed Column 2\n'
'seq1\tk__Foo; p__Bar\tsome\tanother\n'
'seq2\tk__Foo; p__Baz\tcolumn\tcolumn!\n'
)
_, obs = self.transform_format(
HeaderlessTSVTaxonomyFormat, TSVTaxonomyFormat,
filename=os.path.join('taxonomy', 'headerless.tsv'))
with obs.open() as fh:
self.assertEqual(fh.read(), exp)
def test_tsv_taxonomy_format_to_dataframe(self):
index = pd.Index(['seq1', 'seq2'], name='Feature ID', dtype=object)
exp = pd.DataFrame([['k__Foo; p__Bar', '-1.0'],
['k__Foo; p__Baz', '-42.0']], index=index,
columns=['Taxon', 'Confidence'], dtype=object)
_, obs = self.transform_format(
TSVTaxonomyFormat, pd.DataFrame,
filename=os.path.join('taxonomy', '3-column.tsv'))
assert_frame_equal(obs, exp)
def test_tsv_taxonomy_format_to_series(self):
index = pd.Index(['seq1', 'seq2'], name='Feature ID', dtype=object)
exp = pd.Series(['k__Foo; p__Bar', 'k__Foo; p__Baz'], index=index,
name='Taxon', dtype=object)
_, obs = self.transform_format(
TSVTaxonomyFormat, pd.Series,
filename=os.path.join('taxonomy', '3-column.tsv'))
assert_series_equal(obs, exp)
def test_dataframe_to_tsv_taxonomy_format(self):
index = pd.Index(['seq1', 'seq2'], name='Feature ID', dtype=object)
columns = ['Taxon', 'Foo', 'Bar']
df = pd.DataFrame([['taxon1', '42', 'foo'], ['taxon2', '43', 'bar']],
index=index, columns=columns, dtype=object)
exp = (
'Feature ID\tTaxon\tFoo\tBar\n'
'seq1\ttaxon1\t42\tfoo\n'
'seq2\ttaxon2\t43\tbar\n'
)
transformer = self.get_transformer(pd.DataFrame, TSVTaxonomyFormat)
obs = transformer(df)
with obs.open() as fh:
self.assertEqual(fh.read(), exp)
def test_series_to_tsv_taxonomy_format(self):
index = pd.Index(['emrakul', 'peanut'], name='Feature ID',
dtype=object)
series = pd.Series(['taxon1', 'taxon2'],
index=index, name='Taxon', dtype=object)
exp = (
'Feature ID\tTaxon\n'
'emrakul\ttaxon1\n'
'peanut\ttaxon2\n'
)
transformer = self.get_transformer(pd.Series, TSVTaxonomyFormat)
obs = transformer(series)
with obs.open() as fh:
self.assertEqual(fh.read(), exp)
def test_biom_table_to_tsv_taxonomy_format(self):
filepath = self.get_data_path(
os.path.join('taxonomy',
'feature-table-with-taxonomy-metadata_v210.biom'))
table = biom.load_table(filepath)
transformer = self.get_transformer(biom.Table, TSVTaxonomyFormat)
obs = transformer(table)
self.assertIsInstance(obs, TSVTaxonomyFormat)
self.assertEqual(
obs.path.read_text(),
'Feature ID\tTaxon\nO0\ta; b\nO1\ta; b\nO2\ta; b\nO3\ta; b\n')
def test_biom_table_to_tsv_taxonomy_format_no_taxonomy_md(self):
filepath = self.get_data_path(
os.path.join('taxonomy',
'feature-table-with-taxonomy-metadata_v210.biom'))
table = biom.load_table(filepath)
observation_metadata = [dict(taxon=['a', 'b']) for _ in range(4)]
table = biom.Table(table.matrix_data,
observation_ids=table.ids(axis='observation'),
sample_ids=table.ids(axis='sample'),
observation_metadata=observation_metadata)
transformer = self.get_transformer(biom.Table, TSVTaxonomyFormat)
with self.assertRaisesRegex(ValueError,
'O0 does not contain `taxonomy`'):
transformer(table)
def test_biom_table_to_tsv_taxonomy_format_missing_md(self):
filepath = self.get_data_path(
os.path.join('taxonomy',
'feature-table-with-taxonomy-metadata_v210.biom'))
table = biom.load_table(filepath)
observation_metadata = [dict(taxonomy=['a', 'b']) for _ in range(4)]
observation_metadata[2]['taxonomy'] = None # Wipe out one entry
table = biom.Table(table.matrix_data,
observation_ids=table.ids(axis='observation'),
sample_ids=table.ids(axis='sample'),
observation_metadata=observation_metadata)
transformer = self.get_transformer(biom.Table, TSVTaxonomyFormat)
with self.assertRaisesRegex(TypeError, 'problem preparing.*O2'):
transformer(table)
def test_biom_v210_format_to_tsv_taxonomy_format(self):
filename = os.path.join(
'taxonomy', 'feature-table-with-taxonomy-metadata_v210.biom')
_, obs = self.transform_format(BIOMV210Format, TSVTaxonomyFormat,
filename=filename)
self.assertIsInstance(obs, TSVTaxonomyFormat)
self.assertEqual(
obs.path.read_text(),
'Feature ID\tTaxon\nO0\ta; b\nO1\ta; b\nO2\ta; b\nO3\ta; b\n')
def test_biom_v210_format_no_md_to_tsv_taxonomy_format(self):
with self.assertRaisesRegex(TypeError, 'observation metadata'):
self.transform_format(
BIOMV210Format, TSVTaxonomyFormat,
filename=os.path.join('taxonomy', 'feature-table_v210.biom'))
def test_taxonomy_format_with_header_to_metadata(self):
_, obs = self.transform_format(TaxonomyFormat, qiime2.Metadata,
os.path.join('taxonomy',
'3-column.tsv'))
index = pd.Index(['seq1', 'seq2'], name='Feature ID', dtype=object)
exp_df = pd.DataFrame([['k__Foo; p__Bar', '-1.0'],
['k__Foo; p__Baz', '-42.0']], index=index,
columns=['Taxon', 'Confidence'], dtype=object)
exp = qiime2.Metadata(exp_df)
self.assertEqual(exp, obs)
def test_taxonomy_format_without_header_to_metadata(self):
_, obs = self.transform_format(TaxonomyFormat, qiime2.Metadata,
os.path.join('taxonomy',
'headerless.tsv'))
index = pd.Index(['seq1', 'seq2'], name='Feature ID', dtype=object)
columns = ['Taxon', 'Unnamed Column 1', 'Unnamed Column 2']
exp_df = pd.DataFrame([['k__Foo; p__Bar', 'some', 'another'],
['k__Foo; p__Baz', 'column', 'column!']],
index=index, columns=columns, dtype=object)
exp = qiime2.Metadata(exp_df)
self.assertEqual(exp, obs)
def test_tsv_taxonomy_format_to_metadata(self):
_, obs = self.transform_format(TSVTaxonomyFormat, qiime2.Metadata,
os.path.join('taxonomy',
'3-column.tsv'))
index = pd.Index(['seq1', 'seq2'], name='Feature ID', dtype=object)
exp_df = pd.DataFrame([['k__Foo; p__Bar', '-1.0'],
['k__Foo; p__Baz', '-42.0']], index=index,
columns=['Taxon', 'Confidence'], dtype=object)
exp = qiime2.Metadata(exp_df)
self.assertEqual(exp, obs)
def test_tsv_taxonomy_to_metadata_trailing_whitespace_taxon(self):
_, obs = self.transform_format(TSVTaxonomyFormat, qiime2.Metadata,
os.path.join(
'taxonomy',
'trailing_space_taxon.tsv'))
index = pd.Index(['seq1'], name='Feature ID', dtype=object)
exp_df = pd.DataFrame([['k__Foo; p__Bar', '-1.0']], index=index,
columns=['Taxon', 'Confidence'], dtype=object)
exp = qiime2.Metadata(exp_df)
self.assertEqual(exp, obs)
def test_tsv_taxonomy_to_metadata_leading_whitespace_taxon(self):
_, obs = self.transform_format(TSVTaxonomyFormat, qiime2.Metadata,
os.path.join(
'taxonomy',
'leading_space_taxon.tsv'))
index = pd.Index(['seq1'], name='Feature ID', dtype=object)
exp_df = pd.DataFrame([['k__Foo; p__Bar', '-1.0']], index=index,
columns=['Taxon', 'Confidence'], dtype=object)
exp = qiime2.Metadata(exp_df)
self.assertEqual(exp, obs)
def test_tsv_taxonomy_to_metadata_trailing_leading_whitespace_taxon(self):
_, obs = self.transform_format(TSVTaxonomyFormat, qiime2.Metadata,
os.path.join(
'taxonomy',
'start_end_space_taxon.tsv'))
index = pd.Index(['seq1'], name='Feature ID', dtype=object)
exp_df = pd.DataFrame([['k__Foo; p__Bar', '-1.0']], index=index,
columns=['Taxon', 'Confidence'], dtype=object)
exp = qiime2.Metadata(exp_df)
self.assertEqual(exp, obs)
# In-depth testing of the `_taxonomy_formats_to_dataframe` helper function,
# which does the heavy lifting for the transformers.
class TestTaxonomyFormatsToDataFrame(TestPluginBase):
package = 'q2_types.feature_data.tests'
def test_one_column(self):
with self.assertRaisesRegex(ValueError, "two columns, found 1"):
_taxonomy_formats_to_dataframe(
self.get_data_path(os.path.join('taxonomy', '1-column.tsv')))
def test_blanks(self):
with self.assertRaises(pandas.io.common.EmptyDataError):
_taxonomy_formats_to_dataframe(
self.get_data_path(os.path.join('taxonomy',
'blanks')))
def test_empty(self):
with self.assertRaises(pandas.io.common.EmptyDataError):
_taxonomy_formats_to_dataframe(
self.get_data_path(os.path.join('taxonomy', 'empty')))
def test_header_only(self):
with self.assertRaisesRegex(ValueError, 'one row of data'):
_taxonomy_formats_to_dataframe(
self.get_data_path(os.path.join('taxonomy',
'header-only.tsv')))
def test_has_header_with_headerless(self):
with self.assertRaisesRegex(ValueError, 'requires a header'):
_taxonomy_formats_to_dataframe(
self.get_data_path(os.path.join('taxonomy', 'headerless.tsv')),
has_header=True)
def test_jagged(self):
with self.assertRaises(pandas.io.common.ParserError):
_taxonomy_formats_to_dataframe(
self.get_data_path(os.path.join('taxonomy', 'jagged.tsv')))
def test_duplicate_ids(self):
with self.assertRaisesRegex(ValueError, 'duplicated: SEQUENCE1'):
_taxonomy_formats_to_dataframe(
self.get_data_path(os.path.join(
'taxonomy', 'duplicate-ids.tsv')))
def test_duplicate_columns(self):
with self.assertRaisesRegex(ValueError, 'duplicated: Column1'):
_taxonomy_formats_to_dataframe(
self.get_data_path(os.path.join(
'taxonomy', 'duplicate-columns.tsv')))
def test_2_columns(self):
index = pd.Index(['seq1', 'seq2'], name='Feature ID', dtype=object)
exp = pd.DataFrame([['k__Bacteria; p__Proteobacteria'],
['k__Bacteria']], index=index, columns=['Taxon'],
dtype=object)
# has_header=None (default)
obs = _taxonomy_formats_to_dataframe(
self.get_data_path(os.path.join('taxonomy', '2-column.tsv')))
assert_frame_equal(obs, exp)
# has_header=True
obs = _taxonomy_formats_to_dataframe(
self.get_data_path(os.path.join('taxonomy', '2-column.tsv')),
has_header=True)
assert_frame_equal(obs, exp)
def test_3_columns(self):
index = pd.Index(['seq1', 'seq2'], name='Feature ID', dtype=object)
exp = pd.DataFrame([['k__Foo; p__Bar', '-1.0'],
['k__Foo; p__Baz', '-42.0']], index=index,
columns=['Taxon', 'Confidence'], dtype=object)
# has_header=None (default)
obs = _taxonomy_formats_to_dataframe(
self.get_data_path(os.path.join('taxonomy', '3-column.tsv')))
assert_frame_equal(obs, exp)
# has_header=True
obs = _taxonomy_formats_to_dataframe(
self.get_data_path(os.path.join('taxonomy', '3-column.tsv')),
has_header=True)
assert_frame_equal(obs, exp)
def test_valid_but_messy_file(self):
index = pd.Index(
['SEQUENCE1', 'seq2'], name='Feature ID', dtype=object)
exp = pd.DataFrame([['k__Bar; p__Baz', 'foo'],
['some; taxonomy; for; ya', 'bar baz']],
index=index, columns=['Taxon', 'Extra Column'],
dtype=object)
# has_header=None (default)
obs = _taxonomy_formats_to_dataframe(
self.get_data_path(os.path.join('taxonomy',
'valid-but-messy.tsv')))
assert_frame_equal(obs, exp)
# has_header=True
obs = _taxonomy_formats_to_dataframe(
self.get_data_path(os.path.join('taxonomy',
'valid-but-messy.tsv')),
has_header=True)
assert_frame_equal(obs, exp)
def test_headerless(self):
index = pd.Index(['seq1', 'seq2'], name='Feature ID', dtype=object)
columns = ['Taxon', 'Unnamed Column 1', 'Unnamed Column 2']
exp = pd.DataFrame([['k__Foo; p__Bar', 'some', 'another'],
['k__Foo; p__Baz', 'column', 'column!']],
index=index, columns=columns, dtype=object)
# has_header=None (default)
obs = _taxonomy_formats_to_dataframe(
self.get_data_path(os.path.join('taxonomy',
'headerless.tsv')))
assert_frame_equal(obs, exp)
# has_header=False
obs = _taxonomy_formats_to_dataframe(
self.get_data_path(os.path.join('taxonomy',
'headerless.tsv')),
has_header=False)
assert_frame_equal(obs, exp)
# In-depth testing of the `_dataframe_to_tsv_taxonomy_format` helper function,
# which does the heavy lifting for the transformers.
class TestDataFrameToTSVTaxonomyFormat(TestPluginBase):
package = 'q2_types.feature_data.tests'
def test_no_rows(self):
index = | pd.Index([], name='Feature ID', dtype=object) | pandas.Index |
import torch
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
from collections import defaultdict
def mean_of_attention_heads(matrix, out_dim):
chunks = torch.split(matrix, out_dim, dim=1)
return torch.mean(torch.stack(chunks), dim=0)
def latent_dim_participation_in_clusters(latent_data, labels):
latent_diff = np.zeros(shape=(latent_data.shape[1], len(set(labels)) + 1))
for l_dim in range(latent_data.shape[1]):
cells_in_dim = latent_data[:, l_dim]
l_dim_mean = np.mean(cells_in_dim)
l_dim_std = np.std(cells_in_dim)
variable_cells_larger = np.where(cells_in_dim > l_dim_mean + l_dim_std)
variable_cells_smaller = np.where(cells_in_dim < l_dim_mean - l_dim_std)
labels_larger = labels[variable_cells_larger]
labels_smaller = labels[variable_cells_smaller]
variable_labels = np.concatenate((labels_larger, labels_smaller), axis=None)
cluster_count = {x: list(variable_labels).count(x) for x in labels}
counter_per_cluster = np.array(list(cluster_count.values())) / len(variable_labels)
counter_per_cluster = np.around(counter_per_cluster * 100.0, decimals=2)
latent_diff[l_dim][1:] = counter_per_cluster
latent_diff[l_dim][0] = int(l_dim)
cluster_label = [str(i) for i in np.unique(labels)]
latent_diff = | pd.DataFrame(latent_diff, columns=['Latent dimension'] + cluster_label) | pandas.DataFrame |
import pandas as pd
import numpy as np
from pathlib import Path
from sklearn.utils.extmath import cartesian
from itertools import product
from sklearn import preprocessing
import gc
class contest(object):
__preferredColumnOrder = ['item_id','shop_id','date_block_num','quarter','half','year','item_category_id','new_item','new_shop_item',
'mode_item_price_month','min_item_price_month','max_item_price_month','mean_item_price_month',
'mean_item_category_price_month','min_item_category_price_month','max_item_category_price_month', 'mode_item_category_price_month']
def __init__(self, trainDataFile, testDataFile, itemDataFile, categoryDataFile):
#validate that files were passed in and exist at location provided by caller
if (not trainDataFile) | (not testDataFile) | (not itemDataFile) | (not categoryDataFile):
raise RuntimeError('file locations must be provided for train, test, items, and category data.')
for i,x in [[trainDataFile,'Train'], [testDataFile,'Test'], [itemDataFile, 'Item'], [categoryDataFile, 'Category']]:
i = str(i).replace('\\','/').strip()
if not Path(i).is_file():
                raise RuntimeError('%s data file specified [%s] does not exist.' % (x, i))
if x == 'Train':
self.__orig_trainDataFile = i
elif x == 'Test':
self.__orig_testDataFile = i
elif x == 'Item':
self.__orig_itemDataFile = i
else:
self.__orig_categoryDataFile = i
self.__out_trainDataFile = self.__outputFile(self.__orig_trainDataFile, 'pp_data_')
self.__out_trainLabelsFile = self.__outputFile(self.__orig_trainDataFile, 'pp_labels_')
self.__out_testDataFile = self.__outputFile(self.__orig_testDataFile, 'pp_')
self.__out_validateTrainDataFile = self.__outputFile(self.__orig_trainDataFile, 'val_train_data_')
self.__out_validateTrainLabelsFile = self.__outputFile(self.__orig_trainDataFile, 'val_train_labels_')
self.__out_validateTestDataFile = self.__outputFile(self.__orig_trainDataFile, 'val_test_data_')
self.__out_validateTestLabelsFile = self.__outputFile(self.__orig_trainDataFile, 'val_test_labels_')
def __outputFile(self, inFile, prefix):
x = inFile.split('/')
x[len(x) - 1] = prefix + x[len(x) - 1]
x = "/".join(x)
return x
def __downcast(self, df):
#reduce all float and int 64 values down to 32-bit to save memory
floats = [c for c in df if df[c].dtype == 'float64']
ints = [c for c in df if df[c].dtype == 'int64']
df[floats] = df[floats].astype(np.float32)
df[ints] = df[ints].astype(np.int32)
return df
def __openFilePrepared(self, fileName):
#open all files with no pre-specified index; downcast numeric data from 64 to 32-bit
df = | pd.read_csv(fileName, index_col=False) | pandas.read_csv |
'''
Support functions for 2nd-level feature engineering
'''
import pandas as pd
import pycocotools.mask as mask_util
from tqdm import tqdm
from sklearn.neighbors import KDTree
import numpy as np
import cv2
def calculate_max_IOU_with_gt(targ, pred):
'''Calculate IOU between predicted instances and target instances'''
pred_masks = pred['instances'].pred_masks >= 0.5
enc_preds = [mask_util.encode(np.asarray(p, order='F')) for p in pred_masks]
enc_targs = list(map(lambda x:x['segmentation'], targ))
ious = mask_util.iou(enc_preds, enc_targs, [0]*len(enc_targs))
return ious.max(axis=1)
def print_log(log):
for k in log.keys():
print(k, log[k])
def get_overlapping_features(pred):
'''Compute features representing overlapping characteristics of each instance'''
pred_masks = pred['instances'].pred_masks >= 0.5
enc_preds = [mask_util.encode(np.asarray(p, order='F')) for p in pred_masks]
ious = mask_util.iou(enc_preds, enc_preds, [0]*len(enc_preds))
return ious.max(axis=1), ious.min(axis=1), ious.mean(axis=1),\
ious.std(axis=1), (ious > 0).sum(axis=1)
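# Minimal sketch of the pairwise-IoU computation used above, on two hand-made
# binary masks (no detectron2 prediction object required); values are arbitrary.
def _demo_pairwise_iou():
    m1 = np.zeros((8, 8), dtype=np.uint8); m1[2:6, 2:6] = 1   # 4x4 square
    m2 = np.zeros((8, 8), dtype=np.uint8); m2[4:8, 4:8] = 1   # overlapping square
    enc = [mask_util.encode(np.asfortranarray(m)) for m in (m1, m2)]
    ious = mask_util.iou(enc, enc, [0, 0])
    # the diagonal is 1.0 (self-IoU); off-diagonal entries measure the overlap
    return ious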
def get_contour_features(pred):
'''Get some morphology features'''
masks = (pred['instances'].pred_masks.numpy() >= 0.5).astype('uint8')
data_dict = {
'centroid_x':[],
'centroid_y':[],
'num_contours': [],
'equi_diameter':[],
'hull_area':[],
'solidity':[],
'is_convex':[],
'perimeter':[],
'rotation_ang':[],
'major_axis_length':[],
'minor_axis_length':[]
}
for mask in masks:
contours, _ = cv2.findContours(mask, 1, 2)
areas = [cv2.contourArea(cnt) for cnt in contours]
max_ind = np.argmax(areas)
area = areas[max_ind]
cnt = contours[max_ind]
M = cv2.moments(cnt)
cx = int(M['m10']/M['m00'])
cy = int(M['m01']/M['m00'])
hull = cv2.convexHull(cnt)
hull_area = cv2.contourArea(hull)
solidity = float(area)/hull_area if hull_area > 0 else -1
equi_diameter = np.sqrt(4*area/np.pi)
is_convex = int(cv2.isContourConvex(cnt))
perimeter = cv2.arcLength(cnt,True)
try:
ellipse = cv2.fitEllipse(cnt)
_,(major_axis_length, minor_axis_length), rotation_ang = ellipse
except:
(major_axis_length, minor_axis_length), rotation_ang = (-1,-1),-1
data_dict['centroid_x'].append(cx)
data_dict['centroid_y'].append(cy)
data_dict['num_contours'].append(len(contours))
data_dict['equi_diameter'].append(equi_diameter)
data_dict['solidity'].append(solidity)
data_dict['hull_area'].append(hull_area)
data_dict['is_convex'].append(is_convex)
data_dict['perimeter'].append(perimeter)
data_dict['rotation_ang'].append(rotation_ang)
data_dict['major_axis_length'].append(major_axis_length)
data_dict['minor_axis_length'].append(minor_axis_length)
return pd.DataFrame(data_dict)
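# Small illustrative check of the OpenCV calls used above, run on a synthetic
# 20x20 mask containing one filled square (numbers are arbitrary).
def _demo_contour_metrics():
    mask = np.zeros((20, 20), dtype='uint8')
    mask[5:15, 5:15] = 1
    contours, _ = cv2.findContours(mask, 1, 2)
    cnt = contours[0]
    area = cv2.contourArea(cnt)
    hull_area = cv2.contourArea(cv2.convexHull(cnt))
    solidity = area / hull_area if hull_area > 0 else -1
    return {'area': area, 'solidity': solidity}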
def get_pixel_scores_features(outputs):
'''Get features related to mask scores at pixel level'''
pred_masks = outputs['instances'].pred_masks
pred_masks_non_zeros = [mask[mask > 0] for mask in pred_masks]
min_pscores = [mask.min().item() for mask in pred_masks_non_zeros]
max_pscores = [mask.max().item() for mask in pred_masks_non_zeros]
median_pscores = [mask.median().item() for mask in pred_masks_non_zeros]
mean_pscores = [mask.mean().item() for mask in pred_masks_non_zeros]
q1_pscores = [mask.quantile(0.25).item() for mask in pred_masks_non_zeros]
q3_pscores = [mask.quantile(0.75).item() for mask in pred_masks_non_zeros]
std_pscores = [mask.std().item() for mask in pred_masks_non_zeros]
ret = {
'min_pixel_score':min_pscores,
'max_pixel_score':max_pscores,
'median_pixel_score':median_pscores,
'mean_pixel_score':mean_pscores,
'q1_pixel_score':q1_pscores,
'q3_pixel_score':q3_pscores,
'std_pixel_score':std_pscores
}
return pd.DataFrame(ret)
def get_image_pixel_features(im, outputs):
'''Get features related to pixels on the original images'''
pred_masks = outputs['instances'].pred_masks
pred_masks_binary = [mask > 0.5 for mask in pred_masks]
im_masks = [im[mask,0] for mask in pred_masks_binary]
min_pscores = [mask.min().item() for mask in im_masks]
max_pscores = [mask.max().item() for mask in im_masks]
median_pscores = [np.median(mask).item() for mask in im_masks]
mean_pscores = [mask.mean().item() for mask in im_masks]
q1_pscores = [np.quantile(mask, 0.25).item() for mask in im_masks]
q3_pscores = [np.quantile(mask, 0.75) for mask in im_masks]
std_pscores = [mask.std() for mask in im_masks]
ret = {
'im_min_pixel':min_pscores,
'im_max_pixel':max_pscores,
'im_median_pixel':median_pscores,
'im_mean_pixel':mean_pscores,
'im_q1_pixel':q1_pscores,
'im_q3_pixel':q3_pscores,
'im_std_pixel':std_pscores
}
return pd.DataFrame(ret)
def get_kdtree_nb_features(single_features):
'''Get features related to neighboring relation ship determine by distance'''
cols = ['centroid_x', 'centroid_y']
X = single_features[cols]
tree = KDTree(X)
ret = dict()
for r in [25, 50, 75, 100, 150, 200]:
ind, dist = tree.query_radius(X, r=r, return_distance=True, sort_results=True)
        ind = [i[1:] for i in ind] # exclude nearest neighbor (itself)
        dist = [d[1:] for d in dist] # exclude nearest neighbor (itself)
        ret[f'kdtree_nb_r{r}_count'] = [len(i) for i in ind]
ret[f'kdtree_nb_r{r}_median_dist'] = [np.median(d) if len(d)>0 else -1 for d in dist]
ret[f'kdtree_nb_r{r}_mean_dist'] = [d.mean() if len(d)>0 else -1 for d in dist]
ret[f'kdtree_nb_r{r}_std_dist'] = [np.std(d) if len(d)>0 else -1 for d in dist]
ret[f'kdtree_nb_r{r}_median_area'] = [single_features.loc[i, 'mask_area'].median() if len(i)>0 else -1 for i in ind]
ret[f'kdtree_nb_r{r}_mean_area'] = [single_features.loc[i, 'mask_area'].mean() if len(i)>0 else -1 for i in ind]
ret[f'kdtree_nb_r{r}_std_area'] = [single_features.loc[i, 'mask_area'].std() if len(i)>0 else -1 for i in ind]
ret[f'kdtree_nb_r{r}_median_box_score'] = [single_features.loc[i, 'box_score'].median() if len(i)>0 else -1 for i in ind]
ret[f'kdtree_nb_r{r}_mean_box_score'] = [single_features.loc[i, 'box_score'].mean() if len(i)>0 else -1 for i in ind]
ret[f'kdtree_nb_r{r}_std_box_score'] = [single_features.loc[i, 'box_score'].std() if len(i)>0 else -1 for i in ind]
for k in [2,3,5,7]:
dist, ind = tree.query(X, k=k, return_distance=True)
        ind = [i[1:] for i in ind] # exclude nearest neighbor (itself)
        dist = [d[1:] for d in dist] # exclude nearest neighbor (itself)
ret[f'kdtree_nb_top{k}_median_dist'] = [np.median(d) if len(d)>0 else -1 for d in dist]
ret[f'kdtree_nb_top{k}_mean_dist'] = [d.mean() if len(d)>0 else -1 for d in dist]
ret[f'kdtree_nb_top{k}_std_dist'] = [np.std(d) if len(d)>0 else -1 for d in dist]
ret[f'kdtree_nb_top{k}_median_area'] = [single_features.loc[i, 'mask_area'].median() if len(i)>0 else -1 for i in ind]
ret[f'kdtree_nb_top{k}_mean_area'] = [single_features.loc[i, 'mask_area'].mean() if len(i)>0 else -1 for i in ind]
ret[f'kdtree_nb_top{k}_std_area'] = [single_features.loc[i, 'mask_area'].std() if len(i)>0 else -1 for i in ind]
ret[f'kdtree_nb_top{k}_median_box_score'] = [single_features.loc[i, 'box_score'].median() if len(i)>0 else -1 for i in ind]
ret[f'kdtree_nb_top{k}_mean_box_score'] = [single_features.loc[i, 'box_score'].mean() if len(i)>0 else -1 for i in ind]
ret[f'kdtree_nb_top{k}_std_box_score'] = [single_features.loc[i, 'box_score'].std() if len(i)>0 else -1 for i in ind]
return pd.DataFrame(ret)
def get_features(im, outputs):
'''Master function for generating features'''
pred_masks = outputs['instances'].pred_masks
mask_areas = (pred_masks >= 0.5).sum(axis=(1,2))
pred_boxes = outputs['instances'].pred_boxes.tensor
widths = pred_boxes[:,2] - pred_boxes[:,0]
heights = pred_boxes[:,3] - pred_boxes[:,1]
box_areas = widths * heights
box_scores = outputs['instances'].scores
instance_count = len(outputs['instances'])
aspect_ratios = widths / heights
extents = mask_areas / box_areas
neighbor_iou_max, neighbor_iou_min, neighbor_iou_mean, \
neighbor_iou_std, neighbor_overlap_count = get_overlapping_features(outputs)
contour_features = get_contour_features(outputs)
pixel_features = get_pixel_scores_features(outputs)
im_pixel_features = get_image_pixel_features(im, outputs)
ret = pd.DataFrame({
'box_score':box_scores,
'mask_area':mask_areas,
'box_area':box_areas,
'box_x1':pred_boxes[:,0],
'box_y1':pred_boxes[:,1],
'box_x2':pred_boxes[:,2],
'box_y2':pred_boxes[:,3],
'width':widths,
'height':heights,
'instance_count':instance_count,
'neighbor_iou_max':neighbor_iou_max,
'neighbor_iou_min':neighbor_iou_min,
'neighbor_iou_mean':neighbor_iou_mean,
'neighbor_iou_std':neighbor_iou_std,
'neighbor_overlap_count':neighbor_overlap_count,
'aspect_ratio':aspect_ratios,
'extent':extents
})
ret = pd.concat([ret, contour_features, pixel_features, im_pixel_features], axis=1)
kdtree_nb_features = get_kdtree_nb_features(ret)
ret = | pd.concat([ret, kdtree_nb_features], axis=1) | pandas.concat |
"""Tests for ExtensionDtype Table Schema integration."""
from collections import OrderedDict
import datetime as dt
import decimal
import json
import pytest
from pandas import (
DataFrame,
array,
)
from pandas.core.arrays.integer import Int64Dtype
from pandas.core.arrays.string_ import StringDtype
from pandas.core.series import Series
from pandas.tests.extension.date import (
DateArray,
DateDtype,
)
from pandas.tests.extension.decimal.array import (
DecimalArray,
DecimalDtype,
)
from pandas.io.json._table_schema import (
as_json_table_type,
build_table_schema,
)
class TestBuildSchema:
def test_build_table_schema(self):
df = DataFrame(
{
"A": DateArray([dt.date(2021, 10, 10)]),
"B": DecimalArray([decimal.Decimal(10)]),
"C": array(["pandas"], dtype="string"),
"D": array([10], dtype="Int64"),
}
)
result = build_table_schema(df, version=False)
expected = {
"fields": [
{"name": "index", "type": "integer"},
{"name": "A", "type": "any", "extDtype": "DateDtype"},
{"name": "B", "type": "any", "extDtype": "decimal"},
{"name": "C", "type": "any", "extDtype": "string"},
{"name": "D", "type": "integer", "extDtype": "Int64"},
],
"primaryKey": ["index"],
}
assert result == expected
result = build_table_schema(df)
assert "pandas_version" in result
class TestTableSchemaType:
@pytest.mark.parametrize(
"date_data",
[
DateArray([dt.date(2021, 10, 10)]),
DateArray(dt.date(2021, 10, 10)),
Series(DateArray(dt.date(2021, 10, 10))),
],
)
def test_as_json_table_type_ext_date_array_dtype(self, date_data):
assert as_json_table_type(date_data.dtype) == "any"
def test_as_json_table_type_ext_date_dtype(self):
assert as_json_table_type(DateDtype()) == "any"
@pytest.mark.parametrize(
"decimal_data",
[
DecimalArray([decimal.Decimal(10)]),
Series(DecimalArray([decimal.Decimal(10)])),
],
)
def test_as_json_table_type_ext_decimal_array_dtype(self, decimal_data):
assert as_json_table_type(decimal_data.dtype) == "any"
def test_as_json_table_type_ext_decimal_dtype(self):
assert as_json_table_type(DecimalDtype()) == "any"
@pytest.mark.parametrize(
"string_data",
[
array(["pandas"], dtype="string"),
Series(array(["pandas"], dtype="string")),
],
)
def test_as_json_table_type_ext_string_array_dtype(self, string_data):
assert as_json_table_type(string_data.dtype) == "any"
def test_as_json_table_type_ext_string_dtype(self):
assert as_json_table_type(StringDtype()) == "any"
@pytest.mark.parametrize(
"integer_data",
[
array([10], dtype="Int64"),
Series(array([10], dtype="Int64")),
],
)
def test_as_json_table_type_ext_integer_array_dtype(self, integer_data):
assert as_json_table_type(integer_data.dtype) == "integer"
def test_as_json_table_type_ext_integer_dtype(self):
assert as_json_table_type(Int64Dtype()) == "integer"
class TestTableOrient:
def setup_method(self):
self.da = DateArray([dt.date(2021, 10, 10)])
self.dc = DecimalArray([decimal.Decimal(10)])
self.sa = array(["pandas"], dtype="string")
self.ia = array([10], dtype="Int64")
self.df = DataFrame(
{
"A": self.da,
"B": self.dc,
"C": self.sa,
"D": self.ia,
}
)
def test_build_date_series(self):
s = Series(self.da, name="a")
s.index.name = "id"
result = s.to_json(orient="table", date_format="iso")
result = json.loads(result, object_pairs_hook=OrderedDict)
assert "pandas_version" in result["schema"]
result["schema"].pop("pandas_version")
fields = [
{"name": "id", "type": "integer"},
{"name": "a", "type": "any", "extDtype": "DateDtype"},
]
schema = {"fields": fields, "primaryKey": ["id"]}
expected = OrderedDict(
[
("schema", schema),
("data", [OrderedDict([("id", 0), ("a", "2021-10-10T00:00:00.000")])]),
]
)
assert result == expected
def test_build_decimal_series(self):
s = Series(self.dc, name="a")
s.index.name = "id"
result = s.to_json(orient="table", date_format="iso")
result = json.loads(result, object_pairs_hook=OrderedDict)
assert "pandas_version" in result["schema"]
result["schema"].pop("pandas_version")
fields = [
{"name": "id", "type": "integer"},
{"name": "a", "type": "any", "extDtype": "decimal"},
]
schema = {"fields": fields, "primaryKey": ["id"]}
expected = OrderedDict(
[
("schema", schema),
("data", [OrderedDict([("id", 0), ("a", 10.0)])]),
]
)
assert result == expected
def test_build_string_series(self):
s = | Series(self.sa, name="a") | pandas.core.series.Series |
# -*- coding: utf-8 -*-
"""
Created on Tue Jan 29 11:56:51 2019
@author: btt1
Signal Tampering Problem
"""
import os
import numpy as np
import pandas as pd
import networkx as nx
import matplotlib.pyplot as plt
import pulp as plp
import time
def extract_decision_variables(network_data):
intersections = []; intersection_variables = []
    for i in range(len(network_data)):
if network_data.iloc[i]['type'] == 'intflow':
if network_data.iloc[i]['start'].isalpha():
intersections.append(network_data.iloc[i]['start'])
intersection_variables.append(network_data.iloc[i]['name'])
elif network_data.iloc[i]['end'].isalpha():
intersections.append(network_data.iloc[i]['end'])
intersection_variables.append(network_data.iloc[i]['name'])
intersections = np.unique(np.array(intersections))
intersection_variables = np.unique(np.array(intersection_variables))
ends = []; end_flow_variables = []
for i in range(len(network_data)):
if network_data.iloc[i]['type'] == 'end':
end = network_data.iloc[i]['start'].split('_')[0]
ends.append(end)
end_flow_variables.append('y'+end+'S')
ends = np.unique(np.array(ends))
end_flow_variables = np.unique(np.array(end_flow_variables))
start_nodes = []; end_nodes = []
for i in range(len(network_data)):
if network_data.iloc[i]['type'] == 'start':
node = network_data.iloc[i]['start'].split('_')[0]
start_nodes.append(node)
elif network_data.iloc[i]['type'] == 'end':
node = network_data.iloc[i]['start'].split('_')[0]
end_nodes.append(node)
start_nodes = np.unique(np.array(start_nodes))
end_nodes = np.unique(np.array(end_nodes))
return intersections, intersection_variables, ends, end_flow_variables, start_nodes, end_nodes
def create_graph_singletimestep(t, data):
g = nx.DiGraph(timestep=t)
for i in range(len(data)):
row = data.iloc[i]
start = str(row['start'])+'_{}'.format(t); end = str(row['end'])+'_{}'.format(t)
name = row['name'] + '_' + str(t)
g.add_edge(start, end, edge_type=row['type'], edge_name=name)
return g
def create_edge_betweengraphs(ts, G):
nodes = np.array(list(G.nodes()))
for t in range(ts-1):
condition = [i.split('_')[-1]==str(t) and i.split('_')[-2]=='d' for i in nodes]
start_nodes = np.extract(condition, nodes)
for each in start_nodes:
start = each
end = each.split('_')[0]+'_s_'+str(t+1)
name = 'x' + start.split('_')[0] + '_' + str(t+1)
G.add_edge(start, end, edge_type='occ', edge_name=name)
return G
def create_supergraph(ts, data, start_nodes, end_nodes):
print("\n----- Super graph -----\n")
directed_graphs = [];
print("\tCreating individual timestep graphs...")
for t in range(ts):
g = create_graph_singletimestep(t, data)
directed_graphs.append(g)
G = nx.DiGraph(name='SuperGraph')
print("\tCreating Supergraph...")
for g in directed_graphs:
G = nx.union(G, g)
G = create_edge_betweengraphs(ts, G);
G.add_node('S')
for i_start, start_cell in enumerate(start_nodes):
source_node = 'R'+str(int(i_start)+1)
start_node = start_cell + '_d'
G.add_node(source_node)
for t in range(ts):
start = start_node + '_' + str(t)
name_1 = 'y' + source_node + str(int(i_start)) + start_node.split('_')[0] + '_' + str(t)
G.add_edge(source_node, start, edge_type='flow', edge_name=name_1)
for end_cell in end_nodes:
end_node = end_cell + '_s'
for t in range(ts):
end = end_node + '_' + str(t)
name_2 = 'y' + end_node.split('_')[0] + 'S' + '_' + str(t)
G.add_edge(end, 'S', edge_type='flow', edge_name=name_2)
print("\tSupergraph created!")
print("\t", nx.info(G), "\n")
return G, directed_graphs
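# Illustrative sketch (not from the original network file): a toy two-cell
# description with the columns expected above, turned into a single-timestep
# graph. Cell and flow names are made up.
def _demo_single_timestep_graph():
    toy = pd.DataFrame([
        {'start': '1_s', 'end': '1_d', 'name': 'x1', 'type': 'occ'},
        {'start': '1_d', 'end': '2_s', 'name': 'y12', 'type': 'flow'},
    ])
    g = create_graph_singletimestep(0, toy)
    # every node name is suffixed with the timestep, e.g. '1_s_0'
    return list(g.edges(data=True))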
def create_opt_formulation_constants(G, cost, demand_source, demand_sink, slack_bound, occupancy_bound, flow_bound, edge_list, node_list):
A = np.array(nx.incidence_matrix(G, oriented=True).todense())
cost_vector = np.array([float(i.split("_")[-1]) + cost if i[0]=='x' else 0.0 for i in edge_list[:,2]])
demand_vector = np.array([-demand_source if 'R' in i else demand_sink if i=='S' else 0.0 for i in node_list])
bound_vector = np.array([occupancy_bound if i[0]=='x' else slack_bound if i[0]=='s' else flow_bound for i in edge_list[:,2]])
return A, cost_vector, demand_vector, bound_vector
def solve_optimal_assignment(A, d, u, c, edge_list):
s = time.time()
prob = None
print("\n----- Optimal Assignment Problem -----\n")
print("\tCreating new problem instance...")
prob = plp.LpProblem("Opt_assignment_problem", plp.LpMinimize)
print("\tAdding super-graph variables...")
flows = {i:plp.LpVariable(cat=plp.LpContinuous, lowBound=0, upBound=u[i], name=str(edge_list[:,2][i])) for i in range(A.shape[1])}
print("\tAdding constraints..."); percent_complete = 0
for j in range(A.shape[0]):
# prob += plp.LpAffineExpression([(flows[i],A[j,i]) for i in range(A.shape[1])]) == d[j]
prob += plp.lpSum(A[j,i]*flows[i] for i in range(A.shape[1])) == d[j]
if (j/A.shape[0])*100 > percent_complete:
print("\t\t{} % of constraints added".format(percent_complete))
percent_complete += 10
e1 = time.time()
print("\tConstraints added. Total time took: ", int((e1-s)/60), "mins")
objective = plp.lpSum([c[i]*flows[i] for i in range(A.shape[1])])
prob.setObjective(objective)
prob.writeLP("Opt_assignment_problem.lp")
print("\tSolving the optimal assignment problem...")
prob.solve(solver=plp.GUROBI_CMD())
print("\tSolution status: ", plp.LpStatus[prob.status])
print("\tObjective function value: ", plp.value(prob.objective))
solution = pd.DataFrame(columns=['Variable','OptimalValue'])
for i,v in enumerate(prob.variables()):
solution.loc[i] = [v.name, v.varValue]
solution.to_csv("./Optimal_solution.csv"); print("\tSolutions saved.\n"); e2 = time.time()
print("\tTotal time took for solving the optimal assignment: ", int((e2-s)/60), "mins")
return prob, flows, solution
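# The problem above is a minimum-cost flow LP: minimise c^T f subject to
# A f = d (flow conservation at every node) and 0 <= f <= u (capacities).
# Minimal PuLP sketch of that structure on a toy two-arc network; the numbers
# are made up and unrelated to the signal-tampering instance.
def _demo_min_cost_flow():
    toy = plp.LpProblem("toy_min_cost_flow", plp.LpMinimize)
    f_cheap = plp.LpVariable("f_cheap", lowBound=0, upBound=4)
    f_costly = plp.LpVariable("f_costly", lowBound=0, upBound=10)
    toy += 1 * f_cheap + 3 * f_costly     # objective: per-unit arc costs
    toy += f_cheap + f_costly == 6        # conservation: route 6 units in total
    toy.solve()                           # default bundled CBC solver
    return {v.name: v.varValue for v in toy.variables()}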
def extract_intersection_flows(F, intersection_variables, sort=True):
i = 0
intersection_flows = pd.DataFrame(columns=["Intersection","Var_id","Variables","Timesteps","OptimalValue"])
for var_id, var in F.items():
if var.name.split("_")[0] in intersection_variables:
intersection_flows.loc[i] = [var.name.split("_")[0], float(var_id), var.name, int(var.name.split("_")[-1]), var.varValue]
i += 1
if sort:
intersection_flows.sort_values("Var_id", ascending=True, inplace=True)
return intersection_flows
def extract_end_flows(F, end_flow_variables):
i = 0
end_flows = | pd.DataFrame(columns=["Ends","Endpoint","Var_id","Variables","Timesteps","OptimalValue"]) | pandas.DataFrame |
# -*- coding: utf-8 -*-
"""linearregression.ipynb
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/drive/1SFWk7Ap06ZkvP2HmLhXLiyyqo-ei35M1
"""
from __future__ import absolute_import, division, print_function, unicode_literals
import pathlib
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
from datetime import datetime
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers
print(tf.__version__)
# Download the daset with keras.utils.get_file
dataset_path = keras.utils.get_file("housing.data", "https://archive.ics.uci.edu/ml/machine-learning-databases/housing/housing.data")
column_names = ['CRIM','ZN','INDUS','CHAS','NOX',
                'RM', 'AGE', 'DIS','RAD','TAX','PTRATIO', 'B', 'LSTAT', 'MEDV']
raw_dataset = pd.read_csv(dataset_path, names=column_names,
na_values = "?", comment='\t',
sep=" ", skipinitialspace=True)
# Create a dataset instant
dataset = raw_dataset.copy()
# This function returns last n rows from the object
# based on position.
dataset.tail(n=10)
# Split data into train/test
# p = training data portion
p=0.8
trainDataset = dataset.sample(frac=p,random_state=0)
testDataset = dataset.drop(trainDataset.index)
# Visual representation of training data
import matplotlib.pyplot as plt
fig, ax = plt.subplots()
# Select the feature (RM) and target (MEDV) columns for plotting.
x = trainDataset['RM']
y = trainDataset['MEDV']
ax.scatter(x, y, edgecolors=(0, 0, 0))
ax.set_xlabel('RM')
ax.set_ylabel('MEDV')
plt.show()
# Plain column selection keeps the original frames intact; .pop() would instead
# return the column and remove it from the DataFrame.
trainInput = trainDataset['RM']
trainTarget = trainDataset['MEDV']
testInput = testDataset['RM']
testTarget = testDataset['MEDV']
# We don't specify anything for activation -> no activation is applied (ie. "linear" activation: a(x) = x)
# Check: https://www.tensorflow.org/api_docs/python/tf/keras/layers/Dense
def linear_model():
model = keras.Sequential([
layers.Dense(1, use_bias=True, input_shape=(1,), name='layer')
])
# Using adam optimizer
optimizer = tf.keras.optimizers.Adam(
learning_rate=0.01, beta_1=0.9, beta_2=0.99, epsilon=1e-05, amsgrad=False,
name='Adam')
# Check: https://www.tensorflow.org/api_docs/python/tf/keras/Model
# loss: String (name of objective function), objective function or tf.keras.losses.Loss instance. See tf.keras.losses.
# optimizer: String (name of optimizer) or optimizer instance. See tf.keras.optimizers.
# metrics: List of metrics to be evaluated by the model during training and testing
model.compile(loss='mse', optimizer=optimizer, metrics=['mae','mse'])
return model
# Create model instant
model = linear_model()
# Print the model summary
model.summary()
# params
n_epochs = 4000
batch_size = 256
n_idle_epochs = 100
n_epochs_log = 200
n_samples_save = n_epochs_log * trainInput.shape[0]
print('Checkpoint is saved for each {} samples'.format(n_samples_save))
# A mechanism that stops training if the validation loss is not improving for more than n_idle_epochs.
#See https://www.tensorflow.org/api_docs/python/tf/keras/callbacks/EarlyStopping for details.
earlyStopping = tf.keras.callbacks.EarlyStopping(monitor='val_loss', patience=n_idle_epochs, min_delta=0.001)
# Creating a custom callback to print the log after a certain number of epochs
# Check: https://www.tensorflow.org/api_docs/python/tf/keras/callbacks
predictions_list = []
class NEPOCHLogger(tf.keras.callbacks.Callback):
def __init__(self,per_epoch=100):
'''
        per_epoch: Number of epochs to wait before printing the loss metrics
'''
self.seen = 0
self.per_epoch = per_epoch
def on_epoch_end(self, epoch, logs=None):
if epoch % self.per_epoch == 0:
print('Epoch {}, loss {:.2f}, val_loss {:.2f}, mae {:.2f}, val_mae {:.2f}, mse {:.2f}, val_mse {:.2f}'\
.format(epoch, logs['loss'], logs['val_loss'],logs['mae'], logs['val_mae'],logs['mse'], logs['val_mse']))
# Call the object
log_display = NEPOCHLogger(per_epoch=n_epochs_log)
# Include the epoch in the file name (uses `str.format`)
import os
checkpoint_path = "training/cp-{epoch:05d}.ckpt"
checkpoint_dir = os.path.dirname(checkpoint_path)
# Create a callback that saves the model's weights every 5 epochs
checkpointCallback = tf.keras.callbacks.ModelCheckpoint(
filepath=checkpoint_path,
verbose=1,
save_weights_only=True,
save_freq=n_samples_save)
# Save the weights using the `checkpoint_path` format
model.save_weights(checkpoint_path.format(epoch=0))
# Define the Keras TensorBoard callback.
from datetime import datetime
logdir = "logs/fit/" + datetime.now().strftime("%Y%m%d-%H%M%S")
tensorboard_callback = keras.callbacks.TensorBoard(log_dir=logdir)
history = model.fit(
trainInput, trainTarget, batch_size=batch_size,
epochs=n_epochs, validation_split = 0.1, verbose=0, callbacks=[earlyStopping,log_display,tensorboard_callback,checkpointCallback])
# The fit model returns the history object for each Keras model
# Let's explore what is inside history
print('keys:', history.history.keys())
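# Illustrative check (not part of the original script): the single Dense unit
# with linear activation computes y = w*x + b, so the fitted slope and
# intercept can be read back from the layer weights after training.
w, b = model.get_layer('layer').get_weights()
print('Fitted slope: {:.3f}, intercept: {:.3f}'.format(float(w[0][0]), float(b[0])))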
# Returning the desired values for plotting and turn to numpy array
mae = np.asarray(history.history['mae'])
val_mae = np.asarray(history.history['val_mae'])
# Creating the data frame
num_values = len(mae)
values = np.zeros((num_values,2), dtype=float)
values[:,0] = mae
values[:,1] = val_mae
# Using pandas to frame the data
steps = | pd.RangeIndex(start=0,stop=num_values) | pandas.RangeIndex |
# General imports
import numpy as np
import pandas as pd
# Keras imports
from tensorflow.keras.layers import Dense
from tensorflow.keras.models import Sequential
from tensorflow.keras.callbacks import TensorBoard
# Sklearn imports
from sklearn.preprocessing import StandardScaler
from sklearn.metrics import recall_score, accuracy_score, precision_score, confusion_matrix
# Shared scaler instance used by AutoEncoder.preprocess() and AutoEncoder.test()
scaler = StandardScaler()
class AutoEncoder:
    def __init__(self, input_dim):
# Initialize self._autoencoder
self._autoencoder = Sequential()
self._autoencoder.add(Dense(int(0.75 * input_dim), activation="relu", input_shape=(input_dim,)))
self._autoencoder.add(Dense(int(0.5 * input_dim), activation="relu"))
self._autoencoder.add(Dense(int(0.33 * input_dim), activation="relu"))
self._autoencoder.add(Dense(int(0.25 * input_dim), activation="relu"))
self._autoencoder.add(Dense(int(0.33 * input_dim), activation="relu"))
self._autoencoder.add(Dense(int(0.5 * input_dim), activation="relu"))
self._autoencoder.add(Dense(int(0.75 * input_dim), activation="relu"))
self._autoencoder.add(Dense(input_dim))
# Initialize tensorboard
self._tensorboard = TensorBoard(
log_dir="logs",
histogram_freq=0,
write_graph=True,
write_images=True)
def preprocess(self, df):
# Create malicious set
malicious = df[df["anomaly"]==1]
        # Create & segment benign set
benign = df[df["anomaly"]==0]
        benign_train, benign_validate, benign_test_unscaled = np.split(benign.sample(frac=1, random_state=42), [int(1/3 * len(benign)), int(2/3 * len(benign))])
        benign_train_scaled = scaler.fit_transform(benign_train.iloc[:, :-1].values)
        benign_validate_scaled = scaler.transform(benign_validate.iloc[:, :-1].values)
        return benign_train_scaled, benign_validate_scaled, benign_test_unscaled, malicious
def train(self, train_scaled):
self._autoencoder.compile(loss="mean_squared_error", optimizer="sgd")
self._autoencoder.fit(train_scaled,
train_scaled,
epochs=60,
batch_size=100,
verbose=1,
callbacks=[self._tensorboard]
)
    def test(self, benign_validation, benign_test, malicious_test):
        # Create MSE
        valid_pred = self._autoencoder.predict(benign_validation)
        mse = np.mean(np.power(benign_validation - valid_pred, 2), axis=1)
        # Define benign threshold
        tr = mse.mean() + mse.std()
        # Test model
        test_set = pd.concat([benign_test, malicious_test])
test_scaled = scaler.transform(test_set.iloc[:,:-1].values)
test_pred = self._autoencoder.predict(test_scaled)
# Predict test set
mse = np.mean(np.power(test_scaled - test_pred, 2), axis=1)
predictions = (mse > tr).astype(int)
print(f"Accuracy: {round(accuracy_score(test_set.iloc[:,-1], predictions), 4)*100}%")
print(f"Recall: {round(recall_score(test_set.iloc[:,-1], predictions), 4)*100}%")
print(f"Precision: {round(precision_score(test_set.iloc[:,-1], predictions), 4)*100}%")
if __name__=="__main__":
    # Auto-encoder: constructed after the dataset is loaded below, so that
    # input_dim can be taken from the number of feature columns
    # (see the run_anomaly_detection sketch above for the intended call).
# Load dataset
df = pd.concat([x for x in | pd.read_csv("dataset.csv", low_memory=False, chunksize=100000) | pandas.read_csv |
"""
=================================================================================================
<NAME>
9 July 2021
=================================================================================================
Python >= 3.8.5
homebrew_stats.py
This module is meant to help with general statistical functions. Currently, there is only
a small number of statistics options supported, but I suspect this will grow in the future.
Currently supported:
FDR Estimation (Storey)
T-tests
=================================================================================================
Dependencies:
PACKAGE VERSION
Pandas -> 1.2.3
Numpy -> 1.20.1
SciPy -> 1.7.2
=================================================================================================
"""
import os
print(f"Loading the module: helpers.{os.path.basename(__file__)}\n")
#####################################################################################################################
#
# Importables
import fnmatch # Unix-like string searching
import pandas as pd # General use for data
import numpy as np # General use for data
from math import sqrt
import copy
import scipy
from scipy.stats import f, t
from scipy.stats import studentized_range as q
from scipy.interpolate import splrep, splev # Used for Storey Q-value estimation, fitting cubic spline
from scipy.interpolate import UnivariateSpline
from . import general_helpers as gh
print(f"numpy {np.__version__}")
print(f"scipy {scipy.__version__}")
print(f"pandas {pd.__version__}\n")
#
#
#####################################################################################################################
#
# Miscellaneous Functions
def filter_nans(data,
threshold = 3,
threshold_type = "data"):
"""
=================================================================================================
filter_nans(data, threshold, threshold_type)
This function is meant to filter out the nan values from a list, based on the input arguments.
=================================================================================================
Arguments:
data -> A list (or iterable) of data points. The points are assumed to be numbers.
threshold -> An integer describing the minimum value requirement.
threshold_type -> A string describing how the threshold integer will be applied.
"on_data" "on_nan"
=================================================================================================
Returns: The filtered list, or an empty list if the threshold requirements were not met.
=================================================================================================
"""
# Make sure the user gave a valid thresholding option
assert threshold_type.lower() in ["data",
"on_data",
"on data",
"nan",
"on_nan",
"on nan"], "Threshold is either relative to NaN or data."
assert type(data) == list, "The data should be in a list"
# Filter NaNs, as they do not equal themselves
filtered = [val for val in data if val == val]
# Keep data if there are at least <threshold> data points
if threshold_type.lower() in ["data", "on_data", "on data"]:
if len(filtered) >= threshold:
return filtered
else:
return []
# Keep data if there are no more than <threshold> nans
elif threshold_type.lower() in ["nan", "on_nan", "on nan"]:
if len(data) - len(filtered) <= threshold:
return filtered
else:
return []
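# A quick illustration of the two thresholding modes (the values below are
# made-up examples, not part of the original module):
# filter_nans([1.0, float("nan"), 2.0, 3.0], threshold=3, threshold_type="data")
#   -> [1.0, 2.0, 3.0] (at least 3 real values remain, so the data is kept)
# filter_nans([1.0, float("nan"), float("nan")], threshold=1, threshold_type="nan")
#   -> [] (more than 1 NaN, so the list is discarded)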
def filter_nan_dict(data,
threshold = 3,
threshold_type = "data"):
"""
=================================================================================================
    filter_nan_dict(data, threshold, threshold_type)
This function is meant to filter out nan values from the list-values in a dictionary. This
function uses filter_nans() to filter lists.
=================================================================================================
Arguments:
data -> A dictionary of lists of data points. The points are assumed to be numbers.
threshold -> An integer describing the minimum value requirement.
threshold_type -> A string describing how the threshold integer will be applied.
"on_data" "on_nan"
=================================================================================================
Returns: A dictionary where all values have been filtered from the list-values.
=================================================================================================
"""
# Make sure a dictionary is given as input
assert type(data) == dict, "The data should be in a dictionary"
# Initialize the new dictionary
filtered_dict = {}
# Loop over the keys/values in the dictionary
for key, value in data.items():
# Filter the nans
        filt_list = filter_nans(value, threshold = threshold, threshold_type = threshold_type)
# If the list is not empty
if filt_list != []:
# then add it to the dictionary
filtered_dict[key] = filt_list
# IF the list is empty, it will not be added to the dictionary
# Return the filtered dictionary.
return filtered_dict
def count_list(dataset):
'''
    Given a dataset, count the occurrence of
    each data point and return the counts as a dictionary.
'''
# First we create a dictionary to hold the counts.
# We then loop over the elements of the dataset
# and attempt to check the dictionary keys for them.
# If the element has appeard, we add one to the count
# and if the element is not in the dictionary, we
# add a key to the dictionary with that elements name
# and initialize it to 1 (since we have seen it once).
# At the end, return the dictionary.
dic = {} # Create the empty dictionary
    for element in dataset: # Loop over the elements in the dataset
try: # Attempt
dic[str(element)] += 1 # Look for the key of the element, add one to count
except: # Otherwise
dic[str(element)] = 1 # Add a key to the dicitonary with value 1
return dic # Return the dictionary
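# For example (hypothetical input): count_list([1, 2, 2, 3]) -> {'1': 1, '2': 2, '3': 1}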
#
#
#######################################################################################################
#
# General Statistical Functions
def mean(dataset):
'''
Given a dataset, return the average value.
'''
return sum(dataset) / len(dataset)
def median(dataset):
'''
Given a dataset, return the median value.
'''
# The calculation for median depends on whether the
# number of datapoints in the set is even or odd.
# If the number of datapoints is even, then we need to
# find the average of the two middle numbers. If the
# number of datapoints is odd, then we simply need
# to find the middle one. They also need to be sorted.
dataset = sorted(dataset)
if len(dataset) % 2 == 0: # if the dataset is even
index = len(dataset) // 2 # get the middle data point
med = (dataset[index] + dataset[index -1]) / 2 # average the middle two points
return med # return this value
elif len(dataset) % 2 == 1: # if the dataset is odd
index = len(dataset) // 2 # get the middle point
return dataset[index] # return the middle point
def grand_mean(*data):
all_data = gh.unpack_list(data)
return sum(all_data)/len(all_data)
def demean(data, grand = False):
if not grand:
return [d - mean(data) for d in data]
else:
return [mean(d) - grand_mean(data) for d in data]
def variance(dataset, correction = 1):
'''
Given a dataset, calculate the variance of the
parent population of the dataset.
'''
# Calculate the data without the mean, square
# all of the elements, then return the sum of
# those squares divided by the number of
# datapoints minus 1.
meanless = demean(dataset) # Remove the mean from the data
squares = [x**2 for x in meanless] # Square all of the meanless datapoints
return sum(squares) / (len(dataset) - correction) # return the sum of the squares divided by n-1
def standard_deviation(data, correction = 1):
return sqrt(variance(data, correction = correction))
def sem(data, correction = 1):
if len(data) < 2:
return float("nan")
return standard_deviation(data, correction=correction) / sqrt(len(data))
def sum_of_squares(vector):
return sum([v**2 for v in vector])
def var_within(*data, correction = 1):
variances = [variance(d, correction = correction) for d in data]
return mean(variances)
def var_between(*data, correction = 1):
means = [mean(d) for d in data]
return len(data[0]) * variance(means, correction = correction)
def total_variation(*data):
return sum_of_squares(demean(data, grand=True))
def sos_between(*data):
demeaned = demean(data, grand = True)
lens = [len(data[i]) for i in range(len(data))]
return sum([lens[i]*demeaned[i]**2 for i in range(len(data))])
def ms_between(*data):
return sos_between(*data) / (len(data)-1)
def sos_within(*data, correction = 1):
deg_frees = [len(d) - 1 for d in data]
var = [variance(d, correction = correction) for d in data]
return sum([deg_frees[i] * var[i] for i in range(len(data))])
def ms_within(*data, correction = 1):
deg_free = sum([len(d) for d in data]) - len(data)
return sos_within(*data, correction = correction)/deg_free
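# The mean-square helpers above are the building blocks of a one-way ANOVA,
# where F = MS_between / MS_within. A minimal sketch of that idea, assuming
# equal group sizes (as ms_between already does); the example groups below are
# illustrative, not part of the original module:
def one_way_f(*groups):
    return ms_between(*groups) / ms_within(*groups)
# e.g. one_way_f([1, 2, 3], [2, 3, 4], [5, 6, 7]) gives an F ratio that can be
# compared against the f distribution imported above, with (number of groups - 1)
# and (total observations - number of groups) degrees of freedom.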
def mode(dataset):
'''
Given a dataset, returns the most frequent value
and how many times that value appears
'''
# First, we count all of the elements and arrange them in
# a dictionary. Then, we create a sorted list of tuples
# from the key value pairs. Initialize 'pair', to hold
# the key value pair of the highest value, and an empty
# list to hold any of the pairs that tie. We then loop
# over the sorted lists keys and values, and look for the
# highest counts. We return the highest count from the
# dictionary, or the highest counts if there were any ties.
counted = count_list(dataset) # Count the elements of the dataset
sort = sorted(counted.items()) # Sort the numbers and occurences
pair = 'hold', 0 # Initialize the pair
ties = [] # Initialize the tie list
for key, value in sort: # Loop over key, value in sorted dictionary
if value > pair[1]: # If the value is greater than the pair
pair = key, value # Re assign the pair to the current one
ties = [] # Reset the tie list
elif value == pair[1]: # If the value is equal to the current value
ties.append((key, value)) # Append the new key, value pair to the list
ties.append(pair) # After, append the pair to the list
svar = sorted(ties) # Sort the list of ties
if len(ties) > 1: # If there are any ties,
return svar # Return the sorted list of ties
elif len(ties) == 1: # If there are no ties
return pair # Return the highest value
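# For example (hypothetical data): mode([1, 1, 2]) returns ('1', 2), while
# mode([1, 1, 2, 2, 3]) returns the sorted list of tied pairs [('1', 2), ('2', 2)].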
def quantile(dataset, percentage):
'''
    Given a dataset and a percentage, the function returns
the value under which the given percentage of the data
lies
'''
# First, sort the dataset, then find the index at the
# given percentage of the list. Then, return the
# value of the dataset at that index.
dataset = sorted(dataset) # Sort the dataset
index = int(percentage * len(dataset)) # Get the index of element percentage
return dataset[index] # return the element at the index
def interquantile_range(dataset, per_1, per_2):
'''
Given a dataset and two percentages that define
a range of the dataset, find the range of the
elements between those elements.
'''
dataset = sorted(dataset)
return quantile(dataset, per_2) - quantile(dataset, per_1)
def data_range(dataset):
'''
Given a dataset, return the range of the elements.
'''
dataset = sorted(dataset)
    return dataset[-1] - dataset[0]
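# For example (hypothetical data): with data = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10],
# quantile(data, 0.5) -> 6, interquantile_range(data, 0.25, 0.75) -> 8 - 3 = 5,
# and data_range(data) -> 9.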
def dot_product(data_1, data_2):
'''
Given two datasets of equal length, return the
dot product.
'''
# First, we make sure that the lists are the same size,
# Then we loop over the length of the lists, and sum the
# product of the corresponding elements of each list.
# Then, that sum is returned.
assert len(data_1) == len(data_2), "These lists are not the same length"
sum_total = 0 # Initialize the sum
for i in range(len(data_1)): # Loop over the size of the list
sum_total += data_1[i] * data_2[i] # Add to the sum the product of the datapoints in 1 and 2
return sum_total # Return the sum
def covariance(data_1, data_2):
'''
Given two datasets, calculate the covariance between them
'''
n = len(data_1)
return dot_product(demean(data_1),demean(data_2)) / (n-1)
def correlation(data_1, data_2):
'''
Given two datasets, calculate the correlation between them.
'''
return covariance(data_1, data_2) / (standard_deviation(data_1) * standard_deviation(data_2))
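# For example (hypothetical data): correlation([1, 2, 3, 4], [2, 4, 6, 8]) -> 1.0
# (a perfect positive linear relationship); reversing the second list gives -1.0.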
def vector_sum(vectors):
'''
Given a set of vectors, return a vector which contains the
sum of the ith elements from each vector in index i
'''
for i in range(len(vectors)-1):
assert len(vectors[i]) == len(vectors[i+1]), 'Vectors are not the same length'
return [sum(vector[i] for vector in vectors)
for i in range(len(vectors[0]))]
assert vector_sum([[1,2],[2,3],[3,4]]) == [6,9]
def scalar_multiply(scalar, vector):
'''
Given a scalar and a vector (list), return a vector
where each component is multiplied by the scalar.
'''
return [scalar * var for var in vector]
assert scalar_multiply(3, [1,2,3,4]) == [3,6,9,12]
def vector_subtract(vectors):
'''
Given a set of vectors, return the difference between
the vectors, in index order.
This will look like:
vectors[0] - vectors[1] - ... - vectors[n] = result
'''
for i in range(len(vectors)-1):
assert len(vectors[i]) == len(vectors[i+1]), 'Vectors are not the same length'
pass_count = 0
result = vectors[0]
for column in vectors:
if column == result and pass_count == 0:
pass_count += 1
pass
else:
for i in range(len(result)):
result[i] += -column[i]
pass_count += 1
return result
assert vector_subtract([[1,2,3], [3,4,5]]) == [-2,-2,-2]
def vector_mean(vectors):
'''
Given a list of lists (which correspond to vectors, where
each element of the vector represents a different variable)
return the vector mean of the vectors (add each the vectors
component-wise, divide each sum by the number of vectors)
'''
n = len(vectors)
return scalar_multiply(1/n, vector_sum(vectors))
assert vector_mean([[1,2],[2,3],[3,4]]) == [2,3]
def scale(data_1):
'''
Given a set of datapoint sets, return the mean of each
dataset and the standard deviation for each set.
Data points should be give as
x = [[x1_1, x2_1,..., xn_1],...,[x1_n, x2_n,..., xn_n]]
if the data are not in this format, but are in the format
x = [[x1_1, x1_2,..., x1_n],...,[xn_1, xn_2,..., xn_n]]
apply the function reformat_starts(*args) to the data.
'''
# First, we make sure that all of the data points
# given are the same length, then save the size of
# the datasets as n. We then calculate the vector
# mean of the data, as well as the standard deviations
# of each data type. Then the means and SDs are returned
for q in range(len(data_1) -1):
assert len(data_1[q]) == len(data_1[q+1]), 'Data lists are different sizes'
n = len(data_1[0])
means = vector_mean(data_1)
s_deviations = [standard_deviation([vector[i]
for vector in data_1])
for i in range(n)]
return means, s_deviations
t_vectors = [[-3, -1, 1], [-1, 0, 1], [1, 1, 1]]
t_means, t_stdevs = scale(t_vectors)
assert t_means == [-1, 0, 1]
assert t_stdevs == [2, 1, 0]
def rescale(data_1):
'''
Given a set of data sets, return a list of the
data rescaled, based on the means and the
standard deviations of the data.
'''
# First, we calculate the mean and standard deviations
# of the data, and save the size of the datasets as
# n. We then copy each of the vectors to the list
# rescaled. Next, we loop over the vectors in rescaled,
# and loop over the size of the datasets, and
# scale each term in v[i] based on the mean and SD
means, s_deviations = scale(data_1)
n = len(data_1[0])
rescaled = [v[:] for v in data_1]
for v in rescaled:
for i in range(n):
if s_deviations[i] > 0:
v[i] = (v[i] - means[i]) / s_deviations[i]
return rescaled
t2_means, t2_stdevs = scale(rescale(t_vectors))
assert t2_means == [0, 0, 1]
assert t2_stdevs == [1, 1, 0]
def unscaled(scaled_data_1, data_1, coefficients = False):
'''
Given a set of scaled datapoints, the original datapoints,
and a truthy value for whether we are unscaling coefficients
of regression, return the unscaled data points.
'''
# This is basically 'rescale' in reverse, with the
# condition of if we are unscaling coefficients. If
# we are unscaling coefficients, we subtract from the
# alpha term (v[0]) all elements in the form
# v[j] * mean[j] / s_deviations[j] (as described in
# Data Science from Scratch, 2nd Edition in Chapter
    # 16, Logistic Regression). Otherwise, all coefficients
# are divided by the standard deviation term of the
# corresponding data.
n = len(data_1[0])
means, s_deviations = scale(data_1)
unscaled = [v[:] for v in scaled_data_1]
for v in unscaled:
for i in range(n):
if coefficients == False:
v[i] = v[i]*s_deviations[i] + means[i]
elif coefficients == True:
if i == 0:
for j in range(1,n):
if s_deviations[j] > 0:
v[0] = v[0] - (v[j]*means[j])/s_deviations[j]
else:
v[0] = v[0] - v[j]
elif i != 0:
if s_deviations[i] > 0:
v[i] = v[i] / s_deviations[i]
else:
pass
return unscaled
assert unscaled(rescale(t_vectors), t_vectors) == t_vectors
#
#
#####################################################################################################################
#
# Q-Value Estimation Algorithms
#### Storey
def storey_check_groups(groups):
"""
=================================================================================================
storey_check_groups(groups)
This function is meant to check the groups argument input into the function storey()
=================================================================================================
Arguments:
groups -> Either a list, tuple, pandas DataFrame, or a numpy array that describes the groups
in Storey FDR estimation.
=================================================================================================
Returns: A list of lists that describe the groups used in Storey FDR estimation
=================================================================================================
"""
# If the input groups are a pandas DataFrame
if type(groups) == type(pd.DataFrame()):
# Then convert the groups into a transposed numpy array
groups = groups.to_numpy().transpose()
# and use list comprehension to reformat the groups into
# a list of pairs.
groups = [[groups[j][i] for j in range(len(groups))]
for i in range(len(groups[0])) ]
    # If the input groups are a list
elif type(groups) == list:
# Then loop over the number of lists
for i in range(len(groups)):
# If the type of the input groups are not
# a list, tuple or array
if type(groups[i]) not in [list, tuple, type(np.array([]))]:
# Then list the element
groups[i] = list(groups[i])
# Otherwise
else:
# Just keep the list the same
groups[i] = groups[i]
# If the groups were given as a tuple
elif type(groups) == tuple:
        # Then turn the groups into a list
groups = list(groups)
# and loop over the number of items in the groups
for i in range(len(groups)):
# and if the element is not a list, tuple, array,
if type(groups[i]) not in [list, tuple, type(np.array([]))]:
# then list the element and save it
groups[i] = list(groups[i])
            # Otherwise,
else:
# Keep the element the same
groups[i] = groups[i]
# If the input is a numpy array
elif type(groups) == type(np.array([])):
# Then use list comprehension to format the groups list.
# Assumes the groups have been transposed in this instance
groups = [[groups[j][i] for j in range(len(groups))] for i in range(len(groups[0]))]
# At then end, return the groups list.
return groups
def storey_check_test(test):
"""
=================================================================================================
storey_check_test(test)
This function is meant to check and format the T-test type from the inputs. The function works
almost exactly like storey_check_groups()
=================================================================================================
Arguments:
test -> A list, array, tuple, or array describing the T-test types used for each P-value.
=================================================================================================
Returns: A properly formatted list
=================================================================================================
"""
# If the groups are dataframes, make the input into a list of two-lists
if type(test) == type(pd.DataFrame()) or type(test) == type(pd.Series([])):
# If the input is a series or DataFrame object
# then attempt to list it
try:
test = list(test)
except:
raise ValueError("The test dataframe is incorrectly formatted. Try: df_name['Test']")
# If the input type is a list
elif type(test) == list:
# then iterate through each element of the list
for i in range(len(test)):
# and if any elements are not strings, then string them
if type(test[i]) != str:
test[i] = str(test[i])
else:
test[i] = test[i]
# If the input type is a tuple
elif type(test) == tuple:
# then list the test
test = list(test)
# and loop over the elements of test
for i in range(len(test)):
# If any elements are not strings, then string them
if type(test[i]) != str:
test[i] = str(test[i])
else:
test[i] = test[i]
# If the input is a numpy araray
elif type(test) == type(np.array([])):
# then use list comprehension to str all elements of the array
test = [str(test[i]) for i in range(len(test))]
# And at the end, return the test, reformatted
return test
def storey_check_args(pvals,
groups,
test):
"""
=================================================================================================
storey_check_args(pvals, groups, test)
This function is meant to check the arguments passed into the storey() function, and ensures that
FDR estimation may proceed without conflict.
=================================================================================================
Arguments:
pvals -> A list, numpy array, dataframe, tuple of P-values
groups -> A list, numpy array, dataframe, tuple of group labels, index matched to the pvalues
test -> A list, numpy array, dataframe, tuple of T-tests used for calculating P-values,
index matched to the pvals argument.
=================================================================================================
Returns: The pvalues, g_checker boolean (group checker) and the t_checker boolean (test checker)
=================================================================================================
"""
# First, type-check the inputs
assert type(pvals) in [list,
type(np.array([] ,dtype = float)),
type(pd.Series()),
type(pd.DataFrame())], "The p-values should be given as a list or a numpy array"
assert type(groups) in [type(None),
list,
tuple,
type(np.array([])),
type(pd.Series()),
type(pd.DataFrame())], "The p-values should be given as a list, tuple, numpy array, series or dataframe."
# Then, if the pvals were a series or DataFrame object
if type(pvals) == type(pd.Series()) or type(pvals) == type(pd.DataFrame()):
# Turn them into numpy arrays and transpose them
pvals = pvals.to_numpy().transpose()
# Then, if the length of pvals is not 1, then raise an error
if len(pvals) != 1:
raise ValueError("The DataFrame or Series input has more than one dimension...")
# Otherwise, pvals are the zeroeth element of the array
else:
pvals = pvals[0]
    # Next, check the groups. If something other than NoneType
# was provided
if type(groups) in [list,
tuple,
type(np.array([])),
type(pd.Series()),
type(pd.Series()),
type(pd.DataFrame())]:
# Then set g_checker to True, so we will check groups
g_checker = True
# Otherwise, set g_checker to False, as we do not need to check groups
else:
g_checker = False
# If the test is a proper typed object
if type(test) in [list,
tuple,
type(np.array([])),
type(pd.Series()),
type(pd.Series()),
type(pd.DataFrame())]:
# Then set t_checker to True, as we need to check the test
t_checker = True
# Otherwise, set t_checker to False, as we do not need to check test
else:
t_checker = False
    # and return pvals, g_checker and t_checker
return pvals, g_checker, t_checker
def storey_make_id_dict(pvals,
groups,
test,
g_checker,
t_checker):
"""
=================================================================================================
storey_make_id_dict(pvals, groups, test, g_checker, t_checker)
This function is meant to take all relevant arguments to storey() and perform checking
operations on each of those inputs.
=================================================================================================
Arguments:
For more information on pvals, groups, test, refer to storey_check_args().
g_checker -> A boolean, determines whether a group is in need of checking
t_checker -> A boolean, determines whether a test is in need of checking
=================================================================================================
Returns: A dictionary based on the pvals argument, and the groups and test arguments checked.
=================================================================================================
"""
# Initialize the idenities dictionary
identities = {}
# Then proceed with making the identity dictionary
# If groups are given and tests are not given
if g_checker != False and t_checker == False:
# Make sure the groups are in the correct format.
# Otherwise, terminate gracefully.
groups = storey_check_groups(groups)
# If there are not enough group labels given for all the pvals,
if len(groups) != len(pvals):
# Just proceed without group labels
print("Each p-value should have a corresponding label/group tuple, proceeding without labels")
# And make a dict of lists with key = pval, value = [position]
for i in range(len(pvals)):
identities[f"{i}?{pvals[i]}"] = [i]
# Otherwise, use the labels as the keys of the dictionary
else:
# make a dict of lists with key = pval, value = [position, label]
for i in range(len(pvals)):
identities[f"{i}?{pvals[i]}"] = [i, *groups[i]]
# If no groups were provided but tests were provieded
elif g_checker == False and t_checker != False:
# Make sure the tests are in the right format. Otherwise, terminate gracefully
test = storey_check_test(test)
# If there are not enough tests given for all the pvals,
if len(test) != len(pvals):
# Just proceed without labels
print("Each p-value should have a corresponding test, proceeding without test identifier")
# And make a dict of lists with key = pval, value = [position]
for i in range(len(pvals)):
identities[f"{i}?{pvals[i]}"] = [i]
# Otherwise, use the tests as the keys of the dictionary
else:
# make a dict of lists with key = pval, value = [position, label]
for i in range(len(pvals)):
identities[f"{i}?{pvals[i]}"] = [i, test[i]]
# If both tests and groups are provided
elif g_checker != False and t_checker != False:
# Make sure they're in the right format. Otherwise, terminate gracefully
groups = storey_check_groups(groups)
test = storey_check_test(test)
# If there are not enough labels given for all the pvals,
if len(groups) != len(pvals) and len(test) != len(pvals):
# Just proceed without labels
print("Each p-value should have a corresponding label/group tuple and test, proceeding without labels and test identifiers")
# And make a dict of lists with key = pval, value = [position]
for i in range(len(pvals)):
identities[f"{i}?{pvals[i]}"] = [i]
# Otherwise, use the labels as the keys of the dictionary
elif len(groups) != len(pvals) and len(test) == len(pvals):
print("Each p-value should have a corresponding test, proceeding without test identifiers")
for i in range(len(pvals)):
identities[f"{i}?{pvals[i]}"] = [i, *groups[i]]
elif len(groups) == len(pvals) and len(test) != len(pvals):
print("Each p-value should have a corresponding label/group tuple, proceeding without labels")
for i in range(len(pvals)):
identities[f"{i}?{pvals[i]}"] = [i, test[i]]
else:
# make a dict of lists with key = pval, value = [position, label]
for i in range(len(pvals)):
identities[f"{i}?{pvals[i]}"] = [i, *groups[i], test[i]]
# If no labels are given, then just make the identities dictionary
else:
# by looping over the pvals
for i in range(len(pvals)):
# and making keys as index/pval and value index.
identities[f"{i}?{pvals[i]}"] = [i]
# Once checking is over, return the identities dictionary, groups and test
return identities, groups, test
def storey_reorder_qs(qs,
pvals,
og_pvals):
"""
=================================================================================================
storey_reorder_qs(qs, pvals, og_pvals)
This function is used in storey(), and is meant to take the list of qvalues, the list of pvalues,
and the original list of pvalues, and reorder the qvalue list in correspondence with the original
pvalue list.
=================================================================================================
Arguments:
qs -> A list of q values created using the pvals list
pvals -> A list of p values used to estimate the q values
og_pvals -> A list of p values in their original order
=================================================================================================
    Returns: A list of q values which have been reordered to match the order of og_pvals
=================================================================================================
"""
# Initialize the list of seen pvalues and new qvalues
seen = []
newqs = []
# Loop over original order of pvalues
for i in range(len(og_pvals)):
# If the current pvalue is already seen
if og_pvals[i] in seen:
# Then find the the index of this particular pvalue. It will
# find the first instance of this pvalue in the pvals list.
ind = pvals.index(og_pvals[i])
# Then, see how many of these pvalues have been identified
num = seen.count(og_pvals[i])
# and qvalue corresponding to this pvalue is the index of
# the pvals list up to the current pval + the number seen,
# plus the number of elements before that list.
ind = pvals[ind+num:].index(og_pvals[i]) + len(pvals[:ind+num])
# If the current pvalue is not seen
else:
# find the index of the og_pval[i] in pvals
ind = pvals.index(og_pvals[i])
# move qs[ind] to newqs
newqs.append(qs[ind])
# Add this value to the seen list
seen.append(og_pvals[i])
# Once the loop is complete, the q value will be reordered based
# on the original pvalue order.
return newqs
def pi_0(ps, lam):
"""
=================================================================================================
pi_0(ps, lam)
This function is used to calculate the value of pi_0 in the Storey FDR estimation algorithm
=================================================================================================
Arguments:
ps -> A list of pvalues
lam -> A critical value of lambda
=================================================================================================
    Returns: The number of p values greater than lambda, divided by the total number of p values
             times the difference between 1 and lambda.
=================================================================================================
"""
# hat(pi_0) = num(p_j >0) / len(p)/(1-lambda)
# This just uses numpy functions to do that for us
return np.sum(ps>lam) / (ps.shape[0]*(1-lam))
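# In the Storey procedure, pi_0 is usually evaluated over a grid of lambda values
# and the raw estimates are smoothed, taking the smoothed value at the largest
# lambda as the final estimate. A minimal sketch of that idea using the spline
# helpers imported above (the lambda grid and smoothing choice are illustrative):
def estimate_pi0(ps, lambdas=np.arange(0.05, 0.95, 0.05)):
    ps = np.asarray(ps)
    raw = np.array([pi_0(ps, lam) for lam in lambdas])
    smoothed = UnivariateSpline(lambdas, raw, k=3)
    val = float(smoothed(lambdas[-1]))
    return min(max(val, 0.0), 1.0)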
def storey(pvals,
pi0 = None,
groups = None,
test = None):
"""
=================================================================================================
storey(pvals, pi0, groups, test)
This function performs Storey False Discovery Rate Estimation, as described in the publication
Statistical Significance for Genomewide Studies (Storey, Tibshirani 2003)
https://www.pnas.org/content/pnas/100/16/9440.full.pdf
=================================================================================================
Arguments:
pvals -> A list of pvalues. Can be unordered
pi0 -> A value for pi0. This does not need to be set.
groups -> A list, tuple, dataframe, or numpy array describing comparison groups
test -> A list, tuple, dataframe or numpy array describing the test used for Pvalue generation
=================================================================================================
Returns: A DataFrame describing the P-values, the Q-values, and whatever metadata was provided.
=================================================================================================
"""
# First, get a list of the group names if groups are provided.
group_names = None
if type(groups) == type( | pd.DataFrame([]) | pandas.DataFrame |
# *- coding: utf-8 -*
import pandas as pd
import matplotlib.pyplot as plt
from matplotlib.ticker import AutoMinorLocator, MultipleLocator
# from model.ESPNet_v2.SegmentationModel import EESPNet_Seg
# from model.CGNet import CGNet
# from model.ContextNet import ContextNet
# from model.DABNet import DABNet
# from model.EDANet import EDANet
# from model.ENet import ENet
# from model.ERFNet import ERFNet
# from model.ESNet import ESNet
# from model.ESPNet import ESPNet
# from model.FastSCNN import FastSCNN
# from model.FPENet import FPENet
# from model.FSSNet import FSSNet
# from model.LEDNet import LEDNet
# from model.LinkNet import LinkNet
# from model.SegNet import SegNet
# from model.SQNet import SQNet
# from model.UNet import UNet
pd.set_option('display.width', 1000)
pd.set_option('display.max_rows', 500)
pd.set_option('display.max_columns', 500)
def analysisG(): # Compare the original networks with the CONV6*6 (G) variants
#
# https://www.cnblogs.com/happymeng/p/10481293.html
dataset = 'camvid352'
module_names = ['DABNet', 'ESNet', 'FastSCNN', 'LEDNet', 'FPENet', 'DF1Seg']
train_types = ['bs2gpu1_train']
losses = pd.DataFrame()
mious = pd.DataFrame()
for n in module_names:
for t in train_types:
try:
df1 = pd.read_table("./checkpoint/" + dataset + "/" + n + t + "/log.txt", sep='\t\t', skiprows=2,
header=None,
names=['Epoch', 'Loss(Tr)', 'mIOU(val)', 'lr'], engine='python')
df2 = pd.read_table("./checkpoint/" + dataset + "/" + n + 'G' + t + "/log.txt", sep='\t\t', skiprows=2,
header=None,
names=['Epoch', 'Loss(Tr)', 'mIOU(val)', 'lr'], engine='python')
# df_loss = df1.loc[:,['Epoch']]
df_loss = df1[['Epoch']].copy()
df_loss[n] = df1['Loss(Tr)']
df_loss[n + 'G'] = df2['Loss(Tr)']
df_loss.plot(x='Epoch')
plt.savefig("./checkpoint/" + dataset + "/" + n + t + "_loss_vs_epochs.png")
plt.clf()
df_miou = df1[['Epoch']].copy()
df_miou[n] = df1['mIOU(val)']
df_miou[n + 'G'] = df2['mIOU(val)']
df_miou = df_miou.dropna(axis=0, how='any')
df_miou.plot(x='Epoch')
plt.savefig("./checkpoint/" + dataset + "/" + n + t + "_iou_vs_epochs.png")
plt.clf()
if len(losses.index) == 0:
losses = df_loss.copy()
mious = df_miou.copy()
else:
losses = pd.merge(losses, df_loss)
mious = pd.merge(mious, df_miou)
except:
pass
losses[500:].plot(x='Epoch')
plt.savefig("./checkpoint/" + dataset + "/" + t + "all_loss_vs_epochs.png")
plt.clf()
mious[4:].plot(x='Epoch')
plt.savefig("./checkpoint/" + dataset + "/" + t + "_all_iou_vs_epochs.png")
plt.close('all')
def fastscnn(): # compare with FastSCNN
dataset = 'FastSCNN/FastSCNN-4'
module_names = ['FastSCNN', 'FastSCNNG1', 'FastSCNNG3', 'FastSCNNG6']
train_types = ['bs4gpu1_train']
losses = pd.DataFrame()
mious = pd.DataFrame()
for t in train_types:
for n in module_names:
df = pd.read_table("./checkpoint/" + dataset + "/" + n + t + "/log.txt", sep='\t\t', skiprows=2,
header=None,
names=['Epoch', 'Loss(Tr)', 'mIOU(val)', 'lr'], engine='python')
if len(losses.index) == 0:
losses = df[['Epoch']].copy()
mious = df[['Epoch']].copy()
losses[n] = df['Loss(Tr)']
mious[n] = df['mIOU(val)']
else:
losses[n] = df['Loss(Tr)']
mious[n] = df['mIOU(val)']
losses.plot(x='Epoch')
plt.savefig("./checkpoint/" + dataset + "/" + 'FastSCNN1234' + train_types[0] + "_loss_vs_epochs.png")
plt.clf()
mious = mious.dropna(axis=0, how='any')
mious.plot(x='Epoch')
plt.savefig("./checkpoint/" + dataset + "/" + 'FastSCNN1234' + train_types[0] + "_iou_vs_epochs.png")
plt.clf()
# plt.close('all')
print(mious)
def fastscnn_mean(): # compare with FastSCNN
datasets = ['FastSCNN/FastSCNN-{}'.format(i) for i in range(1, 11)]
module_names = ['FastSCNN', 'FastSCNNG1', 'FastSCNNG3', 'FastSCNNG6', 'FastSCNNG7', 'FastSCNNG8']
train_types = ['bs4gpu1_train']
losses_mean = pd.DataFrame()
mious_mean = pd.DataFrame()
t = train_types[0]
for n in module_names:
losses = pd.DataFrame()
mious = pd.DataFrame()
for d in datasets:
df = pd.read_table("./checkpoint/" + d + "/" + n + t + "/log.txt", sep='\t\t', skiprows=2,
header=None,
names=['Epoch', 'Loss(Tr)', 'mIOU(val)', 'lr'], engine='python')
if len(losses.index) == 0:
losses = df[['Epoch']].copy()
mious = df[['Epoch']].copy()
losses[d + '/' + n] = df['Loss(Tr)']
mious[d + '/' + n] = df['mIOU(val)']
else:
losses[d + '/' + n] = df['Loss(Tr)']
mious[d + '/' + n] = df['mIOU(val)']
tmp = losses.drop(['Epoch'], axis=1)
losses[n + '_avg'] = tmp.mean(axis=1)
losses.plot(x='Epoch')
plt.savefig("./checkpoint/" + 'FastSCNN' + "/" + n + t + "_loss_vs_epochs.png")
plt.clf()
mious = mious.dropna(axis=0, how='any')
tmp = mious.drop(['Epoch'], axis=1)
mious[n + '_avg'] = tmp.mean(axis=1)
mious_mean[n + '_avg'] = mious[n + '_avg']
mious.plot(x='Epoch')
plt.savefig("./checkpoint/" + 'FastSCNN' + "/" + n + t + "_iou_vs_epochs.png")
plt.clf()
# plt.close('all')
print(mious)
print(mious_mean)
mious_mean.to_csv('FastSCNNG_mious_mean.csv')
mious_mean.plot()
plt.savefig("./checkpoint/" + 'FastSCNN' + "/" + "FastSCNN" + t + "_avg_iou_vs_epochs.png")
plt.clf()
def lednet_mean(): # compare the LEDNet variants
datasets = ['LEDNet/LEDNet-{}'.format(i) for i in range(1, 2)]
module_names = ['LEDNet', 'LEDNetG1', 'LEDNetG2', 'LEDNetG3']
train_types = ['bs4gpu1_train']
losses_mean = pd.DataFrame()
mious_mean = pd.DataFrame()
t = train_types[0]
for n in module_names:
losses = pd.DataFrame()
mious = pd.DataFrame()
for d in datasets:
df = pd.read_table("./checkpoint/" + d + "/" + n + t + "/log.txt", sep='\t\t', skiprows=2,
header=None,
names=['Epoch', 'Loss(Tr)', 'mIOU(val)', 'lr'], engine='python')
if len(losses.index) == 0:
losses = df[['Epoch']].copy()
mious = df[['Epoch']].copy()
losses[d + '/' + n] = df['Loss(Tr)']
mious[d + '/' + n] = df['mIOU(val)']
else:
losses[d + '/' + n] = df['Loss(Tr)']
mious[d + '/' + n] = df['mIOU(val)']
tmp = losses.drop(['Epoch'], axis=1)
losses[n + '_avg'] = tmp.mean(axis=1)
losses.plot(x='Epoch')
plt.savefig("./checkpoint/" + 'LEDNet' + "/" + n + t + "_loss_vs_epochs.png")
plt.clf()
mious = mious.dropna(axis=0, how='any')
tmp = mious.drop(['Epoch'], axis=1)
mious[n + '_avg'] = tmp.mean(axis=1)
mious_mean[n + '_avg'] = mious[n + '_avg']
mious.plot(x='Epoch')
plt.savefig("./checkpoint/" + 'LEDNet' + "/" + n + t + "_iou_vs_epochs.png")
plt.clf()
# plt.close('all')
print(mious)
print(mious_mean)
mious_mean.plot(figsize=(20, 15))
plt.savefig("./checkpoint/" + 'LEDNet' + "/" + "LEDNet" + t + "_avg_iou_vs_epochs.png")
plt.clf()
def fpenet_mean(): # compare the FPENet variants
datasets = ['FPENet/FPENet-{}'.format(i) for i in range(1, 2)]
module_names = ['FPENet', 'FPENetG0', 'FPENetG1', 'FPENetG2']
train_types = ['bs4gpu1_train']
losses_mean = pd.DataFrame()
mious_mean = pd.DataFrame()
t = train_types[0]
for n in module_names:
losses = pd.DataFrame()
mious = pd.DataFrame()
for d in datasets:
df = pd.read_table("./checkpoint/" + d + "/" + n + t + "/log.txt", sep='\t\t', skiprows=2,
header=None,
names=['Epoch', 'Loss(Tr)', 'mIOU(val)', 'lr'], engine='python')
if len(losses.index) == 0:
losses = df[['Epoch']].copy()
mious = df[['Epoch']].copy()
losses[d + '/' + n] = df['Loss(Tr)']
mious[d + '/' + n] = df['mIOU(val)']
else:
losses[d + '/' + n] = df['Loss(Tr)']
mious[d + '/' + n] = df['mIOU(val)']
tmp = losses.drop(['Epoch'], axis=1)
losses[n + '_avg'] = tmp.mean(axis=1)
losses.plot(x='Epoch')
plt.savefig("./checkpoint/" + 'FPENet' + "/" + n + t + "_loss_vs_epochs.png")
plt.clf()
mious = mious.dropna(axis=0, how='any')
tmp = mious.drop(['Epoch'], axis=1)
mious[n + '_avg'] = tmp.mean(axis=1)
mious_mean[n + '_avg'] = mious[n + '_avg']
mious.plot(x='Epoch')
plt.savefig("./checkpoint/" + 'FPENet' + "/" + n + t + "_iou_vs_epochs.png")
plt.clf()
# plt.close('all')
print(mious)
print(mious_mean)
mious_mean.plot(figsize=(20, 15))
plt.savefig("./checkpoint/" + 'FPENet' + "/" + "FPENet" + t + "_avg_iou_vs_epochs.png")
plt.clf()
def LEDNet_19_a(): #
dataset = 'camvid352'
module_names = ['LEDNet', 'LEDNet_19']
train_types = ['bs4gpu1_train']
losses = | pd.DataFrame() | pandas.DataFrame |
# -*- coding: utf-8 -*-
"""
===============================================================================
FINANCIAL IMPACT FILE
===============================================================================
Most recent update:
21 January 2019
===============================================================================
Made by:
<NAME>
Copyright:
<NAME>, 2018
For more information, please email:
<EMAIL>
===============================================================================
"""
import numpy as np
import pandas as pd
import sys
sys.path.insert(0, '/***YOUR LOCAL FILE PATH***/CLOVER 4.0/Scripts/Conversion scripts')
from Conversion import Conversion
class Finance():
def __init__(self):
self.location = 'Bahraich'
self.CLOVER_filepath = '/***YOUR LOCAL FILE PATH***/CLOVER 4.0'
self.location_filepath = self.CLOVER_filepath + '/Locations/' + self.location
self.location_inputs = pd.read_csv(self.location_filepath + '/Location Data/Location inputs.csv',header=None,index_col=0)[1]
self.finance_filepath = self.location_filepath + '/Impact/Finance inputs.csv'
self.finance_inputs = pd.read_csv(self.finance_filepath,header=None,index_col=0).round(decimals=3)[1]
self.inverter_inputs = pd.read_csv(self.location_filepath + '/Load/Device load/yearly_load_statistics.csv',index_col=0)
#%%
#==============================================================================
# EQUIPMENT EXPENDITURE (NOT DISCOUNTED)
# Installation costs (not discounted) for new equipment installations
#==============================================================================
# PV array costs
def get_PV_cost(self,PV_array_size,year=0):
'''
Function:
Calculates cost of PV
Inputs:
PV_array_size Capacity of PV being installed
year Installation year
Outputs:
Undiscounted cost
'''
PV_cost = PV_array_size * self.finance_inputs.loc['PV cost']
annual_reduction = 0.01 * self.finance_inputs.loc['PV cost decrease']
return PV_cost * (1.0 - annual_reduction)**year
# PV balance of systems costs
def get_BOS_cost(self,PV_array_size,year=0):
'''
Function:
Calculates cost of PV BOS
Inputs:
PV_array_size Capacity of PV being installed
year Installation year
Outputs:
Undiscounted cost
'''
BOS_cost = PV_array_size * self.finance_inputs.loc['BOS cost']
annual_reduction = 0.01 * self.finance_inputs.loc['BOS cost decrease']
return BOS_cost * (1.0 - annual_reduction)**year
# Battery storage costs
def get_storage_cost(self,storage_size,year=0):
'''
Function:
Calculates cost of battery storage
Inputs:
storage_size Capacity of battery storage being installed
year Installation year
Outputs:
Undiscounted cost
'''
storage_cost = storage_size * self.finance_inputs.loc['Storage cost']
annual_reduction = 0.01 * self.finance_inputs.loc['Storage cost decrease']
return storage_cost * (1.0 - annual_reduction)**year
# Diesel generator costs
def get_diesel_cost(self,diesel_size,year=0):
'''
Function:
Calculates cost of diesel generator
Inputs:
diesel_size Capacity of diesel generator being installed
year Installation year
Outputs:
Undiscounted cost
'''
diesel_cost = diesel_size * self.finance_inputs.loc['Diesel generator cost']
annual_reduction = 0.01 * self.finance_inputs.loc['Diesel generator cost decrease']
return diesel_cost * (1.0 - annual_reduction)**year
# Installation costs
def get_installation_cost(self,PV_array_size,diesel_size,year=0):
'''
Function:
Calculates cost of installation
Inputs:
PV_array_size Capacity of PV being installed
diesel_size Capacity of diesel generator being installed
year Installation year
Outputs:
Undiscounted cost
'''
PV_installation = PV_array_size * self.finance_inputs.loc['PV installation cost']
annual_reduction_PV = 0.01 * self.finance_inputs.loc['PV installation cost decrease']
diesel_installation = diesel_size * self.finance_inputs.loc['Diesel installation cost']
annual_reduction_diesel = 0.01 * self.finance_inputs.loc['Diesel installation cost decrease']
return PV_installation * (1.0 - annual_reduction_PV)**year + diesel_installation * (1.0 - annual_reduction_diesel)**year
# Miscellaneous costs
def get_misc_costs(self,PV_array_size,diesel_size):
'''
Function:
Calculates cost of miscellaneous capacity-related costs
Inputs:
PV_array_size Capacity of PV being installed
diesel_size Capacity of diesel generator being installed
Outputs:
Undiscounted cost
'''
misc_costs = (PV_array_size + diesel_size) * self.finance_inputs.loc['Misc. costs']
return misc_costs
# Total cost of newly installed equipment
def get_total_equipment_cost(self,PV_array_size,storage_size,diesel_size,year=0):
'''
Function:
Calculates cost of all equipment costs
Inputs:
PV_array_size Capacity of PV being installed
storage_size Capacity of battery storage being installed
diesel_size Capacity of diesel generator being installed
year Installation year
Outputs:
Undiscounted cost
'''
PV_cost = self.get_PV_cost(PV_array_size,year)
BOS_cost = self.get_BOS_cost(PV_array_size,year)
storage_cost = self.get_storage_cost(storage_size,year)
diesel_cost = self.get_diesel_cost(diesel_size,year)
installation_cost = self.get_installation_cost(PV_array_size,diesel_size,year)
misc_costs = self.get_misc_costs(PV_array_size,diesel_size)
return PV_cost + BOS_cost + storage_cost + diesel_cost + installation_cost + misc_costs
#%%
#==============================================================================
# EQUIPMENT EXPENDITURE (DISCOUNTED)
# Find system equipment capital expenditure (discounted) for new equipment
#==============================================================================
def discounted_equipment_cost(self,PV_array_size,storage_size,diesel_size,year=0):
'''
Function:
Calculates cost of all equipment costs
Inputs:
PV_array_size Capacity of PV being installed
storage_size Capacity of battery storage being installed
diesel_size Capacity of diesel generator being installed
year Installation year
Outputs:
Discounted cost
'''
undiscounted_cost = self.get_total_equipment_cost(PV_array_size,storage_size,diesel_size,year)
discount_fraction = (1.0 - self.finance_inputs.loc['Discount rate'])**year
return undiscounted_cost * discount_fraction
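    # As a worked illustration of the discounting above: with a discount rate
    # of 0.095, an installation in year 5 is scaled by (1 - 0.095)**5 ~= 0.61,
    # i.e. roughly 61% of its undiscounted cost. (The 9.5% rate is only an
    # example; the real value comes from 'Finance inputs.csv'.)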
def get_connections_expenditure(self,households,year=0):
'''
Function:
Calculates cost of connecting households to the system
Inputs:
households DataFrame of households from Energy_System().simulation(...)
year Installation year
Outputs:
Discounted cost
'''
households = pd.DataFrame(households)
connection_cost = self.finance_inputs.loc['Connection cost']
new_connections = np.max(households) - np.min(households)
undiscounted_cost = float(connection_cost * new_connections)
discount_fraction = (1.0 - self.finance_inputs.loc['Discount rate'])**year
total_discounted_cost = undiscounted_cost * discount_fraction
# Section in comments allows a more accurate consideration of the discounted
# cost for new connections, but substantially increases the processing time.
# new_connections = [0]
# for t in range(int(households.shape[0])-1):
# new_connections.append(households['Households'][t+1] - households['Households'][t])
# new_connections = pd.DataFrame(new_connections)
# new_connections_daily = Conversion().hourly_profile_to_daily_sum(new_connections)
# total_daily_cost = connection_cost * new_connections_daily
# total_discounted_cost = self.discounted_cost_total(total_daily_cost,start_year,end_year)
return total_discounted_cost
# Grid extension components
def get_grid_extension_cost(self,grid_extension_distance,year):
'''
Function:
Calculates cost of extending the grid network to a community
Inputs:
grid_extension_distance Distance to the existing grid network
year Installation year
Outputs:
Discounted cost
'''
grid_extension_cost = self.finance_inputs.loc['Grid extension cost'] # per km
grid_infrastructure_cost = self.finance_inputs.loc['Grid infrastructure cost']
discount_fraction = (1.0 - self.finance_inputs.loc['Discount rate'])**year
return grid_extension_distance * grid_extension_cost * discount_fraction + grid_infrastructure_cost
#%%
# =============================================================================
# EQUIPMENT EXPENDITURE (DISCOUNTED) ON INDEPENDENT EXPENDITURE
# Find expenditure (discounted) on items independent of simulation periods
# =============================================================================
def get_independent_expenditure(self,start_year,end_year):
'''
Function:
Calculates cost of equipment which is independent of simulation periods
Inputs:
start_year Start year of simulation period
end_year End year of simulation period
Outputs:
Discounted cost
'''
inverter_expenditure = self.get_inverter_expenditure(start_year,end_year)
total_expenditure = inverter_expenditure # ... + other components as required
return total_expenditure
def get_inverter_expenditure(self,start_year,end_year):
'''
Function:
Calculates cost of inverters based on load calculations
Inputs:
start_year Start year of simulation period
end_year End year of simulation period
Outputs:
Discounted cost
'''
# Initialise inverter replacement periods
replacement_period = int(self.finance_inputs.loc['Inverter lifetime'])
system_lifetime = int(self.location_inputs['Years'])
replacement_intervals = pd.DataFrame(np.arange(0,system_lifetime,replacement_period))
replacement_intervals.columns = ['Installation year']
# Check if inverter should be replaced in the specified time interval
if replacement_intervals.loc[replacement_intervals['Installation year'].isin(
range(start_year,end_year))].empty == True:
inverter_discounted_cost = float(0.0)
return inverter_discounted_cost
# Initialise inverter sizing calculation
max_power = []
inverter_step = float(self.finance_inputs.loc['Inverter size increment'])
inverter_size = []
for i in range(len(replacement_intervals)):
# Calculate maximum power in interval years
start = replacement_intervals['Installation year'].iloc[i]
end = start + replacement_period
max_power_interval = self.inverter_inputs['Maximum'].iloc[start:end].max()
max_power.append(max_power_interval)
# Calculate resulting inverter size
inverter_size_interval = np.ceil(0.001*max_power_interval / inverter_step) * inverter_step
inverter_size.append(inverter_size_interval)
inverter_size = pd.DataFrame(inverter_size)
inverter_size.columns = ['Inverter size (kW)']
inverter_info = pd.concat([replacement_intervals,inverter_size],axis=1)
# Calculate
inverter_info['Discount rate'] = [(1 - self.finance_inputs.loc['Discount rate']) **
inverter_info['Installation year'].iloc[i] for i in range(len(inverter_info))]
inverter_info['Inverter cost ($/kW)'] = [self.finance_inputs.loc['Inverter cost'] *
(1 - 0.01*self.finance_inputs.loc['Inverter cost decrease'])
**inverter_info['Installation year'].iloc[i] for i in range(len(inverter_info))]
inverter_info['Discounted expenditure ($)'] = [inverter_info['Discount rate'].iloc[i] *
inverter_info['Inverter size (kW)'].iloc[i] * inverter_info['Inverter cost ($/kW)'].iloc[i]
for i in range(len(inverter_info))]
inverter_discounted_cost = np.sum(inverter_info.loc[inverter_info['Installation year'].
isin(np.array(range(start_year,end_year)))
]['Discounted expenditure ($)']).round(2)
return inverter_discounted_cost
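    # As a worked illustration of the sizing rule above: a peak load of 4200 W
    # with a 1 kW size increment gives np.ceil(0.001 * 4200 / 1.0) * 1.0 = 5.0 kW
    # of inverter capacity. (Numbers are illustrative; real values come from the
    # yearly load statistics and 'Finance inputs.csv'.)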
#%%
#==============================================================================
# EXPENDITURE (DISCOUNTED) ON RUNNING COSTS
# Find expenditure (discounted) incurred during the simulation period
#==============================================================================
def get_kerosene_expenditure(self,kerosene_lamps_in_use_hourly,start_year=0,end_year=20):
'''
Function:
Calculates cost of kerosene usage
Inputs:
kerosene_lamps_in_use_hourly Output from Energy_System().simulation(...)
start_year Start year of simulation period
end_year End year of simulation period
Outputs:
Discounted cost
'''
kerosene_cost = kerosene_lamps_in_use_hourly * self.finance_inputs.loc['Kerosene cost']
total_daily_cost = Conversion().hourly_profile_to_daily_sum(kerosene_cost)
total_discounted_cost = self.discounted_cost_total(total_daily_cost,start_year,end_year)
return total_discounted_cost
def get_kerosene_expenditure_mitigated(self,kerosene_lamps_mitigated_hourly,start_year=0,end_year=20):
'''
Function:
Calculates cost of kerosene usage that has been avoided by using the system
Inputs:
kerosene_lamps_mitigated_hourly Output from Energy_System().simulation(...)
start_year Start year of simulation period
end_year End year of simulation period
Outputs:
Discounted cost
'''
kerosene_cost = kerosene_lamps_mitigated_hourly * self.finance_inputs.loc['Kerosene cost']
total_daily_cost = Conversion().hourly_profile_to_daily_sum(kerosene_cost)
total_discounted_cost = self.discounted_cost_total(total_daily_cost,start_year,end_year)
return total_discounted_cost
def get_grid_expenditure(self,grid_energy_hourly,start_year=0,end_year=20):
'''
Function:
Calculates cost of grid electricity used by the system
Inputs:
grid_energy_hourly Output from Energy_System().simulation(...)
start_year Start year of simulation period
end_year End year of simulation period
Outputs:
Discounted cost
'''
grid_cost = grid_energy_hourly * self.finance_inputs.loc['Grid cost']
total_daily_cost = Conversion().hourly_profile_to_daily_sum(grid_cost)
total_discounted_cost = self.discounted_cost_total(total_daily_cost,start_year,end_year)
return total_discounted_cost
def get_diesel_fuel_expenditure(self,diesel_fuel_usage_hourly,start_year=0,end_year=20):
'''
Function:
Calculates cost of diesel fuel used by the system
Inputs:
diesel_fuel_usage_hourly Output from Energy_System().simulation(...)
start_year Start year of simulation period
end_year End year of simulation period
Outputs:
Discounted cost
'''
diesel_fuel_usage_daily = Conversion().hourly_profile_to_daily_sum(diesel_fuel_usage_hourly)
start_day = start_year * 365
end_day = end_year * 365
diesel_price_daily = []
original_diesel_price = self.finance_inputs.loc['Diesel fuel cost']
r_y = 0.01 * self.finance_inputs.loc['Diesel fuel cost decrease']
r_d = ((1.0 + r_y) ** (1.0/365.0)) - 1.0
for t in range(start_day,end_day):
diesel_price = original_diesel_price * (1.0 - r_d)**t
diesel_price_daily.append(diesel_price)
diesel_price_daily = | pd.DataFrame(diesel_price_daily) | pandas.DataFrame |
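# The block above converts a yearly cost-decrease rate into a daily rate and builds a
# daily price series. A minimal standalone sketch of that pattern with made-up numbers
# (2 $/l diesel, 3% yearly decrease, 2 years); not part of the original module:
import numpy as np
import pandas as pd

yearly_rate = 0.03                                         # assumed 3% decrease per year
daily_rate = ((1.0 + yearly_rate) ** (1.0 / 365.0)) - 1.0  # equivalent daily rate
days = np.arange(2 * 365)
diesel_prices = pd.DataFrame(2.0 * (1.0 - daily_rate) ** days, columns=['Diesel price ($/l)'])
print(diesel_prices.head())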
import sqlite3
import json
import os
import pandas as pd
import re
conn = sqlite3.connect('happiness.db')
c = conn.cursor()
#Create Countries table
c.execute("""CREATE TABLE countries (id INTEGER PRIMARY KEY AUTOINCREMENT,country varchar, images_file text, image_url text, alpha2 text, alpha3 text,
country_code integer, iso_3166_2 text, region text, sub_region text, intermediate_region text, region_code integer,
sub_region_code integer, intermediate_region_code integer
)""")
#Read countries json file
myJsonFile = open('Data_Files\Data Files\countries_continents_codes_flags_url.json','r')
json_data = myJsonFile.read()
countries_json_obj = json.loads(json_data)
#Insert Data in Countries table
for country in countries_json_obj:
c.execute("insert into countries (country,images_file,image_url,alpha2,alpha3,country_code,iso_3166_2,region,sub_region,intermediate_region,region_code,sub_region_code,intermediate_region_code) values (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)",
[country['country'], country['images_file'], country['image_url'], country['alpha-2'], country['alpha-3'],
country['country-code'], country['iso_3166-2'], country['region'], country['sub-region'], country['intermediate-region'],
country['region-code'], country['sub-region-code'], country['intermediate-region-code']])
conn.commit()
#Read CSV files
csv_file_path = os.getcwd()+'\\Data_Files\\Data Files\\csv_files\\'
csv_files = []
for file in os.listdir(csv_file_path):
if(file.endswith('.csv')):
csv_files.append(file)
#Create DataFrame of csv files
df = {}
df_list = []
for file in csv_files:
df[file] = pd.read_csv(csv_file_path + file)
file_name_str = str(file)
report_year = re.findall('(\d{4})', file_name_str)
df[file].loc[:, 'year'] = str(report_year[0])
df[file].columns = [x.lower().replace(" ","_").replace("?","") \
.replace("-","_").replace("(","").replace(")","").replace("..","_").replace(".","_") \
for x in df[file].columns]
for x in df[file].columns:
col_name = str(x)
if col_name.endswith("_"):
c_col_name = col_name
col_name = col_name[:-1]
df[file].rename(columns = ({c_col_name: col_name}),inplace=True)
df[file].rename(columns=({"economy_gdp_per_capita": "gdp_per_capita"}), inplace=True)
df[file].rename(columns=({"score": "happiness_score"}), inplace=True)
df[file].rename(columns=({"freedom": "freedom_to_make_life_choices"}), inplace=True)
df[file].rename(columns=({"country_or_region": "country"}), inplace=True)
df[file].rename(columns=({"healthy_life_expectancy": "health_life_expectancy"}), inplace=True)
df_list.append(df[file])
result = pd.concat(df_list)
replacements = {
'object': 'varchar',
'float64': 'float',
'int64': 'int',
'datetime64': 'timestamp',
'timedelta64[ns]': 'varchar'
}
col_str = ", ".join("{} {}".format(n, d) for (n, d) in zip(result.columns, result.dtypes.replace(replacements)))
conn = sqlite3.connect('happiness.db')
c = conn.cursor()
#Create countries_happiness record table
c.execute("""CREATE TABLE countries_happiness (ID INTEGER PRIMARY KEY AUTOINCREMENT, %s);""" % (col_str))
conn.commit()
#Insert data from csv files to countries_happiness table
result.to_sql(name="countries_happiness", con=conn, if_exists='append', index=False)
#Question 3 - SQL Query to CSV
SQL_Query_Q3 = | pd.read_sql_query('''select ch.year,c.country,c.image_url,c.region_code,c.region,ch.gdp_per_capita,ch.family,ch.social_support,ch.health_life_expectancy,ch.freedom_to_make_life_choices,ch.generosity,ch.perceptions_of_corruption from countries c inner join countries_happiness ch on c.country=ch.country''', conn) | pandas.read_sql_query |
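# A small, self-contained example of pandas.read_sql_query against an in-memory SQLite
# database; the table name and columns here are toys, not the happiness.db schema above:
import sqlite3
import pandas as pd

con = sqlite3.connect(':memory:')
con.execute("CREATE TABLE t (id INTEGER, name TEXT)")
con.executemany("INSERT INTO t VALUES (?, ?)", [(1, 'a'), (2, 'b')])
df = pd.read_sql_query("SELECT id, name FROM t WHERE id > 1", con)
print(df)
con.close()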
from datetime import date, datetime, timedelta
from dateutil import tz
import numpy as np
import pytest
import pandas as pd
from pandas import DataFrame, Index, Series, Timestamp, date_range
import pandas._testing as tm
class TestDatetimeIndex:
def test_setitem_with_datetime_tz(self):
# 16889
# support .loc with alignment and tz-aware DatetimeIndex
mask = np.array([True, False, True, False])
idx = | date_range("20010101", periods=4, tz="UTC") | pandas.date_range |
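# The truncated test above exercises .loc assignment on a tz-aware datetime column.
# A short illustration of that pattern with arbitrary values:
import numpy as np
import pandas as pd

idx = pd.date_range("20010101", periods=4, tz="UTC")
df = pd.DataFrame({"a": idx})
mask = np.array([True, False, True, False])
df.loc[mask, "a"] = pd.Timestamp("2020-06-01", tz="UTC")
print(df)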
import json
import csv
import codecs
import pandas as pd
dic = {}
# Read the name-meaning information
with codecs.open('./meaning.csv', "r") as f:
reader = csv.reader(f)
meaning = [row for row in reader]
dic_name = {}
# Drop the header row
meaning = meaning[1:]
for mean in meaning:
if mean[3] == "" or mean[4] == "":
continue
if mean[4] == "備考":
continue
category = int(mean[4]) - 1
if category > 13 or category < 0:
print(mean[0])
print(category)
continue
if not mean[3] in dic.keys():
dic[mean[3]] = [] # alphabet to bukken
if not mean[3] in dic_name.keys():
dic_name[mean[3]] = mean[0] #alphabet to katakana
# Read the building (property) information
with codecs.open('./suumo_tokyo.csv', "r") as f:
reader = csv.reader(f)
buiding = [row for row in reader]
# Drop the header row
buiding = buiding[1:]
for bill in buiding:
for key in dic.keys():
        if dic_name[key] in bill[1]: # does the building name contain the keyword?
dic[key].append(bill[1])
name = []
billdings = []
for key in dic.keys():
for bill in dic[key]:
name.append(key)
billdings.append(bill)
name = pd.Series(name)
billdings = pd.Series(billdings)
df = | pd.concat([name, billdings], axis=1) | pandas.concat |
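# Tiny sketch of the pattern used above: two aligned Series concatenated column-wise
# into a two-column DataFrame (toy data):
import pandas as pd

names = pd.Series(['alpha', 'beta'])
buildings = pd.Series(['Building A', 'Building B'])
df = pd.concat([names, buildings], axis=1)
df.columns = ['name', 'building']
print(df)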
import logging
import multiprocessing
import os
from collections import defaultdict
from typing import Iterable
import pandas as pd
from pascal_voc_tools import XmlParser
from PIL import Image
from ...files import ensure_directory, iterate_directories
from .annotation import AnnotatedImage, Annotation, BoundingBox
def load_voc_from_directories(
directories: Iterable[str], num_workers: int = None
) -> pd.DataFrame:
"""Load a pandas dataframe from directories containing VOC files"""
items = defaultdict(lambda: [])
num_workers = num_workers or multiprocessing.cpu_count()
files = tuple(iterate_directories(directories, "xml"))
with multiprocessing.Pool(num_workers) as pool:
for result in pool.imap(_parse_dataset_item, files):
if result is not None:
file, image_file = result
items["annotation"].append(file)
items["image"].append(image_file)
return | pd.DataFrame(items) | pandas.DataFrame |
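# Minimal sketch of building a DataFrame from a dict of accumulated lists, as done above
# for the (annotation, image) pairs; the file names here are invented:
from collections import defaultdict
import pandas as pd

items = defaultdict(list)
for xml, img in [("a.xml", "a.jpg"), ("b.xml", "b.jpg")]:
    items["annotation"].append(xml)
    items["image"].append(img)
print(pd.DataFrame(items))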
import unittest
import numpy as np
import pandas as pd
from pyalink.alink import *
class TestSqlQuery(unittest.TestCase):
def test_batch(self):
source = CsvSourceBatchOp() \
.setSchemaStr(
"sepal_length double, sepal_width double, petal_length double, petal_width double, category string") \
.setFilePath("https://alink-test-data.oss-cn-hangzhou.aliyuncs.com/iris.csv")
source.registerTableName("A")
result = BatchOperator.sqlQuery("SELECT sepal_length FROM A")
result.print()
def test_batch2(self):
data = np.array([
["1", 1, 1.1, 1.0, True],
["2", -2, 0.9, 2.0, False],
["3", 100, -0.01, 3.0, True],
["4", -99, None, 4.0, False],
["5", 1, 1.1, 5.0, True],
["6", -2, 0.9, 6.0, False]
])
df = pd.DataFrame({"f1": data[:, 0], "f2": data[:, 1], "f3": data[:, 2], "f4": data[:, 3], "f5": data[:, 4]})
data = dataframeToOperator(df, schemaStr='f1 string, f2 long, f3 double, f4 double, f5 boolean', opType='batch')
data.print()
data.registerTableName("t1")
data.registerTableName("t2")
res = BatchOperator.sqlQuery("select a.f1,b.f2 from t1 as a join t2 as b on a.f1=b.f1")
res.print()
def test_batch3(self):
data = np.array([
["1", 1, 1.1, 1.0, True],
["2", -2, 0.9, 2.0, False],
["3", 100, -0.01, 3.0, True],
["4", -99, None, 4.0, False],
["5", 1, 1.1, 5.0, True],
["6", -2, 0.9, 6.0, False]
])
df = | pd.DataFrame({"f1": data[:, 0], "f2": data[:, 1], "f3": data[:, 2], "f4": data[:, 3], "f5": data[:, 4]}) | pandas.DataFrame |
"""
This module includes two types of discrete state-space formulations for biogas plants.
The anaerobic digestion model in FlexibleBiogasPlantModel is based on the work in
https://doi.org/10.1016/j.energy.2017.12.073 and ISBN: 978-3-319-16192-1
The module is designed to work with fledge: https://doi.org/10.5281/zenodo.3715873
The code is organized and implemented based on the flexible building model cobmo: https://zenodo.org/record/3523539
"""
import numpy as np
import pandas as pd
import scipy.linalg
import os
import inspect
import sys
import datetime as dt
import pyomo.environ as pyo
import bipmo.utils
class BiogasPlantModel(object):
"""
BiogasPlantModel represents all attributes and functions that all biogas plants have in common. It is the basis for
every model that inherits from it. Caution: It does not work as a standalone model!
"""
model_type: str = None
der_name: str = 'Biogas Plant'
plant_scenarios: pd.DataFrame
states: pd.Index
controls: pd.Index
outputs: pd.Index
switches: pd.Index
chp_schedule: pd.DataFrame
disturbances: pd.Index
state_vector_initial: pd.Series
state_matrix: pd.DataFrame
control_matrix: pd.DataFrame
disturbance_matrix: pd.DataFrame
state_output_matrix: pd.DataFrame
control_output_matrix: pd.DataFrame
disturbance_output_matrix: pd.DataFrame
timestep_start: pd.Timestamp
timestep_end: pd.Timestamp
timestep_interval: pd.Timedelta
timesteps: pd.Index
disturbance_timeseries: pd.DataFrame
output_maximum_timeseries: pd.DataFrame
output_minimum_timeseries: pd.DataFrame
marginal_cost: float
lhv_table: pd.DataFrame
temp_in: float
cp_water: float
feedstock_limit_type: str
available_feedstock: float
def __init__(
self,
scenario_name: str,
timestep_start=None,
timestep_end=None,
timestep_interval=None,
connect_electric_grid=True,
):
# Scenario name.
self.scenario_name = scenario_name
# Define the biogas plant model (change paths accordingly).
base_path = os.path.dirname(os.path.dirname(os.path.normpath(__file__)))
# Load the scenario.
self.plant_scenarios = pd.read_csv(
os.path.join(base_path, 'data/biogas_plant_scenario.csv')
)
self.plant_scenarios = self.plant_scenarios[
self.plant_scenarios['scenario_name'] == self.scenario_name]
self.plant_scenarios.index = pd.Index([self.scenario_name])
# Load marginal cost
self.marginal_cost = self.plant_scenarios.loc[
self.scenario_name, 'marginal_cost_EUR_Wh-1']
# Load feedstock data used in the scenario.
self.plant_feedstock = pd.read_csv(
os.path.join(base_path, 'data/biogas_plant_feedstock.csv')
)
self.plant_feedstock = self.plant_feedstock[
self.plant_feedstock['feedstock_type']
== self.plant_scenarios.loc[self.scenario_name, 'feedstock_type']
]
self.plant_feedstock.index = pd.Index([self.scenario_name])
self.feedstock_limit_type = self.plant_scenarios.loc[
self.scenario_name, 'availability_limit_type']
self.available_feedstock = self.plant_scenarios.loc[
self.scenario_name, 'availability_substrate_ton_per_year']
# Load CHP data used in the scenario.
self.CHP_list = self.plant_scenarios.CHP_name[self.scenario_name].split()
self.number_CHP = len(self.CHP_list)
self.plant_CHP_source = pd.read_csv(
os.path.join(base_path, 'data/biogas_plant_CHP.csv')
)
self.plant_CHP = pd.DataFrame(columns=self.plant_CHP_source.columns)
for i in self.CHP_list:
self.plant_CHP = pd.concat([
self.plant_CHP,
self.plant_CHP_source[self.plant_CHP_source['CHP_name'] == i]
])
self.plant_CHP.index = self.plant_CHP['CHP_name']
self.elec_cap_list = pd.DataFrame([cap for cap in self.plant_CHP.elec_cap_Wel],
index=self.CHP_list,
columns=['elec_cap_Wel'])
self.ramp_rate_list = pd.DataFrame([rate for rate in self.plant_CHP.ramp_capacity_W_min],
index=self.CHP_list,
columns=['ramp_rate_W_min'])
# Load storage data used in the scenario.
self.plant_storage = pd.read_csv(
os.path.join(base_path, 'data/biogas_plant_storage.csv')
)
self.plant_storage = self.plant_storage[
self.plant_storage['storage_name']
== self.plant_scenarios.loc[self.scenario_name, 'storage_name']
]
self.plant_storage.index = pd.Index([self.scenario_name])
# Define useful values.
self.lhv_table = pd.DataFrame(
# Lower heating value of methane in J/m3.
[35.8e6],
pd.Index(['LHV_methane']),
pd.Index(['LHV value (in J/m^3)'])
)
self.temp_in = self.plant_scenarios.loc[
# Temperature of the digestion process in °C.
self.scenario_name, 'digester_temp']
self.cp_water = 4182 # Specific heat of water in J/(K*kg) at 20°C.
# Define CHP coefficients
self.set_gains = pd.Index([])
# Define the heat and power CHP coefficients.
for i in range(len(self.CHP_list)):
self.set_gains = pd.Index([
self.plant_CHP['CHP_name'][i] + '_biogas_volume_inflow_m3_s-1'
]).union(self.set_gains)
self.gain_heat = pd.DataFrame(
0.0,
pd.Index([0]),
pd.Index(range(0, self.set_gains.size))
)
self.gain_power = pd.DataFrame(
0.0,
pd.Index([0]),
pd.Index(range(0, self.set_gains.size))
)
for i in range(0, self.number_CHP):
for j in range(0, self.lhv_table.size):
self.gain_heat[self.lhv_table.size * i + j] = self.plant_CHP['therm_eff'][i] * \
self.lhv_table['LHV value (in J/m^3)'][j] * \
self.plant_feedstock['methane_content'][
self.scenario_name]
self.gain_power[self.lhv_table.size * i + j] = self.plant_CHP['elec_eff'][i] * \
self.lhv_table['LHV value (in J/m^3)'][j] * \
self.plant_feedstock['methane_content'][
self.scenario_name]
self.gain_heat.columns = self.set_gains
self.gain_power.columns = self.set_gains
# Empty control variables (are added in the inherited classes)
self.controls = pd.Index(
[],
name='control_name'
)
# Add the chp controls (every biogas plant has at least one CHP)
for i in range(len(self.CHP_list)):
self.controls = pd.Index([
# CHPs Biogas inflows
self.plant_CHP['CHP_name'][i] + '_biogas_volume_inflow_m3_s-1'
]).union(self.controls)
# State variable for storage (every bg has a storage)
self.states = pd.Index(
# Storage biogas content.
self.plant_scenarios['scenario_name'] + '_storage_content_m3',
name='state_name'
)
# Output variables.
self.outputs = pd.Index(
# Storage biogas content.
self.plant_scenarios['scenario_name']
+ '_storage_content_m3',
name='output_name'
)
self.outputs = pd.Index([
# net active power output
'active_power',
# net reactive power output
'reactive_power',
# net thermal output (heat)
'thermal_power'
]).union(self.outputs)
self.switches = pd.Index([])
for i in range(len(self.CHP_list)):
self.outputs = pd.Index([
# CHPs active power production.
self.plant_CHP['CHP_name'][i] + '_active_power_Wel',
# CHPs reactive power production.
self.plant_CHP['CHP_name'][i] + '_react_power_Var',
# CHPs heat power production.
self.plant_CHP['CHP_name'][i] + '_heat_Wth'
]).union(self.outputs)
self.switches = pd.Index([
# CHP switch to turn on/off
self.plant_CHP['CHP_name'][i] + '_switch',
]).union(self.switches)
# Define timesteps.
if timestep_start is not None:
self.timestep_start = pd.Timestamp(timestep_start)
else:
self.timestep_start = pd.Timestamp(self.plant_scenarios.loc[self.scenario_name, 'time_start'])
if timestep_end is not None:
self.timestep_end = pd.Timestamp(timestep_end)
else:
self.timestep_end = pd.Timestamp(self.plant_scenarios.loc[self.scenario_name, 'time_end'])
if timestep_interval is not None:
self.timestep_interval = pd.Timedelta(timestep_interval)
else:
self.timestep_interval = pd.Timedelta(self.plant_scenarios.loc[self.scenario_name, 'time_step'])
self.timesteps = pd.Index(
pd.date_range(
start=self.timestep_start,
end=self.timestep_end,
freq=self.timestep_interval
),
name='time'
)
# construct default chp schedule
self.chp_schedule = pd.DataFrame(
+1.0,
self.timesteps,
self.switches
)
# Disturbance variables: add constant active and thermal power requirements
self.disturbances = pd.Index([
'active_power_requirement_const_Wel',
'thermal_power_requirement_const_Wth'
], name='disturbance_name')
self.disturbances_data_set = {
'active_power_requirement_const_Wel': float(self.plant_scenarios.loc[self.scenario_name, 'const_power_requirement']),
'thermal_power_requirement_const_Wth': float(self.plant_scenarios.loc[self.scenario_name, 'const_heat_requirement']),
}
def instantiate_state_space_matrices(self):
# Instantiate empty state-space model matrices.
self.state_matrix = pd.DataFrame(
0.0,
self.states,
self.states
)
self.control_matrix = pd.DataFrame(
0.0,
self.states,
self.controls
)
self.disturbance_matrix = pd.DataFrame(
0.0,
self.states,
self.disturbances
)
self.state_output_matrix = pd.DataFrame(
0.0,
self.outputs,
self.states
)
self.control_output_matrix = pd.DataFrame(
0.0,
self.outputs,
self.controls
)
self.disturbance_output_matrix = pd.DataFrame(
0.0,
self.outputs,
self.disturbances
)
def define_state_output_matrix(self):
# Define the state output matrix.
for state in self.states:
for output in self.outputs:
if state == output:
self.state_output_matrix.loc[state, output] = 1
def define_control_output_matrix(self):
# Define the control output matrix.
for control in self.controls:
for output in self.outputs:
if ('active_power_Wel' in output) and (control[0:5] == output[0:5]):
self.control_output_matrix.loc[output, control] \
= self.gain_power[control][0] * self.plant_CHP.loc[control[0:5], 'power_factor']
if ('react_power_Var' in output) and (control[0:5] == output[0:5]):
self.control_output_matrix.loc[output, control] \
= self.gain_power[control][0] * (1 - self.plant_CHP.loc[control[0:5], 'power_factor'])
if ('heat_Wth' in output) and (control[0:5] == output[0:5]):
self.control_output_matrix.loc[output, control] \
= self.gain_heat[control][0]
# add net active/reactive/thermal output
for chp in self.plant_CHP['CHP_name'].to_list():
for control in self.controls:
if control[0:5] == chp:
self.control_output_matrix.loc['active_power', control] \
= self.gain_power[control][0] * self.plant_CHP.loc[control[0:5], 'power_factor']
self.control_output_matrix.loc['reactive_power', control] \
= self.gain_power[control][0] * (1 - self.plant_CHP.loc[control[0:5], 'power_factor'])
self.control_output_matrix.loc['thermal_power', control] \
= self.gain_heat[control][0]
def define_disturbance_timeseries(self):
self.disturbance_timeseries = pd.DataFrame(
0.0,
self.timesteps,
self.disturbances
)
# Reindex, interpolate and construct full disturbance timeseries.
for disturbance in self.disturbances:
self.disturbance_timeseries[disturbance] = self.disturbances_data_set[disturbance]
def define_disturbance_output_matrix(self):
# Add a constant heat and power demand
self.disturbance_output_matrix.loc['active_power', 'active_power_requirement_const_Wel']\
= -1.0
self.disturbance_output_matrix.loc['thermal_power', 'thermal_power_requirement_const_Wth']\
= -1.0
def define_output_constraint_timeseries(self):
# Instantiate constraint timeseries.
self.output_maximum_timeseries = pd.DataFrame(
+1.0 * np.infty,
self.timesteps,
self.outputs
)
self.output_minimum_timeseries = pd.DataFrame(
-1.0 * np.infty,
self.timesteps,
self.outputs
)
# Minimum constraint for active power outputs.
for i in self.CHP_list:
self.output_minimum_timeseries.loc[
:, self.outputs.str.contains(i + '_active_power_Wel')] \
= self.plant_CHP.loc[i, 'elec_min_Wel']
# Maximum constraint for active power outputs.
self.output_maximum_timeseries.loc[
:, self.outputs.str.contains(i + '_active_power_Wel')] \
= self.plant_CHP.loc[i, 'elec_cap_Wel']
# Minimum constraint for storage content.
self.output_minimum_timeseries.loc[
:, self.outputs.str.contains('_storage')
] = self.plant_storage.loc[self.scenario_name, 'SOC_min_m3']
# Maximum constraint for storage content.
self.output_maximum_timeseries.loc[
:, self.outputs.str.contains('_storage')
] = self.plant_storage.loc[self.scenario_name, 'SOC_max_m3']
# Optimization methods
def define_optimization_variables(
self,
optimization_problem: pyo.ConcreteModel,
):
# Define variables.
optimization_problem.state_vector = pyo.Var(self.timesteps, [self.der_name], self.states)
optimization_problem.control_vector = pyo.Var(self.timesteps, [self.der_name], self.controls)
optimization_problem.output_vector = pyo.Var(self.timesteps, [self.der_name], self.outputs)
def define_optimization_constraints(
self,
optimization_problem: pyo.ConcreteModel,
):
# Define shorthand for indexing 't+1'.
# - This implementation assumes that timesteps are always equally spaced.
timestep_interval = self.timesteps[1] - self.timesteps[0]
# Define constraints.
if optimization_problem.find_component('der_model_constraints') is None:
optimization_problem.der_model_constraints = pyo.ConstraintList()
# Initial state.
for state in self.states:
# Set initial state according to the initial state vector.
optimization_problem.der_model_constraints.add(
optimization_problem.state_vector[self.timesteps[0], self.der_name, state]
==
self.state_vector_initial.at[state]
)
for timestep in self.timesteps[:-1]:
# State equation.
for state in self.states:
optimization_problem.der_model_constraints.add(
optimization_problem.state_vector[timestep + timestep_interval, self.der_name, state]
==
sum(
self.state_matrix.at[state, state_other]
* optimization_problem.state_vector[timestep, self.der_name, state_other]
for state_other in self.states
)
+ sum(
self.control_matrix.at[state, control]
* optimization_problem.control_vector[timestep, self.der_name, control]
for control in self.controls
)
+ sum(
self.disturbance_matrix.at[state, disturbance]
* self.disturbance_timeseries.at[timestep, disturbance]
for disturbance in self.disturbances
)
)
for timestep in self.timesteps:
# Output equation.
for output in self.outputs:
optimization_problem.der_model_constraints.add(
optimization_problem.output_vector[timestep, self.der_name, output]
==
sum(
self.state_output_matrix.at[output, state]
* optimization_problem.state_vector[timestep, self.der_name, state]
for state in self.states
)
+ sum(
self.control_output_matrix.at[output, control]
* optimization_problem.control_vector[timestep, self.der_name, control]
for control in self.controls
)
+ sum(
self.disturbance_output_matrix.at[output, disturbance]
* self.disturbance_timeseries.at[timestep, disturbance]
for disturbance in self.disturbances
)
)
# Output limits.
for output in self.outputs:
if self.chp_schedule is not None and 'active_power_Wel' in output:
for chp in self.CHP_list:
if chp in output and any(self.switches.str.contains(chp)):
pass # this is done in the script currently to support MILP
# optimization_problem.der_model_constraints.add(
# optimization_problem.output_vector[timestep, self.der_name, output]
# >=
# self.output_minimum_timeseries.at[timestep, output]
# * self.chp_schedule.loc[timestep, chp+'_switch']
# )
# optimization_problem.der_model_constraints.add(
# optimization_problem.output_vector[timestep, self.der_name, output]
# <=
# self.output_maximum_timeseries.at[timestep, output]
# * self.chp_schedule.loc[timestep, chp+'_switch']
# )
else:
optimization_problem.der_model_constraints.add(
optimization_problem.output_vector[timestep, self.der_name, output]
>=
self.output_minimum_timeseries.at[timestep, output]
)
optimization_problem.der_model_constraints.add(
optimization_problem.output_vector[timestep, self.der_name, output]
<=
self.output_maximum_timeseries.at[timestep, output]
)
# Control limits.
for timestep in self.timesteps:
# Feedstock input limits (maximum daily or hourly feed-in depending on available feedstock).
for control in self.controls:
if self.feedstock_limit_type == 'daily':
if ('mass_flow' in control) and (timestep + dt.timedelta(days=1) - self.timestep_interval <= self.timestep_end):
optimization_problem.der_model_constraints.add(
sum(
self.timestep_interval.seconds *
optimization_problem.control_vector[timestep + i * self.timestep_interval, self.der_name, control]
for i in range(int(dt.timedelta(days=1)/self.timestep_interval))
)
<= self.available_feedstock * 1000/365
)
elif self.feedstock_limit_type == 'hourly':
if ('mass_flow' in control) and (timestep + dt.timedelta(hours=1) - self.timestep_interval <= self.timestep_end):
optimization_problem.der_model_constraints.add(
sum(
self.timestep_interval.seconds *
optimization_problem.control_vector[
timestep + i * self.timestep_interval, self.der_name, control]
for i in range(int(dt.timedelta(hours=1) / self.timestep_interval))
)
<= self.available_feedstock * 1000 / (365*24)
)
# Final SOC storage
soc_end = self.plant_storage.loc[self.scenario_name, 'SOC_end']
if soc_end == 'init':
# Final SOC greater or equal to initial SOC
optimization_problem.der_model_constraints.add(
optimization_problem.output_vector[self.timesteps[-1], self.der_name, self.scenario_name
+ '_storage_content_m3']
== self.state_vector_initial[self.scenario_name + '_storage_content_m3']
)
def define_optimization_objective(
self,
optimization_problem: pyo.ConcreteModel,
price_timeseries: pd.DataFrame
):
# Obtain timestep interval in hours, for conversion of power to energy.
timestep_interval_hours = (self.timesteps[1] - self.timesteps[0]) / | pd.Timedelta('1h') | pandas.Timedelta |
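# Quick illustration of the Timedelta arithmetic above: a timestep interval converted to
# hours by dividing by pd.Timedelta('1h') (example timestamps only):
import pandas as pd

timesteps = pd.date_range('2020-01-01 00:00', '2020-01-01 06:00', freq='30min')
interval_hours = (timesteps[1] - timesteps[0]) / pd.Timedelta('1h')
print(interval_hours)  # 0.5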
import pandas
import os
import ast
def create_CSV_pipeline1(
platename, seriesperwell, path, illum_path, platedict, one_or_many, Channeldict
):
if one_or_many == "one":
print("CSV creation not enabled for Channeldict for one file/well")
return
else:
columns_per_channel = ["PathName_", "FileName_", "Frame_"]
columns = ["Metadata_Plate", "Metadata_Series", "Metadata_Site"]
channels = []
Channeldict = ast.literal_eval(Channeldict)
rounddict = {}
Channelrounds = list(Channeldict.keys())
for eachround in Channelrounds:
templist = []
templist += Channeldict[eachround].values()
channels += list(i[0] for i in templist)
rounddict[eachround] = list(i[0] for i in templist)
df = pandas.DataFrame(columns=columns)
for chan in channels:
listoffiles = []
for round in rounddict.keys():
if chan in rounddict[round]:
for well in platedict.keys():
listoffiles.append(platedict[well][round])
listoffiles = [x for l in listoffiles for x in l]
df["FileName_Orig" + chan] = listoffiles
df["Metadata_Plate"] = [platename] * len(listoffiles)
df["Metadata_Series"] = list(range(seriesperwell)) * len(platedict.keys())
for eachround in Channelrounds:
pathperround = path + eachround + "/"
for chan in channels:
for i in list(Channeldict[eachround].values()):
if chan == i[0]:
df["PathName_Orig" + chan] = pathperround
df["Frame_Orig" + chan] = i[1]
file_out_name = "/tmp/" + str(platename) + ".csv"
df.to_csv(file_out_name, index=False)
# Make .csv for 2_CP_ApplyIllum
df["Metadata_Site"] = df["Metadata_Series"]
well_df_list = []
well_val_df_list = []
for eachwell in platedict.keys():
well_df_list += [eachwell] * seriesperwell
wellval = eachwell.split("Well")[1]
if wellval[0] == "_":
wellval = wellval[1:]
well_val_df_list += [wellval] * seriesperwell
df["Metadata_Well"] = well_df_list
df["Metadata_Well_Value"] = well_val_df_list
for chan in channels:
listoffiles = []
for round in rounddict.keys():
if chan in rounddict[round]:
for well in platedict.keys():
listoffiles.append(platedict[well][round])
listoffiles = [x for l in listoffiles for x in l]
df["PathName_Illum" + chan] = [illum_path] * len(listoffiles)
df["FileName_Illum" + chan] = [platename + "_Illum" + chan + ".npy"] * len(
listoffiles
)
file_out_name_2 = "/tmp/" + str(platename) + ".csv"
df.to_csv(file_out_name_2, index=False)
return file_out_name, file_out_name_2
def create_CSV_pipeline3(platename, seriesperwell, path, well_list, range_skip):
columns = [
"Metadata_Plate",
"Metadata_Site",
"Metadata_Well",
"Metadata_Well_Value",
]
columns_per_channel = ["PathName_", "FileName_"]
channels = ["DNA", "Phalloidin"]
columns += [col + chan for col in columns_per_channel for chan in channels]
df = | pandas.DataFrame(columns=columns) | pandas.DataFrame |
from pathlib import Path
import pandas as pd
import numpy as np
from sklearn.metrics import f1_score, accuracy_score
#----------------------------------------------------------
l_pct=[0, 20, 50, 80, 100]
n=5
fd_out='./out/a00_random_01_score'
f_in='./out/a00_random_00_mod/data.csv'
#-----------------------------------------------------------
Path(fd_out).mkdir(exist_ok=True, parents=True)
df=pd.read_csv(f_in, index_col=0)
#-----------------------------------------------------------
def get_f1(df, name):
#pp
y_true=df['tumor']
df_tmp=df.drop('tumor', axis=1).copy()
#get score
l_data=[]
for pct in l_pct:
df_pred=(df_tmp>pct).astype('int') #20 means 80% predict tumor
l_score=[f1_score(y_true, df_pred[col]) for col in df_pred.columns]
avg=np.array(l_score).mean()
std=np.array(l_score).std()
l_data.append((f'{name}-{pct}', avg, std))
df_tmp=pd.DataFrame(l_data, columns=['mod', 'avg', 'std'])
return df_tmp
def get_accu(df, name):
#pp
y_true=df['tumor']
df_tmp=df.drop('tumor', axis=1).copy()
#get score
l_data=[]
for pct in l_pct:
df_pred=(df_tmp>pct).astype('int') #20 means 80% predict tumor
l_score=[accuracy_score(y_true, df_pred[col]) for col in df_pred.columns]
avg=np.array(l_score).mean()
std=np.array(l_score).std()
l_data.append((f'{name}-{pct}', avg, std))
df_tmp= | pd.DataFrame(l_data, columns=['mod', 'avg', 'std']) | pandas.DataFrame |
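# Compact sketch of the threshold-and-score loop above, using a random toy frame in
# place of the tumor predictions:
import numpy as np
import pandas as pd
from sklearn.metrics import f1_score

rng = np.random.default_rng(0)
y_true = pd.Series(rng.integers(0, 2, 50))
preds = pd.DataFrame(rng.integers(0, 101, (50, 3)), columns=['m1', 'm2', 'm3'])
for pct in [20, 50, 80]:
    pred_bin = (preds > pct).astype('int')
    scores = [f1_score(y_true, pred_bin[col]) for col in pred_bin.columns]
    print(pct, np.mean(scores), np.std(scores))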
import psychopy.core
import psychopy.event
import psychopy.visual
import pandas as pd
import numpy as np
import psychopy.gui
import psychopy.sound
from SBDM_Data import SBDM_Data
from Block import Block
class Game:
def __init__(self, params, data_frame, no_of_blocks, break_interval, win):
self.data_frame = data_frame
self.no_of_blocks = no_of_blocks
self.break_interval = break_interval
self.win = win
self.params = params
self.textmsg = self.params['break_text']
self.sbdm = SBDM_Data(self.data_frame)
self.stim_list = self.sbdm.create_stim_list()
def run_game(self):
list_of_results = [] # will contain DataFrames
for block_idx in range(self.no_of_blocks):
block = Block(self.stim_list, self.data_frame, self.params)
            list_of_results.append(block.run_block())
            if np.mod(block_idx, self.break_interval) == 0:
                # load the break instructions message
                message = psychopy.visual.TextStim(self.win,
                                                   text=self.textmsg)  # shows the break instructions text
                # draw the break instructions message
message.draw()
self.win.flip()
psychopy.event.waitKeys(keyList=['space'])
curr_final_results = | pd.DataFrame(list_of_results[0]) | pandas.DataFrame |
# 101803503 <NAME>
import pandas as pd
from os import path
import sys
import math
def validate_input_file(data_file):
if not (path.exists(data_file)):
print(" 🛑 File doesn't exist")
exit(0)
if not data_file.endswith('.csv'):
print("🛑 CSV is the only supported format")
exit(0)
try:
input_file = pd.read_csv(data_file)
except Exception:
print( "🛑 Error Opening File" )
exit(0)
col = input_file.shape
if not col[1] >= 3:
print(f"🛑 {data_file} should have 3 columns ")
exit(0)
k = 0
for i in input_file.columns:
k = k + 1
for j in input_file.index:
if k != 1:
val = isinstance(input_file[i][j], int)
val1 = isinstance(input_file[i][j], float)
if not val and not val1:
print(f'Value is not numeric in {k} column')
exit(0)
return 1
def validate_result_file(data_file):
if not data_file.endswith('.csv'):
print("🛑 CSV is the only supported format for result files")
exit(0)
return 1
def validate_weights(data_file, weights_str):
input_file = pd.read_csv(data_file)
col = input_file.shape
weight = []
split_weights_str = weights_str.split(',')
for split_weights_str_obj in split_weights_str :
split_weights_str_obj_ = 0
for split_weights_str_obj_char in split_weights_str_obj:
if not split_weights_str_obj_char.isnumeric():
if split_weights_str_obj_ >= 1 or split_weights_str_obj_char != '.':
print("🛑 Weights not in Corrent Format")
exit(0)
else:
split_weights_str_obj_ = split_weights_str_obj_ + 1
weight.append(float(split_weights_str_obj))
if len(weight) != (col[1] - 1):
print(f"🛑 No. of Weights should be same as no. of columns in {data_file}")
exit(0)
return weight
def validate_impacts(data_file, impact_str):
input_file = pd.read_csv(data_file)
col = input_file.shape
impact = impact_str.split(',')
for i in impact:
if i not in {'+', '-'}:
print(f"🛑 Only \" + \" or \" - \" are allowed not {i}")
exit(0)
if len(impact) != (col[1] - 1):
print(f"🛑 Columns in {data_file} and Impacts shouls be Equal in No.")
exit(0)
return impact
def input_matrix_normalized(data_file):
data_frame = | pd.read_csv(data_file) | pandas.read_csv |
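# The body of input_matrix_normalized is not shown in this excerpt; a TOPSIS
# implementation typically vector-normalises each criterion column. A hedged sketch of
# that step with toy alternatives A1/A2:
import numpy as np
import pandas as pd

df = pd.DataFrame({'alt': ['A1', 'A2'], 'c1': [250.0, 200.0], 'c2': [16.0, 12.0]})
criteria = df.columns[1:]
norm = df.copy()
norm[criteria] = df[criteria] / np.sqrt((df[criteria] ** 2).sum())
print(norm)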
import pandas as pd
import os
# where to save or read
CSV_DIR = 'OECD_csv_datasets'
PROCESSED_DIR = 'OECD_csv_processed'
# datafile = 'OECD_csv_processed/industry_candidates.csv'
if not os.path.exists(PROCESSED_DIR):
os.makedirs(PROCESSED_DIR)
# STAGE 3:
def standardize_data(dset_id, df):
# standardized column names
stdcol_dict = {'Time Period': 'YEAR', 'Observation': 'series', 'Industry': 'INDUSTRY', 'Measure': 'MEASURE',
'Country': 'NATION'}
cols = df.columns.values.tolist()
print(dset_id, cols)
# for test
# original_df = df
# first deal with any potential tuple columns
# e.g. 'Country - distribution'
tuple_col = 'Country - distribution'
if tuple_col in cols:
split_list = tuple_col.split(' - ')
new_col_list = [split_list[0], split_list[1]]
for n, col in enumerate(new_col_list):
df[col] = df[tuple_col].apply(lambda x: x.split('-')[n])
df = df.drop(tuple_col, axis=1)
# rename common occurrence column names
# 'Time Period' to 'YEAR', 'Observation' to 'series'
# 'Industry' to 'INDUSTRY', 'Country' to 'NATION'
df.rename(stdcol_dict, axis='columns', inplace=True)
cols = df.columns.values.tolist()
# Industry 'other' rename
industry_renames = ['Activity', 'ISIC3', 'Sector']
if any(k in industry_renames for k in cols):
no = list(set(industry_renames) & set(cols))
df.rename(columns={no[0]: 'INDUSTRY'}, inplace=True)
cols = df.columns.values.tolist()
# Country 'other' rename - has do be done in order
# 'Country - distribution' is a special case already dealt with above
country_renames = ['Declaring country', 'Partner country', 'Reporting country']
for cname in country_renames:
if cname in cols:
df.rename({cname: 'NATION'}, axis='columns', inplace=True)
break
cols = df.columns.values.tolist()
print(dset_id, cols)
# now find columns that are not YEAR, series, INDUSTRY, MEASURE or NATION
stdcols_list = []
nonstdcols_list = []
measurecol = False
for k in stdcol_dict:
stdcols_list.append(stdcol_dict[k])
for cname in cols:
if cname not in stdcols_list:
nonstdcols_list.append(cname)
elif not measurecol and cname == 'MEASURE':
measurecol = True
if nonstdcols_list:
if measurecol:
df = df.rename(columns={'MEASURE': 'temp'})
nonstdcols_list.append('temp')
df['MEASURE'] = df[nonstdcols_list].apply(lambda x: ','.join(x), axis=1)
df.drop(nonstdcols_list, axis=1, inplace=True)
cols = df.columns.values.tolist()
print(dset_id, nonstdcols_list, measurecol)
print(dset_id, cols)
df.set_index('YEAR', inplace=True)
df.to_csv(os.path.join(PROCESSED_DIR, dset_id + '_C.csv'))
# STAGE 1: OECD data set CSV analysis for data sets covering industries
# criteria
criteria = ['Industry', 'Activity', 'ISIC3', 'Sector']
candidates = []
column_name = []
# iterate through each CSV file in the directory and analyse it
for filename in os.listdir(CSV_DIR):
if filename.endswith(".csv"):
dsetid = os.path.splitext(filename)[0]
fromfile = os.path.join(CSV_DIR, filename)
oecd_dataset_df = pd.read_csv(fromfile)
oecd_cols = oecd_dataset_df.columns.values.tolist()
if any(k in criteria for k in oecd_cols):
intersection = list(set(criteria) & set(oecd_cols))
candidates.append(dsetid)
occurrence = next((x for x in intersection if x == criteria[0]), None)
if occurrence is None:
column_name.append(intersection[0])
else:
column_name.append(occurrence)
print(dsetid, intersection, occurrence)
# create candidate DataFrame
candidates_df = pd.DataFrame({'KeyFamilyId': candidates, 'ColumnName': column_name})
# diagnostic info
print(len(candidates), 'industry candidates found')
# STAGE 2 : analysis of OECD industry related data set for specific industry criteria
# criteria
industryTypeKey = 'ELECTRICITY'
hasTarget = []
# find which have data on target industry type
for row in candidates_df.iterrows():
datasetId = row[1]['KeyFamilyId']
colName = row[1]['ColumnName']
dataset_df = pd.read_csv(os.path.join(CSV_DIR, datasetId + '.csv'))
print('checking', datasetId)
try:
filtered_df = dataset_df[dataset_df[colName].str.startswith(industryTypeKey)]
except ValueError:
# all NaNs in target column, nothing to see here - move on
pass
else:
if len(filtered_df.index):
# non-empty DataFrame
hasTarget.append(datasetId)
# call stage 3
standardize_data(datasetId, filtered_df)
# diagnostic info
print(len(hasTarget), 'beginning with', industryTypeKey)
print(hasTarget)
# target data frame
def_cols = ['YEAR', 'series', 'INDUSTRY', 'NATION', 'MEASURE']
combined_df = pd.DataFrame(columns=def_cols)
# STAGE 4. Iterate through each CSV file in the directory and concatenate it
for filename in os.listdir(PROCESSED_DIR):
if filename.endswith("_C.csv"):
fname = os.path.splitext(filename)[0]
fromfile = os.path.join(PROCESSED_DIR, filename)
print(fname)
source_df = pd.read_csv(fromfile)
list_of_series = [source_df[def_cols[0]], source_df[def_cols[1]], source_df[def_cols[2]],
source_df[def_cols[3]], source_df[def_cols[4]]]
stripped_df = | pd.concat(list_of_series, axis=1) | pandas.concat |
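# Small sketch of the accumulation pattern stage 4 appears to build towards: collect the
# standardized per-file frames and concatenate them row-wise (toy rows, assumed columns):
import pandas as pd

frames = []
for year in (2000, 2001):
    frames.append(pd.DataFrame({'YEAR': [year], 'series': [1.0], 'INDUSTRY': ['ELECTRICITY'],
                                'NATION': ['X'], 'MEASURE': ['GWh']}))
combined = pd.concat(frames, ignore_index=True)
print(combined)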
import os, sys
import collections
import math
import random
import pprint
import pandas as pd
import pysam
class Call:
def __init__(self, call, quality = None, is_error = False):
self.call = call
self.quality = quality
self.is_error = is_error
self.is_indel = len(call) > 1
def get_call_for_pileup_read(pileup_read, ref = None):
if pileup_read.alignment.mapping_quality < 20:
return Call('_MAPQ', is_error = True)
elif pileup_read.alignment.is_secondary or pileup_read.alignment.is_supplementary or pileup_read.alignment.is_qcfail:
return Call('_FLAG', is_error = True)
elif pileup_read.alignment.is_duplicate:
return Call('_DUPE', is_error = True)
elif pileup_read.indel > 0:
quals = pileup_read.alignment.query_qualities[pileup_read.query_position:pileup_read.query_position+pileup_read.indel+1]
return Call(
pileup_read.alignment.query_sequence[pileup_read.query_position:pileup_read.query_position+pileup_read.indel+1],
1.0 * sum(quals) / len(quals)
)
elif pileup_read.indel < 0:
#print(ref, pileup_read.indel, len(ref), ref[0:-abs(pileup_read.indel)])
#if abs(pileup_read.indel) < len(ref):
# return ref[0:-abs(pileup_read.indel)]
#else:
# return '_DEL'
#hacky way to handle deletions...
return Call(
'%s-%d' % (pileup_read.alignment.query_sequence[pileup_read.query_position], abs(pileup_read.indel)),
sum(pileup_read.alignment.query_qualities[pileup_read.query_position:pileup_read.query_position+2]) / 2.0
)
elif pileup_read.is_del:
return Call('_DEL', is_error = True)
elif pileup_read.is_refskip:
return Call('_SKIP', is_error = True)
else:
return Call(pileup_read.alignment.query_sequence[pileup_read.query_position], pileup_read.alignment.query_qualities[pileup_read.query_position])
def get_read_calls(samfile, chrom, pos1, ref = None, max_depth = 1e7, calculate_stats = False):
read_calls = {}
read_stats = {}
for pileup_column in samfile.pileup(chrom, pos1-1, pos1, max_depth = max_depth, stepper = 'nofilter', truncate=True):
print(chrom, pos1, '->', pileup_column.reference_name, pileup_column.reference_pos, ' - found ', pileup_column.nsegments, 'alignments')
for pileup_read in pileup_column.pileups:
#ignore secondary and supplementary alignments
if pileup_read.alignment.is_secondary:
continue
if pileup_read.alignment.is_supplementary:
continue
assert not pileup_read.alignment.query_name in read_calls, 'encountered multiple alignments for single read?'
read_calls[pileup_read.alignment.query_name] = get_call_for_pileup_read(pileup_read, ref)
if calculate_stats:
read_stats[pileup_read.alignment.query_name] = {
'length': pileup_read.alignment.infer_query_length(),
'mismatches': pileup_read.alignment.get_tag('NM'),
'mapping_quality': pileup_read.alignment.mapping_quality,
'mean_baseq': 1.0 * sum(pileup_read.alignment.query_qualities) / len(pileup_read.alignment.query_qualities)
}
if calculate_stats:
return read_calls, read_stats
else:
return read_calls
"Get counts of how often each call was observed at each SNP"
def get_call_counts(samfile, snps):
snp_call_counts = {}
for snp in snps.itertuples():
snp_call_counts[snp.name] = collections.Counter()
for pileup_column in samfile.pileup(snp.CHROM, snp.POS-1, snp.POS, max_depth = 1e4, stepper = 'nofilter', truncate=True):
for pileup_read in pileup_column.pileups:
call = get_call_for_pileup_read(pileup_read) #, snp.REF
snp_call_counts[snp.name][call.call] += 1
return snp_call_counts
def get_allele_type(allele, snp):
if allele in snp.paternal and allele in snp.maternal:
return 'shared'
elif allele in snp.maternal:
return 'maternal'
elif allele in snp.paternal:
return 'paternal'
else:
return None
def get_mutation_allele_type(allele, mutation):
if allele == mutation['REF_processed']:
return 'wild-type'
elif allele == mutation['ALT_processed']:
return 'mutation'
else:
return None
def process_family(fam, fam_rows, bam_mask, snps_mask, subsample = None):
print()
print(fam, 'STARTING')
assert 'proband' in fam_rows['relationship'].values, 'need at least one proband'
snps_fn = snps_mask % fam
if not os.path.isfile(snps_fn):
raise Exception('%s: %s missing!' % (fam, snps_fn))
snps = pd.read_csv(snps_fn, sep='\t', dtype={'#CHROM': str, 'POS': int, 'ID': str, 'REF': str, 'ALT': str})
snps.rename(columns = { '#CHROM': 'CHROM' }, inplace=True)
snps.loc[~snps['CHROM'].str.startswith('chr'), 'CHROM'] = ['chr' + c for c in snps.loc[~snps['CHROM'].str.startswith('chr'), 'CHROM']]
snps['name'] = ['%s_%d' % (snp.CHROM, snp.POS) for snp in snps.itertuples()]
#the "calls" we actually get back from the pileup are just a single base, so if we have a deletion
#such as GA>G, what we actually see if a G with indel == 0 or indel == 1.
#thus, we need to adjust the REF/ALT we actually expect to see
#this is stupid, what we should really do is to process the pileups in a smarter way...
snps['REF_processed'] = [snp.REF[0] if len(snp.REF) > 1 else snp.REF for snp in snps.itertuples()]
snps['ALT_processed'] = ['%s-%d' % (snp.REF[0], len(snp.REF) - len(snp.ALT)) if len(snp.REF) > 1 else snp.ALT for snp in snps.itertuples()]
print(snps)
mutation = snps[snps.ID == 'mutation']
assert len(mutation) == 1, 'only one mutation allowed'
mutation = mutation.iloc[0]
background_snps_list = []
for offset in [10, 50, 100]:
for sign in [1, -1]:
background_snps_list.append(
pd.DataFrame([
{
'CHROM': snp.CHROM,
'POS': snp.POS + sign * offset,
'name': '{}_{}'.format(snp.name, sign * offset)
} for snp in snps.itertuples()
])
)
background_snps = pd.concat(background_snps_list)
background_snps = background_snps[background_snps.POS > 0]
background_snps.drop_duplicates(inplace = True)
background_snps.set_index(['name'], inplace = True)
print(fam, 'Using', len(background_snps), 'background SNPs:')
print(background_snps)
#get allele counts for all SNPs
snp_sample_counts = collections.defaultdict(dict)
for sample in fam_rows.itertuples():
fn = bam_mask % sample.BC
print(fam, 'Getting allele counts for', sample.BC)
with pysam.AlignmentFile(fn, "rb") as samfile:
snp_sample_counts[sample.BC] = get_call_counts(samfile, snps)
print(fam, 'Checked', len(snp_sample_counts), 'SNPs.')
#make a dataframe, the stupid way
rows = []
for sample, x in snp_sample_counts.items():
for snp_name, xx in x.items():
for snp_allele, count in xx.items():
rows.append({
'count': count,
'snp_name': snp_name,
'snp_allele': snp_allele,
'sample': sample
})
all_allele_counts = pd.DataFrame(rows).set_index(['sample', 'snp_name', 'snp_allele'])
print(all_allele_counts.head())
if 'mother' in fam_rows['relationship'].values and 'father' in fam_rows['relationship'].values:
assert (fam_rows['relationship'] == 'proband').sum() == 1, 'only one proband sample allowed'
print('Found parents, finding informative SNPs...')
informative_snp_dicts = []
for snp in snps.itertuples():
#we don't want the mutation
if snp.ID == 'mutation':
continue
gt_calls = {}
for sample in fam_rows.itertuples():
relationship = sample.relationship
sample_counts = snp_sample_counts[sample.BC]
snp_counts = sample_counts[snp.name]
#only consider REF + ALT here, ignore the rest
total_counts = snp_counts[snp.REF_processed] + snp_counts[snp.ALT_processed]
if snp_counts[snp.REF_processed] > 0.25 * total_counts and snp_counts[snp.ALT_processed] > 0.25 * total_counts:
gt_calls[relationship] = 'het'
elif snp_counts[snp.REF_processed] > 0.6 * total_counts:
gt_calls[relationship] = snp.REF_processed
elif snp_counts[snp.ALT_processed] > 0.6 * total_counts:
gt_calls[relationship] = snp.ALT_processed
else:
gt_calls[relationship] = None
print(snp.name, relationship, snp_counts[snp.REF_processed], snp_counts[snp.ALT_processed], total_counts, gt_calls[relationship])
print(snp.name, gt_calls)
if 'mother' in gt_calls and 'father' in gt_calls and 'proband' in gt_calls \
and gt_calls['proband'] == 'het' \
and gt_calls['mother'] != gt_calls['father'] \
and gt_calls['mother'] is not None \
and gt_calls['father'] is not None:
snp_dict = snp._asdict()
snp_dict['maternal'] = [gt_calls['mother']] if gt_calls['mother'] != 'het' else [snp.REF_processed, snp.ALT_processed]
snp_dict['paternal'] = [gt_calls['father']] if gt_calls['father'] != 'het' else [snp.REF_processed, snp.ALT_processed]
informative_snp_dicts.append(snp_dict)
informative_snps = pd.DataFrame(informative_snp_dicts)
if len(informative_snps) == 0:
print(fam, 'No informative SNPs')
            return (None,) * 8  # keep the same shape as the full return tuple below
informative_snps = informative_snps[list(snps.columns) + ['maternal', 'paternal']] #remove Index, ensure right order
else:
print(fam, 'Assuming that all non-mutation SNPs are informative')
informative_snps = snps.loc[snps.ID != 'mutation'].copy()
informative_snps['maternal'] = [[] for _ in range(len(informative_snps))]
informative_snps['paternal'] = [[] for _ in range(len(informative_snps))]
print(fam, 'Found', len(informative_snps), 'informative SNPs:')
print(informative_snps)
sample_informative_allele_counts = {}
sample_mutation_read_stats = {}
sample_read_mutation_calls = {}
sample_read_snp_calls = {}
sample_read_background_calls = {}
phase_evidence = []
for sample in fam_rows.itertuples(): #include all -- [rows['relationship'] == 'proband']
fn = bam_mask % sample.BC
print(fn)
#get calls for each read at the mutation and each informative snp
informative_calls = {}
sample_read_background_calls_rows = []
with pysam.AlignmentFile(fn, "rb") as samfile:
mutation_calls, mutation_read_stats = get_read_calls(samfile, mutation['CHROM'], mutation['POS'], calculate_stats = True)
#get calls for background snps (should all be ref and high q)
for snp in background_snps.itertuples():
for read, background_call in get_read_calls(samfile, snp.CHROM, snp.POS).items():
sample_read_background_calls_rows.append({
'read': read,
'snp_name': snp.Index, #name = Index
'snp_call': background_call.call,
'snp_call_quality': background_call.quality,
})
#get calls for actual informative snps
for snp in informative_snps.itertuples():
informative_calls[snp.name] = get_read_calls(samfile, snp.CHROM, snp.POS)
sample_read_background_calls[sample.BC] = pd.DataFrame(sample_read_background_calls_rows) \
.set_index(['read']) \
.join(background_snps, on = 'snp_name')
#should we subsample reads? just subsample on mutation calls, we will simply ignore the reads for which we don't have a mutation call
if subsample is not None:
            n_subsample = math.ceil(subsample * len(mutation_calls))
            print('subsampling', len(mutation_calls), 'mutation_calls to', n_subsample, '(', subsample, ')')
            mutation_calls = dict(random.sample(list(mutation_calls.items()), k = n_subsample))
            mutation_read_stats = dict(random.sample(list(mutation_read_stats.items()), k = n_subsample))
assert len(mutation_calls) == n_subsample
#process mutation read stats
sample_mutation_read_stats[sample.BC] = pd.DataFrame(list(mutation_read_stats.values()))
#summarize basecounts for mutation and each SNP
sample_read_mutation_calls_rows = []
site_basecounts = collections.Counter()
for read, mutation_base in mutation_calls.items():
simplified_call = mutation_base.call
if mutation_base.is_error:
simplified_call = '_FILTERED'
elif mutation_base.is_indel and not mutation_base.call in [mutation['REF_processed'], mutation['ALT_processed']]:
simplified_call = '_OTHER_INDEL'
site_basecounts[(mutation['name'],simplified_call)] += 1
sample_read_mutation_calls_rows.append({
'read': read,
'mutation_call': mutation_base.call,
'mutation_call_type': get_mutation_allele_type(mutation_base.call, mutation = mutation),
'mutation_call_simplified': simplified_call,
'mutation_call_quality': mutation_base.quality,
})
sample_read_mutation_calls[sample.BC] = pd.DataFrame(sample_read_mutation_calls_rows) \
.set_index(['read'])
for snp in informative_snps.itertuples():
snp_calls = informative_calls[snp.name]
for snp_base in snp_calls.values():
simplified_call = snp_base.call
if snp_base.is_error:
simplified_call = '_FILTERED'
elif snp_base.is_indel and not snp_base.call in [snp.REF_processed, snp.ALT_processed]:
simplified_call = '_OTHER_INDEL'
site_basecounts[(snp.name,simplified_call)] += 1
sample_informative_allele_counts[sample.BC] = pd.DataFrame(
[{'count': x, 'snp_name': name, 'snp_allele': allele} for ((name, allele), x) in site_basecounts.items()],
).set_index(['snp_name', 'snp_allele'])
#count haplotypes for each informative SNP
sample_read_snp_calls_rows = []
mutation_allele_counts = {}
for snp in informative_snps.itertuples():
snp_calls = informative_calls[snp.name]
mutation_allele_counts[snp.name] = collections.defaultdict(collections.Counter)
for read, mutation_base in mutation_calls.items():
simplified_mutatation_call = mutation_base.call
#only allow REF/ALT for mutation
if not mutation_base.call in [mutation['REF_processed'], mutation['ALT_processed']]:
simplified_mutatation_call = '_OTHER'
#did we cover the SNP? if so, get the call
simplified_snp_call = '_NONE'
if read in snp_calls:
snp_base = snp_calls[read]
#only allow REF/ALT for SNP
simplified_snp_call = snp_base.call
if not snp_base.call in [snp.REF_processed, snp.ALT_processed]:
simplified_snp_call = '_OTHER'
#note each call separately for later testing
sample_read_snp_calls_rows.append({
'read': read,
'snp_name': snp.name,
'snp_call': snp_base.call,
'snp_call_type': get_allele_type(snp_base.call, snp = snp),
'snp_call_quality': snp_base.quality,
})
#also build up a dict of dicts with counts for each individual snp for simple heatmaps
mutation_allele_counts[snp.name][simplified_mutatation_call][simplified_snp_call] += 1
sample_read_snp_calls[sample.BC] = pd.DataFrame(sample_read_snp_calls_rows) \
.set_index(['read', 'snp_name'])
#post-process counts into dataframe
for snp in informative_snps.itertuples():
pair_counts = mutation_allele_counts[snp.name]
df = pd.DataFrame.from_dict(pair_counts)
df.reset_index(inplace = True)
df.fillna(0, inplace=True)
df.rename(columns={'index': 'snp_allele'}, inplace=True)
df = df.melt(id_vars=['snp_allele'], var_name='mutation_allele', value_name='count').astype({'count': int})
#add annotation about paternal/maternal allele
df['snp_allele_type'] = df['snp_allele'].apply(get_allele_type, snp = snp)
#add annotation about mutation/wild-type
df['mutation_allele_type'] = df['mutation_allele'].apply(get_mutation_allele_type, mutation = mutation)
#add general info
df['sample'] = sample.BC
df['snp_name'] = snp.name
df = df[['sample', 'snp_name', 'snp_allele', 'mutation_allele', 'count', 'snp_allele_type', 'mutation_allele_type']] #, 'fraction'
df.set_index(['sample', 'snp_name', 'snp_allele', 'mutation_allele'], inplace=True)
phase_evidence.append(df)
print(fam, 'processed sample:', sample)
print(fam, 'DONE')
return (
informative_snps,
all_allele_counts,
pd.concat(sample_mutation_read_stats, names=['sample']) if len(sample_mutation_read_stats) > 0 else None,
pd.concat(sample_informative_allele_counts, names=['sample']) if len(sample_informative_allele_counts) > 0 else None,
pd.concat(sample_read_background_calls, names=['sample']) if len(sample_read_background_calls_rows) > 0 else None,
pd.concat(sample_read_mutation_calls, names=['sample']) if len(sample_read_mutation_calls_rows) > 0 else None,
pd.concat(sample_read_snp_calls, names=['sample']) if len(sample_read_snp_calls_rows) > 0 else None,
pd.concat(phase_evidence) if len(phase_evidence) > 0 else None,
)
def write_csv(d, fn):
d.to_csv(fn)
print('Wrote', len(d), 'rows:')
print(fn)
def main():
import argparse
parser = argparse.ArgumentParser(
description = "ddOWL mutatation allele phasing and plotting tools, v0.1 - <NAME>",
formatter_class = argparse.ArgumentDefaultsHelpFormatter
)
#specify parameters
parser.add_argument("-v", "--version", help="print version and exit", action="store_true")
parser.add_argument("--first", help="only first family", action="store_true")
parser.add_argument("--debug", help="enable debug output", action="store_true")
parser.add_argument("--family", help="specifiv family to run")
parser.add_argument("FAMILIES", help="CSV file with family info")
parser.add_argument("SNPS", help="mask for SNP TSV filenames (%s replaced by family ID)")
parser.add_argument("BAMS", help="mask for BAM filenames (%s replaced by sample ID)")
args = parser.parse_args()
families = pd.read_csv(args.FAMILIES)
assert len(families) > 0, 'no families'
print(families.head())
if args.family:
print('Only analysing family {}'.format(args.family))
families = families[families['FamilyID'] == args.family]
assert len(families) > 0, 'no families after filtering'
print(families.head())
fn_prefix = '{}.{}'.format(args.FAMILIES, args.family)
else:
fn_prefix = args.FAMILIES
if args.debug:
fn_prefix = '{}.DEBUG'.format(fn_prefix)
if args.first:
fn_prefix = '{}.FIRST'.format(fn_prefix)
informative_snp_dict = {}
read_background_calls_dict = {}
read_mutation_calls_dict = {}
read_snp_calls_dict = {}
phase_evidence_dict = {}
read_stat_dict = {}
allele_count_dict = {}
informative_allele_count_dict = {}
for family, group in families.groupby('FamilyID'):
try:
(
fam_informative_snps,
fam_allele_counts,
fam_read_stats,
fam_informative_allele_counts,
fam_read_background_calls,
fam_read_mutation_calls,
fam_read_snp_calls,
fam_phase_evidence
) = process_family(
family,
group,
bam_mask = args.BAMS,
snps_mask = args.SNPS,
)
if fam_informative_snps is not None:
informative_snp_dict[family] = fam_informative_snps
if fam_allele_counts is not None:
allele_count_dict[family] = fam_allele_counts
if fam_read_stats is not None:
read_stat_dict[family] = fam_read_stats
if fam_informative_allele_counts is not None:
informative_allele_count_dict[family] = fam_informative_allele_counts
if fam_read_background_calls is not None:
read_background_calls_dict[family] = fam_read_background_calls
if fam_read_mutation_calls is not None:
read_mutation_calls_dict[family] = fam_read_mutation_calls
if fam_read_snp_calls is not None:
read_snp_calls_dict[family] = fam_read_snp_calls
if fam_phase_evidence is not None:
phase_evidence_dict[family] = fam_phase_evidence
except Exception as e:
#print(e)
raise
if args.first:
print('--first is set: Stopping after first family!')
break
informative_snps = | pd.concat(informative_snp_dict, names=['FamilyID']) | pandas.concat |
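# Illustration of pd.concat on a dict with names=[...], which is how the per-family
# results above gain a 'FamilyID' index level (toy frames):
import pandas as pd

per_family = {
    'FAM1': pd.DataFrame({'snp': ['s1'], 'count': [3]}).set_index('snp'),
    'FAM2': pd.DataFrame({'snp': ['s2'], 'count': [5]}).set_index('snp'),
}
print(pd.concat(per_family, names=['FamilyID']))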
import os
import sys
import time
import argparse
import unicodedata
import librosa
import numpy as np
import pandas as pd
from tqdm import tqdm
from hparams import hparams
def run_prepare(args, hparams):
def normalize_wave(wave, sample_rate):
"""normalize wave format"""
wave = librosa.resample(wave, sample_rate, hparams.sample_rate)
return wave
def normalize_text(text):
"""normalize text format"""
text = ''.join(char for char in unicodedata.normalize('NFD', text)
if unicodedata.category(char) != 'Mn')
return text.strip()
if args.dataset == 'BIAOBEI':
dataset_name = 'BZNSYP'
dataset_path = os.path.join('./', dataset_name)
if not os.path.isdir(dataset_path):
print("BIAOBEI dataset folder doesn't exist")
sys.exit(0)
total_duration = 0
text_file_path = os.path.join(dataset_path, 'ProsodyLabeling', '000001-010000.txt')
try:
text_file = open(text_file_path, 'r', encoding='utf8')
except FileNotFoundError:
            print('text file does not exist')
sys.exit(0)
data_array = np.zeros(shape=(1, 3), dtype=str)
for index, each in tqdm(enumerate(text_file.readlines())):
if index % 2 == 0:
list = []
basename = each.strip().split()[0]
raw_text = each.strip().split()[1]
list.append(basename)
list.append(raw_text)
else:
pinyin_text = normalize_text(each)
list.append(pinyin_text)
data_array = np.append(data_array, np.array(list).reshape(1, 3), axis=0)
wave_file_path = os.path.join(dataset_path, 'Wave', '{}.wav'.format(basename))
if not os.path.exists(wave_file_path):
# print('wave file no exist')
continue
try:
wave, sr = librosa.load(wave_file_path, sr=None)
except EOFError:
print('wave format error at {}'.format(basename+'.wav'))
continue
if not sr == hparams.sample_rate:
wave = normalize_wave(wave, sr)
duration = librosa.get_duration(wave)
total_duration += duration
librosa.output.write_wav(wave_file_path, wave, hparams.sample_rate)
data_frame = pd.DataFrame(data_array[1:])
data_frame.to_csv(os.path.join(dataset_path, 'metadata.csv'), sep='|', header=False, index=False, encoding='utf8')
text_file.close()
print("total audio duration: %ss" % (time.strftime('%H:%M:%S', time.gmtime(total_duration))))
elif args.dataset == 'THCHS-30':
dataset_name = 'data_thchs30'
dataset_path = os.path.join('./', dataset_name)
if not os.path.isdir(dataset_path):
print("{} dataset folder doesn't exist".format(args.dataset))
sys.exit(0)
total_duration = 0
raw_dataset_path = os.path.join(dataset_path, 'wavs')
data_array = np.zeros(shape=(1, 3), dtype=str)
for root, dirs, files in os.walk(raw_dataset_path):
for file in tqdm(files):
if not file.endswith('.wav.trn'):
continue
list = []
basename = file[:-8]
list.append(basename)
text_file = os.path.join(raw_dataset_path, file)
if not os.path.exists(text_file):
                    print('text file {} does not exist'.format(file))
continue
with open(text_file, 'r', encoding='utf8') as f:
lines = f.readlines()
raw_text = lines[0].rstrip('\n')
pinyin_text = lines[1].rstrip('\n')
pinyin_text = normalize_text(pinyin_text)
list.append(raw_text)
list.append(pinyin_text)
wave_file = os.path.join(raw_dataset_path, '{}.wav'.format(basename))
if not os.path.exists(wave_file):
                    print('wave file {}.wav does not exist'.format(basename))
continue
try:
wave, sr = librosa.load(wave_file, sr=None)
except EOFError:
print('wave file {}.wav format error'.format(basename))
continue
                if sr != hparams.sample_rate:
                    print('sample rate of wave file {}.wav does not match'.format(basename))
wave = librosa.resample(wave, sr, hparams.sample_rate)
duration = librosa.get_duration(wave)
if duration < 10:
total_duration += duration
librosa.output.write_wav(wave_file, wave, hparams.sample_rate)
data_array = np.append(data_array, np.array(list).reshape(1, 3), axis=0)
data_frame = | pd.DataFrame(data_array[1:]) | pandas.DataFrame |
# Imports
from bs4 import BeautifulSoup
import pandas as pd
from urllib.request import Request, urlopen
from urllib.error import URLError, HTTPError
# Define the URL
url = 'https://www.fundamentus.com.br/resultado.php'
headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) '
                         'Chrome/76.0.3809.100 Safari/537.36'}
# Perform the request:
try:
request = Request(url, headers=headers)
response = urlopen(request)
print("Request realizado!")
print(response.getcode())
html = response.read()
# Handle possible errors (response is unbound if urlopen raised, so report the exception itself):
except HTTPError as e:
    print('HTTPError\n\n')
    print(e.code)
    print(e.reason)
except URLError as e:
    print('URLError\n\n')
    print(e.reason)
# Instantiate a BeautifulSoup object:
soup = BeautifulSoup(html, 'html.parser')
# Grab the table's column names
colunas_names = [col.getText() for col in soup.find('table', {'id': 'resultado'}).find('thead').findAll('th')]
colunas = {i: col.getText() for i, col in enumerate(soup.find('table', {'id': 'resultado'}).find('thead').findAll('th'))}
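# colunas maps column position -> header text; colunas_names keeps just the ordered header strings.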
# Create a DataFrame with the column names
dados = | pd.DataFrame(columns=colunas_names) | pandas.DataFrame |
import time
import re
import math
import matplotlib
matplotlib.use('Agg')
from matplotlib.ticker import FormatStrFormatter, PercentFormatter
from unidecode import unidecode
from db import get_db_config
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
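# Overall win rate per player, restricted to accounts with more than `min_battles` battles.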
def get_player_data(db, ids, min_battles=100):
sql = "SELECT accountid as player_id, CAST(wins AS float) / CAST(battlescount AS float) as player_winrate " \
"FROM wot.player " \
"WHERE accountid in %s and battlescount > %s" % (
str(tuple(ids.values)), min_battles) # that's a hack to get the proper string
data = pd.read_sql(sql, con=db, index_col='player_id')
return data
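# Win rate of each player on one specific vehicle, again filtered by a minimum battle count.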
def get_tank_winrates(db, vehicle_id, min_battles=100):
sql = "SELECT player_id, CAST(wins AS float) / CAST(battles AS float) as tank_winrate, battles " \
"FROM wot.player_vehicle " \
"WHERE vehicle_id = %s and battles > %s" % (vehicle_id, str(min_battles))
data = | pd.read_sql(sql, con=db, index_col='player_id') | pandas.read_sql |
# pylint: disable-msg=E1101,W0612
from datetime import datetime, time, timedelta, date
import sys
import os
import operator
from distutils.version import LooseVersion
import nose
import numpy as np
randn = np.random.randn
from pandas import (Index, Series, TimeSeries, DataFrame,
isnull, date_range, Timestamp, Period, DatetimeIndex,
Int64Index, to_datetime, bdate_range, Float64Index)
import pandas.core.datetools as datetools
import pandas.tseries.offsets as offsets
import pandas.tseries.tools as tools
import pandas.tseries.frequencies as fmod
import pandas as pd
from pandas.util.testing import assert_series_equal, assert_almost_equal
import pandas.util.testing as tm
from pandas.tslib import NaT, iNaT
import pandas.lib as lib
import pandas.tslib as tslib
import pandas.index as _index
from pandas.compat import range, long, StringIO, lrange, lmap, zip, product
import pandas.core.datetools as dt
from numpy.random import rand
from numpy.testing import assert_array_equal
from pandas.util.testing import assert_frame_equal
import pandas.compat as compat
import pandas.core.common as com
from pandas import concat
from pandas import _np_version_under1p7
from numpy.testing.decorators import slow
def _skip_if_no_pytz():
try:
import pytz
except ImportError:
raise nose.SkipTest("pytz not installed")
def _skip_if_has_locale():
import locale
lang, _ = locale.getlocale()
if lang is not None:
raise nose.SkipTest("Specific locale is set {0}".format(lang))
class TestTimeSeriesDuplicates(tm.TestCase):
_multiprocess_can_split_ = True
def setUp(self):
dates = [datetime(2000, 1, 2), datetime(2000, 1, 2),
datetime(2000, 1, 2), datetime(2000, 1, 3),
datetime(2000, 1, 3), datetime(2000, 1, 3),
datetime(2000, 1, 4), datetime(2000, 1, 4),
datetime(2000, 1, 4), datetime(2000, 1, 5)]
self.dups = Series(np.random.randn(len(dates)), index=dates)
def test_constructor(self):
tm.assert_isinstance(self.dups, TimeSeries)
tm.assert_isinstance(self.dups.index, DatetimeIndex)
def test_is_unique_monotonic(self):
self.assertFalse(self.dups.index.is_unique)
def test_index_unique(self):
uniques = self.dups.index.unique()
expected = DatetimeIndex([datetime(2000, 1, 2), datetime(2000, 1, 3),
datetime(2000, 1, 4), datetime(2000, 1, 5)])
self.assertEqual(uniques.dtype, 'M8[ns]') # sanity
self.assertTrue(uniques.equals(expected))
self.assertEqual(self.dups.index.nunique(), 4)
# #2563
self.assertTrue(isinstance(uniques, DatetimeIndex))
dups_local = self.dups.index.tz_localize('US/Eastern')
dups_local.name = 'foo'
result = dups_local.unique()
expected = DatetimeIndex(expected, tz='US/Eastern')
self.assertTrue(result.tz is not None)
self.assertEqual(result.name, 'foo')
self.assertTrue(result.equals(expected))
# NaT
arr = [ 1370745748 + t for t in range(20) ] + [iNaT]
idx = DatetimeIndex(arr * 3)
self.assertTrue(idx.unique().equals(DatetimeIndex(arr)))
self.assertEqual(idx.nunique(), 21)
arr = [ Timestamp('2013-06-09 02:42:28') + timedelta(seconds=t) for t in range(20) ] + [NaT]
idx = DatetimeIndex(arr * 3)
self.assertTrue(idx.unique().equals(DatetimeIndex(arr)))
self.assertEqual(idx.nunique(), 21)
def test_index_dupes_contains(self):
d = datetime(2011, 12, 5, 20, 30)
ix = DatetimeIndex([d, d])
self.assertTrue(d in ix)
def test_duplicate_dates_indexing(self):
ts = self.dups
uniques = ts.index.unique()
for date in uniques:
result = ts[date]
mask = ts.index == date
total = (ts.index == date).sum()
expected = ts[mask]
if total > 1:
assert_series_equal(result, expected)
else:
assert_almost_equal(result, expected[0])
cp = ts.copy()
cp[date] = 0
expected = Series(np.where(mask, 0, ts), index=ts.index)
assert_series_equal(cp, expected)
self.assertRaises(KeyError, ts.__getitem__, datetime(2000, 1, 6))
# new index
ts[datetime(2000,1,6)] = 0
self.assertEqual(ts[datetime(2000,1,6)], 0)
def test_range_slice(self):
idx = DatetimeIndex(['1/1/2000', '1/2/2000', '1/2/2000', '1/3/2000',
'1/4/2000'])
ts = Series(np.random.randn(len(idx)), index=idx)
result = ts['1/2/2000':]
expected = ts[1:]
assert_series_equal(result, expected)
result = ts['1/2/2000':'1/3/2000']
expected = ts[1:4]
assert_series_equal(result, expected)
def test_groupby_average_dup_values(self):
result = self.dups.groupby(level=0).mean()
expected = self.dups.groupby(self.dups.index).mean()
assert_series_equal(result, expected)
def test_indexing_over_size_cutoff(self):
import datetime
# #1821
old_cutoff = _index._SIZE_CUTOFF
try:
_index._SIZE_CUTOFF = 1000
# create large list of non periodic datetime
dates = []
sec = datetime.timedelta(seconds=1)
half_sec = datetime.timedelta(microseconds=500000)
d = datetime.datetime(2011, 12, 5, 20, 30)
n = 1100
for i in range(n):
dates.append(d)
dates.append(d + sec)
dates.append(d + sec + half_sec)
dates.append(d + sec + sec + half_sec)
d += 3 * sec
# duplicate some values in the list
duplicate_positions = np.random.randint(0, len(dates) - 1, 20)
for p in duplicate_positions:
dates[p + 1] = dates[p]
df = DataFrame(np.random.randn(len(dates), 4),
index=dates,
columns=list('ABCD'))
pos = n * 3
timestamp = df.index[pos]
self.assertIn(timestamp, df.index)
# it works!
df.ix[timestamp]
self.assertTrue(len(df.ix[[timestamp]]) > 0)
finally:
_index._SIZE_CUTOFF = old_cutoff
def test_indexing_unordered(self):
# GH 2437
rng = date_range(start='2011-01-01', end='2011-01-15')
ts = Series(randn(len(rng)), index=rng)
ts2 = concat([ts[0:4],ts[-4:],ts[4:-4]])
for t in ts.index:
s = str(t)
expected = ts[t]
result = ts2[t]
self.assertTrue(expected == result)
# GH 3448 (ranges)
def compare(slobj):
result = ts2[slobj].copy()
result = result.sort_index()
expected = ts[slobj]
assert_series_equal(result,expected)
compare(slice('2011-01-01','2011-01-15'))
compare(slice('2010-12-30','2011-01-15'))
compare(slice('2011-01-01','2011-01-16'))
# partial ranges
compare(slice('2011-01-01','2011-01-6'))
compare(slice('2011-01-06','2011-01-8'))
compare(slice('2011-01-06','2011-01-12'))
# single values
result = ts2['2011'].sort_index()
expected = ts['2011']
assert_series_equal(result,expected)
# diff freq
rng = date_range(datetime(2005, 1, 1), periods=20, freq='M')
ts = Series(np.arange(len(rng)), index=rng)
ts = ts.take(np.random.permutation(20))
result = ts['2005']
for t in result.index:
self.assertTrue(t.year == 2005)
def test_indexing(self):
idx = date_range("2001-1-1", periods=20, freq='M')
ts = Series(np.random.rand(len(idx)),index=idx)
# getting
# GH 3070, make sure semantics work on Series/Frame
expected = ts['2001']
df = DataFrame(dict(A = ts))
result = df['2001']['A']
assert_series_equal(expected,result)
# setting
ts['2001'] = 1
expected = ts['2001']
df.loc['2001','A'] = 1
result = df['2001']['A']
assert_series_equal(expected,result)
# GH3546 (not including times on the last day)
idx = date_range(start='2013-05-31 00:00', end='2013-05-31 23:00', freq='H')
ts = Series(lrange(len(idx)), index=idx)
expected = ts['2013-05']
assert_series_equal(expected,ts)
idx = date_range(start='2013-05-31 00:00', end='2013-05-31 23:59', freq='S')
ts = Series(lrange(len(idx)), index=idx)
expected = ts['2013-05']
assert_series_equal(expected,ts)
idx = [ Timestamp('2013-05-31 00:00'), Timestamp(datetime(2013,5,31,23,59,59,999999))]
ts = Series(lrange(len(idx)), index=idx)
expected = ts['2013']
assert_series_equal(expected,ts)
# GH 3925, indexing with a seconds resolution string / datetime object
df = DataFrame(randn(5,5),columns=['open','high','low','close','volume'],index=date_range('2012-01-02 18:01:00',periods=5,tz='US/Central',freq='s'))
expected = df.loc[[df.index[2]]]
result = df['2012-01-02 18:01:02']
assert_frame_equal(result,expected)
# this is a single date, so will raise
self.assertRaises(KeyError, df.__getitem__, df.index[2],)
def test_recreate_from_data(self):
if _np_version_under1p7:
freqs = ['M', 'Q', 'A', 'D', 'B', 'T', 'S', 'L', 'U', 'H']
else:
freqs = ['M', 'Q', 'A', 'D', 'B', 'T', 'S', 'L', 'U', 'H', 'N', 'C']
for f in freqs:
org = DatetimeIndex(start='2001/02/01 09:00', freq=f, periods=1)
idx = DatetimeIndex(org, freq=f)
self.assertTrue(idx.equals(org))
        # unable to create tz-aware 'A' and 'C' freq
if _np_version_under1p7:
freqs = ['M', 'Q', 'D', 'B', 'T', 'S', 'L', 'U', 'H']
else:
freqs = ['M', 'Q', 'D', 'B', 'T', 'S', 'L', 'U', 'H', 'N']
for f in freqs:
org = DatetimeIndex(start='2001/02/01 09:00', freq=f, tz='US/Pacific', periods=1)
idx = DatetimeIndex(org, freq=f, tz='US/Pacific')
self.assertTrue(idx.equals(org))
def assert_range_equal(left, right):
assert(left.equals(right))
assert(left.freq == right.freq)
assert(left.tz == right.tz)
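# Broad time-series behaviour tests: construction, indexing, conversion, filling and resampling.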
class TestTimeSeries(tm.TestCase):
_multiprocess_can_split_ = True
def test_is_(self):
dti = DatetimeIndex(start='1/1/2005', end='12/1/2005', freq='M')
self.assertTrue(dti.is_(dti))
self.assertTrue(dti.is_(dti.view()))
self.assertFalse(dti.is_(dti.copy()))
def test_dti_slicing(self):
dti = DatetimeIndex(start='1/1/2005', end='12/1/2005', freq='M')
dti2 = dti[[1, 3, 5]]
v1 = dti2[0]
v2 = dti2[1]
v3 = dti2[2]
self.assertEqual(v1, Timestamp('2/28/2005'))
self.assertEqual(v2, Timestamp('4/30/2005'))
self.assertEqual(v3, Timestamp('6/30/2005'))
# don't carry freq through irregular slicing
self.assertIsNone(dti2.freq)
def test_pass_datetimeindex_to_index(self):
# Bugs in #1396
rng = date_range('1/1/2000', '3/1/2000')
idx = Index(rng, dtype=object)
expected = Index(rng.to_pydatetime(), dtype=object)
self.assert_numpy_array_equal(idx.values, expected.values)
def test_contiguous_boolean_preserve_freq(self):
rng = date_range('1/1/2000', '3/1/2000', freq='B')
mask = np.zeros(len(rng), dtype=bool)
mask[10:20] = True
masked = rng[mask]
expected = rng[10:20]
self.assertIsNotNone(expected.freq)
assert_range_equal(masked, expected)
mask[22] = True
masked = rng[mask]
self.assertIsNone(masked.freq)
def test_getitem_median_slice_bug(self):
index = date_range('20090415', '20090519', freq='2B')
s = Series(np.random.randn(13), index=index)
indexer = [slice(6, 7, None)]
result = s[indexer]
expected = s[indexer[0]]
assert_series_equal(result, expected)
def test_series_box_timestamp(self):
rng = date_range('20090415', '20090519', freq='B')
s = Series(rng)
tm.assert_isinstance(s[5], Timestamp)
rng = date_range('20090415', '20090519', freq='B')
s = Series(rng, index=rng)
tm.assert_isinstance(s[5], Timestamp)
tm.assert_isinstance(s.iget_value(5), Timestamp)
def test_date_range_ambiguous_arguments(self):
# #2538
start = datetime(2011, 1, 1, 5, 3, 40)
end = datetime(2011, 1, 1, 8, 9, 40)
self.assertRaises(ValueError, date_range, start, end,
freq='s', periods=10)
def test_timestamp_to_datetime(self):
_skip_if_no_pytz()
rng = date_range('20090415', '20090519',
tz='US/Eastern')
stamp = rng[0]
dtval = stamp.to_pydatetime()
self.assertEqual(stamp, dtval)
self.assertEqual(stamp.tzinfo, dtval.tzinfo)
def test_index_convert_to_datetime_array(self):
_skip_if_no_pytz()
def _check_rng(rng):
converted = rng.to_pydatetime()
tm.assert_isinstance(converted, np.ndarray)
for x, stamp in zip(converted, rng):
tm.assert_isinstance(x, datetime)
self.assertEqual(x, stamp.to_pydatetime())
self.assertEqual(x.tzinfo, stamp.tzinfo)
rng = date_range('20090415', '20090519')
rng_eastern = date_range('20090415', '20090519', tz='US/Eastern')
rng_utc = date_range('20090415', '20090519', tz='utc')
_check_rng(rng)
_check_rng(rng_eastern)
_check_rng(rng_utc)
def test_ctor_str_intraday(self):
rng = DatetimeIndex(['1-1-2000 00:00:01'])
self.assertEqual(rng[0].second, 1)
def test_series_ctor_plus_datetimeindex(self):
rng = date_range('20090415', '20090519', freq='B')
data = dict((k, 1) for k in rng)
result = Series(data, index=rng)
self.assertIs(result.index, rng)
def test_series_pad_backfill_limit(self):
index = np.arange(10)
s = Series(np.random.randn(10), index=index)
result = s[:2].reindex(index, method='pad', limit=5)
expected = s[:2].reindex(index).fillna(method='pad')
expected[-3:] = np.nan
assert_series_equal(result, expected)
result = s[-2:].reindex(index, method='backfill', limit=5)
expected = s[-2:].reindex(index).fillna(method='backfill')
expected[:3] = np.nan
assert_series_equal(result, expected)
def test_series_fillna_limit(self):
index = np.arange(10)
s = Series(np.random.randn(10), index=index)
result = s[:2].reindex(index)
result = result.fillna(method='pad', limit=5)
expected = s[:2].reindex(index).fillna(method='pad')
expected[-3:] = np.nan
assert_series_equal(result, expected)
result = s[-2:].reindex(index)
result = result.fillna(method='bfill', limit=5)
expected = s[-2:].reindex(index).fillna(method='backfill')
expected[:3] = np.nan
assert_series_equal(result, expected)
def test_frame_pad_backfill_limit(self):
index = np.arange(10)
df = DataFrame(np.random.randn(10, 4), index=index)
result = df[:2].reindex(index, method='pad', limit=5)
expected = df[:2].reindex(index).fillna(method='pad')
expected.values[-3:] = np.nan
tm.assert_frame_equal(result, expected)
result = df[-2:].reindex(index, method='backfill', limit=5)
expected = df[-2:].reindex(index).fillna(method='backfill')
expected.values[:3] = np.nan
tm.assert_frame_equal(result, expected)
def test_frame_fillna_limit(self):
index = np.arange(10)
df = DataFrame(np.random.randn(10, 4), index=index)
result = df[:2].reindex(index)
result = result.fillna(method='pad', limit=5)
expected = df[:2].reindex(index).fillna(method='pad')
expected.values[-3:] = np.nan
tm.assert_frame_equal(result, expected)
result = df[-2:].reindex(index)
result = result.fillna(method='backfill', limit=5)
expected = df[-2:].reindex(index).fillna(method='backfill')
expected.values[:3] = np.nan
tm.assert_frame_equal(result, expected)
def test_frame_setitem_timestamp(self):
# 2155
columns = DatetimeIndex(start='1/1/2012', end='2/1/2012',
freq=datetools.bday)
index = lrange(10)
data = DataFrame(columns=columns, index=index)
t = datetime(2012, 11, 1)
ts = Timestamp(t)
data[ts] = np.nan # works
def test_sparse_series_fillna_limit(self):
index = np.arange(10)
s = Series(np.random.randn(10), index=index)
ss = s[:2].reindex(index).to_sparse()
result = ss.fillna(method='pad', limit=5)
expected = ss.fillna(method='pad', limit=5)
expected = expected.to_dense()
expected[-3:] = np.nan
expected = expected.to_sparse()
assert_series_equal(result, expected)
ss = s[-2:].reindex(index).to_sparse()
result = ss.fillna(method='backfill', limit=5)
expected = ss.fillna(method='backfill')
expected = expected.to_dense()
expected[:3] = np.nan
expected = expected.to_sparse()
assert_series_equal(result, expected)
def test_sparse_series_pad_backfill_limit(self):
index = np.arange(10)
s = Series(np.random.randn(10), index=index)
s = s.to_sparse()
result = s[:2].reindex(index, method='pad', limit=5)
expected = s[:2].reindex(index).fillna(method='pad')
expected = expected.to_dense()
expected[-3:] = np.nan
expected = expected.to_sparse()
assert_series_equal(result, expected)
result = s[-2:].reindex(index, method='backfill', limit=5)
expected = s[-2:].reindex(index).fillna(method='backfill')
expected = expected.to_dense()
expected[:3] = np.nan
expected = expected.to_sparse()
assert_series_equal(result, expected)
def test_sparse_frame_pad_backfill_limit(self):
index = np.arange(10)
df = DataFrame(np.random.randn(10, 4), index=index)
sdf = df.to_sparse()
result = sdf[:2].reindex(index, method='pad', limit=5)
expected = sdf[:2].reindex(index).fillna(method='pad')
expected = expected.to_dense()
expected.values[-3:] = np.nan
expected = expected.to_sparse()
tm.assert_frame_equal(result, expected)
result = sdf[-2:].reindex(index, method='backfill', limit=5)
expected = sdf[-2:].reindex(index).fillna(method='backfill')
expected = expected.to_dense()
expected.values[:3] = np.nan
expected = expected.to_sparse()
tm.assert_frame_equal(result, expected)
def test_sparse_frame_fillna_limit(self):
index = np.arange(10)
df = DataFrame(np.random.randn(10, 4), index=index)
sdf = df.to_sparse()
result = sdf[:2].reindex(index)
result = result.fillna(method='pad', limit=5)
expected = sdf[:2].reindex(index).fillna(method='pad')
expected = expected.to_dense()
expected.values[-3:] = np.nan
expected = expected.to_sparse()
tm.assert_frame_equal(result, expected)
result = sdf[-2:].reindex(index)
result = result.fillna(method='backfill', limit=5)
expected = sdf[-2:].reindex(index).fillna(method='backfill')
expected = expected.to_dense()
expected.values[:3] = np.nan
expected = expected.to_sparse()
tm.assert_frame_equal(result, expected)
def test_pad_require_monotonicity(self):
rng = date_range('1/1/2000', '3/1/2000', freq='B')
rng2 = rng[::2][::-1]
self.assertRaises(ValueError, rng2.get_indexer, rng,
method='pad')
def test_frame_ctor_datetime64_column(self):
rng = date_range('1/1/2000 00:00:00', '1/1/2000 1:59:50',
freq='10s')
dates = np.asarray(rng)
df = DataFrame({'A': np.random.randn(len(rng)), 'B': dates})
self.assertTrue(np.issubdtype(df['B'].dtype, np.dtype('M8[ns]')))
def test_frame_add_datetime64_column(self):
rng = date_range('1/1/2000 00:00:00', '1/1/2000 1:59:50',
freq='10s')
df = DataFrame(index=np.arange(len(rng)))
df['A'] = rng
self.assertTrue(np.issubdtype(df['A'].dtype, np.dtype('M8[ns]')))
def test_frame_datetime64_pre1900_repr(self):
df = DataFrame({'year': date_range('1/1/1700', periods=50,
freq='A-DEC')})
# it works!
repr(df)
def test_frame_add_datetime64_col_other_units(self):
n = 100
units = ['h', 'm', 's', 'ms', 'D', 'M', 'Y']
ns_dtype = np.dtype('M8[ns]')
for unit in units:
dtype = np.dtype('M8[%s]' % unit)
vals = np.arange(n, dtype=np.int64).view(dtype)
df = DataFrame({'ints': np.arange(n)}, index=np.arange(n))
df[unit] = vals
ex_vals = to_datetime(vals.astype('O'))
self.assertEqual(df[unit].dtype, ns_dtype)
self.assertTrue((df[unit].values == ex_vals).all())
# Test insertion into existing datetime64 column
df = DataFrame({'ints': np.arange(n)}, index=np.arange(n))
df['dates'] = np.arange(n, dtype=np.int64).view(ns_dtype)
for unit in units:
dtype = np.dtype('M8[%s]' % unit)
vals = np.arange(n, dtype=np.int64).view(dtype)
tmp = df.copy()
tmp['dates'] = vals
ex_vals = to_datetime(vals.astype('O'))
self.assertTrue((tmp['dates'].values == ex_vals).all())
def test_to_datetime_unit(self):
epoch = 1370745748
s = Series([ epoch + t for t in range(20) ])
result = to_datetime(s,unit='s')
expected = Series([ Timestamp('2013-06-09 02:42:28') + timedelta(seconds=t) for t in range(20) ])
assert_series_equal(result,expected)
s = Series([ epoch + t for t in range(20) ]).astype(float)
result = to_datetime(s,unit='s')
expected = Series([ Timestamp('2013-06-09 02:42:28') + timedelta(seconds=t) for t in range(20) ])
assert_series_equal(result,expected)
s = Series([ epoch + t for t in range(20) ] + [iNaT])
result = to_datetime(s,unit='s')
expected = Series([ Timestamp('2013-06-09 02:42:28') + timedelta(seconds=t) for t in range(20) ] + [NaT])
assert_series_equal(result,expected)
s = Series([ epoch + t for t in range(20) ] + [iNaT]).astype(float)
result = to_datetime(s,unit='s')
expected = Series([ Timestamp('2013-06-09 02:42:28') + timedelta(seconds=t) for t in range(20) ] + [NaT])
assert_series_equal(result,expected)
s = concat([Series([ epoch + t for t in range(20) ]).astype(float),Series([np.nan])],ignore_index=True)
result = to_datetime(s,unit='s')
expected = Series([ Timestamp('2013-06-09 02:42:28') + timedelta(seconds=t) for t in range(20) ] + [NaT])
assert_series_equal(result,expected)
def test_series_ctor_datetime64(self):
rng = date_range('1/1/2000 00:00:00', '1/1/2000 1:59:50',
freq='10s')
dates = np.asarray(rng)
series = Series(dates)
self.assertTrue(np.issubdtype(series.dtype, np.dtype('M8[ns]')))
def test_index_cast_datetime64_other_units(self):
arr = np.arange(0, 100, 10, dtype=np.int64).view('M8[D]')
idx = Index(arr)
self.assertTrue((idx.values == tslib.cast_to_nanoseconds(arr)).all())
def test_index_astype_datetime64(self):
idx = Index([datetime(2012, 1, 1)], dtype=object)
if not _np_version_under1p7:
raise nose.SkipTest("test only valid in numpy < 1.7")
casted = idx.astype(np.dtype('M8[D]'))
expected = DatetimeIndex(idx.values)
tm.assert_isinstance(casted, DatetimeIndex)
self.assertTrue(casted.equals(expected))
def test_reindex_series_add_nat(self):
rng = date_range('1/1/2000 00:00:00', periods=10, freq='10s')
series = Series(rng)
result = series.reindex(lrange(15))
self.assertTrue(np.issubdtype(result.dtype, np.dtype('M8[ns]')))
mask = result.isnull()
self.assertTrue(mask[-5:].all())
self.assertFalse(mask[:-5].any())
def test_reindex_frame_add_nat(self):
rng = date_range('1/1/2000 00:00:00', periods=10, freq='10s')
df = DataFrame({'A': np.random.randn(len(rng)), 'B': rng})
result = df.reindex(lrange(15))
self.assertTrue(np.issubdtype(result['B'].dtype, np.dtype('M8[ns]')))
mask = com.isnull(result)['B']
self.assertTrue(mask[-5:].all())
self.assertFalse(mask[:-5].any())
def test_series_repr_nat(self):
series = Series([0, 1000, 2000, iNaT], dtype='M8[ns]')
result = repr(series)
expected = ('0 1970-01-01 00:00:00\n'
'1 1970-01-01 00:00:00.000001\n'
'2 1970-01-01 00:00:00.000002\n'
'3 NaT\n'
'dtype: datetime64[ns]')
self.assertEqual(result, expected)
def test_fillna_nat(self):
series = Series([0, 1, 2, iNaT], dtype='M8[ns]')
filled = series.fillna(method='pad')
filled2 = series.fillna(value=series.values[2])
expected = series.copy()
expected.values[3] = expected.values[2]
assert_series_equal(filled, expected)
assert_series_equal(filled2, expected)
df = DataFrame({'A': series})
filled = df.fillna(method='pad')
filled2 = df.fillna(value=series.values[2])
expected = DataFrame({'A': expected})
assert_frame_equal(filled, expected)
assert_frame_equal(filled2, expected)
series = Series([iNaT, 0, 1, 2], dtype='M8[ns]')
filled = series.fillna(method='bfill')
filled2 = series.fillna(value=series[1])
expected = series.copy()
expected[0] = expected[1]
assert_series_equal(filled, expected)
assert_series_equal(filled2, expected)
df = DataFrame({'A': series})
filled = df.fillna(method='bfill')
filled2 = df.fillna(value=series[1])
expected = DataFrame({'A': expected})
assert_frame_equal(filled, expected)
assert_frame_equal(filled2, expected)
def test_string_na_nat_conversion(self):
# GH #999, #858
from pandas.compat import parse_date
strings = np.array(['1/1/2000', '1/2/2000', np.nan,
'1/4/2000, 12:34:56'], dtype=object)
expected = np.empty(4, dtype='M8[ns]')
for i, val in enumerate(strings):
if com.isnull(val):
expected[i] = iNaT
else:
expected[i] = parse_date(val)
result = tslib.array_to_datetime(strings)
assert_almost_equal(result, expected)
result2 = to_datetime(strings)
tm.assert_isinstance(result2, DatetimeIndex)
assert_almost_equal(result, result2)
malformed = np.array(['1/100/2000', np.nan], dtype=object)
result = to_datetime(malformed)
assert_almost_equal(result, malformed)
self.assertRaises(ValueError, to_datetime, malformed,
errors='raise')
idx = ['a', 'b', 'c', 'd', 'e']
series = Series(['1/1/2000', np.nan, '1/3/2000', np.nan,
'1/5/2000'], index=idx, name='foo')
dseries = Series([to_datetime('1/1/2000'), np.nan,
to_datetime('1/3/2000'), np.nan,
to_datetime('1/5/2000')], index=idx, name='foo')
result = to_datetime(series)
dresult = to_datetime(dseries)
expected = Series(np.empty(5, dtype='M8[ns]'), index=idx)
for i in range(5):
x = series[i]
if isnull(x):
expected[i] = iNaT
else:
expected[i] = to_datetime(x)
assert_series_equal(result, expected)
self.assertEqual(result.name, 'foo')
assert_series_equal(dresult, expected)
self.assertEqual(dresult.name, 'foo')
def test_to_datetime_iso8601(self):
result = to_datetime(["2012-01-01 00:00:00"])
exp = Timestamp("2012-01-01 00:00:00")
self.assertEqual(result[0], exp)
result = to_datetime(['20121001']) # bad iso 8601
exp = Timestamp('2012-10-01')
self.assertEqual(result[0], exp)
def test_to_datetime_default(self):
rs = to_datetime('2001')
xp = datetime(2001, 1, 1)
        self.assertEqual(rs, xp)
#### dayfirst is essentially broken
#### to_datetime('01-13-2012', dayfirst=True)
#### self.assertRaises(ValueError, to_datetime('01-13-2012', dayfirst=True))
def test_to_datetime_on_datetime64_series(self):
# #2699
s = Series(date_range('1/1/2000', periods=10))
result = to_datetime(s)
self.assertEqual(result[0], s[0])
def test_to_datetime_with_apply(self):
# this is only locale tested with US/None locales
_skip_if_has_locale()
# GH 5195
# with a format and coerce a single item to_datetime fails
td = Series(['May 04', 'Jun 02', 'Dec 11'], index=[1,2,3])
expected = pd.to_datetime(td, format='%b %y')
result = td.apply(pd.to_datetime, format='%b %y')
assert_series_equal(result, expected)
td = pd.Series(['May 04', 'Jun 02', ''], index=[1,2,3])
self.assertRaises(ValueError, lambda : pd.to_datetime(td,format='%b %y'))
self.assertRaises(ValueError, lambda : td.apply(pd.to_datetime, format='%b %y'))
expected = pd.to_datetime(td, format='%b %y', coerce=True)
result = td.apply(lambda x: pd.to_datetime(x, format='%b %y', coerce=True))
assert_series_equal(result, expected)
def test_nat_vector_field_access(self):
idx = DatetimeIndex(['1/1/2000', None, None, '1/4/2000'])
fields = ['year', 'quarter', 'month', 'day', 'hour',
'minute', 'second', 'microsecond', 'nanosecond',
'week', 'dayofyear']
for field in fields:
result = getattr(idx, field)
expected = [getattr(x, field) if x is not NaT else -1
for x in idx]
self.assert_numpy_array_equal(result, expected)
def test_nat_scalar_field_access(self):
fields = ['year', 'quarter', 'month', 'day', 'hour',
'minute', 'second', 'microsecond', 'nanosecond',
'week', 'dayofyear']
for field in fields:
result = getattr(NaT, field)
self.assertEqual(result, -1)
self.assertEqual(NaT.weekday(), -1)
def test_to_datetime_types(self):
# empty string
result = to_datetime('')
self.assertIs(result, NaT)
result = to_datetime(['', ''])
self.assertTrue(isnull(result).all())
# ints
result = Timestamp(0)
expected = to_datetime(0)
self.assertEqual(result, expected)
# GH 3888 (strings)
expected = to_datetime(['2012'])[0]
result = to_datetime('2012')
self.assertEqual(result, expected)
### array = ['2012','20120101','20120101 12:01:01']
array = ['20120101','20120101 12:01:01']
expected = list(to_datetime(array))
result = lmap(Timestamp,array)
tm.assert_almost_equal(result,expected)
### currently fails ###
### result = Timestamp('2012')
### expected = to_datetime('2012')
### self.assertEqual(result, expected)
def test_to_datetime_unprocessable_input(self):
# GH 4928
self.assert_numpy_array_equal(
to_datetime([1, '1']),
np.array([1, '1'], dtype='O')
)
self.assertRaises(TypeError, to_datetime, [1, '1'], errors='raise')
def test_to_datetime_other_datetime64_units(self):
# 5/25/2012
scalar = np.int64(1337904000000000).view('M8[us]')
as_obj = scalar.astype('O')
index = DatetimeIndex([scalar])
self.assertEqual(index[0], scalar.astype('O'))
value = Timestamp(scalar)
self.assertEqual(value, as_obj)
def test_to_datetime_list_of_integers(self):
rng = date_range('1/1/2000', periods=20)
rng = DatetimeIndex(rng.values)
ints = list(rng.asi8)
result = DatetimeIndex(ints)
self.assertTrue(rng.equals(result))
def test_to_datetime_dt64s(self):
in_bound_dts = [
np.datetime64('2000-01-01'),
np.datetime64('2000-01-02'),
]
for dt in in_bound_dts:
self.assertEqual(
pd.to_datetime(dt),
Timestamp(dt)
)
oob_dts = [
np.datetime64('1000-01-01'),
np.datetime64('5000-01-02'),
]
for dt in oob_dts:
self.assertRaises(ValueError, pd.to_datetime, dt, errors='raise')
self.assertRaises(ValueError, tslib.Timestamp, dt)
self.assertIs(pd.to_datetime(dt, coerce=True), NaT)
def test_to_datetime_array_of_dt64s(self):
dts = [
np.datetime64('2000-01-01'),
np.datetime64('2000-01-02'),
]
# Assuming all datetimes are in bounds, to_datetime() returns
# an array that is equal to Timestamp() parsing
self.assert_numpy_array_equal(
pd.to_datetime(dts, box=False),
np.array([Timestamp(x).asm8 for x in dts])
)
# A list of datetimes where the last one is out of bounds
dts_with_oob = dts + [np.datetime64('9999-01-01')]
self.assertRaises(
ValueError,
pd.to_datetime,
dts_with_oob,
coerce=False,
errors='raise'
)
self.assert_numpy_array_equal(
pd.to_datetime(dts_with_oob, box=False, coerce=True),
np.array(
[
Timestamp(dts_with_oob[0]).asm8,
Timestamp(dts_with_oob[1]).asm8,
iNaT,
],
dtype='M8'
)
)
# With coerce=False and errors='ignore', out of bounds datetime64s
# are converted to their .item(), which depending on the version of
# numpy is either a python datetime.datetime or datetime.date
self.assert_numpy_array_equal(
pd.to_datetime(dts_with_oob, box=False, coerce=False),
np.array(
[dt.item() for dt in dts_with_oob],
dtype='O'
)
)
def test_index_to_datetime(self):
idx = Index(['1/1/2000', '1/2/2000', '1/3/2000'])
result = idx.to_datetime()
expected = DatetimeIndex(datetools.to_datetime(idx.values))
self.assertTrue(result.equals(expected))
today = datetime.today()
idx = Index([today], dtype=object)
result = idx.to_datetime()
expected = DatetimeIndex([today])
self.assertTrue(result.equals(expected))
def test_to_datetime_freq(self):
xp = bdate_range('2000-1-1', periods=10, tz='UTC')
rs = xp.to_datetime()
self.assertEqual(xp.freq, rs.freq)
self.assertEqual(xp.tzinfo, rs.tzinfo)
def test_range_misspecified(self):
# GH #1095
self.assertRaises(ValueError, date_range, '1/1/2000')
self.assertRaises(ValueError, date_range, end='1/1/2000')
self.assertRaises(ValueError, date_range, periods=10)
self.assertRaises(ValueError, date_range, '1/1/2000', freq='H')
self.assertRaises(ValueError, date_range, end='1/1/2000', freq='H')
self.assertRaises(ValueError, date_range, periods=10, freq='H')
def test_reasonable_keyerror(self):
# GH #1062
index = DatetimeIndex(['1/3/2000'])
try:
index.get_loc('1/1/2000')
except KeyError as e:
self.assertIn('2000', str(e))
def test_reindex_with_datetimes(self):
rng = date_range('1/1/2000', periods=20)
ts = Series(np.random.randn(20), index=rng)
result = ts.reindex(list(ts.index[5:10]))
expected = ts[5:10]
tm.assert_series_equal(result, expected)
result = ts[list(ts.index[5:10])]
tm.assert_series_equal(result, expected)
def test_promote_datetime_date(self):
rng = date_range('1/1/2000', periods=20)
ts = Series(np.random.randn(20), index=rng)
ts_slice = ts[5:]
ts2 = ts_slice.copy()
ts2.index = [x.date() for x in ts2.index]
result = ts + ts2
result2 = ts2 + ts
expected = ts + ts[5:]
assert_series_equal(result, expected)
assert_series_equal(result2, expected)
# test asfreq
result = ts2.asfreq('4H', method='ffill')
expected = ts[5:].asfreq('4H', method='ffill')
assert_series_equal(result, expected)
result = rng.get_indexer(ts2.index)
expected = rng.get_indexer(ts_slice.index)
self.assert_numpy_array_equal(result, expected)
def test_asfreq_normalize(self):
rng = date_range('1/1/2000 09:30', periods=20)
norm = date_range('1/1/2000', periods=20)
vals = np.random.randn(20)
ts = Series(vals, index=rng)
result = ts.asfreq('D', normalize=True)
norm = date_range('1/1/2000', periods=20)
expected = Series(vals, index=norm)
assert_series_equal(result, expected)
vals = np.random.randn(20, 3)
ts = DataFrame(vals, index=rng)
result = ts.asfreq('D', normalize=True)
expected = DataFrame(vals, index=norm)
assert_frame_equal(result, expected)
def test_date_range_gen_error(self):
rng = date_range('1/1/2000 00:00', '1/1/2000 00:18', freq='5min')
self.assertEqual(len(rng), 4)
def test_first_subset(self):
ts = _simple_ts('1/1/2000', '1/1/2010', freq='12h')
result = ts.first('10d')
self.assertEqual(len(result), 20)
ts = _simple_ts('1/1/2000', '1/1/2010')
result = ts.first('10d')
self.assertEqual(len(result), 10)
result = ts.first('3M')
expected = ts[:'3/31/2000']
assert_series_equal(result, expected)
result = ts.first('21D')
expected = ts[:21]
assert_series_equal(result, expected)
result = ts[:0].first('3M')
assert_series_equal(result, ts[:0])
def test_last_subset(self):
ts = _simple_ts('1/1/2000', '1/1/2010', freq='12h')
result = ts.last('10d')
self.assertEqual(len(result), 20)
ts = _simple_ts('1/1/2000', '1/1/2010')
result = ts.last('10d')
self.assertEqual(len(result), 10)
result = ts.last('21D')
expected = ts['12/12/2009':]
assert_series_equal(result, expected)
result = ts.last('21D')
expected = ts[-21:]
assert_series_equal(result, expected)
result = ts[:0].last('3M')
assert_series_equal(result, ts[:0])
def test_add_offset(self):
rng = date_range('1/1/2000', '2/1/2000')
result = rng + offsets.Hour(2)
expected = date_range('1/1/2000 02:00', '2/1/2000 02:00')
self.assertTrue(result.equals(expected))
def test_format_pre_1900_dates(self):
rng = date_range('1/1/1850', '1/1/1950', freq='A-DEC')
rng.format()
ts = Series(1, index=rng)
repr(ts)
def test_repeat(self):
rng = date_range('1/1/2000', '1/1/2001')
result = rng.repeat(5)
self.assertIsNone(result.freq)
self.assertEqual(len(result), 5 * len(rng))
def test_at_time(self):
rng = date_range('1/1/2000', '1/5/2000', freq='5min')
ts = Series(np.random.randn(len(rng)), index=rng)
rs = ts.at_time(rng[1])
self.assertTrue((rs.index.hour == rng[1].hour).all())
self.assertTrue((rs.index.minute == rng[1].minute).all())
self.assertTrue((rs.index.second == rng[1].second).all())
result = ts.at_time('9:30')
expected = ts.at_time(time(9, 30))
assert_series_equal(result, expected)
df = DataFrame(np.random.randn(len(rng), 3), index=rng)
result = ts[time(9, 30)]
result_df = df.ix[time(9, 30)]
expected = ts[(rng.hour == 9) & (rng.minute == 30)]
exp_df = df[(rng.hour == 9) & (rng.minute == 30)]
# expected.index = date_range('1/1/2000', '1/4/2000')
assert_series_equal(result, expected)
tm.assert_frame_equal(result_df, exp_df)
chunk = df.ix['1/4/2000':]
result = chunk.ix[time(9, 30)]
expected = result_df[-1:]
tm.assert_frame_equal(result, expected)
# midnight, everything
rng = date_range('1/1/2000', '1/31/2000')
ts = Series(np.random.randn(len(rng)), index=rng)
result = ts.at_time(time(0, 0))
assert_series_equal(result, ts)
# time doesn't exist
rng = date_range('1/1/2012', freq='23Min', periods=384)
ts = Series(np.random.randn(len(rng)), rng)
rs = ts.at_time('16:00')
self.assertEqual(len(rs), 0)
def test_at_time_frame(self):
rng = date_range('1/1/2000', '1/5/2000', freq='5min')
ts = DataFrame(np.random.randn(len(rng), 2), index=rng)
rs = ts.at_time(rng[1])
self.assertTrue((rs.index.hour == rng[1].hour).all())
self.assertTrue((rs.index.minute == rng[1].minute).all())
self.assertTrue((rs.index.second == rng[1].second).all())
result = ts.at_time('9:30')
expected = ts.at_time(time(9, 30))
assert_frame_equal(result, expected)
result = ts.ix[time(9, 30)]
expected = ts.ix[(rng.hour == 9) & (rng.minute == 30)]
assert_frame_equal(result, expected)
# midnight, everything
rng = date_range('1/1/2000', '1/31/2000')
ts = DataFrame(np.random.randn(len(rng), 3), index=rng)
result = ts.at_time(time(0, 0))
assert_frame_equal(result, ts)
# time doesn't exist
rng = date_range('1/1/2012', freq='23Min', periods=384)
ts = DataFrame(np.random.randn(len(rng), 2), rng)
rs = ts.at_time('16:00')
self.assertEqual(len(rs), 0)
def test_between_time(self):
rng = date_range('1/1/2000', '1/5/2000', freq='5min')
ts = Series(np.random.randn(len(rng)), index=rng)
stime = time(0, 0)
etime = time(1, 0)
close_open = product([True, False], [True, False])
for inc_start, inc_end in close_open:
filtered = ts.between_time(stime, etime, inc_start, inc_end)
exp_len = 13 * 4 + 1
if not inc_start:
exp_len -= 5
if not inc_end:
exp_len -= 4
self.assertEqual(len(filtered), exp_len)
for rs in filtered.index:
t = rs.time()
if inc_start:
self.assertTrue(t >= stime)
else:
self.assertTrue(t > stime)
if inc_end:
self.assertTrue(t <= etime)
else:
self.assertTrue(t < etime)
result = ts.between_time('00:00', '01:00')
expected = ts.between_time(stime, etime)
assert_series_equal(result, expected)
# across midnight
rng = date_range('1/1/2000', '1/5/2000', freq='5min')
ts = Series(np.random.randn(len(rng)), index=rng)
stime = time(22, 0)
etime = time(9, 0)
close_open = product([True, False], [True, False])
for inc_start, inc_end in close_open:
filtered = ts.between_time(stime, etime, inc_start, inc_end)
exp_len = (12 * 11 + 1) * 4 + 1
if not inc_start:
exp_len -= 4
if not inc_end:
exp_len -= 4
self.assertEqual(len(filtered), exp_len)
for rs in filtered.index:
t = rs.time()
if inc_start:
self.assertTrue((t >= stime) or (t <= etime))
else:
self.assertTrue((t > stime) or (t <= etime))
if inc_end:
self.assertTrue((t <= etime) or (t >= stime))
else:
self.assertTrue((t < etime) or (t >= stime))
def test_between_time_frame(self):
rng = date_range('1/1/2000', '1/5/2000', freq='5min')
ts = DataFrame(np.random.randn(len(rng), 2), index=rng)
stime = time(0, 0)
etime = time(1, 0)
close_open = product([True, False], [True, False])
for inc_start, inc_end in close_open:
filtered = ts.between_time(stime, etime, inc_start, inc_end)
exp_len = 13 * 4 + 1
if not inc_start:
exp_len -= 5
if not inc_end:
exp_len -= 4
self.assertEqual(len(filtered), exp_len)
for rs in filtered.index:
t = rs.time()
if inc_start:
self.assertTrue(t >= stime)
else:
self.assertTrue(t > stime)
if inc_end:
self.assertTrue(t <= etime)
else:
self.assertTrue(t < etime)
result = ts.between_time('00:00', '01:00')
expected = ts.between_time(stime, etime)
assert_frame_equal(result, expected)
# across midnight
rng = date_range('1/1/2000', '1/5/2000', freq='5min')
ts = DataFrame(np.random.randn(len(rng), 2), index=rng)
stime = time(22, 0)
etime = time(9, 0)
close_open = product([True, False], [True, False])
for inc_start, inc_end in close_open:
filtered = ts.between_time(stime, etime, inc_start, inc_end)
exp_len = (12 * 11 + 1) * 4 + 1
if not inc_start:
exp_len -= 4
if not inc_end:
exp_len -= 4
self.assertEqual(len(filtered), exp_len)
for rs in filtered.index:
t = rs.time()
if inc_start:
self.assertTrue((t >= stime) or (t <= etime))
else:
self.assertTrue((t > stime) or (t <= etime))
if inc_end:
self.assertTrue((t <= etime) or (t >= stime))
else:
self.assertTrue((t < etime) or (t >= stime))
def test_dti_constructor_preserve_dti_freq(self):
rng = date_range('1/1/2000', '1/2/2000', freq='5min')
rng2 = DatetimeIndex(rng)
self.assertEqual(rng.freq, rng2.freq)
def test_normalize(self):
rng = date_range('1/1/2000 9:30', periods=10, freq='D')
result = rng.normalize()
expected = date_range('1/1/2000', periods=10, freq='D')
self.assertTrue(result.equals(expected))
rng_ns = pd.DatetimeIndex(np.array([1380585623454345752, 1380585612343234312]).astype("datetime64[ns]"))
rng_ns_normalized = rng_ns.normalize()
expected = pd.DatetimeIndex(np.array([1380585600000000000, 1380585600000000000]).astype("datetime64[ns]"))
self.assertTrue(rng_ns_normalized.equals(expected))
self.assertTrue(result.is_normalized)
self.assertFalse(rng.is_normalized)
def test_to_period(self):
from pandas.tseries.period import period_range
ts = _simple_ts('1/1/2000', '1/1/2001')
pts = ts.to_period()
exp = ts.copy()
exp.index = period_range('1/1/2000', '1/1/2001')
assert_series_equal(pts, exp)
pts = ts.to_period('M')
self.assertTrue(pts.index.equals(exp.index.asfreq('M')))
def create_dt64_based_index(self):
data = [Timestamp('2007-01-01 10:11:12.123456Z'),
Timestamp('2007-01-01 10:11:13.789123Z')]
index = DatetimeIndex(data)
return index
def test_to_period_millisecond(self):
index = self.create_dt64_based_index()
period = index.to_period(freq='L')
self.assertEqual(2, len(period))
self.assertEqual(period[0], Period('2007-01-01 10:11:12.123Z', 'L'))
self.assertEqual(period[1], Period('2007-01-01 10:11:13.789Z', 'L'))
def test_to_period_microsecond(self):
index = self.create_dt64_based_index()
period = index.to_period(freq='U')
self.assertEqual(2, len(period))
self.assertEqual(period[0], Period('2007-01-01 10:11:12.123456Z', 'U'))
self.assertEqual(period[1], Period('2007-01-01 10:11:13.789123Z', 'U'))
def test_to_period_tz(self):
_skip_if_no_pytz()
from dateutil.tz import tzlocal
from pytz import utc as UTC
xp = date_range('1/1/2000', '4/1/2000').to_period()
ts = date_range('1/1/2000', '4/1/2000', tz='US/Eastern')
result = ts.to_period()[0]
expected = ts[0].to_period()
self.assertEqual(result, expected)
self.assertTrue(ts.to_period().equals(xp))
ts = date_range('1/1/2000', '4/1/2000', tz=UTC)
result = ts.to_period()[0]
expected = ts[0].to_period()
self.assertEqual(result, expected)
self.assertTrue(ts.to_period().equals(xp))
ts = date_range('1/1/2000', '4/1/2000', tz=tzlocal())
result = ts.to_period()[0]
expected = ts[0].to_period()
self.assertEqual(result, expected)
self.assertTrue(ts.to_period().equals(xp))
def test_frame_to_period(self):
K = 5
from pandas.tseries.period import period_range
dr = date_range('1/1/2000', '1/1/2001')
pr = period_range('1/1/2000', '1/1/2001')
df = DataFrame(randn(len(dr), K), index=dr)
df['mix'] = 'a'
pts = df.to_period()
exp = df.copy()
exp.index = pr
assert_frame_equal(pts, exp)
pts = df.to_period('M')
self.assertTrue(pts.index.equals(exp.index.asfreq('M')))
df = df.T
pts = df.to_period(axis=1)
exp = df.copy()
exp.columns = pr
assert_frame_equal(pts, exp)
pts = df.to_period('M', axis=1)
self.assertTrue(pts.columns.equals(exp.columns.asfreq('M')))
self.assertRaises(ValueError, df.to_period, axis=2)
def test_timestamp_fields(self):
# extra fields from DatetimeIndex like quarter and week
idx = tm.makeDateIndex(100)
fields = ['dayofweek', 'dayofyear', 'week', 'weekofyear', 'quarter', 'is_month_start', 'is_month_end', 'is_quarter_start', 'is_quarter_end', 'is_year_start', 'is_year_end']
for f in fields:
expected = getattr(idx, f)[-1]
result = getattr(Timestamp(idx[-1]), f)
self.assertEqual(result, expected)
self.assertEqual(idx.freq, Timestamp(idx[-1], idx.freq).freq)
self.assertEqual(idx.freqstr, Timestamp(idx[-1], idx.freq).freqstr)
def test_woy_boundary(self):
# make sure weeks at year boundaries are correct
d = datetime(2013,12,31)
result = Timestamp(d).week
expected = 1 # ISO standard
self.assertEqual(result, expected)
d = datetime(2008,12,28)
result = Timestamp(d).week
expected = 52 # ISO standard
self.assertEqual(result, expected)
d = datetime(2009,12,31)
result = Timestamp(d).week
expected = 53 # ISO standard
self.assertEqual(result, expected)
d = datetime(2010,1,1)
result = Timestamp(d).week
expected = 53 # ISO standard
self.assertEqual(result, expected)
d = datetime(2010,1,3)
result = Timestamp(d).week
expected = 53 # ISO standard
self.assertEqual(result, expected)
result = np.array([Timestamp(datetime(*args)).week for args in
[(2000,1,1),(2000,1,2),(2005,1,1),(2005,1,2)]])
self.assertTrue((result == [52, 52, 53, 53]).all())
def test_timestamp_date_out_of_range(self):
self.assertRaises(ValueError, Timestamp, '1676-01-01')
self.assertRaises(ValueError, Timestamp, '2263-01-01')
# 1475
self.assertRaises(ValueError, DatetimeIndex, ['1400-01-01'])
self.assertRaises(ValueError, DatetimeIndex, [datetime(1400, 1, 1)])
def test_timestamp_repr(self):
# pre-1900
stamp = Timestamp('1850-01-01', tz='US/Eastern')
repr(stamp)
iso8601 = '1850-01-01 01:23:45.012345'
stamp = Timestamp(iso8601, tz='US/Eastern')
result = repr(stamp)
self.assertIn(iso8601, result)
def test_timestamp_from_ordinal(self):
# GH 3042
dt = datetime(2011, 4, 16, 0, 0)
ts = Timestamp.fromordinal(dt.toordinal())
self.assertEqual(ts.to_pydatetime(), dt)
# with a tzinfo
stamp = Timestamp('2011-4-16', tz='US/Eastern')
dt_tz = stamp.to_pydatetime()
ts = Timestamp.fromordinal(dt_tz.toordinal(),tz='US/Eastern')
self.assertEqual(ts.to_pydatetime(), dt_tz)
def test_datetimeindex_integers_shift(self):
rng = date_range('1/1/2000', periods=20)
result = rng + 5
expected = rng.shift(5)
self.assertTrue(result.equals(expected))
result = rng - 5
expected = rng.shift(-5)
self.assertTrue(result.equals(expected))
def test_astype_object(self):
# NumPy 1.6.1 weak ns support
rng = date_range('1/1/2000', periods=20)
casted = rng.astype('O')
exp_values = list(rng)
self.assert_numpy_array_equal(casted, exp_values)
def test_catch_infinite_loop(self):
offset = datetools.DateOffset(minute=5)
# blow up, don't loop forever
self.assertRaises(Exception, date_range, datetime(2011, 11, 11),
datetime(2011, 11, 12), freq=offset)
def test_append_concat(self):
rng = date_range('5/8/2012 1:45', periods=10, freq='5T')
ts = Series(np.random.randn(len(rng)), rng)
df = DataFrame(np.random.randn(len(rng), 4), index=rng)
result = ts.append(ts)
result_df = df.append(df)
ex_index = DatetimeIndex(np.tile(rng.values, 2))
self.assertTrue(result.index.equals(ex_index))
self.assertTrue(result_df.index.equals(ex_index))
appended = rng.append(rng)
self.assertTrue(appended.equals(ex_index))
appended = rng.append([rng, rng])
ex_index = DatetimeIndex(np.tile(rng.values, 3))
self.assertTrue(appended.equals(ex_index))
# different index names
rng1 = rng.copy()
rng2 = rng.copy()
rng1.name = 'foo'
rng2.name = 'bar'
self.assertEqual(rng1.append(rng1).name, 'foo')
self.assertIsNone(rng1.append(rng2).name)
def test_append_concat_tz(self):
#GH 2938
_skip_if_no_pytz()
rng = date_range('5/8/2012 1:45', periods=10, freq='5T',
tz='US/Eastern')
rng2 = date_range('5/8/2012 2:35', periods=10, freq='5T',
tz='US/Eastern')
rng3 = date_range('5/8/2012 1:45', periods=20, freq='5T',
tz='US/Eastern')
ts = Series(np.random.randn(len(rng)), rng)
df = DataFrame(np.random.randn(len(rng), 4), index=rng)
ts2 = Series(np.random.randn(len(rng2)), rng2)
df2 = DataFrame(np.random.randn(len(rng2), 4), index=rng2)
result = ts.append(ts2)
result_df = df.append(df2)
self.assertTrue(result.index.equals(rng3))
self.assertTrue(result_df.index.equals(rng3))
appended = rng.append(rng2)
self.assertTrue(appended.equals(rng3))
def test_set_dataframe_column_ns_dtype(self):
x = DataFrame([datetime.now(), datetime.now()])
self.assertEqual(x[0].dtype, np.dtype('M8[ns]'))
def test_groupby_count_dateparseerror(self):
dr = date_range(start='1/1/2012', freq='5min', periods=10)
# BAD Example, datetimes first
s = Series(np.arange(10), index=[dr, lrange(10)])
grouped = s.groupby(lambda x: x[1] % 2 == 0)
result = grouped.count()
s = Series(np.arange(10), index=[lrange(10), dr])
grouped = s.groupby(lambda x: x[0] % 2 == 0)
expected = grouped.count()
assert_series_equal(result, expected)
def test_datetimeindex_repr_short(self):
dr = date_range(start='1/1/2012', periods=1)
repr(dr)
dr = date_range(start='1/1/2012', periods=2)
repr(dr)
dr = date_range(start='1/1/2012', periods=3)
repr(dr)
def test_constructor_int64_nocopy(self):
# #1624
arr = np.arange(1000, dtype=np.int64)
index = DatetimeIndex(arr)
arr[50:100] = -1
self.assertTrue((index.asi8[50:100] == -1).all())
arr = np.arange(1000, dtype=np.int64)
index = DatetimeIndex(arr, copy=True)
arr[50:100] = -1
self.assertTrue((index.asi8[50:100] != -1).all())
def test_series_interpolate_method_values(self):
# #1646
ts = _simple_ts('1/1/2000', '1/20/2000')
ts[::2] = np.nan
result = ts.interpolate(method='values')
exp = ts.interpolate()
assert_series_equal(result, exp)
def test_frame_datetime64_handling_groupby(self):
# it works!
df = DataFrame([(3, np.datetime64('2012-07-03')),
(3, np.datetime64('2012-07-04'))],
columns=['a', 'date'])
result = df.groupby('a').first()
self.assertEqual(result['date'][3], Timestamp('2012-07-03'))
def test_series_interpolate_intraday(self):
# #1698
index = pd.date_range('1/1/2012', periods=4, freq='12D')
ts = pd.Series([0, 12, 24, 36], index)
new_index = index.append(index + pd.DateOffset(days=1)).order()
exp = ts.reindex(new_index).interpolate(method='time')
index = pd.date_range('1/1/2012', periods=4, freq='12H')
ts = pd.Series([0, 12, 24, 36], index)
new_index = index.append(index + pd.DateOffset(hours=1)).order()
result = ts.reindex(new_index).interpolate(method='time')
self.assert_numpy_array_equal(result.values, exp.values)
def test_frame_dict_constructor_datetime64_1680(self):
dr = date_range('1/1/2012', periods=10)
s = Series(dr, index=dr)
# it works!
DataFrame({'a': 'foo', 'b': s}, index=dr)
DataFrame({'a': 'foo', 'b': s.values}, index=dr)
def test_frame_datetime64_mixed_index_ctor_1681(self):
dr = date_range('2011/1/1', '2012/1/1', freq='W-FRI')
ts = Series(dr)
# it works!
d = DataFrame({'A': 'foo', 'B': ts}, index=dr)
self.assertTrue(d['B'].isnull().all())
def test_frame_timeseries_to_records(self):
index = date_range('1/1/2000', periods=10)
df = DataFrame(np.random.randn(10, 3), index=index,
columns=['a', 'b', 'c'])
result = df.to_records()
        self.assertTrue(result['index'].dtype == 'M8[ns]')
result = df.to_records(index=False)
def test_frame_datetime64_duplicated(self):
dates = date_range('2010-07-01', end='2010-08-05')
tst = DataFrame({'symbol': 'AAA', 'date': dates})
result = tst.duplicated(['date', 'symbol'])
self.assertTrue((-result).all())
tst = DataFrame({'date': dates})
result = tst.duplicated()
self.assertTrue((-result).all())
def test_timestamp_compare_with_early_datetime(self):
# e.g. datetime.min
stamp = Timestamp('2012-01-01')
self.assertFalse(stamp == datetime.min)
self.assertFalse(stamp == datetime(1600, 1, 1))
self.assertFalse(stamp == datetime(2700, 1, 1))
self.assertNotEqual(stamp, datetime.min)
self.assertNotEqual(stamp, datetime(1600, 1, 1))
self.assertNotEqual(stamp, datetime(2700, 1, 1))
self.assertTrue(stamp > datetime(1600, 1, 1))
self.assertTrue(stamp >= datetime(1600, 1, 1))
self.assertTrue(stamp < datetime(2700, 1, 1))
self.assertTrue(stamp <= datetime(2700, 1, 1))
def test_to_html_timestamp(self):
rng = date_range('2000-01-01', periods=10)
df = DataFrame(np.random.randn(10, 4), index=rng)
result = df.to_html()
self.assertIn('2000-01-01', result)
def test_to_csv_numpy_16_bug(self):
frame = DataFrame({'a': date_range('1/1/2000', periods=10)})
buf = StringIO()
frame.to_csv(buf)
result = buf.getvalue()
self.assertIn('2000-01-01', result)
def test_series_map_box_timestamps(self):
# #2689, #2627
s = Series(date_range('1/1/2000', periods=10))
def f(x):
return (x.hour, x.day, x.month)
# it works!
s.map(f)
s.apply(f)
DataFrame(s).applymap(f)
def test_concat_datetime_datetime64_frame(self):
# #2624
rows = []
rows.append([datetime(2010, 1, 1), 1])
rows.append([datetime(2010, 1, 2), 'hi'])
df2_obj = DataFrame.from_records(rows, columns=['date', 'test'])
ind = date_range(start="2000/1/1", freq="D", periods=10)
df1 = DataFrame({'date': ind, 'test':lrange(10)})
# it works!
pd.concat([df1, df2_obj])
def test_period_resample(self):
# GH3609
s = Series(range(100),index=date_range('20130101', freq='s', periods=100), dtype='float')
s[10:30] = np.nan
expected = Series([34.5, 79.5], index=[Period('2013-01-01 00:00', 'T'), Period('2013-01-01 00:01', 'T')])
result = s.to_period().resample('T', kind='period')
assert_series_equal(result, expected)
result2 = s.resample('T', kind='period')
assert_series_equal(result2, expected)
def test_period_resample_with_local_timezone(self):
# GH5430
_skip_if_no_pytz()
import pytz
local_timezone = pytz.timezone('America/Los_Angeles')
start = datetime(year=2013, month=11, day=1, hour=0, minute=0, tzinfo=pytz.utc)
# 1 day later
end = datetime(year=2013, month=11, day=2, hour=0, minute=0, tzinfo=pytz.utc)
index = pd.date_range(start, end, freq='H')
series = pd.Series(1, index=index)
series = series.tz_convert(local_timezone)
result = series.resample('D', kind='period')
# Create the expected series
expected_index = (pd.period_range(start=start, end=end, freq='D') - 1) # Index is moved back a day with the timezone conversion from UTC to Pacific
expected = pd.Series(1, index=expected_index)
assert_series_equal(result, expected)
def test_pickle(self):
#GH4606
from pandas.compat import cPickle
import pickle
for pick in [pickle, cPickle]:
p = pick.loads(pick.dumps(NaT))
self.assertTrue(p is NaT)
idx = pd.to_datetime(['2013-01-01', NaT, '2014-01-06'])
idx_p = pick.loads(pick.dumps(idx))
self.assertTrue(idx_p[0] == idx[0])
self.assertTrue(idx_p[1] is NaT)
self.assertTrue(idx_p[2] == idx[2])
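# Helper: a Series of random values over a fixed-frequency date_range.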
def _simple_ts(start, end, freq='D'):
rng = date_range(start, end, freq=freq)
return Series(np.random.randn(len(rng)), index=rng)
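# DatetimeIndex-specific tests: constructors, set operations and conversions.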
class TestDatetimeIndex(tm.TestCase):
_multiprocess_can_split_ = True
def test_hash_error(self):
index = date_range('20010101', periods=10)
with tm.assertRaisesRegexp(TypeError,
"unhashable type: %r" %
type(index).__name__):
hash(index)
def test_stringified_slice_with_tz(self):
#GH2658
import datetime
        start = datetime.datetime.now()
        idx = DatetimeIndex(start=start, freq="1d", periods=10)
        df = DataFrame(lrange(10), index=idx)
        df["2013-01-14 23:44:34.437768-05:00":]  # no exception here
def test_append_join_nondatetimeindex(self):
rng = date_range('1/1/2000', periods=10)
idx = Index(['a', 'b', 'c', 'd'])
result = rng.append(idx)
tm.assert_isinstance(result[0], Timestamp)
# it works
rng.join(idx, how='outer')
def test_astype(self):
rng = date_range('1/1/2000', periods=10)
result = rng.astype('i8')
self.assert_numpy_array_equal(result, rng.asi8)
def test_to_period_nofreq(self):
idx = DatetimeIndex(['2000-01-01', '2000-01-02', '2000-01-04'])
self.assertRaises(ValueError, idx.to_period)
idx = DatetimeIndex(['2000-01-01', '2000-01-02', '2000-01-03'],
freq='infer')
idx.to_period()
def test_000constructor_resolution(self):
# 2252
t1 = Timestamp((1352934390 * 1000000000) + 1000000 + 1000 + 1)
idx = DatetimeIndex([t1])
self.assertEqual(idx.nanosecond[0], t1.nanosecond)
def test_constructor_coverage(self):
rng = date_range('1/1/2000', periods=10.5)
exp = date_range('1/1/2000', periods=10)
self.assertTrue(rng.equals(exp))
self.assertRaises(ValueError, DatetimeIndex, start='1/1/2000',
periods='foo', freq='D')
self.assertRaises(ValueError, DatetimeIndex, start='1/1/2000',
end='1/10/2000')
self.assertRaises(ValueError, DatetimeIndex, '1/1/2000')
# generator expression
gen = (datetime(2000, 1, 1) + timedelta(i) for i in range(10))
result = DatetimeIndex(gen)
expected = DatetimeIndex([datetime(2000, 1, 1) + timedelta(i)
for i in range(10)])
self.assertTrue(result.equals(expected))
# NumPy string array
strings = np.array(['2000-01-01', '2000-01-02', '2000-01-03'])
result = DatetimeIndex(strings)
expected = DatetimeIndex(strings.astype('O'))
self.assertTrue(result.equals(expected))
from_ints = DatetimeIndex(expected.asi8)
self.assertTrue(from_ints.equals(expected))
# non-conforming
self.assertRaises(ValueError, DatetimeIndex,
['2000-01-01', '2000-01-02', '2000-01-04'],
freq='D')
self.assertRaises(ValueError, DatetimeIndex,
start='2011-01-01', freq='b')
self.assertRaises(ValueError, DatetimeIndex,
end='2011-01-01', freq='B')
self.assertRaises(ValueError, DatetimeIndex, periods=10, freq='D')
def test_constructor_name(self):
idx = DatetimeIndex(start='2000-01-01', periods=1, freq='A',
name='TEST')
self.assertEqual(idx.name, 'TEST')
def test_comparisons_coverage(self):
rng = date_range('1/1/2000', periods=10)
# raise TypeError for now
self.assertRaises(TypeError, rng.__lt__, rng[3].value)
result = rng == list(rng)
exp = rng == rng
self.assert_numpy_array_equal(result, exp)
def test_map(self):
rng = date_range('1/1/2000', periods=10)
f = lambda x: x.strftime('%Y%m%d')
result = rng.map(f)
exp = [f(x) for x in rng]
self.assert_numpy_array_equal(result, exp)
def test_add_union(self):
rng = date_range('1/1/2000', periods=5)
rng2 = date_range('1/6/2000', periods=5)
result = rng + rng2
expected = rng.union(rng2)
self.assertTrue(result.equals(expected))
def test_misc_coverage(self):
rng = date_range('1/1/2000', periods=5)
result = rng.groupby(rng.day)
tm.assert_isinstance(list(result.values())[0][0], Timestamp)
idx = DatetimeIndex(['2000-01-03', '2000-01-01', '2000-01-02'])
self.assertTrue(idx.equals(list(idx)))
non_datetime = Index(list('abc'))
self.assertFalse(idx.equals(list(non_datetime)))
def test_union_coverage(self):
idx = DatetimeIndex(['2000-01-03', '2000-01-01', '2000-01-02'])
ordered = DatetimeIndex(idx.order(), freq='infer')
result = ordered.union(idx)
self.assertTrue(result.equals(ordered))
result = ordered[:0].union(ordered)
self.assertTrue(result.equals(ordered))
self.assertEqual(result.freq, ordered.freq)
def test_union_bug_1730(self):
rng_a = date_range('1/1/2012', periods=4, freq='3H')
rng_b = date_range('1/1/2012', periods=4, freq='4H')
result = rng_a.union(rng_b)
exp = DatetimeIndex(sorted(set(list(rng_a)) | set(list(rng_b))))
self.assertTrue(result.equals(exp))
def test_union_bug_1745(self):
left = DatetimeIndex(['2012-05-11 15:19:49.695000'])
right = DatetimeIndex(['2012-05-29 13:04:21.322000',
'2012-05-11 15:27:24.873000',
'2012-05-11 15:31:05.350000'])
result = left.union(right)
exp = DatetimeIndex(sorted(set(list(left)) | set(list(right))))
self.assertTrue(result.equals(exp))
def test_union_bug_4564(self):
from pandas import DateOffset
left = date_range("2013-01-01", "2013-02-01")
right = left + DateOffset(minutes=15)
result = left.union(right)
exp = DatetimeIndex(sorted(set(list(left)) | set(list(right))))
self.assertTrue(result.equals(exp))
def test_intersection_bug_1708(self):
from pandas import DateOffset
index_1 = date_range('1/1/2012', periods=4, freq='12H')
index_2 = index_1 + DateOffset(hours=1)
result = index_1 & index_2
self.assertEqual(len(result), 0)
# def test_add_timedelta64(self):
# rng = date_range('1/1/2000', periods=5)
# delta = rng.values[3] - rng.values[1]
# result = rng + delta
# expected = rng + timedelta(2)
# self.assertTrue(result.equals(expected))
def test_get_duplicates(self):
idx = DatetimeIndex(['2000-01-01', '2000-01-02', '2000-01-02',
'2000-01-03', '2000-01-03', '2000-01-04'])
result = idx.get_duplicates()
ex = DatetimeIndex(['2000-01-02', '2000-01-03'])
self.assertTrue(result.equals(ex))
def test_argmin_argmax(self):
idx = DatetimeIndex(['2000-01-04', '2000-01-01', '2000-01-02'])
self.assertEqual(idx.argmin(), 1)
self.assertEqual(idx.argmax(), 0)
def test_order(self):
idx = DatetimeIndex(['2000-01-04', '2000-01-01', '2000-01-02'])
ordered = idx.order()
self.assertTrue(ordered.is_monotonic)
ordered = idx.order(ascending=False)
self.assertTrue(ordered[::-1].is_monotonic)
ordered, dexer = idx.order(return_indexer=True)
self.assertTrue(ordered.is_monotonic)
self.assert_numpy_array_equal(dexer, [1, 2, 0])
ordered, dexer = idx.order(return_indexer=True, ascending=False)
self.assertTrue(ordered[::-1].is_monotonic)
self.assert_numpy_array_equal(dexer, [0, 2, 1])
def test_insert(self):
idx = DatetimeIndex(['2000-01-04', '2000-01-01', '2000-01-02'])
result = idx.insert(2, datetime(2000, 1, 5))
exp = DatetimeIndex(['2000-01-04', '2000-01-01', '2000-01-05',
'2000-01-02'])
self.assertTrue(result.equals(exp))
# insertion of non-datetime should coerce to object index
result = idx.insert(1, 'inserted')
expected = Index([datetime(2000, 1, 4), 'inserted', datetime(2000, 1, 1),
datetime(2000, 1, 2)])
self.assertNotIsInstance(result, DatetimeIndex)
tm.assert_index_equal(result, expected)
idx = date_range('1/1/2000', periods=3, freq='M')
result = idx.insert(3, datetime(2000, 4, 30))
self.assertEqual(result.freqstr, 'M')
def test_map_bug_1677(self):
index = DatetimeIndex(['2012-04-25 09:30:00.393000'])
f = index.asof
result = index.map(f)
expected = np.array([f(index[0])])
self.assert_numpy_array_equal(result, expected)
def test_groupby_function_tuple_1677(self):
df = DataFrame(np.random.rand(100),
index=date_range("1/1/2000", periods=100))
monthly_group = df.groupby(lambda x: (x.year, x.month))
result = monthly_group.mean()
tm.assert_isinstance(result.index[0], tuple)
def test_append_numpy_bug_1681(self):
# another datetime64 bug
dr = date_range('2011/1/1', '2012/1/1', freq='W-FRI')
a = DataFrame()
c = DataFrame({'A': 'foo', 'B': dr}, index=dr)
result = a.append(c)
self.assertTrue((result['B'] == dr).all())
def test_isin(self):
index = tm.makeDateIndex(4)  # API: pandas.util.testing.makeDateIndex
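# ---- QC-report helpers: build an HTML report with dominate, pull SNR/tSNR/geometry data out of
# report JSON files, and render ROI/stat images with nibabel + nilearn ----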
import dominate
from dominate.tags import *
import seaborn as sns
import matplotlib.pyplot as plt
import pandas as pd
import json
import glob
import datetime
import numpy as np
import nibabel
import itertools
import os
from nilearn import plotting
FORMATRECS = 5
XLABELROT = 30
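# Create a dominate HTML document with optional stylesheet/JS includes plus a header div (title and generation timestamp).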
def create_document(title, stylesheet=None, script=None):
doc = dominate.document(title = title)
if stylesheet is not None:
with doc.head:
link(rel='stylesheet',href=stylesheet)
if script is not None:
with doc.head:
# note: the `script` parameter shadows dominate.tags.script, so refer to the tag via the module
dominate.tags.script(type='text/javascript', src=script)
with doc:
with div(id='header'):
h1(title)
p('Report generated on {}'.format(datetime.datetime.strftime(datetime.datetime.now(), '%Y-%m-%dT%H:%M:%S.%f')) )
return doc
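# Example usage (hypothetical; the file names are placeholders, not part of this module):
#     doc = create_document("QC report", stylesheet="qc.css")
#     doc = create_section(doc, "snr", None, "Signal-to-noise")
#     with open("qc_report.html", "w") as fh:
#         fh.write(doc.render())
#
# Append a <div> (optionally classed) containing an <h2> caption to the document and return it.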
def create_section(doc, divid, divclass, captiontext):
with doc:
if divclass is None:
d = div(id=divid)
else:
d = div(id=divid, cls=divclass)
with d:
h2(captiontext)
return doc
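# Append a <div> with an <h3> caption and an HTML table (one header row, one row per item in reportlist).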
def create_table(doc, divid, divclass, tabid, tabclass, headers, captiontext, reportlist):
with doc:
if divclass is None:
d = div(id=divid)
else:
d = div(id=divid, cls=divclass)
with d:
h3(captiontext)
if tabclass is None:
t = table(id = tabid)
else:
t = table(id = tabid, cls = tabclass)
with t:
with thead():
with tr():
for header in headers:
th(header)
with tbody():
for listitem in reportlist:
with tr():
for itemvalue in listitem:
td(itemvalue)
return doc
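# Build and return a standalone <table> element (not attached to a document) from headers and row data.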
def create_float_table(tabid, tabclass, headers, reportlist):
if tabclass is None:
t = table(id = tabid)
else:
t = table(id = tabid, cls = tabclass)
with t:
with thead():
with tr():
for header in headers:
th(header)
with tbody():
for listitem in reportlist:
with tr():
for itemvalue in listitem:
td(itemvalue)
return t
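# Append a <div> with an <h3> caption and an <img> tag pointing at the given image path.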
def add_image(doc, divid, divclass, captiontext, image):
with doc:
if divclass is None:
d = div(id=divid)
else:
d = div(id=divid, cls=divclass)
with d:
h3(captiontext)
img(src=image)
return doc
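# Build and return a standalone <img> element with the given id and optional class.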
def add_float_image(imgid, imgclass, image):
if imgclass is None:
m = img(id=imgid, src=image)
else:
m = img(id=imgid, cls=imgclass, src=image)
return m
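# Read each report JSON and return rows of [acq. datetime, ROI, SNR, signal ROI, noise ROI, input file],
# keeping only the 'base_space' entries.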
def getSnrData(reportdict, modality):
table_data=[]
for keydate, rep in reportdict.items():
with open (rep, 'r') as file:
rep_json = json.load(file)
acqdate=rep_json[modality]["DateTime"]
snr=rep_json[modality]["snr"]
for itemkey, itemvalue in snr.items():
roi_val = itemkey
for itemkey, itemvalue in itemvalue.items():
space_val = itemkey
# only insert base_space; remove this if statement for both spaces
if space_val == 'base_space':
snr_val = itemvalue['snr']
signal_roi = itemvalue['signal_roi']
noise_roi = itemvalue['noise_roi']
in_file = itemvalue['in_file']
#table_data.append([acqdate, roi_val, space_val, snr_val ])
table_data.append([acqdate, roi_val, snr_val, signal_roi, noise_roi, in_file ])
return table_data
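# Same as getSnrData but for temporal SNR: rows of [acq. datetime, ROI, tSNR, tSNR file, signal ROI], base_space only.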
def getTsnrData(reportdict, modality):
table_data=[]
for keydate, rep in reportdict.items():
with open (rep, 'r') as file:
rep_json = json.load(file)
acqdate=rep_json[modality]["DateTime"]
snr=rep_json[modality]["tsnr"]
for itemkey, itemvalue in snr.items():
roi_val = itemkey
for itemkey, itemvalue in itemvalue.items():
space_val = itemkey
# only insert base_space; remove this if statement for both spaces
if space_val == 'base_space':
tsnr_val = itemvalue["tsnr_in_roi"]
tsnr_file = itemvalue["tsnr_file"]
signal_roi = itemvalue["signal_roi"]
#table_data.append([acqdate, roi_val, space_val, snr_val ])
table_data.append([acqdate, roi_val, tsnr_val,tsnr_file,signal_roi ])
return table_data
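# Map acquisition DateTime (taken from the 'structural' section) to report path, sorted newest first,
# optionally truncated to the most recent `numitems` reports.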
def getSortedReportSet(reportjson, numitems=None):
reportjson_dict={}
for rep in reportjson:
with open (rep, 'r') as file:
rep_json = json.load(file)
acqdate=rep_json["structural"]["DateTime"]
reportjson_dict[acqdate]=rep
sorted_dict = dict(sorted(reportjson_dict.items(),reverse=True))
if numitems is not None and numitems < len(sorted_dict):
sorted_dict = dict(itertools.islice(sorted_dict.items(), numitems))
return sorted_dict
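# Extract determinant / average_scaling / scales / skews from each report's 'geometry' block
# (list values are converted to strings).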
def getGeometryData(reportdict, modality):
table_data=[]
for keydate, rep in reportdict.items():
with open (rep, 'r') as file:
rep_json = json.load(file)
acqdate=rep_json[modality]["DateTime"]
geom=rep_json[modality]["geometry"]
for itemkey, itemvalue in geom.items():
if itemkey in ('determinant', 'average_scaling', 'scales', 'skews'):
if isinstance(itemvalue,list):
itemvalue=str(itemvalue)
table_data.append([acqdate, itemkey, itemvalue ])
return table_data
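# Row-apply helper: reparse the ISO-style 'datetime' column and reformat it with the given strftime format.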
def formatDateTime(row, fmt):
dt = datetime.datetime.strptime(row['datetime'],'%Y-%m-%dT%H:%M:%S.%f')
return datetime.datetime.strftime(dt, fmt)
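# Row-apply helper: if 'value' is a stringified comma-separated list, return the mean of its floats; otherwise return the value unchanged.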
def returnAverage(row):
if len(str(row['value']).split(',')) > 1:
row_values= [float(x) for x in row['value'].replace('[','').replace(']','').replace(' ','').split(',')]
return np.mean(np.asarray(row_values))
else:
return row['value']
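# Sum the ROI masks into a single volume, overlay it on the background image (first volume if 4D)
# with nilearn's plot_roi, and save the figure to `image`.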
def writeROIimage(mask_rois, threeDfile, image ):
combo = None
for roi in mask_rois:
roiimg = nibabel.load(roi)
roidata = roiimg.get_fdata()
if combo is None:
combo = roidata
combo_affine = roiimg.affine
combo_header = roiimg.header
else:
combo=np.add(combo,roidata)
funcimg = nibabel.load(threeDfile)
if len(funcimg.header.get_data_shape()) > 3:
funclist = nibabel.funcs.four_to_three(funcimg)
threeDfile = funclist[0]
combo_img = nibabel.Nifti1Image(combo, combo_affine, combo_header)
display=plotting.plot_roi(combo_img, bg_img=threeDfile)
display.savefig(image)
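# Render a statistical map with nilearn's plot_stat_map and save it to `image`.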
def writeStatImage(threeDfile, image, dmode='ortho'):
display=plotting.plot_stat_map(threeDfile, display_mode=dmode)
display.savefig(image)
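# Build the per-modality SNR section of the report from the SNR data collected across reports.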
def createSNRSection(doc,reportdict,modality,imagedir,reportcurr=None):
table_columns=['datetime','roi', 'snr', 'signal_roi', 'noise_roi', 'in_file']
if reportcurr is not None:
d = div()
captiontext="{} ROIs used for average SNR".format(modality).capitalize()
d += h3(captiontext,id='{}_snr_roi_display_h'.format(modality))
snr_table_data = getSnrData(reportcurr,modality)
snr_df = pd.DataFrame(snr_table_data, columns=table_columns)  # API: pandas.DataFrame