prompt (string, 19-1.03M chars) | completion (string, 4-2.12k chars) | api (string, 8-90 chars)
---|---|---|
import operator
import numpy as np
import pandas as pd
import pandas.util.testing as tm
import pytest
import moneypandas as mpd
@pytest.fixture
def series():
return pd.Series(mpd.MoneyArray([None, 1, 2], 'USD'))
@pytest.fixture
def frame():
return pd.DataFrame({"A": mpd.MoneyArray([None, 1, 2], 'GBP'),
"B": [0, 1, 2],
"C": mpd.MoneyArray([np.nan, 1, 2], 'USD')})
@pytest.fixture(params=['series', 'frame'])
def obj(request, series, frame):
if request.param == 'series':
return series
elif request.param == 'frame':
return frame
# -----
# Tests
# -----
@pytest.mark.parametrize('method', [
operator.methodcaller('head'),
operator.methodcaller('rename', str),
])
def test_works_generic(obj, method):
method(obj)
@pytest.mark.parametrize('method', [
operator.methodcaller('info'),
])
def test_works_frame(frame, method):
method(frame)
def test__take(frame):
return frame.take([0], axis=0)
def test_iloc_series(series):
series.iloc[slice(None)]
series.iloc[0]
series.iloc[[0]]
series.iloc[[0, 1]]
def test_iloc_frame(frame):
frame.iloc[:, 0]
frame.iloc[:, [0]]
frame.iloc[:, [0, 1]]
frame.iloc[:, [0, 2]]
frame.iloc[0, 0]
frame.iloc[0, [0]]
frame.iloc[0, [0, 1]]
frame.iloc[0, [0, 2]]
frame.iloc[[0], 0]
frame.iloc[[0], [0]]
frame.iloc[[0], [0, 1]]
frame.iloc[[0], [0, 2]]
def test_loc_series(series):
series.loc[:]
series.loc[0]
series.loc[1]
series.loc[[0, 1]]
def test_loc_frame(frame):
frame.loc[:, 'A']
frame.loc[:, ['A']]
frame.loc[:, ['A', 'B']]
frame.loc[:, ['A', 'C']]
frame.loc[0, 'A']
frame.loc[0, ['A']]
frame.loc[0, ['A', 'B']]
frame.loc[0, ['A', 'C']]
frame.loc[[0], 'A']
frame.loc[[0], ['A']]
frame.loc[[0], ['A', 'B']]
frame.loc[[0], ['A', 'C']]
def test_reindex(frame):
result = frame.reindex([0, 10])
expected = pd.DataFrame({"A": mpd.MoneyArray([None, np.nan], 'USD'),
"B": [0, np.nan],
"C": mpd.MoneyArray([None, np.nan], 'USD')},
index=[0, 10])
tm.assert_frame_equal(result, expected)
def test_isna(series):
expected = pd.Series([True, False, False], index=series.index,
name=series.name)
result = pd.isna(series)
tm.assert_series_equal(result, expected)
result = series.isna()
tm.assert_series_equal(result, expected)
def test_isna_frame(frame):
result = frame.isna()
expected = pd.DataFrame({"A": [True, False, False],
"B": [False, False, False],
"C": [True, False, False]})
tm.assert_frame_equal(result, expected)
@pytest.mark.xfail(reason="Not implemented")
def test_fillna():
result = pd.Series(mpd.MoneyArray([1, 0], 'USD')).fillna(method='ffill')
expected = pd.Series(mpd.MoneyArray([1, 1], 'USD'))
tm.assert_series_equal(result, expected)
@pytest.mark.xfail(reason="Not implemented")
def test_dropna():
missing = pd.Series(mpd.MoneyArray([1, 0], 'USD'))
result = missing.dropna()
expected = pd.Series(mpd.MoneyArray([1], 'USD'))
tm.assert_series_equal(result, expected)
result = missing.to_frame().dropna()
expected = expected.to_frame()
| tm.assert_frame_equal(result, expected) | pandas.util.testing.assert_frame_equal |
# write_Crosswalk_USGS_NWIS_WU.py (scripts)
#!/usr/bin/env python3
# coding=utf-8
# <EMAIL>
"""
Create a crosswalk linking the downloaded USGS_NWIS_WU to NAICS_12. Created by selecting unique Activity Names and
manually assigning to NAICS
"""
import pandas as pd
from flowsa.common import datapath
from scripts.common_scripts import unique_activity_names, order_crosswalk
def assign_naics(df):
"""manually assign each ERS activity to a NAICS_2012 code"""
df.loc[df['Activity'] == 'Aquaculture', 'Sector'] = '1125'
# df.loc[df['Activity'] == 'Commercial', 'Sector'] = ''
df.loc[df['Activity'] == 'Domestic', 'Sector'] = 'F01000'
df.loc[df['Activity'] == 'Hydroelectric Power', 'Sector'] = '221111'
df.loc[df['Activity'] == 'Industrial', 'Sector'] = '1133'
df = df.append(pd.DataFrame([['Industrial', '23']], columns=['Activity', 'Sector']), sort=True)
df = df.append( | pd.DataFrame([['Industrial', '31']], columns=['Activity', 'Sector']) | pandas.DataFrame |
import os
from os.path import join, isfile
import subprocess
import json
import pandas as pd
from abc import ABC, abstractmethod
from typing import List, Dict, Tuple, Optional, Union, Any
import random
from nerblackbox.modules.utils.util_functions import get_dataset_path
from nerblackbox.modules.utils.env_variable import env_variable
from nerblackbox.modules.datasets.formatter.util_functions import get_ner_tag_mapping
from nerblackbox.modules.datasets.analyzer import Analyzer
SEED_SHUFFLE = {
"train": 4,
"val": 5,
"test": 6,
}
SENTENCES_ROWS_PRETOKENIZED = List[List[List[str]]]
SENTENCES_ROWS_UNPRETOKENIZED = List[Dict[str, Any]]
SENTENCES_ROWS = Union[SENTENCES_ROWS_PRETOKENIZED, SENTENCES_ROWS_UNPRETOKENIZED]
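# Note (added summary of the aliases above, not from the original module docs):
# "pretokenized" rows are nested [token, tag] lists grouped per sentence, while
# "unpretokenized" rows are dicts with a raw 'text' string plus character-offset
# 'tags'; SENTENCES_ROWS accepts either shape.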
class BaseFormatter(ABC):
def __init__(
self, ner_dataset: str, ner_tag_list: List[str], ner_dataset_subset: str = ""
):
"""
Args:
ner_dataset: 'swedish_ner_corpus' or 'suc'
ner_tag_list: e.g. ['PER', 'LOC', ..]
ner_dataset_subset: e.g. 'original_cased'
"""
self.ner_dataset: str = ner_dataset
self.ner_tag_list: List[str] = ner_tag_list
self.dataset_path: str = get_dataset_path(ner_dataset, ner_dataset_subset)
self.file_name: Dict[str, str] = {}
self.analyzer = Analyzer(self.ner_dataset, self.ner_tag_list, self.dataset_path)
####################################################################################################################
# ABSTRACT BASE METHODS
####################################################################################################################
@abstractmethod
def get_data(self, verbose: bool) -> None: # pragma: no cover
"""
I: get data
Args:
verbose: [bool]
"""
pass
@abstractmethod
def create_ner_tag_mapping(self) -> Dict[str, str]: # pragma: no cover
"""
II: customize ner_training tag mapping if wanted
Returns:
ner_tag_mapping: [dict] w/ keys = tags in original data, values = tags in formatted data
"""
pass
@abstractmethod
def format_data(
self, shuffle: bool = True, write_csv: bool = True
) -> Optional[SENTENCES_ROWS]: # pragma: no cover
"""
III: format data
Args:
shuffle: whether to shuffle rows of dataset
write_csv: whether to write dataset to csv (should always be True except for testing)
"""
pass
def set_original_file_paths(self) -> None: # pragma: no cover
"""
III: format data
Changed Attributes:
file_name: [Dict[str, str]], e.g. {'train': <path_to_train_csv>, 'val': ..}
Returns: -
"""
pass
@abstractmethod
def _parse_row(self, _row: str) -> List[str]: # pragma: no cover
"""
III: format data
Args:
_row: e.g. "Det PER X B"
Returns:
_row_list: e.g. ["Det", "PER", "X", "B"]
"""
pass
def _format_original_file(
self, _row_list: List[str]
) -> Optional[List[str]]: # pragma: no cover
"""
III: format data
Args:
_row_list: e.g. ["test", "PER", "X", "B"]
Returns:
_row_list_formatted: e.g. ["test", "B-PER"]
"""
pass
@abstractmethod
def resplit_data(
self, val_fraction: float, write_csv: bool
) -> Optional[Tuple[pd.DataFrame, ...]]: # pragma: no cover
"""
IV: resplit data
Args:
val_fraction: [float], e.g. 0.3
write_csv: whether to write dataset to csv (should always be True except for testing)
"""
pass
####################################################################################################################
# BASE METHODS
####################################################################################################################
def create_directory(self) -> None: # pragma: no cover
"""
0: create directory for dataset
"""
directory_path = join(self.dataset_path, "analyze_data")
os.makedirs(directory_path, exist_ok=True)
bash_cmd = (
f'echo "*" > {env_variable("DIR_DATASETS")}/{self.ner_dataset}/.gitignore'
)
try:
subprocess.run(bash_cmd, shell=True, check=True)
except subprocess.CalledProcessError as e:
print(e)
def create_ner_tag_mapping_json(self, modify: bool) -> None: # pragma: no cover
"""
II: create customized ner_training tag mapping to map tags in original data to tags in formatted data
Args:
modify: [bool], if True: modify tags as specified in method modify_ner_tag_mapping()
Returns: -
"""
if modify:
ner_tag_mapping = self.create_ner_tag_mapping()
else:
ner_tag_mapping = dict()
json_path = join(self.dataset_path, "ner_tag_mapping.json")
with open(json_path, "w") as f:
json.dump(ner_tag_mapping, f)
print(f"> dumped the following dict to {json_path}:")
print(ner_tag_mapping)
####################################################################################################################
# HELPER: READ ORIGINAL
####################################################################################################################
def _read_original_file(self, phase: str) -> SENTENCES_ROWS_PRETOKENIZED:
"""
III: format data
Args:
phase: 'train', 'val', 'test'
Returns:
sentences_rows: e.g. (-pretokenized-)
[
[['Inger', 'PER'], ['säger', '0'], .., []],
[['Det', '0'], .., []]
]
"""
self.set_original_file_paths()
file_path_original = join(self.dataset_path, self.file_name[phase])
_sentences_rows = list()
if isfile(file_path_original):
_sentence = list()
with open(file_path_original) as f:
for row in f.readlines():
row_list = self._parse_row(row)
if len(row_list) > 0:
row_list_formatted = self._format_original_file(row_list)
if row_list_formatted is not None:
_sentence.append(row_list_formatted)
else:
if len(_sentence):
_sentences_rows.append(_sentence)
_sentence = list()
print(f"\n> read {file_path_original}")
else: # pragma: no cover
raise Exception(f"ERROR! could not find file {file_path_original}!")
return _sentences_rows
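# Illustrative sketch (toy input, assuming a subclass whose _parse_row splits on
# whitespace and whose _format_original_file keeps [token, tag]): a source file
#
#   Inger PER
#   säger 0
#   <blank line>
#   Det 0
#   <blank line>
#
# would be returned as
#   [[['Inger', 'PER'], ['säger', '0']], [['Det', '0']]]
# i.e. each blank line closes the current sentence; as written, a trailing
# sentence without a final blank line before EOF would be dropped.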
####################################################################################################################
# HELPER: WRITE FORMATTED
####################################################################################################################
def _write_formatted_csv(
self, phase: str, sentences_rows: SENTENCES_ROWS_PRETOKENIZED
) -> None: # pragma: no cover
"""
III: format data
Args:
phase: 'train', 'val', 'test'
sentences_rows: e.g. (-pretokenized-)
[
[['Inger', 'PER'], ['säger', '0'], .., []],
[['Det', '0'], .., []]
]
Returns: -
"""
sentences_rows_formatted = self._format_sentences_rows(sentences_rows)
df = pd.DataFrame(sentences_rows_formatted)
file_path = join(self.dataset_path, f"{phase}_formatted.csv")
df.to_csv(file_path, sep="\t", header=False, index=False)
print(f"> phase = {phase}: wrote {len(df)} sentences to {file_path}")
def _write_formatted_jsonl(
self, phase: str, sentences_rows: SENTENCES_ROWS_UNPRETOKENIZED
) -> None: # pragma: no cover
"""
save to jsonl file
Args:
phase: 'train', 'val', 'test'
sentences_rows: e.g. (-unpretokenized-)
[
{
'text': 'Inger säger ..',
'tags': [{'token': 'Inger', 'tag': 'PER', 'char_start': 0, 'char_end': 5}, ..],
},
{
'text': 'Det ..',
'tags': [{..}, ..]
}
]
Returns: -
"""
file_path = join(self.dataset_path, f"{phase}_formatted.jsonl")
with open(file_path, "w") as file:
for sentence_row in sentences_rows:
file.write(json.dumps(sentence_row, ensure_ascii=False) + "\n")
print(
f"> phase = {phase}: wrote {len(sentences_rows)} sentences to {file_path}"
)
def _format_sentences_rows(
self, sentences_rows: SENTENCES_ROWS_PRETOKENIZED
) -> List[Tuple[str, str]]:
"""
III: format data
Args:
sentences_rows: e.g. (-pretokenized-)
[
[['Inger', 'PER'], ['säger', '0'], .., []],
[['Det', '0'], .., []]
]
Returns:
sentences_rows_formatted, e.g. (-pretokenized-)
[
('PER O', 'Inger säger'),
('O', 'Det'),
]
"""
# ner tag mapping
ner_tag_mapping = get_ner_tag_mapping(
path=join(self.dataset_path, "ner_tag_mapping.json")
)
# processing
sentences_rows_formatted = list()
for sentence in sentences_rows:
text_list = list()
tags_list = list()
for row in sentence:
assert (
len(row) == 2
), f"ERROR! row with length = {len(row)} found (should be 2): {row}"
text_list.append(row[0])
tags_list.append(
ner_tag_mapping(row[1]) if row[1] != "0" else "O"
) # replace zeros by capital O (!)
sentences_rows_formatted.append((" ".join(tags_list), " ".join(text_list)))
return sentences_rows_formatted
@staticmethod
def _convert_iob1_to_iob2(
sentences_rows_iob1: SENTENCES_ROWS_PRETOKENIZED,
) -> SENTENCES_ROWS_PRETOKENIZED:
"""
III: format data
convert tags from IOB1 to IOB2 format
Args:
sentences_rows_iob1: e.g. (-pretokenized-)
[
[['Inger', 'I-PER'], ['säger', '0'], .., []],
]
Returns:
sentences_rows_iob2: e.g. (-pretokenized-)
[
[['Inger', 'B-PER'], ['säger', '0'], .., []],
]
"""
sentences_rows_iob2 = list()
for sentence in sentences_rows_iob1:
sentence_iob2 = list()
for i, row in enumerate(sentence):
assert (
len(row) == 2
), f"ERROR! row = {row} should have length 0 or 2, not {len(row)}"
current_tag = row[1]
if (
current_tag == "O"
or "-" not in current_tag
or current_tag.startswith("B-")
):
sentence_iob2.append(row)
elif current_tag.startswith("I-"):
previous_tag = (
sentence[i - 1][1]
if (i > 0 and len(sentence[i - 1]) == 2)
else None
)
if previous_tag not in [
current_tag,
current_tag.replace("I-", "B-"),
]:
tag_iob2 = current_tag.replace("I-", "B-")
sentence_iob2.append([row[0], tag_iob2])
else:
sentence_iob2.append(row)
sentences_rows_iob2.append(sentence_iob2)
return sentences_rows_iob2
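# Illustrative sketch (hand-made example, not from the original test suite):
#
#   BaseFormatter._convert_iob1_to_iob2(
#       [[["Inger", "I-PER"], ["Nilsson", "I-PER"], ["talar", "O"]]]
#   )
#   -> [[["Inger", "B-PER"], ["Nilsson", "I-PER"], ["talar", "O"]]]
#
# An entity-initial "I-" tag is promoted to "B-", while an "I-" tag that
# continues the same entity type from the previous row is left unchanged.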
@staticmethod
def _shuffle_dataset(_phase: str, _sentences_rows: List[Any]) -> List[Any]:
"""
III: format data
Args:
_phase: "train", "val", "test"
_sentences_rows: e.g. (-pretokenized-)
[
[['Inger', 'PER'], ['säger', '0'], .., []],
[['Det', '0'], .., []]
]
e.g. (-unpretokenized-)
[
{
'text': 'Inger säger ..',
'tags': [{'token': 'Inger', 'tag': 'PER', 'char_start': 0, 'char_end': 5}, ..],
},
{
'text': 'Det ..',
'tags': [{..}, ..]
}
]
Returns:
_sentences_rows_shuffled: e.g. (-pretokenized-)
[
[['Det', '0'], .., []],
[['Inger', 'PER'], ['säger', '0'], .., []]
]
e.g. (-unpretokenized-)
[
{
'text': 'Det ..',
'tags': [{..}, ..]
},
{
'text': 'Inger säger ..',
'tags': [{'token': 'Inger', 'tag': 'PER', 'char_start': 0, 'char_end': 5}, ..],
}
]
"""
# change _sentences_rows by shuffling sentences
random.Random(SEED_SHUFFLE[_phase]).shuffle(_sentences_rows)
return _sentences_rows
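# Note (added observation about the fixed seeds above): because each phase uses
# its own random.Random(SEED_SHUFFLE[_phase]) instance, the shuffle is
# deterministic per phase, so repeated formatting runs produce identical
# train/val/test files for the same input data.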
####################################################################################################################
# HELPER: READ FORMATTED
####################################################################################################################
def _read_formatted_csvs(self, phases: List[str]) -> pd.DataFrame:
"""
IV: resplit data
Args:
phases: .. to read formatted csvs from, e.g. ['val', 'test']
Returns:
df_phases: contains formatted csvs of phases
"""
df_phases = [self._read_formatted_csv(phase) for phase in phases]
return | pd.concat(df_phases, ignore_index=True) | pandas.concat |
from datetime import datetime
import numpy as np
import pytest
import pandas.util._test_decorators as td
from pandas import DataFrame, DatetimeIndex, Index, MultiIndex, Series
import pandas._testing as tm
from pandas.core.window.common import flex_binary_moment
def _rolling_consistency_cases():
for window in [1, 2, 3, 10, 20]:
for min_periods in {0, 1, 2, 3, 4, window}:
if min_periods and (min_periods > window):
continue
for center in [False, True]:
yield window, min_periods, center
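# Note (added, derived from the generator above rather than from pandas docs):
# combinations with min_periods > window are skipped, so e.g. window=1 only
# yields min_periods in {0, 1}, each with center=False and center=True.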
# binary moments
def test_rolling_cov(series):
A = series
B = A + np.random.randn(len(A))
result = A.rolling(window=50, min_periods=25).cov(B)
tm.assert_almost_equal(result[-1], np.cov(A[-50:], B[-50:])[0, 1])
def test_rolling_corr(series):
A = series
B = A + np.random.randn(len(A))
result = A.rolling(window=50, min_periods=25).corr(B)
tm.assert_almost_equal(result[-1], np.corrcoef(A[-50:], B[-50:])[0, 1])
# test for correct bias correction
a = tm.makeTimeSeries()
b = tm.makeTimeSeries()
a[:5] = np.nan
b[:10] = np.nan
result = a.rolling(window=len(a), min_periods=1).corr(b)
tm.assert_almost_equal(result[-1], a.corr(b))
@pytest.mark.parametrize("func", ["cov", "corr"])
def test_rolling_pairwise_cov_corr(func, frame):
result = getattr(frame.rolling(window=10, min_periods=5), func)()
result = result.loc[(slice(None), 1), 5]
result.index = result.index.droplevel(1)
expected = getattr(frame[1].rolling(window=10, min_periods=5), func)(frame[5])
tm.assert_series_equal(result, expected, check_names=False)
@pytest.mark.parametrize("method", ["corr", "cov"])
def test_flex_binary_frame(method, frame):
series = frame[1]
res = getattr(series.rolling(window=10), method)(frame)
res2 = getattr(frame.rolling(window=10), method)(series)
exp = frame.apply(lambda x: getattr(series.rolling(window=10), method)(x))
tm.assert_frame_equal(res, exp)
tm.assert_frame_equal(res2, exp)
frame2 = frame.copy()
frame2.values[:] = np.random.randn(*frame2.shape)
res3 = getattr(frame.rolling(window=10), method)(frame2)
exp = DataFrame(
{k: getattr(frame[k].rolling(window=10), method)(frame2[k]) for k in frame}
)
tm.assert_frame_equal(res3, exp)
@pytest.mark.parametrize(
"window,min_periods,center", list(_rolling_consistency_cases())
)
@pytest.mark.parametrize("f", [lambda v: Series(v).sum(), np.nansum])
def test_rolling_apply_consistency_sum_nans(
consistency_data, window, min_periods, center, f
):
x, is_constant, no_nans = consistency_data
if f is np.nansum and min_periods == 0:
pass
else:
rolling_f_result = x.rolling(
window=window, min_periods=min_periods, center=center
).sum()
rolling_apply_f_result = x.rolling(
window=window, min_periods=min_periods, center=center
).apply(func=f, raw=True)
tm.assert_equal(rolling_f_result, rolling_apply_f_result)
@pytest.mark.parametrize(
"window,min_periods,center", list(_rolling_consistency_cases())
)
@pytest.mark.parametrize("f", [lambda v: Series(v).sum(), np.nansum, np.sum])
def test_rolling_apply_consistency_sum_no_nans(
consistency_data, window, min_periods, center, f
):
x, is_constant, no_nans = consistency_data
if no_nans:
if f is np.nansum and min_periods == 0:
pass
else:
rolling_f_result = x.rolling(
window=window, min_periods=min_periods, center=center
).sum()
rolling_apply_f_result = x.rolling(
window=window, min_periods=min_periods, center=center
).apply(func=f, raw=True)
tm.assert_equal(rolling_f_result, rolling_apply_f_result)
@pytest.mark.parametrize("window", range(7))
def test_rolling_corr_with_zero_variance(window):
# GH 18430
s = Series(np.zeros(20))
other = Series(np.arange(20))
assert s.rolling(window=window).corr(other=other).isna().all()
def test_flex_binary_moment():
# GH3155
# don't blow the stack
msg = "arguments to moment function must be of type np.ndarray/Series/DataFrame"
with pytest.raises(TypeError, match=msg):
flex_binary_moment(5, 6, None)
def test_corr_sanity():
# GH 3155
df = DataFrame(
np.array(
[
[0.87024726, 0.18505595],
[0.64355431, 0.3091617],
[0.92372966, 0.50552513],
[0.00203756, 0.04520709],
[0.84780328, 0.33394331],
[0.78369152, 0.63919667],
]
)
)
res = df[0].rolling(5, center=True).corr(df[1])
assert all(np.abs(np.nan_to_num(x)) <= 1 for x in res)
df = DataFrame(np.random.rand(30, 2))
res = df[0].rolling(5, center=True).corr(df[1])
assert all(np.abs(np.nan_to_num(x)) <= 1 for x in res)
def test_rolling_cov_diff_length():
# GH 7512
s1 = Series([1, 2, 3], index=[0, 1, 2])
s2 = Series([1, 3], index=[0, 2])
result = s1.rolling(window=3, min_periods=2).cov(s2)
expected = Series([None, None, 2.0])
tm.assert_series_equal(result, expected)
s2a = Series([1, None, 3], index=[0, 1, 2])
result = s1.rolling(window=3, min_periods=2).cov(s2a)
tm.assert_series_equal(result, expected)
def test_rolling_corr_diff_length():
# GH 7512
s1 = Series([1, 2, 3], index=[0, 1, 2])
s2 = Series([1, 3], index=[0, 2])
result = s1.rolling(window=3, min_periods=2).corr(s2)
expected = Series([None, None, 1.0])
tm.assert_series_equal(result, expected)
s2a = Series([1, None, 3], index=[0, 1, 2])
result = s1.rolling(window=3, min_periods=2).corr(s2a)
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize(
"f",
[
lambda x: x.rolling(window=10, min_periods=5).cov(x, pairwise=False),
lambda x: x.rolling(window=10, min_periods=5).corr(x, pairwise=False),
lambda x: x.rolling(window=10, min_periods=5).max(),
lambda x: x.rolling(window=10, min_periods=5).min(),
lambda x: x.rolling(window=10, min_periods=5).sum(),
lambda x: x.rolling(window=10, min_periods=5).mean(),
lambda x: x.rolling(window=10, min_periods=5).std(),
lambda x: x.rolling(window=10, min_periods=5).var(),
lambda x: x.rolling(window=10, min_periods=5).skew(),
lambda x: x.rolling(window=10, min_periods=5).kurt(),
lambda x: x.rolling(window=10, min_periods=5).quantile(quantile=0.5),
lambda x: x.rolling(window=10, min_periods=5).median(),
lambda x: x.rolling(window=10, min_periods=5).apply(sum, raw=False),
lambda x: x.rolling(window=10, min_periods=5).apply(sum, raw=True),
pytest.param(
lambda x: x.rolling(win_type="boxcar", window=10, min_periods=5).mean(),
marks=td.skip_if_no_scipy,
),
],
)
def test_rolling_functions_window_non_shrinkage(f):
# GH 7764
s = Series(range(4))
s_expected = Series(np.nan, index=s.index)
df = DataFrame([[1, 5], [3, 2], [3, 9], [-1, 0]], columns=["A", "B"])
df_expected = DataFrame(np.nan, index=df.index, columns=df.columns)
s_result = f(s)
tm.assert_series_equal(s_result, s_expected)
df_result = f(df)
tm.assert_frame_equal(df_result, df_expected)
@pytest.mark.parametrize(
"f",
[
lambda x: (x.rolling(window=10, min_periods=5).cov(x, pairwise=True)),
lambda x: (x.rolling(window=10, min_periods=5).corr(x, pairwise=True)),
],
)
def test_rolling_functions_window_non_shrinkage_binary(f):
# corr/cov return a MI DataFrame
df = DataFrame(
[[1, 5], [3, 2], [3, 9], [-1, 0]],
columns=Index(["A", "B"], name="foo"),
index=Index(range(4), name="bar"),
)
df_expected = DataFrame(
columns=Index(["A", "B"], name="foo"),
index=MultiIndex.from_product([df.index, df.columns], names=["bar", "foo"]),
dtype="float64",
)
df_result = f(df)
tm.assert_frame_equal(df_result, df_expected)
def test_rolling_skew_edge_cases():
all_nan = Series([np.NaN] * 5)
# yields all NaN (0 variance)
d = Series([1] * 5)
x = d.rolling(window=5).skew()
tm.assert_series_equal(all_nan, x)
# yields all NaN (window too small)
d = Series(np.random.randn(5))
x = d.rolling(window=2).skew()
tm.assert_series_equal(all_nan, x)
# yields [NaN, NaN, NaN, 0.177994, 1.548824]
d = Series([-1.50837035, -0.1297039, 0.19501095, 1.73508164, 0.41941401])
expected = Series([np.NaN, np.NaN, np.NaN, 0.177994, 1.548824])
x = d.rolling(window=4).skew()
tm.assert_series_equal(expected, x)
def test_rolling_kurt_edge_cases():
all_nan = Series([np.NaN] * 5)
# yields all NaN (0 variance)
d = Series([1] * 5)
x = d.rolling(window=5).kurt()
tm.assert_series_equal(all_nan, x)
# yields all NaN (window too small)
d = Series(np.random.randn(5))
x = d.rolling(window=3).kurt()
tm.assert_series_equal(all_nan, x)
# yields [NaN, NaN, NaN, 1.224307, 2.671499]
d = Series([-1.50837035, -0.1297039, 0.19501095, 1.73508164, 0.41941401])
expected = Series([np.NaN, np.NaN, np.NaN, 1.224307, 2.671499])
x = d.rolling(window=4).kurt()
tm.assert_series_equal(expected, x)
def test_rolling_skew_eq_value_fperr():
# #18804 all rolling skew for all equal values should return Nan
a = Series([1.1] * 15).rolling(window=10).skew()
assert np.isnan(a).all()
def test_rolling_kurt_eq_value_fperr():
# #18804 all rolling kurt for all equal values should return Nan
a = Series([1.1] * 15).rolling(window=10).kurt()
assert np.isnan(a).all()
def test_rolling_max_gh6297():
"""Replicate result expected in GH #6297"""
indices = [datetime(1975, 1, i) for i in range(1, 6)]
# So that we can have 2 datapoints on one of the days
indices.append(datetime(1975, 1, 3, 6, 0))
series = Series(range(1, 7), index=indices)
# Use floats instead of ints as values
series = series.map(lambda x: float(x))
# Sort chronologically
series = series.sort_index()
expected = Series(
[1.0, 2.0, 6.0, 4.0, 5.0],
index=DatetimeIndex([datetime(1975, 1, i, 0) for i in range(1, 6)], freq="D"),
)
x = series.resample("D").max().rolling(window=1).max()
tm.assert_series_equal(expected, x)
def test_rolling_max_resample():
indices = [datetime(1975, 1, i) for i in range(1, 6)]
# So that we can have 3 datapoints on last day (4, 10, and 20)
indices.append(datetime(1975, 1, 5, 1))
indices.append(datetime(1975, 1, 5, 2))
series = Series(list(range(0, 5)) + [10, 20], index=indices)
# Use floats instead of ints as values
series = series.map(lambda x: float(x))
# Sort chronologically
series = series.sort_index()
# Default how should be max
expected = Series(
[0.0, 1.0, 2.0, 3.0, 20.0],
index=DatetimeIndex([datetime(1975, 1, i, 0) for i in range(1, 6)], freq="D"),
)
x = series.resample("D").max().rolling(window=1).max()
tm.assert_series_equal(expected, x)
# Now specify median (10.0)
expected = Series(
[0.0, 1.0, 2.0, 3.0, 10.0],
index=DatetimeIndex([datetime(1975, 1, i, 0) for i in range(1, 6)], freq="D"),
)
x = series.resample("D").median().rolling(window=1).max()
tm.assert_series_equal(expected, x)
# Now specify mean (4+10+20)/3
v = (4.0 + 10.0 + 20.0) / 3.0
expected = Series(
[0.0, 1.0, 2.0, 3.0, v],
index=DatetimeIndex([datetime(1975, 1, i, 0) for i in range(1, 6)], freq="D"),
)
x = series.resample("D").mean().rolling(window=1).max()
tm.assert_series_equal(expected, x)
def test_rolling_min_resample():
indices = [datetime(1975, 1, i) for i in range(1, 6)]
# So that we can have 3 datapoints on last day (4, 10, and 20)
indices.append(datetime(1975, 1, 5, 1))
indices.append(datetime(1975, 1, 5, 2))
series = Series(list(range(0, 5)) + [10, 20], index=indices)
# Use floats instead of ints as values
series = series.map(lambda x: float(x))
# Sort chronologically
series = series.sort_index()
# Default how should be min
expected = Series(
[0.0, 1.0, 2.0, 3.0, 4.0],
index=DatetimeIndex([datetime(1975, 1, i, 0) for i in range(1, 6)], freq="D"),
)
r = series.resample("D").min().rolling(window=1)
tm.assert_series_equal(expected, r.min())
def test_rolling_median_resample():
indices = [datetime(1975, 1, i) for i in range(1, 6)]
# So that we can have 3 datapoints on last day (4, 10, and 20)
indices.append(datetime(1975, 1, 5, 1))
indices.append(datetime(1975, 1, 5, 2))
series = Series(list(range(0, 5)) + [10, 20], index=indices)
# Use floats instead of ints as values
series = series.map(lambda x: float(x))
# Sort chronologically
series = series.sort_index()
# Default how should be median
expected = Series(
[0.0, 1.0, 2.0, 3.0, 10],
index=DatetimeIndex([datetime(1975, 1, i, 0) for i in range(1, 6)], freq="D"),
)
x = series.resample("D").median().rolling(window=1).median()
tm.assert_series_equal(expected, x)
def test_rolling_median_memory_error():
# GH11722
n = 20000
Series(np.random.randn(n)).rolling(window=2, center=False).median()
Series(np.random.randn(n)).rolling(window=2, center=False).median()
@pytest.mark.parametrize(
"data_type",
[np.dtype(f"f{width}") for width in [4, 8]]
+ [np.dtype(f"{sign}{width}") for width in [1, 2, 4, 8] for sign in "ui"],
)
def test_rolling_min_max_numeric_types(data_type):
# GH12373
# Just testing that these don't throw exceptions and that
# the return type is float64. Other tests will cover quantitative
# correctness
result = DataFrame(np.arange(20, dtype=data_type)).rolling(window=5).max()
assert result.dtypes[0] == np.dtype("f8")
result = DataFrame(np.arange(20, dtype=data_type)).rolling(window=5).min()
assert result.dtypes[0] == np.dtype("f8")
@pytest.mark.parametrize(
"f",
[
lambda x: x.rolling(window=10, min_periods=0).count(),
lambda x: x.rolling(window=10, min_periods=5).cov(x, pairwise=False),
lambda x: x.rolling(window=10, min_periods=5).corr(x, pairwise=False),
lambda x: x.rolling(window=10, min_periods=5).max(),
lambda x: x.rolling(window=10, min_periods=5).min(),
lambda x: x.rolling(window=10, min_periods=5).sum(),
lambda x: x.rolling(window=10, min_periods=5).mean(),
lambda x: x.rolling(window=10, min_periods=5).std(),
lambda x: x.rolling(window=10, min_periods=5).var(),
lambda x: x.rolling(window=10, min_periods=5).skew(),
lambda x: x.rolling(window=10, min_periods=5).kurt(),
lambda x: x.rolling(window=10, min_periods=5).quantile(0.5),
lambda x: x.rolling(window=10, min_periods=5).median(),
lambda x: x.rolling(window=10, min_periods=5).apply(sum, raw=False),
lambda x: x.rolling(window=10, min_periods=5).apply(sum, raw=True),
pytest.param(
lambda x: x.rolling(win_type="boxcar", window=10, min_periods=5).mean(),
marks=td.skip_if_no_scipy,
),
],
)
def test_moment_functions_zero_length(f):
# GH 8056
s = Series(dtype=np.float64)
s_expected = s
df1 = DataFrame()
df1_expected = df1
df2 = DataFrame(columns=["a"])
df2["a"] = df2["a"].astype("float64")
df2_expected = df2
s_result = f(s)
tm.assert_series_equal(s_result, s_expected)
df1_result = f(df1)
tm.assert_frame_equal(df1_result, df1_expected)
df2_result = f(df2)
tm.assert_frame_equal(df2_result, df2_expected)
@pytest.mark.parametrize(
"f",
[
lambda x: (x.rolling(window=10, min_periods=5).cov(x, pairwise=True)),
lambda x: (x.rolling(window=10, min_periods=5).corr(x, pairwise=True)),
],
)
def test_moment_functions_zero_length_pairwise(f):
df1 = DataFrame()
df2 = DataFrame(columns=Index(["a"], name="foo"), index= | Index([], name="bar") | pandas.Index |
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import matplotlib.dates as mdates
from matplotlib.font_manager import FontProperties
from statsmodels.tsa import stattools
from statsmodels.graphics import tsaplots
class Chp023(object):
def __init__(self):
self.name = 'Chp023'
# Data file format: index, date, weekday, opening price, high price,
# low price, closing price, return
self.data_file = 'data/pqb/chp023_001.txt'
def startup(self):
print('Chapter 23: Basic properties of time series')
#self.acf_pacf_demo()
#self.dwn_demo()
#self.random_walk_demo()
self.random_walk_fit()
def acf_pacf_demo(self):
data = pd.read_csv(self.data_file, sep='\t', index_col='Trddt')
sh_index = data[data.Indexcd==1]
sh_index.index = pd.to_datetime(sh_index.index)
sh_return = sh_index.Retindex
print('Time series length: N={0}'.format(len(sh_return)))
acfs = stattools.acf(sh_return)
print(acfs)
pacfs = stattools.pacf(sh_return)
print(pacfs)
tsaplots.plot_acf(sh_return, use_vlines=True, lags=30)
plt.show()
tsaplots.plot_pacf(sh_return, use_vlines=True, lags=30)
plt.show()
def dwn_demo(self):
'''
White noise example
'''
dwn = np.random.standard_normal(size=500)
plt.plot(dwn, c='b')
plt.title('White Noise Demo')
plt.show()
acfs = stattools.acf(dwn)
print(acfs)
tsaplots.plot_acf(dwn, use_vlines=True, lags=30)
plt.show()
def random_walk_demo(self):
'''
Random walk time series modeling example
'''
w = np.random.standard_normal(size=1000)
x = w
for t in range(1, len(w)):
x[t] = x[t-1] + w[t]
plt.plot(x, c='b')
plt.title('Random Walk Demo')
plt.show()
acfs = stattools.acf(x)
print(acfs)
tsaplots.plot_acf(x, use_vlines=True, lags=30)
plt.show()
# Fit the random walk signal
r = []
for t in range(1, len(x)):
r.append(x[t] - x[t-1])
rd = np.array(r)
plt.plot(rd, c='r')
plt.title('Residue Signal')
plt.show()
rd_acfs = stattools.acf(rd)
print(rd_acfs)
tsaplots.plot_acf(rd, use_vlines=True, lags=30)
plt.show()
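# Note (added for clarity, not in the original chapter code): differencing
# x[t] - x[t-1] recovers the white-noise increments w[t], which is why the
# residual series plotted above should show no significant autocorrelation
# beyond lag 0.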
def random_walk_fit(self):
data = | pd.read_csv(self.data_file, sep='\t', index_col='Trddt') | pandas.read_csv |
import os
""" First change the following directory link to where all input files do exist """
os.chdir("D:\\Book writing\\Codes\\Chapter 2")
import numpy as np
import pandas as pd
import statsmodels.api as sm
import matplotlib.pyplot as plt
import seaborn as sns
#from sklearn.model_selection import train_test_split
#from sklearn.metrics import r2_score
wine_quality = pd.read_csv("winequality-red.csv",sep=';')
# Step for converting white space in columns to _ value for better handling
wine_quality.rename(columns=lambda x: x.replace(" ", "_"), inplace=True)
# Simple Linear Regression - chart
model = sm.OLS(wine_quality['quality'],sm.add_constant(wine_quality['alcohol'])).fit()
print (model.summary())
plt.scatter(wine_quality['alcohol'],wine_quality['quality'],label = 'Actual Data')
plt.plot(wine_quality['alcohol'],model.params[0]+model.params[1]*wine_quality['alcohol'],
c ='r',label="Regression fit")
plt.title('Wine Quality regressed on Alcohol')
plt.xlabel('Alcohol')
plt.ylabel('Quality')
plt.show()
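# Added illustrative prediction from the fitted line above (not part of the
# book's original listing); params[0] is the intercept and params[1] the
# alcohol coefficient, so the predicted quality at 12% alcohol is:
example_alcohol = 12.0
predicted_quality = model.params[0] + model.params[1] * example_alcohol
print('Predicted quality at {0}% alcohol: {1:.2f}'.format(example_alcohol, predicted_quality))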
# Simple Linear Regression - Model fit
import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.metrics import r2_score
wine_quality = pd.read_csv("winequality-red.csv",sep=';')
wine_quality.rename(columns=lambda x: x.replace(" ", "_"), inplace=True)
x_train,x_test,y_train,y_test = train_test_split(wine_quality['alcohol'],wine_quality["quality"],train_size = 0.7,random_state=42)
x_train = | pd.DataFrame(x_train) | pandas.DataFrame |
# -*- coding: utf-8 -*-
from __future__ import print_function
import pytest
import random
import numpy as np
import pandas as pd
from pandas.compat import lrange
from pandas.api.types import CategoricalDtype
from pandas import (DataFrame, Series, MultiIndex, Timestamp,
date_range, NaT, IntervalIndex, Categorical)
from pandas.util.testing import assert_series_equal, assert_frame_equal
import pandas.util.testing as tm
from pandas.tests.frame.common import TestData
class TestDataFrameSorting(TestData):
def test_sort_values(self):
frame = DataFrame([[1, 1, 2], [3, 1, 0], [4, 5, 6]],
index=[1, 2, 3], columns=list('ABC'))
# by column (axis=0)
sorted_df = frame.sort_values(by='A')
indexer = frame['A'].argsort().values
expected = frame.loc[frame.index[indexer]]
assert_frame_equal(sorted_df, expected)
sorted_df = frame.sort_values(by='A', ascending=False)
indexer = indexer[::-1]
expected = frame.loc[frame.index[indexer]]
assert_frame_equal(sorted_df, expected)
sorted_df = frame.sort_values(by='A', ascending=False)
assert_frame_equal(sorted_df, expected)
# GH4839
sorted_df = frame.sort_values(by=['A'], ascending=[False])
assert_frame_equal(sorted_df, expected)
# multiple bys
sorted_df = frame.sort_values(by=['B', 'C'])
expected = frame.loc[[2, 1, 3]]
assert_frame_equal(sorted_df, expected)
sorted_df = frame.sort_values(by=['B', 'C'], ascending=False)
assert_frame_equal(sorted_df, expected[::-1])
sorted_df = frame.sort_values(by=['B', 'A'], ascending=[True, False])
assert_frame_equal(sorted_df, expected)
pytest.raises(ValueError, lambda: frame.sort_values(
by=['A', 'B'], axis=2, inplace=True))
# by row (axis=1): GH 10806
sorted_df = frame.sort_values(by=3, axis=1)
expected = frame
assert_frame_equal(sorted_df, expected)
sorted_df = frame.sort_values(by=3, axis=1, ascending=False)
expected = frame.reindex(columns=['C', 'B', 'A'])
assert_frame_equal(sorted_df, expected)
sorted_df = frame.sort_values(by=[1, 2], axis='columns')
expected = frame.reindex(columns=['B', 'A', 'C'])
assert_frame_equal(sorted_df, expected)
sorted_df = frame.sort_values(by=[1, 3], axis=1,
ascending=[True, False])
assert_frame_equal(sorted_df, expected)
sorted_df = frame.sort_values(by=[1, 3], axis=1, ascending=False)
expected = frame.reindex(columns=['C', 'B', 'A'])
assert_frame_equal(sorted_df, expected)
msg = r'Length of ascending \(5\) != length of by \(2\)'
with tm.assert_raises_regex(ValueError, msg):
frame.sort_values(by=['A', 'B'], axis=0, ascending=[True] * 5)
def test_sort_values_inplace(self):
frame = DataFrame(np.random.randn(4, 4), index=[1, 2, 3, 4],
columns=['A', 'B', 'C', 'D'])
sorted_df = frame.copy()
sorted_df.sort_values(by='A', inplace=True)
expected = frame.sort_values(by='A')
assert_frame_equal(sorted_df, expected)
sorted_df = frame.copy()
sorted_df.sort_values(by=1, axis=1, inplace=True)
expected = frame.sort_values(by=1, axis=1)
assert_frame_equal(sorted_df, expected)
sorted_df = frame.copy()
sorted_df.sort_values(by='A', ascending=False, inplace=True)
expected = frame.sort_values(by='A', ascending=False)
assert_frame_equal(sorted_df, expected)
sorted_df = frame.copy()
sorted_df.sort_values(by=['A', 'B'], ascending=False, inplace=True)
expected = frame.sort_values(by=['A', 'B'], ascending=False)
assert_frame_equal(sorted_df, expected)
def test_sort_nan(self):
# GH3917
nan = np.nan
df = DataFrame({'A': [1, 2, nan, 1, 6, 8, 4],
'B': [9, nan, 5, 2, 5, 4, 5]})
# sort one column only
expected = DataFrame(
{'A': [nan, 1, 1, 2, 4, 6, 8],
'B': [5, 9, 2, nan, 5, 5, 4]},
index=[2, 0, 3, 1, 6, 4, 5])
sorted_df = df.sort_values(['A'], na_position='first')
assert_frame_equal(sorted_df, expected)
expected = DataFrame(
{'A': [nan, 8, 6, 4, 2, 1, 1],
'B': [5, 4, 5, 5, nan, 9, 2]},
index=[2, 5, 4, 6, 1, 0, 3])
sorted_df = df.sort_values(['A'], na_position='first', ascending=False)
assert_frame_equal(sorted_df, expected)
expected = df.reindex(columns=['B', 'A'])
sorted_df = df.sort_values(by=1, axis=1, na_position='first')
assert_frame_equal(sorted_df, expected)
# na_position='last', order
expected = DataFrame(
{'A': [1, 1, 2, 4, 6, 8, nan],
'B': [2, 9, nan, 5, 5, 4, 5]},
index=[3, 0, 1, 6, 4, 5, 2])
sorted_df = df.sort_values(['A', 'B'])
assert_frame_equal(sorted_df, expected)
# na_position='first', order
expected = DataFrame(
{'A': [nan, 1, 1, 2, 4, 6, 8],
'B': [5, 2, 9, nan, 5, 5, 4]},
index=[2, 3, 0, 1, 6, 4, 5])
sorted_df = df.sort_values(['A', 'B'], na_position='first')
assert_frame_equal(sorted_df, expected)
# na_position='first', not order
expected = DataFrame(
{'A': [nan, 1, 1, 2, 4, 6, 8],
'B': [5, 9, 2, nan, 5, 5, 4]},
index=[2, 0, 3, 1, 6, 4, 5])
sorted_df = df.sort_values(['A', 'B'], ascending=[
1, 0], na_position='first')
assert_frame_equal(sorted_df, expected)
# na_position='last', not order
expected = DataFrame(
{'A': [8, 6, 4, 2, 1, 1, nan],
'B': [4, 5, 5, nan, 2, 9, 5]},
index=[5, 4, 6, 1, 3, 0, 2])
sorted_df = df.sort_values(['A', 'B'], ascending=[
0, 1], na_position='last')
assert_frame_equal(sorted_df, expected)
# Test DataFrame with nan label
df = DataFrame({'A': [1, 2, nan, 1, 6, 8, 4],
'B': [9, nan, 5, 2, 5, 4, 5]},
index=[1, 2, 3, 4, 5, 6, nan])
# NaN label, ascending=True, na_position='last'
sorted_df = df.sort_index(
kind='quicksort', ascending=True, na_position='last')
expected = DataFrame({'A': [1, 2, nan, 1, 6, 8, 4],
'B': [9, nan, 5, 2, 5, 4, 5]},
index=[1, 2, 3, 4, 5, 6, nan])
assert_frame_equal(sorted_df, expected)
# NaN label, ascending=True, na_position='first'
sorted_df = df.sort_index(na_position='first')
expected = DataFrame({'A': [4, 1, 2, nan, 1, 6, 8],
'B': [5, 9, nan, 5, 2, 5, 4]},
index=[nan, 1, 2, 3, 4, 5, 6])
assert_frame_equal(sorted_df, expected)
# NaN label, ascending=False, na_position='last'
sorted_df = df.sort_index(kind='quicksort', ascending=False)
expected = DataFrame({'A': [8, 6, 1, nan, 2, 1, 4],
'B': [4, 5, 2, 5, nan, 9, 5]},
index=[6, 5, 4, 3, 2, 1, nan])
assert_frame_equal(sorted_df, expected)
# NaN label, ascending=False, na_position='first'
sorted_df = df.sort_index(
kind='quicksort', ascending=False, na_position='first')
expected = DataFrame({'A': [4, 8, 6, 1, nan, 2, 1],
'B': [5, 4, 5, 2, 5, nan, 9]},
index=[nan, 6, 5, 4, 3, 2, 1])
assert_frame_equal(sorted_df, expected)
def test_stable_descending_sort(self):
# GH #6399
df = DataFrame([[2, 'first'], [2, 'second'], [1, 'a'], [1, 'b']],
columns=['sort_col', 'order'])
sorted_df = df.sort_values(by='sort_col', kind='mergesort',
ascending=False)
assert_frame_equal(df, sorted_df)
def test_stable_descending_multicolumn_sort(self):
nan = np.nan
df = DataFrame({'A': [1, 2, nan, 1, 6, 8, 4],
'B': [9, nan, 5, 2, 5, 4, 5]})
# test stable mergesort
expected = DataFrame(
{'A': [nan, 8, 6, 4, 2, 1, 1],
'B': [5, 4, 5, 5, nan, 2, 9]},
index=[2, 5, 4, 6, 1, 3, 0])
sorted_df = df.sort_values(['A', 'B'], ascending=[0, 1],
na_position='first',
kind='mergesort')
assert_frame_equal(sorted_df, expected)
expected = DataFrame(
{'A': [nan, 8, 6, 4, 2, 1, 1],
'B': [5, 4, 5, 5, nan, 9, 2]},
index=[2, 5, 4, 6, 1, 0, 3])
sorted_df = df.sort_values(['A', 'B'], ascending=[0, 0],
na_position='first',
kind='mergesort')
assert_frame_equal(sorted_df, expected)
def test_stable_categorial(self):
# GH 16793
df = DataFrame({
'x': pd.Categorical(np.repeat([1, 2, 3, 4], 5), ordered=True)
})
expected = df.copy()
sorted_df = df.sort_values('x', kind='mergesort')
assert_frame_equal(sorted_df, expected)
def test_sort_datetimes(self):
# GH 3461, argsort / lexsort differences for a datetime column
df = DataFrame(['a', 'a', 'a', 'b', 'c', 'd', 'e', 'f', 'g'],
columns=['A'],
index=date_range('20130101', periods=9))
dts = [Timestamp(x)
for x in ['2004-02-11', '2004-01-21', '2004-01-26',
'2005-09-20', '2010-10-04', '2009-05-12',
'2008-11-12', '2010-09-28', '2010-09-28']]
df['B'] = dts[::2] + dts[1::2]
df['C'] = 2.
df['A1'] = 3.
df1 = df.sort_values(by='A')
df2 = df.sort_values(by=['A'])
assert_frame_equal(df1, df2)
df1 = df.sort_values(by='B')
df2 = df.sort_values(by=['B'])
assert_frame_equal(df1, df2)
df1 = df.sort_values(by='B')
df2 = df.sort_values(by=['C', 'B'])
assert_frame_equal(df1, df2)
def test_frame_column_inplace_sort_exception(self):
s = self.frame['A']
with tm.assert_raises_regex(ValueError, "This Series is a view"):
s.sort_values(inplace=True)
cp = s.copy()
cp.sort_values() # it works!
def test_sort_nat_values_in_int_column(self):
# GH 14922: "sorting with large float and multiple columns incorrect"
# cause was that the int64 value NaT was considered as "na". Which is
# only correct for datetime64 columns.
int_values = (2, int(NaT))
float_values = (2.0, -1.797693e308)
df = DataFrame(dict(int=int_values, float=float_values),
columns=["int", "float"])
df_reversed = DataFrame(dict(int=int_values[::-1],
float=float_values[::-1]),
columns=["int", "float"],
index=[1, 0])
# NaT is not a "na" for int64 columns, so na_position must not
# influence the result:
df_sorted = df.sort_values(["int", "float"], na_position="last")
assert_frame_equal(df_sorted, df_reversed)
df_sorted = df.sort_values(["int", "float"], na_position="first")
assert_frame_equal(df_sorted, df_reversed)
# reverse sorting order
df_sorted = df.sort_values(["int", "float"], ascending=False)
assert_frame_equal(df_sorted, df)
# and now check if NaT is still considered as "na" for datetime64
# columns:
df = DataFrame(dict(datetime=[Timestamp("2016-01-01"), NaT],
float=float_values), columns=["datetime", "float"])
df_reversed = DataFrame(dict(datetime=[NaT, Timestamp("2016-01-01")],
float=float_values[::-1]),
columns=["datetime", "float"],
index=[1, 0])
df_sorted = df.sort_values(["datetime", "float"], na_position="first")
assert_frame_equal(df_sorted, df_reversed)
df_sorted = df.sort_values(["datetime", "float"], na_position="last")
assert_frame_equal(df_sorted, df)
# Ascending should not affect the results.
df_sorted = df.sort_values(["datetime", "float"], ascending=False)
assert_frame_equal(df_sorted, df)
def test_sort_nat(self):
# GH 16836
d1 = [Timestamp(x) for x in ['2016-01-01', '2015-01-01',
np.nan, '2016-01-01']]
d2 = [Timestamp(x) for x in ['2017-01-01', '2014-01-01',
'2016-01-01', '2015-01-01']]
df = pd.DataFrame({'a': d1, 'b': d2}, index=[0, 1, 2, 3])
d3 = [Timestamp(x) for x in ['2015-01-01', '2016-01-01',
'2016-01-01', np.nan]]
d4 = [Timestamp(x) for x in ['2014-01-01', '2015-01-01',
'2017-01-01', '2016-01-01']]
expected = pd.DataFrame({'a': d3, 'b': d4}, index=[1, 3, 0, 2])
sorted_df = df.sort_values(by=['a', 'b'], )
tm.assert_frame_equal(sorted_df, expected)
class TestDataFrameSortIndexKinds(TestData):
def test_sort_index_multicolumn(self):
A = np.arange(5).repeat(20)
B = np.tile(np.arange(5), 20)
random.shuffle(A)
random.shuffle(B)
frame = DataFrame({'A': A, 'B': B,
'C': np.random.randn(100)})
# use .sort_values #9816
with tm.assert_produces_warning(FutureWarning):
frame.sort_index(by=['A', 'B'])
result = frame.sort_values(by=['A', 'B'])
indexer = np.lexsort((frame['B'], frame['A']))
expected = frame.take(indexer)
assert_frame_equal(result, expected)
# use .sort_values #9816
with tm.assert_produces_warning(FutureWarning):
frame.sort_index(by=['A', 'B'], ascending=False)
result = frame.sort_values(by=['A', 'B'], ascending=False)
indexer = np.lexsort((frame['B'].rank(ascending=False),
frame['A'].rank(ascending=False)))
expected = frame.take(indexer)
assert_frame_equal(result, expected)
# use .sort_values #9816
with tm.assert_produces_warning(FutureWarning):
frame.sort_index(by=['B', 'A'])
result = frame.sort_values(by=['B', 'A'])
indexer = np.lexsort((frame['A'], frame['B']))
expected = frame.take(indexer)
assert_frame_equal(result, expected)
def test_sort_index_inplace(self):
frame = DataFrame(np.random.randn(4, 4), index=[1, 2, 3, 4],
columns=['A', 'B', 'C', 'D'])
# axis=0
unordered = frame.loc[[3, 2, 4, 1]]
a_id = id(unordered['A'])
df = unordered.copy()
df.sort_index(inplace=True)
expected = frame
assert_frame_equal(df, expected)
assert a_id != id(df['A'])
df = unordered.copy()
df.sort_index(ascending=False, inplace=True)
expected = frame[::-1]
assert_frame_equal(df, expected)
# axis=1
unordered = frame.loc[:, ['D', 'B', 'C', 'A']]
df = unordered.copy()
df.sort_index(axis=1, inplace=True)
expected = frame
assert_frame_equal(df, expected)
df = unordered.copy()
df.sort_index(axis=1, ascending=False, inplace=True)
expected = frame.iloc[:, ::-1]
assert_frame_equal(df, expected)
def test_sort_index_different_sortorder(self):
A = np.arange(20).repeat(5)
B = np.tile(np.arange(5), 20)
indexer = np.random.permutation(100)
A = A.take(indexer)
B = B.take(indexer)
df = DataFrame({'A': A, 'B': B,
'C': np.random.randn(100)})
# use .sort_values #9816
with tm.assert_produces_warning(FutureWarning):
df.sort_index(by=['A', 'B'], ascending=[1, 0])
result = df.sort_values(by=['A', 'B'], ascending=[1, 0])
ex_indexer = np.lexsort((df.B.max() - df.B, df.A))
expected = df.take(ex_indexer)
assert_frame_equal(result, expected)
# test with multiindex, too
idf = df.set_index(['A', 'B'])
result = idf.sort_index(ascending=[1, 0])
expected = idf.take(ex_indexer)
assert_frame_equal(result, expected)
# also, Series!
result = idf['C'].sort_index(ascending=[1, 0])
assert_series_equal(result, expected['C'])
def test_sort_index_duplicates(self):
# with 9816, these are all translated to .sort_values
df = DataFrame([lrange(5, 9), lrange(4)],
columns=['a', 'a', 'b', 'b'])
with tm.assert_raises_regex(ValueError, 'not unique'):
# use .sort_values #9816
with tm.assert_produces_warning(FutureWarning):
df.sort_index(by='a')
with tm.assert_raises_regex(ValueError, 'not unique'):
df.sort_values(by='a')
with tm.assert_raises_regex(ValueError, 'not unique'):
# use .sort_values #9816
with tm.assert_produces_warning(FutureWarning):
df.sort_index(by=['a'])
with | tm.assert_raises_regex(ValueError, 'not unique') | pandas.util.testing.assert_raises_regex |
## GitHub: dark-teal-coder
import pandas as pd
import numpy as np
import requests
from bs4 import BeautifulSoup
from fpdf import FPDF
import datetime
import string
import os
## Get datetime information
current_datetime = datetime.datetime.now()
current_year = current_datetime.year
## Get the running script path
script_path = os.path.dirname(os.path.abspath(__file__))
## Get the current working directiory
cwd = os.path.abspath(os.getcwd())
# print(script_path, cwd)
def read_noc(noc_filepath):
"""This function reads a data file containing a table of National Occupational Classification (NOC) codes related to computer
science and information technology jobs and returns the data in DataFrame format."""
try:
## Use Pandas to read in csv file
## Python parsing engine for RegEx delimiters
df_noc = pd.read_csv(noc_filepath, sep=', ', header=0, engine='python')
except FileNotFoundError:
print(f"The following file cannot be found:", noc_filepath, sep='\n')
except:
print("An unknown error occurs while reading in the following file causing the program to exit prematurely:", noc_filepath,
sep='\n')
else:
## Unify the headers
df_noc.columns = df_noc.columns.str.lower()
## Trim leading and ending spaces in the headers
## Ref.: https://pandas.pydata.org/docs/reference/api/pandas.DataFrame.rename.html
## (inplace=True) means not to return a new DataFrame
df_noc.rename(columns=lambda x: x.strip(), inplace=True)
# print(df_noc)
return df_noc
def get_page(url_code, c):
"""This function scrapes wage data of 10 tech occupations classified by NOC from Job Bank and returns the data list."""
url_base = "https://www.jobbank.gc.ca/wagereport/occupation/"
## Add URL code to the end of the base URL to go to different wage report pages on Job Bank
url = url_base + str(url_code[c])
html_response = requests.get(url)
## The .content attribute holds raw bytes, which can be decoded better than the .text attribute.
html_doc = BeautifulSoup(html_response.content, 'html.parser')
# print(html_doc)
data_list = []
# wage_table = html_doc.find(id="wage-occ-report")
# print(wage_table)
nation_wages = html_doc.find("tr", class_="areaGroup national")
data_list.append(nation_wages.text.strip().split())
province_wages = html_doc.find_all("tr", class_="areaGroup province prov")
for prov_wage in province_wages:
data_list.append(prov_wage.text.strip().rsplit(maxsplit=3))
# print([row for row in data_list])
return data_list
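# Note (shape inferred from the parsing above, not from Job Bank documentation):
# each entry of data_list is [area, low, mid, high]; the first entry is the
# national row split on whitespace, and province rows use rsplit(maxsplit=3) so
# multi-word province names such as 'British Columbia' stay in one field.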
def write_excel(filepath_in, df_noc, url_code):
writer = pd.ExcelWriter(filepath_in, engine='xlsxwriter')
headers_nation = ['NOC', 'Occupation', 'URL Code', 'Low', 'Mid', 'High']
headers_province = ['Province', 'Low', 'Mid', 'High']
## Each iteration will scrape a webpage and change the data for 1 NOC into a DataFrame
df_tech_wages_ca = pd.DataFrame()
df_tech_wages_prov = pd.DataFrame()
for i in range(len(url_code)):
noc = f"NOC{df_noc.loc[i, 'noc']}"
data_list = get_page(url_code, i)
# print(df_noc.loc[i])
df_wage_table = pd.DataFrame(data_list, columns =['area', 'low', 'mid', 'high'])
# df_wage_table = pd.to_numeric(df_wage_table, errors='coerce')
# df_wage_table = df_wage_table.astype({'low': 'float64', 'mid': 'float64', 'high': 'float64'}, errors='ignore')
print(df_wage_table)
## Get the national wage data from the 1st row of each DataFrame
df_can_wage = df_wage_table.iloc[[0]]
df_career = df_noc.iloc[[i]].reset_index(drop=True)
df_can_wage_career = | pd.concat([df_career, df_can_wage], axis=1) | pandas.concat |
"""
Module for building a complete daily dataset from Quandl's WIKI dataset.
"""
from io import BytesIO
import tarfile
from zipfile import ZipFile
from click import progressbar
from logbook import Logger
import pandas as pd
import requests
from six.moves.urllib.parse import urlencode
from six import iteritems
from trading_calendars import register_calendar_alias
from . import core as bundles
import numpy as np
import pymongo
#################################################################
# The data must be re-indexed and NaN values filled with 0
#################################################################
def QT_mongo2df(uri,dbname,collectionname):
client = pymongo.MongoClient(uri)
db = client.get_database(dbname)
df = pd.DataFrame(list(db[collectionname].find({})))
try:
df.drop(['_id'], axis=1,inplace=True)
df.drop_duplicates(keep='last', inplace=True)
except:
print('Record not found',collectionname)
client.close()
return df
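# Illustrative usage of the helper above (URI, database and collection names are
# placeholders, not real connection details):
#
#   df = QT_mongo2df('mongodb://user:pass@host:27017/db', 'db', 'AAPL')
#
# The collection is returned as a DataFrame with '_id' dropped and exact
# duplicate rows removed; if that fails (e.g. an empty collection) it prints
# 'Record not found' and returns whatever was read.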
DayDBurl = 'mongodb://Thomas:[email protected]:25402/zipline'
DayDBname = 'zipline'
MinuteDBurl = 'mongodb://Thomas:[email protected]:47420/timeseries'
MinuteDBname = 'minutebar'
Metadataurl='D://Quantopian/metadata/metadata.csv'
###################################################################
log = Logger(__name__)
def gen_asset_metadata(metadataurl, show_progress):
if show_progress:
log.info('Generating asset metadata.')
metadataframe=pd.read_csv(metadataurl)
metadataframe.set_index('sid',inplace=True)
metadataframe.drop(['dburl', 'dbname','dbcollection'],axis=1,inplace=True)
return metadataframe
def parse_pricing_and_vol(metadataf,sessions):
for row in metadataf.itertuples():
asset_id = row[0]
_asset_data=QT_mongo2df(row[-3],row[-2],row[-1])
_asset_data.loc[:, 'date'] = | pd.to_datetime(_asset_data['date']) | pandas.to_datetime |
import pandas as pd
#import openpyxl
from openpyxl import workbook
from openpyxl import load_workbook
import numpy as np
from scipy.stats import spearmanr
from .general_functions import *
class Abundances():
def __init__(self):
self.abundance_df = pd.DataFrame(index=[], columns=[])
self.corr_matrix = None
self.corr_signature = None
self.sample_names = []
self.header_present = False
self.abundance_raw_df = None
def addMasking(self):
""" merges abundance dataframe and taxonomy dataframe """
self.abundance_df['masked'] = [False]*len(self.abundance_df.index)
self.abundance_df['colour'] = ['undefined']*len(self.abundance_df.index)
def addSample(self, sample_name, filename):
""" adds a sample (as one column) to the dataframes for relative and raw counts"""
tax_levels = None
if len(self.abundance_df.columns) == 0:
self.abundance_df = pd.read_csv(filename, header=0, sep='\t') #krona (no header, no index)
cols = list(self.abundance_df.columns)
self.abundance_df = self.abundance_df[cols[0:2] + cols[:1:-1]]
self.tax_levels = self.abundance_df.columns.tolist()[2:]
self.abundance_df = self.abundance_df[self.abundance_df.columns.tolist()[0:2] + self.tax_levels]
self.abundance_df.rename(columns={self.abundance_df.columns[0]:sample_name}, inplace=True)
self.abundance_df.index = self.abundance_df[self.tax_levels[0]]+'_'
self.abundance_df.index.name = None
self.abundance_raw_df = self.abundance_df.loc[:,[self.abundance_df.columns[1]] + self.tax_levels]
self.abundance_raw_df.rename(columns={self.abundance_raw_df.columns[0]:sample_name}, inplace=True)
self.abundance_raw_df.index = self.abundance_raw_df[self.tax_levels[0]]+'_'
self.abundance_raw_df.index.name = None
self.abundance_df = self.abundance_df.loc[:,[self.abundance_df.columns[0]] + self.tax_levels]
else:
sample_df = pd.read_csv(filename, header=0, sep='\t')
sample_raw_df = sample_df.loc[:,[sample_df.columns[1]]+self.tax_levels]
sample_raw_df.rename(columns={sample_raw_df.columns[0]:sample_name}, inplace=True)
sample_raw_df.index = sample_raw_df[self.tax_levels[0]]+'_'
sample_raw_df.index.name = None
sample_df.rename(columns={sample_df.columns[0]:sample_name}, inplace=True)
sample_df.index = sample_df[self.tax_levels[0]]+'_'
sample_df.index.name = None
self.abundance_df = pd.merge(self.abundance_df, sample_df, how='outer', on=self.tax_levels)
self.abundance_df.index = self.abundance_df[self.tax_levels[0]]+'_'
self.abundance_df.index.name = None
self.abundance_df.fillna(value=0, inplace=True)
self.abundance_raw_df = pd.merge(self.abundance_raw_df, sample_raw_df, how='outer', on=self.tax_levels)
self.abundance_raw_df.index = self.abundance_raw_df[self.tax_levels[0]]+'_'
self.abundance_raw_df.index.name = None
self.abundance_raw_df.fillna(value=0, inplace=True)
self.abundance_df[sample_name] = self.abundance_df[sample_name].astype(float)
self.abundance_raw_df[sample_name] = self.abundance_raw_df[sample_name].astype(float)
self.sample_names.append(sample_name.strip())
self.abundance_df = self.abundance_df[self.sample_names + self.tax_levels]
self.abundance_raw_df = self.abundance_raw_df[self.sample_names + self.tax_levels]
myindex = list(self.abundance_df.index)
newlist = sorted(set([i for i in myindex if myindex.count(i)>1]))
#problems with the ncbi taxonomy (typos?)
for i in newlist:
self.abundance_df.loc[i,self.sample_names] = self.abundance_df.loc[i].sum(numeric_only=True)
self.abundance_df.drop(i, inplace=True)
self.abundance_raw_df.loc[i,self.sample_names] = self.abundance_raw_df.loc[i].sum(numeric_only=True)
self.abundance_raw_df.drop(i, inplace=True)
return self.tax_levels
def addRelSample(self, sample_name, filename):
""" adds a sample (as one column) to the dataframes for relative counts """
tax_levels = None
if len(self.abundance_df.columns) == 0:
self.abundance_df = pd.read_csv(filename, header=0, sep='\t') #krona (no header, no index)
cols = list(self.abundance_df.columns)
self.abundance_df = self.abundance_df[[cols[0]] + cols[:1:-1]]
self.tax_levels = self.abundance_df.columns.tolist()[1:]
#self.abundance_df = self.abundance_df[self.abundance_df.columns[0] + self.tax_levels]
self.abundance_df.rename(columns={self.abundance_df.columns[0]:sample_name}, inplace=True)
self.abundance_df.index = self.abundance_df[self.tax_levels[0]]+'_'
self.abundance_df.index.name = None
#self.abundance_df = self.abundance_df.loc[:,[self.abundance_df.columns[0]] + self.tax_levels]
else:
sample_df = pd.read_csv(filename, header=0, sep='\t')
sample_df.rename(columns={sample_df.columns[0]:sample_name}, inplace=True)
sample_df.index = sample_df[self.tax_levels[0]]+'_'
sample_df.index.name = None
self.abundance_df = pd.merge(self.abundance_df, sample_df, how='outer', on=self.tax_levels)
self.abundance_df.index = self.abundance_df[self.tax_levels[0]]+'_'
self.abundance_df.index.name = None
self.abundance_df.fillna(value=0, inplace=True)
self.abundance_df[sample_name] = self.abundance_df[sample_name].astype(float)
self.sample_names.append(sample_name.strip())
self.abundance_df = self.abundance_df[self.sample_names + self.tax_levels]
myindex = list(self.abundance_df.index)
newlist = sorted(set([i for i in myindex if myindex.count(i)>1]))
#problems with the ncbi taxonomy (typos?)
# for i in newlist:
# self.abundance_df.loc[i,self.sample_names] = self.abundance_df.loc[i].sum(numeric_only=True)
# self.abundance_df.drop(i, inplace=True)
self.abundance_raw_df = None
return self.tax_levels
def addAbsSample(self, sample_name, filename):
""" adds a sample (as one column) to the dataframes for absolute and relative (calculated as percent) counts """
tax_levels = None
if len(self.abundance_df.columns) == 0:
self.abundance_df = pd.read_csv(filename, header=0, sep='\t') #krona (no header, no index)
cols = list(self.abundance_df.columns)
total_count = self.abundance_df[cols[0]].sum()
self.abundance_df = self.abundance_df[[cols[0]] + cols[:1:-1]]
self.tax_levels = self.abundance_df.columns.tolist()[1:]
self.abundance_df = self.abundance_df[[self.abundance_df.columns[0]] + self.tax_levels]
            print(self.abundance_df.head())
print(total_count)
self.abundance_df[cols[0]] = self.abundance_df[cols[0]].divide(total_count)*100
self.abundance_df.rename(columns={self.abundance_df.columns[0]:sample_name}, inplace=True)
self.abundance_df.index = self.abundance_df[self.tax_levels[0]]+'_'
self.abundance_df.index.name = None
self.abundance_raw_df = self.abundance_df.loc[:,[self.abundance_df.columns[0]] + self.tax_levels]
self.abundance_raw_df.rename(columns={self.abundance_raw_df.columns[0]:sample_name}, inplace=True)
self.abundance_raw_df.index = self.abundance_raw_df[self.tax_levels[0]]+'_'
self.abundance_raw_df.index.name = None
self.abundance_df = self.abundance_df.loc[:,[self.abundance_df.columns[0]] + self.tax_levels]
else:
sample_df = pd.read_csv(filename, header=0, sep='\t')
total_count = sample_df[sample_df.columns[0]].sum()
sample_raw_df = sample_df.loc[:,[sample_df.columns[0]]+self.tax_levels]
sample_raw_df.rename(columns={sample_raw_df.columns[0]:sample_name}, inplace=True)
sample_raw_df.index = sample_raw_df[self.tax_levels[0]]+'_'
sample_raw_df.index.name = None
sample_df[sample_df.columns[0]] = sample_df[sample_df.columns[0]].divide(total_count)*100
sample_df.rename(columns={sample_df.columns[0]:sample_name}, inplace=True)
sample_df.index = sample_df[self.tax_levels[0]]+'_'
sample_df.index.name = None
self.abundance_df = pd.merge(self.abundance_df, sample_df, how='outer', on=self.tax_levels)
self.abundance_df.index = self.abundance_df[self.tax_levels[0]]+'_'
self.abundance_df.index.name = None
self.abundance_df.fillna(value=0, inplace=True)
self.abundance_raw_df = pd.merge(self.abundance_raw_df, sample_raw_df, how='outer', on=self.tax_levels)
self.abundance_raw_df.index = self.abundance_raw_df[self.tax_levels[0]]+'_'
self.abundance_raw_df.index.name = None
self.abundance_raw_df.fillna(value=0, inplace=True)
self.abundance_df[sample_name] = self.abundance_df[sample_name].astype(float)
self.abundance_raw_df[sample_name] = self.abundance_raw_df[sample_name].astype(float)
self.sample_names.append(sample_name.strip())
self.abundance_df = self.abundance_df[self.sample_names + self.tax_levels]
self.abundance_raw_df = self.abundance_raw_df[self.sample_names + self.tax_levels]
myindex = list(self.abundance_df.index)
newlist = sorted(set([i for i in myindex if myindex.count(i)>1]))
return self.tax_levels
def deselectOfSample(self, names, current_tax_level):
""" deselects/removes species given the name of the species """
index_list = list(self.sample[self.sample['masked'] == False].index)
for name in names:
idx = self.sample[self.sample[current_tax_level]==name].index
            self.sample.loc[idx, 'masked'] = True
def getAbundanceForOneSample(self, species, sample):
""" gets abundance for one species of one sample """
return self.abundance_df.loc[species,sample]
def getAbundances(self, species):
""" gets the abundances in all samples for a given species (if it is unmarked) """
return self.abundance_df[self.abundance_df[self.tax_levels[0]]==species]
def getDataframe(self):
return self.abundance_df
def getAbsolutDataframe(self):
if self.abundance_raw_df is not None:
return self.abundance_raw_df
def get_corr(self):
""" gets the correlation matrix and the signature(sample names) """
return self.corr_matrix, self.corr_signature
def getCorrelationForSpecies(self, current_species, threshold):
""" calculates correlation for abundances of species (between species) """
grouped = self.groupAllSamples()
if self.corr_matrix is None or self.corr_signature is None or self.corr_signature[0] != grouped.iloc[:,len(self.tax_levels):-1].columns.tolist():
corr_matrix = grouped.iloc[:,len(self.tax_levels):-2]
corr_matrix.index = grouped[self.tax_level]
self.corr_matrix = corr_matrix.transpose().corr(method='spearman')
self.corr_signature = (list(corr_matrix.columns), self.tax_levels[0])
corr_matrix = self.corr_matrix.loc[:,current_species]
text = 'spearman (rank) correlation >= ' + str(threshold) + ':\n'
corr_series = corr_matrix[abs(corr_matrix) >= threshold].sort_values(ascending=False)
corr_matrix = grouped.iloc[:,len(self.tax_levels):-1]
corr_matrix.index = grouped[self.tax_level]
corr_list = []
current_abundance = corr_matrix.loc[current_species,:corr_matrix.columns[-2]]
list_index = []
for name in corr_matrix.index:
new_abundance = corr_matrix.loc[name,:][:-1]
corr = '{0:.3}'.format(current_abundance.corr(new_abundance, method='spearman'))
if corr != 'nan' and abs(float(corr)) >= threshold and current_species != name:
corr_list.append('{0:.3}'.format(current_abundance.corr(new_abundance, method='spearman')))
list_index.append(name)
#rho, pval = spearmanr(current_abundance, new_abundance)
#if rho != 'nan' and abs(float(corr)) >= threshold and current_species != name and pval <= 0.05:
# #corr_list.append('{0:.3}'.format(rho))
# #list_index.append(name)
# print(name + '\t' + str(rho) + '\t' + str(pval))
#for i in xrange(len(corr_list)):
# if corr_list[i] != 'nan' and abs(float(corr_list[i])) >= threshold and current_species != corr_matrix.index[i]:
# print(corr_matrix.index[i] + '\t' + corr_list[i])
corr_series = pd.Series(corr_list, index=list_index)
return text, corr_series
#return text, corr_series.map('{0:.3}'.format)
def corr(self, tax_levels , samples, tax_level, superfamily_groups, min_samples):
""" correlation """
corr_dict = {}
corr_df = pd.DataFrame(columns=('name1', 'name2', 'r', 'p', 'max_name1', 'max_name2'))
tax_level_idx = tax_levels.index(tax_level)
taxlevelnum = len(tax_levels) - tax_level_idx + 1
samplenum = len(samples)
#grouped1 = self.abundance_df.groupby(tax_levels[tax_level_idx:], sort=False, as_index=False).sum()
#print(self.abundance_df[self.abundance_df['masked']==False].head)
grouped1 = self.abundance_df[self.abundance_df['masked']==False].groupby(tax_levels[tax_level_idx:], sort=False, as_index=False).sum()
#for i, idx in enumerate(grouped1.index):
# if grouped1.iloc[i,taxlevelnum:-1].astype(bool).sum() < min(samplenum * 0.2, 3) :
# grouped1.at[i, 'masked'] = True
#grouped1_filtered = grouped1[grouped1['masked'] == False]
#print(len(grouped1_filtered.index))
#group1 = grouped1_filtered
group1 = grouped1[grouped1[tax_levels[-1]] == superfamily_groups[0]]
group2 = grouped1[grouped1[tax_levels[-1]] == superfamily_groups[1]]
group1 = group1.loc[:,tax_levels + samples]
group2 = group2.loc[:,tax_levels + samples]
k = 0
if superfamily_groups[0] == superfamily_groups[1]:
for i, idx in enumerate(group1.index):
abundance1 = group1.iloc[i, taxlevelnum:-1]#-2
if len(abundance1[abundance1 > 0.00]) < min_samples:
#
group2.drop(idx, inplace=True)
continue
name1 = group1.loc[idx, tax_level]
if name1 != '-':
max_name1 = max(abundance1)
for j in range(i+1, len(group2.index)):
jdx = group2.index[j]
abundance2 = group2.iloc[j, taxlevelnum:-1]
if len(abundance2[abundance2 > 0.00]) < min_samples:
continue
name2 = group2.loc[jdx, tax_level]
if name2 != '-':
#rho, pval = spearmanr(abundance1, abundance2)
max_name2 = max(abundance2)
rho, pval = spearmanr(abundance1, abundance2)
corr_df.loc[k] = [name1, name2, rho, pval, max_name1, max_name2]
corr_dict[(name1 ,name2)] = {}
corr_dict[(name1 ,name2)]['r'] = rho
corr_dict[(name1 ,name2)]['p'] = pval
k += 1
else:
for i, idx in enumerate(group1.index):
abundance1 = group1.iloc[i, taxlevelnum:]
if len(abundance1[abundance1 > 0.00]) < min_samples:
#if len(abundance1[abundance1 > 0.00]) == 0:
# group2.drop([idx], inplace=True)
# continue
#group2.drop(idx, inplace=True)
continue
#abundance1[self.sample_names] > 0) * 1
name1 = group1.loc[idx, tax_level]
if name1 != '-':
max_name1 = max(abundance1)
for j, jdx in enumerate(group2.index):
abundance2 = group2.iloc[j, taxlevelnum:]
if len(abundance2[abundance2 > 0.00]) < min_samples:
continue
name2 = group2.loc[jdx, tax_level]
if name2 != '-':
#rho, pval = spearmanr(abundance1, abundance2, max_name1, max_name2)
max_name2 = max(abundance2)
rho, pval = spearmanr(abundance1.values, abundance2.values)
corr_df.loc[k] = [name1, name2, rho, pval, max_name1, max_name2]
corr_dict[(name1 ,name2)] = {}
corr_dict[(name1 ,name2)]['r'] = rho
corr_dict[(name1 ,name2)]['p'] = pval
k += 1
return corr_df
def getMaxAbundanceOfSample(self):
""" gets the maximum abundance of all unmasked species """
try: maximum = max(self.sample[self.sample['masked'] == False]['abundance'])+0.01
except: maximum = 0
return maximum
def getMaxAbundanceOfClrSample(self):
""" gets the maximum abundance of all unmasked species """
#try: maximum = max(self.clr_sample['abundance'])+0.001
try: maximum = max(self.clr_sample)+0.01
except: maximum = 0
return maximum
def getMinAbundanceOfClrSample(self):
""" gets the minimum abundance of all unmasked species """
#try: minimum = min(self.clr_sample['abundance'])-0.001
try: minimum = min(self.clr_sample)-0.01
except: minimum = 0
return minimum
def getPresenceAbsenceDF(self, threshold):
""" gets a dataframe giving the presence/absence of organisms """
binaryAbundance = (self.abundance_df[self.sample_names] > threshold) * 1
binaryAbundance.index = list(self.abundance_df[self.tax_levels[0]])
return binaryAbundance
def getSample(self, sample_name, tax_level):
""" gets sample to work with """
self.sample_name = sample_name
columns = self.tax_levels + [sample_name, 'masked']
self.sample = self.abundance_df[columns]
self.sample = self.sample[self.sample[sample_name] > 0]
self.sample = self.sample.rename(columns = {sample_name: 'abundance'})
        self.sample = self.sample[self.sample['masked']==False]
self.sample.index.name = None
self.tax_level = tax_level
def getClrSample(self, sample_name, tax_level=None):
""" gets clr-tranformed sample to work with """
self.sample_name = sample_name
if tax_level is not None:
self.tax_level = tax_level
df_clr = self.groupClrSamples()
self.clr_sample = df_clr[sample_name]
        self.clr_sample = self.clr_sample.rename('abundance')
return self.clr_sample
def getSamplesList(self):
""" gets the list of samples (currently loaded) """
return self.sample_names
def getWorkingSample(self, tax_level, as_index=False):
""" gets sample grouped on current tax_level """
levels = self.tax_levels[self.tax_levels.index(tax_level):]
grouped = self.sample.groupby(levels, sort=False, as_index=as_index).sum()#['abundance']
grouped.index = range(len(grouped.index))
self.tax_level = tax_level
return grouped[grouped['masked'] == False]
def getValuesForColumn(self, columnname):
""" gets as list of the unique entries of a column"""
return list(self.abundance_df[columnname].unique())
def getNotHidden(self):
""" gets a dataframe of containing only the rows which are not masked/hidden """
        return self.abundance_df[self.abundance_df['masked']==False]
def groupAllSamples(self, all_levels=None):
""" groups all samples on tax_level """
if all_levels is None:
levels = self.tax_levels[self.tax_levels.index(self.tax_level):]
else:
levels = all_levels
grouped = self.abundance_df.groupby(levels, sort=False, as_index=False).sum()
grouped.index = grouped[self.tax_level] + '_'
grouped.index.name = None
if self.tax_level == self.tax_levels[0]:
grouped['colour'] = list(self.abundance_df['colour'])
else:
grouped['colour'] = ['undefined']*len(grouped.index)
grouped.index = grouped[self.tax_level] + '_'
return grouped[grouped['masked'] == False]
def groupAbsoluteSamples(self):
""" groups absolute abundances """
unmasked_tax = set(list(self.groupAllSamples(all_levels=self.tax_levels)[self.tax_level]))
levels = self.tax_levels[self.tax_levels.index(self.tax_level):]
if self.abundance_raw_df is not None:
grouped = self.abundance_raw_df.groupby(levels, sort=False, as_index=False).sum()
grouped.index = grouped[self.tax_level] + '_'
grouped.index.name = None
return grouped[grouped[self.tax_level].isin(unmasked_tax)]
else:
return None
def groupClrSamples(self):
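        """ groups the absolute abundances at the current tax level and applies the transform
        implemented by clr_transformed, assumed here to be a centred log-ratio (CLR) transform
        that puts the compositional counts on a real-valued scale """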
if self.abundance_raw_df is not None:
df = self.groupAbsoluteSamples()
if df is not None:
df2 = clr_transformed(df.astype('int', errors='ignore'))
return df2
else:
return None
else:
return None
def group_binary_patterns(self):
""" creates a binary pattern """
binary_abundance_df = self.getPresenceAbsenceDF(0)
        binary_abundance_df = binary_abundance_df.sort_values(by=list(binary_abundance_df.columns))
pattern_dict= {}
for idx1 in binary_abundance_df.index:
pattern = ' '.join(map(str, list(binary_abundance_df.loc[idx1,:])))
if pattern not in pattern_dict:
pattern_dict[pattern] = []
pattern_dict[pattern].append(idx1)
return pattern_dict
def reset(self):
""" resets options for displaying the graph (includes all species
that are in the sample) """
self.sample['masked'] = [False]*len(self.sample.index)
self.sample['colour'] = ['undefined']*len(self.sample.index)
def selectOfSample(self, indexes):
""" selects species given the indexes"""
index_set = set()
for idx in indexes:
i = list(self.sample[self.sample['masked'] == False].index)[idx]
index_set.add(i)
for ind in list(self.sample[self.sample['masked'] == False].index):
if ind not in index_set:
self.sample.at[ind, 'masked'] = True
return index_set
    def set_corr(self, corr_matrix, corr_signature):
        """ sets correlation variables """
        self.corr_matrix = corr_matrix
        self.corr_signature = corr_signature
def sortSample(self, key, ascending):
""" sorts sample by key """
try:
            self.sample = self.sample.sort_values(by=key, ascending=ascending)
except:
pass
def renewMasking(self, indices, colours_dict):
""" renews the masking and colours """
for idx in self.abundance_df.index:
if idx in indices:
self.abundance_df.loc[idx, 'masked'] = False
else:
self.abundance_df.loc[idx, 'masked'] = True
if idx in colours_dict:
self.abundance_df.loc[idx, 'colour'] = colours_dict[idx]
else:
self.abundance_df.loc[idx, 'colour'] = 'undefined'
def randomForestClassifier(self, train_cols, test_cols, targets, feature_selction_var, min_abundance_threshold, shuffle=False):
""" run random forest classification """
from sklearn.ensemble import RandomForestClassifier
#from sklearn.ensemble import RandomForestRegressor
#train = self.abundance_df.loc[:,train_cols] #train.as_matrix(cols)
train = self.abundance_df[self.abundance_df['masked']==False].loc[:,train_cols] #train.as_matrix(cols)
#test = self.abundance_df.loc[:,test_cols] #.as_matrix(test_cols)
test = self.abundance_df[self.abundance_df['masked']==False].loc[:,test_cols] #.as_matrix(test_cols)
#names = list(self.abundance_df.loc[:, 'species'])
names = list(self.abundance_df[self.abundance_df['masked']==False].loc[:, 'species'])
#most_common_species_set = set()
#for col in train_cols:
# sorted_series = self.abundance_df.loc[:, col].sort_values(ascending=False)[:100]
# most_common_species_set |= set(list(sorted_series.index))
#most_common_species_list = []
#for id0 in most_common_species_set:
# #print(max(self.abundance_df.loc[id0,train_cols]))
# if max(self.abundance_df.loc[id0,train_cols]) >= min_abundance_threshold:
# most_common_species_list.append(id0)
##print(len(most_common_species_list))
#most_common_species_set = set(most_common_species_list)
#train = train.loc[list(most_common_species_set),:]
#test = test.loc[list(most_common_species_set),:]
#names = list(self.abundance_df.loc[list(most_common_species_set),'species'])
#feature selection by variance
from sklearn.feature_selection import VarianceThreshold
sel = VarianceThreshold(threshold=(0.999 * (1 - 0.999)))
if feature_selction_var:
#ds1 = np.transpose(ds10.as_matrix())
#ds1 = sel.fit_transform(np.transpose(ds10.as_matrix()))
#ds2 = np.transpose(ds20.as_matrix())
#train = sel.fit_transform(np.transpose(train.as_matrix()))
train = sel.fit_transform(np.transpose(train.values))
#names = list(self.abundance_df.loc[:, 'species'].as_matrix()[sel.get_support()])
#names = list(self.abundance_df[self.abundance_df['masked']==False].loc[:, 'species'].as_matrix()[sel.get_support()])
names = list(self.abundance_df[self.abundance_df['masked']==False].loc[:, 'species'].values[sel.get_support()])
#test = sel.fit_transform(np.transpose(test.as_matrix()))
test = sel.fit_transform(np.transpose(test.values))
ds10 = np.asmatrix(train)[[i for i, j in enumerate(targets) if j == 0],:]
ds1 = np.transpose(sel.fit_transform(np.transpose(ds10)))
        else:
            #train = np.transpose(train.as_matrix())
            train = np.transpose(train.values)
            #test = np.transpose(test.as_matrix())
            test = np.transpose(test.values)
            # after the transpose rows are samples, so select the group-0 samples directly
            ds1 = train[[i for i, j in enumerate(targets) if j == 0], :]
if shuffle == 'index':
from random import shuffle
shuffle(names)
#rf = RandomForestClassifier(n_estimators=10)
target = targets
#group1 = list(self.abundance_df.loc[:,train_cols].columns[:target.count(0)])
group1 = list(self.abundance_df[self.abundance_df['masked']==False].loc[:,train_cols].columns[:target.count(0)])
#group2 = list(self.abundance_df.loc[:,train_cols].columns[target.count(0):])
group2 = list(self.abundance_df[self.abundance_df['masked']==False].loc[:,train_cols].columns[target.count(0):])
#rf = RandomForestRegressor(n_estimators=1000)#, class_weight="balanced")
rf = RandomForestClassifier(n_estimators=1000) # bootstrap=False
#, max_features=100)#, min_sample_leaf=50)
#rf = RandomForestRegressor(n_estimators=20, max_features=2)
#class_weight="balanced" #{class_label: weight}
#n_estimators=1000,
rf.fit(train, target)
#from sklearn.metrics import roc_auc_score
#for l in leaf:
#model = RandomForestRegressor(min_samples_split=2, max_depth=None, bootstrap=False, min_samples_leaf=2)
# #n_estimator=200, oob_score=True, min_samples_leaf=10,max_features=f,
#model.fit(train,target)
# #print("AUC - ROC : ")
# #print(roc_auc_score(target,model.oob_prediction_))
# #print(model.feature_importances_)
#from sklearn.ensemble import ExtraTreesClassifier
#model = ExtraTreesClassifier()
#model.fit(train, target)
from treeinterpreter import treeinterpreter as ti
prediction, bias, contributions = ti.predict(rf, np.array(train))
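        # treeinterpreter decomposes every prediction as bias + sum(feature contributions),
        # so each entry of `contributions` tells how strongly one taxon pushed the forest
        # towards a class for that sample; the per-taxon contributions are averaged below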
#for i in range(len(train)):
# j = 0
# # print(i)
# #print("\tBias (trainset mean)")
# #print(bias[i])
# # print(contributions[0][0])
# #for c, feature in sorted(zip(contributions[i],
# # names),
# # #self.abundance_df.index),
# # key=lambda x: -abs(x[0])):
# for c, feature in zip(contributions[i], list(self.abundance_df.index)):
# if c[0] != 0:
# #print feature, ':\t', "{:.2e}".format(c), '\t', self.abundance_df.loc[feature, 'species']
# if j <10:
# # print()'\t' + self.abundance_df.loc[feature, 'species'], '\t', "{:.2e}".format(c[0]))
# j += 1
totalc = np.mean(contributions, axis=0)
#from sklearn import model_selection
#from sklearn.model_selection import cross_val_score
#clf = RandomForestClassifier(n_estimators=10, max_depth=None, min_samples_split=2, random_state=0)
#scores = cross_val_score(clf, X, y)
##compare 2 groups of samples
prediction1, bias1, contributions1 = ti.predict(rf, np.array(ds1))
        mean_contri = [0 for i in range(len(names))]
        for s in range(len(ds1)):
            for i in range(len(names)):
                mean_contri[i] += contributions1[s][i][0]
        mean_contri = [x/len(ds1) for x in mean_contri]
names_list = []
#for c, org in sorted(zip(mean_contri, list(self.abundance_df.loc[:,'species'])), reverse=True):
for c, org in sorted(zip(mean_contri, names), reverse=True):
if c != 0:
#print(self.abundance_df.loc[i,group1])
#idx = self.abundance_df[self.abundance_df['species'] == org].index.tolist()[0]
                unmasked_df = self.abundance_df[self.abundance_df['masked']==False]
                idx = unmasked_df[unmasked_df['species'] == org].index.tolist()[0]
if shuffle:
#print(names.index(org))
#idx = list(self.abundance_df.index)[names.index(org)]
idx = list(self.abundance_df[self.abundance_df['masked']==False].index)[names.index(org)]
#maximum = max(self.abundance_df.loc[idx,group1 + group2])
maximum = max(self.abundance_df[self.abundance_df['masked']==False].loc[idx,group1 + group2])
#print(str(round(c, 3)) + '\t' + org + '\t' + str(round(maximum,3)))
names_list.append([round(c, 3), org, round(maximum,3)])
return names_list
def shape(self):
""" gets the shape(dimensions) of the dataframe """
return self.abundance_df.shape
def save_count_tables(self):
""" """
from pathlib import Path
path = Path(__file__).parent
if self.abundance_df is not None:
self.abundance_df[self.sample_names].to_csv(str(path) + '/relative_counts.csv')
#self.abundance_df[self.abundance_df['masked']==False].loc[:,self.sample_names].to_csv(str(path) + '/relative_counts.csv')
if self.abundance_raw_df is not None:
self.abundance_raw_df[self.sample_names].to_csv(str(path) + '/absolute_counts.csv')
#self.abundance_raw_df[self.abundance_df['masked']==False].loc[:,self.sample_names].to_csv(str(path) + '/absolute_counts.csv')
class MetaData():
#"class to "
def __init__(self, filename, sample_name, abundance_df):
self.abundance_df = abundance_df
#wb = openpyxl.load_workbook(filename)
wb = load_workbook(filename)
for name in wb.get_sheet_names():
sheet = wb.get_sheet_by_name(name)
#if sheet['B1'].value.upper() in sample_name.upper():
sample_names_metadata = []
#for cell in sheet.columns[1]:
for cell in list(wb.active.columns)[1]:
sample_names_metadata.append(cell.value)
meta_dict = {}
            for colnum in range(3, sheet.max_column+1):
key = sheet.cell(row=1, column=colnum).value.upper()
meta_dict[key] = []
                for rownum in range(2, sheet.max_row+1):
meta_dict[key].append(sheet.cell(row=rownum, column=colnum).value)
self.meta_df = pd.DataFrame(meta_dict, index=sample_names_metadata[1:])
def getCorrelationForMetaData(self, name, tax_level):
""" correlates species abundance with meta data """
grouped = self.abundance_df.groupAllSamples()
grouped.set_index(tax_level, drop=False, inplace=True)
samplenames = self.abundance_df.getSamplesList()
grouped_series = grouped.loc[name,samplenames]
meta_samples_df = | pd.DataFrame(columns=[], index=[]) | pandas.DataFrame |
#!/usr/bin/env python
# coding: utf-8
# In[1]:
import numpy as np
import pandas as pd
import matplotlib.pyplot as pl
get_ipython().run_line_magic('matplotlib', 'inline')
# Download LifeExpectancy.csv from the data201 course webpage, and read it into Python, skipping the necessary rows and reading the header. Make the country name be an index. Print the first few rows to ensure that you have it correct.
# In[2]:
# skipping the necessary rows and reading the header
life = | pd.read_csv("C:/Users/dangu/OneDrive/Desktop/data201/LifeExpectancy.csv", skiprows=3, parse_dates=True) | pandas.read_csv |
import numpy as np
import pandas as pd
from gym.utils import seeding
import gym
from gym import spaces
import matplotlib
from sklearn.preprocessing import MinMaxScaler
import copy
matplotlib.use('Agg')
import matplotlib.pyplot as plt
from dl_for_env_multi import call_model
# Global variables
PLANT_DIM = 1
EFF_PUMP = 0.9
EFF_ERD = 0.8
# FLOW_FEED = 1000
lookback_size = 5
REWARD_SCALING = 1e-2
class BWTPEnv(gym.Env):
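    """ Gym environment for a water treatment plant (BWTP): at each step the agent sets the
    feed pressure, a deep-learning surrogate (call_model) predicts the resulting treated
    flowrate over a lookback window, and the specific energy consumption is tracked against
    the plant's recorded operation. """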
metadata = {'render.modes': ['human']}
def __init__(self, df, day=lookback_size-1):
# super(StockEnv, self).__init__()
# date increment
self.day = day
self.df = copy.deepcopy(df)
# action_space normalization and the shape is PLANT_DIM
self.action_space = spaces.Box(low=0.0, high=10.0, shape=(PLANT_DIM,))
        # Shape = 8: [specific energy] + [MF turbidity, feed temperature, feed TDS, feed flowrate, feed pressure, CIP] + [treated flowrate]
self.observation_space = spaces.Box(low=0, high=np.inf, shape=(8,))
# load data from a pandas dataframe
self.data = copy.deepcopy(self.df.loc[self.day, :])
# termination
self.terminal = False
# save the total number of trades
self.trades = 0
        # initialize state
self.state = [0.0] + \
[self.data.MF_TURBIDITY] + \
[self.data.FEED_TEMPERATURE] + \
[self.data.FEED_TDS] + \
[self.data.FEED_FLOWRATE] + \
[self.data.FEED_PRESSURE] + \
[self.data.CIP] + \
[self.data.FLOWRATE]
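        # state layout by index: 0 = specific energy consumption (computed in change_pressure),
        # 1 = MF turbidity, 2 = feed temperature, 3 = feed TDS, 4 = feed flowrate,
        # 5 = feed pressure, 6 = CIP, 7 = treated flowrate (FLOWRATE)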
# initialize reward and cost
self.reward = 0
self.cost = 0
self.energy_difference=0
self.total_energy_difference = 0
self.total_reward = 0
self.actual_flowrate = 0
self.actual_pressure = 0
self.penalty = 0.0
#self.actual_energy = 0
self.optimize_energy = 0
# self.total_actual_energy = 0
self.total_optimize_energy = 0
self.rewardsum = 0
# memorize the total value, total rewards
self.energy_memory = []
self.rewards_memory = []
self.rewardsum_memory = []
self.energy_difference_memory = []
self.total_energy_difference_memory = []
self.action_container = self.df['FEED_PRESSURE'][0:lookback_size].to_list()
self.total_actual_energy = 0
# self.total_actual_energy = 1502.87059974546
self.action_memory = []
self.memory = []
self.first_step = True
def change_pressure(self, action):
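        # apply the pressure action: roll it into the lookback window of feed pressures, let the
        # surrogate model (call_model) re-predict the treated flowrate, then recompute the specific
        # energy consumption for both the actual and the adjusted operating point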
self.action_memory.append(action)
if self.first_step:
self.first_step = False
else:
self.action_container = np.roll(self.action_container, -1)
action = float(action)
self.action_container[-1] = action
actual_flowrate = self.state[7]
actual_pressure = self.state[5]
day = self.day
st = day - (lookback_size - 1)
en = st + (lookback_size-1)
_inputs= self.df.loc[st:en, :]
_inputs['FEED_PRESSURE'] = self.action_container
_flow_rate = _inputs.pop('FLOWRATE')
_inputs.pop('index')
prediction = call_model(_inputs.values.reshape(1, lookback_size, 6), _flow_rate.values[0:-1].reshape(1, lookback_size-1, 1))
self.state[7] = prediction # * 0.5
self.state[5] = float(action)
        # energy consumption calculation
act_p = actual_pressure # feed pressure
act_q = actual_flowrate # outflow(treated)
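        # The expressions below estimate the specific energy consumption (energy per unit of
        # treated water): high-pressure pump power on the feed stream minus the pressure energy
        # recovered from the concentrate, normalised by the treated flowrate. The factor 36
        # appears to convert bar * m^3/h into kW, and the constant 3 is assumed to be the
        # residual concentrate pressure in bar.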
actual_energy =\
((self.state[4] * act_p - EFF_PUMP * (act_p - 3)*(self.state[4]-act_q)) / 36 / EFF_ERD / act_q) + ((act_p - EFF_PUMP * (act_p - 3)) * (self.state[4] - act_q) / EFF_ERD / 36 /act_q)
self.state[0] = ((self.state[4] * self.state[5] - EFF_PUMP * (self.state[5] - 3) * (self.state[4] - self.state[7])) / 36 / EFF_ERD / self.state[7]) + ((self.state[5] - EFF_PUMP * (self.state[5] - 3)) * (self.state[4] - self.state[7]) / EFF_ERD / 36 /self.state[7])
if self.state[0] <0:
self.state[0] = actual_energy
self.state[5] = act_p
self.state[7] = act_q
if self.state[5] <2:
self.state[0] = actual_energy
self.state[5] = act_p
self.state[7] = act_q
self.memory.append([self.state[0], self.state[5], self.state[7], actual_energy])
#if 0.1 < self.state[0] < 0.6: # theoretically the range of SEC should be between 0.1 and 0.6
# self.state[5] = action # pressure
self.optimize_energy = self.state[0]
self.total_optimize_energy += self.optimize_energy
self.total_actual_energy += actual_energy
self.energy_difference = actual_energy- self.optimize_energy
# self.total_energy_difference = self.total_self.actual_energy - self.total_optimize_energy
if self.state[7] < 700:
self.penalty = (700 - self.state[7]) * 0.005
# update held shares
#self.state[index + PLANT_DIM + 1] += min(available_amount, action)
# # update transaction costs
self.cost += self.state[0]*10
self.trades += 1
def step(self, actions):
# actions is a list of floats of length=1
self.terminal = self.day >= len(self.df.index.unique()) - 1
if self.terminal:
#plt.plot(self.energy_memory, 'r')
#plt.savefig('account_value.png')
#plt.close()
# pd.DataFrame(np.array(self.action_memory)).to_csv('action_memory.csv')
end_energy = self.state[0]
self.total_energy_difference = self.total_actual_energy - self.total_optimize_energy
print("previous_total_energy:{}".format(self.energy_memory[0]))
print("end_energy:{}".format(end_energy))
print("total_actual_energy:{}".format(self.total_actual_energy))
print("total_optimize_energy:{}".format(self.total_optimize_energy))
print("total_energy_difference:{}".format(self.total_energy_difference))
df_total_value = pd.DataFrame( self.rewards_memory)
df_total_value.to_csv('reward_memory_3.csv')
df_total_value = pd.DataFrame(self.energy_difference_memory)
df_total_value.to_csv('energy_difference_memory_3.csv')
# df_total_value = pd.DataFrame(self.total_energy_difference)
# df_total_value.to_csv('total_energy_difference_2.csv')
df_total_value = pd.DataFrame(self.energy_memory)
df_total_value.to_csv('energy_memory_3.csv')
df_total_value = pd.DataFrame(self.total_energy_difference_memory)
df_total_value.to_csv('total_energy_difference_memory_3.csv')
df_total_value = pd.DataFrame(self.action_memory)
df_total_value.to_csv('action_memory_3.csv')
print("reward:{}".format(self.reward))
print("rewardsum:{}".format(self.rewardsum))
print("total_cost: ", self.cost)
print("total trades: ", self.trades)
df_rewards = | pd.DataFrame(self.rewards_memory) | pandas.DataFrame |
"""
Functions for comparing and visualizing model performance
"""
import os
import sys
import pdb
import pandas as pd
import numpy as np
import matplotlib
import logging
import json
from collections import OrderedDict
from atomsci.ddm.utils import datastore_functions as dsf
from atomsci.ddm.pipeline import mlmt_client_wrapper as mlmt_client_wrapper
from atomsci.ddm.pipeline import model_tracker as trkr
import atomsci.ddm.pipeline.model_pipeline as mp
#matplotlib.style.use('ggplot')
matplotlib.rc('xtick', labelsize=12)
matplotlib.rc('ytick', labelsize=12)
matplotlib.rc('axes', labelsize=12)
logging.basicConfig(format='%(asctime)-15s %(message)s')
nan = np.float32('nan')
client_wrapper = mlmt_client_wrapper.MLMTClientWrapper(ds_client=dsf.config_client())
client_wrapper.instantiate_mlmt_client()
#------------------------------------------------------------------------------------------------------------------
def get_collection_datasets(collection_name):
"""
Returns a list of training (dataset_key, bucket) tuples for models in the given collection.
"""
model_filter = {}
#models = list(trkr.get_full_metadata(model_filter, client_wrapper,
# collection_name=collection_name))
#if models == []:
# print("No matching models returned")
# return
#else:
# print("Found %d matching models" % len(models))
dataset_set = set()
models = trkr.get_metadata(model_filter, client_wrapper,
collection_name=collection_name)
for i, metadata_dict in enumerate(models):
if i % 10 == 0:
print("Looking at model %d" % i)
dataset_key = metadata_dict['ModelMetadata']['TrainingDataset']['dataset_key']
bucket = metadata_dict['ModelMetadata']['TrainingDataset']['bucket']
dataset_set.add((dataset_key, bucket))
return sorted(dataset_set)
#------------------------------------------------------------------------------------------------------------------
def extract_collection_perf_metrics(collection_name, output_dir, pred_type='regression'):
"""
Obtain list of training datasets with models in the given collection. Get performance metrics for
models on each dataset and save them as CSV files in the given output directory.
"""
datasets = get_collection_datasets(collection_name)
os.makedirs(output_dir, exist_ok=True)
for dset_key, bucket in datasets:
dset_perf_df = get_training_perf_table(dset_key, bucket, collection_name, pred_type=pred_type)
dset_perf_file = '%s/%s_%s_model_perf_metrics.csv' % (output_dir, os.path.basename(dset_key).replace('.csv', ''), collection_name)
dset_perf_df.to_csv(dset_perf_file, index=False)
print('Wrote file %s' % dset_perf_file)
#------------------------------------------------------------------------------------------------------------------
def get_training_perf_table(dataset_key, bucket, collection_name, pred_type='regression', other_filters = {}):
"""
    Load performance metrics from the model tracker for all models saved in the model tracker DB under
    a given collection that were trained against a particular dataset, and return a table of their
    training parameters and train/valid/test scores, sorted by the validation-set metric.
"""
model_filter = {"ModelMetadata.TrainingDataset.dataset_key" : dataset_key,
"ModelMetadata.TrainingDataset.bucket" : bucket,
"ModelMetrics.TrainingRun.label" : "best",}
model_filter.update(other_filters)
print("Finding models trained on %s dataset %s" % (bucket, dataset_key))
models = list(trkr.get_full_metadata(model_filter, client_wrapper,
collection_name=collection_name))
if models == []:
print("No matching models returned")
return
else:
print("Found %d matching models" % len(models))
model_uuid_list = []
model_type_list = []
max_epochs_list = []
learning_rate_list = []
dropouts_list = []
layer_sizes_list = []
featurizer_list = []
splitter_list = []
rf_estimators_list = []
rf_max_features_list = []
rf_max_depth_list = []
xgb_learning_rate_list = []
xgb_gamma_list = []
best_epoch_list = []
max_epochs_list = []
subsets = ['train', 'valid', 'test']
score_dict = {}
for subset in subsets:
score_dict[subset] = []
if pred_type == 'regression':
metric_type = 'r2_score'
else:
metric_type = 'roc_auc_score'
for metadata_dict in models:
model_uuid = metadata_dict['model_uuid']
#print("Got metadata for model UUID %s" % model_uuid)
# Get model metrics for this model
metrics_dicts = metadata_dict['ModelMetrics']['TrainingRun']
#print("Got %d metrics dicts for model %s" % (len(metrics_dicts), model_uuid))
if len(metrics_dicts) < 3:
print("Got no or incomplete metrics for model %s, skipping..." % model_uuid)
continue
# TODO: get_full_metadata() seems to ignore label='best' constraint; below is workaround
#if len(metrics_dicts) > 3:
# raise Exception('Got more than one set of best epoch metrics for model %s' % model_uuid)
subset_metrics = {}
for metrics_dict in metrics_dicts:
if metrics_dict['label'] == 'best':
subset = metrics_dict['subset']
subset_metrics[subset] = metrics_dict['PredictionResults']
model_uuid_list.append(model_uuid)
model_params = metadata_dict['ModelMetadata']['ModelParameters']
model_type = model_params['model_type']
model_type_list.append(model_type)
featurizer = model_params['featurizer']
featurizer_list.append(featurizer)
split_params = metadata_dict['ModelMetadata']['SplittingParameters']['Splitting']
splitter_list.append(split_params['splitter'])
dataset_key = metadata_dict['ModelMetadata']['TrainingDataset']['dataset_key']
if model_type == 'NN':
nn_params = metadata_dict['ModelMetadata']['NNSpecific']
max_epochs_list.append(nn_params['max_epochs'])
best_epoch_list.append(nn_params['best_epoch'])
learning_rate_list.append(nn_params['learning_rate'])
layer_sizes_list.append(','.join(['%d' % s for s in nn_params['layer_sizes']]))
dropouts_list.append(','.join(['%.2f' % d for d in nn_params['dropouts']]))
rf_estimators_list.append(nan)
rf_max_features_list.append(nan)
rf_max_depth_list.append(nan)
xgb_learning_rate_list.append(nan)
xgb_gamma_list.append(nan)
if model_type == 'RF':
rf_params = metadata_dict['ModelMetadata']['RFSpecific']
rf_estimators_list.append(rf_params['rf_estimators'])
rf_max_features_list.append(rf_params['rf_max_features'])
rf_max_depth_list.append(rf_params['rf_max_depth'])
max_epochs_list.append(nan)
best_epoch_list.append(nan)
learning_rate_list.append(nan)
layer_sizes_list.append(nan)
dropouts_list.append(nan)
xgb_learning_rate_list.append(nan)
xgb_gamma_list.append(nan)
if model_type == 'xgboost':
xgb_params = metadata_dict['ModelMetadata']['xgbSpecific']
rf_estimators_list.append(nan)
rf_max_features_list.append(nan)
rf_max_depth_list.append(nan)
max_epochs_list.append(nan)
best_epoch_list.append(nan)
learning_rate_list.append(nan)
layer_sizes_list.append(nan)
dropouts_list.append(nan)
xgb_learning_rate_list.append(xgb_params["xgb_learning_rate"])
xgb_gamma_list.append(xgb_params["xgb_gamma"])
for subset in subsets:
score_dict[subset].append(subset_metrics[subset][metric_type])
perf_df = pd.DataFrame(dict(
model_uuid=model_uuid_list,
model_type=model_type_list,
dataset_key=dataset_key,
featurizer=featurizer_list,
splitter=splitter_list,
max_epochs=max_epochs_list,
best_epoch=best_epoch_list,
learning_rate=learning_rate_list,
layer_sizes=layer_sizes_list,
dropouts=dropouts_list,
rf_estimators=rf_estimators_list,
rf_max_features=rf_max_features_list,
rf_max_depth=rf_max_depth_list,
xgb_learning_rate = xgb_learning_rate_list,
xgb_gamma = xgb_gamma_list))
for subset in subsets:
metric_col = '%s_%s' % (metric_type, subset)
perf_df[metric_col] = score_dict[subset]
sort_metric = '%s_valid' % metric_type
perf_df = perf_df.sort_values(sort_metric, ascending=False)
return perf_df
# ------------------------------------------------------------------------------------------------------------------
def get_best_perf_table(col_name, metric_type, model_uuid=None, metadata_dict=None, PK_pipe=False):
"""
    Extract the training parameters and train/valid/test performance metrics for a single model,
    identified either by model_uuid (looked up in the given collection) or by a metadata_dict
    already retrieved from the model tracker. Returns a dict suitable for building a summary table.
"""
if metadata_dict is None:
if model_uuid is None:
print("Have to specify either metadatadict or model_uuid")
return
# Right now this subsetting of metrics does not work, so need to do manually below.
model_filter = {"model_uuid": model_uuid,
# "ModelMetrics.TrainingRun.label" : "best"
}
models = list(trkr.get_full_metadata(model_filter, client_wrapper, collection_name=col_name))
if models == []:
print("No matching models returned")
return
elif len(models) > 1:
print("Found %d matching models, which is too many" % len(models))
return
metadata_dict = models[0]
model_info = {}
model_info['model_uuid'] = metadata_dict['model_uuid']
#print("Got metadata for model UUID %s" % model_info['model_uuid'])
# Get model metrics for this model
metrics_dicts = metadata_dict['ModelMetrics']['TrainingRun']
# workaround for now
# metrics_dicts = [m for m in metrics_dicts if m['label'] == 'best']
# print("Got %d metrics dicts for model %s" % (len(metrics_dicts), model_uuid))
if len(metrics_dicts) < 3:
print("Got no or incomplete metrics for model %s, skipping..." % model_uuid)
return
if len(metrics_dicts) > 3:
metrics_dicts = [m for m in metrics_dicts if m['label'] == 'best']
# raise Exception('Got more than one set of best epoch metrics for model %s' % model_uuid)
model_params = metadata_dict['ModelMetadata']['ModelParameters']
model_info['model_type'] = model_params['model_type']
model_info['featurizer'] = model_params['featurizer']
split_params = metadata_dict['ModelMetadata']['SplittingParameters']['Splitting']
model_info['splitter'] = split_params['splitter']
if 'split_uuid' in split_params:
model_info['split_uuid'] = split_params['split_uuid']
model_info['dataset_key'] = metadata_dict['ModelMetadata']['TrainingDataset']['dataset_key']
model_info['bucket'] = metadata_dict['ModelMetadata']['TrainingDataset']['bucket']
if PK_pipe:
model_info['collection_name']=col_name
model_info['assay_name'] = metadata_dict['ModelMetadata']['TrainingDataset']['DatasetMetadata'][
'assay_category']
model_info['response_col'] = metadata_dict['ModelMetadata']['TrainingDataset']['DatasetMetadata'][
'response_col']
if model_info['featurizer'] == 'descriptors':
model_info['descriptor_type'] = metadata_dict['ModelMetadata']['DescriptorSpecific']['descriptor_type']
else:
model_info['descriptor_type'] = 'N/A'
try:
model_info['descriptor_type'] = metadata_dict['ModelMetadata']['DescriptorSpecific']['descriptor_type']
except:
model_info['descriptor_type'] = None
try:
model_info['num_samples'] = metadata_dict['ModelMetadata']['TrainingDataset']['DatasetMetadata']['num_row']
except:
tmp_df = dsf.retrieve_dataset_by_datasetkey(model_info['dataset_key'], model_info['bucket'])
model_info['num_samples'] = tmp_df.shape[0]
if model_info['model_type'] == 'NN':
nn_params = metadata_dict['ModelMetadata']['NNSpecific']
model_info['max_epochs'] = nn_params['max_epochs']
model_info['best_epoch'] = nn_params['best_epoch']
model_info['learning_rate'] = nn_params['learning_rate']
model_info['layer_sizes'] = ','.join(['%d' % s for s in nn_params['layer_sizes']])
model_info['dropouts'] = ','.join(['%.2f' % d for d in nn_params['dropouts']])
model_info['rf_estimators'] = nan
model_info['rf_max_features'] = nan
model_info['rf_max_depth'] = nan
if model_info['model_type'] == 'RF':
rf_params = metadata_dict['ModelMetadata']['RFSpecific']
model_info['rf_estimators'] = rf_params['rf_estimators']
model_info['rf_max_features'] = rf_params['rf_max_features']
model_info['rf_max_depth'] = rf_params['rf_max_depth']
model_info['max_epochs'] = nan
model_info['best_epoch'] = nan
model_info['learning_rate'] = nan
model_info['layer_sizes'] = nan
model_info['dropouts'] = nan
for metrics_dict in metrics_dicts:
subset = metrics_dict['subset']
metric_col = '%s_%s' % (metric_type, subset)
model_info[metric_col] = metrics_dict['PredictionResults'][metric_type]
metric_col = 'rms_score_%s' % subset
model_info[metric_col] = metrics_dict['PredictionResults']['rms_score']
return model_info
# ---------------------------------------------------------------------------------------------------------
def get_best_models_info(col_names, bucket, pred_type, PK_pipeline=False, output_dir='/usr/local/data',
shortlist_key=None, input_dset_keys=None, save_results=False, subset='valid',
metric_type=None, selection_type='max', other_filters={}):
"""
    For each dataset key (taken from input_dset_keys or from the shortlist dataset), find the best
    model in the given collection(s) according to metric_type on the chosen subset, and return a
    table of their parameters and performance metrics.
"""
top_models_info = []
if metric_type is None:
if pred_type == 'regression':
metric_type = 'r2_score'
else:
metric_type = 'roc_auc_score'
if other_filters is None:
other_filters = {}
if type(col_names) == str:
col_names = [col_names]
for col_name in col_names:
res_dir = os.path.join(output_dir, '%s_perf' % col_name)
plt_dir = '%s/Plots' % res_dir
os.makedirs(plt_dir, exist_ok=True)
res_files = os.listdir(res_dir)
suffix = '_%s_model_perf_metrics.csv' % col_name
if input_dset_keys is None:
dset_keys = dsf.retrieve_dataset_by_datasetkey(shortlist_key, bucket)
# Need to figure out how to handle an unknown column name for dataset_keys
if 'dataset_key' in dset_keys.columns:
dset_keys = dset_keys['dataset_key']
elif 'task_name' in dset_keys.columns:
dset_keys = dset_keys['task_name']
else:
dset_keys = dset_keys.values
else:
if type(input_dset_keys) == str:
dset_keys = [input_dset_keys]
else:
dset_keys = input_dset_keys
for dset_key in dset_keys:
dset_key = dset_key.strip()
try:
# TODO: get dataset bucket
model_filter = {"ModelMetadata.TrainingDataset.dataset_key": dset_key,
"ModelMetadata.TrainingDataset.bucket": bucket,
"ModelMetrics.TrainingRun.label": "best",
'ModelMetrics.TrainingRun.subset': subset,
'ModelMetrics.TrainingRun.PredictionResults.%s' % metric_type: [selection_type, None]
}
model_filter.update(other_filters)
try:
models = list(trkr.get_full_metadata(model_filter, client_wrapper, collection_name=col_name))
except Exception as e:
print("Error returned when querying the best model for dataset %s" % dset_key)
print(e)
continue
if models == []:
#print("No matching models returned for dset_key {0} and bucket {1}".format(dset_key, bucket))
continue
elif len(models) > 1:
print("Found %d models with the same %s value, saving all." % (len(models), metric_type))
for model in models:
res_df = pd.DataFrame.from_records(
[get_best_perf_table(col_name, metric_type, metadata_dict=model, PK_pipe=PK_pipeline)])
top_models_info.append(res_df)
except Exception as e:
print(e)
continue
if top_models_info == []:
print("No metadata found")
return
top_models_df = pd.concat(top_models_info, ignore_index=True)
selection_col = '%s_%s' % (metric_type, subset)
if selection_type == 'max':
top_models_df = top_models_df.loc[top_models_df.groupby('dataset_key')[selection_col].idxmax()]
else:
top_models_df = top_models_df.loc[top_models_df.groupby('dataset_key')[selection_col].idxmin()]
#TODO: Update res_dirs
if save_results:
if shortlist_key is not None:
# Not including shortlist key right now because some are weirdly formed and have .csv in the middle
top_models_df.to_csv(os.path.join(res_dir, 'best_models_metadata.csv'), index=False)
else:
            for dset_key in dset_keys:
                shortened_key = os.path.basename(dset_key).replace('.csv', '')
top_models_df.to_csv(os.path.join(res_dir, 'best_models_metadata_%s.csv' % shortened_key), index=False)
return top_models_df
'''
#---------------------------------------------------------------------------------------------------------
def get_best_grouped_models_info(collection='pilot_fixed', pred_type='regression', top_n=1, subset='test'):
"""
Get results for models in the given collection.
"""
res_dir = '/usr/local/data/%s_perf' % collection
plt_dir = '%s/Plots' % res_dir
os.makedirs(plt_dir, exist_ok=True)
res_files = os.listdir(res_dir)
suffix = '_%s_model_perf_metrics.csv' % collection
if pred_type == 'regression':
metric_type = 'r2_score'
else:
metric_type = 'roc_auc_score'
for res_file in res_files:
try:
if not res_file.endswith(suffix):
continue
res_path = os.path.join(res_dir, res_file)
res_df = pd.read_csv(res_path, index_col=False)
res_df['combo'] = ['%s/%s' % (m,f) for m, f in zip(res_df.model_type.values, res_df.featurizer.values)]
dset_name = res_file.replace(suffix, '')
datasets.append(dset_name)
res_df['dataset'] = dset_name
print(dset_name)
res_df = res_df.sort_values('{0}_{1}'.format(metric_type, subset), ascending=False)
res_df['model_type/feat'] = ['%s/%s' % (m,f) for m, f in zip(res_df.model_type.values, res_df.featurizer.values)]
res_df = res_df.sort_values('{0}_{1}'.format(metric_type, subset), ascending=False)
grouped_df = res_df.groupby('model_type/feat').apply(
lambda t: t.head(top_n)
).reset_index(drop=True)
top_grouped_models.append(grouped_df)
top_combo = res_df['model_type/feat'].values[0]
top_combo_dsets.append(top_combo + dset_name.lstrip('ATOM_GSK_dskey'))
top_score = res_df['{0}_{1}'.format(metric_type, subset)].values[0]
top_model_feat.append(top_combo)
top_scores.append(top_score)
num_samples.append(res_df['Dataset Size'][0])
'''
#------------------------------------------------------------------------------------------------------------------
def get_umap_nn_model_perf_table(dataset_key, bucket, collection_name, pred_type='regression'):
"""
Load performance metrics from model tracker for all NN models with the given prediction_type saved in
the model tracker DB under a given collection that were trained against a particular dataset. Show
parameter settings for UMAP transformer for models where they are available.
"""
model_filter = {"ModelMetadata.TrainingDataset.dataset_key" : dataset_key,
"ModelMetadata.TrainingDataset.bucket" : bucket,
"ModelMetrics.TrainingRun.label" : "best",
"ModelMetadata.ModelParameters.model_type" : "NN",
"ModelMetadata.ModelParameters.prediction_type" : pred_type
}
print("Finding models trained on %s dataset %s" % (bucket, dataset_key))
models = list(trkr.get_full_metadata(model_filter, client_wrapper,
collection_name=collection_name))
if models == []:
print("No matching models returned")
return
else:
print("Found %d matching models" % len(models))
model_uuid_list = []
learning_rate_list = []
dropouts_list = []
layer_sizes_list = []
featurizer_list = []
best_epoch_list = []
max_epochs_list = []
feature_transform_type_list = []
umap_dim_list = []
umap_targ_wt_list = []
umap_neighbors_list = []
umap_min_dist_list = []
subsets = ['train', 'valid', 'test']
if pred_type == 'regression':
sort_metric = 'r2_score'
metrics = ['r2_score', 'rms_score', 'mae_score']
else:
sort_metric = 'roc_auc_score'
metrics = ['roc_auc_score', 'prc_auc_score', 'matthews_cc', 'kappa', 'confusion_matrix']
score_dict = {}
for subset in subsets:
score_dict[subset] = {}
for metric in metrics:
score_dict[subset][metric] = []
for metadata_dict in models:
model_uuid = metadata_dict['model_uuid']
#print("Got metadata for model UUID %s" % model_uuid)
# Get model metrics for this model
metrics_dicts = metadata_dict['ModelMetrics']['TrainingRun']
#print("Got %d metrics dicts for model %s" % (len(metrics_dicts), model_uuid))
if len(metrics_dicts) < 3:
print("Got no or incomplete metrics for model %s, skipping..." % model_uuid)
continue
if len(metrics_dicts) > 3:
raise Exception('Got more than one set of best epoch metrics for model %s' % model_uuid)
subset_metrics = {}
for metrics_dict in metrics_dicts:
subset = metrics_dict['subset']
subset_metrics[subset] = metrics_dict['PredictionResults']
model_uuid_list.append(model_uuid)
model_params = metadata_dict['ModelMetadata']['ModelParameters']
model_type = model_params['model_type']
if model_type != 'NN':
continue
featurizer = model_params['featurizer']
featurizer_list.append(featurizer)
feature_transform_type = metadata_dict['ModelMetadata']['TrainingDataset']['feature_transform_type']
feature_transform_type_list.append(feature_transform_type)
nn_params = metadata_dict['ModelMetadata']['NNSpecific']
max_epochs_list.append(nn_params['max_epochs'])
best_epoch_list.append(nn_params['best_epoch'])
learning_rate_list.append(nn_params['learning_rate'])
layer_sizes_list.append(','.join(['%d' % s for s in nn_params['layer_sizes']]))
dropouts_list.append(','.join(['%.2f' % d for d in nn_params['dropouts']]))
for subset in subsets:
for metric in metrics:
score_dict[subset][metric].append(subset_metrics[subset][metric])
if 'UmapSpecific' in metadata_dict['ModelMetadata']:
umap_params = metadata_dict['ModelMetadata']['UmapSpecific']
umap_dim_list.append(umap_params['umap_dim'])
umap_targ_wt_list.append(umap_params['umap_targ_wt'])
umap_neighbors_list.append(umap_params['umap_neighbors'])
umap_min_dist_list.append(umap_params['umap_min_dist'])
else:
umap_dim_list.append(nan)
umap_targ_wt_list.append(nan)
umap_neighbors_list.append(nan)
umap_min_dist_list.append(nan)
perf_df = pd.DataFrame(dict(
model_uuid=model_uuid_list,
learning_rate=learning_rate_list,
dropouts=dropouts_list,
layer_sizes=layer_sizes_list,
featurizer=featurizer_list,
best_epoch=best_epoch_list,
max_epochs=max_epochs_list,
feature_transform_type=feature_transform_type_list,
umap_dim=umap_dim_list,
umap_targ_wt=umap_targ_wt_list,
umap_neighbors=umap_neighbors_list,
umap_min_dist=umap_min_dist_list ))
for subset in subsets:
for metric in metrics:
metric_col = '%s_%s' % (metric, subset)
perf_df[metric_col] = score_dict[subset][metric]
sort_by = '%s_valid' % sort_metric
perf_df = perf_df.sort_values(sort_by, ascending=False)
return perf_df
#------------------------------------------------------------------------------------------------------------------
def get_filesystem_perf_results(result_dir, hyper_id=None, dataset_name='GSK_Amgen_Combined_BSEP_PIC50',
pred_type='classification'):
"""
Retrieve model metadata and performance metrics stored in the filesystem from a hyperparameter search run.
"""
model_uuid_list = []
model_type_list = []
max_epochs_list = []
learning_rate_list = []
dropouts_list = []
layer_sizes_list = []
featurizer_list = []
splitter_list = []
rf_estimators_list = []
rf_max_features_list = []
rf_max_depth_list = []
best_epoch_list = []
model_score_type_list = []
feature_transform_type_list = []
umap_dim_list = []
umap_targ_wt_list = []
umap_neighbors_list = []
umap_min_dist_list = []
subsets = ['train', 'valid', 'test']
if pred_type == 'regression':
metrics = ['r2_score', 'r2_std', 'rms_score', 'mae_score']
else:
metrics = ['roc_auc_score', 'roc_auc_std', 'prc_auc_score', 'precision', 'recall_score',
'accuracy_score', 'npv', 'matthews_cc', 'kappa', 'cross_entropy', 'confusion_matrix']
score_dict = {}
for subset in subsets:
score_dict[subset] = {}
for metric in metrics:
score_dict[subset][metric] = []
score_dict['valid']['model_choice_score'] = []
# Navigate the results directory tree
model_list = []
metrics_list = []
if hyper_id is None:
# hyper_id not specified, so let's do all that exist under the given result_dir
subdirs = os.listdir(result_dir)
hyper_ids = list(set(subdirs) - {'logs', 'slurm_files'})
else:
hyper_ids = [hyper_id]
for hyper_id in hyper_ids:
topdir = os.path.join(result_dir, hyper_id, dataset_name)
if not os.path.isdir(topdir):
continue
# Next component of path is a random UUID added by hyperparam script for each run. Iterate over runs.
run_uuids = [fname for fname in os.listdir(topdir) if not fname.startswith('.')]
for run_uuid in run_uuids:
run_path = os.path.join(topdir, run_uuid, dataset_name)
# Next path component is a combination of various model parameters
param_dirs = os.listdir(run_path)
for param_str in param_dirs:
new_path = os.path.join(topdir, run_uuid, dataset_name, param_str)
model_dirs = [dir for dir in os.listdir(new_path) if not dir.startswith('.')]
model_uuid = model_dirs[0]
meta_path = os.path.join(new_path, model_uuid, 'model_metadata.json')
metrics_path = os.path.join(new_path, model_uuid, 'training_model_metrics.json')
if not (os.path.exists(meta_path) and os.path.exists(metrics_path)):
continue
with open(meta_path, 'r') as meta_fp:
meta_dict = json.load(meta_fp)
model_list.append(meta_dict)
with open(metrics_path, 'r') as metrics_fp:
metrics_dict = json.load(metrics_fp)
metrics_list.append(metrics_dict)
print("Found data for %d models under %s" % (len(model_list), result_dir))
for metadata_dict, metrics_dict in zip(model_list, metrics_list):
model_uuid = metadata_dict['model_uuid']
#print("Got metadata for model UUID %s" % model_uuid)
# Get list of prediction run metrics for this model
pred_dicts = metrics_dict['ModelMetrics']['TrainingRun']
#print("Got %d metrics dicts for model %s" % (len(pred_dicts), model_uuid))
if len(pred_dicts) < 3:
print("Got no or incomplete metrics for model %s, skipping..." % model_uuid)
continue
subset_metrics = {}
for metrics_dict in pred_dicts:
if metrics_dict['label'] == 'best':
subset = metrics_dict['subset']
subset_metrics[subset] = metrics_dict['PredictionResults']
model_uuid_list.append(model_uuid)
model_params = metadata_dict['ModelMetadata']['ModelParameters']
model_type = model_params['model_type']
model_type_list.append(model_type)
model_score_type = model_params['model_choice_score_type']
model_score_type_list.append(model_score_type)
featurizer = model_params['featurizer']
featurizer_list.append(featurizer)
split_params = metadata_dict['ModelMetadata']['SplittingParameters']['Splitting']
splitter_list.append(split_params['splitter'])
feature_transform_type = metadata_dict['ModelMetadata']['TrainingDataset']['feature_transform_type']
feature_transform_type_list.append(feature_transform_type)
if model_type == 'NN':
nn_params = metadata_dict['ModelMetadata']['NNSpecific']
max_epochs_list.append(nn_params['max_epochs'])
best_epoch_list.append(nn_params['best_epoch'])
learning_rate_list.append(nn_params['learning_rate'])
layer_sizes_list.append(','.join(['%d' % s for s in nn_params['layer_sizes']]))
dropouts_list.append(','.join(['%.2f' % d for d in nn_params['dropouts']]))
rf_estimators_list.append(nan)
rf_max_features_list.append(nan)
rf_max_depth_list.append(nan)
if model_type == 'RF':
rf_params = metadata_dict['ModelMetadata']['RFSpecific']
rf_estimators_list.append(rf_params['rf_estimators'])
rf_max_features_list.append(rf_params['rf_max_features'])
rf_max_depth_list.append(rf_params['rf_max_depth'])
max_epochs_list.append(nan)
best_epoch_list.append(nan)
learning_rate_list.append(nan)
layer_sizes_list.append(nan)
dropouts_list.append(nan)
for subset in subsets:
for metric in metrics:
score_dict[subset][metric].append(subset_metrics[subset][metric])
score_dict['valid']['model_choice_score'].append(subset_metrics['valid']['model_choice_score'])
if 'UmapSpecific' in metadata_dict['ModelMetadata']:
umap_params = metadata_dict['ModelMetadata']['UmapSpecific']
umap_dim_list.append(umap_params['umap_dim'])
umap_targ_wt_list.append(umap_params['umap_targ_wt'])
umap_neighbors_list.append(umap_params['umap_neighbors'])
umap_min_dist_list.append(umap_params['umap_min_dist'])
else:
umap_dim_list.append(nan)
umap_targ_wt_list.append(nan)
umap_neighbors_list.append(nan)
umap_min_dist_list.append(nan)
perf_df = pd.DataFrame(dict(
model_uuid=model_uuid_list,
model_type=model_type_list,
featurizer=featurizer_list,
splitter=splitter_list,
model_score_type=model_score_type_list,
feature_transform_type=feature_transform_type_list,
umap_dim=umap_dim_list,
umap_targ_wt=umap_targ_wt_list,
umap_neighbors=umap_neighbors_list,
umap_min_dist=umap_min_dist_list,
learning_rate=learning_rate_list,
dropouts=dropouts_list,
layer_sizes=layer_sizes_list,
best_epoch=best_epoch_list,
max_epochs=max_epochs_list,
rf_estimators=rf_estimators_list,
rf_max_features=rf_max_features_list,
rf_max_depth=rf_max_depth_list))
perf_df['model_choice_score'] = score_dict['valid']['model_choice_score']
for subset in subsets:
for metric in metrics:
metric_col = '%s_%s' % (metric, subset)
perf_df[metric_col] = score_dict[subset][metric]
sort_by = 'model_choice_score'
perf_df = perf_df.sort_values(sort_by, ascending=False)
return perf_df
#------------------------------------------------------------------------------------------------------------------
def get_summary_perf_tables(collection_names, filter_dict={}, prediction_type='regression'):
"""
    Load model parameters and performance metrics from the model tracker for all models saved in the model tracker DB under
    the given collection names. Generate a table for the given prediction type (regression or classification), listing:
dataset (assay name, target, parameter, key, bucket)
dataset size (train/valid/test/total)
number of training folds
model type (NN or RF)
featurizer
transformation type
metrics: r2_score, mae_score and rms_score for regression, or ROC AUC for classification
"""
collection_list = []
model_uuid_list = []
time_built_list = []
model_type_list = []
dataset_key_list = []
bucket_list = []
param_list = []
featurizer_list = []
desc_type_list = []
transform_list = []
dset_size_list = []
splitter_list = []
split_strategy_list = []
split_uuid_list = []
rf_estimators_list = []
rf_max_features_list = []
rf_max_depth_list = []
best_epoch_list = []
max_epochs_list = []
learning_rate_list = []
layer_sizes_list = []
dropouts_list = []
umap_dim_list = []
umap_targ_wt_list = []
umap_neighbors_list = []
umap_min_dist_list = []
if prediction_type == 'regression':
score_types = ['r2_score', 'mae_score', 'rms_score']
else:
# TODO: add more classification metrics later
score_types = ['roc_auc_score', 'prc_auc_score', 'accuracy_score', 'precision', 'recall_score', 'npv', 'matthews_cc']
subsets = ['train', 'valid', 'test']
score_dict = {}
ncmpd_dict = {}
for subset in subsets:
score_dict[subset] = {}
for score_type in score_types:
score_dict[subset][score_type] = []
ncmpd_dict[subset] = []
filter_dict['ModelMetadata.ModelParameters.prediction_type'] = prediction_type
for collection_name in collection_names:
print("Finding models in collection %s" % collection_name)
models = trkr.get_full_metadata(filter_dict, client_wrapper, collection_name=collection_name)
for i, metadata_dict in enumerate(models):
if i % 10 == 0:
print('Processing collection %s model %d' % (collection_name, i))
# Check that model has metrics before we go on
if not 'ModelMetrics' in metadata_dict:
continue
collection_list.append(collection_name)
model_uuid = metadata_dict['model_uuid']
model_uuid_list.append(model_uuid)
time_built = metadata_dict['time_built']
time_built_list.append(time_built)
#print("Got metadata for model UUID %s" % model_uuid)
model_params = metadata_dict['ModelMetadata']['ModelParameters']
model_type = model_params['model_type']
model_type_list.append(model_type)
featurizer = model_params['featurizer']
featurizer_list.append(featurizer)
if 'DescriptorSpecific' in metadata_dict['ModelMetadata']:
desc_type = metadata_dict['ModelMetadata']['DescriptorSpecific']['descriptor_type']
else:
desc_type = ''
desc_type_list.append(desc_type)
dataset_key = metadata_dict['ModelMetadata']['TrainingDataset']['dataset_key']
bucket = metadata_dict['ModelMetadata']['TrainingDataset']['bucket']
dataset_key_list.append(dataset_key)
bucket_list.append(bucket)
dset_metadata = metadata_dict['ModelMetadata']['TrainingDataset']['DatasetMetadata']
param = metadata_dict['ModelMetadata']['TrainingDataset']['response_cols'][0]
param_list.append(param)
transform_type = metadata_dict['ModelMetadata']['TrainingDataset']['feature_transform_type']
transform_list.append(transform_type)
split_params = metadata_dict['ModelMetadata']['SplittingParameters']['Splitting']
splitter_list.append(split_params['splitter'])
split_uuid_list.append(split_params['split_uuid'])
split_strategy = split_params['split_strategy']
split_strategy_list.append(split_strategy)
if 'UmapSpecific' in metadata_dict['ModelMetadata']:
umap_params = metadata_dict['ModelMetadata']['UmapSpecific']
umap_dim_list.append(umap_params['umap_dim'])
umap_targ_wt_list.append(umap_params['umap_targ_wt'])
umap_neighbors_list.append(umap_params['umap_neighbors'])
umap_min_dist_list.append(umap_params['umap_min_dist'])
else:
umap_dim_list.append(nan)
umap_targ_wt_list.append(nan)
umap_neighbors_list.append(nan)
umap_min_dist_list.append(nan)
if model_type == 'NN':
nn_params = metadata_dict['ModelMetadata']['NNSpecific']
max_epochs_list.append(nn_params['max_epochs'])
best_epoch_list.append(nn_params['best_epoch'])
learning_rate_list.append(nn_params['learning_rate'])
layer_sizes_list.append(','.join(['%d' % s for s in nn_params['layer_sizes']]))
dropouts_list.append(','.join(['%.2f' % d for d in nn_params['dropouts']]))
rf_estimators_list.append(nan)
rf_max_features_list.append(nan)
rf_max_depth_list.append(nan)
elif model_type == 'RF':
rf_params = metadata_dict['ModelMetadata']['RFSpecific']
rf_estimators_list.append(rf_params['rf_estimators'])
rf_max_features_list.append(rf_params['rf_max_features'])
rf_max_depth_list.append(rf_params['rf_max_depth'])
max_epochs_list.append(nan)
best_epoch_list.append(nan)
learning_rate_list.append(nan)
layer_sizes_list.append(nan)
dropouts_list.append(nan)
elif model_type == 'xgboost':
# TODO: Add xgboost parameters
max_epochs_list.append(nan)
best_epoch_list.append(nan)
learning_rate_list.append(nan)
layer_sizes_list.append(nan)
dropouts_list.append(nan)
rf_estimators_list.append(nan)
rf_max_features_list.append(nan)
rf_max_depth_list.append(nan)
else:
raise Exception('Unexpected model type %s' % model_type)
# Get model metrics for this model
metrics_dicts = metadata_dict['ModelMetrics']['TrainingRun']
#print("Got %d metrics dicts for model %s" % (len(metrics_dicts), model_uuid))
subset_metrics = {}
for metrics_dict in metrics_dicts:
if metrics_dict['label'] == 'best':
subset = metrics_dict['subset']
subset_metrics[subset] = metrics_dict['PredictionResults']
if split_strategy == 'k_fold_cv':
dset_size = subset_metrics['train']['num_compounds'] + subset_metrics['test']['num_compounds']
else:
dset_size = subset_metrics['train']['num_compounds'] + subset_metrics['valid']['num_compounds'] + subset_metrics['test']['num_compounds']
for subset in subsets:
subset_size = subset_metrics[subset]['num_compounds']
for score_type in score_types:
try:
score = subset_metrics[subset][score_type]
except KeyError:
score = float('nan')
score_dict[subset][score_type].append(score)
ncmpd_dict[subset].append(subset_size)
dset_size_list.append(dset_size)
col_dict = dict(
collection=collection_list,
model_uuid=model_uuid_list,
time_built=time_built_list,
model_type=model_type_list,
featurizer=featurizer_list,
descr_type=desc_type_list,
transformer=transform_list,
splitter=splitter_list,
split_strategy=split_strategy_list,
split_uuid=split_uuid_list,
umap_dim=umap_dim_list,
umap_targ_wt=umap_targ_wt_list,
umap_neighbors=umap_neighbors_list,
umap_min_dist=umap_min_dist_list,
layer_sizes=layer_sizes_list,
dropouts=dropouts_list,
learning_rate=learning_rate_list,
best_epoch=best_epoch_list,
max_epochs=max_epochs_list,
rf_estimators=rf_estimators_list,
rf_max_features=rf_max_features_list,
rf_max_depth=rf_max_depth_list,
dataset_bucket=bucket_list,
dataset_key=dataset_key_list,
dataset_size=dset_size_list,
parameter=param_list
)
perf_df = pd.DataFrame(col_dict)
for subset in subsets:
ncmpds_col = '%s_size' % subset
perf_df[ncmpds_col] = ncmpd_dict[subset]
for score_type in score_types:
metric_col = '%s_%s' % (subset, score_type)
perf_df[metric_col] = score_dict[subset][score_type]
return perf_df
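#------------------------------------------------------------------------------------------------------------------
# Hypothetical usage sketch for get_summary_perf_tables (the collection name below is a
# placeholder, and the call assumes the module-level model tracker client is configured):
# pull the regression summary and keep the best model per dataset, ranked by validation r2.
def best_model_per_dataset_example(collection_names=('pilot_models',)):
    perf_df = get_summary_perf_tables(list(collection_names), prediction_type='regression')
    best_idx = perf_df.groupby('dataset_key')['valid_r2_score'].idxmax()
    return perf_df.loc[best_idx].sort_values('valid_r2_score', ascending=False)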
#------------------------------------------------------------------------------------------------------------------
def get_summary_metadata_table(uuids, collections=None):
if isinstance(uuids,str):
uuids = [uuids]
if isinstance(collections,str):
collections = [collections] * len(uuids)
mlist = []
for idx,uuid in enumerate(uuids):
if collections is not None:
collection_name = collections[idx]
else:
collection_name = trkr.get_model_collection_by_uuid(uuid,client_wrapper)
model_meta = trkr.get_metadata_by_uuid(uuid,client_wrapper=client_wrapper,collection_name=collection_name)
mdl_params = model_meta['ModelMetadata']['ModelParameters']
data_params = model_meta['ModelMetadata']['TrainingDataset']
# Get model metrics for this model
metrics = pd.DataFrame(model_meta['ModelMetrics']['TrainingRun'])
metrics = metrics[metrics['label']=='best']
train_metrics = metrics[metrics['subset']=='train']['PredictionResults'].values[0]
valid_metrics = metrics[metrics['subset']=='valid']['PredictionResults'].values[0]
test_metrics = metrics[metrics['subset']=='test']['PredictionResults'].values[0]
# Try to name the model something intelligible in the table
name = 'NA'
if 'target' in data_params['DatasetMetadata']:
name = data_params['DatasetMetadata']['target']
if (name == 'NA') & ('assay_endpoint' in data_params['DatasetMetadata']):
name = data_params['DatasetMetadata']['assay_endpoint']
if (name == 'NA') & ('response_col' in data_params['DatasetMetadata']):
name = data_params['DatasetMetadata']['response_col']
if name != 'NA':
if 'param' in data_params['DatasetMetadata'].keys():
name = name + ' ' + data_params['DatasetMetadata']['param']
else:
name = 'unknown'
transform = 'None'
if 'transformation' in data_params['DatasetMetadata'].keys():
transform = data_params['DatasetMetadata']['transformation']
if mdl_params['featurizer'] == 'computed_descriptors':
featurizer = model_meta['ModelMetadata']['DescriptorSpecific']['descriptor_type']
else:
featurizer = mdl_params['featurizer']
try:
split_uuid = model_meta['ModelMetadata']['SplittingParameters']['Splitting']['split_uuid']
        except (KeyError, TypeError):
            split_uuid = 'Not Available'
if mdl_params['model_type'] == 'NN':
nn_params = model_meta['ModelMetadata']['NNSpecific']
minfo = {'Name': name,
'Transformation': transform,
'Model Type (Featurizer)': '%s (%s)' % (mdl_params['model_type'],featurizer),
'r^2 (Train/Valid/Test)': '%0.2f/%0.2f/%0.2f' % (train_metrics['r2_score'], valid_metrics['r2_score'], test_metrics['r2_score']),
'MAE (Train/Valid/Test)': '%0.2f/%0.2f/%0.2f' % (train_metrics['mae_score'], valid_metrics['mae_score'], test_metrics['mae_score']),
'RMSE(Train/Valid/Test)': '%0.2f/%0.2f/%0.2f' % (train_metrics['rms_score'], valid_metrics['rms_score'], test_metrics['rms_score']),
'Data Size (Train/Valid/Test)': '%i/%i/%i' % (train_metrics["num_compounds"],valid_metrics["num_compounds"],test_metrics["num_compounds"]),
'Splitter': model_meta['ModelMetadata']['SplittingParameters']['Splitting']['splitter'],
'Layer Sizes': nn_params['layer_sizes'],
'Optimizer': nn_params['optimizer_type'],
'Learning Rate': nn_params['learning_rate'],
'Dropouts': nn_params['dropouts'],
'Best Epoch (Max)': '%i (%i)' % (nn_params['best_epoch'],nn_params['max_epochs']),
'Collection': collection_name,
'UUID': model_meta['model_uuid'],
'Split UUID': split_uuid,
'Dataset Key': data_params['dataset_key']}
elif mdl_params['model_type'] == 'RF':
rf_params = model_meta['ModelMetadata']['RFSpecific']
minfo = {'Name': name,
'Transformation': transform,
'Model Type (Featurizer)': '%s (%s)' % (mdl_params['model_type'],featurizer),
'Max Depth': rf_params['rf_max_depth'],
                     'Max Features': rf_params['rf_max_features'],
'RF Estimators': rf_params['rf_estimators'],
'r^2 (Train/Valid/Test)': '%0.2f/%0.2f/%0.2f' % (train_metrics['r2_score'], valid_metrics['r2_score'], test_metrics['r2_score']),
'MAE (Train/Valid/Test)': '%0.2f/%0.2f/%0.2f' % (train_metrics['mae_score'], valid_metrics['mae_score'], test_metrics['mae_score']),
'RMSE(Train/Valid/Test)': '%0.2f/%0.2f/%0.2f' % (train_metrics['rms_score'], valid_metrics['rms_score'], test_metrics['rms_score']),
'Data Size (Train/Valid/Test)': '%i/%i/%i' % (train_metrics["num_compounds"],valid_metrics["num_compounds"],test_metrics["num_compounds"]),
'Splitter': model_meta['ModelMetadata']['SplittingParameters']['Splitting']['splitter'],
'Collection': collection_name,
'UUID': model_meta['model_uuid'],
'Split UUID': split_uuid,
'Dataset Key': data_params['dataset_key']}
        else:
            # Only NN and RF models are summarized in this table; skip other model types
            continue
mlist.append(OrderedDict(minfo))
return pd.DataFrame(mlist).set_index('Name').transpose()
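#------------------------------------------------------------------------------------------------------------------
# Hypothetical usage sketch for get_summary_metadata_table (UUIDs and collection name are
# placeholders): compare a handful of saved models side by side; each column of the
# returned frame describes one model.
def compare_models_example():
    uuids = ['00000000-0000-0000-0000-000000000001',
             '00000000-0000-0000-0000-000000000002']
    return get_summary_metadata_table(uuids, collections='pilot_models')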
#------------------------------------------------------------------------------------------------------------------
def get_model_datasets(collection_names, filter_dict={}):
"""
Query the model tracker for all models saved in the model tracker DB under the given collection names. Returns a dictionary
mapping (dataset_key,bucket) pairs to the list of model_uuids trained on the corresponding datasets.
"""
result_dict = {}
for collection_name in collection_names:
if collection_name.endswith('_metrics'):
continue
models = trkr.get_full_metadata(filter_dict, client_wrapper, collection_name=collection_name)
for i, metadata_dict in enumerate(models):
if i % 10 == 0:
print('Processing collection %s model %d' % (collection_name, i))
# Check that model has metrics before we go on
if not 'ModelMetrics' in metadata_dict:
continue
try:
model_uuid = metadata_dict['model_uuid']
dataset_key = metadata_dict['ModelMetadata']['TrainingDataset']['dataset_key']
bucket = metadata_dict['ModelMetadata']['TrainingDataset']['bucket']
result_dict.setdefault((dataset_key,bucket), []).append(model_uuid)
except KeyError:
continue
return result_dict
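#-------------------------------------------------------------------------------------------------------------------
# Hypothetical usage sketch for get_model_datasets: rank (dataset_key, bucket) pairs by how
# many models in the given collections were trained on them.
def count_models_per_dataset_example(collection_names):
    dset_map = get_model_datasets(collection_names)
    counts = [(dset_key, bucket, len(uuids)) for (dset_key, bucket), uuids in dset_map.items()]
    return sorted(counts, key=lambda row: row[2], reverse=True)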
#-------------------------------------------------------------------------------------------------------------------
def aggregate_predictions(datasets, bucket, col_names, client_wrapper, result_dir):
results = []
for dset_key, bucket in datasets:
for model_type in ['NN', 'RF']:
for split_type in ['scaffold', 'random']:
for descriptor_type in ['mordred_filtered', 'moe']:
model_filter = {"ModelMetadata.TrainingDataset.dataset_key" : dset_key,
"ModelMetadata.TrainingDataset.bucket" : bucket,
"ModelMetrics.TrainingRun.label" : "best",
'ModelMetrics.TrainingRun.subset': 'valid',
'ModelMetrics.TrainingRun.PredictionResults.r2_score': ['max', None],
'ModelMetadata.ModelParameters.model_type': model_type,
'ModelMetadata.ModelParameters.featurizer': 'descriptors',
'ModelMetadata.DescriptorSpecific.descriptor_type': descriptor_type,
'ModelMetadata.SplittingParameters.Splitting.splitter': split_type
}
for col_name in col_names:
model = list(trkr.get_full_metadata(model_filter, client_wrapper, collection_name=col_name))
if model:
model = model[0]
                        result_dir = '/usr/local/data/%s/%s' % (col_name, dset_key.replace('.csv', ''))
result_df = mp.regenerate_results(result_dir, metadata_dict=model)
result_df['dset_key'] = dset_key
actual_col = [col for col in result_df.columns if 'actual' in col][0]
pred_col = [col for col in result_df.columns if 'pred' in col][0]
result_df['error'] = abs(result_df[actual_col] - result_df[pred_col])
                        result_df['cind'] = pd.Categorical(result_df['dset_key']).codes
results.append(result_df)
results_df = pd.concat(results).reset_index(drop=True)
results_df.to_csv(os.path.join(result_dir, 'predictions_%s_%s_%s_%s.csv' % (dset_key, model_type, split_type, descriptor_type)), index=False)
for featurizer in ['graphconv', 'ecfp']:
model_filter = {"ModelMetadata.TrainingDataset.dataset_key" : dset_key,
"ModelMetadata.TrainingDataset.bucket" : bucket,
"ModelMetrics.TrainingRun.label" : "best",
'ModelMetrics.TrainingRun.subset': 'valid',
'ModelMetrics.TrainingRun.PredictionResults.r2_score': ['max', None],
'ModelMetadata.ModelParameters.model_type': model_type,
'ModelMetadata.ModelParameters.featurizer': featurizer,
'ModelMetadata.SplittingParameters.Splitting.splitter': split_type
}
for col_name in col_names:
model = list(trkr.get_full_metadata(model_filter, client_wrapper, collection_name=col_name))
if model:
model = model[0]
                        result_dir = '/usr/local/data/%s/%s' % (col_name, dset_key.replace('.csv', ''))
result_df = mp.regenerate_results(result_dir, metadata_dict=model)
result_df['dset_key'] = dset_key
actual_col = [col for col in result_df.columns if 'actual' in col][0]
pred_col = [col for col in result_df.columns if 'pred' in col][0]
result_df['error'] = abs(result_df[actual_col] - result_df[pred_col])
                        result_df['cind'] = pd.Categorical(result_df['dset_key']).codes
results.append(result_df)
results_df = | pd.concat(results) | pandas.concat |
import unittest
import pandas as pd
import numpy as np
from pandas.testing import assert_frame_equal
from msticpy.analysis.anomalous_sequence import sessionize
class TestSessionize(unittest.TestCase):
def setUp(self):
self.df1 = pd.DataFrame({"UserId": [], "time": [], "operation": []})
self.df1_with_ses_col = pd.DataFrame(
{"UserId": [], "time": [], "operation": [], "session_ind": []}
)
self.df1_sessionized = pd.DataFrame(
{
"UserId": [],
"time_min": [],
"time_max": [],
"operation_list": [],
"duration": [],
"number_events": [],
}
)
self.df2 = pd.DataFrame(
{
"UserId": [1, 1, 2, 3, 1, 2, 2],
"time": [
pd.to_datetime("2020-01-03 00:00:00"),
pd.to_datetime("2020-01-03 00:01:00"),
pd.to_datetime("2020-01-05 00:00:00"),
pd.to_datetime("2020-01-06 11:06:00"),
pd.to_datetime("2020-01-03 01:00:00"),
pd.to_datetime("2020-01-05 00:21:00"),
pd.to_datetime("2020-01-05 00:25:00"),
],
"operation": ["A", "B", "C", "A", "A", "B", "C"],
}
)
self.df2_with_ses_col_1 = pd.DataFrame(
{
"UserId": [1, 1, 1, 2, 2, 2, 3],
"time": [
pd.to_datetime("2020-01-03 00:00:00"),
| pd.to_datetime("2020-01-03 00:01:00") | pandas.to_datetime |
# Import packages
import os
import json
from datetime import datetime, timedelta
import requests
import jwt
import pandas as pd
from adobe_aam.helpers.headers import *
from adobe_aam.helpers.simplify import *
class Traits:
## https://experienceleague.adobe.com/docs/audience-manager/user-guide/api-and-sdk-code/rest-apis/aam-api-getting-started.html?lang=en#optional-api-query-parameters
@classmethod
def get_many(cls,
## These are all of the Adobe arguments
page=None,
pageSize=None,
sortBy=None,
descending=None,
search=None,
folderId=None,
permissions=None,
includePermissions=None,
ic=None,
dataSourceId=None,
includeDetails=None,
includeMetrics=None,
## These are all of the custom arguments
condense=None
):
## Traits endpoint
request_url = "https://aam.adobe.io/v1/traits/"
if ic:
request_url += "ic:{0}".format(str(ic))
## Required data
request_data = {"page":page,
"pageSize":pageSize,
"sortBy":sortBy,
"descending":descending,
"search":search,
"folderId":folderId,
"permissions":permissions,
"includePermissions":includePermissions,
"dataSourceId":dataSourceId,
"includeDetails":includeDetails,
"includeMetrics":includeMetrics}
## Make request
response = requests.get(url = request_url,
headers = Headers.createHeaders(),
params = request_data)
## Print error code if get request is unsuccessful
if response.status_code != 200:
print(response.content)
else:
## Make a dataframe out of the response.json object
df = pd.DataFrame(response.json())
## Change time columns from unix time to datetime
df['createTime'] = pd.to_datetime(df['createTime'], unit='ms')
df['updateTime'] = pd.to_datetime(df['updateTime'], unit='ms')
if ic:
# Bug: permissions column gets exploded and not sure why. low priority
df = df.drop(columns=['permissions'])
df = df.drop_duplicates()
## This begins the PDM section for additional functionality
## Simplify: limits columns
if condense:
df = simplify(df)
return df
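    ## Example usage (the IDs below are placeholders): fetch one page of traits from a
    ## folder, or a single trait with its delivery metrics, e.g.
    ##   Traits.get_many(folderId=12345, pageSize=100, condense=True)
    ##   Traits.get_one(sid=67890, includeMetrics=True)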
@classmethod
def get_one(cls,
## These are all of the Adobe arguments
sid,
includeMetrics=None,
## These are all of PDM's custom arguments
condense=None
):
## Traits endpoint for specific trait ID
request_url = "https://aam.adobe.io/v1/traits/{0}".format(str(sid))
## Required data
request_data = {"includeMetrics":includeMetrics}
## Make request
response = requests.get(url = request_url,
headers = Headers.createHeaders(),
params = request_data)
## Print error code if get request is unsuccessful
if response.status_code != 200:
print(response.content)
else:
## Make a dataframe out of the response.json object
df = pd.DataFrame.from_dict(response.json(), orient='index')
df = df.transpose()
## Change time columns from unix time to datetime
df['createTime'] = pd.to_datetime(df['createTime'], unit='ms')
df['updateTime'] = pd.to_datetime(df['updateTime'], unit='ms')
## This begins the PDM section for additional functionality
## Simplify: limits columns
if condense:
df = simplify(df)
return df
@classmethod
def get_limits(cls):
## Traits endpoint for limits
request_url = "https://aam.adobe.io/v1/traits/limits"
## Make request
response = requests.get(url = request_url,
headers = Headers.createHeaders())
## Print error code if get request is unsuccessful
if response.status_code != 200:
print(response.content)
else:
## Uses json_normalize function to make data prettier
json_response = json.loads(response.content.decode('utf-8'))
df = pd.json_normalize(json_response)
df = df.transpose()
return df
@classmethod
def create_from_csv(cls, file_path):
## Traits endpoint for create is old demdex URL
request_url = "https://api.demdex.com/v1/traits/"
## Required columns for API call
reqd_cols = | pd.DataFrame(columns=['traitType', 'name', 'dataSourceId', 'folderId', 'traitRule']) | pandas.DataFrame |
import torch
from torchtext.legacy import data
from torchtext.legacy.data import Field, BucketIterator
import pandas as pd
import os
from .NLPClassificationDataset import NLPClassificationDataset
class SSTDataset(NLPClassificationDataset):
def __init__(self, data_path, seed, batch_size, device, split_ratio=[0.7, 0.3]):
# super(QuoraDataset, self).__init__(data_path, seed, batch_size, device, split_ratio)
self.split_ratio = split_ratio
self.data_path = data_path
self.seed = seed
self.device = device
self.batch_size = batch_size
self.ranges = [0, 0.2, 0.4, 0.6, 0.8, 1.0]
self.labels = ['very negative', 'negative', 'neutral', 'positive', 'very positive']
self.label = [0, 1, 2, 3, 4]
self.seq_data = self.load_data(self.data_path)
def get_labels(self):
return self.labels
def load_data(self, sst_path):
sst_sents = pd.read_csv(os.path.join(sst_path, 'datasetSentences.txt'), delimiter='\t')
sst_phrases = pd.read_csv(os.path.join(sst_path, 'dictionary.txt'), delimiter='|', names=['phrase','phrase_id'])
sst_labels = pd.read_csv(os.path.join(sst_path, 'sentiment_labels.txt'), delimiter='|')
sst_sentences_phrases = pd.merge(sst_sents, sst_phrases, how='inner', left_on=['sentence'], right_on=['phrase'])
sst = pd.merge(sst_sentences_phrases, sst_labels, how='inner', left_on=['phrase_id'], right_on=['phrase ids'])[['sentence','sentiment values']]
sst['labels'] = | pd.cut(sst['sentiment values'], bins=self.ranges, labels=self.labels, include_lowest=True) | pandas.cut |
from dataclasses import dataclass
from functools import cache
from typing import Any, Dict, List, cast
import pandas
import pandas as pd
import requests
from tqdm.notebook import tqdm
URL = "https://api.census.gov/data.json"
# documentation: https://www2.census.gov/programs-surveys/acs/tech_docs/subject_definitions/
@dataclass(frozen=True)
class _DatasetsRes:
year: int
dataset: List[str]
is_aggregate: bool
title: str
description: str
@classmethod
def from_json(cls, jsonRes: Dict[str, Any]):
return cls(
cast(int, jsonRes.get("c_vintage")),
cast(List[str], jsonRes.get("c_dataset")),
cast(bool, jsonRes.get("c_isAggregate")),
cast(str, jsonRes.get("title")),
cast(str, jsonRes.get("description")),
)
def list_available_datasets() -> pd.DataFrame:
return __list_available_datasets()
@cache
def __list_available_datasets() -> pd.DataFrame:
res: Dict[str, Any] = requests.get(URL).json() # type: ignore
dataset_dicts: List[Dict[str, str]] = []
available_datasets: List[_DatasetsRes] = [
_DatasetsRes.from_json(datasetJson) for datasetJson in res["dataset"]
]
for dataset in cast(List[_DatasetsRes], tqdm(available_datasets)):
# these won't play nice with the tool
if not dataset.is_aggregate:
continue
dataset_type = ""
survey_type = ""
if len(dataset.dataset) > 0:
dataset_type = dataset.dataset[0]
if len(dataset.dataset) > 1:
survey_type = "/".join(dataset.dataset[1:])
dataset_dicts.append(
cast(
Dict[str, str],
dict(
year=dataset.year,
name=dataset.title,
description=dataset.description,
dataset=dataset_type,
survey=survey_type,
),
)
)
| pandas.set_option("display.max_colwidth", None) | pandas.set_option |
#!/usr/bin/env python
import pandas as pd
from Bio.SeqIO import parse
from pathlib import Path
# file io
def fasta2bed(sm):
with open(sm.input[0], 'r') as fhin, open(sm.output[0], 'w') as fhout:
for record in parse(fhin, "fasta"):
fhout.write("{}\t{}\t{}\n".format(record.id, 0, len(record)))
# statistics
def store_lengths(f, minlen=False):
"""
Reads lengths of contigs from fasta
:param f: fasta file
:param minlen: minimum length to store
:return: pandas DataFrame of lengths
"""
r = {}
for record in parse(f, "fasta"):
if minlen:
if len(record.seq) < minlen:
continue
r[record.id] = len(record.seq)
df = pd.DataFrame(r, index=["length"]).T
return df
def size_distribute(df, lengths=None):
"""
Calculates the distribution of an assembly in length bins
For each <l> in <lengths> calculate for contigs >= <l>:
n = the number of contigs
s = the total length in bp
p = the fraction of lengths / total assembly size
:param df: pandas DataFrame of lengths
:param lengths: intervals at which to calculate stats
:return: pandas DataFrame
"""
if lengths is None:
lengths = [0, 100, 250, 500, 1000, 2500, 5000, 10000, 15000, 20000,
25000, 30000, 35000, 40000, 45000, 50000, 75000, 100000,
125000, 150000, 200000, 250000, 500000]
size_dist = {}
for i, l in enumerate(lengths):
if len(df.loc[df.length >= l]) == 0:
break
n = len(df.loc[df.length >= l])
        s = int(df.loc[df.length >= l, "length"].sum())
        p = s / float(df["length"].sum()) * 100
size_dist[i] = {"min_length": l, "num_contigs": n, "total_length": s,
"%": p}
size_dist_df = pd.DataFrame(size_dist).T
size_dist_df = size_dist_df[
["min_length", "num_contigs", "total_length", "%"]]
return size_dist_df
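def _example_size_distribute():
    """Hypothetical sketch: three contigs of 800, 3000 and 12000 bp. For each length cutoff
    the returned table reports how many contigs remain, their summed length and the share
    of the total assembly they cover."""
    demo = pd.DataFrame({"length": [800, 3000, 12000]}, index=["c1", "c2", "c3"])
    return size_distribute(demo, lengths=[0, 1000, 10000])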
def calculate_n_stats(df):
"""
Calculates n50 and n90 statistics from a list of lengths
:param df: pandas DataFrame of contig lengths
:return:
"""
df.sort_values("length", inplace=True, ascending=True)
    size = int(df["length"].sum())
N50_length = N90_length = 0
cumulative = 0
for contig in df.index:
l = df.loc[contig, "length"]
cumulative += l
if float(cumulative) >= 0.5 * size and not N50_length:
N50_length = l
elif float(cumulative) >= 0.1 * size and not N90_length:
N90_length = l
return N50_length, N90_length
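def _example_calculate_n_stats():
    """Hypothetical sketch: contig lengths 2, 3, 4, 5 and 6 (total 20). Walking up the sorted
    lengths, the cumulative sum first reaches 10% of the total at length 2 and 50% at length 5,
    so the function returns (5, 2) as (N50_length, N90_length)."""
    demo = pd.DataFrame({"length": [2, 3, 4, 5, 6]}, index=list("abcde"))
    return calculate_n_stats(demo)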
def calculate_length_stats(df):
"""
Calculates length statistics from a dataframe
:param df: pandas DataFrame with contig lengths
:return:
"""
contigs = len(df)
    total_size = int(df["length"].sum())
min_length = int(df["length"].min())
max_length = int(df["length"].max())
avg_length = float(df["length"].mean())
median_length = float(df["length"].median())
return contigs, total_size, min_length, max_length, avg_length, median_length
def generate_stat_df(contig_lengths):
"""
Generates statistics from a dataframe of contig lengths
:param contig_lengths: pandas DataFrame
:return:
"""
index = ["contigs", "total_size_bp", "min_length", "max_length",
"avg_length", "median_length", "N50_length", "N90_length"]
stat_items = calculate_length_stats(contig_lengths)
n50_length, n90_length = calculate_n_stats(contig_lengths)
stat_df = pd.DataFrame([stat_items[0], stat_items[1], stat_items[2],
stat_items[3], stat_items[4], stat_items[5],
n50_length, n90_length], index=index).T
return stat_df
def stats(sm):
"""
Reads a list of assembly fasta files and generates statistics
:param sm: snakemake object
:return:
"""
stat_result = pd.DataFrame()
sizedist_result = pd.DataFrame()
for f in sm.input.fa:
p = Path(f)
name = p.parent.name
contig_lengths = store_lengths(f)
stat_df = generate_stat_df(contig_lengths)
size_dist = size_distribute(contig_lengths)
stat_df["assembly"] = [name]*len(stat_df)
size_dist["assembly"] = [name]*len(size_dist)
stat_result = | pd.concat([stat_result, stat_df]) | pandas.concat |
# RHR Online Anomaly Detection & Alert Monitoring
######################################################
# Author: <NAME> #
# Email: <EMAIL> #
# Location: Dept.of Genetics, Stanford University #
# Date: Oct 29 2020 #
######################################################
# uses raw heart rate and steps data (this stpes data doesn't have zeroes and need to innfer from hr datetime stamp)
## simple command
# python rhrad_online_alerts.py --heart_rate hr.csv --steps steps.csv
## full command
# python rhrad_online_alerts.py --heart_rate pbb_fitbit_oldProtocol_hr.csv --steps pbb_fitbit_oldProtocol_steps.csv --myphd_id pbb_RHR_online --figure1 pbb_RHR_online_anomalies.pdf --anomalies pbb_RHR_online_anomalies.csv --symptom_date 2020-01-10 --diagnosis_date 2020-01-11 --outliers_fraction 0.1 --random_seed 10 --baseline_window 744 --sliding_window 1 --alerts pbb_RHR_online_alerts.csv --figure2 pbb_RHR_online_alerts.pdf
# python rhrad_online_alerts.py --heart_rate pbb_fitbit_oldProtocol_hr.csv \
# --steps pbb_fitbit_oldProtocol_steps.csv \
# --myphd_id pbb_RHR_online \
# --figure1 pbb_RHR_online_anomalies.pdf \
# --anomalies pbb_RHR_online_anomalies.csv \
# --symptom_date 2020-01-10 --diagnosis_date 2020-01-11 \
# --outliers_fraction 0.1 \
# --random_seed 10 \
# --baseline_window 744 --sliding_window 1
# --alerts pbb_RHR_online_alerts.csv \
# --figure2 pbb_RHR_online_alerts.pdf
import warnings
warnings.filterwarnings('ignore')
import sys
import argparse
import pandas as pd
import numpy as np
import matplotlib
matplotlib.use('Agg') # Must be before importing matplotlib.pyplot or pylab!
import matplotlib.pyplot as plt
import matplotlib.dates as mdates
#%matplotlib inline
import seaborn as sns
from statsmodels.tsa.seasonal import seasonal_decompose
from sklearn.preprocessing import StandardScaler
from sklearn.covariance import EllipticEnvelope
####################################
parser = argparse.ArgumentParser(description='Find anomalies in wearables time-series data.')
parser.add_argument('--heart_rate', metavar='', help ='raw heart rate count with a header = heartrate')
parser.add_argument('--steps',metavar='', help ='raw steps count with a header = steps')
parser.add_argument('--myphd_id',metavar='', default = 'myphd_id', help ='user myphd_id')
parser.add_argument('--anomalies', metavar='', default = 'myphd_id_anomalies.csv', help='save predicted anomalies as a CSV file')
parser.add_argument('--figure1', metavar='', default = 'myphd_id_anomalies.pdf', help='save predicted anomalies as a PDF file')
parser.add_argument('--symptom_date', metavar='', default = 'NaN', help = 'symptom date with y-m-d format')
parser.add_argument('--diagnosis_date', metavar='', default = 'NaN', help='diagnosis date with y-m-d format')
parser.add_argument('--outliers_fraction', metavar='', type=float, default=0.1, help='fraction of outliers or anomalies')
parser.add_argument('--random_seed', metavar='', type=int, default=10, help='random seed')
parser.add_argument('--baseline_window', metavar='',type=int, default=744, help='baseline window is used for training (in hours)')
parser.add_argument('--sliding_window', metavar='',type=int, default=1, help='sliding window is used to slide the testing process each hour')
parser.add_argument('--alerts', metavar='', default = 'myphd_id_alerts.csv', help='save predicted anomalies as a CSV file')
parser.add_argument('--figure2', metavar='', default = 'myphd_id_alerts.pdf', help='save predicted anomalies as a PDF file')
args = parser.parse_args()
# as arguments
fitbit_oldProtocol_hr = args.heart_rate
fitbit_oldProtocol_steps = args.steps
myphd_id = args.myphd_id
myphd_id_anomalies = args.anomalies
myphd_id_figure1 = args.figure1
symptom_date = args.symptom_date
diagnosis_date = args.diagnosis_date
RANDOM_SEED = args.random_seed
outliers_fraction = args.outliers_fraction
baseline_window = args.baseline_window
sliding_window = args.sliding_window
myphd_id_alerts = args.alerts
myphd_id_figure2 = args.figure2
####################################
class RHRAD_online:
# Infer resting heart rate ------------------------------------------------------
def resting_heart_rate(self, heartrate, steps):
"""
This function uses heart rate and steps data to infer resting heart rate.
        It keeps only heart rate measurements taken while no steps were recorded in the preceding 12 minutes (including the current minute).
"""
# heart rate data
df_hr = pd.read_csv(fitbit_oldProtocol_hr)
df_hr = df_hr.set_index('datetime')
df_hr.index.name = None
df_hr.index = pd.to_datetime(df_hr.index)
# steps data
df_steps = pd.read_csv(fitbit_oldProtocol_steps)
df_steps = df_steps.set_index('datetime')
df_steps.index.name = None
df_steps.index = pd.to_datetime(df_steps.index)
# merge dataframes
#df_hr = df_hr.resample('1min').mean()
#df_steps = df_steps.resample('1min').mean()
        # added "outer" parameter for merge function to adjust the script to the new steps format
#df1 = pd.merge(df_hr, df_steps, left_index=True, right_index=True)
df1 = pd.merge(df_hr, df_steps, left_index=True, right_index=True, how="outer")
df1 = df1[pd.isnull(df1).any(axis=1)].fillna(0)
df1 = df1.rename(columns={"value_x": "heartrate", "value_y": "steps"})
df1 = df1.resample('1min').mean()
print(myphd_id)
        print("Data size (in minutes) before removing missing data")
print(df1.shape)
ax = df1.plot(figsize=(20,4), title=myphd_id)
ax.figure.savefig(myphd_id+'_data.png')
#print(df1)
df1 = df1.dropna(how='any')
df1 = df1.loc[df1['heartrate']!=0]
        print("Data size (in minutes) after removing missing data")
print(df1.shape)
#print(df1)
# define RHR as the HR measurements recorded when there were less than two steps taken during a rolling time window of the preceding 12 minutes (including the current minute)
df1['steps'] = df1['steps'].apply(np.int64)
df1['steps_window_12'] = df1['steps'].rolling(12).sum()
df1 = df1.loc[(df1['steps_window_12'] == 0 )]
print(df1['heartrate'].describe())
print(df1['steps_window_12'].describe())
# impute missing data
#df1 = df1.resample('1min').mean()
#df1 = df1.ffill()
print("No.of timesteps for RHR (in minutes)")
print(df1.shape)
return df1
# Pre-processing ------------------------------------------------------
def pre_processing(self, resting_heart_rate):
"""
        This function takes resting heart rate data, applies a moving average to smooth it,
        and downsamples to hourly resolution by taking the average values
"""
# smooth data
        df_nonas = resting_heart_rate.dropna()
df1_rom = df_nonas.rolling(400).mean()
# resample
df1_resmp = df1_rom.resample('1H').mean()
df2 = df1_resmp.drop(['steps'], axis=1)
df2 = df2.dropna()
print("No.of timesteps for RHR (in hours)")
print(df2.shape)
return df2
# Seasonality correction ------------------------------------------------------
def seasonality_correction(self, resting_heart_rate, steps):
"""
        This function takes the output of pre-processing and applies seasonality correction
"""
        sdHR_decomposition = seasonal_decompose(resting_heart_rate, model='additive', freq=1)
        sdSteps_decomposition = seasonal_decompose(steps, model='additive', freq=1)
sdHR_decomp = pd.DataFrame(sdHR_decomposition.resid + sdHR_decomposition.trend)
sdHR_decomp.rename(columns={sdHR_decomp.columns[0]:'heartrate'}, inplace=True)
sdSteps_decomp = pd.DataFrame(sdSteps_decomposition.resid + sdSteps_decomposition.trend)
sdSteps_decomp.rename(columns={sdSteps_decomp.columns[0]:'steps_window_12'}, inplace=True)
frames = [sdHR_decomp, sdSteps_decomp]
data = pd.concat(frames, axis=1)
#print(data)
#print(data.shape)
return data
# Train model and predict anomalies ------------------------------------------------------
def online_anomaly_detection(self, data_seasnCorec, baseline_window, sliding_window):
"""
# split the data, standardize the data inside a sliding window
# parameters - 1 month baseline window and 1 hour sliding window
# fit the model and predict the test set
"""
for i in range(baseline_window, len(data_seasnCorec)):
data_train_w = data_seasnCorec[i-baseline_window:i]
# train data normalization ------------------------------------------------------
data_train_w += 0.1
standardizer = StandardScaler().fit(data_train_w.values)
data_train_scaled = standardizer.transform(data_train_w.values)
data_train_scaled_features = pd.DataFrame(data_train_scaled, index=data_train_w.index, columns=data_train_w.columns)
data = pd.DataFrame(data_train_scaled_features)
data_1 = pd.DataFrame(data).fillna(0)
data_1['steps'] = '0'
data_1['steps_window_12'] = (data_1['steps'])
data_train_w = data_1
data_train.append(data_train_w)
data_test_w = data_seasnCorec[i:i+sliding_window]
# test data normalization ------------------------------------------------------
data_test_w += 0.1
data_test_scaled = standardizer.transform(data_test_w.values)
data_scaled_features = pd.DataFrame(data_test_scaled, index=data_test_w.index, columns=data_test_w.columns)
data = pd.DataFrame(data_scaled_features)
data_1 = pd.DataFrame(data).fillna(0)
data_1['steps'] = '0'
data_1['steps_window_12'] = (data_1['steps'])
data_test_w = data_1
data_test.append(data_test_w)
# fit the model ------------------------------------------------------
model = EllipticEnvelope(random_state=RANDOM_SEED,
support_fraction=0.7,
contamination=outliers_fraction).fit(data_train_w)
# predict the test set
preds = model.predict(data_test_w)
#preds = preds.rename(lambda x: 'anomaly' if x == 0 else x, axis=1)
dfs.append(preds)
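    # Note on the sliding-window setup: with the CLI defaults above (baseline_window=744
    # hours, i.e. roughly one month, and sliding_window=1 hour), each loop iteration refits
    # the EllipticEnvelope on the previous month of standardized hourly RHR and scores only
    # the next hour, so every prediction is judged against a recent baseline.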
# Merge predictions ------------------------------------------------------
def merge_test_results(self, data_test):
"""
Merge predictions
"""
# concat all test data (from sliding window) with their datetime index and others
data_test = pd.concat(data_test)
# merge predicted anomalies from test data with their corresponding index and other features
preds = pd.DataFrame(dfs)
preds = preds.rename(lambda x: 'anomaly' if x == 0 else x, axis=1)
data_test_df = pd.DataFrame(data_test)
data_test_df = data_test_df.reset_index()
data_test_preds = data_test_df.join(preds)
return data_test_preds
# Positive Anomalies -----------------------------------------------------------------
"""
Selects anomalies in positive direction and saves in a CSV file
"""
def positive_anomalies(self, data):
a = data.loc[data['anomaly'] == -1, ('index', 'heartrate')]
positive_anomalies = a[(a['heartrate']> 0)]
# Anomaly results
positive_anomalies['Anomalies'] = myphd_id
positive_anomalies.columns = ['datetime', 'std.rhr', 'name']
positive_anomalies.to_csv(myphd_id_anomalies, header=True)
return positive_anomalies
# Alerts ------------------------------------------------------
def create_alerts(self, anomalies, data, fitbit_oldProtocol_hr):
"""
        # creates alerts every 24 hours and sends them at 9 PM
        # visualises alerts
"""
# function to assign different alert names
# summarize hourly alerts
def alert_types(alert):
if alert['alerts'] >=6:
return 'RED'
elif alert['alerts'] >=1:
return 'YELLOW'
else:
return 'GREEN'
# summarize hourly alerts
#anomalies.columns = ['datetime', 'std.rhr', 'name']
anomalies = anomalies[['datetime']]
anomalies['datetime'] = pd.to_datetime(anomalies['datetime'], errors='coerce')
anomalies['alerts'] = 1
anomalies = anomalies.set_index('datetime')
anomalies = anomalies[~anomalies.index.duplicated(keep='first')]
anomalies = anomalies.sort_index()
alerts = anomalies.groupby(pd.Grouper(freq = '24H', base=21)).cumsum()
# apply alert_types function
alerts['alert_type'] = alerts.apply(alert_types, axis=1)
alerts_reset = alerts.reset_index()
#print(alerts_reset)
# save alerts
#alerts.to_csv(myphd_id_alerts, mode='a', header=True)
# summarize hourly alerts to daily alerts
daily_alerts = alerts_reset.resample('24H', on='datetime', base=21, label='right').count()
daily_alerts = daily_alerts.drop(['datetime'], axis=1)
#print(daily_alerts)
# function to assign different alert names
def alert_types(alert):
if alert['alert_type'] >=6:
return 'RED'
elif alert['alert_type'] >=1:
return 'YELLOW'
else:
return 'GREEN'
# apply alert_types function
daily_alerts['alert_type'] = daily_alerts.apply(alert_types, axis=1)
# merge missing 'datetime' with 'alerts' as zero aka GREEN
data1 = data[['index']]
data1['alert_type'] = 0
data1 = data1.rename(columns={"index": "datetime"})
data1['datetime'] = pd.to_datetime(data1['datetime'], errors='coerce')
data1 = data1.resample('24H', on='datetime', base=21, label='right').count()
data1 = data1.drop(data1.columns[[0,1]], axis=1)
data1 = data1.reset_index()
data1['alert_type'] = 0
data3 = pd.merge(data1, daily_alerts, on='datetime', how='outer')
data4 = data3[['datetime', 'alert_type_y']]
data4 = data4.rename(columns={ "alert_type_y": "alert_type"})
daily_alerts = data4.fillna("GREEN")
daily_alerts = daily_alerts.set_index('datetime')
daily_alerts = daily_alerts.sort_index()
# merge alerts with main data and pass 'NA' when there is a missing day instead of 'GREEN'
df_hr = pd.read_csv(fitbit_oldProtocol_hr)
df_hr['datetime'] = | pd.to_datetime(df_hr['datetime'], errors='coerce') | pandas.to_datetime |
import pandas as pd
import numpy as np
import re
import datetime as dt
import math
import geopandas as gpd
import h3 # h3 bins from uber
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.preprocessing import StandardScaler, minmax_scale, MinMaxScaler
from sklearn.cluster import KMeans
from sklearn.cluster import MeanShift
from sklearn.cluster import AgglomerativeClustering
from sklearn.neighbors import NearestCentroid
from sklearn.decomposition import PCA
from sklearn.linear_model import LinearRegression
from sklearn.preprocessing import PolynomialFeatures
import scipy.cluster.hierarchy as sch
import holidays
from fastai.vision.all import * # Needs latest version, and sometimes a restart of the runtime after the pip installs
from sklearn_extra.cluster import KMedoids
import json
from geopy.distance import geodesic
def clean_weather_data(df_weather):
'''
Fills the missing information by looking at the previous and the following existing values
    and then incrementally distributing the difference over the missing days.
This guarantees a smooth development of the weather data over time.
'''
    missing = df_weather[pd.isnull(df_weather).any(axis=1)].index
if len(missing) > 0:
for col in df_weather.columns[1:]:
before = df_weather.loc[missing[0]-1, col]
after = df_weather.loc[missing[-1]+1, col]
diff = after - before
for i in range(len(missing)):
df_weather.loc[missing[i], col] = before+diff/(len(missing)+1)*(i+1)
return df_weather
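def _example_clean_weather_data():
    """Hypothetical sketch: a single missing day between readings of 20.0 and 26.0 is filled
    with the evenly spaced value 23.0, so the series changes smoothly across the gap."""
    demo = pd.DataFrame({
        "Date": pd.date_range("2019-01-01", periods=3, freq="D"),
        "temperature_2m_above_ground": [20.0, np.nan, 26.0],
    })
    return clean_weather_data(demo)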
def add_weather_change(df_weather):
df_weather["change_water_atmosphere"] = 0
df_weather["change_temperature"] = 0
for row in range(df_weather.shape[0]):
if row == 0:
df_weather.loc[row, "change_water_atmosphere"] = 0
df_weather.loc[row, "change_temperature"] = 0
else:
df_weather.loc[row, "change_water_atmosphere"] = df_weather.loc[row, "precipitable_water_entire_atmosphere"] - df_weather.loc[row-1, "precipitable_water_entire_atmosphere"]
df_weather.loc[row, "change_temperature"] = df_weather.loc[row, "temperature_2m_above_ground"] - df_weather.loc[row-1, "temperature_2m_above_ground"]
return df_weather
def join_accident_to_weather(df_accident, df_weather):
'''
Left-joins the accident data to the weather data, resulting in a dataframe containing the weather information
for every day as well as the aggregated accidents.
'''
# Count accidents per day and leftjoin to weather dataframe
df_accident["date"] = df_accident["datetime"].apply(lambda x: x.date())
if type(df_weather.loc[0, "Date"]) is not dt.date:
df_weather["Date"] = df_weather["Date"].apply(lambda x: x.date())
accident_count = df_accident.groupby("date").count()["uid"].reset_index()
df_combined = df_weather.merge(accident_count[["date", "uid"]], left_on="Date", right_on="date", how='left')
# Fill NaNs with zeros
df_combined.fillna(value=0, inplace=True)
# Drop duplicate Date column
df_combined.drop("date", axis=1, inplace=True)
# Rename column
df_combined.rename(columns={"Date":"date", "uid":"accidents"}, inplace=True)
# Adding column with 1 for sundays and holidays, 0 for working days
df_combined["sun_holiday"] = df_combined["date"].apply(lambda x: 1 if (x.weekday() == 6) or (x in holidays.Kenya()) else 0)
# Change type to integer
df_combined["accidents"] = df_combined["accidents"].astype("int")
return df_combined
def scale_pca_weather(df_combined):
'''
Scaling and analysing the principal components of the weather data.
'''
# Scaling
mm_scaler = MinMaxScaler()
X_mm = df_combined[["precipitable_water_entire_atmosphere", "relative_humidity_2m_above_ground",
"specific_humidity_2m_above_ground", "temperature_2m_above_ground"]]
X_mm_scaled = mm_scaler.fit_transform(X_mm)
std_scaler = StandardScaler()
X_std = df_combined[["u_component_of_wind_10m_above_ground", "v_component_of_wind_10m_above_ground",
"change_water_atmosphere", "change_temperature"]]
X_std_scaled = std_scaler.fit_transform(X_std)
X_scaled = pd.DataFrame(np.concatenate((X_mm_scaled, X_std_scaled), axis=1), columns=["precipitable_water_entire_atmosphere", "relative_humidity_2m_above_ground",
"specific_humidity_2m_above_ground", "temperature_2m_above_ground", "u_component_of_wind_10m_above_ground", "v_component_of_wind_10m_above_ground",
"change_water_atmosphere", "change_temperature"])
# Principal componant analysis (PCA)
pca = PCA(n_components=0.99)
pca_decomposition = pca.fit(X_scaled)
X_pca = pca_decomposition.transform(X_scaled)
df_combined_pca = pd.DataFrame(X_pca)
df_combined_pca = df_combined_pca.join(df_combined[["date", "accidents", "sun_holiday"]])
return df_combined_pca
def split_combined(df_combined_pca, predict_period='2019_h2'):
if predict_period == '2019_h1':
X_train = df_combined_pca[df_combined_pca["date"] < dt.date(2019, 1, 1)][[0, 1, 2, 3, 4, "sun_holiday"]]
y_train = df_combined_pca[df_combined_pca["date"] < dt.date(2019, 1, 1)]["accidents"]
X_test = df_combined_pca[(df_combined_pca["date"] >= dt.date(2019, 1, 1)) & (df_combined_pca["date"] < dt.date(2019, 7, 1))][[0, 1, 2, 3, 4, "sun_holiday"]]
elif predict_period == '2019_h2':
X_train = df_combined_pca[df_combined_pca["date"] < dt.date(2019, 7, 1)][[0, 1, 2, 3, 4, "sun_holiday"]]
y_train = df_combined_pca[df_combined_pca["date"] < dt.date(2019, 7, 1)]["accidents"]
X_test = df_combined_pca[(df_combined_pca["date"] >= dt.date(2019, 7, 1)) & (df_combined_pca["date"] < dt.date(2020, 1, 1))][[0, 1, 2, 3, 4, "sun_holiday"]]
return X_train, X_test, y_train
def predict_poly(X_train, X_test, y_train):
poly = PolynomialFeatures(degree=4)
X_train_poly = poly.fit_transform(X_train.drop("sun_holiday", axis=1))
lin_poly = LinearRegression()
lin_poly.fit(X_train_poly, y_train)
X_test_poly = poly.transform(X_test.drop("sun_holiday", axis=1))
return lin_poly.predict(X_test_poly)
def predict_accidents_on_weather(df_accident, df_weather, predict_period='2019_h1'):
'''
Takes the raw data and returns the number of predicted road traffic accidents for every day in the predict period:
First half 2019 : (predict_period='2019_h1')
Second half of 2019 : (predict_period='2019_h2')
'''
df_weather = clean_weather_data(df_weather)
df_weather = add_weather_change(df_weather)
df_combined = join_accident_to_weather(df_accident, df_weather)
df_combined_pca = scale_pca_weather(df_combined)
X_train, X_test, y_train = split_combined(df_combined_pca, predict_period=predict_period)
y_pred = predict_poly(X_train, X_test, y_train)
y_pred = [0 if i < 0 else i for i in y_pred]
return y_pred
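# Hypothetical end-to-end sketch: the weather file name below is a placeholder for the
# competition's daily GFS export; the helper returns one predicted accident count per day
# of the chosen half-year.
def _example_predict_accidents():
    df_accident = create_crash_df('../Inputs/Train.csv')
    df_weather = pd.read_csv('../Inputs/Weather_Nairobi_Daily_GFS.csv', parse_dates=['Date'])
    return predict_accidents_on_weather(df_accident, df_weather, predict_period='2019_h1')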
def create_crash_df(train_file = '../Inputs/Train.csv'):
'''
loads crash data from input folder into dataframe
'''
crash_df = pd.read_csv(train_file, parse_dates=['datetime'])
return crash_df
def create_temporal_features(df, date_column='datetime'):
'''
    Add the set of temporal features to the df based on the datetime column. Returns the dataframe.
'''
dict_windows = {1: "00-03", 2: "03-06", 3: "06-09", 4: "09-12", 5: "12-15",
6: "15-18", 7: "18-21", 8: "21-24"}
dict_months = {1: "Jan", 2: "Feb", 3: "Mar", 4: "Apr", 5: "May", 6: "Jun",
7: "Jul", 8: "Aug", 9: "Sep", 10: "Oct", 11: "Nov", 12: "Dec"}
rainy_season = ["Mar", "Apr", "May", "Oct", "Nov", "Dec"]
df["time"] = df[date_column].apply(lambda x: x.time())
df["time_window"] = df[date_column].apply(lambda x: math.floor(x.hour / 3) + 1)
df["time_window_str"] = df["time_window"].apply(lambda x: dict_windows.get(x))
df["day"] = df[date_column].apply(lambda x: x.day)
df["weekday"] = df[date_column].apply(lambda x: x.weekday())
df["month"] = df[date_column].apply(lambda x: dict_months.get(x.month))
df["half_year"] = df[date_column].apply(lambda x: 1 if x.month<7 else 2)
df["rainy_season"] = df["month"].apply(lambda x: 1 if (x in rainy_season) else 0)
df["year"] = df[date_column].apply(lambda x: x.year)
df["date_trunc"] = df[date_column].apply(lambda x: x.date()) #this does something strange that breaks the code if higher
df["holiday"] = df["date_trunc"].apply(lambda x: 1 if (x in holidays.Kenya()) else 0)
df["weekday"] = df["date_trunc"].apply(lambda x: 7 if (x in holidays.Kenya()) else x.weekday())
return df
def drop_temporal(df):
'''
helper function to remove all the granular temporal columns once they have been used for generating other columns for joining.
'''
df = df.drop(["day", "time_window", "time_window_str", "time_window_str", "month", "year", "weekday", "rainy_season", "date_trunc", "time", "half_year", "holiday"], axis=1)
return df
def split_accident_df(data, strategy, test_size=0.3, random_state=42):
'''
Splits the data set into a train and a test set.
strategy:
random = splits off random indices, using test_size and random_state parameters
year_2019 = splits the days of 2019 off into a test set
percentage_month = splits off the last days of every month to the test set according to the test_size
2nd_half_2018 = oversamples the months from July to December 2018 by about 33%
'''
if strategy == "random":
data = data.sample(frac=1, random_state=random_state).reset_index().drop("index", axis=1)
split_at = round(data.shape[0] * test_size)
data_train = data.iloc[split_at:, :]
data_test = data.iloc[:split_at, :]
elif strategy == "year_2019":
data_train = data[data["datetime"] < "2019-01-01"]
data_test = data[data["datetime"] >= "2019-01-01"]
elif strategy == "percentage_month":
split_at = round(30 * (1-test_size))
data_train = data.loc[data["day"] <= split_at]
data_test = data.loc[data["day"] > split_at]
elif strategy == "2nd_half_2018":
train_samples = round(data.shape[0] * (1-test_size))
test_samples = round(data.shape[0] * test_size)
data_train = data.sample(n=train_samples, weights="half_year", random_state=random_state)
data_test = data.sample(n=test_samples, weights="half_year", random_state=random_state)
return data_train, data_test
def outlier_removal(crash_df, filter=0.00):
if filter == 'hex_bin':
crash_df = assign_hex_bin(crash_df)
hex_bin_filter = ['867a45067ffffff', '867a45077ffffff', '867a4511fffffff',
'867a4512fffffff', '867a45147ffffff', '867a4515fffffff',
'867a45177ffffff', '867a45817ffffff', '867a4584fffffff',
'867a4585fffffff', '867a458dfffffff', '867a458f7ffffff',
'867a45a8fffffff', '867a45b0fffffff', '867a45b17ffffff',
'867a45b67ffffff', '867a45b77ffffff', '867a6141fffffff',
'867a614d7ffffff', '867a616b7ffffff', '867a6304fffffff',
'867a632a7ffffff', '867a63307ffffff', '867a6331fffffff',
'867a6360fffffff', '867a63667ffffff', '867a6396fffffff',
'867a656c7ffffff', '867a65797ffffff', '867a6e18fffffff',
'867a6e1b7ffffff', '867a6e4c7ffffff', '867a6e517ffffff',
'867a6e59fffffff', '867a6e5a7ffffff', '867a6e5b7ffffff',
'867a6e657ffffff', '867a6e737ffffff', '867a6e797ffffff',
'867a6e79fffffff', '867a6e7b7ffffff', '867a6ecf7ffffff',
'867a6ed47ffffff', '867a6ed97ffffff', '867a6eda7ffffff' ]
crash_df = crash_df.loc[~crash_df['h3_zone_6'].isin(hex_bin_filter)]
else:
'''filters top and bottom quantiles of data based on filter input'''
crash_df = crash_df.loc[crash_df['latitude'] < crash_df['latitude'].quantile(1-filter)]
crash_df = crash_df.loc[crash_df['latitude'] > crash_df['latitude'].quantile(filter)]
crash_df = crash_df.loc[crash_df['longitude'] < crash_df['longitude'].quantile(1-filter)]
crash_df = crash_df.loc[crash_df['longitude'] > crash_df['longitude'].quantile(filter)]
return crash_df
def assign_hex_bin(df,lat_column="latitude",lon_column="longitude", hexbin_resolution=6):
'''
    Takes lat/lon and creates a column with the H3 bin id at the requested resolution.
'''
df["h3_zone_{}".format(hexbin_resolution)] = df.apply(lambda x: h3.geo_to_h3(x[lat_column], x[lon_column], hexbin_resolution),axis=1)
return df
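def _example_assign_hex_bin():
    """Hypothetical sketch: a single crash location in central Nairobi gets its resolution-6
    H3 cell id (hexagons of roughly 36 km^2); every point falling inside the same hexagon
    ends up with the same id."""
    demo = pd.DataFrame({"latitude": [-1.2864], "longitude": [36.8172]})
    return assign_hex_bin(demo, hexbin_resolution=6)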
def plot_centroids(crash_data_df, centroids, cluster='cluster', labels = 'b'):
'''
    plots the crash data points from crash_data_df and overlays the ambulance locations from centroids.
Can be used in a loop by giving 'cluster' value as a parameter to label the chart with the cluster name.
'''
fig, axs = plt.subplots(figsize=(8, 5))
plt.scatter(x = crash_data_df['longitude'], y=crash_data_df['latitude'], s=1, label='Crash locations', c=labels )
plt.scatter(x = centroids[:,1], y=centroids[:,0], marker="x",
color='r',label='Ambulances locations',s=100)
    axs.set_title('Scatter plot : Ambulance locations vs Crash locations :'+cluster)
plt.xlabel("latitude")
plt.ylabel("longitude")
plt.legend()
plt.show()
def plot_dendrogram(df):
'''Use Dendrogram to determine an optimal number of clusters'''
plt.figure(figsize=(45,18))
    plt.title('Dendrogram')
plt.xlabel('time_buckets_days')
plt.ylabel('Euclidean distances')
dendrogram = sch.dendrogram(sch.linkage(df, method = 'ward'))
plt.show()
def calculate_TW_cluster(crash_df, method='MeanShift', verbose=0):
'''
Takes crash dataframe with temporal features added as input
Function to perform clustering of time windows and assign labels back to crash dataframe.
Output is dataframe with additional column for labels
    If verbosity is increased, information about the clusters is printed.
'''
group_stats = crash_df.groupby(['time_window_str', 'weekday'])
group_stats = group_stats.agg({'latitude': [np.mean, np.std],'longitude': [np.mean, np.std, 'count']})
# flatten out groupby object and name columns again
group_stats = group_stats.reset_index()
group_stats.columns = group_stats.columns.get_level_values(0)
group_stats.columns.values[[2,3,4,5,6]] = ['latitude_mean', 'latitude_std',
'longitude_mean', 'longitude_std', 'RTA_count']
X = group_stats.loc[:,['RTA_count']]#, 'latitude_mean', 'latitude_std','longitude_mean', 'longitude_std']]
scaler = StandardScaler()
scale_columns = ['latitude_mean', 'latitude_std','longitude_mean', 'longitude_std']
#X[scale_columns] = scaler.fit_transform(X[scale_columns])
if verbose > 5:
X1 = X.copy()
X1['RTA_count'] = minmax_scale(X1['RTA_count'])
plot_dendrogram(X1)
if method == 'MeanShift':
#X['RTA_count'] = minmax_scale(X['RTA_count'])
ms_model = MeanShift().fit(X)
labels = ms_model.labels_
elif method == 'GMM':
X['RTA_count'] = minmax_scale(X['RTA_count'])
gmm = GaussianMixture(n_components=4, verbose=verbose, random_state=42)
gmm.fit(X)
labels = gmm.predict(X)
else:
display('Select method "MeanShift" or "GMM"')
#return 'error'
labels = pd.DataFrame(labels,columns=['cluster'])
clustered_time_buckets = pd.concat([group_stats,labels], axis=1)
if verbose > 0:
display(clustered_time_buckets.groupby('cluster').agg({'RTA_count': ['count', np.sum]}))
if verbose > 1:
plot_TW_cluster(clustered_time_buckets)
crash_df = crash_df.merge(clustered_time_buckets[['time_window_str', 'weekday','cluster']],
how='left', on=['time_window_str', 'weekday'])
return crash_df
def plot_TW_cluster(clustered_time_buckets):
'''
Displays stripplot to show how different times of the week are assigned to TW clusters.
'''
tb_clusters = sns.FacetGrid(clustered_time_buckets,hue='cluster', height=5)
tb_clusters.map(sns.stripplot,'weekday', 'time_window_str', s=25, order = ['00-03', '03-06', '06-09', '09-12',
'12-15', '15-18', '18-21', '21-24'])
def assign_TW_cluster(weekday, time_window, holiday=0, strategy='baseline'):
'''
Can be used in a lambda function to return the time window cluster for a given day and time window.
e.g. crash_df["cluster"] = crash_df.apply(lambda x: return_TW_cluster(x.weekday, x.time_window_str) ,axis=1)
This is called by the function: create_cluster_feature.
'''
# baseline returns a single value for all time windows so there will only be a single placement set
if strategy == 'baseline':
return 'baseline'
# mean_shift_modified uses the results of the mean shift clustering
# and applies human approach to create 3 simple clusters
if strategy == 'mean_shift_modified':
if weekday == 7:
return 'off_peak'
elif weekday == 6:
return 'off_peak'
elif weekday in [0,1,2,3,4]:
if time_window in ["06-09"]:
return 'peak'
elif time_window in ["09-12", "12-15", "15-18", "18-21"]:
return 'middle'
elif time_window in ["00-03", "03-06", "21-24"]:
return 'off_peak'
elif weekday == 5:
if time_window in ["06-09", "12-15", "15-18", "18-21"]:
return 'middle'
elif time_window in ["00-03", "03-06", "21-24"]:
return 'off_peak'
elif time_window in ["09-12"]:
return 'peak'
# saturday_2 adds an additional cluster for middle of the day saturday
elif strategy == 'saturday_2':
if weekday == 7:
return 'off_peak'
elif weekday == 6:
return 'off_peak'
elif weekday in [0,1,2,3,4]:
if time_window in ["06-09"]:
return 'peak'
elif time_window in ["09-12", "12-15", "15-18", "18-21"]:
return 'middle'
elif time_window in ["00-03", "03-06", "21-24"]:
return 'off_peak'
elif weekday == 5:
if time_window in ["06-09", "12-15", "15-18", "18-21"]:
return 'saturday_busy'
elif time_window in ["00-03", "03-06", "21-24"]:
return 'off_peak'
elif time_window in ["09-12"]:
return 'saturday_busy'
# holiday_6 builds on saturday_2 and adds a new 'day' to the week for holidays
# and a separate cluster for sundays. Total of 6 clusters
elif strategy == 'holiday_6':
if weekday == 7:
return 'holiday'
elif weekday == 6:
return 'sunday'
elif weekday in [0,1,2,3,4]:
if time_window in ["06-09"]:
return 'peak'
elif time_window in ["09-12", "12-15", "15-18", "18-21"]:
return 'middle'
elif time_window in ["00-03", "03-06", "21-24"]:
return 'off_peak'
elif weekday == 5:
if time_window in ["06-09", "12-15", "15-18", "18-21"]:
return 'saturday_busy'
elif time_window in ["00-03", "03-06", "21-24"]:
return 'off_peak'
elif time_window in ["09-12"]:
return 'saturday_busy'
# has holidays but uses off peak for holidays and sundays
elif strategy == 'holiday_simple':
if weekday == 7:
return 'off_peak_day'
elif weekday == 6:
return 'off_peak_day'
elif weekday in [0,1,2,3,4]:
if time_window in ["06-09"]:
return 'peak'
elif time_window in ["09-12", "12-15", "15-18", "18-21"]:
return 'middle'
elif time_window in ["00-03", "03-06", "21-24"]:
return 'off_peak'
elif weekday == 5:
if time_window in ["06-09", "12-15", "15-18", "18-21"]:
return 'saturday_busy'
elif time_window in ["00-03", "03-06", "21-24"]:
return 'off_peak'
elif time_window in ["09-12"]:
return 'saturday_busy'
    # off_peak_split treats holidays and Sundays alike, splitting each into busy and off-peak windows
elif strategy == 'off_peak_split':
if weekday == 7:
if time_window in ["06-09", "09-12", "12-15", "15-18", "18-21"]:
return 'sunday_busy'
elif time_window in ["00-03", "03-06", "21-24"]:
return 'off_peak'
elif weekday == 6:
if time_window in ["06-09", "09-12", "12-15", "15-18", "18-21"]:
return 'sunday_busy'
elif time_window in ["00-03", "03-06", "21-24"]:
return 'off_peak'
elif weekday in [0,1,2,3,4]:
if time_window in ["06-09"]:
return 'peak'
elif time_window in ["09-12", "12-15", "15-18", "18-21"]:
return 'middle'
elif time_window in ["00-03", "03-06", "21-24"]:
return 'off_peak'
elif weekday == 5:
if time_window in ["06-09", "12-15", "15-18", "18-21"]:
return 'saturday_busy'
elif time_window in ["00-03", "03-06", "21-24"]:
return 'off_peak'
elif time_window in ["09-12"]:
return 'saturday_busy'
# Weekend_day is an attempt to improve based on the loss scores of the model
elif strategy == 'weekend_day':
if weekday == 7:
if time_window in ["06-09", "09-12", "12-15", "15-18", "18-21"]:
return 'weekend_busy'
elif time_window in ["00-03", "03-06", "21-24"]:
return 'off_peak'
elif weekday == 6:
if time_window in ["06-09", "09-12", "12-15", "15-18", "18-21"]:
return 'weekend_busy'
elif time_window in ["00-03", "03-06", "21-24"]:
return 'off_peak'
elif weekday in [0,1,2,3,4]:
if time_window in ["06-09"]:
return 'peak'
elif time_window in ["09-12", "12-15", "15-18", "18-21"]:
return 'busy'
elif time_window in ["00-03", "03-06", "21-24"]:
return 'off_peak'
elif weekday == 5:
if time_window in ["06-09", "12-15", "15-18", "18-21"]:
return 'weekend_busy'
elif time_window in ["00-03", "03-06", "21-24"]:
return 'off_peak'
elif time_window in ["09-12"]:
return 'weekend_busy'
    # no_cluster returns an individual cluster name for each weekday, time window and holiday combination
elif strategy == 'no_cluster':
return (str(weekday)+str(time_window)+str(holiday))
elif strategy == 'time_window':
return time_window
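# Hedged usage sketch for assign_TW_cluster(): the expected labels below follow directly from the
# 'mean_shift_modified' branch above (weekday convention: 0-4 weekdays, 5 Saturday, 6 Sunday, 7 holiday).
def _demo_assign_TW_cluster():
    assert assign_TW_cluster(0, '06-09', strategy='mean_shift_modified') == 'peak'
    assert assign_TW_cluster(5, '09-12', strategy='mean_shift_modified') == 'peak'
    assert assign_TW_cluster(6, '12-15', strategy='mean_shift_modified') == 'off_peak'
    return 'ok'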
def create_cluster_feature(crash_df, strategy='baseline', verbose=0):
'''
Function takes crash df and creates new column with tw cluster labels.
If verbose is increased, the time window clusters will be visualised.
'''
crash_df['cluster'] = crash_df.apply(lambda x: assign_TW_cluster(weekday=x.weekday, time_window=x.time_window_str, strategy=strategy) ,axis=1)
if verbose > 0:
print(f'{crash_df.cluster.nunique()} clusters created')
if verbose > 1:
tb_clusters = sns.FacetGrid(crash_df,hue='cluster', height=5)
tb_clusters.map(sns.stripplot,'weekday', 'time_window_str', s=20,
order = ['00-03', '03-06', '06-09', '09-12',
'12-15', '15-18', '18-21', '21-24'],
label = 'Time Window Clusters')
return crash_df
def create_cluster_centroids(crash_df_with_cluster, test_df, verbose=0, method='k_means', lr=3e-2, n_epochs=400, batch_size=50):
if method == 'k_means':
centroids_dict = create_k_means_centroids(crash_df_with_cluster, verbose=verbose)
elif method == 'agglomerative':
centroids_dict = create_AgglomerativeClustering_centroids(crash_df_with_cluster, verbose=verbose)
elif method == 'gradient_descent':
centroids_dict = create_gradient_descent_centroids(crash_df_with_cluster, test_df, verbose=verbose,
lr=lr, n_epochs=n_epochs, batch_size=batch_size)
elif method == 'k_medoids':
centroids_dict = create_k_medoids_centroids(crash_df_with_cluster, verbose=verbose)
elif method == 'baseline':
centroids_dict = create_baseline_centroids(crash_df_with_cluster, verbose=verbose)
if verbose > 0:
print(f'{len(centroids_dict)} placement sets created')
return centroids_dict
def create_baseline_centroids(crash_df_with_cluster, verbose=0):
if verbose > 0:
print('using star grid for placement')
centroids_dict = {}
for i in crash_df_with_cluster.cluster.unique():
data_slice = crash_df_with_cluster.query('cluster==@i')
lat_centroid = list(data_slice.latitude.quantile(q=[1/5,2/5,3/5,4/5]))
lon_centroid = list(data_slice.longitude.quantile(q=[1/4,2/4,3/4]))
centroids=[(lat_centroid[1],lon_centroid[0]),(lat_centroid[2],lon_centroid[0]),
(lat_centroid[0],lon_centroid[1]),(lat_centroid[3],lon_centroid[1]),
(lat_centroid[1],lon_centroid[2]),(lat_centroid[2],lon_centroid[2])]
centroids_dict[i] = np.array(centroids).flatten()
if verbose > 5:
print(centroids)
return centroids_dict
def create_k_means_centroids(crash_df_with_cluster, verbose=0):
if verbose > 0:
print('using k-means clustering')
centroids_dict = {}
for i in crash_df_with_cluster.cluster.unique():
data_slice = crash_df_with_cluster.query('cluster==@i')
kmeans = KMeans(n_clusters=6, verbose=0, tol=1e-5, max_iter=500, n_init=20 ,random_state=42)
kmeans.fit(data_slice[['latitude','longitude']])
labels = kmeans.labels_
centroids = kmeans.cluster_centers_
centroids_dict[i] = centroids.flatten()
if verbose > 2:
plot_centroids(data_slice, centroids, cluster=i, labels=labels)
if verbose > 5:
print(centroids)
return centroids_dict
def create_k_medoids_centroids(crash_df_with_cluster, verbose=0):
if verbose > 0:
print('using k-medoids clustering')
centroids_dict = {}
for i in crash_df_with_cluster.cluster.unique():
data_slice = crash_df_with_cluster.query('cluster==@i')
kmedoids = KMedoids(n_clusters=6, init='k-medoids++', max_iter=500, random_state=42)
kmedoids.fit(data_slice[['latitude','longitude']])
labels = kmedoids.labels_
centroids = kmedoids.cluster_centers_
centroids_dict[i] = centroids.flatten()
if verbose > 2:
plot_centroids(data_slice, centroids, cluster=i, labels=labels)
if verbose > 5:
print(centroids)
return centroids_dict
def create_AgglomerativeClustering_centroids(crash_df_with_cluster, verbose=0):
if verbose > 0:
print('using agglomerative clustering')
centroids_dict = {}
for i in crash_df_with_cluster.cluster.unique():
data_slice = crash_df_with_cluster.query('cluster==@i')
hc = AgglomerativeClustering(n_clusters = 6, affinity = 'euclidean', linkage = 'ward')
y_predict = hc.fit_predict(data_slice[['latitude','longitude']])
clf = NearestCentroid()
clf.fit(data_slice[['latitude','longitude']], y_predict)
centroids = clf.centroids_
centroids_dict[i] = centroids.flatten()
if verbose > 5:
plot_centroids(data_slice, centroids, cluster=i)
if verbose > 14:
print(centroids)
return centroids_dict
def create_gradient_descent_centroids(crash_df_with_cluster, test_df, verbose=0, lr=3e-3, n_epochs=400, batch_size=50):
if verbose > 0:
print('using gradient descent clustering')
# Copy dataframes and standardize lat lon values on train and test set
scaler = StandardScaler()
crash_df_scaled = crash_df_with_cluster.copy()
test_df_scaled = test_df.copy()
crash_df_scaled[['latitude', 'longitude']] = scaler.fit_transform(crash_df_scaled[['latitude', 'longitude']])
test_df_scaled[['latitude', 'longitude']] = scaler.transform(test_df_scaled[['latitude', 'longitude']])
centroids_dict = {}
for i in crash_df_scaled.cluster.unique():
data_slice = crash_df_scaled.query('cluster==@i')
test_slice = test_df_scaled.query('cluster==@i')
train_locs = tensor(data_slice[['latitude', 'longitude']].values) # To Tensor
val_locs = tensor(test_slice[['latitude', 'longitude']].values) # To Tensor
# Load crash locs from train into a dataloader
batches = DataLoader(train_locs, batch_size=batch_size, shuffle=True)
# Set up ambulance locations
amb_locs = torch.randn(6, 2)*.5
amb_locs = amb_locs + tensor(data_slice.latitude.mean(), data_slice.longitude.mean())
amb_locs.requires_grad_()
# Set vars
lr=lr
n_epochs = n_epochs
# Store loss over time
train_losses = []
val_losses = []
# Training loop
for epoch in range(n_epochs):
# Run through batches
for crashes in batches:
loss = loss_fn(crashes, amb_locs) # Find loss for this batch of crashes
loss.backward() # Calc grads
amb_locs.data -= lr * amb_locs.grad.data # Update locs
amb_locs.grad = None # Reset gradients for next step
train_losses.append(loss.item())
if verbose > 2:
val_loss = loss_fn(val_locs, amb_locs)
val_losses.append(val_loss.item()) # Can remove as this slows things down
if verbose > 2 and epoch % 100 == 0: # show progress
print(f'Val loss for cluster {i}: {val_loss.item()}')
centroids = amb_locs.detach().numpy()
#show output
if verbose > 5:
plot_centroids(data_slice, centroids, cluster=i)
if verbose > 14:
print(centroids)
if verbose > 9:
plt.figure(num=None, figsize=(16, 10), dpi=80, facecolor='w', edgecolor='k')
plt.plot(train_losses, label='train_loss')
plt.plot(val_losses, c='red', label='val loss')
plt.legend()
#scale back to lat lon
centroids = scaler.inverse_transform(centroids)
centroids_dict[i] = centroids.flatten()
return centroids_dict
def loss_fn(crash_locs, amb_locs):
"""
Used for gradient descent model.
For each crash we find the dist to the closest ambulance, and return the mean of these dists.
"""
# Dists to first ambulance
dists_split = crash_locs - amb_locs[0]
dists = (dists_split[:,0]**2 + dists_split[:,1]**2)**0.5
min_dists = dists
for i in range(1, 6):
# Update dists so they represent the dist to the closest ambulance
dists_split = crash_locs-amb_locs[i]
dists = (dists_split[:,0]**2 + dists_split[:,1]**2)**0.5
min_dists = torch.min(min_dists, dists)
return min_dists.mean()
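# Hedged sanity check for loss_fn() on toy tensors: with all six ambulances at the origin and crashes
# at (0,0) and (1,1), the mean minimum distance should be sqrt(2) / 2 ≈ 0.7071. Illustrative only.
def _demo_loss_fn():
    crashes = torch.tensor([[0.0, 0.0], [1.0, 1.0]])
    ambulances = torch.zeros(6, 2)
    return loss_fn(crashes, ambulances)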
def centroid_to_submission(centroids_dict, date_start='2019-07-01', date_end='2020-01-01', tw_cluster_strategy='baseline', verbose=0):
'''Takes dictionary of clusters and centroids and creates a data frame in the format needed for submission'''
# Create Date range covering submission period set
dates = pd.date_range(date_start, date_end, freq='3h')
submission_df = pd.DataFrame({'date':dates})
submission_df = create_temporal_features(submission_df,'date')
submission_df["cluster"] = submission_df.apply(lambda x: assign_TW_cluster(x.weekday, x.time_window_str, strategy=tw_cluster_strategy) ,axis=1)
ambulance_columns = ['A0_Latitude', 'A0_Longitude', 'A1_Latitude','A1_Longitude', 'A2_Latitude', 'A2_Longitude',
'A3_Latitude', 'A3_Longitude', 'A4_Latitude', 'A4_Longitude', 'A5_Latitude', 'A5_Longitude']
    # Map each row's cluster label to its placement set (a single vectorised apply; no per-cluster loop needed)
    submission_df["placements"] = submission_df["cluster"].apply(lambda x: centroids_dict.get(x))
submission_df[ambulance_columns] = pd.DataFrame(submission_df.placements.tolist(), index=submission_df.index)
submission_df = submission_df.drop('placements', axis=1)
submission_df = drop_temporal(submission_df)
submission_df = submission_df.drop(["cluster"], axis=1)
if verbose > 0:
print('submission dataframe created')
return submission_df
def create_submission_csv(submission_df, crash_source, outlier_filter, tw_cluster_strategy, placement_method, path='../Outputs/', verbose=0):
'''Takes dataframe in submission format and outputs a csv file with matching name'''
    current_time = dt.datetime.now()
filename = f'{current_time.year}{current_time.month}{current_time.day}_{crash_source}_{outlier_filter}_{tw_cluster_strategy}_{placement_method}.csv'
submission_df.to_csv(path+filename,index=False)
if verbose > 0:
print(f'{filename} saved in {path}')
def score(train_placements_df, crash_df, test_start_date='2018-01-01', test_end_date='2019-12-31', verbose=0):
'''
Can be used to score the ambulance placements against a set of crashes.
Can be used on all crash data, train_df or holdout_df as crash_df.
'''
test_df = crash_df.loc[(crash_df.datetime >= test_start_date) & (crash_df.datetime <= test_end_date)]
if verbose > 0:
print(f'Data points in test period: {test_df.shape[0]}' )
total_distance = 0
for crash_date, c_lat, c_lon in test_df[['datetime', 'latitude', 'longitude']].values:
row = train_placements_df.loc[train_placements_df.date < crash_date].tail(1)
dists = []
for a in range(6):
dist = ((c_lat - row[f'A{a}_Latitude'].values[0])**2+(c_lon - row[f'A{a}_Longitude'].values[0])**2)**0.5
dists.append(dist)
total_distance += min(dists)
return total_distance
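# Hedged usage sketch for score(): a one-row placement frame (all six ambulances at one point) scored
# against two toy crashes. Column names follow the submission format used above; values are illustrative.
def _demo_score():
    placements = pd.DataFrame({'date': [pd.Timestamp('2019-01-01')]})
    for a in range(6):
        placements[f'A{a}_Latitude'] = -1.29
        placements[f'A{a}_Longitude'] = 36.82
    crashes = pd.DataFrame({'datetime': pd.to_datetime(['2019-01-02', '2019-01-03']),
                            'latitude': [-1.30, -1.28],
                            'longitude': [36.83, 36.81]})
    return score(placements, crashes, test_start_date='2019-01-01', test_end_date='2019-12-31')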
def import_uber_data():
    '''Imports the hourly travel times from the Uber Movement data.
    In addition, the hexclusters used by Uber in Nairobi are imported. '''
# Read the JSON file with the hexclusters
file = open('../Inputs/540_hexclusters.json',)
hexclusters = json.load(file)
file.close()
# Find the centroids of the hexbin clusters
cluster_id = []
longitude = []
latitude = []
for i in range(len(hexclusters['features'])):
coords = hexclusters['features'][i]['geometry']['coordinates'][0]
x = [long for long, lat in coords]
y = [lat for long, lat in coords]
x_c = sum(x) / len(x)
y_c = sum(y) / len(y)
cluster_id.append(hexclusters['features'][i]['properties']['MOVEMENT_ID'])
longitude.append(x_c)
latitude.append(y_c)
# Create DataFrame with hexcluster ids and the lat and long values of the centroids
global df_hexclusters
df_hexclusters = pd.DataFrame([cluster_id, longitude, latitude]).transpose()
df_hexclusters.columns = ['cluster_id', 'longitude', 'latitude']
df_hexclusters['cluster_id'] = df_hexclusters['cluster_id'].astype('int')
df_hexclusters = assign_hex_bin(df_hexclusters, 'latitude', 'longitude')
h3res = h3.h3_get_resolution(df_hexclusters.loc[0, 'h3_zone_6'])
# Read the travel times for weekdays
df_tt_hourly_wd = pd.read_csv('../Inputs/nairobi-hexclusters-2018-3-OnlyWeekdays-HourlyAggregate.csv')
    # Add lat and long values to the travel time data
global df_combined_wd
df_combined_wd = df_tt_hourly_wd.merge(df_hexclusters, how='left', left_on='sourceid', right_on='cluster_id')
df_combined_wd.drop(['cluster_id'], axis=1, inplace=True)
df_combined_wd = df_combined_wd.merge(df_hexclusters, how='left', left_on='dstid', right_on='cluster_id', suffixes=('_source', '_dst'))
df_combined_wd.drop(['cluster_id'], axis=1, inplace=True)
#df_combined_wd['dist_air'] = df_combined_wd[['latitude_source', 'longitude_source', 'latitude_dst', 'longitude_dst']].apply(lambda x: get_distance_air(x.latitude_source, x.longitude_source, x.latitude_dst, x.longitude_dst, h3res), axis=1)
#df_combined_wd['avg_speed'] = df_combined_wd['dist_air'] / df_combined_wd['mean_travel_time'] * 3600
# Get average speeds per hour
global avg_speeds_wd
#avg_speeds_wd = df_combined_wd.groupby('hod').mean()['avg_speed']
avg_speeds_wd = [32.309, 31.931, 33.079, 33.651, 32.329, 30.146, 25.374, 23.388, 24.028, 24.589, 23.937, 23.609,
23.485, 23.755, 23.506, 22.334, 20.371, 18.948, 20.007, 21.896, 25.091, 28.293, 29.963, 29.516]
# Read the travel times for weekends
df_tt_hourly_we = pd.read_csv('../Inputs/nairobi-hexclusters-2018-3-OnlyWeekends-HourlyAggregate.csv')
    # Add lat and long values to the travel time data
global df_combined_we
df_combined_we = df_tt_hourly_we.merge(df_hexclusters, how='left', left_on='sourceid', right_on='cluster_id')
df_combined_we.drop(['cluster_id'], axis=1, inplace=True)
df_combined_we = df_combined_we.merge(df_hexclusters, how='left', left_on='dstid', right_on='cluster_id', suffixes=('_source', '_dst'))
df_combined_we.drop(['cluster_id'], axis=1, inplace=True)
#df_combined_we['dist_air'] = df_combined_we[['latitude_source', 'longitude_source', 'latitude_dst', 'longitude_dst']].apply(lambda x: get_distance_air(x.latitude_source, x.longitude_source, x.latitude_dst, x.longitude_dst, h3res), axis=1)
#df_combined_we['avg_speed'] = df_combined_we['dist_air'] / df_combined_we['mean_travel_time'] * 3600
# Get average speeds per hour
global avg_speeds_we
#avg_speeds_we = df_combined_we.groupby('hod').mean()['avg_speed']
avg_speeds_we = [30.955, 31.295, 31.420, 31.653, 31.033, 30.927, 33.035, 31.679, 28.906, 26.834, 25.684, 24.879,
24.587, 23.887, 23.518, 24.960, 25.638, 26.112, 24.846, 23.837, 26.140, 28.012, 29.391, 29.532]
def get_metrics(coord_src, coord_dst, weekend, hour):
'''
Inputs:
* coord_src: H3 hexbin or coordinate as list or tuple of the origin of the trip
* coord_dst: H3 hexbin or coordinate as list or tuple of the destination of the trip
* weekend: 1 if weekend, 0 if weekday
* hour: Hour of the day as integer
    Output: Returns a dict with the five levels of metrics:
    * Zindi: Euclidean distance between latitude and longitude values (Zindi challenge)
    * Air: Air distance in kilometers
    * Road: Road distance in kilometers
    * Time: Driving time in minutes
    * Golden: Binary value: False if driving time is below the threshold ("Golden Hour"), True if above
'''
    if isinstance(coord_src, str):
        lat_src = h3.h3_to_geo(coord_src)[0]
        long_src = h3.h3_to_geo(coord_src)[1]
        h3res = h3.h3_get_resolution(coord_src)
    elif isinstance(coord_src, (list, tuple)):
        # `type(x) == list or tuple` was always truthy; isinstance checks both types correctly
        lat_src = coord_src[0]
        long_src = coord_src[1]
        h3res = 0
    if isinstance(coord_dst, str):
        lat_dst = h3.h3_to_geo(coord_dst)[0]
        long_dst = h3.h3_to_geo(coord_dst)[1]
    elif isinstance(coord_dst, (list, tuple)):
        lat_dst = coord_dst[0]
        long_dst = coord_dst[1]
metric = {}
# Zindi score
metric['Zindi'] = get_distance_zindi(lat_src, long_src, lat_dst, long_dst)
# Air distance
distance_air = get_distance_air(lat_src, long_src, lat_dst, long_dst, h3res)
metric['Air'] = distance_air
# Approximated road distance
detour_coef = 1.3 # Known as Henning- or Hanno-coefficient
metric['Road'] = distance_air * detour_coef
# Travel time from Uber movement data
travel_time = get_distance_time(lat_src, long_src, lat_dst, long_dst, weekend, hour, h3res)
metric['Time'] = travel_time
# 'Golden hour'-threshold classification
golden_hour = 60 # Minutes
metric['Golden'] = travel_time > golden_hour
return metric
def get_distance_zindi(lat_src, long_src, lat_dst, long_dst):
'''
    Returns the Euclidean distance between latitude and longitude values, as used in the Zindi score.
'''
return ((lat_src - lat_dst)**2 + (long_src - long_dst)**2) ** 0.5
def get_distance_air(lat_src, long_src, lat_dst, long_dst, h3res):
'''
    Returns the geodesic distance between two pairs of coordinates in km.
If a distance between two points within a single cluster has to be calculated,
the average distance of all possible distances within one cluster is returned.
'''
distance_air = geodesic((lat_src, long_src), (lat_dst, long_dst)).km
if distance_air == 0:
area = h3.hex_area(resolution = h3res)
radius = (area / math.pi) ** 0.5
distance_air = 128 / (45 * math.pi) * radius
return distance_air
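# Hedged numeric sketch of the within-cluster fallback above: for two points drawn uniformly from a disc
# of radius r, the expected distance is 128 * r / (45 * pi) ≈ 0.9054 * r. Resolution 6 is illustrative only.
def _demo_within_hex_distance(h3res=6):
    area = h3.hex_area(resolution=h3res)       # cluster area in km^2
    radius = (area / math.pi) ** 0.5           # radius of the equal-area circle in km
    return 128 / (45 * math.pi) * radius       # expected within-cluster distance in km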
def get_distance_time(lat_src, long_src, lat_dst, long_dst, weekend, hour, h3res):
'''
Returns the time that is needed to cover the road distance between two pairs of coordinates in minutes based on the Uber movement data.
'''
hex_src = h3.geo_to_h3(lat=lat_src, lng=long_src, resolution=h3res)
hex_dst = h3.geo_to_h3(lat=lat_dst, lng=long_dst, resolution=h3res)
if weekend == 1:
travel_times = df_combined_we[(df_combined_we['h3_zone_6_source'] == hex_src) & \
(df_combined_we['h3_zone_6_dst'] == hex_dst) & \
(df_combined_we['hod'] == hour) \
]['mean_travel_time']
else:
travel_times = df_combined_wd[(df_combined_wd['h3_zone_6_source'] == hex_src) & \
(df_combined_wd['h3_zone_6_dst'] == hex_dst) & \
(df_combined_wd['hod'] == hour) \
]['mean_travel_time']
if len(travel_times) > 0:
travel_time = sum(travel_times) / len(travel_times) / 60
else:
# len(travel_times) == 0 means that no travel times exist for this connection in the Uber movement data
# Get air distance between two original coordinates
orig_dist = get_distance_air(lat_src, long_src, lat_dst, long_dst, h3res)
# Divide air distance through average speed
if weekend == 1:
travel_time = orig_dist / avg_speeds_we[hour] * 60
else:
travel_time = orig_dist / avg_speeds_wd[hour] * 60
return travel_time
def score_adv(train_placements_df, crash_df, test_start_date='2018-01-01', test_end_date='2019-12-31', verbose=0):
'''
    Advanced version of the standard score function. Returns a dictionary with five entries.
    The first entry is the 'Zindi' score, just like in the score function. The other values are 'Air', 'Road', 'Time' and 'Golden'.
Can be used to score the ambulance placements against a set of crashes.
Can be used on all crash data, train_df or holdout_df as crash_df.
'''
try:
df_combined_wd
except NameError:
import_uber_data()
test_df = crash_df.loc[(crash_df.datetime >= test_start_date) & (crash_df.datetime <= test_end_date)]
if verbose > 0:
print(f'Data points in test period: {test_df.shape[0]}' )
total_distance = {'Zindi': 0, 'Air': 0, 'Road': 0, 'Time': 0, 'Golden': 0}
for crash_date, c_lat, c_lon in test_df[['datetime', 'latitude', 'longitude']].values:
row = train_placements_df.loc[train_placements_df.date < crash_date].tail(1)
        if crash_date.weekday() in (5, 6):  # datetime.weekday(): 5 = Saturday, 6 = Sunday
weekend = 1
else:
weekend = 0
hour = crash_date.hour
dists = []
for a in range(6):
dist = get_metrics((row[f'A{a}_Latitude'].values[0], row[f'A{a}_Longitude'].values[0]), (c_lat, c_lon), weekend, hour)
dists.append(dist)
min_dist = dists[np.argmin([x['Time'] for x in dists])]
for x in total_distance:
total_distance[x] += min_dist[x]
return total_distance
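# Hedged convenience sketch: score_adv() returns totals; dividing by the number of crashes gives the
# per-accident averages reported by the pipeline below.
def _per_crash_scores(train_placements_df, test_df):
    totals = score_adv(train_placements_df, test_df)
    return {k: v / max(test_df.shape[0], 1) for k, v in totals.items()}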
def ambulance_placement_pipeline(input_path='../Inputs/', output_path='../Outputs/', crash_source_csv='Train',
outlier_filter=0, holdout_strategy='random', holdout_test_size=0.3,
test_period_date_start='2019-01-01', test_period_date_end='2019-07-01',
tw_cluster_strategy='saturday_2', placement_method='k_means', verbose=0,
lr=3e-2, n_epochs=400, batch_size=50):
'''
    Load crash data (from train or prediction), apply feature engineering, run time-window clustering
    (based on the chosen strategy), create ambulance placements and create the output file.
'''
# load crash data into dataframe
crash_df = create_crash_df(train_file = input_path+crash_source_csv+'.csv')
# in case of loading file with hex bins instead of lat/long
if 'latitude' not in crash_df.columns:
crash_df['latitude'] = crash_df.hex_bins.apply(lambda x : h3.h3_to_geo(x)[0])
crash_df['longitude'] = crash_df.hex_bins.apply(lambda x : h3.h3_to_geo(x)[1])
crash_df.drop("hex_bins", axis=1, inplace=True)
# create individual date and time features from date column
crash_df = create_temporal_features(crash_df)
# split data into train and test sets
train_df, test_df = split_accident_df(data=crash_df, strategy=holdout_strategy,
test_size=holdout_test_size)
# remove outliers from test set based on lat and lon
train_df = outlier_removal(train_df, filter=outlier_filter)
# apply time window cluster labels to df based on strategy specified
train_df = create_cluster_feature(train_df, strategy=tw_cluster_strategy, verbose=verbose)
# Run clustering model to get placement set centroids for each TW cluster
test_df_with_clusters = create_cluster_feature(test_df, strategy=tw_cluster_strategy, verbose=0)
centroids_dict = create_cluster_centroids(train_df, test_df=test_df_with_clusters, verbose=verbose, method=placement_method,
lr=lr, n_epochs=n_epochs, batch_size=batch_size)
# create df in format needed for submission
train_placements_df = centroid_to_submission(centroids_dict, date_start='2018-01-01', date_end='2019-12-31',
tw_cluster_strategy=tw_cluster_strategy)
# Run scoring functions
# If using score
if verbose > 0:
print(f'Total size of test set: {test_df.shape[0]}')
test_score = score(train_placements_df, test_df, test_start_date=test_period_date_start,
test_end_date=test_period_date_end)
if verbose > 0:
print(f'Total size of train set: {crash_df.shape[0]}')
train_score = score(train_placements_df,train_df,
test_start_date=test_period_date_start, test_end_date=test_period_date_end)
if verbose > 0:
print(f'Score on test set: {test_score / max(test_df.shape[0],1)}')
print(f'Score on train set: {train_score / train_df.shape[0] } (avg distance per accident)')
# If using score_adv:
if verbose == 2:
test_score = score_adv(train_placements_df, test_df, test_start_date=test_period_date_start,
test_end_date=test_period_date_end)
train_score = score_adv(train_placements_df,train_df,
test_start_date=test_period_date_start, test_end_date=test_period_date_end)
for x in test_score:
test_score[x] = test_score[x] / max(test_df.shape[0],1)
print(f'Score on test set: {test_score}')
for x in train_score:
train_score[x] = train_score[x] / max(train_df.shape[0],1)
print(f'Score on train set: {train_score} (avg distance per accident)')
# Create file for submitting to zindi
submission_df = centroid_to_submission(centroids_dict, date_start='2019-07-01', date_end='2020-01-01',
tw_cluster_strategy=tw_cluster_strategy)
create_submission_csv(submission_df, crash_source=crash_source_csv, outlier_filter=outlier_filter,
tw_cluster_strategy=tw_cluster_strategy, placement_method=placement_method, path=output_path ,verbose=verbose)
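# Hedged usage sketch: one possible call of the pipeline above. It assumes Train.csv exists in
# ../Inputs/ and that ../Outputs/ is writable; the parameter values are illustrative, not tuned.
def _demo_pipeline_run():
    ambulance_placement_pipeline(crash_source_csv='Train', outlier_filter=0,
                                 holdout_strategy='random', holdout_test_size=0.3,
                                 tw_cluster_strategy='saturday_2', placement_method='k_means',
                                 verbose=1)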
### Prediction functions from here on
def convert_h3_to_lat_lon(df):
"""
Convert hex bins back to latitude and longitude
"""
df['latitude'] = df.hex_bins.apply(lambda x: h3.h3_to_geo(x)[0])
df['longitude'] = df.hex_bins.apply(lambda x: h3.h3_to_geo(x)[1])
df = df.drop("hex_bins", axis=1)
return df
def create_pred_template(df):
    '''Based on the hex bin resolution, creates an empty data frame with one row per 3-hour time window per hex bin.
    This results in an n * 2 dataframe (columns: time_windows, hex_bins) where the number of rows equals hex_bins * 4369.
    4369 is the number of 3-hour timestamps between the start and end date (546 days * 8 windows per day, plus the closing timestamp).'''
#Create dataframe to get the accurate amount of 3-hour time windows for the desired time frame
date_start = '2018-01-01'
date_end = '2019-07-01'
dates = pd.date_range(date_start, date_end, freq='3h')
all_days_df = pd.DataFrame(dates, columns=["dates"])
time_windows = list(all_days_df["dates"])
len_windows = all_days_df.shape[0]
list_unique_hexbins = list(df["h3_zone_6"].unique())
list_bins_per_window = []
list_time_windows = []
for i in range(0, len(list_unique_hexbins)):
list_bins_per_window += len_windows * [list_unique_hexbins[i]]
list_time_windows += time_windows
input_df = {"time_windows": list_time_windows, "hex_bins": list_bins_per_window}
df_pred_template = pd.DataFrame(data=input_df)
return df_pred_template
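# Hedged sanity check for create_pred_template(): between 2018-01-01 and 2019-07-01 there are
# 546 days * 8 windows + 1 closing timestamp = 4369 rows per unique hex bin.
def _check_pred_template_shape(df):
    template = create_pred_template(df)
    assert template.shape[0] == df['h3_zone_6'].nunique() * 4369
    return template.shape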
def rta_per_time_window_hex_bin(df):
'''
Add up RTA's per time window and hex bin
'''
df["time_window_key"] = df["datetime"].apply(lambda x: str(x.year) + "-" + str(x.month) + "-" + str(x.day) + "-" + str(math.floor(x.hour / 3)))
df_tw_hex = df.groupby(["time_window_key", "h3_zone_6"]).agg({"uid": "count"}).reset_index()
col_names = ["time_window_key"] + ["hex_bins"] + ["RTA"]
df_tw_hex.columns = col_names
return df_tw_hex
def fill_overall_df(df_pred_template, df_tw_hex):
'''
Join road traffic accidents onto empty data frame that consists of time windows (8 per day) for all days (1.5 years) for all hex bins.
For combinations with no accidents, NaNs will be converted into 0.
'''
df_pred_template["time_window_key"] = df_pred_template["time_windows"].apply(lambda x: str(x.year) + "-" + str(x.month) + "-" + str(x.day) + "-" + str(math.floor(x.hour / 3)))
df_merged = pd.merge(df_pred_template, df_tw_hex, on=["time_window_key", "hex_bins"], how="outer")
df_merged = df_merged.fillna(0)
list_of_c = list(df_merged.columns)
list_of_c[0] = "datetime"
df_merged.columns = list_of_c
return df_merged
def generate_outlier_list(df, frequency_cutoff=1):
"""
Based on the minimum frequency of occurrence, cut off all hex bins that do not exceed that value over 1.5 years. Returns list of hex bins to exclude.
"""
if frequency_cutoff == 0:
return []
else:
df_outliers = df.groupby("hex_bins")
df_outliers = df_outliers.agg({'RTA': np.count_nonzero})
df_outliers = df_outliers.reset_index()
df_outliers.columns = ["hex_bins", "RTA_nonzero"]
df_freq_outliers = df_outliers.loc[df_outliers["RTA_nonzero"] <= frequency_cutoff]
# Get list of frequency outliers
list_freq_outliers = list(df_freq_outliers["hex_bins"].values)
return list_freq_outliers
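# Hedged toy example for generate_outlier_list(): bin 'a' has one non-zero time window, bin 'b' has
# three, so with frequency_cutoff=1 only 'a' is returned. The data below is illustrative only.
def _demo_generate_outlier_list():
    toy = pd.DataFrame({'hex_bins': ['a', 'a', 'b', 'b', 'b'],
                        'RTA': [1, 0, 2, 1, 3]})
    return generate_outlier_list(toy, frequency_cutoff=1)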
def filter_df_for_pred_a(df, list_freq_outliers):
"""
    Exclude crashes that occurred in frequency-outlier hex bins and drop the feature columns not needed for prediction
"""
df_pred_a = df.loc[~df["h3_zone_6"].isin(list_freq_outliers)]
df_pred_a = df_pred_a.reset_index()
df_pred_a = df_pred_a.drop(["uid", "latitude", "longitude", "time", "time_window", "time_window_str", "day", "weekday", "month", "half_year", "rainy_season",
"year", "date_trunc", "holiday", "time_window_key", "index"], axis=1)
df_pred_a.columns = ["datetime", "hex_bins"]
return df_pred_a
def filter_df_for_pred_b(df_merged, list_freq_outliers):
"""
Exclude frequency outliers according to list and drop all hex bin / time window combinations with 0 RTA's
"""
# Filters overall dataframe to exclude hex bins with only one RTA occurrence in the whole timeframe (according to input list)
df_merged = df_merged.loc[~df_merged["hex_bins"].isin(list_freq_outliers)]
# Also filters out all hex bin and time window combinations where no RTA occurred
df_pred_b = df_merged.loc[df_merged["RTA"] > 0]
return df_pred_b
def clean_pred_b(df_pred_b):
"""Dropping all redundant rows, fixing indices and making sure the time windows are hit."""
# Remove some redundant rows and fix indices
df_predictions = df_pred_b.drop(["time_window_key", "RTA"], axis=1)
df_predictions = df_predictions.reset_index()
df_predictions.drop("index", axis=1, inplace=True)
# Add 1 minute to have the RTA's lie inside the time window rather than on the verge, sort values and reset the index
df_predictions["datetime"] = df_predictions["datetime"].apply(lambda x: x + pd.Timedelta(minutes=1))
df_predictions = df_predictions.sort_values(by="datetime").reset_index()
# Drop redundant columns
df_predictions = df_predictions.drop("index", axis=1)
return df_predictions
def create_samples(df, list_freq_outliers):
"""
    Creates a frequency table of RTAs per hex bin, weekday and time window from which combinations can be drawn, subject to the predicted RTAs per day
"""
dict_windows = {1: "00-03", 2: "03-06", 3: "06-09", 4: "09-12", 5: "12-15",
6: "15-18", 7: "18-21", 8: "21-24"}
df["time_window"] = df["datetime"].apply(lambda x: math.floor(x.hour / 3) + 1)
df["time_window_str"] = df["time_window"].apply(lambda x: dict_windows.get(x))
df["weekday"] = df["datetime"].apply(lambda x: x.weekday())
# Filtering for hex bins with only one occurrence
df_filter = df.loc[~df["hex_bins"].isin(list_freq_outliers)]
# Prepare data frame for sample generation
df_freq = df_filter.groupby(["hex_bins", "weekday", "time_window_str"])
df_samples = df_freq.agg({'RTA': [np.count_nonzero]})
df_samples = df_samples.reset_index()
df_samples.columns = ["hex_bins", "weekday", "time_window", "RTA_freq"]
return df_samples
def generate_predictions(df, predicted_rta):
"""
Takes a dataframe containing the RTA frequency per weekday and time window and the predicted RTA's per day and turns this into a prediction dataframe.
"""
df_monday = df.loc[df["weekday"] == 0].sort_values(by="RTA_freq", ascending=False)
df_tuesday = df.loc[df["weekday"] == 1].sort_values(by="RTA_freq", ascending=False)
df_wednesday = df.loc[df["weekday"] == 2].sort_values(by="RTA_freq", ascending=False)
df_thursday = df.loc[df["weekday"] == 3].sort_values(by="RTA_freq", ascending=False)
df_friday = df.loc[df["weekday"] == 4].sort_values(by="RTA_freq", ascending=False)
df_saturday = df.loc[df["weekday"] == 5].sort_values(by="RTA_freq", ascending=False)
df_sunday = df.loc[df["weekday"] == 6].sort_values(by="RTA_freq", ascending=False)
# Split overall predictions into predictions per weekday
lst_mon = predicted_rta[0::7]
lst_tue = predicted_rta[1::7]
lst_wed = predicted_rta[2::7]
lst_thu = predicted_rta[3::7]
lst_fri = predicted_rta[4::7]
lst_sat = predicted_rta[5::7]
lst_sun = predicted_rta[6::7]
    # The evaluation period 2019-07-01 to 2019-12-31 conveniently starts with a Monday but ends with a Tuesday - hence the loop has to run
    # one iteration more for Monday and Tuesday.
# This generates a list of lists of predictions for each weekday
    # Use separate list objects per weekday (chained assignment would make all names alias one shared list)
    monday_bins, tuesday_bins, wednesday_bins, thursday_bins, friday_bins, saturday_bins, sunday_bins = ([] for _ in range(7))
    monday_tw, tuesday_tw, wednesday_tw, thursday_tw, friday_tw, saturday_tw, sunday_tw = ([] for _ in range(7))
for i in range(len(lst_mon)):
monday_bins.append(list(*[df_monday["hex_bins"][0:lst_mon[i]]]))
monday_tw.append(list(*[df_monday["time_window"][0:lst_mon[i]]]))
tuesday_bins.append(list(*[df_tuesday["hex_bins"][0:lst_tue[i]]]))
tuesday_tw.append(list(*[df_tuesday["time_window"][0:lst_tue[i]]]))
for i in range(len(lst_wed)):
wednesday_bins.append(list(*[df_wednesday["hex_bins"][0:lst_wed[i]]]))
wednesday_tw.append(list(*[df_wednesday["time_window"][0:lst_wed[i]]]))
thursday_bins.append(list(*[df_thursday["hex_bins"][0:lst_thu[i]]]))
thursday_tw.append(list(*[df_thursday["time_window"][0:lst_thu[i]]]))
friday_bins.append(list(*[df_friday["hex_bins"][0:lst_fri[i]]]))
friday_tw.append(list(*[df_friday["time_window"][0:lst_fri[i]]]))
saturday_bins.append(list(*[df_saturday["hex_bins"][0:lst_sat[i]]]))
saturday_tw.append(list(*[df_saturday["time_window"][0:lst_sat[i]]]))
sunday_bins.append(list(*[df_sunday["hex_bins"][0:lst_sun[i]]]))
sunday_tw.append(list(*[df_sunday["time_window"][0:lst_sun[i]]]))
# Turn list of lists into an overall list for each weekday's predictions
flat_monday_bins = [item for sublist in monday_bins for item in sublist]
flat_monday_tw = [item for sublist in monday_tw for item in sublist]
flat_tuesday_bins = [item for sublist in tuesday_bins for item in sublist]
flat_tuesday_tw = [item for sublist in tuesday_tw for item in sublist]
flat_wednesday_bins = [item for sublist in wednesday_bins for item in sublist]
flat_wednesday_tw = [item for sublist in wednesday_tw for item in sublist]
flat_thursday_bins = [item for sublist in thursday_bins for item in sublist]
flat_thursday_tw = [item for sublist in thursday_tw for item in sublist]
flat_friday_bins = [item for sublist in friday_bins for item in sublist]
flat_friday_tw = [item for sublist in friday_tw for item in sublist]
flat_saturday_bins = [item for sublist in saturday_bins for item in sublist]
flat_saturday_tw = [item for sublist in saturday_tw for item in sublist]
flat_sunday_bins = [item for sublist in sunday_bins for item in sublist]
flat_sunday_tw = [item for sublist in sunday_tw for item in sublist]
# Generate list with hex bins and time windows as input for prediction
flat_bins = flat_monday_bins + flat_tuesday_bins + flat_wednesday_bins + flat_thursday_bins + flat_friday_bins + flat_saturday_bins + flat_sunday_bins
flat_tw = flat_monday_tw + flat_tuesday_tw + flat_wednesday_tw + flat_thursday_tw + flat_friday_tw + flat_saturday_tw + flat_sunday_tw
# Generate list with day of the week entries for each prediction as input for dataframe
weekdays = [0] * sum(lst_mon) + [1] * sum(lst_tue) + [2] * sum(lst_wed) + [3] * sum(lst_thu) + [4] * sum(lst_fri) + [5] * sum(lst_sat) + [6] * sum(lst_sun)
# Generate list with week entries for each prediction as input for dataframe
list_of_days_list = [lst_mon, lst_tue, lst_wed, lst_thu, lst_fri, lst_sat, lst_sun]
lst_weeks = []
for lst_days in list_of_days_list:
i = 0
for number in lst_days:
lst_weeks += [i] * number
i += 1
# Create dataframe
df_pred_c = pd.DataFrame(list(zip(flat_bins, flat_tw, weekdays, lst_weeks)), columns=["hex_bins", "time_window", "weekday", "week"])
return df_pred_c
def generate_predictions_first_half_2019(df, predicted_rta):
"""
Takes a dataframe containing the RTA frequency per weekday and time window and the predicted RTA's per day and turns this into a prediction dataframe.
"""
df_monday = df.loc[df["weekday"] == 0].sort_values(by="RTA_freq", ascending=False)
df_tuesday = df.loc[df["weekday"] == 1].sort_values(by="RTA_freq", ascending=False)
df_wednesday = df.loc[df["weekday"] == 2].sort_values(by="RTA_freq", ascending=False)
df_thursday = df.loc[df["weekday"] == 3].sort_values(by="RTA_freq", ascending=False)
df_friday = df.loc[df["weekday"] == 4].sort_values(by="RTA_freq", ascending=False)
df_saturday = df.loc[df["weekday"] == 5].sort_values(by="RTA_freq", ascending=False)
df_sunday = df.loc[df["weekday"] == 6].sort_values(by="RTA_freq", ascending=False)
# Split overall predictions into predictions per weekday
lst_mon = predicted_rta[6::7]
lst_tue = predicted_rta[0::7]
lst_wed = predicted_rta[1::7]
lst_thu = predicted_rta[2::7]
lst_fri = predicted_rta[3::7]
lst_sat = predicted_rta[4::7]
lst_sun = predicted_rta[5::7]
    # The first half of 2019 (2019-01-01 to 2019-06-30) starts with a Tuesday and ends with a Sunday, so Monday occurs
    # one time fewer than the other weekdays - hence the separate, shorter loop for Monday.
# This generates a list of lists of predictions for each weekday
    # Use separate list objects per weekday (chained assignment would make all names alias one shared list)
    monday_bins, tuesday_bins, wednesday_bins, thursday_bins, friday_bins, saturday_bins, sunday_bins = ([] for _ in range(7))
    monday_tw, tuesday_tw, wednesday_tw, thursday_tw, friday_tw, saturday_tw, sunday_tw = ([] for _ in range(7))
for i in range(len(lst_mon)):
monday_bins.append(list(*[df_monday["hex_bins"][0:lst_mon[i]]]))
monday_tw.append(list(*[df_monday["time_window"][0:lst_mon[i]]]))
for i in range(len(lst_wed)):
tuesday_bins.append(list(*[df_tuesday["hex_bins"][0:lst_tue[i]]]))
tuesday_tw.append(list(*[df_tuesday["time_window"][0:lst_tue[i]]]))
wednesday_bins.append(list(*[df_wednesday["hex_bins"][0:lst_wed[i]]]))
wednesday_tw.append(list(*[df_wednesday["time_window"][0:lst_wed[i]]]))
thursday_bins.append(list(*[df_thursday["hex_bins"][0:lst_thu[i]]]))
thursday_tw.append(list(*[df_thursday["time_window"][0:lst_thu[i]]]))
friday_bins.append(list(*[df_friday["hex_bins"][0:lst_fri[i]]]))
friday_tw.append(list(*[df_friday["time_window"][0:lst_fri[i]]]))
saturday_bins.append(list(*[df_saturday["hex_bins"][0:lst_sat[i]]]))
saturday_tw.append(list(*[df_saturday["time_window"][0:lst_sat[i]]]))
sunday_bins.append(list(*[df_sunday["hex_bins"][0:lst_sun[i]]]))
sunday_tw.append(list(*[df_sunday["time_window"][0:lst_sun[i]]]))
# Turn list of lists into an overall list for each weekday's predictions
flat_monday_bins = [item for sublist in monday_bins for item in sublist]
flat_monday_tw = [item for sublist in monday_tw for item in sublist]
flat_tuesday_bins = [item for sublist in tuesday_bins for item in sublist]
flat_tuesday_tw = [item for sublist in tuesday_tw for item in sublist]
flat_wednesday_bins = [item for sublist in wednesday_bins for item in sublist]
flat_wednesday_tw = [item for sublist in wednesday_tw for item in sublist]
flat_thursday_bins = [item for sublist in thursday_bins for item in sublist]
flat_thursday_tw = [item for sublist in thursday_tw for item in sublist]
flat_friday_bins = [item for sublist in friday_bins for item in sublist]
flat_friday_tw = [item for sublist in friday_tw for item in sublist]
flat_saturday_bins = [item for sublist in saturday_bins for item in sublist]
flat_saturday_tw = [item for sublist in saturday_tw for item in sublist]
flat_sunday_bins = [item for sublist in sunday_bins for item in sublist]
flat_sunday_tw = [item for sublist in sunday_tw for item in sublist]
# Generate list with hex bins and time windows as input for prediction
flat_bins = flat_monday_bins + flat_tuesday_bins + flat_wednesday_bins + flat_thursday_bins + flat_friday_bins + flat_saturday_bins + flat_sunday_bins
flat_tw = flat_monday_tw + flat_tuesday_tw + flat_wednesday_tw + flat_thursday_tw + flat_friday_tw + flat_saturday_tw + flat_sunday_tw
# Generate list with day of the week entries for each prediction as input for dataframe
weekdays = [0] * sum(lst_mon) + [1] * sum(lst_tue) + [2] * sum(lst_wed) + [3] * sum(lst_thu) + [4] * sum(lst_fri) + [5] * sum(lst_sat) + [6] * sum(lst_sun)
# Generate list with week entries for each prediction as input for dataframe
list_of_days_list = [lst_mon, lst_tue, lst_wed, lst_thu, lst_fri, lst_sat, lst_sun]
lst_weeks = []
for lst_days in list_of_days_list:
i = 0
for number in lst_days:
lst_weeks += [i] * number
i += 1
# Create dataframe
df_pred_c = pd.DataFrame(list(zip(flat_bins, flat_tw, weekdays, lst_weeks)), columns=["hex_bins", "time_window", "weekday", "week"])
return df_pred_c
def reduce_to_time_windows(df, predict_period):
"""
Takes a data frame of predicted RTA's and brings it into the correct format for clustering.
"""
# Set start of prediction period
if predict_period == '2019_h2':
start = pd.to_datetime("2019-07-01")
if predict_period == '2019_h1':
start = | pd.to_datetime("2019-01-01") | pandas.to_datetime |
from __future__ import division #brings in Python 3.0 mixed type calculations
import numpy as np
import os
import pandas as pd
import sys
#find parent directory and import model
parentddir = os.path.abspath(os.path.join(os.path.dirname(__file__), os.path.pardir))
sys.path.append(parentddir)
from base.uber_model import UberModel, ModelSharedInputs
class BeerexInputs(ModelSharedInputs):
"""
Input class for Beerex
"""
def __init__(self):
"""Class representing the inputs for Beerex"""
super(BeerexInputs, self).__init__()
#self.incorporation_depth = pd.Series([], dtype="float")
self.application_rate = pd.Series([], dtype="float")
self.application_method = pd.Series([], dtype="object")
self.crop_type = pd.Series([], dtype="object")
# self.application_units = pd.Series([], dtype="object")
self.empirical_residue = pd.Series([], dtype="object")
self.empirical_pollen = pd.Series([], dtype="float")
self.empirical_nectar = pd.Series([], dtype="float")
self.empirical_jelly = pd.Series([], dtype="float")
self.adult_contact_ld50 = pd.Series([], dtype="float")
self.adult_oral_ld50 = pd.Series([], dtype="float")
self.adult_oral_noael = pd.Series([], dtype="float")
self.larval_ld50 = pd.Series([], dtype="float")
self.larval_noael = pd.Series([], dtype="float")
self.log_kow = pd.Series([], dtype="float")
self.koc = pd.Series([], dtype="float")
self.mass_tree_vegetation = pd.Series([], dtype="float")
self.lw1_jelly = pd.Series([], dtype="float")
self.lw2_jelly = pd.Series([], dtype="float")
self.lw3_jelly = pd.Series([], dtype="float")
self.lw4_nectar = pd.Series([], dtype="float")
self.lw4_pollen = pd.Series([], dtype="float")
self.lw5_nectar = pd.Series([], dtype="float")
self.lw5_pollen = pd.Series([], dtype="float")
self.ld6_nectar = pd.Series([], dtype="float")
self.ld6_pollen = pd.Series([], dtype="float")
self.lq1_jelly = pd.Series([], dtype="float")
self.lq2_jelly = pd.Series([], dtype="float")
self.lq3_jelly = pd.Series([], dtype="float")
self.lq4_jelly = pd.Series([], dtype="float")
self.aw_cell_nectar = pd.Series([], dtype="float")
self.aw_cell_pollen = | pd.Series([], dtype="float") | pandas.Series |
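# Hedged usage sketch: inputs are supplied as pandas Series (one value per model run). It assumes
# ModelSharedInputs needs no constructor arguments; the values below are placeholders, not defaults.
def _demo_beerex_inputs():
    inputs = BeerexInputs()
    inputs.application_rate = pd.Series([1.5], dtype="float")
    inputs.application_method = pd.Series(["foliar spray"], dtype="object")
    return inputs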
"""Load raw data and organise into format useful for model"""
import os
import pickle
import numpy as np
import pandas as pd
class RawData:
"""Load raw data to be used in model"""
def __init__(self, data_dir, scenarios_dir, seed=10):
# Paths to directories
# --------------------
# Core data directory
self.data_dir = data_dir
# Network data
# ------------
# Nodes
self.df_n = pd.read_csv(os.path.join(self.data_dir,
'egrimod-nem-dataset-v1.3',
'akxen-egrimod-nem-dataset-4806603',
'network',
'network_nodes.csv'), index_col='NODE_ID')
# AC edges
self.df_e = pd.read_csv(os.path.join(self.data_dir,
'egrimod-nem-dataset-v1.3',
'akxen-egrimod-nem-dataset-4806603',
'network', 'network_edges.csv'), index_col='LINE_ID')
# HVDC links
self.df_hvdc_links = pd.read_csv(os.path.join(self.data_dir,
'egrimod-nem-dataset-v1.3',
'akxen-egrimod-nem-dataset-4806603',
'network',
'network_hvdc_links.csv'), index_col='HVDC_LINK_ID')
# AC interconnector links
self.df_ac_i_links = pd.read_csv(os.path.join(self.data_dir,
'egrimod-nem-dataset-v1.3',
'akxen-egrimod-nem-dataset-4806603',
'network',
'network_ac_interconnector_links.csv'), index_col='INTERCONNECTOR_ID')
# AC interconnector flow limits
self.df_ac_i_limits = pd.read_csv(os.path.join(self.data_dir,
'egrimod-nem-dataset-v1.3',
'akxen-egrimod-nem-dataset-4806603',
'network',
'network_ac_interconnector_flow_limits.csv'), index_col='INTERCONNECTOR_ID')
# Generators
# ----------
# Generating unit information
self.df_g = pd.read_csv(os.path.join(self.data_dir,
'egrimod-nem-dataset-v1.3',
'akxen-egrimod-nem-dataset-4806603',
'generators',
'generators.csv'), index_col='DUID', dtype={'NODE': int})
# Perturb short-run marginal costs (SRMCs) so all are unique.
# (add uniformly distributed random number between 0 and 2 to each SRMC. Set seed so this randomness
# can be reproduced)
np.random.seed(seed)
self.df_g['SRMC_2016-17'] = self.df_g['SRMC_2016-17'] + np.random.uniform(0, 2, self.df_g.shape[0])
# Load scenario data
# ------------------
with open(os.path.join(scenarios_dir, 'weekly_scenarios.pickle'), 'rb') as f:
self.df_scenarios = pickle.load(f)
class OrganisedData(RawData):
"""Organise data to be used in mathematical program"""
def __init__(self, data_dir, scenarios_dir):
# Load model data
super().__init__(data_dir, scenarios_dir)
def get_admittance_matrix(self):
"""Construct admittance matrix for network"""
# Initialise dataframe
df_Y = | pd.DataFrame(data=0j, index=self.df_n.index, columns=self.df_n.index) | pandas.DataFrame |
# Copyright (c) 2018-2019, NVIDIA CORPORATION.
import numpy as np
import pandas as pd
import nvstrings
from utils import assert_eq
def test_timestamp2int():
s = nvstrings.to_device(["2019-03-20T12:34:56Z", "2020-02-29T23:59:59Z"])
s1 = | pd.Series(["2019-03-20T12:34:56Z", "2020-02-29T23:59:59Z"]) | pandas.Series |
import os
import numpy as np
import pandas as pd
import glob
import imp
import rnaseq_barcode.flow as flow
# Set the experiment constants from folder name
dirname = os.getcwd().split('/')[-1]
DATE = int(dirname.split('_')[0])
RUN_NO = int(dirname.split('_')[1][1:])
USERNAME = 'nmccarty3'
gating_fraction = 0.4
# Load all files.
files = glob.glob(f'../../../data/flow/csv/{DATE}*_r{RUN_NO}*.csv')
# Set up the DataFrame
colnames = ['date', 'username', 'phenotype', 'operator', 'strain', 'IPTGuM',
'mean_FITC_H']
df = pd.DataFrame([], columns=colnames)
for f in files:
print(f)
# Get the identifying finformation.
date, _, phenotype, operator, strain, conc = f.split('/')[-1].split('_')
conc = float(conc.split('uM')[0])
if (strain != 'auto') and (strain != 'delta'):
rep = int(strain.split('R')[-1])
else:
rep = 0
# Load in the data
data = | pd.read_csv(f) | pandas.read_csv |
# -*- coding: utf-8 -*-
import sys
import pandas as pd
import numpy as np
import copy
from sklearn.decomposition import PCA
from sklearn.linear_model import LinearRegression
from sklearn.decomposition import KernelPCA
from sklearn.kernel_ridge import KernelRidge
from .plot import hier_heatmap
def assign_val(dataset, valueset, var, name):
    ''' Takes two datasets and maps values from one to the other.
    -dataset: Pandas DataFrame to which the values are going to be added.
    -valueset: Pandas DataFrame from which the values are taken.
    -var: String, name of the column taken from valueset.
    -name: String. New name of the column in dataset. If the name already exists in the DataFrame, its values are overwritten.'''
if dataset.index[0] in valueset.index:
dataset.loc[dataset.index, name] = valueset.loc[dataset.index, var]
else:
dataset.loc[:,'SUBID'] = np.array([i.split('-')[0]+'-'+i.split('-')[1] for i in dataset.index])
dataset.loc[:,name] = valueset.loc[dataset['SUBID'], var].values
dataset = dataset.drop('SUBID', axis = 1)
sys.stderr.write(str(var)+' values assigned.\n')
return dataset
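# Hedged usage sketch for assign_val(): toy frames share an index, so the direct index-lookup branch
# above is exercised. Column names are illustrative only.
def _demo_assign_val():
    dataset = pd.DataFrame({'expr': [1.0, 2.0]}, index=['GTEX-1', 'GTEX-2'])
    valueset = pd.DataFrame({'AGE': [55, 60]}, index=['GTEX-1', 'GTEX-2'])
    return assign_val(dataset, valueset, 'AGE', 'age')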
def linear_cov_reads_mat(dset, labels, meta, var, tissue, cov_matrix):
    '''Calculates linear covariates for a dataset based on nucleotide-resolution RNA-seq.
    -dset: Pandas DataFrame with RNA-seq expression values.
    -labels: DataFrame used for assigning variables on meta.
    -meta: DataFrame of potential covariates.
    -var: String, column of labels used to select the tissue.
    -tissue: Tissue label used as the column of cov_matrix where R^2 values are stored.
    -cov_matrix: DataFrame where R^2 values are stored.'''
y_model = copy.deepcopy(dset)
x_model = copy.deepcopy(labels)
cov = copy.deepcopy(meta)
cov_list = cov.columns
#x_model = x_model[x_model[var] == tissue]
y_model = y_model[y_model.index.isin(x_model.index)]
pca = PCA(n_components = 10, random_state = 0)
pc = pca.fit_transform(y_model)
x_=copy.deepcopy(x_model)
for w in cov_list:
#sys.stderr.write(tissue+" "+str(w)+"\n")
x_model = copy.deepcopy(x_)
covariate = pd.DataFrame(cov.loc[:,w])
if w.startswith('MH') and (cov[w].dtype == 'float64'):
covariate[w] = covariate.loc[:,w].astype('category').cat.codes
x_model = assign_val(x_model, covariate,w, 0)
x_model = pd.DataFrame(x_model.loc[:,0])
x_model = pd.get_dummies(x_model)
lm = LinearRegression()
lm.fit(x_model, pc)
r2 = lm.score(x_model, pc)
cov_matrix.loc[w, tissue] = r2
elif covariate[w].dtype == object:
covariate[w] = covariate.loc[:,w].astype('category').cat.codes
x_model = assign_val(x_model, covariate,w, 0)
x_model = pd.DataFrame(x_model.loc[:,0])
x_model = pd.get_dummies(x_model)
lm = LinearRegression()
lm.fit(x_model, pc)
r2 = lm.score(x_model, pc)
cov_matrix.loc[w, tissue] = r2
elif covariate[w].dtype == 'int64' and w != 'AGE':
covariate[w] = covariate.loc[:,w].astype('category').cat.codes
x_model = assign_val(x_model, covariate,w, 0)
x_model = pd.DataFrame(x_model.loc[:,0])
x_model = pd.get_dummies(x_model)
lm = LinearRegression()
lm.fit(x_model, pc)
r2 = lm.score(x_model, pc)
cov_matrix.loc[w, tissue] = r2
else:
x_model = assign_val(x_model, covariate,w, 0)
x_model = pd.DataFrame(x_model.loc[:,0])
if x_model[0].max() != 0.0:
x_model = x_model/x_model.max()
lm = LinearRegression()
lm.fit(x_model.values.reshape(-1,1), pc)
r2 = lm.score(x_model.values.reshape(-1,1), pc)
cov_matrix.loc[w, tissue] = r2
return cov_matrix
def linear_covariate_mat(dset, labels, meta, tissue, cov_matrix):
'''Calculates linear covariates for a Dataset based on gene expression data.
-dset: Pandas DataFrame with RNA-seq expression values.
-labels: DataFrame used for assigning variables on meta.
-meta: DataFrame of potential covariates.
    -cov_matrix: DataFrame where R^2 values are stored.'''
y_model = copy.deepcopy(dset)
x_model = copy.deepcopy(labels)
cov = copy.deepcopy(meta)
cov_list = cov.columns
x_model = x_model[x_model[0] == tissue]
y_model = y_model[y_model.index.isin(x_model.index)]
pca = PCA(n_components = 10, random_state = 0)
pc = pca.fit_transform(y_model)
x_=copy.deepcopy(x_model)
for w in cov_list:
sys.stderr.write(tissue +" "+w+"\n")
x_model = copy.deepcopy(x_)
covariate = pd.DataFrame(cov.loc[:,w])
if w.startswith('MH') and (cov[w].dtype == 'float64'):
covariate[w] = covariate.loc[:,w].astype('category').cat.codes
x_model = assign_val(x_model, covariate,w, 0)
x_model = | pd.get_dummies(x_model) | pandas.get_dummies |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sun Sep 16 19:49:36 2018
@author: wolfpack
"""
import matplotlib.pyplot as pl
import numpy as np
import pandas as pd
import os
import sys
from sklearn.preprocessing import Imputer
from sklearn.preprocessing import StandardScaler
from sklearn.decomposition import PCA
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
dirname = os.path.dirname(__file__)
sys.path.append("../data_parser")
import DataParser
#import data
subjects = ["Religion","Sinhala","English","Mathematics","Science","History","Geography","Citizenship Education","Health","Tamil","Art","PTS"]
marks_all = DataParser.get_marks(subjects, index='no')
#handle missing values
marks_all = DataParser.handle_missing_values(marks_all, how='-1', is_nan = True)
marks_all = DataParser.handle_missing_values(marks_all, how='-1', is_nan = False)
marks_all = marks_all.replace({-1:np.nan})
marks_all = marks_all.fillna(method='bfill', axis=1)
marks_all = marks_all.fillna(method='ffill', axis=1)
marks_all = marks_all.dropna(axis=0)
"""
PCA
"""
col_all = marks_all.columns
principalDf = pd.DataFrame()
subs_ex_ratios = []
for sub_year in range (int(len(col_all)/3)):
if(sub_year%3 == 0):
col = col_all[sub_year*3].split(".")[0]+"_6"
elif(sub_year%3 == 1):
col = col_all[sub_year*3].split(".")[0]+"_7"
elif(sub_year%3 == 2):
col = col_all[sub_year*3].split(".")[0]+"_8"
marks_year_std = marks_all.iloc[:,sub_year*3:(sub_year*3)+3].copy()
marks_year_std = StandardScaler().fit_transform(marks_year_std)
pca = PCA(n_components=1)
principalComponents = pca.fit_transform(marks_year_std)
principalDf[col] = pd.DataFrame(data = principalComponents, columns = [col])
subs_ex_ratios.append(pca.explained_variance_ratio_)
sub_explained = pd.DataFrame()
pca_cols = principalDf.columns
sub_explained["Subject"] = pd.DataFrame(data = pca_cols, columns = ["Subject"])
sub_explained["Explained ratio"] = pd.DataFrame(data = subs_ex_ratios, columns = ["Explained ratio"])
grade_6 = pd.DataFrame()
grade_7 = pd.DataFrame()
grade_8 = | pd.DataFrame() | pandas.DataFrame |
import gc
import os
import pandas as pd
import geopandas as gpd
import matplotlib.font_manager as fm
import matplotlib.pyplot as plt
import numpy as np
import sys
sys.path.insert(1, "/Volumes/Macintosh HD/Users/nicholasmartino/Google Drive/Python/urban-scraper")
from Converter import polygon_grid
from SB0_Variables import *
from matplotlib import rc
from mpl_toolkits.axes_grid1 import make_axes_locatable
import plotly.graph_objects as go
from plotly.subplots import make_subplots
import plotly.express as px
import plotly.offline as po
from datetime import datetime
from UrbanZoning.City.Fabric import Neighbourhood, Parcels
fm.fontManager.ttflist += fm.createFontList(['/Volumes/Samsung_T5/Fonts/roboto/Roboto-Light.ttf'])
rc('font', family='Roboto', weight='light')
class ModeShifts:
def __init__(self, baseline, modes, block_gdf=None, random_seeds=1, directory=os.getcwd(), suffix='', c_maps=None, plot=True, memory=False, shares_gdfs=None, city_name=None):
"""
:param scenarios: list of scenarios
:param modes: list of modes
:param random_seeds: number of random seeds that was ran for each scenario
:param baselines: dict {'random_seed': gdf} of GeoDataFrames with column "{mode}_{scenario}_rf_{rs}_n" representing the mode shares
:param directory: child directory to load and save files
:param suffix: suffix when reading scenario files
"""
self.exp = shares_gdfs.keys()
self.modes = modes
self.r_seeds = random_seeds
self.dir = directory
if not os.path.exists(f'{self.dir}/ModeShifts'): os.mkdir(f'{self.dir}/ModeShifts')
self.out_dir = f'{self.dir}/ModeShifts'
self.baseline = baseline
self.grid_gdf = polygon_grid(self.baseline, cell_size=30)
self.suffix = suffix
        self.fig_size = (3 * len(self.exp), 12)  # figure size used by plot_grid_map
self.cmaps = c_maps
self.plot = plot
self.min = -30
self.max = 30
self.memory = memory
self.shares_gdfs = shares_gdfs
self.city_name = city_name
self.block = block_gdf
return
def get_files(self, rs):
return {exp: gpd.read_feather(f'{self.dir}/Regression/test_{exp}_s{rs}{self.suffix}.feather') for exp in self.exp}
def mean_rs(self):
"""
Get the mean of all random seeds
:return:
"""
gdf = self.grid_gdf.copy()
for exp in self.exp:
for mode in self.modes:
d_gdf = gdf.loc[:, [col for col in gdf.columns if f'd_{mode}_{exp}' in col]]
rf_gdf = gdf.loc[:, [col for col in gdf.columns if (f'{mode}_{exp}_rf' in col) & ('_n' in col)]]
gdf[f"{mode}_{exp}"] = rf_gdf.mean(axis=1)
gdf[f'{mode}_{exp}_max'] = rf_gdf.max(axis=1)
gdf[f'{mode}_{exp}_min'] = rf_gdf.min(axis=1)
gdf[f'{mode}_{exp}_med'] = rf_gdf.median(axis=1)
gdf[f'd_{mode}_{exp}'] = d_gdf.mean(axis=1)
gdf[f'd_{mode}_{exp}_max'] = d_gdf.max(axis=1)
gdf[f'd_{mode}_{exp}_min'] = d_gdf.min(axis=1)
gdf[f'd_{mode}_{exp}_med'] = d_gdf.median(axis=1)
return gdf
def calculate_delta(self, da_baseline=False):
gdf = self.grid_gdf.copy()
grid_gdf = self.grid_gdf.copy()
# Spatial join from parcels to grid
print("Joining from parcels to grid")
for rs in range(self.r_seeds):
# Generate proxy files
if self.shares_gdfs is None: proxy_files = self.get_files(rs)
else: proxy_files = self.shares_gdfs
for exp, file in proxy_files.items():
if self.shares_gdfs is None: proxy_gdf = proxy_files[exp]
else:
try: proxy_gdf = self.shares_gdfs[exp][rs]
except: proxy_gdf = self.shares_gdfs[exp]
proxy_gdf.crs = 26910
# Join geometry from block layer if it doesn't exist
if 'geometry' not in proxy_gdf.columns:
proxy_gdf['geometry'] = self.block['geometry']
# Join baseline data
for mode in self.modes:
proxy_gdf[f"{mode}_e0_rf_{rs}_n"] = self.baseline[f"{mode}_rf_{rs}_n"]
proxy_gdf[f"{mode}_{exp}_rf_{rs}_n"] = proxy_gdf[f"{mode}_rf_{rs}_n"]
proxy_gdf = proxy_gdf.drop(f"{mode}_rf_{rs}_n", axis=1)
base_cols = [i for mode in self.modes for i in [f"{mode}_{exp}_rf_{rs}_n", f"{mode}_e0_rf_{rs}_n"]]
grid_gdf = gpd.sjoin(
grid_gdf.loc[:, [col for col in grid_gdf if 'index_' not in col]],
proxy_gdf.loc[:, list(set(base_cols).difference(set(grid_gdf.columns)))+["geometry"]],
how='left'
).drop_duplicates('geometry')
# Calculate delta from E0
for mode in self.modes:
# grid_gdf[f"{mode}_{exp}_rf_{rs}_n"] = grid_gdf[f"{mode}_rf_{rs}_n"]
shift = ((grid_gdf[f"{mode}_{exp}_rf_{rs}_n"] - grid_gdf[f"{mode}_e0_rf_{rs}_n"]) / grid_gdf[f"{mode}_e0_rf_{rs}_n"])
grid_gdf[f"d_{mode}_{exp}_s{rs}"] = shift
for mode in self.modes:
grid_gdf[f'd_{mode}_e0'] = 0
gdf = grid_gdf.copy()
# Calculate average of random seeds on E0
for mode in self.modes:
gdf[f"{mode}_e0"] = grid_gdf.loc[:, [col for col in grid_gdf.columns if f'{mode}_e0' in col]].mean(axis=1)
if da_baseline:
print("Getting baseline from dissemination areas")
# Get baseline mode share from the real place
gdf['id'] = gdf.index
da = gpd.read_file(f"{directory}{self.city_name}.gpkg", layer='land_dissemination_area')
overlay = gpd.sjoin(
gdf.loc[:, [col for col in gdf.columns if col not in ['index_left', 'index_right']]],
da.loc[:, ['walk', 'bike', 'bus', 'drive', 'geometry']]).groupby('id').mean()
overlay['geometry'] = gdf['geometry']
overlay['walk_e0'] = overlay['walk']
overlay['bike_e0'] = overlay['bike']
overlay['active_e0'] = overlay['walk'] + overlay['bike']
overlay['transit_e0'] = overlay['bus']
overlay['drive_e0'] = overlay['drive']
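# Re-append grid cells that did not intersect any dissemination area so no rows are lost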
overlay = pd.concat([overlay, gdf.loc[list(set(gdf.index).difference(set(overlay.index))), :]])
print(f"{self.city_name}:\n{overlay.loc[:, ['walk_e0', 'bike_e0', 'transit_e0', 'drive_e0']].mean()}")
# Sum mode share and mode shift
for mode in self.modes:
for exp in self.exp:
for rs in range(self.r_seeds):
overlay[f"{mode}_{exp}_rf_{rs}"] = (overlay[f"{mode}_e0"] * overlay[f"d_{mode}_{exp}_s{rs}"]) + overlay[f"{mode}_e0"]
# Normalize mode shares to 0-1
for exp in self.exp:
for rs in range(self.r_seeds):
total = overlay.loc[:, [f"{mode}_{exp}_rf_{rs}" for mode in self.modes]].sum(axis=1)
for mode in self.modes: overlay[f"{mode}_{exp}_rf_{rs}_n"] = overlay[f"{mode}_{exp}_rf_{rs}"] / total
if 'index_left' in overlay.columns: overlay = overlay.drop('index_left', axis=1)
if 'index_right' in overlay.columns: overlay = overlay.drop('index_right', axis=1)
overlay.crs = gdf.crs
# overlay = overlay.dropna(axis=1)
overlay = overlay.dropna(axis=1, how='all')
return overlay
else:
return gdf
def get_all_data(self, emissions=False):
all_shifts = pd.DataFrame()
for j, mode in enumerate(self.modes):
if mode == 'transit': people = 'riders'
elif mode == 'drive': people = 'drivers'
else: people = None
for i, exp in enumerate(['e0'] + list(self.exp)):
mode_shifts = pd.DataFrame()
mode_shifts['Block'] = self.block.index
mode_shifts['Experiment'] = exp
mode_shifts['Mode'] = mode.title()
mode_shifts[f'Share'] = self.block[f'{mode}_{exp}']
mode_shifts['∆'] = self.block[f'd_{mode}_{exp}']
mode_shifts['Order'] = i + j
if emissions:
if mode in ['drive', 'transit']:
mode_shifts['Emissions'] = self.block[f'{mode}_em_{exp}']
mode_shifts['Emissions/Cap.'] = self.block[f'{mode}_em_{exp}']/self.block[f'{people}_{exp}']
else:
mode_shifts['Emissions'] = 0
mode_shifts['Emissions/Cap.'] = 0
all_shifts = pd.concat([all_shifts, mode_shifts])
all_shifts = all_shifts.fillna(0)
return all_shifts
def plot_grid_map(self):
grid_gdf = self.grid_gdf.copy()
# Setup plot results
main = 9
widths = [main for i in self.exp] + [1]
heights = [main for i in self.modes]
# Re-aggregate data from grid to blocks
print("\nJoining results from grid to parcels and blocks")
for rs in range(self.r_seeds):
ax5 = {}
fig1, ax = plt.subplots(nrows=len(self.modes), ncols=len(self.exp), figsize=self.fig_size)
fig2, ax2 = plt.subplots(nrows=len(self.modes), ncols=len(self.exp), figsize=self.fig_size)
fig5 = plt.figure(constrained_layout=True, figsize=self.fig_size)
gs = fig5.add_gridspec(len(self.modes), (len(self.exp) + 1), width_ratios=widths, height_ratios=heights)
for i, (exp, file) in enumerate(self.get_files(rs).items()):
print(f"> Joining {exp}")
proxy_gdf = self.get_files(rs)[exp]
proxy_gdf['i'] = proxy_gdf.index
for j, (mode, cmap) in enumerate(zip(self.modes, self.cmaps)):
ax5[j] = {}
print(f"\nPlotting results for {mode}")
# Calculate mean and median
mean = grid_gdf[f'd_{mode}_{exp}_s{rs}'].mean()
median = grid_gdf[f'd_{mode}_{exp}_s{rs}'].median()
if self.plot:
# Plot histograms
print(f"> Plotting {mode} histograms for {exp} on random seed {rs}")
ax[j][i].hist(grid_gdf[f"d_{mode}_{exp}_s{rs}"])
ax[j][i].set_title(f"{exp.upper()}, {mode.upper()}")
ax[j][i].axvline(mean, color='b', linestyle='--')
ax[j][i].axvline(median, color='b', linestyle='-')
# Plot grid maps
print(f"> Plotting {mode} raster for {exp} on random seed {rs}")
cols = [f"d_{e}_{mode}_s{rs}" for e in self.exp]
# vmin = min(grid_gdf.loc[:, cols].min())
# vmax = max(grid_gdf.loc[:, cols].max())
grid_gdf.plot(f"d_{mode}_{exp}_s{rs}", ax=ax2[j][i], legend=True, vmin=self.min, vmax=self.max, cmap=cmap)
ax2[j][i].set_title(f"{exp}, {mode.upper()} | {round(mean, 1)}%")
ax2[j][i].set_axis_off()
# Plot average grid maps
ax5[j][i] = fig5.add_subplot(gs[j, i])
all_mean = grid_gdf[f'd_{mode}_{exp}'].mean()
print(f"> Plotting {mode} raster for {exp}")
cols = [f"d_{e}_{mode}_s{rs}" for e in self.exp]
# vmin = min(grid_gdf.loc[:, cols].min())
# vmax = max(grid_gdf.loc[:, cols].max())
grid_gdf.plot(f"d_{mode}_{exp}", ax=ax5[j][i], legend=False, vmin=self.min, vmax=self.max, cmap=cmap)
ax5[j][i].set_title(f"{exp}, {mode.upper()} | MEAN: {round(all_mean, 1)}%")
ax5[j][i].set_axis_off()
# Plot colormap legend
if i == len(self.exp)-1:
ax5[j][i+1] = fig5.add_subplot(gs[j, i+1])
divider = make_axes_locatable(ax5[j][i+1])
leg_ax = divider.append_axes(position="right", size="100%", pad="0%", add_to_figure=False)
array = np.arange(self.min, self.max)
show = leg_ax.imshow([array], cmap=cmap, aspect='auto')
cb = fig5.colorbar(show, cax=ax5[j][i+1])
cb.set_label('Change from baseline (%)')
# ax5[j][i+1].set_axis_off()
# Export plots and maps to files
print("Saving blocks")
# block_gdf.to_file(f'{directory}/Sandbox/Hillside Quadra/Urban Blocks - Seed {rs}.geojson', driver='GeoJSON')
if self.plot:
plt.tight_layout()
fig1.savefig(f'{self.out_dir}/{sandbox} - Mode Shifts - Histogram - Seed {rs}.png')
fig2.savefig(f'{self.out_dir}/{sandbox} - Mode Shifts - Raster Map - Seed {rs}.png')
fig5.savefig(f'{self.out_dir}/{sandbox} - Mode Shifts - Raster Map - Mean.png')
gc.collect()
return grid_gdf
def join_blocks(self):
grid_gdf = self.grid_gdf.copy()
if self.block is None:
block_gdf = Neighbourhood(parcels=Parcels(pd.concat(self.shares_gdfs.values()))).generate_blocks()
else:
block_gdf = self.block.copy()
print("Joining results to block")
block_gdf['i'] = block_gdf.index
b_geom = block_gdf['geometry']
group_by = gpd.sjoin(block_gdf, grid_gdf.loc[:, ['geometry'] + [col for col in grid_gdf.columns if col not in block_gdf.columns]]).groupby('i', as_index=False)
block_gdf = gpd.GeoDataFrame(group_by.mean())
block_gdf = block_gdf.drop('index_right', axis=1)
block_gdf['geometry'] = b_geom
return block_gdf
def plot_block_map(self):
grid_gdf = self.grid_gdf.copy()
block_gdf = self.join_blocks()
# Setup plot results
main = 9
widths = [main for i in self.exp] + [1]
heights = [main for i in self.modes]
fig4, ax4 = plt.subplots(nrows=len(self.modes), ncols=len(self.exp), figsize=self.fig_size)
fig5 = plt.figure(constrained_layout=True, figsize=self.fig_size)
gs = fig5.add_gridspec(len(self.modes), (len(self.exp) + 1), width_ratios=widths, height_ratios=heights)
for rs in range(self.r_seeds):
ax5 = {}
cols = []
for i, (exp, file) in enumerate(self.get_files(rs).items()):
for j, (mode, cmap) in enumerate(zip(self.modes, self.cmaps)):
ax5[j] = {}
cols.append(f"d_{mode}_{exp}")
cols.append(f"{mode}_{exp}_rf_n")
# Calculate mean and median
mean = grid_gdf[f'd_{mode}_{exp}_s{rs}'].mean()
median = grid_gdf[f'd_{mode}_{exp}_s{rs}'].median()
if self.plot:
# Plot block maps
print(f"> Plotting {mode} blocks for {exp} on random seed {rs}")
block_gdf.plot(f"d_{mode}_{exp}_s{rs}", ax=ax4[j][i], legend=True, vmin=self.min, vmax=self.max, cmap=cmap)
ax4[j][i].set_title(f"{exp}, {mode.upper()} | MEAN: {round(mean, 1)}%")
ax4[j][i].set_axis_off()
# Plot average grid maps
ax5[j][i] = fig5.add_subplot(gs[j, i])
all_mean = grid_gdf[f'd_{mode}_{exp}'].mean()
print(f"> Plotting {mode} raster for {exp}")
cols = [f"d_{e}_{mode}_s{rs}" for e in self.exp]
grid_gdf.plot(f"d_{mode}_{exp}", ax=ax5[j][i], legend=False, vmin=self.min, vmax=self.max, cmap=cmap)
ax5[j][i].set_title(f"{exp}, {mode.upper()} | MEAN: {round(all_mean, 1)}%")
ax5[j][i].set_axis_off()
# Plot colormap legend
if i == len(self.exp)-1:
ax5[j][i+1] = fig5.add_subplot(gs[j, i+1])
divider = make_axes_locatable(ax5[j][i+1])
leg_ax = divider.append_axes(position="right", size="100%", pad="0%", add_to_figure=False)
array = np.arange(self.min, self.max)
show = leg_ax.imshow([array], cmap=cmap, aspect='auto')
cb = fig5.colorbar(show, cax=ax5[j][i+1])
cb.set_label('Change from baseline (%)')
# ax5[j][i+1].set_axis_off()
if self.plot:
fig4.savefig(f'{self.out_dir}/{sandbox} - Mode Shifts - Block Map - Seed {rs}.png')
fig5.savefig(f'{self.out_dir}/{sandbox} - Mode Shifts - Block Map - Mean.png')
block_gdf.crs = self.grid_gdf.crs
# block_gdf = gpd.GeoDataFrame(
# gpd.sjoin(block_gdf, self.grid_gdf.loc[:, ['geometry'] + all_cols]).groupby('i', as_index=False).mean())
# block_gdf = block_gdf.drop('index_right', axis=1)
# block_gdf['geometry'] = b_geom
block_gdf.to_file(f'{self.out_dir}/Mode Shifts - Urban Blocks.geojson', driver='GeoJSON')
return block_gdf
def plot_blocks_box(self):
fig = px.box(
data_frame=self.get_all_data(),
x="Mode",
y=f'∆',
facet_col='Experiment',
points='all'
)
fig.show()
po.plot(fig, filename=f'{self.out_dir}/BoxPlot {datetime.now()}.html')
return fig
def get_pop_count(self):
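# NOTE: as written, this method references module-level names (experiments, proxy_files,
# blocks_gdf, modes) rather than the instance attributes (self.exp, self.modes, self.block),
# so it will raise NameError unless those globals are defined by the calling script.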
print("Joining resident counts from parcels to blocks")
for exp in experiments:
gdf = proxy_files[exp.title()]
gdf.columns = [col.lower() for col in gdf.columns]
gdf[f'population_{exp}'] = gdf['population, 2016']
blocks_gdf['id'] = blocks_gdf.index
# Spatial join to blocks
joined_population = gpd.sjoin(
blocks_gdf, gdf.loc[:, [f'population_{exp}', 'geometry']]) \
.groupby('id', as_index=False).sum()
# Merge to initial blocks layer
blocks_gdf = blocks_gdf.merge(
joined_population.loc[:, [f'population_{exp}', 'id']], on='id')
print("Estimating number of people that use each mode")
blocks_gdf.columns = [col.lower() for col in blocks_gdf.columns]
for mode in modes:
# Iterate over experiments to calculate the number of people that shifted to each mode
for exp in experiments:
# Method based on mode shifts
blocks_gdf[f"pop_{mode}_{exp}"] = blocks_gdf[f'population_{exp}'] * (
1 + (blocks_gdf[f'd_{exp}_{mode}'] / 100))
# Method based on predicted mode share
blocks_gdf[f"pop_{mode}_{exp}"] = blocks_gdf[f'population_{exp}'] * blocks_gdf[f'{mode}_{exp}_rf_n']
return blocks_gdf
def calculate_mode_shifts(base_gdf, city_name, shares_gdfs=None, da_baseline=False):
print("Calculating mode shifts")
ms = ModeShifts(
baseline=base_gdf,
modes=modes,
# block_gdf=gpd.read_file(f'{directory}Sandbox/{sandbox}/{sandbox} Sandbox.gpkg', layer='land_parcels_e0'),
random_seeds=r_seeds,
plot=False,
c_maps=['PiYG', 'PuOr', 'coolwarm'],
shares_gdfs=shares_gdfs,
city_name=city_name,
)
ms.grid_gdf = ms.calculate_delta(da_baseline=da_baseline)
ms.grid_gdf = ms.mean_rs()
ms.block = ms.join_blocks()
active = False
if active:
# Sum walk and bike modes to get active transport shift
for m in modes:
if m in ['walk', 'bike']:
for e in ms.exp:
for rs in range(ms.r_seeds):
ms.grid_gdf[f"d_active_{e}_s{rs}"] = ms.grid_gdf[f"d_{e}_walk_s{rs}"] + ms.grid_gdf[
f"d_{e}_bike_s{rs}"]
# Calculate average of all random seeds
d = ms.grid_gdf.loc[:, [col for col in ms.grid_gdf.columns if f'd_{e}_{m}' in col]]
rf = ms.grid_gdf.loc[:, [col for col in ms.grid_gdf.columns if f'{m}_{e}_rf' in col]]
ms.grid_gdf[f"active_{e}"] = rf.mean(axis=1)
ms.grid_gdf[f'active_{e}_max'] = rf.max(axis=1)
ms.grid_gdf[f'active_{e}_min'] = rf.min(axis=1)
ms.grid_gdf[f'active_{e}_med'] = rf.median(axis=1)
ms.grid_gdf[f'd_active_{e}'] = d.mean(axis=1)
ms.grid_gdf[f'd_active_{e}_max'] = d.max(axis=1)
ms.grid_gdf[f'd_active_{e}_min'] = d.min(axis=1)
ms.grid_gdf[f'd_active_{e}_med'] = d.median(axis=1)
return ms
if __name__ == '__main__':
all_data = | pd.DataFrame() | pandas.DataFrame |
from pprint import pprint
import pandas as pd
from tabulate import tabulate
from cadCAD.engine import ExecutionMode, ExecutionContext, Executor
from documentation.examples import sys_model_A, sys_model_B
from cadCAD import configs
exec_mode = ExecutionMode()
# Single Process Execution using a Single System Model Configuration:
# sys_model_A
sys_model_A = [configs[0]]
single_proc_ctx = ExecutionContext(context=exec_mode.single_proc)
sys_model_A_simulation = Executor(exec_context=single_proc_ctx, configs=sys_model_A)
sys_model_A_raw_result, sys_model_A_tensor_field = sys_model_A_simulation.execute()
sys_model_A_result = | pd.DataFrame(sys_model_A_raw_result) | pandas.DataFrame |
from os import makedirs, path
from typing import Union
import pandas as pd
from .filetype import FileType
class DataReader(object):
def __init__(self):
"""
Stores all dataframes and provides methods to feed data into the dataframes.
"""
self.bus_lines = pd.DataFrame(columns=['id', 'name', 'color', 'card_only', 'category'])
self.bus_line_shapes = pd.DataFrame(columns=['id', 'bus_line_id', 'latitude', 'longitude'])
self.bus_stops = pd.DataFrame(columns=['number', 'name', 'type', 'latitude', 'longitude'])
self.itineraries = pd.DataFrame(columns=['id', 'bus_line_id', 'direction'])
self.itinerary_stops = pd.DataFrame(columns=['itinerary_id', 'sequence_number', 'stop_number'])
self.bus_lines_schedule_tables = pd.DataFrame(columns=['table_id', 'bus_line_id', 'bus_stop_id', 'day_type',
'time', 'adaptive'])
self.vehicles_schedule_tables = pd.DataFrame(columns=['table_id', 'bus_line_id', 'bus_stop_id', 'vehicle_id',
'time'])
self.itinerary_stops_extra = pd.DataFrame(columns=['itinerary_id', 'itinerary_name', 'bus_line_id',
'itinerary_stop_id', 'stop_name', 'stop_name_short',
'stop_name_abbr', 'bus_stop_id', 'sequence_number', 'type',
'special_stop'])
self.itinerary_distances = pd.DataFrame(columns=['itinerary_stop_id', 'itinerary_next_stop_id', 'distance_m'])
self.companies = pd.DataFrame(columns=['id', 'name'])
self.itinerary_stops_companies = pd.DataFrame(columns=['itinerary_stop_id', 'company_id'])
self.vehicle_log = pd.DataFrame(columns=['timestamp', 'vehicle_id', 'bus_line_id', 'latitude', 'longitude'])
self.points_of_interest = pd.DataFrame(columns=['name', 'description', 'category', 'latitude', 'longitude'])
def feed_data(self, file: Union[bytes, str], data_type: FileType):
"""
Feeds data into the reader's internal dataframes.
:param file: File which contains the data.
If a *bytes* object is provided, the object will be interpreted as the actual decompressed content of the file.
Alternatively, if a *str* object is provided, the object will be interpreted as the path to a file in the user's
operating system. Supports the same compression types supported by pandas.
:param data_type: Type of data. See :class:`FileType` for available types
"""
# User provided raw binary data or file path (both are supported by pandas)
if isinstance(file, bytes) or isinstance(file, str):
# pd.read_json can take a long time. Therefore, we only read the file if the data_type parameter is valid.
if data_type == FileType.LINHAS:
file_data = pd.read_json(file)
self._feed_linhas_json(file_data)
elif data_type == FileType.POIS:
file_data = pd.read_json(file)
self._feed_pois_json(file_data)
elif data_type == FileType.PONTOS_LINHA:
file_data = pd.read_json(file)
self._feed_pontos_linha_json(file_data)
elif data_type == FileType.SHAPE_LINHA:
file_data = pd.read_json(file)
self._feed_shape_linha_json(file_data)
elif data_type == FileType.TABELA_LINHA:
file_data = pd.read_json(file)
self._feed_tabela_linha_json(file_data)
elif data_type == FileType.TABELA_VEICULO:
file_data = pd.read_json(file)
self._feed_tabela_veiculo_json(file_data)
elif data_type == FileType.TRECHOS_ITINERARIOS:
file_data = pd.read_json(file)
self._feed_trechos_itinerarios_json(file_data)
elif data_type == FileType.VEICULOS:
file_data = | pd.read_json(file, lines=True) | pandas.read_json |
#--------------------------------------------------------------- Imports
from dotenv import load_dotenv
import alpaca_trade_api as tradeapi
import os
from pathlib import Path
import string
import pandas as pd
import numpy as np
import seaborn as sns
import panel as pn
from panel.interact import interact, interactive, fixed, interact_manual
from panel import widgets
import plotly.graph_objects as go
from plotly.subplots import make_subplots
import plotly.express as px
pn.extension('plotly')
from pytrends.request import TrendReq
#--------------------------------------------------------------- Environment
# Loads .env
load_dotenv()
# Sets Alpaca API key and secret
alpaca_key = os.getenv('ALPACA_API_KEY')
alpaca_secret = os.getenv('ALPACA_API_SECRET')
# Creates the Alpaca API object
alpaca = tradeapi.REST(alpaca_key, alpaca_secret, api_version = "v2")
timeframe = "1D"
start = pd.Timestamp('2016-05-26', tz = 'US/Pacific').isoformat()
end = pd.Timestamp('2021-06-6', tz = 'US/Pacific').isoformat()
#--------------------------------------------------------------- Global Variables
pytrend = TrendReq()
sectors = [
'Communications',
'Consumer Discretionary',
'Consumer Staples',
'Energy',
'Financial',
'Healthcare',
'Industrial',
'Information Technology',
'Materials',
'Real Estate',
'Utilities'
]
beta = ['Min', 'Max', 'Median', 'Mutual Fund']
z_field = ['Close', 'Volume']
sector_tickers = {
'Communications':
{'Min': 'VZ', 'Max': 'LYV', 'Median': 'TMUS', 'Mutual Fund': 'VOX'},
'Consumer Discretionary':
{'Min': 'NVR', 'Max': 'F', 'Median': 'HLT', 'Mutual Fund': 'VCR'},
'Consumer Staples':
{'Min': 'CLX', 'Max': 'SYY', 'Median': 'PM', 'Mutual Fund': 'VDC'},
'Energy':
{'Min': 'COG', 'Max': 'OXY', 'Median': 'SLB', 'Mutual Fund': 'VDE'},
'Financial':
{'Min': 'CBOE', 'Max': 'LNC', 'Median': 'BAC', 'Mutual Fund': 'VFH'},
'Healthcare':
{'Min': 'DGX', 'Max': 'ALGN', 'Median': 'CAH', 'Mutual Fund': 'VHT'},
'Industrial':
{'Min': 'DGX', 'Max': 'TDG', 'Median': 'DE', 'Mutual Fund': 'VIS'},
'Information Technology':
{'Min': 'ORCL', 'Max': 'ENPH', 'Median': 'NTAP', 'Mutual Fund': 'VGT'},
'Materials':
{'Min': 'NEM', 'Max': 'FCX', 'Median': 'AVY', 'Mutual Fund': 'VAW'},
'Real Estate':
{'Min': 'PSA', 'Max': 'SPG', 'Median': 'UDR', 'Mutual Fund': 'VNQ'},
'Utilities':
{'Min': 'ED', 'Max': 'AES', 'Median': 'SRE', 'Mutual Fund': 'VPU'}
}
member_picks = {
'Boomer': ['VDC', 'VNQ', 'VOX', 'VAW'],
'Stonks': ['GME', 'AMC', 'PSLV', 'BB'],
'Pro Gamer': ['AAPL', 'TSLA', 'AMC', 'WMT'],
'Real American': ['LMT', 'TAP', 'PM', 'HAL']
}
#--------------------------------------------------------------- Functions
# Generates Correlation Heatmap of Sector Mutual Funds & Index
def df_to_plotly(df):
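# Convert a DataFrame into the dict layout expected by go.Heatmap: z matrix plus x/y axis labels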
return {'z': df.values.tolist(),
'x': df.columns.tolist(),
'y': df.index.tolist()}
@interact(Beta = beta)
def heatmap(Beta):
df = pd.DataFrame()
sp_file = Path('../Data/SP500.csv')
sp_df = pd.read_csv(sp_file, infer_datetime_format=True, parse_dates=True, index_col='Date')
df['SP500'] = sp_df['Close']
for k, v in sector_tickers.items():
ticker = sector_tickers[k][Beta]
file = Path('../Data/{}.csv'.format(ticker))
ticker_df = pd.read_csv(file, infer_datetime_format=True, parse_dates=True, index_col='Date')
df[k] = ticker_df['Close']
df = df.pct_change()
df.dropna(inplace = True)
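# Correlate daily returns (percent changes) rather than raw price levels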
corr = df.corr()
fig = go.Figure(data=go.Heatmap(
df_to_plotly(corr),
colorscale='blues'))
fig.update_layout(title = 'Heatmap',width=1000, height=500)
return fig
# Generates Candlestick Chart of Sector Ticker
@interact(Sector = sectors, Beta = beta)
def candlestick(Sector, Beta):
ticker = sector_tickers[Sector][Beta]
file = Path('../Data/{}.csv'.format(ticker))
df = pd.read_csv(file, infer_datetime_format=True, parse_dates=True, index_col='Date')
fig = go.Figure(data=[go.Candlestick(
x = df.index,
open = df['Open'],
high = df['High'],
low = df['Low'],
close = df['Close']
)])
fig.update_layout(title = ticker, width=1000, height=500)
return fig
# Generates Comparison Line Graph of Sector Ticker & SPY
@interact(Sector = sectors, Beta = beta)
def v_spy(Sector, Beta):
ticker = sector_tickers[Sector][Beta]
file = Path('../Data/{}.csv'.format(ticker))
df = pd.read_csv(file, infer_datetime_format=True, parse_dates=True, index_col='Date')
spy = pd.read_csv('../Data/SPY.csv', infer_datetime_format=True, parse_dates=True, index_col='Date')
fig = make_subplots()
trace1 = go.Scatter(
x = df.index,
y = df['Close'],
mode = 'lines',
name = ticker)
trace2 = go.Scatter(
x = spy.index,
y = spy['Close'],
mode = 'lines',
name = 'SPY')
fig.add_trace(trace1)
fig.add_trace(trace2)
fig.update_yaxes(range=[0, 700])
fig.update_layout(title = ticker + " versus SPY", width=1000, height=500)
return fig
# Generates Comparison Line Graph of Sector Ticker and its Google Search Interest
@interact(Sector = sectors, Beta = beta, Column = z_field)
def trend(Sector, Beta, Column):
ticker = sector_tickers[Sector][Beta]
file = Path('../Data/{}.csv'.format(ticker))
pytrend.build_payload(kw_list=[ticker], timeframe='today 5-y')
trend_df = pytrend.interest_over_time().rename_axis('Date')
trend_df.index = pd.to_datetime(trend_df.index)
df = pd.read_csv(file, infer_datetime_format=True, parse_dates=True, index_col='Date')
overlay = pd.merge(trend_df, df[Column], how = 'outer', left_index = True, right_index=True)
overlay = overlay.loc['2020-06-05':]
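# Google Trends values are coarser than daily prices (typically weekly for a 5-year window), so forward-fill to align them with trading days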
overlay.fillna(method = 'ffill', inplace = True)
fig = make_subplots(specs=[[{"secondary_y": True}]])
trace1 = go.Scatter(
x = overlay.index,
y = overlay[Column],
mode = 'lines',
name = Column)
trace2 = go.Scatter(
x = overlay.index,
y = overlay[ticker],
mode = 'lines',
name = 'Search Interest')
fig.add_trace(trace1, secondary_y=False)
fig.add_trace(trace2, secondary_y=True)
fig.update_yaxes(range=[overlay[Column].min()-(overlay[Column].std()*.2),overlay[Column].max()+(overlay[Column].std()*.2)], secondary_y=False)
fig.update_yaxes(range=[0,100], secondary_y=True)
fig.update_layout(title = ticker + " Closing Price vs Search Interest", width=1000, height=500)
return fig
# Builds Portfolio from 3 Tickers via Alpaca API, Displays Returns
@interact(Stock_1 = 'GOOG', Amount_1 = (0, 10000), Stock_2 = 'MSFT', Amount_2 = (0, 10000), Stock_3 = 'GME', Amount_3 = (0, 10000))
def api_call(Stock_1, Amount_1, Stock_2, Amount_2, Stock_3, Amount_3):
x1 = Stock_1.upper()
x2 = Stock_2.upper()
x3 = Stock_3.upper()
tickers = [x1, x2, x3]
df = alpaca.get_barset(
tickers,
timeframe,
start = start,
end = end,
limit = 1000
).df
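# The barset DataFrame is column-indexed by (symbol, field), hence df[x1]['close'] below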
close = pd.DataFrame()
close[x1] = df[x1]['close']
close[x2] = df[x2]['close']
close[x3] = df[x3]['close']
portfolio_df = | pd.DataFrame() | pandas.DataFrame |
import pandas as pd
import numpy as np
from tqdm import tqdm
import matplotlib.pyplot as plt
import seaborn as sns
import shap
from sklearn.metrics import mean_squared_error
from sklearn.metrics import mean_absolute_error
from sklearn.metrics import r2_score
# from .utils import Boba_Utils as u
class Boba_Model_Diagnostics():
def __init__(self):
pass
def run_model_diagnostics(self, model, X_train, X_test, y_train, y_test, target):
self.get_model_stats(model, X_train, X_test, y_train, y_test, target)
self.plot_shap_imp(model,X_train)
self.plot_shap_bar(model,X_train)
self.residual_plot(model,X_test,y_test,target)
self.residual_density_plot(model,X_test,y_test,target)
self.identify_outliers(model, X_test, y_test,target)
self.residual_mean_plot(model,X_test,y_test,target)
self.residual_variance_plot(model,X_test,y_test,target)
self.PVA_plot(model,X_test,y_test,target)
self.inverse_PVA_plot(model,X_train,y_train,target)
self.estimates_by_var(model,X_train,y_train,target,'Age')
self.error_by_var(model,X_train,y_train,target,'Age')
self.volatility_by_var(model,X_train,y_train,target,'Age')
def get_model_stats(self, model, X_train, X_test, y_train, y_test, target):
train_pred = model.predict(X_train)
test_pred = model.predict(X_test)
test_RMSE = np.sqrt(mean_squared_error(y_test, test_pred))
test_R2 = model.score(X_test,y_test)
test_MAE = mean_absolute_error(y_test, test_pred)
train_RMSE = np.sqrt(mean_squared_error(y_train, train_pred))
train_R2 = model.score(X_train,y_train)
train_MAE = mean_absolute_error(y_train, train_pred)
df = pd.DataFrame(data = {'RMSE': np.round(train_RMSE,4),
'R^2': np.round(train_R2,4),
'MAE': np.round(train_MAE,4)}, index = ['train'])
df2 = pd.DataFrame(data = {'RMSE': np.round(test_RMSE,4),
'R^2': np.round(test_R2,4),
'MAE': np.round(test_MAE,4)}, index = ['test'])
print("Model Statistics for {}".format(target))
print('-'*40)
print(df)
print('-'*40)
print(df2)
print('-'*40)
def plot_shap_imp(self,model,X_train):
shap_values = shap.TreeExplainer(model).shap_values(X_train)
shap.summary_plot(shap_values, X_train)
plt.show()
def plot_shap_bar(self,model,X_train):
shap_values = shap.TreeExplainer(model).shap_values(X_train)
shap.summary_plot(shap_values, X_train, plot_type='bar')
plt.show()
def feature_imp(self,model,X_train,target):
sns.set_style('darkgrid')
names = X_train.columns
coef_df = pd.DataFrame({"Feature": names, "Importance": model.feature_importances_},
columns=["Feature", "Importance"])
coef_df = coef_df.sort_values('Importance',ascending=False)
coef_df
fig, ax = plt.subplots()
sns.barplot(x="Importance", y="Feature", data=coef_df.head(20),
label="Importance", color="b",orient='h')
plt.title("XGB Feature Importances for {}".format(target))
plt.show()
def residual_plot(self,model, X_test, y_test,target):
pred = model.predict(X_test)
residuals = pd.Series(pred,index=X_test.index) - pd.Series(y_test[target])
fig, ax = plt.subplots()
ax.scatter(pred, residuals)
ax.plot([pred.min(), pred.max()], [0, 0], 'k--', lw=4)
ax.set_xlabel('Predicted')
ax.set_ylabel('Residuals')
plt.title("Residual Plot for {}".format(target))
plt.show()
def residual_density_plot(self,model, X_test, y_test,target):
sns.set_style('darkgrid')
pred = model.predict(X_test)
residuals = pd.Series(pred,index=X_test.index) - pd.Series(y_test[target])
sns.distplot(residuals)
plt.title("Residual Density Plot for {}".format(target))
plt.show()
def residual_variance_plot(self, model, X_test, y_test,target):
try:
pred = model.predict(X_test)
residuals = pd.Series(pred,index=X_test.index) - pd.Series(y_test[target])
y_temp = y_test.copy()
y_temp['pred'] = pred
y_temp['residuals'] = residuals
res_var = y_temp.groupby(pd.qcut(y_temp[target], 10))['residuals'].std()
res_var.index = [1,2,3,4,5,6,7,8,9,10]
res_var = res_var.reset_index()
ax = sns.lineplot(x="index", y="residuals", data=res_var)
plt.title("Residual Variance plot for {}".format(target))
plt.xlabel("Prediction Decile")
plt.ylabel("Residual Variance")
plt.show()
except:
pass
def residual_mean_plot(self, model, X_test, y_test,target):
sns.set_style('darkgrid')
try:
pred = model.predict(X_test)
residuals = | pd.Series(pred,index=X_test.index) | pandas.Series |
# -*- coding: utf-8 -*-
'''
Analysis module for analysis of angle-dependence
Author:
<NAME>,
Max Planck Institute of Microstructure Physics, Halle
Weinberg 2
06120 Halle
<EMAIL>
'''
''' Input zone '''
# ____________________________________________________________________________
# SETTINGS
# Data
'''
"selectFileType"
How to select input files:
Mode 0: Select each file separately through UI
Mode 1: Select file that specifies all file locations
Mode 2: Give file locations file in code (need to know what you are doing)
'''
selectFileType = 2
'''
"analysisMode":
Requirements for different modes:
a) Lineshape analysis (frequency-dependence)
b) AMR calibration
c) Irf calibration
d) PHE and AHE calibration
Mode 0:
Plotting mode. Requires only angle-dependent data
Mode 1:
"c-free" fitting. V_amr is a fitting parameter and Vs and Va are fitted
simulatneously to ensure Vamr is the same for both fits.
Requirement: a)
Mode 2:
Quantitative fitting. Torques have quantitative meaning.
Requirements: a)-c)
Mode 3:
Semi-quantitative fitting with generalized Karimeddiny artifact description.
Requirements: a)-c)
Mode 4:
Semi-quantitative fitting with generalized Karimeddiny artifact description
in XX and XY direction.
Requirements: a)-d)
'''
analysisMode = 4
'''
"Vset_mode":
Only for analysisMode 4.
Specify which data to use for fitting.
0: Vsxx, Vaxx, Vsxy
1: Vsxx, Vaxx, Vaxy
'''
Vset_mode = 0
voltageMagnitude = 'mu' # V
flipSign = False
fit_phi_offset = False # Only implemented for the c-free mode
fit_comps_list = ['xyz'] # Select assumed torque components
assume_arts = True
norm_to = 'yFL' # Specify which torque component to normalize to (used in modes 1, 3 and 4).
plotPhiMode = 1 # 0: raw angle, 1: shifted angle
delta_phi = 45 # distance between angle tick values (deg)
plotDpi = 600
saveData = True
''' Input zone ends here. '''
# ____________________________________________________________________________
# CODE
import tkinter as tk
from tkinter import filedialog
import pandas as pd
import matplotlib.pyplot as plt
from files import File
from plots import GenPlot, BoxText
from helpers.file_handling import read_csv_Series
import numpy as np
import modules.stfmrAnglePlotFitting as apf
from modules.stfmrAnglePlotFittingCFree import angleDepFittingCFree, get_norm_torques
from modules.stfmrKarimeddinyFitting import V_Karimeddiny_fitting, get_norm_torques_karimed, calc_Ru
from modules.stfmrKarimeddinyHallFitting import V_Karimeddiny_Hall_fitting, get_norm_torques_karimed, calc_Ru
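# NOTE: the Hall-fitting import above re-imports get_norm_torques_karimed and calc_Ru, shadowing the versions imported from stfmrKarimeddinyFitting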
import stfmrHelpers.stfmrAnglePlotFitHelpers as aph
from units import rad2deg
from stfmrHelpers.stfmrAnglePlotUIHelper import get_ipFileLocationsFilesFromUI
if selectFileType == 0:
ipFileLocationsFiles = [get_ipFileLocationsFilesFromUI(analysisMode)]
elif selectFileType == 1:
root = tk.Tk()
root.withdraw()
ipFileLocationsFiles = [File(filedialog.askopenfilename(parent=root,
title='Choose .csv file with input files locations'))]
elif selectFileType == 2:
ipFileLocationsFiles = [
# File(r'D:\owncloud\0_Personal\ANALYSIS\Mn3SnN\ST-FMR\MA2959-2\220131\D1_0deg\02_angle-dependence\fittingOutput\angleDependence\MA2959-2-D1_angleDep_input_files.csv'),
# File(r'D:\owncloud\0_Personal\ANALYSIS\Mn3SnN\ST-FMR\MA2959-2\220131\D3_45deg\01_angle-dependence\fittingOutput\angleDependence\MA2959-2-D3_angleDep_input_files.csv'),
# File(r'D:\owncloud\0_Personal\ANALYSIS\Mn3SnN\ST-FMR\MA2960-2\220202\D1_0deg\003_angle-dependence\fittingOutput\angleDependence\MA2960-2-D1_angleDep_input_files.csv'),
# File(r'D:\owncloud\0_Personal\ANALYSIS\Mn3SnN\ST-FMR\MA2960-2\220203\D4_90deg\002_angle-dependence\pos_field\fittingOutput\angleDependence\MA2960-2-D4_angleDep_input_files.csv')
File(r'D:\owncloud\0_Personal\ANALYSIS\Mn3SnN\ST-FMR\MA2959-2\220131\D1_0deg\02_angle-dependence\fittingOutput\angleDependence\MA2959-2-D1_angleDep_input_files.csv')
]
else:
raise ValueError(f'Select files type "{selectFileType}" not defined')
inputFiles = []
ipFileLocations = []
for ipFileLocationsFile in ipFileLocationsFiles:
# Get input file locations
ipFileLocations = read_csv_Series(ipFileLocationsFile.fileDirName)
ipAngleDepFittingSummaryFile = File(ipFileLocations['angle dependence fitting summary'])
# Get input data
inputData = pd.read_csv(ipAngleDepFittingSummaryFile.fileDirName,index_col=False)
if analysisMode == 4:
# Get additional data from XY measurement
ipAngleDepFittingXYSummaryFile = File(ipFileLocations['angle dependence fitting summary transversal'])
inputDataXY = pd.read_csv(ipAngleDepFittingXYSummaryFile.fileDirName,index_col=False)
# Extract important collumns
if voltageMagnitude == 'mu':
y_label = 'V ($\mu$V)'
voltageDivider = 1e-6
if plotPhiMode == 0:
try:
x = inputData['Angle (deg)']
except:
try:
x = inputData['fieldAngle (deg)']
except:
raise ValueError
x_label = '$\phi$ (deg)'
Vs = inputData['Vsym (V)']
Vas = inputData['Vas (V)']
if analysisMode == 4:
Vsxx = Vs
Vaxx = Vas
Vsxy = inputDataXY['Vsym (V)']
Vaxy = inputDataXY['Vas (V)']
elif plotPhiMode == 1:
x = inputData.sort_values(by='fieldAngle (deg)')['fieldAngle (deg)']
x_label = '$\phi$ (deg)'
Vs = inputData.sort_values(by='fieldAngle (deg)')['Vsym (V)']
Vas = inputData.sort_values(by='fieldAngle (deg)')['Vas (V)']
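# NOTE: this plotPhiMode == 1 branch does not define Vsxx/Vaxx/Vsxy/Vaxy, which analysisMode 4 expects further below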
# Extract fixed parameters
I = float(inputData['Current (mA)'][0])
P = float(inputData['rf Power (dBm)'][0])
f = float(inputData['Frequency (GHz)'][0])
# Flip sign if defined
if flipSign == True:
Vs *= -1
Vas *= -1
# _________________________________________________________________________
# ANALYSIS MODE 0
if analysisMode == 0:
# Simple data plotting without fit
fig, ax = plt.subplots()
ax.scatter(x, Vs, label='Vs')
ax.scatter(x, Vas, label='Vas')
plt.plot(x, Vs)
plt.plot(x, Vas)
ax.set_xlabel(x_label)
ax.set_ylabel(y_label)
ax.legend()
ax.set_xticks(np.arange(0, 361, delta_phi))
ax.set_title('I = {} mA, f = {} GHz, P = {} dBm'.format(I, f, P))
outputFileSubdir = ipAngleDepFittingSummaryFile.fileDir + '/angleDependence/plot-only'
outputFile = File(outputFileSubdir, ipAngleDepFittingSummaryFile.fileNameWOExt + '_anglePlot.png')
outputFile.makeDirIfNotExist()
if saveData is True:
fig.savefig(outputFile.fileDirName, bbox_inches="tight", dpi=plotDpi)
# _________________________________________________________________________
# ANALYSIS MODE 1
elif analysisMode == 1:
''' c-free fitting '''
opFileDir = ipAngleDepFittingSummaryFile.fileDir + '/angleDependence/c-free'
opFileParams = File(opFileDir, 'fitparams_summary.csv')
opParamsSum = pd.DataFrame()
for fit_comps in fit_comps_list:
title = 'I = {} mA, f = {} GHz, P = {} dBm \nAssumed components: {}'.format(I, f, P, fit_comps)
phiDepPlt = GenPlot(title=title, xlabel=x_label, ylabel=y_label, dpi=plotDpi)
phiDepPlt.ax.set_xticks(np.arange(0, 361, delta_phi))
phiDepPlt.scatter(x, Vs/voltageDivider, label='Vs_data')
phiDepPlt.scatter(x, Vas/voltageDivider, label='Va_data')
x_plt = np.linspace(0, 360, 100)
cps = aph.get_cps(1, ipFileLocationsFile)
fitting_output = angleDepFittingCFree(x, x_plt, Vs, Vas, cps, fit_comps, fit_phi_offset, do_check_fit=False)
params, params_dict, Vs_fit, Vs_plt, Va_fit, Va_plt = fitting_output
torques, torques_norm = get_norm_torques(params, norm_to)
if not params_dict['Vamr_s'] == params_dict['Vamr_a']:
raise # They are forced to be the same
Vamr = params_dict['Vamr_s']
if not params_dict['phi0_s'] == params_dict['phi0_a']:
raise
Vamr = params_dict['Vamr_s']
phi0 = rad2deg(params_dict['phi0_s'])
phiDepPlt.plot(x_plt, Vs_plt/voltageDivider, label=f'Vs_fit_{fit_comps}')
phiDepPlt.plot(x_plt, Va_plt/voltageDivider, label=f'Va_fit_{fit_comps}')
box = BoxText(1.03, 1)
box.add_text('Fitted params:')
box.add_empty_line()
box.add_param('Vamr', Vamr, rep='e')
box.add_param('phi0', phi0)
for key, param in torques.items():
box.add_param(key, param)
for key, param in torques_norm.items():
box.add_param(key, param)
phiDepPlt.make_boxtext(box)
opFileFig = File(opFileDir, 'plt_'+fit_comps+'.png')
opFileFig.makeDirIfNotExist()
phiDepPlt.report(opFileFig.fileDir, opFileFig.fileName, saveData=True)
opParams = pd.Series(params_dict|torques_norm)
opParams['fit_comps'] = fit_comps
opParamsSum = opParamsSum.append(opParams, ignore_index=True)
opParamsSum = opParamsSum.set_index('fit_comps')
if saveData is True:
opParamsSum.to_csv(opFileParams.fileDirName, index=True)
# _________________________________________________________________________
# ANALYSIS MODE 2
elif analysisMode == 2:
''' Quantitative fitting of angle-dependent data '''
opFileDir = ipAngleDepFittingSummaryFile.fileDir + '/angleDependence/full-quantitative'
opFileParams = File(opFileDir, 'fitparams_summary.csv')
opParamsSum = pd.DataFrame()
for fit_comps in fit_comps_list:
fig, ax = plt.subplots()
ax.scatter(x, Vs/voltageDivider, label='Vs')
ax.scatter(x, Vas/voltageDivider, label='Vas')
x_plt = np.linspace(0, 360, 100)
cps = aph.get_cps(2, ipFileLocationsFile)
params, Vs_fit, Vs_plt, Va_fit, Va_plt = apf.opt_V_ana_full(fit_comps, x, Vs, Vas, x_plt, cps)
sotr = apf.get_sotr(params, cps) # spin torque ratios
def calc_r2(y, y_fit):
ss_res = np.sum((y - y_fit) ** 2) # residual sum of squares
ss_tot = np.sum((y - np.mean(y)) ** 2) # total sum of squares
return 1 - (ss_res / ss_tot) # r-squared (coefficient of determination)
Vs_r2 = calc_r2(Vs, Vs_fit)
Va_r2 = calc_r2(Vas, Va_fit)
ax.plot(x_plt, Vs_plt/voltageDivider, label='Vs fit ('+fit_comps+', $R^2=${:.3f})'.format(Vs_r2))
ax.plot(x_plt, Va_plt/voltageDivider, label='Vas fit ('+fit_comps+', $R^2=${:.3f})'.format(Va_r2))
# if norm_torques == True:
# params_norm = aph.norm_torques(params)
# boxtext = 'Torques (norm): \n\n'
# params = params_norm
# else:
boxtext = 'Torques: \n\n'
for key in params:
comp = key.split('_')[1]
boxtext += comp
# if norm_torques is True:
# boxtext += ' = {:.2f}'.format(params[key])
# else:
boxtext += ' = {:.1f} $\mu$T/rad'.format(params[key]*1e6)
boxtext += '\n'
boxtext = boxtext[:-1]
props = dict(boxstyle='round', facecolor='white', alpha=0.5)
ax.text(1.03, 1, boxtext, verticalalignment='top',
transform=ax.transAxes, bbox=props, fontsize=10)
ax.set_title('I = {} mA, f = {} GHz, P = {} dBm \nAssumed components: {}'.format(I, f, P, fit_comps))
ax.set_xlabel(x_label)
ax.set_ylabel(y_label)
ax.legend()
ax.set_xticks(np.arange(0, 361, delta_phi))
opFileFig = File(opFileDir, 'plt_'+fit_comps+'.png')
opFileFig.makeDirIfNotExist()
if saveData is True:
fig.savefig(opFileFig.fileDirName, bbox_inches="tight", dpi=plotDpi)
opFileCurves = File(opFileDir,'curve_'+fit_comps+'.csv')
opFileCurves.makeDirIfNotExist()
opCurves = pd.DataFrame()
opCurves['phi_plt (deg)'] = x_plt
opCurves['Vs_plt (muV)'] = Vs_plt
opCurves['Va_plt (muV)'] = Va_plt
if saveData is True:
opCurves.to_csv(opFileCurves.fileDirName, index=False)
opParams = pd.Series({**params, **sotr})
opParams['fit_comps'] = fit_comps
opParams['Vs_r2'] = Vs_r2
opParams['Va_r2'] = Va_r2
opParamsSum = opParamsSum.append(opParams, ignore_index=True)
opParamsSum = opParamsSum.set_index('fit_comps')
if saveData is True:
opParamsSum.to_csv(opFileParams.fileDirName, index=True)
# _________________________________________________________________________
# ANALYSIS MODE 3
elif analysisMode == 3:
''' Semi-quantitative fitting with generalized Karimeddiny artifact description '''
opFileDir = ipAngleDepFittingSummaryFile.fileDir + '/angleDependence/karimeddiny'
opFileParams = File(opFileDir, 'fitparams_summary.csv')
opParamsSum = pd.DataFrame()
for fit_comps in fit_comps_list:
title = 'I = {} mA, f = {} GHz, P = {} dBm \nAssumed components: {}, assume artifacts: {}'.format(I, f, P, fit_comps, assume_arts)
phiDepPlt = GenPlot(title=title, xlabel=x_label, ylabel=y_label, dpi=plotDpi)
phiDepPlt.ax.set_xticks(np.arange(0, 361, delta_phi))
phiDepPlt.scatter(x, Vs/voltageDivider, label='Vs_data')
phiDepPlt.scatter(x, Vas/voltageDivider, label='Va_data')
x_plt = np.linspace(0, 360, 100)
# Get constant parameters
cps = aph.get_cps(3, ipFileLocationsFile)
# Fit
fitting_output = V_Karimeddiny_fitting(fit_comps, x, Vs, Vas, x_plt, cps, assume_arts=assume_arts)
params, params_dict, Vs_fit, Vs_plt, Va_fit, Va_plt = fitting_output
torques, torques_norm = get_norm_torques_karimed(params, norm_to)
# Fit quality:
Ru_s = calc_Ru(Vs, Vs_fit)
Ru_a = calc_Ru(Vas, Va_fit)
# Plot
phiDepPlt.plot(x_plt, Vs_plt/voltageDivider, label=f'Vs_fit_{fit_comps}')
phiDepPlt.plot(x_plt, Va_plt/voltageDivider, label=f'Va_fit_{fit_comps}')
box = BoxText(1.03, 1)
box.add_text('Fitted params:')
box.add_empty_line()
box.add_param('Tart', params_dict['Tart'], rep='e')
for key, param in torques.items():
box.add_param(key, param)
for key, param in torques_norm.items():
box.add_param(key, param)
box.add_empty_line()
box.add_param('Ru_s', Ru_s*100, unit=' %', rep='f')
box.add_param('Ru_a', Ru_a*100, unit=' %', rep='f')
phiDepPlt.make_boxtext(box)
opFileFig = File(opFileDir, f'plt_{fit_comps}_arts={assume_arts}.png')
opFileFig.makeDirIfNotExist()
phiDepPlt.report(opFileFig.fileDir, opFileFig.fileName, saveData=True)
opParams = pd.Series(params_dict|torques_norm)
opParams['fit_comps'] = fit_comps
opParams['assume_arts'] = assume_arts
opParams['Ru_s'] = Ru_s
opParams['Ru_a'] = Ru_a
opParamsSum = opParamsSum.append(opParams, ignore_index=True)
opParamsSum = opParamsSum.set_index('fit_comps')
if saveData is True:
opParamsSum.to_csv(opFileParams.fileDirName, index=True)
# _________________________________________________________________________
# ANALYSIS MODE 4
elif analysisMode == 4:
''' Semi-quantitative fitting with generalized Karimeddiny artifact description in XX and XY direction '''
opFileDir = ipAngleDepFittingSummaryFile.fileDir + '/angleDependence/karimeddiny'
opFileParams = File(opFileDir, 'fitparams_summary.csv')
opParamsSum = pd.DataFrame()
for fit_comps in fit_comps_list:
title = 'I = {} mA, f = {} GHz, P = {} dBm \nAssumed components: {}, assume artifacts: {}'.format(I, f, P, fit_comps, assume_arts)
phiDepPlt = GenPlot(mode='vstack-share-x', title=title, xlabel=x_label, dpi=plotDpi)
phiDepPlt.ax[0].set_xticks(np.arange(0, 361, delta_phi))
phiDepPlt.scatter(x, Vsxx/voltageDivider, axis=0, label='Vsxx_data')
phiDepPlt.scatter(x, Vaxx/voltageDivider, axis=0, label='Vaxx_data')
phiDepPlt.scatter(x, Vsxy/voltageDivider, axis=1, label='Vsxy_data')
phiDepPlt.scatter(x, Vaxy/voltageDivider, axis=1, label='Vaxy_data')
x_plt = np.linspace(0, 360, 100)
# Get constant parameters
cps = aph.get_cps(4, ipFileLocationsFile)
# Fit
fitting_output = V_Karimeddiny_Hall_fitting(fit_comps, x, Vset_mode, Vs, Vas, x_plt, cps, assume_arts=assume_arts)
params, params_dict, Vsxx_fit, Vsxx_plt, Vaxx_fit, Vaxx_plt, Vsxy_fit, Vsxy_plt, Vaxy_fit, Vaxy_plt = fitting_output
torques, torques_norm = get_norm_torques_karimed(params, norm_to)
# Fit quality:
Ru_sxx = calc_Ru(Vsxx, Vsxx_fit)
Ru_axx = calc_Ru(Vaxx, Vaxx_fit)
Ru_sxy = calc_Ru(Vsxy, Vsxy_fit)
Ru_axy = calc_Ru(Vaxy, Vaxy_fit)
# Plot
phiDepPlt.plot(x_plt, Vsxx_plt/voltageDivider, axis=0, label=f'Vsxx_fit_{fit_comps}')
phiDepPlt.plot(x_plt, Vaxx_plt/voltageDivider, axis=0, label=f'Vaxx_fit_{fit_comps}')
phiDepPlt.plot(x_plt, Vsxy_plt/voltageDivider, axis=1, label=f'Vsxy_fit_{fit_comps}')
phiDepPlt.plot(x_plt, Vaxy_plt/voltageDivider, axis=1, label=f'Vaxy_fit_{fit_comps}')
box = BoxText(1.03, 1)
box.add_text('Fitted params:')
box.add_empty_line()
box.add_param('Tart', params_dict['Tart'], rep='e')
for key, param in torques.items():
box.add_param(key, param)
for key, param in torques_norm.items():
box.add_param(key, param)
box.add_empty_line()
box.add_param('Ru_sxx', Ru_sxx*100, unit=' %', rep='f')
box.add_param('Ru_axx', Ru_axx*100, unit=' %', rep='f')
box.add_param('Ru_sxy', Ru_sxy*100, unit=' %', rep='f')
box.add_param('Ru_axy', Ru_axy*100, unit=' %', rep='f')
phiDepPlt.make_boxtext(box)
opFileFig = File(opFileDir, f'plt_{fit_comps}_arts={assume_arts}.png')
opFileFig.makeDirIfNotExist()
phiDepPlt.report(opFileFig.fileDir, opFileFig.fileName, saveData=True)
opParams = | pd.Series(params_dict|torques_norm) | pandas.Series |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
This script provides functions that read the preprocessed NOAA-GridSat-B1
dataset and transform it with a pre-trained PCA model.
The PCA model is pre-trained with IncrementalPCA from sklearn, and stored with joblib.
'''
import numpy as np
import pandas as pd
import os, argparse, logging
from sklearn.decomposition import PCA, IncrementalPCA
import joblib, csv
__author__ = "<NAME>"
__copyright__ = "Copyright 2019~2022, DataQualia Lab Co. Ltd."
__credits__ = ["<NAME>"]
__license__ = "Apache License 2.0"
__version__ = "0.1.0"
__maintainer__ = "<NAME>"
__email__ = "<EMAIL>"
__status__ = "development"
__date__ = '2021-06-08'
# Utility functions
def list_preprocessed_gridsatb1_files(dir, suffix='.npy', to_remove=['.npy']):
''' To scan through the sapecified dir and get the corresponding file with suffix. '''
import os
import pandas as pd
xfiles = []
for root, dirs, files in os.walk(dir, followlinks=True): # Loop through the directory
for fn in files:
if fn.endswith(suffix): # Filter files with suffix
timestamp = fn
for s in to_remove: # Removing prefix and suffix to get time-stamp
timestamp = timestamp.replace(s,'')
xfiles.append({'timestamp':timestamp, 'xuri':os.path.join(root, fn)})
return(pd.DataFrame(xfiles).sort_values('timestamp').reset_index(drop=True))
# Binary reader
def read_preprocessed_gridsatb1(furi):
import numpy as np
return(np.load(furi))
def read_multiple_preprocessed_noaagridsatb1(flist, flatten=False):
''' This method reads in a list of NOAA-GridSat-B1 images and returns a numpy array. '''
import numpy as np
data = []
for f in flist:
tmp = np.load(f)
if flatten:
tmp = tmp.flatten()
data.append(tmp)
return(np.array(data))
# Incremental PCA
def transform_ipca_batch(ipca, finfo, batch_size=1024):
''' Use pretrained PCA to transform the data batch. '''
# Loop through finfo
nSample = len(finfo)
batch_start = 0
batch_end = batch_size
batch_count = 0
# Process the first batch
proj = None
#
while batch_start < nSample:
logging.debug('Starting batch: '+str(batch_count))
# Check bound
limit = min(batch_end, nSample)
# Read batch data
data = read_multiple_preprocessed_noaagridsatb1(finfo['xuri'].iloc[batch_start:limit], flatten=True)
logging.debug(data.shape)
# increment
batch_start = limit
batch_end = limit + batch_size
batch_count += 1
# Partial fit with batch data
if proj is None:
proj = ipca.transform(data)
else:
proj = np.vstack((proj,ipca.transform(data)))
return(proj)
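# Example usage (hypothetical paths), kept as a comment so the module stays import-safe:
#   ipca = joblib.load('ipca_noaagridsatb1.joblib')
#   finfo = list_preprocessed_gridsatb1_files('/data/gridsatb1_npy')
#   proj = transform_ipca_batch(ipca, finfo, batch_size=512)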
#-----------------------------------------------------------------------
def main():
# Configure Argument Parser
parser = argparse.ArgumentParser(description='Transform preprocessed NOAA-GridSat-B1 data with a pre-trained incremental PCA model.')
parser.add_argument('--datapath', '-i', help='the directory containing preprocessed NOAA-GridSat-B1 data in npy format.')
parser.add_argument('--output', '-o', help='the prefix of output files.')
parser.add_argument('--logfile', '-l', default=None, help='the log file.')
parser.add_argument('--model_file', '-m', help='the joblib file storing the pre-trained model.')
parser.add_argument('--batch_size', '-b', default=1024, type=int, help='the batch size.')
args = parser.parse_args()
# Set up logging
if not args.logfile is None:
logging.basicConfig(level=logging.DEBUG, filename=args.logfile, filemode='w')
else:
logging.basicConfig(level=logging.DEBUG)
logging.debug(args)
# Get data files
datainfo = list_preprocessed_gridsatb1_files(args.datapath)
logging.info('Scanning data files: '+str(datainfo.shape[0]))
# Load Pre-trained PCA Model
ipca = joblib.load(args.model_file)
logging.info("Loading pre-trained PCA model from: "+ str(args.model_file)+", model dimension: "+str(ipca.components_.shape))
# Preparing output
proj = transform_ipca_batch(ipca, datainfo, batch_size=args.batch_size)
projections = | pd.DataFrame(proj) | pandas.DataFrame |
# -*- coding: utf-8 -*-
# ---
# jupyter:
# jupytext:
# formats: ipynb,py
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.4.2
# kernelspec:
# display_name: Python [conda env:PROJ_irox_oer] *
# language: python
# name: conda-env-PROJ_irox_oer-py
# ---
# + jupyter={"source_hidden": true}
import os
print(os.getcwd())
import sys
import copy
import shutil
from pathlib import Path
from contextlib import contextmanager
# import pickle; import os
import pickle
import json
import pandas as pd
import numpy as np
from ase import io
from ase.visualize import view
import plotly.graph_objects as go
from pymatgen.io.ase import AseAtomsAdaptor
from pymatgen.analysis import local_env
# #########################################################
from misc_modules.pandas_methods import drop_columns
from methods import read_magmom_comp_data
import os
import sys
from IPython.display import display
import pandas as pd
| pd.set_option("display.max_columns", None) | pandas.set_option |
""" ***************************************************************************
# * File Description: *
# * Using data downloaded from Yahoo Finace, we construct visual tools *
# * to confirm stock market trends. *
# * *
# * The contents of this script are: *
# * 1. Importing Libraries *
# * 2. Helper Functions: Use to read data *
# * 3. Read data *
# * 4a. Visualize Data: Line Plot *
# * 4b. Visualize Data: Prepare data for Candlestick Chart *
# * 4c. Visualize Data: Make Candlestick Chart *
# * 5a. Simple Moving Average *
# * 5b. Exponential Moving Average *
# * 5c. Popular SMA and EMA *
# * 5d. Candlesticks with Moving Averages *
# * 6a. Candlestick charts, Moving Averages, and Volume: Crunching the numbers*
# * 6b. Candlestick charts, Moving Averages, and Volume: Figure *
# * *
# * --------------------------------------------------------------------------*
# * AUTHORS(S): <NAME> *
# * --------------------------------------------------------------------------*
# * DATE CREATED: Sept 2, 2019 *
# * --------------------------------------------------------------------------*
# * NOTES: *
# * ************************************************************************"""
###############################################################################
# 1. Importing Libraries #
###############################################################################
# For reading, processing, and visualizing data
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
import matplotlib.patches as mpatches
import matplotlib.dates as mdates
import datetime
# To make candlestick charts
from mpl_finance import candlestick_ohlc
# For reading files
from os import listdir
###############################################################################
# 2. Helper Functions: Use to read data #
###############################################################################
def get_data(data_prefix, start_date, end_date):
"""Reads all the files located in the data_prefix directory.
Parameters
----------
data_prefix : str
String object describing path to where the data is located.
start_date : str
String object describing the first date to consider
end_date : str
String object describing the last date to consider
Returns
----------
data: dict
Dictionary object where the keys of the dictionary are the file
names (without the file extension) and each entry a Pandas
Dataframe object that contains the data of the file denoted by
the key.
Example
-------
To read the data between 2014-01-01 and 2018-01-01 contained in the files
stored in the folder specified by data_prefix:
# Path to directory where the data is saved
data_prefix = "C:\\Users\\Pancho\\Documents\\StockMarketData"
# Earliest and latest date to consider
start_date = "2014-01-01" %"Year-Month-Day"
end_date = "2018-01-01"
# Read data
data = get_data(data_prefix, start_date, end_date)
Author Information
------------------
<NAME>
LinkedIn: <https://www.linkedin.com/in/frank-ceballos/>
Date: August, 24, 2019
"""
# Get file names in directory
file_names = listdir(data_prefix)
# Initiliaze data directory that will contain all the data.
data = {}
# Get data
for file_name in file_names:
# Read data
df = pd.read_csv(data_prefix + file_name)
# Set mask to select dates
mask = (df["Date"] > start_date) & (df["Date"] <= end_date)
# Select data between start and end date
df = df.loc[mask]
# Get timestamps
dates = [ | pd.Timestamp(date) | pandas.Timestamp |
# Import packages
import pandas as pd
from datetime import datetime
import numpy as np
import matplotlib.pyplot as plt
temp1 = pd.read_csv('/home/pi/LABS/IOTFinal/TempData1.csv', skiprows=0)
temp1.head
temp1.info()
#---------------------------------------------------------------------------------------------
#To fix the date problem, divide the dates
df1=temp1.loc[0:1143,:]
df2=temp1.loc[1144:,:]
#-------------------------------------------------------------------------------------------
#converting the dates to python data types
df1['Date'] = pd.to_datetime(df1['Date'])
df2['Date'] = pd.to_datetime(df2['Date'])
#---------------------------------------------------------------------------------------
#Appending the two datasets
dataset=df1.append(df2)
#--------------------------------------------------------------------------------------
#divide into train and validation set
#2344 - 2000-01-01
#2487 - 2011-12-01
#2488 - 2012-01-01
#2507 - 2013-08-01
#train = dataset.loc[2344:2487:][:int(1*(len(dataset.loc[2344:2487:])))]
#test = dataset.loc[2488:2507:][int(1*(len(dataset.loc[2488:2507:]))):]
train=dataset.loc[2344:2487,:]
test=dataset.loc[2488:,:]
#-----------------------------------------------------------------------------------------------------------------
#building the model
from pyramid import auto_arima
model = auto_arima(train['Avg_Temp'], trace=True, error_action='ignore', suppress_warnings=True)
#-------------------------------------------------------------------------------------------------------
forecast = model.predict(n_periods=len(test['Avg_Temp']))
forecast = | pd.DataFrame(forecast,index = test['Avg_Temp'].index,columns=['Prediction']) | pandas.DataFrame |
import datetime
import pandas as pd
import pandas.util.testing as pdt
from chmp.ds import (
timeshift_index,
to_start_of_day,
to_start_of_week,
to_start_of_year,
to_time_in_day,
to_time_in_week,
to_time_in_year,
)
def test_to_date():
s = pd.Series(pd.to_datetime(["2011-01-08 11:23:51", "2018-09-11 13:20:05"]))
expected = pd.Series(pd.to_datetime(["2011-01-08", "2018-09-11"]))
actual = to_start_of_day(s)
pdt.assert_series_equal(actual, expected)
def test_time_in_day():
s = pd.Series(pd.to_datetime(["2011-01-08 11:23:51", "2018-09-11 13:20:05"]))
expected = pd.Series(pd.to_timedelta(["11h 23m 51s", "13h 20m 5s"]))
actual = to_time_in_day(s)
pdt.assert_series_equal(actual, expected)
def test_to_start_of_week():
s = pd.Series( | pd.to_datetime(["2011-01-08 11:23:51", "2018-09-11 13:20:05"]) | pandas.to_datetime |
import re
import unicodedata
import pandas as pd
from collections import Counter
from .table import BaseTable
class DataFrameTable(BaseTable):
"""
This class represents a jupytab-ready table that exposes a Pandas DataFrame.
"""
def __init__(self, alias, dataframe=None, refresh_method=None, include_index=False):
"""
alias -- Descriptive name of the table, that will be displayed in Tableau.
dataframe -- Pandas DataFrame to be accessed from Tableau (may be None if a
refresh_method is provided).
refresh_method -- Optional method callback that will be called every time
Tableau needs to access the data (for instance when the DataSource is refreshed).
It takes no argument and must return a DataFrame with the same column layout
(schema) as the original DataFrame (if any).
include_index -- Add Index as column(s) in the output data to Tableau.
"""
BaseTable.__init__(self, alias=alias)
self._dataframe = dataframe
self._refresh_method = refresh_method
self._include_index = include_index
self._index_separator = '_'
self.types_mapping = {
'object': 'string',
'int64': 'int',
'float64': 'float',
'datetime64[ns]': 'datetime',
'bool': 'bool'
}
@staticmethod
def clean_column_name(col):
"""Remove all forbidden characters from column names"""
# Try to preserve accented characters
cleaned_col = unicodedata.normalize('NFD', str(col)) \
.encode('ascii', 'ignore') \
.decode("utf-8")
# Remove all non matching chars for Tableau WDC
cleaned_col = re.sub(r'[^A-Za-z0-9_]+', '_', cleaned_col)
return cleaned_col
@staticmethod
def replace_duplicated_column_name(cols):
"""Replace duplicated columns names"""
cols_count_dict = dict(Counter(cols))
# Filter unique items
cols_count_dict = {key: value for (key, value) in cols_count_dict.items() if value > 1}
unique_cols = list()
for col in reversed(cols):
idx = cols_count_dict.get(col, 0)
unique_cols.insert(0, col if idx == 0 else col + '_' + str(idx))
cols_count_dict[col] = idx - 1
return unique_cols
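# e.g. replace_duplicated_column_name(['a', 'b', 'a']) -> ['a_1', 'b', 'a_2']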
def get_schema(self, key):
self.refresh(only_if_undefined=True)
columns = [
{
'id': '.'.join(filter(None, key)) if isinstance(key, tuple) else key,
'dataType':
self.types_mapping[str(value)] if str(value) in self.types_mapping else 'string'
}
for key, value in (self._prepare_dataframe()).dtypes.items()
]
return {
'id': key,
'alias': self._alias,
'columns': columns
}
def _prepare_dataframe(self, slice_from=None, slice_to=None):
# Guarantee valid range for slicing
if slice_from is None or slice_from < 0:
slice_from = 0
if slice_to is None:
slice_to = len(self._dataframe)
if slice_from > slice_to:
raise IndexError(f"From ({slice_from}) can not be greater than To ({slice_to})")
# Apply slicing to dataframe
if slice_from < len(self._dataframe):
# If slicing is in dataframe range
output_df = self._dataframe.iloc[slice_from: min(slice_to, len(self._dataframe))]
else:
# If slicing is outside dataframe range, return an empty dataframe
output_df = | pd.DataFrame(columns=self._dataframe.columns) | pandas.DataFrame |
from typing import Iterable, Dict, Any, Union, Type, T
import os
import json
import torch
import pandas as pd
import vae_lm.nn.utils as util
import torch.distributed as dist
import vae_lm.training.ddp as ddp
import vae_lm.training.utils as training_util
from loguru import logger
from abc import ABC, abstractmethod
from torch.nn.parallel import DistributedDataParallel
from torch.nn.utils import clip_grad_norm_, clip_grad_value_
# Torch NLP Utils
from torch_nlp_utils.common import Registrable
from torch_nlp_utils.data import DataIterator, CollateBatch
from torch_nlp_utils.callbacks import EarlyStopping, SaveCheckpoint
# Modules
from vae_lm.nn.optimizer import Optimizer
from vae_lm.nn.lr_scheduler import LRScheduler
from vae_lm.models.base import VAELmModel
class Trainer(ABC, Registrable):
def __init__(
self,
model: VAELmModel,
epochs: int,
serialization_dir: str,
distributed: bool = False,
cuda_device: Union[int, torch.device] = -1,
local_rank: int = 0,
world_size: int = 1,
patience: int = None,
grad_norm: float = 5.0,
grad_clip: float = 2.0,
validation_metric: str = "-loss",
num_checkpoints: int = None,
sampling_parameters: Dict[str, Any] = None,
) -> None:
self._model = model
self._epochs = epochs
self._rank = local_rank
self._is_master = self._rank == 0
self._world_size = world_size
self._distributed = distributed
self._cuda_device = util.int_to_device(cuda_device)
self._serialization_dir = serialization_dir
if self._distributed:
self._pytorch_model = DistributedDataParallel(
module=model,
device_ids=(
None if self._cuda_device == torch.device("cpu") else [self._cuda_device]
),
find_unused_parameters=True,
)
else:
self._pytorch_model = model
if patience is not None and patience > 0:
self._metric_patience = EarlyStopping(patience=patience, metric=validation_metric)
else:
self._metric_patience = None
# Create Checkpointer saver only on master
if self._is_master:
self._save_checkpoint = SaveCheckpoint(
directory=os.path.join(serialization_dir, "models"),
keep_num_checkpoints=num_checkpoints,
)
self._grad_norm = grad_norm
self._grad_clip = grad_clip
self._sampling_parameters = sampling_parameters
@property
def cuda_device(self) -> int:
return self._cuda_device
@property
def serialization_dir(self) -> str:
return self._serialization_dir
def is_distributed(self) -> bool:
return self._distributed
@abstractmethod
def _train_batch(self, batch: CollateBatch) -> Dict[str, Any]:
pass
@abstractmethod
def _validate_batch(self, batch: CollateBatch) -> Dict[str, Any]:
pass
@abstractmethod
def _enrich_metrics(self, metrics: Dict[str, Any]) -> Dict[str, Any]:
pass
@abstractmethod
def _get_save_dict(self, **extra_params) -> Dict[str, Any]:
pass
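    # Note on the subclass contract (inferred from _run_epoch below): both
    # _train_batch and _validate_batch are expected to return a metrics dict
    # containing at least "batch-loss", together with a done_early flag.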
@ddp.on_epoch_end
def _run_epoch(
self, dataloader_tqdm: Iterable[CollateBatch], for_training: bool = True
) -> float:
num_batches = 0
total_loss = 0
batch_outputs = self._train_batch if for_training else self._validate_batch
for batch in dataloader_tqdm:
try:
metrics, done_early = batch_outputs(batch)
except Exception as error:
raise training_util.TorchBatchError(message=str(error), batch=batch.as_dict())
total_loss += metrics["batch-loss"]
num_batches += 1
if done_early:
break
if self._is_master:
metrics["loss"] = total_loss / num_batches
description = training_util.description_from_metrics(metrics)
dataloader_tqdm.set_description(description, refresh=False)
return total_loss / num_batches, done_early
def _fit(self, dataloader: DataIterator, is_train: bool = True) -> Dict[str, Any]:
dataloader_tqdm = util.tqdm_dataloader(dataloader, is_master=self._is_master)
epoch_loss = self._run_epoch(dataloader_tqdm, for_training=is_train)
# Let all workers finish their epoch before computing
# the final statistics for the epoch.
if self._distributed:
dist.barrier()
metrics = self._model.get_metrics(reset=True)
metrics["loss"] = epoch_loss
metrics = self._enrich_metrics(metrics)
return metrics
def train(
self,
train_dataloader: DataIterator,
validation_dataloader: DataIterator,
) -> Dict[str, float]:
for epoch in range(self._epochs):
# Train
self._pytorch_model.train()
logger.info("Training")
train_metrics = self._fit(train_dataloader)
# Log metrics only on master with run_on_rank_zero decorator
training_util.log_metrics(
mode_str="Training",
info={"epoch": epoch},
metrics=train_metrics,
)
# Validation
logger.info("Validation")
validation_metrics = self.evaluate(validation_dataloader, info={"epoch": epoch})
if self._metric_patience:
self._metric_patience(validation_metrics)
# Save model state only on master
if self._is_master:
self._save_checkpoint(
validation_metrics,
is_best_so_far=self._metric_patience.improved if self._metric_patience else True,
save_dict=self._get_save_dict(**validation_metrics),
)
# Wait for master process to save new checkpoint
if self._distributed:
dist.barrier()
            if self._metric_patience is not None and self._metric_patience.should_stop:
logger.success("Patience reached. Stop training.")
logger.info(
"Best metrics: {}".format(
json.dumps(self._metric_patience.best_metrics, ensure_ascii=False, indent=2)
)
)
break
return self._metric_patience.best_metrics if self._metric_patience else validation_metrics
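    # Typical driver code (sketch, variable names assumed): build DataIterator
    # objects for the train and validation splits, then call
    # trainer.train(train_loader, valid_loader) and inspect the returned
    # best-metrics dict.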
@torch.no_grad()
def calc_mutual_info(self, dataloader: DataIterator) -> float:
self._pytorch_model.eval()
dataloader_tqdm = util.tqdm_dataloader(dataloader, is_master=self._is_master)
mi = 0
num_examples = 0
for batch in dataloader_tqdm:
# We only need src_tokens
src_tokens = batch.to_device(device=self._cuda_device, non_blocking=True)["src_tokens"]
mutual_info = self._model.calc_mutual_info(src_tokens).item()
mi += mutual_info
num_examples += 1
dataloader_tqdm.set_description(f"mutual-info: {mi / num_examples:.4f}", refresh=False)
return mi / num_examples
@torch.no_grad()
def evaluate(
self,
dataloader: DataIterator,
desc="Validation",
info: Dict[str, Union[float, int, str]] = None,
) -> Dict[str, float]:
self._pytorch_model.eval()
metrics = self._fit(dataloader, is_train=False)
# Calculate mutual info
metrics["mutual-info"] = self.calc_mutual_info(dataloader)
# Add samples from the prior if needed
if self._sampling_parameters is not None:
metrics["samples"] = self._construct_samples_dataframe()
# Log metrics only on master with run_on_rank_zero decorator
training_util.log_metrics(mode_str=desc, info=info, metrics=metrics)
        # Pop samples (if present), as we do not want to save them in the checkpoint
        metrics.pop("samples", None)
return metrics
def _construct_samples_dataframe(self) -> pd.DataFrame:
"""Construct DataFrame of samples from the prior."""
samples, samples_log_prob = self._model.sample(**self._sampling_parameters)
samples = self._model.make_output_human_readable(samples)
df_dict = {"texts": [], "log_probs": []}
for sample, log_prob in zip(samples["texts"], samples_log_prob.tolist()):
df_dict["texts"].extend(sample)
df_dict["log_probs"].extend([log_prob] * len(sample))
return | pd.DataFrame(df_dict) | pandas.DataFrame |
# Arithmetic tests for DataFrame/Series/Index/Array classes that should
# behave identically.
# Specifically for Period dtype
import operator
import numpy as np
import pytest
from pandas._libs.tslibs.period import IncompatibleFrequency
from pandas.errors import PerformanceWarning
import pandas as pd
from pandas import Period, PeriodIndex, Series, period_range
from pandas.core import ops
from pandas.core.arrays import TimedeltaArray
import pandas.util.testing as tm
from pandas.tseries.frequencies import to_offset
# ------------------------------------------------------------------
# Comparisons
class TestPeriodArrayLikeComparisons:
# Comparison tests for PeriodDtype vectors fully parametrized over
# DataFrame/Series/PeriodIndex/PeriodArray. Ideally all comparison
# tests will eventually end up here.
def test_compare_zerodim(self, box_with_array):
# GH#26689 make sure we unbox zero-dimensional arrays
xbox = box_with_array if box_with_array is not pd.Index else np.ndarray
pi = pd.period_range("2000", periods=4)
other = np.array(pi.to_numpy()[0])
pi = tm.box_expected(pi, box_with_array)
result = pi <= other
expected = np.array([True, False, False, False])
expected = tm.box_expected(expected, xbox)
tm.assert_equal(result, expected)
class TestPeriodIndexComparisons:
# TODO: parameterize over boxes
@pytest.mark.parametrize("other", ["2017", 2017])
def test_eq(self, other):
idx = PeriodIndex(["2017", "2017", "2018"], freq="D")
expected = np.array([True, True, False])
result = idx == other
tm.assert_numpy_array_equal(result, expected)
def test_pi_cmp_period(self):
idx = period_range("2007-01", periods=20, freq="M")
result = idx < idx[10]
exp = idx.values < idx.values[10]
tm.assert_numpy_array_equal(result, exp)
# TODO: moved from test_datetime64; de-duplicate with version below
def test_parr_cmp_period_scalar2(self, box_with_array):
xbox = box_with_array if box_with_array is not pd.Index else np.ndarray
pi = pd.period_range("2000-01-01", periods=10, freq="D")
val = Period("2000-01-04", freq="D")
expected = [x > val for x in pi]
ser = tm.box_expected(pi, box_with_array)
expected = tm.box_expected(expected, xbox)
result = ser > val
tm.assert_equal(result, expected)
val = pi[5]
result = ser > val
expected = [x > val for x in pi]
expected = tm.box_expected(expected, xbox)
tm.assert_equal(result, expected)
@pytest.mark.parametrize("freq", ["M", "2M", "3M"])
def test_parr_cmp_period_scalar(self, freq, box_with_array):
# GH#13200
xbox = np.ndarray if box_with_array is pd.Index else box_with_array
base = PeriodIndex(["2011-01", "2011-02", "2011-03", "2011-04"], freq=freq)
base = tm.box_expected(base, box_with_array)
per = Period("2011-02", freq=freq)
exp = np.array([False, True, False, False])
exp = tm.box_expected(exp, xbox)
tm.assert_equal(base == per, exp)
tm.assert_equal(per == base, exp)
exp = np.array([True, False, True, True])
exp = tm.box_expected(exp, xbox)
tm.assert_equal(base != per, exp)
tm.assert_equal(per != base, exp)
exp = np.array([False, False, True, True])
exp = tm.box_expected(exp, xbox)
tm.assert_equal(base > per, exp)
tm.assert_equal(per < base, exp)
exp = np.array([True, False, False, False])
exp = tm.box_expected(exp, xbox)
tm.assert_equal(base < per, exp)
tm.assert_equal(per > base, exp)
exp = np.array([False, True, True, True])
exp = tm.box_expected(exp, xbox)
tm.assert_equal(base >= per, exp)
tm.assert_equal(per <= base, exp)
exp = np.array([True, True, False, False])
exp = tm.box_expected(exp, xbox)
tm.assert_equal(base <= per, exp)
tm.assert_equal(per >= base, exp)
@pytest.mark.parametrize("freq", ["M", "2M", "3M"])
def test_parr_cmp_pi(self, freq, box_with_array):
# GH#13200
xbox = np.ndarray if box_with_array is pd.Index else box_with_array
base = PeriodIndex(["2011-01", "2011-02", "2011-03", "2011-04"], freq=freq)
base = tm.box_expected(base, box_with_array)
# TODO: could also box idx?
idx = PeriodIndex(["2011-02", "2011-01", "2011-03", "2011-05"], freq=freq)
exp = np.array([False, False, True, False])
exp = tm.box_expected(exp, xbox)
tm.assert_equal(base == idx, exp)
exp = np.array([True, True, False, True])
exp = tm.box_expected(exp, xbox)
tm.assert_equal(base != idx, exp)
exp = np.array([False, True, False, False])
exp = tm.box_expected(exp, xbox)
tm.assert_equal(base > idx, exp)
exp = np.array([True, False, False, True])
exp = tm.box_expected(exp, xbox)
tm.assert_equal(base < idx, exp)
exp = np.array([False, True, True, False])
exp = tm.box_expected(exp, xbox)
tm.assert_equal(base >= idx, exp)
exp = np.array([True, False, True, True])
exp = tm.box_expected(exp, xbox)
tm.assert_equal(base <= idx, exp)
@pytest.mark.parametrize("freq", ["M", "2M", "3M"])
def test_parr_cmp_pi_mismatched_freq_raises(self, freq, box_with_array):
# GH#13200
# different base freq
base = PeriodIndex(["2011-01", "2011-02", "2011-03", "2011-04"], freq=freq)
base = tm.box_expected(base, box_with_array)
msg = "Input has different freq=A-DEC from "
with pytest.raises(IncompatibleFrequency, match=msg):
base <= Period("2011", freq="A")
with pytest.raises(IncompatibleFrequency, match=msg):
Period("2011", freq="A") >= base
# TODO: Could parametrize over boxes for idx?
idx = PeriodIndex(["2011", "2012", "2013", "2014"], freq="A")
rev_msg = (
r"Input has different freq=(M|2M|3M) from " r"PeriodArray\(freq=A-DEC\)"
)
idx_msg = rev_msg if box_with_array is tm.to_array else msg
with pytest.raises(IncompatibleFrequency, match=idx_msg):
base <= idx
# Different frequency
msg = "Input has different freq=4M from "
with pytest.raises(IncompatibleFrequency, match=msg):
base <= Period("2011", freq="4M")
with pytest.raises(IncompatibleFrequency, match=msg):
Period("2011", freq="4M") >= base
idx = PeriodIndex(["2011", "2012", "2013", "2014"], freq="4M")
rev_msg = r"Input has different freq=(M|2M|3M) from " r"PeriodArray\(freq=4M\)"
idx_msg = rev_msg if box_with_array is tm.to_array else msg
with pytest.raises(IncompatibleFrequency, match=idx_msg):
base <= idx
@pytest.mark.parametrize("freq", ["M", "2M", "3M"])
def test_pi_cmp_nat(self, freq):
idx1 = PeriodIndex(["2011-01", "2011-02", "NaT", "2011-05"], freq=freq)
result = idx1 > Period("2011-02", freq=freq)
exp = np.array([False, False, False, True])
tm.assert_numpy_array_equal(result, exp)
result = Period("2011-02", freq=freq) < idx1
tm.assert_numpy_array_equal(result, exp)
result = idx1 == Period("NaT", freq=freq)
exp = np.array([False, False, False, False])
tm.assert_numpy_array_equal(result, exp)
result = Period("NaT", freq=freq) == idx1
tm.assert_numpy_array_equal(result, exp)
result = idx1 != Period("NaT", freq=freq)
exp = np.array([True, True, True, True])
tm.assert_numpy_array_equal(result, exp)
result = Period("NaT", freq=freq) != idx1
tm.assert_numpy_array_equal(result, exp)
idx2 = PeriodIndex(["2011-02", "2011-01", "2011-04", "NaT"], freq=freq)
result = idx1 < idx2
exp = np.array([True, False, False, False])
tm.assert_numpy_array_equal(result, exp)
result = idx1 == idx2
exp = np.array([False, False, False, False])
tm.assert_numpy_array_equal(result, exp)
result = idx1 != idx2
exp = np.array([True, True, True, True])
tm.assert_numpy_array_equal(result, exp)
result = idx1 == idx1
exp = np.array([True, True, False, True])
tm.assert_numpy_array_equal(result, exp)
result = idx1 != idx1
exp = np.array([False, False, True, False])
tm.assert_numpy_array_equal(result, exp)
@pytest.mark.parametrize("freq", ["M", "2M", "3M"])
def test_pi_cmp_nat_mismatched_freq_raises(self, freq):
idx1 = PeriodIndex(["2011-01", "2011-02", "NaT", "2011-05"], freq=freq)
diff = PeriodIndex(["2011-02", "2011-01", "2011-04", "NaT"], freq="4M")
msg = "Input has different freq=4M from Period(Array|Index)"
with pytest.raises(IncompatibleFrequency, match=msg):
idx1 > diff
with pytest.raises(IncompatibleFrequency, match=msg):
idx1 == diff
# TODO: De-duplicate with test_pi_cmp_nat
@pytest.mark.parametrize("dtype", [object, None])
def test_comp_nat(self, dtype):
left = pd.PeriodIndex(
[pd.Period("2011-01-01"), pd.NaT, pd.Period("2011-01-03")]
)
right = pd.PeriodIndex([pd.NaT, pd.NaT, pd.Period("2011-01-03")])
if dtype is not None:
left = left.astype(dtype)
right = right.astype(dtype)
result = left == right
expected = np.array([False, False, True])
tm.assert_numpy_array_equal(result, expected)
result = left != right
expected = np.array([True, True, False])
tm.assert_numpy_array_equal(result, expected)
expected = np.array([False, False, False])
tm.assert_numpy_array_equal(left == pd.NaT, expected)
tm.assert_numpy_array_equal(pd.NaT == right, expected)
expected = np.array([True, True, True])
tm.assert_numpy_array_equal(left != pd.NaT, expected)
tm.assert_numpy_array_equal(pd.NaT != left, expected)
expected = np.array([False, False, False])
tm.assert_numpy_array_equal(left < pd.NaT, expected)
tm.assert_numpy_array_equal(pd.NaT > left, expected)
class TestPeriodSeriesComparisons:
def test_cmp_series_period_series_mixed_freq(self):
# GH#13200
base = Series(
[
Period("2011", freq="A"),
Period("2011-02", freq="M"),
Period("2013", freq="A"),
Period("2011-04", freq="M"),
]
)
ser = Series(
[
Period("2012", freq="A"),
Period("2011-01", freq="M"),
Period("2013", freq="A"),
Period("2011-05", freq="M"),
]
)
exp = Series([False, False, True, False])
| tm.assert_series_equal(base == ser, exp) | pandas.util.testing.assert_series_equal |
#!/usr/bin/env python
# coding=utf-8
from __future__ import absolute_import
from __future__ import print_function
import unittest
import pytest
import numpy as np
import pandas as pd
from plaidcloud.utilities import frame_manager
from plaidcloud.utilities.frame_manager import coalesce
__author__ = "<NAME>"
__copyright__ = "© Copyright 2009-2014, Tartan Solutions, Inc"
__credits__ = ["<NAME>", "<NAME>"]
__license__ = "Apache 2.0"
__maintainer__ = "<NAME>"
__email__ = "<EMAIL>"
nan = np.nan
# Test to see that 2 data frames are equal
# http://stackoverflow.com/questions/14224172/equality-in-pandas-dataframes-column-order-matters
def assertFrameEqual(df1, df2, **kwargs):
""" Assert that two dataframes are equal, ignoring ordering of columns
Args:
df1 (`pandas.DataFrame`): The DataFrame to compare against `df2`
df2 (`pandas.DataFrame`): The DataFrame to compare against `df1`
**kwargs (dict): A dict to pass to `pandas.util.testing.assert_frame_equal`
"""
from pandas.util.testing import assert_frame_equal
return assert_frame_equal(df1, df2, check_names=True, check_like=True, **kwargs)
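# A minimal usage sketch (hypothetical frames, not part of the test fixtures):
#
#   left = pd.DataFrame({'a': [1, 2], 'b': [3, 4]})
#   right = pd.DataFrame({'b': [3, 4], 'a': [1, 2]})
#   assertFrameEqual(left, right)  # passes, since check_like=True ignores column order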
class TestFrameManager(unittest.TestCase):
"""These tests validate the data model methods"""
def setUp(self):
"Constructs a test environment if necessary"
self.df = frame_manager.pd.DataFrame([('Andrew', 31, 500), ('Optimus', 30, 1000), ('Iron Man', 51, 1250), ('Batman', 75, 50), ('Andrew', 31, 2500)], columns=['Name', 'Age', 'Points'])
# duplicate
self.df2 = frame_manager.pd.DataFrame([('Andrew', 31, 500), ('Optimus', 30, 1000), ('Iron Man', 51, 1250), ('Batman', 75, 50), ('Andrew', 31, 2500)], columns=['Name', 'Age', 'Points'])
self.df9 = frame_manager.pd.DataFrame([('Andrew', 31, 5), ('Optimus', 30, 10), ('Iron Man', 51, 12), ('Batman', 75, 11)], columns=['Name', 'age', 'Level'])
# Deadpool is villain aged 23... not listed
self.df3 = frame_manager.pd.DataFrame([(30, 'Autobot'), (51, 'Superhero'), (75, 'Superhero'), (23, 'Villain')], columns=['Age', 'Title'])
self.df_blank = frame_manager.pd.DataFrame()
self.df_mon_val = frame_manager.pd.DataFrame([('Jan', 5), ('Feb', 10), ('Mar', 15), ('Jan', 20), ('Feb', 25), ('Mar', 30)], columns = ['mon', 'val'])
self.df6 = frame_manager.pd.DataFrame([(30, 'Autobot', 2354, 0), (30, 'Decepticon', 18, 0), (51, 'Superhero', 234, 0), (75, 'Superhero', 897, 0), (23, 'Villain', 46546, 0)], columns=['Age', 'Title', 'DropMe', 'Points'])
# def test_get_frame_model_path(self):
# pass
# def test_get_frame_zone_path(self):
# pass
# def test_load_frame(self):
# pass
# def test_load_frame_meta(self):
# pass
# def test_clear_frame(self):
# pass
# def test_clear_zone_frame(self):
# pass
# def test_load_zone_frame(self):
# pass
# def test_load_zone_frame_meta(self):
# pass
# def test_save_frame(self):
# pass
# def test_get_tmp_frame_path(self):
# pass
# def test_compress_frame(self):
# pass
# def test_uncompress_frame(self):
# pass
# def test_append_frame(self):
# #x = frame_manager.append_frame(
# pass
def test_describe(self):
"""Tests to verify descriptive statistics about data frame"""
x = frame_manager.describe(self.df)
self.assertEqual(x['Age']['max'], max(self.df['Age']))
self.assertEqual(x['Points']['min'], min(self.df['Points']))
self.assertEqual(x['Age']['mean'], np.mean(self.df['Age']))
self.assertEqual(x['Points']['mean'], np.mean(self.df['Points']))
def test_count_unique(self):
"""Tests to verify count of distinct records in data frame"""
x = frame_manager.count_unique('Name', 'Points', self.df)
y = self.df.groupby('Name').count()['Age']['Andrew']
z = self.df.groupby('Name').count()['Age']['Iron Man']
self.assertEqual(x['Andrew'], y)
self.assertEqual(x['Iron Man'], z)
def test_sum(self):
"""Tests to verify sum of records in data frame"""
x = frame_manager.sum('Name', self.df)
y = self.df.groupby('Name').sum()
self.assertEqual(x['Points']['Andrew'], y['Points']['Andrew'])
self.assertEqual(x['Age']['Batman'], y['Age']['Batman'])
def test_std(self):
"""Tests to verify standard deviation of records in data frame"""
x = frame_manager.std('mon', self.df_mon_val)
y = self.df_mon_val.groupby('mon').std()
assertFrameEqual(x, y)
def test_mean(self):
"""Tests to verify mean of records in data frame"""
x = frame_manager.mean('Name', self.df)
y = self.df.groupby(['Name']).mean()
self.assertEqual(x['Points'][1], y['Points'][1])
def test_count(self):
"""Tests to verify count of records in data frame"""
x = frame_manager.count('Name', self.df)
y = self.df.groupby('Name').count()
self.assertEqual(x['Points'][1], y['Points'][1])
def test_inner_join(self):
"""Tests to verify inner join capability"""
x = frame_manager.inner_join(self.df, self.df3, ['Age'])
y = frame_manager.pd.merge(self.df, self.df3, 'inner', ['Age'])
assertFrameEqual(x, y)
def test_outer_join(self):
"""Tests to verify outer join capability"""
x = frame_manager.outer_join(self.df, self.df3, ['Age'])
y = frame_manager.pd.merge(self.df, self.df3, 'outer', ['Age'])
assertFrameEqual(x, y)
def test_left_join(self):
"""Tests to verify left join capability"""
x = frame_manager.left_join(self.df, self.df3, ['Age'])
y = frame_manager.pd.merge(self.df, self.df3, 'left', ['Age'])
assertFrameEqual(x, y)
def test_right_join(self):
"""Tests to verify right join capability"""
x = frame_manager.right_join(self.df, self.df3, ['Age'])
y = frame_manager.pd.merge(self.df, self.df3, 'right', ['Age'])
assertFrameEqual(x, y)
# def test_memoize(self):
# pass
# def test_geo_distance(self):
# pass
# def test_geo_location(self):
# pass
# def test_trailing_negative(self):
# pass
def test_now(self):
"""Tests to verify current time"""
x = frame_manager.now()
y = frame_manager.utc.timestamp()
self.assertEqual(x, y)
# def test_concat(self):
# df2 = self.df
# x = frame_manager.concat([self.df, df2], [self.df])
# print x
# def test_covariance(self):
# pass
# def test_correlation(self):
# pass
# def test_apply_agg(self):
# pass
# def test_distinct(self):
# pass
# def test_find_duplicates(self):
# pass
# def test_sort(self):
# pass
# def test_replace_column(self):
# pass
def test_replace(self):
"""Tests to verify replacement using dictionary key/value combinations"""
replace_dict = {'Optimus': 'Optimus Prime', 50: 5000}
x = frame_manager.replace(self.df, replace_dict)
y = self.df.replace(replace_dict)
assertFrameEqual(x, y)
# def test_reindex(self):
# pass
def test_rename_columns(self):
"""Tests to verify renamed columns using dictionary key/value combinations"""
rename_dict = {'Name': 'Title', 'Points': 'Salary'}
x = frame_manager.rename_columns(self.df, rename_dict)
y = self.df.rename(columns=rename_dict)
assertFrameEqual(x, y)
# def test_column_info(self):
# pass
@pytest.mark.skip('Dtypes seem to be wrong, should be passing sql types?')
def test_set_column_types(self):
"""Tests to verify data type conversion for columns"""
type_dict = {'Name': 's32', 'Points': 'float16', 'Age': 'int8'}
self.assertNotEqual('int8', self.df['Age'].dtypes)
self.assertNotEqual('float16', self.df['Points'].dtypes)
x = frame_manager.set_column_types(self.df, type_dict)
self.assertEqual('float32', x['Points'].dtypes)
self.assertEqual('int64', x['Age'].dtypes)
self.assertEqual('object', x['Name'].dtypes)
def test_drop_column(self):
"""Tests to verify columns dropped appropriately"""
x = frame_manager.drop_column(self.df, ['Age'])
y = self.df2
del y['Age']
assertFrameEqual(x, y)
def test_has_data(self):
"""Tests to verify a data frame does/doesn't have data"""
x = frame_manager.has_data(self.df_blank)
y = frame_manager.has_data(self.df)
self.assertFalse(x)
self.assertTrue(y)
# def test_in_column(self):
# pass
# def test_frame_source_reduce(self):
# """Tests to verify that data is filtered as expected (aka SQL Where)"""
# x = frame_manager.frame_source_reduce(self.df)
# assertFrameEqual(x, self.df2)
# def test_apply_variables(self):
# pass
# def test_frame_map_update(self):
# pass
# def test_get_entity_frame(self):
# pass
# def test_save_entity_frame(self):
# pass
def test_lookup(self):
"""Tests to verify lookup capability"""
# x = frame_manager.lookup(self.df, self.df6, ['Age'], None, ['Age', 'Title'])
orig_lookup = self.df6.copy()
w = frame_manager.lookup(self.df, self.df9, left_on=['Name', 'Age'], right_on=['Name', 'age'])
print(w)
x = frame_manager.lookup(self.df, self.df6, ['Age'])
y = frame_manager.distinct(self.df6, ['Age'])
z = frame_manager.left_join(self.df, y, ['Age'])
print(x)
print(z)
assertFrameEqual(x, z)
# ensure lookup frame integrity
assertFrameEqual(orig_lookup, self.df6)
def tearDown(self):
"Clean up any test structure or records generated during the testing"
del self.df
del self.df2
del self.df_blank
del self.df_mon_val
del self.df6
class TestCoalesce(unittest.TestCase):
def setUp(self):
self.reference_data = {
'A': [nan, 'aa', nan, nan, nan],
'B': ['b', 'bb', None, nan, 'bbbbb'],
'C': ['c', 'cc', 'ccc', 'cccc', 'ccccc'],
'D': ['d', '', nan, nan, nan],
'E': ['e', 'ee', nan, None, 7],
'one': [1, nan, nan, nan, nan], # float64
'two': [2, 2, 2.2, nan, 0], # float64
'three': [nan, nan, nan, 3, 3]
}
def test_string_columns(self):
"""Test the basic case with strings."""
df = | pd.DataFrame(data=self.reference_data) | pandas.DataFrame |
# Copyright 2020 Silicon Compiler Authors. All Rights Reserved.
import argparse
import base64
import time
import datetime
import multiprocessing
import tarfile
import traceback
import asyncio
from subprocess import run, PIPE
import os
import pathlib
import sys
import gzip
import re
import json
import logging
import hashlib
import shutil
import copy
import importlib
import textwrap
import math
import pandas
import yaml
import graphviz
import uuid
import shlex
import platform
import getpass
import distro
import netifaces
from pathlib import Path
from timeit import default_timer as timer
from siliconcompiler.client import *
from siliconcompiler.schema import *
from siliconcompiler.scheduler import _deferstep
from siliconcompiler import utils
from siliconcompiler import _metadata
class Chip:
"""Object for configuring and executing hardware design flows.
This is the main object used for configuration, data, and
execution within the SiliconCompiler platform.
Args:
design (string): Name of the top level chip design module.
Examples:
>>> siliconcompiler.Chip(design="top")
Creates a chip object with name "top".
"""
###########################################################################
def __init__(self, design=None, loglevel="INFO"):
# Local variables
self.scroot = os.path.dirname(os.path.abspath(__file__))
self.cwd = os.getcwd()
self.error = 0
self.cfg = schema_cfg()
self.cfghistory = {}
# The 'status' dictionary can be used to store ephemeral config values.
# Its contents will not be saved, and can be set by parent scripts
# such as a web server or supervisor process. Currently supported keys:
# * 'jobhash': A hash or UUID which can identify jobs in a larger system.
# * 'remote_cfg': Dictionary containing remote server configurations
# (address, credentials, etc.)
# * 'slurm_account': User account ID in a connected slurm HPC cluster.
# * 'slurm_partition': Name of the partition in which a task should run
# on a connected slurm HPC cluster.
# * 'watchdog': Activity-monitoring semaphore for jobs scheduled on an
# HPC cluster; expects a 'threading.Event'-like object.
# * 'max_fs_bytes': A limit on how much disk space a job is allowed
# to consume in a connected HPC cluster's storage.
self.status = {}
self.builtin = ['minimum','maximum',
'mux', 'join', 'verify']
# We set 'design' and 'loglevel' directly in the config dictionary
# because of a chicken-and-egg problem: self.set() relies on the logger,
# but the logger relies on these values.
self.cfg['design']['value'] = design
self.cfg['loglevel']['value'] = loglevel
# We set scversion directly because it has its 'lock' flag set by default.
self.cfg['version']['sc']['value'] = _metadata.version
self._init_logger()
###########################################################################
def _init_logger(self, step=None, index=None):
self.logger = logging.getLogger(uuid.uuid4().hex)
# Don't propagate log messages to "root" handler (we get duplicate
# messages without this)
# TODO: this prevents us from being able to capture logs with pytest:
# we should revisit it
self.logger.propagate = False
loglevel = self.get('loglevel')
jobname = self.get('jobname')
        if jobname is None:
            jobname = '---'
        if step is None:
            step = '---'
        if index is None:
            index = '-'
run_info = '%-7s | %-12s | %-3s' % (jobname, step, index)
if loglevel=='DEBUG':
logformat = '| %(levelname)-7s | %(funcName)-10s | %(lineno)-4s | ' + run_info + ' | %(message)s'
else:
logformat = '| %(levelname)-7s | ' + run_info + ' | %(message)s'
handler = logging.StreamHandler()
formatter = logging.Formatter(logformat)
handler.setFormatter(formatter)
# Clear any existing handlers so we don't end up with duplicate messages
# if repeat calls to _init_logger are made
if len(self.logger.handlers) > 0:
self.logger.handlers.clear()
self.logger.addHandler(handler)
self.logger.setLevel(loglevel)
###########################################################################
def _deinit_logger(self):
self.logger = None
###########################################################################
def create_cmdline(self, progname, description=None, switchlist=[]):
"""Creates an SC command line interface.
Exposes parameters in the SC schema as command line switches,
simplifying creation of SC apps with a restricted set of schema
parameters exposed at the command line. The order of command
line switch settings parsed from the command line is as follows:
1. design
2. loglevel
3. mode
4. target('target')
5. read_manifest([cfg])
6. all other switches
The cmdline interface is implemented using the Python argparse package
and the following use restrictions apply.
* Help is accessed with the '-h' switch.
* Arguments that include spaces must be enclosed with double quotes.
* List parameters are entered individually. (ie. -y libdir1 -y libdir2)
* For parameters with Boolean types, the switch implies "true".
* Special characters (such as '-') must be enclosed in double quotes.
* Compiler compatible switches include: -D, -I, -O{0,1,2,3}
* Verilog legacy switch formats are supported: +libext+, +incdir+
Args:
progname (str): Name of program to be executed.
description (str): Short program description.
switchlist (list of str): List of SC parameter switches to expose
at the command line. By default all SC schema switches are
available. Parameter switches should be entered without
'-', based on the parameter 'switch' field in the 'schema'.
Examples:
>>> chip.create_cmdline(progname='sc-show',switchlist=['source','cfg'])
Creates a command line interface for 'sc-show' app.
"""
# Argparse
parser = argparse.ArgumentParser(prog=progname,
prefix_chars='-+',
formatter_class=argparse.RawDescriptionHelpFormatter,
description=description)
# Get all keys from global dictionary or override at command line
allkeys = self.getkeys()
# Iterate over all keys to add parser argument
for key in allkeys:
#Fetch fields from leaf cell
helpstr = self.get(*key, field='shorthelp')
typestr = self.get(*key, field='type')
#Switch field fully describes switch format
switch = self.get(*key, field='switch')
if switch is None:
switches = []
elif isinstance(switch, list):
switches = switch
else:
switches = [switch]
switchstrs = []
dest = None
for switch in switches:
switchmatch = re.match(r'(-[\w_]+)\s+(.*)', switch)
gccmatch = re.match(r'(-[\w_]+)(.*)', switch)
plusmatch = re.match(r'(\+[\w_\+]+)(.*)', switch)
if switchmatch:
switchstr = switchmatch.group(1)
if re.search('_', switchstr):
this_dest = re.sub('-','',switchstr)
else:
this_dest = key[0]
elif gccmatch:
switchstr = gccmatch.group(1)
this_dest = key[0]
elif plusmatch:
switchstr = plusmatch.group(1)
this_dest = key[0]
switchstrs.append(switchstr)
if dest is None:
dest = this_dest
elif dest != this_dest:
raise ValueError('Destination for each switch in list must match')
#Four switch types (source, scalar, list, bool)
if ('source' not in key) & ((switchlist == []) | (dest in switchlist)):
if typestr == 'bool':
parser.add_argument(*switchstrs,
metavar='',
dest=dest,
action='store_const',
const="true",
help=helpstr,
default=argparse.SUPPRESS)
#list type arguments
elif re.match(r'\[', typestr):
#all the rest
parser.add_argument(*switchstrs,
metavar='',
dest=dest,
action='append',
help=helpstr,
default=argparse.SUPPRESS)
else:
#all the rest
parser.add_argument(*switchstrs,
metavar='',
dest=dest,
help=helpstr,
default=argparse.SUPPRESS)
#Preprocess sys.argv to enable linux commandline switch formats
#(gcc, verilator, etc)
scargs = []
# Iterate from index 1, otherwise we end up with script name as a
# 'source' positional argument
for item in sys.argv[1:]:
#Split switches with one character and a number after (O0,O1,O2)
opt = re.match(r'(\-\w)(\d+)', item)
#Split assign switches (-DCFG_ASIC=1)
assign = re.search(r'(\-\w)(\w+\=\w+)', item)
#Split plusargs (+incdir+/path)
plusarg = re.search(r'(\+\w+\+)(.*)', item)
if opt:
scargs.append(opt.group(1))
scargs.append(opt.group(2))
elif plusarg:
scargs.append(plusarg.group(1))
scargs.append(plusarg.group(2))
elif assign:
scargs.append(assign.group(1))
scargs.append(assign.group(2))
else:
scargs.append(item)
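        # For example: "-O3" is split into ["-O", "3"], "-DCFG_ASIC=1" into
        # ["-D", "CFG_ASIC=1"], and "+incdir+/path" into ["+incdir+", "/path"].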
# exit on version check
if '-version' in scargs:
print(_metadata.version)
sys.exit(0)
# Required positional source file argument
if ((switchlist == []) &
(not '-cfg' in scargs)) | ('source' in switchlist) :
parser.add_argument('source',
nargs='+',
help=self.get('source', field='shorthelp'))
#Grab argument from pre-process sysargs
#print(scargs)
cmdargs = vars(parser.parse_args(scargs))
#print(cmdargs)
#sys.exit()
# Print banner
print(_metadata.banner)
print("Authors:", ", ".join(_metadata.authors))
print("Version:", _metadata.version, "\n")
print("-"*80)
os.environ["COLUMNS"] = '80'
# 1. set design name (override default)
if 'design' in cmdargs.keys():
self.name = cmdargs['design']
# 2. set loglevel if set at command line
if 'loglevel' in cmdargs.keys():
self.logger.setLevel(cmdargs['loglevel'])
# 3. read in target if set
if 'target' in cmdargs.keys():
if 'mode' in cmdargs.keys():
self.set('mode', cmdargs['mode'], clobber=True)
if 'techarg' in cmdargs.keys():
print("NOT IMPLEMENTED")
sys.exit()
if 'flowarg' in cmdargs.keys():
print("NOT IMPLEMENTED")
sys.exit()
if 'arg_step' in cmdargs.keys():
self.set('arg', 'step', cmdargs['arg_step'], clobber=True)
# running target command
self.target(cmdargs['target'])
# 4. read in all cfg files
if 'cfg' in cmdargs.keys():
for item in cmdargs['cfg']:
self.read_manifest(item, update=True, clobber=True, clear=True)
# insert all parameters in dictionary
self.logger.info('Setting commandline arguments')
allkeys = self.getkeys()
for key, val in cmdargs.items():
# Unifying around no underscores for now
keylist = key.split('_')
orderhash = {}
# Find keypath with matching keys
for keypath in allkeys:
match = True
for item in keylist:
if item in keypath:
orderhash[item] = keypath.index(item)
else:
match = False
if match:
chosenpath = keypath
break
# Turn everything into a list for uniformity
if isinstance(val, list):
val_list = val
else:
val_list = [val]
for item in val_list:
#space used to separate values!
extrakeys = item.split(' ')
for i in range(len(extrakeys)):
# look for the first default statement
# "delete' default in temp list by setting to None
if 'default' in chosenpath:
next_default = chosenpath.index('default')
orderhash[extrakeys[i]] = next_default
chosenpath[next_default] = None
else:
# Creating a sorted list based on key placement
args = list(dict(sorted(orderhash.items(),
key=lambda orderhash: orderhash[1])))
# Adding data value
args = args + [extrakeys[i]]
# Set/add value based on type
#Check that keypath is valid
if self.valid(*args[:-1], quiet=True):
if re.match(r'\[', self.get(*args[:-1], field='type')):
self.add(*args)
else:
self.set(*args, clobber=True)
else:
self.set(*args, clobber=True)
#########################################################################
def create_env(self):
'''
Creates a working environment for interactive design.
        Sets environment variables and initializes tool-specific setup files
        based on the parameter set loaded.
        Actions taken:
        * Append values found in the eda 'path' parameter to the current PATH.
        * Call the setup_env() function for each tool in the 'eda' parameter set.
'''
# Add paths
env_path = os.environ['PATH']
for tool in self.getkeys('eda'):
for path in self.get('eda', tool):
env_path = env_path + os.pathsep + path
# Call setup_env functions
for tool in self.getkeys('eda'):
for step in self.getkeys('eda', tool):
setup_env = self.find_function(tool, 'tool', 'setup_env')
if setup_env:
setup_env(self)
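    # Typical interactive use (sketch, target name assumed): after
    # chip.target('asicflow_freepdk45'), chip.create_env() gathers the eda tool
    # paths and runs each tool's setup_env() function.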
#########################################################################
def find_function(self, modulename, functype, funcname):
'''
Returns a function attribute from a module on disk.
Searches the SC root directory and the 'scpath' parameter for the
modulename provided and imports the module if found. If the funcname
provided is found in the module, a callable function attribute is
returned, otherwise None is returned.
The function assumes the following directory structure:
* tools/modulename/modulename.py
* flows/modulename.py
        * pdks/modulename.py
Supported functions include:
* pdk (make_docs, setup_pdk)
* flow (make_docs, setup_flow)
* tool (make_docs, setup_tool, check_version, runtime_options,
pre_process, post_process)
Args:
modulename (str): Name of module to import.
functype (str): Type of function to import (tool,flow, pdk).
funcname (str): Name of the function to find within the module.
Examples:
>>> setup_pdk = chip.find_function('freepdk45','pdk','setup_pdk')
>>> setup_pdk()
Imports the freepdk45 module and runs the setup_pdk function
'''
# module search path depends on functype
if functype == 'tool':
fullpath = self._find_sc_file(f"tools/{modulename}/{modulename}.py", missing_ok=True)
elif functype == 'flow':
fullpath = self._find_sc_file(f"flows/{modulename}.py", missing_ok=True)
elif functype == 'pdk':
fullpath = self._find_sc_file(f"pdks/{modulename}.py", missing_ok=True)
elif functype == 'project':
fullpath = self._find_sc_file(f"projects/{modulename}.py", missing_ok=True)
else:
self.logger.error(f"Illegal module type '{functype}'.")
self.error = 1
return
# try loading module if found
if fullpath:
if functype == 'tool':
self.logger.debug(f"Loading function '{funcname}' from module '{modulename}'")
else:
self.logger.info(f"Loading function '{funcname}' from module '{modulename}'")
try:
spec = importlib.util.spec_from_file_location(modulename, fullpath)
imported = importlib.util.module_from_spec(spec)
spec.loader.exec_module(imported)
if hasattr(imported, funcname):
function = getattr(imported, funcname)
else:
function = None
return function
except:
traceback.print_exc()
self.logger.error(f"Module setup failed for '{modulename}'")
self.error = 1
###########################################################################
def target(self, name=None):
"""
Configures the compilation manifest based on pre-defined target modules.
The target function imports and executes a set of setup functions based
on a '_' separated string. The following target string combinations are
permitted:
* <projname>
* <flowname>
* <flowname>_<pdkname>
* <flowname>_<partname> (for fpga flows)
* <pdk>
* <tool>
* <tool>_<pdkname>
If no target name is provided, the target will be read from the
'target' schema parameter. Calling target() with no target name provided
and an undefined 'target' parameter results in an error.
The target function uses the find_function() method to import and
execute setup functions based on the 'scpath' search parameter.
Args:
name (str): Name of target combination to load.
Examples:
>>> chip.target("asicflow_freepdk45")
Loads the 'freepdk45' and 'asicflow' setup functions.
>>> chip.target()
Loads target based on result from chip.get('target')
"""
#Sets target in dictionary if string is passed in
if name is not None:
self.set('target', name)
# Error checking
if not self.get('target'):
self.logger.error('Target not defined.')
sys.exit(1)
elif len(self.get('target').split('_')) > 2:
self.logger.error('Target should have zero or one underscore')
sys.exit(1)
target = self.get('target')
self.logger.info(f"Loading target '{target}'")
# search for module matches
targetlist = target.split('_')
for i, item in enumerate(targetlist):
if i == 0:
func_project = self.find_function(item, 'project', 'setup_project')
if func_project is not None:
func_project(self)
if len(targetlist) > 1:
self.logger.error('Target string beginning with a project name '
'must only have one entry')
sys.exit(1)
break
func_flow = self.find_function(item, 'flow', 'setup_flow')
if func_flow is not None:
func_flow(self)
continue
func_pdk = self.find_function(item, 'pdk', 'setup_pdk')
if func_pdk is not None:
func_pdk(self)
if len(targetlist) > 1:
self.logger.error('Target string beginning with a PDK name '
'must only have one entry')
sys.exit(1)
break
func_tool = self.find_function(item, 'tool', 'setup_tool')
if func_tool is not None:
step = self.get('arg','step')
self.set('flowgraph', step, '0', 'tool', item)
self.set('flowgraph', step, '0', 'weight', 'errors', 0)
self.set('flowgraph', step, '0', 'weight', 'warnings', 0)
self.set('flowgraph', step, '0', 'weight', 'runtime', 0)
# We must always have an import step, so add a default no-op
# if need be.
if step != 'import':
self.set('flowgraph', 'import', '0', 'tool', 'join')
self.set('flowgraph', step, '0', 'input', ('import','0'))
self.set('arg', 'step', None)
continue
self.logger.error(f'Target {item} not found')
sys.exit(1)
else:
func_pdk = self.find_function(item, 'pdk', 'setup_pdk')
if func_pdk is not None:
func_pdk(self)
break
# Only an error if we're not in FPGA mode. Otherwise, we assume
# the second item is a partname, which will be read directly
# from the target by the FPGA flow logic.
if self.get('mode') != 'fpga':
self.logger.error(f'PDK {item} not found')
sys.exit(1)
if self.get('mode') is not None:
self.logger.info(f"Operating in '{self.get('mode')}' mode")
else:
self.logger.warning(f"No mode set")
###########################################################################
def list_metrics(self):
'''
Returns a list of all metrics in the schema.
'''
return self.getkeys('metric','default','default')
###########################################################################
def help(self, *keypath):
"""
Returns a schema parameter description.
Args:
*keypath(str): Keypath to parameter.
Returns:
A formatted multi-line help paragraph for the parameter provided.
Examples:
>>> print(chip.help('asic','diearea'))
Displays help information about the 'asic, diearea' parameter
"""
self.logger.debug('Fetching help for %s', keypath)
#Fetch Values
description = self.get(*keypath, field='shorthelp')
typestr = self.get(*keypath, field='type')
switchstr = str(self.get(*keypath, field='switch'))
defstr = str(self.get(*keypath, field='defvalue'))
requirement = str(self.get(*keypath, field='require'))
helpstr = self.get(*keypath, field='help')
example = self.get(*keypath, field='example')
#Removing multiple spaces and newlines
helpstr = helpstr.rstrip()
helpstr = helpstr.replace("\n", "")
helpstr = ' '.join(helpstr.split())
for idx, item in enumerate(example):
example[idx] = ' '.join(item.split())
example[idx] = example[idx].replace(", ", ",")
#Wrap text
para = textwrap.TextWrapper(width=60)
para_list = para.wrap(text=helpstr)
#Full Doc String
fullstr = ("-"*80 +
"\nDescription: " + description +
"\nSwitch: " + switchstr +
"\nType: " + typestr +
"\nRequirement: " + requirement +
"\nDefault: " + defstr +
"\nExamples: " + example[0] +
"\n " + example[1] +
"\nHelp: " + para_list[0] + "\n")
for line in para_list[1:]:
fullstr = (fullstr +
" "*13 + line.lstrip() + "\n")
return fullstr
###########################################################################
def valid(self, *args, valid_keypaths=None, quiet=True, default_valid=False):
"""
Checks validity of a keypath.
Checks the validity of a parameter keypath and returns True if the
keypath is valid and False if invalid.
Args:
keypath(list str): Variable length schema key list.
valid_keypaths (list of list): List of valid keypaths as lists. If
None, check against all keypaths in the schema.
quiet (bool): If True, don't display warnings for invalid keypaths.
Returns:
Boolean indicating validity of keypath.
Examples:
>>> check = chip.valid('design')
Returns True.
>>> check = chip.valid('blah')
Returns False.
"""
keypathstr = ','.join(args)
keylist = list(args)
if default_valid:
default = 'default'
else:
default = None
if valid_keypaths is None:
valid_keypaths = self.getkeys()
# Look for a full match with default playing wild card
for valid_keypath in valid_keypaths:
if len(keylist) != len(valid_keypath):
continue
ok = True
for i in range(len(keylist)):
if valid_keypath[i] not in (keylist[i], default):
ok = False
break
if ok:
return True
# Match not found
if not quiet:
self.logger.warning(f"Keypath [{keypathstr}] is not valid")
return False
###########################################################################
def get(self, *keypath, field='value', job=None, cfg=None):
"""
Returns a schema parameter field.
        Returns a schema parameter field based on the keypath and value provided
        in the ``*args``. The returned type is consistent with the type field of
        the parameter. Fetching parameters with empty or undefined value fields
returns None for scalar types and [] (empty list) for list types.
Accessing a non-existent keypath produces a logger error message and
raises the Chip object error flag.
Args:
keypath(list str): Variable length schema key list.
field(str): Parameter field to fetch.
job (str): Jobname to use for dictionary access in place of the
current active jobname.
cfg(dict): Alternate dictionary to access in place of the default
chip object schema dictionary.
Returns:
Value found for the keypath and field provided.
Examples:
>>> foundry = chip.get('pdk', 'foundry')
Returns the name of the foundry from the PDK.
"""
if cfg is None:
if job is not None:
cfg = self.cfghistory[job]
else:
cfg = self.cfg
keypathstr = ','.join(keypath)
self.logger.debug(f"Reading from [{keypathstr}]. Field = '{field}'")
return self._search(cfg, keypathstr, *keypath, field=field, mode='get')
###########################################################################
def getkeys(self, *keypath, cfg=None):
"""
Returns a list of schema dictionary keys.
Searches the schema for the keypath provided and returns a list of
keys found, excluding the generic 'default' key. Accessing a
non-existent keypath produces a logger error message and raises the
Chip object error flag.
Args:
keypath(list str): Variable length ordered schema key list
cfg(dict): Alternate dictionary to access in place of self.cfg
Returns:
List of keys found for the keypath provided.
Examples:
>>> keylist = chip.getkeys('pdk')
Returns all keys for the 'pdk' keypath.
>>> keylist = chip.getkeys()
Returns all list of all keypaths in the schema.
"""
if cfg is None:
cfg = self.cfg
if len(list(keypath)) > 0:
keypathstr = ','.join(keypath)
self.logger.debug('Getting schema parameter keys for: %s', keypathstr)
keys = list(self._search(cfg, keypathstr, *keypath, mode='getkeys'))
if 'default' in keys:
keys.remove('default')
else:
self.logger.debug('Getting all schema parameter keys.')
keys = list(self._allkeys(cfg))
return keys
###########################################################################
def getdict(self, *keypath, cfg=None):
"""
Returns a schema dictionary.
Searches the schema for the keypath provided and returns a complete
dictionary. Accessing a non-existent keypath produces a logger error
message and raises the Chip object error flag.
Args:
keypath(list str): Variable length ordered schema key list
cfg(dict): Alternate dictionary to access in place of self.cfg
Returns:
A schema dictionary
Examples:
>>> pdk = chip.getdict('pdk')
Returns the complete dictionary found for the keypath 'pdk'
"""
if cfg is None:
cfg = self.cfg
if len(list(keypath)) > 0:
keypathstr = ','.join(keypath)
self.logger.debug('Getting cfg for: %s', keypathstr)
localcfg = self._search(cfg, keypathstr, *keypath, mode='getcfg')
return copy.deepcopy(localcfg)
###########################################################################
def set(self, *args, field='value', clobber=True, cfg=None):
'''
Sets a schema parameter field.
Sets a schema parameter field based on the keypath and value provided
in the ``*args``. New schema dictionaries are automatically created for
keypaths that overlap with 'default' dictionaries. The write action
is ignored if the parameter value is non-empty and the clobber
option is set to False.
The value provided must agree with the dictionary parameter 'type'.
Accessing a non-existent keypath or providing a value that disagrees
with the parameter type produces a logger error message and raises the
Chip object error flag.
Args:
args (list): Parameter keypath followed by a value to set.
field (str): Parameter field to set.
clobber (bool): Existing value is overwritten if True.
cfg(dict): Alternate dictionary to access in place of self.cfg
Examples:
>>> chip.set('design', 'top')
Sets the name of the design to 'top'
'''
if cfg is None:
cfg = self.cfg
# Verify that all keys are strings
for key in args[:-1]:
if not isinstance(key,str):
self.logger.error(f"Key [{key}] is not a string [{args}]")
keypathstr = ','.join(args[:-1])
all_args = list(args)
# Special case to ensure loglevel is updated ASAP
if len(args) == 2 and args[0] == 'loglevel' and field == 'value':
self.logger.setLevel(args[1])
self.logger.debug(f"Setting [{keypathstr}] to {args[-1]}")
return self._search(cfg, keypathstr, *all_args, field=field, mode='set', clobber=clobber)
###########################################################################
def add(self, *args, cfg=None, field='value'):
'''
Adds item(s) to a schema parameter list.
Adds item(s) to schema parameter list based on the keypath and value
provided in the ``*args``. New schema dictionaries are
automatically created for keypaths that overlap with 'default'
dictionaries.
The value provided must agree with the dictionary parameter 'type'.
Accessing a non-existent keypath, providing a value that disagrees
with the parameter type, or using add with a scalar parameter produces
a logger error message and raises the Chip object error flag.
Args:
args (list): Parameter keypath followed by a value to add.
cfg(dict): Alternate dictionary to access in place of self.cfg
field (str): Parameter field to set.
Examples:
>>> chip.add('source', 'hello.v')
Adds the file 'hello.v' to the list of sources.
'''
if cfg is None:
cfg = self.cfg
# Verify that all keys are strings
for key in args[:-1]:
if not isinstance(key,str):
self.logger.error(f"Key [{key}] is not a string [{args}]")
keypathstr = ','.join(args[:-1])
all_args = list(args)
self.logger.debug(f'Appending value {args[-1]} to [{keypathstr}]')
return self._search(cfg, keypathstr, *all_args, field=field, mode='add')
###########################################################################
def _allkeys(self, cfg, keys=None, keylist=None):
'''
Returns list of all keypaths in the schema.
'''
if keys is None:
keylist = []
keys = []
for k in cfg:
newkeys = keys.copy()
newkeys.append(k)
if 'defvalue' in cfg[k]:
keylist.append(newkeys)
else:
self._allkeys(cfg[k], keys=newkeys, keylist=keylist)
return keylist
###########################################################################
def _search(self, cfg, keypath, *args, field='value', mode='get', clobber=True):
'''
Internal recursive function that searches the Chip schema for a
match to the combination of *args and fields supplied. The function is
used to set and get data within the dictionary.
Args:
cfg(dict): The cfg schema to search
keypath (str): Concatenated keypath used for error logging.
args (str): Keypath/value variable list used for access
field(str): Leaf cell field to access.
mode(str): Action (set/get/add/getkeys/getkeys)
clobber(bool): Specifies to clobber (for set action)
'''
all_args = list(args)
param = all_args[0]
val = all_args[-1]
empty = [None, 'null', [], 'false']
#set/add leaf cell (all_args=(param,val))
if (mode in ('set', 'add')) & (len(all_args) == 2):
# clean error if key not found
if (not param in cfg) & (not 'default' in cfg):
self.logger.error(f"Set/Add keypath [{keypath}] does not exist.")
self.error = 1
else:
# making an 'instance' of default if not found
if (not param in cfg) & ('default' in cfg):
cfg[param] = copy.deepcopy(cfg['default'])
list_type =bool(re.match(r'\[', cfg[param]['type']))
# copying over defvalue if value doesn't exist
if 'value' not in cfg[param]:
cfg[param]['value'] = copy.deepcopy(cfg[param]['defvalue'])
# checking for illegal fields
if not field in cfg[param] and (field != 'value'):
self.logger.error(f"Field '{field}' for keypath [{keypath}]' is not a valid field.")
self.error = 1
# check legality of value
if field == 'value':
(type_ok,type_error) = self._typecheck(cfg[param], param, val)
if not type_ok:
self.logger.error("%s", type_error)
self.error = 1
# converting python True/False to lower case string
if (field == 'value') and (cfg[param]['type'] == 'bool'):
if val == True:
val = "true"
elif val == False:
val = "false"
# checking if value has been set
if field not in cfg[param]:
selval = cfg[param]['defvalue']
else:
selval = cfg[param]['value']
# updating values
if cfg[param]['lock'] == "true":
self.logger.debug("Ignoring {mode}{} to [{keypath}]. Lock bit is set.")
elif (mode == 'set'):
if (selval in empty) | clobber:
if field in ('copy', 'lock'):
# boolean fields
if val is True:
cfg[param][field] = "true"
elif val is False:
cfg[param][field] = "false"
else:
self.logger.error(f'{field} must be set to boolean.')
self.error = 1
elif field in ('filehash', 'date', 'author', 'signature'):
if isinstance(val, list):
cfg[param][field] = val
else:
cfg[param][field] = [val]
elif (not list_type) & (val is None):
cfg[param][field] = None
elif (not list_type) & (not isinstance(val, list)):
cfg[param][field] = str(val)
elif list_type & (not isinstance(val, list)):
cfg[param][field] = [str(val)]
elif list_type & isinstance(val, list):
if re.search(r'\(', cfg[param]['type']):
cfg[param][field] = list(map(str,val))
else:
cfg[param][field] = val
else:
self.logger.error(f"Assigning list to scalar for [{keypath}]")
self.error = 1
else:
self.logger.debug(f"Ignoring set() to [{keypath}], value already set. Use clobber=true to override.")
elif (mode == 'add'):
if field in ('filehash', 'date', 'author', 'signature'):
cfg[param][field].append(str(val))
elif field in ('copy', 'lock'):
self.logger.error(f"Illegal use of add() for scalar field {field}.")
self.error = 1
elif list_type & (not isinstance(val, list)):
cfg[param][field].append(str(val))
elif list_type & isinstance(val, list):
cfg[param][field].extend(val)
else:
self.logger.error(f"Illegal use of add() for scalar parameter [{keypath}].")
self.error = 1
return cfg[param][field]
#get leaf cell (all_args=param)
elif len(all_args) == 1:
if not param in cfg:
self.error = 1
self.logger.error(f"Get keypath [{keypath}] does not exist.")
elif mode == 'getcfg':
return cfg[param]
elif mode == 'getkeys':
return cfg[param].keys()
else:
if not (field in cfg[param]) and (field!='value'):
self.error = 1
self.logger.error(f"Field '{field}' not found for keypath [{keypath}]")
elif field == 'value':
#Select default if no value has been set
if field not in cfg[param]:
selval = cfg[param]['defvalue']
else:
selval = cfg[param]['value']
#check for list
if bool(re.match(r'\[', cfg[param]['type'])):
sctype = re.sub(r'[\[\]]', '', cfg[param]['type'])
return_list = []
if selval is None:
return None
for item in selval:
if sctype == 'int':
return_list.append(int(item))
elif sctype == 'float':
return_list.append(float(item))
elif sctype == '(str,str)':
if isinstance(item,tuple):
return_list.append(item)
else:
tuplestr = re.sub(r'[\(\)\'\s]','',item)
return_list.append(tuple(tuplestr.split(',')))
elif sctype == '(float,float)':
if isinstance(item,tuple):
return_list.append(item)
else:
tuplestr = re.sub(r'[\(\)\s]','',item)
return_list.append(tuple(map(float, tuplestr.split(','))))
else:
return_list.append(item)
return return_list
else:
if selval is None:
# Unset scalar of any type
scalar = None
elif cfg[param]['type'] == "int":
#print(selval, type(selval))
scalar = int(float(selval))
elif cfg[param]['type'] == "float":
scalar = float(selval)
elif cfg[param]['type'] == "bool":
scalar = (selval == 'true')
elif re.match(r'\(', cfg[param]['type']):
tuplestr = re.sub(r'[\(\)\s]','',selval)
scalar = tuple(map(float, tuplestr.split(',')))
else:
scalar = selval
return scalar
#all non-value fields are strings (or lists of strings)
else:
if cfg[param][field] == 'true':
return True
elif cfg[param][field] == 'false':
return False
else:
return cfg[param][field]
#if not leaf cell descend tree
else:
##copying in default tree for dynamic trees
if not param in cfg and 'default' in cfg:
cfg[param] = copy.deepcopy(cfg['default'])
elif not param in cfg:
self.error = 1
self.logger.error(f"Get keypath [{keypath}] does not exist.")
return None
all_args.pop(0)
return self._search(cfg[param], keypath, *all_args, field=field, mode=mode, clobber=clobber)
###########################################################################
def _prune(self, cfg, top=True, keeplists=False):
'''
Internal recursive function that creates a local copy of the Chip
schema (cfg) with only essential non-empty parameters retained.
'''
# create a local copy of dict
if top:
localcfg = copy.deepcopy(cfg)
else:
localcfg = cfg
#10 should be enough for anyone...
maxdepth = 10
i = 0
#Prune when the default & value are set to the following
if keeplists:
empty = ("null", None)
else:
empty = ("null", None, [])
# When at top of tree loop maxdepth times to make sure all stale
# branches have been removed, not elegant, but stupid-simple
# "good enough"
while i < maxdepth:
#Loop through all keys starting at the top
for k in list(localcfg.keys()):
#removing all default/template keys
# reached a default subgraph, delete it
if k == 'default':
del localcfg[k]
# reached leaf-cell
elif 'help' in localcfg[k].keys():
del localcfg[k]['help']
elif 'example' in localcfg[k].keys():
del localcfg[k]['example']
elif 'defvalue' in localcfg[k].keys():
if localcfg[k]['defvalue'] in empty:
if 'value' in localcfg[k].keys():
if localcfg[k]['value'] in empty:
del localcfg[k]
else:
del localcfg[k]
#removing stale branches
elif not localcfg[k]:
localcfg.pop(k)
#keep traversing tree
else:
self._prune(cfg=localcfg[k], top=False, keeplists=keeplists)
if top:
i += 1
else:
break
return localcfg
###########################################################################
def _find_sc_file(self, filename, missing_ok=False):
"""
Returns the absolute path for the filename provided.
Searches the SC root directory and the 'scpath' parameter for the
filename provided and returns the absolute path. If no valid absolute
path is found during the search, None is returned.
        Shell variables ('$' followed by a string of letters, numbers, and
        underscores) are replaced with the value of the variable.
Args:
filename (str): Relative or absolute filename.
Returns:
Returns absolute path of 'filename' if found, otherwise returns
None.
Examples:
>>> chip._find_sc_file('flows/asicflow.py')
Returns the absolute path based on the sc installation directory.
"""
# Replacing environment variables
filename = self._resolve_env_vars(filename)
# If we have a path relative to our cwd or an abs path, pass-through here
if os.path.exists(os.path.abspath(filename)):
return os.path.abspath(filename)
# Otherwise, search relative to scpaths
scpaths = [self.scroot, self.cwd]
scpaths.extend(self.get('scpath'))
if 'SCPATH' in os.environ:
scpaths.extend(os.environ['SCPATH'].split(os.pathsep))
searchdirs = ', '.join(scpaths)
self.logger.debug(f"Searching for file {filename} in {searchdirs}")
result = None
for searchdir in scpaths:
if not os.path.isabs(searchdir):
searchdir = os.path.join(self.cwd, searchdir)
abspath = os.path.abspath(os.path.join(searchdir, filename))
if os.path.exists(abspath):
result = abspath
break
if result is None and not missing_ok:
self.error = 1
self.logger.error(f"File {filename} was not found")
return result
###########################################################################
def find_files(self, *keypath, cfg=None, missing_ok=False):
"""
Returns absolute paths to files or directories based on the keypath
provided.
By default, this function first checks if the keypath provided has its
`copy` parameter set to True. If so, it returns paths to the files in
the build directory. Otherwise, it resolves these files based on the
current working directory and SC path.
The keypath provided must point to a schema parameter of type file, dir,
or lists of either. Otherwise, it will trigger an error.
Args:
keypath (list str): Variable length schema key list.
cfg (dict): Alternate dictionary to access in place of the default
chip object schema dictionary.
Returns:
            If the keypath points to a scalar entry, returns an absolute path
            to that file/directory, or None if not found. If the keypath
            points to a list entry, returns a list containing either the
            absolute path or None for each entry, depending on whether it is
            found.
Examples:
>>> chip.find_files('source')
Returns a list of absolute paths to source files, as specified in
the schema.
"""
if cfg is None:
cfg = self.cfg
copyall = self.get('copyall', cfg=cfg)
paramtype = self.get(*keypath, field='type', cfg=cfg)
if 'file' in paramtype:
copy = self.get(*keypath, field='copy', cfg=cfg)
else:
copy = False
if 'file' not in paramtype and 'dir' not in paramtype:
self.logger.error('Can only call find_files on file or dir types')
self.error = 1
return None
is_list = bool(re.match(r'\[', paramtype))
paths = self.get(*keypath, cfg=cfg)
# Convert to list if we have scalar
if not is_list:
paths = [paths]
result = []
# Special case where we're looking to find tool outputs: check the
# output directory and return those files directly
if len(keypath) == 5 and keypath[0] == 'eda' and keypath[2] in ('input', 'output'):
step = keypath[3]
index = keypath[4]
io = keypath[2] + 's' # inputs or outputs
iodir = os.path.join(self._getworkdir(step=step, index=index), io)
for path in paths:
abspath = os.path.join(iodir, path)
if os.path.isfile(abspath):
result.append(abspath)
return result
for path in paths:
if (copyall or copy) and ('file' in paramtype):
name = self._get_imported_filename(path)
abspath = os.path.join(self._getworkdir(step='import'), 'outputs', name)
if os.path.isfile(abspath):
# if copy is True and file is found in import outputs,
# continue. Otherwise, fall through to _find_sc_file (the
# file may not have been gathered in imports yet)
result.append(abspath)
continue
result.append(self._find_sc_file(path, missing_ok=missing_ok))
# Convert back to scalar if that was original type
if not is_list:
return result[0]
return result
###########################################################################
def find_result(self, filetype, step, jobname='job0', index='0'):
"""
Returns the absolute path of a compilation result.
Utility function that returns the absolute path to a results
file based on the provided arguments. The result directory
structure is:
<dir>/<design>/<jobname>/<step>/<index>/outputs/<design>.filetype
Args:
filetype (str): File extension (.v, .def, etc)
step (str): Task step name ('syn', 'place', etc)
            jobname (str): Job directory name
index (str): Task index
Returns:
Returns absolute path to file.
Examples:
            >>> vg_filepath = chip.find_result('vg', 'syn')
            Returns the absolute path to the synthesis netlist.
"""
workdir = self._getworkdir(jobname, step, index)
design = self.get('design')
filename = f"{workdir}/outputs/{design}.{filetype}"
self.logger.debug("Finding result %s", filename)
if os.path.isfile(filename):
return filename
else:
self.error = 1
return None
###########################################################################
def _abspath(self, cfg):
'''
Internal function that goes through provided dictionary and resolves all
relative paths where required.
'''
for keypath in self.getkeys(cfg=cfg):
paramtype = self.get(*keypath, cfg=cfg, field='type')
#only do something if type is file or dir
if 'file' in paramtype or 'dir' in paramtype:
abspaths = self.find_files(*keypath, cfg=cfg, missing_ok=True)
self.set(*keypath, abspaths, cfg=cfg)
###########################################################################
def _print_csv(self, cfg, file=None):
allkeys = self.getkeys(cfg=cfg)
for key in allkeys:
keypath = f'"{",".join(key)}"'
value = self.get(*key, cfg=cfg)
if isinstance(value,list):
for item in value:
print(f"{keypath},{item}", file=file)
else:
print(f"{keypath},{value}", file=file)
###########################################################################
def _print_tcl(self, cfg, keys=None, file=None, prefix=""):
'''
Prints out schema as TCL dictionary
'''
#TODO: simplify, no need for recursion
if keys is None:
keys = []
for k in cfg:
newkeys = keys.copy()
newkeys.append(k)
#detect leaf cell
if 'defvalue' in cfg[k]:
if 'value' not in cfg[k]:
selval = cfg[k]['defvalue']
else:
selval = cfg[k]['value']
if bool(re.match(r'\[', str(cfg[k]['type']))):
alist = selval
else:
alist = [selval]
for i, val in enumerate(alist):
#replace $VAR with env(VAR) for tcl
m = re.match(r'\$(\w+)(.*)', str(val))
if m:
alist[i] = ('$env(' +
m.group(1) +
')' +
m.group(2))
#create a TCL dict
keystr = ' '.join(newkeys)
valstr = ' '.join(map(str, alist)).replace(';', '\\;')
outlst = [prefix,
keystr,
'[list ',
valstr,
']']
outstr = ' '.join(outlst)
outstr = outstr + '\n'
#print out value
if file is None:
print(outstr)
else:
print(outstr, file=file)
else:
self._print_tcl(cfg[k],
keys=newkeys,
file=file,
prefix=prefix)
###########################################################################
def merge_manifest(self, cfg, job=None, clobber=True, clear=True, check=False):
"""
Merges an external manifest with the current compilation manifest.
All value fields in the provided schema dictionary are merged into the
        current chip object. Dictionaries with non-existent keypaths produce a
        logger error message and raise the Chip object error flag.
Args:
job (str): Specifies non-default job to merge into
clear (bool): If True, disables append operations for list type
clobber (bool): If True, overwrites existing parameter value
check (bool): If True, checks the validity of each key
Examples:
>>> chip.merge_manifest('my.pkg.json')
            Merges all parameters in my.pkg.json into the Chip object
"""
if job is not None:
            # fill with default schema before populating
self.cfghistory[job] = schema_cfg()
dst = self.cfghistory[job]
else:
dst = self.cfg
for keylist in self.getkeys(cfg=cfg):
#only read in valid keypaths without 'default'
key_valid = True
if check:
key_valid = self.valid(*keylist, quiet=False, default_valid=True)
if key_valid and 'default' not in keylist:
# update value, handling scalars vs. lists
typestr = self.get(*keylist, cfg=cfg, field='type')
val = self.get(*keylist, cfg=cfg)
arg = keylist.copy()
arg.append(val)
if bool(re.match(r'\[', typestr)) & bool(not clear):
self.add(*arg, cfg=dst)
else:
self.set(*arg, cfg=dst, clobber=clobber)
# update other fields that a user might modify
for field in self.getdict(*keylist, cfg=cfg).keys():
if field in ('value', 'switch', 'type', 'require', 'defvalue',
'shorthelp', 'example', 'help'):
# skip these fields (value handled above, others are static)
continue
v = self.get(*keylist, cfg=cfg, field=field)
self.set(*keylist, v, cfg=dst, field=field)
###########################################################################
def _keypath_empty(self, key):
'''
Utility function to check key for an empty list.
'''
emptylist = ("null", None, [])
value = self.get(*key)
defvalue = self.get(*key, field='defvalue')
value_empty = (defvalue in emptylist) and (value in emptylist)
return value_empty
###########################################################################
def _check_files(self):
allowed_paths = [os.path.join(self.cwd, self.get('dir'))]
allowed_paths.extend(os.environ['SC_VALID_PATHS'].split(os.pathsep))
for keypath in self.getkeys():
if 'default' in keypath:
continue
paramtype = self.get(*keypath, field='type')
#only do something if type is file or dir
if 'file' in paramtype or 'dir' in paramtype:
if self.get(*keypath) is None:
# skip unset values (some directories are None by default)
continue
abspaths = self.find_files(*keypath, missing_ok=True)
if not isinstance(abspaths, list):
abspaths = [abspaths]
for abspath in abspaths:
ok = False
if abspath is not None:
for allowed_path in allowed_paths:
if os.path.commonpath([abspath, allowed_path]) == allowed_path:
ok = True
continue
if not ok:
self.logger.error(f'Keypath {keypath} contains path(s) '
'that do not exist or resolve to files outside of '
'allowed directories.')
return False
return True
###########################################################################
def check_manifest(self):
'''
Verifies the integrity of the pre-run compilation manifest.
Checks the validity of the current schema manifest in
memory to ensure that the design has been properly set up prior
to running compilation. The function is called inside the run()
function but can also be called separately. Checks performed by the
check_manifest() function include:
* Has a flowgraph been defined?
* Does the manifest satisfy the schema requirement field settings?
* Are all flowgraph input names legal step/index pairs?
* Are the tool parameter setting requirements met?
Returns:
Returns True if the manifest is valid, else returns False.
Examples:
>>> manifest_ok = chip.check_manifest()
Returns True of the Chip object dictionary checks out.
'''
steplist = self.get('steplist')
if steplist is None:
steplist = self.list_steps()
#1. Checking that flowgraph is legal
if not self.getkeys('flowgraph'):
self.error = 1
self.logger.error(f"No flowgraph defined.")
legal_steps = self.getkeys('flowgraph')
if 'import' not in legal_steps:
self.error = 1
self.logger.error("Flowgraph doesn't contain import step.")
#2. Check requirements list
allkeys = self.getkeys()
for key in allkeys:
keypath = ",".join(key)
if 'default' not in key:
key_empty = self._keypath_empty(key)
requirement = self.get(*key, field='require')
if key_empty and (str(requirement) == 'all'):
self.error = 1
self.logger.error(f"Global requirement missing for [{keypath}].")
elif key_empty and (str(requirement) == self.get('mode')):
self.error = 1
self.logger.error(f"Mode requirement missing for [{keypath}].")
#3. Check per tool parameter requirements (when tool exists)
for step in steplist:
for index in self.getkeys('flowgraph', step):
tool = self.get('flowgraph', step, index, 'tool')
if tool not in self.builtin:
# checking that requirements are set
if self.valid('eda', tool, 'require', step, index):
all_required = self.get('eda', tool, 'require', step, index)
for item in all_required:
keypath = item.split(',')
if self._keypath_empty(keypath):
self.error = 1
self.logger.error(f"Value empty for [{keypath}].")
if self._keypath_empty(['eda', tool, 'exe']):
self.error = 1
self.logger.error(f'Executable not specified for tool {tool}')
if 'SC_VALID_PATHS' in os.environ:
if not self._check_files():
self.error = 1
if not self._check_flowgraph_io():
self.error = 1
# Dynamic checks
# We only perform these if arg, step and arg, index are set.
step = self.get('arg', 'step')
index = self.get('arg', 'index')
if step and index:
tool = self.get('flowgraph', step, index, 'tool')
if self.valid('eda', tool, 'input', step, index):
required_inputs = self.get('eda', tool, 'input', step, index)
else:
required_inputs = []
input_dir = os.path.join(self._getworkdir(step=step, index=index), 'inputs')
for filename in required_inputs:
path = os.path.join(input_dir, filename)
if not os.path.isfile(path):
self.logger.error(f'Required input {filename} not received for {step}{index}.')
self.error = 1
if (not tool in self.builtin) and self.valid('eda', tool, 'require', step, index):
all_required = self.get('eda', tool, 'require', step, index)
for item in all_required:
keypath = item.split(',')
paramtype = self.get(*keypath, field='type')
if ('file' in paramtype) or ('dir' in paramtype):
abspath = self.find_files(*keypath)
if abspath is None or (isinstance(abspath, list) and None in abspath):
self.logger.error(f"Required file keypath {keypath} can't be resolved.")
self.error = 1
return self.error
###########################################################################
def _gather_outputs(self, step, index):
'''Return set of filenames that are guaranteed to be in outputs
directory after a successful run of step/index.'''
tool = self.get('flowgraph', step, index, 'tool')
outputs = set()
if tool in self.builtin:
in_tasks = self.get('flowgraph', step, index, 'input')
in_task_outputs = [self._gather_outputs(*task) for task in in_tasks]
if tool in ('minimum', 'maximum'):
if len(in_task_outputs) > 0:
outputs = in_task_outputs[0].intersection(*in_task_outputs[1:])
elif tool in ('join'):
if len(in_task_outputs) > 0:
outputs = in_task_outputs[0].union(*in_task_outputs[1:])
else:
# TODO: logic should be added here when mux/verify builtins are implemented.
self.logger.error(f'Builtin {tool} not yet implemented')
else:
# Not builtin tool
if self.valid('eda', tool, 'output', step, index):
outputs = set(self.get('eda', tool, 'output', step, index))
else:
outputs = set()
if step == 'import':
imports = {self._get_imported_filename(p) for p in self._collect_paths()}
outputs.update(imports)
return outputs
###########################################################################
def _check_flowgraph_io(self):
'''Check if flowgraph is valid in terms of input and output files.
Returns True if valid, False otherwise.
'''
steplist = self.get('steplist')
if not steplist:
steplist = self.list_steps()
for step in steplist:
for index in self.getkeys('flowgraph', step):
# For each task, check input requirements.
tool = self.get('flowgraph', step, index, 'tool')
if tool in self.builtin:
# We can skip builtins since they don't have any particular
# input requirements -- they just pass through what they
# receive.
continue
# Get files we receive from input tasks.
in_tasks = self.get('flowgraph', step, index, 'input')
if len(in_tasks) > 1:
self.logger.error(f'Tool task {step}{index} has more than one input task.')
elif len(in_tasks) > 0:
in_step, in_index = in_tasks[0]
if in_step not in steplist:
# If we're not running the input step, the required
# inputs need to already be copied into the build
# directory.
workdir = self._getworkdir(step=in_step, index=in_index)
in_step_out_dir = os.path.join(workdir, 'outputs')
inputs = set(os.listdir(in_step_out_dir))
else:
inputs = self._gather_outputs(in_step, in_index)
else:
inputs = set()
if self.valid('eda', tool, 'input', step, index):
requirements = self.get('eda', tool, 'input', step, index)
else:
requirements = []
for requirement in requirements:
if requirement not in inputs:
self.logger.error(f'Invalid flow: {step}{index} will '
f'not receive required input {requirement}.')
return False
return True
###########################################################################
def read_manifest(self, filename, job=None, update=True, clear=True, clobber=True):
"""
Reads a manifest from disk and merges it with the current compilation manifest.
The file format read is determined by the filename suffix. Currently
json (*.json) and yaml(*.yaml) formats are supported.
Args:
filename (filepath): Path to a manifest file to be loaded.
update (bool): If True, manifest is merged into chip object.
clear (bool): If True, disables append operations for list type.
clobber (bool): If True, overwrites existing parameter value.
Returns:
A manifest dictionary.
Examples:
>>> chip.read_manifest('mychip.json')
Loads the file mychip.json into the current Chip object.
"""
abspath = os.path.abspath(filename)
self.logger.debug('Reading manifest %s', abspath)
#Read arguments from file based on file type
with open(abspath, 'r') as f:
if abspath.endswith('.json'):
localcfg = json.load(f)
elif abspath.endswith('.yaml') | abspath.endswith('.yml'):
localcfg = yaml.load(f, Loader=yaml.SafeLoader)
else:
self.error = 1
self.logger.error('Illegal file format. Only json/yaml supported')
f.close()
#Merging arguments with the Chip configuration
if update:
self.merge_manifest(localcfg, job=job, clear=clear, clobber=clobber)
return localcfg
###########################################################################
def write_manifest(self, filename, prune=True, abspath=False, job=None):
'''
Writes the compilation manifest to a file.
The write file format is determined by the filename suffix. Currently
json (*.json), yaml (*.yaml), tcl (*.tcl), and (*.csv) formats are
supported.
Args:
filename (filepath): Output filepath
            prune (bool): If True, only essential non-empty parameters from
                the Chip object schema are written to the output file.
abspath (bool): If set to True, then all schema filepaths
are resolved to absolute filepaths.
Examples:
>>> chip.write_manifest('mydump.json')
Prunes and dumps the current chip manifest into mydump.json
'''
filepath = os.path.abspath(filename)
self.logger.info('Writing manifest to %s', filepath)
if not os.path.exists(os.path.dirname(filepath)):
os.makedirs(os.path.dirname(filepath))
if prune:
self.logger.debug('Pruning dictionary before writing file %s', filepath)
# Keep empty lists to simplify TCL coding
if filepath.endswith('.tcl'):
keeplists = True
else:
keeplists = False
cfgcopy = self._prune(self.cfg, keeplists=keeplists)
else:
cfgcopy = copy.deepcopy(self.cfg)
# resolve absolute paths
if abspath:
self._abspath(cfgcopy)
# TODO: fix
#remove long help (adds no value)
#allkeys = self.getkeys(cfg=cfgcopy)
#for key in allkeys:
# self.set(*key, "...", cfg=cfgcopy, field='help')
# format specific dumping
with open(filepath, 'w') as f:
if filepath.endswith('.json'):
print(json.dumps(cfgcopy, indent=4, sort_keys=True), file=f)
            elif filepath.endswith('.yaml') or filepath.endswith('.yml'):
print(yaml.dump(cfgcopy, Dumper=YamlIndentDumper, default_flow_style=False), file=f)
elif filepath.endswith('.core'):
cfgfuse = self._dump_fusesoc(cfgcopy)
print("CAPI=2:", file=f)
print(yaml.dump(cfgfuse, Dumper=YamlIndentDumper, default_flow_style=False), file=f)
elif filepath.endswith('.tcl'):
print("#############################################", file=f)
print("#!!!! AUTO-GENERATED FILE. DO NOT EDIT!!!!!!", file=f)
print("#############################################", file=f)
self._print_tcl(cfgcopy, prefix="dict set sc_cfg", file=f)
elif filepath.endswith('.csv'):
self._print_csv(cfgcopy, file=f)
else:
self.logger.error('File format not recognized %s', filepath)
self.error = 1
###########################################################################
def check_checklist(self, standard, item=None):
'''
Check an item in checklist.
Checks the status of an item in the checklist for the standard
provided. If the item is unspecified, all items are checked.
The function relies on the checklist 'criteria' parameter and
        'step' parameter to check for the existence of report files
        and for passing metric-based criteria. Checklist items with
        empty 'report' values or unmet criteria result in error messages
        and raise the error flag.
Args:
standard(str): Standard to check.
item(str): Item to check from standard.
Returns:
Status of item check.
Examples:
>>> status = chip.check_checklist('iso9000', 'd000')
Returns status.
'''
if item is None:
items = self.getkeys('checklist', standard)
else:
items = [item]
global_check = True
for item in items:
step = self.get('checklist', standard, item, 'step')
index = self.get('checklist', standard, item, 'index')
all_criteria = self.get('checklist', standard, item, 'criteria')
report_ok = False
criteria_ok = True
# manual
if step not in self.getkeys('flowgraph'):
#criteria not used, so always ok
criteria_ok = True
if len(self.getkeys('checklist',standard, item, 'report')) <2:
self.logger.error(f"No report found for {item}")
report_ok = False
else:
tool = self.get('flowgraph', step, index, 'tool')
                # copy report paths over to checklist
for reptype in self.getkeys('eda', tool, 'report', step, index):
report_ok = True
report = self.get('eda', tool, 'report', step, index, reptype)
self.set('checklist', standard, item, 'report', reptype, report)
# quantifiable checklist criteria
for criteria in all_criteria:
m = re.match(r'(\w+)([\>\=\<]+)(\w+)', criteria)
if not m:
self.logger.error(f"Illegal checklist criteria: {criteria}")
return False
elif m.group(1) not in self.getkeys('metric', step, index):
self.logger.error(f"Critera must use legal metrics only: {criteria}")
return False
else:
param = m.group(1)
op = m.group(2)
goal = str(m.group(3))
value = str(self.get('metric', step, index, param, 'real'))
criteria_ok = self._safecompare(value, op, goal)
#item check
if not report_ok:
self.logger.error(f"Report missing for checklist: {standard} {item}")
global_check = False
self.error = 1
elif not criteria_ok:
self.logger.error(f"Criteria check failed for checklist: {standard} {item}")
global_check = False
self.error = 1
return global_check
###########################################################################
def read_file(self, filename, step='import', index='0'):
'''
Read file defined in schema. (WIP)
'''
return(0)
###########################################################################
def package(self, filename, prune=True):
'''
Create sanitized project package. (WIP)
The SiliconCompiler project is filtered and exported as a JSON file.
If the prune option is set to True, then all metrics, records and
results are pruned from the package file.
Args:
filename (filepath): Output filepath
prune (bool): If True, only essential source parameters are
included in the package.
Examples:
>>> chip.package('package.json')
Write project information to 'package.json'
'''
return(0)
###########################################################################
def publish(self, filename):
'''
Publishes package to registry. (WIP)
        The file is uploaded to a central package registry based on the
        user credentials found in ~/.sc/credentials.
Args:
filename (filepath): Package filename
Examples:
>>> chip.publish('hello.json')
Publish hello.json to central repository.
'''
return(0)
###########################################################################
def _dump_fusesoc(self, cfg):
'''
Internal function for dumping core information from chip object.
'''
fusesoc = {}
toplevel = self.get('design', cfg=cfg)
if self.get('name'):
name = self.get('name', cfg=cfg)
else:
name = toplevel
version = self.get('projversion', cfg=cfg)
# Basic information
fusesoc['name'] = f"{name}:{version}"
fusesoc['description'] = self.get('description', cfg=cfg)
fusesoc['filesets'] = {}
# RTL
#TODO: place holder fix with pre-processor list
files = []
for item in self.get('source', cfg=cfg):
files.append(item)
fusesoc['filesets']['rtl'] = {}
fusesoc['filesets']['rtl']['files'] = files
fusesoc['filesets']['rtl']['depend'] = {}
fusesoc['filesets']['rtl']['file_type'] = {}
# Constraints
files = []
for item in self.get('constraint', cfg=cfg):
files.append(item)
fusesoc['filesets']['constraints'] = {}
fusesoc['filesets']['constraints']['files'] = files
# Default Target
fusesoc['targets'] = {}
fusesoc['targets']['default'] = {
'filesets' : ['rtl', 'constraints', 'tb'],
'toplevel' : toplevel
}
return fusesoc
###########################################################################
def write_flowgraph(self, filename, fillcolor='#ffffff',
fontcolor='#000000', fontsize='14',
border=True, landscape=False):
'''Renders and saves the compilation flowgraph to a file.
The chip object flowgraph is traversed to create a graphviz (\*.dot)
file comprised of node, edges, and labels. The dot file is a
graphical representation of the flowgraph useful for validating the
correctness of the execution flow graph. The dot file is then
converted to the appropriate picture or drawing format based on the
filename suffix provided. Supported output render formats include
png, svg, gif, pdf and a few others. For more information about the
        graphviz project, see https://graphviz.org/
Args:
filename (filepath): Output filepath
Examples:
>>> chip.write_flowgraph('mydump.png')
Renders the object flowgraph and writes the result to a png file.
'''
filepath = os.path.abspath(filename)
self.logger.debug('Writing flowgraph to file %s', filepath)
fileroot, ext = os.path.splitext(filepath)
fileformat = ext.replace(".", "")
# controlling border width
if border:
penwidth = '1'
else:
penwidth = '0'
# controlling graph direction
if landscape:
rankdir = 'LR'
else:
rankdir = 'TB'
dot = graphviz.Digraph(format=fileformat)
dot.graph_attr['rankdir'] = rankdir
dot.attr(bgcolor='transparent')
for step in self.getkeys('flowgraph'):
irange = 0
for index in self.getkeys('flowgraph', step):
irange = irange +1
for i in range(irange):
index = str(i)
node = step+index
# create step node
tool = self.get('flowgraph', step, index, 'tool')
if tool in self.builtin:
labelname = step
elif tool is not None:
labelname = f"{step}{index}\n({tool})"
else:
labelname = f"{step}{index}"
dot.node(node, label=labelname, bordercolor=fontcolor, style='filled',
fontcolor=fontcolor, fontsize=fontsize, ordering="in",
penwidth=penwidth, fillcolor=fillcolor)
# get inputs
all_inputs = []
for in_step, in_index in self.get('flowgraph', step, index, 'input'):
all_inputs.append(in_step + in_index)
for item in all_inputs:
dot.edge(item, node)
dot.render(filename=fileroot, cleanup=True)
########################################################################
def _collect_paths(self):
'''
Returns list of paths to files that will be collected by import step.
See docstring for _collect() for more details.
'''
paths = []
copyall = self.get('copyall')
allkeys = self.getkeys()
for key in allkeys:
leaftype = self.get(*key, field='type')
if re.search('file', leaftype):
copy = self.get(*key, field='copy')
value = self.get(*key)
if copyall or copy:
for item in value:
paths.append(item)
return paths
########################################################################
def _collect(self, step, index, active):
'''
Collects files found in the configuration dictionary and places
them in inputs/. The function only copies in files that have the 'copy'
field set as true. If 'copyall' is set to true, then all files are
copied in.
1. indexing like in run, job1
2. chdir package
3. run tool to collect files, pickle file in output/design.v
4. copy in rest of the files below
5. record files read in to schema
'''
indir = 'inputs'
if not os.path.exists(indir):
os.makedirs(indir)
self.logger.info('Collecting input sources')
for path in self._collect_paths():
filename = self._get_imported_filename(path)
abspath = self._find_sc_file(path)
if abspath:
self.logger.info(f"Copying {abspath} to '{indir}' directory")
shutil.copy(abspath, os.path.join(indir, filename))
else:
self._haltstep(step, index, active)
outdir = 'outputs'
if not os.path.exists(outdir):
os.makedirs(outdir)
# Logic to make links from outputs/ to inputs/, skipping anything that
# will be output by the tool as well as the manifest. We put this here
# so that tools used for the import stage don't have to duplicate this
# logic. We skip this logic for 'join'-based single-step imports, since
# 'join' does the copy for us.
tool = self.get('flowgraph', step, index, 'tool')
if tool not in self.builtin:
if self.valid('eda', tool, 'output', step, index):
outputs = self.get('eda', tool, 'output', step, index)
else:
outputs = []
design = self.get('design')
ignore = outputs + [f'{design}.pkg.json']
utils.copytree(indir, outdir, dirs_exist_ok=True, link=True, ignore=ignore)
elif tool != 'join':
self.error = 1
self.logger.error(f'Invalid import step builtin {tool}. Must be tool or join.')
###########################################################################
def archive(self, step=None, index=None, all_files=False):
'''Archive a job directory.
Creates a single compressed archive (.tgz) based on the design,
jobname, and flowgraph in the current chip manifest. Individual
        steps and/or indices can be archived based on the arguments specified.
        By default, all steps and indices in the flowgraph are archived, and
        only the outputs directory content and the log file are included
        in the archive.
Args:
step(str): Step to archive.
index (str): Index to archive
all_files (bool): If True, all files are archived.
'''
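        # Hedged usage sketch (argument values are illustrative):
        #   chip.archive()                         # all steps/indices, outputs + logs
        #   chip.archive(step='syn', index='0')    # a single step/index
        #   chip.archive(all_files=True)           # include every file per step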
jobname = self.get('jobname')
design = self.get('design')
buildpath = self.get('dir')
if step:
steplist = [step]
elif self.get('arg', 'step'):
steplist = [self.get('arg', 'step')]
elif self.get('steplist'):
steplist = self.get('steplist')
else:
steplist = self.list_steps()
if step:
archive_name = f"{design}_{jobname}_{step}.tgz"
else:
archive_name = f"{design}_{jobname}.tgz"
with tarfile.open(archive_name, "w:gz") as tar:
for step in steplist:
if index:
indexlist = [index]
else:
indexlist = self.getkeys('flowgraph', step)
for item in indexlist:
basedir = os.path.join(buildpath, design, jobname, step, item)
if all_files:
tar.add(os.path.abspath(basedir), arcname=basedir)
else:
outdir = os.path.join(basedir,'outputs')
logfile = os.path.join(basedir, step+'.log')
tar.add(os.path.abspath(outdir), arcname=outdir)
if os.path.isfile(logfile):
tar.add(os.path.abspath(logfile), arcname=logfile)
###########################################################################
def hash_files(self, *keypath, algo='sha256', update=True):
'''Generates hash values for a list of parameter files.
        Generates a hash value for each file found in the keypath.
        If the update variable is True, the hash values are recorded in the
'filehash' field of the parameter, following the order dictated by
the files within the 'values' parameter field.
Files are located using the find_files() function.
        The file hash calculation is performed based on the 'algo' setting.
Supported algorithms include SHA1, SHA224, SHA256, SHA384, SHA512,
and MD5.
Args:
*keypath(str): Keypath to parameter.
algo (str): Algorithm to use for file hash calculation
update (bool): If True, the hash values are recorded in the
chip object manifest.
Returns:
A list of hash values.
Examples:
            >>> hashlist = chip.hash_files('sources')
Hashlist gets list of hash values computed from 'sources' files.
'''
keypathstr = ','.join(keypath)
#TODO: Insert into find_files?
if 'file' not in self.get(*keypath, field='type'):
self.logger.error(f"Illegal attempt to hash non-file parameter [{keypathstr}].")
self.error = 1
else:
filelist = self.find_files(*keypath)
#cycle through all paths
hashlist = []
if filelist:
self.logger.info(f'Computing hash value for [{keypathstr}]')
for filename in filelist:
if os.path.isfile(filename):
#TODO: Implement algo selection
hashobj = hashlib.sha256()
with open(filename, "rb") as f:
for byte_block in iter(lambda: f.read(4096), b""):
hashobj.update(byte_block)
hash_value = hashobj.hexdigest()
hashlist.append(hash_value)
else:
self.error = 1
self.logger.info(f"Internal hashing error, file not found")
# compare previous hash to new hash
oldhash = self.get(*keypath,field='filehash')
for i,item in enumerate(oldhash):
if item != hashlist[i]:
self.logger.error(f"Hash mismatch for [{keypath}]")
self.error = 1
self.set(*keypath, hashlist, field='filehash', clobber=True)
###########################################################################
def audit_manifest(self):
'''Verifies the integrity of the post-run compilation manifest.
Checks the integrity of the chip object implementation flow after
the run() function has been completed. Errors, warnings, and debug
messages are reported through the logger object.
Audit checks performed include:
* Time stamps
* File modifications
* Error and warning policy
* IP and design origin
* User access
* License terms
* Version checks
Returns:
Returns True if the manifest has integrity, else returns False.
Example:
>>> chip.audit_manifest()
Audits the Chip object manifest and returns 0 if successful.
'''
return 0
###########################################################################
def calc_area(self):
        '''Calculates the area of a rectilinear die area.
        Uses the shoelace formula to calculate the design area using
        the (x,y) point tuples from the 'diearea' parameter. If the diearea
        parameter only contains two points, then the first and second points
        must be the lower left and upper right corners of the rectangle.
(Ref: https://en.wikipedia.org/wiki/Shoelace_formula)
Returns:
Design area (float).
Examples:
>>> area = chip.calc_area()
'''
vertices = self.get('asic', 'diearea')
if len(vertices) == 2:
width = vertices[1][0] - vertices[0][0]
height = vertices[1][1] - vertices[0][1]
area = width * height
else:
area = 0.0
for i in range(len(vertices)):
j = (i + 1) % len(vertices)
area += vertices[i][0] * vertices[j][1]
area -= vertices[j][0] * vertices[i][1]
area = abs(area) / 2
return area
###########################################################################
def calc_yield(self, model='poisson'):
'''Calculates raw die yield.
Calculates the raw yield of the design as a function of design area
and d0 defect density. Calculation can be done based on the poisson
model (default) or the murphy model. The die area and the d0
parameters are taken from the chip dictionary.
* Poisson model: dy = exp(-area * d0/100).
* Murphy model: dy = ((1-exp(-area * d0/100))/(area * d0/100))^2.
Args:
model (string): Model to use for calculation (poisson or murphy)
Returns:
Design yield percentage (float).
Examples:
            >>> dieyield = chip.calc_yield()
            Variable dieyield gets the yield value based on the chip manifest.
'''
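        # Worked sketch with illustrative numbers (units as assumed by the
        # schema): area = 1.0 and d0 = 1.5 give a poisson yield of
        # exp(-1.0 * 1.5 / 100) = exp(-0.015) ~= 0.985 (98.5%).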
d0 = self.get('pdk', 'd0')
diearea = self.calc_area()
if model == 'poisson':
dy = math.exp(-diearea * d0/100)
elif model == 'murphy':
dy = ((1-math.exp(-diearea * d0/100))/(diearea * d0/100))**2
return dy
##########################################################################
def calc_dpw(self):
'''Calculates dies per wafer.
Calculates the gross dies per wafer based on the design area, wafersize,
wafer edge margin, and scribe lines. The calculation is done by starting
at the center of the wafer and placing as many complete design
footprints as possible within a legal placement area.
Returns:
Number of gross dies per wafer (int).
Examples:
>>> dpw = chip.calc_dpw()
Variable dpw gets gross dies per wafer value based on the chip manifest.
'''
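        # Worked sketch with illustrative numbers: a wafersize of 300 with an
        # edgemargin of 3 gives a placement radius of 300/2 - 3 = 147, and a
        # 10 x 10 die with 0.1 scribes steps out from the wafer center in
        # 10.1-unit increments per quadrant.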
#PDK information
wafersize = self.get('pdk', 'wafersize')
edgemargin = self.get('pdk', 'edgemargin')
hscribe = self.get('pdk', 'hscribe')
vscribe = self.get('pdk', 'vscribe')
#Design parameters
        diesize = self.get('asic', 'diesize').split()
        # cast to float since split() returns strings
        diewidth = (float(diesize[2]) - float(diesize[0]))/1000
        dieheight = (float(diesize[3]) - float(diesize[1]))/1000
#Derived parameters
radius = wafersize/2 -edgemargin
stepwidth = (diewidth + hscribe)
stepheight = (dieheight + vscribe)
#Raster dies out from center until you touch edge margin
#Work quadrant by quadrant
dies = 0
for quad in ('q1', 'q2', 'q3', 'q4'):
x = 0
y = 0
if quad == "q1":
xincr = stepwidth
yincr = stepheight
elif quad == "q2":
xincr = -stepwidth
yincr = stepheight
elif quad == "q3":
xincr = -stepwidth
yincr = -stepheight
elif quad == "q4":
xincr = stepwidth
yincr = -stepheight
#loop through all y values from center
while math.hypot(0, y) < radius:
y = y + yincr
while math.hypot(x, y) < radius:
x = x + xincr
dies = dies + 1
x = 0
return int(dies)
###########################################################################
def summary(self, steplist=None, show_all_indices=False):
'''
Prints a summary of the compilation manifest.
Metrics from the flowgraph steps, or steplist parameter if
defined, are printed out on a per step basis. All metrics from the
metric dictionary with weights set in the flowgraph dictionary are
printed out.
Args:
show_all_indices (bool): If True, displays metrics for all indices
of each step. If False, displays metrics only for winning
indices.
Examples:
>>> chip.summary()
Prints out a summary of the run to stdout.
'''
# display whole flowgraph if no steplist specified
if not steplist:
steplist = self.list_steps()
        # only report tool-based steps (filter into a new list rather than
        # deleting entries from steplist while iterating over it)
        steplist = [step for step in steplist
                    if self.get('flowgraph', step, '0', 'tool') not in self.builtin]
# job directory
jobdir = self._getworkdir()
# Custom reporting modes
paramlist = []
for item in self.getkeys('param'):
paramlist.append(item+"="+self.get('param',item))
if paramlist:
paramstr = ', '.join(paramlist)
else:
paramstr = "None"
info_list = ["SUMMARY:\n",
"design : " + self.get('design'),
"params : " + paramstr,
"jobdir : "+ jobdir,
]
if self.get('mode') == 'asic':
info_list.extend(["foundry : " + self.get('pdk', 'foundry'),
"process : " + self.get('pdk', 'process'),
"targetlibs : "+" ".join(self.get('asic', 'targetlib'))])
elif self.get('mode') == 'fpga':
info_list.extend(["partname : "+self.get('fpga','partname')])
info = '\n'.join(info_list)
print("-"*135)
print(info, "\n")
# Stepping through all steps/indices and printing out metrics
data = []
#Creating Header
header = []
indices_to_show = {}
colwidth = 8
for step in steplist:
if show_all_indices:
indices_to_show[step] = self.getkeys('flowgraph', step)
else:
# Default for last step in list (could be tool or function)
indices_to_show[step] = ['0']
# Find winning index
for index in self.getkeys('flowgraph', step):
stepindex = step + index
for i in self.getkeys('flowstatus'):
for j in self.getkeys('flowstatus',i):
for in_step, in_index in self.get('flowstatus',i,j,'select'):
if (in_step + in_index) == stepindex:
                                    indices_to_show[step] = [index]
# header for data frame
for step in steplist:
for index in indices_to_show[step]:
header.append(f'{step}{index}'.center(colwidth))
# figure out which metrics have non-zero weights
metric_list = []
for step in steplist:
for metric in self.getkeys('metric','default','default'):
if metric in self.getkeys('flowgraph', step, '0', 'weight'):
if self.get('flowgraph', step, '0', 'weight', metric) is not None:
if metric not in metric_list:
metric_list.append(metric)
# print out all metrics
metrics = []
for metric in metric_list:
metrics.append(" " + metric)
row = []
for step in steplist:
for index in indices_to_show[step]:
value = None
if 'real' in self.getkeys('metric', step, index, metric):
value = self.get('metric', step, index, metric, 'real')
if value is None:
value = 'ERR'
else:
value = str(value)
row.append(" " + value.center(colwidth))
data.append(row)
pandas.set_option('display.max_rows', 500)
pandas.set_option('display.max_columns', 500)
| pandas.set_option('display.width', 100) | pandas.set_option |
# -*- coding: utf-8 -*-
from __future__ import absolute_import, division, print_function
from os.path import join, expanduser, basename, splitext, abspath, exists # NOQA
import pandas as pd
import cv2
import glob
import ubelt as ub
import numpy as np
from clab.tasks._sseg import SemanticSegmentationTask
from clab import util
from clab.util import imutil
from clab.util import colorutil
from clab.util import fnameutil # NOQA
from clab import inputs
from clab import preprocess
from clab import getLogger
import parse
if True:
logger = getLogger(__name__)
print = logger.info
def _imshow_dtm(image):
"""
out_data2 = imutil.imread(out_path)
out_data == out_data2
image = out_data2
"""
import copy
import matplotlib as mpl
from matplotlib.colors import Normalize
import plottool as pt
UNKNOWN = -32767
vmin = image[image != UNKNOWN].min()
vmax = image.max()
norm = Normalize(vmin=vmin, vmax=vmax)
cmap = copy.copy(mpl.cm.get_cmap('viridis'))
cmap.set_bad((0, 0, 0))
pt.imshow(image, cmap=cmap, norm=norm, fnum=1)
class UrbanMapper3D(SemanticSegmentationTask):
"""
References:
https://community.topcoder.com/longcontest/?module=ViewProblemStatement&compid=57607&rd=17007
Example:
>>> from clab.tasks.urban_mapper_3d import *
>>> task = UrbanMapper3D(root='~/remote/aretha/data/UrbanMapper3D',
>>> workdir='~/data/work/urban_mapper4', boundary=True)
>>> print(task.classnames)
>>> task.prepare_fullres_inputs()
>>> print(task.classnames)
>>> task.preprocess()
>>> (train, test), = task.xval_splits()
>>> inputs_base = ub.ensuredir((task.workdir, 'inputs'))
>>> train.base_dpath = inputs_base
>>> train.prepare_images(force=True)
>>> train.prepare_input()
>>> gtstats = train.prepare_gtstats(task)
>>> nan_value = -32767.0 # hack: specific number for DTM
>>> center_stats = self.inputs.prepare_center_stats(
>>> task, nan_value=nan_value, colorspace='RGB')
"""
def __init__(task, root=None, workdir=None, boundary=True):
if root is None:
assert False
task.workdir = expanduser(workdir)
task.root = expanduser(root)
task.target_shape = (360, 480)
task.input_shape = (360, 480)
# the challenge training set (split into train test for our evaluation)
task.fullres = None
task.input_modes = {}
task.augment_modes = {}
task.eval_fullres = None # the challenge evaluation dataset
task.classnames = [
'non-building',
'building',
'uncertain',
]
task.null_classname = 'uncertain'
super(UrbanMapper3D, task).__init__()
task.boundary_mode_enabled = boundary
if task.boundary_mode_enabled:
task._boundary_mode()
else:
task.set_classnames(task.classnames, task.null_classname)
def _boundary_mode(task):
task.boundary_mode_enabled = True
# update the task to reflect the updated gt labels
classnames = [
'non-building',
'inner_building',
'outer_building',
'uncertain',
]
null_classname = 'uncertain'
task.set_classnames(classnames, null_classname, {})
def customize_colors(task):
# called by set_classnames
lookup_bgr255 = colorutil.lookup_bgr255
task.class_colors['non-building'] = lookup_bgr255('black')
def preprocess(task, force=False):
task.prepare_fullres_inputs()
datadir = ub.ensuredir((task.workdir, 'data'))
prep = preprocess.Preprocessor(datadir)
prep.part_config['overlap'] = .75
prep.ignore_label = task.ignore_label
clear = force
fullres = task.fullres
# task.input_modes['lowres'] = prep.make_lowres(fullres, clear=clear)
task.input_modes['part-scale1'] = prep.make_parts(
fullres, scale=1, clear=clear)
# for k, v in task.input_modes.items():
# # old code needed for caffe
# task.augment_modes[k] = prep.make_augment_inputs(v, rng='determ', clear=clear)
def load_fullres_inputs(task, subdir='training'):
"""
Loads the source data into the Inputs format for further processing.
Example:
>>> from clab.tasks.urban_mapper_3d import *
>>> task = UrbanMapper3D(root='~/remote/aretha/data/UrbanMapper3D',
>>> workdir='~/data/work/urban_mapper')
>>> task.load_fullres_inputs()
>>> subdir = 'training'
"""
tagged_paths = {
'gt': glob.glob(join(task.root, subdir, '*_GTL.tif')),
'im': glob.glob(join(task.root, subdir, '*_RGB.tif')),
'gti': glob.glob(join(task.root, subdir, '*_GTI.tif')),
# digital terrain model
'dtm': glob.glob(join(task.root, subdir, '*_DTM.tif')),
# digital surface model
'dsm': glob.glob(join(task.root, subdir, '*_DSM.tif')),
}
def extract_primary_key_info(paths, tag):
if not paths:
return | pd.DataFrame() | pandas.DataFrame |
import unittest,os
import pandas as pd
from igf_data.utils.fileutils import get_temp_dir,remove_dir
from igf_data.process.metadata_reformat.reformat_metadata_file import Reformat_metadata_file
class Reformat_metadata_file_testA(unittest.TestCase):
def setUp(self):
self.tmp_dir = get_temp_dir()
def tearDown(self):
remove_dir(self.tmp_dir)
def test_sample_name_reformat(self):
sample_name = 'IGF*0(1_1)'
self.assertEqual(Reformat_metadata_file.sample_name_reformat(sample_name),'IGF-0-1-1')
def test_sample_and_project_reformat(self):
sample_id = 'IGF*0(1_1)'
self.assertEqual(Reformat_metadata_file.sample_and_project_reformat(tag_name=sample_id),'IGF-0-1_1')
project_name = 'IGF scRNA '
self.assertEqual(Reformat_metadata_file.sample_and_project_reformat(tag_name=project_name),'IGF-scRNA')
def test_get_assay_info(self):
# test 1
library_preparation_val = 'Whole Genome Sequencing Human - Sample'
sample_description_val = 'NA'
library_type_val = 'NA'
re_metadata = \
Reformat_metadata_file(\
infile='data/metadata_validation/metadata_reformatting/incorrect_metadata.csv')
library_source,library_strategy,exp_type,biomaterial_type = \
re_metadata.get_assay_info(library_preparation_val,sample_description_val,library_type_val)
self.assertEqual(library_source,'GENOMIC')
self.assertEqual(library_strategy,'WGS')
self.assertEqual(exp_type,'WGS')
self.assertEqual(biomaterial_type,'UNKNOWN')
## test 2
library_preparation_val = "Single Cell -3' RNAseq- Sample"
sample_description_val = 'NA'
library_type_val = 'NA'
library_source,library_strategy,exp_type,biomaterial_type = \
re_metadata.get_assay_info(library_preparation_val,sample_description_val,library_type_val)
self.assertEqual(library_source,'TRANSCRIPTOMIC_SINGLE_CELL')
self.assertEqual(library_strategy,'RNA-SEQ')
self.assertEqual(exp_type,'TENX-TRANSCRIPTOME-3P')
self.assertEqual(biomaterial_type,'UNKNOWN')
## test 3
library_preparation_val = "Single Cell -3' RNAseq- Sample Nuclei"
sample_description_val = 'NA'
library_type_val = 'NA'
library_source,library_strategy,exp_type,biomaterial_type = \
re_metadata.get_assay_info(library_preparation_val,sample_description_val,library_type_val)
self.assertEqual(library_source,'TRANSCRIPTOMIC_SINGLE_CELL')
self.assertEqual(library_strategy,'RNA-SEQ')
self.assertEqual(exp_type,'TENX-TRANSCRIPTOME-3P')
self.assertEqual(biomaterial_type,'SINGLE_NUCLEI')
## test 4
library_preparation_val = "Not Applicable"
sample_description_val = "Pre Made Library"
library_type_val = "SINGLE CELL-3' RNA (NUCLEI)"
library_source,library_strategy,exp_type,biomaterial_type = \
re_metadata.get_assay_info(library_preparation_val,sample_description_val,library_type_val)
self.assertEqual(library_source,'TRANSCRIPTOMIC_SINGLE_CELL')
self.assertEqual(library_strategy,'RNA-SEQ')
self.assertEqual(exp_type,'TENX-TRANSCRIPTOME-3P')
self.assertEqual(biomaterial_type,'SINGLE_NUCLEI')
def test_calculate_insert_length_from_fragment(self):
self.assertEqual(Reformat_metadata_file.calculate_insert_length_from_fragment(fragment_length=400),280)
def test_get_species_info(self):
re_metadata = \
Reformat_metadata_file(\
infile='data/metadata_validation/metadata_reformatting/incorrect_metadata.csv')
taxon_id, scientific_name, species_name = \
re_metadata.get_species_info(species_text_val='human')
self.assertEqual(taxon_id,'9606')
self.assertEqual(scientific_name,'Homo sapiens')
self.assertEqual(species_name,'HG38')
def test_populate_metadata_values(self):
data = pd.Series(\
{'project_igf_id':'IGFQ1 scRNA-seq5primeFB',
'sample_igf_id':'IGF3[',
'library_preparation':'RNA Sequencing - Total RNA',
'sample_description':'NA',
'library_type':'NA',
'species_text':'mouse'})
re_metadata = \
Reformat_metadata_file(\
infile='data/metadata_validation/metadata_reformatting/incorrect_metadata.csv')
data = re_metadata.populate_metadata_values(row=data)
self.assertEqual(data.project_igf_id,'IGFQ1-scRNA-seq5primeFB')
self.assertTrue('library_source' in data.keys())
self.assertEqual(data.library_source,'TRANSCRIPTOMIC')
def test_reformat_raw_metadata_file(self):
output_file = os.path.join(self.tmp_dir,'samplesheet.csv')
re_metadata = \
Reformat_metadata_file(\
infile='data/metadata_validation/metadata_reformatting/incorrect_metadata.csv')
re_metadata.\
reformat_raw_metadata_file(output_file=output_file)
data = \
| pd.read_csv(output_file) | pandas.read_csv |
import pandas as pd
import json
import os
from sklearn.metrics import confusion_matrix
from io import StringIO
import boto3
class Visualization:
def __init__(self):
self.parser_args = None
def _generate_confusion_matrix_metadata(self, confusion_matrix_path, vocab):
print("Generating Confusion matrix Metadata")
metadata = {
"type": "confusion_matrix",
"format": "csv",
"schema": [
{"name": "target", "type": "CATEGORY"},
{"name": "predicted", "type": "CATEGORY"},
{"name": "count", "type": "NUMBER"},
],
"source": confusion_matrix_path,
            # Convert vocab to string because for boolean values we want "True|False" to match csv data.
"labels": list(map(str, vocab)),
}
self._write_ui_metadata(
metadata_filepath="/mlpipeline-ui-metadata.json", metadata_dict=metadata
)
def _write_ui_metadata(self, metadata_filepath, metadata_dict, key="outputs"):
if not os.path.exists(metadata_filepath):
metadata = {key: [metadata_dict]}
else:
with open(metadata_filepath) as fp:
metadata = json.load(fp)
metadata_outputs = metadata[key]
metadata_outputs.append(metadata_dict)
print("Writing to file: {}".format(metadata_filepath))
with open(metadata_filepath, "w") as fp:
json.dump(metadata, fp)
def _enable_tensorboard_visualization(self, tensorboard_root):
print("Enabling Tensorboard Visualization")
metadata = {
"type": "tensorboard",
"source": tensorboard_root,
}
import os
os.environ["AWS_REGION"] = "us-east-2"
self._write_ui_metadata(
metadata_filepath="/mlpipeline-ui-metadata.json", metadata_dict=metadata
)
def _visualize_accuracy_metric(self, accuracy):
metadata = {
"name": "accuracy-score",
"numberValue": accuracy,
"format": "PERCENTAGE",
}
self._write_ui_metadata(
metadata_filepath="/mlpipeline-metrics.json", metadata_dict=metadata, key="metrics"
)
def _generate_confusion_matrix(self, confusion_matrix_dict):
actuals = confusion_matrix_dict["actuals"]
preds = confusion_matrix_dict["preds"]
bucket_name = confusion_matrix_dict["bucket_name"]
folder_name = confusion_matrix_dict["folder_name"]
# Generating confusion matrix
df = pd.DataFrame(list(zip(actuals, preds)), columns=["target", "predicted"])
vocab = list(df["target"].unique())
cm = confusion_matrix(df["target"], df["predicted"], labels=vocab)
data = []
for target_index, target_row in enumerate(cm):
for predicted_index, count in enumerate(target_row):
data.append((vocab[target_index], vocab[predicted_index], count))
confusion_matrix_df = | pd.DataFrame(data, columns=["target", "predicted", "count"]) | pandas.DataFrame |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
This script reads in the QPESUMS data in *.npy, parse its timstamp, and convert UTC to LST (UTC+8).
"""
import os, logging, argparse, datetime, shutil
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
#-----------------------------------------------------------------------
__author__ = "<NAME>"
__copyright__ = "Copyright 2017~2019, DataQualia Lab Co. Ltd."
__credits__ = ["<NAME>"]
__license__ = "UNLICENSED"
__version__ = "0.1.0"
__maintainer__ = "<NAME>"
__email__ = "<EMAIL>"
__status__ = "development"
__date__ = '2019-01-09'
#-----------------------------------------------------------------------
def search_qpesums_npy(srcdir):
'''Scan QPESUMS data in *.npy format (6*275*162) from the specified directory.
'''
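    # Hedged note: file names are assumed to use a timestamp stem followed by
    # the .npy extension (e.g. '201901091200.npy'), since the stem before the
    # first '.' is taken as the timestamp below.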
import pandas as pd
fileinfo = []
for subdir, dirs, files in os.walk(srcdir, followlinks=True):
for f in files:
if f.endswith('.npy'):
# Parse file name for time information
furi = os.path.join(subdir, f)
finfo = f.split('.')
ftime = finfo[0]
#logging.debug([furi] + finfo[1:3])
fileinfo.append([furi, ftime])
results = | pd.DataFrame(fileinfo, columns=['furi', 'timestamp']) | pandas.DataFrame |
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import warnings
warnings.filterwarnings('ignore')
import seaborn as sns
import datetime
from sklearn.cluster import MiniBatchKMeans
from sklearn.metrics import (roc_auc_score,roc_curve,precision_recall_curve, auc,
classification_report, confusion_matrix, average_precision_score,
accuracy_score,silhouette_score,mean_squared_error)
from inspect import signature  # sklearn.utils.fixes.signature was removed in newer scikit-learn releases
def print_classification_performance2class_report(model,X_test,y_test):
"""
Program: print_classification_performance2class_report
Author: <NAME>.
Purpose: print standard 2-class classification metrics report
"""
sns.set()
y_pred = model.predict(X_test)
y_pred_proba = model.predict_proba(X_test)[:,1]
conf_mat = confusion_matrix(y_test,y_pred)
TN = conf_mat[0][0]
FP = conf_mat[0][1]
FN = conf_mat[1][0]
TP = conf_mat[1][1]
PC = TP/(TP+FP)
RC = TP/(TP+FN)
FS = 2 *((PC*RC)/(PC+RC))
AP = average_precision_score(y_test,y_pred)
ACC = accuracy_score(y_test,y_pred)
RMSE = np.sqrt(mean_squared_error(y_test, y_pred))
print("Accuracy:{:.2%}".format(ACC))
print("Precision:{:.2%}".format(PC))
print("Recall:{:.2%}".format(RC))
print("Fscore:{:.2%}".format(FS))
print("Average precision:{:.2%}".format(AP))
print('The RMSE value is {:.4f}'.format(RMSE))
fig = plt.figure(figsize=(20,3))
fig.subplots_adjust(hspace=0.2,wspace=0.2)
#heatmap
plt.subplot(141)
labels = np.asarray([['True Negative\n{}'.format(TN),'False Positive\n{}'.format(FP)],
['False Negative\n{}'.format(FN),'True Positive\n{}'.format(TP)]])
sns.heatmap(conf_mat,annot=labels,fmt="",cmap=plt.cm.Blues,xticklabels="",yticklabels="",cbar=False)
#ROC
plt.subplot(142)
pfr, tpr, _ = roc_curve(y_test,y_pred_proba)
roc_auc = auc(pfr, tpr)
gini = (roc_auc*2)-1
plt.plot(pfr, tpr, label='ROC Curve (area = {:.2%})'.format(roc_auc) )
plt.plot([0,1], [0,1])
plt.xlim([-0.05,1.05])
plt.ylim([-0.05,1.05])
plt.xlabel('False positive rate')
plt.ylabel('True positive rate')
    plt.title('Receiver Operating Characteristic Curve with Gini {:.2}'.format(gini))
plt.legend(loc='lower right')
#pr
plt.subplot(143)
precision, recall, _ = precision_recall_curve(y_test,y_pred_proba)
step_kwargs = ({'step':'post'}
if 'step'in signature(plt.fill_between).parameters
else {})
plt.step(recall,precision,color='b',alpha=0.2, where='post')
plt.fill_between(recall,precision,alpha=0.2,color='b',**step_kwargs)
plt.ylim([0.0,1.05])
plt.xlim([0.0,1.0])
plt.ylabel('Precision')
plt.xlabel('Recall')
plt.title('2-class Precision-Recall Curve: AP={:.2%}'.format(AP))
#hist
plt.subplot(144)
tmp = | pd.DataFrame(data=[y_test,y_pred_proba]) | pandas.DataFrame |
# Series Objects
import pandas as pd
data = [1,2,3,4]
series1 = pd.Series(data)
series1
type(series1)
# changing index of a series object
series1 = pd.Series(data,index=['a','b','c','d'])
series1
# create a dataframe using list
import pandas as pd
data=[1,2,3,4,5]
df = pd.DataFrame(data)
df
# create a dataframe using dict
dictionary = {'fruits':['apples','bananas','mangoes'],
'count':[10,20,15]}
df = pd.DataFrame(dictionary)
df
# create a dataframe using series
series = pd.Series([6,12],index=['a','b'])
df = pd.DataFrame(series)
df
# create a dataframe using numpy array
import numpy as np
numpyarray = np.array([[50000,60000],['John','James']])
df = pd.DataFrame({'name':numpyarray[1],'salary':numpyarray[0]})
df
# Merge Operation
import pandas as pd
player = ['Player1','Player2','Player3']
point = [8,9,6]
title = ['Game1','Game2','Game3']
df1 = | pd.DataFrame({'Player':player,'Points':point,'Title':title}) | pandas.DataFrame |
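# A hedged sketch of the merge step itself; the second dataframe and its
# extra 'Score' column are illustrative assumptions, not from the original tutorial.
score = [95, 85, 75]
df2 = pd.DataFrame({'Player': player, 'Score': score, 'Title': title})
merged_df = pd.merge(df1, df2, on=['Player', 'Title'])
merged_df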
#!/usr/bin/env python
# coding: utf-8
import glob
import pandas as pd
import os
def comparison(results_dir, datasets, encoders, mechanisms, tasc_approach):
save_dir = "summarised_results/" + tasc_approach + "-tasc_comparing_explanations/"
try:
os.makedirs(save_dir)
except:
pass
set_results = []
header = ["dataset","encoder",
"omission (Tanh)", "grad (Tanh)", "IG (Tanh)", "Attn_Grad*Attn (Tanh +)",
"omission (Dot)", "grad (Dot)", "IG (Dot)", "Attn_Grad*Attn (Dot +)"
]
set_results.append(header)
for dataset in datasets:
for encoder in encoders:
mech_temp = {}
for mechanism in mechanisms:
nontasc_files = glob.glob(results_dir + dataset + "/"+encoder+ "*" + mechanism + "*decision-flip-set-summary.csv")
tasc_files = glob.glob(results_dir + tasc_approach + "_" + dataset + "/"+encoder+ "*" + mechanism + "*decision-flip-set-summary.csv")
nontasc = dict(pd.read_csv(nontasc_files[0]).values)
tasc = dict(pd.read_csv(tasc_files[0]).values)
mech_temp[mechanism] = {}
mech_temp[mechanism]["Attn*Attn_Grad +"] = round(tasc["scaled attention"],2)
mech_temp[mechanism]["omission"] = round(nontasc["omission"],2)
mech_temp[mechanism]["grad"] = round(nontasc["gradients"],2)
mech_temp[mechanism]["IG"] = round(nontasc["ig"],2)
set_results.append([dataset, encoder,
mech_temp["Tanh"]["omission"], mech_temp["Tanh"]["grad"], mech_temp["Tanh"]["IG"], mech_temp["Tanh"]["Attn*Attn_Grad +"],
mech_temp["Dot"]["omission"], mech_temp["Dot"]["grad"], mech_temp["Dot"]["IG"], mech_temp["Dot"]["Attn*Attn_Grad +"],
])
    set_of_w = pd.DataFrame(set_results)
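# Usage sketch (directory, dataset and encoder names below are placeholders for
# whatever the evaluation actually produced):
# comparison(
#     results_dir="test_evaluation_results/",
#     datasets=["sst", "agnews"],
#     encoders=["bert", "lstm"],
#     mechanisms=["Tanh", "Dot"],
#     tasc_approach="lin",
# )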
import matplotlib.image as mpimg
import matplotlib.style as style
import matplotlib.pyplot as plt
from matplotlib import rcParams
from simtk.openmm.app import *
from simtk.openmm import *
from simtk.unit import *
from sys import stdout
import seaborn as sns
from math import exp
import pandas as pd
import mdtraj as md
import pickle as pk
import numpy as np
import statistics
import itertools
import fileinput
import fnmatch
import shutil
import random
import math
import os
import re
def fix_cap_remove_ace(pdb_file):
"""
Removes the H atoms of the capped ACE residue.
"""
remove_words = [
"H1 ACE",
"H2 ACE",
"H3 ACE",
"H31 ACE",
"H32 ACE",
"H33 ACE",
]
with open(pdb_file) as oldfile, open("intermediate.pdb", "w") as newfile:
for line in oldfile:
if not any(word in line for word in remove_words):
newfile.write(line)
command = "rm -rf " + pdb_file
os.system(command)
command = "mv intermediate.pdb " + pdb_file
os.system(command)
def fix_cap_replace_ace(pdb_file):
"""
Replaces the alpha carbon atom of the
capped ACE residue with a standard name.
"""
fin = open(pdb_file, "rt")
data = fin.read()
data = data.replace("CA ACE", "CH3 ACE")
data = data.replace("C ACE", "CH3 ACE")
fin.close()
fin = open(pdb_file, "wt")
fin.write(data)
fin.close()
def fix_cap_remove_nme(pdb_file):
"""
Removes the H atoms of the capped NME residue.
"""
remove_words = [
"H1 NME",
"H2 NME",
"H3 NME",
"H31 NME",
"H32 NME",
"H33 NME",
]
with open(pdb_file) as oldfile, open("intermediate.pdb", "w") as newfile:
for line in oldfile:
if not any(word in line for word in remove_words):
newfile.write(line)
command = "rm -rf " + pdb_file
os.system(command)
command = "mv intermediate.pdb " + pdb_file
os.system(command)
def fix_cap_replace_nme(pdb_file):
"""
Replaces the alpha carbon atom of the
capped NME residue with a standard name.
"""
fin = open(pdb_file, "rt")
data = fin.read()
data = data.replace("CA NME", "CH3 NME")
data = data.replace("C NME", "CH3 NME")
fin.close()
fin = open(pdb_file, "wt")
fin.write(data)
fin.close()
def prepare_alanine_dipeptide():
"""
Prepares the alanine dipeptide system for Gaussian
Accelerated Molecular Dynamics (GaMD) simulations.
Downloads the pdb structure from
https://markovmodel.github.io/mdshare/ALA2/ and
parameterizes it using General Amber Force Field
(GAFF).
"""
os.system(
"curl -O http://ftp.imp.fu-berlin.de/pub/cmb-data/alanine-dipeptide-nowater.pdb"
)
os.system(
"rm -rf system_inputs"
) # Removes any existing directory named system_inputs
os.system("mkdir system_inputs") # Creates a directory named system_inputs
cwd = os.getcwd()
target_dir = cwd + "/" + "system_inputs"
os.system("pdb4amber -i alanine-dipeptide-nowater.pdb -o intermediate.pdb")
# Delete HH31, HH32 and HH33 from the ACE residue (tleap adds them later)
remove_words = ["HH31 ACE", "HH32 ACE", "HH33 ACE"]
with open("intermediate.pdb") as oldfile, open(
"system.pdb", "w"
) as newfile:
for line in oldfile:
if not any(word in line for word in remove_words):
newfile.write(line)
os.system("rm -rf intermediate*")
# save the tleap script to file
with open("input_TIP3P.leap", "w") as f:
f.write(
"""
source leaprc.protein.ff14SB
source leaprc.water.tip3p
set default FlexibleWater on
set default PBRadii mbondi2
pdb = loadpdb system.pdb
solvateBox pdb TIP3PBOX 15
saveamberparm pdb system_TIP3P.prmtop system_TIP3P.inpcrd
saveamberparm pdb system_TIP3P.parm7 system_TIP3P.rst7
savepdb pdb system_TIP3P.pdb
quit
"""
)
os.system("tleap -f input_TIP3P.leap")
os.system("rm -rf leap.log")
shutil.copy(
cwd + "/" + "system_TIP3P.inpcrd",
target_dir + "/" + "system_TIP3P.inpcrd",
)
shutil.copy(
cwd + "/" + "system_TIP3P.parm7",
target_dir + "/" + "system_TIP3P.parm7",
)
shutil.copy(
cwd + "/" + "system_TIP3P.pdb", target_dir + "/" + "system_TIP3P.pdb"
)
shutil.copy(
cwd + "/" + "system_TIP3P.prmtop",
target_dir + "/" + "system_TIP3P.prmtop",
)
shutil.copy(
cwd + "/" + "system_TIP3P.rst7", target_dir + "/" + "system_TIP3P.rst7"
)
shutil.copy(cwd + "/" + "system.pdb", target_dir + "/" + "system.pdb")
shutil.copy(
cwd + "/" + "alanine-dipeptide-nowater.pdb",
target_dir + "/" + "alanine-dipeptide-nowater.pdb",
)
shutil.copy(
cwd + "/" + "input_TIP3P.leap", target_dir + "/" + "input_TIP3P.leap"
)
os.system("rm -rf system_TIP3P.inpcrd")
os.system("rm -rf system_TIP3P.parm7")
os.system("rm -rf system_TIP3P.pdb")
os.system("rm -rf system_TIP3P.inpcrd")
os.system("rm -rf system_TIP3P.rst7")
os.system("rm -rf system_TIP3P.prmtop")
os.system("rm -rf system.pdb")
os.system("rm -rf input_TIP3P.leap")
os.system("rm -rf alanine-dipeptide-nowater.pdb")
def create_vectors(x):
"""
Extracts peridic box information from the
given line.
"""
x = str(x)
x = x.replace("Vec3", "")
    x = re.findall(r"\d*\.?\d+", x)
for i in range(0, len(x)):
x[i] = float(x[i])
x = tuple(x)
n = int(len(x) / 3)
x = [x[i * n : (i + 1) * n] for i in range((len(x) + n - 1) // n)]
return x
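# Example of what create_vectors handles (values illustrative): a string such as
#   "[Vec3(x=3.1, y=0.0, z=0.0), Vec3(x=0.0, y=3.1, z=0.0), Vec3(x=0.0, y=0.0, z=3.1)] nm"
# becomes [(3.1, 0.0, 0.0), (0.0, 3.1, 0.0), (0.0, 0.0, 3.1)].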
def simulated_annealing(
parm="system_TIP3P.prmtop",
rst="system_TIP3P.inpcrd",
annealing_output_pdb="system_annealing_output.pdb",
annealing_steps=100000,
pdb_freq=100000,
starting_temp=0,
target_temp=300,
temp_incr=3,
):
"""
Performs simulated annealing of the system from
0K to 300 K (default) using OpenMM MD engine and
saves the last frame of the simulation to be
accessed by the next simulation.
Parameters
----------
parm: str
System's topology file
rst: str
System's coordinate file
annealing_output_pdb: str
System's output trajectory file
annealing_steps: int
        Annealing steps at each temperature jump
pdb_freq: int
Trajectory to be saved after every pdb_freq steps
starting_temp: int
Initial temperature of Simulated Annealing
target_temp: int
Final temperature of Simulated Annealing
temp_incr: int
        Temperature increase for every step
"""
prmtop = AmberPrmtopFile(parm)
inpcrd = AmberInpcrdFile(rst)
annealing_system = prmtop.createSystem(
nonbondedMethod=PME, nonbondedCutoff=1 * nanometer, constraints=HBonds
)
annealing_integrator = LangevinIntegrator(
0 * kelvin, 1 / picosecond, 2 * femtoseconds
)
total_steps = ((target_temp / temp_incr) + 1) * annealing_steps
annealing_temp_range = int((target_temp / temp_incr) + 1)
annealing_platform = Platform.getPlatformByName("CUDA")
annealing_properties = {"CudaDeviceIndex": "0", "CudaPrecision": "mixed"}
annealing_simulation = Simulation(
prmtop.topology,
annealing_system,
annealing_integrator,
annealing_platform,
annealing_properties,
)
annealing_simulation.context.setPositions(inpcrd.positions)
if inpcrd.boxVectors is not None:
annealing_simulation.context.setPeriodicBoxVectors(*inpcrd.boxVectors)
annealing_simulation.minimizeEnergy()
annealing_simulation.reporters.append(
PDBReporter(annealing_output_pdb, pdb_freq)
)
simulated_annealing_last_frame = (
annealing_output_pdb[:-4] + "_last_frame.pdb"
)
annealing_simulation.reporters.append(
PDBReporter(simulated_annealing_last_frame, total_steps)
)
annealing_simulation.reporters.append(
StateDataReporter(
stdout,
pdb_freq,
step=True,
time=True,
potentialEnergy=True,
totalSteps=total_steps,
temperature=True,
progress=True,
remainingTime=True,
speed=True,
separator="\t",
)
)
temp = starting_temp
while temp <= target_temp:
annealing_integrator.setTemperature(temp * kelvin)
if temp == starting_temp:
annealing_simulation.step(annealing_steps)
annealing_simulation.saveState("annealing.state")
else:
annealing_simulation.loadState("annealing.state")
annealing_simulation.step(annealing_steps)
temp += temp_incr
state = annealing_simulation.context.getState()
print(state.getPeriodicBoxVectors())
annealing_simulation_box_vectors = state.getPeriodicBoxVectors()
print(annealing_simulation_box_vectors)
with open("annealing_simulation_box_vectors.pkl", "wb") as f:
pk.dump(annealing_simulation_box_vectors, f)
    print("Finished NVT Simulated Annealing Simulation")
def npt_equilibration(
parm="system_TIP3P.prmtop",
npt_output_pdb="system_npt_output.pdb",
pdb_freq=500000,
npt_steps=5000000,
target_temp=300,
npt_pdb="system_annealing_output_last_frame.pdb",
):
"""
Performs NPT equilibration MD of the system
using OpenMM MD engine and saves the last
frame of the simulation to be accessed by
the next simulation.
Parameters
----------
parm: str
System's topology file
npt_output_pdb: str
System's output trajectory file
pdb_freq: int
Trajectory to be saved after every pdb_freq steps
npt_steps: int
NPT simulation steps
target_temp: int
Temperature for MD simulation
npt_pdb: str
Last frame of the simulation
"""
npt_init_pdb = PDBFile(npt_pdb)
prmtop = AmberPrmtopFile(parm)
npt_system = prmtop.createSystem(
nonbondedMethod=PME, nonbondedCutoff=1 * nanometer, constraints=HBonds
)
barostat = MonteCarloBarostat(25.0 * bar, target_temp * kelvin, 25)
npt_system.addForce(barostat)
npt_integrator = LangevinIntegrator(
target_temp * kelvin, 1 / picosecond, 2 * femtoseconds
)
npt_platform = Platform.getPlatformByName("CUDA")
npt_properties = {"CudaDeviceIndex": "0", "CudaPrecision": "mixed"}
npt_simulation = Simulation(
prmtop.topology,
npt_system,
npt_integrator,
npt_platform,
npt_properties,
)
npt_simulation.context.setPositions(npt_init_pdb.positions)
npt_simulation.context.setVelocitiesToTemperature(target_temp * kelvin)
with open("annealing_simulation_box_vectors.pkl", "rb") as f:
annealing_simulation_box_vectors = pk.load(f)
annealing_simulation_box_vectors = create_vectors(
annealing_simulation_box_vectors
)
npt_simulation.context.setPeriodicBoxVectors(
annealing_simulation_box_vectors[0],
annealing_simulation_box_vectors[1],
annealing_simulation_box_vectors[2],
)
npt_last_frame = npt_output_pdb[:-4] + "_last_frame.pdb"
npt_simulation.reporters.append(PDBReporter(npt_output_pdb, pdb_freq))
npt_simulation.reporters.append(PDBReporter(npt_last_frame, npt_steps))
npt_simulation.reporters.append(
StateDataReporter(
stdout,
pdb_freq,
step=True,
time=True,
potentialEnergy=True,
totalSteps=npt_steps,
temperature=True,
progress=True,
remainingTime=True,
speed=True,
separator="\t",
)
)
npt_simulation.minimizeEnergy()
npt_simulation.step(npt_steps)
npt_simulation.saveState("npt_simulation.state")
state = npt_simulation.context.getState()
print(state.getPeriodicBoxVectors())
npt_simulation_box_vectors = state.getPeriodicBoxVectors()
print(npt_simulation_box_vectors)
with open("npt_simulation_box_vectors.pkl", "wb") as f:
pk.dump(npt_simulation_box_vectors, f)
print("Finished NPT Simulation")
def nvt_equilibration(
parm="system_TIP3P.prmtop",
nvt_output_pdb="system_nvt_output.pdb",
pdb_freq=500000,
nvt_steps=5000000,
target_temp=300,
nvt_pdb="system_npt_output_last_frame.pdb",
):
"""
Performs NVT equilibration MD of the system
    using OpenMM MD engine and saves the last
frame of the simulation to be accessed by
the next simulation.
Parameters
----------
parm: str
System's topology file
nvt_output_pdb: str
System's output trajectory file
pdb_freq: int
Trajectory to be saved after every pdb_freq steps
nvt_steps: int
NVT simulation steps
target_temp: int
Temperature for MD simulation
nvt_pdb: str
Last frame of the simulation
"""
nvt_init_pdb = PDBFile(nvt_pdb)
prmtop = AmberPrmtopFile(parm)
nvt_system = prmtop.createSystem(
nonbondedMethod=PME, nonbondedCutoff=1 * nanometer, constraints=HBonds
)
nvt_integrator = LangevinIntegrator(
target_temp * kelvin, 1 / picosecond, 2 * femtoseconds
)
nvt_platform = Platform.getPlatformByName("CUDA")
nvt_properties = {"CudaDeviceIndex": "0", "CudaPrecision": "mixed"}
nvt_simulation = Simulation(
prmtop.topology,
nvt_system,
nvt_integrator,
nvt_platform,
nvt_properties,
)
nvt_simulation.context.setPositions(nvt_init_pdb.positions)
nvt_simulation.context.setVelocitiesToTemperature(target_temp * kelvin)
with open("npt_simulation_box_vectors.pkl", "rb") as f:
npt_simulation_box_vectors = pk.load(f)
npt_simulation_box_vectors = create_vectors(npt_simulation_box_vectors)
nvt_simulation.context.setPeriodicBoxVectors(
npt_simulation_box_vectors[0],
npt_simulation_box_vectors[1],
npt_simulation_box_vectors[2],
)
nvt_last_frame = nvt_output_pdb[:-4] + "_last_frame.pdb"
nvt_simulation.reporters.append(PDBReporter(nvt_output_pdb, pdb_freq))
nvt_simulation.reporters.append(PDBReporter(nvt_last_frame, nvt_steps))
nvt_simulation.reporters.append(
StateDataReporter(
stdout,
pdb_freq,
step=True,
time=True,
potentialEnergy=True,
totalSteps=nvt_steps,
temperature=True,
progress=True,
remainingTime=True,
speed=True,
separator="\t",
)
)
nvt_simulation.minimizeEnergy()
nvt_simulation.step(nvt_steps)
nvt_simulation.saveState("nvt_simulation.state")
state = nvt_simulation.context.getState()
print(state.getPeriodicBoxVectors())
nvt_simulation_box_vectors = state.getPeriodicBoxVectors()
print(nvt_simulation_box_vectors)
with open("nvt_simulation_box_vectors.pkl", "wb") as f:
pk.dump(nvt_simulation_box_vectors, f)
print("Finished NVT Simulation")
def run_equilibration():
"""
Runs systematic simulated annealing followed by
NPT and NVT equilibration MD simulation.
"""
cwd = os.getcwd()
target_dir = cwd + "/" + "equilibration"
os.system("rm -rf equilibration")
os.system("mkdir equilibration")
shutil.copy(
cwd + "/" + "system_inputs" + "/" + "system_TIP3P.inpcrd",
target_dir + "/" + "system_TIP3P.inpcrd",
)
shutil.copy(
cwd + "/" + "system_inputs" + "/" + "system_TIP3P.parm7",
target_dir + "/" + "system_TIP3P.parm7",
)
shutil.copy(
cwd + "/" + "system_inputs" + "/" + "system_TIP3P.pdb",
target_dir + "/" + "system_TIP3P.pdb",
)
shutil.copy(
cwd + "/" + "system_inputs" + "/" + "system_TIP3P.prmtop",
target_dir + "/" + "system_TIP3P.prmtop",
)
shutil.copy(
cwd + "/" + "system_inputs" + "/" + "system_TIP3P.rst7",
target_dir + "/" + "system_TIP3P.rst7",
)
shutil.copy(
cwd + "/" + "system_inputs" + "/" + "system.pdb",
target_dir + "/" + "system.pdb",
)
shutil.copy(
cwd + "/" + "system_inputs" + "/" + "alanine-dipeptide-nowater.pdb",
target_dir + "/" + "alanine-dipeptide-nowater.pdb",
)
shutil.copy(
cwd + "/" + "system_inputs" + "/" + "input_TIP3P.leap",
target_dir + "/" + "input_TIP3P.leap",
)
os.chdir(target_dir)
simulated_annealing()
npt_equilibration()
nvt_equilibration()
os.system("rm -rf system_TIP3P.inpcrd")
os.system("rm -rf system_TIP3P.parm7")
os.system("rm -rf system_TIP3P.pdb")
os.system("rm -rf system_TIP3P.rst7")
os.system("rm -rf system_TIP3P.prmtop")
os.system("rm -rf system.pdb")
os.system("rm -rf alanine-dipeptide-nowater.pdb")
os.system("rm -rf input_TIP3P.leap")
os.chdir(cwd)
def create_starting_structures():
"""
Prepares starting structures for Amber GaMD simulations.
All input files required to run Amber GaMD simulations are
placed in the starting_structures directory.
"""
cwd = os.getcwd()
target_dir = cwd + "/" + "starting_structures"
os.system("rm -rf starting_structures")
os.system("mkdir starting_structures")
shutil.copy(
cwd + "/" + "equilibration" + "/" + "system_nvt_output_last_frame.pdb",
target_dir + "/" + "system_nvt_output_last_frame.pdb",
)
os.chdir(target_dir)
fix_cap_remove_nme("system_nvt_output_last_frame.pdb")
fix_cap_replace_nme("system_nvt_output_last_frame.pdb")
# Save the tleap script to file
with open("final_input_TIP3P.leap", "w") as f:
f.write(
"""
source leaprc.protein.ff14SB
source leaprc.water.tip3p
set default FlexibleWater on
set default PBRadii mbondi2
pdb = loadpdb system_nvt_output_last_frame.pdb
saveamberparm pdb system_final.prmtop system_final.inpcrd
saveamberparm pdb system_final.parm7 system_final.rst7
savepdb pdb system_final.pdb
quit
"""
)
os.system("tleap -f final_input_TIP3P.leap")
os.system("rm -rf leap.log")
os.system("rm -rf system_nvt_output_last_frame.pdb")
os.chdir(cwd)
def add_vec_inpcrd():
"""
Adds box dimensions captured from the last saved
frame of the NVT simulations to the inpcrd file.
Only to be used when the box dimensions are not
present in the inpcrd file.
"""
cwd = os.getcwd()
target_dir = cwd + "/" + "starting_structures"
shutil.copy(
cwd + "/" + "equilibration" + "/" + "nvt_simulation_box_vectors.pkl",
target_dir + "/" + "nvt_simulation_box_vectors.pkl",
)
os.chdir(target_dir)
with open("nvt_simulation_box_vectors.pkl", "rb") as f:
nvt_simulation_box_vectors = pk.load(f)
nvt_simulation_box_vectors = create_vectors(nvt_simulation_box_vectors)
vectors = (
(nvt_simulation_box_vectors[0][0]) * 10,
(nvt_simulation_box_vectors[1][1]) * 10,
(nvt_simulation_box_vectors[2][2]) * 10,
)
vectors = (
round(vectors[0], 7),
round(vectors[1], 7),
round(vectors[2], 7),
)
last_line = (
" "
+ str(vectors[0])
+ " "
+ str(vectors[1])
+ " "
+ str(vectors[2])
+ " 90.0000000"
+ " 90.0000000"
+ " 90.0000000"
)
with open("system_final.inpcrd", "a+") as f:
f.write(last_line)
os.system("rm -rf nvt_simulation_box_vectors.pkl")
os.chdir(cwd)
def add_vec_prmtop():
"""
Adds box dimensions captured from the last saved
frame of the NVT simulations to the prmtop file.
Only to be used when the box dimensions are not
present in the prmtop file.
"""
cwd = os.getcwd()
target_dir = cwd + "/" + "starting_structures"
shutil.copy(
cwd + "/" + "equilibration" + "/" + "nvt_simulation_box_vectors.pkl",
target_dir + "/" + "nvt_simulation_box_vectors.pkl",
)
os.chdir(target_dir)
with open("nvt_simulation_box_vectors.pkl", "rb") as f:
nvt_simulation_box_vectors = pk.load(f)
nvt_simulation_box_vectors = create_vectors(nvt_simulation_box_vectors)
vectors = (
nvt_simulation_box_vectors[0][0],
nvt_simulation_box_vectors[1][1],
nvt_simulation_box_vectors[2][2],
)
vectors = round(vectors[0], 7), round(vectors[1], 7), round(vectors[2], 7)
oldbeta = "9.00000000E+01"
x = str(vectors[0]) + str(0) + "E+" + "01"
y = str(vectors[1]) + str(0) + "E+" + "01"
z = str(vectors[2]) + str(0) + "E+" + "01"
line1 = "%FLAG BOX_DIMENSIONS"
line2 = "%FORMAT(5E16.8)"
line3 = " " + oldbeta + " " + x + " " + y + " " + z
with open("system_final.prmtop") as i, open(
"system_intermediate_final.prmtop", "w"
) as f:
for line in i:
if line.startswith("%FLAG RADIUS_SET"):
line = line1 + "\n" + line2 + "\n" + line3 + "\n" + line
f.write(line)
os.system("rm -rf system_final.prmtop")
os.system("mv system_intermediate_final.prmtop system_final.prmtop")
os.system("rm -rf nvt_simulation_box_vectors.pkl")
os.chdir(cwd)
def create_filetree(
nst_lim=26000000,
ntw_x=1000,
nt_cmd=1000000,
n_teb=1000000,
n_tave=50000,
ntcmd_prep=200000,
nteb_prep=200000,
):
"""
Creates a directory named gamd_simulations. Inside
this directory, there are subdirectories for dihedral,
dual and total potential-boosted GaMD with upper and
lower threshold boosts separately.
Parameters
----------
nst_lim: int
        Total number of simulation steps, including the preparatory simulation.
For example, if nst_lim = 26000000, then, we may have
2 ns of preparatory simulation i.e. 1000000 preparation steps
and 50 ns of GaMD simulation i.e. 25000000 simulation steps
ntw_x: int
Saving coordinates of the simulation every ntw_x
timesteps. For example, 2 ps implies 1000 timesteps
nt_cmd: int
        Number of initial MD simulation steps; 2 ns of
preparatory simulation requires 1000000 preparation
timesteps
n_teb: int
Number of biasing MD simulation steps
n_tave: int
Number of simulation steps used to calculate the
average and standard deviation of potential energies
ntcmd_prep: int
Number of preparation conventional molecular dynamics
        steps. This is used for system equilibration and
potential energies are not collected for statistics
nteb_prep: int
Number of preparation biasing molecular dynamics
simulation steps. This is used for system
equilibration
"""
cwd = os.getcwd()
os.system("rm -rf gamd_simulations")
os.system("mkdir gamd_simulations")
os.chdir(cwd + "/" + "gamd_simulations")
source_dir = cwd + "/" + "starting_structures"
target_dir = cwd + "/" + "gamd_simulations"
dir_list = [
"dihedral_threshold_lower",
"dihedral_threshold_upper",
"dual_threshold_lower",
"dual_threshold_upper",
"total_threshold_lower",
"total_threshold_upper",
]
for i in range(len(dir_list)):
os.mkdir(dir_list[i])
os.chdir(target_dir + "/" + dir_list[i])
shutil.copy(
source_dir + "/" + "system_final.inpcrd",
target_dir + "/" + dir_list[i] + "/" + "system_final.inpcrd",
)
shutil.copy(
source_dir + "/" + "system_final.prmtop",
target_dir + "/" + dir_list[i] + "/" + "system_final.prmtop",
)
if "lower" in dir_list[i]:
i_E = 1
if "upper" in dir_list[i]:
i_E = 2
if "total" in dir_list[i]:
i_gamd = 1
if "dihedral" in dir_list[i]:
i_gamd = 2
if "dual" in dir_list[i]:
i_gamd = 3
with open("md.in", "w") as f:
f.write("&cntrl" + "\n")
f.write(" imin = 0, irest = 0, ntx = 1," + "\n")
f.write(" nstlim = " + str(nst_lim) + ", dt = 0.002," + "\n")
f.write(" ntc = 2, ntf = 2, tol = 0.000001," + "\n")
f.write(" iwrap = 1, ntb = 1, cut = 8.0," + "\n")
f.write(" ntt = 3, temp0 = 300.0, gamma_ln = 1.0, " + "\n")
f.write(
" ntpr = 500, ntwx = " + str(ntw_x) + ", ntwr = 500," + "\n"
)
f.write(" ntxo = 2, ioutfm = 1, ig = -1, ntwprt = 0," + "\n")
f.write(
" igamd = "
+ str(i_gamd)
+ ", iE = "
+ str(i_E)
+ ", irest_gamd = 0,"
+ "\n"
)
f.write(
" ntcmd = "
+ str(nt_cmd)
+ ", nteb = "
+ str(n_teb)
+ ", ntave = "
+ str(n_tave)
+ ","
+ "\n"
)
f.write(
" ntcmdprep = "
+ str(ntcmd_prep)
+ ", ntebprep = "
+ str(nteb_prep)
+ ","
+ "\n"
)
f.write(" sigma0D = 6.0, sigma0P = 6.0" + " \n")
f.write("&end" + "\n")
os.chdir(target_dir)
os.chdir(cwd)
def run_simulations():
"""
Runs GaMD simulations for each of the dihedral, dual and total
potential boosts for both thresholds i.e. upper and lower potential
thresholds. (Remember to check md.in files for further details and
flag information).
"""
cwd = os.getcwd()
os.chdir(cwd + "/" + "gamd_simulations")
os.chdir(cwd + "/" + "gamd_simulations" + "/" + "dihedral_threshold_lower")
os.system(
"pmemd.cuda -O -i md.in -o system_final.out -p system_final.prmtop -c system_final.inpcrd -r system_final.rst -x system_final.nc"
)
os.chdir(cwd + "/" + "gamd_simulations" + "/" + "dihedral_threshold_upper")
os.system(
"pmemd.cuda -O -i md.in -o system_final.out -p system_final.prmtop -c system_final.inpcrd -r system_final.rst -x system_final.nc"
)
os.chdir(cwd + "/" + "gamd_simulations" + "/" + "dual_threshold_lower")
os.system(
"pmemd.cuda -O -i md.in -o system_final.out -p system_final.prmtop -c system_final.inpcrd -r system_final.rst -x system_final.nc"
)
os.chdir(cwd + "/" + "gamd_simulations" + "/" + "dual_threshold_upper")
os.system(
"pmemd.cuda -O -i md.in -o system_final.out -p system_final.prmtop -c system_final.inpcrd -r system_final.rst -x system_final.nc"
)
os.chdir(cwd + "/" + "gamd_simulations" + "/" + "total_threshold_lower")
os.system(
"pmemd.cuda -O -i md.in -o system_final.out -p system_final.prmtop -c system_final.inpcrd -r system_final.rst -x system_final.nc"
)
os.chdir(cwd + "/" + "gamd_simulations" + "/" + "total_threshold_upper")
os.system(
"pmemd.cuda -O -i md.in -o system_final.out -p system_final.prmtop -c system_final.inpcrd -r system_final.rst -x system_final.nc"
)
os.chdir(cwd + "/" + "gamd_simulations")
os.chdir(cwd)
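# End-to-end usage sketch (assumptions: AmberTools, pmemd.cuda and a CUDA GPU are
# available; the add_vec_* calls are only needed when the box record is missing
# from the generated inpcrd/prmtop files):
#
#   prepare_alanine_dipeptide()
#   run_equilibration()
#   create_starting_structures()
#   add_vec_inpcrd()
#   add_vec_prmtop()
#   create_filetree()
#   run_simulations()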
def create_data_files(
jump=10,
traj="system_final.nc",
topology="system_final.prmtop",
T=300,
):
"""
Extracts data from GaMD log files and saves them as
weights.dat, Psi.dat and Phi_Psi.dat. gamd.log file
contains data excluding the initial equilibration MD
simulation steps but trajectory output file has all
the trajectories including the initial equilibration
    MD steps. This part has been taken care of to make the
data consistent.
Parameters
----------
jump: int
Every nth frame to be considered for reweighting
traj: str
System's trajectory file
topology: str
System's topology file
T: int
MD simulation temperature
"""
# To make data consistent with gamd.log and .nc file
factor = 0.001987 * T
with open("md.in") as f:
lines = f.readlines()
for i in lines:
if "nstlim =" in i:
nstlim_line = i
if "ntcmd =" in i:
ntcmd_line = i
if "ntwx =" in i:
ntwx_line = i
x = re.findall(r"\b\d+\b", ntcmd_line)
ntcmd = int(x[0])
x = re.findall(r"\b\d+\b", nstlim_line)
nstlim = int(x[0])
x = re.findall(r"\b\d+\b", ntwx_line)
ntwx = int(x[1])
# From the .nc trajectory files, we will not consider ntcmd trajectories
leave_frames = int(ntcmd / ntwx)
no_frames = int(nstlim / ntwx)
# Recheck conditions
file = open("gamd.log", "r")
number_of_lines = 0
for line in file:
line = line.strip("\n")
number_of_lines += 1
file.close()
f = open("gamd.log")
fourth_line = f.readlines()[3]
if str(ntcmd) in fourth_line:
datapoints = number_of_lines - 4
if not str(ntcmd) in fourth_line:
datapoints = number_of_lines - 3
print(datapoints == int((nstlim - ntcmd) / ntwx))
# Creating Psi.dat and Phi_Psi.dat
traj = md.load(traj, top=topology)
traj = traj[leave_frames:no_frames:jump]
phi = md.compute_phi(traj)
phi = phi[1] # 0:indices, 1:phi angles
phi = np.array([math.degrees(i) for i in phi]) # radians to degrees
psi = md.compute_psi(traj)
psi = psi[1] # 0:indices, 1:psi angles
psi = np.array([math.degrees(i) for i in psi]) # radians to degrees
    df_psi = pd.DataFrame(psi, columns=["Psi"])  # psi angles into the Psi column
df_psi = df_psi.tail(int(datapoints))
df_psi.to_csv("Psi.dat", sep="\t", index=False, header=False)
    df_phi = pd.DataFrame(phi, columns=["Phi"])  # phi angles into the Phi column
df_phi = df_phi.tail(int(datapoints))
df_phi_psi = pd.concat([df_phi, df_psi], axis=1)
df_phi_psi.to_csv("Phi_Psi.dat", sep="\t", index=False, header=False)
# Creating weights.dat
with open("gamd.log") as f:
lines = f.readlines()
column_names = lines[2]
column_names = column_names.replace("#", "")
column_names = column_names.replace("\n", "")
column_names = column_names.replace(" ", "")
column_names = column_names.split(",")
list_words = ["#"]
with open("gamd.log") as oldfile, open("data.log", "w") as newfile:
for line in oldfile:
if not any(word in line for word in list_words):
newfile.write(line)
df = pd.read_csv("data.log", delim_whitespace=True, header=None)
df.columns = column_names
df["dV(kcal/mol)"] = (
df["Boost-Energy-Potential"] + df["Boost-Energy-Dihedral"]
)
df["dV(kbT)"] = df["dV(kcal/mol)"] / factor
df_ = df[["dV(kbT)", "total_nstep", "dV(kcal/mol)"]]
df_ = df_[::jump]
df_.to_csv("weights.dat", sep="\t", index=False, header=False)
os.system("rm -rf data.log")
print(df_phi_psi.shape)
print(df_phi.shape)
print(df_.shape)
def create_bins(lower_bound, width, upper_bound):
"""
    Creates bins given the lower and upper bounds
    and the width information.
"""
bins = []
for low in range(lower_bound, upper_bound, width):
bins.append([low, low + width])
return bins
def find_bin(value, bins):
"""
Finds which value belongs to which bin.
"""
for i in range(0, len(bins)):
if bins[i][0] <= value < bins[i][1]:
return i
return -1
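# Quick example of the two helpers above:
#   create_bins(-180, 90, 180)  -> [[-180, -90], [-90, 0], [0, 90], [90, 180]]
#   find_bin(45, create_bins(-180, 90, 180))  -> 2   (45 falls inside [0, 90])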
def reweight_1d(
binspace=10, n_structures=4, Xdim=[-180, 180], T=300.0, min_prob=0.000001
):
"""
Reweights boosted potential energies in one-dimension based on
Maclaurin series expansion to one, two and three degrees.
Parameters
----------
binspace: int
Spacing between the bins
n_structures: int
Number of structures per bin chosen
for Weighted Ensemble (WE) simulations
Xdim: list
Range of dihedral angles
T: float
MD simulation temperature
min_prob: float
minimum probability threshold
"""
beta = 1.0 / (0.001987 * float(T))
df_Psi = pd.read_csv("Psi.dat", delim_whitespace=True, header=None)
df_Psi.columns = ["Psi"]
df_weight = pd.read_csv("weights.dat", delim_whitespace=True, header=None)
df_weight.columns = ["dV_kBT", "timestep", "dVkcalmol"]
sum_total = df_Psi.shape[0]
binsX = np.arange(float(Xdim[0]), (float(Xdim[1]) + binspace), binspace)
hist, hist_edges = np.histogram(df_Psi[["Psi"]], bins=binsX, weights=None)
pstarA = [i / sum_total for i in list(hist)]
bins = create_bins(
lower_bound=int(Xdim[0]), width=binspace, upper_bound=int(Xdim[1])
)
data = df_Psi["Psi"].values.tolist()
binned_weights = []
for value in data:
bin_index = find_bin(value, bins)
binned_weights.append(bin_index)
df_index = pd.DataFrame(binned_weights)
df_index.columns = ["index"]
df = pd.concat([df_index, df_Psi, df_weight], axis=1)
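    # The block below is the Maclaurin (cumulant) expansion of <exp(beta*dV)>
    # per bin, truncated at 1st/2nd/3rd order:
    #   C1 = <dV>
    #   C2 = <dV^2> - <dV>^2
    #   C3 = <dV^3> - 3*<dV^2>*<dV> + 2*<dV>^3
    # giving pA ~ p*(A) * exp(beta*C1 [+ beta^2*C2/2 [+ beta^3*C3/6]]),
    # normalized over all bins.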
dV_c1 = []
dV_c2 = []
dV_c3 = []
dV = []
for i in range(len(bins)):
df_i = df.loc[(df["index"] == i)]
dV_list = df_i["dVkcalmol"].values.tolist()
if len(dV_list) >= 10:
dV_c1.append(statistics.mean(dV_list))
dV_c2.append(
statistics.mean([i ** 2 for i in dV_list])
- (statistics.mean(dV_list)) ** 2
)
dV_c3.append(
statistics.mean([i ** 3 for i in dV_list])
- 3
* (statistics.mean([i ** 2 for i in dV_list]))
* (statistics.mean(dV_list))
+ 2 * (statistics.mean(dV_list)) ** 3
)
if len(dV_list) < 10:
dV_c1.append(0)
dV_c2.append(0)
dV_c3.append(0)
dV.append(dV_list)
c1 = [i * beta for i in dV_c1]
c2 = [i * ((beta ** 2) / 2) for i in dV_c2]
c3 = [i * ((beta ** 3) / 6) for i in dV_c3]
c1 = c1
c12 = [a + b for a, b in zip(c1, c2)]
c123 = [a + b for a, b in zip(c12, c3)]
for i in range(len(c1)):
if c1[i] >= 700:
c1[i] = 700
for i in range(len(c12)):
if c12[i] >= 700:
c12[i] = 700
for i in range(len(c123)):
if c123[i] >= 700:
c123[i] = 700
ensemble_average_c1 = [exp(i) for i in c1]
ensemble_average_c12 = [exp(i) for i in c12]
ensemble_average_c123 = [exp(i) for i in c123]
numerator_c1 = [a * b for a, b in zip(pstarA, ensemble_average_c1)]
numerator_c12 = [a * b for a, b in zip(pstarA, ensemble_average_c12)]
numerator_c123 = [a * b for a, b in zip(pstarA, ensemble_average_c123)]
#### c1
denominatorc1 = []
for i in range(len(bins)):
product_c1 = pstarA[i] * ensemble_average_c1[i]
denominatorc1.append(product_c1)
denominator_c1 = sum(denominatorc1)
pA_c1 = [i / denominator_c1 for i in numerator_c1]
#### c12
denominatorc12 = []
for i in range(len(bins)):
product_c12 = pstarA[i] * ensemble_average_c12[i]
denominatorc12.append(product_c12)
denominator_c12 = sum(denominatorc12)
pA_c12 = [i / denominator_c12 for i in numerator_c12]
#### c123
denominatorc123 = []
for i in range(len(bins)):
product_c123 = pstarA[i] * ensemble_average_c123[i]
denominatorc123.append(product_c123)
denominator_c123 = sum(denominatorc123)
pA_c123 = [i / denominator_c123 for i in numerator_c123]
data_c1 = list(zip(bins, pA_c1))
data_c12 = list(zip(bins, pA_c12))
data_c123 = list(zip(bins, pA_c123))
df_c1 = pd.DataFrame(data_c1, columns=["bins", "pA_c1"])
df_c12 = pd.DataFrame(data_c12, columns=["bins", "pA_c12"])
df_c123 = pd.DataFrame(data_c123, columns=["bins", "pA_c123"])
####c1
df_c1.to_csv("c1_1d.txt", header=True, index=None, sep=" ", mode="w")
with open("c1_1d.txt", "r") as f1, open("pA_c1_1d.txt", "w") as f2:
for line in f1:
f2.write(line.replace('"', "").replace("'", ""))
os.system("rm -rf c1_1d.txt")
####c12
df_c12.to_csv("c12_1d.txt", header=True, index=None, sep=" ", mode="w")
with open("c12_1d.txt", "r") as f1, open("pA_c12_1d.txt", "w") as f2:
for line in f1:
f2.write(line.replace('"', "").replace("'", ""))
os.system("rm -rf c12_1d.txt")
####c123
df_c123.to_csv("c123_1d.txt", header=True, index=None, sep=" ", mode="w")
with open("c123_1d.txt", "r") as f1, open("pA_c123_1d.txt", "w") as f2:
for line in f1:
f2.write(line.replace('"', "").replace("'", ""))
os.system("rm -rf c123_1d.txt")
####c1_arranged
df_c1_arranged = df_c1.sort_values(by="pA_c1", ascending=False)
df_c1_arranged = df_c1_arranged[df_c1_arranged.pA_c1 > min_prob]
df_c1_arranged.to_csv(
"c1_arranged_1d.txt", header=True, index=None, sep=" ", mode="w"
)
with open("c1_arranged_1d.txt", "r") as f1, open(
"pA_c1_arranged_1d.txt", "w"
) as f2:
for line in f1:
f2.write(line.replace('"', "").replace("'", ""))
os.system("rm -rf c1_arranged_1d.txt")
####c12_arranged
df_c12_arranged = df_c12.sort_values(by="pA_c12", ascending=False)
df_c12_arranged = df_c12_arranged[df_c12_arranged.pA_c12 > min_prob]
df_c12_arranged.to_csv(
"c12_arranged_1d.txt", header=True, index=None, sep=" ", mode="w"
)
with open("c12_arranged_1d.txt", "r") as f1, open(
"pA_c12_arranged_1d.txt", "w"
) as f2:
for line in f1:
f2.write(line.replace('"', "").replace("'", ""))
os.system("rm -rf c12_arranged_1d.txt")
####c123_arranged
df_c123_arranged = df_c123.sort_values(by="pA_c123", ascending=False)
df_c123_arranged = df_c123_arranged[df_c123_arranged.pA_c123 > min_prob]
df_c123_arranged.to_csv(
"c123_arranged_1d.txt", header=True, index=None, sep=" ", mode="w"
)
with open("c123_arranged_1d.txt", "r") as f1, open(
"pA_c123_arranged_1d.txt", "w"
) as f2:
for line in f1:
f2.write(line.replace('"', "").replace("'", ""))
os.system("rm -rf c123_arranged_1d.txt")
####c1_arranged
df_c1_arranged["index"] = df_c1_arranged.index
index_list_c1 = df_c1_arranged["index"].tolist()
df["frame_index"] = df.index
df_frame_index = df[["frame_index", "index"]]
frame_indices_c1 = []
index_indces_c1 = []
for i in index_list_c1:
df_index_list_c1 = df_frame_index.loc[df_frame_index["index"] == i]
frame_c1 = df_index_list_c1["frame_index"].tolist()
frame_indices_c1.append(frame_c1)
index_c1 = [i] * len(frame_c1)
index_indces_c1.append(index_c1)
frame_indices_c1 = [item for elem in frame_indices_c1 for item in elem]
index_indces_c1 = [item for elem in index_indces_c1 for item in elem]
df_c1_frame = pd.DataFrame(frame_indices_c1, columns=["frame_index"])
df_c1_index = pd.DataFrame(index_indces_c1, columns=["index"])
df_c1_frame_index = pd.concat([df_c1_frame, df_c1_index], axis=1)
df_c1_frame_index = df_c1_frame_index.groupby("index").filter(
lambda x: len(x) >= 10
)
df_c1_frame_index.to_csv(
"c1_frame_index_1d.txt", header=True, index=None, sep=" ", mode="w"
)
with open("c1_frame_index_1d.txt", "r") as f1, open(
"c1_frame_1d.txt", "w"
) as f2:
for line in f1:
f2.write(line.replace('"', "").replace("'", ""))
os.system("rm -rf c1_frame_index_1d.txt")
####c12_arranged
df_c12_arranged["index"] = df_c12_arranged.index
index_list_c12 = df_c12_arranged["index"].tolist()
df["frame_index"] = df.index
df_frame_index = df[["frame_index", "index"]]
frame_indices_c12 = []
index_indces_c12 = []
for i in index_list_c12:
df_index_list_c12 = df_frame_index.loc[df_frame_index["index"] == i]
frame_c12 = df_index_list_c12["frame_index"].tolist()
frame_indices_c12.append(frame_c12)
index_c12 = [i] * len(frame_c12)
index_indces_c12.append(index_c12)
frame_indices_c12 = [item for elem in frame_indices_c12 for item in elem]
index_indces_c12 = [item for elem in index_indces_c12 for item in elem]
df_c12_frame = pd.DataFrame(frame_indices_c12, columns=["frame_index"])
df_c12_index = pd.DataFrame(index_indces_c12, columns=["index"])
df_c12_frame_index = pd.concat([df_c12_frame, df_c12_index], axis=1)
df_c12_frame_index = df_c12_frame_index.groupby("index").filter(
lambda x: len(x) >= 10
)
df_c12_frame_index.to_csv(
"c12_frame_index_1d.txt", header=True, index=None, sep=" ", mode="w"
)
with open("c12_frame_index_1d.txt", "r") as f1, open(
"c12_frame_1d.txt", "w"
) as f2:
for line in f1:
f2.write(line.replace('"', "").replace("'", ""))
os.system("rm -rf c12_frame_index_1d.txt")
####c123_arranged
df_c123_arranged["index"] = df_c123_arranged.index
index_list_c123 = df_c123_arranged["index"].tolist()
df["frame_index"] = df.index
df_frame_index = df[["frame_index", "index"]]
frame_indices_c123 = []
index_indces_c123 = []
for i in index_list_c123:
df_index_list_c123 = df_frame_index.loc[df_frame_index["index"] == i]
frame_c123 = df_index_list_c123["frame_index"].tolist()
frame_indices_c123.append(frame_c123)
index_c123 = [i] * len(frame_c123)
index_indces_c123.append(index_c123)
frame_indices_c123 = [item for elem in frame_indices_c123 for item in elem]
index_indces_c123 = [item for elem in index_indces_c123 for item in elem]
df_c123_frame = pd.DataFrame(frame_indices_c123, columns=["frame_index"])
df_c123_index = pd.DataFrame(index_indces_c123, columns=["index"])
df_c123_frame_index = pd.concat([df_c123_frame, df_c123_index], axis=1)
df_c123_frame_index = df_c123_frame_index.groupby("index").filter(
lambda x: len(x) >= 10
)
df_c123_frame_index.to_csv(
"c123_frame_index_1d.txt", header=True, index=None, sep=" ", mode="w"
)
with open("c123_frame_index_1d.txt", "r") as f1, open(
"c123_frame_1d.txt", "w"
) as f2:
for line in f1:
f2.write(line.replace('"', "").replace("'", ""))
os.system("rm -rf c123_frame_index_1d.txt")
####c1
indices_c1_1d = df_c1_frame_index["index"].unique()
frames_c1 = []
for i in indices_c1_1d:
x = df_c1_frame_index.loc[df_c1_frame_index["index"] == i]
y = x["frame_index"].values.tolist()
z = random.sample(y, n_structures)
frames_c1.append(z)
frames_c1_1d = [item for elem in frames_c1 for item in elem]
with open("frames_c1_1d.pickle", "wb") as f:
pk.dump(frames_c1_1d, f)
with open("indices_c1_1d.pickle", "wb") as f:
pk.dump(indices_c1_1d, f)
####c12
indices_c12_1d = df_c12_frame_index["index"].unique()
frames_c12 = []
for i in indices_c12_1d:
x = df_c12_frame_index.loc[df_c12_frame_index["index"] == i]
y = x["frame_index"].values.tolist()
z = random.sample(y, n_structures)
frames_c12.append(z)
frames_c12_1d = [item for elem in frames_c12 for item in elem]
with open("frames_c12_1d.pickle", "wb") as f:
pk.dump(frames_c12_1d, f)
with open("indices_c12_1d.pickle", "wb") as f:
pk.dump(indices_c12_1d, f)
####c123
indices_c123_1d = df_c123_frame_index["index"].unique()
frames_c123 = []
for i in indices_c123_1d:
x = df_c123_frame_index.loc[df_c123_frame_index["index"] == i]
y = x["frame_index"].values.tolist()
z = random.sample(y, n_structures)
frames_c123.append(z)
frames_c123_1d = [item for elem in frames_c123 for item in elem]
with open("frames_c123_1d.pickle", "wb") as f:
pk.dump(frames_c123_1d, f)
with open("indices_c123_1d.pickle", "wb") as f:
pk.dump(indices_c123_1d, f)
##saving probabilities for each selected frame
####c1
prob_c1_1d_list = []
for i in indices_c1_1d:
prob_c1_1d_list.append(df_c1["pA_c1"][i])
prob_c1_1d_list = list(
itertools.chain.from_iterable(
itertools.repeat(x, n_structures) for x in prob_c1_1d_list
)
)
prob_c1_1d_list = [x / n_structures for x in prob_c1_1d_list]
with open("prob_c1_1d_list.pickle", "wb") as f:
pk.dump(prob_c1_1d_list, f)
####c12
prob_c12_1d_list = []
for i in indices_c12_1d:
prob_c12_1d_list.append(df_c12["pA_c12"][i])
prob_c12_1d_list = list(
itertools.chain.from_iterable(
itertools.repeat(x, n_structures) for x in prob_c12_1d_list
)
)
prob_c12_1d_list = [x / n_structures for x in prob_c12_1d_list]
with open("prob_c12_1d_list.pickle", "wb") as f:
pk.dump(prob_c12_1d_list, f)
####c123
prob_c123_1d_list = []
for i in indices_c123_1d:
prob_c123_1d_list.append(df_c123["pA_c123"][i])
prob_c123_1d_list = list(
itertools.chain.from_iterable(
itertools.repeat(x, n_structures) for x in prob_c123_1d_list
)
)
prob_c123_1d_list = [x / n_structures for x in prob_c123_1d_list]
with open("prob_c123_1d_list.pickle", "wb") as f:
pk.dump(prob_c123_1d_list, f)
ref_df_1d = pd.DataFrame(bins, columns=["dim0", "dim1"])
ref_df_1d["bins"] = ref_df_1d.agg(
lambda x: f"[{x['dim0']} , {x['dim1']}]", axis=1
)
ref_df_1d = ref_df_1d[["bins"]]
index_ref_1d = []
for i in range(len(bins)):
index_ref_1d.append(i)
index_ref_df_1d = pd.DataFrame(index_ref_1d, columns=["index"])
df_ref_1d = pd.concat([ref_df_1d, index_ref_df_1d], axis=1)
df_ref_1d.to_csv("ref_1d.txt", header=True, index=None, sep=" ", mode="w")
df.to_csv("df_1d.csv", index=False)
os.system("rm -rf __pycache__")
    print("Successfully Completed Reweighting")
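# Usage sketch (run from inside one gamd_simulations/* subdirectory that already
# holds md.in, gamd.log, system_final.nc and system_final.prmtop):
#   create_data_files(jump=10)
#   reweight_1d(binspace=10, n_structures=4, Xdim=[-180, 180], T=300.0)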
def reweight_2d(
binspace=10,
n_structures=4,
Xdim=[-180, 180],
Ydim=[-180, 180],
T=300.0,
min_prob=0.000001,
):
"""
Reweights boosted potential energies in two-dimensions
based on Maclaurin series expansion to one, two and
three degrees.
Parameters
----------
binspace: int
Spacing between the bins
n_structures: int
Number of structures per bin chosen
for Weighted Ensemble (WE) simulations
Xdim: list
Range of dihedral angles (1st dimension)
Ydim: list
Range of dihedral angles (2nd dimension)
T: float
MD simulation temperature
min_prob: float
minimum probability threshold
"""
beta = 1.0 / (0.001987 * float(T))
df_Phi_Psi = pd.read_csv("Phi_Psi.dat", delim_whitespace=True, header=None)
df_Phi_Psi.columns = ["Phi", "Psi"]
df_weight = pd.read_csv("weights.dat", delim_whitespace=True, header=None)
df_weight.columns = ["dV_kBT", "timestep", "dVkcalmol"]
sum_total = df_Phi_Psi.shape[0]
binsX = np.arange(float(Xdim[0]), (float(Xdim[1]) + binspace), binspace)
binsY = np.arange(float(Ydim[0]), (float(Ydim[1]) + binspace), binspace)
hist2D, hist_edgesX, hist_edgesY = np.histogram2d(
df_Phi_Psi["Phi"].values.tolist(),
df_Phi_Psi["Psi"].values.tolist(),
bins=(binsX, binsY),
weights=None,
)
pstarA_2D = [i / sum_total for i in list(hist2D)]
bins_tuple_X = create_bins(
lower_bound=int(Xdim[0]), width=binspace, upper_bound=int(Xdim[1])
)
bins_tuple_Y = create_bins(
lower_bound=int(Ydim[0]), width=binspace, upper_bound=int(Ydim[1])
)
bins = []
for i in range(len(bins_tuple_X)):
for j in range(len(bins_tuple_Y)):
bins.append([bins_tuple_X[i], bins_tuple_Y[j]])
pstarA = [item for elem in pstarA_2D for item in elem]
hist = [item for elem in hist2D for item in elem]
hist = [int(i) for i in hist]
data_X = df_Phi_Psi["Phi"].values.tolist()
binned_weights_X = []
for value in data_X:
bin_index_X = find_bin(value, bins_tuple_X)
binned_weights_X.append(bin_index_X)
data_Y = df_Phi_Psi["Psi"].values.tolist()
binned_weights_Y = []
for value in data_Y:
bin_index_Y = find_bin(value, bins_tuple_Y)
binned_weights_Y.append(bin_index_Y)
binned_weights_2D = []
for i in range(len(binned_weights_X)):
binned_weights_2D.append([binned_weights_X[i], binned_weights_Y[i]])
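    # Each (Phi, Psi) pair is flattened to a single bin index below as
    #   index = x_bin * len(bins_tuple_Y) + y_bin
    # (written with a +1 here and undone with a -1 after the DataFrame is built),
    # so bins[index] recovers the [X-interval, Y-interval] pair of that frame.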
binned_weights = []
for i in range(len(binned_weights_2D)):
binned_weights.append(
(binned_weights_2D[i][0] * len(bins_tuple_Y))
+ (binned_weights_2D[i][1] + 1)
)
df_index = pd.DataFrame(binned_weights)
df_index.columns = ["index"]
df_index["index"] = df_index["index"] - 1
df = pd.concat([df_index, df_Phi_Psi, df_weight], axis=1)
dV_c1 = []
dV_c2 = []
dV_c3 = []
dV = []
for i in range(len(bins)):
df_i = df.loc[(df["index"] == i)]
dV_list = df_i["dVkcalmol"].values.tolist()
if len(dV_list) >= 10:
dV_c1.append(statistics.mean(dV_list))
dV_c2.append(
statistics.mean([i ** 2 for i in dV_list])
- (statistics.mean(dV_list)) ** 2
)
dV_c3.append(
statistics.mean([i ** 3 for i in dV_list])
- 3
* (statistics.mean([i ** 2 for i in dV_list]))
* (statistics.mean(dV_list))
+ 2 * (statistics.mean(dV_list)) ** 3
)
if len(dV_list) < 10:
dV_c1.append(0)
dV_c2.append(0)
dV_c3.append(0)
dV.append(dV_list)
c1 = [i * beta for i in dV_c1]
c2 = [i * ((beta ** 2) / 2) for i in dV_c2]
c3 = [i * ((beta ** 3) / 6) for i in dV_c3]
c1 = c1
c12 = [a + b for a, b in zip(c1, c2)]
c123 = [a + b for a, b in zip(c12, c3)]
for i in range(len(c1)):
if c1[i] >= 700:
c1[i] = 700
for i in range(len(c12)):
if c12[i] >= 700:
c12[i] = 700
for i in range(len(c123)):
if c123[i] >= 700:
c123[i] = 700
ensemble_average_c1 = [exp(i) for i in c1]
ensemble_average_c12 = [exp(i) for i in c12]
ensemble_average_c123 = [exp(i) for i in c123]
numerator_c1 = [a * b for a, b in zip(pstarA, ensemble_average_c1)]
numerator_c12 = [a * b for a, b in zip(pstarA, ensemble_average_c12)]
numerator_c123 = [a * b for a, b in zip(pstarA, ensemble_average_c123)]
#### c1
denominatorc1 = []
for i in range(len(bins)):
product_c1 = pstarA[i] * ensemble_average_c1[i]
denominatorc1.append(product_c1)
denominator_c1 = sum(denominatorc1)
pA_c1 = [i / denominator_c1 for i in numerator_c1]
#### c12
denominatorc12 = []
for i in range(len(bins)):
product_c12 = pstarA[i] * ensemble_average_c12[i]
denominatorc12.append(product_c12)
denominator_c12 = sum(denominatorc12)
pA_c12 = [i / denominator_c12 for i in numerator_c12]
#### c123
denominatorc123 = []
for i in range(len(bins)):
product_c123 = pstarA[i] * ensemble_average_c123[i]
denominatorc123.append(product_c123)
denominator_c123 = sum(denominatorc123)
pA_c123 = [i / denominator_c123 for i in numerator_c123]
data_c1 = list(zip(bins, pA_c1))
data_c12 = list(zip(bins, pA_c12))
data_c123 = list(zip(bins, pA_c123))
df_c1 = pd.DataFrame(data_c1, columns=["bins", "pA_c1"])
df_c12 = pd.DataFrame(data_c12, columns=["bins", "pA_c12"])
df_c123 = pd.DataFrame(data_c123, columns=["bins", "pA_c123"])
df_c1.to_csv("c1_2d.txt", header=True, index=None, sep=" ", mode="w")
with open("c1_2d.txt", "r") as f1, open("pA_c1_2d.txt", "w") as f2:
for line in f1:
f2.write(line.replace('"', "").replace("'", ""))
os.system("rm -rf c1_2d.txt")
####c12
df_c12.to_csv("c12_2d.txt", header=True, index=None, sep=" ", mode="w")
with open("c12_2d.txt", "r") as f1, open("pA_c12_2d.txt", "w") as f2:
for line in f1:
f2.write(line.replace('"', "").replace("'", ""))
os.system("rm -rf c12_2d.txt")
####c123
df_c123.to_csv("c123_2d.txt", header=True, index=None, sep=" ", mode="w")
with open("c123_2d.txt", "r") as f1, open("pA_c123_2d.txt", "w") as f2:
for line in f1:
f2.write(line.replace('"', "").replace("'", ""))
os.system("rm -rf c123_2d.txt")
####c1_arranged
df_c1_arranged = df_c1.sort_values(by="pA_c1", ascending=False)
df_c1_arranged = df_c1_arranged[df_c1_arranged.pA_c1 > min_prob]
df_c1_arranged.to_csv(
"c1_arranged_2d.txt", header=True, index=None, sep=" ", mode="w"
)
with open("c1_arranged_2d.txt", "r") as f1, open(
"pA_c1_arranged_2d.txt", "w"
) as f2:
for line in f1:
f2.write(line.replace('"', "").replace("'", ""))
os.system("rm -rf c1_arranged_2d.txt")
####c12_arranged
df_c12_arranged = df_c12.sort_values(by="pA_c12", ascending=False)
df_c12_arranged = df_c12_arranged[df_c12_arranged.pA_c12 > min_prob]
df_c12_arranged.to_csv(
"c12_arranged_2d.txt", header=True, index=None, sep=" ", mode="w"
)
with open("c12_arranged_2d.txt", "r") as f1, open(
"pA_c12_arranged_2d.txt", "w"
) as f2:
for line in f1:
f2.write(line.replace('"', "").replace("'", ""))
os.system("rm -rf c12_arranged_2d.txt")
####c123_arranged
df_c123_arranged = df_c123.sort_values(by="pA_c123", ascending=False)
df_c123_arranged = df_c123_arranged[df_c123_arranged.pA_c123 > min_prob]
df_c123_arranged.to_csv(
"c123_arranged_2d.txt", header=True, index=None, sep=" ", mode="w"
)
with open("c123_arranged_2d.txt", "r") as f1, open(
"pA_c123_arranged_2d.txt", "w"
) as f2:
for line in f1:
f2.write(line.replace('"', "").replace("'", ""))
os.system("rm -rf c123_arranged_2d.txt")
####c1_arranged
df_c1_arranged["index"] = df_c1_arranged.index
index_list_c1 = df_c1_arranged["index"].tolist()
df["frame_index"] = df.index
df_frame_index = df[["frame_index", "index"]]
frame_indices_c1 = []
index_indces_c1 = []
for i in index_list_c1:
df_index_list_c1 = df_frame_index.loc[df_frame_index["index"] == i]
frame_c1 = df_index_list_c1["frame_index"].tolist()
frame_indices_c1.append(frame_c1)
index_c1 = [i] * len(frame_c1)
index_indces_c1.append(index_c1)
frame_indices_c1 = [item for elem in frame_indices_c1 for item in elem]
index_indces_c1 = [item for elem in index_indces_c1 for item in elem]
df_c1_frame = pd.DataFrame(frame_indices_c1, columns=["frame_index"])
df_c1_index = pd.DataFrame(index_indces_c1, columns=["index"])
df_c1_frame_index = pd.concat([df_c1_frame, df_c1_index], axis=1)
df_c1_frame_index = df_c1_frame_index.groupby("index").filter(
lambda x: len(x) >= 10
)
df_c1_frame_index.to_csv(
"c1_frame_index_2d.txt", header=True, index=None, sep=" ", mode="w"
)
with open("c1_frame_index_2d.txt", "r") as f1, open(
"c1_frame_2d.txt", "w"
) as f2:
for line in f1:
f2.write(line.replace('"', "").replace("'", ""))
os.system("rm -rf c1_frame_index_2d.txt")
####c12_arranged
df_c12_arranged["index"] = df_c12_arranged.index
index_list_c12 = df_c12_arranged["index"].tolist()
df["frame_index"] = df.index
df_frame_index = df[["frame_index", "index"]]
frame_indices_c12 = []
index_indces_c12 = []
for i in index_list_c12:
df_index_list_c12 = df_frame_index.loc[df_frame_index["index"] == i]
frame_c12 = df_index_list_c12["frame_index"].tolist()
frame_indices_c12.append(frame_c12)
index_c12 = [i] * len(frame_c12)
index_indces_c12.append(index_c12)
frame_indices_c12 = [item for elem in frame_indices_c12 for item in elem]
index_indces_c12 = [item for elem in index_indces_c12 for item in elem]
    df_c12_frame = pd.DataFrame(frame_indices_c12, columns=["frame_index"])
# -*- coding: utf-8 -*-
"""
module for mul and mulfix class: fund combination management
"""
import logging
import pandas as pd
from pyecharts import options as opts
from pyecharts.charts import Pie, ThemeRiver
from xalpha.cons import convert_date, myround, yesterdaydash, yesterdayobj
from xalpha.evaluate import evaluate
from xalpha.exceptions import FundTypeError, TradeBehaviorError
from xalpha.record import record, irecord
from xalpha.indicator import indicator
from xalpha.info import cashinfo, fundinfo, mfundinfo, get_fund_holdings
from xalpha.trade import (
bottleneck,
trade,
turnoverrate,
vtradevolume,
xirrcal,
itrade,
vtradecost,
)
from xalpha.universal import get_fund_type, ttjjcode, get_rt, get_industry_fromxq
import xalpha.universal as xu
logger = logging.getLogger(__name__)
class mul:
"""
multiple fund positions manage class
:param fundtradeobj: list of trade obj which you want to analyse together
:param status: the status table of trade, all code in this table would be considered.
one must provide one of the two paramters, if both are offered, status will be overlooked
可以是场内记账单 DataFrame,也可以是 record 对象。
:param istatus: 场内交易账单,也可以是 irecord 对象。
若提供,则场内外交易联合统计展示。该选项只保证 ``combsummary`` 方法可正常使用,不保证 ``mul`` 类的其他方法可用。
:param property: Dict[fundcode, property_number]. property number 的解释:
int. 1: 基金申购采取分位以后全舍而非四舍五入(这种基金是真实存在的==)。2:基金默认分红再投入(0 则是默认现金分红)。4:基金赎回按净值处理(暂时只支持货币基金,事实上无法精确支持按份额赎回的净值型基金)。将想要的性质数值相加即可,类似 *nix 上的 xwr 系统。
:param fetch: boolean, when open the fetch option, info class will try fetching from local files first in the init
:param save: boolean, when open the save option, info classes automatically save the class to files
:param path: string, the file path prefix of IO, or object or engine from sqlalchemy to connect sql database
:param form: string, the format of IO, options including: 'csv','sql'
"""
def __init__(
self,
*fundtradeobj,
status=None,
istatus=None,
property=None,
fetch=False,
save=False,
path="",
form="csv"
):
if isinstance(status, record):
if not property:
property = getattr(status, "property", {})
status = status.status
elif not property:
property = {}
self.is_in = False
if fundtradeobj:
fundtradeobj = list(fundtradeobj)
for t in fundtradeobj:
if isinstance(t, itrade):
self.is_in = True
break
else:
fundtradeobj = []
# warning: not a very good way to automatic generate these fund obj
# because there might be some funds use round_down for share calculation, ie, label=2 must be given
# unless you are sure corresponding funds are added to the droplist
fundcodelist = [f.code for f in fundtradeobj]
if status is not None:
for code in status.columns:
if code == "date" or code.startswith("#"):
continue
# r1, d2, v4 p = r+d+v
if code in fundcodelist:
continue
p = property.get(code, 0)
round_label = p % 2
dividend_label = ((p - round_label) / 2) % 2
value_label = ((p - round_label - dividend_label) / 4) % 2
try:
fundtradeobj.append(
trade(
fundinfo(
code,
round_label=round_label,
dividend_label=dividend_label,
fetch=fetch,
save=save,
path=path,
form=form,
),
status,
)
)
except FundTypeError:
fundtradeobj.append(
trade(
mfundinfo(
code,
round_label=round_label,
value_label=value_label,
fetch=fetch,
save=save,
path=path,
form=form,
),
status,
)
)
if istatus is not None:
self.is_in = True
if isinstance(istatus, irecord):
istatus = istatus.status
for code in istatus.code.unique():
if code not in fundcodelist and not code.startswith("#"):
fundtradeobj.append(itrade(code, istatus))
self.fundtradeobj = tuple(fundtradeobj)
self.totcftable = self._mergecftb()
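    # Usage sketch (the csv path is a placeholder):
    #   st = record("input.csv")
    #   combo = mul(status=st)
    #   combo.combsummary()   # per-fund and total positions as of yesterday
    #   combo.xirrrate()      # annualized XIRR of the whole combination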
def tot(self, prop="基金现值", date=yesterdayobj()):
"""
sum of all the values from one prop of fund daily report,
of coures many of the props make no sense to sum
:param prop: string defined in the daily report dict,
typical one is 'currentvalue' or 'originalpurchase'
"""
res = 0
for fund in self.fundtradeobj:
res += fund.dailyreport(date).iloc[0][prop]
return res
def combsummary(self, date=yesterdayobj()):
"""
brief report table of every funds and the combination investment
:param date: string or obj of date, show info of the date given
:returns: empty dict if nothing is remaining that date
dict of various data on the trade positions
"""
date = convert_date(date)
columns = [
"基金名称",
"基金代码",
"当日净值",
"单位成本",
"持有份额",
"基金现值",
"基金总申购",
"历史最大占用",
"基金持有成本",
"基金分红与赎回",
"换手率",
"基金收益总额",
"投资收益率",
]
summarydf = pd.DataFrame([], columns=columns)
for fund in self.fundtradeobj:
summarydf = summarydf.append(
fund.dailyreport(date), ignore_index=True, sort=True
)
tname = "总计"
tcode = "total"
tunitvalue = float("NaN")
tunitcost = float("NaN")
tholdshare = float("NaN")
tcurrentvalue = summarydf["基金现值"].sum()
tpurchase = summarydf["基金总申购"].sum()
tbtnk = bottleneck(self.totcftable[self.totcftable["date"] <= date])
tcost = summarydf["基金持有成本"].sum()
toutput = summarydf["基金分红与赎回"].sum()
tturnover = turnoverrate(self.totcftable[self.totcftable["date"] <= date], date)
        # this is the turnover of the whole portfolio against the outside world, not the turnover among its components
tearn = summarydf["基金收益总额"].sum()
trate = round(tearn / tbtnk * 100, 4)
trow = pd.DataFrame(
[
[
tname,
tcode,
tunitvalue,
tunitcost,
tholdshare,
tcurrentvalue,
tpurchase,
tbtnk,
tcost,
toutput,
tturnover,
tearn,
trate,
]
],
columns=columns,
)
summarydf = summarydf.append(trow, ignore_index=True, sort=True)
return summarydf[columns].sort_values(by="基金现值", ascending=False)
summary = combsummary
def _mergecftb(self):
"""
merge the different cftable for different funds into one table
"""
dtlist = []
for fund in self.fundtradeobj:
dtlist2 = []
for _, row in fund.cftable.iterrows():
dtlist2.append((row["date"], row["cash"]))
dtlist.extend(dtlist2)
nndtlist = set([item[0] for item in dtlist])
nndtlist = sorted(list(nndtlist), key=lambda x: x)
reslist = []
for date in nndtlist:
reslist.append(sum([item[1] for item in dtlist if item[0] == date]))
df = pd.DataFrame(data={"date": nndtlist, "cash": reslist})
df = df[df["cash"] != 0]
df = df.reset_index(drop=True)
return df
def xirrrate(self, date=yesterdayobj(), startdate=None, guess=0.01):
"""
xirr rate evauation of the whole invest combination
:param date: string or obj of datetime, the virtually sell-all date
:param startdate: string or obj of datetime, the beginning date of calculation, default from first buy
"""
return xirrcal(self.totcftable, self.fundtradeobj, date, startdate, guess)
def evaluation(self, start=None):
"""
give the evaluation object to analysis funds properties themselves instead of trades
:returns: :class:`xalpha.evaluate.evaluate` object, with referenced funds the same as funds
we invested
"""
if self.is_in:
raise NotImplementedError()
case = evaluate(
*[fundtrade.aim for fundtrade in self.fundtradeobj], start=start
)
return case
def get_stock_holdings(
self, year=None, season=None, date=yesterdayobj(), threhold=100
):
"""
        Get the aggregated underlying stock holdings and their details for the
        whole fund portfolio, i.e. a look-through of the combination.
        :param year: year of the fund quarterly report to base the holdings on
        :param season: quarter of the fund quarterly report to base the holdings on
        :param date: default is yesterday
        :param threhold: default 100. Underlying stocks worth less than this (in CNY) are omitted from the result
:return: pd.DataFrame column: name, code, value, ratio
"""
d = {}
if year is None or season is None:
rd = convert_date(date) - pd.Timedelta(days=120)
if not year:
year = rd.year
if not season:
season = int((rd.month - 0.1) / 3) + 1
logger.debug("use %s, %s for fund report" % (year, season))
for f in self.fundtradeobj:
if isinstance(f, itrade):
if f.get_type() == "股票":
code = f.code
elif f.get_type() == "场内基金":
code = f.code[2:]
else:
continue
else:
code = f.code
value = f.briefdailyreport(date).get("currentvalue", 0)
if value > 0:
if code.startswith("SH") or code.startswith("SZ"):
stock = code
d[stock] = d.get(stock, 0) + value
elif code == "mf":
continue
else:
df = get_fund_holdings(code, year, season)
if df is None:
continue
for _, row in df.iterrows():
stock = row["code"]
stock = ttjjcode(stock)
d[stock] = d.get(stock, 0) + row["ratio"] / 100 * value
# print("%s has %s contribution from %s" %(stock, row["ratio"] / 100 * value, f.name))
l = []
for code, value in sorted(d.items(), key=lambda item: -item[1]):
if value >= threhold:
try:
name = get_rt(code)["name"]
except:
name = code
l.append([name, code, value])
fdf = pd.DataFrame(l, columns=["name", "code", "value"])
fdf["ratio"] = fdf["value"] / fdf["value"].sum()
return fdf
def get_portfolio(self, date=yesterdayobj()):
"""
get the concrete values of the underlying asset-class allocation of the fund portfolio
:param date:
:return: Dict[str, float]. dictionary mapping stock, bond and cash to their total values
"""
d = {"stock": 0, "bond": 0, "cash": 0}
date = convert_date(date)
for f in self.fundtradeobj:
value = f.briefdailyreport(date).get("currentvalue", 0)
if value > 0:
if isinstance(f, itrade):
if f.get_type() == "股票":
d["stock"] += value
continue
elif f.get_type() in ["可转债", "债券"]:
d["bond"] += value
continue
elif f.get_type() == "货币基金":
d["cash"] += value
continue
elif f.get_type() == "场内基金":
code = f.code[2:]
else:
continue
else:
code = f.code
if code == "mf":
d["cash"] += value
continue
if get_fund_type(code) == "货币基金":
d["cash"] += value
continue
df = xu.get_daily("pt-F" + code, end=date.strftime("%Y%m%d"))
if df is None or len(df) == 0:
logger.warning("empty portfolio info for %s" % code)
continue  # skip this fund, otherwise df.iloc[-1] below would fail on missing data
row = df.iloc[-1]
if row["bond_ratio"] + row["stock_ratio"] < 10: # 联接基金
d["stock"] += (
(100 - row["bond_ratio"] - row["cash_ratio"]) * value / 100
)
d["bond"] += row["bond_ratio"] * value / 100
d["cash"] += row["cash_ratio"] * value / 100
else:
d["stock"] += row["stock_ratio"] * value / 100
d["bond"] += row["bond_ratio"] * value / 100
d["cash"] += row["cash_ratio"] * value / 100
return d
get_portfolio_holdings = get_portfolio
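# Hedged example (`combo` is again a placeholder instance): the asset-class split can
# be normalized into portfolio weights, e.g.
# >>> alloc = combo.get_portfolio()
# >>> total = sum(alloc.values())
# >>> {k: round(v / total, 4) for k, v in alloc.items()}  # stock/bond/cash weights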
def get_industry(self, date=yesterdayobj()):
"""
get the industry weight breakdown of the fund portfolio holdings; underlying holdings that are not A-shares are not supported yet
:param date:
:return: Dict
"""
# TODO: hard-code a dictionary to merge some second-level industries
d = {}
date = convert_date(date)
rd = date - | pd.Timedelta(days=120) | pandas.Timedelta |
import pandas as pd
import numpy as np
import re
import json
#from tkinter.ttk import Separator
def parse_thermo_vcf(vcf,excel):
''' Read in the vcf and excel files and merge them into one pandas dataframe '''
df_vcf = pd.read_csv(vcf, sep="\t", comment='#', names=["CHROM","POS","ID","REF","ALT","QUAL","FILTER","INFO","FORMAT","GT"])
df_excel = pd.read_excel(excel)
df1 = pd.DataFrame()
df2 = pd.DataFrame()
# With fusion
df_excel_w = df_excel.loc[df_excel['Type'] == 'Fusion']
if not df_excel_w.empty:
#df_excel_w.loc[:,'ID'] = df_excel_w.loc[:,'Variant ID'] + "_1"
df_excel_w = df_excel_w.assign(ID = df_excel_w.loc[:,'Variant ID'] + "_1")
df1 = | pd.merge(df_excel_w,df_vcf,on='ID',how='left') | pandas.merge |
from __future__ import absolute_import, division, print_function
import utool
import pandas as pd
import numpy as np
(print, print_, printDBG, rrr, profile) = utool.inject(__name__, '[pdh]')
from ibeis.model.hots.hstypes import VEC_DIM, INTEGER_TYPE
class LazyGetter(object):
def __init__(self, getter_func):
self.getter_func = getter_func
def __getitem__(self, index):
return self.getter_func(index)
def __call__(self, index):
return self.getter_func(index)
#def lazy_getter(getter_func):
# def lazy_closure(*args):
# return getter_func(*args)
# return lazy_closure
class DataFrameProxy(object):
"""
pandas is actually really slow. This class emulates it so
I don't have to change my function calls, but without all the slowness.
"""
def __init__(self, ibs):
self.ibs = ibs
def __getitem__(self, key):
if key == 'kpts':
return LazyGetter(self.ibs.get_annot_kpts)
elif key == 'vecs':
return LazyGetter(self.ibs.get_annot_desc)
elif key == 'labels':
return LazyGetter(self.ibs.get_annot_class_labels)
@profile
def Int32Index(data, dtype=np.int32, copy=True, name=None):
return pd.Index(data, dtype=dtype, copy=copy, name=name)
if INTEGER_TYPE is np.int32:
IntIndex = Int32Index
else:
IntIndex = pd.Int64Index
@profile
def RangeIndex(size, name=None):
arr = np.arange(size, dtype=INTEGER_TYPE)
index = IntIndex(arr, copy=False, name=name)
return index
VEC_COLUMNS = RangeIndex(VEC_DIM, name='vec')
KPT_COLUMNS = pd.Index(['xpos', 'ypos', 'a', 'c', 'd', 'theta'], name='kpt')
PANDAS_TYPES = (pd.Series, pd.DataFrame, pd.Index)
@profile
def IntSeries(data, *args, **kwargs):
if 'index' not in kwargs:
index = IntIndex(np.arange(len(data), dtype=INTEGER_TYPE))
return pd.Series(data, *args, index=index, **kwargs)
else:
return pd.Series(data, *args, **kwargs)
@profile
def pandasify_dict1d(dict_, keys, val_name, series_name, dense=True):
""" Turns dict into heirarchy of series """
if dense:
key2_series = pd.Series(
{key: pd.Series(dict_.get(key, []), name=val_name,)
for key in keys},
index=keys, name=series_name)
else:
key2_series = pd.Series(
{key: pd.Series(dict_.get(key), name=val_name,)
for key in keys},
index=IntIndex(dict_.keys(), name=keys.name), name=series_name)
return key2_series
@profile
def pandasify_dict2d(dict_, keys, key2_index, columns, series_name):
""" Turns dict into heirarchy of dataframes """
item_list = [dict_[key] for key in keys]
index_list = [key2_index[key] for key in keys]
_data = {
key: pd.DataFrame(item, index=index, columns=columns,)
for key, item, index in zip(keys, item_list, index_list)
}
key2_df = pd.Series(_data, index=keys, name=series_name)
return key2_df
@profile
def pandasify_list2d(item_list, keys, columns, val_name, series_name):
""" Turns dict into heirarchy of dataframes """
index_list = [RangeIndex(len(item), name=val_name) for item in item_list]
_data = [pd.DataFrame(item, index=index, columns=columns,)
for item, index in zip(item_list, index_list)]
key2_df = pd.Series(_data, index=keys, name=series_name)
return key2_df
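# Minimal sketch of how the helpers above compose (values are made up for illustration):
# >>> keys = IntIndex([1, 2], name='aid')
# >>> cols = pd.Index(['x', 'y'], name='kpt')
# >>> key2_df = pandasify_list2d([np.zeros((3, 2)), np.ones((2, 2))], keys, cols, 'fx', 'annots')
# >>> key2_df[1].shape  # (3, 2) frame indexed by feature number for annotation 1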
@profile
def ensure_values(data):
if isinstance(data, (np.ndarray, list)):
return data
elif isinstance(data, PANDAS_TYPES):
return data.values
elif isinstance(data, dict):
return np.array(list(data.values()))
else:
raise AssertionError(type(data))
@profile
def ensure_index(data):
if isinstance(data, PANDAS_TYPES):
return data.index
elif isinstance(data, dict):
return np.array(list(data.keys()))
else:
return np.arange(len(data))
#raise AssertionError(type(data))
def ensure_values_subset(data, keys):
if isinstance(data, dict):
return [data[key] for key in keys]
elif isinstance(data, PANDAS_TYPES):
return [ensure_values(item) for item in data[keys].values]
else:
raise AssertionError(type(data))
def ensure_values_scalar_subset(data, keys):
if isinstance(data, dict):
return [data[key] for key in keys]
elif isinstance(data, PANDAS_TYPES):
return [item for item in data[keys].values]
else:
raise AssertionError(type(data))
def ensure_2d_values(data):
#if not isinstance(data, PANDAS_TYPES):
# return data
data_ = ensure_values(data)
if len(data_) == 0:
return data_
else:
if isinstance(data_[0], PANDAS_TYPES):
return [item.values for item in data]
else:
raise AssertionError(type(data))
@profile
def pandasify_rvecs_list(wx_sublist, wx2_idxs_values, rvecs_list, aids_list,
fxs_list):
assert len(rvecs_list) == len(wx2_idxs_values)
assert len(rvecs_list) == len(wx_sublist)
rvecsdf_list = [
pd.DataFrame(rvecs, index=idxs, columns=VEC_COLUMNS)
for rvecs, idxs in zip(rvecs_list, wx2_idxs_values)] # 413 ms
_aids_list = [pd.Series(aids) for aids in aids_list]
wx2_rvecs = IntSeries(rvecsdf_list, index=wx_sublist, name='rvec')
wx2_aids = IntSeries(_aids_list, index=wx_sublist, name='wx2_aids')
wx2_fxs = IntSeries(fxs_list, index=wx_sublist, name='wx2_aids')
return wx2_rvecs, wx2_aids, wx2_fxs
@profile
def pandasify_agg_list(wx_sublist, aggvecs_list, aggaids_list, aggfxs_list):
"""
Example:
>>> from ibeis.model.hots.smk.pandas_helpers import *
"""
_aids_list = [IntSeries(aids, name='aids') for aids in aggaids_list]
_aggvecs_list = [ | pd.DataFrame(vecs, index=aids, columns=VEC_COLUMNS) | pandas.DataFrame |
# Import libraries
import numpy as np
import pandas as pd
import os
import os.path
import datetime as dt
# Print output to be displayed in terminal
print("\nBegin merge script.\nScanning for files.\n\nImporting files:")
# Get all .xlsx files from project root recursively
targetF = []
errFiles = ""
validFiles = ""
validCnt = 0
errCnt = 0
for root, subdirs, files in os.walk(os.curdir):
for f in files:
if f.endswith('.xlsx') and not (f == "master_table.xlsx" or f == "~$master_table.xlsx"):
try:
checkfile = open(os.path.join(root, f))
checkfile.close()
validCnt += 1
print(" " + os.path.join(root, f))
validFiles += " " + os.path.join(root, f) + "\n"
targetF.append(os.path.join(root, f))
except:
errCnt += 1
errFiles += " " + os.path.join(root, f)[2:] + "\n"  # [2:] drops the leading "./" from the joined path
pass
# Output a log file from the merge process
if errCnt == 0:
logMsg = "Process executed at: {}\n\nFiles imported:\n".format(str(dt.datetime.now()).split('.')[0]) + validFiles + "\nNumber of files imported = {}\n\nImport successful!".format(validCnt)
log_file = open('log.txt', 'w')
elif errCnt > 0:
logMsg = "Process executed at: {}\n\nFiles imported:\n".format(str(dt.datetime.now()).split('.')[0]) + validFiles + "{} file(s) imported.\n\nWarning!!! Some files were in use during the time of import.".format(validCnt) \
+ "\nRun the script again in case there are changes from the affected files.\n\n{} file(s) affected:\n".format(errCnt) \
+ errFiles
log_file = open('log.txt', 'w')
print("\n{} file(s) imported.\nWarning!!! {} file(s) in use during runtime:\n".format(validCnt, errCnt) + \
errFiles + "There may have been changes to the affected files.")
log_file.write(logMsg)
log_file.close()
# Load all .xlsx to dataframes and concatenate into master dataframe
dataframes = [ | pd.read_excel(t) | pandas.read_excel |
"""
The dataset module uses a data connection to retrieve symbol data for strategy
simulation.
"""
import pandas as pd
from nowtrade import logger
class Dataset(object):
"""
The Dataset object utilizes the pandas DataFrame as a backend for all
the data handling.
"""
def __init__(self, symbol_list, data_connection, start_datetime=None, \
end_datetime=None, periods=None, granularity=None):
self.symbol_list = symbol_list
# Either specify a start and end date or a number of periods since now
assert periods != None or start_datetime != None
self.start_datetime = start_datetime
self.end_datetime = end_datetime
self.periods = periods
self.granularity = granularity
self.data_connection = data_connection
self.data_frame = | pd.DataFrame() | pandas.DataFrame |
#!/usr/bin/env python
# coding: utf-8
# Copyright 2020 (c) Cognizant Digital Business, Evolutionary AI. All rights reserved. Issued under the Apache 2.0 License.
# # Example Predictor: Linear Rollout Predictor
#
# This example contains basic functionality for training and evaluating a linear predictor that rolls out predictions day-by-day.
#
# First, a training data set is created from historical case and npi data.
#
# Second, a linear model is trained to predict future cases from prior case data along with prior and future npi data.
# The model is an off-the-shelf sklearn Lasso model that uses a positive weight constraint to enforce the assumption that increased NPIs are negatively correlated with future cases.
#
# Third, a sample evaluation set is created, and the predictor is applied to this evaluation set to produce prediction results in the correct format.
# ## Training
# In[1]:
import pickle
import numpy as np
import pandas as pd
from sklearn.linear_model import Lasso
from sklearn.model_selection import train_test_split
# ### Copy the data locally
# In[2]:
# Main source for the training data
DATA_URL = 'https://raw.githubusercontent.com/OxCGRT/covid-policy-tracker/master/data/OxCGRT_latest.csv'
# Local file
DATA_FILE = 'data/OxCGRT_latest.csv'
# In[3]:
import os
import urllib.request
if not os.path.exists('data'):
os.mkdir('data')
urllib.request.urlretrieve(DATA_URL, DATA_FILE)
# In[4]:
# Load historical data from local file
old_df = pd.read_csv(DATA_FILE,
parse_dates=['Date'],
encoding="ISO-8859-1",
dtype={"RegionName": str,
"RegionCode": str},
error_bad_lines=False)
# In[5]:
if not os.path.exists('data/supplement'):
os.mkdir('data/supplement')
# In[6]:
date_range = pd.date_range(old_df['Date'].min(), old_df['Date'].max(), freq='D').strftime("%m-%d-%Y")
# In[7]:
no_info_dates = []
info_dates = []
# for date in date_range:
# date_url = "https://raw.githubusercontent.com/CSSEGISandData/COVID-19/master/csse_covid_19_data/csse_covid_19_daily_reports_us/" + date + ".csv"
# date_file = "data/us_states/" + date + "_states.csv"
# try:
# if os.path.exists(date_file):
# os.remove(date_file)
# urllib.request.urlretrieve(date_url, date_file)
# info_dates.append(date)
# except:
# no_info_dates.append(date)
# In[8]:
state_df = pd.DataFrame(columns=["Country_Region", "Province_State", "Date", "Confirmed", "Deaths", "Recovered", "Active"])
for date in date_range:
file = "data/us_states/" + date + "_states.csv"
if os.path.exists(file):
day_df = pd.read_csv(file)
day_df["Date"] = pd.to_datetime(date)
state_df = pd.concat([state_df, day_df])
state_df = state_df.rename({'Country_Region': 'CountryName', 'Province_State': 'RegionName'}, axis=1)
state_df["CountryName"] = "United States"
# In[9]:
# low testing -> confirmed vs infected?
# In[10]:
# predict change in sir parameters from past/present NPIs & cases
# In[11]:
confirmed_url = "https://raw.githubusercontent.com/CSSEGISandData/COVID-19/master/csse_covid_19_data/csse_covid_19_time_series/time_series_covid19_confirmed_global.csv"
confirmed_file = "data/jh_confirmed.csv"
# In[12]:
country_info_url = "https://raw.githubusercontent.com/CSSEGISandData/COVID-19/master/csse_covid_19_data/UID_ISO_FIPS_LookUp_Table.csv"
country_file = "data/jh_country_info.csv"
# In[13]:
confirmed_url = "https://raw.githubusercontent.com/CSSEGISandData/COVID-19/master/csse_covid_19_data/csse_covid_19_time_series/time_series_covid19_confirmed_global.csv"
confirmed_file = "data/jh_confirmed.csv"
# In[14]:
deaths_url = "https://raw.githubusercontent.com/CSSEGISandData/COVID-19/master/csse_covid_19_data/csse_covid_19_time_series/time_series_covid19_deaths_global.csv"
deaths_file = "data/jh_deaths.csv"
# In[15]:
recovered_url = "https://raw.githubusercontent.com/CSSEGISandData/COVID-19/master/csse_covid_19_data/csse_covid_19_time_series/time_series_covid19_recovered_global.csv"
recovered_file = "data/jh_recovered.csv"
# In[16]:
urllib.request.urlretrieve(confirmed_url, confirmed_file)
urllib.request.urlretrieve(country_info_url, country_file)
urllib.request.urlretrieve(deaths_url, deaths_file)
urllib.request.urlretrieve(recovered_url, recovered_file);
# In[17]:
confirmed = pd.read_csv(confirmed_file)
deaths = pd.read_csv(deaths_file)
recovered = pd.read_csv(recovered_file)
country_info = pd.read_csv(country_file)
# In[18]:
#also https://github.com/CSSEGISandData/COVID-19/blob/master/csse_covid_19_data/UID_ISO_FIPS_LookUp_Table.csv
# In[19]:
date_columns = confirmed.columns[4:]
# In[20]:
def get_geoid(row):
return row['CountryName'] + '__' + str(row['RegionName'])
# In[21]:
# confirmed['Country/Region'].unique()
# In[22]:
# brazil, australia
replace_text = {'Country/Region': {"US": "United States", "Taiwan*": "Taiwan", "Burma": "Myanmar", "Kyrgyzstan": "Kyrgyz Republic", "Congo (Brazzaville)": "Congo", "Congo (Kinshasa)": "Democratic Republic of Congo",
"Cabo Verde": "Cape Verde", "Korea, South": "South Korea", "Slovakia": "Slovak Republic", "Czechia": "Czech Republic"}}
agg_regions = ["Brazil", "Australia", "Canada", "China"]
def clean_jh_df(df):
cleaned_df = df.replace(replace_text)
summed_by_region = df.groupby(["Country/Region"]).agg(sum).reset_index()
cleaned_df = cleaned_df[df.apply(lambda x: x["Country/Region"] not in agg_regions, axis=1)]
aggregated_df = summed_by_region[summed_by_region.apply(lambda x: x["Country/Region"] in agg_regions, axis=1)]
cleaned_df = pd.concat([cleaned_df, aggregated_df])
return cleaned_df
# In[23]:
# us state data is in separate file!
# In[24]:
# todo: group stuff like provinces of china
def reshape_jh_df(df, value):
df = clean_jh_df(df)
info_columns = df.columns[:2]
date_columns = df.columns[4:]
reshaped = df.melt(id_vars=info_columns, value_vars=date_columns, var_name='Date', value_name=value)
reshaped['Date'] = pd.to_datetime(reshaped['Date'])
reshaped = reshaped.rename({'Country/Region': 'CountryName', 'Province/State': 'RegionName'}, axis=1)
reshaped = reshaped.sort_values(['CountryName', 'RegionName', 'Date'])
#reshaped['Daily' + value] = reshaped.groupby(['CountryName', 'RegionName'], dropna=False)['Total' + value].transform(lambda x: x.diff())
#reshaped['GeoID'] = reshaped.apply(get_geoid, axis=1)
return reshaped.reset_index(drop=True)
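# Quick sanity-check sketch (illustrative only, `confirmed_long` is a made-up name):
# the reshaped frame is long format with one row per region/country/date, roughly
# >>> confirmed_long = reshape_jh_df(confirmed, "Confirmed")
# >>> confirmed_long.columns.tolist()  # ['RegionName', 'CountryName', 'Date', 'Confirmed']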
# In[ ]:
# In[25]:
confirmed_df = reshape_jh_df(confirmed, "Confirmed")
recovered_df = reshape_jh_df(recovered, "Recovered")
deaths_df = reshape_jh_df(deaths, "Deaths")
# In[26]:
country_info = pd.read_csv("data/Additional_Context_Data_Global.csv")
country_info["RegionName"] = np.NaN
country_info = country_info.astype({'RegionName': 'object'})
# In[27]:
merged_df = old_df.merge(confirmed_df, how='left', on=["CountryName", "RegionName", "Date"], suffixes=[None, "_jh"])
merged_df = merged_df.merge(recovered_df, how='left', on=["CountryName", "RegionName", "Date"], suffixes=[None, "_jh"])
merged_df = merged_df.merge(deaths_df, how='left', on=["CountryName", "RegionName", "Date"], suffixes=[None, "_jh"])
merged_df = merged_df.merge(country_info, how='left', on=["CountryName", "RegionName"], suffixes=[None, "_jh"])
# perhaps use region name and region code to merge?
# currently inner merge - must be left (and should fix region clashes)
# In[28]:
merged_df = merged_df.merge(state_df, how='left', on=["CountryName", "RegionName", "Date"], suffixes=[None, "_state"])
# In[29]:
def fix_states(row):
new_row = pd.Series(row,copy=True)
if new_row["CountryName"] == "United States" and pd.notna(new_row["RegionName"]):
new_row["Confirmed"] = row["Confirmed_state"]
new_row["Recovered"] = row["Recovered_state"]
new_row["Deaths"] = row["Deaths_state"]
return new_row
# In[30]:
#merged_df[(merged_df["Date"]==pd.to_datetime("2020-12-20"))&(pd.isna(merged_df["Recovered"])&(pd.isna(merged_df["Deaths"])))]
# In[31]:
merged_df = merged_df.transform(fix_states, axis=1)
# In[32]:
# drop columns where there's _no_ data from johns hopkins to see what we're missing
#merged_df = merged_df.dropna()
# In[33]:
# required_geoids - jh_geoids
# In[34]:
df = | pd.DataFrame(merged_df) | pandas.DataFrame |
import copy
import random
import numpy as np
import pandas as pd
import pytest
from scipy import sparse
import sklearn.datasets
import sklearn.model_selection
from autosklearn.data.feature_validator import FeatureValidator
# Fixtures to be used in this class. By default all elements have 100 datapoints
@pytest.fixture
def input_data_featuretest(request):
if request.param == 'numpy_categoricalonly_nonan':
return np.random.randint(10, size=(100, 10))
elif request.param == 'numpy_numericalonly_nonan':
return np.random.uniform(10, size=(100, 10))
elif request.param == 'numpy_mixed_nonan':
return np.column_stack([
np.random.uniform(10, size=(100, 3)),
np.random.randint(10, size=(100, 3)),
np.random.uniform(10, size=(100, 3)),
np.random.randint(10, size=(100, 1)),
])
elif request.param == 'numpy_string_nonan':
return np.array([
['a', 'b', 'c', 'a', 'b', 'c'],
['a', 'b', 'd', 'r', 'b', 'c'],
])
elif request.param == 'numpy_categoricalonly_nan':
array = np.random.randint(10, size=(100, 10)).astype('float')
array[50, 0:5] = np.nan
return array
elif request.param == 'numpy_numericalonly_nan':
array = np.random.uniform(10, size=(100, 10)).astype('float')
array[50, 0:5] = np.nan
# Somehow array is changed to dtype object after np.nan
return array.astype('float')
elif request.param == 'numpy_mixed_nan':
array = np.column_stack([
np.random.uniform(10, size=(100, 3)),
np.random.randint(10, size=(100, 3)),
np.random.uniform(10, size=(100, 3)),
np.random.randint(10, size=(100, 1)),
])
array[50, 0:5] = np.nan
return array
elif request.param == 'numpy_string_nan':
return np.array([
['a', 'b', 'c', 'a', 'b', 'c'],
[np.nan, 'b', 'd', 'r', 'b', 'c'],
])
elif request.param == 'pandas_categoricalonly_nonan':
return pd.DataFrame([
{'A': 1, 'B': 2},
{'A': 3, 'B': 4},
], dtype='category')
elif request.param == 'pandas_numericalonly_nonan':
return pd.DataFrame([
{'A': 1, 'B': 2},
{'A': 3, 'B': 4},
], dtype='float')
elif request.param == 'pandas_mixed_nonan':
frame = pd.DataFrame([
{'A': 1, 'B': 2},
{'A': 3, 'B': 4},
], dtype='category')
frame['B'] = pd.to_numeric(frame['B'])
return frame
elif request.param == 'pandas_categoricalonly_nan':
return pd.DataFrame([
{'A': 1, 'B': 2, 'C': np.nan},
{'A': 3, 'C': np.nan},
], dtype='category')
elif request.param == 'pandas_numericalonly_nan':
return pd.DataFrame([
{'A': 1, 'B': 2, 'C': np.nan},
{'A': 3, 'C': np.nan},
], dtype='float')
elif request.param == 'pandas_mixed_nan':
frame = pd.DataFrame([
{'A': 1, 'B': 2, 'C': 8},
{'A': 3, 'B': 4},
], dtype='category')
frame['B'] = pd.to_numeric(frame['B'])
return frame
elif request.param == 'pandas_string_nonan':
return pd.DataFrame([
{'A': 1, 'B': 2},
{'A': 3, 'B': 4},
], dtype='string')
elif request.param == 'list_categoricalonly_nonan':
return [
['a', 'b', 'c', 'd'],
['e', 'f', 'c', 'd'],
]
elif request.param == 'list_numericalonly_nonan':
return [
[1, 2, 3, 4],
[5, 6, 7, 8]
]
elif request.param == 'list_mixed_nonan':
return [
['a', 2, 3, 4],
['b', 6, 7, 8]
]
elif request.param == 'list_categoricalonly_nan':
return [
['a', 'b', 'c', np.nan],
['e', 'f', 'c', 'd'],
]
elif request.param == 'list_numericalonly_nan':
return [
[1, 2, 3, np.nan],
[5, 6, 7, 8]
]
elif request.param == 'list_mixed_nan':
return [
['a', np.nan, 3, 4],
['b', 6, 7, 8]
]
elif 'sparse' in request.param:
# We expect the names to be of the type sparse_csc_nonan
sparse_, type_, nan_ = request.param.split('_')
if 'nonan' in nan_:
data = np.ones(3)
else:
data = np.array([1, 2, np.nan])
# Then the type of sparse
row_ind = np.array([0, 1, 2])
col_ind = np.array([1, 2, 1])
if 'csc' in type_:
return sparse.csc_matrix((data, (row_ind, col_ind)))
elif 'csr' in type_:
return sparse.csr_matrix((data, (row_ind, col_ind)))
elif 'coo' in type_:
return sparse.coo_matrix((data, (row_ind, col_ind)))
elif 'bsr' in type_:
return sparse.bsr_matrix((data, (row_ind, col_ind)))
elif 'lil' in type_:
return sparse.lil_matrix((data))
elif 'dok' in type_:
return sparse.dok_matrix(np.vstack((data, data, data)))
elif 'dia' in type_:
return sparse.dia_matrix(np.vstack((data, data, data)))
else:
ValueError("Unsupported indirect fixture {}".format(request.param))
elif 'openml' in request.param:
_, openml_id = request.param.split('_')
X, y = sklearn.datasets.fetch_openml(data_id=int(openml_id),
return_X_y=True, as_frame=True)
return X
else:
ValueError("Unsupported indirect fixture {}".format(request.param))
# Actual checks for the features
@pytest.mark.parametrize(
'input_data_featuretest',
(
'numpy_categoricalonly_nonan',
'numpy_numericalonly_nonan',
'numpy_mixed_nonan',
'numpy_categoricalonly_nan',
'numpy_numericalonly_nan',
'numpy_mixed_nan',
'pandas_categoricalonly_nonan',
'pandas_numericalonly_nonan',
'pandas_mixed_nonan',
'pandas_numericalonly_nan',
'list_numericalonly_nonan',
'list_numericalonly_nan',
'sparse_bsr_nonan',
'sparse_bsr_nan',
'sparse_coo_nonan',
'sparse_coo_nan',
'sparse_csc_nonan',
'sparse_csc_nan',
'sparse_csr_nonan',
'sparse_csr_nan',
'sparse_dia_nonan',
'sparse_dia_nan',
'sparse_dok_nonan',
'sparse_dok_nan',
'sparse_lil_nonan',
'sparse_lil_nan',
'openml_40981', # Australian
),
indirect=True
)
def test_featurevalidator_supported_types(input_data_featuretest):
validator = FeatureValidator()
validator.fit(input_data_featuretest, input_data_featuretest)
transformed_X = validator.transform(input_data_featuretest)
if sparse.issparse(input_data_featuretest):
assert sparse.issparse(transformed_X)
else:
assert isinstance(transformed_X, np.ndarray)
assert np.shape(input_data_featuretest) == np.shape(transformed_X)
assert np.issubdtype(transformed_X.dtype, np.number)
assert validator._is_fitted
@pytest.mark.parametrize(
'input_data_featuretest',
(
'list_categoricalonly_nonan',
'list_categoricalonly_nan',
'list_mixed_nonan',
'list_mixed_nan',
),
indirect=True
)
def test_featurevalidator_unsupported_list(input_data_featuretest):
validator = FeatureValidator()
with pytest.raises(ValueError, match=r".*has invalid type object. Cast it to a valid dtype.*"):
validator.fit(input_data_featuretest)
@pytest.mark.parametrize(
'input_data_featuretest',
(
'numpy_string_nonan',
'numpy_string_nan',
),
indirect=True
)
def test_featurevalidator_unsupported_numpy(input_data_featuretest):
validator = FeatureValidator()
with pytest.raises(ValueError, match=r".*When providing a numpy array.*not supported."):
validator.fit(input_data_featuretest)
@pytest.mark.parametrize(
'input_data_featuretest',
(
'pandas_categoricalonly_nan',
'pandas_mixed_nan',
'openml_179', # adult workclass has NaN in columns
),
indirect=True
)
def test_featurevalidator_unsupported_pandas(input_data_featuretest):
validator = FeatureValidator()
with pytest.raises(ValueError, match=r"Categorical features in a dataframe.*missing/NaN"):
validator.fit(input_data_featuretest)
@pytest.mark.parametrize(
'input_data_featuretest',
(
'numpy_categoricalonly_nonan',
'numpy_mixed_nonan',
'numpy_categoricalonly_nan',
'numpy_mixed_nan',
'pandas_categoricalonly_nonan',
'pandas_mixed_nonan',
'list_numericalonly_nonan',
'list_numericalonly_nan',
'sparse_bsr_nonan',
'sparse_bsr_nan',
'sparse_coo_nonan',
'sparse_coo_nan',
'sparse_csc_nonan',
'sparse_csc_nan',
'sparse_csr_nonan',
'sparse_csr_nan',
'sparse_dia_nonan',
'sparse_dia_nan',
'sparse_dok_nonan',
'sparse_dok_nan',
'sparse_lil_nonan',
),
indirect=True
)
def test_featurevalidator_fitontypeA_transformtypeB(input_data_featuretest):
"""
Check that we can fit on a given type (e.g. numpy) and still transform
after the user changes the type (e.g. to pandas).
This is only problematic in the case where an encoder is created.
"""
validator = FeatureValidator()
validator.fit(input_data_featuretest, input_data_featuretest)
if isinstance(input_data_featuretest, pd.DataFrame):
complementary_type = input_data_featuretest.to_numpy()
elif isinstance(input_data_featuretest, np.ndarray):
complementary_type = pd.DataFrame(input_data_featuretest)
elif isinstance(input_data_featuretest, list):
complementary_type = pd.DataFrame(input_data_featuretest)
elif sparse.issparse(input_data_featuretest):
complementary_type = sparse.csr_matrix(input_data_featuretest.todense())
else:
raise ValueError(type(input_data_featuretest))
transformed_X = validator.transform(complementary_type)
assert np.shape(input_data_featuretest) == np.shape(transformed_X)
assert np.issubdtype(transformed_X.dtype, np.number)
assert validator._is_fitted
def test_featurevalidator_get_columns_to_encode():
"""
Makes sure that encoded columns are returned by _get_columns_to_encode
whereas numerical columns are not returned
"""
validator = FeatureValidator()
df = pd.DataFrame([
{'int': 1, 'float': 1.0, 'category': 'one', 'bool': True},
{'int': 2, 'float': 2.0, 'category': 'two', 'bool': False},
])
for col in df.columns:
df[col] = df[col].astype(col)
enc_columns, feature_types = validator._get_columns_to_encode(df)
assert enc_columns == ['category', 'bool']
assert feature_types == ['numerical', 'numerical', 'categorical', 'categorical']
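# Rough usage sketch outside the test suite, mirroring the assertions above
# (the API usage shown is exactly the one exercised by these tests):
# >>> validator = FeatureValidator()
# >>> X = pd.DataFrame({'num': [1.0, 2.0], 'cat': pd.Categorical(['a', 'b'])})
# >>> validator.fit(X)
# >>> X_t = validator.transform(X)  # numpy array with the categorical column encoded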
def test_features_unsupported_calls_are_raised():
"""
Makes sure we raise a proper message to the user
when providing unsupported data input or using the validator in a way that is not
expected
"""
validator = FeatureValidator()
with pytest.raises(ValueError, match=r"Auto-sklearn does not support time"):
validator.fit(
pd.DataFrame({'datetime': [pd.Timestamp('20180310')]})
)
with pytest.raises(ValueError, match="has invalid type object"):
validator.fit(
pd.DataFrame({'string': ['foo']})
)
with pytest.raises(ValueError, match=r"Auto-sklearn only supports.*yet, the provided input"):
validator.fit({'input1': 1, 'input2': 2})
with pytest.raises(ValueError, match=r"has unsupported dtype string"):
validator.fit(pd.DataFrame([{'A': 1, 'B': 2}], dtype='string'))
with pytest.raises(ValueError, match=r"The feature dimensionality of the train and test"):
validator.fit(X_train=np.array([[1, 2, 3], [4, 5, 6]]),
X_test=np.array([[1, 2, 3, 4], [4, 5, 6, 7]]),
)
with pytest.raises(ValueError, match=r"Cannot call transform on a validator that is not fit"):
validator.transform(np.array([[1, 2, 3], [4, 5, 6]]))
validator.feat_type = ['Numerical']
with pytest.raises(ValueError, match=r"providing the option feat_type to the fit method is.*"):
validator.fit( | pd.DataFrame([[1, 2, 3], [4, 5, 6]]) | pandas.DataFrame |
# Implementing KNN
import pandas as pd
import numpy as np
from scipy import spatial
import operator
r_cols = ['user_id', 'movie_id', 'rating']
ratings = pd.read_csv('C:/Users/<NAME>/Desktop/Machine Learning/DataScience-Python3/ml-100k/u.data', sep='\t', names=r_cols, usecols = range(3))
#print(ratings.head())
# compute total number of ratings and average rating for every movie
movieProperties = ratings.groupby('movie_id').agg({'rating':[np.size, np.mean]})
#print(movieProperties.head())
# normalizing number of ratings in the scale from 0 - 1
movieNumRatings = | pd.DataFrame(movieProperties['rating']['size']) | pandas.DataFrame |
import itertools
import numpy as np
import pandas as pd
def entropy_cumulative_residual(signal):
"""Cumulative residual entropy (CREn)
The cumulative residual entropy is an alternative to the Shannon
differential entropy with several advantageous properties, such as non-negativity.
The implementation is based on
`dit <https://github.com/dit/dit/blob/master/dit/other/cumulative_residual_entropy.py>`_.
This function can be called either via ``entropy_cumulative_residual()`` or ``complexity_cren()``.
Parameters
----------
signal : Union[list, np.array, pd.Series]
The signal (i.e., a time series) in the form of a vector of values.
Returns
-------
CREn : float
The cumulative residual entropy.
info : dict
A dictionary containing 'Values' for each pair of events.
Examples
----------
>>> import neurokit2 as nk
>>>
>>> signal = [1, 2, 3, 4, 5, 6]
>>> cren, info = nk.entropy_cumulative_residual(signal)
>>> cren #doctest: +SKIP
"""
# Sanity checks
if isinstance(signal, (np.ndarray, pd.DataFrame)) and signal.ndim > 1:
raise ValueError(
"Multidimensional inputs (e.g., matrices or multichannel data) are not supported yet."
)
# Check if string ('ABBA'), and convert each character to list (['A', 'B', 'B', 'A'])
if not isinstance(signal, str):
signal = list(signal)
# Get probability of each event
valscount = | pd.Series(signal) | pandas.Series |
import abc
import warnings
from collections import OrderedDict
import os
from typing import Dict, List, Optional, Sequence, Tuple, Union
import matplotlib.colors as colors
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import scanpy as sc
import seaborn as sns
import squidpy as sq
from anndata import AnnData, read_h5ad
from diffxpy.testing.correction import correct
from matplotlib.ticker import FormatStrFormatter
from matplotlib.tri import Triangulation
from omnipath.interactions import import_intercell_network
from pandas import read_csv, read_excel, DataFrame
from scipy import sparse, stats
from tqdm import tqdm
class GraphTools:
"""GraphTools class."""
celldata: AnnData
img_celldata: Dict[str, AnnData]
def compute_adjacency_matrices(
self, radius: int, coord_type: str = 'generic', n_rings: int = 1, transform: str = None
):
"""Compute adjacency matrix for each image in dataset (uses `squidpy.gr.spatial_neighbors`).
Parameters
----------
radius : int
Radius of neighbors for non-grid data.
coord_type : str
Type of coordinate system.
n_rings : int
Number of rings of neighbors for grid data.
transform : str
Type of adjacency matrix transform. Valid options are:
- `spectral` - spectral transformation of the adjacency matrix.
- `cosine` - cosine transformation of the adjacency matrix.
- `None` - no transformation of the adjacency matrix.
"""
pbar_total = len(self.img_celldata.keys())
with tqdm(total=pbar_total) as pbar:
for _k, adata in self.img_celldata.items():
sq.gr.spatial_neighbors(
adata=adata,
coord_type=coord_type,
radius=radius,
n_rings=n_rings,
transform=transform,
key_added="adjacency_matrix"
)
pbar.update(1)
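# Hedged usage sketch (`interpreter` is a placeholder for an object of a class that
# mixes in GraphTools and has `img_celldata` populated):
# >>> interpreter.compute_adjacency_matrices(radius=35, coord_type="generic")
# >>> first_img = list(interpreter.img_celldata)[0]
# >>> interpreter.img_celldata[first_img].obsp["adjacency_matrix_connectivities"]  # sparse graph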
@staticmethod
def _transform_a(a):
"""Compute degree transformation of adjacency matrix.
Computes D^(-1) * (A+I), with A an adjacency matrix, I the identity matrix and D the degree matrix.
Parameters
----------
a
sparse adjacency matrix.
Returns
-------
degree transformed sparse adjacency matrix
"""
warnings.filterwarnings("ignore", message="divide by zero encountered in true_divide")
degrees = 1 / a.sum(axis=0)
degrees[a.sum(axis=0) == 0] = 0
degrees = np.squeeze(np.asarray(degrees))
deg_matrix = sparse.diags(degrees)
a_out = deg_matrix * a
return a_out
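# Tiny worked sketch of the normalization above (illustrative only): for a symmetric
# adjacency matrix every row is scaled by one over the corresponding node degree,
# so non-isolated rows sum to one.
# >>> a = sparse.csr_matrix(np.array([[0., 1., 1.], [1., 0., 0.], [1., 0., 0.]]))
# >>> GraphTools._transform_a(a).toarray()
# array([[0. , 0.5, 0.5],
#        [1. , 0. , 0. ],
#        [1. , 0. , 0. ]])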
def _transform_all_a(self, a_dict: dict):
"""Compute degree transformation for dictionary of adjacency matrices.
Computes D^(-1) * (A+I), with A an adjacency matrix, I the identity matrix and D the degree matrix for all
matrices in a dictionary.
Parameters
----------
a_dict : dict
a_dict
Returns
-------
dictionary of degree transformed sparse adjacency matrices
"""
a_transformed = {i: self._transform_a(a) for i, a in a_dict.items()}
return a_transformed
@staticmethod
def _compute_distance_matrix(pos_matrix):
"""Compute distance matrix.
Parameters
----------
pos_matrix
Position matrix.
Returns
-------
matrix of squared pairwise euclidean distances
"""
diff = pos_matrix[:, :, None] - pos_matrix[:, :, None].T
return (diff * diff).sum(1)
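# Note (sketch): the value returned above is the matrix of *squared* euclidean
# distances, which is why _get_degrees below compares it against dist * dist.
# >>> GraphTools._compute_distance_matrix(np.array([[0.0, 0.0], [3.0, 4.0]]))
# array([[ 0., 25.],
#        [25.,  0.]])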
def _get_degrees(self, max_distances: list):
"""Get dgrees.
Parameters
----------
max_distances : list
List of maximal distances.
Returns
-------
degrees
"""
degs = {}
degrees = {}
for i, adata in self.img_celldata.items():
pm = np.array(adata.obsm["spatial"])
dist_matrix = self._compute_distance_matrix(pm)
degs[i] = {dist: np.sum(dist_matrix < dist * dist, axis=0) for dist in max_distances}
for dist in max_distances:
degrees[dist] = [deg[dist] for deg in degs.values()]
return degrees
def plot_degree_vs_dist(
self,
degree_matrices: Optional[list] = None,
max_distances: Optional[list] = None,
lateral_resolution: float = 1.0,
save: Optional[str] = None,
suffix: str = "_degree_vs_dist.pdf",
show: bool = True,
return_axs: bool = False,
):
"""Plot degree versus distances.
Parameters
----------
degree_matrices : list, optional
List of degree matrices
max_distances : list, optional
List of maximal distances.
lateral_resolution : float
Lateral resolution
save : str, optional
Whether (if not None) and where (path as string given as save) to save plot.
suffix : str
Suffix of file name to save to.
show : bool
Whether to display plot.
return_axs : bool
Whether to return axis objects.
Returns
-------
axis if `return_axs` is True.
Raises
------
ValueError
If `degree_matrices` and `max_distances` are `None`.
"""
if degree_matrices is None:
if max_distances is None:
raise ValueError("Provide either distance matrices or distance values!")
else:
degree_matrices = self._get_degrees(max_distances)
plt.ioff()
fig = plt.figure(figsize=(4, 3))
mean_degree = []
distances = []
for dist, degrees in degree_matrices.items():
mean_d = [np.mean(degree) for degree in degrees]
print(np.mean(mean_d))
mean_degree += mean_d
distances += [np.int(dist * lateral_resolution)] * len(mean_d)
sns_data = pd.DataFrame(
{
"dist": distances,
"mean_degree": mean_degree,
}
)
ax = fig.add_subplot(111)
sns.boxplot(data=sns_data, x="dist", color="steelblue", y="mean_degree", ax=ax)
ax.set_yscale("log", basey=10)
ax.grid(False)
plt.ylabel("")
plt.xlabel("")
plt.xticks(rotation=90)
# Save, show and return figure.
plt.tight_layout()
if save is not None:
plt.savefig(save + suffix)
if show:
plt.show()
plt.close(fig)
plt.ion()
if return_axs:
return ax
else:
return None
class PlottingTools:
"""PlottingTools class."""
celldata: AnnData
img_celldata: Dict[str, AnnData]
def celldata_interaction_matrix(
self,
fontsize: Optional[int] = None,
figsize: Tuple[float, float] = (5, 5),
title: Optional[str] = None,
save: Optional[str] = None,
suffix: str = "_celldata_interaction_matrix.pdf",
):
"""Compute and plot interaction matrix of celldata.
The interaction matrix is computed by `squidpy.gr.interaction_matrix()`.
Parameters
----------
fontsize : int, optional
Font size.
figsize : tuple
Figure size.
title : str, optional
Figure title.
save : str, optional
Whether (if not None) and where (path as string given as save) to save plot.
suffix : str
Suffix of file name to save to.
"""
interaction_matrix = []
cluster_key = self.celldata.uns["metadata"]["cluster_col_preprocessed"]
with tqdm(total=len(self.img_celldata.keys())) as pbar:
for adata in self.img_celldata.values():
im = sq.gr.interaction_matrix(
adata, cluster_key=cluster_key, connectivity_key="adjacency_matrix", normalized=False, copy=True
)
im = pd.DataFrame(
im, columns=list(np.unique(adata.obs[cluster_key])), index=list(np.unique(adata.obs[cluster_key]))
)
interaction_matrix.append(im)
pbar.update(1)
df_concat = pd.concat(interaction_matrix)
by_row_index = df_concat.groupby(df_concat.index)
df_means = by_row_index.sum().sort_index(axis=1)
interactions = np.array(df_means).T
self.celldata.uns[f"{cluster_key}_interactions"] = interactions/np.sum(interactions, axis=1)[:, np.newaxis]
if fontsize:
sc.set_figure_params(scanpy=True, fontsize=fontsize)
if save:
save = save + suffix
sq.pl.interaction_matrix(
self.celldata,
cluster_key=cluster_key,
connectivity_key="adjacency_matrix",
figsize=figsize,
title=title,
save=save,
)
def celldata_nhood_enrichment(
self,
fontsize: Optional[int] = None,
figsize: Tuple[float, float] = (5, 5),
title: Optional[str] = None,
save: Optional[str] = None,
suffix: str = "_celldata_nhood_enrichment.pdf",
):
"""Compute and plot neighbourhood enrichment of celldata.
The enrichment is computed by `squidpy.gr.nhood_enrichment()`.
Parameters
----------
fontsize : int, optional
Font size.
figsize : tuple
Figure size.
title : str, optional
Figure title.
save : str, optional
Whether (if not None) and where (path as string given as save) to save plot.
suffix : str
Suffix of file name to save to.
"""
zscores = []
counts = []
cluster_key = self.celldata.uns["metadata"]["cluster_col_preprocessed"]
with tqdm(total=len(self.img_celldata.keys())) as pbar:
for adata in self.img_celldata.values():
im = sq.gr.nhood_enrichment(
adata,
cluster_key=cluster_key,
connectivity_key="adjacency_matrix",
copy=True,
show_progress_bar=False,
)
zscore = pd.DataFrame(
im[0],
columns=list(np.unique(adata.obs[cluster_key])),
index=list(np.unique(adata.obs[cluster_key])),
)
count = pd.DataFrame(
im[1],
columns=list(np.unique(adata.obs[cluster_key])),
index=list(np.unique(adata.obs[cluster_key])),
)
zscores.append(zscore)
counts.append(count)
pbar.update(1)
df_zscores = pd.concat(zscores)
by_row_index = df_zscores.groupby(df_zscores.index)
df_zscores = by_row_index.mean().sort_index(axis=1)
df_counts = pd.concat(counts)
by_row_index = df_counts.groupby(df_counts.index)
df_counts = by_row_index.sum().sort_index(axis=1)
self.celldata.uns[f"{cluster_key}_nhood_enrichment"] = {
"zscore": np.array(df_zscores).T,
"count": np.array(df_counts).T,
}
if fontsize:
sc.set_figure_params(scanpy=True, fontsize=fontsize)
if save:
save = save + suffix
sq.pl.nhood_enrichment(
self.celldata,
cluster_key=cluster_key,
connectivity_key="adjacency_matrix",
figsize=figsize,
title=title,
save=save,
)
def celltype_frequencies(
self,
figsize: Tuple[float, float] = (5.0, 6.0),
fontsize: Optional[int] = None,
save: Optional[str] = None,
suffix: str = "_noise_structure.pdf",
show: bool = True,
return_axs: bool = False,
):
"""Plot cell type frequencies from celldata on the complete dataset.
Parameters
----------
fontsize : int, optional
Font size.
figsize : tuple
Figure size.
save : str, optional
Whether (if not None) and where (path as string given as save) to save plot.
suffix : str
Suffix of file name to save to.
show : bool
Whether to display plot.
return_axs : bool
Whether to return axis objects.
Returns
-------
axis
If `return_axs` is True.
"""
plt.ioff()
cluster_id = self.celldata.uns["metadata"]["cluster_col_preprocessed"]
if fontsize:
sc.set_figure_params(scanpy=True, fontsize=fontsize)
fig, ax = plt.subplots(nrows=1, ncols=1, figsize=figsize)
sns.barplot(
y=self.celldata.obs[cluster_id].value_counts().index,
x=list(self.celldata.obs[cluster_id].value_counts()),
color="steelblue",
ax=ax,
)
ax.grid(False)
# Save, show and return figure.
plt.tight_layout()
if save is not None:
plt.savefig(save + suffix)
if show:
plt.show()
plt.close(fig)
plt.ion()
if return_axs:
return ax
else:
return None
def noise_structure(
self,
undefined_type: Optional[str] = None,
merge_types: Optional[Tuple[list, list]] = None,
min_x: Optional[float] = None,
max_x: Optional[float] = None,
panelsize: Tuple[float, float] = (2.0, 2.3),
fontsize: Optional[int] = None,
save: Optional[str] = None,
suffix: str = "_noise_structure.pdf",
show: bool = True,
return_axs: bool = False,
):
"""Plot cell type frequencies grouped by cell type.
Parameters
----------
undefined_type : str, optional
Undefined cell type.
merge_types : tuple, optional
Merge cell types.
min_x : float, optional
Minimal x value.
max_x : float, optional
Maximal x value.
fontsize : int, optional
Font size.
panelsize : tuple
Panel size.
save : str, optional
Whether (if not None) and where (path as string given as save) to save plot.
suffix : str
Suffix of file name to save to.
show : bool
Whether to display plot.
return_axs : bool
Whether to return axis objects.
Returns
-------
axis
If `return_axs` is True.
"""
if fontsize:
sc.set_figure_params(scanpy=True, fontsize=fontsize)
feature_mat = pd.concat(
[
pd.concat(
[
pd.DataFrame(
{
"image": [k for _i in range(adata.shape[0])],
}
),
pd.DataFrame(adata.X, columns=list(adata.var_names)),
pd.DataFrame(
np.asarray(list(adata.uns["node_type_names"].values()))[
np.argmax(adata.obsm["node_types"], axis=1)
],
columns=["cell_type"],
),
],
axis=1,
).melt(value_name="expression", var_name="gene", id_vars=["cell_type", "image"])
for k, adata in self.img_celldata.items()
]
)
feature_mat["log_expression"] = np.log(feature_mat["expression"].values + 1)
if undefined_type:
feature_mat = feature_mat[feature_mat["cell_type"] != undefined_type]
if merge_types:
for mt in merge_types[0]:
feature_mat = feature_mat.replace(mt, merge_types[-1])
plt.ioff()
ct = np.unique(feature_mat["cell_type"].values)
nrows = len(ct) // 12 + int(len(ct) % 12 > 0)
fig, ax = plt.subplots(
ncols=12, nrows=nrows, figsize=(12 * panelsize[0], nrows * panelsize[1]), sharex="all", sharey="all"
)
ax = ax.flat
for axis in ax[len(ct) :]:
axis.remove()
for i, ci in enumerate(ct):
tab = feature_mat.loc[feature_mat["cell_type"].values == ci, :]
x = np.log(tab.groupby(["gene"])["expression"].mean() + 1)
y = np.log(tab.groupby(["gene"])["expression"].var() + 1)
sns.scatterplot(x=x, y=y, ax=ax[i])
min_x = np.min(x) if min_x is None else min_x
max_x = np.max(x) if max_x is None else max_x
sns.lineplot(x=[min_x, max_x], y=[2 * min_x, 2 * max_x], color="black", ax=ax[i])
ax[i].grid(False)
ax[i].set_title(ci, fontsize=fontsize)
ax[i].set_xlabel("")
ax[i].set_ylabel("")
ax[i].yaxis.set_major_formatter(FormatStrFormatter("%0.1f"))
# Save, show and return figure.
plt.tight_layout()
if save is not None:
plt.savefig(save + suffix)
if show:
plt.show()
plt.close(fig)
plt.ion()
if return_axs:
return ax
else:
return None
def umap(
self,
image_key: str,
target_cell_type: Optional[str] = None,
undefined_type: Optional[str] = None,
n_neighbors: int = 15,
n_pcs: Optional[int] = None,
figsize: Tuple[float, float] = (4.0, 4.0),
fontsize: Optional[int] = None,
size: Optional[int] = None,
palette: Optional[str] = None,
save: Union[str, None] = None,
suffix: str = "_umap.pdf",
show: bool = True,
copy: bool = True,
):
"""Plot the umap for one image and optionally for a specific target cell type.
Parameters
----------
image_key : str
Image key.
target_cell_type : str, optional
Target cell type.
undefined_type : str, optional
Undefined cell type.
n_neighbors : int
The size of local neighborhood (in terms of number of neighboring data points) used for manifold
approximation. Larger values result in more global views of the manifold, while smaller values result in
more local data being preserved. In general values should be in the range 2 to 100.
n_pcs : int, optional
Use this many PCs.
fontsize : int, optional
Font size.
figsize : tuple
Figure size.
size : int, optional
Point size. If `None`, is automatically computed as 120000 / n_cells.
palette : str, optional
Colors to use for plotting categorical annotation groups. The palette can be a valid `ListedColormap`
name (`'Set2'`, `'tab20'`, …). If `None`, `mpl.rcParams["axes.prop_cycle"]` is used unless the categorical
variable already has colors stored in `adata.uns["{var}_colors"]`. If provided, values of
`adata.uns["{var}_colors"]` will be set.
save : str, optional
Whether (if not None) and where (path as string given as save) to save plot.
suffix : str
Suffix of file name to save to.
show : bool
Whether to display plot.
copy : bool
Whether to return a copy of the AnnData object.
Returns
-------
AnnData
If `copy` is True.
"""
temp_adata = self.img_celldata[image_key].copy()
cluster_id = temp_adata.uns["metadata"]["cluster_col_preprocessed"]
if undefined_type:
temp_adata = temp_adata[temp_adata.obs[cluster_id] != undefined_type]
if target_cell_type:
temp_adata = temp_adata[temp_adata.obs[cluster_id] == target_cell_type]
sc.pp.neighbors(temp_adata, n_neighbors=n_neighbors, n_pcs=n_pcs)
sc.tl.louvain(temp_adata)
sc.tl.umap(temp_adata)
print("n cells: ", temp_adata.shape[0])
if target_cell_type:
temp_adata.obs[f"{target_cell_type} substates"] = (
target_cell_type + " " + temp_adata.obs.louvain.astype(str)
)
temp_adata.obs[f"{target_cell_type} substates"] = temp_adata.obs[f"{target_cell_type} substates"].astype(
"category"
)
print(temp_adata.obs[f"{target_cell_type} substates"].value_counts())
color = [f"{target_cell_type} substates"]
else:
color = [cluster_id]
plt.ioff()
if fontsize:
sc.set_figure_params(scanpy=True, fontsize=fontsize)
fig, ax = plt.subplots(
nrows=1,
ncols=1,
figsize=figsize,
)
sc.pl.umap(temp_adata, color=color, ax=ax, show=False, size=size, palette=palette, title="")
# Save, show and return figure.
if save is not None:
plt.savefig(save + image_key + suffix)
if show:
plt.show()
plt.close(fig)
plt.ion()
if copy:
return temp_adata.copy()
def spatial(
self,
image_key: str,
undefined_type: Optional[str] = None,
figsize: Tuple[float, float] = (7.0, 7.0),
spot_size: int = 30,
fontsize: Optional[int] = None,
legend_loc: str = "right margin",
save: Union[str, None] = None,
suffix: str = "_spatial.pdf",
clean_view: bool = False,
show: bool = True,
copy: bool = True,
):
"""Plot spatial allocation of cells of one image for all cell types.
Parameters
----------
image_key : str
Image key.
undefined_type : str, optional
Undefined cell type.
fontsize : int, optional
Font size.
figsize : tuple
Figure size.
spot_size : int
Diameter of spot (in coordinate space) for each point. Diameter in pixels of the spots will be
`size * spot_size * scale_factor`. This argument is required if it cannot be resolved from library info.
legend_loc : str
Location of legend, either `'on data'`, `'right margin'` or a valid keyword for the loc parameter of Legend.
save : str, optional
Whether (if not None) and where (path as string given as save) to save plot.
suffix : str
Suffix of file name to save to.
show : bool
Whether to display plot.
clean_view : bool
Whether to show cleaned view.
copy : bool
Whether to return a copy of the AnnData object.
Returns
-------
AnnData
If `copy` is True.
"""
temp_adata = self.img_celldata[image_key].copy()
cluster_id = temp_adata.uns["metadata"]["cluster_col_preprocessed"]
if undefined_type:
temp_adata = temp_adata[temp_adata.obs[cluster_id] != undefined_type]
if clean_view:
temp_adata = temp_adata[np.argwhere(np.array(temp_adata.obsm["spatial"])[:, 1] < 0).squeeze()]
plt.ioff()
if fontsize:
sc.set_figure_params(scanpy=True, fontsize=fontsize)
fig, ax = plt.subplots(nrows=1, ncols=1, figsize=figsize)
sc.pl.spatial(
temp_adata, color=cluster_id, spot_size=spot_size, legend_loc=legend_loc, ax=ax, show=False, title=""
)
ax.set_xlabel("")
ax.set_ylabel("")
# Save, show and return figure.
plt.tight_layout()
if save is not None:
plt.savefig(save + image_key + suffix)
if show:
plt.show()
plt.close(fig)
plt.ion()
if copy:
return temp_adata
def compute_cluster_enrichment(
self,
image_key: list,
target_cell_type: str,
undefined_type: Optional[str] = None,
filter_titles: Optional[List[str]] = None,
n_neighbors: Optional[int] = None,
n_pcs: Optional[int] = None,
clip_pvalues: Optional[int] = -5,
):
"""Compute cluster enrichment for one image and one target cell type.
Parameters
----------
image_key : list
Image key.
target_cell_type : str
Target cell type.
undefined_type : str, optional
Undefined cell type.
filter_titles : list, optional
Filter certain titles.
n_neighbors : int
The size of local neighborhood (in terms of number of neighboring data points) used for manifold
approximation. Larger values result in more global views of the manifold, while smaller values result in
more local data being preserved. In general values should be in the range 2 to 100.
n_pcs : int, optional
Use this many PCs.
clip_pvalues : int, optional
Clipping value for p-values.
Returns
-------
adata, adata_substates, log_pval, fold_change
"""
titles = list(self.celldata.uns["node_type_names"].values())
sorce_type_names = [f"source type {x.replace('_', ' ')}" for x in titles]
pbar_total = len(self.img_celldata.keys()) + len(self.img_celldata.keys()) + len(titles)
with tqdm(total=pbar_total) as pbar:
for adata in self.img_celldata.values():
source_type = np.matmul(
np.asarray(adata.obsp["adjacency_matrix_connectivities"].todense() > 0, dtype="int"),
adata.obsm["node_types"],
)
source_type = (
pd.DataFrame((source_type > 0).astype(str), columns=sorce_type_names)
.replace({"True": "in neighbourhood", "False": "not in neighbourhood"}, regex=True)
.astype("category")
)
for col in source_type.columns:
adata.obs[col] = list(source_type[col])
adata.obs[col] = adata.obs[col].astype("category")
pbar.update(1)
pbar.update(1)
adata_list = list(self.img_celldata.values())
adata = adata_list[0].concatenate(adata_list[1:], uns_merge="same")
cluster_col = self.celldata.uns["metadata"]["cluster_col_preprocessed"]
image_col = self.celldata.uns["metadata"]["image_col"]
if undefined_type:
adata = adata[adata.obs[cluster_col] != undefined_type]
adata_substates = adata[
(adata.obs[cluster_col] == target_cell_type) & (adata.obs[image_col].isin(image_key))
]
sc.pp.neighbors(adata_substates, n_neighbors=n_neighbors, n_pcs=n_pcs)
sc.tl.louvain(adata_substates)
sc.tl.umap(adata_substates)
adata_substates.obs[
f"{target_cell_type} substates"
] = f"{target_cell_type} " + adata_substates.obs.louvain.astype(str)
adata_substates.obs[f"{target_cell_type} substates"] = adata_substates.obs[
f"{target_cell_type} substates"
].astype("category")
one_hot = pd.get_dummies(adata_substates.obs.louvain, dtype=np.bool)
# Join the encoded df
df = adata_substates.obs.join(one_hot)
distinct_louvain = len(np.unique(adata_substates.obs.louvain))
pval_source_type = []
for st in titles:
pval_cluster = []
for j in range(distinct_louvain):
crosstab = np.array(pd.crosstab(df[f"source type {st}"], df[str(j)]))
if crosstab.shape[0] < 2:
crosstab = np.vstack([crosstab, [0, 0]])
oddsratio, pvalue = stats.fisher_exact(crosstab)
pvalue = correct(np.array([pvalue]))
pval_cluster.append(pvalue)
pval_source_type.append(pval_cluster)
pbar.update(1)
print("n cells: ", adata_substates.shape[0])
substate_counts = adata_substates.obs[f"{target_cell_type} substates"].value_counts()
print(substate_counts)
columns = [f"{target_cell_type} {x}" for x in np.unique(adata_substates.obs.louvain)]
pval = pd.DataFrame(
np.array(pval_source_type).squeeze(), index=[x.replace("_", " ") for x in titles], columns=columns
)
log_pval = np.log10(pval)
if filter_titles:
log_pval = log_pval.sort_values(columns, ascending=True).filter(items=filter_titles, axis=0)
if clip_pvalues:
log_pval[log_pval < clip_pvalues] = clip_pvalues
fold_change_df = adata_substates.obs[[cluster_col, f"{target_cell_type} substates"] + sorce_type_names]
counts = pd.pivot_table(
fold_change_df.replace({"in neighbourhood": 1, "not in neighbourhood": 0}),
index=[f"{target_cell_type} substates"],
aggfunc=np.sum,
margins=True,
).T
counts["new_index"] = [x.replace("source type ", "") for x in counts.index]
counts = counts.set_index("new_index")
fold_change = counts.loc[:, columns].div(np.array(substate_counts), axis=1)
fold_change = fold_change.subtract(np.array(counts["All"] / adata_substates.shape[0]), axis=0)
if filter_titles:
fold_change = fold_change.fillna(0).filter(items=filter_titles, axis=0)
return adata.copy(), adata_substates.copy(), log_pval, fold_change
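# Hedged usage sketch (object, image and cell-type names are placeholders):
# >>> adata_all, adata_sub, log_pval, fold_change = interpreter.compute_cluster_enrichment(
# ...     image_key=["img_1"], target_cell_type="T cell", n_neighbors=15)
# >>> interpreter.cluster_enrichment(pvalues=log_pval, fold_change=fold_change)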
def cluster_enrichment(
self,
pvalues,
fold_change,
figsize: Tuple[float, float] = (4.0, 10.0),
fontsize: Optional[int] = None,
pad: float = 0.15,
pvalues_cmap=None,
linspace: Optional[Tuple[float, float, int]] = None,
save: Union[str, None] = None,
suffix: str = "_cluster_enrichment.pdf",
show: bool = True,
):
"""Plot cluster enrichment (uses the p-values and fold change computed by `compute_cluster_enrichment()`).
Parameters
----------
pvalues
P-values.
fold_change
Fold change.
fontsize : int, optional
Font size.
figsize : tuple
Figure size.
pad : float
Pad.
pvalues_cmap : tuple, optional
Cmap of p-values.
linspace : tuple, optional
Linspace.
save : str, optional
Whether (if not None) and where (path as string given as save) to save plot.
suffix : str
Suffix of file name to save to.
show : bool
Whether to display plot.
"""
class MidpointNormalize(colors.Normalize):
def __init__(self, vmin=None, vmax=None, midpoint=None, clip=False):
self.midpoint = midpoint
colors.Normalize.__init__(self, vmin, vmax, clip)
def __call__(self, value, clip=None):
x, y = [self.vmin, self.midpoint, self.vmax], [0, 0.5, 1]
return np.ma.masked_array(np.interp(value, x, y))
if fontsize:
sc.set_figure_params(scanpy=True, fontsize=fontsize)
fig, ax = plt.subplots(nrows=1, ncols=1, figsize=figsize)
m = pvalues.shape[1]
n = pvalues.shape[0]
y = np.arange(n + 1)
x = np.arange(m + 1)
xs, ys = np.meshgrid(x, y)
triangles1 = [(i + j * (m + 1), i + 1 + j * (m + 1), i + (j + 1) * (m + 1)) for j in range(n) for i in range(m)]
triangles2 = [
(i + 1 + j * (m + 1), i + 1 + (j + 1) * (m + 1), i + (j + 1) * (m + 1)) for j in range(n) for i in range(m)
]
triang1 = Triangulation(xs.ravel() - 0.5, ys.ravel() - 0.5, triangles1)
triang2 = Triangulation(xs.ravel() - 0.5, ys.ravel() - 0.5, triangles2)
if not pvalues_cmap:
pvalues_cmap = plt.get_cmap("Greys_r")
img1 = plt.tripcolor(
triang1,
np.array(pvalues).ravel(),
cmap=pvalues_cmap,
)
img2 = plt.tripcolor(
triang2, np.array(fold_change).ravel(), cmap=plt.get_cmap("seismic"), norm=MidpointNormalize(midpoint=0.0)
)
if linspace:
ticks = np.linspace(linspace[0], linspace[1], linspace[2], endpoint=True)
plt.colorbar(
img2,
ticks=ticks,
pad=pad,
orientation="horizontal",
).set_label("fold change")
else:
plt.colorbar(
img2,
pad=pad,
orientation="horizontal",
).set_label("fold change")
plt.colorbar(
img1,
).set_label("$log_{10}$ FDR-corrected pvalues")
plt.xlim(x[0] - 0.5, x[-1] - 0.5)
plt.ylim(y[0] - 0.5, y[-1] - 0.5)
plt.yticks(y[:-1])
plt.xticks(x[:-1])
ax.set_ylim(ax.get_ylim()[::-1])
ax.set_yticklabels(list(pvalues.index))
ax.set_xticklabels(list(pvalues.columns), rotation=90)
# Save, show and return figure.
if save is not None:
plt.savefig(save + suffix)
if show:
plt.show()
plt.close(fig)
plt.ion()
@staticmethod
def umaps_cluster_enrichment(
adata: AnnData,
filter_titles: list,
nrows: int = 4,
ncols: int = 5,
size: Optional[int] = None,
figsize: Tuple[float, float] = (18, 12),
fontsize: Optional[int] = None,
save: Union[str, None] = None,
suffix: str = "_cluster_enrichment_umaps.pdf",
show: bool = True,
):
"""Plot cluster enrichment.
Uses the AnnData object from `compute_cluster_enrichment()`.
Parameters
----------
adata : AnnData
Annotated data object.
filter_titles : list
Filter certain titles.
nrows : int
Number of rows in grid.
ncols : int
Number of columns in grid.
figsize : tuple
Figure size.
fontsize : int, optional
Font size.
size : int, optional
Point size. If `None`, is automatically computed as 120000 / n_cells.
save : str, optional
Whether (if not None) and where (path as string given as save) to save plot.
suffix : str
Suffix of file name to save to.
show : bool
Whether to display plot.
"""
for x in filter_titles:
adata.uns[f"source type {x}_colors"] = ["darkgreen", "lightgrey"]
if fontsize:
sc.set_figure_params(scanpy=True, fontsize=fontsize)
plt.ioff()
fig, axs = plt.subplots(
nrows=nrows,
ncols=ncols,
figsize=figsize,
)
n = len(filter_titles)
axs = axs.flat
for ax in axs[n:]:
ax.remove()
ax = axs[:n]
for i, x in enumerate(filter_titles[:-1]):
sc.pl.umap(adata, color=f"source type {x}", title=x, show=False, size=size, legend_loc="None", ax=ax[i])
ax[i].set_xlabel("")
ax[i].set_ylabel("")
sc.pl.umap(
adata,
color=f"source type {filter_titles[-1]}",
title=filter_titles[-1],
size=size,
show=False,
ax=ax[n - 1],
)
ax[n - 1].set_xlabel("")
ax[n - 1].set_ylabel("")
# Save, show and return figure.
# plt.tight_layout()
if save is not None:
plt.savefig(save + suffix)
if show:
plt.show()
plt.close(fig)
plt.ion()
def spatial_substates(
self,
adata_substates: AnnData,
image_key: str,
target_cell_type: str,
clean_view: bool = False,
figsize: Tuple[float, float] = (7.0, 7.0),
spot_size: int = 40,
fontsize: Optional[int] = None,
legend_loc: str = "right margin",
palette: Union[str, list] = "tab10",
save: Union[str, None] = None,
suffix: str = "_spatial_substates.pdf",
show: bool = True,
copy: bool = False,
):
"""Plot spatial allocation of cells.
Parameters
----------
adata_substates : AnnData
AnnData substates object.
image_key : str
Image key.
target_cell_type : str
Target cell type.
fontsize : int, optional
Font size.
figsize : tuple
Figure size.
spot_size : int
Diameter of spot (in coordinate space) for each point. Diameter in pixels of the spots will be
`size * spot_size * scale_factor`. This argument is required if it cannot be resolved from library info.
legend_loc : str
Location of legend, either `'on data'`, `'right margin'` or a valid keyword for the loc parameter of Legend.
save : str, optional
Whether (if not None) and where (path as string given as save) to save plot.
palette : str, optional
Colors to use for plotting categorical annotation groups. The palette can be a valid `ListedColormap`
name (`'Set2'`, `'tab20'`, …). If `None`, `mpl.rcParams["axes.prop_cycle"]` is used unless the categorical
variable already has colors stored in `adata.uns["{var}_colors"]`. If provided, values of
`adata.uns["{var}_colors"]` will be set.
suffix : str
Suffix of file name to save to.
show : bool
Whether to display plot.
clean_view : bool
Whether to restrict the plot to the cleaned view (cells with a negative y spatial coordinate).
copy : bool
Whether to return a copy of the AnnData object.
Returns
-------
AnnData if `copy` is True.
"""
temp_adata = self.img_celldata[image_key].copy()
cluster_id = temp_adata.uns["metadata"]["cluster_col_preprocessed"]
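# clean_view keeps only cells with a negative y spatial coordinate (presumably a
# dataset-specific crop), applied to both the background image and the substates object.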
if clean_view:
temp_adata = temp_adata[np.argwhere(np.array(temp_adata.obsm["spatial"])[:, 1] < 0).squeeze()]
adata_substates = adata_substates[
np.argwhere(np.array(adata_substates.obsm["spatial"])[:, 1] < 0).squeeze()
]
if fontsize:
sc.set_figure_params(scanpy=True, fontsize=fontsize)
fig, ax = plt.subplots(
nrows=1,
ncols=1,
figsize=figsize,
)
sc.pl.spatial(
# adata,
temp_adata[temp_adata.obs[cluster_id] != target_cell_type],
spot_size=spot_size,
ax=ax,
show=False,
na_color="whitesmoke",
title="",
)
sc.pl.spatial(
adata_substates,
color=f"{target_cell_type} substates",
spot_size=spot_size,
ax=ax,
show=False,
legend_loc=legend_loc,
title="",
palette=palette,
)
ax.invert_yaxis()
ax.set_xlabel("")
ax.set_ylabel("")
# Save, show and return figure.
plt.tight_layout()
if save is not None:
plt.savefig(save + image_key + suffix)
if show:
plt.show()
plt.close(fig)
plt.ion()
if copy:
return temp_adata
def ligrec(
self,
image_key: Optional[str] = None,
source_groups: Optional[Union[str, Sequence[str]]] = None,
undefined_type: Optional[str] = None,
hgnc_names: Optional[List[str]] = None,
fraction: Optional[float] = None,
pvalue_threshold: float = 0.3,
width: float = 3.0,
seed: int = 10,
random_state: int = 0,
fontsize: Optional[int] = None,
save: Union[str, None] = None,
suffix: str = "_ligrec.pdf",
show: bool = True,
copy: bool = True,
):
"""Plot spatial allocation of cells.
Parameters
----------
image_key : str, optional
Image key.
source_groups : str, optional
Source interaction clusters. If `None`, select all clusters.
undefined_type : str
Undefined cell type.
hgnc_names : list, optional
List of HGNC names.
fraction : float, optional
Subsample to this `fraction` of the number of observations.
pvalue_threshold : float
Only show interactions with p-value <= `pvalue_threshold`.
width : float
Width.
seed : int
Random seed for reproducibility.
random_state : int
Random seed to change subsampling.
fontsize : int, optional
Font size.
save : str, optional
Whether (if not None) and where (path as string given as save) to save plot.
suffix : str
Suffix of file name to save to.
show : bool
Whether to display plot.
copy : bool
Whether to return a copy of the AnnData object.
Returns
-------
AnnData if `copy` is True.
"""
interactions = import_intercell_network(
transmitter_params={"categories": "ligand"}, receiver_params={"categories": "receptor"}
)
if "source" in interactions.columns:
interactions.pop("source")
if "target" in interactions.columns:
interactions.pop("target")
interactions.rename(
columns={"genesymbol_intercell_source": "source", "genesymbol_intercell_target": "target"}, inplace=True
)
if image_key:
temp_adata = self.img_celldata[image_key]
else:
if fraction:
temp_adata = sc.pp.subsample(self.celldata, fraction=fraction, copy=True, random_state=random_state)
else:
temp_adata = self.celldata.copy()
cluster_id = temp_adata.uns["metadata"]["cluster_col_preprocessed"]
if undefined_type:
temp_adata = temp_adata[temp_adata.obs[cluster_id] != undefined_type]
print("n cells:", temp_adata.shape[0])
temp_adata = temp_adata.copy()
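# If HGNC symbols are supplied, rebuild the AnnData with those gene names so that the
# var_names match the gene symbols used in the omnipath ligand-receptor interaction table.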
if hgnc_names:
hgcn_x = pd.DataFrame(temp_adata.X, columns=hgnc_names)
temp_adata = AnnData(
X=hgcn_x,
obs=temp_adata.obs.astype("category"),
obsm=temp_adata.obsm,
obsp=temp_adata.obsp,
uns=temp_adata.uns,
)
sq.gr.ligrec(temp_adata, interactions=interactions, cluster_key=cluster_id, use_raw=False, seed=seed)
if save is not None:
    save = save + (image_key if image_key else "") + suffix
if fontsize:
sc.set_figure_params(scanpy=True, fontsize=fontsize)
sq.pl.ligrec(
temp_adata,
cluster_key=cluster_id,
title="",
source_groups=source_groups,
pvalue_threshold=pvalue_threshold,
width=width,
save=save,
)
if show:
plt.show()
plt.close()
plt.ion()
if copy:
return temp_adata.copy()
@staticmethod
def ligrec_barplot(
adata: AnnData,
source_group: str,
figsize: Tuple[float, float] = (5.0, 4.0),
fontsize: Optional[int] = None,
pvalue_threshold: float = 0.05,
save: Union[str, None] = None,
suffix: str = "_ligrec_barplot.pdf",
show: bool = True,
return_axs: bool = False,
):
"""Plot spatial allocation of cells.
Parameters
----------
adata : AnnData
AnnData object.
source_group : str
Source interaction cluster.
figsize : tuple
Figure size.
pvalue_threshold : float
Only show interactions with p-value <= `pvalue_threshold`.
fontsize : int, optional
Font size.
save : str, optional
Whether (if not None) and where (path as string given as save) to save plot.
suffix : str
Suffix of file name to save to.
show : bool
Whether to display plot.
return_axs : bool
Whether to return axis objects.
Returns
-------
axis if `return_axs` is True.
"""
cluster_id = adata.uns["metadata"]["cluster_col_preprocessed"]
pvals = adata.uns[f"{cluster_id}_ligrec"]["pvalues"].xs(source_group, axis=1)
if fontsize:
sc.set_figure_params(scanpy=True, fontsize=fontsize)
fig, ax = plt.subplots(nrows=1, ncols=1, figsize=figsize)
sns.barplot(
x=list(np.sum(pvals < pvalue_threshold, axis=0).index),
y=list(np.sum(pvals < pvalue_threshold, axis=0)),
ax=ax,
color="steelblue",
)
ax.grid(False)
ax.tick_params(axis="x", labelrotation=90)
# Save, show and return figure.
plt.tight_layout()
if save is not None:
plt.savefig(save + suffix)
if show:
plt.show()
plt.close(fig)
plt.ion()
if return_axs:
return ax
else:
return None
def compute_variance_decomposition(
self,
undefined_type: Optional[str] = None,
):
"""Compute variance decomposition.
Parameters
----------
undefined_type : str
Undefined cell type.
Returns
-------
var_decomposition
"""
temp_adata = self.celldata.copy()
cluster_id = temp_adata.uns["metadata"]["cluster_col_preprocessed"]
img_col = temp_adata.uns["metadata"]["image_col"]
if undefined_type:
temp_adata = temp_adata[temp_adata.obs[cluster_id] != undefined_type]
df = pd.DataFrame(temp_adata.X, columns=temp_adata.var_names)
df["image_col"] = pd.Series(list(temp_adata.obs[img_col]), dtype="category")
df["cluster_col_preprocessed"] = pd.Series(list(temp_adata.obs[cluster_id]), dtype="category")
images = np.unique(df["image_col"])
variance_decomposition = []
with tqdm(total=len(images)) as pbar:
for img in images:
mean_img_genes = np.mean(df[df["image_col"] == img], axis=0)
mean_img_global = np.mean(mean_img_genes)
intra_ct_var = []
inter_ct_var = []
gene_var = []
for ct in np.unique(df["cluster_col_preprocessed"]):
img_celltype = np.array(df[(df["image_col"] == img) & (df["cluster_col_preprocessed"] == ct)])[
:, :-2
]
if img_celltype.shape[0] == 0:
continue
mean_image_celltype = np.mean(img_celltype, axis=0)
for i in range(img_celltype.shape[0]):
intra_ct_var.append((img_celltype[i, :] - mean_image_celltype) ** 2)
inter_ct_var.append((mean_image_celltype - mean_img_genes) ** 2)
gene_var.append((mean_img_genes - mean_img_global) ** 2)
intra_ct_var = np.sum(intra_ct_var)
inter_ct_var = np.sum(inter_ct_var)
gene_var = np.sum(gene_var)
variance_decomposition.append(np.array([img, intra_ct_var, inter_ct_var, gene_var]))
pbar.update(1)
df = (
pd.DataFrame(
variance_decomposition, columns=["image_col", "intra_celltype_var", "inter_celltype_var", "gene_var"]
)
.astype(
{
"image_col": str,
"intra_celltype_var": "float32",
"inter_celltype_var": "float32",
"gene_var": "float32",
}
)
.set_index("image_col")
)
df["total"] = df.intra_celltype_var + df.inter_celltype_var + df.gene_var
df["intra cell type variance"] = df.intra_celltype_var / df.total
df["inter cell type variance"] = df.inter_celltype_var / df.total
df["gene variance"] = df.gene_var / df.total
return df
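# Minimal usage sketch (hypothetical variable name `loader` for any DataLoader subclass):
#     decomposition = loader.compute_variance_decomposition(undefined_type="other")
#     loader.variance_decomposition(decomposition)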
@staticmethod
def variance_decomposition(
df,
figsize: Tuple[float, float] = (16.0, 3.5),
fontsize: Optional[int] = None,
multiindex: bool = False,
save: Union[str, None] = None,
suffix: str = "_variance_decomposition.pdf",
show: bool = True,
return_axs: bool = False,
):
"""Plot spatial allocation of cells.
Parameters
----------
df
Variance decomposition dataframe.
figsize : tuple
Figure size.
fontsize : int, optional
Font size.
multiindex : bool
Whether the index encodes two levels joined by '_'; if True, it is split into a MultiIndex for x-axis labelling.
save : str, optional
Whether (if not None) and where (path as string given as save) to save plot.
suffix : str
Suffix of file name to save to.
show : bool
Whether to display plot.
return_axs : bool
Whether to return axis objects.
Returns
-------
axis
If `return_axs` is True.
"""
if fontsize:
sc.set_figure_params(scanpy=True, fontsize=fontsize)
fig, ax = plt.subplots(1, 1, figsize=figsize)
df.plot(
y=["intra cell type variance", "inter cell type variance", "gene variance"],
kind="bar",
stacked=True,
figsize=figsize,
ax=ax,
colormap="Blues_r",
)
if multiindex:
def process_index(k):
return tuple(k.split("_"))
df["index1"], df["index2"] = zip(*map(process_index, df.index))
df = df.set_index(["index1", "index2"])
ax.set_xlabel("")
xlabel_mapping = OrderedDict()
for index1, index2 in df.index:
xlabel_mapping.setdefault(index1, [])
xlabel_mapping[index1].append(index2)
hline = []
new_xlabels = []
for _index1, index2_list in xlabel_mapping.items():
# slice_list[0] = "{} - {}".format(mouse, slice_list[0])
index2_list[0] = "{}".format(index2_list[0])
new_xlabels.extend(index2_list)
if hline:
hline.append(len(index2_list) + hline[-1])
else:
hline.append(len(index2_list))
ax.set_xticklabels(new_xlabels)
ax.set_xlabel("")
ax.legend(bbox_to_anchor=(1, 1), loc="upper left")
# Save, show and return figure.
plt.tight_layout()
if save is not None:
plt.savefig(save + suffix)
if show:
plt.show()
plt.close(fig)
plt.ion()
if return_axs:
return ax
else:
return None
def cell_radius(
self,
area_key: Optional[str] = None,
volume_key: Optional[str] = None,
figsize: Tuple[float, float] = (16.0, 3.5),
fontsize: Optional[int] = None,
text_pos: Tuple[float, float] = (1.1, 0.9),
save: Union[str, None] = None,
suffix: str = "_distribution_cellradius.pdf",
show: bool = True,
return_axs: bool = False,
):
"""Plots the cell radius distribution.
Parameters
----------
area_key : str, optional
Key for cell area in obs.
volume_key : str, optional
Key for cell volume in obs.
figsize : tuple
Figure size.
fontsize : int, optional
Font size.
text_pos : tuple
Relative text position.
save : str, optional
Whether (if not None) and where (path as string given as save) to save plot.
suffix : str
Suffix of file name to save to.
show : bool
Whether to display plot.
return_axs : bool
Whether to return axis objects.
Returns
-------
axis
If `return_axs` is True.
"""
if fontsize:
sc.set_figure_params(scanpy=True, fontsize=fontsize)
if volume_key:
    x = np.cbrt(self.celldata.obs[volume_key])
elif area_key:
    x = np.sqrt(self.celldata.obs[area_key])
else:
    raise ValueError("Provide either `area_key` or `volume_key`.")
fig, ax = plt.subplots(1, 1, figsize=figsize)
sns.histplot(x, ax=ax)
ax.axvline(np.mean(x), color='red', linewidth=2)
min_ylim, max_ylim = ax.get_ylim()
ax.text(np.mean(x) * text_pos[0], max_ylim * text_pos[1], r'mean: {:.2f} $\mu$m'.format(np.mean(x)))
ax.set_xlabel("")
ax.set_ylabel("")
plt.tight_layout()
if save is not None:
plt.savefig(save + suffix)
if show:
plt.show()
plt.close(fig)
plt.ion()
if return_axs:
return ax
else:
return None
def minimal_cell_distance(
self,
figsize: Tuple[float, float] = (16.0, 3.5),
fontsize: Optional[int] = None,
text_pos: Tuple[float, float] = (1.1, 0.9),
save: Union[str, None] = None,
suffix: str = "_distribution_min_celldistance.pdf",
show: bool = True,
return_axs: bool = False,
):
"""Plots the minimal cell distance distribution.
Parameters
----------
figsize : tuple
Figure size.
fontsize : int, optional
Font size.
text_pos : tuple
Relative text position.
save : str, optional
Whether (if not None) and where (path as string given as save) to save plot.
suffix : str
Suffix of file name to save to.
show : bool
Whether to display plot.
return_axs : bool
Whether to return axis objects.
Returns
-------
axis
If `return_axs` is True.
"""
if fontsize:
sc.set_figure_params(scanpy=True, fontsize=fontsize)
x = []
with tqdm(total=len(self.img_celldata.keys())) as pbar:
for adata in self.img_celldata.values():
dist = adata.obsp['adjacency_matrix_distances'].todense()
for i in range(dist.shape[0]):
vec = dist[i, :]
vec = vec[vec != 0]
if vec.shape[1] == 0:
continue
x.append(np.min(vec))
pbar.update(1)
fig, ax = plt.subplots(1, 1, figsize=figsize)
sns.histplot(x, ax=ax)
ax.axvline(np.mean(x), color='red', linewidth=2)
min_ylim, max_ylim = ax.get_ylim()
ax.text(np.mean(x) * text_pos[0], max_ylim * text_pos[1], r'mean: {:.2f} $\mu$m'.format(np.mean(x)))
ax.set_xlabel("")
ax.set_ylabel("")
plt.tight_layout()
if save is not None:
plt.savefig(save + suffix)
if show:
plt.show()
plt.close(fig)
plt.ion()
if return_axs:
return ax
else:
return None
class DataLoader(GraphTools, PlottingTools):
"""DataLoader class. Inherits all functions from GraphTools and PlottingTools."""
def __init__(
self,
data_path: str,
radius: Optional[int] = None,
coord_type: str = 'generic',
n_rings: int = 1,
label_selection: Optional[List[str]] = None,
n_top_genes: Optional[int] = None
):
"""Initialize DataLoader.
Parameters
----------
data_path : str
Data path.
radius : int, optional
Radius used to build the spatial neighborhood graph.
coord_type : str
Coordinate type used for graph construction (e.g. 'generic' or 'grid').
n_rings : int
Number of neighbor rings considered for grid coordinates.
label_selection : list, optional
Label selection.
n_top_genes : int, optional
Number of highly variable genes to select; if None, all genes are kept.
"""
self.data_path = data_path
print("Loading data from raw files")
self.register_celldata(n_top_genes=n_top_genes)
self.register_img_celldata()
self.register_graph_features(label_selection=label_selection)
self.compute_adjacency_matrices(radius=radius, coord_type=coord_type, n_rings=n_rings)
self.radius = radius
print(
"Loaded %i images with complete data from %i patients "
"over %i cells with %i cell features and %i distinct celltypes."
% (
len(self.img_celldata),
len(self.patients),
self.celldata.shape[0],
self.celldata.shape[1],
len(self.celldata.uns["node_type_names"]),
)
)
@property
def patients(self):
"""Return number of patients in celldata.
Returns
-------
patients
"""
return np.unique(np.asarray(list(self.celldata.uns["img_to_patient_dict"].values())))
def register_celldata(self, n_top_genes: Optional[int] = None):
"""Load AnnData object of complete dataset."""
print("registering celldata")
self._register_celldata(n_top_genes=n_top_genes)
assert self.celldata is not None, "celldata was not loaded"
def register_img_celldata(self):
"""Load dictionary of of image-wise celldata objects with {imgage key : anndata object of image}."""
print("collecting image-wise celldata")
self._register_img_celldata()
assert self.img_celldata is not None, "image-wise celldata was not loaded"
def register_graph_features(self, label_selection):
"""Load graph level covariates.
Parameters
----------
label_selection
Label selection.
"""
print("adding graph-level covariates")
self._register_graph_features(label_selection=label_selection)
@abc.abstractmethod
def _register_celldata(self, n_top_genes: Optional[int] = None):
"""Load AnnData object of complete dataset."""
pass
@abc.abstractmethod
def _register_img_celldata(self):
"""Load dictionary of of image-wise celldata objects with {imgage key : anndata object of image}."""
pass
@abc.abstractmethod
def _register_graph_features(self, label_selection):
"""Load graph level covariates.
Parameters
----------
label_selection
Label selection.
"""
pass
def size_factors(self):
"""Get size factors (Only makes sense with positive input).
Returns
-------
sf_dict
"""
# Check if irregular sums are encountered:
for i, adata in self.img_celldata.items():
if np.any(np.sum(adata.X, axis=1) <= 0):
print("WARNING: found irregular node sizes in image %s" % str(i))
# Global mean of the per-cell total feature intensity across the whole dataset:
global_mean_per_node = self.celldata.X.sum(axis=1).mean(axis=0)
return {i: global_mean_per_node / np.sum(adata.X, axis=1) for i, adata in self.img_celldata.items()}
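# The returned dict maps each image key to a per-cell array of size factors
# (global mean total intensity / per-cell total intensity); multiplying a cell's
# expression by its factor rescales it towards a common total intensity.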
@property
def var_names(self):
return self.celldata.var_names
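# Example usage (hypothetical path and radius; DataLoaderZhang is defined below):
#     loader = DataLoaderZhang(data_path="/path/to/zhang", radius=35)
#     loader.celldata        # AnnData object covering all images
#     loader.img_celldata    # dict of per-image AnnData objects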
class DataLoaderZhang(DataLoader):
"""DataLoaderZhang class. Inherits all functions from DataLoader."""
cell_type_merge_dict = {
"Astrocytes": "Astrocytes",
"Endothelial": "Endothelial",
"L23_IT": "L2/3 IT",
"L45_IT": "L4/5 IT",
"L5_IT": "L5 IT",
"L5_PT": "L5 PT",
"L56_NP": "L5/6 NP",
"L6_CT": "L6 CT",
"L6_IT": "L6 IT",
"L6_IT_Car3": "L6 IT Car3",
"L6b": "L6b",
"Lamp5": "Lamp5",
"Microglia": "Microglia",
"OPC": "OPC",
"Oligodendrocytes": "Oligodendrocytes",
"PVM": "PVM",
"Pericytes": "Pericytes",
"Pvalb": "Pvalb",
"SMC": "SMC",
"Sncg": "Sncg",
"Sst": "Sst",
"Sst_Chodl": "Sst Chodl",
"VLMC": "VLMC",
"Vip": "Vip",
"other": "other",
}
def _register_celldata(self, n_top_genes: Optional[int] = None):
"""Load AnnData object of complete dataset."""
metadata = {
"lateral_resolution": 0.109,
"fn": "preprocessed_zhang.h5ad",
"image_col": "slice_id",
"pos_cols": ["center_x", "center_y"],
"cluster_col": "subclass",
"cluster_col_preprocessed": "subclass_preprocessed",
"patient_col": "mouse",
}
celldata = read_h5ad(os.path.join(self.data_path, metadata["fn"])).copy()
celldata.uns["metadata"] = metadata
celldata.uns["img_keys"] = list(np.unique(celldata.obs[metadata["image_col"]]))
img_to_patient_dict = {
str(x): celldata.obs[metadata["patient_col"]].values[i].split("_")[0]
for i, x in enumerate(celldata.obs[metadata["image_col"]].values)
}
celldata.uns["img_to_patient_dict"] = img_to_patient_dict
self.img_to_patient_dict = img_to_patient_dict
# register x and y coordinates into obsm
celldata.obsm["spatial"] = celldata.obs[metadata["pos_cols"]]
# add clean cluster column which removes regular expression from cluster_col
celldata.obs[metadata["cluster_col_preprocessed"]] = list(
pd.Series(list(celldata.obs[metadata["cluster_col"]]), dtype="str").map(self.cell_type_merge_dict)
)
celldata.obs[metadata["cluster_col_preprocessed"]] = celldata.obs[metadata["cluster_col_preprocessed"]].astype(
"str"
)
# register node type names
node_type_names = list(np.unique(celldata.obs[metadata["cluster_col_preprocessed"]]))
celldata.uns["node_type_names"] = {x: x for x in node_type_names}
node_types = np.zeros((celldata.shape[0], len(node_type_names)))
node_type_idx = np.array(
[
node_type_names.index(x) for x in celldata.obs[metadata["cluster_col_preprocessed"]].values
] # index in encoding vector
)
node_types[np.arange(0, node_type_idx.shape[0]), node_type_idx] = 1
celldata.obsm["node_types"] = node_types
self.celldata = celldata
def _register_img_celldata(self):
"""Load dictionary of of image-wise celldata objects with {imgage key : anndata object of image}."""
image_col = self.celldata.uns["metadata"]["image_col"]
img_celldata = {}
for k in self.celldata.uns["img_keys"]:
img_celldata[str(k)] = self.celldata[self.celldata.obs[image_col] == k].copy()
self.img_celldata = img_celldata
def _register_graph_features(self, label_selection):
"""Load graph level covariates.
Parameters
----------
label_selection
Label selection.
"""
# Save processed data to attributes.
for adata in self.img_celldata.values():
graph_covariates = {
"label_names": {},
"label_tensors": {},
"label_selection": [],
"continuous_mean": {},
"continuous_std": {},
"label_data_types": {},
}
adata.uns["graph_covariates"] = graph_covariates
graph_covariates = {
"label_names": {},
"label_selection": [],
"continuous_mean": {},
"continuous_std": {},
"label_data_types": {},
}
self.celldata.uns["graph_covariates"] = graph_covariates
class DataLoaderJarosch(DataLoader):
"""DataLoaderJarosch class. Inherits all functions from DataLoader."""
cell_type_merge_dict = {
"B cells": "B cells",
"CD4 T cells": "CD4 T cells",
"CD8 T cells": "CD8 T cells",
"GATA3+ epithelial": "GATA3+ epithelial",
"Ki67 high epithelial": "Ki67 epithelial",
"Ki67 low epithelial": "Ki67 epithelial",
"Lamina propria cells": "Lamina propria cells",
"Macrophages": "Macrophages",
"Monocytes": "Monocytes",
"PD-L1+ cells": "PD-L1+ cells",
"intraepithelial Lymphocytes": "intraepithelial Lymphocytes",
"muscular cells": "muscular cells",
"other Lymphocytes": "other Lymphocytes",
}
def _register_celldata(self, n_top_genes: Optional[int] = None):
"""Load AnnData object of complete dataset."""
metadata = {
"lateral_resolution": 0.5,
"fn": "raw_inflamed_colon_1.h5ad",
"image_col": "Annotation",
"pos_cols": ["X", "Y"],
"cluster_col": "celltype_Level_2",
"cluster_col_preprocessed": "celltype_Level_2_preprocessed",
"patient_col": None,
}
celldata = read_h5ad(os.path.join(self.data_path, metadata["fn"]))
feature_cols_hgnc_names = [
'CD14',
'MS4A1',
'IL2RA',
'CD3G',
'CD4',
'PTPRC',
'PTPRC',
'PTPRC',
'CD68',
'CD8A',
'KRT5', # 'KRT1', 'KRT14'
'FOXP3',
'GATA3',
'MKI67',
'Nuclei',
'PDCD1',
'CD274',
'SMN1',
'VIM'
]
X = pd.DataFrame(celldata.X, columns=feature_cols_hgnc_names)
import pandas as pd
import numpy as np
from pylivetrader.data.data_portal import DataPortal
from pylivetrader.assets import AssetFinder
from pylivetrader.assets import Equity
from pylivetrader.misc.pd_utils import normalize_date
from pylivetrader.finance.order import Order as ZPOrder
from trading_calendars import get_calendar
def get_fixture_data_portal(**kwargs):
b = Backend(**kwargs)
finder = AssetFinder(b)
return DataPortal(b, finder, b._calendar, False)
def create_bars(minutes, offset):
length = len(minutes)
return pd.DataFrame({
'open': np.arange(length) + 10 + offset,
'high': np.arange(length) + 15 + offset,
'low': np.arange(length) + 8 + offset,
'close': np.arange(length) + 10 + offset,
'volume': 100 + offset,
}, index=minutes)
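# create_bars builds a deterministic OHLCV frame over the given timestamps; the per-asset
# `offset` shifts all values so tests can tell assets apart by their bar values alone.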
class Backend:
def __init__(self, start=None, end=None, assets=None, exchange='NYSE'):
self.start = normalize_date(pd.Timestamp(start or '2018-08-13'))
self.end = normalize_date(pd.Timestamp(end or '2018-08-14'))
self._exchange = exchange
self._calendar = get_calendar(exchange)
self.assets = assets or ['asset-0', 'asset-1', 'asset-2']
minutes = self._calendar.minutes_for_sessions_in_range(
self.start, self.end)
self._minutely_bars = {}
for i, asset in enumerate(self.get_equities()):
bars = create_bars(minutes, i)
self._minutely_bars[asset] = bars
days = self._calendar.sessions_in_range(self.start, self.end)
self._daily_bars = {}
for i, asset in enumerate(self.get_equities()):
bars = create_bars(days, i)
self._daily_bars[asset] = bars
def get_equities(self):
return [
Equity(
asset,
symbol=asset.upper().replace('-', ''),
exchange='NYSE',
start_date=self.start,
end_date=self.end + pd.Timedelta('1000 days'),
) for asset in self.assets
]
def get_adjusted_value(self, assets, field, dt, data_frequency):
return self.get_spot_value(assets, field, dt, data_frequency, False)
def get_spot_value(
self,
assets,
field,
dt,
data_frequency,
quantopian_compatible=True):
assets_is_scalar = not isinstance(assets, (list, set, tuple))
field = 'close' if field == 'price' else field
if assets_is_scalar:
if 'd' in data_frequency:
return self._daily_bars[assets][field].iloc[-1]
else:
return self._minutely_bars[assets][field].iloc[-1]
if 'd' in data_frequency:
return pd.Series([
self._daily_bars[asset][field].iloc[-1]
for asset in assets
], index=assets)
else:
return pd.Series([
self._minutely_bars[asset][field].iloc[-1]
for asset in assets
], index=assets)
def get_bars(self, assets, data_frequency, bar_count=500, end_dt=None):
assets_is_scalar = not isinstance(assets, (list, set, tuple))
if assets_is_scalar:
assets = [assets]
barslist = []
for asset in assets:
if 'm' in data_frequency:
bars = self._minutely_bars[asset].copy()
else:
bars = self._daily_bars[asset].copy()
bars.columns = pd.MultiIndex.from_product([[asset], bars.columns])
barslist.append(bars[-bar_count:])
return pd.concat(barslist, axis=1)
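# The concatenated frame has MultiIndex columns of the form (asset, field), so e.g.
# bars[asset]['close'] selects a single asset's close series.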
@property
def time_skew(self):
return pd.Timedelta('0s')
def all_orders(self, asset=None, before=None, status='all'):
a1 = 'ASSET1'
a2 = 'ASSET2'
return {
'o01': ZPOrder(
dt=pd.Timestamp('2018-10-31 09:40:00-0400'),
asset=a1,
amount=2,
id='o01',
),
'o02': ZPOrder(
dt=pd.Timestamp('2018-10-31 09:45:00-0400'),
import os
import copy
import pytest
import numpy as np
import pandas as pd
import pyarrow as pa
from pyarrow import feather as pf
from pyarrow import parquet as pq
from time_series_transform.io.base import io_base
from time_series_transform.io.numpy import (
from_numpy,
to_numpy
)
from time_series_transform.io.pandas import (
from_pandas,
to_pandas
)
from time_series_transform.io.arrow import (
from_arrow_record_batch,
from_arrow_table,
to_arrow_record_batch,
to_arrow_table
)
from time_series_transform.transform_core_api.base import (
Time_Series_Data,
Time_Series_Data_Collection
)
from time_series_transform.io.parquet import (
from_parquet,
to_parquet
)
from time_series_transform.io.feather import (
from_feather,
to_feather
)
@pytest.fixture(scope = 'class')
def dictList_single():
return {
'time': [1, 2],
'data': [1, 2]
}
@pytest.fixture(scope = 'class')
def dictList_collection():
return {
'time': [1,2,1,3],
'data':[1,2,1,2],
'category':[1,1,2,2]
}
@pytest.fixture(scope = 'class')
def expect_single_expandTime():
return {
'data_1':[1],
'data_2':[2]
}
@pytest.fixture(scope = 'class')
def expect_single_seperateLabel():
return [{
'time': [1, 2],
'data': [1, 2]
},
{
'data_label': [1, 2]
}]
@pytest.fixture(scope = 'class')
def expect_collection_seperateLabel():
return [{
'time': [1,2,1,3],
'data':[1,2,1,2],
'category':[1,1,2,2]
},
{
'data_label':[1,2,1,2]
}
]
@pytest.fixture(scope = 'class')
def expect_collection_expandTime():
return {
'pad': {
'data_1':[1,1],
'data_2':[2,np.nan],
'data_3':[np.nan,2],
'category':[1,2]
},
'remove': {
'data_1':[1,1],
'category':[1,2]
}
}
@pytest.fixture(scope = 'class')
def expect_collection_expandCategory():
return {
'pad': {
'time':[1,2,3],
'data_1':[1,2,np.nan],
'data_2':[1,np.nan,2]
},
'remove': {
'time':[1],
'data_1':[1],
'data_2':[1]
}
}
@pytest.fixture(scope = 'class')
def expect_collection_expandFull():
return {
'pad': {
'data_1_1':[1],
'data_2_1':[1],
'data_1_2':[2],
'data_2_2':[np.nan],
'data_1_3':[np.nan],
'data_2_3':[2]
},
'remove': {
'data_1_1':[1],
'data_2_1':[1],
}
}
@pytest.fixture(scope = 'class')
def expect_collection_noExpand():
return {
'ignore':{
'time': [1,2,1,3],
'data':[1,2,1,2],
'category':[1,1,2,2]
},
'pad': {
'time': [1,2,3,1,2,3],
'data':[1,2,np.nan,1,np.nan,2],
'category':[1,1,1,2,2,2]
},
'remove': {
'time': [1,1],
'data':[1,1],
'category':[1,2]
}
}
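# In these expected fixtures, 'pad' fills time points missing for a category with NaN,
# 'remove' keeps only time points present in every category, and 'ignore' leaves the
# data unchanged (only valid when no expansion is requested).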
@pytest.fixture(scope = 'class')
def seq_single():
return {
'time':[1,2,3],
'data':[[1,2,3],[11,12,13],[21,22,23]]
}
@pytest.fixture(scope = 'class')
def seq_collection():
return {
'time':[1,2,1,2],
'data':[[1,2],[1,2],[2,2],[2,2]],
'category':[1,1,2,2]
}
@pytest.fixture(scope = 'class')
def expect_seq_collection():
return {
'data_1_1':[[1,2]],
'data_2_1':[[2,2]],
'data_1_2':[[1,2]],
'data_2_2':[[2,2]]
}
class Test_base_io:
def test_base_io_from_single(self, dictList_single,expect_single_expandTime):
ExpandTimeAns = expect_single_expandTime
data = dictList_single
ts = Time_Series_Data()
ts = ts.set_time_index(data['time'], 'time')
ts = ts.set_data(data['data'], 'data')
io = io_base(ts, 'time', None)
timeSeries = io.from_single(False)
for i in timeSeries:
assert timeSeries[i].tolist() == data[i]
timeSeries = io.from_single(True)
for i in timeSeries:
assert timeSeries[i] == ExpandTimeAns[i]
def test_base_io_to_single(self, dictList_single):
data = dictList_single
ts = Time_Series_Data()
ts = ts.set_time_index(data['time'], 'time')
ts = ts.set_data(data['data'], 'data')
io = io_base(data, 'time', None)
assert io.to_single() == ts
def test_base_io_from_collection_expandTime(self, dictList_collection,expect_collection_expandTime):
noChange = dictList_collection
expand = expect_collection_expandTime
fullExpand = []
data = dictList_collection
ts = Time_Series_Data()
ts = ts.set_time_index(data['time'], 'time')
ts = ts.set_data(data['data'], 'data')
ts = ts.set_data(data['category'],'category')
tsc = Time_Series_Data_Collection(ts,'time','category')
io = io_base(tsc, 'time', 'category')
with pytest.raises(ValueError):
timeSeries = io.from_collection(False,True,'ignore')
timeSeries = io.from_collection(False,True,'pad')
for i in timeSeries:
np.testing.assert_equal(timeSeries[i],expand['pad'][i])
timeSeries = io.from_collection(False,True,'remove')
for i in timeSeries:
np.testing.assert_equal(timeSeries[i],expand['remove'][i])
def test_base_io_from_collection_expandCategory(self, dictList_collection,expect_collection_expandCategory):
noChange = dictList_collection
expand = expect_collection_expandCategory
fullExpand = []
data = dictList_collection
ts = Time_Series_Data()
ts = ts.set_time_index(data['time'], 'time')
ts = ts.set_data(data['data'], 'data')
ts = ts.set_data(data['category'],'category')
tsc = Time_Series_Data_Collection(ts,'time','category')
io = io_base(tsc, 'time', 'category')
with pytest.raises(ValueError):
timeSeries = io.from_collection(True,False,'ignore')
timeSeries = io.from_collection(True,False,'pad')
for i in timeSeries:
np.testing.assert_equal(timeSeries[i],expand['pad'][i])
timeSeries = io.from_collection(True,False,'remove')
for i in timeSeries:
np.testing.assert_equal(timeSeries[i],expand['remove'][i])
def test_base_io_from_collection_expandFull(self, dictList_collection,expect_collection_expandFull):
noChange = dictList_collection
expand = expect_collection_expandFull
fullExpand = []
data = dictList_collection
ts = Time_Series_Data()
ts = ts.set_time_index(data['time'], 'time')
ts = ts.set_data(data['data'], 'data')
ts = ts.set_data(data['category'],'category')
tsc = Time_Series_Data_Collection(ts,'time','category')
io = io_base(tsc, 'time', 'category')
timeSeries = io.from_collection(True,True,'pad')
for i in timeSeries:
np.testing.assert_equal(timeSeries[i],expand['pad'][i])
timeSeries = io.from_collection(True,True,'remove')
for i in timeSeries:
np.testing.assert_equal(timeSeries[i],expand['remove'][i])
def test_base_io_to_collection(self, dictList_collection):
dataList = dictList_collection
io = io_base(dataList, 'time', 'category')
testData = io.to_collection()
tsd = Time_Series_Data(dataList,'time')
tsc = Time_Series_Data_Collection(tsd,'time','category')
assert testData== tsc
def test_base_io_from_collection_no_expand(self,dictList_collection,expect_collection_noExpand):
noChange = dictList_collection
expand = expect_collection_noExpand
data = dictList_collection
ts = Time_Series_Data()
ts = ts.set_time_index(data['time'], 'time')
ts = ts.set_data(data['data'], 'data')
ts = ts.set_data(data['category'],'category')
tsc = Time_Series_Data_Collection(ts,'time','category')
io = io_base(tsc, 'time', 'category')
timeSeries = io.from_collection(False,False,'ignore')
for i in timeSeries:
np.testing.assert_array_equal(timeSeries[i],expand['ignore'][i])
timeSeries = io.from_collection(False,False,'pad')
for i in timeSeries:
np.testing.assert_equal(timeSeries[i],expand['pad'][i])
timeSeries = io.from_collection(False,False,'remove')
for i in timeSeries:
np.testing.assert_equal(timeSeries[i],expand['remove'][i])
class Test_Pandas_IO:
def test_from_pandas_single(self,dictList_single):
data = dictList_single
df = pd.DataFrame(dictList_single)
tsd = Time_Series_Data(data,'time')
testData = from_pandas(df,'time',None)
assert tsd == testData
def test_from_pandas_collection(self,dictList_collection):
data = dictList_collection
df = pd.DataFrame(dictList_collection)
tsd = Time_Series_Data(data,'time')
tsc = Time_Series_Data_Collection(tsd,'time','category')
testData = from_pandas(df,'time','category')
assert tsc == testData
def test_to_pandas_single(self,dictList_single,expect_single_expandTime):
data = dictList_single
df = pd.DataFrame(data)
expandTime = pd.DataFrame(expect_single_expandTime)
tsd = Time_Series_Data(data,'time')
testData = to_pandas(
tsd,
expandCategory= None,
expandTime=False,
preprocessType= None
)
pd.testing.assert_frame_equal(testData,df,check_dtype=False)
testData = to_pandas(
tsd,
expandCategory= None,
expandTime=True,
preprocessType= None
)
pd.testing.assert_frame_equal(testData,expandTime,check_dtype=False)
def test_to_pandas_collection_expandTime(self,dictList_collection,expect_collection_expandTime):
data = dictList_collection
expandTime_pad = pd.DataFrame(expect_collection_expandTime['pad'])
expandTime_remove = pd.DataFrame(expect_collection_expandTime['remove'])
tsd = Time_Series_Data(data,'time')
tsc = Time_Series_Data_Collection(tsd,'time','category')
testData = to_pandas(
tsc,
expandCategory= False,
expandTime=True,
preprocessType= 'pad'
)
pd.testing.assert_frame_equal(testData,expandTime_pad,check_dtype=False)
testData = to_pandas(
tsc,
expandCategory= False,
expandTime=True,
preprocessType= 'remove'
)
pd.testing.assert_frame_equal(testData,expandTime_remove,check_dtype=False)
with pytest.raises(ValueError):
timeSeries = to_pandas(tsc,False,True,'ignore')
def test_to_pandas_collection_expandCategory(self,dictList_collection,expect_collection_expandCategory):
data = dictList_collection
expandTime_pad = pd.DataFrame(expect_collection_expandCategory['pad'])
expandTime_remove = pd.DataFrame(expect_collection_expandCategory['remove'])
tsd = Time_Series_Data(data,'time')
tsc = Time_Series_Data_Collection(tsd,'time','category')
testData = to_pandas(
tsc,
expandCategory= True,
expandTime=False,
preprocessType= 'pad'
)
pd.testing.assert_frame_equal(testData,expandTime_pad,check_dtype=False)
testData = to_pandas(
tsc,
expandCategory= True,
expandTime=False,
preprocessType= 'remove'
)
pd.testing.assert_frame_equal(testData,expandTime_remove,check_dtype=False)
with pytest.raises(ValueError):
timeSeries = to_pandas(tsc,True,False,'ignore')
def test_to_pandas_collection_expandFull(self,dictList_collection,expect_collection_expandFull):
data = dictList_collection
expandTime_pad = pd.DataFrame(expect_collection_expandFull['pad'])
expandTime_remove = pd.DataFrame(expect_collection_expandFull['remove'])
tsd = Time_Series_Data(data,'time')
tsc = Time_Series_Data_Collection(tsd,'time','category')
testData = to_pandas(
tsc,
expandCategory= True,
expandTime=True,
preprocessType= 'pad'
)
pd.testing.assert_frame_equal(testData,expandTime_pad,check_dtype=False)
testData = to_pandas(
tsc,
expandCategory= True,
expandTime=True,
preprocessType= 'remove'
)
pd.testing.assert_frame_equal(testData,expandTime_remove,check_dtype=False)
def test_to_pandas_collection_noExpand(self,dictList_collection,expect_collection_noExpand):
data = dictList_collection
expandTime_ignore = pd.DataFrame(expect_collection_noExpand['ignore'])
expandTime_pad = pd.DataFrame(expect_collection_noExpand['pad'])
expandTime_remove = pd.DataFrame(expect_collection_noExpand['remove'])
tsd = Time_Series_Data(data,'time')
tsc = Time_Series_Data_Collection(tsd,'time','category')
testData = to_pandas(
tsc,
expandCategory= False,
expandTime=False,
preprocessType= 'pad'
)
pd.testing.assert_frame_equal(testData,expandTime_pad,check_dtype=False)
testData = to_pandas(
tsc,
expandCategory= False,
expandTime=False,
preprocessType= 'remove'
)
pd.testing.assert_frame_equal(testData,expandTime_remove,check_dtype=False)
testData = to_pandas(
tsc,
expandCategory= False,
expandTime=False,
preprocessType= 'ignore'
)
pd.testing.assert_frame_equal(testData,expandTime_ignore,check_dtype=False)
def test_to_pandas_seperateLabels_single(self,dictList_single,expect_single_seperateLabel):
data = dictList_single
expectedX, expectedY = expect_single_seperateLabel
expectedX = pd.DataFrame(expectedX)
expectedY = pd.DataFrame(expectedY)
tsd = Time_Series_Data(data,'time')
tsd.set_labels([1,2],'data_label')
x, y = to_pandas(tsd,False,False,'ignore',True)
print(x)
print(y)
pd.testing.assert_frame_equal(x,expectedX,check_dtype=False)
pd.testing.assert_frame_equal(y,expectedY,check_dtype=False)
def test_to_pandas_seperateLabels_collection(self,dictList_collection,expect_collection_seperateLabel):
data = dictList_collection
expectedX, expectedY = expect_collection_seperateLabel
expectedX = pd.DataFrame(expectedX)
expectedY = pd.DataFrame(expectedY)
tsd = Time_Series_Data(data,'time')
tsd = tsd.set_labels([1,2,1,2],'data_label')
tsc = Time_Series_Data_Collection(tsd,'time','category')
x, y = to_pandas(tsc,False,False,'ignore',True)
print(y)
pd.testing.assert_frame_equal(x,expectedX,check_dtype=False)
pd.testing.assert_frame_equal(y,expectedY,check_dtype=False)
def test_to_pandas_single_sequence(self,seq_single):
data = seq_single
df= pd.DataFrame(data)
tsd = Time_Series_Data(data,'time')
test = to_pandas(tsd,False,False,'ignore',False)
pd.testing.assert_frame_equal(test,df,False)
def test_to_pandas_collection_sequence(self,seq_collection,expect_seq_collection):
data = seq_collection
df = pd.DataFrame(data)
tsd = Time_Series_Data(data,'time')
tsc = Time_Series_Data_Collection(tsd,'time','category')
test = to_pandas(tsc,False,False,'ignore')
pd.testing.assert_frame_equal(df,test,False)
test = to_pandas(tsc,True,True,'ignore')
full = pd.DataFrame(expect_seq_collection)
print(test)
print(full)
test = test.reindex(sorted(df.columns), axis=1)
full = full.reindex(sorted(df.columns), axis=1)
pd.testing.assert_frame_equal(test,full,False)
class Test_Numpy_IO:
def test_from_numpy_single(self,dictList_single):
data = dictList_single
tsd = Time_Series_Data()
tsd.set_time_index(data['time'],0)
tsd.set_data(data['data'],1)
numpydata = pd.DataFrame(dictList_single).values
testData = from_numpy(numpydata,0,None)
assert tsd == testData
def test_from_numpy_collection(self,dictList_collection):
data = dictList_collection
numpyData = pd.DataFrame(data).values
numpyDataDict = pd.DataFrame(pd.DataFrame(data).values).to_dict('list')
testData = from_numpy(numpyData,0,2)
tsd = Time_Series_Data(numpyDataDict,0)
assert testData == Time_Series_Data_Collection(tsd,0,2)
def test_to_numpy_single(self,dictList_single,expect_single_expandTime):
data = dictList_single
numpyData = pd.DataFrame(data).values
expandTime = pd.DataFrame(expect_single_expandTime).values
tsd = Time_Series_Data()
tsd.set_time_index(data['time'],0)
tsd.set_data(data['data'],1)
testData = to_numpy(
tsd,
expandCategory= None,
expandTime=False,
preprocessType= None
)
np.testing.assert_equal(testData,numpyData)
testData = to_numpy(
tsd,
expandCategory= None,
expandTime=True,
preprocessType= None
)
np.testing.assert_equal(testData,expandTime)
def test_to_numpy_collection_expandTime(self,dictList_collection,expect_collection_expandTime):
data = dictList_collection
results = expect_collection_expandTime
numpyData = pd.DataFrame(data).values
tsd = Time_Series_Data(data,'time')
tsc = Time_Series_Data_Collection(tsd,'time','category')
pad_numpy = to_numpy(
tsc,
expandCategory = False,
expandTime = True,
preprocessType='pad'
)
remove_numpy = to_numpy(
tsc,
expandCategory = False,
expandTime = True,
preprocessType='remove'
)
np.testing.assert_equal(pad_numpy,pd.DataFrame(results['pad']).values)
np.testing.assert_equal(remove_numpy,pd.DataFrame(results['remove']).values)
with pytest.raises(ValueError):
timeSeries = to_numpy(tsc,False,True,'ignore')
def test_to_numpy_collection_expandCategory(self,dictList_collection,expect_collection_expandCategory):
data = dictList_collection
results = expect_collection_expandCategory
numpyData = pd.DataFrame(data).values
tsd = Time_Series_Data(data,'time')
tsc = Time_Series_Data_Collection(tsd,'time','category')
pad_numpy = to_numpy(
tsc,
expandCategory = True,
expandTime = False,
preprocessType='pad'
)
remove_numpy = to_numpy(
tsc,
expandCategory = True,
expandTime = False,
preprocessType='remove'
)
np.testing.assert_equal(pad_numpy,pd.DataFrame(results['pad']).values)
np.testing.assert_equal(remove_numpy,pd.DataFrame(results['remove']).values)
with pytest.raises(ValueError):
timeSeries = to_numpy(tsc,False,True,'ignore')
def test_to_numpy_collection_expandFull(self,dictList_collection,expect_collection_expandFull):
data = dictList_collection
results = expect_collection_expandFull
numpyData = pd.DataFrame(data).values
tsd = Time_Series_Data(data,'time')
tsc = Time_Series_Data_Collection(tsd,'time','category')
pad_numpy = to_numpy(
tsc,
expandCategory = True,
expandTime = True,
preprocessType='pad'
)
np.testing.assert_equal(pad_numpy,pd.DataFrame(results['pad']).values)
remove_numpy = to_numpy(
tsc,
expandCategory = True,
expandTime = True,
preprocessType='remove'
)
np.testing.assert_equal(remove_numpy,pd.DataFrame(results['remove']).values)
def test_to_numpy_collection_noExpand(self,dictList_collection,expect_collection_noExpand):
data = dictList_collection
results = expect_collection_noExpand
numpyData = pd.DataFrame(data).values
tsd = Time_Series_Data(data,'time')
tsc = Time_Series_Data_Collection(tsd,'time','category')
pad_numpy = to_numpy(
tsc,
expandCategory = False,
expandTime = False,
preprocessType='pad'
)
np.testing.assert_equal(pad_numpy,pd.DataFrame(results['pad']).values)
remove_numpy = to_numpy(
tsc,
expandCategory = False,
expandTime = False,
preprocessType='remove'
)
np.testing.assert_equal(remove_numpy,pd.DataFrame(results['remove']).values)
ignore_numpy = to_numpy(
tsc,
expandCategory = False,
expandTime = False,
preprocessType='ignore'
)
np.testing.assert_equal(ignore_numpy,pd.DataFrame(results['ignore']).values)
def test_to_numpy_seperateLabel_single(self,dictList_single,expect_single_seperateLabel):
data = dictList_single
expectedX, expectedY = expect_single_seperateLabel
expectedX = pd.DataFrame(expectedX).values
expectedY = pd.DataFrame(expectedY).values
tsd = Time_Series_Data(data,'time')
tsd.set_labels([1,2],'data_label')
x, y = to_numpy(tsd,False,False,'ignore',True)
print(x)
print(y)
np.testing.assert_equal(x,expectedX)
np.testing.assert_equal(y,expectedY)
def test_to_numpy_seperateLabel_collection(self,dictList_collection,expect_collection_seperateLabel):
data = dictList_collection
expectedX, expectedY = expect_collection_seperateLabel
expectedX = pd.DataFrame(expectedX).values
expectedY = pd.DataFrame(expectedY).values
tsd = Time_Series_Data(data,'time')
tsd = tsd.set_labels([1,2,1,2],'data_label')
tsc = Time_Series_Data_Collection(tsd,'time','category')
x, y = to_numpy(tsc,False,False,'ignore',True)
np.testing.assert_equal(x,expectedX)
np.testing.assert_equal(y,expectedY)
def test_to_numpy_single_sequence(self,seq_single):
data = seq_single
df= pd.DataFrame(data).values
tsd = Time_Series_Data(data,'time')
test = to_numpy(tsd,False,False,'ignore',False)
np.testing.assert_equal(df,test)
def test_to_numpy_collection_sequence(self,seq_collection,expect_seq_collection):
data = seq_collection
df = pd.DataFrame(data).values
tsd = Time_Series_Data(data,'time')
tsc = Time_Series_Data_Collection(tsd,'time','category')
test = to_numpy(tsc,False,False,'ignore')
for i in range(len(test)):
if isinstance(test[i][1],np.ndarray):
test[i][1] = test[i][1].tolist()
np.testing.assert_equal(df,test)
test = to_numpy(tsc,True,True,'ignore')
full = pd.DataFrame(expect_seq_collection).values
for i in range(len(test[0])):
if isinstance(test[0][i],np.ndarray):
test[0][i] = test[0][i].tolist()
np.testing.assert_equal(full,test)
class Test_Arrow_IO:
def test_from_arrow_table_single(self,dictList_single):
data = dictList_single
df = pd.DataFrame(dictList_single)
table = pa.Table.from_pandas(df)
testData = from_arrow_table(table,'time',None)
tsd = Time_Series_Data(data,'time')
assert tsd == testData
def test_from_arrow_table_collection(self,dictList_collection):
data = dictList_collection
df = pd.DataFrame(dictList_collection)
tsd = Time_Series_Data(data,'time')
tsc = Time_Series_Data_Collection(tsd,'time','category')
table = pa.Table.from_pandas(df)
testData = from_arrow_table(table,'time','category')
assert tsc == testData
def test_from_arrow_batch_single(self,dictList_single):
data = dictList_single
df = pd.DataFrame(dictList_single)
table = pa.RecordBatch.from_pandas(df,preserve_index = False)
testData = from_arrow_record_batch(table,'time',None)
tsd = Time_Series_Data(data,'time')
assert tsd == testData
def test_from_arrow_batch_collection(self,dictList_collection):
data = dictList_collection
df = pd.DataFrame(dictList_collection)
tsd = Time_Series_Data(data,'time')
tsc = Time_Series_Data_Collection(tsd,'time','category')
table = pa.RecordBatch.from_pandas(df,preserve_index = False)
testData = from_arrow_record_batch(table,'time','category')
assert tsc == testData
def test_to_arrow_table_single(self,dictList_single,expect_single_expandTime):
data = dictList_single
df = pd.DataFrame(data)
expandTime = pd.DataFrame(expect_single_expandTime)
tsd = Time_Series_Data(data,'time')
testData = to_arrow_table(
tsd,
expandCategory= None,
expandTime=False,
preprocessType= None
).to_pandas()
pd.testing.assert_frame_equal(testData,df,check_dtype=False)
testData = to_arrow_table(
tsd,
expandCategory= None,
expandTime=True,
preprocessType= None
).to_pandas()
pd.testing.assert_frame_equal(testData,expandTime,check_dtype=False)
def test_to_arrow_table_collection_expandTime(self,dictList_collection,expect_collection_expandTime):
data = dictList_collection
expandTime_pad = pd.DataFrame(expect_collection_expandTime['pad'])
expandTime_remove = pd.DataFrame(expect_collection_expandTime['remove'])
tsd = Time_Series_Data(data,'time')
tsc = Time_Series_Data_Collection(tsd,'time','category')
testData = to_arrow_table(
tsc,
expandCategory= False,
expandTime=True,
preprocessType= 'pad'
).to_pandas()
pd.testing.assert_frame_equal(testData,expandTime_pad,check_dtype=False)
testData = to_arrow_table(
tsc,
expandCategory= False,
expandTime=True,
preprocessType= 'remove'
).to_pandas()
pd.testing.assert_frame_equal(testData,expandTime_remove,check_dtype=False)
with pytest.raises(ValueError):
tsc = Time_Series_Data_Collection(tsd,'time','category')
timeSeries = to_pandas(tsc,False,True,'ignore')
def test_to_arrow_table_collection_expandCategory(self,dictList_collection,expect_collection_expandCategory):
data = dictList_collection
expandTime_pad = pd.DataFrame(expect_collection_expandCategory['pad'])
expandTime_remove = pd.DataFrame(expect_collection_expandCategory['remove'])
tsd = Time_Series_Data(data,'time')
tsc = Time_Series_Data_Collection(tsd,'time','category')
testData = to_arrow_table(
tsc,
expandCategory= True,
expandTime=False,
preprocessType= 'pad'
).to_pandas()
pd.testing.assert_frame_equal(testData,expandTime_pad,check_dtype=False)
testData = to_arrow_table(
tsc,
expandCategory= True,
expandTime=False,
preprocessType= 'remove'
).to_pandas()
pd.testing.assert_frame_equal(testData,expandTime_remove,check_dtype=False)
with pytest.raises(ValueError):
timeSeries = to_pandas(tsc,True,False,'ignore')
def test_to_arrow_table_collection_expandFull(self,dictList_collection,expect_collection_expandFull):
data = dictList_collection
expandTime_pad = pd.DataFrame(expect_collection_expandFull['pad'])
expandTime_remove = pd.DataFrame(expect_collection_expandFull['remove'])
tsd = Time_Series_Data(data,'time')
tsc = Time_Series_Data_Collection(tsd,'time','category')
testData = to_arrow_table(
tsc,
expandCategory= True,
expandTime=True,
preprocessType= 'pad'
).to_pandas()
pd.testing.assert_frame_equal(testData,expandTime_pad,check_dtype=False)
testData = to_arrow_table(
tsc,
expandCategory= True,
expandTime=True,
preprocessType= 'remove'
).to_pandas()
pd.testing.assert_frame_equal(testData,expandTime_remove,check_dtype=False)
def test_to_arrow_table_collection_noExpand(self,dictList_collection,expect_collection_noExpand):
data = dictList_collection
expandTime_ignore = pd.DataFrame(expect_collection_noExpand['ignore'])
expandTime_pad = pd.DataFrame(expect_collection_noExpand['pad'])
expandTime_remove = pd.DataFrame(expect_collection_noExpand['remove'])
tsd = Time_Series_Data(data,'time')
tsc = Time_Series_Data_Collection(tsd,'time','category')
testData = to_arrow_table(
tsc,
expandCategory= False,
expandTime=False,
preprocessType= 'pad'
).to_pandas()
pd.testing.assert_frame_equal(testData,expandTime_pad,check_dtype=False)
testData = to_arrow_table(
tsc,
expandCategory= False,
expandTime=False,
preprocessType= 'remove'
).to_pandas()
pd.testing.assert_frame_equal(testData,expandTime_remove,check_dtype=False)
testData = to_arrow_table(
tsc,
expandCategory= False,
expandTime=False,
preprocessType= 'ignore'
).to_pandas()
pd.testing.assert_frame_equal(testData,expandTime_ignore,check_dtype=False)
def test_to_arrow_table_seperateLabels_single(self,dictList_single,expect_single_seperateLabel):
data = dictList_single
expectedX, expectedY = expect_single_seperateLabel
expectedX = pd.DataFrame(expectedX)
expectedY = pd.DataFrame(expectedY)
tsd = Time_Series_Data(data,'time')
tsd.set_labels([1,2],'data_label')
x, y = to_arrow_table(tsd,False,False,'ignore',True)
x = x.to_pandas()
y = y.to_pandas()
print(x)
print(y)
pd.testing.assert_frame_equal(x,expectedX,check_dtype=False)
pd.testing.assert_frame_equal(y,expectedY,check_dtype=False)
def test_to_arrow_table_seperateLabels_collection(self,dictList_collection,expect_collection_seperateLabel):
data = dictList_collection
expectedX, expectedY = expect_collection_seperateLabel
expectedX = pd.DataFrame(expectedX)
expectedY = pd.DataFrame(expectedY)
tsd = Time_Series_Data(data,'time')
tsd = tsd.set_labels([1,2,1,2],'data_label')
tsc = Time_Series_Data_Collection(tsd,'time','category')
x, y = to_arrow_table(tsc,False,False,'ignore',True)
x = x.to_pandas()
y = y.to_pandas()
print(y)
pd.testing.assert_frame_equal(x,expectedX,check_dtype=False)
pd.testing.assert_frame_equal(y,expectedY,check_dtype=False)
def test_to_arrow_table_single_sequence(self,seq_single):
data = seq_single
df= pd.DataFrame(data)
tsd = Time_Series_Data(data,'time')
test = to_arrow_table(tsd,False,False,'ignore',False).to_pandas()
pd.testing.assert_frame_equal(test,df,False)
def test_to_arrow_table_collection_sequence(self,seq_collection,expect_seq_collection):
data = seq_collection
df = pd.DataFrame(data)
tsd = Time_Series_Data(data,'time')
tsc = Time_Series_Data_Collection(tsd,'time','category')
test = to_arrow_table(tsc,False,False,'ignore').to_pandas()
pd.testing.assert_frame_equal(df,test,False)
test = to_arrow_table(tsc,True,True,'ignore').to_pandas()
full = pd.DataFrame(expect_seq_collection)
print(test)
print(full)
test = test.reindex(sorted(df.columns), axis=1)
full = full.reindex(sorted(df.columns), axis=1)
pd.testing.assert_frame_equal(test,full,False)
###
def record_batch_to_pandas(self,batchList):
df = None
for i in batchList:
if df is None:
df = i.to_pandas()
continue
df = df.append(i.to_pandas(),ignore_index = True)
return df
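# Helper: stitch a list of Arrow RecordBatches (max_chunksize=1 yields one row per batch)
# back into a single pandas DataFrame so results can be compared with assert_frame_equal.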
def test_to_arrow_batch_single(self,dictList_single,expect_single_expandTime):
data = dictList_single
df = pd.DataFrame(data)
expandTime = pd.DataFrame(expect_single_expandTime)
tsd = Time_Series_Data(data,'time')
testData = to_arrow_record_batch(
tsd,
expandCategory= None,
expandTime=False,
preprocessType= None,
max_chunksize = 1
)
testData = self.record_batch_to_pandas(testData)
pd.testing.assert_frame_equal(testData,df,check_dtype=False)
testData = to_arrow_record_batch(
tsd,
expandCategory= None,
expandTime=True,
preprocessType= None,
max_chunksize = 1
)
testData = self.record_batch_to_pandas(testData)
pd.testing.assert_frame_equal(testData,expandTime,check_dtype=False)
def test_to_arrow_batch_collection_expandTime(self,dictList_collection,expect_collection_expandTime):
data = dictList_collection
expandTime_pad = pd.DataFrame(expect_collection_expandTime['pad'])
expandTime_remove = pd.DataFrame(expect_collection_expandTime['remove'])
tsd = Time_Series_Data(data,'time')
tsc = Time_Series_Data_Collection(tsd,'time','category')
testData = to_arrow_record_batch(
tsc,
expandCategory= False,
expandTime=True,
preprocessType= 'pad',
max_chunksize=1
)
testData = self.record_batch_to_pandas(testData)
pd.testing.assert_frame_equal(testData,expandTime_pad,check_dtype=False)
testData = to_arrow_record_batch(
tsc,
expandCategory= False,
expandTime=True,
preprocessType= 'remove',
max_chunksize=1
)
testData = self.record_batch_to_pandas(testData)
pd.testing.assert_frame_equal(testData,expandTime_remove,check_dtype=False)
with pytest.raises(ValueError):
timeSeries = to_pandas(tsc,False,True,'ignore')
def test_to_arrow_batch_collection_expandCategory(self,dictList_collection,expect_collection_expandCategory):
data = dictList_collection
expandTime_pad = pd.DataFrame(expect_collection_expandCategory['pad'])
expandTime_remove = pd.DataFrame(expect_collection_expandCategory['remove'])
tsd = Time_Series_Data(data,'time')
tsc = Time_Series_Data_Collection(tsd,'time','category')
testData = to_arrow_record_batch(
tsc,
expandCategory= True,
expandTime=False,
preprocessType= 'pad',
max_chunksize=1
)
testData = self.record_batch_to_pandas(testData)
pd.testing.assert_frame_equal(testData,expandTime_pad,check_dtype=False)
testData = to_arrow_record_batch(
tsc,
expandCategory= True,
expandTime=False,
preprocessType= 'remove',
max_chunksize=1
)
testData = self.record_batch_to_pandas(testData)
pd.testing.assert_frame_equal(testData,expandTime_remove,check_dtype=False)
with pytest.raises(ValueError):
timeSeries = to_pandas(tsc,True,False,'ignore')
def test_to_arrow_batch_collection_expandFull(self,dictList_collection,expect_collection_expandFull):
data = dictList_collection
expandTime_pad = pd.DataFrame(expect_collection_expandFull['pad'])
expandTime_remove = pd.DataFrame(expect_collection_expandFull['remove'])
tsd = Time_Series_Data(data,'time')
tsc = Time_Series_Data_Collection(tsd,'time','category')
testData = to_arrow_record_batch(
tsc,
expandCategory= True,
expandTime=True,
preprocessType= 'pad',
max_chunksize=1
)
testData = self.record_batch_to_pandas(testData)
pd.testing.assert_frame_equal(testData,expandTime_pad,check_dtype=False)
testData = to_arrow_record_batch(
tsc,
expandCategory= True,
expandTime=True,
preprocessType= 'remove',
max_chunksize=1
)
testData = self.record_batch_to_pandas(testData)
pd.testing.assert_frame_equal(testData,expandTime_remove,check_dtype=False)
def test_to_arrow_batch_collection_noExpand(self,dictList_collection,expect_collection_noExpand):
data = dictList_collection
expandTime_ignore = pd.DataFrame(expect_collection_noExpand['ignore'])
expandTime_pad = pd.DataFrame(expect_collection_noExpand['pad'])
expandTime_remove = pd.DataFrame(expect_collection_noExpand['remove'])
tsd = Time_Series_Data(data,'time')
tsc = Time_Series_Data_Collection(tsd,'time','category')
testData = to_arrow_record_batch(
tsc,
expandCategory= False,
expandTime=False,
preprocessType= 'pad',
max_chunksize=1
)
testData = self.record_batch_to_pandas(testData)
pd.testing.assert_frame_equal(testData,expandTime_pad,check_dtype=False)
testData = to_arrow_record_batch(
tsc,
expandCategory= False,
expandTime=False,
preprocessType= 'remove',
max_chunksize=1
)
testData = self.record_batch_to_pandas(testData)
pd.testing.assert_frame_equal(testData,expandTime_remove,check_dtype=False)
testData = to_arrow_record_batch(
tsc,
expandCategory= False,
expandTime=False,
preprocessType= 'ignore',
max_chunksize=1
)
testData = self.record_batch_to_pandas(testData)
pd.testing.assert_frame_equal(testData,expandTime_ignore,check_dtype=False)
def test_to_arrow_batch_seperateLabels_single(self,dictList_single,expect_single_seperateLabel):
data = dictList_single
expectedX, expectedY = expect_single_seperateLabel
expectedX = pd.DataFrame(expectedX)
expectedY = pd.DataFrame(expectedY)
tsd = Time_Series_Data(data,'time')
tsd.set_labels([1,2],'data_label')
x, y = to_arrow_record_batch(tsd,1,False,False,'ignore',True)
x = self.record_batch_to_pandas(x)
y = self.record_batch_to_pandas(y)
print(x)
print(y)
pd.testing.assert_frame_equal(x,expectedX,check_dtype=False)
pd.testing.assert_frame_equal(y,expectedY,check_dtype=False)
def test_to_arrow_table_seperateLabels_collection(self,dictList_collection,expect_collection_seperateLabel):
data = dictList_collection
expectedX, expectedY = expect_collection_seperateLabel
expectedX = pd.DataFrame(expectedX)
expectedY = pd.DataFrame(expectedY)
tsd = Time_Series_Data(data,'time')
tsd = tsd.set_labels([1,2,1,2],'data_label')
tsc = Time_Series_Data_Collection(tsd,'time','category')
x, y = to_arrow_record_batch(tsc,1,False,False,'ignore',True)
x = self.record_batch_to_pandas(x)
y = self.record_batch_to_pandas(y)
print(y)
pd.testing.assert_frame_equal(x,expectedX,check_dtype=False)
pd.testing.assert_frame_equal(y,expectedY,check_dtype=False)
class Test_Parquet_IO:
def test_from_parquet_single(self,dictList_single):
data = dictList_single
df = pd.DataFrame(dictList_single)
table = pa.Table.from_pandas(df)
pq.write_table(table,'test.parquet')
testData = from_parquet('test.parquet','time',None)
tsd = Time_Series_Data(data,'time')
assert tsd == testData
os.remove('test.parquet')
def test_from_parquet_collection(self,dictList_collection):
data = dictList_collection
df = pd.DataFrame(dictList_collection)
tsd = Time_Series_Data(data,'time')
tsc = Time_Series_Data_Collection(tsd,'time','category')
table = pa.Table.from_pandas(df)
pq.write_table(table,'test_collection.parquet')
testData = from_parquet('test_collection.parquet','time','category')
assert tsc == testData
os.remove('test_collection.parquet')
###########
def test_to_parquet_single(self,dictList_single,expect_single_expandTime):
data = dictList_single
df = pd.DataFrame(data)
expandTime = pd.DataFrame(expect_single_expandTime)
tsd = Time_Series_Data(data,'time')
to_parquet(
'test.parquet',
tsd,
expandCategory= None,
expandTime=False,
preprocessType= None
)
testData = pq.read_table('test.parquet').to_pandas()
pd.testing.assert_frame_equal(testData,df,check_dtype=False)
to_parquet(
'test.parquet',
tsd,
expandCategory= None,
expandTime=True,
preprocessType= None
)
testData = pq.read_table('test.parquet').to_pandas()
pd.testing.assert_frame_equal(testData,expandTime,check_dtype=False)
os.remove('test.parquet')
def test_to_parquet_collection_expandTime(self,dictList_collection,expect_collection_expandTime):
data = dictList_collection
expandTime_pad = pd.DataFrame(expect_collection_expandTime['pad'])
expandTime_remove = pd.DataFrame(expect_collection_expandTime['remove'])
tsd = Time_Series_Data(data,'time')
tsc = Time_Series_Data_Collection(tsd,'time','category')
testData = to_parquet(
'test.parquet',
tsc,
expandCategory= False,
expandTime=True,
preprocessType= 'pad'
)
testData = pq.read_table('test.parquet').to_pandas()
pd.testing.assert_frame_equal(testData,expandTime_pad,check_dtype=False)
testData = to_parquet(
'test.parquet',
tsc,
expandCategory= False,
expandTime=True,
preprocessType= 'remove'
)
testData = pq.read_table('test.parquet').to_pandas()
pd.testing.assert_frame_equal(testData,expandTime_remove,check_dtype=False)
with pytest.raises(ValueError):
timeSeries = to_parquet('test.parquet',tsc,False,True,'ignore')
os.remove('test.parquet')
def test_to_parquet_collection_expandCategory(self,dictList_collection,expect_collection_expandCategory):
data = dictList_collection
expandTime_pad = pd.DataFrame(expect_collection_expandCategory['pad'])
expandTime_remove = pd.DataFrame(expect_collection_expandCategory['remove'])
tsd = Time_Series_Data(data,'time')
tsc = Time_Series_Data_Collection(tsd,'time','category')
testData = to_parquet(
'test.parquet',
tsc,
expandCategory= True,
expandTime=False,
preprocessType= 'pad'
)
testData = pq.read_table('test.parquet').to_pandas()
pd.testing.assert_frame_equal(testData,expandTime_pad,check_dtype=False)
testData = to_parquet(
'test.parquet',
tsc,
expandCategory= True,
expandTime=False,
preprocessType= 'remove'
)
testData = pq.read_table('test.parquet').to_pandas()
pd.testing.assert_frame_equal(testData,expandTime_remove,check_dtype=False)
with pytest.raises(ValueError):
timeSeries = to_parquet('test.parquet',tsc,True,False,'ignore')
os.remove('test.parquet')
def test_to_parquet_collection_expandFull(self,dictList_collection,expect_collection_expandFull):
data = dictList_collection
expandTime_pad = pd.DataFrame(expect_collection_expandFull['pad'])
expandTime_remove = | pd.DataFrame(expect_collection_expandFull['remove']) | pandas.DataFrame |
import streamlit as st
import altair as alt
import pandas as pd
import numpy as np
import requests
import matplotlib.pyplot as plt
import plotly.express as px
from pathlib import Path
from functools import lru_cache
import statsmodels.formula.api as smf
from datetime import datetime
import pandasdmx as pdmx
plt.style.use(
"https://github.com/aeturrell/coding-for-economists/raw/main/plot_style.txt"
)
@st.cache
def prep_gdp_output_codes():
hdf = pd.read_excel(Path("data", "uk_gdp_output_hierarchy.xlsx"), header=None)
hdf = hdf.dropna(how="all", axis=1)
for i in range(3):
hdf.iloc[i, :] = hdf.iloc[i, :].fillna(method="ffill")
hdf = hdf.T
hdf["total"] = hdf[3].str.contains("Total")
hdf = hdf.query("total==False")
hdf = hdf.drop("total", axis=1)
for col in range(5):
hdf[col] = hdf[col].str.lstrip().str.rstrip()
hdf = hdf.rename(columns={4: "section", 5: "code"})
return hdf
def get_uk_regional_gdp():
# current year
latest_year = datetime.now().year - 1
# Tell pdmx we want OECD data
oecd = pdmx.Request("OECD")
# Set out everything about the request in the format specified by the OECD API
data = oecd.data(
resource_id="REGION_ECONOM",
key="1+2.UKC.SNA_2008.GDP.REG+CURR_PR.ALL.2017+2018+2019+2020/all?",
).to_pandas()
# example that works:
"https://stats.oecd.org/restsdmx/sdmx.ashx/GetData/REGION_ECONOM/1+2.GBR+UKC+UKC11+UKC12.SNA_2008.GDP.REG+CURR_PR+USD_PPP+REAL_PR+REAL_PPP+PC+PC_CURR_PR+PC_USD_PPP+PC_REAL_PR+PC_REAL_PPP.ALL.2001+2002+2003+2004+2005+2006+2007+2008+2009+2010+2011+2012+2013+2014+2015+2016+2017+2018+2019+2020/all?"
df = pd.DataFrame(data).reset_index()
df.head()
@st.cache
def ons_blue_book_data(code):
data = grab_ONS_time_series_data("BB", code)
xf = pd.DataFrame(pd.json_normalize(data["years"]))
xf = xf[["year", "value"]]
xf["year"] = xf["year"].astype(int)
xf["value"] = xf["value"].astype(float)
xf["title"] = data["description"]["title"]
xf["code"] = code
xf = pd.DataFrame(xf.loc[xf["year"].argmax(), :]).T
return xf
@st.cache
@lru_cache(maxsize=32)
def ons_get_gdp_output_with_breakdown():
df = prep_gdp_output_codes()
xf = pd.DataFrame()
for code in df["code"].unique():
xf = pd.concat([xf, ons_blue_book_data(code)], axis=0)
df = pd.merge(df, xf, on=["code"], how="inner")
# for later treemap use, only use highest level name if hierarchy has
# missing levels
df.loc[(df[1] == df[2]) & (df[3] == df[2]) & (df[3] == df[0]), [3, 2, 1]] = None
df.loc[(df[1] == df[2]) & (df[3] == df[2]), [3, 2]] = None
df.loc[(df[1] == df[2]), [2]] = None
# now, any nones with non-none children must be swapped
df.loc[(df[2].isnull()) & (~df[3].isnull()), [2, 3]] = df.loc[
(df[2].isnull()) & (~df[3].isnull()), [3, 2]
].values
df.loc[(df[0] == df[1]), [1]] = df.loc[(df[0] == df[1]), [2]].values
df.loc[(df[1] == df[2]), [2]] = df.loc[(df[1] == df[2]), [3]].values
# another round of this
df.loc[(df[1] == df[2]) & (df[3] == df[2]) & (df[3] == df[0]), [3, 2, 1]] = None
df.loc[(df[1] == df[2]) & (df[3] == df[2]), [3, 2]] = None
df.loc[(df[1] == df[2]), [2]] = None
df.loc[(df[3] == df[2]), [3]] = None
return df
@st.cache
def grab_ONS_time_series_data(dataset_id, timeseries_id):
"""
This function grabs specified time series from the ONS API.
"""
api_endpoint = "https://api.ons.gov.uk/"
api_params = {"dataset": dataset_id, "timeseries": timeseries_id}
url = (
api_endpoint
+ "/".join(
[x + "/" + y for x, y in zip(api_params.keys(), api_params.values())][::-1]
)
+ "/data"
)
return requests.get(url).json()
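# Illustration (series codes taken from the plotting helpers below):
# grab_ONS_time_series_data("QNA", "L2KQ") builds the request URL
# https://api.ons.gov.uk/timeseries/L2KQ/dataset/QNA/data and returns the decoded JSON response.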
def ons_clean_qna_data(data):
if data["quarters"] != []:
df = pd.DataFrame(pd.json_normalize(data["quarters"]))
df["date"] = (
pd.to_datetime(
df["date"].apply(lambda x: x[:4] + "-" + str(int(x[-1]) * 3)),
format="%Y-%m",
)
+ pd.tseries.offsets.MonthEnd()
)
else:
df = pd.DataFrame(pd.json_normalize(data["months"]))
df["date"] = (
pd.to_datetime(df["date"], format="%Y %b") + pd.tseries.offsets.MonthEnd()
)
df = df.drop([x for x in df.columns if x not in ["date", "value"]], axis=1)
return df
@lru_cache(maxsize=32)
def ons_qna_data(dataset_id, timeseries_id):
data = grab_ONS_time_series_data(dataset_id, timeseries_id)
desc_text = data["description"]["title"]
df = ons_clean_qna_data(data)
return df, desc_text
def visualize_line(df, x_axis, y_axis, scale, widths, ylabel, title):
height = 350
graph = (
alt.Chart(df)
.mark_line(strokeWidth=4)
.encode(
x=x_axis + ":T",
y=alt.Y(y_axis + ":Q", scale=scale, title=ylabel),
tooltip=[y_axis],
)
.properties(width=widths, title=title, height=height)
.interactive()
)
st.write(graph)
def plot_indices_of_output():
# Grab the three UK time series
indices_dicts = {"Production": "L2KQ", "Construction": "L2N8", "Services": "L2NC"}
df = pd.DataFrame()
for key, value in indices_dicts.items():
xf, x_text = ons_qna_data("QNA", value)
xf["Name"] = key
df = pd.concat([df, xf], axis=0)
graph = (
alt.Chart(df)
.mark_line(strokeWidth=4)
.encode(
x=alt.X("date:T"),
y="value:Q",
color=alt.Color("Name:N", legend=None),
tooltip=["value"],
)
.properties(
width=200,
height=200,
)
.facet(column="Name:N")
.interactive()
)
st.write(graph)
def plot_labour_market_indicators():
# The labour market. TODO change to monthly LMS (series codes are same)
indices_dicts_lms = {
"Employment": "LF24",
"Unemployment": "MGSX",
"Inactivity": "LF2S",
}
df_lms = pd.DataFrame()
for key, value in indices_dicts_lms.items():
xf, x_text = ons_qna_data("LMS", value)
xf["Name"] = key
df_lms = pd.concat([df_lms, xf], axis=0)
graph_lms = (
alt.Chart(df_lms)
.mark_line(strokeWidth=4)
.encode(
x=alt.X("date:T", title=""),
y=alt.Y("value:Q", title="%"),
color="Name:N",
tooltip=["value"],
)
.properties(
title="Labour market indicators",
width=600,
)
.interactive()
)
st.write(graph_lms)
def plot_beveridge_curve():
indices_dicts_lms = {"Vacancies": "AP2Y", "Unemployment": "MGSX", "Active": "LF2K"}
df = pd.DataFrame()
for key, value in indices_dicts_lms.items():
xf, x_text = ons_qna_data("LMS", value)
xf["Name"] = key
df = pd.concat([df, xf], axis=0)
df["value"] = df["value"].astype(np.double)
df = pd.pivot(df, index="date", columns="Name")
df.columns = df.columns.droplevel()
df = df.dropna()
df["Date"] = df.index
df["Vacancies"] = 100 * df["Vacancies"].divide(df["Active"])
max_u = df["Unemployment"].argmax()
# Need to divide vacs by labour force size
# Need to label most extremal u value
fig, ax = plt.subplots()
quivx = -df["Unemployment"].diff(-1)
quivy = -df["Vacancies"].diff(-1)
# This connects the points
ax.quiver(
df["Unemployment"],
df["Vacancies"],
quivx,
quivy,
scale_units="xy",
angles="xy",
scale=1,
width=0.006,
alpha=0.3,
)
ax.scatter(
df["Unemployment"],
df["Vacancies"],
marker="o",
s=35,
edgecolor="black",
linewidth=0.2,
alpha=0.9,
)
for j in [0, max_u, -1]:
ax.annotate(
f'{df["Date"].iloc[j].year} Q{df["Date"].iloc[j].quarter}',
xy=(df[["Unemployment", "Vacancies"]].iloc[j].tolist()),
xycoords="data",
xytext=(20, 20),
textcoords="offset points",
arrowprops=dict(
arrowstyle="->", connectionstyle="angle3,angleA=0,angleB=-90"
),
)
ax.set_xlabel("Unemployment rate, %")
ax.set_ylabel("Vacancy rate, %")
ax.grid(which="major", axis="both", lw=0.2)
plt.tight_layout()
st.pyplot(fig)
def plot_phillips_curve():
indices_dicts = {
"Average weekly earnings": ("LMS", "KAC3"),
"Unemployment": ("LMS", "MGSX"),
}
df = pd.DataFrame()
for key, value in indices_dicts.items():
xf, x_text = ons_qna_data(*value)
xf["Name"] = key
df = pd.concat([df, xf], axis=0)
df["value"] = df["value"].astype(np.double)
df = pd.pivot(df, index="date", columns="Name")
df.columns = df.columns.droplevel()
df = df.dropna()
df = df.groupby(pd.Grouper(freq="Y")).mean()
# create year groupings
df["group"] = pd.cut(
df.index,
bins=[
df.index.min() - | pd.offsets.YearEnd() | pandas.offsets.YearEnd |
import matplotlib.pyplot as plt
from sklearn.linear_model import LinearRegression
from sklearn.preprocessing import PolynomialFeatures
import numpy as np
from pylab import rcParams
##########################################################################################
# Designed and developed by <NAME>
# Date : 11 Oct 2018
# Function: batsman4s
# This function plots the number of 4s vs the runs scored in the innings by the batsman
#
###########################################################################################
def batsman4s(file, name="A Hookshot"):
'''
Plot the number of 4s against the runs scored by the batsman
Description
This function plots the number of 4s against the total runs scored by batsman. A 2nd order polynomial regression curve is also plotted. The predicted number of 4s for 50 runs and 100 runs scored is also plotted
Usage
batsman4s(file, name="A Hookshot")
Arguments
file
This is the <batsman>.csv file obtained with an initial getPlayerData()
name
Name of the batsman
Note
Maintainer: <NAME> <<EMAIL>>
Author(s)
<NAME>
References
http://www.espncricinfo.com/ci/content/stats/index.html
https://gigadom.wordpress.com/
See Also
batsman6s
Examples
# Get or use the <batsman>.csv obtained with getPlayerData()
tendulkar = getPlayerData(35320,dir="../",file="tendulkar.csv",type="batting", homeOrAway=[1,2],result=[1,2,4])
'''
# Clean the batsman file and create a complete data frame
df = clean(file)
# Set figure size
rcParams['figure.figsize'] = 10,6
# Get number of 4s and runs scored
x4s = pd.to_numeric(df['4s'])
runs = pd.to_numeric(df['Runs'])
atitle = name + "-" + "Runs scored vs No of 4s"
# Plot no of 4s and a 2nd order curve fit
plt.scatter(runs, x4s, alpha=0.5)
plt.xlabel('Runs')
plt.ylabel('4s')
plt.title(atitle)
# Create a polynomial of degree 2
poly = PolynomialFeatures(degree=2)
runsPoly = poly.fit_transform(runs.values.reshape(-1,1))
linreg = LinearRegression().fit(runsPoly,x4s)
plt.plot(runs,linreg.predict(runsPoly),'-r')
# Predict the number of 4s for 50 runs
b=poly.fit_transform(np.array([[50]]))
c=linreg.predict(b)
plt.axhline(y=c, color='b', linestyle=':')
plt.axvline(x=50, color='b', linestyle=':')
# Predict the number of 4s for 100 runs
b=poly.fit_transform(np.array([[100]]))
c=linreg.predict(b)
plt.axhline(y=c, color='b', linestyle=':')
plt.axvline(x=100, color='b', linestyle=':')
plt.text(180, 0.5,'Data source-Courtesy:ESPN Cricinfo',
horizontalalignment='center',
verticalalignment='center',
)
plt.show()
plt.gcf().clear()
return
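# Example call, mirroring the docstring above (assumes tendulkar.csv was produced by getPlayerData()):
# batsman4s("tendulkar.csv", "Tendulkar")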
import matplotlib.pyplot as plt
import seaborn as sns
from pylab import rcParams
##########################################################################################
# Designed and developed by <NAME>
# Date : 13 Oct 2018
# Function: batsman6s
# This function plots the number of 6s vs the runs scored in the innings by the batsman
#
###########################################################################################
def batsman6s(file, name="A Hookshot") :
'''
Description
Compute and plot the number of 6s in the total runs scored by batsman
Usage
batsman6s(file, name="A Hookshot")
Arguments
file
This is the <batsman>.csv file obtained with an initial getPlayerData()
name
Name of the batsman
Examples
# Get or use the <batsman>.csv obtained with getPlayerData()
# tendulkar = getPlayerData(35320,file="tendulkar.csv",type="batting", homeOrAway=[1,2],result=[1,2,4])
'''
x6s = []
# Set figure size
rcParams['figure.figsize'] = 10,6
# Clean the batsman file and create a complete data frame
df = clean (file)
# Remove all rows where 6s are 0
a= df['6s'] !=0
b= df[a]
x6s=b['6s'].astype(int)
runs=pd.to_numeric(b['Runs'])
# Plot the 6s as a boxplot
atitle =name + "-" + "Runs scored vs No of 6s"
df1=pd.concat([runs,x6s],axis=1)
fig = sns.boxplot(x="6s", y="Runs", data=df1)
plt.title(atitle)
plt.text(2.2, 10,'Data source-Courtesy:ESPN Cricinfo',
horizontalalignment='center',
verticalalignment='center',
)
plt.show()
plt.gcf().clear()
return
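# Example call, mirroring the docstring above:
# batsman6s("tendulkar.csv", "Tendulkar")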
import matplotlib.pyplot as plt
import seaborn as sns
from pylab import rcParams
##########################################################################################
# Designed and developed by <NAME>
# Date : 14 Oct 2018
# Function: batsmanAvgRunsGround
# This function plots the average runs scored by batsman at the ground. The xlabels indicate
# the number of innings at ground
#
###########################################################################################
def batsmanAvgRunsGround(file, name="A Latecut"):
'''
Description
This function computes the average runs scored on different pitches and also indicates the number of innings played at these venues
Usage
batsmanAvgRunsGround(file, name = "A Latecut")
Arguments
file
This is the <batsman>.csv file obtained with an initial getPlayerData()
name
Name of the batsman
Details
More details can be found in my short video tutorial in Youtube https://www.youtube.com/watch?v=q9uMPFVsXsI
Value
None
Note
Maintainer: <NAME> <<EMAIL>>
Author(s)
<NAME>
References
http://www.espncricinfo.com/ci/content/stats/index.html
https://gigadom.wordpress.com/
See Also
batsmanDismissals, batsmanMovingAverage, batsmanPerfBoxHist
Examples
# Get or use the <batsman>.csv obtained with getPlayerData()
##tendulkar = getPlayerData(35320,file="tendulkar.csv",type="batting", homeOrAway=[1,2],result=[1,2,4])
'''
batsman = clean(file)
rcParams['figure.figsize'] = 10,6
batsman['Runs']= | pd.to_numeric(batsman['Runs']) | pandas.to_numeric |
# -*- coding: utf-8 -*-
"""
Created on Sat Jun 13 21:23:17 2020
@author: kakdemi
"""
from sklearn import linear_model
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
#########################################################
# Weight by zone
#########################################################
#importing LMP data
Daily_sim = pd.read_excel('validation_prices.xlsx', sheet_name='Daily_sim')
Hourly_sim = pd.read_excel('validation_prices.xlsx', sheet_name='Hourly_sim')
Hourly_hist = | pd.read_excel('validation_prices.xlsx', sheet_name='hist_hourly') | pandas.read_excel |
#!/usr/bin/env python3
import base64
import bson
import json
import multiprocessing as mp
import os
import pandas as pd
import shutil
import socket
import subprocess
import time
import tornado
import tornado.escape
import tornado.httpserver
import tornado.ioloop
import tornado.options
import tornado.web
import backend
import env
import model as Model
import visualizer as Visualizer
import workflow as Workflow
def list_dir_recursive(path, relpath_start=''):
files = [os.path.join(dir, f) for (dir, subdirs, filenames) in os.walk(path) for f in filenames]
files = [os.path.relpath(f, start=relpath_start) for f in files]
files.sort()
return files
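# Illustration (hypothetical directory layout): for a workflow directory containing
# input/data.csv and output/results.txt, list_dir_recursive(workflow_dir, relpath_start=workflow_dir)
# would return ['input/data.csv', 'output/results.txt'].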
def message(status, message):
return {
'status': status,
'message': message
}
class WorkflowQueryHandler(tornado.web.RequestHandler):
async def get(self):
page = int(self.get_query_argument('page', 0))
page_size = int(self.get_query_argument('page_size', 100))
db = self.settings['db']
workflows = await db.workflow_query(page, page_size)
self.set_status(200)
self.set_header('content-type', 'application/json')
self.write(tornado.escape.json_encode(workflows))
class WorkflowCreateHandler(tornado.web.RequestHandler):
REQUIRED_KEYS = set([
'pipeline'
])
DEFAULTS = {
'name': '',
'params_format': '',
'params_data': '',
'profiles': 'standard',
'revision': 'master',
'input_dir': 'input',
'output_dir': 'output',
'attempts': 0
}
def get(self):
workflow = {**self.DEFAULTS, **{ '_id': '0' }}
self.set_status(200)
self.set_header('content-type', 'application/json')
self.write(tornado.escape.json_encode(workflow))
async def post(self):
db = self.settings['db']
# make sure request body is valid
try:
data = tornado.escape.json_decode(self.request.body)
missing_keys = self.REQUIRED_KEYS - data.keys()
except json.JSONDecodeError:
self.set_status(422)
self.write(message(422, 'Ill-formatted JSON'))
return
if missing_keys:
self.set_status(400)
self.write(message(400, 'Missing required field(s): %s' % list(missing_keys)))
return
# create workflow
workflow = {**self.DEFAULTS, **data, **{ 'status': 'nascent' }}
workflow['_id'] = str(bson.ObjectId())
# append creation timestamp to workflow
workflow['date_created'] = int(time.time() * 1000)
# transform pipeline name to lowercase
workflow['pipeline'] = workflow['pipeline'].lower()
# save workflow
await db.workflow_create(workflow)
# create workflow directory
workflow_dir = os.path.join(env.WORKFLOWS_DIR, workflow['_id'])
os.makedirs(workflow_dir)
self.set_status(200)
self.set_header('content-type', 'application/json')
self.write(tornado.escape.json_encode({ '_id': workflow['_id'] }))
class WorkflowEditHandler(tornado.web.RequestHandler):
REQUIRED_KEYS = set([
'pipeline'
])
DEFAULTS = {
'name': '',
'params_format': '',
'params_data': '',
'profiles': 'standard',
'revision': 'master',
'input_dir': 'input',
'output_dir': 'output',
'attempts': 0
}
async def get(self, id):
db = self.settings['db']
try:
# get workflow
workflow = await db.workflow_get(id)
# append list of input files
workflow_dir = os.path.join(env.WORKFLOWS_DIR, id)
input_dir = os.path.join(workflow_dir, workflow['input_dir'])
output_dir = os.path.join(workflow_dir, workflow['output_dir'])
if os.path.exists(input_dir):
workflow['input_files'] = list_dir_recursive(input_dir, relpath_start=workflow_dir)
else:
workflow['input_files'] = []
# append list of output files
if os.path.exists(output_dir):
workflow['output_files'] = list_dir_recursive(output_dir, relpath_start=workflow_dir)
else:
workflow['output_files'] = []
# append status of output data
workflow['output_data'] = os.path.exists('%s/%s-output.tar.gz' % (workflow_dir, id))
self.set_status(200)
self.set_header('content-type', 'application/json')
self.write(tornado.escape.json_encode(workflow))
except:
self.set_status(404)
self.write(message(404, 'Failed to get workflow \"%s\"' % id))
async def post(self, id):
db = self.settings['db']
# make sure request body is valid
try:
data = tornado.escape.json_decode(self.request.body)
missing_keys = self.REQUIRED_KEYS - data.keys()
except json.JSONDecodeError:
self.set_status(422)
self.write(message(422, 'Ill-formatted JSON'))
return
if missing_keys:
self.set_status(400)
self.write(message(400, 'Missing required field(s): %s' % list(missing_keys)))
return
try:
# update workflow from request body
workflow = await db.workflow_get(id)
workflow = {**self.DEFAULTS, **workflow, **data}
# transform pipeline name to lowercase
workflow['pipeline'] = workflow['pipeline'].lower()
# save workflow
await db.workflow_update(id, workflow)
self.set_status(200)
self.set_header('content-type', 'application/json')
self.write(tornado.escape.json_encode({ '_id': id }))
except:
self.set_status(404)
self.write(message(404, 'Failed to update workflow \"%s\"' % id))
async def delete(self, id):
db = self.settings['db']
try:
# delete workflow
await db.workflow_delete(id)
# delete workflow directory
shutil.rmtree(os.path.join(env.WORKFLOWS_DIR, id), ignore_errors=True)
self.set_status(200)
self.write(message(200, 'Workflow \"%s\" was deleted' % id))
except:
self.set_status(404)
self.write(message(404, 'Failed to delete workflow \"%s\"' % id))
class WorkflowUploadHandler(tornado.web.RequestHandler):
async def post(self, id):
db = self.settings['db']
# make sure request body contains files
files = self.request.files
if not files:
self.set_status(400)
self.write(message(400, 'No files were uploaded'))
return
# get workflow
workflow = await db.workflow_get(id)
# initialize input directory
input_dir = os.path.join(env.WORKFLOWS_DIR, id, workflow['input_dir'])
os.makedirs(input_dir, exist_ok=True)
# save uploaded files to input directory
filenames = []
for f_list in files.values():
for f_arg in f_list:
filename, body = f_arg['filename'], f_arg['body']
with open(os.path.join(input_dir, filename), 'wb') as f:
f.write(body)
filenames.append(filename)
self.set_status(200)
self.write(message(200, 'File \"%s\" was uploaded for workflow \"%s\" successfully' % (filenames, id)))
class WorkflowLaunchHandler(tornado.web.RequestHandler):
resume = False
async def post(self, id):
db = self.settings['db']
try:
# get workflow
workflow = await db.workflow_get(id)
# make sure workflow is not already running
if workflow['status'] == 'running':
self.set_status(400)
self.write(message(400, 'Workflow \"%s\" is already running' % id))
return
# copy nextflow.config from input directory if it exists
workflow_dir = os.path.join(env.WORKFLOWS_DIR, id)
input_dir = os.path.join(workflow_dir, workflow['input_dir'])
src = os.path.join(input_dir, 'nextflow.config')
dst = os.path.join(workflow_dir, 'nextflow.config')
if os.path.exists(dst):
os.remove(dst)
if os.path.exists(src):
shutil.copyfile(src, dst)
# append additional settings to nextflow.config
with open(dst, 'a') as f:
weblog_url = 'http://%s:%d/api/tasks' % (socket.gethostbyname(socket.gethostname()), tornado.options.options.port)
f.write('weblog { enabled = true\n url = \"%s\" }\n' % (weblog_url))
f.write('k8s { launchDir = \"%s\" }\n' % (workflow_dir))
# update workflow status
workflow['status'] = 'running'
workflow['date_submitted'] = int(time.time() * 1000)
workflow['attempts'] += 1
await db.workflow_update(id, workflow)
# launch workflow as a child process
p = mp.Process(target=Workflow.launch, args=(db, workflow, self.resume))
p.start()
self.set_status(200)
self.write(message(200, 'Workflow \"%s\" was launched' % id))
except:
self.set_status(404)
self.write(message(404, 'Failed to launch workflow \"%s\"' % id))
class WorkflowResumeHandler(WorkflowLaunchHandler):
resume = True
class WorkflowCancelHandler(tornado.web.RequestHandler):
async def post(self, id):
db = self.settings['db']
try:
# get workflow
workflow = await db.workflow_get(id)
workflow = {**{ 'pid': -1 }, **workflow}
# cancel workflow
Workflow.cancel(workflow)
# update workflow status
workflow['status'] = 'failed'
workflow['pid'] = -1
await db.workflow_update(id, workflow)
self.set_status(200)
self.write(message(200, 'Workflow \"%s\" was canceled' % id))
except:
self.set_status(404)
self.write(message(404, 'Failed to cancel workflow \"%s\"' % id))
class WorkflowLogHandler(tornado.web.RequestHandler):
async def get(self, id):
db = self.settings['db']
try:
# get workflow
workflow = await db.workflow_get(id)
# append log if it exists
log_file = os.path.join(env.WORKFLOWS_DIR, id, '.workflow.log')
if os.path.exists(log_file):
f = open(log_file)
log = ''.join(f.readlines())
else:
log = ''
# construct response data
data = {
'_id': id,
'status': workflow['status'],
'attempts': workflow['attempts'],
'log': log
}
self.set_status(200)
self.set_header('content-type', 'application/json')
self.set_header('cache-control', 'no-store, no-cache, must-revalidate, max-age=0')
self.write(tornado.escape.json_encode(data))
except:
self.set_status(404)
self.write(message(404, 'Failed to fetch log for workflow \"%s\"' % id))
class WorkflowDownloadHandler(tornado.web.StaticFileHandler):
def parse_url_path(self, id):
# provide output file if path is specified, otherwise output data archive
filename_default = '%s-output.tar.gz' % id
filename = self.get_query_argument('path', filename_default)
self.set_header('content-disposition', 'attachment; filename=\"%s\"' % filename)
return os.path.join(id, filename)
class TaskQueryHandler(tornado.web.RequestHandler):
async def get(self):
page = int(self.get_query_argument('page', 0))
page_size = int(self.get_query_argument('page_size', 100))
db = self.settings['db']
tasks = await db.task_query(page, page_size)
self.set_status(200)
self.set_header('content-type', 'application/json')
self.write(tornado.escape.json_encode(tasks))
async def post(self):
db = self.settings['db']
# make sure request body is valid
try:
task = tornado.escape.json_decode(self.request.body)
except json.JSONDecodeError:
self.set_status(422)
self.write(message(422, 'Ill-formatted JSON'))
return
try:
# append id to task
task['_id'] = str(bson.ObjectId())
# extract input features for task
if task['event'] == 'process_completed':
# load execution log
filenames = ['.command.log', '.command.out', '.command.err']
filenames = [os.path.join(task['trace']['workdir'], filename) for filename in filenames]
files = [open(filename) for filename in filenames if os.path.exists(filename)]
lines = [line.strip() for f in files for line in f]
# parse input features from trace directives
PREFIX = '#TRACE'
lines = [line[len(PREFIX):] for line in lines if line.startswith(PREFIX)]
items = [line.split('=') for line in lines]
conditions = {k.strip(): v.strip() for k, v in items}
# append input features to task trace
task['trace'] = {**task['trace'], **conditions}
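# Illustration of the directive format parsed above: a line such as
# "#TRACE n_rows=1000" in .command.log becomes conditions = {'n_rows': '1000'}.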
# save task
await db.task_create(task)
# update workflow status on completed event
if task['event'] == 'completed':
# get workflow
workflow_id = task['runName'].split('-')[1]
workflow = await db.workflow_get(workflow_id)
# update workflow status
success = task['metadata']['workflow']['success']
if success:
workflow['status'] = 'completed'
else:
workflow['status'] = 'failed'
await db.workflow_update(workflow['_id'], workflow)
self.set_status(200)
self.set_header('content-type', 'application/json')
self.write(tornado.escape.json_encode({ '_id': task['_id'] }))
except:
self.set_status(404)
self.write(message(404, 'Failed to save task'))
class TaskLogHandler(tornado.web.RequestHandler):
async def get(self, id):
db = self.settings['db']
try:
# get workflow
task = await db.task_get(id)
workdir = task['trace']['workdir']
# construct response data
data = {
'_id': id,
'out': '',
'err': ''
}
# append log files if they exist
out_file = os.path.join(workdir, '.command.out')
err_file = os.path.join(workdir, '.command.err')
if os.path.exists(out_file):
f = open(out_file)
data['out'] = ''.join(f.readlines())
if os.path.exists(err_file):
f = open(err_file)
data['err'] = ''.join(f.readlines())
self.set_status(200)
self.set_header('content-type', 'application/json')
self.write(tornado.escape.json_encode(data))
except:
self.set_status(404)
self.write(message(404, 'Failed to fetch log for workflow \"%s\"' % id))
class TaskQueryPipelinesHandler(tornado.web.RequestHandler):
async def get(self):
db = self.settings['db']
try:
# query pipelines from database
pipelines = await db.task_query_pipelines()
self.set_status(200)
self.set_header('content-type', 'application/json')
self.write(tornado.escape.json_encode(pipelines))
except Exception as e:
self.set_status(404)
self.write(message(404, 'Failed to perform query'))
raise e
class TaskQueryPipelineHandler(tornado.web.RequestHandler):
async def get(self, pipeline):
db = self.settings['db']
try:
# query tasks from database
pipeline = pipeline.lower()
tasks = await db.task_query_pipeline(pipeline)
tasks = [task['trace'] for task in tasks]
# separate tasks into dataframes by process
process_names = list(set([task['process'] for task in tasks]))
dfs = {}
for process in process_names:
dfs[process] = [task for task in tasks if task['process'] == process]
self.set_status(200)
self.set_header('content-type', 'application/json')
self.write(tornado.escape.json_encode(dfs))
except Exception as e:
self.set_status(404)
self.write(message(404, 'Failed to perform query'))
raise e
class TaskArchiveHandler(tornado.web.RequestHandler):
async def get(self, pipeline):
db = self.settings['db']
try:
# query tasks from database
pipeline = pipeline.lower()
tasks = await db.task_query_pipeline(pipeline)
tasks = [task['trace'] for task in tasks]
# separate tasks into dataframes by process
process_names = list(set([task['process'] for task in tasks]))
dfs = {}
for process in process_names:
dfs[process] = pd.DataFrame([task for task in tasks if task['process'] == process])
# change to trace directory
os.chdir(env.TRACE_DIR)
# save dataframes to csv files
for process in process_names:
filename = 'trace.%s.txt' % (process)
dfs[process].to_csv(filename, sep='\t', index=False)
# create zip archive of trace files
zipfile = 'trace.%s.zip' % (pipeline.replace('/', '__'))
files = ['trace.%s.txt' % (process) for process in process_names]
subprocess.run(['zip', zipfile] + files, check=True)
subprocess.run(['rm', '-f'] + files, check=True)
# return to working directory
os.chdir('..')
self.set_status(200)
self.write(message(200, 'Archive was created'))
except Exception as e:
self.set_status(404)
self.write(message(404, 'Failed to create archive'))
raise e
class TaskArchiveDownloadHandler(tornado.web.StaticFileHandler):
def parse_url_path(self, pipeline):
# get filename of trace archive
filename = 'trace.%s.zip' % (pipeline.replace('/', '__'))
self.set_header('content-disposition', 'attachment; filename=\"%s\"' % filename)
return filename
class TaskVisualizeHandler(tornado.web.RequestHandler):
async def post(self):
db = self.settings['db']
try:
# parse request body
data = tornado.escape.json_decode(self.request.body)
# query task dataset from database
pipeline = data['pipeline'].lower()
tasks = await db.task_query_pipeline(pipeline)
tasks = [task['trace'] for task in tasks]
tasks_process = [task for task in tasks if task['process'] == data['process']]
df = pd.DataFrame(tasks_process)
# prepare visualizer args
args = data['args']
args['plot_name'] = str(bson.ObjectId())
if args['selectors'] == '':
args['selectors'] = []
else:
args['selectors'] = args['selectors'].split(' ')
# append columns from merge process if specified
if 'merge_process' in args:
# load merge data
tasks_merge = [task for task in tasks if task['process'] == args['merge_process']]
df_merge = pd.DataFrame(tasks_merge)
# remove duplicate columns
dupe_columns = set(df.columns).intersection(df_merge.columns)
dupe_columns.remove(args['merge_key'])
df_merge.drop(columns=dupe_columns, inplace=True)
# append merge columns to data
df = df.merge(df_merge, on=args['merge_key'], how='left', copy=False)
# create visualization
outfile = Visualizer.visualize(df, args)
# encode image file into base64
with open(outfile, 'rb') as f:
image_data = base64.b64encode(f.read()).decode('utf-8')
self.set_status(200)
self.set_header('content-type', 'application/json')
self.write(tornado.escape.json_encode(image_data))
except Exception as e:
self.set_status(404)
self.write(message(404, 'Failed to visualize data'))
raise e
class TaskEditHandler(tornado.web.RequestHandler):
async def get(self, id):
db = self.settings['db']
try:
task = await db.task_get(id)
self.set_status(200)
self.set_header('content-type', 'application/json')
self.write(tornado.escape.json_encode(task))
except:
self.set_status(404)
self.write(message(404, 'Failed to get task \"%s\"' % id))
class ModelTrainHandler(tornado.web.RequestHandler):
async def post(self):
db = self.settings['db']
try:
# parse request body
data = tornado.escape.json_decode(self.request.body)
# query task dataset from database
pipeline = data['pipeline'].lower()
tasks = await db.task_query_pipeline(pipeline)
tasks = [task['trace'] for task in tasks]
tasks_process = [task for task in tasks if task['process'] == data['process']]
df = | pd.DataFrame(tasks_process) | pandas.DataFrame |
import warnings
import numpy as np
from pandas.core.base import PandasObject
from pandas.formats.printing import pprint_thing
from pandas.types.common import is_scalar
from pandas.sparse.array import SparseArray
from pandas.util.validators import validate_bool_kwarg
import pandas.sparse.libsparse as splib
class SparseList(PandasObject):
"""
Data structure for accumulating data to be converted into a
SparseArray. Has similar API to the standard Python list
Parameters
----------
data : scalar or array-like
fill_value : scalar, default NaN
"""
def __init__(self, data=None, fill_value=np.nan):
# see gh-13784
warnings.warn("SparseList is deprecated and will be removed "
"in a future version", FutureWarning, stacklevel=2)
self.fill_value = fill_value
self._chunks = []
if data is not None:
self.append(data)
def __unicode__(self):
contents = '\n'.join(repr(c) for c in self._chunks)
return '%s\n%s' % (object.__repr__(self), pprint_thing(contents))
def __len__(self):
return sum(len(c) for c in self._chunks)
def __getitem__(self, i):
if i < 0:
if i + len(self) < 0: # pragma: no cover
raise ValueError('%d out of range' % i)
i += len(self)
passed = 0
j = 0
while i >= passed + len(self._chunks[j]):
passed += len(self._chunks[j])
j += 1
return self._chunks[j][i - passed]
def __setitem__(self, i, value):
raise NotImplementedError
@property
def nchunks(self):
return len(self._chunks)
@property
def is_consolidated(self):
return self.nchunks == 1
def consolidate(self, inplace=True):
"""
Internally consolidate chunks of data
Parameters
----------
inplace : boolean, default True
Modify the calling object instead of constructing a new one
Returns
-------
splist : SparseList
If inplace=False, new object, otherwise reference to existing
object
"""
inplace = validate_bool_kwarg(inplace, 'inplace')
if not inplace:
result = self.copy()
else:
result = self
if result.is_consolidated:
return result
result._consolidate_inplace()
return result
def _consolidate_inplace(self):
new_values = np.concatenate([c.sp_values for c in self._chunks])
new_index = _concat_sparse_indexes([c.sp_index for c in self._chunks])
new_arr = SparseArray(new_values, sparse_index=new_index,
fill_value=self.fill_value)
self._chunks = [new_arr]
def copy(self):
"""
Return copy of the list
Returns
-------
new_list : SparseList
"""
new_splist = SparseList(fill_value=self.fill_value)
new_splist._chunks = list(self._chunks)
return new_splist
def to_array(self):
"""
Return SparseArray from data stored in the SparseList
Returns
-------
sparr : SparseArray
"""
self.consolidate(inplace=True)
return self._chunks[0]
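# Usage sketch: SparseList([1, 0, 0, 2], fill_value=0).to_array() consolidates the accumulated
# chunks into a single SparseArray (construction emits the deprecation FutureWarning above).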
def append(self, value):
"""
Append element or array-like chunk of data to the SparseList
Parameters
----------
value: scalar or array-like
"""
if | is_scalar(value) | pandas.types.common.is_scalar |
import sys
import os
import pandas as pd
from pii_benchmarking import pii_engine
def is_labeled(entities, start_char, end_char):
for entity in entities:
if entity['start_char'] <= start_char and entity['end_char'] >= end_char:
return entity['entity_label']
return 'NONE'
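# Illustration: with entities = [{'start_char': 0, 'end_char': 5, 'entity_label': 'NAME'}],
# is_labeled(entities, 1, 4) returns 'NAME', while is_labeled(entities, 3, 9) returns 'NONE'.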
def main(eval_dir):
eval_files = os.listdir(eval_dir)
for file in eval_files:
if file.endswith('.csv'):
service_names = ['aws', 'gcp', 'presidio']
for service_name in service_names:
print(f"Masking {file} with {service_name}...")
df = pd.read_csv(os.path.join(eval_dir, file), index_col=0)
masker = pii_engine.Masker(service_name)
masked_results = masker.batch_mask(df['Text'].tolist())
assert df.shape[0] == len(masked_results)
masked_col_name = f"Masked_{service_name}"
df[masked_col_name] = | pd.Series([x['masked_input'] for x in masked_results]) | pandas.Series |
from collections import OrderedDict
import numpy as np
from numpy import nan, array
import pandas as pd
import pytest
from .conftest import (
assert_series_equal, assert_frame_equal, fail_on_pvlib_version)
from numpy.testing import assert_allclose
import unittest.mock as mock
from pvlib import inverter, pvsystem
from pvlib import atmosphere
from pvlib import iam as _iam
from pvlib import irradiance
from pvlib.location import Location
from pvlib import temperature
from pvlib._deprecation import pvlibDeprecationWarning
@pytest.mark.parametrize('iam_model,model_params', [
('ashrae', {'b': 0.05}),
('physical', {'K': 4, 'L': 0.002, 'n': 1.526}),
('martin_ruiz', {'a_r': 0.16}),
])
def test_PVSystem_get_iam(mocker, iam_model, model_params):
m = mocker.spy(_iam, iam_model)
system = pvsystem.PVSystem(module_parameters=model_params)
thetas = 1
iam = system.get_iam(thetas, iam_model=iam_model)
m.assert_called_with(thetas, **model_params)
assert iam < 1.
def test_PVSystem_multi_array_get_iam():
model_params = {'b': 0.05}
system = pvsystem.PVSystem(
arrays=[pvsystem.Array(module_parameters=model_params),
pvsystem.Array(module_parameters=model_params)]
)
iam = system.get_iam((1, 5), iam_model='ashrae')
assert len(iam) == 2
assert iam[0] != iam[1]
with pytest.raises(ValueError,
match="Length mismatch for per-array parameter"):
system.get_iam((1,), iam_model='ashrae')
def test_PVSystem_get_iam_sapm(sapm_module_params, mocker):
system = pvsystem.PVSystem(module_parameters=sapm_module_params)
mocker.spy(_iam, 'sapm')
aoi = 0
out = system.get_iam(aoi, 'sapm')
_iam.sapm.assert_called_once_with(aoi, sapm_module_params)
assert_allclose(out, 1.0, atol=0.01)
def test_PVSystem_get_iam_interp(sapm_module_params, mocker):
system = pvsystem.PVSystem(module_parameters=sapm_module_params)
with pytest.raises(ValueError):
system.get_iam(45, iam_model='interp')
def test__normalize_sam_product_names():
BAD_NAMES = [' -.()[]:+/",', 'Module[1]']
NORM_NAMES = ['____________', 'Module_1_']
norm_names = pvsystem._normalize_sam_product_names(BAD_NAMES)
assert list(norm_names) == NORM_NAMES
BAD_NAMES = ['Module[1]', 'Module(1)']
NORM_NAMES = ['Module_1_', 'Module_1_']
with pytest.warns(UserWarning):
norm_names = pvsystem._normalize_sam_product_names(BAD_NAMES)
assert list(norm_names) == NORM_NAMES
BAD_NAMES = ['Module[1]', 'Module[1]']
NORM_NAMES = ['Module_1_', 'Module_1_']
with pytest.warns(UserWarning):
norm_names = pvsystem._normalize_sam_product_names(BAD_NAMES)
assert list(norm_names) == NORM_NAMES
def test_PVSystem_get_iam_invalid(sapm_module_params, mocker):
system = pvsystem.PVSystem(module_parameters=sapm_module_params)
with pytest.raises(ValueError):
system.get_iam(45, iam_model='not_a_model')
def test_retrieve_sam_raise_no_parameters():
"""
Raise an exception if no parameters are provided to `retrieve_sam()`.
"""
with pytest.raises(ValueError) as error:
pvsystem.retrieve_sam()
assert 'A name or path must be provided!' == str(error.value)
def test_retrieve_sam_cecmod():
"""
Test the expected data is retrieved from the CEC module database. In
particular, check for a known module in the database and check for the
expected keys for that module.
"""
data = pvsystem.retrieve_sam('cecmod')
keys = [
'BIPV',
'Date',
'T_NOCT',
'A_c',
'N_s',
'I_sc_ref',
'V_oc_ref',
'I_mp_ref',
'V_mp_ref',
'alpha_sc',
'beta_oc',
'a_ref',
'I_L_ref',
'I_o_ref',
'R_s',
'R_sh_ref',
'Adjust',
'gamma_r',
'Version',
'STC',
'PTC',
'Technology',
'Bifacial',
'Length',
'Width',
]
module = 'Itek_Energy_LLC_iT_300_HE'
assert module in data
assert set(data[module].keys()) == set(keys)
def test_retrieve_sam_cecinverter():
"""
Test the expected data is retrieved from the CEC inverter database. In
particular, check for a known inverter in the database and check for the
expected keys for that inverter.
"""
data = pvsystem.retrieve_sam('cecinverter')
keys = [
'Vac',
'Paco',
'Pdco',
'Vdco',
'Pso',
'C0',
'C1',
'C2',
'C3',
'Pnt',
'Vdcmax',
'Idcmax',
'Mppt_low',
'Mppt_high',
'CEC_Date',
'CEC_Type',
]
inverter = 'Yaskawa_Solectria_Solar__PVI_5300_208__208V_'
assert inverter in data
assert set(data[inverter].keys()) == set(keys)
def test_sapm(sapm_module_params):
times = pd.date_range(start='2015-01-01', periods=5, freq='12H')
effective_irradiance = pd.Series([-1000, 500, 1100, np.nan, 1000],
index=times)
temp_cell = pd.Series([10, 25, 50, 25, np.nan], index=times)
out = pvsystem.sapm(effective_irradiance, temp_cell, sapm_module_params)
expected = pd.DataFrame(np.array(
[[ -5.0608322 , -4.65037767, nan, nan,
nan, -4.91119927, -4.15367716],
[ 2.545575 , 2.28773882, 56.86182059, 47.21121608,
108.00693168, 2.48357383, 1.71782772],
[ 5.65584763, 5.01709903, 54.1943277 , 42.51861718,
213.32011294, 5.52987899, 3.48660728],
[ nan, nan, nan, nan,
nan, nan, nan],
[ nan, nan, nan, nan,
nan, nan, nan]]),
columns=['i_sc', 'i_mp', 'v_oc', 'v_mp', 'p_mp', 'i_x', 'i_xx'],
index=times)
assert_frame_equal(out, expected, check_less_precise=4)
out = pvsystem.sapm(1000, 25, sapm_module_params)
expected = OrderedDict()
expected['i_sc'] = 5.09115
expected['i_mp'] = 4.5462909092579995
expected['v_oc'] = 59.260800000000003
expected['v_mp'] = 48.315600000000003
expected['p_mp'] = 219.65677305534581
expected['i_x'] = 4.9759899999999995
expected['i_xx'] = 3.1880204359100004
for k, v in expected.items():
assert_allclose(out[k], v, atol=1e-4)
# just make sure it works with Series input
pvsystem.sapm(effective_irradiance, temp_cell,
pd.Series(sapm_module_params))
def test_PVSystem_sapm(sapm_module_params, mocker):
mocker.spy(pvsystem, 'sapm')
system = pvsystem.PVSystem(module_parameters=sapm_module_params)
effective_irradiance = 500
temp_cell = 25
out = system.sapm(effective_irradiance, temp_cell)
pvsystem.sapm.assert_called_once_with(effective_irradiance, temp_cell,
sapm_module_params)
assert_allclose(out['p_mp'], 100, atol=100)
def test_PVSystem_multi_array_sapm(sapm_module_params):
system = pvsystem.PVSystem(
arrays=[pvsystem.Array(module_parameters=sapm_module_params),
pvsystem.Array(module_parameters=sapm_module_params)]
)
effective_irradiance = (100, 500)
temp_cell = (15, 25)
sapm_one, sapm_two = system.sapm(effective_irradiance, temp_cell)
assert sapm_one['p_mp'] != sapm_two['p_mp']
sapm_one_flip, sapm_two_flip = system.sapm(
(effective_irradiance[1], effective_irradiance[0]),
(temp_cell[1], temp_cell[0])
)
assert sapm_one_flip['p_mp'] == sapm_two['p_mp']
assert sapm_two_flip['p_mp'] == sapm_one['p_mp']
with pytest.raises(ValueError,
match="Length mismatch for per-array parameter"):
system.sapm(effective_irradiance, 10)
with pytest.raises(ValueError,
match="Length mismatch for per-array parameter"):
system.sapm(500, temp_cell)
@pytest.mark.parametrize('airmass,expected', [
(1.5, 1.00028714375),
(np.array([[10, np.nan]]), np.array([[0.999535, 0]])),
(pd.Series([5]), pd.Series([1.0387675]))
])
def test_sapm_spectral_loss(sapm_module_params, airmass, expected):
out = pvsystem.sapm_spectral_loss(airmass, sapm_module_params)
if isinstance(airmass, pd.Series):
assert_series_equal(out, expected, check_less_precise=4)
else:
assert_allclose(out, expected, atol=1e-4)
def test_PVSystem_sapm_spectral_loss(sapm_module_params, mocker):
mocker.spy(pvsystem, 'sapm_spectral_loss')
system = pvsystem.PVSystem(module_parameters=sapm_module_params)
airmass = 2
out = system.sapm_spectral_loss(airmass)
pvsystem.sapm_spectral_loss.assert_called_once_with(airmass,
sapm_module_params)
assert_allclose(out, 1, atol=0.5)
def test_PVSystem_multi_array_sapm_spectral_loss(sapm_module_params):
system = pvsystem.PVSystem(
arrays=[pvsystem.Array(module_parameters=sapm_module_params),
pvsystem.Array(module_parameters=sapm_module_params)]
)
loss_one, loss_two = system.sapm_spectral_loss(2)
assert loss_one == loss_two
# this test could be improved to cover all cell types.
# could remove the need for specifying spectral coefficients if we don't
# care about the return value at all
@pytest.mark.parametrize('module_parameters,module_type,coefficients', [
({'Technology': 'mc-Si'}, 'multisi', None),
({'Material': 'Multi-c-Si'}, 'multisi', None),
({'first_solar_spectral_coefficients': (
0.84, -0.03, -0.008, 0.14, 0.04, -0.002)},
None,
(0.84, -0.03, -0.008, 0.14, 0.04, -0.002))
])
def test_PVSystem_first_solar_spectral_loss(module_parameters, module_type,
coefficients, mocker):
mocker.spy(atmosphere, 'first_solar_spectral_correction')
system = pvsystem.PVSystem(module_parameters=module_parameters)
pw = 3
airmass_absolute = 3
out = system.first_solar_spectral_loss(pw, airmass_absolute)
atmosphere.first_solar_spectral_correction.assert_called_once_with(
pw, airmass_absolute, module_type, coefficients)
assert_allclose(out, 1, atol=0.5)
def test_PVSystem_multi_array_first_solar_spectral_loss():
system = pvsystem.PVSystem(
arrays=[
pvsystem.Array(
module_parameters={'Technology': 'mc-Si'},
module_type='multisi'
),
pvsystem.Array(
module_parameters={'Technology': 'mc-Si'},
module_type='multisi'
)
]
)
loss_one, loss_two = system.first_solar_spectral_loss(1, 3)
assert loss_one == loss_two
@pytest.mark.parametrize('test_input,expected', [
([1000, 100, 5, 45], 1140.0510967821877),
([np.array([np.nan, 1000, 1000]),
np.array([100, np.nan, 100]),
np.array([1.1, 1.1, 1.1]),
np.array([10, 10, 10])],
np.array([np.nan, np.nan, 1081.1574])),
([pd.Series([1000]), pd.Series([100]), pd.Series([1.1]),
pd.Series([10])],
pd.Series([1081.1574]))
])
def test_sapm_effective_irradiance(sapm_module_params, test_input, expected):
test_input.append(sapm_module_params)
out = pvsystem.sapm_effective_irradiance(*test_input)
if isinstance(test_input, pd.Series):
assert_series_equal(out, expected, check_less_precise=4)
else:
assert_allclose(out, expected, atol=1e-1)
def test_PVSystem_sapm_effective_irradiance(sapm_module_params, mocker):
system = pvsystem.PVSystem(module_parameters=sapm_module_params)
mocker.spy(pvsystem, 'sapm_effective_irradiance')
poa_direct = 900
poa_diffuse = 100
airmass_absolute = 1.5
aoi = 0
p = (sapm_module_params['A4'], sapm_module_params['A3'],
sapm_module_params['A2'], sapm_module_params['A1'],
sapm_module_params['A0'])
f1 = np.polyval(p, airmass_absolute)
expected = f1 * (poa_direct + sapm_module_params['FD'] * poa_diffuse)
out = system.sapm_effective_irradiance(
poa_direct, poa_diffuse, airmass_absolute, aoi)
pvsystem.sapm_effective_irradiance.assert_called_once_with(
poa_direct, poa_diffuse, airmass_absolute, aoi, sapm_module_params)
assert_allclose(out, expected, atol=0.1)
def test_PVSystem_multi_array_sapm_effective_irradiance(sapm_module_params):
system = pvsystem.PVSystem(
arrays=[pvsystem.Array(module_parameters=sapm_module_params),
pvsystem.Array(module_parameters=sapm_module_params)]
)
poa_direct = (500, 900)
poa_diffuse = (50, 100)
aoi = (0, 10)
airmass_absolute = 1.5
irrad_one, irrad_two = system.sapm_effective_irradiance(
poa_direct, poa_diffuse, airmass_absolute, aoi
)
assert irrad_one != irrad_two
@pytest.fixture
def two_array_system(pvsyst_module_params, cec_module_params):
"""Two-array PVSystem.
Both arrays are identical.
"""
temperature_model = temperature.TEMPERATURE_MODEL_PARAMETERS['sapm'][
'open_rack_glass_glass'
]
# Need u_v to be non-zero so wind-speed changes cell temperature
# under the pvsyst model.
temperature_model['u_v'] = 1.0
# parameter for fuentes temperature model
temperature_model['noct_installed'] = 45
# parameters for noct_sam temperature model
temperature_model['noct'] = 45.
temperature_model['module_efficiency'] = 0.2
module_params = {**pvsyst_module_params, **cec_module_params}
return pvsystem.PVSystem(
arrays=[
pvsystem.Array(
temperature_model_parameters=temperature_model,
module_parameters=module_params
),
pvsystem.Array(
temperature_model_parameters=temperature_model,
module_parameters=module_params
)
]
)
@pytest.mark.parametrize("poa_direct, poa_diffuse, aoi",
[(20, (10, 10), (20, 20)),
((20, 20), (10,), (20, 20)),
((20, 20), (10, 10), 20)])
def test_PVSystem_sapm_effective_irradiance_value_error(
poa_direct, poa_diffuse, aoi, two_array_system):
with pytest.raises(ValueError,
match="Length mismatch for per-array parameter"):
two_array_system.sapm_effective_irradiance(
poa_direct, poa_diffuse, 10, aoi
)
def test_PVSystem_sapm_celltemp(mocker):
a, b, deltaT = (-3.47, -0.0594, 3) # open_rack_glass_glass
temp_model_params = {'a': a, 'b': b, 'deltaT': deltaT}
system = pvsystem.PVSystem(temperature_model_parameters=temp_model_params)
mocker.spy(temperature, 'sapm_cell')
temps = 25
irrads = 1000
winds = 1
out = system.sapm_celltemp(irrads, temps, winds)
temperature.sapm_cell.assert_called_once_with(irrads, temps, winds, a, b,
deltaT)
assert_allclose(out, 57, atol=1)
def test_PVSystem_sapm_celltemp_kwargs(mocker):
temp_model_params = temperature.TEMPERATURE_MODEL_PARAMETERS['sapm'][
'open_rack_glass_glass']
system = pvsystem.PVSystem(temperature_model_parameters=temp_model_params)
mocker.spy(temperature, 'sapm_cell')
temps = 25
irrads = 1000
winds = 1
out = system.sapm_celltemp(irrads, temps, winds)
temperature.sapm_cell.assert_called_once_with(irrads, temps, winds,
temp_model_params['a'],
temp_model_params['b'],
temp_model_params['deltaT'])
assert_allclose(out, 57, atol=1)
def test_PVSystem_multi_array_sapm_celltemp_different_arrays():
temp_model_one = temperature.TEMPERATURE_MODEL_PARAMETERS['sapm'][
'open_rack_glass_glass']
temp_model_two = temperature.TEMPERATURE_MODEL_PARAMETERS['sapm'][
'close_mount_glass_glass']
system = pvsystem.PVSystem(
arrays=[pvsystem.Array(temperature_model_parameters=temp_model_one),
pvsystem.Array(temperature_model_parameters=temp_model_two)]
)
temp_one, temp_two = system.sapm_celltemp(
(1000, 1000), 25, 1
)
assert temp_one != temp_two
def test_PVSystem_pvsyst_celltemp(mocker):
parameter_set = 'insulated'
temp_model_params = temperature.TEMPERATURE_MODEL_PARAMETERS['pvsyst'][
parameter_set]
alpha_absorption = 0.85
module_efficiency = 0.17
module_parameters = {'alpha_absorption': alpha_absorption,
'module_efficiency': module_efficiency}
system = pvsystem.PVSystem(module_parameters=module_parameters,
temperature_model_parameters=temp_model_params)
mocker.spy(temperature, 'pvsyst_cell')
irrad = 800
temp = 45
wind = 0.5
out = system.pvsyst_celltemp(irrad, temp, wind_speed=wind)
temperature.pvsyst_cell.assert_called_once_with(
irrad, temp, wind_speed=wind, u_c=temp_model_params['u_c'],
u_v=temp_model_params['u_v'], module_efficiency=module_efficiency,
alpha_absorption=alpha_absorption)
assert (out < 90) and (out > 70)
def test_PVSystem_faiman_celltemp(mocker):
u0, u1 = 25.0, 6.84 # default values
temp_model_params = {'u0': u0, 'u1': u1}
system = pvsystem.PVSystem(temperature_model_parameters=temp_model_params)
mocker.spy(temperature, 'faiman')
temps = 25
irrads = 1000
winds = 1
out = system.faiman_celltemp(irrads, temps, winds)
temperature.faiman.assert_called_once_with(irrads, temps, winds, u0, u1)
assert_allclose(out, 56.4, atol=1)
def test_PVSystem_noct_celltemp(mocker):
poa_global, temp_air, wind_speed, noct, module_efficiency = (
1000., 25., 1., 45., 0.2)
expected = 55.230790492
temp_model_params = {'noct': noct, 'module_efficiency': module_efficiency}
system = pvsystem.PVSystem(temperature_model_parameters=temp_model_params)
mocker.spy(temperature, 'noct_sam')
out = system.noct_sam_celltemp(poa_global, temp_air, wind_speed)
temperature.noct_sam.assert_called_once_with(
poa_global, temp_air, wind_speed, effective_irradiance=None, noct=noct,
module_efficiency=module_efficiency)
assert_allclose(out, expected)
# different types
out = system.noct_sam_celltemp(np.array(poa_global), np.array(temp_air),
np.array(wind_speed))
assert_allclose(out, expected)
dr = pd.date_range(start='2020-01-01 12:00:00', end='2020-01-01 13:00:00',
freq='1H')
out = system.noct_sam_celltemp(pd.Series(index=dr, data=poa_global),
| pd.Series(index=dr, data=temp_air) | pandas.Series |
#!python3
"""Module for working with student records and making Students tab"""
import numpy as np
import pandas as pd
from reports_modules.excel_base import safe_write, write_array
from reports_modules.excel_base import make_excel_indices
DEFAULT_FROM_TARGET = 0.2 # default prediction below target grad rate
MINUS1_CUT = 0.2 # minimum odds required to "toss" a college in minus1 pred
def _get_act_translation(x, lookup_df):
"""Apply function for calculating equivalent SAT for ACT scores.
Lookup table has index of ACT with value of SAT"""
act = x
if np.isreal(act):
if act in lookup_df.index: # it's an ACT value in the table
return lookup_df.loc[act, "SAT"]
return np.nan # default if not in table or not a number
def _get_sat_guess(x):
"""Returns a GPA guess based on regression constants from the
prior year. nan if GPA isn't a number"""
gpa = x
if np.isreal(gpa):
guess = 427.913068576 + 185.298880075 * gpa
return np.round(guess / 10.0) * 10.0
else:
return np.nan
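# Illustration: for gpa = 3.0 the regression gives 427.913 + 185.299 * 3.0 = 983.81,
# which rounds to the nearest 10 as 980.0.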
def _pick_sat_for_use(x):
""" Returns the SAT we'll use in practice"""
sat_guess, interim, actual_sat = x
if np.isreal(actual_sat):
return actual_sat
elif np.isreal(interim):
return interim
elif np.isreal(sat_guess):
return sat_guess
else:
return np.nan
def _get_sat_max(x):
"""Returns the max of two values if both are numbers, otherwise
returns the numeric one or nan if neither is numeric"""
sat, act_in_sat = x
if np.isreal(sat):
if np.isreal(act_in_sat):
return max(sat, act_in_sat)
else:
return sat
else:
if np.isreal(act_in_sat):
return act_in_sat
else:
return np.nan
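# Illustration: _get_sat_max((1230, 1100)) returns 1230 (both entries numeric, so the max is used).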
def reduce_roster(campus, cfg, dfs, counselor, advisor, debug, do_nonseminar):
"""Uses campus info and config file to reduce the active student list"""
df = dfs["full_roster"].copy()
if debug:
print("Starting roster of {} students".format(len(df)), flush=True, end="")
if campus == "All":
if "all_campuses" in cfg:
df = df[df["Campus"].isin(cfg["all_campuses"])]
else:
pass # we're using the entire dataframe
elif campus == "PAS": # special code for -1 EFC students
df = df[df["EFC"] == -1]
elif campus.startswith("list"): # special code for a list from a csv
df = df[df.index.isin(dfs["roster_list"].index)]
else:
df = df[df["Campus"] == campus]
if counselor != "All":
df = df.dropna(subset=["Counselor"])
df = df[df["Counselor"].str.contains(counselor)]
if advisor != "All":
df = df.dropna(subset=["Advisor"])
df = df[df["Advisor"].str.contains(advisor)]
if do_nonseminar:
df = df[df["SpEd"].str.endswith("NonS")]
else:
df = df[~df["SpEd"].str.endswith("NonS")]
if debug:
print("..ending at {} students.".format(len(df)), flush=True)
# Four calculated columns need to be added for the application analyses
df["local_act_in_sat"] = df["ACT"].apply(
_get_act_translation, args=(dfs["ACTtoSAT"],)
)
df["local_sat_guess"] = df["GPA"].apply(_get_sat_guess)
df["local_sat_used"] = df[["local_sat_guess", "InterimSAT", "SAT"]].apply(
_pick_sat_for_use, axis=1
)
df["local_sat_max"] = df[["local_sat_used", "local_act_in_sat"]].apply(
_get_sat_max, axis=1
)
dfs["roster"] = df
def _get_subgroup(x):
"""Apply function to return one of eight unique subgroups"""
race, gender = x
if race == "B":
subgroup = "Black"
elif race == "H":
subgroup = "Latinx"
elif race == "A" or race == "P":
subgroup = "Asian"
else:
subgroup = "Other"
if gender == "M":
return subgroup + " Male"
elif gender == "F":
return subgroup + " Female"
else:
return subgroup + " Other"
def _get_strategies(x, lookup_df):
"""Apply function for calculating strategies based on gpa and sat using the
    lookup table (mirrors the Excel equation for looking up strategy)"""
gpa, sat = x
if np.isreal(gpa) and np.isreal(sat):
lookup = "{:.1f}:{:.0f}".format(
max(np.floor(gpa * 10) / 10, 1.5), max(sat, 710)
)
return lookup_df["Strategy"].get(lookup, np.nan)
else:
return np.nan
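# Example (sketch): the lookup key is "<gpa floored to 0.1>:<sat>" with minimums
# of 1.5 and 710, so a 2.85 GPA / 1000 SAT student is looked up as "2.8:1000":
#   >>> _get_strategies((2.85, 1000), lookup_df)   # == lookup_df["Strategy"].get("2.8:1000", nan)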
def _get_bucket(x, use_EFC=False):
"""Apply function to create a text field to "bucket" students"""
strat, gpa, efc, race = x
special_strats = [5, 6] # these are the ones split by 3.0 GPA
if | pd.isnull(gpa) | pandas.isnull |
# coding: utf-8
# Copyright (c) <NAME>.
# Distributed under the terms of the MIT License.
import numpy as np
import math
from io import StringIO
import os
import re
import pandas as pd
import numbers
import sys
from mdgo.volume import molecular_volume
from pymatgen.core.structure import Molecule
__author__ = "<NAME>"
__version__ = "1.0"
__maintainer__ = "<NAME>"
__email__ = "<EMAIL>"
__date__ = "Feb 9, 2021"
MM_of_Elements = {'H': 1.00794, 'He': 4.002602, 'Li': 6.941, 'Be': 9.012182,
'B': 10.811, 'C': 12.0107, 'N': 14.0067, 'O': 15.9994,
'F': 18.9984032, 'Ne': 20.1797, 'Na': 22.98976928,
'Mg': 24.305, 'Al': 26.9815386, 'Si': 28.0855, 'P': 30.973762,
'S': 32.065, 'Cl': 35.453, 'Ar': 39.948, 'K': 39.0983,
'Ca': 40.078, 'Sc': 44.955912, 'Ti': 47.867, 'V': 50.9415,
'Cr': 51.9961, 'Mn': 54.938045, 'Fe': 55.845, 'Co': 58.933195,
'Ni': 58.6934, 'Cu': 63.546, 'Zn': 65.409, 'Ga': 69.723,
'Ge': 72.64, 'As': 74.9216, 'Se': 78.96, 'Br': 79.904,
'Kr': 83.798, 'Rb': 85.4678, 'Sr': 87.62, 'Y': 88.90585,
'Zr': 91.224, 'Nb': 92.90638, 'Mo': 95.94, 'Tc': 98.9063,
'Ru': 101.07, 'Rh': 102.9055, 'Pd': 106.42, 'Ag': 107.8682,
'Cd': 112.411, 'In': 114.818, 'Sn': 118.71, 'Sb': 121.760,
'Te': 127.6, 'I': 126.90447, 'Xe': 131.293, 'Cs': 132.9054519,
'Ba': 137.327, 'La': 138.90547, 'Ce': 140.116,
'Pr': 140.90465, 'Nd': 144.242, 'Pm': 146.9151, 'Sm': 150.36,
'Eu': 151.964, 'Gd': 157.25, 'Tb': 158.92535, 'Dy': 162.5,
'Ho': 164.93032, 'Er': 167.259, 'Tm': 168.93421, 'Yb': 173.04,
'Lu': 174.967, 'Hf': 178.49, 'Ta': 180.9479, 'W': 183.84,
'Re': 186.207, 'Os': 190.23, 'Ir': 192.217, 'Pt': 195.084,
'Au': 196.966569, 'Hg': 200.59, 'Tl': 204.3833, 'Pb': 207.2,
'Bi': 208.9804, 'Po': 208.9824, 'At': 209.9871,
'Rn': 222.0176, 'Fr': 223.0197, 'Ra': 226.0254,
'Ac': 227.0278, 'Th': 232.03806, 'Pa': 231.03588,
'U': 238.02891, 'Np': 237.0482, 'Pu': 244.0642,
'Am': 243.0614, 'Cm': 247.0703, 'Bk': 247.0703,
'Cf': 251.0796, 'Es': 252.0829, 'Fm': 257.0951,
'Md': 258.0951, 'No': 259.1009, 'Lr': 262, 'Rf': 267,
'Db': 268, 'Sg': 271, 'Bh': 270, 'Hs': 269, 'Mt': 278,
'Ds': 281, 'Rg': 281, 'Cn': 285, 'Nh': 284, 'Fl': 289,
'Mc': 289, 'Lv': 292, 'Ts': 294, 'Og': 294, 'ZERO': 0}
SECTION_SORTER = {
"atoms": {
"in_kw": None,
"in_header": ["atom", "charge", "sigma", "epsilon"],
"sec_number": None,
"desired_split": None,
"desired_cols": None,
"out_kw": None,
"ff_header": ["epsilon", "sigma"],
"topo_header": ["mol-id", "type", "charge", "x", "y", "z"]
},
"bonds": {
"in_kw": "Stretch",
"in_header": ["atom1", "atom2", "k", "r0"],
"sec_number": 5,
"desired_split": 2,
"desired_cols": 4,
"out_kw": ["Bond Coeffs", "Bonds"],
"ff_header": ["k", "r0"],
"topo_header": ["type", "atom1", "atom2"]
},
"angles": {
"in_kw": "Bending",
"in_header": ["atom1", "atom2", "atom3", "k", "theta0"],
"sec_number": 6,
"desired_split": 1,
"desired_cols": 5,
"out_kw": ["Angle Coeffs", "Angles"],
"ff_header": ["k", "theta0"],
"topo_header": ["type", "atom1", "atom2", "atom3"]
},
"dihedrals": {
"in_kw": "proper Torsion",
"in_header": [
"atom1", "atom2", "atom3", "atom4", "v1", "v2", "v3", "v4"
],
"sec_number": 7,
"desired_split": 1,
"desired_cols": 8,
"out_kw": ["Dihedral Coeffs", "Dihedrals"],
"ff_header": ["v1", "v2", "v3", "v4"],
"topo_header": ["type", "atom1", "atom2", "atom3", "atom4"]
},
"impropers": {
"in_kw": "improper Torsion",
"in_header": ["atom1", "atom2", "atom3", "atom4", "v2"],
"sec_number": 8,
"desired_split": 1,
"desired_cols": 5,
"out_kw": ["Improper Coeffs", "Impropers"],
"ff_header": ["v1", "v2", "v3", "v4"],
"topo_header": ["type", "atom1", "atom2", "atom3", "atom4"]
},
}
BOX = """{0:6f} {1:6f} xlo xhi
{0:6f} {1:6f} ylo yhi
{0:6f} {1:6f} zlo zhi"""
MOLAR_VOLUME = {"lipf6": 18, "litfsi": 100} # empirical value
ALIAS = {
"ethylene carbonate": "ec",
"ec": "ec",
"propylene carbonate": "pc",
"pc": "pc",
"dimethyl carbonate": "dmc",
"dmc": "dmc",
"diethyl carbonate": "dec",
"dec": "dec",
"ethyl methyl carbonate": "emc",
"emc": "emc",
"fluoroethylene carbonate": "fec",
"fec": "fec",
"vinyl carbonate": "vc",
"vinylene carbonate": "vc",
"vc": "vc",
"1,3-dioxolane": "dol",
"dioxolane": "dol",
"dol": "dol",
"ethylene glycol monomethyl ether": "egme",
"2-methoxyethanol": "egme",
"egme": "egme",
"dme": "dme",
"1,2-dimethoxyethane": "dme",
"glyme": "dme",
"monoglyme": "dme",
"2-methoxyethyl ether": "diglyme",
"diglyme": "diglyme",
"triglyme": "triglyme",
"tetraglyme": "tetraglyme",
"acetonitrile": "acn",
"acn": "acn"
}
# From PubChem
MOLAR_MASS = {
"ec": 88.06,
"pc": 102.09,
"dec": 118.13,
"dmc": 90.08,
"emc": 104.05,
"fec": 106.05,
"vc": 86.05,
"dol": 74.08,
"egme": 76.09,
"dme": 90.12,
"diglyme": 134.17,
"triglyme": 178.23,
"tetraglyme": 222.28,
"acn": 41.05
}
# from Sigma-Aldrich
DENSITY = {
"ec": 1.321,
"pc": 1.204,
"dec": 0.975,
"dmc": 1.069,
"emc": 1.006,
"fec": 1.454, # from qm-ht.com
"vc": 1.355,
"dol": 1.06,
"dme": 0.867,
"egme": 0.965,
"diglyme": 0.939,
"triglyme": 0.986,
"tetraglyme": 1.009,
"acn": 0.786
}
def atom_vec(atom1, atom2, dimension):
"""
Calculate the vector of the positions from atom2 to atom1.
"""
vec = [0, 0, 0]
for i in range(3):
diff = atom1.position[i]-atom2.position[i]
if diff > dimension[i]/2:
vec[i] = diff - dimension[i]
elif diff < - dimension[i]/2:
vec[i] = diff + dimension[i]
else:
vec[i] = diff
return np.array(vec)
def position_vec(pos1, pos2, dimension):
"""
    Calculate the vector from pos2 to pos1.
"""
vec = [0, 0, 0]
for i in range(3):
diff = pos1[i]-pos2[i]
if diff > dimension[i]/2:
vec[i] = diff - dimension[i]
elif diff < - dimension[i]/2:
vec[i] = diff + dimension[i]
else:
vec[i] = diff
return np.array(vec)
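# Minimum-image sketch: in a 10 x 10 x 10 box, positions 9 apart along x are
# really 1 apart through the periodic boundary (values below are made up):
#   >>> position_vec([9.5, 0.0, 0.0], [0.5, 0.0, 0.0], [10.0, 10.0, 10.0])
#   array([-1.,  0.,  0.])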
def mass_to_name(df):
"""
Create a dict for mapping atom type id to element from the mass information.
Args:
df (pandas.DataFrame): The masses attribute from LammpsData object
Return:
dict: The element dict.
"""
atoms = {}
for row in df.index:
for item in MM_of_Elements.items():
if math.isclose(df["mass"][row], item[1], abs_tol=0.01):
atoms[row] = item[0]
return atoms
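# Example (sketch, assuming a masses DataFrame indexed by atom type id, as
# produced by LammpsData.masses):
#   >>> masses = pd.DataFrame({"mass": [12.011, 1.008]}, index=[1, 2])
#   >>> mass_to_name(masses)
#   {1: 'C', 2: 'H'}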
def ff_parser(ff_dir, xyz_dir):
"""
A parser to convert a force field field from Maestro format
to LAMMPS data format.
Args:
ff_dir (str): The path to the Maestro force field file.
xyz_dir (str): The path to the xyz structure file.
Return:
str: The output LAMMPS data string.
"""
with open(xyz_dir, 'r') as f_xyz:
molecule = pd.read_table(
f_xyz,
skiprows=2,
delim_whitespace=True,
names=['atom', 'x', 'y', 'z']
)
coordinates = molecule[["x", "y", "z"]]
lo = coordinates.min().min() - 0.5
hi = coordinates.max().max() + 0.5
with open(ff_dir, 'r') as f:
lines_org = f.read()
lines = lines_org.split("\n\n")
atoms = "\n".join(lines[4].split("\n", 4)[4].split("\n")[:-1])
dfs = dict()
dfs["atoms"] = pd.read_csv(
StringIO(atoms),
names=SECTION_SORTER.get("atoms").get("in_header"),
delim_whitespace=True,
usecols=[0, 4, 5, 6]
)
dfs["atoms"] = pd.concat(
[dfs["atoms"], coordinates],
axis=1
)
dfs["atoms"].index += 1
dfs["atoms"].index.name = "type"
dfs["atoms"] = dfs["atoms"].reset_index()
dfs["atoms"].index += 1
types = dfs["atoms"].copy().reset_index().set_index('atom')['type']
replace_dict = {
"atom1": dict(types),
"atom2": dict(types),
"atom3": dict(types),
"atom4": dict(types)
}
counts = dict()
counts["atoms"] = len(dfs["atoms"].index)
mass_list = list()
for index, row in dfs["atoms"].iterrows():
mass_list.append(
MM_of_Elements.get(re.split(r'(\d+)', row['atom'])[0])
)
mass_df = | pd.DataFrame(mass_list) | pandas.DataFrame |
"""
TODO Pendletoon, doc this whole module
"""
import logging
import pandas as pd
import capture.devconfig as config
from utils.data_handling import update_sheet_column
from utils import globals
from utils.globals import lab_safeget
modlog = logging.getLogger('capture.prepare.interface')
def _get_reagent_header_cells(column: str):
"""Get all cells in the rows that start each reagent for a given colum
:param column: (str) in {A, B, ..., Z, AA, AB, ...}
"""
startrow = lab_safeget(config.lab_vars, globals.get_lab(), 'reagent_interface_amount_startrow')
reagent_interface_step = int(lab_safeget(config.lab_vars, globals.get_lab(), 'maxreagentchemicals')) + 1
num_reagents = lab_safeget(config.lab_vars, globals.get_lab(), 'max_reagents')
stoprow = startrow + reagent_interface_step * num_reagents
result = [column + str(i) for i in range(startrow, stoprow, reagent_interface_step)]
return result
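# Sketch of the addressing pattern (numbers are hypothetical; the real values
# come from the lab config): startrow=9, maxreagentchemicals=4 (step 5) and
# max_reagents=2 give ["B9", "B14"] for column "B":
#   >>> [f"B{i}" for i in range(9, 9 + 5 * 2, 5)]
#   ['B9', 'B14']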
def get_reagent_target_volumes(erdf, deadvolume):
"""Target volumes for reagent preparation as dictionary"""
reagent_target_volumes = {}
for reagent in erdf.columns:
reagent_volume = erdf[reagent].sum() + deadvolume
reagentname = reagent.split(' ')[0]
reagent_target_volumes[reagentname] = reagent_volume
return reagent_target_volumes
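# Example (sketch, with a hypothetical experiment reagent dataframe in microliters):
#   >>> erdf = pd.DataFrame({"Reagent1 (ul)": [100, 150], "Reagent2 (ul)": [200, 50]})
#   >>> get_reagent_target_volumes(erdf, deadvolume=300)
#   {'Reagent1': 550, 'Reagent2': 550}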
def build_nominals_df(rdict,
chemicalnamedf,
target_final_volume,
liquidlist,
maxreagentchemicals,
chemdf):
    ''' Calculate the mass of each chemical and return a dataframe.
TODO: write out nominal molarity to google sheets, see issue#52
:param chemdf: Chemical data frame from google drive.
:returns: a dataframe sized for export to version 2.x interface
'''
nominalsdf = pd.DataFrame()
itemcount = 1
chemicalnamedf.sort_index(inplace=True)
for index, row in chemicalnamedf.iterrows():
reagentname = row['reagentnames']
chemabbr = row['chemabbr']
if row['chemabbr'] == 'Final Volume = ':
formulavollist = []
formulavol = 'null'
itemcount = 1
finalvolindex = index
pass
else:
# stock solutions should be summed for final total volume
if chemabbr in liquidlist or chemabbr == 'FAH': # todo dejank
formulavol = (target_final_volume[reagentname]/1000).round(2)
formulavollist.append(formulavol)
nominalsdf.loc[index, "nominal_amount"] = formulavol
nominalsdf.loc[index, "Unit"] = 'milliliter'
itemcount+=1
elif chemabbr == 'null':
nominalsdf.loc[index, "nominal_amount"] = 'null'
nominalsdf.loc[index, "Unit"] = 'null'
nominalsdf.loc[index, "actualsnull"] = 'null'
itemcount+=1
pass
else:
#calculate reagent amounts from formula
reagentnum = str(reagentname.split('t')[1])
nominalamount = (target_final_volume[reagentname]/1000/1000 * \
rdict[reagentnum].concs['conc_item%s' %(itemcount)] * \
float(chemdf.loc["%s" %chemabbr, "Molecular Weight (g/mol)"])
).round(2)
nominalsdf.loc[index, "nominal_amount"] = nominalamount
nominalsdf.loc[index, "Unit"] = 'gram'
itemcount+=1
if itemcount == (maxreagentchemicals+1):
if len(formulavollist) > 0:
nominalsdf.loc[finalvolindex, "nominal_amount"] = sum(formulavollist)
nominalsdf.loc[finalvolindex, "Unit"] = 'milliliter'
else:
nominalsdf.loc[finalvolindex, "nominal_amount"] = formulavol
nominalsdf.loc[finalvolindex, "Unit"] = 'null'
nominalsdf.loc[finalvolindex, "actualsnull"] = 'null'
modlog.info((reagentname, "formula calculation complete"))
nominalsdf.sort_index(inplace=True)
return nominalsdf
def build_nominals_v1(rdict,
chemicalnamedf,
target_final_volume_dict,
liquidlist,
maxreagentchemicals,
chemdf):
    ''' Calculate the mass of each chemical and return a dataframe.
Uses model 1 of the density calculation to get a better approximation
for the contribution of solids to the final volume
TODO: write out nominal molarity to google sheets, see issue#52
TODO: ensure column integrity of read in chemical dataframe
:param chemdf: Chemical data frame from google drive.
:returns: a dataframe sized for export to version 2.x interface
'''
nominalsdf = pd.DataFrame()
itemcount = 1
chemicalnamedf.sort_index(inplace=True)
reagentname = []
for index, row in chemicalnamedf.iterrows():
reagent_name_updater = row['reagentnames']
if reagentname != reagent_name_updater:
reagentname = row['reagentnames']
reagentnum = str(reagentname.split('t')[1])
total_remaining_volume = target_final_volume_dict[reagentname] / 1000 / 1000
target_final_volume = target_final_volume_dict[reagentname] / 1000 / 1000
chemabbr = row['chemabbr']
# First iteration should always lead with this string (formatting)
if row['chemabbr'] == 'Final Volume = ':
formulavollist = []
formulavol = 'null'
itemcount = 1
finalvolindex = index
else:
# stock solutions should be summed for final total volume
# Returns nulls to the dataframe where no chemicals / information is expected
if chemabbr == 'null':
nominalsdf.loc[index, "nominal_amount"] = 'null'
nominalsdf.loc[index, "Unit"] = 'null'
nominalsdf.loc[index, "actualsnull"] = 'null'
itemcount+=1
pass
else:
# If the chemical being considered is the final the remaining volume is assigned
if rdict[reagentnum].chemicals[-1] == chemabbr:
nominalsdf.loc[index, "nominal_amount"] = (total_remaining_volume * 1000).round(2)
nominalsdf.loc[index, "Unit"] = 'milliliter'
itemcount+=1
elif chemabbr in liquidlist or chemabbr == 'FAH': # todo dejank
myvariable = rdict[reagentnum].concs['conc_item%s' %(itemcount)]
needed_mol = target_final_volume * rdict[reagentnum].concs['conc_item%s' %(itemcount)]
chemical_volume = needed_mol * float(chemdf.loc["%s" %chemabbr, "Molecular Weight (g/mol)"])\
/ float(chemdf.loc["%s" %chemabbr, "Density (g/mL)"])
total_remaining_volume = total_remaining_volume - chemical_volume / 1000
nominalsdf.loc[index, "nominal_amount"] = chemical_volume.round(2)
nominalsdf.loc[index, "Unit"] = 'milliliter'
itemcount+=1
else:
myvariable = rdict[reagentnum].concs['conc_item%s' %(itemcount)]
needed_mol = target_final_volume * (rdict[reagentnum].concs['conc_item%s' %(itemcount)])
chemical_mass_g = needed_mol * float(chemdf.loc["%s" %chemabbr, "Molecular Weight (g/mol)"])
chemical_volume = needed_mol * float(chemdf.loc["%s" %chemabbr, "Molecular Weight (g/mol)"])\
/ float(chemdf.loc["%s" %chemabbr, "Density (g/mL)"])
total_remaining_volume = total_remaining_volume - chemical_volume / 1000
nominalsdf.loc[index, "nominal_amount"] = chemical_mass_g.round(2)
nominalsdf.loc[index, "Unit"] = 'gram'
itemcount+=1
if itemcount == (maxreagentchemicals+1):
if total_remaining_volume == target_final_volume:
nominalsdf.loc[finalvolindex, "nominal_amount"] = 'null'
nominalsdf.loc[finalvolindex, "Unit"] = 'null'
nominalsdf.loc[finalvolindex, "actualsnull"] = 'null'
else:
nominalsdf.loc[finalvolindex, "nominal_amount"] = (target_final_volume * 1000).round(2)
nominalsdf.loc[finalvolindex, "Unit"] = 'milliliter'
modlog.info((reagentname, "formula calculation complete"))
nominalsdf.sort_index(inplace=True)
return nominalsdf
def build_chemical_names_df(rdict, maxreagentchemicals):
"""generates a dataframe of chemical names for reagent interface
:param chemdf: Chemical data frame from google drive.
:returns: a dataframe sized for export to version:: 3.0 interface
"""
chemicalnamelist = []
reagentnamelist = []
holdreagentnum = 1
for reagentnum in sorted(rdict.keys()):
#ensure any reagents not used have placeholders
while int(reagentnum) > holdreagentnum:
chemicalnamelist.append('Final Volume = ')
chemicalnamelist.extend(['null'] * maxreagentchemicals)
maxinterfaceslots = maxreagentchemicals + 1
reagentnamelist.extend(['Reagent%s' %holdreagentnum] * maxinterfaceslots)
holdreagentnum += 1
else:
count = 0
holdreagentnum = int(reagentnum)+1
chemicalnamelist.append('Final Volume = ')
reagentnamelist.append('Reagent%s' %reagentnum)
for chemical in rdict[reagentnum].chemicals:
chemicalnamelist.append(chemical)
reagentnamelist.append('Reagent%s' %reagentnum)
count += 1
while count < maxreagentchemicals:
chemicalnamelist.append('null')
reagentnamelist.append('Reagent%s' %reagentnum)
count += 1
chemicalnamedf = pd.DataFrame(chemicalnamelist, columns=['chemabbr'])
reagentnamedf = | pd.DataFrame(reagentnamelist, columns=['reagentnames']) | pandas.DataFrame |
import pandas as pd
import pytest
from sklearn.datasets import make_classification
from sklearn.ensemble import RandomForestClassifier
from sklearn.exceptions import NotFittedError
from sklearn.model_selection import KFold, StratifiedKFold
from feature_engine.selection import SmartCorrelatedSelection
@pytest.fixture(scope="module")
def df_single():
# create array with 4 correlated features and 2 independent ones
X, y = make_classification(
n_samples=1000,
n_features=6,
n_redundant=2,
n_clusters_per_class=1,
weights=[0.50],
class_sep=2,
random_state=1,
)
# trasform array into pandas df
colnames = ["var_" + str(i) for i in range(6)]
X = pd.DataFrame(X, columns=colnames)
return X, y
def test_model_performance_single_corr_group(df_single):
X, y = df_single
transformer = SmartCorrelatedSelection(
variables=None,
method="pearson",
threshold=0.8,
missing_values="raise",
selection_method="model_performance",
estimator=RandomForestClassifier(n_estimators=10, random_state=1),
scoring="roc_auc",
cv=3,
)
Xt = transformer.fit_transform(X, y)
# expected result
df = X[["var_0", "var_2", "var_3", "var_4", "var_5"]].copy()
# test init params
assert transformer.method == "pearson"
assert transformer.threshold == 0.8
assert transformer.variables is None
assert transformer.missing_values == "raise"
assert transformer.selection_method == "model_performance"
assert transformer.scoring == "roc_auc"
assert transformer.cv == 3
# test fit attrs
assert transformer.variables_ == [
"var_0",
"var_1",
"var_2",
"var_3",
"var_4",
"var_5",
]
assert transformer.correlated_feature_sets_ == [{"var_1", "var_2"}]
assert transformer.features_to_drop_ == ["var_1"]
# test transform output
pd.testing.assert_frame_equal(Xt, df)
def test_model_performance_2_correlated_groups(df_test):
X, y = df_test
transformer = SmartCorrelatedSelection(
variables=None,
method="pearson",
threshold=0.8,
missing_values="raise",
selection_method="model_performance",
estimator=RandomForestClassifier(n_estimators=10, random_state=1),
scoring="roc_auc",
cv=3,
)
Xt = transformer.fit_transform(X, y)
# expected result
df = X[
["var_0", "var_1", "var_2", "var_3", "var_5", "var_7", "var_10", "var_11"]
].copy()
# test fit attrs
assert transformer.correlated_feature_sets_ == [
{"var_0", "var_8"},
{"var_4", "var_6", "var_7", "var_9"},
]
assert transformer.features_to_drop_ == [
"var_4",
"var_6",
"var_8",
"var_9",
]
# test transform output
pd.testing.assert_frame_equal(Xt, df)
def test_error_if_select_model_performance_and_y_is_none(df_single):
X, y = df_single
transformer = SmartCorrelatedSelection(
variables=None,
method="pearson",
threshold=0.8,
missing_values="raise",
selection_method="model_performance",
estimator=RandomForestClassifier(n_estimators=10, random_state=1),
scoring="roc_auc",
cv=3,
)
with pytest.raises(ValueError):
transformer.fit(X)
def test_variance_2_correlated_groups(df_test):
X, y = df_test
transformer = SmartCorrelatedSelection(
variables=None,
method="pearson",
threshold=0.8,
missing_values="raise",
selection_method="variance",
estimator=None,
)
Xt = transformer.fit_transform(X, y)
# expected result
df = X[
["var_1", "var_2", "var_3", "var_5", "var_7", "var_8", "var_10", "var_11"]
].copy()
assert transformer.features_to_drop_ == [
"var_0",
"var_4",
"var_6",
"var_9",
]
# test transform output
pd.testing.assert_frame_equal(Xt, df)
def test_cardinality_2_correlated_groups(df_test):
X, y = df_test
X[["var_0", "var_6", "var_7", "var_9"]] = X[
["var_0", "var_6", "var_7", "var_9"]
].astype(int)
transformer = SmartCorrelatedSelection(
variables=None,
method="pearson",
threshold=0.8,
missing_values="raise",
selection_method="cardinality",
estimator=None,
)
Xt = transformer.fit_transform(X, y)
# expected result
df = X[
["var_1", "var_2", "var_3", "var_4", "var_5", "var_8", "var_10", "var_11"]
].copy()
assert transformer.features_to_drop_ == [
"var_0",
"var_6",
"var_7",
"var_9",
]
# test transform output
pd.testing.assert_frame_equal(Xt, df)
def test_automatic_variable_selection(df_test):
X, y = df_test
X[["var_0", "var_6", "var_7", "var_9"]] = X[
["var_0", "var_6", "var_7", "var_9"]
].astype(int)
# add 2 additional categorical variables, these should not be evaluated by
# the selector
X["cat_1"] = "cat1"
X["cat_2"] = "cat2"
transformer = SmartCorrelatedSelection(
variables=None,
method="pearson",
threshold=0.8,
missing_values="raise",
selection_method="cardinality",
estimator=None,
)
Xt = transformer.fit_transform(X, y)
# expected result
df = X[
[
"var_1",
"var_2",
"var_3",
"var_4",
"var_5",
"var_8",
"var_10",
"var_11",
"cat_1",
"cat_2",
]
].copy()
assert transformer.features_to_drop_ == [
"var_0",
"var_6",
"var_7",
"var_9",
]
# test transform output
| pd.testing.assert_frame_equal(Xt, df) | pandas.testing.assert_frame_equal |
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import io
import os
import pkgutil
from datetime import datetime
from typing import cast, List
from unittest import TestCase
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import pytest
import pytz
from dateutil import parser
from dateutil.relativedelta import relativedelta
from kats.compat.pandas import (
assert_frame_equal,
assert_index_equal,
assert_series_equal,
)
from kats.consts import (
DEFAULT_TIME_NAME,
DEFAULT_VALUE_NAME,
TimeSeriesData,
TSIterator,
)
def load_data(file_name: str) -> pd.DataFrame:
ROOT = "kats"
if "kats" in os.getcwd().lower():
path = "data/"
else:
path = "kats/data/"
data_object = pkgutil.get_data(ROOT, path + file_name)
# pyre-fixme[6]: For 1st param expected `bytes` but got `Optional[bytes]`.
return pd.read_csv(io.BytesIO(data_object), encoding="utf8")
TIME_COL_NAME = "ds"
VALUE_COL_NAME = "y"
MULTIVAR_VALUE_DF_COLS: List[str] = [VALUE_COL_NAME, VALUE_COL_NAME + "_1"]
EMPTY_DF = pd.DataFrame()
EMPTY_TIME_SERIES = | pd.Series([], name=DEFAULT_TIME_NAME, dtype=float) | pandas.Series |
from abc import ABC
import pandas as pd
import numpy as np
import statsmodels.api as sm
from statsmodels.tsa.statespace.sarimax import SARIMAX
# Construct the model
class StateSpaceModel(sm.tsa.statespace.MLEModel, ABC):
def __init__(self, endog, exog, factors_x, factors_y):
# Initialize the state space model
endog_aug = pd.concat([exog, endog.to_frame()], axis=1)
k_states = k_posdef = factors_x + factors_y
super(StateSpaceModel, self).__init__(endog_aug, k_states=k_states, k_posdef=k_posdef,
initialization='approximate_diffuse')
self._covariates = endog_aug.columns
self._factors_x = factors_x
self._factors_y = factors_y
self._param_cov_state_f_idx = self._params_cov_state_z_idx = self._params_cov_obs_idx = None
self._params_phi_1_idx = self._params_phi_23_idx = self._params_phi_23_idx = None
self._params_a_1_idx = self._params_a_2_idx = None
# Setup the fixed components of the state space representation
transition_f = np.hstack((np.eye(factors_x), np.zeros((factors_x, factors_y))))
transition_z = np.hstack((np.zeros((factors_y, factors_x)), np.eye(factors_y)))
transition = np.vstack((transition_f, transition_z))
dims_x = endog_aug.shape[1] - 1
dims_y = 1
self._dims_x = dims_x
self._dims_y = dims_y
# Assume [x, y]'
design_x = np.hstack((np.ones((dims_x, factors_x)), np.zeros((dims_x, factors_y))))
design_y = np.hstack((np.zeros((dims_y, factors_x)), np.ones((dims_y, factors_y))))
design = np.vstack((design_x, design_y))
self.ssm['design'] = design.reshape((dims_x + 1, k_states, 1))
self.ssm['transition'] = transition.reshape((k_states, k_states, 1))
self.ssm['selection'] = np.eye(k_states)
self.ssm['obs_intercept'] = np.zeros(dims_x + dims_y).reshape(-1, 1)
# Cache some indices
self._state_cov_idx = np.diag_indices(k_posdef)
self._obs_cov_idx = np.diag_indices(dims_x + dims_y)
# grid_transition_f = (np.repeat(np.arange(factors_x), factors_x),
# np.tile(np.arange(factors_x), factors_x))
grid_transition_f = np.diag_indices(factors_x)
grid_transition_z = (np.repeat(np.arange(factors_x, k_states), k_states),
np.tile(np.arange(k_states), factors_y))
self._transition_f_idx = grid_transition_f
self._transition_z_idx = grid_transition_z
grid_design_x = (np.repeat(np.arange(dims_x), factors_x),
np.tile(np.arange(factors_x), dims_x))
grid_design_y = (np.repeat(np.arange(dims_x, dims_x + dims_y), factors_y),
np.tile(np.arange(factors_x, k_states), dims_y))
self._design_x_idx = grid_design_x
self._design_y_idx = grid_design_y
self.init_param_indx()
@staticmethod
def get_position(idx, i, row_offset=0, col_offset=0):
return idx[0][i]-row_offset, idx[1][i]-col_offset
def init_param_indx(self):
c = 0
params_cov_obs = ['sigma2.%s' % i for i in self._covariates]
self._params_cov_obs_idx = (c, c + len(params_cov_obs))
c += len(params_cov_obs)
params_cov_state_f = ['sigma2.f.%i' % i for i in range(self._factors_x)]
self._param_cov_state_f_idx = (c, c + len(params_cov_state_f))
c += len(params_cov_state_f)
params_cov_state_z = ['sigma2.z.%i' % i for i in range(self._factors_y)]
self._params_cov_state_z_idx = (c, c + len(params_cov_state_z))
c += len(params_cov_state_z)
params_cov = params_cov_state_f + params_cov_state_z + params_cov_obs
params_phi_1 = ['phi.1.%i%i' % self.get_position(self._transition_f_idx, i) for i in range(len(self._transition_f_idx[0]))]
self._params_phi_1_idx = (c, c+len(params_phi_1))
c += len(params_phi_1)
params_phi_23 = ['phi.23.%i%i' % self.get_position(self._transition_z_idx, i,
row_offset=self._factors_x) for i in range(len(self._transition_z_idx[0]))]
self._params_phi_23_idx = (c, c + len(params_phi_23))
c += len(params_phi_23)
params_phi = params_phi_1 + params_phi_23
params_a_1 = ['a.1.%i%i' % self.get_position(self._design_x_idx, i) for i in range(len(self._design_x_idx[0]))]
self._params_a_1_idx = (c, c+len(params_a_1))
c += len(params_a_1)
params_a_2 = ['a.2.%i%i' % self.get_position(self._design_y_idx, i,
row_offset=self._dims_x,
col_offset=self._factors_x) for i in range(len(self._design_y_idx[0]))]
self._params_a_2_idx = (c, c+len(params_a_2))
c += len(params_a_2)
params_a = params_a_1 + params_a_2
return params_cov + params_phi + params_a
@property
def param_names(self):
return self.init_param_indx()
# Describe how parameters enter the model
def update(self, params, *args, **kwargs):
params = super(StateSpaceModel, self).update(params, *args, **kwargs)
# Observation covariance
self.ssm[('obs_cov',) + self._obs_cov_idx] = params[self._params_cov_obs_idx[0]:self._params_cov_obs_idx[1]]
# State covariance
self.ssm[('state_cov',) + self._state_cov_idx] = params[self._param_cov_state_f_idx[0]:self._params_cov_state_z_idx[1]]
# Transition matrix
self.ssm[('transition',) + self._transition_f_idx] = params[self._params_phi_1_idx[0]:self._params_phi_1_idx[1]]
self.ssm[('transition',) + self._transition_z_idx] = params[self._params_phi_23_idx[0]:self._params_phi_23_idx[1]]
# Design matrix
self.ssm[('design',) + self._design_x_idx] = params[self._params_a_1_idx[0]:self._params_a_1_idx[1]]
self.ssm[('design',) + self._design_y_idx] = params[self._params_a_2_idx[0]:self._params_a_2_idx[1]]
# Specify start parameters and parameter names
@property
def start_params(self):
design, obs_cov, state_cov, transition = self.generate_start_matrices()
params_state_cov = state_cov[self._state_cov_idx]
params_obs_cov = obs_cov[self._obs_cov_idx]
params_phi = np.concatenate((transition[self._transition_f_idx],
transition[self._transition_z_idx]), axis=0)
params_a = np.concatenate((design[self._design_x_idx],
design[self._design_y_idx]), axis=0)
return np.concatenate((params_obs_cov, params_state_cov, params_phi, params_a))
def generate_start_matrices(self):
_exog = pd.DataFrame(self.endog[:, :-1], columns=self._covariates[:-1]).interpolate().fillna(0)
_endog = pd.Series(self.endog[:, -1], name=self._covariates[-1]).interpolate().fillna(0)
cov = _exog.cov()
w, v = np.linalg.eig(cov)
factors = pd.DataFrame(np.dot(_exog, v[:, :self._factors_x]), index=_exog.index)
_model = SARIMAX(endog=_endog, exog=factors, order=(self._factors_y, 0, 0))
res = _model.fit(disp=False, maxiter=100)
params_arx = res.params
phi1 = np.eye(self._factors_x)
factors_coeff = params_arx.values[:self._factors_x].reshape(1, -1)
ar_coeff = params_arx.values[self._factors_x:-1].reshape(1, -1)
phi2 = np.vstack([factors_coeff, np.zeros((self._factors_y - 1, self._factors_x))])
phi3 = np.vstack([ar_coeff, np.eye(self._factors_y)[:-1, :]])
transition = np.vstack([np.hstack([phi1, np.zeros((self._factors_x, self._factors_y))]),
np.hstack([phi2, phi3])])
a1 = v.T[:, :self._factors_x]
a2 = np.eye(self._dims_y, self._factors_y)
design_x = np.hstack([a1, np.zeros((self._dims_x, self._factors_y))])
design_y = np.hstack([np.zeros((self._dims_y, self._factors_x)), a2])
design = np.vstack([design_x, design_y])
state_cov = np.eye(self.k_states)
obs_cov = np.eye(len(self._covariates))
obs_cov[-1, -1] = params_arx.values[-1]
return design, obs_cov, state_cov, transition
def transform_params(self, unconstrained):
constrained = unconstrained.copy()
for i1, i2 in [self._param_cov_state_f_idx, self._params_cov_state_z_idx, self._params_cov_obs_idx]:
constrained[i1:i2] = unconstrained[i1:i2] ** 2
return constrained
def untransform_params(self, constrained):
unconstrained = constrained.copy()
for i1, i2 in [self._param_cov_state_f_idx, self._params_cov_state_z_idx, self._params_cov_obs_idx]:
unconstrained[i1:i2] = constrained[i1:i2] ** 0.5
return unconstrained
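    # Sketch of the variance reparameterisation used above: the optimizer works
    # with unconstrained values whose squares are the variances, so e.g. an
    # unconstrained 0.5 is stored as a variance of 0.25 by transform_params and
    # recovered as 0.5 by untransform_params (indices here are illustrative):
    #   >>> np.array([0.5, -0.3]) ** 2
    #   array([0.25, 0.09])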
if __name__ == "__main__":
import os
from src.d01_data.dengue_data_api import DengueDataApi
from src.d04_modeling.abstract_sm import AbstractSM
os.chdir('../')
dda = DengueDataApi(interpolate=False)
x1, x2, y1, y2 = dda.split_data(random=False)
factors_x = 3
factors_y = 3
abstract_model = AbstractSM(x_train=x1, y_train=y1, bias=False)
city = 'sj'
endog, exog = dda.format_data(x1.loc[city].copy(), y1.loc[city].copy(), interpolate=False)
endog_mean = endog.mean(axis=0)
endog_std = endog.std(axis=0)
exog_mean = exog.mean(axis=0)
exog_std = exog.std(axis=0)
endog = (endog - endog_mean) / endog_std
exog = (exog - exog_mean) / exog_std
print("-------------- DFM Model --------------")
model_dfmq = sm.tsa.DynamicFactorMQ(exog,
factors=factors_x,
factor_orders=1,
idiosyncratic_ar1=False)
results_dfmq = model_dfmq.fit(method='em')
state_names = pd.Index(['f%i' % i for i in range(factors_x)])
transition_df = pd.DataFrame(model_dfmq.ssm['transition'], index=state_names, columns=state_names)
print(transition_df.round(4).to_string())
design_df = pd.DataFrame(model_dfmq.ssm['design'],
index=exog.columns,
columns=state_names)
print(design_df.round(4).to_string())
print("-------------- SSM Model --------------")
model = StateSpaceModel(endog=endog, exog=exog, factors_x=factors_x, factors_y=factors_y)
model.update(model.start_params)
state_names = pd.Index(['f%i' % i for i in range(factors_x)]).append(pd.Index(['z%i' % i for i in range(factors_y)]))
transition_df = | pd.DataFrame(model.ssm['transition'], index=state_names, columns=state_names) | pandas.DataFrame |
import pandas as pd # Version 1.1.1
# %%
region = ['Vermont', 'New Hampshire', 'Maine', 'Rhode Island', 'Massachusetts',
'Connecticut', 'New Jersey', 'Pennsylvania', 'Ohio', 'Maryland',
'District of Columbia', 'Delaware', 'Virginia', 'West Virginia',
'New York']
undetected_factor = 2.4
green = 1
yellow = 2
red = 3
# %% Defines the active case calculation
prop = [1, 0.94594234, 0.8585381, 0.76322904, 0.66938185,
0.58139261, 0.50124929, 0.42963663, 0.36651186, 0.31143254,
0.26375154, 0.22273485, 0.18763259, 0.15772068, 0.1323241,
0.11082822, 0.09268291, 0.077402, 0.06456005, 0.0537877,
0.04476636, 0.03722264, 0.03092299, 0.02566868, 0.02129114,
0.0176478, 0.01461838, 0.01210161, 0.01001242, 0.00827947]
prop = prop[::-1]
def active_cases(x):
tmp = []
for i, j in zip(prop, x):
tmp.append(i * j)
return sum(tmp) * undetected_factor
def status_num(x):
if x <= 400:
return green
elif x <= 800:
return yellow
else:
return red
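# Worked sketch (hypothetical 30-day series of daily new cases): a constant 10
# new cases per day yields roughly 2.4 * 10 * sum(prop) active cases, and the
# status thresholds split at 400 and 800:
#   >>> status_num(350), status_num(600), status_num(900)
#   (1, 2, 3)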
# %% Takes population values from JHU, which in turn come from US Census
# estimates for 2019
pops_url = 'https://raw.githubusercontent.com/CSSEGISandData/COVID-19/master/'\
'csse_covid_19_data/csse_covid_19_time_series/'\
'time_series_covid19_deaths_US.csv'
county_pops = pd.read_csv(pops_url)
county_pops = county_pops[county_pops.Province_State.isin(region)]
county_pops = county_pops[['FIPS', 'Population']]
county_pops.FIPS = county_pops.FIPS.astype('str')
county_pops.FIPS = county_pops.FIPS.str[:-2]
county_pops.loc[county_pops.FIPS.str.len() == 4, 'FIPS'] =\
'0' + county_pops.loc[county_pops.FIPS.str.len() == 4, 'FIPS']
# %% Takes county level infection data from JHU
hopkinsurl = 'https://raw.githubusercontent.com/CSSEGISandData/'\
'COVID-19/master/csse_covid_19_data/csse_covid_19_time_series/'\
'time_series_covid19_confirmed_US.csv'
hopkins = | pd.io.parsers.read_csv(hopkinsurl, dtype={'FIPS': 'str'}) | pandas.io.parsers.read_csv |
import pandas as pd
#importing all the data from CSV files
master_df = pd.read_csv('People.csv', usecols=['playerID', 'nameFirst', 'nameLast', 'bats', 'throws', 'debut', 'finalGame'])
fielding_df = pd.read_csv('Fielding.csv',usecols=['playerID','yearID','stint','teamID','lgID','POS','G','GS','InnOuts','PO','A','E','DP'])
batting_df = pd.read_csv('Batting.csv')
awards_df = pd.read_csv('AwardsPlayers.csv', usecols=['playerID','awardID','yearID'])
allstar_df = pd.read_csv('AllstarFull.csv', usecols=['playerID','yearID'])
hof_df = pd.read_csv('HallOfFame.csv',usecols=['playerID','yearid','votedBy','needed_note','inducted','category'])
appearances_df = pd.read_csv('Appearances.csv')
##################################DATA CLEANING AND PREPROCESSING##################################
#start w/ batting_df organizing
player_stats = {}
years_played={}
# Create dictionaries for player stats and years played from `batting_df`
for i, row in batting_df.iterrows():
playerID = row['playerID']
if playerID in player_stats:
player_stats[playerID]['G'] = player_stats[playerID]['G'] + row['G']
player_stats[playerID]['AB'] = player_stats[playerID]['AB'] + row['AB']
player_stats[playerID]['R'] = player_stats[playerID]['R'] + row['R']
player_stats[playerID]['H'] = player_stats[playerID]['H'] + row['H']
player_stats[playerID]['2B'] = player_stats[playerID]['2B'] + row['2B']
player_stats[playerID]['3B'] = player_stats[playerID]['3B'] + row['3B']
player_stats[playerID]['HR'] = player_stats[playerID]['HR'] + row['HR']
player_stats[playerID]['RBI'] = player_stats[playerID]['RBI'] + row['RBI']
player_stats[playerID]['SB'] = player_stats[playerID]['SB'] + row['SB']
player_stats[playerID]['BB'] = player_stats[playerID]['BB'] + row['BB']
player_stats[playerID]['SO'] = player_stats[playerID]['SO'] + row['SO']
player_stats[playerID]['IBB'] = player_stats[playerID]['IBB'] + row['IBB']
player_stats[playerID]['HBP'] = player_stats[playerID]['HBP'] + row['HBP']
player_stats[playerID]['SH'] = player_stats[playerID]['SH'] + row['SH']
player_stats[playerID]['SF'] = player_stats[playerID]['SF'] + row['SF']
years_played[playerID].append(row['yearID'])
else:
player_stats[playerID] = {}
player_stats[playerID]['G'] = row['G']
player_stats[playerID]['AB'] = row['AB']
player_stats[playerID]['R'] = row['R']
player_stats[playerID]['H'] = row['H']
player_stats[playerID]['2B'] = row['2B']
player_stats[playerID]['3B'] = row['3B']
player_stats[playerID]['HR'] = row['HR']
player_stats[playerID]['RBI'] = row['RBI']
player_stats[playerID]['SB'] = row['SB']
player_stats[playerID]['BB'] = row['BB']
player_stats[playerID]['SO'] = row['SO']
player_stats[playerID]['IBB'] = row['IBB']
player_stats[playerID]['HBP'] = row['HBP']
player_stats[playerID]['SH'] = row['SH']
player_stats[playerID]['SF'] = row['SF']
years_played[playerID] = []
years_played[playerID].append(row['yearID'])
# Iterate through `years_played` and add the number of years played to `player_stats`
for k, v in years_played.items():
player_stats[k]['Years_Played'] = len(list(set(v)))
#time for fielding_df organizing
fielder_list=[]
for i, row in fielding_df.iterrows():
playerID = row['playerID']
Gf = row['G']
GSf = row['GS']
POf = row['PO']
Af = row['A']
Ef = row['E']
DPf = row['DP']
if playerID in player_stats and playerID in fielder_list:
player_stats[playerID]['Gf'] = player_stats[playerID]['Gf'] + Gf
player_stats[playerID]['GSf'] = player_stats[playerID]['GSf'] + GSf
player_stats[playerID]['POf'] = player_stats[playerID]['POf'] + POf
player_stats[playerID]['Af'] = player_stats[playerID]['Af'] + Af
player_stats[playerID]['Ef'] = player_stats[playerID]['Ef'] + Ef
player_stats[playerID]['DPf'] = player_stats[playerID]['DPf'] + DPf
else:
fielder_list.append(playerID)
player_stats[playerID]['Gf'] = Gf
player_stats[playerID]['GSf'] = GSf
player_stats[playerID]['POf'] = POf
player_stats[playerID]['Af'] = Af
player_stats[playerID]['Ef'] = Ef
player_stats[playerID]['DPf'] = DPf
#time for awards_df organizing oooof
#Dataframes for each award
mvp = awards_df[awards_df['awardID'] == 'Most Valuable Player']
roy = awards_df[awards_df['awardID'] == 'Rookie of the Year']
gg = awards_df[awards_df['awardID'] == 'Gold Glove']
ss = awards_df[awards_df['awardID'] == 'Silver Slugger']
ws_mvp = awards_df[awards_df['awardID'] == 'World Series MVP']
# Include each DataFrame in `awards_list`
awards_list = [mvp, roy, gg, ss, ws_mvp]
# Initialize lists for each of the above DataFrames
mvp_list = []
roy_list = []
gg_list = []
ss_list = []
ws_mvp_list = []
# Include each of the above lists in `lists`
lists = [mvp_list,roy_list,gg_list,ss_list,ws_mvp_list] #lists[index] is yet another list
# Add a count for each award for each player in `player_stats`
for index, v in enumerate(awards_list):
for i, row in v.iterrows():
playerID = row['playerID']
award = row['awardID']
if playerID in player_stats and playerID in lists[index]: # if the player's id is both in player stats and lists[index]
player_stats[playerID][award] += 1
else:
lists[index].append(playerID)
player_stats[playerID][award] = 1
#organize allstar_df
allstar_list = []
for i,row in allstar_df.iterrows():
playerID = row['playerID'] # put into the thing
if playerID in player_stats and playerID in allstar_list: # if the player is already in the list, add an award
player_stats[playerID]['AS_games'] += 1
else:
allstar_list.append(playerID) #if not already in the list, add to list and start all star games counter
player_stats[playerID]['AS_games'] = 1
#organize hof_df
hof_df = hof_df[(hof_df['inducted'] == 'Y') & (hof_df['category'] == 'Player')] # filter `hof_df` to include only instances where a player was inducted into the Hall of Fame
for i, row in hof_df.iterrows(): # Indicate which players in `player_stats` were inducted into the Hall of Fame
playerID = row['playerID']
if playerID in player_stats:
player_stats[playerID]['HoF'] = 1
player_stats[playerID]['votedBy'] = row['votedBy']
#convert player_stats into a dataframe
stats_df = pd.DataFrame.from_dict(player_stats, orient='index')
# Add a column for playerID from the `stats_df` index
stats_df['playerID'] = stats_df.index
#join stats_df and master_df
master_df = master_df.join(stats_df, on = 'playerID', how ='inner', rsuffix = 'mstr')
#organize appearances.df
#jk there's no organizing here
#need to collect info about what time period the player played in
pos_dict = {}
# Iterate through `appearances_df`
# Add a count for the number of appearances for each player at each position
# Also add a count for the number of games played for each player in each era.
for i, row in appearances_df.iterrows():
ID = row['playerID']
year = row['yearID']
if ID in pos_dict:
pos_dict[ID]['G_all'] = pos_dict[ID]['G_all'] + row['G_all']
pos_dict[ID]['G_p'] = pos_dict[ID]['G_p'] + row['G_p']
pos_dict[ID]['G_c'] = pos_dict[ID]['G_c'] + row['G_c']
pos_dict[ID]['G_1b'] = pos_dict[ID]['G_1b'] + row['G_1b']
pos_dict[ID]['G_2b'] = pos_dict[ID]['G_2b'] + row['G_2b']
pos_dict[ID]['G_3b'] = pos_dict[ID]['G_3b'] + row['G_3b']
pos_dict[ID]['G_ss'] = pos_dict[ID]['G_ss'] + row['G_ss']
pos_dict[ID]['G_lf'] = pos_dict[ID]['G_lf'] + row['G_lf']
pos_dict[ID]['G_cf'] = pos_dict[ID]['G_cf'] + row['G_cf']
pos_dict[ID]['G_rf'] = pos_dict[ID]['G_rf'] + row['G_rf']
pos_dict[ID]['G_of'] = pos_dict[ID]['G_of'] + row['G_of']
pos_dict[ID]['G_dh'] = pos_dict[ID]['G_dh'] + row['G_dh']
if year < 1920:
pos_dict[ID]['pre1920'] = pos_dict[ID]['pre1920'] + row['G_all']
elif year >= 1920 and year <= 1941:
pos_dict[ID]['1920-41'] = pos_dict[ID]['1920-41'] + row['G_all']
elif year >= 1942 and year <= 1945:
pos_dict[ID]['1942-45'] = pos_dict[ID]['1942-45'] + row['G_all']
elif year >= 1946 and year <= 1962:
pos_dict[ID]['1946-62'] = pos_dict[ID]['1946-62'] + row['G_all']
elif year >= 1963 and year <= 1976:
pos_dict[ID]['1963-76'] = pos_dict[ID]['1963-76'] + row['G_all']
elif year >= 1977 and year <= 1992:
pos_dict[ID]['1977-92'] = pos_dict[ID]['1977-92'] + row['G_all']
elif year >= 1993 and year <= 2009:
pos_dict[ID]['1993-2009'] = pos_dict[ID]['1993-2009'] + row['G_all']
elif year > 2009:
pos_dict[ID]['post2009'] = pos_dict[ID]['post2009'] + row['G_all']
else:
pos_dict[ID] = {}
pos_dict[ID]['G_all'] = row['G_all']
pos_dict[ID]['G_p'] = row['G_p']
pos_dict[ID]['G_c'] = row['G_c']
pos_dict[ID]['G_1b'] = row['G_1b']
pos_dict[ID]['G_2b'] = row['G_2b']
pos_dict[ID]['G_3b'] = row['G_3b']
pos_dict[ID]['G_ss'] = row['G_ss']
pos_dict[ID]['G_lf'] = row['G_lf']
pos_dict[ID]['G_cf'] = row['G_cf']
pos_dict[ID]['G_rf'] = row['G_rf']
pos_dict[ID]['G_of'] = row['G_of']
pos_dict[ID]['G_dh'] = row['G_dh']
pos_dict[ID]['pre1920'] = 0
pos_dict[ID]['1920-41'] = 0
pos_dict[ID]['1942-45'] = 0
pos_dict[ID]['1946-62'] = 0
pos_dict[ID]['1963-76'] = 0
pos_dict[ID]['1977-92'] = 0
pos_dict[ID]['1993-2009'] = 0
pos_dict[ID]['post2009'] = 0
if year < 1920:
pos_dict[ID]['pre1920'] = row['G_all']
elif year >= 1920 and year <= 1941:
pos_dict[ID]['1920-41'] = row['G_all']
elif year >= 1942 and year <= 1945:
pos_dict[ID]['1942-45'] = row['G_all']
elif year >= 1946 and year <= 1962:
pos_dict[ID]['1946-62'] = row['G_all']
elif year >= 1963 and year <= 1976:
pos_dict[ID]['1963-76'] = row['G_all']
elif year >= 1977 and year <= 1992:
pos_dict[ID]['1977-92'] = row['G_all']
elif year >= 1993 and year <= 2009:
pos_dict[ID]['1993-2009'] = row['G_all']
elif year > 2009:
pos_dict[ID]['post2009'] = row['G_all']
# Convert the `pos_dict` to a DataFrame
pos_df = | pd.DataFrame.from_dict(pos_dict, orient='index') | pandas.DataFrame.from_dict |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import pandas as pd
from datetime import datetime, timedelta
import numpy as np
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import matplotlib.ticker as tck
import matplotlib.font_manager as fm
import math as m
import matplotlib.dates as mdates
import netCDF4 as nc
from netCDF4 import Dataset
import itertools
import datetime
from scipy.stats import ks_2samp
import matplotlib.colors as colors
import matplotlib.cm as cm
import os
Path_save = '/home/nacorreasa/Maestria/Datos_Tesis/Arrays/'
Horizonte = 'Anio'   ##--> 'Anio' for the 2018 and 2019 data and 'EXP' for the data from the experiment onwards.
#------------------------------------------------------------------------------
# Code motivation ---------------------------------------------------------------
'Code to determine the frequency as well as the fractal dimension (in order to reveal relationships'
'between both concepts). The time horizon to work with is defined in the input above.'
'It also produces the scatter plot relating the reflectances to the radiation anomalies.'
#-----------------------------------------------------------------------------
# Paths for the fonts -----------------------------------------------------
prop = fm.FontProperties(fname='/home/nacorreasa/SIATA/Cod_Califi/AvenirLTStd-Heavy.otf' )
prop_1 = fm.FontProperties(fname='/home/nacorreasa/SIATA/Cod_Califi/AvenirLTStd-Book.otf')
prop_2 = fm.FontProperties(fname='/home/nacorreasa/SIATA/Cod_Califi/AvenirLTStd-Black.otf')
##########################################################################################
## --------------------LECTURA OF THE RADIATION ANOMALY DATA: READING THE RADIATION ANOMALY DATA--------------- ##
##########################################################################################
Anomal_df_975 = pd.read_csv('/home/nacorreasa/Maestria/Datos_Tesis/Arrays/df_AnomalRad_pix975_2018_2019.csv', sep=',')
Anomal_df_348 = pd.read_csv('/home/nacorreasa/Maestria/Datos_Tesis/Arrays/df_AnomalRad_pix348_2018_2019.csv', sep=',')
Anomal_df_350 = pd.read_csv('/home/nacorreasa/Maestria/Datos_Tesis/Arrays/df_AnomalRad_pix350_2018_2019.csv', sep=',')
Anomal_df_975['fecha_hora'] = pd.to_datetime(Anomal_df_975['fecha_hora'], format="%Y-%m-%d %H:%M", errors='coerce')
Anomal_df_975.index = Anomal_df_975['fecha_hora']
Anomal_df_975 = Anomal_df_975.drop(['fecha_hora'], axis=1)
Anomal_df_975 = Anomal_df_975.between_time('06:00', '18:00')                      ##--> Keep only the daytime-hour data
Anomal_df_975_h = Anomal_df_975.groupby(pd.Grouper(freq="H")).mean()
Anomal_df_350['fecha_hora'] = pd.to_datetime(Anomal_df_350['fecha_hora'], format="%Y-%m-%d %H:%M", errors='coerce')
Anomal_df_350.index = Anomal_df_350['fecha_hora']
Anomal_df_350 = Anomal_df_350.drop(['fecha_hora'], axis=1)
Anomal_df_350 = Anomal_df_350.between_time('06:00', '18:00')                      ##--> Keep only the daytime-hour data
Anomal_df_350_h = Anomal_df_350.groupby(pd.Grouper(freq="H")).mean()
Anomal_df_348['fecha_hora'] = pd.to_datetime(Anomal_df_348['fecha_hora'], format="%Y-%m-%d %H:%M", errors='coerce')
Anomal_df_348.index = Anomal_df_348['fecha_hora']
Anomal_df_348 = Anomal_df_348.drop(['fecha_hora'], axis=1)
Anomal_df_348 = Anomal_df_348.between_time('06:00', '18:00')                      ##--> Keep only the daytime-hour data
Anomal_df_348_h = Anomal_df_348.groupby(pd.Grouper(freq="H")).mean()
Anomal_df_348_h = Anomal_df_348_h.drop(['Radiacion_Med', 'radiacion',], axis=1)
Anomal_df_350_h = Anomal_df_350_h.drop(['Radiacion_Med', 'radiacion',], axis=1)
Anomal_df_975_h = Anomal_df_975_h.drop(['Radiacion_Med', 'radiacion',], axis=1)
Anomal_df_348_h = Anomal_df_348_h.loc[~Anomal_df_348_h.index.duplicated(keep='first')]
Anomal_df_350_h = Anomal_df_350_h.loc[~Anomal_df_350_h.index.duplicated(keep='first')]
Anomal_df_975_h = Anomal_df_975_h.loc[~Anomal_df_975_h.index.duplicated(keep='first')]
################################################################################################
## -------------------------------REFLECTANCE THRESHOLDS------------------------------ ##
################################################################################################
Umbral_up_348 = pd.read_table('/home/nacorreasa/Maestria/Datos_Tesis/Umbrales_Horarios/Umbral_Hourly_348_Nuba.csv', sep=',', header = None)
Umbral_down_348 = pd.read_table('/home/nacorreasa/Maestria/Datos_Tesis/Umbrales_Horarios/Umbral_Hourly_348_Desp.csv', sep=',', header = None)
Umbral_up_348.columns=['Hora', 'Umbral']
Umbral_up_348.index = Umbral_up_348['Hora']
Umbral_up_348 = Umbral_up_348.drop(['Hora'], axis=1)
Umbral_down_348.columns=['Hora', 'Umbral']
Umbral_down_348.index = Umbral_down_348['Hora']
Umbral_down_348 = Umbral_down_348.drop(['Hora'], axis=1)
Umbral_up_350 = pd.read_table('/home/nacorreasa/Maestria/Datos_Tesis/Umbrales_Horarios/Umbral_Hourly_350_Nuba.csv', sep=',', header = None)
Umbral_down_350 = pd.read_table('/home/nacorreasa/Maestria/Datos_Tesis/Umbrales_Horarios/Umbral_Hourly_350_Desp.csv', sep=',', header = None)
Umbral_up_350.columns=['Hora', 'Umbral']
Umbral_up_350.index = Umbral_up_350['Hora']
Umbral_up_350 = Umbral_up_350.drop(['Hora'], axis=1)
Umbral_down_350.columns=['Hora', 'Umbral']
Umbral_down_350.index = Umbral_down_350['Hora']
Umbral_down_350 = Umbral_down_350.drop(['Hora'], axis=1)
Umbral_up_975 = pd.read_table('/home/nacorreasa/Maestria/Datos_Tesis/Umbrales_Horarios/Umbral_Hourly_975_Nuba.csv', sep=',', header = None)
Umbral_down_975 = pd.read_table('/home/nacorreasa/Maestria/Datos_Tesis/Umbrales_Horarios/Umbral_Hourly_975_Desp.csv', sep=',', header = None)
Umbral_up_975.columns=['Hora', 'Umbral']
Umbral_up_975.index = Umbral_up_975['Hora']
Umbral_up_975 = Umbral_up_975.drop(['Hora'], axis=1)
Umbral_down_975.columns=['Hora', 'Umbral']
Umbral_down_975.index = Umbral_down_975['Hora']
Umbral_down_975 = Umbral_down_975.drop(['Hora'], axis=1)
##------------------------------------------------THRESHOLDS FOR THE WHOLE REGION------------------------------------------------------------##
'The average threshold dataframe is obtained to get the hourly thresholds to be used when the whole region is considered. '
df_concat_down = pd.concat((Umbral_down_348, Umbral_down_350, Umbral_down_975))
Umbral_down_Prom = df_concat_down.groupby(df_concat_down.index).mean()
df_concat_up = pd.concat((Umbral_up_348, Umbral_up_350, Umbral_up_975))
Umbral_up_Prom = df_concat_up.groupby(df_concat_up.index).mean()
####################################################################################
## ----------------READING THE GOES CH2 DATA FOR THE FULL GRID---------------- ##
####################################################################################
Rad = np.load('/home/nacorreasa/Maestria/Datos_Tesis/Arrays/Array_Rad_2018_2019CH2.npy')
fechas_horas = np.load('/home/nacorreasa/Maestria/Datos_Tesis/Arrays/Array_FechasHoras_Anio.npy')
df_fh = pd.DataFrame()
df_fh ['fecha_hora'] = fechas_horas
df_fh['fecha_hora'] = pd.to_datetime(df_fh['fecha_hora'], format="%Y-%m-%d %H:%M", errors='coerce')
df_fh.index = df_fh['fecha_hora']
w = pd.date_range(df_fh.index.min(), df_fh.index.max()).difference(df_fh.index)
df_fh = df_fh[df_fh.index.hour != 5]
#################################################################################################
##-------------------READING THE GOES CH2 DATA FOR EACH PIXEL--------------------------##
#################################################################################################
Rad_pixel_975 = np.load('/home/nacorreasa/Maestria/Datos_Tesis/Arrays/Array_Rad_pix975_Anio.npy')
Rad_pixel_350 = np.load('/home/nacorreasa/Maestria/Datos_Tesis/Arrays/Array_Rad_pix350_Anio.npy')
Rad_pixel_348 = np.load('/home/nacorreasa/Maestria/Datos_Tesis/Arrays/Array_Rad_pix348_Anio.npy')
fechas_horas = df_fh['fecha_hora'].values
## -- Selecting the TS pixel
Rad_df_975 = pd.DataFrame()
Rad_df_975['Fecha_Hora'] = fechas_horas
Rad_df_975['Radiacias'] = Rad_pixel_975
Rad_df_975['Fecha_Hora'] = pd.to_datetime(Rad_df_975['Fecha_Hora'], format="%Y-%m-%d %H:%M", errors='coerce')
Rad_df_975.index = Rad_df_975['Fecha_Hora']
Rad_df_975 = Rad_df_975.drop(['Fecha_Hora'], axis=1)
## -- Selecting the CI pixel
Rad_df_350 = pd.DataFrame()
Rad_df_350['Fecha_Hora'] = fechas_horas
Rad_df_350['Radiacias'] = Rad_pixel_350
Rad_df_350['Fecha_Hora'] = pd.to_datetime(Rad_df_350['Fecha_Hora'], format="%Y-%m-%d %H:%M", errors='coerce')
Rad_df_350.index = Rad_df_350['Fecha_Hora']
Rad_df_350 = Rad_df_350.drop(['Fecha_Hora'], axis=1)
## -- Selecting the JV pixel
Rad_df_348 = pd.DataFrame()
Rad_df_348['Fecha_Hora'] = fechas_horas
Rad_df_348['Radiacias'] = Rad_pixel_348
Rad_df_348['Fecha_Hora'] = pd.to_datetime(Rad_df_348['Fecha_Hora'], format="%Y-%m-%d %H:%M", errors='coerce')
Rad_df_348.index = Rad_df_348['Fecha_Hora']
Rad_df_348 = Rad_df_348.drop(['Fecha_Hora'], axis=1)
#################################################################################################
##-------------------MASKING DATA BELOW THE CLOUDY THRESHOLD--------------------------##
#################################################################################################
Rad_nuba_348 = []
FH_Nuba_348 = []
for i in range(len(Rad_df_348)):
for j in range(len(Umbral_up_348.index)):
if (Rad_df_348.index[i].hour == Umbral_up_348.index[j]) & (Rad_df_348.Radiacias.values[i] >= Umbral_up_348.values[j]):
Rad_nuba_348.append(Rad_df_348.Radiacias.values[i])
FH_Nuba_348.append(Rad_df_348.index[i])
elif (Rad_df_348.index[i].hour == Umbral_up_348.index[j]) & (Rad_df_348.Radiacias.values[i] < Umbral_up_348.values[j]):
Rad_nuba_348.append(np.nan)
FH_Nuba_348.append(Rad_df_348.index[i])
df_348_nuba = pd.DataFrame()
df_348_nuba['Radiacias'] = Rad_nuba_348
df_348_nuba['Fecha_Hora'] = FH_Nuba_348
df_348_nuba['Fecha_Hora'] = pd.to_datetime(df_348_nuba['Fecha_Hora'], format="%Y-%m-%d %H:%M", errors='coerce')
df_348_nuba.index = df_348_nuba['Fecha_Hora']
df_348_nuba = df_348_nuba.drop(['Fecha_Hora'], axis=1)
Rad_nuba_350 = []
FH_Nuba_350 = []
for i in range(len(Rad_df_350)):
for j in range(len(Umbral_up_350.index)):
if (Rad_df_350.index[i].hour == Umbral_up_350.index[j]) & (Rad_df_350.Radiacias.values[i] >= Umbral_up_350.values[j]):
Rad_nuba_350.append(Rad_df_350.Radiacias.values[i])
FH_Nuba_350.append(Rad_df_350.index[i])
elif (Rad_df_350.index[i].hour == Umbral_up_350.index[j]) & (Rad_df_350.Radiacias.values[i] < Umbral_up_350.values[j]):
Rad_nuba_350.append(np.nan)
FH_Nuba_350.append(Rad_df_350.index[i])
df_350_nuba = | pd.DataFrame() | pandas.DataFrame |
__author__ = 'nicksantos'
"""
Handles all interfacing with USGS, with the goal of making (parts) of their JSON
API available to Python as native objects
"""
import urllib
import urllib2
import json
from StringIO import StringIO
import gzip
from datetime import datetime
from logging import getLogger
log = getLogger("usgs_api")
log.info("usgs api loaded")
try:
import pandas
except ImportError:
log.info("failed to import pandas. Pandas object functionality will be unavailable (naturally). Install the 'pandas'"
"module if you need pandas/scientific functionality")
pass # pass - user should not be concerned.
config_max_windows = 52 # 52 * 2 weeks = 2 years - if window size changes, then this is just a multiplier for how many
# times to batch the request
config_window_size = 14 # days
config_min_timestamp = 1191196800
class gage():
def __init__(self, site_code=None, time_period="P7D", url_params={}, start_timestamp=None, end_timestamp=None):
"""
:param site_code: A USGS Site code for the gage this object represents. See `the USGS documentation
<http://help.waterdata.usgs.gov/codes-and-parameters/codes#search_station_nm>`_
:param time_period: A compatible period string as specified in
`the USGS time period documentation <http://waterservices.usgs.gov/rest/IV-Service.html#Specifying>`_ - this parameter
only accepts "period" values as explained in that documentation. If you would like to specify a time range
using startDT and endDT please use the url_params argument. If you specify both, current behavior
uses the time period as being more specific. An exception will not be raised.
:param url_params: A dictionary of other parameters to pass to the USGS server in key/value format. They
will be automatically added to the query. Case sensitive. For a full list of parameters, see
`the USGS web service documentation <http://waterservices.usgs.gov/rest/IV-Service.html>`_
"""
self.site_code = site_code
self.time_series = None
self.time_period = time_period
self.url_params = url_params # optional dict of params - url key value pairs passed to the api
self.data_frame = None
self.startDT = None
self.endDT = None
self.start_timestamp = start_timestamp
self.end_timestamp = end_timestamp
self.batch = False
self._json_string = None
self._base_url = "http://waterservices.usgs.gov/nwis/iv/"
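    # Typical usage (sketch; the site code below is only an example and the
    # calls require network access to the USGS service):
    #   >>> g = gage(site_code="09380000", time_period="P7D")
    #   >>> records = g.retrieve()                  # list of dicts
    #   >>> frame = g.retrieve(return_pandas=True)  # pandas DataFrame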
def check_params(self, params=('site_code',)):
"""
Makes sure that we have the base level of information necessary to run a query
to prevent lazy setup errors
"""
for param in params:
if self.__dict__[param] is None and param not in self.url_params:
raise AttributeError("Required attribute %s must be set or provided in url_params before running this method" % param)
#if self.startDT and self.start_timestamp:
# pass
# both above lines are placeholders from a previous commit that was partway through.
def retrieve(self, return_pandas=False, automerge=True):
"""
Retrieves data from the server based upon class configuration. Returns the a list of dicts by default,
with keys set by the returned data from the server. If return_pandas is True, returns a pandas data frame.
:param return_pandas: specifies whether or not to return the pandas object. When True, returns a pandas
object. When False, returns the default list of dicts. If True and you have not installed pandas, will raise
ValueError
:param automerge: Warning: not yet implemented. The intent is that, when returning a pandas table, automerge
will let you run multiple separate requests for the same gage (different time series with gaps, etc.)
and merge them into a single result for the gage
"""
# makes sure that the user didn't forget to set something after init
if return_pandas and not pandas: # do this first so we don't find out AFTER doing everything else
_pandas_no_exist()
self.batch = self.check_params() # TODO: Make check_params determine if it's a multiwindow query
if self.batch: # if we need to retrieve multiple windows, then call the batching function
self._batch()
else: # otherwise, if the time period is short enough, just retrieve the data
self._retrieve_data()
self._json_to_dataframe(create_pandas=return_pandas)
if return_pandas:
return self.data_frame
else:
return self.time_series
def _retrieve_data(self):
"""
requests retrieves, and stores the json
"""
# add the relevant parameters into the dictionary passed by the user (if any)
self.url_params['format'] = "json"
self.url_params['sites'] = self.site_code
if self.time_period and not self.startDT and 'startDT' not in self.url_params:
# if we have a time period, but not a time range, use the period
self.url_params['period'] = self.time_period
else:
# otherwise, use the time range if it works (doesn't currently validate the dates)
# TODO: Validate the date formats
self.check_params(('startDT', 'endDT')) # it's possible that they won't be defined
self.url_params['startDT'] = self.startDT
self.url_params['endDT'] = self.endDT
# merge parameters into the url
request_url = self._base_url + "?" + urllib.urlencode(self.url_params)
# open the url and read in the json string to a private variable
request = urllib2.Request(request_url)
request.add_header('Accept-encoding', 'gzip') # be kind to the USGS servers - see http://waterservices.usgs.gov/rest/IV-Service.html#gzip
data_stream = urllib2.urlopen(request)
if data_stream.info().get('Content-Encoding') == 'gzip': # with thanks to StackOverflow for the efficient code
buf = StringIO(data_stream.read())
f = gzip.GzipFile(fileobj=buf)
self._json_string = f.read()
else:
self._json_string = data_stream.read()
self._json_data = json.loads(self._json_string)
def _json_to_dataframe(self, create_pandas = False):
"""
converts the parsed json into the native time series (list of dicts) and, when create_pandas is True, a pandas data frame
"""
self.time_series = self._json_data['value']['timeSeries'][0]['values'][0]['value']
if create_pandas:
self.data_frame = | pandas.DataFrame(self.time_series) | pandas.DataFrame |
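# Minimal usage sketch (illustrative only, not part of the original module): shows the intended call pattern
# for the gage class above. The site code is a placeholder -- substitute a real USGS site code -- and the
# request requires network access to waterservices.usgs.gov.
if __name__ == '__main__':
    example_gage = gage(site_code="01646500", time_period="P7D")  # placeholder site code, last 7 days of data
    observations = example_gage.retrieve(return_pandas=False)  # plain list of dicts parsed from the USGS JSON
    print("retrieved %d records for site %s" % (len(observations), example_gage.site_code))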
"""
Prepare training and testing datasets as CSV dictionaries
Created on 11/26/2018
@author: RH
"""
import os
import pandas as pd
import sklearn.utils as sku
import numpy as np
# collect the image/slide ids (file or directory names) under root_dir, skipping known non-image entries
def image_ids_in(root_dir, ignore=['.DS_Store','dict.csv', 'all.csv']):
ids = []
for id in os.listdir(root_dir):
if id in ignore:
print('Skipping ID:', id)
else:
ids.append(id)
return ids
# Get intersection of 2 lists
def intersection(lst1, lst2):
lst3 = [value for value in lst1 if value in lst2]
return lst3
def tile_ids_in(slide, level, root_dir, label):
ids = []
try:
for id in os.listdir(root_dir):
if '.png' in id:
ids.append([slide, level, root_dir+'/'+id, label])
else:
print('Skipping ID:', id)
except FileNotFoundError:
print('Ignore:', root_dir)
return ids
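# Illustrative sketch (hypothetical tile directories, not part of the original pipeline): shows how the
# per-slide lists returned by tile_ids_in are typically stacked into the ['slide', 'level', 'path', 'label']
# frame that balance() below operates on.
def _example_tile_frame():
    rows = []
    rows.extend(tile_ids_in('slide_A', 1, '../tiles/slide_A/level1', 0))            # placeholder CPTAC-style slide
    rows.extend(tile_ids_in('TCGA-XX-0001', 1, '../tiles/TCGA-XX-0001/level1', 1))  # placeholder TCGA slide
    return pd.DataFrame(rows, columns=['slide', 'level', 'path', 'label'])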
# Balance CPTAC and TCGA tiles in each class
def balance(pdls, cls):
balanced = pd.DataFrame(columns=['slide', 'level', 'path', 'label'])
for i in range(cls):
ref = pdls.loc[pdls['label'] == i]
CPTAC = ref[~ref['slide'].str.contains("TCGA")]
TCGA = ref[ref['slide'].str.contains("TCGA")]
if CPTAC.shape[0] != 0 and TCGA.shape[0] != 0:
ratio = (CPTAC.shape[0])/(TCGA.shape[0])
if ratio < 0.2:
TCGA = TCGA.sample(int(5*CPTAC.shape[0]), replace=False)
ref = pd.concat([TCGA, CPTAC], sort=False)
elif ratio > 5:
CPTAC = CPTAC.sample(int(5*TCGA.shape[0]), replace=False)
ref = | pd.concat([TCGA, CPTAC], sort=False) | pandas.concat |
"""
Copyright 2020 <NAME>
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import pandas as pd
import numpy as np
from scipy.stats import rankdata
from impetuous.quantification import qvalues, permuter
from rankor.quantification import pi0
from rankor.contrasts import contrast
def svd_reduced_mean ( x,axis=0,keep=[0] ) :
if True :
sk = set ( keep )
if len ( np.shape(x) ) > 1 :
u , s , vt = np .linalg .svd( x , full_matrices=False )
xred = np.mean( np.dot(u*[s[i_] if i_ in sk else 0 for i_ in range(len(s))],vt) , axis)
if 'pandas' in str(type(x)) :
if not 'series' in str(type(x)) :
xname = x.index.values[0]
return ( pd.DataFrame( [xred] , index=[xname] , columns=x.columns ) )
else :
xname = x.name
return ( pd.Series( xred , name=xname , index=x.columns ) )
else :
return ( xred )
return ( x )
from sklearn.decomposition import PCA
dimred = PCA ( n_components = 1 )
def pca_reduced_mean( x ) :
if True :
if len ( np.shape(x) ) > 1 :
Xnew = dimred.fit_transform( x.T )
xred = Xnew . T [0] + np.mean(np.mean(x))
if 'pandas' in str(type(x)) :
if not 'series' in str(type(x)) :
xname = x.index.values[0]
return ( pd.DataFrame( [xred] , index=[xname] , columns=x.columns ) )
else :
xname = x.name
return ( pd.Series( xred , name=xname , index=x.columns ) )
return ( x )
def reduction ( a , power , centered=-1 ) :
if centered>0 :
a = ( a.T-np.mean(a,1) ).T
return( np.linalg.svd ( a**power , full_matrices=False ) )
def hyper_params ( df_ , label = 'generic' , sep = ',' , power=1., centered=-1 ):
#
idx_ = df_.index.values
N_s = len ( df_.columns )
u,s,vt = reduction( df_.values , power , centered=centered )
rdf_ = pd.Series ( np.sum(u**2,1) , index=idx_ , name = label+sep+"u" )
rdf_ = pd.concat ( [ pd.DataFrame(rdf_) ,
pd.DataFrame( pd.Series( np.mean( df_.values,1 ) ,
index = idx_ , name=label+sep+"m") ) ] , axis=1 )
w_ = rdf_ .loc[ :,label+sep+"u" ].values
r_ = rankdata ( [ w for w in w_ ] ,'average' )
N = len ( r_ )
#
df0_ = pd.DataFrame( [ [a for a in range(N)],w_,r_ ],index=['idx','w_','r_'], columns=idx_ ).T
#
from scipy.special import erf as erf_
loc_pval = lambda X , mean , stdev : [ 1. - 0.5*( 1. + erf_( ( x - mean )/stdev/np.sqrt(2.) ) ) for x in X ]
lvs = np.log( df0_.loc[ :,'w_'].values )
#
return ( np.mean(lvs) , np.std(lvs) )
def hyper_rdf ( df_ , label = 'generic' , sep = ',' , power=1. ,
diagnostic_output = False , MEAN=None , STD=None , centered=-1 ) :
#
idx_= df_.index.values
N_s = len ( df_.columns )
u,s,vt = reduction ( df_.values , power , centered=centered )
rdf_ = pd.Series ( np.sum( ( u )**2,1 ) , index=idx_ , name = label+sep+"u" )
rdf_ = pd.concat ( [ pd.DataFrame( rdf_ ) ,
pd.DataFrame( pd.Series( np.mean( df_.values,1 ) ,
index = idx_ , name=label+sep+"m") ) ] , axis=1 )
w_ = rdf_ .loc[ :,label+sep+"u" ].values
r_ = rankdata ( [ w for w in w_] ,'average' )
N = len ( r_ )
#
# HERE WE CONSTRUCT A DIAGNOSTICS AND INTERMEDIATE CALCULATION
# DATAFRAME FOR EASIER DEBUGGING AND INSPECTION
df0_ = pd.DataFrame( [ [a for a in range(N)],w_,r_ ],index=['idx','w_','r_'], columns=idx_ )
df0_ = df0_.T.sort_values ( by='r_' )
df0_ .loc[ : , 'd_' ] = [ v for v in ( df0_.loc[:, 'w_' ] * df0_.loc[:,'r_'] ) ]
df0_ .loc[ : , 'da_' ] = np.cumsum ( df0_ .loc[ : , 'd_' ].values )
#
# HOW MANY EQUALLY OR MORE EXTREME VALUES ARE THERE? ( RANK BASED )
df0_ .loc[ : , 'dt_' ] = np.cumsum ( df0_ .loc[ : , 'd_' ].values[::-1] )[::-1]
df0_ .loc[ : , 'rank_ps' ] = df0_ .loc[ :,'dt_' ] / np.sum( df0_ .loc[ :,'d_' ] )
#
# MODEL THE LOG TRANSFORMED WEIGHTS AS NORMAL AND ASSIGN UPPER TAIL P VALUES
from scipy.special import erf as erf_
loc_pval = lambda X , mean , stdev : [ 1. - 0.5*( 1. + erf_( ( x - mean )/stdev/np.sqrt(2.) ) ) for x in X ]
lvs = np.log( df0_.loc[ :,'w_'].values )
if MEAN is None or STD is None:
if len(lvs)>3 :
ps = loc_pval( lvs , np.mean( lvs ) , np.std( lvs ) )
else :
ps = df0_ .loc[ : , 'rank_ps' ]
else :
ps = loc_pval( lvs , MEAN , STD )
#
if diagnostic_output :
import scipy.stats as scs
NB = 100
lv = np.log( rdf_.loc[:,[label+sep+"u"]].values )
y , x = np.histogram( lv , bins=NB , density=True )
skw = scs.skew(lv)[0]
kur = scs.kurtosis(lv)[0]
shape_stats = "kurtosis: " + "{:.2f} ".format( kur ) + "skewness: "+ "{:.2f}".format( skw )
locd = lambda x,M,s : (1./s/np.sqrt(2.*np.pi))*np.exp(-0.5*((x-M)/s)**2 )
lin_x = 0.5 * ( x[1:] + x[:-1] )
his_y = y
rsy = sorted( [ (y_,x_) for (y_,x_) in zip(y,lin_x) ] )[::-1]
hm , hs = np.mean([rsy[i][1] for i in range(5)]) , np.mean([rsy[i][0] for i in range(5)])
h_mod_y = locd( lin_x , hm , 1.0/(hs*np.sqrt(2.*np.pi)) )
d_mod_y = locd( lv , np.mean(lv) , np.std(lv) )
rem_y = [ (y_-m_) for (y_,m_) in zip(y, locd(0.5*(x[1:]+x[:-1]),np.mean(lv),np.std(lv))) ]
prc_y = [ 100.*np.abs( contrast(y_,m_) ) for (y_,m_) in zip(y, locd(0.5*(x[1:]+x[:-1]),np.mean(lv),np.std(lv))) ]
RMSD = np.sqrt(np.sum([ ry**2 for ry in rem_y ]))
PMAE = np.mean(prc_y)
#
df0_ .loc[ : , 'pvalues' ] = ps
#
# ASSIGN THE P VALUES
rdf_.loc[df0_.index.values,label+sep+"p"] = df0_.loc[:,'pvalues']
rdf_.loc[df0_.index.values,label+sep+"q"] = [ qvs[0] for qvs in qvalues( df0_.loc[:,'pvalues'].values , pi0 = pi0(df0_.loc[:,'pvalues'].values) ) ]
rdf_.loc[df0_.index.values,label+sep+"r"] = df0_ .loc[ : , 'rank_ps' ]
#
# AND RETURN THE RESULTS
if diagnostic_output :
return ( rdf_ , RMSD , PMAE , kur , skw , rem_y )
else :
return ( rdf_ )
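# Usage sketch (illustrative only; the data frames and variable names below are assumptions): the intended
# two step pattern is to estimate the global location and scale of the log svd weights once with
# hyper_params and then reuse them when scoring row subsets with hyper_rdf, so every subset is judged
# against the same null model.
def _example_two_step_scoring ( full_df , group_dfs , label = 'example' ) :
    MEAN , STD = hyper_params ( full_df , label = label )   # global fit on the complete matrix
    results = []
    for gdf in group_dfs :                                   # e.g. row subsets of the same matrix
        results .append ( hyper_rdf ( gdf , label = label , MEAN = MEAN , STD = STD ) )
    return ( pd.concat ( results ) )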
if __name__ == '__main__' :
#
print ( 'REDUCER :: TESTS ' )
#
a = 2*np.random.rand(10)
b = 4*np.random.rand(10)
X = [ [*(a[:5]+1),*a[5:]],[*(b[:5]+3),*(b[5:])] ]
Xdf = | pd.DataFrame( X , columns=['a','b','s','c','d','e','f','g','h','i'] , index=['i0','i1']) | pandas.DataFrame |
# -*- coding: utf-8 -*-
# pylint: disable=E1101
# flake8: noqa
from datetime import datetime
import csv
import os
import sys
import re
import nose
import platform
from multiprocessing.pool import ThreadPool
from numpy import nan
import numpy as np
from pandas.io.common import DtypeWarning
from pandas import DataFrame, Series, Index, MultiIndex, DatetimeIndex
from pandas.compat import(
StringIO, BytesIO, PY3, range, long, lrange, lmap, u
)
from pandas.io.common import URLError
import pandas.io.parsers as parsers
from pandas.io.parsers import (read_csv, read_table, read_fwf,
TextFileReader, TextParser)
import pandas.util.testing as tm
import pandas as pd
from pandas.compat import parse_date
import pandas.lib as lib
from pandas import compat
from pandas.lib import Timestamp
from pandas.tseries.index import date_range
import pandas.tseries.tools as tools
from numpy.testing.decorators import slow
import pandas.parser
class ParserTests(object):
"""
Want to be able to test either C+Cython or Python+Cython parsers
"""
data1 = """index,A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
qux,12,13,14,15
foo2,12,13,14,15
bar2,12,13,14,15
"""
def read_csv(self, *args, **kwargs):
raise NotImplementedError
def read_table(self, *args, **kwargs):
raise NotImplementedError
def setUp(self):
import warnings
warnings.filterwarnings(action='ignore', category=FutureWarning)
self.dirpath = tm.get_data_path()
self.csv1 = os.path.join(self.dirpath, 'test1.csv')
self.csv2 = os.path.join(self.dirpath, 'test2.csv')
self.xls1 = os.path.join(self.dirpath, 'test.xls')
def construct_dataframe(self, num_rows):
df = DataFrame(np.random.rand(num_rows, 5), columns=list('abcde'))
df['foo'] = 'foo'
df['bar'] = 'bar'
df['baz'] = 'baz'
df['date'] = pd.date_range('20000101 09:00:00',
periods=num_rows,
freq='s')
df['int'] = np.arange(num_rows, dtype='int64')
return df
def generate_multithread_dataframe(self, path, num_rows, num_tasks):
def reader(arg):
start, nrows = arg
if not start:
return pd.read_csv(path, index_col=0, header=0, nrows=nrows,
parse_dates=['date'])
return pd.read_csv(path,
index_col=0,
header=None,
skiprows=int(start) + 1,
nrows=nrows,
parse_dates=[9])
tasks = [
(num_rows * i / num_tasks,
num_rows / num_tasks) for i in range(num_tasks)
]
pool = ThreadPool(processes=num_tasks)
results = pool.map(reader, tasks)
header = results[0].columns
for r in results[1:]:
r.columns = header
final_dataframe = pd.concat(results)
return final_dataframe
def test_converters_type_must_be_dict(self):
with tm.assertRaisesRegexp(TypeError, 'Type converters.+'):
self.read_csv(StringIO(self.data1), converters=0)
def test_empty_decimal_marker(self):
data = """A|B|C
1|2,334|5
10|13|10.
"""
self.assertRaises(ValueError, read_csv, StringIO(data), decimal='')
def test_empty_thousands_marker(self):
data = """A|B|C
1|2,334|5
10|13|10.
"""
self.assertRaises(ValueError, read_csv, StringIO(data), thousands='')
def test_multi_character_decimal_marker(self):
data = """A|B|C
1|2,334|5
10|13|10.
"""
self.assertRaises(ValueError, read_csv, StringIO(data), thousands=',,')
def test_empty_string(self):
data = """\
One,Two,Three
a,1,one
b,2,two
,3,three
d,4,nan
e,5,five
nan,6,
g,7,seven
"""
df = self.read_csv(StringIO(data))
xp = DataFrame({'One': ['a', 'b', np.nan, 'd', 'e', np.nan, 'g'],
'Two': [1, 2, 3, 4, 5, 6, 7],
'Three': ['one', 'two', 'three', np.nan, 'five',
np.nan, 'seven']})
tm.assert_frame_equal(xp.reindex(columns=df.columns), df)
df = self.read_csv(StringIO(data), na_values={'One': [], 'Three': []},
keep_default_na=False)
xp = DataFrame({'One': ['a', 'b', '', 'd', 'e', 'nan', 'g'],
'Two': [1, 2, 3, 4, 5, 6, 7],
'Three': ['one', 'two', 'three', 'nan', 'five',
'', 'seven']})
tm.assert_frame_equal(xp.reindex(columns=df.columns), df)
df = self.read_csv(
StringIO(data), na_values=['a'], keep_default_na=False)
xp = DataFrame({'One': [np.nan, 'b', '', 'd', 'e', 'nan', 'g'],
'Two': [1, 2, 3, 4, 5, 6, 7],
'Three': ['one', 'two', 'three', 'nan', 'five', '',
'seven']})
tm.assert_frame_equal(xp.reindex(columns=df.columns), df)
df = self.read_csv(StringIO(data), na_values={'One': [], 'Three': []})
xp = DataFrame({'One': ['a', 'b', np.nan, 'd', 'e', np.nan, 'g'],
'Two': [1, 2, 3, 4, 5, 6, 7],
'Three': ['one', 'two', 'three', np.nan, 'five',
np.nan, 'seven']})
tm.assert_frame_equal(xp.reindex(columns=df.columns), df)
# GH4318, passing na_values=None and keep_default_na=False yields
# 'None' as a na_value
data = """\
One,Two,Three
a,1,None
b,2,two
,3,None
d,4,nan
e,5,five
nan,6,
g,7,seven
"""
df = self.read_csv(
StringIO(data), keep_default_na=False)
xp = DataFrame({'One': ['a', 'b', '', 'd', 'e', 'nan', 'g'],
'Two': [1, 2, 3, 4, 5, 6, 7],
'Three': ['None', 'two', 'None', 'nan', 'five', '',
'seven']})
tm.assert_frame_equal(xp.reindex(columns=df.columns), df)
def test_read_csv(self):
if not compat.PY3:
if compat.is_platform_windows():
prefix = u("file:///")
else:
prefix = u("file://")
fname = prefix + compat.text_type(self.csv1)
# it works!
read_csv(fname, index_col=0, parse_dates=True)
def test_dialect(self):
data = """\
label1,label2,label3
index1,"a,c,e
index2,b,d,f
"""
dia = csv.excel()
dia.quoting = csv.QUOTE_NONE
df = self.read_csv(StringIO(data), dialect=dia)
data = '''\
label1,label2,label3
index1,a,c,e
index2,b,d,f
'''
exp = self.read_csv(StringIO(data))
exp.replace('a', '"a', inplace=True)
tm.assert_frame_equal(df, exp)
def test_dialect_str(self):
data = """\
fruit:vegetable
apple:brocolli
pear:tomato
"""
exp = DataFrame({
'fruit': ['apple', 'pear'],
'vegetable': ['brocolli', 'tomato']
})
dia = csv.register_dialect('mydialect', delimiter=':') # noqa
df = self.read_csv(StringIO(data), dialect='mydialect')
tm.assert_frame_equal(df, exp)
csv.unregister_dialect('mydialect')
def test_1000_sep(self):
data = """A|B|C
1|2,334|5
10|13|10.
"""
expected = DataFrame({
'A': [1, 10],
'B': [2334, 13],
'C': [5, 10.]
})
df = self.read_csv(StringIO(data), sep='|', thousands=',')
tm.assert_frame_equal(df, expected)
df = self.read_table(StringIO(data), sep='|', thousands=',')
tm.assert_frame_equal(df, expected)
def test_1000_sep_with_decimal(self):
data = """A|B|C
1|2,334.01|5
10|13|10.
"""
expected = DataFrame({
'A': [1, 10],
'B': [2334.01, 13],
'C': [5, 10.]
})
tm.assert_equal(expected.A.dtype, 'int64')
tm.assert_equal(expected.B.dtype, 'float')
tm.assert_equal(expected.C.dtype, 'float')
df = self.read_csv(StringIO(data), sep='|', thousands=',', decimal='.')
tm.assert_frame_equal(df, expected)
df = self.read_table(StringIO(data), sep='|',
thousands=',', decimal='.')
tm.assert_frame_equal(df, expected)
data_with_odd_sep = """A|B|C
1|2.334,01|5
10|13|10,
"""
df = self.read_csv(StringIO(data_with_odd_sep),
sep='|', thousands='.', decimal=',')
tm.assert_frame_equal(df, expected)
df = self.read_table(StringIO(data_with_odd_sep),
sep='|', thousands='.', decimal=',')
tm.assert_frame_equal(df, expected)
def test_separator_date_conflict(self):
# Regression test for issue #4678: make sure thousands separator and
# date parsing do not conflict.
data = '06-02-2013;13:00;1-000.215'
expected = DataFrame(
[[datetime(2013, 6, 2, 13, 0, 0), 1000.215]],
columns=['Date', 2]
)
df = self.read_csv(StringIO(data), sep=';', thousands='-',
parse_dates={'Date': [0, 1]}, header=None)
tm.assert_frame_equal(df, expected)
def test_squeeze(self):
data = """\
a,1
b,2
c,3
"""
idx = Index(['a', 'b', 'c'], name=0)
expected = Series([1, 2, 3], name=1, index=idx)
result = self.read_table(StringIO(data), sep=',', index_col=0,
header=None, squeeze=True)
tm.assertIsInstance(result, Series)
tm.assert_series_equal(result, expected)
def test_squeeze_no_view(self):
# GH 8217
# series should not be a view
data = """time,data\n0,10\n1,11\n2,12\n4,14\n5,15\n3,13"""
result = self.read_csv(StringIO(data), index_col='time', squeeze=True)
self.assertFalse(result._is_view)
def test_inf_parsing(self):
data = """\
,A
a,inf
b,-inf
c,Inf
d,-Inf
e,INF
f,-INF
g,INf
h,-INf
i,inF
j,-inF"""
inf = float('inf')
expected = Series([inf, -inf] * 5)
df = read_csv(StringIO(data), index_col=0)
tm.assert_almost_equal(df['A'].values, expected.values)
df = read_csv(StringIO(data), index_col=0, na_filter=False)
tm.assert_almost_equal(df['A'].values, expected.values)
def test_multiple_date_col(self):
# Can use multiple date parsers
data = """\
KORD,19990127, 19:00:00, 18:56:00, 0.8100, 2.8100, 7.2000, 0.0000, 280.0000
KORD,19990127, 20:00:00, 19:56:00, 0.0100, 2.2100, 7.2000, 0.0000, 260.0000
KORD,19990127, 21:00:00, 20:56:00, -0.5900, 2.2100, 5.7000, 0.0000, 280.0000
KORD,19990127, 21:00:00, 21:18:00, -0.9900, 2.0100, 3.6000, 0.0000, 270.0000
KORD,19990127, 22:00:00, 21:56:00, -0.5900, 1.7100, 5.1000, 0.0000, 290.0000
KORD,19990127, 23:00:00, 22:56:00, -0.5900, 1.7100, 4.6000, 0.0000, 280.0000
"""
def func(*date_cols):
return lib.try_parse_dates(parsers._concat_date_cols(date_cols))
df = self.read_csv(StringIO(data), header=None,
date_parser=func,
prefix='X',
parse_dates={'nominal': [1, 2],
'actual': [1, 3]})
self.assertIn('nominal', df)
self.assertIn('actual', df)
self.assertNotIn('X1', df)
self.assertNotIn('X2', df)
self.assertNotIn('X3', df)
d = datetime(1999, 1, 27, 19, 0)
self.assertEqual(df.ix[0, 'nominal'], d)
df = self.read_csv(StringIO(data), header=None,
date_parser=func,
parse_dates={'nominal': [1, 2],
'actual': [1, 3]},
keep_date_col=True)
self.assertIn('nominal', df)
self.assertIn('actual', df)
self.assertIn(1, df)
self.assertIn(2, df)
self.assertIn(3, df)
data = """\
KORD,19990127, 19:00:00, 18:56:00, 0.8100, 2.8100, 7.2000, 0.0000, 280.0000
KORD,19990127, 20:00:00, 19:56:00, 0.0100, 2.2100, 7.2000, 0.0000, 260.0000
KORD,19990127, 21:00:00, 20:56:00, -0.5900, 2.2100, 5.7000, 0.0000, 280.0000
KORD,19990127, 21:00:00, 21:18:00, -0.9900, 2.0100, 3.6000, 0.0000, 270.0000
KORD,19990127, 22:00:00, 21:56:00, -0.5900, 1.7100, 5.1000, 0.0000, 290.0000
KORD,19990127, 23:00:00, 22:56:00, -0.5900, 1.7100, 4.6000, 0.0000, 280.0000
"""
df = read_csv(StringIO(data), header=None,
prefix='X',
parse_dates=[[1, 2], [1, 3]])
self.assertIn('X1_X2', df)
self.assertIn('X1_X3', df)
self.assertNotIn('X1', df)
self.assertNotIn('X2', df)
self.assertNotIn('X3', df)
d = datetime(1999, 1, 27, 19, 0)
self.assertEqual(df.ix[0, 'X1_X2'], d)
df = read_csv(StringIO(data), header=None,
parse_dates=[[1, 2], [1, 3]], keep_date_col=True)
self.assertIn('1_2', df)
self.assertIn('1_3', df)
self.assertIn(1, df)
self.assertIn(2, df)
self.assertIn(3, df)
data = '''\
KORD,19990127 19:00:00, 18:56:00, 0.8100, 2.8100, 7.2000, 0.0000, 280.0000
KORD,19990127 20:00:00, 19:56:00, 0.0100, 2.2100, 7.2000, 0.0000, 260.0000
KORD,19990127 21:00:00, 20:56:00, -0.5900, 2.2100, 5.7000, 0.0000, 280.0000
KORD,19990127 21:00:00, 21:18:00, -0.9900, 2.0100, 3.6000, 0.0000, 270.0000
KORD,19990127 22:00:00, 21:56:00, -0.5900, 1.7100, 5.1000, 0.0000, 290.0000
'''
df = self.read_csv(StringIO(data), sep=',', header=None,
parse_dates=[1], index_col=1)
d = datetime(1999, 1, 27, 19, 0)
self.assertEqual(df.index[0], d)
def test_multiple_date_cols_int_cast(self):
data = ("KORD,19990127, 19:00:00, 18:56:00, 0.8100\n"
"KORD,19990127, 20:00:00, 19:56:00, 0.0100\n"
"KORD,19990127, 21:00:00, 20:56:00, -0.5900\n"
"KORD,19990127, 21:00:00, 21:18:00, -0.9900\n"
"KORD,19990127, 22:00:00, 21:56:00, -0.5900\n"
"KORD,19990127, 23:00:00, 22:56:00, -0.5900")
date_spec = {'nominal': [1, 2], 'actual': [1, 3]}
import pandas.io.date_converters as conv
# it works!
df = self.read_csv(StringIO(data), header=None, parse_dates=date_spec,
date_parser=conv.parse_date_time)
self.assertIn('nominal', df)
def test_multiple_date_col_timestamp_parse(self):
data = """05/31/2012,15:30:00.029,1306.25,1,E,0,,1306.25
05/31/2012,15:30:00.029,1306.25,8,E,0,,1306.25"""
result = self.read_csv(StringIO(data), sep=',', header=None,
parse_dates=[[0, 1]], date_parser=Timestamp)
ex_val = Timestamp('05/31/2012 15:30:00.029')
self.assertEqual(result['0_1'][0], ex_val)
def test_single_line(self):
# GH 6607
# Test currently only valid with python engine because sep=None and
# delim_whitespace=False. Temporarily copied to TestPythonParser.
# Test for ValueError with other engines:
with tm.assertRaisesRegexp(ValueError,
'sep=None with delim_whitespace=False'):
# sniff separator
buf = StringIO()
sys.stdout = buf
# printing warning message when engine == 'c' for now
try:
# it works!
df = self.read_csv(StringIO('1,2'), names=['a', 'b'],
header=None, sep=None)
tm.assert_frame_equal(DataFrame({'a': [1], 'b': [2]}), df)
finally:
sys.stdout = sys.__stdout__
def test_multiple_date_cols_with_header(self):
data = """\
ID,date,NominalTime,ActualTime,TDew,TAir,Windspeed,Precip,WindDir
KORD,19990127, 19:00:00, 18:56:00, 0.8100, 2.8100, 7.2000, 0.0000, 280.0000
KORD,19990127, 20:00:00, 19:56:00, 0.0100, 2.2100, 7.2000, 0.0000, 260.0000
KORD,19990127, 21:00:00, 20:56:00, -0.5900, 2.2100, 5.7000, 0.0000, 280.0000
KORD,19990127, 21:00:00, 21:18:00, -0.9900, 2.0100, 3.6000, 0.0000, 270.0000
KORD,19990127, 22:00:00, 21:56:00, -0.5900, 1.7100, 5.1000, 0.0000, 290.0000
KORD,19990127, 23:00:00, 22:56:00, -0.5900, 1.7100, 4.6000, 0.0000, 280.0000"""
df = self.read_csv(StringIO(data), parse_dates={'nominal': [1, 2]})
self.assertNotIsInstance(df.nominal[0], compat.string_types)
ts_data = """\
ID,date,nominalTime,actualTime,A,B,C,D,E
KORD,19990127, 19:00:00, 18:56:00, 0.8100, 2.8100, 7.2000, 0.0000, 280.0000
KORD,19990127, 20:00:00, 19:56:00, 0.0100, 2.2100, 7.2000, 0.0000, 260.0000
KORD,19990127, 21:00:00, 20:56:00, -0.5900, 2.2100, 5.7000, 0.0000, 280.0000
KORD,19990127, 21:00:00, 21:18:00, -0.9900, 2.0100, 3.6000, 0.0000, 270.0000
KORD,19990127, 22:00:00, 21:56:00, -0.5900, 1.7100, 5.1000, 0.0000, 290.0000
KORD,19990127, 23:00:00, 22:56:00, -0.5900, 1.7100, 4.6000, 0.0000, 280.0000
"""
def test_multiple_date_col_name_collision(self):
self.assertRaises(ValueError, self.read_csv, StringIO(self.ts_data),
parse_dates={'ID': [1, 2]})
data = """\
date_NominalTime,date,NominalTime,ActualTime,TDew,TAir,Windspeed,Precip,WindDir
KORD1,19990127, 19:00:00, 18:56:00, 0.8100, 2.8100, 7.2000, 0.0000, 280.0000
KORD2,19990127, 20:00:00, 19:56:00, 0.0100, 2.2100, 7.2000, 0.0000, 260.0000
KORD3,19990127, 21:00:00, 20:56:00, -0.5900, 2.2100, 5.7000, 0.0000, 280.0000
KORD4,19990127, 21:00:00, 21:18:00, -0.9900, 2.0100, 3.6000, 0.0000, 270.0000
KORD5,19990127, 22:00:00, 21:56:00, -0.5900, 1.7100, 5.1000, 0.0000, 290.0000
KORD6,19990127, 23:00:00, 22:56:00, -0.5900, 1.7100, 4.6000, 0.0000, 280.0000""" # noqa
self.assertRaises(ValueError, self.read_csv, StringIO(data),
parse_dates=[[1, 2]])
def test_index_col_named(self):
no_header = """\
KORD1,19990127, 19:00:00, 18:56:00, 0.8100, 2.8100, 7.2000, 0.0000, 280.0000
KORD2,19990127, 20:00:00, 19:56:00, 0.0100, 2.2100, 7.2000, 0.0000, 260.0000
KORD3,19990127, 21:00:00, 20:56:00, -0.5900, 2.2100, 5.7000, 0.0000, 280.0000
KORD4,19990127, 21:00:00, 21:18:00, -0.9900, 2.0100, 3.6000, 0.0000, 270.0000
KORD5,19990127, 22:00:00, 21:56:00, -0.5900, 1.7100, 5.1000, 0.0000, 290.0000
KORD6,19990127, 23:00:00, 22:56:00, -0.5900, 1.7100, 4.6000, 0.0000, 280.0000"""
h = "ID,date,NominalTime,ActualTime,TDew,TAir,Windspeed,Precip,WindDir\n"
data = h + no_header
rs = self.read_csv(StringIO(data), index_col='ID')
xp = self.read_csv(StringIO(data), header=0).set_index('ID')
tm.assert_frame_equal(rs, xp)
self.assertRaises(ValueError, self.read_csv, StringIO(no_header),
index_col='ID')
data = """\
1,2,3,4,hello
5,6,7,8,world
9,10,11,12,foo
"""
names = ['a', 'b', 'c', 'd', 'message']
xp = DataFrame({'a': [1, 5, 9], 'b': [2, 6, 10], 'c': [3, 7, 11],
'd': [4, 8, 12]},
index=Index(['hello', 'world', 'foo'], name='message'))
rs = self.read_csv(StringIO(data), names=names, index_col=['message'])
tm.assert_frame_equal(xp, rs)
self.assertEqual(xp.index.name, rs.index.name)
rs = self.read_csv(StringIO(data), names=names, index_col='message')
tm.assert_frame_equal(xp, rs)
self.assertEqual(xp.index.name, rs.index.name)
def test_usecols_index_col_False(self):
# Issue 9082
s = "a,b,c,d\n1,2,3,4\n5,6,7,8"
s_malformed = "a,b,c,d\n1,2,3,4,\n5,6,7,8,"
cols = ['a', 'c', 'd']
expected = DataFrame({'a': [1, 5], 'c': [3, 7], 'd': [4, 8]})
df = self.read_csv(StringIO(s), usecols=cols, index_col=False)
tm.assert_frame_equal(expected, df)
df = self.read_csv(StringIO(s_malformed),
usecols=cols, index_col=False)
tm.assert_frame_equal(expected, df)
def test_index_col_is_True(self):
# Issue 9798
self.assertRaises(ValueError, self.read_csv, StringIO(self.ts_data),
index_col=True)
def test_converter_index_col_bug(self):
# 1835
data = "A;B\n1;2\n3;4"
rs = self.read_csv(StringIO(data), sep=';', index_col='A',
converters={'A': lambda x: x})
xp = DataFrame({'B': [2, 4]}, index=Index([1, 3], name='A'))
tm.assert_frame_equal(rs, xp)
self.assertEqual(rs.index.name, xp.index.name)
def test_date_parser_int_bug(self):
# #3071
log_file = StringIO(
'posix_timestamp,elapsed,sys,user,queries,query_time,rows,'
'accountid,userid,contactid,level,silo,method\n'
'1343103150,0.062353,0,4,6,0.01690,3,'
'12345,1,-1,3,invoice_InvoiceResource,search\n'
)
def f(posix_string):
return datetime.utcfromtimestamp(int(posix_string))
# it works!
read_csv(log_file, index_col=0, parse_dates=0, date_parser=f)
def test_multiple_skts_example(self):
data = "year, month, a, b\n 2001, 01, 0.0, 10.\n 2001, 02, 1.1, 11."
pass
def test_malformed(self):
# all
data = """ignore
A,B,C
1,2,3 # comment
1,2,3,4,5
2,3,4
"""
try:
df = self.read_table(
StringIO(data), sep=',', header=1, comment='#')
self.assertTrue(False)
except Exception as inst:
self.assertIn('Expected 3 fields in line 4, saw 5', str(inst))
# skip_footer
data = """ignore
A,B,C
1,2,3 # comment
1,2,3,4,5
2,3,4
footer
"""
# GH 6607
# Test currently only valid with python engine because
# skip_footer != 0. Temporarily copied to TestPythonParser.
# Test for ValueError with other engines:
try:
with tm.assertRaisesRegexp(ValueError, 'skip_footer'): # XXX
df = self.read_table(
StringIO(data), sep=',', header=1, comment='#',
skip_footer=1)
self.assertTrue(False)
except Exception as inst:
self.assertIn('Expected 3 fields in line 4, saw 5', str(inst))
# first chunk
data = """ignore
A,B,C
skip
1,2,3
3,5,10 # comment
1,2,3,4,5
2,3,4
"""
try:
it = self.read_table(StringIO(data), sep=',',
header=1, comment='#',
iterator=True, chunksize=1,
skiprows=[2])
df = it.read(5)
self.assertTrue(False)
except Exception as inst:
self.assertIn('Expected 3 fields in line 6, saw 5', str(inst))
# middle chunk
data = """ignore
A,B,C
skip
1,2,3
3,5,10 # comment
1,2,3,4,5
2,3,4
"""
try:
it = self.read_table(StringIO(data), sep=',', header=1,
comment='#', iterator=True, chunksize=1,
skiprows=[2])
df = it.read(1)
it.read(2)
self.assertTrue(False)
except Exception as inst:
self.assertIn('Expected 3 fields in line 6, saw 5', str(inst))
# last chunk
data = """ignore
A,B,C
skip
1,2,3
3,5,10 # comment
1,2,3,4,5
2,3,4
"""
try:
it = self.read_table(StringIO(data), sep=',',
header=1, comment='#',
iterator=True, chunksize=1, skiprows=[2])
df = it.read(1)
it.read()
self.assertTrue(False)
except Exception as inst:
self.assertIn('Expected 3 fields in line 6, saw 5', str(inst))
def test_passing_dtype(self):
# GH 6607
# Passing dtype is currently only supported by the C engine.
# Temporarily copied to TestCParser*.
# Test for ValueError with other engines:
with tm.assertRaisesRegexp(ValueError,
"The 'dtype' option is not supported"):
df = DataFrame(np.random.rand(5, 2), columns=list(
'AB'), index=['1A', '1B', '1C', '1D', '1E'])
with tm.ensure_clean('__passing_str_as_dtype__.csv') as path:
df.to_csv(path)
# GH 3795
# passing 'str' as the dtype
result = self.read_csv(path, dtype=str, index_col=0)
tm.assert_series_equal(result.dtypes, Series(
{'A': 'object', 'B': 'object'}))
# we expect all object columns, so need to convert to test for
# equivalence
result = result.astype(float)
tm.assert_frame_equal(result, df)
# invalid dtype
self.assertRaises(TypeError, self.read_csv, path,
dtype={'A': 'foo', 'B': 'float64'},
index_col=0)
# valid but we don't support it (date)
self.assertRaises(TypeError, self.read_csv, path,
dtype={'A': 'datetime64', 'B': 'float64'},
index_col=0)
self.assertRaises(TypeError, self.read_csv, path,
dtype={'A': 'datetime64', 'B': 'float64'},
index_col=0, parse_dates=['B'])
# valid but we don't support it
self.assertRaises(TypeError, self.read_csv, path,
dtype={'A': 'timedelta64', 'B': 'float64'},
index_col=0)
with tm.assertRaisesRegexp(ValueError,
"The 'dtype' option is not supported"):
# empty frame
# GH12048
self.read_csv(StringIO('A,B'), dtype=str)
def test_quoting(self):
bad_line_small = """printer\tresult\tvariant_name
Klosterdruckerei\tKlosterdruckerei <Salem> (1611-1804)\tMuller, Jacob
Klosterdruckerei\tKlosterdruckerei <Salem> (1611-1804)\tMuller, Jakob
Klosterdruckerei\tKlosterdruckerei <Kempten> (1609-1805)\t"Furststiftische Hofdruckerei, <Kempten""
Klosterdruckerei\tKlosterdruckerei <Kempten> (1609-1805)\tGaller, Alois
Klosterdruckerei\tKlosterdruckerei <Kempten> (1609-1805)\tHochfurstliche Buchhandlung <Kempten>"""
self.assertRaises(Exception, self.read_table, StringIO(bad_line_small),
sep='\t')
good_line_small = bad_line_small + '"'
df = self.read_table(StringIO(good_line_small), sep='\t')
self.assertEqual(len(df), 3)
def test_non_string_na_values(self):
# GH3611, na_values that are not a string are an issue
with tm.ensure_clean('__non_string_na_values__.csv') as path:
df = DataFrame({'A': [-999, 2, 3], 'B': [1.2, -999, 4.5]})
df.to_csv(path, sep=' ', index=False)
result1 = read_csv(path, sep=' ', header=0,
na_values=['-999.0', '-999'])
result2 = read_csv(path, sep=' ', header=0,
na_values=[-999, -999.0])
result3 = read_csv(path, sep=' ', header=0,
na_values=[-999.0, -999])
tm.assert_frame_equal(result1, result2)
tm.assert_frame_equal(result2, result3)
result4 = read_csv(path, sep=' ', header=0, na_values=['-999.0'])
result5 = read_csv(path, sep=' ', header=0, na_values=['-999'])
result6 = read_csv(path, sep=' ', header=0, na_values=[-999.0])
result7 = read_csv(path, sep=' ', header=0, na_values=[-999])
tm.assert_frame_equal(result4, result3)
tm.assert_frame_equal(result5, result3)
tm.assert_frame_equal(result6, result3)
tm.assert_frame_equal(result7, result3)
good_compare = result3
# with an odd float format, so we can't match the string 999.0
# exactly, but need float matching
df.to_csv(path, sep=' ', index=False, float_format='%.3f')
result1 = read_csv(path, sep=' ', header=0,
na_values=['-999.0', '-999'])
result2 = read_csv(path, sep=' ', header=0,
na_values=[-999, -999.0])
result3 = read_csv(path, sep=' ', header=0,
na_values=[-999.0, -999])
tm.assert_frame_equal(result1, good_compare)
tm.assert_frame_equal(result2, good_compare)
tm.assert_frame_equal(result3, good_compare)
result4 = read_csv(path, sep=' ', header=0, na_values=['-999.0'])
result5 = read_csv(path, sep=' ', header=0, na_values=['-999'])
result6 = read_csv(path, sep=' ', header=0, na_values=[-999.0])
result7 = read_csv(path, sep=' ', header=0, na_values=[-999])
tm.assert_frame_equal(result4, good_compare)
tm.assert_frame_equal(result5, good_compare)
tm.assert_frame_equal(result6, good_compare)
tm.assert_frame_equal(result7, good_compare)
def test_default_na_values(self):
_NA_VALUES = set(['-1.#IND', '1.#QNAN', '1.#IND', '-1.#QNAN',
'#N/A', 'N/A', 'NA', '#NA', 'NULL', 'NaN',
'nan', '-NaN', '-nan', '#N/A N/A', ''])
self.assertEqual(_NA_VALUES, parsers._NA_VALUES)
nv = len(_NA_VALUES)
def f(i, v):
if i == 0:
buf = ''
elif i > 0:
buf = ''.join([','] * i)
buf = "{0}{1}".format(buf, v)
if i < nv - 1:
buf = "{0}{1}".format(buf, ''.join([','] * (nv - i - 1)))
return buf
data = StringIO('\n'.join([f(i, v) for i, v in enumerate(_NA_VALUES)]))
expected = DataFrame(np.nan, columns=range(nv), index=range(nv))
df = self.read_csv(data, header=None)
tm.assert_frame_equal(df, expected)
def test_custom_na_values(self):
data = """A,B,C
ignore,this,row
1,NA,3
-1.#IND,5,baz
7,8,NaN
"""
expected = [[1., nan, 3],
[nan, 5, nan],
[7, 8, nan]]
df = self.read_csv(StringIO(data), na_values=['baz'], skiprows=[1])
tm.assert_almost_equal(df.values, expected)
df2 = self.read_table(StringIO(data), sep=',', na_values=['baz'],
skiprows=[1])
tm.assert_almost_equal(df2.values, expected)
df3 = self.read_table(StringIO(data), sep=',', na_values='baz',
skiprows=[1])
tm.assert_almost_equal(df3.values, expected)
def test_nat_parse(self):
# GH 3062
df = DataFrame(dict({
'A': np.asarray(lrange(10), dtype='float64'),
'B': pd.Timestamp('20010101')}))
df.iloc[3:6, :] = np.nan
with tm.ensure_clean('__nat_parse_.csv') as path:
df.to_csv(path)
result = read_csv(path, index_col=0, parse_dates=['B'])
tm.assert_frame_equal(result, df)
expected = Series(dict(A='float64', B='datetime64[ns]'))
tm.assert_series_equal(expected, result.dtypes)
# test with NaT for the nan_rep
# we don't have a method to specify the Datetime na_rep (it defaults
# to '')
df.to_csv(path)
result = read_csv(path, index_col=0, parse_dates=['B'])
tm.assert_frame_equal(result, df)
def test_skiprows_bug(self):
# GH #505
text = """#foo,a,b,c
#foo,a,b,c
#foo,a,b,c
#foo,a,b,c
#foo,a,b,c
#foo,a,b,c
1/1/2000,1.,2.,3.
1/2/2000,4,5,6
1/3/2000,7,8,9
"""
data = self.read_csv(StringIO(text), skiprows=lrange(6), header=None,
index_col=0, parse_dates=True)
data2 = self.read_csv(StringIO(text), skiprows=6, header=None,
index_col=0, parse_dates=True)
expected = DataFrame(np.arange(1., 10.).reshape((3, 3)),
columns=[1, 2, 3],
index=[datetime(2000, 1, 1), datetime(2000, 1, 2),
datetime(2000, 1, 3)])
expected.index.name = 0
tm.assert_frame_equal(data, expected)
tm.assert_frame_equal(data, data2)
def test_deep_skiprows(self):
# GH #4382
text = "a,b,c\n" + \
"\n".join([",".join([str(i), str(i + 1), str(i + 2)])
for i in range(10)])
condensed_text = "a,b,c\n" + \
"\n".join([",".join([str(i), str(i + 1), str(i + 2)])
for i in [0, 1, 2, 3, 4, 6, 8, 9]])
data = self.read_csv(StringIO(text), skiprows=[6, 8])
condensed_data = self.read_csv(StringIO(condensed_text))
tm.assert_frame_equal(data, condensed_data)
def test_skiprows_blank(self):
# GH 9832
text = """#foo,a,b,c
#foo,a,b,c
#foo,a,b,c
#foo,a,b,c
1/1/2000,1.,2.,3.
1/2/2000,4,5,6
1/3/2000,7,8,9
"""
data = self.read_csv(StringIO(text), skiprows=6, header=None,
index_col=0, parse_dates=True)
expected = DataFrame(np.arange(1., 10.).reshape((3, 3)),
columns=[1, 2, 3],
index=[datetime(2000, 1, 1), datetime(2000, 1, 2),
datetime(2000, 1, 3)])
expected.index.name = 0
tm.assert_frame_equal(data, expected)
def test_detect_string_na(self):
data = """A,B
foo,bar
NA,baz
NaN,nan
"""
expected = [['foo', 'bar'],
[nan, 'baz'],
[nan, nan]]
df = self.read_csv(StringIO(data))
tm.assert_almost_equal(df.values, expected)
def test_unnamed_columns(self):
data = """A,B,C,,
1,2,3,4,5
6,7,8,9,10
11,12,13,14,15
"""
expected = [[1, 2, 3, 4, 5.],
[6, 7, 8, 9, 10],
[11, 12, 13, 14, 15]]
df = self.read_table(StringIO(data), sep=',')
tm.assert_almost_equal(df.values, expected)
self.assert_numpy_array_equal(df.columns,
['A', 'B', 'C', 'Unnamed: 3',
'Unnamed: 4'])
def test_string_nas(self):
data = """A,B,C
a,b,c
d,,f
,g,h
"""
result = self.read_csv(StringIO(data))
expected = DataFrame([['a', 'b', 'c'],
['d', np.nan, 'f'],
[np.nan, 'g', 'h']],
columns=['A', 'B', 'C'])
tm.assert_frame_equal(result, expected)
def test_duplicate_columns(self):
for engine in ['python', 'c']:
data = """A,A,B,B,B
1,2,3,4,5
6,7,8,9,10
11,12,13,14,15
"""
# check default behaviour
df = self.read_table(StringIO(data), sep=',', engine=engine)
self.assertEqual(list(df.columns), ['A', 'A.1', 'B', 'B.1', 'B.2'])
df = self.read_table(StringIO(data), sep=',',
engine=engine, mangle_dupe_cols=False)
self.assertEqual(list(df.columns), ['A', 'A', 'B', 'B', 'B'])
df = self.read_table(StringIO(data), sep=',',
engine=engine, mangle_dupe_cols=True)
self.assertEqual(list(df.columns), ['A', 'A.1', 'B', 'B.1', 'B.2'])
def test_csv_mixed_type(self):
data = """A,B,C
a,1,2
b,3,4
c,4,5
"""
df = self.read_csv(StringIO(data))
# TODO
def test_csv_custom_parser(self):
data = """A,B,C
20090101,a,1,2
20090102,b,3,4
20090103,c,4,5
"""
f = lambda x: datetime.strptime(x, '%Y%m%d')
df = self.read_csv(StringIO(data), date_parser=f)
expected = self.read_csv(StringIO(data), parse_dates=True)
tm.assert_frame_equal(df, expected)
def test_parse_dates_implicit_first_col(self):
data = """A,B,C
20090101,a,1,2
20090102,b,3,4
20090103,c,4,5
"""
df = self.read_csv(StringIO(data), parse_dates=True)
expected = self.read_csv(StringIO(data), index_col=0, parse_dates=True)
self.assertIsInstance(
df.index[0], (datetime, np.datetime64, Timestamp))
tm.assert_frame_equal(df, expected)
def test_parse_dates_string(self):
data = """date,A,B,C
20090101,a,1,2
20090102,b,3,4
20090103,c,4,5
"""
rs = self.read_csv(
StringIO(data), index_col='date', parse_dates='date')
idx = date_range('1/1/2009', periods=3)
idx.name = 'date'
xp = DataFrame({'A': ['a', 'b', 'c'],
'B': [1, 3, 4],
'C': [2, 4, 5]}, idx)
tm.assert_frame_equal(rs, xp)
def test_yy_format(self):
data = """date,time,B,C
090131,0010,1,2
090228,1020,3,4
090331,0830,5,6
"""
rs = self.read_csv(StringIO(data), index_col=0,
parse_dates=[['date', 'time']])
idx = DatetimeIndex([datetime(2009, 1, 31, 0, 10, 0),
datetime(2009, 2, 28, 10, 20, 0),
datetime(2009, 3, 31, 8, 30, 0)],
dtype=object, name='date_time')
xp = DataFrame({'B': [1, 3, 5], 'C': [2, 4, 6]}, idx)
tm.assert_frame_equal(rs, xp)
rs = self.read_csv(StringIO(data), index_col=0,
parse_dates=[[0, 1]])
idx = DatetimeIndex([datetime(2009, 1, 31, 0, 10, 0),
datetime(2009, 2, 28, 10, 20, 0),
datetime(2009, 3, 31, 8, 30, 0)],
dtype=object, name='date_time')
xp = DataFrame({'B': [1, 3, 5], 'C': [2, 4, 6]}, idx)
tm.assert_frame_equal(rs, xp)
def test_parse_dates_column_list(self):
from pandas.core.datetools import to_datetime
data = '''date;destination;ventilationcode;unitcode;units;aux_date
01/01/2010;P;P;50;1;12/1/2011
01/01/2010;P;R;50;1;13/1/2011
15/01/2010;P;P;50;1;14/1/2011
01/05/2010;P;P;50;1;15/1/2011'''
expected = self.read_csv(StringIO(data), sep=";", index_col=lrange(4))
lev = expected.index.levels[0]
levels = list(expected.index.levels)
levels[0] = lev.to_datetime(dayfirst=True)
# hack to get this to work - remove for final test
levels[0].name = lev.name
expected.index.set_levels(levels, inplace=True)
expected['aux_date'] = to_datetime(expected['aux_date'],
dayfirst=True)
expected['aux_date'] = lmap(Timestamp, expected['aux_date'])
tm.assertIsInstance(expected['aux_date'][0], datetime)
df = self.read_csv(StringIO(data), sep=";", index_col=lrange(4),
parse_dates=[0, 5], dayfirst=True)
tm.assert_frame_equal(df, expected)
df = self.read_csv(StringIO(data), sep=";", index_col=lrange(4),
parse_dates=['date', 'aux_date'], dayfirst=True)
tm.assert_frame_equal(df, expected)
def test_no_header(self):
data = """1,2,3,4,5
6,7,8,9,10
11,12,13,14,15
"""
df = self.read_table(StringIO(data), sep=',', header=None)
df_pref = self.read_table(StringIO(data), sep=',', prefix='X',
header=None)
names = ['foo', 'bar', 'baz', 'quux', 'panda']
df2 = self.read_table(StringIO(data), sep=',', names=names)
expected = [[1, 2, 3, 4, 5.],
[6, 7, 8, 9, 10],
[11, 12, 13, 14, 15]]
tm.assert_almost_equal(df.values, expected)
tm.assert_almost_equal(df.values, df2.values)
self.assert_numpy_array_equal(df_pref.columns,
['X0', 'X1', 'X2', 'X3', 'X4'])
self.assert_numpy_array_equal(df.columns, lrange(5))
self.assert_numpy_array_equal(df2.columns, names)
def test_no_header_prefix(self):
data = """1,2,3,4,5
6,7,8,9,10
11,12,13,14,15
"""
df_pref = self.read_table(StringIO(data), sep=',', prefix='Field',
header=None)
expected = [[1, 2, 3, 4, 5.],
[6, 7, 8, 9, 10],
[11, 12, 13, 14, 15]]
tm.assert_almost_equal(df_pref.values, expected)
self.assert_numpy_array_equal(df_pref.columns,
['Field0', 'Field1', 'Field2', 'Field3', 'Field4'])
def test_header_with_index_col(self):
data = """foo,1,2,3
bar,4,5,6
baz,7,8,9
"""
names = ['A', 'B', 'C']
df = self.read_csv(StringIO(data), names=names)
self.assertEqual(names, ['A', 'B', 'C'])
values = [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
expected = DataFrame(values, index=['foo', 'bar', 'baz'],
columns=['A', 'B', 'C'])
tm.assert_frame_equal(df, expected)
def test_read_csv_dataframe(self):
df = self.read_csv(self.csv1, index_col=0, parse_dates=True)
df2 = self.read_table(self.csv1, sep=',', index_col=0,
parse_dates=True)
self.assert_numpy_array_equal(df.columns, ['A', 'B', 'C', 'D'])
self.assertEqual(df.index.name, 'index')
self.assertIsInstance(
df.index[0], (datetime, np.datetime64, Timestamp))
self.assertEqual(df.values.dtype, np.float64)
tm.assert_frame_equal(df, df2)
def test_read_csv_no_index_name(self):
df = self.read_csv(self.csv2, index_col=0, parse_dates=True)
df2 = self.read_table(self.csv2, sep=',', index_col=0,
parse_dates=True)
self.assert_numpy_array_equal(df.columns, ['A', 'B', 'C', 'D', 'E'])
self.assertIsInstance(
df.index[0], (datetime, np.datetime64, Timestamp))
self.assertEqual(df.ix[:, ['A', 'B', 'C', 'D']
].values.dtype, np.float64)
tm.assert_frame_equal(df, df2)
def test_read_csv_infer_compression(self):
# GH 9770
expected = self.read_csv(self.csv1, index_col=0, parse_dates=True)
inputs = [self.csv1, self.csv1 + '.gz',
self.csv1 + '.bz2', open(self.csv1)]
for f in inputs:
df = self.read_csv(f, index_col=0, parse_dates=True,
compression='infer')
tm.assert_frame_equal(expected, df)
inputs[3].close()
def test_read_table_unicode(self):
fin = BytesIO(u('\u0141aski, Jan;1').encode('utf-8'))
df1 = read_table(fin, sep=";", encoding="utf-8", header=None)
tm.assertIsInstance(df1[0].values[0], compat.text_type)
def test_read_table_wrong_num_columns(self):
# too few!
data = """A,B,C,D,E,F
1,2,3,4,5,6
6,7,8,9,10,11,12
11,12,13,14,15,16
"""
self.assertRaises(Exception, self.read_csv, StringIO(data))
def test_read_table_duplicate_index(self):
data = """index,A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
qux,12,13,14,15
foo,12,13,14,15
bar,12,13,14,15
"""
result = self.read_csv(StringIO(data), index_col=0)
expected = self.read_csv(StringIO(data)).set_index('index',
verify_integrity=False)
tm.assert_frame_equal(result, expected)
def test_read_table_duplicate_index_implicit(self):
data = """A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
qux,12,13,14,15
foo,12,13,14,15
bar,12,13,14,15
"""
# it works!
result = self.read_csv(StringIO(data))
def test_parse_bools(self):
data = """A,B
True,1
False,2
True,3
"""
data = self.read_csv(StringIO(data))
self.assertEqual(data['A'].dtype, np.bool_)
data = """A,B
YES,1
no,2
yes,3
No,3
Yes,3
"""
data = self.read_csv(StringIO(data),
true_values=['yes', 'Yes', 'YES'],
false_values=['no', 'NO', 'No'])
self.assertEqual(data['A'].dtype, np.bool_)
data = """A,B
TRUE,1
FALSE,2
TRUE,3
"""
data = self.read_csv(StringIO(data))
self.assertEqual(data['A'].dtype, np.bool_)
data = """A,B
foo,bar
bar,foo"""
result = self.read_csv(StringIO(data), true_values=['foo'],
false_values=['bar'])
expected = DataFrame({'A': [True, False], 'B': [False, True]})
tm.assert_frame_equal(result, expected)
def test_int_conversion(self):
data = """A,B
1.0,1
2.0,2
3.0,3
"""
data = self.read_csv(StringIO(data))
self.assertEqual(data['A'].dtype, np.float64)
self.assertEqual(data['B'].dtype, np.int64)
def test_infer_index_col(self):
data = """A,B,C
foo,1,2,3
bar,4,5,6
baz,7,8,9
"""
data = self.read_csv(StringIO(data))
self.assertTrue(data.index.equals(Index(['foo', 'bar', 'baz'])))
def test_read_nrows(self):
df = self.read_csv(StringIO(self.data1), nrows=3)
expected = self.read_csv(StringIO(self.data1))[:3]
tm.assert_frame_equal(df, expected)
def test_read_chunksize(self):
reader = self.read_csv(StringIO(self.data1), index_col=0, chunksize=2)
df = self.read_csv(StringIO(self.data1), index_col=0)
chunks = list(reader)
tm.assert_frame_equal(chunks[0], df[:2])
tm.assert_frame_equal(chunks[1], df[2:4])
tm.assert_frame_equal(chunks[2], df[4:])
def test_read_chunksize_named(self):
reader = self.read_csv(
StringIO(self.data1), index_col='index', chunksize=2)
df = self.read_csv(StringIO(self.data1), index_col='index')
chunks = list(reader)
tm.assert_frame_equal(chunks[0], df[:2])
tm.assert_frame_equal(chunks[1], df[2:4])
tm.assert_frame_equal(chunks[2], df[4:])
def test_get_chunk_passed_chunksize(self):
data = """A,B,C
1,2,3
4,5,6
7,8,9
1,2,3"""
result = self.read_csv(StringIO(data), chunksize=2)
piece = result.get_chunk()
self.assertEqual(len(piece), 2)
def test_read_text_list(self):
data = """A,B,C\nfoo,1,2,3\nbar,4,5,6"""
as_list = [['A', 'B', 'C'], ['foo', '1', '2', '3'], ['bar',
'4', '5', '6']]
df = self.read_csv(StringIO(data), index_col=0)
parser = TextParser(as_list, index_col=0, chunksize=2)
chunk = parser.read(None)
tm.assert_frame_equal(chunk, df)
def test_iterator(self):
# GH 6607
# Test currently only valid with python engine because
# skip_footer != 0. Temporarily copied to TestPythonParser.
# Test for ValueError with other engines:
with tm.assertRaisesRegexp(ValueError, 'skip_footer'):
reader = self.read_csv(StringIO(self.data1), index_col=0,
iterator=True)
df = self.read_csv(StringIO(self.data1), index_col=0)
chunk = reader.read(3)
tm.assert_frame_equal(chunk, df[:3])
last_chunk = reader.read(5)
tm.assert_frame_equal(last_chunk, df[3:])
# pass list
lines = list(csv.reader(StringIO(self.data1)))
parser = TextParser(lines, index_col=0, chunksize=2)
df = self.read_csv(StringIO(self.data1), index_col=0)
chunks = list(parser)
tm.assert_frame_equal(chunks[0], df[:2])
tm.assert_frame_equal(chunks[1], df[2:4])
tm.assert_frame_equal(chunks[2], df[4:])
# pass skiprows
parser = TextParser(lines, index_col=0, chunksize=2, skiprows=[1])
chunks = list(parser)
tm.assert_frame_equal(chunks[0], df[1:3])
# test bad parameter (skip_footer)
reader = self.read_csv(StringIO(self.data1), index_col=0,
iterator=True, skip_footer=True)
self.assertRaises(ValueError, reader.read, 3)
treader = self.read_table(StringIO(self.data1), sep=',', index_col=0,
iterator=True)
tm.assertIsInstance(treader, TextFileReader)
# stopping iteration when chunksize is specified, GH 3967
data = """A,B,C
foo,1,2,3
bar,4,5,6
baz,7,8,9
"""
reader = self.read_csv(StringIO(data), iterator=True)
result = list(reader)
expected = DataFrame(dict(A=[1, 4, 7], B=[2, 5, 8], C=[
3, 6, 9]), index=['foo', 'bar', 'baz'])
tm.assert_frame_equal(result[0], expected)
# chunksize = 1
reader = self.read_csv(StringIO(data), chunksize=1)
result = list(reader)
expected = DataFrame(dict(A=[1, 4, 7], B=[2, 5, 8], C=[
3, 6, 9]), index=['foo', 'bar', 'baz'])
self.assertEqual(len(result), 3)
tm.assert_frame_equal(pd.concat(result), expected)
def test_header_not_first_line(self):
data = """got,to,ignore,this,line
got,to,ignore,this,line
index,A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
"""
data2 = """index,A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
"""
df = self.read_csv(StringIO(data), header=2, index_col=0)
expected = self.read_csv(StringIO(data2), header=0, index_col=0)
tm.assert_frame_equal(df, expected)
def test_header_multi_index(self):
expected = tm.makeCustomDataframe(
5, 3, r_idx_nlevels=2, c_idx_nlevels=4)
data = """\
C0,,C_l0_g0,C_l0_g1,C_l0_g2
C1,,C_l1_g0,C_l1_g1,C_l1_g2
C2,,C_l2_g0,C_l2_g1,C_l2_g2
C3,,C_l3_g0,C_l3_g1,C_l3_g2
R0,R1,,,
R_l0_g0,R_l1_g0,R0C0,R0C1,R0C2
R_l0_g1,R_l1_g1,R1C0,R1C1,R1C2
R_l0_g2,R_l1_g2,R2C0,R2C1,R2C2
R_l0_g3,R_l1_g3,R3C0,R3C1,R3C2
R_l0_g4,R_l1_g4,R4C0,R4C1,R4C2
"""
df = self.read_csv(StringIO(data), header=[0, 1, 2, 3], index_col=[
0, 1], tupleize_cols=False)
tm.assert_frame_equal(df, expected)
# skipping lines in the header
df = self.read_csv(StringIO(data), header=[0, 1, 2, 3], index_col=[
0, 1], tupleize_cols=False)
tm.assert_frame_equal(df, expected)
#### invalid options ####
# no as_recarray
self.assertRaises(ValueError, self.read_csv, StringIO(data), header=[0, 1, 2, 3],
index_col=[0, 1], as_recarray=True, tupleize_cols=False)
# names
self.assertRaises(ValueError, self.read_csv, StringIO(data), header=[0, 1, 2, 3],
index_col=[0, 1], names=['foo', 'bar'], tupleize_cols=False)
# usecols
self.assertRaises(ValueError, self.read_csv, StringIO(data), header=[0, 1, 2, 3],
index_col=[0, 1], usecols=['foo', 'bar'], tupleize_cols=False)
# non-numeric index_col
self.assertRaises(ValueError, self.read_csv, StringIO(data), header=[0, 1, 2, 3],
index_col=['foo', 'bar'], tupleize_cols=False)
def test_header_multiindex_common_format(self):
df = DataFrame([[1, 2, 3, 4, 5, 6], [7, 8, 9, 10, 11, 12]],
index=['one', 'two'],
columns=MultiIndex.from_tuples([('a', 'q'), ('a', 'r'), ('a', 's'),
('b', 't'), ('c', 'u'), ('c', 'v')]))
# to_csv
data = """,a,a,a,b,c,c
,q,r,s,t,u,v
,,,,,,
one,1,2,3,4,5,6
two,7,8,9,10,11,12"""
result = self.read_csv(StringIO(data), header=[0, 1], index_col=0)
tm.assert_frame_equal(df, result)
# common
data = """,a,a,a,b,c,c
,q,r,s,t,u,v
one,1,2,3,4,5,6
two,7,8,9,10,11,12"""
result = self.read_csv(StringIO(data), header=[0, 1], index_col=0)
tm.assert_frame_equal(df, result)
# common, no index_col
data = """a,a,a,b,c,c
q,r,s,t,u,v
1,2,3,4,5,6
7,8,9,10,11,12"""
result = self.read_csv(StringIO(data), header=[0, 1], index_col=None)
tm.assert_frame_equal(df.reset_index(drop=True), result)
# malformed case 1
expected = DataFrame(np.array([[2, 3, 4, 5, 6],
[8, 9, 10, 11, 12]], dtype='int64'),
index=Index([1, 7]),
columns=MultiIndex(levels=[[u('a'), u('b'), u('c')], [u('r'), u('s'), u('t'), u('u'), u('v')]],
labels=[[0, 0, 1, 2, 2], [
0, 1, 2, 3, 4]],
names=[u('a'), u('q')]))
data = """a,a,a,b,c,c
q,r,s,t,u,v
1,2,3,4,5,6
7,8,9,10,11,12"""
result = self.read_csv(StringIO(data), header=[0, 1], index_col=0)
tm.assert_frame_equal(expected, result)
# malformed case 2
expected = DataFrame(np.array([[2, 3, 4, 5, 6],
[8, 9, 10, 11, 12]], dtype='int64'),
index=Index([1, 7]),
columns=MultiIndex(levels=[[u('a'), u('b'), u('c')], [u('r'), u('s'), u('t'), u('u'), u('v')]],
labels=[[0, 0, 1, 2, 2], [
0, 1, 2, 3, 4]],
names=[None, u('q')]))
data = """,a,a,b,c,c
q,r,s,t,u,v
1,2,3,4,5,6
7,8,9,10,11,12"""
result = self.read_csv(StringIO(data), header=[0, 1], index_col=0)
tm.assert_frame_equal(expected, result)
# mi on columns and index (malformed)
expected = DataFrame(np.array([[3, 4, 5, 6],
[9, 10, 11, 12]], dtype='int64'),
index=MultiIndex(levels=[[1, 7], [2, 8]],
labels=[[0, 1], [0, 1]]),
columns=MultiIndex(levels=[[u('a'), u('b'), u('c')], [u('s'), u('t'), u('u'), u('v')]],
labels=[[0, 1, 2, 2],
[0, 1, 2, 3]],
names=[None, u('q')]))
data = """,a,a,b,c,c
q,r,s,t,u,v
1,2,3,4,5,6
7,8,9,10,11,12"""
result = self.read_csv(StringIO(data), header=[0, 1], index_col=[0, 1])
tm.assert_frame_equal(expected, result)
def test_pass_names_with_index(self):
lines = self.data1.split('\n')
no_header = '\n'.join(lines[1:])
# regular index
names = ['index', 'A', 'B', 'C', 'D']
df = self.read_csv(StringIO(no_header), index_col=0, names=names)
expected = self.read_csv(StringIO(self.data1), index_col=0)
tm.assert_frame_equal(df, expected)
# multi index
data = """index1,index2,A,B,C,D
foo,one,2,3,4,5
foo,two,7,8,9,10
foo,three,12,13,14,15
bar,one,12,13,14,15
bar,two,12,13,14,15
"""
lines = data.split('\n')
no_header = '\n'.join(lines[1:])
names = ['index1', 'index2', 'A', 'B', 'C', 'D']
df = self.read_csv(StringIO(no_header), index_col=[0, 1],
names=names)
expected = self.read_csv(StringIO(data), index_col=[0, 1])
tm.assert_frame_equal(df, expected)
df = self.read_csv(StringIO(data), index_col=['index1', 'index2'])
tm.assert_frame_equal(df, expected)
def test_multi_index_no_level_names(self):
data = """index1,index2,A,B,C,D
foo,one,2,3,4,5
foo,two,7,8,9,10
foo,three,12,13,14,15
bar,one,12,13,14,15
bar,two,12,13,14,15
"""
data2 = """A,B,C,D
foo,one,2,3,4,5
foo,two,7,8,9,10
foo,three,12,13,14,15
bar,one,12,13,14,15
bar,two,12,13,14,15
"""
lines = data.split('\n')
no_header = '\n'.join(lines[1:])
names = ['A', 'B', 'C', 'D']
df = self.read_csv(StringIO(no_header), index_col=[0, 1],
header=None, names=names)
expected = self.read_csv(StringIO(data), index_col=[0, 1])
tm.assert_frame_equal(df, expected, check_names=False)
# 2 implicit first cols
df2 = self.read_csv(StringIO(data2))
tm.assert_frame_equal(df2, df)
# reverse order of index
df = self.read_csv(StringIO(no_header), index_col=[1, 0], names=names,
header=None)
expected = self.read_csv(StringIO(data), index_col=[1, 0])
tm.assert_frame_equal(df, expected, check_names=False)
def test_multi_index_parse_dates(self):
data = """index1,index2,A,B,C
20090101,one,a,1,2
20090101,two,b,3,4
20090101,three,c,4,5
20090102,one,a,1,2
20090102,two,b,3,4
20090102,three,c,4,5
20090103,one,a,1,2
20090103,two,b,3,4
20090103,three,c,4,5
"""
df = self.read_csv(StringIO(data), index_col=[0, 1], parse_dates=True)
self.assertIsInstance(df.index.levels[0][0],
(datetime, np.datetime64, Timestamp))
# specify columns out of order!
df2 = self.read_csv(StringIO(data), index_col=[1, 0], parse_dates=True)
self.assertIsInstance(df2.index.levels[1][0],
(datetime, np.datetime64, Timestamp))
def test_skip_footer(self):
# GH 6607
# Test currently only valid with python engine because
# skip_footer != 0. Temporarily copied to TestPythonParser.
# Test for ValueError with other engines:
with tm.assertRaisesRegexp(ValueError, 'skip_footer'):
data = """A,B,C
1,2,3
4,5,6
7,8,9
want to skip this
also also skip this
"""
result = self.read_csv(StringIO(data), skip_footer=2)
no_footer = '\n'.join(data.split('\n')[:-3])
expected = self.read_csv(StringIO(no_footer))
tm.assert_frame_equal(result, expected)
result = self.read_csv(StringIO(data), nrows=3)
tm.assert_frame_equal(result, expected)
# skipfooter alias
result = read_csv(StringIO(data), skipfooter=2)
no_footer = '\n'.join(data.split('\n')[:-3])
expected = read_csv(StringIO(no_footer))
tm.assert_frame_equal(result, expected)
def test_no_unnamed_index(self):
data = """ id c0 c1 c2
0 1 0 a b
1 2 0 c d
2 2 2 e f
"""
df = self.read_table(StringIO(data), sep=' ')
self.assertIsNone(df.index.name)
def test_converters(self):
data = """A,B,C,D
a,1,2,01/01/2009
b,3,4,01/02/2009
c,4,5,01/03/2009
"""
from pandas.compat import parse_date
result = self.read_csv(StringIO(data), converters={'D': parse_date})
result2 = self.read_csv(StringIO(data), converters={3: parse_date})
expected = self.read_csv(StringIO(data))
expected['D'] = expected['D'].map(parse_date)
tm.assertIsInstance(result['D'][0], (datetime, Timestamp))
tm.assert_frame_equal(result, expected)
tm.assert_frame_equal(result2, expected)
# produce integer
converter = lambda x: int(x.split('/')[2])
result = self.read_csv(StringIO(data), converters={'D': converter})
expected = self.read_csv(StringIO(data))
expected['D'] = expected['D'].map(converter)
tm.assert_frame_equal(result, expected)
def test_converters_no_implicit_conv(self):
# GH2184
data = """000102,1.2,A\n001245,2,B"""
f = lambda x: x.strip()
converter = {0: f}
df = self.read_csv(StringIO(data), header=None, converters=converter)
self.assertEqual(df[0].dtype, object)
def test_converters_euro_decimal_format(self):
data = """Id;Number1;Number2;Text1;Text2;Number3
1;1521,1541;187101,9543;ABC;poi;4,738797819
2;121,12;14897,76;DEF;uyt;0,377320872
3;878,158;108013,434;GHI;rez;2,735694704"""
f = lambda x: float(x.replace(",", "."))
converter = {'Number1': f, 'Number2': f, 'Number3': f}
df2 = self.read_csv(StringIO(data), sep=';', converters=converter)
self.assertEqual(df2['Number1'].dtype, float)
self.assertEqual(df2['Number2'].dtype, float)
self.assertEqual(df2['Number3'].dtype, float)
def test_converter_return_string_bug(self):
# GH #583
data = """Id;Number1;Number2;Text1;Text2;Number3
1;1521,1541;187101,9543;ABC;poi;4,738797819
2;121,12;14897,76;DEF;uyt;0,377320872
3;878,158;108013,434;GHI;rez;2,735694704"""
f = lambda x: float(x.replace(",", "."))
converter = {'Number1': f, 'Number2': f, 'Number3': f}
df2 = self.read_csv(StringIO(data), sep=';', converters=converter)
self.assertEqual(df2['Number1'].dtype, float)
def test_read_table_buglet_4x_multiindex(self):
# GH 6607
# Parsing multi-level index currently causes an error in the C parser.
# Temporarily copied to TestPythonParser.
# Here test that CParserError is raised:
with tm.assertRaises(pandas.parser.CParserError):
text = """ A B C D E
one two three four
a b 10.0032 5 -0.5109 -2.3358 -0.4645 0.05076 0.3640
a q 20 4 0.4473 1.4152 0.2834 1.00661 0.1744
x q 30 3 -0.6662 -0.5243 -0.3580 0.89145 2.5838"""
# it works!
df = self.read_table(StringIO(text), sep='\s+')
self.assertEqual(df.index.names, ('one', 'two', 'three', 'four'))
def test_line_comment(self):
data = """# empty
A,B,C
1,2.,4.#hello world
#ignore this line
5.,NaN,10.0
"""
expected = [[1., 2., 4.],
[5., np.nan, 10.]]
df = self.read_csv(StringIO(data), comment='#')
tm.assert_almost_equal(df.values, expected)
def test_comment_skiprows(self):
data = """# empty
random line
# second empty line
1,2,3
A,B,C
1,2.,4.
5.,NaN,10.0
"""
expected = [[1., 2., 4.],
[5., np.nan, 10.]]
# this should ignore the first four lines (including comments)
df = self.read_csv(StringIO(data), comment='#', skiprows=4)
tm.assert_almost_equal(df.values, expected)
def test_comment_header(self):
data = """# empty
# second empty line
1,2,3
A,B,C
1,2.,4.
5.,NaN,10.0
"""
expected = [[1., 2., 4.],
[5., np.nan, 10.]]
# header should begin at the second non-comment line
df = self.read_csv(StringIO(data), comment='#', header=1)
tm.assert_almost_equal(df.values, expected)
def test_comment_skiprows_header(self):
data = """# empty
# second empty line
# third empty line
X,Y,Z
1,2,3
A,B,C
1,2.,4.
5.,NaN,10.0
"""
expected = [[1., 2., 4.],
[5., np.nan, 10.]]
# skiprows should skip the first 4 lines (including comments), while
# header should start from the second non-commented line starting
# with line 5
df = self.read_csv(StringIO(data), comment='#', skiprows=4, header=1)
tm.assert_almost_equal(df.values, expected)
def test_read_csv_parse_simple_list(self):
text = """foo
bar baz
qux foo
foo
bar"""
df = read_csv(StringIO(text), header=None)
expected = DataFrame({0: ['foo', 'bar baz', 'qux foo',
'foo', 'bar']})
tm.assert_frame_equal(df, expected)
def test_parse_dates_custom_euroformat(self):
text = """foo,bar,baz
31/01/2010,1,2
01/02/2010,1,NA
02/02/2010,1,2
"""
parser = lambda d: parse_date(d, dayfirst=True)
df = self.read_csv(StringIO(text),
names=['time', 'Q', 'NTU'], header=0,
index_col=0, parse_dates=True,
date_parser=parser, na_values=['NA'])
exp_index = Index([datetime(2010, 1, 31), datetime(2010, 2, 1),
datetime(2010, 2, 2)], name='time')
expected = DataFrame({'Q': [1, 1, 1], 'NTU': [2, np.nan, 2]},
index=exp_index, columns=['Q', 'NTU'])
tm.assert_frame_equal(df, expected)
parser = lambda d: parse_date(d, day_first=True)
self.assertRaises(Exception, self.read_csv,
StringIO(text), skiprows=[0],
names=['time', 'Q', 'NTU'], index_col=0,
parse_dates=True, date_parser=parser,
na_values=['NA'])
def test_na_value_dict(self):
data = """A,B,C
foo,bar,NA
bar,foo,foo
foo,bar,NA
bar,foo,foo"""
df = self.read_csv(StringIO(data),
na_values={'A': ['foo'], 'B': ['bar']})
expected = DataFrame({'A': [np.nan, 'bar', np.nan, 'bar'],
'B': [np.nan, 'foo', np.nan, 'foo'],
'C': [np.nan, 'foo', np.nan, 'foo']})
tm.assert_frame_equal(df, expected)
data = """\
a,b,c,d
0,NA,1,5
"""
xp = DataFrame({'b': [np.nan], 'c': [1], 'd': [5]}, index=[0])
xp.index.name = 'a'
df = self.read_csv(StringIO(data), na_values={}, index_col=0)
tm.assert_frame_equal(df, xp)
xp = DataFrame({'b': [np.nan], 'd': [5]},
MultiIndex.from_tuples([(0, 1)]))
xp.index.names = ['a', 'c']
df = self.read_csv(StringIO(data), na_values={}, index_col=[0, 2])
tm.assert_frame_equal(df, xp)
xp = DataFrame({'b': [np.nan], 'd': [5]},
MultiIndex.from_tuples([(0, 1)]))
xp.index.names = ['a', 'c']
df = self.read_csv(StringIO(data), na_values={}, index_col=['a', 'c'])
tm.assert_frame_equal(df, xp)
@tm.network
def test_url(self):
# HTTP(S)
url = ('https://raw.github.com/pydata/pandas/master/'
'pandas/io/tests/data/salary.table')
url_table = self.read_table(url)
dirpath = tm.get_data_path()
localtable = os.path.join(dirpath, 'salary.table')
local_table = self.read_table(localtable)
tm.assert_frame_equal(url_table, local_table)
# TODO: ftp testing
@slow
def test_file(self):
# FILE
if sys.version_info[:2] < (2, 6):
raise nose.SkipTest("file:// not supported with Python < 2.6")
dirpath = tm.get_data_path()
localtable = os.path.join(dirpath, 'salary.table')
local_table = self.read_table(localtable)
try:
url_table = self.read_table('file://localhost/' + localtable)
except URLError:
# fails on some systems
raise nose.SkipTest("failing on %s" %
' '.join(platform.uname()).strip())
tm.assert_frame_equal(url_table, local_table)
def test_parse_tz_aware(self):
import pytz
# #1693
data = StringIO("Date,x\n2012-06-13T01:39:00Z,0.5")
# it works
result = read_csv(data, index_col=0, parse_dates=True)
stamp = result.index[0]
self.assertEqual(stamp.minute, 39)
try:
self.assertIs(result.index.tz, pytz.utc)
except AssertionError: # hello Yaroslav
arr = result.index.to_pydatetime()
result = tools.to_datetime(arr, utc=True)[0]
self.assertEqual(stamp.minute, result.minute)
self.assertEqual(stamp.hour, result.hour)
self.assertEqual(stamp.day, result.day)
def test_multiple_date_cols_index(self):
data = """\
ID,date,NominalTime,ActualTime,TDew,TAir,Windspeed,Precip,WindDir
KORD1,19990127, 19:00:00, 18:56:00, 0.8100, 2.8100, 7.2000, 0.0000, 280.0000
KORD2,19990127, 20:00:00, 19:56:00, 0.0100, 2.2100, 7.2000, 0.0000, 260.0000
KORD3,19990127, 21:00:00, 20:56:00, -0.5900, 2.2100, 5.7000, 0.0000, 280.0000
KORD4,19990127, 21:00:00, 21:18:00, -0.9900, 2.0100, 3.6000, 0.0000, 270.0000
KORD5,19990127, 22:00:00, 21:56:00, -0.5900, 1.7100, 5.1000, 0.0000, 290.0000
KORD6,19990127, 23:00:00, 22:56:00, -0.5900, 1.7100, 4.6000, 0.0000, 280.0000"""
xp = self.read_csv(StringIO(data), parse_dates={'nominal': [1, 2]})
df = self.read_csv(StringIO(data), parse_dates={'nominal': [1, 2]},
index_col='nominal')
tm.assert_frame_equal(xp.set_index('nominal'), df)
df2 = self.read_csv(StringIO(data), parse_dates={'nominal': [1, 2]},
index_col=0)
tm.assert_frame_equal(df2, df)
df3 = self.read_csv(StringIO(data), parse_dates=[[1, 2]], index_col=0)
tm.assert_frame_equal(df3, df, check_names=False)
def test_multiple_date_cols_chunked(self):
df = self.read_csv(StringIO(self.ts_data), parse_dates={
'nominal': [1, 2]}, index_col='nominal')
reader = self.read_csv(StringIO(self.ts_data), parse_dates={'nominal':
[1, 2]}, index_col='nominal', chunksize=2)
chunks = list(reader)
self.assertNotIn('nominalTime', df)
tm.assert_frame_equal(chunks[0], df[:2])
tm.assert_frame_equal(chunks[1], df[2:4])
tm.assert_frame_equal(chunks[2], df[4:])
def test_multiple_date_col_named_components(self):
xp = self.read_csv(StringIO(self.ts_data),
parse_dates={'nominal': [1, 2]},
index_col='nominal')
colspec = {'nominal': ['date', 'nominalTime']}
df = self.read_csv(StringIO(self.ts_data), parse_dates=colspec,
index_col='nominal')
tm.assert_frame_equal(df, xp)
def test_multiple_date_col_multiple_index(self):
df = self.read_csv(StringIO(self.ts_data),
parse_dates={'nominal': [1, 2]},
index_col=['nominal', 'ID'])
xp = self.read_csv(StringIO(self.ts_data),
parse_dates={'nominal': [1, 2]})
tm.assert_frame_equal(xp.set_index(['nominal', 'ID']), df)
def test_comment(self):
data = """A,B,C
1,2.,4.#hello world
5.,NaN,10.0
"""
expected = [[1., 2., 4.],
[5., np.nan, 10.]]
df = self.read_csv(StringIO(data), comment='#')
tm.assert_almost_equal(df.values, expected)
df = self.read_table(StringIO(data), sep=',', comment='#',
na_values=['NaN'])
tm.assert_almost_equal(df.values, expected)
def test_bool_na_values(self):
data = """A,B,C
True,False,True
NA,True,False
False,NA,True"""
result = self.read_csv(StringIO(data))
expected = DataFrame({'A': np.array([True, nan, False], dtype=object),
'B': np.array([False, True, nan], dtype=object),
'C': [True, False, True]})
tm.assert_frame_equal(result, expected)
def test_nonexistent_path(self):
# don't segfault pls #2428
path = '%s.csv' % tm.rands(10)
self.assertRaises(Exception, self.read_csv, path)
def test_missing_trailing_delimiters(self):
data = """A,B,C,D
1,2,3,4
1,3,3,
1,4,5"""
result = self.read_csv(StringIO(data))
self.assertTrue(result['D'].isnull()[1:].all())
def test_skipinitialspace(self):
s = ('"09-Apr-2012", "01:10:18.300", 2456026.548822908, 12849, '
'1.00361, 1.12551, 330.65659, 0355626618.16711, 73.48821, '
'314.11625, 1917.09447, 179.71425, 80.000, 240.000, -350, '
'70.06056, 344.98370, 1, 1, -0.689265, -0.692787, '
'0.212036, 14.7674, 41.605, -9999.0, -9999.0, '
'-9999.0, -9999.0, -9999.0, -9999.0, 000, 012, 128')
sfile = StringIO(s)
# it's 33 columns
result = self.read_csv(sfile, names=lrange(33), na_values=['-9999.0'],
header=None, skipinitialspace=True)
self.assertTrue(pd.isnull(result.ix[0, 29]))
def test_utf16_bom_skiprows(self):
# #2298
data = u("""skip this
skip this too
A\tB\tC
1\t2\t3
4\t5\t6""")
data2 = u("""skip this
skip this too
A,B,C
1,2,3
4,5,6""")
path = '__%s__.csv' % tm.rands(10)
with tm.ensure_clean(path) as path:
for sep, dat in [('\t', data), (',', data2)]:
for enc in ['utf-16', 'utf-16le', 'utf-16be']:
bytes = dat.encode(enc)
with open(path, 'wb') as f:
f.write(bytes)
s = BytesIO(dat.encode('utf-8'))
if compat.PY3:
# somewhat False since the code never sees bytes
from io import TextIOWrapper
s = TextIOWrapper(s, encoding='utf-8')
result = self.read_csv(path, encoding=enc, skiprows=2,
sep=sep)
expected = self.read_csv(s, encoding='utf-8', skiprows=2,
sep=sep)
tm.assert_frame_equal(result, expected)
def test_utf16_example(self):
path = tm.get_data_path('utf16_ex.txt')
# it works! and is the right length
result = self.read_table(path, encoding='utf-16')
self.assertEqual(len(result), 50)
if not compat.PY3:
buf = BytesIO(open(path, 'rb').read())
result = self.read_table(buf, encoding='utf-16')
self.assertEqual(len(result), 50)
def test_converters_corner_with_nas(self):
# skip aberration observed on Win64 Python 3.2.2
if hash(np.int64(-1)) != -2:
raise nose.SkipTest("skipping because of windows hash on Python"
" 3.2.2")
csv = """id,score,days
1,2,12
2,2-5,
3,,14+
4,6-12,2"""
def convert_days(x):
x = x.strip()
if not x:
return np.nan
is_plus = x.endswith('+')
if is_plus:
x = int(x[:-1]) + 1
else:
x = int(x)
return x
def convert_days_sentinel(x):
x = x.strip()
if not x:
return np.nan
is_plus = x.endswith('+')
if is_plus:
x = int(x[:-1]) + 1
else:
x = int(x)
return x
def convert_score(x):
x = x.strip()
if not x:
return np.nan
if x.find('-') > 0:
valmin, valmax = lmap(int, x.split('-'))
val = 0.5 * (valmin + valmax)
else:
val = float(x)
return val
fh = StringIO(csv)
result = self.read_csv(fh, converters={'score': convert_score,
'days': convert_days},
na_values=['', None])
self.assertTrue(pd.isnull(result['days'][1]))
fh = StringIO(csv)
result2 = self.read_csv(fh, converters={'score': convert_score,
'days': convert_days_sentinel},
na_values=['', None])
tm.assert_frame_equal(result, result2)
def test_unicode_encoding(self):
pth = tm.get_data_path('unicode_series.csv')
result = self.read_csv(pth, header=None, encoding='latin-1')
result = result.set_index(0)
got = result[1][1632]
expected = u('\xc1 k\xf6ldum klaka (Cold Fever) (1994)')
self.assertEqual(got, expected)
def test_trailing_delimiters(self):
# #2442. grumble grumble
data = """A,B,C
1,2,3,
4,5,6,
7,8,9,"""
result = self.read_csv(StringIO(data), index_col=False)
expected = DataFrame({'A': [1, 4, 7], 'B': [2, 5, 8],
'C': [3, 6, 9]})
tm.assert_frame_equal(result, expected)
def test_escapechar(self):
# http://stackoverflow.com/questions/13824840/feature-request-for-
# pandas-read-csv
data = '''SEARCH_TERM,ACTUAL_URL
"bra tv bord","http://www.ikea.com/se/sv/catalog/categories/departments/living_room/10475/?se%7cps%7cnonbranded%7cvardagsrum%7cgoogle%7ctv_bord"
"tv p\xc3\xa5 hjul","http://www.ikea.com/se/sv/catalog/categories/departments/living_room/10475/?se%7cps%7cnonbranded%7cvardagsrum%7cgoogle%7ctv_bord"
"SLAGBORD, \\"Bergslagen\\", IKEA:s 1700-tals serie","http://www.ikea.com/se/sv/catalog/categories/departments/living_room/10475/?se%7cps%7cnonbranded%7cvardagsrum%7cgoogle%7ctv_bord"'''
result = self.read_csv(StringIO(data), escapechar='\\',
quotechar='"', encoding='utf-8')
self.assertEqual(result['SEARCH_TERM'][2],
'SLAGBORD, "Bergslagen", IKEA:s 1700-tals serie')
self.assertTrue(np.array_equal(result.columns,
['SEARCH_TERM', 'ACTUAL_URL']))
def test_header_names_backward_compat(self):
# #2539
data = '1,2,3\n4,5,6'
result = self.read_csv(StringIO(data), names=['a', 'b', 'c'])
expected = self.read_csv(StringIO(data), names=['a', 'b', 'c'],
header=None)
tm.assert_frame_equal(result, expected)
data2 = 'foo,bar,baz\n' + data
result = self.read_csv(StringIO(data2), names=['a', 'b', 'c'],
header=0)
tm.assert_frame_equal(result, expected)
def test_int64_min_issues(self):
# #2599
data = 'A,B\n0,0\n0,'
result = self.read_csv(StringIO(data))
expected = DataFrame({'A': [0, 0], 'B': [0, np.nan]})
tm.assert_frame_equal(result, expected)
def test_parse_integers_above_fp_precision(self):
data = """Numbers
17007000002000191
17007000002000191
17007000002000191
17007000002000191
17007000002000192
17007000002000192
17007000002000192
17007000002000192
17007000002000192
17007000002000194"""
result = self.read_csv(StringIO(data))
expected = DataFrame({'Numbers': [17007000002000191,
17007000002000191,
17007000002000191,
17007000002000191,
17007000002000192,
17007000002000192,
17007000002000192,
17007000002000192,
17007000002000192,
17007000002000194]})
self.assertTrue(np.array_equal(result['Numbers'], expected['Numbers']))
def test_usecols_index_col_conflict(self):
# Issue 4201 Test that index_col as integer reflects usecols
data = """SecId,Time,Price,P2,P3
10000,2013-5-11,100,10,1
500,2013-5-12,101,11,1
"""
expected = DataFrame({'Price': [100, 101]}, index=[
datetime(2013, 5, 11), datetime(2013, 5, 12)])
expected.index.name = 'Time'
df = self.read_csv(StringIO(data), usecols=[
'Time', 'Price'], parse_dates=True, index_col=0)
tm.assert_frame_equal(expected, df)
df = self.read_csv(StringIO(data), usecols=[
'Time', 'Price'], parse_dates=True, index_col='Time')
tm.assert_frame_equal(expected, df)
df = self.read_csv(StringIO(data), usecols=[
1, 2], parse_dates=True, index_col='Time')
tm.assert_frame_equal(expected, df)
df = self.read_csv(StringIO(data), usecols=[
1, 2], parse_dates=True, index_col=0)
tm.assert_frame_equal(expected, df)
expected = DataFrame(
{'P3': [1, 1], 'Price': (100, 101), 'P2': (10, 11)})
expected = expected.set_index(['Price', 'P2'])
df = self.read_csv(StringIO(data), usecols=[
'Price', 'P2', 'P3'], parse_dates=True, index_col=['Price', 'P2'])
tm.assert_frame_equal(expected, df)
def test_chunks_have_consistent_numerical_type(self):
integers = [str(i) for i in range(499999)]
data = "a\n" + "\n".join(integers + ["1.0", "2.0"] + integers)
with tm.assert_produces_warning(False):
df = self.read_csv(StringIO(data))
# Assert that types were coerced.
self.assertTrue(type(df.a[0]) is np.float64)
self.assertEqual(df.a.dtype, np.float)
def test_warn_if_chunks_have_mismatched_type(self):
# See test in TestCParserLowMemory.
integers = [str(i) for i in range(499999)]
data = "a\n" + "\n".join(integers + ['a', 'b'] + integers)
with tm.assert_produces_warning(False):
df = self.read_csv(StringIO(data))
self.assertEqual(df.a.dtype, np.object)
def test_usecols(self):
data = """\
a,b,c
1,2,3
4,5,6
7,8,9
10,11,12"""
result = self.read_csv(StringIO(data), usecols=(1, 2))
result2 = self.read_csv(StringIO(data), usecols=('b', 'c'))
exp = self.read_csv(StringIO(data))
self.assertEqual(len(result.columns), 2)
self.assertTrue((result['b'] == exp['b']).all())
self.assertTrue((result['c'] == exp['c']).all())
tm.assert_frame_equal(result, result2)
result = self.read_csv(StringIO(data), usecols=[1, 2], header=0,
names=['foo', 'bar'])
expected = self.read_csv(StringIO(data), usecols=[1, 2])
expected.columns = ['foo', 'bar']
tm.assert_frame_equal(result, expected)
data = """\
1,2,3
4,5,6
7,8,9
10,11,12"""
result = self.read_csv(StringIO(data), names=['b', 'c'],
header=None, usecols=[1, 2])
expected = self.read_csv(StringIO(data), names=['a', 'b', 'c'],
header=None)
expected = expected[['b', 'c']]
tm.assert_frame_equal(result, expected)
result2 = self.read_csv(StringIO(data), names=['a', 'b', 'c'],
header=None, usecols=['b', 'c'])
tm.assert_frame_equal(result2, result)
# 5766
result = self.read_csv(StringIO(data), names=['a', 'b'],
header=None, usecols=[0, 1])
expected = self.read_csv(StringIO(data), names=['a', 'b', 'c'],
header=None)
expected = expected[['a', 'b']]
tm.assert_frame_equal(result, expected)
# length conflict, passed names and usecols disagree
self.assertRaises(ValueError, self.read_csv, StringIO(data),
names=['a', 'b'], usecols=[1], header=None)
def test_integer_overflow_bug(self):
# #2601
data = "65248E10 11\n55555E55 22\n"
result = self.read_csv(StringIO(data), header=None, sep=' ')
self.assertTrue(result[0].dtype == np.float64)
result = self.read_csv(StringIO(data), header=None, sep='\s+')
self.assertTrue(result[0].dtype == np.float64)
def test_catch_too_many_names(self):
# Issue 5156
data = """\
1,2,3
4,,6
7,8,9
10,11,12\n"""
tm.assertRaises(Exception, read_csv, StringIO(data),
header=0, names=['a', 'b', 'c', 'd'])
def test_ignore_leading_whitespace(self):
# GH 6607, GH 3374
data = ' a b c\n 1 2 3\n 4 5 6\n 7 8 9'
result = self.read_table(StringIO(data), sep='\s+')
expected = DataFrame({'a': [1, 4, 7], 'b': [2, 5, 8], 'c': [3, 6, 9]})
tm.assert_frame_equal(result, expected)
def test_nrows_and_chunksize_raises_notimplemented(self):
data = 'a b c'
self.assertRaises(NotImplementedError, self.read_csv, StringIO(data),
nrows=10, chunksize=5)
def test_single_char_leading_whitespace(self):
# GH 9710
data = """\
MyColumn
a
b
a
b\n"""
expected = DataFrame({'MyColumn': list('abab')})
result = self.read_csv(StringIO(data), skipinitialspace=True)
tm.assert_frame_equal(result, expected)
def test_chunk_begins_with_newline_whitespace(self):
# GH 10022
data = '\n hello\nworld\n'
result = self.read_csv(StringIO(data), header=None)
self.assertEqual(len(result), 2)
# GH 9735
chunk1 = 'a' * (1024 * 256 - 2) + '\na'
chunk2 = '\n a'
result = pd.read_csv(StringIO(chunk1 + chunk2), header=None)
expected = pd.DataFrame(['a' * (1024 * 256 - 2), 'a', ' a'])
tm.assert_frame_equal(result, expected)
def test_empty_with_index(self):
# GH 10184
data = 'x,y'
result = self.read_csv(StringIO(data), index_col=0)
expected = DataFrame([], columns=['y'], index=Index([], name='x'))
tm.assert_frame_equal(result, expected)
def test_empty_with_multiindex(self):
# GH 10467
data = 'x,y,z'
result = self.read_csv(StringIO(data), index_col=['x', 'y'])
expected = DataFrame([], columns=['z'],
index=MultiIndex.from_arrays([[]] * 2, names=['x', 'y']))
tm.assert_frame_equal(result, expected, check_index_type=False)
def test_empty_with_reversed_multiindex(self):
data = 'x,y,z'
result = self.read_csv(StringIO(data), index_col=[1, 0])
expected = DataFrame([], columns=['z'],
index=MultiIndex.from_arrays([[]] * 2, names=['y', 'x']))
tm.assert_frame_equal(result, expected, check_index_type=False)
def test_empty_index_col_scenarios(self):
data = 'x,y,z'
# None, no index
index_col, expected = None, DataFrame([], columns=list('xyz')),
tm.assert_frame_equal(self.read_csv(
StringIO(data), index_col=index_col), expected)
# False, no index
index_col, expected = False, DataFrame([], columns=list('xyz')),
tm.assert_frame_equal(self.read_csv(
StringIO(data), index_col=index_col), expected)
# int, first column
index_col, expected = 0, DataFrame(
[], columns=['y', 'z'], index=Index([], name='x'))
tm.assert_frame_equal(self.read_csv(
StringIO(data), index_col=index_col), expected)
# int, not first column
index_col, expected = 1, DataFrame(
[], columns=['x', 'z'], index=Index([], name='y'))
tm.assert_frame_equal(self.read_csv(
StringIO(data), index_col=index_col), expected)
# str, first column
index_col, expected = 'x', DataFrame(
[], columns=['y', 'z'], index=Index([], name='x'))
tm.assert_frame_equal(self.read_csv(
StringIO(data), index_col=index_col), expected)
# str, not the first column
index_col, expected = 'y', DataFrame(
[], columns=['x', 'z'], index=Index([], name='y'))
tm.assert_frame_equal(self.read_csv(
StringIO(data), index_col=index_col), expected)
# list of int
index_col, expected = [0, 1], DataFrame([], columns=['z'],
index=MultiIndex.from_arrays([[]] * 2, names=['x', 'y']))
tm.assert_frame_equal(self.read_csv(StringIO(data), index_col=index_col), expected,
check_index_type=False)
# list of str
index_col = ['x', 'y']
expected = DataFrame([], columns=['z'], index=MultiIndex.from_arrays(
[[]] * 2, names=['x', 'y']))
tm.assert_frame_equal(self.read_csv(StringIO(data), index_col=index_col), expected,
check_index_type=False)
# list of int, reversed sequence
index_col = [1, 0]
expected = DataFrame([], columns=['z'], index=MultiIndex.from_arrays(
[[]] * 2, names=['y', 'x']))
tm.assert_frame_equal(self.read_csv(StringIO(data), index_col=index_col), expected,
check_index_type=False)
# list of str, reversed sequence
index_col = ['y', 'x']
expected = DataFrame([], columns=['z'], index=MultiIndex.from_arrays(
[[]] * 2, names=['y', 'x']))
tm.assert_frame_equal(self.read_csv(StringIO(data), index_col=index_col), expected,
check_index_type=False)
def test_empty_with_index_col_false(self):
# GH 10413
data = 'x,y'
result = self.read_csv(StringIO(data), index_col=False)
expected = DataFrame([], columns=['x', 'y'])
tm.assert_frame_equal(result, expected)
def test_float_parser(self):
# GH 9565
data = '45e-1,4.5,45.,inf,-inf'
result = self.read_csv(StringIO(data), header=None)
expected = pd.DataFrame([[float(s) for s in data.split(',')]])
tm.assert_frame_equal(result, expected)
def test_int64_overflow(self):
data = """ID
00013007854817840016671868
00013007854817840016749251
00013007854817840016754630
00013007854817840016781876
00013007854817840017028824
00013007854817840017963235
00013007854817840018860166"""
result = self.read_csv(StringIO(data))
self.assertTrue(result['ID'].dtype == object)
self.assertRaises((OverflowError, pandas.parser.OverflowError),
self.read_csv, StringIO(data),
converters={'ID': np.int64})
# Just inside int64 range: parse as integer
i_max = np.iinfo(np.int64).max
i_min = np.iinfo(np.int64).min
for x in [i_max, i_min]:
result = pd.read_csv(StringIO(str(x)), header=None)
expected = pd.DataFrame([x])
tm.assert_frame_equal(result, expected)
# Just outside int64 range: parse as string
too_big = i_max + 1
too_small = i_min - 1
for x in [too_big, too_small]:
result = pd.read_csv(StringIO(str(x)), header=None)
expected = pd.DataFrame([str(x)])
tm.assert_frame_equal(result, expected)
def test_empty_with_nrows_chunksize(self):
# GH 9535
expected = pd.DataFrame([], columns=['foo', 'bar'])
result = self.read_csv(StringIO('foo,bar\n'), nrows=10)
tm.assert_frame_equal(result, expected)
result = next(iter(pd.read_csv(StringIO('foo,bar\n'), chunksize=10)))
tm.assert_frame_equal(result, expected)
result = pd.read_csv(StringIO('foo,bar\n'), nrows=10, as_recarray=True)
result = pd.DataFrame(result[2], columns=result[1], index=result[0])
tm.assert_frame_equal(pd.DataFrame.from_records(
result), expected, check_index_type=False)
result = next(
iter(pd.read_csv(StringIO('foo,bar\n'), chunksize=10, as_recarray=True)))
result = pd.DataFrame(result[2], columns=result[1], index=result[0])
tm.assert_frame_equal(pd.DataFrame.from_records(
result), expected, check_index_type=False)
def test_eof_states(self):
# GH 10728 and 10548
# With skip_blank_lines = True
expected = pd.DataFrame([[4, 5, 6]], columns=['a', 'b', 'c'])
# GH 10728
# WHITESPACE_LINE
data = 'a,b,c\n4,5,6\n '
result = self.read_csv(StringIO(data))
tm.assert_frame_equal(result, expected)
# GH 10548
# EAT_LINE_COMMENT
data = 'a,b,c\n4,5,6\n#comment'
result = self.read_csv(StringIO(data), comment='#')
tm.assert_frame_equal(result, expected)
# EAT_CRNL_NOP
data = 'a,b,c\n4,5,6\n\r'
result = self.read_csv(StringIO(data))
tm.assert_frame_equal(result, expected)
# EAT_COMMENT
data = 'a,b,c\n4,5,6#comment'
result = self.read_csv(StringIO(data), comment='#')
tm.assert_frame_equal(result, expected)
# SKIP_LINE
data = 'a,b,c\n4,5,6\nskipme'
result = self.read_csv(StringIO(data), skiprows=[2])
tm.assert_frame_equal(result, expected)
# With skip_blank_lines = False
# EAT_LINE_COMMENT
data = 'a,b,c\n4,5,6\n#comment'
result = self.read_csv(
StringIO(data), comment='#', skip_blank_lines=False)
expected = pd.DataFrame([[4, 5, 6]], columns=['a', 'b', 'c'])
tm.assert_frame_equal(result, expected)
# IN_FIELD
data = 'a,b,c\n4,5,6\n '
result = self.read_csv(StringIO(data), skip_blank_lines=False)
expected = pd.DataFrame(
[['4', 5, 6], [' ', None, None]], columns=['a', 'b', 'c'])
tm.assert_frame_equal(result, expected)
# EAT_CRNL
data = 'a,b,c\n4,5,6\n\r'
result = self.read_csv(StringIO(data), skip_blank_lines=False)
expected = pd.DataFrame(
[[4, 5, 6], [None, None, None]], columns=['a', 'b', 'c'])
tm.assert_frame_equal(result, expected)
# Should produce exceptions
# ESCAPED_CHAR
data = "a,b,c\n4,5,6\n\\"
self.assertRaises(Exception, self.read_csv,
StringIO(data), escapechar='\\')
# ESCAPE_IN_QUOTED_FIELD
data = 'a,b,c\n4,5,6\n"\\'
self.assertRaises(Exception, self.read_csv,
StringIO(data), escapechar='\\')
# IN_QUOTED_FIELD
data = 'a,b,c\n4,5,6\n"'
self.assertRaises(Exception, self.read_csv,
StringIO(data), escapechar='\\')
class TestPythonParser(ParserTests, tm.TestCase):
def test_negative_skipfooter_raises(self):
text = """#foo,a,b,c
#foo,a,b,c
#foo,a,b,c
#foo,a,b,c
#foo,a,b,c
#foo,a,b,c
1/1/2000,1.,2.,3.
1/2/2000,4,5,6
1/3/2000,7,8,9
"""
with tm.assertRaisesRegexp(ValueError,
'skip footer cannot be negative'):
df = self.read_csv(StringIO(text), skipfooter=-1)
def read_csv(self, *args, **kwds):
kwds = kwds.copy()
kwds['engine'] = 'python'
return read_csv(*args, **kwds)
def read_table(self, *args, **kwds):
kwds = kwds.copy()
kwds['engine'] = 'python'
return read_table(*args, **kwds)
def test_sniff_delimiter(self):
text = """index|A|B|C
foo|1|2|3
bar|4|5|6
baz|7|8|9
"""
data = self.read_csv(StringIO(text), index_col=0, sep=None)
self.assertTrue(data.index.equals(Index(['foo', 'bar', 'baz'])))
data2 = self.read_csv(StringIO(text), index_col=0, delimiter='|')
tm.assert_frame_equal(data, data2)
text = """ignore this
ignore this too
index|A|B|C
foo|1|2|3
bar|4|5|6
baz|7|8|9
"""
data3 = self.read_csv(StringIO(text), index_col=0,
sep=None, skiprows=2)
tm.assert_frame_equal(data, data3)
text = u("""ignore this
ignore this too
index|A|B|C
foo|1|2|3
bar|4|5|6
baz|7|8|9
""").encode('utf-8')
s = BytesIO(text)
if compat.PY3:
# somewhat False since the code never sees bytes
from io import TextIOWrapper
s = TextIOWrapper(s, encoding='utf-8')
data4 = self.read_csv(s, index_col=0, sep=None, skiprows=2,
encoding='utf-8')
tm.assert_frame_equal(data, data4)
def test_regex_separator(self):
data = """ A B C D
a 1 2 3 4
b 1 2 3 4
c 1 2 3 4
"""
df = self.read_table(StringIO(data), sep='\s+')
expected = self.read_csv(StringIO(re.sub('[ ]+', ',', data)),
index_col=0)
self.assertIsNone(expected.index.name)
tm.assert_frame_equal(df, expected)
import pandas as pd
import pickle
import gensim
import gensim.corpora as corpora
from gensim.utils import simple_preprocess
import numpy as np
import datetime as dt
from LDA import remove_stopwords, lemmatization, make_bigrams, sent_to_words
import warnings
warnings.filterwarnings("ignore",category=DeprecationWarning)
# LOAD CLUSTERING MODEL
with open("data/cluster_model.pkl", "rb") as f:
cluster_model = pickle.load(f)
# LOAD LDA MODEL
lda_model = gensim.models.LdaModel.load('data/LDA/lda.model')
id2word = corpora.Dictionary.load('data/LDA/lda.model.id2word')
def get_interests():
"""
Load the raw interest csv file.
:return: The full interest.csv file in pandas dataframe
"""
interest = pd.read_csv('data/interest.csv')
return(interest)
def get_posts():
"""
Load the raw posts csv file.
:return: The full posts.csv file in pandas dataframe
"""
posts = pd.read_csv('data/posts.csv')
return(posts)
def get_users():
"""
Load the raw users csv file.
:return: The full users.csv file in pandas dataframe
"""
users = pd.read_csv('data/users.csv')
return(users)
def filter_posts(uid,date):
"""
Returns posts that have been filtered to be before a given date and aren't owned by the user
:param uid (str): user-id to filter by
:param date (str): date value to filter by
:return: pandas dataframe filtered of any posts greater than date and not owned by user
"""
posts = get_posts()
posts = posts[posts['uid'] != uid]
posts = posts[posts['post_time'] < date]
return posts
def get_user_data(uid):
"""
Returns the selected user account information
:param uid (str): user-id
:return: single-row pandas dataframe of user account information
"""
users = get_users()
user = users[users['uid'] == uid].reset_index(drop=True)
return user
def get_user_interest(uid):
"""
Returns the selected user interest information
:param uid (str): user-id
:return: single-row pandas dataframe of user interest information
"""
interests = get_interests()
interest = interests[interests['uid'] == uid].reset_index(drop=True)
return interest
def cluster_user(uid):
"""
Returns the cluster ID assigned to the selected user by the clustering model
:param uid (str): user-id
:return: single integer cluster ID
"""
# Load needed data for user
users = get_user_data(uid)
interests = get_user_interest(uid)
# Create Age Buckets for clustering
users['date'] = pd.to_datetime(users['dob'], format='%d/%m/%Y', errors='coerce')
users['age'] = dt.datetime.now() - users['date']
users['age'] = (users['age']).dt.days
users['age'] = users['age']/365
users['age_cat'] = np.where(users['age']<20,1,
np.where((users['age']>=20) & (users['age']<25),2,
np.where((users['age']>=25) & (users['age']<30),3,
np.where((users['age']>=30) & (users['age']<35),4,
np.where((users['age']>=35) & (users['age']<40),5,
np.where((users['age']>=40) & (users['age']<45),6,
np.where((users['age']>=45) & (users['age']<50),7,
np.where((users['age']>=50) & (users['age']<55),8,
np.where((users['age']>=55) & (users['age']<60),9,
np.where((users['age']>=60) & (users['age']<65),10,11))))))))))
user_age = users[['uid', 'age_cat']]
user = pd.merge(users,interests, left_on='uid', right_on='uid', how='left')
# Load the categories in order used to cluster
cats = pd.read_csv('data/interest_cats.csv')
user = pd.merge(user,cats, left_on='interest', right_on='categories')
rows = len(users['uid'])
cols = len(cats['id'])  # one column per interest category; uid and age bucket come from user_age below
matrix = pd.DataFrame(np.zeros((rows,cols)))
data = pd.concat([user_age.reset_index(drop=True), matrix], axis=1)
for i in range(1,len(user['uid'])):
#get row number
uid = user['uid'][i]
rnum = data.index[data['uid']==uid]
col = user['id'][i]+1
data.iloc[rnum.values[0],col] = 1
data.drop(columns='uid',axis=1,inplace=True)
# Use model to predict grouping of user
clusters = cluster_model.predict(data)
return clusters[0]
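# Illustrative usage sketch (comment only, not part of the original script):
# cluster_user combines the user's age bucket with a one-hot encoding of their
# interests (ordered by data/interest_cats.csv) and asks the fitted clustering
# model for a label. The uid below is a hypothetical value.
#
#     cluster_id = cluster_user('u_000123')
#     print(cluster_id)  # e.g. 4, later matched against cluster_mapping.csv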
def get_post_topics(uid,date):
"""
Returns the filtered list of posts that are enriched by topic information
:param uid: user-id (str)
:param date: datetime (str)
:return: sorted dataframe of filtered posts with topic information
"""
# Filter posts for datetime given
posts = filter_posts(uid, date)
data = posts['text'] + posts['hashtags']
# Preprocess data
data_words = list(sent_to_words(data))
# Remove Stop Words
data_words_nostops = remove_stopwords(data_words)
# Form Bigrams
data_words_bigrams = make_bigrams(data_words_nostops)
# Do lemmatization keeping only noun, adj, vb, adv
data_lemmatized = lemmatization(data_words_bigrams, allowed_postags=['NOUN', 'ADJ', 'VERB', 'ADV'])
# Create Corpus
texts = data_lemmatized
# Term Document Frequency
corpus = [id2word.doc2bow(text) for text in texts]
vector = lda_model[corpus]
def getKey(item):
return item[1]
topic_probability = []
for t in vector:
topic_probability.append(sorted(t[0], key=getKey, reverse=True))
results = pd.DataFrame(topic_probability, columns=['col 1', 'col 2',
'col 3', 'col 4', 'col 5'])
results['topic'] = 0
results['topic_val'] = float(0)
for i in range(0, len(results['col 1'])):
val = results.iloc[i, 0]
results['topic'][i] = val[0]
results['topic_val'][i] = val[1]
topics = results[['topic', 'topic_val']]
return topics
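# Illustrative usage sketch (comment only, not original code): get_post_topics
# pushes the candidate posts through the saved LDA pipeline and keeps, for each
# post, its most probable topic and that topic's probability. The uid and date
# below are hypothetical.
#
#     topics = get_post_topics('u_000123', '2020-01-01 00:00:00')
#     print(topics.head())  # columns: topic, topic_val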
def enrich_posts(uid,date):
"""
Returns the final list of ranked posts
:param uid: user-id (str)
:param date: cut-off date (str)
:return: sorted dataframe of ranked posts relevant to the given user as of the given date
"""
# Convert date to datetime
timestamp = pd.Timestamp(date)
# Filter posts
posts = filter_posts(uid,date)
# Get relevent ranking system of post topic interests
cluster_cat=cluster_user(uid)
rankings = pd.read_csv('data/cluster_mapping.csv')
ranked = rankings[rankings['cluster']==cluster_cat]
topics = get_post_topics(uid,date)
ranked_topics = pd.merge(topics,ranked, left_on='topic', right_on = 'topic')
# Calculate statistics used to rank
enriched_posts = pd.concat([posts.reset_index(drop=True), ranked_topics], axis=1)
enriched_posts['parent_flag'] = np.where(enriched_posts['parent_id'] != '0',0,1)
enriched_posts['datetime'] = pd.to_datetime(enriched_posts['post_time'], format='%Y-%m-%d %H:%M:%S', errors='coerce')
enriched_posts['age_of_post'] = (timestamp - enriched_posts['datetime'])
enriched_posts['clean_parent_id'] = np.where(enriched_posts['parent_id']=='0',enriched_posts['post_id'],enriched_posts['parent_id'])
age_stats = enriched_posts.groupby(['clean_parent_id']).agg({'age_of_post':['mean','min','max']})
age_stats.columns = ['_'.join(col) for col in age_stats.columns]
age_stats.reset_index(level=0, inplace=True)
age_stats['age_rank'] = age_stats['age_of_post_min'].rank(ascending=True)
rating_stats = enriched_posts.groupby(['clean_parent_id']).agg({'ratio': ['mean', 'sum','count']})
rating_stats.columns = ['_'.join(col) for col in rating_stats.columns]
rating_stats.reset_index(level=0, inplace=True)
enriched_posts = pd.merge(enriched_posts, age_stats, left_on='clean_parent_id', right_on='clean_parent_id', how='left')
# -*- coding: utf-8 -*-
"""
@file
@brief Defines a streaming dataframe.
"""
import pickle
import os
from io import StringIO, BytesIO
from inspect import isfunction
import numpy
import numpy.random as nrandom
import pandas
from pandas.testing import assert_frame_equal
from pandas.io.json import json_normalize
from .dataframe_split import sklearn_train_test_split, sklearn_train_test_split_streaming
from .dataframe_io_helpers import enumerate_json_items, JsonIterator2Stream
class StreamingDataFrameSchemaError(Exception):
"""
Reveals an issue with inconsistant schemas.
"""
pass
class StreamingDataFrame:
"""
Defines a streaming dataframe.
The goal is to reduce the memory footprint.
The class takes a function which creates an iterator
on :epkg:`dataframe`. We assume this function can
be called multiple time. As a matter of fact, the
function is called every time the class needs to walk
through the stream with the following loop:
::
for df in self: # self is a StreamingDataFrame
# ...
The constructor cannot receive an iterator otherwise
this class would be able to walk through the data
only once. The main reason is it is impossible to
:epkg:`*py:pickle` (or :epkg:`dill`)
an iterator: it cannot be replicated.
Instead, the class takes a function which generates
an iterator on :epkg:`DataFrame`.
Most of the methods returns either a :epkg:`DataFrame`
either a @see cl StreamingDataFrame. In the second case,
methods can be chained.
By default, the object checks that the schema remains
the same between two chunks. This can be disabled
by setting *check_schema=False* in the constructor.
The user should expect the data to remain stable.
Every loop should produce the same data. However,
in some situations, it is more efficient not to keep
that constraints. Draw a random @see me sample
is one of these cases.
:param iter_creation: function which creates an iterator or an
instance of @see cl StreamingDataFrame
:param check_schema: checks that the schema is the same
for every :epkg:`dataframe`
:param stable: indicates if the :epkg:`dataframe` remains the same
whenever it is walked through
"""
def __init__(self, iter_creation, check_schema=True, stable=True):
self._delete_ = []
if isinstance(iter_creation, (pandas.DataFrame, dict,
numpy.ndarray, str)):
raise TypeError(
"Unexpected type %r for iter_creation. It must "
"be an iterator." % type(iter_creation))
if isinstance(iter_creation, StreamingDataFrame):
self.iter_creation = iter_creation.iter_creation
self.stable = iter_creation.stable
else:
self.iter_creation = iter_creation
self.stable = stable
self.check_schema = check_schema
def is_stable(self, do_check=False, n=10):
"""
Tells if the :epkg:`dataframe` is supposed to be stable.
@param do_check do not trust the value sent to the constructor
@param n number of rows used to check the stability,
None for all rows
@return boolean
*do_check=True* means the methods checks the first
*n* rows remains the same for two iterations.
"""
if do_check:
for i, (a, b) in enumerate(zip(self, self)):
if n is not None and i >= n:
break
try:
assert_frame_equal(a, b)
except AssertionError: # pragma: no cover
return False
return True
else:
return self.stable
def get_kwargs(self):
"""
Returns the parameters used to call the constructor.
"""
return dict(check_schema=self.check_schema)
def train_test_split(self, path_or_buf=None, export_method="to_csv",
names=None, streaming=True, partitions=None,
**kwargs):
"""
Randomly splits a :epkg:`dataframe` into smaller pieces.
The function returns streams of file names.
It chooses one of the options from module
:mod:`dataframe_split <pandas_streaming.df.dataframe_split>`.
@param path_or_buf a string, a list of strings or buffers, if it is a
string, it must contain ``{}`` like ``partition{}.txt``,
if None, the function returns strings.
@param export_method method used to store the partitions, by default
:epkg:`pandas:DataFrame:to_csv`, additional parameters
will be given to that function
@param names partitions names, by default ``('train', 'test')``
@param kwargs parameters for the export function and
:epkg:`sklearn:model_selection:train_test_split`.
@param streaming the function switches to a
streaming version of the algorithm.
@param partitions splitting partitions
@return outputs of the exports functions or two
@see cl StreamingDataFrame if path_or_buf is None.
The streaming version of this algorithm is implemented by function
@see fn sklearn_train_test_split_streaming. Its documentation
indicates the limitation of the streaming version and gives some
insights about the additional parameters.
"""
if streaming:
if partitions is not None:
if len(partitions) != 2:
raise NotImplementedError( # pragma: no cover
"Only train and test split is allowed, *partitions* "
"must be of length 2.")
kwargs = kwargs.copy()
kwargs['train_size'] = partitions[0]
kwargs['test_size'] = partitions[1]
return sklearn_train_test_split_streaming(self, **kwargs)
return sklearn_train_test_split(self, path_or_buf=path_or_buf,
export_method=export_method,
names=names, **kwargs)
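# Illustrative call pattern (assumption based on the docstring above, not an
# excerpt from the original source): with streaming=True and no path_or_buf
# the method returns two StreamingDataFrame objects.
#
#     train_sdf, test_sdf = sdf.train_test_split(streaming=True,
#                                                partitions=[0.8, 0.2])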
@staticmethod
def _process_kwargs(kwargs):
"""
Filters out parameters for the constructor of this class.
"""
kw = {}
for k in ['check_schema']:
if k in kwargs:
kw[k] = kwargs[k]
del kwargs[k]
return kw
@staticmethod
def read_json(*args, chunksize=100000, flatten=False, **kwargs) -> 'StreamingDataFrame':
"""
Reads a :epkg:`json` file or buffer as an iterator
on :epkg:`DataFrame`. The signature is the same as
:epkg:`pandas:read_json`. The important parameter is
*chunksize* which defines the number
of rows to parse in a single bloc
and it must be defined to return an iterator.
If *lines* is True, the function falls back into
:epkg:`pandas:read_json`, otherwise it used
@see fn enumerate_json_items. If *lines* is ``'stream'``,
*enumerate_json_items* is called with parameter
``lines=True``.
Parameter *flatten* uses the trick described at
`Flattening JSON objects in Python
<https://towardsdatascience.com/flattening-json-objects-in-python-f5343c794b10>`_.
Examples:
.. runpython::
:showcode:
from io import BytesIO
from pandas_streaming.df import StreamingDataFrame
data = b'''{"a": 1, "b": 2}
{"a": 3, "b": 4}'''
it = StreamingDataFrame.read_json(BytesIO(data), lines=True)
dfs = list(it)
print(dfs)
.. runpython::
:showcode:
from io import BytesIO
from pandas_streaming.df import StreamingDataFrame
data = b'''[{"a": 1,
"b": 2},
{"a": 3,
"b": 4}]'''
it = StreamingDataFrame.read_json(BytesIO(data))
dfs = list(it)
print(dfs)
.. index:: IncompleteJSONError
The parsed json must have an empty line at the end otherwise
the following exception is raised:
`ijson.common.IncompleteJSONError: `
`parse error: unallowed token at this point in JSON text`.
"""
if not isinstance(chunksize, int) or chunksize <= 0:
raise ValueError( # pragma: no cover
'chunksize must be a positive integer')
kwargs_create = StreamingDataFrame._process_kwargs(kwargs)
if isinstance(args[0], (list, dict)):
if flatten:
return StreamingDataFrame.read_df(
json_normalize(args[0]), **kwargs_create)
return StreamingDataFrame.read_df(args[0], **kwargs_create)
if kwargs.get('lines', None) == 'stream':
del kwargs['lines']
def localf(a0=args[0]):
if hasattr(a0, 'seek'):
a0.seek(0)
return enumerate_json_items(
a0, encoding=kwargs.get('encoding', None), lines=True,
flatten=flatten)
st = JsonIterator2Stream(localf)
args = args[1:]
if chunksize is None:
return StreamingDataFrame(
lambda: pandas.read_json(
st, *args, chunksize=None, lines=True, **kwargs),
**kwargs_create)
def fct1(st=st, args=args, chunksize=chunksize, kw=kwargs.copy()):
st.seek(0)
for r in pandas.read_json(
st, *args, chunksize=chunksize, nrows=chunksize,
lines=True, **kw):
yield r
return StreamingDataFrame(fct1, **kwargs_create)
if kwargs.get('lines', False):
if flatten:
raise NotImplementedError(
"flatten==True is implemented with option lines='stream'")
if chunksize is None:
return StreamingDataFrame(
lambda: pandas.read_json(*args, chunksize=None, **kwargs),
**kwargs_create)
def fct2(args=args, chunksize=chunksize, kw=kwargs.copy()):
for r in pandas.read_json(
*args, chunksize=chunksize, nrows=chunksize, **kw):
yield r
return StreamingDataFrame(fct2, **kwargs_create)
st = JsonIterator2Stream(
lambda a0=args[0]: enumerate_json_items(
a0, encoding=kwargs.get('encoding', None), flatten=flatten))
args = args[1:]
if 'lines' in kwargs:
del kwargs['lines']
if chunksize is None:
return StreamingDataFrame(
lambda: pandas.read_json(
st, *args, chunksize=chunksize, lines=True, **kwargs),
**kwargs_create)
def fct3(st=st, args=args, chunksize=chunksize, kw=kwargs.copy()):
if hasattr(st, 'seek'):
st.seek(0)
for r in pandas.read_json(
st, *args, chunksize=chunksize, nrows=chunksize,
lines=True, **kw):
yield r
return StreamingDataFrame(fct3, **kwargs_create)
@staticmethod
def read_csv(*args, **kwargs) -> 'StreamingDataFrame':
"""
Reads a :epkg:`csv` file or buffer
as an iterator on :epkg:`DataFrame`.
The signature is the same as :epkg:`pandas:read_csv`.
The important parameter is *chunksize* which defines the number
of rows to parse in a single bloc. If not specified,
it will be equal to 100000.
"""
if not kwargs.get('iterator', True):
raise ValueError("If specified, iterator must be True.")
if not kwargs.get('chunksize', 100000):
raise ValueError("If specified, chunksize must not be None.")
kwargs_create = StreamingDataFrame._process_kwargs(kwargs)
kwargs['iterator'] = True
if 'chunksize' not in kwargs:
kwargs['chunksize'] = 100000
return StreamingDataFrame(lambda: pandas.read_csv(*args, **kwargs), **kwargs_create)
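# Illustrative sketch (comment only, not original code): parsing is deferred;
# every walk over the object re-reads the file in blocks of `chunksize` rows.
# The file name and `process` function below are placeholders.
#
#     sdf = StreamingDataFrame.read_csv('big_file.csv', chunksize=50000)
#     for chunk in sdf:
#         process(chunk)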
@staticmethod
def read_str(text, **kwargs) -> 'StreamingDataFrame':
"""
Reads a :epkg:`DataFrame` as an iterator on :epkg:`DataFrame`.
The signature is the same as :epkg:`pandas:read_csv`.
The important parameter is *chunksize* which defines the number
of rows to parse in a single bloc.
"""
if not kwargs.get('iterator', True):
raise ValueError("If specified, iterator must be True.")
if not kwargs.get('chunksize', 100000):
raise ValueError("If specified, chunksize must not be None.")
kwargs_create = StreamingDataFrame._process_kwargs(kwargs)
kwargs['iterator'] = True
if 'chunksize' not in kwargs:
kwargs['chunksize'] = 100000
if isinstance(text, str):
buffer = StringIO(text)
else:
buffer = BytesIO(text)
return StreamingDataFrame(
lambda: pandas.read_csv(buffer, **kwargs), **kwargs_create)
import os.path
import json
import zipfile
import numpy as np
import pandas as pd
import requests
from openpyxl import load_workbook
import ukcensusapi.Nomisweb as Api
import ukpopulation.utils as utils
class SNPPData:
"""
Functionality for downloading and collating UK Subnational Population Projection (SNPP) data
Nomisweb stores the England data (only)
Wales/Scotland/NI are not the responsibility of ONS and are made available online by the relevant statistical agency
"""
def __init__(self, cache_dir=utils.default_cache_dir()):
self.cache_dir = cache_dir
self.data_api = Api.Nomisweb(self.cache_dir)
self.data = {}
self.data[utils.EN] = self.__do_england()
self.data[utils.WA] = self.__do_wales()
self.data[utils.SC] = self.__do_scotland()
self.data[utils.NI] = self.__do_nireland()
# LADs * 26 years * 91 ages * 2 genders
# assert len(self.data) == (326+22+32+11) * 26 * 91 * 2
def min_year(self, code):
"""
Returns the first year in the projection, assumes a single LAD or country code
"""
# convert to country if necessary
if "0" in code:
code = utils.country(code)[0]
return min(self.data[code].PROJECTED_YEAR_NAME.unique())
def max_year(self, code):
"""
Returns the final year in the projection, assumes a single LAD or country code
"""
# convert to country if necessary
if "0" in code:
code = utils.country(code)[0]
return max(self.data[code].PROJECTED_YEAR_NAME.unique())
def all_lads(self, countries):
"""
Returns all the LAD codes in the country or countries specified
Supports EN WA SC NI EW GB UK
"""
if isinstance(countries, str):
countries = [countries]
lads = []
for country in countries:
if country in self.data:
lads.extend(self.data[country].GEOGRAPHY_CODE.unique())
else:
# warn if missing or invalid
print("WARNING: no LAD codes for country %s", country)
return lads
def filter(self, geog_codes, years=None, ages=range(0, 91), genders=[1, 2]):
# convert inputs to arrays if single values supplied (for isin)
if isinstance(geog_codes, str):
geog_codes = [geog_codes]
if np.isscalar(ages):
ages = [ages]
if np.isscalar(genders):
genders = [genders]
# Handle problem with empty list not being recognised as Null, was causing problem in utils.trim_range() below
if not years:
years = None
countries = utils.country(geog_codes)
# TODO fix incorrect assumption that all countries have the same year range
years = utils.trim_range(years, self.min_year(countries[0]), self.max_year(countries[0]))
retval = pd.DataFrame() # {"GEOGRAPHY_CODE": [], "PROJECTED_YEAR_NAME": [], "C_AGE": [], "GENDER":[], "OBS_VALUE": []})
# loop over datasets as needed
for country in countries:
# apply filters
retval = retval.append(self.data[country][(self.data[country].GEOGRAPHY_CODE.isin(geog_codes)) &
(self.data[country].PROJECTED_YEAR_NAME.isin(years)) &
(self.data[country].C_AGE.isin(ages)) &
(self.data[country].GENDER.isin(genders))], ignore_index=True,
sort=False)
# check for any codes requested that werent present (this check is far easier to to on the result)
invalid_codes = np.setdiff1d(geog_codes, retval.GEOGRAPHY_CODE.unique())
if len(invalid_codes) > 0:
raise ValueError("Filter for LAD code(s): %s for years %s returned no data (check also age/gender filters)"
% (str(invalid_codes), str(years)))
return retval
def aggregate(self, categories, geog_codes, years=None, ages=range(0, 91), genders=[1, 2]):
data = self.filter(geog_codes, years, ages, genders)
# invert categories (they're the ones to aggregate, not preserve)
return data.groupby(utils.check_and_invert(categories))["OBS_VALUE"].sum().reset_index()
# year_range can include year that dont need to be extrapolated
# Filtering age and gender is not (currently) supported
def extrapolate(self, npp, geog_codes, year_range):
if isinstance(geog_codes, str):
geog_codes = [geog_codes]
geog_codes = utils.split_by_country(geog_codes)
all_codes_all_years = pd.DataFrame()
for country in geog_codes:
if not geog_codes[country]: continue
max_year = self.max_year(country)
last_year = self.filter(geog_codes[country], max_year)
(in_range, ex_range) = utils.split_range(year_range, max_year)
# years that dont need to be extrapolated
all_years = self.filter(geog_codes[country], in_range) if in_range else pd.DataFrame()
for year in ex_range:
data = last_year.copy()
scaling = npp.year_ratio("ppp", country, max_year, year)
data = data.merge(scaling[["GENDER", "C_AGE", "OBS_VALUE"]], on=["GENDER", "C_AGE"])
data["OBS_VALUE"] = data.OBS_VALUE_x * data.OBS_VALUE_y
data.PROJECTED_YEAR_NAME = year
all_years = all_years.append(data.drop(["OBS_VALUE_x", "OBS_VALUE_y"], axis=1), ignore_index=True,
sort=False)
all_codes_all_years = all_codes_all_years.append(all_years, ignore_index=True, sort=False)
return all_codes_all_years
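# Illustrative sketch (comment only; `npp` is assumed to be the companion NPP
# projection object exposing year_ratio(), as used above): extrapolate scales
# the final SNPP year by NPP year-on-year ratios for any requested years
# beyond the SNPP horizon.
#
#     df = snpp.extrapolate(npp, "E08000021", range(2040, 2051))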
def extrapolagg(self, categories, npp, geog_codes, year_range):
"""
Extrapolate and then aggregate
"""
data = self.extrapolate(npp, geog_codes, year_range)
# invert categories (they're the ones to aggregate, not preserve)
return data.groupby(utils.check_and_invert(categories))["OBS_VALUE"].sum().reset_index()
def create_variant(self, variant_name, npp, geog_codes, year_range):
"""
Apply NPP variant to SNPP: SNPP(v) = SNPP(0) * sum(a,g) [ NPP(v) / NPP(0) ]
Preserves age-gender structure of SNPP data
"""
result = pd.DataFrame()
if isinstance(geog_codes, str):
geog_codes = [geog_codes]
for geog_code in geog_codes:
(pre_range, in_range) = utils.split_range(year_range, npp.min_year() - 1)
# for any years prior to NPP we just use the SNPP data as-is (i.e. "ppp")
pre_data = self.filter(geog_code, pre_range) if pre_range else pd.DataFrame()
if len(pre_data) > 0:
print("WARNING: variant {} not applied for years {} that predate the NPP data".format(variant_name,
pre_range))
# return if there's nothing in the NPP range
if not in_range:
result.append(pre_data)
continue
data = self.extrapolate(npp, geog_code, in_range).sort_values(
["C_AGE", "GENDER", "PROJECTED_YEAR_NAME"]).reset_index(drop=True)
scaling = npp.variant_ratio(variant_name, utils.country(geog_code), year_range).reset_index().sort_values(
["C_AGE", "GENDER", "PROJECTED_YEAR_NAME"])
# scaling.to_csv(variant_name + ".csv", index=False)
print("DF: ", len(data), ":", len(scaling))
assert (len(data) == len(scaling))
data.OBS_VALUE = data.OBS_VALUE * scaling.OBS_VALUE
# prepend any pre-NPP data
result = result.append(pre_data.append(data))
return result
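# Illustrative sketch (comment only; the variant code "hhh" is an assumed ONS
# high-variant name, and `npp` the companion NPP object): create_variant
# rescales the principal SNPP by the NPP variant/principal ratio while keeping
# the SNPP age-gender structure.
#
#     df_hhh = snpp.create_variant("hhh", npp, "E08000021", range(2018, 2044))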
def __do_england(self):
# return self.__do_england_ons() # 2014
return self.__do_england_nomisweb() # 2018
# nomisweb data is now 2018-based
def __do_england_nomisweb(self):
print("Collating SNPP data for England...")
# need to do this in 2 batches as entire table has >1000000 rows
table_internal = "NM_2006_1" # SNPP
query_params = {
"gender": "1,2",
"c_age": "101...191",
"MEASURES": "20100",
"date": "latest", # 2018-based
"projected_year": "2018...2031",
"select": "geography_code,projected_year_name,gender,c_age,obs_value",
"geography": "1946157057...1946157382"
}
snpp_e = self.data_api.get_data(table_internal, query_params)
query_params["projected_year"] = "2032...2043"
snpp_e = snpp_e.append(self.data_api.get_data(table_internal, query_params))
# make age actual year
snpp_e.C_AGE = snpp_e.C_AGE - 101
# snpp_e[(snpp_e.GEOGRAPHY_CODE=="E08000021") & (snpp_e.PROJECTED_YEAR_NAME==2039)].to_csv("snpp_ncle_2016.csv")
# assert(len(snpp_e) == 26*2*91*326) # 326 LADs x 91 ages x 2 genders x 26 years
return snpp_e
# Alternative method of downloading the en data from ONS website(Only works with 2014 as it stands).
def __do_england_ons(self):
print("Collating SNPP data for England...")
england_src = "https://www.ons.gov.uk/file?uri=/peoplepopulationandcommunity/populationandmigration/populationprojections/datasets/localauthoritiesinenglandz1/2014based/snppz1population.zip"
england_raw = self.cache_dir + "/snpp_e.csv"
england_zip = self.cache_dir + "/snpp_e.zip"
if os.path.isfile(england_raw):
snpp_e = pd.read_csv(england_raw)
else:
response = requests.get(england_src)
with open(england_zip, 'wb') as fd:
for chunk in response.iter_content(chunk_size=1024):
fd.write(chunk)
print("Downloaded", england_zip)
z = zipfile.ZipFile(england_zip)
# print(z.namelist())
snpp_e = pd.DataFrame()
for gender in [1, 2]:
filename = "2014 SNPP Population " + ("males" if gender == 1 else "females") + ".csv"
chunk = pd.read_csv(z.open(filename)) \
.drop(["AREA_NAME", "COMPONENT", "SEX"], axis=1) \
.query('AGE_GROUP != "All ages"')
# .AGE_GROUP.replace({"90 and over": "90"}
chunk.AGE_GROUP = chunk.AGE_GROUP.replace({"90 and over": "90"})
chunk = chunk.melt(id_vars=["AREA_CODE", "AGE_GROUP"])
# chunk = chunk[chunk.AGE_GROUP != "all ages"]
# chunk = chunk.stack().reset_index()
chunk.columns = ["GEOGRAPHY_CODE", "C_AGE", "PROJECTED_YEAR_NAME", "OBS_VALUE"]
chunk["GENDER"] = gender
snpp_e = snpp_e.append(chunk)
# assert(len(snpp_e) == 26*2*91*326) # 326 districts x 91 ages x 2 genders x 26 years
snpp_e.to_csv(england_raw, index=False)
# snpp_e[(snpp_e.GEOGRAPHY_CODE=="E08000021") & (snpp_e.PROJECTED_YEAR_NAME==2039)].to_csv("snpp_ncle_2014.csv")
return snpp_e
# Wales
def __do_wales(self):
print("Collating SNPP data for Wales...")
cache_dir = utils.default_cache_dir()
wales_raw = cache_dir + "/snpp_w.csv"
if os.path.isfile(wales_raw):
snpp_w = pd.read_csv(wales_raw)
else:
fields = ['Area_AltCode1', 'Year_Code', 'Data', 'Gender_Code', 'Age_Code', 'Area_Hierarchy', 'Variant_Code']
# StatsWales is an OData endpoint, so select fields of interest
url = "http://open.statswales.gov.wales/dataset/popu6010?$select={}".format(",".join(fields))
# use OData syntax to filter P (persons), AllAges (all ages), Area_Hierarchy 691 (LADs)
url += "&$filter=Gender_Code ne 'P' and Area_Hierarchy gt 690 and Area_Hierarchy lt 694 and Variant_Code eq 'Principal'"
#
data = []
while True:
print(url)
r = requests.get(url)
r_data = r.json()
data += r_data['value']
if "odata.nextLink" in r_data:
url = r_data["odata.nextLink"]
else:
break
snpp_w = pd.DataFrame(data)
# Remove unwanted and rename wanted columns
snpp_w = snpp_w.drop(["Area_Hierarchy", "Variant_Code"], axis=1)
snpp_w = snpp_w.rename(columns={"Age_Code": "C_AGE",
"Area_AltCode1": "GEOGRAPHY_CODE",
"Data": "OBS_VALUE",
"Gender_Code": "GENDER",
"Year_Code": "PROJECTED_YEAR_NAME"})
# Remove all but SYOA and make numeric
snpp_w = snpp_w[(snpp_w.C_AGE != "AllAges") & (snpp_w.C_AGE != "00To15") & (snpp_w.C_AGE != "16To64") & (
snpp_w.C_AGE != "65Plus")]
snpp_w.loc[snpp_w.C_AGE == "90Plus", "C_AGE"] = "90"
snpp_w.C_AGE = pd.to_numeric(snpp_w.C_AGE)
# convert gender to census convention 1=M, 2=F
snpp_w.GENDER = snpp_w.GENDER.map({"M": 1, "F": 2})
# assert(len(snpp_w) == 26*2*91*22) # 22 LADs x 91 ages x 2 genders x 26 years
print(wales_raw)
snpp_w.to_csv(wales_raw, index=False)
return snpp_w
def __do_scotland(self):
lookup = {
'Aberdeen City': 'S12000033',
'Aberdeenshire': 'S12000034',
'Angus': 'S12000041',
'Argyll & Bute': 'S12000035',
'City of Edinburgh': 'S12000036',
'Clackmannanshire': 'S12000005',
'Dumfries & Galloway': 'S12000006',
'Dundee City': 'S12000042',
'East Ayrshire': 'S12000008',
'East Dunbartonshire': 'S12000045',
'East Lothian': 'S12000010',
'East Renfrewshire': 'S12000011',
'Falkirk': 'S12000014',
'Fife': 'S12000015',
'Glasgow City': 'S12000046',
'Highland': 'S12000017',
'Inverclyde': 'S12000018',
'Midlothian': 'S12000019',
'Moray': 'S12000020',
'Na h-Eileanan Siar': 'S12000013',
'North Ayrshire': 'S12000021',
'North Lanarkshire': 'S12000044',
'Orkney Islands': 'S12000023',
'Perth & Kinross': 'S12000024',
'Renfrewshire': 'S12000038',
'Scottish Borders': 'S12000026',
'Shetland Islands': 'S12000027',
'South Ayrshire': 'S12000028',
'South Lanarkshire': 'S12000029',
'Stirling': 'S12000030',
'West Dunbartonshire': 'S12000039',
'West Lothian': 'S12000040'
}
print("Collating SNPP data for Scotland...")
scotland_raw = self.cache_dir + "/snpp_s.csv"
scotland_src = "https://www.nrscotland.gov.uk/files//statistics/population-projections/sub-national-pp-18/detailed-tables/pop-proj-principal-2018-council-area.zip"
scotland_zip = self.cache_dir + "/snpp_s_2018.zip"
if os.path.isfile(scotland_raw):
snpp_s = pd.read_csv(scotland_raw)
else:
response = requests.get(scotland_src)
with open(scotland_zip, 'wb') as fd:
for chunk in response.iter_content(chunk_size=1024):
fd.write(chunk)
print("Downloaded", scotland_zip)
z = zipfile.ZipFile(scotland_zip)
snpp_s = pd.DataFrame()
for filename in z.namelist():
council_area = filename[37:-4]
if council_area in ["Metadata", "Scotland"]:
continue
GEOGRAPHY_CODE = lookup[council_area]
chunk = pd.read_csv(z.open(filename), encoding="ISO-8859-1", header=102)
# Drop Nan Rows
chunk = chunk.dropna(axis=0, how="all")
# Drop Last row with containing Copyright Cell.
chunk = chunk.drop(chunk.tail(1).index[0])
chunk = chunk.rename(columns={"Unnamed: 0": "C_AGE"})
chunk["GEOGRAPHY_CODE"] = GEOGRAPHY_CODE
chunk["GENDER"] = ''
# Drop rows where C_AGE == "All Ages"
chunk = chunk.drop(chunk.index[chunk["C_AGE"] == "All ages"])
chunk.loc[(chunk.C_AGE == '90 and over'), 'C_AGE'] = 90
chunk = chunk.reset_index(drop=True)
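# The NRS sheet stacks a MALES block on top of a FEMALES block; rows between the two headers
# are tagged gender 1 (male) and rows after the FEMALES header gender 2 (female)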
chunk.loc[
chunk.index[(chunk["C_AGE"] == "MALES")][0] + 1:chunk.index[(chunk["C_AGE"] == "FEMALES")][0] - 4,
"GENDER"] = 1
chunk.loc[chunk.index[(chunk["C_AGE"] == "FEMALES")][0] + 1:, "GENDER"] = 2
chunk = chunk[chunk.GENDER != '']
for year in range(2018, 2044):
appendable_chunk = chunk[["GEOGRAPHY_CODE", "C_AGE", str(year), "GENDER"]].rename(
columns={str(year): "OBS_VALUE"})
appendable_chunk["PROJECTED_YEAR_NAME"] = year
snpp_s = snpp_s.append(appendable_chunk)
snpp_s = snpp_s.reset_index(drop=True)
snpp_s['OBS_VALUE'] = snpp_s['OBS_VALUE'].str.replace(',', '')
snpp_s['OBS_VALUE'] = pd.to_numeric(snpp_s['OBS_VALUE'])
snpp_s.to_csv(scotland_raw, index=False)
return snpp_s
def __do_nireland(self):
# Niron
# (1 worksheet per LAD equivalent)
print("Collating SNPP data for Northern Ireland...")
ni_src = "https://www.nisra.gov.uk/sites/nisra.gov.uk/files/publications/SNPP16_LGD14_SYA_1641.xlsx"
ni_raw = self.cache_dir + "/snpp_ni.csv"
if os.path.isfile(ni_raw):
snpp_ni = pd.read_csv(ni_raw)
else:
response = requests.get(ni_src)
with open(self.cache_dir + "/ni_raw.xlsx", 'wb') as fd:
for chunk in response.iter_content(chunk_size=1024):
fd.write(chunk)
# easier to hard-code the worksheet names we need (since unlikely to change frequently)
districts = ["Antrim & Newtownabbey",
"Ards & North Down",
"Armagh Banbridge & Craigavon",
"Belfast",
"Causeway Coast & Glens",
"Derry & Strabane",
"Fermanagh & Omagh",
"Lisburn & Castlereagh",
"Mid & East Antrim",
"Mid Ulster",
"Newry Mourne & Down"]
xls_ni = load_workbook(self.cache_dir + "/ni_raw.xlsx", read_only=True)
snpp_ni = pd.DataFrame()
for d in districts:
# 1 extra row compared to 2014 data (below was A2)
area_code = xls_ni[d]["A3"].value
# 2 extra rows compared to 2014 data (below was A3:A95)
males = utils.read_cell_range(xls_ni[d], "A5", "AA97")
females = utils.read_cell_range(xls_ni[d], "A100", "AA192")
dfm = pd.DataFrame(data=males[1:, 1:], index=males[1:, 0], columns=males[0, 1:]).drop(
["Age"]).stack().reset_index()
dfm.columns = ["C_AGE", "PROJECTED_YEAR_NAME", "OBS_VALUE"]
dfm["GENDER"] = pd.Series(1, dfm.index)
dfm["GEOGRAPHY_CODE"] = | pd.Series(area_code, dfm.index) | pandas.Series |
#%% [markdown]
# # Author : <NAME>
# ***
# ## Capstone Project for Qualifying IBM Data Science Professional Certification
# ***
#%% [markdown]
#
# # Import Packages
#
#%%
import numpy as np # library to handle data in a vectorized manner
import pandas as pd # library for data analsysis
pd.set_option('display.max_columns', None)
pd.set_option('display.max_rows', None)
pd.set_option('display.width', 1000)
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Dependency
----------------------------------
Dependency analysis class
Created on Nov 8, 2018
Last edited on Nov 8, 2018
@author: <NAME>
"""
import os
import io
import sys
import datetime
import numpy as np
from IPython import embed
import pandas as pd
import logging
import matplotlib.pyplot as plt
plt.switch_backend('agg')
import seaborn as sns
import re
import subprocess
import yaml
from glob import glob
import scipy
from statsmodels.robust.scale import mad
from collections import Counter
from collections import defaultdict as ddict
from sklearn.metrics import roc_curve, average_precision_score, f1_score, roc_auc_score
from sklearn.decomposition import PCA
from sklearn.model_selection import KFold
from sklearn.preprocessing import MinMaxScaler, StandardScaler
from sklearn.model_selection._search import ParameterGrid
import torch
import paths
from biovnn_model import BioVNNmodel
from utils import compute_AUC_bootstrap, plot_pred_true_r_by_gene_MAD, plot_pred_true_r_by_gene_mean, gene_level_cor, \
individual_auc, plot_ROC, plot_top_ROC, plot_hist_cor, plot_hist_auc
disease_mapping = {'Bladder Cancer': 'BLCA',
'Breast Cancer': 'BRCA',
'breast': 'BRCA',
'Cervical Cancer': 'CESC',
'Colon Cancer': 'COAD',
'Colon/Colorectal Cancer': 'COAD',
'colorectal': 'COAD',
'GBM/Brain Cancer': 'GBM',
'glioblastoma': 'GBM',
'Head and Neck Cancer': 'HNSC',
'upper_aerodigestive': 'HNSC',
'Liver Cancer': 'LIHC',
'liver': 'LIHC',
'Ovarian Cancer': 'OV',
'ovary': 'OV',
'Skin Cancer': 'SKCM',
'skin': 'SKCM',
'Gastric Cancer': 'STAD',
'Soft Tissue/ Thyroid Cancer': 'THCA',
'Thyroid Cancer': 'THCA',
'Endometrial Cancer': 'UCEC',
'Endometrial/Uterine Cancer': 'UCEC',
'uterus': 'UCEC',
'Esophageal Cancer': 'ESCA',
'esophagus': 'ESCA',
'Pancreatic Cancer': 'PAAD',
'pancreas': 'PAAD',
'Non-Small Cell Lung Cancer (NSCLC), Adenocarcinoma': 'LUAD',
'Non-Small Cell Lung Cancer (NSCLC), Squamous Cell Carcinoma': 'LUSC',
'Renal Carcinoma, clear cell': 'KIRC',
'Glioblastoma': 'GBM',
'Acute Myelogenous Leukemia (AML)': 'LAML',
'AML': 'LAML'}
def load_params(output_dir=None, param_f=None):
if param_f is None:
param_f = os.path.join(output_dir, 'param.yaml')
with open(param_f, 'r') as stream:
params = yaml.safe_load(stream)
return params
def save_params(output_dir, params):
with io.open(os.path.join(output_dir, 'param.yaml'), 'w', encoding='utf8') as outfile:
yaml.dump(params, outfile, default_flow_style=False, allow_unicode=True)
assert params == load_params(output_dir)
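# Minimal usage sketch for this module (not part of the original code; the YAML path and
# directory names below are illustrative assumptions only):
#
#   params = load_params(param_f='params/biovnn_example.yaml')  # hypothetical params file
#   dep = Dependency(cancer_type='PANC', data_dir='/path/to/data', result_dir='/path/to/results',
#                    run_name='example_run', params=params)
#   dep.perform('BioVNN', params)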
class Dependency(object):
def __init__(self, cancer_type, data_dir, result_dir, run_name, params,
depmap_ver='19Q3', use_hierarchy=True):
self.method = 'BioVNN'
self.cancer_type = cancer_type
self.n_cluster = None
self.run_name = run_name
self.patient_list = []
self.cancer_type_to_patients = ddict(list)
self.rna_dir = os.path.join(data_dir, 'DepMap', depmap_ver)
self.data_dir = data_dir
if 'ref_groups' in params and params['ref_groups'] == 'GO':
self.community_file = os.path.join(data_dir, 'GO', 'goa_human_20201212.gmt')
self.community_hierarchy_file = os.path.join(self.data_dir, 'GO', 'go_20201212_relation.txt')
else:
self.community_file = os.path.join(data_dir, 'Reactome', 'ReactomePathways.gmt')
self.community_hierarchy_file = os.path.join(self.data_dir, 'Reactome', 'ReactomePathwaysRelation.txt')
self.gene_id_file = os.path.join(self.data_dir, 'Reactome', 'Homo_sapiens_9606.gene_info')
self.gene_id_dict = pd.read_csv(self.gene_id_file, sep='\t', index_col=1)['Symbol'].to_dict()
self.Reactome_name_file = os.path.join(data_dir, 'Reactome', 'ReactomePathways.txt')
self.Reactome_name_dict = pd.read_csv(self.Reactome_name_file, sep='\t', index_col=0, header=None)[1].to_dict()
self.Reactome_reaction_file = os.path.join(self.data_dir, 'Reactome', 'NCBI2Reactome_PE_Reactions_human.txt')
self.Reactome_reaction_df = pd.read_csv(self.Reactome_reaction_file, sep='\t', index_col=None, header=None)
self.Reactome_gene_reaction_dict = ddict(list)
self.Reactome_reaction_gene_dict = ddict(list)
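# Build gene <-> reaction lookups from the NCBI2Reactome table, keeping human (HSA) entries only
# and preferring official symbols from the Entrez mapping when available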
for i, row in self.Reactome_reaction_df.iterrows():
if 'HSA' in row[1] and 'HSA' in row[3]: # Make sure they are from human
if row[0] in self.gene_id_dict:
symbol = self.gene_id_dict[row[0]]
else:
symbol = row[2].split(' [')[0]
self.Reactome_gene_reaction_dict[symbol].append(row[3])
self.Reactome_reaction_gene_dict[row[3]].append(symbol)
self.community_dict = {}
self.community_hierarchy = []
self.community_hierarchy_all = None
self.community_hierarchy_random = []
self.community_hierarchy_random_all = None
self.community_hierarchy_ones = []
self.community_hierarchy_ones_all = None
self.community_hierarchy_dicts_all = {}
self.use_hierarchy = use_hierarchy
self.community_matrix = None
self.result_path = os.path.join(result_dir, self.__class__.__name__, run_name)
self.temp_path = os.path.join(result_dir, self.__class__.__name__, 'temp')
os.makedirs(self.result_path, exist_ok=True)
os.makedirs(self.temp_path, exist_ok=True)
self._dependency_classes = ['Dependency', 'Transfer', 'Postanalysis', 'Postanalysis_ts',
'Postanalysis_transfer', 'Prospective',
'Timestamped', 'Interpret', 'Interpret_ts']
self._dependency_classes_plot = ['Dependency', 'Transfer', 'Postanalysis', 'Postanalysis_ts',
'Postanalysis_transfer', 'Timestamped']
self.params = params
self.load_result = params.get('load_result', False)
self.load_result_dir_name = params.get('load_result_dir_name', False)
if self.load_result and self.load_result_dir_name:
if 'load_result_dir_suffix' in params:
if 'load_result_dir_full' in params:
if params['load_result_dir_full']:
self.load_result_dir = params['load_result_dir_suffix']
else:
self.load_result_dir = os.path.join(result_dir, params['load_result_dir_suffix'])
else:
self.load_result_dir = os.path.join(result_dir, params['load_result_dir_suffix'])
else:
self.load_result_dir = '/'.join(self.result_path.split('/')[:-1] + [self.load_result_dir_name])
params = load_params(self.load_result_dir)
if 'run_mode' in self.params:
run_mode = self.params['run_mode']
else:
run_mode = None
self.params.update(params)
params = self.params
if run_mode:
params['run_mode'] = run_mode
self.params['run_mode'] = run_mode
self.use_cuda = params.get('use_cuda', True)
self.data_types = params.get('data_types', ['rna'])
self.use_all_gene = params.get('use_all_gene', True)
self.exp_ratio_min = params.get('exp_ratio_min', 0.01)
self.feature_max = params.get('feature_max', 99999)
self.feature_per_group_max = params.get('feature_per_group_max', 100)
self.repeat_n = params.get('repeat_n', 1)
self.fold_n = params.get('fold_n', 5)
self.cv_fold = params.get('cv_fold', 0)
self.model_v = params.get('model_v', 'clh_v1')
self.cv_fold_only_run = params.get('cv_fold_only_run', 1)
self.other_cancer_types = params.get('other_cancer_types', [])
self.rna_top_n_std = params.get('rna_top_n_std', 10000)
self.community_affected_size_min = params.get('community_affected_size_min', 5)
self.community_affected_size_max = params.get('community_affected_size_max', 999999)
self.require_label_gene_in_gene_group = params.get('require_label_gene_in_gene_group', True)
self.clip_Xval_Xtest = params.get('clip_Xval_Xtest', [-1, 1])
self.use_MinMaxScaler = params.get('use_MinMaxScaler', False)
self.use_StandardScaler = params.get('use_StandardScaler', True)
self.use_tanh_feature = params.get('use_tanh_feature', False)
self.use_sigmoid_feature = params.get('use_sigmoid_feature', False)
self.use_community_filter = params.get('use_community_filter', True)
self.test_run = params.get('test_run', False)
self.select_genes_in_label = params.get('select_genes_in_label', 'dgidb_w_interaction')
self.use_classification = params.get('use_classification', True)
self.use_binary_dependency = params.get('use_binary_dependency', True)
self.use_class_weights = params.get('use_class_weights', True)
self.use_normalized_class_weights = params.get('use_normalized_class_weights', False)
self.use_sample_class_weights = params.get('use_sample_class_weights', False)
self.use_normalized_sample_class_weights = params.get('use_normalized_sample_class_weights', True)
self.use_all_dependency_gene = params.get('use_all_dependency_gene', True)
self.use_all_feature_for_random_group = params.get('use_all_feature_for_random_group', False)
self.use_all_feature_for_fully_net = params.get('use_all_feature_for_fully_net', False)
self.use_deletion_vector = params.get('use_deletion_vector', True)
self.use_consistant_groups_for_labels = params.get('use_consistant_groups_for_labels', False)
self.run_mode = params.get('run_mode',
'ref') # Could be ref, random_predictor, random, expression_control or full
self.random_group_permutation_ratio = params.get('random_group_permutation_ratio', 1)
self.random_group_hierarchy_permutation_ratio = params.get('random_group_hierarchy_permutation_ratio', 1)
self.random_group_permutation_seed = params.get('random_group_permutation_seed', 9527)
self.leaf_group_gene_in_label_max = params.get('leaf_group_gene_in_label_max', 50)
self.split_by_cancer_type = params.get('split_by_cancer_type', True)
self.save_model_ckpt = params.get('save_model_ckpt', True)
self.output_pred_small = ['RPS20', 'MYC', 'MYCN', 'PIK3CA']
self.GSP_min = params.get('GSP_min', 6)
self.GSN_min = params.get('GSN_min', 6)
self.gene_list = None
self.gene_list_name = None
self.accuracy = None
self.f1 = None
self.confusion_mat = None
self.mcc = None
self.pearson_r = None
self.spearman_rho = None
self.mse = None
self.feature_importance = []
metrics = ['accuracy', 'confusion_mat', 'f1', 'mcc', 'pearson_r', 'spearman_rho', 'mse', 'pearson_r2',
'AUC', 'PR']
data_splits = ['train', 'val', 'test']
for x in metrics:
self.__dict__[x] = ddict(dict)
for z in range(self.repeat_n + 1):
self.__dict__[x][z] = ddict(dict)
for y in data_splits:
self.__dict__[x][z][y] = ddict(list)
for x in ['pred', 'idx']:
self.__dict__[x] = ddict(dict)
for y in data_splits:
self.__dict__[x][y] = ddict(list)
self.metric_output = {}
for y in data_splits:
self.metric_output[y] = pd.DataFrame()
self.save_load_data = ['rna']
self.depmap_ver = depmap_ver
os.makedirs(self.rna_dir, exist_ok=True)
self.save_load_data = ['rna', 'dependency']
self.hdf5_df_file = os.path.join(self.temp_path,
'df_{}_depmap_{}.hdf5'.format('_'.join(sorted(self.data_types)),
self.depmap_ver))
def prepare_data(self):
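# Load gene groups, cell-line metadata and the label gene list, then read the dependency and
# expression matrices (or restore them from the HDF5 cache) and align them into model inputs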
self.load_communities()
self.load_known_genes()
self.load_selected_gene_list()
if not self.load_data():
self.load_dependency()
if 'rna' in self.data_types:
self.load_rna()
self.save_data()
self.align_data()
def load_selected_gene_list(self):
if isinstance(self.select_genes_in_label, str):
if self.select_genes_in_label.lower() == 'dgidb_w_interaction':
dgidb_file = os.path.join(self.data_dir, 'DGIdb_genes_w_interactions.txt')
else:
raise ValueError("Cannot recongnize select_genes_in_label {}".format(self.select_genes_in_label))
self.select_genes_in_label = pd.read_csv(dgidb_file, header=None)[0].tolist()
elif 'ref_leaf_group' in self.run_mode:
if isinstance(self.select_genes_in_label, list):
leaf_communities, df = self.load_leaf_communities()
initial_select = set(self.select_genes_in_label)
initial_n = len(initial_select)
logging.info("Selected genes {} were used to find additional genes in the same leaf gene groups".format(
self.select_genes_in_label))
leaf_communities_with_genes = {}
for group in leaf_communities:
if len(initial_select.intersection(self.community_dict[group])) > 0:
leaf_communities_with_genes[group] = len(self.community_dict[group])
# Select leaf groups from small to large groups until it reaches the self.leaf_group_gene_in_label_max
for group, size in sorted(leaf_communities_with_genes.items(), key=lambda x: x[1]):
if len(initial_select | set(self.community_dict[group])) < self.leaf_group_gene_in_label_max:
initial_select |= set(self.community_dict[group])
logging.info("{} gene group was added as genes in labels".format(group))
self.select_genes_in_label = sorted(list(initial_select))
logging.info(
"Additional {} genes in the same leaf gene groups with selected genes were added".format(
len(self.select_genes_in_label) - initial_n))
def save_label_genes(self, genes):
"""Save label genes to file."""
fout = open(os.path.join(self.result_path, 'dependency_genes.tsv'), 'w')
for x in genes:
fout.write('{}\n'.format(x))
fout.close()
def save_communities(self, d=None):
"""Save community genes to file."""
if d is None:
fout = open(os.path.join(self.result_path, 'community_list.tsv'), 'w')
d = self.community_dict
s = ''
else:
fout = open(os.path.join(self.result_path, 'community_random_list.tsv'), 'w')
s = '_random'
for k, v in d.items():
fout.write('{}\n'.format('\t'.join([k + s] + v)))
fout.close()
def save_data(self):
hf = pd.HDFStore(self.hdf5_df_file)
for x in self.save_load_data:
if x in self.__dict__:
hf[x] = self.__dict__[x]
hf['data_types'] = pd.DataFrame(self.data_types)
# hf['other_cancer_types'] = pd.DataFrame(self.other_cancer_types)
# hf['cancer_type'] = pd.DataFrame([self.cancer_type])
# for ct in set(self.cancer_type_to_patients.keys()) | set(self.other_cancer_types) | set([self.cancer_type]):
# hf[ct] = pd.DataFrame(self.cancer_type_to_patients[ct])
# if 'cancer_type_to_patients_target' in self.__dict__:
# for ct in set(self.cancer_type_to_patients_target.keys()) | set(self.other_cancer_types) | set(
# [self.cancer_type]):
# hf[ct + '_target'] = pd.DataFrame(self.cancer_type_to_patients_target[ct])
hf.close()
def load_data(self):
if os.path.isfile(self.hdf5_df_file):
hf = pd.HDFStore(self.hdf5_df_file)
try:
for x in self.save_load_data:
if x in hf:
self.__dict__[x] = hf[x]
self.__dict__[x + '_all'] = self.__dict__[x].copy()
logging.info("Loaded data from existing hdf5 file.")
hf.close()
return True
except:
logging.info(
"Current Data types, Cancer type or Other cancer types do not match that of existing hdf5 file.")
hf.close()
return False
else:
return False
def load_communities(self, load_original=True):
"""Parses out a geneset from file."""
if self.load_result and not load_original:
lines = open('{}/community_list.tsv'.format(self.load_result_dir)).readlines()
ind_key = 0
ind_gene = 1
else:
lines = open('{}'.format(self.community_file)).readlines()
if 'pathway' in self.community_file.lower():
ind_key = 1
ind_gene = 3
elif self.community_file.lower().endswith('.gmt'):
ind_key = 1
ind_gene = 3
else:
ind_key = 0
ind_gene = 1
self.community_genes = set()
self.community_dict = {}
self.gene_community_dict = ddict(list)
self.community_size_dict = {}
for line in lines:
line = line.strip().split('\t')
self.community_dict[line[ind_key]] = line[ind_gene:]
self.community_size_dict[line[ind_key]] = len(line[ind_gene:])
self.community_genes |= set(line[ind_gene:])
for g in line[ind_gene:]:
self.gene_community_dict[g].append(line[ind_key])
def load_random_communities(self, load_original=True):
"""Parses out a geneset from file."""
lines = open('{}/community_random_list.tsv'.format(self.load_result_dir)).readlines()
ind_key = 0
ind_gene = 1
self.random_community_genes = set()
self.community_dict_random = {}
self.random_community_size_dict = {}
for line in lines:
line = line.strip().split('\t')
group = line[ind_key].split('_')[0]
self.community_dict_random[group] = line[ind_gene:]
self.random_community_size_dict[group] = len(line[ind_gene:])
self.random_community_genes |= set(line[ind_gene:])
def load_leaf_communities(self):
f = self.community_hierarchy_file
# The first column 0 is the parent and the second column 1 is the child
df = pd.read_csv(f, sep='\t', header=None)
if 'Reactome' in f:
df = df.loc[df[0].str.contains('HSA')] # Get human-only pathways
# Make root as the parent of those gene groups without parents
df_root = pd.DataFrame(columns=df.columns)
for x in set(df[0]) - set(df[1]):
if x in self.community_dict or 'GO:' in x:
df_root = pd.concat([df_root, pd.DataFrame(['root', x]).T])
# Remove those relationship of groups not in the analysis
df = df.loc[df[1].isin(self.community_dict.keys()) & df[0].isin(self.community_dict.keys())]
df = pd.concat([df, df_root])
leaf_communities = sorted(list((set(df[1]) - set(df[0])) & set(self.community_dict.keys())))
return leaf_communities, df
def load_random_hierarchy(self):
f = '{}/random_group_hierarchy.tsv'.format(self.load_result_dir)
df = pd.read_csv(f, sep='\t', header=None)
return df
def load_known_genes(self, depmap_ver=None):
if depmap_ver is None:
depmap_ver = self.depmap_ver
regex = re.compile(r"\d\dQ\d", re.IGNORECASE)
depmap_dir = os.environ.get('DEPMAP_DIR')
if depmap_ver not in depmap_dir:
depmap_dir = regex.sub(depmap_ver, depmap_dir)
if '19Q2' == depmap_ver or '19Q3' == depmap_ver or '19Q4' == depmap_ver or '20Q' in depmap_ver:
depmap_cell_line_file = os.path.join(depmap_dir, 'sample_info.csv')
else:
depmap_cell_line_file = os.path.join(depmap_dir, 'DepMap-20{}-celllines.csv'.format(depmap_ver.lower()))
self.cell_line_metadata = pd.read_csv(depmap_cell_line_file)
self.cell_line_metadata = self.cell_line_metadata.set_index('DepMap_ID')
try:
self.cell_line_id_mapping = self.cell_line_metadata['CCLE_Name'].to_dict()
self.cell_line_id_pri_dis = self.cell_line_metadata.set_index('CCLE_Name')
except:
self.cell_line_id_mapping = self.cell_line_metadata['CCLE Name'].to_dict()
self.cell_line_id_pri_dis = self.cell_line_metadata.set_index('CCLE Name')
try:
self.cell_line_id_pri_dis = self.cell_line_id_pri_dis['Primary Disease'].to_dict()
except:
self.cell_line_id_pri_dis = self.cell_line_id_pri_dis['lineage'].to_dict()
try:
self.cell_line_id_sub_dis = self.cell_line_metadata.set_index('CCLE_Name')
except:
self.cell_line_id_sub_dis = self.cell_line_metadata.set_index('CCLE Name')
try:
self.cell_line_id_sub_dis = self.cell_line_id_sub_dis['Subtype Disease'].to_dict()
except:
self.cell_line_id_sub_dis = self.cell_line_id_sub_dis['lineage_subtype'].to_dict()
self.cell_line_id_mapping = ddict(lambda: None, self.cell_line_id_mapping)
self.cell_line_id_pri_dis = ddict(lambda: None, self.cell_line_id_pri_dis)
self.cell_line_id_sub_dis = ddict(lambda: None, self.cell_line_id_sub_dis)
def load_dependency(self, depmap_ver=None, dep_data_type='Dependency'):
depmap_genetic_vulnerabilities_dir = os.environ.get('DEPMAP_DIR')
regex = re.compile(r"\d\dQ\d", re.IGNORECASE)
if depmap_ver is None:
depmap_ver = self.depmap_ver
if '19Q2' == depmap_ver or '19Q3' == depmap_ver or '19Q4' == depmap_ver or '20Q' in depmap_ver:
if depmap_ver not in depmap_genetic_vulnerabilities_dir:
depmap_genetic_vulnerabilities_dir = regex.sub(depmap_ver, depmap_genetic_vulnerabilities_dir)
if dep_data_type == 'CERES':
depmap_file = 'Achilles_gene_effect.csv'
elif dep_data_type == 'Dependency':
depmap_file = 'Achilles_gene_dependency.csv'
self.dependency = pd.read_csv(os.path.join(depmap_genetic_vulnerabilities_dir, depmap_file), header=0,
index_col=0)
self.dependency.columns = [x.split(' (')[0] for x in self.dependency.columns]
self.dependency = self.dependency[sorted(self.dependency.columns)]
# Map cell line id to name
self.dependency.index = [self.cell_line_id_mapping[x] if x in self.cell_line_id_mapping else x for x in
self.dependency.index]
self.dependency = self.dependency.loc[sorted(self.dependency.index)]
self.dependency = self.dependency.fillna(0)
def load_rna(self, depmap_ver=None):
depmap_genomic_characterization_dir = os.environ.get('DEPMAP_DIR')
regex = re.compile(r"\d\dQ\d", re.IGNORECASE)
if depmap_ver is None:
depmap_ver = self.depmap_ver
if '19Q2' == depmap_ver or '19Q3' == depmap_ver or '19Q4' == depmap_ver or '20Q' in depmap_ver:
if depmap_ver not in depmap_genomic_characterization_dir:
depmap_genomic_characterization_dir = regex.sub(depmap_ver, depmap_genomic_characterization_dir)
depmap_file = 'CCLE_expression.csv'
if '20Q2' in depmap_ver:
sep_str = '\t'
else:
sep_str = ','
self.rna = pd.read_csv(os.path.join(depmap_genomic_characterization_dir, depmap_file), header=0,
index_col=0, sep=sep_str)
self.rna.columns = [x.split(' (')[0] for x in self.rna.columns]
# Merge columns with the same gene symbol
dup_genes = [item for item, count in Counter(self.rna.columns).items() if count > 1]
unique_genes = list(set(self.rna.columns).difference(dup_genes))
RNAseq_gene = self.rna[unique_genes].copy()  # copy to avoid SettingWithCopyWarning when adding summed duplicate columns
for col in set(dup_genes):
RNAseq_gene[col] = self.rna[col].sum(axis=1)
# Map cell line id to name
RNAseq_gene.index = [self.cell_line_id_mapping[x] if x in self.cell_line_id_mapping else x for x in
RNAseq_gene.index]
for cell in set(self.dependency.index).intersection(RNAseq_gene.index):
cell_type = self.cell_line_id_pri_dis[cell]
cell_subtype = self.cell_line_id_sub_dis[cell]
if cell_type in disease_mapping:
if cell not in self.cancer_type_to_patients[disease_mapping[cell_type]]:
self.cancer_type_to_patients[disease_mapping[cell_type]].append(cell)
elif cell_subtype in disease_mapping:
if cell not in self.cancer_type_to_patients[disease_mapping[cell_subtype]]:
self.cancer_type_to_patients[disease_mapping[cell_subtype]].append(cell)
if cell not in self.cancer_type_to_patients[cell_type]:
self.cancer_type_to_patients[cell_type].append(cell)
self.rna = RNAseq_gene
self.rna = self.rna[sorted(self.rna.columns)]
self.rna = self.rna.loc[sorted(self.rna.index)]
self.rna_all = self.rna.copy()
def _subset_samples(self):
# Get overlapping patients among data types
overlapping_patients = set(self.dependency.index)
for x in self.data_types:
# Get patient ID
overlapping_patients &= set(self.__dict__[x].index)
if self.cancer_type == 'PANC':
selected_samples = sorted(list(overlapping_patients))
else:
selected_samples = sorted(list(set(self.cancer_type_to_patients[self.cancer_type])))
overlapping_patients &= set(selected_samples)
overlapping_patients = sorted(list(overlapping_patients))
for x in self.data_types:
self.__dict__[x] = self.__dict__[x].loc[overlapping_patients]
self.dependency = self.dependency.loc[overlapping_patients]
logging.info("Total {} samples have {} and dependency data".format(
len(overlapping_patients), " ".join(self.data_types)))
def _subset_target_genes(self):
try:
self.genes_in_label = pd.read_csv(self.load_result_dir + '/dependency_genes.tsv', sep='\t', header=None)
self.genes_in_label = list(self.genes_in_label.values.T[0])
except:
if self.use_all_dependency_gene:
self.genes_in_label = sorted(list(set(self.community_genes).intersection(self.dependency.columns)))
else:
self.genes_in_label = sorted(list(set(self.genes).intersection(self.dependency.columns)))
if len(self.select_genes_in_label) > 0:
self.genes_in_label = sorted(list(set(self.genes_in_label).intersection(self.select_genes_in_label)))
genes_not_found = set(self.select_genes_in_label).difference(self.genes_in_label)
logging.debug("Genes not found: {}".format(genes_not_found))
if 'Timestamped' not in self.__class__.__name__:
logging.info("{} out of {} selected genes are in dependency data.".format(
len(self.genes_in_label) - len(genes_not_found),
len(self.select_genes_in_label)))
gsp_total = (self.dependency[self.genes_in_label] >= 0.5).sum()
cond = (gsp_total >= self.GSP_min) & (self.dependency.shape[0] - gsp_total >= self.GSN_min)
cond_col = sorted([y for x, y in zip(cond, cond.index) if x])
logging.info("{} genes have at least {} gold standard positives and {} negatives".format(len(cond_col),
self.GSP_min,
self.GSN_min))
self.dependency = self.dependency[cond_col]
self.genes_in_label = cond_col
self.gsp_n = (self.dependency >= 0.5).sum().sum()
self.gsn_n = (self.dependency < 0.5).sum().sum()
if self.use_classification:
logging.info("Positive:negative samples = {}:{}".format(self.gsp_n, self.gsn_n))
def _select_feature_genes(self):
overlapping_genes = set(self.community_genes)
try:
self.rna_mad = pd.read_csv(self.load_result_dir + '/RNA_mad.tsv', sep='\t', index_col=0)
self.rna_mad.columns = [0]
except:
overlapping_genes &= set(self.rna.columns)
self.rna = self.rna[sorted(list(overlapping_genes))]
expressed_genes = ((self.rna >= 1).sum() > (self.rna.shape[0]) * self.exp_ratio_min)
self.rna_mad = self.rna.apply(mad)
self.rna_mad = pd.DataFrame(self.rna_mad, index=self.rna.columns)
self.rna_mad = self.rna_mad.loc[expressed_genes]
self.rna_mad = self.rna_mad.sort_values(by=0, ascending=False)
self.rna_mad.to_csv(os.path.join(self.result_path, 'RNA_mad.tsv'), sep='\t')
top_mad_genes = self.rna_mad.head(min(self.rna_top_n_std, self.rna_mad.shape[0])).index
self.output_pred_small += list(top_mad_genes)[0:20]
self.output_pred_small += list(top_mad_genes)[
int(self.rna_top_n_std / 2 - 10):int(self.rna_top_n_std / 2 + 10)]
self.output_pred_small += list(top_mad_genes)[-20:]
self.rna = self.rna[top_mad_genes]
overlapping_genes &= set(self.rna.columns)
self.rna = self.rna[sorted(list(overlapping_genes))]
logging.info("Total {} genes have top {} mad and gene group data".format(
len(overlapping_genes), self.rna.shape[1]))
def _filter_community(self):
com_to_drop = []
modeled_com_genes = set()
modeled_genes = set()
for data_type in self.data_types:
modeled_genes |= set(self.__dict__[data_type].columns)
for com, members in self.community_dict.items():
if self.use_all_dependency_gene:
self.community_dict[com] = sorted(
list((set(modeled_genes) & set(members)) | (set(members) & set(self.genes_in_label))))
else:
self.community_dict[com] = sorted(list(set(modeled_genes).intersection(members)))
if len(self.community_dict[com]) < self.community_affected_size_min:
com_to_drop.append(com)
elif len(self.community_dict[com]) > self.community_affected_size_max:
com_to_drop.append(com)
elif len(set(members) & set(self.genes_in_label)) < 1:
if self.require_label_gene_in_gene_group:
com_to_drop.append(com)
else:
modeled_com_genes |= set(self.community_dict[com])
else:
modeled_com_genes |= set(self.community_dict[com])
for com in com_to_drop:
self.community_dict.pop(com, None)
def _run_create_filter(self):
self.feature_genes = set()
self.genes_in_label_idx = {}
self.idx_genes_in_label = {}
self.community_filter = self.__create_filter(self.gene_community_dict, self.community_dict,
self.community_size_dict, random=False)
def __create_filter(self, gene_community_dict, community_dict, community_size_dict, random=False):
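# For each label gene, collect feature genes from its gene groups, smallest group first,
# capping the features taken per group (feature_per_group_max) and per gene (feature_max)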
community_filter = ddict(set)
if not random:
self.genes_in_label_idx = {}
self.idx_genes_in_label = {}
i = 0
for g in self.genes_in_label:
coms = gene_community_dict[g]
coms = list(set(coms) & (community_dict.keys()))
com_size = [community_size_dict[x] for x in coms]
community_filter[g] |= set([g])
for s, com in sorted(zip(com_size, coms)):
genes = set(community_dict[com])
# Choose top n genes so that not too many features were used per gene group
if 'ref' not in self.run_mode and self.use_all_feature_for_random_group:
if len(self.data_types) > 1:
added_genes = set(genes - community_filter[g]) & (set(self.mut.columns) | set(self.rna.columns))
elif 'rna' in self.data_types:
added_genes = set(genes - community_filter[g]) & set(self.rna.columns)
elif 'mut' in self.data_types:
added_genes = set(genes - community_filter[g]) & set(self.mut.columns)
if len(added_genes) == 0:
continue
if isinstance(self.feature_per_group_max, int):
choose_n = min(self.feature_per_group_max, len(added_genes))
top_genes = list(np.random.choice(list(added_genes), choose_n, replace=False))
elif isinstance(self.feature_per_group_max, float) and self.feature_per_group_max < 1:
top_n = int(np.ceil(len(genes) * self.feature_per_group_max))  # cast to int so np.random.choice accepts it as a size
choose_n = min(top_n, len(added_genes))
top_genes = list(np.random.choice(list(added_genes), choose_n, replace=False))
else:
raise ValueError("feature_per_group_max {} should be integer or between 0 and 1".format(
self.feature_per_group_max))
else:
if len(self.data_types) > 1:
added_genes = set(genes - community_filter[g]) & (set(self.mut.columns) | set(self.rna.columns))
variable_genes = self.rna_mad.loc[list(added_genes)].sort_values(0, ascending=False)
elif 'rna' in self.data_types:
added_genes = set(genes - community_filter[g]) & set(self.rna.columns)
variable_genes = self.rna_mad.loc[list(added_genes)].sort_values(0, ascending=False)
elif 'mut' in self.data_types:
added_genes = set(genes - community_filter[g]) & set(self.mut.columns)
variable_genes = self.mut_freq.loc[list(added_genes)].sort_values(0, ascending=False)
if isinstance(self.feature_per_group_max, int):
top_genes = variable_genes.head(self.feature_per_group_max).index
elif isinstance(self.feature_per_group_max, float) and self.feature_per_group_max < 1:
top_n = int(np.ceil(len(genes) * self.feature_per_group_max))  # head() expects an integer
top_genes = variable_genes.head(top_n).index
else:
raise ValueError("feature_per_group_max {} should be integer or between 0 and 1".format(
self.feature_per_group_max))
community_filter[g] |= set(top_genes)
if len(community_filter[g]) >= self.feature_max:
break
if not random:
if len(community_filter[g]) > 0:
self.genes_in_label_idx[g] = i
self.idx_genes_in_label[i] = g
i += 1
else:
logging.info("Gene {} could not find feature genes".format(g))
if not random:
logging.info(
"The dependency of total {} genes will be predicted".format(len(self.genes_in_label_idx.keys())))
return community_filter
def _build_hierarchy(self):
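# Assign every gene group to a layer by walking up from the leaf groups towards the root,
# then index feature genes, label genes and gene groups into a single flat id space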
leaf_communities, df = self.load_leaf_communities()
child = leaf_communities
# The layer having only gene children
level = 1
self.community_level_dict = dict()
self.level_community_dict = dict()
count_dict = ddict(int)
for x in child:
self.community_level_dict[x] = level
count_dict[x] += 1
self.level_community_dict[level] = child
# logging.info("Layer {} has {} gene groups".format(level, len(child)))
while 1:
df_level = df.loc[df[1].isin(child)]
if df_level.shape[0] == 0:
break
level += 1
parent = sorted(list(set(df_level[0])))
for parent_group in parent:
self.community_level_dict[parent_group] = level
count_dict[parent_group] += 1
self.level_community_dict[level] = parent
child = parent
# Make the layer number of each community unique
self.level_community_dict = ddict(list)
for g, level in self.community_level_dict.items():
self.level_community_dict[level].append(g)
for level, groups in sorted(self.level_community_dict.items()):
logging.info("Layer {} has {} gene groups".format(level, len(groups)))
gene_groups_all = sorted(list(self.community_dict.keys())) + ['root']
logging.info(
"Total {} layers of {} gene groups in the hierarchy including the root".format(level, len(gene_groups_all)))
feature_genes_all = []
self.feature_n = []
np.random.seed(self.params['seeds'][0])  # seed the global RNG used by the np.random.choice calls below
for data_type in self.data_types:
feat_n = len(self.__dict__[data_type].columns)
self.feature_n.append(feat_n)
# Randomly reselect features for each feature matrix
if 'full' in self.run_mode and self.use_all_feature_for_fully_net:
feat_pool = sorted(list(self.__dict__[data_type + '_all'].columns))
feature_genes_all += feat_pool
cell_idx = self.__dict__[data_type].index
self.__dict__[data_type] = self.__dict__[data_type + '_all'].loc[cell_idx, feat_pool]
logging.info(
"Use all {} genes from {} as features to form fully connected networks".format(feat_n, data_type))
elif 'ref' not in self.run_mode and self.use_all_feature_for_random_group:
feat_pool = list(self.__dict__[data_type + '_all'].columns)
# Require gene labels in the features
pre_select = set(feat_pool) & set(self.genes_in_label)
feat_pool = sorted(list(set(feat_pool) - set(self.genes_in_label)))
random_feat = sorted(list(np.random.choice(feat_pool, feat_n - len(pre_select), replace=False)))
feature_genes_all += random_feat + list(pre_select)
feature_genes_all = sorted(feature_genes_all)
cell_idx = self.__dict__[data_type].index
self.__dict__[data_type] = self.__dict__[data_type + '_all'].loc[cell_idx, random_feat]
logging.info(
"Randomly select {} genes including {} gene of prediction from {} as features to form random gene groups".format(
feat_n, len(self.genes_in_label), data_type))
else:
feature_genes_all += sorted(list(self.__dict__[data_type].columns))
del_genes_all = sorted(list(self.genes_in_label_idx.keys()))
self.feature_n.append(len(del_genes_all))
self.genes_in_label = del_genes_all
self.save_label_genes(self.genes_in_label)
self.y = self.dependency[self.genes_in_label]
self.y_binary = ((self.y >= 0.5) + 0).astype(int)
# The order of indexed genes and gen groups:
if self.use_deletion_vector:
entity_all = feature_genes_all + del_genes_all + gene_groups_all
else:
entity_all = feature_genes_all + gene_groups_all
self.idx_name = {i: k for i, k in enumerate(entity_all)}
name_idx = ddict(list)
for k, v in self.idx_name.items():
name_idx[v].append(k)
if len(self.data_types) > 1:
self.mut_genes_idx = {}
self.rna_genes_idx = {}
for k, v in name_idx.items():
for idx in v:
if idx < self.feature_n[0]:
self.mut_genes_idx[k] = idx
elif self.feature_n[0] <= idx < self.feature_n[0] + self.feature_n[1]:
self.rna_genes_idx[k] = idx
self.feature_genes_idx = {x: min(name_idx[x]) for x in feature_genes_all}
self.del_genes_idx = {x: max(name_idx[x]) for x in del_genes_all}
self.gene_group_idx = {x: name_idx[x][0] for x in gene_groups_all}
self.community_hierarchy_dicts_all = {'idx_name': self.idx_name,
'feature_genes_idx': self.feature_genes_idx,
'del_genes_idx': self.del_genes_idx,
'gene_group_idx': self.gene_group_idx}
self.child_map_all = []
self.child_map_all_random = []
self.child_map_all_ones = []
feature_only_genes = set(feature_genes_all) - set(del_genes_all)
dep_only_genes = set(del_genes_all) - set(feature_genes_all)
feature_dep_both_genes = set(feature_genes_all) & set(del_genes_all)
gene_pool = sorted(list(set(feature_genes_all) | set(del_genes_all)))
self.community_filter_random = ddict(list)
if 'Timestamped' in self.__class__.__name__ or 'Sensitivity' in self.__class__.__name__:
self.load_random_communities()
random_hierarchy = self.load_random_hierarchy()
else:
self.community_dict_random = {}
random_hierarchy = pd.DataFrame()
self.gene_community_dict_random = ddict(list)
self.community_size_dict_random = {}
prng = np.random.RandomState(self.params['seeds'][0])
logging.info("Building gene group hierarchy")
if self.run_mode == 'random':
idx_gene_pool = {i: g for i, g in enumerate(gene_pool)}
gene_pool_idx = {g: i for i, g in enumerate(gene_pool)}
partially_shuffled_membership = self.__partially_shuffle_gene_group(gene_pool, gene_pool_idx)
idx_gene_group = {i: g for g, i in self.gene_group_idx.items()}
partially_shuffled_relation = self.__partially_shuffle_gene_group_hierarchy(df, idx_gene_group)
else:
partially_shuffled_membership = None
partially_shuffled_relation = None
idx_gene_group = None
idx_gene_pool = None
min_group_idx = min(self.gene_group_idx.values())
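# For every gene group, record its member feature/label gene indices and child group indices;
# in parallel, build a size-matched random counterpart and a fully connected (ones) counterpart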
for group, idx in sorted(self.gene_group_idx.items()):
if group in self.community_dict:
genes = self.community_dict[group]
gene_idx = self._genes_to_feat_del_idx(genes)
if 'Timestamped' in self.__class__.__name__ or 'Sensitivity' in self.__class__.__name__:
genes_random = self.community_dict_random[group]
else:
if partially_shuffled_membership is not None:
genes_random_idx = partially_shuffled_membership[idx - min_group_idx].nonzero()[0]
genes_random = sorted([idx_gene_pool[x] for x in genes_random_idx])
else:
if self.use_consistant_groups_for_labels:
gene_pool = sorted(list(set(gene_pool) - set(self.genes_in_label)))
pre_select = set(genes) & set(self.genes_in_label)
if len(set(genes) & set(self.genes_in_label)) > 0:
random_feat = list(prng.choice(gene_pool, len(genes) - len(pre_select), replace=False))
genes_random = sorted(random_feat + list(pre_select))
else:
genes_random = sorted(
list(prng.choice(gene_pool, len(genes) - len(pre_select), replace=False)))
else:
genes_random = sorted(list(prng.choice(gene_pool, len(genes), replace=False)))
self.community_dict_random[group] = genes_random
for g in genes_random:
self.gene_community_dict_random[g].append(group)
self.community_size_dict_random[group] = len(genes_random)
feat_genes = set(genes_random) & set(self.feature_genes_idx.keys())
del_genes = set(genes_random) & set(self.del_genes_idx.keys())
if len(self.data_types) > 1:
feat_gene_idx = []
for g in feat_genes:
if g in self.mut_genes_idx:
feat_gene_idx.append(self.mut_genes_idx[g])
if g in self.rna_genes_idx:
feat_gene_idx.append(self.rna_genes_idx[g])
else:
feat_gene_idx = [self.feature_genes_idx[x] for x in feat_genes]
if self.use_deletion_vector:
del_gene_idx = [self.del_genes_idx[x] for x in del_genes]
else:
del_gene_idx = []
gene_idx_random = feat_gene_idx + del_gene_idx
else:
gene_idx = []
gene_idx_random = []
child = sorted(df.loc[df[0] == group, 1].tolist())
child_idx = sorted([self.gene_group_idx[x] for x in child if x in self.gene_group_idx])
self.child_map_all.append(sorted(gene_idx + child_idx))
if len(self.child_map_all[-1]) == 0:
logging.info("Gene group {} does not have children".format(group))
# Build random group hierarchy
if 'Timestamped' in self.__class__.__name__ or 'Sensitivity' in self.__class__.__name__:
child_random = sorted(random_hierarchy.loc[random_hierarchy[0] == group, 1].tolist())
child_idx_random = sorted([self.gene_group_idx[x] for x in child_random if x in self.gene_group_idx])
else:
if partially_shuffled_relation is not None:
child_idx_random = partially_shuffled_relation[idx - min_group_idx, :].nonzero()[0]
child_idx_random = [x + min_group_idx for x in child_idx_random]
child_random = sorted([idx_gene_group[x] for x in child_idx_random])
else:
child_idx_random = []
child_random = []
for c in child:
child_level = self.community_level_dict[c]
random_child = prng.choice(self.level_community_dict[child_level], 1, replace=False)[0]
child_random.append(random_child)
random_c_idx = self.gene_group_idx[random_child]
child_idx_random.append(random_c_idx)
for rc in sorted(child_random):
random_hierarchy = pd.concat([random_hierarchy, pd.DataFrame([group, rc]).T], axis=0)
self.child_map_all_random.append(sorted(gene_idx_random + child_idx_random))
try:
assert len(gene_idx) == len(gene_idx_random), "Random gene number does not match"
except AssertionError:
pass
# Children for fully connected neural networks
if group in leaf_communities:
gene_idx_ones = list(self.feature_genes_idx.values())
else:
gene_idx_ones = []
parent_level = self.community_level_dict[group]
child_level = parent_level - 1
if child_level in self.level_community_dict:
child_ones = self.level_community_dict[child_level]
else:
child_ones = []
child_idx_ones = [self.gene_group_idx[x] for x in child_ones if x in self.gene_group_idx]
self.child_map_all_ones.append(sorted(gene_idx_ones + child_idx_ones))
self.save_communities(self.community_dict_random)
# Save random hierarchy as file
random_hierarchy.to_csv(os.path.join(self.result_path, 'random_group_hierarchy.tsv'),
index=None, sep='\t', header=None)
self.community_filter_random = self.__create_filter(self.gene_community_dict_random, self.community_dict_random,
self.community_size_dict_random, random=True)
self.community_filter_map = []
self.community_filter_map_random = []
feature_n = len(feature_genes_all)
for g in del_genes_all:
feat_genes = set(self.community_filter[g])
if len(self.data_types) > 1:
feat_gene_idx = []
for fg in feat_genes:  # use a distinct loop variable so the outer label gene g is not overwritten
if fg in self.mut_genes_idx:
feat_gene_idx.append(self.mut_genes_idx[fg])
if fg in self.rna_genes_idx:
feat_gene_idx.append(self.rna_genes_idx[fg])
feat_gene_idx = sorted(feat_gene_idx)
else:
feat_gene_idx = sorted([self.feature_genes_idx[x] for x in feat_genes if x in self.feature_genes_idx])
feat_genes_array = np.zeros(feature_n)
feat_genes_array[feat_gene_idx] = 1
self.community_filter_map.append(feat_genes_array)
feat_genes_random = set(self.community_filter_random[g])
if len(self.data_types) > 1:
feat_genes_random_idx = []
for fg in feat_genes_random:  # iterate over the random filter genes with a distinct loop variable
if fg in self.mut_genes_idx:
feat_genes_random_idx.append(self.mut_genes_idx[fg])
if fg in self.rna_genes_idx:
feat_genes_random_idx.append(self.rna_genes_idx[fg])
feat_genes_random_idx = sorted(feat_genes_random_idx)
else:
feat_genes_random_idx = sorted(
[self.feature_genes_idx[x] for x in feat_genes_random if x in self.feature_genes_idx])
feat_genes_array = np.zeros(feature_n)
feat_genes_array[feat_genes_random_idx] = 1
self.community_filter_map_random.append(feat_genes_array)
def __partially_shuffle_gene_group(self, gene_pool, gene_pool_idx):
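# Build the binary group-by-gene membership matrix and randomly rewire the requested fraction
# of memberships, keeping each group's size unchanged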
group_gene_membership_matrix = np.zeros([len(self.gene_group_idx), len(gene_pool)])
min_group_idx = min(self.gene_group_idx.values())
for group, idx in sorted(self.gene_group_idx.items()):
if group in self.community_dict:
idx -= min_group_idx
genes = self.community_dict[group]
gene_idx = [gene_pool_idx[gene] for gene in genes]
group_gene_membership_matrix[idx, gene_idx] = 1
all_idx = group_gene_membership_matrix.nonzero()
prng = np.random.RandomState(self.random_group_permutation_seed)
shuffled_number = int(self.random_group_permutation_ratio * len(all_idx[0]))
shuffled_relationship_idx = prng.choice(range(len(all_idx[0])), shuffled_number, replace=False)
logging.info(
f"{self.random_group_permutation_ratio*100}% ({shuffled_number}) of gene membership was randomly shuffled")
# No shuffling
if self.random_group_permutation_ratio == 0:
return group_gene_membership_matrix
connections_to_shuffled = np.zeros([len(self.gene_group_idx), len(gene_pool)])
connections_to_shuffled[all_idx[0][shuffled_relationship_idx], all_idx[1][shuffled_relationship_idx]] = 1
partially_shuffled_membership = np.zeros([len(self.gene_group_idx), len(gene_pool)])
for i in range(group_gene_membership_matrix.shape[0]):
original = group_gene_membership_matrix[i].nonzero()[0]
to_shuffled = connections_to_shuffled[i].nonzero()[0]
if len(to_shuffled) > 0:
keep = list(set(original) - set(to_shuffled))
pool = sorted(list(set(range(len(group_gene_membership_matrix[i]))) - set(keep)))
after_shuffled = list(prng.choice(pool, len(to_shuffled), replace=False))
partially_shuffled_membership[i][keep + after_shuffled] = 1
else:
partially_shuffled_membership[i][original] = 1
return partially_shuffled_membership
def __partially_shuffle_gene_group_hierarchy(self, df, idx_gene_group):
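# Likewise rewire a fraction of the parent-child edges between gene groups, replacing each
# shuffled child with a randomly drawn group from the same layer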
gene_group_relation_matrix = np.zeros([len(self.gene_group_idx), len(self.gene_group_idx)])
min_group_idx = min(self.gene_group_idx.values())
for _, row in df.iterrows():
parent = self.gene_group_idx[row[0]] - min_group_idx
child = self.gene_group_idx[row[1]] - min_group_idx
gene_group_relation_matrix[parent, child] = 1
all_idx = gene_group_relation_matrix.nonzero()
prng = np.random.RandomState(self.random_group_permutation_seed)
shuffled_number = int(self.random_group_hierarchy_permutation_ratio * len(all_idx[0]))
shuffled_relationship_idx = prng.choice(range(len(all_idx[0])), shuffled_number, replace=False)
logging.info(
f"{self.random_group_hierarchy_permutation_ratio*100}% ({shuffled_number}) of gene group hierarchy was randomly shuffled")
connections_to_shuffled = np.zeros(gene_group_relation_matrix.shape)
connections_to_shuffled[all_idx[0][shuffled_relationship_idx], all_idx[1][shuffled_relationship_idx]] = 1
partially_shuffled_relation = np.zeros(gene_group_relation_matrix.shape)
# No shuffling
if self.random_group_hierarchy_permutation_ratio == 0:
return gene_group_relation_matrix
# Shuffle child group for each parent
for i in range(gene_group_relation_matrix.shape[0]):
original = gene_group_relation_matrix[i].nonzero()[0]
to_shuffled = connections_to_shuffled[i].nonzero()[0]
if len(to_shuffled) > 0:
keep = list(set(original) - set(to_shuffled))
children = [idx_gene_group[x + min_group_idx] for x in to_shuffled]
child_levels = [self.community_level_dict[child] for child in children]
after_shuffled = []
for child_level in child_levels:
random_child = prng.choice(self.level_community_dict[child_level], 1, replace=False)[0]
random_child_idx = self.gene_group_idx[random_child] - min_group_idx
after_shuffled.append(random_child_idx)
after_shuffled = list(set(after_shuffled))
partially_shuffled_relation[i][keep + after_shuffled] = 1
else:
partially_shuffled_relation[i][original] = 1
return partially_shuffled_relation
def _genes_to_feat_del_idx(self, genes):
feat_genes = set(genes) & set(self.feature_genes_idx.keys())
del_genes = set(genes) & set(self.del_genes_idx.keys())
if len(self.data_types) > 1:
feat_gene_idx = []
for g in feat_genes:
if g in self.mut_genes_idx:
feat_gene_idx.append(self.mut_genes_idx[g])
if g in self.rna_genes_idx:
feat_gene_idx.append(self.rna_genes_idx[g])
else:
feat_gene_idx = [self.feature_genes_idx[x] for x in feat_genes]
if self.use_deletion_vector:
del_gene_idx = [self.del_genes_idx[x] for x in del_genes]
else:
del_gene_idx = []
gene_idx = feat_gene_idx + del_gene_idx
return gene_idx
def _get_genes_in_child_group(self, group, genes_in_child_gene_group=set()):
_, df = self.load_leaf_communities()
children = df.loc[df[0] == group, 1].tolist()
for child in children:
if child in self.community_dict:
genes = self.community_dict[child]
genes_in_child_gene_group |= set(genes)
self._get_genes_in_child_group(child, genes_in_child_gene_group)
return genes_in_child_gene_group
def align_data(self):
self._subset_samples()
self._subset_target_genes()
self._select_feature_genes()
self._filter_community()
self._run_create_filter()
if len(self.data_types) > 1:
self.X = pd.concat([self.mut, self.rna], axis=1)
else:
self.X = self.__dict__[self.data_types[0]]
self.X_all = self.X
self._build_hierarchy()
# self._refine_community()
logging.info("Generating data splits for {} repeats and {} folds".format(self.repeat_n, self.fold_n))
self.split_data()
def split_data(self):
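# For pan-cancer runs each cancer type is split separately so every fold keeps the tissue mix;
# timestamped/sensitivity runs instead reserve the target-only cell lines as the test set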
self.split_idx = dict()
for repeat in range(self.repeat_n):
seed = self.params['seeds'][repeat]
if self.split_by_cancer_type and self.cancer_type == 'PANC':
cancer_type_id = ddict(list)
for x in self.X.index:
t = '_'.join(x.split('_')[1:])
cancer_type_id[t].append(x)
self.split_idx[repeat] = [ddict(list) for _ in range(self.fold_n)]
for j, (cancer_type, idx) in enumerate(cancer_type_id.items()):
logging.debug("{} has {} cell lines".format(cancer_type, len(idx)))
if len(idx) >= self.fold_n + 1:
logging.debug("{} has {} cell lines splitting".format(cancer_type, len(idx)))
split_subidx = self._split_data(self.X.loc[idx], self.y.loc[idx], seed)
for fold, split_dict in enumerate(split_subidx):
for split_type in split_dict.keys():
self.split_idx[repeat][fold][split_type] += list(split_dict[split_type])
if 'Timestamped' in self.__class__.__name__ or 'Sensitivity' in self.__class__.__name__:
target_idx = set(self.dependency_target.index) & set(self.rna_target_all.index)
target_idx_only = target_idx - set(self.dependency.index)
target_idx_only = sorted(list(target_idx_only))
for fold in range(len(self.split_idx[repeat])):
self.split_idx[repeat][fold]['test'] = target_idx_only
self.X_all = pd.concat([self.X_all, self.rna_target.loc[target_idx_only, self.X_all.columns]])
self.y = pd.concat([self.y, self.dependency_target.loc[target_idx_only, self.y.columns]])
y_binary_target = ((self.y.loc[target_idx_only] >= 0.5) + 0).astype(int)
self.y_binary = pd.concat([self.y_binary, y_binary_target])
else:
self.split_idx[repeat] = self._split_data(self.X, self.y, seed)
def _split_data(self, X, y, seed):
kf1 = KFold(n_splits=self.fold_n, shuffle=True, random_state=seed)  # random_state only takes effect when shuffle=True
split_idx = []
for fold, (train_index, test_index) in enumerate(kf1.split(X, y)):
split_dict = dict()
split_dict['test'] = list(X.index[test_index])
# Generate validation data by splitting part of training data
X_train, y_train = X.loc[X.index[train_index]], y.loc[X.index[train_index]]
if X_train.shape[0] < self.fold_n:
return []
kf = KFold(n_splits=self.fold_n, shuffle=True, random_state=seed)
for fold_2, (train_index, test_index) in enumerate(kf.split(X_train, y_train)):
split_dict['train'] = list(X_train.index[train_index])
split_dict['val'] = list(X_train.index[test_index])
if fold_2 == fold:  # use a different inner split for each outer fold so validation sets differ across folds
break
split_idx.append(split_dict)
return split_idx
def get_split_data(self, i, j):
self.idx['train'] = self.split_idx[i][j]['train']
self.idx['val'] = self.split_idx[i][j]['val']
self.idx['test'] = self.split_idx[i][j]['test']
if self.use_binary_dependency:
y = self.y_binary
else:
y = self.y
self.X_train, self.y_train = self.X_all.loc[self.idx['train']].values, y.loc[self.idx['train']].values
self.X_val, self.y_val = self.X_all.loc[self.idx['val']].values, y.loc[self.idx['val']].values
self.X_test, self.y_test = self.X_all.loc[self.idx['test']].values, y.loc[self.idx['test']].values
if 'cl3_' in self.model_v or 'cl5_' in self.model_v:
scaler = StandardScaler()
self.y_train2 = scaler.fit_transform(self.y_train)
self.y_val2 = scaler.transform(self.y_val)
self.y_test2 = scaler.transform(self.y_test)
elif 'clh_' in self.model_v:
self.y_train2 = self.y_train
self.y_val2 = self.y_val
self.y_test2 = self.y_test
else:
self.y_train2 = None
self.y_val2 = None
self.y_test2 = None
logging.info("Repeat {}, fold {}".format(i, j))
logging.info("Training data shape X: {}, y: {}".format(self.X_train.shape, self.y_train.shape))
logging.info("y label counts: {}".format(Counter(np.argmax(self.y_train, axis=1))))
logging.info("Validation data shape X: {}, y: {}".format(self.X_val.shape, self.y_val.shape))
logging.info("y label counts: {}".format(Counter(np.argmax(self.y_val, axis=1))))
logging.info("Test data shape X: {}, y: {}".format(self.X_test.shape, self.y_test.shape))
logging.info("y label counts: {}".format(Counter(np.argmax(self.y_test, axis=1))))
def perform(self, model_name, params=None):
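# Entry point: either run nested cross-validation or, for each repeat and fold, rebuild the
# splits, weights and normalization and launch the run mode selected in params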
if params is None:
params = self.params
save_params(self.result_path, params)
if self.cv_fold != 0:
if 'models' in params and 'random_forest' in self.run_mode:
self.perform_cv('random_forest', params)
else:
self.perform_cv(model_name, params)
else:
self.prepare_data()
# self.community_filter_ones = np.ones(self.community_filter.shape)
model_name_base = model_name
for repeat in range(self.repeat_n):
params['seed'] = params['seeds'][repeat]
# self.community_matrix_random = lil_matrix(self.community_matrix.shape)
np.random.seed(params['seed'])
if 'clh_v' in self.model_v:
mask = self.child_map_all
mask_random = self.child_map_all_random
mask_ones = self.child_map_all_ones
else:
mask = self.community_hierarchy
mask_random = self.community_hierarchy_random
mask_ones = self.community_hierarchy_ones
for fold in range(len(self.split_idx[repeat])):
model_suffix = str(params['seed']) + 'repeat' + str(repeat) + 'fold' + str(fold)
self.get_split_data(repeat, fold)
self.calculate_weights()
self.normalize_data()
if 'ref' in self.run_mode:
self.run_exp(model_name_base, model_suffix,
params, mask, repeat, fold, self.community_filter_map)
elif 'random_forest' in self.run_mode.lower():
self.run_exp('random_forest', model_suffix,
params, mask, repeat, fold, None, mask_ones)
elif 'random_predictor' in self.run_mode:
self.run_exp('random_predictor', model_suffix,
params, mask_random, repeat, fold, self.community_filter_map_random)
elif 'random' in self.run_mode:
self.run_exp('random_control', model_suffix,
params, mask_random, repeat, fold, self.community_filter_map_random)
elif 'expression_control' in self.run_mode:
self.run_exp('expression_control', model_suffix,
params, mask_random, repeat, fold, self.community_filter_map_random)
elif 'full' in self.run_mode:
self.run_exp('gene_control', model_suffix,
params, mask, repeat, fold, None, mask_ones)
def calculate_weights(self):
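# Derive positive/negative class weights from the gold-standard counts, either one global pair
# or one pair per label gene, optionally normalized so the two classes are balanced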
if self.use_class_weights:
gsp_n = (self.y_train >= 0.5).sum().sum()
gsn_n = (self.y_train < 0.5).sum().sum()
if self.use_normalized_class_weights:
self.class_weight_neg = (gsp_n + gsn_n) / (2.0 * (gsn_n))
self.class_weight_pos = (gsp_n + gsn_n) / (2.0 * (gsp_n))
else:
self.class_weight_neg = gsp_n / gsn_n
self.class_weight_pos = 1
else:
self.class_weight_neg = None
self.class_weight_pos = None
if self.use_sample_class_weights:
gsp_n = (self.y_train >= 0.5).sum(axis=0)
gsn_n = (self.y_train < 0.5).sum(axis=0)
if self.use_normalized_sample_class_weights:
self.sample_class_weight_neg = (gsp_n + gsn_n) / (2.0 * (gsn_n))
self.sample_class_weight_pos = (gsp_n + gsn_n) / (2.0 * (gsp_n))
else:
self.sample_class_weight_neg = gsp_n / gsn_n
self.sample_class_weight_pos = np.array([1] * len(gsn_n))
else:
self.sample_class_weight_neg = None
self.sample_class_weight_pos = None
def split_data_cv(self):
self.split_idx_cv = ddict(list)
for repeat in range(self.repeat_n):
seed = self.params['seeds'][repeat]
kf1 = KFold(n_splits=self.cv_fold, shuffle=True, random_state=seed)  # random_state only takes effect when shuffle=True
idx = sorted(list(self.idx['train']) + list(self.idx['val']))
X_train_val = self.X_all.loc[idx]
y_train_val = self.y.loc[idx]
for train_index, val_index in kf1.split(X_train_val, y_train_val):
split_dict = {}
split_dict['train'] = X_train_val.index[train_index]
split_dict['val'] = X_train_val.index[val_index]
self.split_idx_cv[repeat].append(split_dict)
def get_split_data_cv(self, i, j):
self.idx['train'] = self.split_idx_cv[i][j]['train']
self.idx['val'] = self.split_idx_cv[i][j]['val']
if self.use_binary_dependency:
y = self.y_binary
else:
y = self.y
self.X_train, self.y_train = self.X_all.loc[self.idx['train']].values, y.loc[self.idx['train']].values
self.X_val, self.y_val = self.X_all.loc[self.idx['val']].values, y.loc[self.idx['val']].values
if 'cl3_' in self.model_v or 'cl5_' in self.model_v:
scaler = StandardScaler()
self.y_train2 = scaler.fit_transform(self.y_train)
self.y_val2 = scaler.transform(self.y_val)
self.y_test2 = scaler.transform(self.y_test)
elif 'clh_' in self.model_v:
self.y_train2 = self.y_train
self.y_val2 = self.y_val
self.y_test2 = self.y_test
else:
self.y_train2 = None
self.y_val2 = None
self.y_test2 = None
logging.info("Repeat {}, cv_fold {}".format(i, j))
logging.info("Training data shape X: {}, y: {}".format(self.X_train.shape, self.y_train.shape))
logging.info("y label counts: {}".format(Counter(np.argmax(self.y_train, axis=1))))
logging.info("Validation data shape X: {}, y: {}".format(self.X_val.shape, self.y_val.shape))
logging.info("y label counts: {}".format(Counter(np.argmax(self.y_val, axis=1))))
def _normalize_rna(self, X_train, X_val, X_test):
# scaler = MinMaxScaler()
# self.X_train = scaler.fit_transform(self.X_train)
# self.X_val = scaler.transform(self.X_val)
# self.X_test = scaler.transform(self.X_test)
# self.X_train = np.log2(self.X_train + 1)
# self.X_val = np.log2(self.X_val + 1)
# self.X_test = np.log2(self.X_test + 1)
if self.use_StandardScaler:
scaler = StandardScaler()
X_train = scaler.fit_transform(X_train)
# feature_no_info = ((self.X_train.sum(axis=0) == 0) + 0).nonzero()[0]
X_val = scaler.transform(X_val)
# self.X_val[self.X_val > self.X_train.max()] = self.X_train.max()
# self.X_val[:, feature_no_info] = 0
X_test = scaler.transform(X_test)
if self.use_sigmoid_feature:
X_train = 1 / (1 + np.exp(-X_train))
X_val = 1 / (1 + np.exp(-X_val))
X_test = 1 / (1 + np.exp(-X_test))
if self.use_tanh_feature:
X_train = np.tanh(X_train)
X_val = np.tanh(X_val)
X_test = np.tanh(X_test)
if self.use_MinMaxScaler:
scaler = MinMaxScaler(feature_range=(0, 1))
X_train = scaler.fit_transform(X_train)
X_val = scaler.transform(X_val)
X_test = scaler.transform(X_test)
if self.clip_Xval_Xtest is not None:
logging.info("Before cliping,\n"
"Val data (min,max) = ({}, {})\n"
"Test data (min,max) = ({}, {})".format(
X_val.min(),
X_val.max(),
X_test.min(),
X_test.max(),
))
X_val = np.clip(X_val, self.clip_Xval_Xtest[0], self.clip_Xval_Xtest[1])
X_test = np.clip(X_test, self.clip_Xval_Xtest[0], self.clip_Xval_Xtest[1])
return X_train, X_val, X_test
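    # Minimal standalone sketch of the core of the chain above, assuming only the
    # StandardScaler and clipping switches are enabled (the clip bounds here are
    # illustrative, not taken from any config):
    #
    #   scaler = StandardScaler()
    #   X_train = scaler.fit_transform(X_train)            # fit on training data only
    #   X_val = np.clip(scaler.transform(X_val), -5, 5)
    #   X_test = np.clip(scaler.transform(X_test), -5, 5)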
def normalize_data(self):
self.X_train = np.nan_to_num(self.X_train)
self.X_val = np.nan_to_num(self.X_val)
self.X_test = np.nan_to_num(self.X_test)
self.X_train, self.X_val, self.X_test = self._normalize_rna(self.X_train, self.X_val, self.X_test)
def run_exp(self, model_name, model_suffix, params, com_mat, repeat, fold,
community_filter=None, com_mat_fully=None):
logging.info("Running {} repeat {} fold {}".format(model_name, repeat, fold))
output_prefix = model_name + model_suffix
if 'random_predictor' in model_name:
self.compute_metric(None, 'test', model_name, model_suffix, self.y_train, self.y_test, com_mat, repeat,
self.y_test2)
elif 'mean_control' in model_name:
# self.compute_metric(cm, 'train', model_name, model_suffix, self.y_train, self.y_train, com_mat, repeat,
# self.y_train2)
# self.compute_metric(cm, 'val', model_name, model_suffix, self.y_train, self.y_val, com_mat, repeat,
# self.y_val2)
self.compute_metric(None, 'test', model_name, model_suffix, self.y_train, self.y_test, com_mat, repeat,
self.y_test2)
elif 'expression_control' in model_name:
self.compute_metric(None, 'test', model_name, model_suffix, self.X_test, self.y_test, com_mat, repeat,
self.y_test2)
elif 'random_forest' in model_name:
sk_all = []
params['n_jobs'] = -1
for i in range(self.y_train.shape[1]):
sk = SklearnModel(model_name + model_suffix, params)
sk.train(self.X_train, self.y_train[:, i])
sk_all.append(sk)
self.compute_metric(sk_all, 'train', model_name, model_suffix, self.X_train, self.y_train, com_mat, repeat,
self.y_train2)
self.compute_metric(sk_all, 'val', model_name, model_suffix, self.X_val, self.y_val, com_mat, repeat,
self.y_val2)
self.compute_metric(sk_all, 'test', model_name, model_suffix, self.X_test, self.y_test, com_mat, repeat,
self.y_test2)
else:
if self.use_community_filter:
cm = BioVNNmodel(model_name + model_suffix, params, self.result_path,
self.community_hierarchy_dicts_all, community_filter,
class_weight_neg=self.class_weight_neg,
class_weight_pos=self.class_weight_pos,
sample_class_weight_neg=self.sample_class_weight_neg,
sample_class_weight_pos=self.sample_class_weight_pos,
group_level_dict=self.community_level_dict,
level_group_dict=self.level_community_dict)
else:
cm = BioVNNmodel(model_name + model_suffix, params, self.result_path,
self.community_hierarchy_dicts_all,
class_weight_neg=self.class_weight_neg,
class_weight_pos=self.class_weight_pos,
sample_class_weight_neg=self.sample_class_weight_neg,
sample_class_weight_pos=self.sample_class_weight_pos,
group_level_dict=self.community_level_dict,
level_group_dict=self.level_community_dict)
if hasattr(self, 'load_result_dir'):
load_ckpt = os.path.join(self.load_result_dir,
'{}_{}_{}.tar'.format(model_name + model_suffix, self.model_v,
params['seed']))
cm.train(self.X_train, com_mat, self.y_train, load_weight_dir=load_ckpt, mask_fully=com_mat_fully)
else:
y_val_index = self.idx['val']
y_col = self.y.columns
cm.train(self.X_train, com_mat, self.y_train, None, self.X_val, self.y_val, y_train2=self.y_train2,
y_val2=self.y_val2, output_prefix=output_prefix, y_val_index=y_val_index, y_col=y_col,
mask_fully=com_mat_fully)
self._clear_gpu(model_name, model_suffix)
cm.train(self.X_train, com_mat, self.y_train, mask_fully=com_mat_fully)
# self.analyze_weights(cm, model_name, model_suffix)
self._clear_gpu(model_name, model_suffix)
self.compute_metric(cm, 'train', model_name, model_suffix, self.X_train, self.y_train, com_mat, repeat,
self.y_train2)
self._clear_gpu(model_name, model_suffix)
self.compute_metric(cm, 'val', model_name, model_suffix, self.X_val, self.y_val, com_mat, repeat,
self.y_val2)
self._clear_gpu(model_name, model_suffix)
self.compute_metric(cm, 'test', model_name, model_suffix, self.X_test, self.y_test, com_mat, repeat,
self.y_test2)
self._clear_gpu(model_name, model_suffix)
model_suffix = str(params['seed']) + 'repeat' + str(repeat)
self.compute_metric_all_test('test', model_name, model_suffix, self.X_test, self.y_test, repeat)
self.output_metric()
def run_exp_cv(self, model_name, model_suffix, params, com_mat, repeat, fold,
community_filter=None, com_mat_fully=None, grid_name=None):
logging.info("Running {}".format(model_suffix))
if 'random_forest' in self.run_mode:
embed()
sys.exit(0)
params['n_jobs'] = -1
sk = SklearnModel(model_name + model_suffix, params)
sk.train(self.X_train, self.y_train)
self.compute_metric(sk, 'train', model_name, model_suffix, self.X_train, self.y_train, com_mat, repeat)
self.compute_metric(sk, 'val', model_name, model_suffix, self.X_val, self.y_val, com_mat, repeat)
# sk_all = []
# for i in range(self.y_train.shape[1]):
# sk = SklearnModel(model_name + model_suffix, params)
# sk.train(self.X_train, self.y_train[:, i])
# sk_all.append(sk)
#
# self.compute_metric(sk_all, 'train', model_name, model_suffix, self.X_train, self.y_train, com_mat, repeat)
# self.compute_metric(sk_all, 'val', model_name, model_suffix, self.X_val, self.y_val, com_mat, repeat)
else:
if self.use_community_filter:
cm = BioVNNmodel(model_name + model_suffix, params, self.result_path,
self.community_hierarchy_dicts_all, community_filter,
class_weight_neg=self.class_weight_neg,
class_weight_pos=self.class_weight_pos,
sample_class_weight_neg=self.sample_class_weight_neg,
sample_class_weight_pos=self.sample_class_weight_pos,
group_level_dict=self.community_level_dict,
level_group_dict=self.level_community_dict)
else:
cm = BioVNNmodel(model_name + model_suffix, params, self.result_path,
self.community_hierarchy_dicts_all,
class_weight_neg=self.class_weight_neg,
class_weight_pos=self.class_weight_pos,
sample_class_weight_neg=self.sample_class_weight_neg,
sample_class_weight_pos=self.sample_class_weight_pos,
group_level_dict=self.community_level_dict,
level_group_dict=self.level_community_dict)
y_val_index = self.idx['val']
y_col = self.y.columns
output_prefix = model_name + model_suffix
cm.train(self.X_train, com_mat, self.y_train, None, self.X_val, self.y_val, y_train2=self.y_train2,
y_val2=self.y_val2, output_prefix=output_prefix, y_val_index=y_val_index, y_col=y_col,
mask_fully=com_mat_fully)
self._clear_gpu(model_name, model_suffix)
cm.train(self.X_train, com_mat, self.y_train)
self.compute_metric(cm, 'val', model_name, model_suffix, self.X_val, self.y_val, com_mat, repeat,
self.y_val2)
self._clear_gpu(model_name, model_suffix)
if not self.save_model_ckpt:
cm._rm_ckpt()
self.output_metric()
model_suffix = str(params['seed']) + 'repeat' + str(repeat) + '_' + grid_name
self.compute_metric_all_test('val', model_name, model_suffix, self.X_test, self.y_test, repeat)
self.output_metric()
metric_output = {}
for x in self.metric_output:
if self.metric_output[x].shape[0] > 0:
df = self.metric_output[x].copy()
df = df.loc[['fold' not in y for y in df.index]]
if df.shape[0] > 0:
grid_df = self.grid_df.copy().T
grid_df.index = df.index
metric_output[x] = pd.concat([df, grid_df], axis=1)
self.output_metric(metric_output, '_all')
def perform_cv(self, model_name, params):
grid = ParameterGrid(params['grid_search'])
params_backbone = params.copy()
self.grid_df = | pd.DataFrame() | pandas.DataFrame |
import os
import pandas as pd
import constants
from utils import create_dir, snake_case
from tqdm import tqdm
import shutil
create_dir(constants.RAW_DATA_PATH, remove_if_exists=True)
if constants.PYTORCH_STRUCTURE:
classes = [snake_case(doggo_class) for doggo_class in list(set(constants.DOGGO_CLASSES.keys()))]
for doggo_class in classes:
create_dir(constants.RAW_DATA_PATH + '/' + doggo_class)
def check_equivalent(breed):
if breed in constants.EQUIVALENT_BREEDS.keys():
return constants.EQUIVALENT_BREEDS[breed]
def get_breeds_paths():
udacity_breeds_paths = {}
oxford_breeds_paths = {}
stanford_breeds_paths = {}
for folder_name in os.listdir(constants.UDACITY_RAW_DATA_PATH + '/train'):
breed = snake_case(folder_name.split('.')[1])
udacity_breed_paths_train = [(constants.UDACITY_RAW_DATA_PATH + '/train/' + folder_name + '/' + file_name) for file_name in os.listdir(constants.UDACITY_RAW_DATA_PATH + '/train/' + folder_name)]
udacity_breed_paths_test = [(constants.UDACITY_RAW_DATA_PATH + '/test/' + folder_name + '/' + file_name) for file_name in os.listdir(constants.UDACITY_RAW_DATA_PATH + '/test/' + folder_name)]
udacity_breed_paths_valid = [(constants.UDACITY_RAW_DATA_PATH + '/valid/' + folder_name + '/' + file_name) for file_name in os.listdir(constants.UDACITY_RAW_DATA_PATH + '/valid/' + folder_name)]
udacity_breeds_paths[breed] = udacity_breed_paths_train + udacity_breed_paths_test + udacity_breed_paths_valid
for file_name in os.listdir(constants.OXFORD_RAW_DATA_PATH):
breed = snake_case(('_').join(file_name.split('_')[:-1]))
if breed not in oxford_breeds_paths:
oxford_breeds_paths[breed] = []
oxford_breeds_paths[breed] = oxford_breeds_paths[breed] + [constants.OXFORD_RAW_DATA_PATH + '/' + file_name]
for folder_name in os.listdir(constants.STANFORD_RAW_DATA_PATH):
breed = snake_case(('-').join(folder_name.split('-')[1:]))
stanford_breeds_paths[breed] = [(constants.STANFORD_RAW_DATA_PATH + '/' + folder_name + '/' + file_name) for file_name in os.listdir(constants.STANFORD_RAW_DATA_PATH + '/' + folder_name)]
return {
'udacity' : udacity_breeds_paths,
'oxford' : oxford_breeds_paths,
'stanford' :stanford_breeds_paths
}
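# Shape of the returned mapping (sketch; breed keys and file names below are
# illustrative, derived from the folder-name parsing above):
#   {
#       'udacity':  {'affenpinscher': ['<UDACITY>/train/001.Affenpinscher/img1.jpg', ...], ...},
#       'oxford':   {'beagle': ['<OXFORD>/beagle_1.jpg', ...], ...},
#       'stanford': {'beagle': ['<STANFORD>/n02088364-beagle/img1.jpg', ...], ...},
#   }
# Each value is a flat list of image paths for that breed in the given source dataset.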
print('Paths retrieved, processing breeds ...')
df_doggos = | pd.DataFrame() | pandas.DataFrame |
import datetime as dt
import io
import unittest
from unittest.mock import patch
import numpy as np
import pandas as pd
from pandas.testing import assert_frame_equal, assert_series_equal
from spaced_repetition.domain.problem import Difficulty, ProblemCreator
from spaced_repetition.domain.problem_log import ProblemLogCreator, Result
from spaced_repetition.domain.tag import TagCreator
from spaced_repetition.presenters.cli_presenter import CliPresenter
# pylint: disable=protected-access, no-self-use
class TestCommonFormatters(unittest.TestCase):
def test_format_difficulty(self):
difficulty = pd.Series({1: Difficulty.MEDIUM})
expected_res = | pd.Series({1: Difficulty.MEDIUM.name}) | pandas.Series |
from src.preprocess.preprocessor import SeqProcessor
from src.config.static_config import StaticConfig
from src.train.bidirectional_lstm_model import Bidirectional_LSTM_Model
from src.train.pretrained_embedding_bidirectional_lstm_model import Bidirectional_LSTM_Model_Pretrained_Embedding
from src.train.attention_lstm_model import Attention_LSTM_Model
from src.train.bidirectional_lstm_model_layers_above import Bidirectional_LSTM_Layers_Model
import pandas as pd
import pickle
import numpy as np
from src.utils.utils import list_files_under_folder, create_folder, is_dir_exist
import sys
from src.train.bidirectional_lstm_model_layers_no_embedding import Bidirectional_LSTM_Model_Layers_No_Embedding
class Predictor(object):
def __init__(self):
self.x_test = None
self.global_config = StaticConfig()
self.preprocessor = None
def load_data(self, test_data_file_path, preprocessor_folder_path):
self.x_test = pd.read_csv(test_data_file_path)
tokenizer = pickle.load(open('{}/{}'.format(preprocessor_folder_path, self.global_config.tokenizer_save_name)
, "rb"))
self.preprocessor = SeqProcessor(tokenizer)
def predict(self, empty_model_object, models_parent_folder_path, prediction_save_path, submission=False
, load_sample_submission_file_path=None, use_attention= False):
print("##################### predict starts ########################")
create_folder(prediction_save_path)
if not submission:
original_labels = self.preprocessor.extract_y(self.x_test)
original_labels.to_csv("{}/{}".format(prediction_save_path, self.global_config.original_label_file_name))
        predict = None
        n_models = 0
for folder_name in self.global_config.model_names:
print("processing ",folder_name)
x_test = self.preprocessor.preprocess_train(self.x_test, submission)
predict_for_model = self.predict_for_model_under_same_folder(
empty_model_object.get_model(folder_name, preprocessor=self.preprocessor),
models_parent_folder_path,
folder_name,
prediction_save_path, x_test[0], is_attention=use_attention)
            if predict_for_model is None:
                continue
            if predict is None:
                predict = predict_for_model
            else:
                predict += predict_for_model
            n_models += 1
        # Average over the models that actually produced predictions
        predict = predict / max(n_models, 1)
if submission:
sample = pd.read_csv(load_sample_submission_file_path)
sample[self.global_config.labels] = predict
sample.to_csv("{}/{}".format(prediction_save_path, self.global_config.ensembled_submission_file_name), index=False)
else:
predict.to_csv("{}/{}".format(prediction_save_path, self.global_config.ensembled_predict_file_name))
print("##################### predict ends ########################")
def predict_for_model_under_same_folder(self,
empty_model_object,
models_folder,
folder_name,
prediction_save_path, x_test, is_attention=False):
model_path = "{}/{}".format(models_folder, folder_name)
print('predict_for_model_under_same_folder model_path', model_path)
if not is_dir_exist(model_path):
return None
model_names = list_files_under_folder(model_path)
y_test_list = []
model_folder = prediction_save_path + "/" + folder_name
create_folder(model_folder)
        if is_attention:
            # Expand once, before the loop; expanding per checkpoint would keep adding axes.
            x_test = np.expand_dims(x_test, axis=-1)
        for model_name in model_names:
            one_model_path = model_path + "/" + model_name
            new_model = empty_model_object
            new_model.load_weights(one_model_path)
            y_test = new_model.predict(x_test, batch_size=self.global_config.batch_size)
save_path_for_one = model_folder +"/"+self.global_config.predict_save_name
| pd.DataFrame(y_test,columns=self.global_config.labels) | pandas.DataFrame |
"""Tools for summarizing experimental results."""
from pathlib import Path
import numpy as np
import pandas as pd
from pandas import DataFrame
from utils.preprocessor import preprocess_datasets
datasets = ['yahoo', 'coat']
metrics = ['mae (w/o at)', 'mae (w/ at)', 'mse (w/o at)', 'mse (w/ at)', 'ndcg (w/o at)', 'ndcg (w/ at)']
model_names = ['uniform', 'uniform-at', 'user', 'user-at', 'item', 'item-at',
'both', 'both-at', 'nb', 'nb-at', 'nb_true', 'nb_true-at']
stats_idx = ['#User', '#Item', '#Rating', 'Sparsity',
'Avg. rating of train', 'Avg. rating of test', 'KL div']
def calc_kl_div(train: np.ndarray, test: np.ndarray) -> float:
"""Estimate KL divergence of rating distributions between training and test sets."""
p = np.unique(train[:, 2], return_counts=True)[1] / \
np.unique(train[:, 2], return_counts=True)[1].sum()
q = np.unique(test[:, 2], return_counts=True)[1] / \
np.unique(test[:, 2], return_counts=True)[1].sum()
return np.round(np.sum(np.where(p != 0, p * np.log(p / q), 0)), 4)
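# Worked example of the estimate above (sketch). If the training ratings are
# distributed p = [0.5, 0.3, 0.2] and the test ratings q = [0.4, 0.4, 0.2], then
#   KL(p || q) = 0.5*ln(0.5/0.4) + 0.3*ln(0.3/0.4) + 0.2*ln(0.2/0.2) ≈ 0.025
# Terms with p_i = 0 contribute nothing, but a rating value present in train and
# absent from test would make the estimate blow up, so both splits are assumed
# to cover the same set of rating values.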
def summarize_data_statistics() -> None:
"""Save dataset statistics with Tex Table Format."""
stat_data_list = []
Path('../paper_results').mkdir(exist_ok=True)
for data in datasets:
train, _, test, num_users, num_items = preprocess_datasets(data=data)
num_data = train.shape[0]
        sparsity = f'{100 * (num_data / (num_users * num_items)).round(4)}%'
avg_train, avg_test = train[:, 2].mean().round(3), test[:, 2].mean().round(3)
kl = calc_kl_div(train, test)
        stat_data = DataFrame(data=[num_users, num_items, num_data, sparsity, avg_train, avg_test, kl],
index=stats_idx, columns=[data]).T
stat_data_list.append(stat_data)
pd.concat(stat_data_list).to_csv('../paper_results/data_stat.csv', sep='&')
def summarize_experimental_results(data: str) -> None:
"""Summarize results with Tex Table format."""
raw_results_path = Path(f'../logs/{data}')
paper_results_path = Path(f'../paper_results/{data}')
paper_results_path.mkdir(exist_ok=True, parents=True)
results_mse_dict = {}
results_mae_dict = {}
results_ndcg_dict = {}
results_mse_dict_at = {}
results_mae_dict_at = {}
results_ndcg_dict_at = {}
for model_name in model_names:
results_ = pd.read_csv(str(raw_results_path / f'{model_name}/results.csv'), index_col=0)
if '-at' in model_name:
results_mse_dict_at[model_name[:-3]] = results_['MSE']
results_mae_dict_at[model_name[:-3]] = results_['MAE']
results_ndcg_dict_at[model_name[:-3]] = results_['nDCG@3']
else:
results_mse_dict[model_name] = results_['MSE']
results_mae_dict[model_name] = results_['MAE']
results_ndcg_dict[model_name] = results_['nDCG@3']
results_mae = DataFrame(results_mae_dict).describe().round(5).T
results_mse = | DataFrame(results_mse_dict) | pandas.DataFrame |
import uuid
import pandas as pd
import numpy as np
class agentsCreator:
def __init__(self):
self.excelBuildingInformation = "BuildingInSimulationAndStatsApril12.xlsx"
self.buildingsDf = | pd.read_excel(self.excelBuildingInformation) | pandas.read_excel |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
__author__ = "<NAME>"
__contact__ = "gambrosio[at]uma.es"
__copyright__ = "Copyright 2021, <NAME>"
__date__ = "2021/07/27"
__license__ = "MIT"
import sys
import datetime as dt
import sqlite3
import os
import cv2
import numpy as np
import pandas as pd
import mxnet as mx
from mxnet import image
from mxnet import context
import gluoncv as gcv
from gluoncv import model_zoo, data, utils
# import fiftyone as fo
# from matplotlib import pyplot as plt
# import helpers
import robotathome as rh
# import fire
class RobotAtHome():
"""RobotAtHome class with methods for Robot@Home dataset v2.x.y
The RobotAtHome class encapsulates methods to access the RobotAtHome
database. <https://doi.org/10.5281/zenodo.4530453>
Attributes:
rh_path (str, optional):
root path for robotathome database, usually rh.db
wspc_path (str, optional):
workspace path where temporary files are stored
db_filename (str, optional):
default database name
rgbd_path (str, optional):
path that completes rh_path, where rgbd files are stored
scene_path (str, optional):
path that coompletes rh_path, where scene files are stored
"""
def __init__(self,
rh_path='.',
wspc_path='.',
db_filename='rh.db',
rgbd_path='files/rgbd',
scene_path='files/scene'):
""" RobotAtHome constructor method """
self.__rh_path = rh_path
self.__wspc_path = wspc_path
self.__db_filename = db_filename
self.__rgbd_path = rgbd_path
self.__scene_path = scene_path
self.__con = None
self.__rgbd_views = []
# Initialization functions
self.__open_dataset()
self.__create_temp_views()
def __del__(self):
""" Robot@Home destructor method"""
def __open_dataset(self):
"""
This function makes the connection with the database and calls the
initialization functions, e.g. create temporal views
"""
db_full_path = os.path.join(self.__rh_path, self.__db_filename)
rh.logger.debug("db_full_path: {}", db_full_path)
try:
self.__con = sqlite3.connect(db_full_path)
rh.logger.info("Connection is established: {}", self.__db_filename)
        except sqlite3.Error as e:
            rh.logger.error("Error while trying to open database: {}", e)
def __close_dataset(self):
"""
This function closes the connection with the database
"""
self.__con.close()
rh.logger.info("The connection with the database has been successfully closed")
def __create_temp_views(self):
"""
        This function creates the temporary SQL views used internally by this class
"""
sql_str = '''
begin transaction;
drop view if exists rh_temp_lblrgbd;
create temp view rh_temp_lblrgbd as
select
rh_lblrgbd.id,
rh_lblrgbd.home_session_id as hs_id,
rh_home_sessions.name as hs_name,
rh_lblrgbd.home_subsession_id as hss_id,
rh_lblrgbd.home_id as h_id,
rh_homes.name as h_name,
rh_lblrgbd.room_id as r_id,
rh_rooms.name as r_name,
rh_lblrgbd.sensor_id as s_id,
rh_sensors.name as s_name,
rh_lblrgbd.time_stamp as t,
rh_lblrgbd.sensor_pose_x as s_px,
rh_lblrgbd.sensor_pose_y as s_py,
rh_lblrgbd.sensor_pose_z as s_pz,
rh_lblrgbd.sensor_pose_yaw as s_pya,
rh_lblrgbd.sensor_pose_pitch as s_ppi,
rh_lblrgbd.sensor_pose_roll as s_pro,
rh2_old2new_rgbd_files.new_file_1 as f1,
rh2_old2new_rgbd_files.new_file_2 as f2,
rh2_old2new_rgbd_files.new_file_3 as f3,
rh2_old2new_rgbd_files.new_path as pth
from rh_lblrgbd
inner join rh_home_sessions on home_session_id = rh_home_sessions.id
inner join rh_homes on rh_lblrgbd.home_id = rh_homes.id
inner join rh_rooms on rh_lblrgbd.room_id = rh_rooms.id
inner join rh_sensors on rh_lblrgbd.sensor_id = rh_sensors.id
inner join rh2_old2new_rgbd_files on rh2_old2new_rgbd_files.id = rh_lblrgbd.id;
commit;
'''
# Get a cursor to execute SQLite statements
cur = self.__con.cursor()
cur.executescript(sql_str)
self.__rgbd_views.append("rh_temp_lblrgbd")
rh.logger.trace("The view rh_temp_lblrgbd has been created")
def get_con(self):
"""
This function returns the sql connection variable
"""
return self.__con
def select_column(self, column_name, table_name):
'''
Returns a dataframe with grouped column values
(without repetition)
'''
# Get a cursor to execute SQLite statements
cur = self.__con.cursor()
# Build the query
# sql_str = ("select " + column_name + " from " + table_name + " group by " + column_name + ";")
# rows = cur.execute(sql_str)
# rh.logger.debug(rows)
# for row in rows:
# print(row)
# rh.logger.debug(rows2list(rows))
sql_str = (f"select {column_name} from {table_name} group by {column_name};")
df_rows = pd.read_sql_query(sql_str, self.__con)
return df_rows
def __get_temp_sql_object_names(self):
''' Return a list with temporary/internal created views'''
return self.select_column('tbl_name', 'sqlite_temp_master')
def get_rh2_rgbd_views(self):
"""
Return a list with RGB-D views
"""
# There is only one view for now
return self.__rgbd_views
def query(self, sql, df=True):
"""Execute a sqlquery over robotathome database
Parameters
----------
sql: can be a string with a sql query or a file name that contains the
sql query
df: boolean indicating if result is returned as a DataFrame (True) or
as a sqlite row list (False)
Returns
-------
ans: a DataFrame or a sqlite row list
"""
if os.path.isfile(sql):
script = open(sql, 'r')
query = script.read()
else:
query = sql
if df:
ans = pd.read_sql_query(query, self.__con)
else:
cur = self.__con.cursor()
cur.executescript(query)
ans = cur.fetchall()
if os.path.isfile(sql):
script.close()
return ans
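    # Hedged usage sketch (the database location is an assumption; the table and
    # columns exist in the schema referenced above):
    #
    #   rh_db = RobotAtHome(rh_path='~/robotathome', wspc_path='/tmp')
    #   df = rh_db.query("select id, name from rh_homes")   # plain SQL string -> DataFrame
    #   df = rh_db.query("my_query.sql")                     # or a file containing the SQL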
def get_home_session_names(self):
"""
Return a list with home session names
"""
return self.select_column('name', 'rh_home_sessions')
def get_home_names(self):
"""
        Return a list with home names
"""
return self.select_column('name', 'rh_homes')
def get_room_names(self):
"""
Return a list with room names
"""
return self.select_column('name', 'rh_rooms')
def get_room_type_names(self):
"""
Return a list with room type names
"""
return self.select_column('name', 'rh_room_types')
def get_sensor_names(self):
"""
Return a list with sensor names
"""
return self.select_column('name', 'rh_sensors')
def get_sensor_type_names(self):
"""
Return a list with sensor type names
"""
return self.select_column('name', 'rh_sensor_types')
def get_object_type_names(self):
"""
        Return a list with object type names
"""
return self.select_column('name', 'rh_object_types')
def get_locators(self):
""" Return a dataframe table with main indexes values (id and name),
i.e., home_session, home, room and home_subsession
"""
sql_str = """
select
home_session_id, rh_home_sessions.name as home_session_name,
rh_raw.home_id, rh_homes.name as home_name,
rh_raw.room_id, rh_rooms.name as room_name,
rh_raw.home_subsession_id
from rh_raw
inner join rh_home_sessions on home_session_id = rh_home_sessions.id
inner join rh_homes on rh_raw.home_id = rh_homes.id
inner join rh_rooms on rh_raw.room_id = rh_rooms.id
group by
home_session_id,
rh_raw.home_id,
rh_raw.room_id,
rh_raw.home_subsession_id
order by
rh_raw.home_session_id
"""
df_rows = pd.read_sql_query(sql_str, self.__con)
return df_rows
def get_sensor_observation_files(self,
source='lblrgbd',
home_session_name='alma-s1',
home_subsession=0,
room_name='alma_masterroom1',
sensor_name='RGBD_1'
):
"""
        This function queries the database to extract sensor observation
files filtered by home_session_name, home_subsession, room_name, and
sensor_name.
"""
# Get a cursor to execute SQLite statements
cur = self.__con.cursor()
        switcher = {
            # rh_temp_lblrgbd created in _create_temp_views at the beginning
            "lblrgbd": "rh_temp_lblrgbd",
        }
        sensor_observation_table = switcher.get(source)
        if sensor_observation_table is None:
            raise ValueError(f"Invalid source: {source}")
# Build the query
# sql_str = (
# '''
# select id, t, pth, f1, f2, f3
# from
# ''' +
# sensor_observation_table +
# '''
# where
# hs_name = ? and
# hss_id = ? and
# r_name = ? and
# s_name = ?
# order by t
# '''
# )
#
# parms = (home_session_name,
# home_subsession,
# room_name,
# sensor_name)
# cur.execute(sql_str, parms)
# cur.execute(sql_str)
# rows = cur.fetchall()
sql_str = (
f'''
select id, t, pth, f1, f2, f3
from {sensor_observation_table}
where
hs_name = '{home_session_name}' and
hss_id = {home_subsession} and
r_name = '{room_name}' and
s_name = '{sensor_name}'
order by t
'''
)
rh.logger.debug(sql_str)
df_rows = pd.read_sql_query(sql_str, self.__con)
return df_rows
def get_video_from_rgbd(self,
source='lblrgbd',
home_session_name='alma-s1',
home_subsession=0,
room_name='alma_masterroom1',
sensor_name='RGBD_1',
video_file_name=None
):
"""
        Build a video (AVI) from the RGB images of a sensor observation sequence,
        filtered by home session, subsession, room and sensor, and return the
        generated video file name.
"""
rows = self.get_sensor_observation_files(source,
home_session_name,
home_subsession,
room_name,
sensor_name)
# Computing frames per second
num_of_frames = len(rows)
seconds = (rows.iloc[-1]['t'] - rows.iloc[0]['t']) / 10**7
frames_per_second = num_of_frames / seconds
rh.logger.debug("frames per second: {:.2f}", frames_per_second)
# Get frame size
image_path = rows.iloc[0]['pth']
file_name = rows.iloc[0]['f2']
image_path_file_name = os.path.join(self.__rh_path,
self.__rgbd_path,
image_path,
file_name)
img = cv2.imread(image_path_file_name, cv2.IMREAD_COLOR)
img_h, img_w, _ = img.shape
# Opening video file
if video_file_name is None:
video_file_name = ''.join(
[
home_session_name,
'_', str(home_subsession),
'_', room_name,
'_', sensor_name,
dt.datetime.now().strftime("_%Y%m%d%H%M%S"),
'.avi'
]
)
fourcc = cv2.VideoWriter_fourcc(*'MJPG')
video_path_file_name = os.path.abspath(os.path.join(self.__wspc_path,
video_file_name)
)
out = cv2.VideoWriter(video_path_file_name,
fourcc,
frames_per_second,
(img_w, img_h))
for _, row in rows.iterrows():
image_path = row['pth']
file_name = row['f2']
image_path_file_name = os.path.join(self.__rh_path,
self.__rgbd_path,
image_path,
file_name)
img = cv2.imread(image_path_file_name, cv2.IMREAD_COLOR)
if rh.is_being_logged():
cv2.imshow('Debug mode (press q to exit)', img)
if cv2.waitKey(1) & 0xFF == ord('q'):
break
out.write(img)
out.release()
if rh.is_being_logged():
cv2.destroyAllWindows()
return video_file_name
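    # Hedged usage sketch (rh_db is assumed to be a RobotAtHome instance; the
    # session/room/sensor values simply mirror the defaults above):
    #
    #   avi_name = rh_db.get_video_from_rgbd(home_session_name='alma-s1',
    #                                        home_subsession=0,
    #                                        room_name='alma_masterroom1',
    #                                        sensor_name='RGBD_1')
    #   # the video is written under wspc_path and its file name is returned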
def get_composed_video_from_lblrgbd(self,
home_session_name='alma-s1',
home_subsession=0,
room_name='alma_masterroom1',
video_file_name=None
):
        ''' Build a single video showing the four RGB-D cameras side by side
        (RGBD_3, RGBD_4, RGBD_1, RGBD_2) for the given session, subsession and
        room, and return the generated video file name. '''
rows_rgbd_1 = self.get_sensor_observation_files('lblrgbd',
home_session_name,
home_subsession,
room_name,
'RGBD_1')
rows_rgbd_2 = self.get_sensor_observation_files('lblrgbd',
home_session_name,
home_subsession,
room_name,
'RGBD_2')
rows_rgbd_3 = self.get_sensor_observation_files('lblrgbd',
home_session_name,
home_subsession,
room_name,
'RGBD_3')
rows_rgbd_4 = self.get_sensor_observation_files('lblrgbd',
home_session_name,
home_subsession,
room_name,
'RGBD_4')
# Computing frames per second
num_of_frames = len(rows_rgbd_1)
seconds = (rows_rgbd_1.iloc[-1]['t'] - rows_rgbd_1.iloc[0]['t']) / 10**7
frames_per_second = num_of_frames / seconds
rh.logger.debug("frames per second: {:.2f}", frames_per_second)
# Get frame size
image_path = rows_rgbd_1.iloc[0]['pth']
file_name = rows_rgbd_1.iloc[0]['f2']
image_path_file_name = os.path.join(self.__rh_path,
self.__rgbd_path,
image_path,
file_name)
img = cv2.imread(image_path_file_name, cv2.IMREAD_COLOR)
img_h, img_w, _ = img.shape
# Opening video file
if video_file_name is None:
video_file_name = ''.join(
[
home_session_name,
'_', str(home_subsession),
'_', room_name,
'_RGBD_3412',
dt.datetime.now().strftime("_%Y%m%d%H%M%S"),
'.avi'
]
)
fourcc = cv2.VideoWriter_fourcc(*'MJPG')
video_path_file_name = os.path.abspath(os.path.join(self.__wspc_path,
video_file_name))
out = cv2.VideoWriter(video_path_file_name,
fourcc,
frames_per_second,
(4 * img_w, img_h))
for i in range(len(rows_rgbd_1)):
image_rgbd_1_path = rows_rgbd_1.iloc[i, 2]
file_rgbd_1_name = rows_rgbd_1.iloc[i, 4]
image_rgbd_1_path_file_name = os.path.join(self.__rh_path,
self.__rgbd_path,
image_rgbd_1_path,
file_rgbd_1_name)
image_rgbd_2_path = rows_rgbd_2.iloc[i, 2]
file_rgbd_2_name = rows_rgbd_2.iloc[i, 4]
image_rgbd_2_path_file_name = os.path.join(self.__rh_path,
self.__rgbd_path,
image_rgbd_2_path,
file_rgbd_2_name)
image_rgbd_3_path = rows_rgbd_3.iloc[i, 2]
file_rgbd_3_name = rows_rgbd_3.iloc[i, 4]
image_rgbd_3_path_file_name = os.path.join(self.__rh_path,
self.__rgbd_path,
image_rgbd_3_path,
file_rgbd_3_name)
image_rgbd_4_path = rows_rgbd_4.iloc[i, 2]
file_rgbd_4_name = rows_rgbd_4.iloc[i, 4]
image_rgbd_4_path_file_name = os.path.join(self.__rh_path,
self.__rgbd_path,
image_rgbd_4_path,
file_rgbd_4_name)
img_rgbd_1 = cv2.imread(image_rgbd_1_path_file_name, cv2.IMREAD_COLOR)
img_rgbd_2 = cv2.imread(image_rgbd_2_path_file_name, cv2.IMREAD_COLOR)
img_rgbd_3 = cv2.imread(image_rgbd_3_path_file_name, cv2.IMREAD_COLOR)
img_rgbd_4 = cv2.imread(image_rgbd_4_path_file_name, cv2.IMREAD_COLOR)
img = cv2.hconcat([img_rgbd_3, img_rgbd_4, img_rgbd_1, img_rgbd_2])
if rh.is_being_logged():
cv2.imshow('Debug mode (press q to exit)', img)
if cv2.waitKey(1) & 0xFF == ord('q'):
break
out.write(img)
out.release()
if rh.is_being_logged():
cv2.destroyAllWindows()
return video_file_name
def get_labels_from_lblrgbd(self, so_id):
"""
        This function returns the label rows for the observation referenced by
        sensor_observation_id.
SQL query
select * from rh_lblrgbd_labels
where sensor_observation_id = so_id
Parameters
----------
so_id : int
The primary key value to identify a row in the table
rh_lbl_rgbd_labels.
Returns
-------
A dataframe with the query result. An empty dataframe is returned when
no rows are available, i.e., when the sensor observation does not
belong to rh_lblrgbd (labelled rgbd)
"""
# Get a cursor to execute SQLite statements
cur = self.__con.cursor()
# # Build the query
# sql_str = (
# '''
# select * from rh_lblrgbd_labels
# where sensor_observation_id = ?
# '''
# )
# parms = (so_id,)
# cur.execute(sql_str, parms)
# rows = cur.fetchall()
sql_str = (
'''
select * from rh_lblrgbd_labels
where sensor_observation_id = {}
'''.format(so_id)
)
df_rows = pd.read_sql_query(sql_str, self.__con)
# print(df.shape)
# rows = df.to_records()
# for row in rows:
# print(row)
return df_rows
def __get_mask(self, label_path_file_name):
mask = []
with open(label_path_file_name, "r") as file_handler:
line = file_handler.readline()
while line:
words = line.strip().split()
if words[0][0] != '#':
num_of_labels = int(words[0])
break
line = file_handler.readline()
for i in range(num_of_labels):
line = file_handler.readline()
words = line.strip().split()
num_of_rows = 0
line = file_handler.readline()
while line:
num_of_rows += 1
words = line.strip().split()
mask.append(list(map(int, words)))
line = file_handler.readline()
rh.logger.debug("mask height: {}", len(mask))
rh.logger.debug("mask width : {}", len(mask[0]))
mask = np.array(mask)
mask = np.rot90(mask)
return mask
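    # Expected layout of the label file parsed above (a sketch inferred from the
    # parser, not from a format specification):
    #
    #   # optional comment lines
    #   3                     <- number of labels
    #   ...                   <- one descriptor line per label (skipped here)
    #   0 0 4 4 ...           <- integer mask values, one row per line
    #   0 1 1 4 ...
    #
    # The collected rows are rotated 90 degrees before being returned.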
def get_mask_from_lblrgbd(self, so_id):
"""
        This function returns the label mask of a labelled RGB-D observation.

        Parameters
        ----------
        so_id : int
            Sensor observation id (primary key in rh_temp_lblrgbd).

        Returns
        -------
        A 2D numpy array in which each pixel stores a bit field encoding the
        labels present at that pixel.
"""
# Get a cursor to execute SQLite statements
# cur = self.__con.cursor()
# sql_str = (
# '''
# select pth, f3
# from rh_temp_lblrgbd
# where id = {}
# '''.format(so_id)
# )
sql_str = (
f'''
select pth, f3
from rh_temp_lblrgbd
where id = {so_id}
'''
)
df_rows = pd.read_sql_query(sql_str, self.__con)
rh.logger.debug("df_rows.shape: {}", df_rows.shape)
# print(df_rows)
# print(df_rows.loc[0,"pth"])
# print(df_rows.loc[0,"f3"])
label_path_file_name = os.path.join(self.__rh_path,
self.__rgbd_path,
df_rows.loc[0, "pth"],
df_rows.loc[0, "f3"])
rh.logger.debug("label_path_file_name: {}", label_path_file_name)
mask = self.__get_mask(label_path_file_name)
return mask
def get_label_mask(self, mask, labels):
"""
        Returns a list of binary 2D arrays, one per requested label
        (pixels being 1s and 0s)
"""
masks = []
for label in labels:
arr = mask & (2**(label))
np.clip(arr, 0, 1, out=arr)
arr = np.uint8(arr[:,2:-2])
masks.append(arr)
return masks
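    # Worked example of the bit test above. Each mask pixel stores a bit field over
    # local label ids, so a pixel value of 6 (binary 110) means labels 1 and 2 are
    # present at that pixel:
    #
    #   mask & 2**1  -> nonzero   (label 1 present)
    #   mask & 2**2  -> nonzero   (label 2 present)
    #   mask & 2**0  -> 0         (label 0 absent)
    #
    # np.clip(..., 0, 1) then turns each per-label result into a binary 0/1 mask.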
def get_rgb_image_from_lblrgbd(self, so_id):
"""
        This function returns the RGB image of a labelled RGB-D observation.

        Parameters
        ----------
        so_id : int
            Sensor observation id (primary key in rh_temp_lblrgbd).

        Returns
        -------
        A BGR cv2 image
"""
sql_str = f"""
select pth, f2
from rh_temp_lblrgbd
where id = {so_id}
"""
df_rows = pd.read_sql_query(sql_str, self.__con)
# rh.logger.debug("df_rows.shape: {}", df_rows.shape)
rgb_image_path_file_name = os.path.join(self.__rh_path,
self.__rgbd_path,
df_rows.loc[0, "pth"],
df_rows.loc[0, "f2"])
# rh.logger.debug("rgb_image_path_file_name: {}",
# rgb_image_path_file_name)
bgr_img = cv2.imread(rgb_image_path_file_name, cv2.IMREAD_COLOR)
return bgr_img
def get_depth_image_from_lblrgbd(self, so_id):
"""
        This function returns the depth image of a labelled RGB-D observation.

        Parameters
        ----------
        so_id : int
            Sensor observation id (primary key in rh_temp_lblrgbd).

        Returns
        -------
        A gray levels cv2 image
"""
sql_str = f"""
select pth, f1
from rh_temp_lblrgbd
where id = {so_id}
"""
df_rows = pd.read_sql_query(sql_str, self.__con)
rh.logger.debug("df_rows.shape: {}", df_rows.shape)
rgb_image_path_file_name = os.path.join(self.__rh_path,
self.__rgbd_path,
df_rows.loc[0, "pth"],
df_rows.loc[0, "f1"])
rh.logger.debug("rgb_image_path_file_name: {}",
rgb_image_path_file_name)
img = cv2.imread(rgb_image_path_file_name, cv2.IMREAD_COLOR)
return cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
def lblrgbd_plot_labels(self, so_id):
img = self.get_rgb_image_from_lblrgbd(so_id)
labels = self.get_labels_from_lblrgbd(so_id)
rh.logger.debug("labels: {}", labels)
mask = self.get_mask_from_lblrgbd(so_id)
label_mask = self.get_label_mask(mask, labels['local_id'])
alpha = 0.7
out_img, colors = rh.overlay_mask(cv2.cvtColor(img, cv2.COLOR_BGR2RGB),
label_mask,
alpha)
rh.plot_mask(out_img, labels['name'], colors)
"""
MXNet + GluoncV
"""
def lblrgbd_rgb_image_object_detection(self, so_id,
model='yolo3_darknet53_coco'):
bgr_img = self.get_rgb_image_from_lblrgbd(so_id)
chw_img, class_names, nn_out = rh.object_detection_with_gluoncv(
cv2.cvtColor(bgr_img, cv2.COLOR_BGR2RGB),
model
)
return bgr_img, chw_img, class_names, nn_out
def lblrgbd_object_detection(self,
source='lblrgbd',
home_session_name='alma-s1',
home_subsession=0,
room_name='alma_masterroom1',
sensor_name='RGBD_1',
video_file_name=None,
model='yolo3_darknet53_coco',
gpu=False
):
"""
        Run object detection over the RGB images of a labelled RGB-D sequence,
        write a video with the predicted bounding boxes drawn on each frame, and
        return the per-frame network outputs together with the video file name.
"""
rows = self.get_sensor_observation_files(source,
home_session_name,
home_subsession,
room_name,
sensor_name)
# Computing frames per second
num_of_frames = len(rows)
seconds = (rows.iloc[-1]['t'] - rows.iloc[0]['t']) / 10**7
frames_per_second = num_of_frames / seconds
rh.logger.debug("frames per second: {:.2f}", frames_per_second)
# Get frame size
img = self.get_rgb_image_from_lblrgbd(rows.iloc[0]['id'])
img_h, img_w, _ = img.shape
# Opening video file
if video_file_name is None:
video_file_name = ''.join(
[
home_session_name,
'_', str(home_subsession),
'_', room_name,
'_', sensor_name,
'_by_', model,
dt.datetime.now().strftime("_%Y%m%d%H%M%S"),
'.avi'
]
)
fourcc = cv2.VideoWriter_fourcc(*'MJPG')
video_path_file_name = os.path.abspath(os.path.join(self.__wspc_path,
video_file_name)
)
rh.rename_if_exist(video_path_file_name)
out = cv2.VideoWriter(video_path_file_name,
fourcc,
frames_per_second,
(img_w, img_h))
##############################################
# NN
##############################################
yolo_models = rh.get_yolo_models()
rcnn_models = rh.get_rcnn_models()
if model not in yolo_models + rcnn_models:
raise Exception(f"Sorry, the model '{model}' is not allowed")
# Set context to cpu or gpu
ctx_ = mx.context.gpu() if gpu else mx.context.cpu()
# Load Pretrained Model from the CV model zoo
net = gcv.model_zoo.get_model(model,
pretrained=True,
ctx=ctx_)
class_names_ = net.classes
nn_out_list = []
i = 0
for _, row in rows.iterrows():
img = self.get_rgb_image_from_lblrgbd(row['id'])
i += 1
if rh.is_being_logged('INFO'):
sys.stdout.write("\rProcessing frame %i of %i" % (i, len(rows)))
sys.stdout.flush()
short_edge_size = min(img.shape[0:2])
if model in yolo_models:
trnf_img, _ = gcv.data.transforms.presets.yolo.transform_test(mx.nd.array(img),
short=short_edge_size)
if model in rcnn_models:
trnf_img, _ = gcv.data.transforms.presets.rcnn.transform_test(mx.nd.array(img),
short=short_edge_size)
class_ids, scores, bounding_boxs = net(trnf_img)
df_nn_out = rh.nn_out2df(class_ids, scores, bounding_boxs)
nn_out_list.append(df_nn_out)
utils.viz.cv_plot_bbox(img,
bounding_boxs[0],
scores[0],
class_ids[0],
class_names=class_names_,
thresh=0.2,
linewidth=1)
if rh.is_being_logged():
cv2.imshow('Debug mode (press q to exit)', img)
if cv2.waitKey(1) & 0xFF == ord('q'):
break
out.write(img)
out.release()
if rh.is_being_logged():
cv2.destroyAllWindows()
df_nn_out = pd.DataFrame(nn_out_list, columns=['class_ids',
'scores',
'bounding_boxs'])
return df_nn_out, video_file_name
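    # Hedged usage sketch (rh_db is assumed to be a RobotAtHome instance; the model
    # name is one of the GluonCV zoo detectors accepted above):
    #
    #   detections, avi_name = rh_db.lblrgbd_object_detection(
    #       home_session_name='alma-s1',
    #       room_name='alma_masterroom1',
    #       sensor_name='RGBD_1',
    #       model='yolo3_darknet53_coco',
    #       gpu=False)
    #   # detections holds per-frame class ids, scores and bounding boxes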
def process_with_yolo(self,
source='lblrgbd',
home_session_name='alma-s1',
home_subsession=0,
room_name='alma_masterroom1',
sensor_name='RGBD_1',
video_file_name=None,
gpu=False
):
"""
        Deprecated: run YOLOv3 object detection over a labelled RGB-D sequence and
        write an annotated video. Use lblrgbd_object_detection instead.
"""
rh.logger.warning("This function is DEPRECATED, use lblrgbd_object_detection()")
rh.logger.warning("with model='yolo3_darknet53_coco' instead ")
rows = self.get_sensor_observation_files(source,
home_session_name,
home_subsession,
room_name,
sensor_name)
# Computing frames per second
num_of_frames = len(rows)
seconds = (rows.iloc[-1]['t'] - rows.iloc[0]['t']) / 10**7
frames_per_second = num_of_frames / seconds
rh.logger.debug("frames per second: {:.2f}", frames_per_second)
# Get frame size
image_path = rows.iloc[0]['pth']
file_name = rows.iloc[0]['f2']
image_path_file_name = os.path.join(self.__rh_path,
self.__rgbd_path,
image_path,
file_name)
img = cv2.imread(image_path_file_name, cv2.IMREAD_COLOR)
img_h, img_w, _ = img.shape
# Opening video file
if video_file_name is None:
video_file_name = ''.join(
[
home_session_name,
'_', str(home_subsession),
'_', room_name,
'_', sensor_name,
'_by_YOLO',
dt.datetime.now().strftime("_%Y%m%d%H%M%S"),
'.avi'
]
)
fourcc = cv2.VideoWriter_fourcc(*'MJPG')
video_path_file_name = os.path.abspath(os.path.join(self.__wspc_path,
video_file_name)
)
rh.rename_if_exist(video_path_file_name)
out = cv2.VideoWriter(video_path_file_name,
fourcc,
frames_per_second,
(img_w, img_h))
##############################################
# NN
##############################################
# get NN model
ctx_ = context.gpu() if gpu else context.cpu()
net = model_zoo.get_model('yolo3_darknet53_coco',
pretrained=True,
ctx=ctx_)
nn_out = []
i = 0
for _, row in rows.iterrows():
image_path = row['pth']
file_name = row['f2']
image_path_file_name = os.path.join(self.__rh_path,
self.__rgbd_path,
image_path,
file_name)
i += 1
if rh.is_being_logged('INFO'):
sys.stdout.write("\rProcessing frame %i of %i" % (i, len(rows)))
sys.stdout.flush()
# core
try:
img = image.imread(image_path_file_name)
            except Exception:
print('%s is not a valid raster image' % image_path_file_name)
# long_edge_size = img.shape[0]
short_edge_size = img.shape[1]
x, img = data.transforms.presets.yolo.load_test(image_path_file_name,
short=short_edge_size)
# rh.logger.debug('Shape of pre-processed image: {}', x.shape)
class_ids, scores, bounding_boxs = net(x)
# to DataFrame
df_class_ids = pd.DataFrame(class_ids.asnumpy()[0].tolist(),
columns=['class_ids'])
df_scores = pd.DataFrame(scores.asnumpy()[0].tolist(),
columns=['scores'])
df_bounding_boxs = pd.DataFrame(
bounding_boxs.asnumpy()[0].tolist(),
columns=['xmin', 'ymin', 'xmax', 'ymax'])
nn_out.append([df_class_ids,
df_scores,
df_bounding_boxs])
utils.viz.cv_plot_bbox(img,
bounding_boxs[0],
scores[0],
class_ids[0],
class_names=net.classes,
thresh=0.2,
linewidth=1)
if rh.is_being_logged():
cv2.imshow('Debug mode (press q to exit)', img)
if cv2.waitKey(1) & 0xFF == ord('q'):
break
out.write(img)
out.release()
if rh.is_being_logged():
cv2.destroyAllWindows()
df_nn_out = pd.DataFrame(nn_out, columns=['class_ids',
'scores',
'bounding_boxs'])
return df_nn_out, video_file_name
def process_with_rcnn(self,
source='lblrgbd',
home_session_name='alma-s1',
home_subsession=0,
room_name='alma_masterroom1',
sensor_name='RGBD_1',
video_file_name=None,
gpu=False
):
"""
        Deprecated: run Faster R-CNN object detection over a labelled RGB-D sequence
        and write an annotated video. Use lblrgbd_object_detection instead.
"""
rh.logger.warning("This function is DEPRECATED, use lblrgbd_object_detection()")
rh.logger.warning("with model='faster_rcnn_resnet50_v1b_coco' instead ")
rows = self.get_sensor_observation_files(source,
home_session_name,
home_subsession,
room_name,
sensor_name)
# Computing frames per second
num_of_frames = len(rows)
seconds = (rows.iloc[-1]['t'] - rows.iloc[0]['t']) / 10**7
frames_per_second = num_of_frames / seconds
rh.logger.debug("frames per second: {:.2f}", frames_per_second)
# Get frame size
image_path = rows.iloc[0]['pth']
file_name = rows.iloc[0]['f2']
image_path_file_name = os.path.join(self.__rh_path,
self.__rgbd_path,
image_path,
file_name)
img = cv2.imread(image_path_file_name, cv2.IMREAD_COLOR)
img_h, img_w, _ = img.shape
# Opening video file
if video_file_name is None:
video_file_name = ''.join(
[
home_session_name,
'_', str(home_subsession),
'_', room_name,
'_', sensor_name,
'_by_RCNN',
dt.datetime.now().strftime("_%Y%m%d%H%M%S"),
'.avi'
]
)
fourcc = cv2.VideoWriter_fourcc(*'MJPG')
video_path_file_name = os.path.abspath(os.path.join(self.__wspc_path,
video_file_name)
)
out = cv2.VideoWriter(video_path_file_name,
fourcc,
frames_per_second,
(img_w, img_h))
##############################################
# NN
##############################################
# get NN model
# import mxnet as mx
# ctx = mx.gpu() if gpu else mx.cpu() # Set context
# ctx = context.cpu()
# ctx = context.cpu_pinned()
# ctx = context.gpu(dev_id)
ctx_ = context.gpu() if gpu else context.cpu()
net = model_zoo.get_model('faster_rcnn_resnet50_v1b_coco',
pretrained=True,
ctx=ctx_)
nn_out = []
i = 0
for _, row in rows.iterrows():
image_path = row['pth']
file_name = row['f2']
image_path_file_name = os.path.join(self.__rh_path,
self.__rgbd_path,
image_path,
file_name)
i += 1
sys.stdout.write("\rProcessing frame %i of %i" % (i, len(rows)))
sys.stdout.flush()
# core
try:
img = image.imread(image_path_file_name)
            except Exception:
print('%s is not a valid raster image' % image_path_file_name)
# long_edge_size = img.shape[0]
short_edge_size = img.shape[1]
x, img = data.transforms.presets.rcnn.load_test(image_path_file_name,
short=short_edge_size)
# rh.logger.debug('Shape of pre-processed image: {}', x.shape)
class_ids, scores, bounding_boxs = net(x)
# to DataFrame
df_class_ids = pd.DataFrame(class_ids.asnumpy()[0].tolist(),
columns=['class_ids'])
df_scores = pd.DataFrame(scores.asnumpy()[0].tolist(),
columns=['scores'])
df_bounding_boxs = pd.DataFrame(
bounding_boxs.asnumpy()[0].tolist(),
columns=['xmin', 'ymin', 'xmax', 'ymax'])
nn_out.append([df_class_ids,
df_scores,
df_bounding_boxs])
utils.viz.cv_plot_bbox(img,
bounding_boxs[0],
scores[0],
class_ids[0],
class_names=net.classes,
thresh=0.2,
linewidth=1)
if rh.is_being_logged():
cv2.imshow('Debug mode (press q to exit)', img)
if cv2.waitKey(1) & 0xFF == ord('q'):
break
out.write(img)
out.release()
if rh.is_being_logged():
cv2.destroyAllWindows()
df_nn_out = pd.DataFrame(nn_out, columns=['class_ids',
'scores',
'bounding_boxs'])
return df_nn_out, video_file_name
"""
Fiftyone
"""
"""
Lab
"""
def create_table_linking_observations_and_lblrgbd(self):
"""
        This function links sensor observations in rh_observations with their
        labelled RGB-D counterparts in rh_lblrgbd.
"""
# Get a cursor to execute SQLite statements
# cur = self.__con.cursor()
sql_str_lblrgbd = (
f'''
select id, sensor_id
from rh_lblrgbd
order by id
limit 860
'''
)
sql_str_observations = (
f'''
select id, sensor_id
from rh_observations
order by id
-- limit 20
'''
)
df_rows_lblrgbd = | pd.read_sql_query(sql_str_lblrgbd, self.__con) | pandas.read_sql_query |
#!/usr/bin/env python
# coding=utf-8
from __future__ import absolute_import
from __future__ import print_function
import unittest
import pytest
import numpy as np
import pandas as pd
from plaidcloud.utilities import frame_manager
from plaidcloud.utilities.frame_manager import coalesce
__author__ = "<NAME>"
__copyright__ = "© Copyright 2009-2014, Tartan Solutions, Inc"
__credits__ = ["<NAME>", "<NAME>"]
__license__ = "Apache 2.0"
__maintainer__ = "<NAME>"
__email__ = "<EMAIL>"
nan = np.nan
# Test to see that 2 data frames are equal
# http://stackoverflow.com/questions/14224172/equality-in-pandas-dataframes-column-order-matters
def assertFrameEqual(df1, df2, **kwargs):
""" Assert that two dataframes are equal, ignoring ordering of columns
Args:
df1 (`pandas.DataFrame`): The DataFrame to compare against `df2`
df2 (`pandas.DataFrame`): The DataFrame to compare against `df1`
**kwargs (dict): A dict to pass to `pandas.util.testing.assert_frame_equal`
"""
from pandas.util.testing import assert_frame_equal
return assert_frame_equal(df1, df2, check_names=True, check_like=True, **kwargs)
class TestFrameManager(unittest.TestCase):
"""These tests validate the data model methods"""
def setUp(self):
"Constructs a test environment if necessary"
self.df = frame_manager.pd.DataFrame([('Andrew', 31, 500), ('Optimus', 30, 1000), ('Iron Man', 51, 1250), ('Batman', 75, 50), ('Andrew', 31, 2500)], columns=['Name', 'Age', 'Points'])
# duplicate
self.df2 = frame_manager.pd.DataFrame([('Andrew', 31, 500), ('Optimus', 30, 1000), ('Iron Man', 51, 1250), ('Batman', 75, 50), ('Andrew', 31, 2500)], columns=['Name', 'Age', 'Points'])
self.df9 = frame_manager.pd.DataFrame([('Andrew', 31, 5), ('Optimus', 30, 10), ('Iron Man', 51, 12), ('Batman', 75, 11)], columns=['Name', 'age', 'Level'])
# Deadpool is villain aged 23... not listed
self.df3 = frame_manager.pd.DataFrame([(30, 'Autobot'), (51, 'Superhero'), (75, 'Superhero'), (23, 'Villain')], columns=['Age', 'Title'])
self.df_blank = frame_manager.pd.DataFrame()
self.df_mon_val = frame_manager.pd.DataFrame([('Jan', 5), ('Feb', 10), ('Mar', 15), ('Jan', 20), ('Feb', 25), ('Mar', 30)], columns = ['mon', 'val'])
self.df6 = frame_manager.pd.DataFrame([(30, 'Autobot', 2354, 0), (30, 'Decepticon', 18, 0), (51, 'Superhero', 234, 0), (75, 'Superhero', 897, 0), (23, 'Villain', 46546, 0)], columns=['Age', 'Title', 'DropMe', 'Points'])
# def test_get_frame_model_path(self):
# pass
# def test_get_frame_zone_path(self):
# pass
# def test_load_frame(self):
# pass
# def test_load_frame_meta(self):
# pass
# def test_clear_frame(self):
# pass
# def test_clear_zone_frame(self):
# pass
# def test_load_zone_frame(self):
# pass
# def test_load_zone_frame_meta(self):
# pass
# def test_save_frame(self):
# pass
# def test_get_tmp_frame_path(self):
# pass
# def test_compress_frame(self):
# pass
# def test_uncompress_frame(self):
# pass
# def test_append_frame(self):
# #x = frame_manager.append_frame(
# pass
def test_describe(self):
"""Tests to verify descriptive statistics about data frame"""
x = frame_manager.describe(self.df)
self.assertEqual(x['Age']['max'], max(self.df['Age']))
self.assertEqual(x['Points']['min'], min(self.df['Points']))
self.assertEqual(x['Age']['mean'], np.mean(self.df['Age']))
self.assertEqual(x['Points']['mean'], np.mean(self.df['Points']))
def test_count_unique(self):
"""Tests to verify count of distinct records in data frame"""
x = frame_manager.count_unique('Name', 'Points', self.df)
y = self.df.groupby('Name').count()['Age']['Andrew']
z = self.df.groupby('Name').count()['Age']['Iron Man']
self.assertEqual(x['Andrew'], y)
self.assertEqual(x['Iron Man'], z)
def test_sum(self):
"""Tests to verify sum of records in data frame"""
x = frame_manager.sum('Name', self.df)
y = self.df.groupby('Name').sum()
self.assertEqual(x['Points']['Andrew'], y['Points']['Andrew'])
self.assertEqual(x['Age']['Batman'], y['Age']['Batman'])
def test_std(self):
"""Tests to verify standard deviation of records in data frame"""
x = frame_manager.std('mon', self.df_mon_val)
y = self.df_mon_val.groupby('mon').std()
assertFrameEqual(x, y)
def test_mean(self):
"""Tests to verify mean of records in data frame"""
x = frame_manager.mean('Name', self.df)
y = self.df.groupby(['Name']).mean()
self.assertEqual(x['Points'][1], y['Points'][1])
def test_count(self):
"""Tests to verify count of records in data frame"""
x = frame_manager.count('Name', self.df)
y = self.df.groupby('Name').count()
self.assertEqual(x['Points'][1], y['Points'][1])
def test_inner_join(self):
"""Tests to verify inner join capability"""
x = frame_manager.inner_join(self.df, self.df3, ['Age'])
y = frame_manager.pd.merge(self.df, self.df3, 'inner', ['Age'])
assertFrameEqual(x, y)
def test_outer_join(self):
"""Tests to verify outer join capability"""
x = frame_manager.outer_join(self.df, self.df3, ['Age'])
y = frame_manager.pd.merge(self.df, self.df3, 'outer', ['Age'])
assertFrameEqual(x, y)
def test_left_join(self):
"""Tests to verify left join capability"""
x = frame_manager.left_join(self.df, self.df3, ['Age'])
y = frame_manager.pd.merge(self.df, self.df3, 'left', ['Age'])
assertFrameEqual(x, y)
def test_right_join(self):
"""Tests to verify right join capability"""
x = frame_manager.right_join(self.df, self.df3, ['Age'])
y = frame_manager.pd.merge(self.df, self.df3, 'right', ['Age'])
assertFrameEqual(x, y)
# def test_memoize(self):
# pass
# def test_geo_distance(self):
# pass
# def test_geo_location(self):
# pass
# def test_trailing_negative(self):
# pass
def test_now(self):
"""Tests to verify current time"""
x = frame_manager.now()
y = frame_manager.utc.timestamp()
self.assertEqual(x, y)
# def test_concat(self):
# df2 = self.df
# x = frame_manager.concat([self.df, df2], [self.df])
# print x
# def test_covariance(self):
# pass
# def test_correlation(self):
# pass
# def test_apply_agg(self):
# pass
# def test_distinct(self):
# pass
# def test_find_duplicates(self):
# pass
# def test_sort(self):
# pass
# def test_replace_column(self):
# pass
def test_replace(self):
"""Tests to verify replacement using dictionary key/value combinations"""
replace_dict = {'Optimus': 'Optimus Prime', 50: 5000}
x = frame_manager.replace(self.df, replace_dict)
y = self.df.replace(replace_dict)
assertFrameEqual(x, y)
# def test_reindex(self):
# pass
def test_rename_columns(self):
"""Tests to verify renamed columns using dictionary key/value combinations"""
rename_dict = {'Name': 'Title', 'Points': 'Salary'}
x = frame_manager.rename_columns(self.df, rename_dict)
y = self.df.rename(columns=rename_dict)
assertFrameEqual(x, y)
# def test_column_info(self):
# pass
@pytest.mark.skip('Dtypes seem to be wrong, should be passing sql types?')
def test_set_column_types(self):
"""Tests to verify data type conversion for columns"""
type_dict = {'Name': 's32', 'Points': 'float16', 'Age': 'int8'}
self.assertNotEqual('int8', self.df['Age'].dtypes)
self.assertNotEqual('float16', self.df['Points'].dtypes)
x = frame_manager.set_column_types(self.df, type_dict)
self.assertEqual('float32', x['Points'].dtypes)
self.assertEqual('int64', x['Age'].dtypes)
self.assertEqual('object', x['Name'].dtypes)
def test_drop_column(self):
"""Tests to verify columns dropped appropriately"""
x = frame_manager.drop_column(self.df, ['Age'])
y = self.df2
del y['Age']
assertFrameEqual(x, y)
def test_has_data(self):
"""Tests to verify a data frame does/doesn't have data"""
x = frame_manager.has_data(self.df_blank)
y = frame_manager.has_data(self.df)
self.assertFalse(x)
self.assertTrue(y)
# def test_in_column(self):
# pass
# def test_frame_source_reduce(self):
# """Tests to verify that data is filtered as expected (aka SQL Where)"""
# x = frame_manager.frame_source_reduce(self.df)
# assertFrameEqual(x, self.df2)
# def test_apply_variables(self):
# pass
# def test_frame_map_update(self):
# pass
# def test_get_entity_frame(self):
# pass
# def test_save_entity_frame(self):
# pass
def test_lookup(self):
"""Tests to verify lookup capability"""
# x = frame_manager.lookup(self.df, self.df6, ['Age'], None, ['Age', 'Title'])
orig_lookup = self.df6.copy()
w = frame_manager.lookup(self.df, self.df9, left_on=['Name', 'Age'], right_on=['Name', 'age'])
print(w)
x = frame_manager.lookup(self.df, self.df6, ['Age'])
y = frame_manager.distinct(self.df6, ['Age'])
z = frame_manager.left_join(self.df, y, ['Age'])
print(x)
print(z)
assertFrameEqual(x, z)
# ensure lookup frame integrity
assertFrameEqual(orig_lookup, self.df6)
def tearDown(self):
"Clean up any test structure or records generated during the testing"
del self.df
del self.df2
del self.df_blank
del self.df_mon_val
del self.df6
class TestCoalesce(unittest.TestCase):
def setUp(self):
self.reference_data = {
'A': [nan, 'aa', nan, nan, nan],
'B': ['b', 'bb', None, nan, 'bbbbb'],
'C': ['c', 'cc', 'ccc', 'cccc', 'ccccc'],
'D': ['d', '', nan, nan, nan],
'E': ['e', 'ee', nan, None, 7],
'one': [1, nan, nan, nan, nan], # float64
'two': [2, 2, 2.2, nan, 0], # float64
'three': [nan, nan, nan, 3, 3]
}
def test_string_columns(self):
"""Test the basic case with strings."""
df = pd.DataFrame(data=self.reference_data)
# Two columns
result = coalesce(df['A'], df['C'])
self.assertTrue(
(result == pd.Series(['c', 'aa', 'ccc', 'cccc', 'ccccc']))
.all()
)
# Three columns
result = coalesce(df['A'], df['D'], df['C'])
self.assertTrue(
(result == pd.Series(['d', 'aa', 'ccc', 'cccc', 'ccccc']))
.all()
)
# None is equivalent to NaN
result = coalesce(df['B'], df['C'])
self.assertTrue(
(result == pd.Series(['b', 'bb', 'ccc', 'cccc', 'bbbbb']))
.all()
)
def test_one_column(self):
"""Test that using one column is a no-op, returning no changes."""
df = pd.DataFrame(data=self.reference_data)
for c in df.columns:
col = df.loc[:, c]
result = coalesce(col)
self.assertTrue((result.fillna('nan') == col.fillna('nan')).all())
self.assertTrue((result.index == col.index).all())
def test_value_preservation(self):
"""Make sure valid values aren't overwritten by nulls."""
df = pd.DataFrame(data=self.reference_data)
result = coalesce(df['C'], df['A'])
self.assertTrue((result == df['C']).all())
def test_numeric_columns(self):
"""Test the basic case with numbers."""
df = pd.DataFrame(data=self.reference_data)
# Two columns
result = coalesce(df['one'], df['two'])
result = result.fillna('nan')
self.assertTrue(
(result == pd.Series([1., 2., 2.2, 'nan', 0.]))
.all()
)
# Three columns
result = coalesce(df['one'], df['two'], df['three'])
self.assertTrue(
(result == pd.Series([1., 2., 2.2, 3., 0.]))
.all()
)
def test_index_mismatch(self):
"""Indexes can be different as long as they're the same length.
The returned Series will have an index matching the first column's."""
df = pd.DataFrame(data=self.reference_data)
# Same-length columns with mismatched indexes compare just fine.
a = df.loc[:, 'A']
a.index = test_index = ['v', 'w', 'x', 'y', 'z']
result = coalesce(a, df['C'])
self.assertTrue(
(result.index == test_index)
.all()
)
self.assertTrue(
(result.index != df['C'].index)
.all()
)
self.assertTrue(
(result.values == pd.Series(['c', 'aa', 'ccc', 'cccc', 'ccccc']).values)
.all()
)
# Columns must be the same length, however.
too_short = pd.Series(['foo', 'bar'])
too_long = pd.Series(['foo', 'bar', 'baz', 'qux', 'quux', 'corge'])
with self.assertRaises(Exception):
result = coalesce(a, too_short)
with self.assertRaises(Exception):
result = coalesce(a, too_long)
def test_cross_type_comparison(self):
"""Cross type comparison is allowed in the standard use case."""
df = | pd.DataFrame(data=self.reference_data) | pandas.DataFrame |
# -*- coding: utf-8 -*-
"""
Created on Fri Feb 26 15:45:46 2021
@author: lcunha
"""
import os
from osgeo import ogr
from osgeo.gdalconst import GA_ReadOnly
import matplotlib.pyplot as plt
import sys
import pandas as pd
sys.path.append("/home/west/Projects/IUH_TWI/")
def plot_twi(IDs,outputfolder_twi,outputfolder_summary,filename,xlim):
plt.figure()
ncolsAr=[];catAr=[]
NoFiles=[]
for cat in IDs:
DatFile=os.path.join(outputfolder_twi,"cat-"+str(cat)+".dat")
catAr.append(cat)
if os.path.exists(DatFile):
TWI= | pd.read_csv(DatFile, sep=' ',skiprows=3,skipfooter=3,header=None,engine='python') | pandas.read_csv |
import re
import copy
import random
import itertools
from faker import Faker
from . import filth as filth_module
from .filth import Filth
from .detectors.tagged import KnownFilthItem
from typing import List, Dict, Union, Optional, Tuple, Callable, Iterable, Type, Set
import numpy as np
import pandas as pd
import sklearn.metrics
# I was originally thinking of building this into the Filth system, but they serve subtly different purposes:
# * Filths need to be merged by text location so that replacements can be made
# * TextPositions need to be merged by text location, but separated by type so that we can correctly count them
from .utils import ToStringMixin
Grouping = Dict[str, str]
GroupingFunction = Callable[[Filth], Grouping]
class TextPosition(ToStringMixin):
def __init__(self, filth: Filth, grouping_function: GroupingFunction):
self.beg = filth.beg
self.end = filth.end
self.detected = set() # type: Set[Tuple[str, ...]]
self.tagged = set() # type: Set[Tuple[str, ...]]
self.document_name = str(filth.document_name or '') # type: str
if isinstance(filth, filth_module.TaggedEvaluationFilth):
self.tagged.add(tuple(grouping_function(filth).values()))
else:
self.detected.add(tuple(grouping_function(filth).values()))
@staticmethod
def sort_key(position: 'TextPosition') -> Tuple[str, int, int]:
return (position.document_name, position.beg, -position.end)
def merge(self, other: 'TextPosition') -> 'TextPosition':
if self.document_name != other.document_name:
raise ValueError("Positions are in different documents")
if self.beg <= other.end and self.end > other.beg:
self.beg = min(self.beg, other.beg)
self.end = max(self.end, other.end)
self.tagged = self.tagged | other.tagged
self.detected = self.detected | other.detected
return self
raise ValueError(f"Positions do not overlap {self.beg} to {self.end} and {other.beg} to {other.end}")
def __repr__(self) -> str:
return self._to_string(['beg', 'end', 'tagged', 'detected', 'document_name', ])
def __eq__(self, other):
return self.__dict__ == other.__dict__
class FilthTypePositions(ToStringMixin, object):
def __init__(self, grouping_function: GroupingFunction, filth_type: str):
self.positions = [] # type: List[TextPosition]
self.filth_type = filth_type
self.grouping_function = grouping_function
self.column_names = None # type: Optional[List[str]]
def __repr__(self) -> str:
return self._to_string(['filth_type', 'positions', ])
def __eq__(self, other):
return self.__dict__ == other.__dict__
def add_filth(self, filth: Filth):
self.positions.append(TextPosition(filth, grouping_function=self.grouping_function))
if self.column_names is None:
self.column_names = list(self.grouping_function(filth).keys())
@staticmethod
def _merge_position_list(position_list: List[TextPosition]) -> List[TextPosition]:
position_list.sort(key=TextPosition.sort_key)
merged_positions = [] # type: List[TextPosition]
current_position = position_list[0]
for next_position in position_list[1:]:
if current_position.document_name != next_position.document_name or \
current_position.end <= next_position.beg:
merged_positions.append(current_position)
current_position = next_position
else:
current_position = current_position.merge(next_position)
merged_positions.append(current_position)
return merged_positions
def merge_positions(self):
self.positions = self._merge_position_list(self.positions)
def get_counts(self) -> pd.DataFrame:
self.merge_positions()
data_list = [] # type: List[Dict[Tuple[str, ...], int]]
for position in self.positions:
row = {
detected_name: 1
for detected_name in position.detected
}
row.update({
detected_name: 1
for detected_name in position.tagged
})
data_list.append(row)
dataframe = | pd.DataFrame(data_list) | pandas.DataFrame |
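# Illustrative sketch, not from the source above: _merge_position_list implements a
# standard sweep over sorted, possibly overlapping spans. The same idea on plain
# (beg, end) tuples within a single document, mirroring but not reusing TextPosition:
from typing import List, Tuple


def merge_spans(spans: List[Tuple[int, int]]) -> List[Tuple[int, int]]:
    """Merge overlapping spans; touching spans (end == beg) stay separate."""
    ordered = sorted(spans, key=lambda s: (s[0], -s[1]))  # like TextPosition.sort_key
    merged: List[Tuple[int, int]] = []
    for beg, end in ordered:
        if merged and beg < merged[-1][1]:
            last_beg, last_end = merged[-1]
            merged[-1] = (last_beg, max(last_end, end))
        else:
            merged.append((beg, end))
    return merged


# merge_spans([(0, 4), (2, 6), (10, 12)]) -> [(0, 6), (10, 12)]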
from datetime import datetime
import re
import unittest
import nose
from nose.tools import assert_equal
import numpy as np
from pandas.tslib import iNaT
from pandas import Series, DataFrame, date_range, DatetimeIndex, Timestamp
from pandas import compat
from pandas.compat import range, long, lrange, lmap, u
from pandas.core.common import notnull, isnull
import pandas.core.common as com
import pandas.util.testing as tm
import pandas.core.config as cf
_multiprocess_can_split_ = True
def test_mut_exclusive():
msg = "mutually exclusive arguments: '[ab]' and '[ab]'"
with tm.assertRaisesRegexp(TypeError, msg):
com._mut_exclusive(a=1, b=2)
assert com._mut_exclusive(a=1, b=None) == 1
assert com._mut_exclusive(major=None, major_axis=None) is None
def test_is_sequence():
is_seq = com._is_sequence
assert(is_seq((1, 2)))
assert(is_seq([1, 2]))
assert(not is_seq("abcd"))
assert(not is_seq(u("abcd")))
assert(not is_seq(np.int64))
class A(object):
def __getitem__(self):
return 1
assert(not is_seq(A()))
def test_notnull():
assert notnull(1.)
assert not notnull(None)
assert not notnull(np.NaN)
with cf.option_context("mode.use_inf_as_null", False):
assert notnull(np.inf)
assert notnull(-np.inf)
arr = np.array([1.5, np.inf, 3.5, -np.inf])
result = notnull(arr)
assert result.all()
with cf.option_context("mode.use_inf_as_null", True):
assert not notnull(np.inf)
assert not notnull(-np.inf)
arr = np.array([1.5, np.inf, 3.5, -np.inf])
result = notnull(arr)
assert result.sum() == 2
with cf.option_context("mode.use_inf_as_null", False):
float_series = Series(np.random.randn(5))
obj_series = Series(np.random.randn(5), dtype=object)
assert(isinstance(notnull(float_series), Series))
assert(isinstance(notnull(obj_series), Series))
def test_isnull():
assert not isnull(1.)
assert isnull(None)
assert isnull(np.NaN)
assert not isnull(np.inf)
assert not isnull(-np.inf)
float_series = Series(np.random.randn(5))
obj_series = Series(np.random.randn(5), dtype=object)
assert(isinstance(isnull(float_series), Series))
assert(isinstance(isnull(obj_series), Series))
# call on DataFrame
df = DataFrame(np.random.randn(10, 5))
df['foo'] = 'bar'
result = isnull(df)
expected = result.apply(isnull)
tm.assert_frame_equal(result, expected)
def test_isnull_tuples():
result = isnull((1, 2))
exp = np.array([False, False])
assert(np.array_equal(result, exp))
result = isnull([(False,)])
exp = np.array([[False]])
assert(np.array_equal(result, exp))
result = isnull([(1,), (2,)])
exp = np.array([[False], [False]])
assert(np.array_equal(result, exp))
# list of strings / unicode
result = isnull(('foo', 'bar'))
assert(not result.any())
result = isnull((u('foo'), u('bar')))
assert(not result.any())
def test_isnull_lists():
result = isnull([[False]])
exp = np.array([[False]])
assert(np.array_equal(result, exp))
result = isnull([[1], [2]])
exp = np.array([[False], [False]])
assert(np.array_equal(result, exp))
# list of strings / unicode
result = isnull(['foo', 'bar'])
assert(not result.any())
result = isnull([u('foo'), u('bar')])
assert(not result.any())
def test_isnull_datetime():
assert (not isnull(datetime.now()))
assert notnull(datetime.now())
idx = date_range('1/1/1990', periods=20)
assert(notnull(idx).all())
idx = np.asarray(idx)
idx[0] = iNaT
idx = DatetimeIndex(idx)
mask = isnull(idx)
assert(mask[0])
assert(not mask[1:].any())
def test_datetimeindex_from_empty_datetime64_array():
for unit in [ 'ms', 'us', 'ns' ]:
idx = DatetimeIndex(np.array([], dtype='datetime64[%s]' % unit))
assert(len(idx) == 0)
def test_nan_to_nat_conversions():
df = DataFrame(dict({
'A' : np.asarray(lrange(10),dtype='float64'),
'B' : Timestamp('20010101') }))
df.iloc[3:6,:] = np.nan
result = df.loc[4,'B'].value
assert(result == iNaT)
s = df['B'].copy()
s._data = s._data.setitem(tuple([slice(8,9)]),np.nan)
assert(isnull(s[8]))
# numpy < 1.7.0 is wrong
from distutils.version import LooseVersion
if LooseVersion(np.__version__) >= '1.7.0':
assert(s[8].value == np.datetime64('NaT').astype(np.int64))
def test_any_none():
assert(com._any_none(1, 2, 3, None))
assert(not com._any_none(1, 2, 3, 4))
def test_all_not_none():
assert(com._all_not_none(1, 2, 3, 4))
assert(not com._all_not_none(1, 2, 3, None))
assert(not com._all_not_none(None, None, None, None))
def test_repr_binary_type():
import string
letters = string.ascii_letters
btype = compat.binary_type
try:
raw = btype(letters, encoding=cf.get_option('display.encoding'))
except TypeError:
raw = btype(letters)
b = compat.text_type(compat.bytes_to_str(raw))
res = com.pprint_thing(b, quote_strings=True)
assert_equal(res, repr(b))
res = com.pprint_thing(b, quote_strings=False)
assert_equal(res, b)
def test_rands():
r = com.rands(10)
assert(len(r) == 10)
def test_adjoin():
data = [['a', 'b', 'c'],
['dd', 'ee', 'ff'],
['ggg', 'hhh', 'iii']]
expected = 'a dd ggg\nb ee hhh\nc ff iii'
adjoined = com.adjoin(2, *data)
assert(adjoined == expected)
def test_iterpairs():
data = [1, 2, 3, 4]
expected = [(1, 2),
(2, 3),
(3, 4)]
result = list(com.iterpairs(data))
assert(result == expected)
def test_split_ranges():
def _bin(x, width):
"return int(x) as a base2 string of given width"
return ''.join(str((x >> i) & 1) for i in range(width - 1, -1, -1))
def test_locs(mask):
nfalse = sum(np.array(mask) == 0)
remaining = 0
for s, e in com.split_ranges(mask):
remaining += e - s
assert 0 not in mask[s:e]
# make sure the total items covered by the ranges are a complete cover
assert remaining + nfalse == len(mask)
# exhaustively test all possible mask sequences of length 8
ncols = 8
for i in range(2 ** ncols):
cols = lmap(int, list(_bin(i, ncols))) # count up in base2
mask = [cols[i] == 1 for i in range(len(cols))]
test_locs(mask)
# base cases
test_locs([])
test_locs([0])
test_locs([1])
def test_indent():
s = 'a b c\nd e f'
result = com.indent(s, spaces=6)
assert(result == ' a b c\n d e f')
def test_banner():
ban = com.banner('hi')
assert(ban == ('%s\nhi\n%s' % ('=' * 80, '=' * 80)))
def test_map_indices_py():
data = [4, 3, 2, 1]
expected = {4: 0, 3: 1, 2: 2, 1: 3}
result = com.map_indices_py(data)
assert(result == expected)
def test_union():
a = [1, 2, 3]
b = [4, 5, 6]
union = sorted(com.union(a, b))
assert((a + b) == union)
def test_difference():
a = [1, 2, 3]
b = [1, 2, 3, 4, 5, 6]
inter = sorted(com.difference(b, a))
assert([4, 5, 6] == inter)
def test_intersection():
a = [1, 2, 3]
b = [1, 2, 3, 4, 5, 6]
inter = sorted(com.intersection(a, b))
assert(a == inter)
def test_groupby():
values = ['foo', 'bar', 'baz', 'baz2', 'qux', 'foo3']
expected = {'f': ['foo', 'foo3'],
'b': ['bar', 'baz', 'baz2'],
'q': ['qux']}
grouped = com.groupby(values, lambda x: x[0])
for k, v in grouped:
assert v == expected[k]
def test_is_list_like():
passes = ([], [1], (1,), (1, 2), {'a': 1}, set([1, 'a']), Series([1]),
Series([]), Series(['a']).str)
fails = (1, '2', object())
for p in passes:
assert com.is_list_like(p)
for f in fails:
assert not com.is_list_like(f)
def test_ensure_int32():
values = np.arange(10, dtype=np.int32)
result = com._ensure_int32(values)
assert(result.dtype == np.int32)
values = np.arange(10, dtype=np.int64)
result = com._ensure_int32(values)
assert(result.dtype == np.int32)
def test_ensure_platform_int():
# verify that when we create certain types of indices
# they remain the correct type under platform conversions
from pandas.core.index import Int64Index
# int64
x = Int64Index([1, 2, 3], dtype='int64')
assert(x.dtype == np.int64)
pi = com._ensure_platform_int(x)
assert(pi.dtype == np.int_)
# int32
x = Int64Index([1, 2, 3], dtype='int32')
assert(x.dtype == np.int32)
pi = com._ensure_platform_int(x)
assert(pi.dtype == np.int_)
# TODO: fix this broken test
# def test_console_encode():
# """
# On Python 2, if sys.stdin.encoding is None (IPython with zmq frontend)
# common.console_encode should encode things as utf-8.
# """
# if compat.PY3:
# raise nose.SkipTest
# with tm.stdin_encoding(encoding=None):
# result = com.console_encode(u"\u05d0")
# expected = u"\u05d0".encode('utf-8')
# assert (result == expected)
def test_is_re():
passes = re.compile('ad'),
fails = 'x', 2, 3, object()
for p in passes:
assert com.is_re(p)
for f in fails:
assert not com.is_re(f)
def test_is_recompilable():
passes = (r'a', u('x'), r'asdf', re.compile('adsf'),
u(r'\u2233\s*'), re.compile(r''))
fails = 1, [], object()
for p in passes:
assert com.is_re_compilable(p)
for f in fails:
assert not com.is_re_compilable(f)
class TestTake(unittest.TestCase):
# standard incompatible fill error
fill_error = re.compile("Incompatible type for fill_value")
_multiprocess_can_split_ = True
def test_1d_with_out(self):
def _test_dtype(dtype, can_hold_na):
data = np.random.randint(0, 2, 4).astype(dtype)
indexer = [2, 1, 0, 1]
out = np.empty(4, dtype=dtype)
com.take_1d(data, indexer, out=out)
expected = data.take(indexer)
tm.assert_almost_equal(out, expected)
indexer = [2, 1, 0, -1]
out = np.empty(4, dtype=dtype)
if can_hold_na:
com.take_1d(data, indexer, out=out)
expected = data.take(indexer)
expected[3] = np.nan
tm.assert_almost_equal(out, expected)
else:
with tm.assertRaisesRegexp(TypeError, self.fill_error):
com.take_1d(data, indexer, out=out)
# no exception o/w
data.take(indexer, out=out)
_test_dtype(np.float64, True)
_test_dtype(np.float32, True)
_test_dtype(np.uint64, False)
_test_dtype(np.uint32, False)
_test_dtype(np.uint16, False)
_test_dtype(np.uint8, False)
_test_dtype(np.int64, False)
_test_dtype(np.int32, False)
_test_dtype(np.int16, False)
_test_dtype(np.int8, False)
_test_dtype(np.object_, True)
_test_dtype(np.bool, False)
def test_1d_fill_nonna(self):
def _test_dtype(dtype, fill_value, out_dtype):
data = np.random.randint(0, 2, 4).astype(dtype)
indexer = [2, 1, 0, -1]
result = com.take_1d(data, indexer, fill_value=fill_value)
assert((result[[0, 1, 2]] == data[[2, 1, 0]]).all())
assert(result[3] == fill_value)
assert(result.dtype == out_dtype)
indexer = [2, 1, 0, 1]
result = com.take_1d(data, indexer, fill_value=fill_value)
assert((result[[0, 1, 2, 3]] == data[indexer]).all())
assert(result.dtype == dtype)
_test_dtype(np.int8, np.int16(127), np.int8)
_test_dtype(np.int8, np.int16(128), np.int16)
_test_dtype(np.int32, 1, np.int32)
_test_dtype(np.int32, 2.0, np.float64)
_test_dtype(np.int32, 3.0 + 4.0j, np.complex128)
_test_dtype(np.int32, True, np.object_)
_test_dtype(np.int32, '', np.object_)
_test_dtype(np.float64, 1, np.float64)
_test_dtype(np.float64, 2.0, np.float64)
_test_dtype(np.float64, 3.0 + 4.0j, np.complex128)
_test_dtype(np.float64, True, np.object_)
_test_dtype(np.float64, '', np.object_)
_test_dtype(np.complex128, 1, np.complex128)
_test_dtype(np.complex128, 2.0, np.complex128)
_test_dtype(np.complex128, 3.0 + 4.0j, np.complex128)
_test_dtype(np.complex128, True, np.object_)
_test_dtype(np.complex128, '', np.object_)
_test_dtype(np.bool_, 1, np.object_)
_test_dtype(np.bool_, 2.0, np.object_)
_test_dtype(np.bool_, 3.0 + 4.0j, np.object_)
_test_dtype(np.bool_, True, np.bool_)
_test_dtype(np.bool_, '', np.object_)
def test_2d_with_out(self):
def _test_dtype(dtype, can_hold_na):
data = np.random.randint(0, 2, (5, 3)).astype(dtype)
indexer = [2, 1, 0, 1]
out0 = np.empty((4, 3), dtype=dtype)
out1 = np.empty((5, 4), dtype=dtype)
com.take_nd(data, indexer, out=out0, axis=0)
com.take_nd(data, indexer, out=out1, axis=1)
expected0 = data.take(indexer, axis=0)
expected1 = data.take(indexer, axis=1)
tm.assert_almost_equal(out0, expected0)
tm.assert_almost_equal(out1, expected1)
indexer = [2, 1, 0, -1]
out0 = np.empty((4, 3), dtype=dtype)
out1 = np.empty((5, 4), dtype=dtype)
if can_hold_na:
com.take_nd(data, indexer, out=out0, axis=0)
com.take_nd(data, indexer, out=out1, axis=1)
expected0 = data.take(indexer, axis=0)
expected1 = data.take(indexer, axis=1)
expected0[3, :] = np.nan
expected1[:, 3] = np.nan
tm.assert_almost_equal(out0, expected0)
tm.assert_almost_equal(out1, expected1)
else:
for i, out in enumerate([out0, out1]):
with tm.assertRaisesRegexp(TypeError, self.fill_error):
com.take_nd(data, indexer, out=out, axis=i)
# no exception o/w
data.take(indexer, out=out, axis=i)
_test_dtype(np.float64, True)
_test_dtype(np.float32, True)
_test_dtype(np.uint64, False)
_test_dtype(np.uint32, False)
_test_dtype(np.uint16, False)
_test_dtype(np.uint8, False)
_test_dtype(np.int64, False)
_test_dtype(np.int32, False)
_test_dtype(np.int16, False)
_test_dtype(np.int8, False)
_test_dtype(np.object_, True)
_test_dtype(np.bool, False)
def test_2d_fill_nonna(self):
def _test_dtype(dtype, fill_value, out_dtype):
data = np.random.randint(0, 2, (5, 3)).astype(dtype)
indexer = [2, 1, 0, -1]
result = com.take_nd(data, indexer, axis=0, fill_value=fill_value)
assert((result[[0, 1, 2], :] == data[[2, 1, 0], :]).all())
assert((result[3, :] == fill_value).all())
assert(result.dtype == out_dtype)
result = com.take_nd(data, indexer, axis=1, fill_value=fill_value)
assert((result[:, [0, 1, 2]] == data[:, [2, 1, 0]]).all())
assert((result[:, 3] == fill_value).all())
assert(result.dtype == out_dtype)
indexer = [2, 1, 0, 1]
result = com.take_nd(data, indexer, axis=0, fill_value=fill_value)
assert((result[[0, 1, 2, 3], :] == data[indexer, :]).all())
assert(result.dtype == dtype)
result = com.take_nd(data, indexer, axis=1, fill_value=fill_value)
assert((result[:, [0, 1, 2, 3]] == data[:, indexer]).all())
assert(result.dtype == dtype)
_test_dtype(np.int8, np.int16(127), np.int8)
_test_dtype(np.int8, np.int16(128), np.int16)
_test_dtype(np.int32, 1, np.int32)
_test_dtype(np.int32, 2.0, np.float64)
_test_dtype(np.int32, 3.0 + 4.0j, np.complex128)
_test_dtype(np.int32, True, np.object_)
_test_dtype(np.int32, '', np.object_)
_test_dtype(np.float64, 1, np.float64)
_test_dtype(np.float64, 2.0, np.float64)
_test_dtype(np.float64, 3.0 + 4.0j, np.complex128)
_test_dtype(np.float64, True, np.object_)
_test_dtype(np.float64, '', np.object_)
_test_dtype(np.complex128, 1, np.complex128)
_test_dtype(np.complex128, 2.0, np.complex128)
_test_dtype(np.complex128, 3.0 + 4.0j, np.complex128)
_test_dtype(np.complex128, True, np.object_)
_test_dtype(np.complex128, '', np.object_)
_test_dtype(np.bool_, 1, np.object_)
_test_dtype(np.bool_, 2.0, np.object_)
_test_dtype(np.bool_, 3.0 + 4.0j, np.object_)
_test_dtype(np.bool_, True, np.bool_)
_test_dtype(np.bool_, '', np.object_)
def test_3d_with_out(self):
def _test_dtype(dtype, can_hold_na):
data = np.random.randint(0, 2, (5, 4, 3)).astype(dtype)
indexer = [2, 1, 0, 1]
out0 = np.empty((4, 4, 3), dtype=dtype)
out1 = np.empty((5, 4, 3), dtype=dtype)
out2 = np.empty((5, 4, 4), dtype=dtype)
com.take_nd(data, indexer, out=out0, axis=0)
com.take_nd(data, indexer, out=out1, axis=1)
com.take_nd(data, indexer, out=out2, axis=2)
expected0 = data.take(indexer, axis=0)
expected1 = data.take(indexer, axis=1)
expected2 = data.take(indexer, axis=2)
tm.assert_almost_equal(out0, expected0)
tm.assert_almost_equal(out1, expected1)
tm.assert_almost_equal(out2, expected2)
indexer = [2, 1, 0, -1]
out0 = np.empty((4, 4, 3), dtype=dtype)
out1 = np.empty((5, 4, 3), dtype=dtype)
out2 = np.empty((5, 4, 4), dtype=dtype)
if can_hold_na:
com.take_nd(data, indexer, out=out0, axis=0)
com.take_nd(data, indexer, out=out1, axis=1)
com.take_nd(data, indexer, out=out2, axis=2)
expected0 = data.take(indexer, axis=0)
expected1 = data.take(indexer, axis=1)
expected2 = data.take(indexer, axis=2)
expected0[3, :, :] = np.nan
expected1[:, 3, :] = np.nan
expected2[:, :, 3] = np.nan
tm.assert_almost_equal(out0, expected0)
tm.assert_almost_equal(out1, expected1)
tm.assert_almost_equal(out2, expected2)
else:
for i, out in enumerate([out0, out1, out2]):
with tm.assertRaisesRegexp(TypeError, self.fill_error):
com.take_nd(data, indexer, out=out, axis=i)
# no exception o/w
data.take(indexer, out=out, axis=i)
_test_dtype(np.float64, True)
_test_dtype(np.float32, True)
_test_dtype(np.uint64, False)
_test_dtype(np.uint32, False)
_test_dtype(np.uint16, False)
_test_dtype(np.uint8, False)
_test_dtype(np.int64, False)
_test_dtype(np.int32, False)
_test_dtype(np.int16, False)
_test_dtype(np.int8, False)
_test_dtype(np.object_, True)
_test_dtype(np.bool, False)
def test_3d_fill_nonna(self):
def _test_dtype(dtype, fill_value, out_dtype):
data = np.random.randint(0, 2, (5, 4, 3)).astype(dtype)
indexer = [2, 1, 0, -1]
result = com.take_nd(data, indexer, axis=0, fill_value=fill_value)
assert((result[[0, 1, 2], :, :] == data[[2, 1, 0], :, :]).all())
assert((result[3, :, :] == fill_value).all())
assert(result.dtype == out_dtype)
result = com.take_nd(data, indexer, axis=1, fill_value=fill_value)
assert((result[:, [0, 1, 2], :] == data[:, [2, 1, 0], :]).all())
assert((result[:, 3, :] == fill_value).all())
assert(result.dtype == out_dtype)
result = com.take_nd(data, indexer, axis=2, fill_value=fill_value)
assert((result[:, :, [0, 1, 2]] == data[:, :, [2, 1, 0]]).all())
assert((result[:, :, 3] == fill_value).all())
assert(result.dtype == out_dtype)
indexer = [2, 1, 0, 1]
result = com.take_nd(data, indexer, axis=0, fill_value=fill_value)
assert((result[[0, 1, 2, 3], :, :] == data[indexer, :, :]).all())
assert(result.dtype == dtype)
result = com.take_nd(data, indexer, axis=1, fill_value=fill_value)
assert((result[:, [0, 1, 2, 3], :] == data[:, indexer, :]).all())
assert(result.dtype == dtype)
result = com.take_nd(data, indexer, axis=2, fill_value=fill_value)
assert((result[:, :, [0, 1, 2, 3]] == data[:, :, indexer]).all())
assert(result.dtype == dtype)
_test_dtype(np.int8, np.int16(127), np.int8)
_test_dtype(np.int8, np.int16(128), np.int16)
_test_dtype(np.int32, 1, np.int32)
_test_dtype(np.int32, 2.0, np.float64)
_test_dtype(np.int32, 3.0 + 4.0j, np.complex128)
_test_dtype(np.int32, True, np.object_)
_test_dtype(np.int32, '', np.object_)
_test_dtype(np.float64, 1, np.float64)
_test_dtype(np.float64, 2.0, np.float64)
_test_dtype(np.float64, 3.0 + 4.0j, np.complex128)
_test_dtype(np.float64, True, np.object_)
_test_dtype(np.float64, '', np.object_)
_test_dtype(np.complex128, 1, np.complex128)
_test_dtype(np.complex128, 2.0, np.complex128)
_test_dtype(np.complex128, 3.0 + 4.0j, np.complex128)
_test_dtype(np.complex128, True, np.object_)
_test_dtype(np.complex128, '', np.object_)
_test_dtype(np.bool_, 1, np.object_)
_test_dtype(np.bool_, 2.0, np.object_)
_test_dtype(np.bool_, 3.0 + 4.0j, np.object_)
_test_dtype(np.bool_, True, np.bool_)
_test_dtype(np.bool_, '', np.object_)
def test_1d_other_dtypes(self):
arr = np.random.randn(10).astype(np.float32)
indexer = [1, 2, 3, -1]
result = com.take_1d(arr, indexer)
expected = arr.take(indexer)
expected[-1] = np.nan
tm.assert_almost_equal(result, expected)
def test_2d_other_dtypes(self):
arr = np.random.randn(10, 5).astype(np.float32)
indexer = [1, 2, 3, -1]
# axis=0
result = com.take_nd(arr, indexer, axis=0)
expected = arr.take(indexer, axis=0)
expected[-1] = np.nan
tm.assert_almost_equal(result, expected)
# axis=1
result = com.take_nd(arr, indexer, axis=1)
expected = arr.take(indexer, axis=1)
expected[:, -1] = np.nan
tm.assert_almost_equal(result, expected)
def test_1d_bool(self):
arr = np.array([0, 1, 0], dtype=bool)
result = com.take_1d(arr, [0, 2, 2, 1])
expected = arr.take([0, 2, 2, 1])
self.assert_(np.array_equal(result, expected))
result = com.take_1d(arr, [0, 2, -1])
self.assert_(result.dtype == np.object_)
def test_2d_bool(self):
arr = np.array([[0, 1, 0],
[1, 0, 1],
[0, 1, 1]], dtype=bool)
result = com.take_nd(arr, [0, 2, 2, 1])
expected = arr.take([0, 2, 2, 1], axis=0)
self.assert_(np.array_equal(result, expected))
result = com.take_nd(arr, [0, 2, 2, 1], axis=1)
expected = arr.take([0, 2, 2, 1], axis=1)
self.assert_(np.array_equal(result, expected))
result = com.take_nd(arr, [0, 2, -1])
self.assert_(result.dtype == np.object_)
def test_2d_float32(self):
arr = np.random.randn(4, 3).astype(np.float32)
indexer = [0, 2, -1, 1, -1]
# axis=0
result = com.take_nd(arr, indexer, axis=0)
result2 = np.empty_like(result)
com.take_nd(arr, indexer, axis=0, out=result2)
tm.assert_almost_equal(result, result2)
expected = arr.take(indexer, axis=0)
expected[[2, 4], :] = np.nan
tm.assert_almost_equal(result, expected)
#### this now accepts a float32! # test with float64 out buffer
out = np.empty((len(indexer), arr.shape[1]), dtype='float32')
com.take_nd(arr, indexer, out=out) # it works!
# axis=1
result = com.take_nd(arr, indexer, axis=1)
result2 = np.empty_like(result)
com.take_nd(arr, indexer, axis=1, out=result2)
tm.assert_almost_equal(result, result2)
expected = arr.take(indexer, axis=1)
expected[:, [2, 4]] = np.nan
tm.assert_almost_equal(result, expected)
def test_2d_datetime64(self):
# 2005/01/01 - 2006/01/01
arr = np.random.randint(long(11045376), long(11360736), (5,3))*100000000000
arr = arr.view(dtype='datetime64[ns]')
indexer = [0, 2, -1, 1, -1]
# axis=0
result = com.take_nd(arr, indexer, axis=0)
result2 = np.empty_like(result)
com.take_nd(arr, indexer, axis=0, out=result2)
tm.assert_almost_equal(result, result2)
expected = arr.take(indexer, axis=0)
expected.view(np.int64)[[2, 4], :] = iNaT
tm.assert_almost_equal(result, expected)
result = com.take_nd(arr, indexer, axis=0,
fill_value=datetime(2007, 1, 1))
result2 = np.empty_like(result)
com.take_nd(arr, indexer, out=result2, axis=0,
fill_value=datetime(2007, 1, 1))
tm.assert_almost_equal(result, result2)
expected = arr.take(indexer, axis=0)
expected[[2, 4], :] = datetime(2007, 1, 1)
tm.assert_almost_equal(result, expected)
# axis=1
result = com.take_nd(arr, indexer, axis=1)
result2 = np.empty_like(result)
com.take_nd(arr, indexer, axis=1, out=result2)
tm.assert_almost_equal(result, result2)
expected = arr.take(indexer, axis=1)
expected.view(np.int64)[:, [2, 4]] = iNaT
tm.assert_almost_equal(result, expected)
result = com.take_nd(arr, indexer, axis=1,
fill_value=datetime(2007, 1, 1))
result2 = np.empty_like(result)
com.take_nd(arr, indexer, out=result2, axis=1,
fill_value=datetime(2007, 1, 1))
| tm.assert_almost_equal(result, result2) | pandas.util.testing.assert_almost_equal |
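# Illustrative sketch, not from the source above: the TestTake row checks that
# take_1d/take_nd promote the output dtype when the fill value cannot be held by the
# input dtype (e.g. int32 plus a missing slot becomes float64). A numpy-only
# reproduction of that behaviour for the 1-D integer case:
import numpy as np

arr = np.array([10, 20, 30], dtype=np.int32)
indexer = np.array([2, 1, 0, -1])                 # -1 marks a missing position

out = arr.take(np.where(indexer >= 0, indexer, 0)).astype(np.float64)
out[indexer == -1] = np.nan                       # int32 cannot hold NaN, hence float64
# out -> array([30., 20., 10., nan])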
"""Functions for point patterns spatial statistics."""
from __future__ import annotations
from typing import Union # noqa: F401
from typing import TYPE_CHECKING
from typing_extensions import Literal
from scanpy import logging as logg
from anndata import AnnData
from numpy.random import default_rng
from scipy.spatial import Delaunay, ConvexHull
from sklearn.neighbors import NearestNeighbors
from sklearn.preprocessing import LabelEncoder
from scipy.spatial.distance import pdist
import numpy as np
import pandas as pd
from squidpy._docs import d, inject_docs
from squidpy._utils import NDArrayA
from squidpy.gr._utils import _save_data, _assert_spatial_basis, _assert_categorical_obs
from squidpy._constants._constants import RipleyStat
from squidpy._constants._pkg_constants import Key
__all__ = ["ripley"]
@d.dedent
@inject_docs(key=Key.obsm.spatial, rp=RipleyStat)
def ripley(
adata: AnnData,
cluster_key: str,
mode: Literal["F", "G", "L"] = "F",
spatial_key: str = Key.obsm.spatial,
metric: str = "euclidean",
n_neigh: int = 2,
n_simulations: int = 100,
n_observations: int = 1000,
max_dist: float | None = None,
n_steps: int = 50,
seed: int | None = None,
copy: bool = False,
) -> dict[str, pd.DataFrame | NDArrayA]:
r"""
Calculate various Ripley's statistics for point processes.
According to the `'mode'` argument, it calculates one of the following Ripley's statistics:
`{rp.F.s!r}`, `{rp.G.s!r}` or `{rp.L.s!r}` statistics.
`{rp.F.s!r}`, `{rp.G.s!r}` are defined as:
.. math::
F(t),G(t)=P( d_{{i,j}} \le t )
Where :math:`d_{{i,j}}` represents:
- distances to a random Spatial Poisson Point Process for `{rp.F.s!r}`.
- distances to any other point of the dataset for `{rp.G.s!r}`.
`{rp.L.s!r}` we first need to compute :math:`K(t)`, which is defined as:
.. math::
K(t) = \frac{{1}}{{\lambda}} \sum_{{i \ne j}} \frac{{I(d_{{i,j}}<t)}}{{n}}
and then we apply a variance-stabilizing transformation:
.. math::
L(t) = (\frac{{K(t)}}{{\pi}})^{{1/2}}
Parameters
----------
%(adata)s
%(cluster_key)s
mode
Which Ripley's statistic to compute.
%(spatial_key)s
metric
Which metric to use for computing distances.
For available metrics, check out :class:`sklearn.neighbors.DistanceMetric`.
n_neigh
Number of neighbors to consider for the KNN graph.
n_simulations
How many simulations to run for computing p-values.
n_observations
How many observations to generate for the Spatial Poisson Point Process.
max_dist
Maximum distances for the support. If `None`, `max_dist=`:math:`\sqrt{{area \over 2}}`.
n_steps
Number of steps for the support.
%(seed)s
%(copy)s
Returns
-------
%(ripley_stat_returns)s
References
----------
For reference, check out
`Wikipedia <https://en.wikipedia.org/wiki/Spatial_descriptive_statistics#Ripley's_K_and_L_functions>`_
or :cite:`Baddeley2015-lm`.
"""
_assert_categorical_obs(adata, key=cluster_key)
_assert_spatial_basis(adata, key=spatial_key)
coordinates = adata.obsm[spatial_key]
clusters = adata.obs[cluster_key].values
mode = RipleyStat(mode) # type: ignore[assignment]
if TYPE_CHECKING:
assert isinstance(mode, RipleyStat)
# prepare support
N = coordinates.shape[0]
hull = ConvexHull(coordinates)
area = hull.volume
if max_dist is None:
max_dist = (area / 2) ** 0.5
support = np.linspace(0, max_dist, n_steps)
# prepare labels
le = LabelEncoder().fit(clusters)
cluster_idx = le.transform(clusters)
obs_arr = np.empty((le.classes_.shape[0], n_steps))
start = logg.info(
f"Calculating Ripley's {mode} statistic for `{le.classes_.shape[0]}` clusters and `{n_simulations}` simulations"
)
for i in np.arange(np.max(cluster_idx) + 1):
coord_c = coordinates[cluster_idx == i, :]
if mode == RipleyStat.F:
random = _ppp(hull, n_simulations=1, n_observations=n_observations, seed=seed)
tree_c = NearestNeighbors(metric=metric, n_neighbors=n_neigh).fit(coord_c)
distances, _ = tree_c.kneighbors(random, n_neighbors=n_neigh)
bins, obs_stats = _f_g_function(distances.squeeze(), support)
elif mode == RipleyStat.G:
tree_c = NearestNeighbors(metric=metric, n_neighbors=n_neigh).fit(coord_c)
distances, _ = tree_c.kneighbors(coordinates[cluster_idx != i, :], n_neighbors=n_neigh)
bins, obs_stats = _f_g_function(distances.squeeze(), support)
elif mode == RipleyStat.L:
distances = pdist(coord_c, metric=metric)
bins, obs_stats = _l_function(distances, support, N, area)
else:
raise NotImplementedError(f"Mode `{mode.s!r}` is not yet implemented.")
obs_arr[i] = obs_stats
sims = np.empty((n_simulations, len(bins)))
pvalues = np.ones((le.classes_.shape[0], len(bins)))
for i in range(n_simulations):
random_i = _ppp(hull, n_simulations=1, n_observations=n_observations, seed=seed)
if mode == RipleyStat.F:
tree_i = NearestNeighbors(metric=metric, n_neighbors=n_neigh).fit(random_i)
distances_i, _ = tree_i.kneighbors(random, n_neighbors=1)
_, stats_i = _f_g_function(distances_i.squeeze(), support)
elif mode == RipleyStat.G:
tree_i = NearestNeighbors(metric=metric, n_neighbors=n_neigh).fit(random_i)
distances_i, _ = tree_i.kneighbors(coordinates, n_neighbors=1)
_, stats_i = _f_g_function(distances_i.squeeze(), support)
elif mode == RipleyStat.L:
distances_i = pdist(random_i, metric=metric)
_, stats_i = _l_function(distances_i, support, N, area)
else:
raise NotImplementedError(f"Mode `{mode.s!r}` is not yet implemented.")
for j in range(obs_arr.shape[0]):
pvalues[j] += stats_i >= obs_arr[j]
sims[i] = stats_i
pvalues /= n_simulations + 1
pvalues = np.minimum(pvalues, 1 - pvalues)
obs_df = _reshape_res(obs_arr.T, columns=le.classes_, index=bins, var_name=cluster_key)
sims_df = _reshape_res(sims.T, columns=np.arange(n_simulations), index=bins, var_name="simulations")
res = {f"{mode}_stat": obs_df, "sims_stat": sims_df, "bins": bins, "pvalues": pvalues}
if TYPE_CHECKING:
assert isinstance(res, dict)
if copy:
logg.info("Finish", time=start)
return res
_save_data(adata, attr="uns", key=Key.uns.ripley(cluster_key, mode), data=res, time=start)
def _reshape_res(results: NDArrayA, columns: NDArrayA | list[str], index: NDArrayA, var_name: str) -> pd.DataFrame:
df = | pd.DataFrame(results, columns=columns, index=index) | pandas.DataFrame |
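# Illustrative sketch, not from the source above: the helpers _f_g_function,
# _l_function and _ppp referenced in ripley() fall outside this row. Working only from
# the docstring's formulas, a hedged estimate of the L statistic over a support grid
# could look like this; the project's actual helper may normalise differently.
import numpy as np


def l_function_sketch(distances: np.ndarray, support: np.ndarray, n: int, area: float):
    """Estimate Ripley's L(t) from condensed pairwise distances (pdist output)."""
    intensity = n / area                                   # lambda in the docstring
    pairs_below = np.searchsorted(np.sort(distances), support, side="left")
    k_estimate = 2.0 * pairs_below / (n * intensity)       # pdist counts each unordered pair once
    l_estimate = np.sqrt(k_estimate / np.pi)
    return support, l_estimate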
# ---
# jupyter:
# jupytext:
# formats: ipynb,py:percent
# text_representation:
# extension: .py
# format_name: percent
# format_version: '1.3'
# jupytext_version: 1.13.0
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# %% [markdown]
# # Alignments
#
# This notebook analyzes page alignments and prepares metrics for final use.
# %% [markdown]
# ## Setup
#
# We begin by loading necessary libraries:
# %%
from pathlib import Path
import pandas as pd
import xarray as xr
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import gzip
import pickle
import binpickle
from natural.size import binarysize
# %%
codec = binpickle.codecs.Blosc('zstd')
# %% [markdown]
# Set up progress bar and logging support:
# %%
from tqdm.auto import tqdm
tqdm.pandas(leave=False)
# %%
import sys, logging
logging.basicConfig(level=logging.INFO, stream=sys.stderr)
log = logging.getLogger('alignment')
# %% [markdown]
# Import metric code:
# %%
# %load_ext autoreload
# %autoreload 1
# %%
# %aimport metrics
from trecdata import scan_runs
# %% [markdown]
# ## Loading Data
#
# We first load the page metadata:
# %%
pages = pd.read_json('data/trec_metadata_eval.json.gz', lines=True)
pages = pages.drop_duplicates('page_id')
pages.info()
# %% [markdown]
# Now we will load the evaluation topics:
# %%
eval_topics = pd.read_json('data/eval-topics-with-qrels.json.gz', lines=True)
eval_topics.info()
# %%
train_topics = pd.read_json('data/trec_topics.json.gz', lines=True)
train_topics.info()
# %% [markdown]
# Train and eval topics use a disjoint set of IDs:
# %%
train_topics['id'].describe()
# %%
eval_topics['id'].describe()
# %% [markdown]
# This allows us to create a single, integrated topics list for convenience:
# %%
topics = | pd.concat([train_topics, eval_topics], ignore_index=True) | pandas.concat |
# -*- coding: utf-8 -*-
# pylint: disable-msg=W0612,E1101
import itertools
import warnings
from warnings import catch_warnings
from datetime import datetime
from pandas.types.common import (is_integer_dtype,
is_float_dtype,
is_scalar)
from pandas.compat import range, lrange, lzip, StringIO, lmap
from pandas.tslib import NaT
from numpy import nan
from numpy.random import randn
import numpy as np
import pandas as pd
from pandas import option_context
from pandas.core.indexing import _non_reducing_slice, _maybe_numeric_slice
from pandas.core.api import (DataFrame, Index, Series, Panel, isnull,
MultiIndex, Timestamp, Timedelta, UInt64Index)
from pandas.formats.printing import pprint_thing
from pandas import concat
from pandas.core.common import PerformanceWarning
from pandas.tests.indexing.common import _mklbl
import pandas.util.testing as tm
from pandas import date_range
_verbose = False
# ------------------------------------------------------------------------
# Indexing test cases
def _generate_indices(f, values=False):
""" generate the indicies
if values is True , use the axis values
is False, use the range
"""
axes = f.axes
if values:
axes = [lrange(len(a)) for a in axes]
return itertools.product(*axes)
def _get_value(f, i, values=False):
""" return the value for the location i """
    # check against values
if values:
return f.values[i]
# this is equiv of f[col][row].....
# v = f
# for a in reversed(i):
# v = v.__getitem__(a)
# return v
with catch_warnings(record=True):
return f.ix[i]
def _get_result(obj, method, key, axis):
""" return the result for this obj with this key and this axis """
if isinstance(key, dict):
key = key[axis]
    # use an artificial conversion to map the key as integers to the labels
    # so ix can work for comparisons
if method == 'indexer':
method = 'ix'
key = obj._get_axis(axis)[key]
# in case we actually want 0 index slicing
try:
xp = getattr(obj, method).__getitem__(_axify(obj, key, axis))
except:
xp = getattr(obj, method).__getitem__(key)
return xp
def _axify(obj, key, axis):
# create a tuple accessor
axes = [slice(None)] * obj.ndim
axes[axis] = key
return tuple(axes)
class TestIndexing(tm.TestCase):
_objs = set(['series', 'frame', 'panel'])
_typs = set(['ints', 'uints', 'labels', 'mixed',
'ts', 'floats', 'empty', 'ts_rev'])
def setUp(self):
self.series_ints = Series(np.random.rand(4), index=lrange(0, 8, 2))
self.frame_ints = DataFrame(np.random.randn(4, 4),
index=lrange(0, 8, 2),
columns=lrange(0, 12, 3))
self.panel_ints = Panel(np.random.rand(4, 4, 4),
items=lrange(0, 8, 2),
major_axis=lrange(0, 12, 3),
minor_axis=lrange(0, 16, 4))
self.series_uints = Series(np.random.rand(4),
index=UInt64Index(lrange(0, 8, 2)))
self.frame_uints = DataFrame(np.random.randn(4, 4),
index=UInt64Index(lrange(0, 8, 2)),
columns=UInt64Index(lrange(0, 12, 3)))
self.panel_uints = Panel(np.random.rand(4, 4, 4),
items=UInt64Index(lrange(0, 8, 2)),
major_axis=UInt64Index(lrange(0, 12, 3)),
minor_axis=UInt64Index(lrange(0, 16, 4)))
self.series_labels = Series(np.random.randn(4), index=list('abcd'))
self.frame_labels = DataFrame(np.random.randn(4, 4),
index=list('abcd'), columns=list('ABCD'))
self.panel_labels = Panel(np.random.randn(4, 4, 4),
items=list('abcd'),
major_axis=list('ABCD'),
minor_axis=list('ZYXW'))
self.series_mixed = Series(np.random.randn(4), index=[2, 4, 'null', 8])
self.frame_mixed = DataFrame(np.random.randn(4, 4),
index=[2, 4, 'null', 8])
self.panel_mixed = Panel(np.random.randn(4, 4, 4),
items=[2, 4, 'null', 8])
self.series_ts = Series(np.random.randn(4),
index=date_range('20130101', periods=4))
self.frame_ts = DataFrame(np.random.randn(4, 4),
index=date_range('20130101', periods=4))
self.panel_ts = Panel(np.random.randn(4, 4, 4),
items=date_range('20130101', periods=4))
dates_rev = (date_range('20130101', periods=4)
.sort_values(ascending=False))
self.series_ts_rev = Series(np.random.randn(4),
index=dates_rev)
self.frame_ts_rev = DataFrame(np.random.randn(4, 4),
index=dates_rev)
self.panel_ts_rev = Panel(np.random.randn(4, 4, 4),
items=dates_rev)
self.frame_empty = DataFrame({})
self.series_empty = Series({})
self.panel_empty = Panel({})
# form agglomerates
for o in self._objs:
d = dict()
for t in self._typs:
d[t] = getattr(self, '%s_%s' % (o, t), None)
setattr(self, o, d)
def check_values(self, f, func, values=False):
if f is None:
return
axes = f.axes
indicies = itertools.product(*axes)
for i in indicies:
result = getattr(f, func)[i]
            # check against values
if values:
expected = f.values[i]
else:
expected = f
for a in reversed(i):
expected = expected.__getitem__(a)
tm.assert_almost_equal(result, expected)
def check_result(self, name, method1, key1, method2, key2, typs=None,
objs=None, axes=None, fails=None):
def _eq(t, o, a, obj, k1, k2):
""" compare equal for these 2 keys """
if a is not None and a > obj.ndim - 1:
return
def _print(result, error=None):
if error is not None:
error = str(error)
v = ("%-16.16s [%-16.16s]: [typ->%-8.8s,obj->%-8.8s,"
"key1->(%-4.4s),key2->(%-4.4s),axis->%s] %s" %
(name, result, t, o, method1, method2, a, error or ''))
if _verbose:
pprint_thing(v)
try:
rs = getattr(obj, method1).__getitem__(_axify(obj, k1, a))
try:
xp = _get_result(obj, method2, k2, a)
except:
result = 'no comp'
_print(result)
return
detail = None
try:
if is_scalar(rs) and is_scalar(xp):
self.assertEqual(rs, xp)
elif xp.ndim == 1:
tm.assert_series_equal(rs, xp)
elif xp.ndim == 2:
tm.assert_frame_equal(rs, xp)
elif xp.ndim == 3:
tm.assert_panel_equal(rs, xp)
result = 'ok'
except AssertionError as e:
detail = str(e)
result = 'fail'
# reverse the checks
if fails is True:
if result == 'fail':
result = 'ok (fail)'
_print(result)
if not result.startswith('ok'):
raise AssertionError(detail)
except AssertionError:
raise
except Exception as detail:
# if we are in fails, the ok, otherwise raise it
if fails is not None:
if isinstance(detail, fails):
result = 'ok (%s)' % type(detail).__name__
_print(result)
return
result = type(detail).__name__
raise AssertionError(_print(result, error=detail))
if typs is None:
typs = self._typs
if objs is None:
objs = self._objs
if axes is not None:
if not isinstance(axes, (tuple, list)):
axes = [axes]
else:
axes = list(axes)
else:
axes = [0, 1, 2]
# check
for o in objs:
if o not in self._objs:
continue
d = getattr(self, o)
for a in axes:
for t in typs:
if t not in self._typs:
continue
obj = d[t]
if obj is not None:
obj = obj.copy()
k2 = key2
_eq(t, o, a, obj, key1, k2)
def test_ix_deprecation(self):
# GH 15114
df = DataFrame({'A': [1, 2, 3]})
with tm.assert_produces_warning(DeprecationWarning,
check_stacklevel=False):
df.ix[1, 'A']
def test_indexer_caching(self):
# GH5727
# make sure that indexers are in the _internal_names_set
n = 1000001
arrays = [lrange(n), lrange(n)]
index = MultiIndex.from_tuples(lzip(*arrays))
s = Series(np.zeros(n), index=index)
str(s)
# setitem
expected = Series(np.ones(n), index=index)
s = Series(np.zeros(n), index=index)
s[s == 0] = 1
tm.assert_series_equal(s, expected)
def test_at_and_iat_get(self):
def _check(f, func, values=False):
if f is not None:
indicies = _generate_indices(f, values)
for i in indicies:
result = getattr(f, func)[i]
expected = _get_value(f, i, values)
tm.assert_almost_equal(result, expected)
for o in self._objs:
d = getattr(self, o)
# iat
for f in [d['ints'], d['uints']]:
_check(f, 'iat', values=True)
for f in [d['labels'], d['ts'], d['floats']]:
if f is not None:
self.assertRaises(ValueError, self.check_values, f, 'iat')
# at
for f in [d['ints'], d['uints'], d['labels'],
d['ts'], d['floats']]:
_check(f, 'at')
def test_at_and_iat_set(self):
def _check(f, func, values=False):
if f is not None:
indicies = _generate_indices(f, values)
for i in indicies:
getattr(f, func)[i] = 1
expected = _get_value(f, i, values)
tm.assert_almost_equal(expected, 1)
for t in self._objs:
d = getattr(self, t)
# iat
for f in [d['ints'], d['uints']]:
_check(f, 'iat', values=True)
for f in [d['labels'], d['ts'], d['floats']]:
if f is not None:
self.assertRaises(ValueError, _check, f, 'iat')
# at
for f in [d['ints'], d['uints'], d['labels'],
d['ts'], d['floats']]:
_check(f, 'at')
def test_at_iat_coercion(self):
# as timestamp is not a tuple!
dates = date_range('1/1/2000', periods=8)
df = DataFrame(randn(8, 4), index=dates, columns=['A', 'B', 'C', 'D'])
s = df['A']
result = s.at[dates[5]]
xp = s.values[5]
self.assertEqual(result, xp)
# GH 7729
# make sure we are boxing the returns
s = Series(['2014-01-01', '2014-02-02'], dtype='datetime64[ns]')
expected = Timestamp('2014-02-02')
for r in [lambda: s.iat[1], lambda: s.iloc[1]]:
result = r()
self.assertEqual(result, expected)
s = Series(['1 days', '2 days'], dtype='timedelta64[ns]')
expected = Timedelta('2 days')
for r in [lambda: s.iat[1], lambda: s.iloc[1]]:
result = r()
self.assertEqual(result, expected)
def test_iat_invalid_args(self):
pass
def test_imethods_with_dups(self):
# GH6493
# iat/iloc with dups
s = Series(range(5), index=[1, 1, 2, 2, 3], dtype='int64')
result = s.iloc[2]
self.assertEqual(result, 2)
result = s.iat[2]
self.assertEqual(result, 2)
self.assertRaises(IndexError, lambda: s.iat[10])
self.assertRaises(IndexError, lambda: s.iat[-10])
result = s.iloc[[2, 3]]
expected = Series([2, 3], [2, 2], dtype='int64')
tm.assert_series_equal(result, expected)
df = s.to_frame()
result = df.iloc[2]
expected = Series(2, index=[0], name=2)
tm.assert_series_equal(result, expected)
result = df.iat[2, 0]
expected = 2
self.assertEqual(result, 2)
def test_repeated_getitem_dups(self):
# GH 5678
        # repeated getitems on a dup index returning an ndarray
df = DataFrame(
np.random.random_sample((20, 5)),
index=['ABCDE' [x % 5] for x in range(20)])
expected = df.loc['A', 0]
result = df.loc[:, 0].loc['A']
tm.assert_series_equal(result, expected)
def test_iloc_exceeds_bounds(self):
# GH6296
# iloc should allow indexers that exceed the bounds
df = DataFrame(np.random.random_sample((20, 5)), columns=list('ABCDE'))
expected = df
        # lists of positions should raise IndexError!
with tm.assertRaisesRegexp(IndexError,
'positional indexers are out-of-bounds'):
df.iloc[:, [0, 1, 2, 3, 4, 5]]
self.assertRaises(IndexError, lambda: df.iloc[[1, 30]])
self.assertRaises(IndexError, lambda: df.iloc[[1, -30]])
self.assertRaises(IndexError, lambda: df.iloc[[100]])
s = df['A']
self.assertRaises(IndexError, lambda: s.iloc[[100]])
self.assertRaises(IndexError, lambda: s.iloc[[-100]])
# still raise on a single indexer
msg = 'single positional indexer is out-of-bounds'
with tm.assertRaisesRegexp(IndexError, msg):
df.iloc[30]
self.assertRaises(IndexError, lambda: df.iloc[-30])
# GH10779
# single positive/negative indexer exceeding Series bounds should raise
# an IndexError
with tm.assertRaisesRegexp(IndexError, msg):
s.iloc[30]
self.assertRaises(IndexError, lambda: s.iloc[-30])
# slices are ok
result = df.iloc[:, 4:10] # 0 < start < len < stop
expected = df.iloc[:, 4:]
tm.assert_frame_equal(result, expected)
result = df.iloc[:, -4:-10] # stop < 0 < start < len
expected = df.iloc[:, :0]
tm.assert_frame_equal(result, expected)
result = df.iloc[:, 10:4:-1] # 0 < stop < len < start (down)
expected = df.iloc[:, :4:-1]
tm.assert_frame_equal(result, expected)
result = df.iloc[:, 4:-10:-1] # stop < 0 < start < len (down)
expected = df.iloc[:, 4::-1]
tm.assert_frame_equal(result, expected)
result = df.iloc[:, -10:4] # start < 0 < stop < len
expected = df.iloc[:, :4]
tm.assert_frame_equal(result, expected)
result = df.iloc[:, 10:4] # 0 < stop < len < start
expected = df.iloc[:, :0]
tm.assert_frame_equal(result, expected)
result = df.iloc[:, -10:-11:-1] # stop < start < 0 < len (down)
expected = df.iloc[:, :0]
tm.assert_frame_equal(result, expected)
result = df.iloc[:, 10:11] # 0 < len < start < stop
expected = df.iloc[:, :0]
tm.assert_frame_equal(result, expected)
# slice bounds exceeding is ok
result = s.iloc[18:30]
expected = s.iloc[18:]
tm.assert_series_equal(result, expected)
result = s.iloc[30:]
expected = s.iloc[:0]
tm.assert_series_equal(result, expected)
result = s.iloc[30::-1]
expected = s.iloc[::-1]
tm.assert_series_equal(result, expected)
# doc example
def check(result, expected):
str(result)
result.dtypes
tm.assert_frame_equal(result, expected)
dfl = DataFrame(np.random.randn(5, 2), columns=list('AB'))
check(dfl.iloc[:, 2:3], DataFrame(index=dfl.index))
check(dfl.iloc[:, 1:3], dfl.iloc[:, [1]])
check(dfl.iloc[4:6], dfl.iloc[[4]])
self.assertRaises(IndexError, lambda: dfl.iloc[[4, 5, 6]])
self.assertRaises(IndexError, lambda: dfl.iloc[:, 4])
def test_iloc_getitem_int(self):
# integer
self.check_result('integer', 'iloc', 2, 'ix',
{0: 4, 1: 6, 2: 8}, typs=['ints', 'uints'])
self.check_result('integer', 'iloc', 2, 'indexer', 2,
typs=['labels', 'mixed', 'ts', 'floats', 'empty'],
fails=IndexError)
def test_iloc_getitem_neg_int(self):
# neg integer
self.check_result('neg int', 'iloc', -1, 'ix',
{0: 6, 1: 9, 2: 12}, typs=['ints', 'uints'])
self.check_result('neg int', 'iloc', -1, 'indexer', -1,
typs=['labels', 'mixed', 'ts', 'floats', 'empty'],
fails=IndexError)
def test_iloc_getitem_list_int(self):
# list of ints
self.check_result('list int', 'iloc', [0, 1, 2], 'ix',
{0: [0, 2, 4], 1: [0, 3, 6], 2: [0, 4, 8]},
typs=['ints', 'uints'])
self.check_result('list int', 'iloc', [2], 'ix',
{0: [4], 1: [6], 2: [8]}, typs=['ints', 'uints'])
self.check_result('list int', 'iloc', [0, 1, 2], 'indexer', [0, 1, 2],
typs=['labels', 'mixed', 'ts', 'floats', 'empty'],
fails=IndexError)
# array of ints (GH5006), make sure that a single indexer is returning
# the correct type
self.check_result('array int', 'iloc', np.array([0, 1, 2]), 'ix',
{0: [0, 2, 4],
1: [0, 3, 6],
2: [0, 4, 8]}, typs=['ints', 'uints'])
self.check_result('array int', 'iloc', np.array([2]), 'ix',
{0: [4], 1: [6], 2: [8]}, typs=['ints', 'uints'])
self.check_result('array int', 'iloc', np.array([0, 1, 2]), 'indexer',
[0, 1, 2],
typs=['labels', 'mixed', 'ts', 'floats', 'empty'],
fails=IndexError)
def test_iloc_getitem_neg_int_can_reach_first_index(self):
# GH10547 and GH10779
# negative integers should be able to reach index 0
df = DataFrame({'A': [2, 3, 5], 'B': [7, 11, 13]})
s = df['A']
expected = df.iloc[0]
result = df.iloc[-3]
tm.assert_series_equal(result, expected)
expected = df.iloc[[0]]
result = df.iloc[[-3]]
tm.assert_frame_equal(result, expected)
expected = s.iloc[0]
result = s.iloc[-3]
self.assertEqual(result, expected)
expected = s.iloc[[0]]
result = s.iloc[[-3]]
tm.assert_series_equal(result, expected)
# check the length 1 Series case highlighted in GH10547
expected = pd.Series(['a'], index=['A'])
result = expected.iloc[[-1]]
tm.assert_series_equal(result, expected)
def test_iloc_getitem_dups(self):
# no dups in panel (bug?)
self.check_result('list int (dups)', 'iloc', [0, 1, 1, 3], 'ix',
{0: [0, 2, 2, 6], 1: [0, 3, 3, 9]},
objs=['series', 'frame'], typs=['ints', 'uints'])
# GH 6766
df1 = DataFrame([{'A': None, 'B': 1}, {'A': 2, 'B': 2}])
df2 = DataFrame([{'A': 3, 'B': 3}, {'A': 4, 'B': 4}])
df = concat([df1, df2], axis=1)
# cross-sectional indexing
result = df.iloc[0, 0]
self.assertTrue(isnull(result))
result = df.iloc[0, :]
expected = Series([np.nan, 1, 3, 3], index=['A', 'B', 'A', 'B'],
name=0)
tm.assert_series_equal(result, expected)
def test_iloc_getitem_array(self):
# array like
s = Series(index=lrange(1, 4))
self.check_result('array like', 'iloc', s.index, 'ix',
{0: [2, 4, 6], 1: [3, 6, 9], 2: [4, 8, 12]},
typs=['ints', 'uints'])
def test_iloc_getitem_bool(self):
# boolean indexers
b = [True, False, True, False, ]
self.check_result('bool', 'iloc', b, 'ix', b, typs=['ints', 'uints'])
self.check_result('bool', 'iloc', b, 'ix', b,
typs=['labels', 'mixed', 'ts', 'floats', 'empty'],
fails=IndexError)
def test_iloc_getitem_slice(self):
# slices
self.check_result('slice', 'iloc', slice(1, 3), 'ix',
{0: [2, 4], 1: [3, 6], 2: [4, 8]},
typs=['ints', 'uints'])
self.check_result('slice', 'iloc', slice(1, 3), 'indexer',
slice(1, 3),
typs=['labels', 'mixed', 'ts', 'floats', 'empty'],
fails=IndexError)
def test_iloc_getitem_slice_dups(self):
df1 = DataFrame(np.random.randn(10, 4), columns=['A', 'A', 'B', 'B'])
df2 = DataFrame(np.random.randint(0, 10, size=20).reshape(10, 2),
columns=['A', 'C'])
# axis=1
df = concat([df1, df2], axis=1)
tm.assert_frame_equal(df.iloc[:, :4], df1)
tm.assert_frame_equal(df.iloc[:, 4:], df2)
df = concat([df2, df1], axis=1)
tm.assert_frame_equal(df.iloc[:, :2], df2)
tm.assert_frame_equal(df.iloc[:, 2:], df1)
exp = concat([df2, df1.iloc[:, [0]]], axis=1)
tm.assert_frame_equal(df.iloc[:, 0:3], exp)
# axis=0
df = concat([df, df], axis=0)
tm.assert_frame_equal(df.iloc[0:10, :2], df2)
tm.assert_frame_equal(df.iloc[0:10, 2:], df1)
tm.assert_frame_equal(df.iloc[10:, :2], df2)
tm.assert_frame_equal(df.iloc[10:, 2:], df1)
def test_iloc_setitem(self):
df = self.frame_ints
df.iloc[1, 1] = 1
result = df.iloc[1, 1]
self.assertEqual(result, 1)
df.iloc[:, 2:3] = 0
expected = df.iloc[:, 2:3]
result = df.iloc[:, 2:3]
tm.assert_frame_equal(result, expected)
# GH5771
s = Series(0, index=[4, 5, 6])
s.iloc[1:2] += 1
expected = Series([0, 1, 0], index=[4, 5, 6])
tm.assert_series_equal(s, expected)
def test_loc_setitem_slice(self):
# GH10503
# assigning the same type should not change the type
df1 = DataFrame({'a': [0, 1, 1],
'b': Series([100, 200, 300], dtype='uint32')})
ix = df1['a'] == 1
newb1 = df1.loc[ix, 'b'] + 1
df1.loc[ix, 'b'] = newb1
expected = DataFrame({'a': [0, 1, 1],
'b': Series([100, 201, 301], dtype='uint32')})
tm.assert_frame_equal(df1, expected)
# assigning a new type should get the inferred type
df2 = DataFrame({'a': [0, 1, 1], 'b': [100, 200, 300]},
dtype='uint64')
ix = df1['a'] == 1
newb2 = df2.loc[ix, 'b']
df1.loc[ix, 'b'] = newb2
expected = DataFrame({'a': [0, 1, 1], 'b': [100, 200, 300]},
dtype='uint64')
tm.assert_frame_equal(df2, expected)
def test_ix_loc_setitem_consistency(self):
# GH 5771
# loc with slice and series
s = Series(0, index=[4, 5, 6])
s.loc[4:5] += 1
expected = Series([1, 1, 0], index=[4, 5, 6])
tm.assert_series_equal(s, expected)
# GH 5928
# chained indexing assignment
df = DataFrame({'a': [0, 1, 2]})
expected = df.copy()
with catch_warnings(record=True):
expected.ix[[0, 1, 2], 'a'] = -expected.ix[[0, 1, 2], 'a']
with catch_warnings(record=True):
df['a'].ix[[0, 1, 2]] = -df['a'].ix[[0, 1, 2]]
tm.assert_frame_equal(df, expected)
df = DataFrame({'a': [0, 1, 2], 'b': [0, 1, 2]})
with catch_warnings(record=True):
df['a'].ix[[0, 1, 2]] = -df['a'].ix[[0, 1, 2]].astype(
'float64') + 0.5
expected = DataFrame({'a': [0.5, -0.5, -1.5], 'b': [0, 1, 2]})
tm.assert_frame_equal(df, expected)
# GH 8607
# ix setitem consistency
df = DataFrame({'timestamp': [1413840976, 1413842580, 1413760580],
'delta': [1174, 904, 161],
'elapsed': [7673, 9277, 1470]})
expected = DataFrame({'timestamp': pd.to_datetime(
[1413840976, 1413842580, 1413760580], unit='s'),
'delta': [1174, 904, 161],
'elapsed': [7673, 9277, 1470]})
df2 = df.copy()
df2['timestamp'] = pd.to_datetime(df['timestamp'], unit='s')
tm.assert_frame_equal(df2, expected)
df2 = df.copy()
df2.loc[:, 'timestamp'] = pd.to_datetime(df['timestamp'], unit='s')
tm.assert_frame_equal(df2, expected)
df2 = df.copy()
with catch_warnings(record=True):
df2.ix[:, 2] = pd.to_datetime(df['timestamp'], unit='s')
tm.assert_frame_equal(df2, expected)
def test_ix_loc_consistency(self):
# GH 8613
# some edge cases where ix/loc should return the same
# this is not an exhaustive case
def compare(result, expected):
if is_scalar(expected):
self.assertEqual(result, expected)
else:
self.assertTrue(expected.equals(result))
# failure cases for .loc, but these work for .ix
df = pd.DataFrame(np.random.randn(5, 4), columns=list('ABCD'))
for key in [slice(1, 3), tuple([slice(0, 2), slice(0, 2)]),
tuple([slice(0, 2), df.columns[0:2]])]:
for index in [tm.makeStringIndex, tm.makeUnicodeIndex,
tm.makeDateIndex, tm.makePeriodIndex,
tm.makeTimedeltaIndex]:
df.index = index(len(df.index))
with catch_warnings(record=True):
df.ix[key]
self.assertRaises(TypeError, lambda: df.loc[key])
df = pd.DataFrame(np.random.randn(5, 4), columns=list('ABCD'),
index=pd.date_range('2012-01-01', periods=5))
for key in ['2012-01-03',
'2012-01-31',
slice('2012-01-03', '2012-01-03'),
slice('2012-01-03', '2012-01-04'),
slice('2012-01-03', '2012-01-06', 2),
slice('2012-01-03', '2012-01-31'),
tuple([[True, True, True, False, True]]), ]:
# getitem
# if the expected raises, then compare the exceptions
try:
with catch_warnings(record=True):
expected = df.ix[key]
except KeyError:
self.assertRaises(KeyError, lambda: df.loc[key])
continue
result = df.loc[key]
compare(result, expected)
# setitem
df1 = df.copy()
df2 = df.copy()
with catch_warnings(record=True):
df1.ix[key] = 10
df2.loc[key] = 10
compare(df2, df1)
# edge cases
s = Series([1, 2, 3, 4], index=list('abde'))
result1 = s['a':'c']
with catch_warnings(record=True):
result2 = s.ix['a':'c']
result3 = s.loc['a':'c']
tm.assert_series_equal(result1, result2)
tm.assert_series_equal(result1, result3)
# now work rather than raising KeyError
s = Series(range(5), [-2, -1, 1, 2, 3])
with catch_warnings(record=True):
result1 = s.ix[-10:3]
result2 = s.loc[-10:3]
tm.assert_series_equal(result1, result2)
with catch_warnings(record=True):
result1 = s.ix[0:3]
result2 = s.loc[0:3]
tm.assert_series_equal(result1, result2)
def test_loc_setitem_dups(self):
# GH 6541
df_orig = DataFrame(
{'me': list('rttti'),
'foo': list('aaade'),
'bar': np.arange(5, dtype='float64') * 1.34 + 2,
'bar2': np.arange(5, dtype='float64') * -.34 + 2}).set_index('me')
indexer = tuple(['r', ['bar', 'bar2']])
df = df_orig.copy()
df.loc[indexer] *= 2.0
tm.assert_series_equal(df.loc[indexer], 2.0 * df_orig.loc[indexer])
indexer = tuple(['r', 'bar'])
df = df_orig.copy()
df.loc[indexer] *= 2.0
self.assertEqual(df.loc[indexer], 2.0 * df_orig.loc[indexer])
indexer = tuple(['t', ['bar', 'bar2']])
df = df_orig.copy()
df.loc[indexer] *= 2.0
tm.assert_frame_equal(df.loc[indexer], 2.0 * df_orig.loc[indexer])
def test_iloc_setitem_dups(self):
# GH 6766
# iloc with a mask aligning from another iloc
df1 = DataFrame([{'A': None, 'B': 1}, {'A': 2, 'B': 2}])
df2 = DataFrame([{'A': 3, 'B': 3}, {'A': 4, 'B': 4}])
df = concat([df1, df2], axis=1)
expected = df.fillna(3)
expected['A'] = expected['A'].astype('float64')
inds = np.isnan(df.iloc[:, 0])
mask = inds[inds].index
df.iloc[mask, 0] = df.iloc[mask, 2]
tm.assert_frame_equal(df, expected)
# del a dup column across blocks
expected = DataFrame({0: [1, 2], 1: [3, 4]})
expected.columns = ['B', 'B']
del df['A']
tm.assert_frame_equal(df, expected)
# assign back to self
df.iloc[[0, 1], [0, 1]] = df.iloc[[0, 1], [0, 1]]
tm.assert_frame_equal(df, expected)
# reversed x 2
df.iloc[[1, 0], [0, 1]] = df.iloc[[1, 0], [0, 1]].reset_index(
drop=True)
df.iloc[[1, 0], [0, 1]] = df.iloc[[1, 0], [0, 1]].reset_index(
drop=True)
tm.assert_frame_equal(df, expected)
def test_chained_getitem_with_lists(self):
# GH6394
# Regression in chained getitem indexing with embedded list-like from
# 0.12
def check(result, expected):
tm.assert_numpy_array_equal(result, expected)
tm.assertIsInstance(result, np.ndarray)
df = DataFrame({'A': 5 * [np.zeros(3)], 'B': 5 * [np.ones(3)]})
expected = df['A'].iloc[2]
result = df.loc[2, 'A']
check(result, expected)
result2 = df.iloc[2]['A']
check(result2, expected)
result3 = df['A'].loc[2]
check(result3, expected)
result4 = df['A'].iloc[2]
check(result4, expected)
def test_loc_getitem_int(self):
# int label
self.check_result('int label', 'loc', 2, 'ix', 2,
typs=['ints', 'uints'], axes=0)
self.check_result('int label', 'loc', 3, 'ix', 3,
typs=['ints', 'uints'], axes=1)
self.check_result('int label', 'loc', 4, 'ix', 4,
typs=['ints', 'uints'], axes=2)
self.check_result('int label', 'loc', 2, 'ix', 2,
typs=['label'], fails=KeyError)
def test_loc_getitem_label(self):
# label
self.check_result('label', 'loc', 'c', 'ix', 'c', typs=['labels'],
axes=0)
self.check_result('label', 'loc', 'null', 'ix', 'null', typs=['mixed'],
axes=0)
self.check_result('label', 'loc', 8, 'ix', 8, typs=['mixed'], axes=0)
self.check_result('label', 'loc', Timestamp('20130102'), 'ix', 1,
typs=['ts'], axes=0)
self.check_result('label', 'loc', 'c', 'ix', 'c', typs=['empty'],
fails=KeyError)
def test_loc_getitem_label_out_of_range(self):
# out of range label
self.check_result('label range', 'loc', 'f', 'ix', 'f',
typs=['ints', 'uints', 'labels', 'mixed', 'ts'],
fails=KeyError)
self.check_result('label range', 'loc', 'f', 'ix', 'f',
typs=['floats'], fails=TypeError)
self.check_result('label range', 'loc', 20, 'ix', 20,
typs=['ints', 'uints', 'mixed'], fails=KeyError)
self.check_result('label range', 'loc', 20, 'ix', 20,
typs=['labels'], fails=TypeError)
self.check_result('label range', 'loc', 20, 'ix', 20, typs=['ts'],
axes=0, fails=TypeError)
self.check_result('label range', 'loc', 20, 'ix', 20, typs=['floats'],
axes=0, fails=TypeError)
def test_loc_getitem_label_list(self):
# list of labels
self.check_result('list lbl', 'loc', [0, 2, 4], 'ix', [0, 2, 4],
typs=['ints', 'uints'], axes=0)
self.check_result('list lbl', 'loc', [3, 6, 9], 'ix', [3, 6, 9],
typs=['ints', 'uints'], axes=1)
self.check_result('list lbl', 'loc', [4, 8, 12], 'ix', [4, 8, 12],
typs=['ints', 'uints'], axes=2)
self.check_result('list lbl', 'loc', ['a', 'b', 'd'], 'ix',
['a', 'b', 'd'], typs=['labels'], axes=0)
self.check_result('list lbl', 'loc', ['A', 'B', 'C'], 'ix',
['A', 'B', 'C'], typs=['labels'], axes=1)
self.check_result('list lbl', 'loc', ['Z', 'Y', 'W'], 'ix',
['Z', 'Y', 'W'], typs=['labels'], axes=2)
self.check_result('list lbl', 'loc', [2, 8, 'null'], 'ix',
[2, 8, 'null'], typs=['mixed'], axes=0)
self.check_result('list lbl', 'loc',
[ | Timestamp('20130102') | pandas.core.api.Timestamp |