prompt (stringlengths 19–1.03M) | completion (stringlengths 4–2.12k) | api (stringlengths 8–90)
---|---|---|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import pandas as pd
from covsirphy.util.term import Term
from covsirphy.loading.db_base import _RemoteDatabase
class _CSJapan(_RemoteDatabase):
"""
Access "COVID-19 Dataset in Japan.
https://github.com/lisphilar/covid19-sir/tree/master/data
Args:
filename (str): CSV filename to save records
"""
# URL
GITHUB_URL = "https://raw.githubusercontent.com"
URL_C = f"{GITHUB_URL}/lisphilar/covid19-sir/master/data/japan/covid_jpn_total.csv"
URL_P = f"{GITHUB_URL}/lisphilar/covid19-sir/master/data/japan/covid_jpn_prefecture.csv"
# Citation
CITATION = "Hirokazu Takaya (2020-2021), COVID-19 dataset in Japan, GitHub repository, " \
"https://github.com/lisphilar/covid19-sir/data/japan"
# Column names and data types
# {"name in database": "name defined in Term class"}
COL_DICT = {
"Date": Term.DATE,
Term.COUNTRY: Term.COUNTRY,
"Area": Term.PROVINCE,
Term.ISO3: Term.ISO3,
"Positive": Term.C,
"Fatal": Term.F,
"Discharged": Term.R,
"Hosp_require": "Hosp_require",
Term.MODERATE: Term.MODERATE,
"Hosp_severe": Term.SEVERE,
"Tested": Term.TESTS,
Term.VAC: Term.VAC,
Term.V_ONCE: Term.V_ONCE,
Term.V_FULL: Term.V_FULL,
}
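# Illustration only: with this mapping the combined raw frame can be renamed to the
# Term-defined column names via e.g. df.rename(columns=self.COL_DICT); the actual
# renaming is assumed to be handled by the _RemoteDatabase base class.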
def download(self, verbose):
"""
Download the dataset from the server and set the list of primary sources.
Args:
verbose (int): level of verbosity
Returns:
pandas.DataFrame
Index
reset index
Columns
defined by the first values of self.COL_DICT.values()
Note:
If @verbose is equal to or greater than 1, the data source URL will be printed.
"""
# Download datasets
if verbose:
print("Retrieving COVID-19 dataset in Japan from https://github.com/lisphilar/covid19-sir/data/japan")
# Domestic/Airport/Returnee
dar_value_cols = ["Positive", "Tested", "Discharged", "Fatal", "Hosp_require", "Hosp_severe"]
dar_cols = [*dar_value_cols, "Date", "Location", "Vaccinated_1st", "Vaccinated_2nd"]
dar_df = pd.read_csv(self.URL_C, usecols=dar_cols)
dar_df = dar_df.rename(columns={"Location": "Area"}).set_index("Date")
# Country level data
c_df = dar_df.groupby("Date").sum().reset_index()
c_df["Area"] = self.UNKNOWN
# Entering (= Airport + Returnee)
e_df = dar_df.loc[dar_df["Area"].isin(["Airport", "Returnee"])].groupby("Date").sum().reset_index()
e_df["Area"] = "Entering"
# Province level data
p_cols = [*dar_value_cols, "Date", "Prefecture"]
p_df = pd.read_csv(self.URL_P, usecols=p_cols)
p_df = p_df.rename(columns={"Prefecture": "Area"})
# Combine
df = | pd.concat([c_df, e_df, p_df], axis=0, ignore_index=True, sort=True) | pandas.concat |
from datetime import datetime, timedelta
import operator
import pickle
import unittest
import numpy as np
from pandas.core.index import Index, Factor, MultiIndex, NULL_INDEX
from pandas.util.testing import assert_almost_equal
import pandas.util.testing as tm
import pandas._tseries as tseries
class TestIndex(unittest.TestCase):
def setUp(self):
self.strIndex = tm.makeStringIndex(100)
self.dateIndex = tm.makeDateIndex(100)
self.intIndex = tm.makeIntIndex(100)
self.empty = Index([])
self.tuples = Index(zip(['foo', 'bar', 'baz'], [1, 2, 3]))
def test_hash_error(self):
self.assertRaises(TypeError, hash, self.strIndex)
def test_deepcopy(self):
from copy import deepcopy
copy = deepcopy(self.strIndex)
self.assert_(copy is self.strIndex)
def test_duplicates(self):
idx = Index([0, 0, 0])
self.assertRaises(Exception, idx._verify_integrity)
def test_sort(self):
self.assertRaises(Exception, self.strIndex.sort)
def test_mutability(self):
self.assertRaises(Exception, self.strIndex.__setitem__, 5, 0)
self.assertRaises(Exception, self.strIndex.__setitem__, slice(1,5), 0)
def test_constructor(self):
# regular instance creation
tm.assert_contains_all(self.strIndex, self.strIndex)
tm.assert_contains_all(self.dateIndex, self.dateIndex)
# casting
arr = np.array(self.strIndex)
index = arr.view(Index)
tm.assert_contains_all(arr, index)
self.assert_(np.array_equal(self.strIndex, index))
# what to do here?
# arr = np.array(5.)
# self.assertRaises(Exception, arr.view, Index)
def test_constructor_corner(self):
# corner case
self.assertRaises(Exception, Index, 0)
def test_compat(self):
self.strIndex.tolist()
def test_equals(self):
# same
self.assert_(Index(['a', 'b', 'c']).equals(Index(['a', 'b', 'c'])))
# different length
self.assertFalse(Index(['a', 'b', 'c']).equals(Index(['a', 'b'])))
# same length, different values
self.assertFalse(Index(['a', 'b', 'c']).equals(Index(['a', 'b', 'd'])))
# Must also be an Index
self.assertFalse( | Index(['a', 'b', 'c']) | pandas.core.index.Index |
import pandas as pd
#import html5lib as html5lib
# TODO: Load up the table, and extract the dataset
# out of it. If you're having issues with this, look
# carefully at the sample code provided in the reading
#
# .. your code here ..
#df = pd.read_html('http://espn.go.com/nhl/statistics/player/_/stat/points/sort/points/year/2015/seasontype/2')
htmlstr = 'http://espn.go.com/nhl/statistics/player/_/stat/points/sort/points/year/2015/seasontype/2'
df = pd.read_html(htmlstr)[0]
columns = df.iloc[1,:]
df.columns = columns
col_num = len(columns)
df_1 = df.dropna(thresh=col_num - 4)  # drop rows with more than 4 NaN values
df_1 = df_1[df_1.RK != 'RK']  # remove the repeated header rows
#df_1 = df_1[(df.RK != 'RK')]
#df_1 = df_1.iloc[:,1:]
df_1.iloc[:,1] = | pd.to_numeric(df.iloc[:,1],errors='coerce') | pandas.to_numeric |
import pandas as pd
import pandas.testing as tm
print(pd.__version__)
s = | pd.Series([1, 2, 3]) | pandas.Series |
import sys
import time
from pathlib import Path
import pandas as pd
import numpy as np
import xgboost as xgb
import lightgbm as lgb
import catboost
import mlflow
import hydra
import pickle
import shutil
import pprint
import warnings
from typing import List, Tuple, Any
from omegaconf.dictconfig import DictConfig
from sklearn.metrics import roc_auc_score
from src.util.get_environment import get_exec_env, get_datadir, is_gpu, is_ipykernel
from src.util.fast_fillna import fast_fillna
from src.models.PurgedGroupTimeSeriesSplit import PurgedGroupTimeSeriesSplit
from src.util.calc_utility_score import utility_score_pd_scaled
from src.util.calc_cross_feature import calc_cross_feature
warnings.filterwarnings("ignore")
def create_janeapi() -> Tuple[Any, Any]:
DATA_DIR = get_datadir()
if get_exec_env() not in ['kaggle-Interactive', 'kaggle-Batch']:
sys.path.append(f'{DATA_DIR}/raw')
import janestreet
env = janestreet.make_env() # initialize the environment
iter_test = env.iter_test() # an iterator which loops over the test set
return env, iter_test
def predict(models: List[Any], feature_engineering: DictConfig, target: str, OUT_DIR: str) -> None:
'''
Note: Be aware of the performance in the 'for' loop!
Prediction API generates 1*130 DataFrame per iteration.
Which means you need to process every record separately. (no vectorization)
If the performance in 'for' loop is poor, it'll result in submission timeout.
'''
env, iter_test = create_janeapi()
feat_cols = [f'feature_{i}' for i in range(130)]
print('Start predicting')
time_start = time.time()
if feature_engineering.method_fillna == 'forward':
tmp = np.zeros(len(feat_cols)) # this np.ndarray will contain last seen values for features
for (test_df, pred_df) in iter_test: # iter_test generates test_df(1,130)
if test_df['weight'].item() > 0: # cut-off by weight
# For forward fillna, using high-performance logic by <NAME>
if feature_engineering.method_fillna == 'forward':
x_tt = test_df.loc[:, feat_cols].values # this is (1,130) ndarray([[values...]])
x_tt[0, :] = fast_fillna(x_tt[0, :], tmp) # use values in tmp to replace nan
tmp = x_tt[0, :] # save last seen values to tmp
elif feature_engineering.method_fillna == '-999':
X_test = test_df.loc[:, feat_cols]
X_test.fillna(-999)
x_tt = X_test.values
else:
raise ValueError(f'Invalid feature_engineering.method_fillna: {feature_engineering.method_fillna}')
if feature_engineering.cross:
x_tt = calc_cross_feature(x_tt)
y_pred: np.ndarray = 0.
for model in models:
y_pred += model.predict(x_tt) / len(models)
y_pred = y_pred > 0
pred_df[target] = y_pred.astype(int)
else:
pred_df[target] = 0
env.predict(pred_df)
elapsed_time = time.time() - time_start
test_len = 15219 # length of test data (for developing API)
print('End predicting')
print(f'Prediction time: {elapsed_time} s, Prediction speed: {test_len / elapsed_time} iter/s')
# move submission file generated by env into experiment directory
shutil.move('submission.csv', f'{OUT_DIR}/submission.csv')
return None
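# A minimal sketch of the forward-fill idea used above (an assumption for illustration;
# the real implementation lives in src.util.fast_fillna): NaNs in the current 1x130 row
# are replaced by the last seen value of each feature.
def _fillna_with_last_seen_sketch(row: np.ndarray, last_seen: np.ndarray) -> np.ndarray:
    filled = row.copy()
    mask = np.isnan(filled)
    filled[mask] = last_seen[mask]  # reuse the previously observed value for each NaN feature
    return filled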
def get_model(model_name: str, model_param: DictConfig) -> Any:
if model_name == 'XGBClassifier':
if is_gpu(): # check if you're utilizing gpu if present
assert model_param.tree_method == 'gpu_hist'
return xgb.XGBClassifier(**model_param)
elif model_name == 'LGBMClassifier':
return lgb.LGBMClassifier(**model_param)
elif model_name == 'CatBoostClassifier':
return catboost.CatBoostClassifier(**model_param)
else:
raise ValueError(f'Invalid model_name: {model_name}')
def train_full(
train: pd.DataFrame,
features: List[str],
target: str,
model_name: str,
model_param: DictConfig,
train_param: DictConfig,
OUT_DIR: str
) -> None:
print('Start training')
X_train = train.loc[:, features].values
y_train = train.loc[:, target].values
model = get_model(model_name, model_param)
model.fit(X_train, y_train, **train_param)
file = f'{OUT_DIR}/model_0.pkl'
pickle.dump(model, open(file, 'wb'))
mlflow.log_artifact(file)
print('End training')
return None
def train_cv(
train: pd.DataFrame,
features: List[str],
target: str,
model_name: str,
model_param: DictConfig,
train_param: DictConfig,
cv_param: DictConfig,
OUT_DIR: str
) -> None:
kf = PurgedGroupTimeSeriesSplit(**cv_param)
scores = []
for fold, (tr, te) in enumerate(kf.split(train[target].values, train[target].values, train['date'].values)):
print(f'Starting fold: {fold}, train size: {len(tr)}, validation size: {len(te)}')
X_tr, X_val = train.loc[tr, features].values, train.loc[te, features].values
y_tr, y_val = train.loc[tr, target].values, train.loc[te, target].values
model = get_model(model_name, model_param)
model.fit(X_tr, y_tr,
eval_set=[(X_tr, y_tr), (X_val, y_val)],
**train_param)
pred_tr, pred_val = model.predict(X_tr), model.predict(X_val)
auc = roc_auc_score(y_val, pred_val)
utility_tr = utility_score_pd_scaled(
train.loc[tr, 'date'].values,
train.loc[tr, 'weight'].values,
train.loc[tr, 'resp'].values,
pred_tr)
utility_val = utility_score_pd_scaled(
train.loc[te, 'date'].values,
train.loc[te, 'weight'].values,
train.loc[te, 'resp'].values,
pred_val)
score = {'fold': fold, 'auc': auc, 'utility_tr': utility_tr, 'utility_val': utility_val}
mlflow.log_metrics(score)
scores.append(score)
pprint.pprint(score)
file = f'{OUT_DIR}/model_{fold}.pkl'
pickle.dump(model, open(file, 'wb'))
mlflow.log_artifact(file)
del model, X_tr, X_val, y_tr, y_val
return None
@hydra.main(config_path="../config/train_v1", config_name="config")
def main(cfg: DictConfig) -> None:
pprint.pprint(dict(cfg))
DATA_DIR = get_datadir()
OUT_DIR = f'{DATA_DIR}/{cfg.out_dir}'
Path(OUT_DIR).mkdir(exist_ok=True, parents=True)
# follow these sequences: uri > experiment > run > others
tracking_uri = f'{DATA_DIR}/mlruns'
mlflow.set_tracking_uri(tracking_uri) # uri must be set before set_experiment
mlflow.set_experiment(cfg.mlflow.experiment.name)
mlflow.start_run()
mlflow.set_tags(cfg.mlflow.experiment.tags)
if not is_ipykernel():
mlflow.log_artifacts('.hydra/')
mlflow.log_param('feature_engineering', cfg.feature_engineering)
mlflow.log_param('model.name', cfg.model.name)
mlflow.log_params(cfg.model.model_param)
mlflow.log_params(cfg.model.train_param)
mlflow.log_param('cv.name', cfg.cv.name)
mlflow.log_params(cfg.cv.param)
mlflow.log_param('feature', cfg.features)
# FE
train = pd.DataFrame()
# load feature, info
features = []
for f in cfg.features:
df = | pd.read_pickle(f'{DATA_DIR}/{f.path}') | pandas.read_pickle |
import keras
import numpy as np
import pandas as pd
import re
import nltk
from nltk.corpus import stopwords
import spacy
nlp = spacy.load('en_core_web_sm')
import warnings
# from Contractions import contraction_mapping
pd.set_option("display.max_colwidth", 200)
warnings.filterwarnings("ignore")
data = | pd.read_csv("NewsSum.csv") | pandas.read_csv |
import os
from datetime import date
from dask.dataframe import DataFrame as DaskDataFrame
from numpy import nan, ndarray
from numpy.testing import assert_allclose, assert_array_equal
from pandas import DataFrame, Series, Timedelta, Timestamp
from pandas.testing import assert_frame_equal, assert_series_equal
from pymove import (
DaskMoveDataFrame,
MoveDataFrame,
PandasDiscreteMoveDataFrame,
PandasMoveDataFrame,
read_csv,
)
from pymove.core.grid import Grid
from pymove.utils.constants import (
DATE,
DATETIME,
DAY,
DIST_PREV_TO_NEXT,
DIST_TO_PREV,
HOUR,
HOUR_SIN,
LATITUDE,
LOCAL_LABEL,
LONGITUDE,
PERIOD,
SITUATION,
SPEED_PREV_TO_NEXT,
TID,
TIME_PREV_TO_NEXT,
TRAJ_ID,
TYPE_DASK,
TYPE_PANDAS,
UID,
WEEK_END,
)
list_data = [
[39.984094, 116.319236, '2008-10-23 05:53:05', 1],
[39.984198, 116.319322, '2008-10-23 05:53:06', 1],
[39.984224, 116.319402, '2008-10-23 05:53:11', 2],
[39.984224, 116.319402, '2008-10-23 05:53:11', 2],
]
str_data_default = """
lat,lon,datetime,id
39.984093,116.319236,2008-10-23 05:53:05,4
39.9842,116.319322,2008-10-23 05:53:06,1
39.984222,116.319402,2008-10-23 05:53:11,2
39.984222,116.319402,2008-10-23 05:53:11,2
"""
str_data_different = """
latitude,longitude,time,traj_id
39.984093,116.319236,2008-10-23 05:53:05,4
39.9842,116.319322,2008-10-23 05:53:06,1
39.984222,116.319402,2008-10-23 05:53:11,2
39.984222,116.319402,2008-10-23 05:53:11,2
"""
str_data_missing = """
39.984093,116.319236,2008-10-23 05:53:05,4
39.9842,116.319322,2008-10-23 05:53:06,1
39.984222,116.319402,2008-10-23 05:53:11,2
39.984222,116.319402,2008-10-23 05:53:11,2
"""
def _default_move_df():
return MoveDataFrame(
data=list_data,
latitude=LATITUDE,
longitude=LONGITUDE,
datetime=DATETIME,
traj_id=TRAJ_ID,
)
def _default_pandas_df():
return DataFrame(
data=[
[39.984094, 116.319236, Timestamp('2008-10-23 05:53:05'), 1],
[39.984198, 116.319322, Timestamp('2008-10-23 05:53:06'), 1],
[39.984224, 116.319402, Timestamp('2008-10-23 05:53:11'), 2],
[39.984224, 116.319402, Timestamp('2008-10-23 05:53:11'), 2],
],
columns=['lat', 'lon', 'datetime', 'id'],
index=[0, 1, 2, 3],
)
def test_move_data_frame_from_list():
move_df = _default_move_df()
assert MoveDataFrame.has_columns(move_df)
try:
MoveDataFrame.validate_move_data_frame(move_df)
except Exception:
assert False
assert isinstance(move_df, PandasMoveDataFrame)
def test_move_data_frame_from_file(tmpdir):
d = tmpdir.mkdir('core')
file_default_columns = d.join('test_read_default.csv')
file_default_columns.write(str_data_default)
filename_default = os.path.join(
file_default_columns.dirname, file_default_columns.basename
)
move_df = read_csv(filename_default)
assert MoveDataFrame.has_columns(move_df)
try:
MoveDataFrame.validate_move_data_frame(move_df)
except Exception:
assert False
assert isinstance(move_df, PandasMoveDataFrame)
file_different_columns = d.join('test_read_different.csv')
file_different_columns.write(str_data_different)
filename_diferent = os.path.join(
file_different_columns.dirname, file_different_columns.basename
)
move_df = read_csv(
filename_diferent,
latitude='latitude',
longitude='longitude',
datetime='time',
traj_id='traj_id',
)
assert MoveDataFrame.has_columns(move_df)
try:
MoveDataFrame.validate_move_data_frame(move_df)
except Exception:
assert False
assert isinstance(move_df, PandasMoveDataFrame)
file_missing_columns = d.join('test_read_missing.csv')
file_missing_columns.write(str_data_missing)
filename_missing = os.path.join(
file_missing_columns.dirname, file_missing_columns.basename
)
move_df = read_csv(
filename_missing, names=[LATITUDE, LONGITUDE, DATETIME, TRAJ_ID]
)
assert MoveDataFrame.has_columns(move_df)
try:
MoveDataFrame.validate_move_data_frame(move_df)
except Exception:
assert False
assert isinstance(move_df, PandasMoveDataFrame)
def test_move_data_frame_from_dict():
dict_data = {
LATITUDE: [39.984198, 39.984224, 39.984094],
LONGITUDE: [116.319402, 116.319322, 116.319402],
DATETIME: [
'2008-10-23 05:53:11',
'2008-10-23 05:53:06',
'2008-10-23 05:53:06',
],
}
move_df = MoveDataFrame(
data=dict_data,
latitude=LATITUDE,
longitude=LONGITUDE,
datetime=DATETIME,
traj_id=TRAJ_ID,
)
assert MoveDataFrame.has_columns(move_df)
try:
MoveDataFrame.validate_move_data_frame(move_df)
except Exception:
assert False
assert isinstance(move_df, PandasMoveDataFrame)
def test_move_data_frame_from_data_frame():
df = _default_pandas_df()
move_df = MoveDataFrame(
data=df, latitude=LATITUDE, longitude=LONGITUDE, datetime=DATETIME
)
assert MoveDataFrame.has_columns(move_df)
try:
MoveDataFrame.validate_move_data_frame(move_df)
except Exception:
assert False
assert isinstance(move_df, PandasMoveDataFrame)
def test_attribute_error_from_data_frame():
df = DataFrame(
data=[
[39.984094, 116.319236, Timestamp('2008-10-23 05:53:05'), 1],
[39.984198, 116.319322, Timestamp('2008-10-23 05:53:06'), 1],
[39.984224, 116.319402, Timestamp('2008-10-23 05:53:11'), 2],
[39.984224, 116.319402, Timestamp('2008-10-23 05:53:11'), 2],
],
columns=['laterr', 'lon', 'datetime', 'id'],
index=[0, 1, 2, 3],
)
try:
MoveDataFrame(
data=df, latitude=LATITUDE, longitude=LONGITUDE, datetime=DATETIME
)
raise AssertionError(
'AttributeError error not raised by MoveDataFrame'
)
except KeyError:
pass
df = DataFrame(
data=[
[39.984094, 116.319236, Timestamp('2008-10-23 05:53:05'), 1],
[39.984198, 116.319322, Timestamp('2008-10-23 05:53:06'), 1],
[39.984224, 116.319402, Timestamp('2008-10-23 05:53:11'), 2],
[39.984224, 116.319402, Timestamp('2008-10-23 05:53:11'), 2],
],
columns=['lat', 'lonerr', 'datetime', 'id'],
index=[0, 1, 2, 3],
)
try:
MoveDataFrame(
data=df, latitude=LATITUDE, longitude=LONGITUDE, datetime=DATETIME
)
raise AssertionError(
'AttributeError error not raised by MoveDataFrame'
)
except KeyError:
pass
df = DataFrame(
data=[
[39.984094, 116.319236, Timestamp('2008-10-23 05:53:05'), 1],
[39.984198, 116.319322, Timestamp('2008-10-23 05:53:06'), 1],
[39.984224, 116.319402, Timestamp('2008-10-23 05:53:11'), 2],
[39.984224, 116.319402, Timestamp('2008-10-23 05:53:11'), 2],
],
columns=['lat', 'lon', 'datetimerr', 'id'],
index=[0, 1, 2, 3],
)
try:
MoveDataFrame(
data=df, latitude=LATITUDE, longitude=LONGITUDE, datetime=DATETIME
)
raise AssertionError(
'AttributeError error not raised by MoveDataFrame'
)
except KeyError:
pass
def test_lat():
move_df = _default_move_df()
lat = move_df.lat
srs = Series(
data=[39.984094, 39.984198, 39.984224, 39.984224],
index=[0, 1, 2, 3],
dtype='float64',
name='lat',
)
assert_series_equal(lat, srs)
def test_lon():
move_df = _default_move_df()
lon = move_df.lon
srs = Series(
data=[116.319236, 116.319322, 116.319402, 116.319402],
index=[0, 1, 2, 3],
dtype='float64',
name='lon',
)
assert_series_equal(lon, srs)
def test_datetime():
move_df = _default_move_df()
datetime = move_df.datetime
srs = Series(
data=[
'2008-10-23 05:53:05',
'2008-10-23 05:53:06',
'2008-10-23 05:53:11',
'2008-10-23 05:53:11',
],
index=[0, 1, 2, 3],
dtype='datetime64[ns]',
name='datetime',
)
assert_series_equal(datetime, srs)
def test_loc():
move_df = _default_move_df()
assert move_df.loc[0, TRAJ_ID] == 1
loc_ = move_df.loc[move_df[LONGITUDE] > 116.319321]
expected = DataFrame(
data=[
[39.984198, 116.319322, Timestamp('2008-10-23 05:53:06'), 1],
[39.984224, 116.319402, Timestamp('2008-10-23 05:53:11'), 2],
[39.984224, 116.319402, Timestamp('2008-10-23 05:53:11'), 2],
],
columns=['lat', 'lon', 'datetime', 'id'],
index=[1, 2, 3],
)
assert_frame_equal(loc_, expected)
def test_iloc():
move_df = _default_move_df()
expected = Series(
data=[39.984094, 116.319236, Timestamp('2008-10-23 05:53:05'), 1],
index=['lat', 'lon', 'datetime', 'id'],
dtype='object',
name=0,
)
assert_series_equal(move_df.iloc[0], expected)
def test_at():
move_df = _default_move_df()
assert move_df.at[0, TRAJ_ID] == 1
def test_values():
move_df = _default_move_df()
expected = [
[39.984094, 116.319236, Timestamp('2008-10-23 05:53:05'), 1],
[39.984198, 116.319322, Timestamp('2008-10-23 05:53:06'), 1],
[39.984224, 116.319402, Timestamp('2008-10-23 05:53:11'), 2],
[39.984224, 116.319402, Timestamp('2008-10-23 05:53:11'), 2],
]
assert_array_equal(move_df.values, expected)
def test_columns():
move_df = _default_move_df()
assert_array_equal(
move_df.columns, [LATITUDE, LONGITUDE, DATETIME, TRAJ_ID]
)
def test_index():
move_df = _default_move_df()
assert_array_equal(move_df.index, [0, 1, 2, 3])
def test_dtypes():
move_df = _default_move_df()
expected = Series(
data=['float64', 'float64', '<M8[ns]', 'int64'],
index=['lat', 'lon', 'datetime', 'id'],
dtype='object',
name=None,
)
assert_series_equal(move_df.dtypes, expected)
def test_shape():
move_df = _default_move_df()
assert move_df.shape == (4, 4)
def test_len():
move_df = _default_move_df()
assert move_df.len() == 4
def test_unique():
move_df = _default_move_df()
assert_array_equal(move_df['id'].unique(), [1, 2])
def test_head():
move_df = _default_move_df()
expected = DataFrame(
data=[
[39.984094, 116.319236, Timestamp('2008-10-23 05:53:05'), 1],
[39.984198, 116.319322, Timestamp('2008-10-23 05:53:06'), 1],
],
columns=['lat', 'lon', 'datetime', 'id'],
index=[0, 1],
)
assert_frame_equal(move_df.head(2), expected)
def test_tail():
move_df = _default_move_df()
expected = DataFrame(
data=[
[39.984224, 116.319402, Timestamp('2008-10-23 05:53:11'), 2],
[39.984224, 116.319402, Timestamp('2008-10-23 05:53:11'), 2],
],
columns=['lat', 'lon', 'datetime', 'id'],
index=[2, 3],
)
assert_frame_equal(move_df.tail(2), expected)
def test_number_users():
move_df = MoveDataFrame(
data=list_data,
latitude=LATITUDE,
longitude=LONGITUDE,
datetime=DATETIME,
traj_id=TRAJ_ID,
)
assert move_df.get_users_number() == 1
move_df[UID] = [1, 1, 2, 3]
assert move_df.get_users_number() == 3
def test_to_numpy():
move_df = MoveDataFrame(
data=list_data,
latitude=LATITUDE,
longitude=LONGITUDE,
datetime=DATETIME,
traj_id=TRAJ_ID,
)
assert isinstance(move_df.to_numpy(), ndarray)
def test_to_dict():
move_df = MoveDataFrame(
data=list_data,
latitude=LATITUDE,
longitude=LONGITUDE,
datetime=DATETIME,
traj_id=TRAJ_ID,
)
assert isinstance(move_df.to_dict(), dict)
def test_to_grid():
move_df = MoveDataFrame(
data=list_data,
latitude=LATITUDE,
longitude=LONGITUDE,
datetime=DATETIME,
traj_id=TRAJ_ID,
)
g = move_df.to_grid(8)
assert isinstance(g, Grid)
def test_to_data_frame():
move_df = MoveDataFrame(
data=list_data,
latitude=LATITUDE,
longitude=LONGITUDE,
datetime=DATETIME,
traj_id=TRAJ_ID,
)
assert isinstance(move_df.to_data_frame(), DataFrame)
def test_to_discrete_move_df():
move_df = PandasDiscreteMoveDataFrame(
data={DATETIME: ['2020-01-01 01:08:29',
'2020-01-05 01:13:24',
'2020-01-06 02:21:53',
'2020-01-06 03:34:48',
'2020-01-08 05:55:41'],
LATITUDE: [3.754245,
3.150849,
3.754249,
3.165933,
3.920178],
LONGITUDE: [38.3456743,
38.6913486,
38.3456743,
38.2715962,
38.5161605],
TRAJ_ID: ['pwe-5089',
'xjt-1579',
'tre-1890',
'xjt-1579',
'pwe-5089'],
LOCAL_LABEL: [1, 4, 2, 16, 32]},
)
assert isinstance(
move_df.to_dicrete_move_df(), PandasDiscreteMoveDataFrame
)
def test_describe():
move_df = _default_move_df()
expected = DataFrame(
data=[
[4.0, 4.0, 4.0],
[39.984185, 116.31934049999998, 1.5],
[6.189237971348586e-05, 7.921910543639078e-05, 0.5773502691896257],
[39.984094, 116.319236, 1.0],
[39.984172, 116.3193005, 1.0],
[39.984211, 116.319362, 1.5],
[39.984224, 116.319402, 2.0],
[39.984224, 116.319402, 2.0],
],
columns=['lat', 'lon', 'id'],
index=['count', 'mean', 'std', 'min', '25%', '50%', '75%', 'max'],
)
assert_frame_equal(move_df.describe(), expected)
def test_memory_usage():
move_df = _default_move_df()
expected = Series(
data=[128, 32, 32, 32, 32],
index=['Index', 'lat', 'lon', 'datetime', 'id'],
dtype='int64',
name=None,
)
assert_series_equal(move_df.memory_usage(), expected)
def test_copy():
move_df = _default_move_df()
cp = move_df.copy()
assert_frame_equal(move_df, cp)
cp.at[0, TRAJ_ID] = 0
assert move_df.loc[0, TRAJ_ID] == 1
assert move_df.loc[0, TRAJ_ID] != cp.loc[0, TRAJ_ID]
def test_generate_tid_based_on_id_datetime():
move_df = _default_move_df()
new_move_df = move_df.generate_tid_based_on_id_datetime(inplace=False)
expected = DataFrame(
data=[
[
39.984094,
116.319236,
Timestamp('2008-10-23 05:53:05'),
1,
'12008102305',
],
[
39.984198,
116.319322,
Timestamp('2008-10-23 05:53:06'),
1,
'12008102305',
],
[
39.984224,
116.319402,
Timestamp('2008-10-23 05:53:11'),
2,
'22008102305',
],
[
39.984224,
116.319402,
Timestamp('2008-10-23 05:53:11'),
2,
'22008102305',
],
],
columns=['lat', 'lon', 'datetime', 'id', 'tid'],
index=[0, 1, 2, 3],
)
assert_frame_equal(new_move_df, expected)
assert isinstance(new_move_df, PandasMoveDataFrame)
assert TID not in move_df
move_df.generate_tid_based_on_id_datetime()
| assert_frame_equal(move_df, expected) | pandas.testing.assert_frame_equal |
#=======================================================================================================================
#
# ALLSorts v2 - The ALLSorts pipeline
# Author: <NAME>
# License: MIT
#
# Note: Inherited from Sklearn Pipeline
#
#=======================================================================================================================
''' --------------------------------------------------------------------------------------------------------------------
Imports
---------------------------------------------------------------------------------------------------------------------'''
''' Internal '''
from ALLSorts.common import _flat_hierarchy, message, root_dir
''' External '''
from sklearn.pipeline import Pipeline
from sklearn.base import clone
import joblib
import pandas as pd
import plotly.express as px
from string import ascii_lowercase
''' --------------------------------------------------------------------------------------------------------------------
Global Variables
---------------------------------------------------------------------------------------------------------------------'''
c_subtypes = {
# Greens
"High Sig": "#358600",
"High hyperdiploid": "#16a085",
"Low hyperdiploid": "#1abc9c",
"Low hypodiploid": "#27ae60",
"Near haploid": "#2ecc71",
"iAMP21": "#006266",
# Purple
"ETV6-RUNX1 Group": "#3C174F",
"ETV6-RUNX1": "#9b59b6",
"ETV6-RUNX1-like": "#8e44ad",
# Red
"PAX5alt": "#c0392b",
"PAX5 P80R": "#e74c3c",
# Blue
"Ph Group": "#45aaf2",
"Ph": "#2d98da",
"Ph-like": "#3867d6",
# Orange
"KMT2A Group": "#e67e22",
"KMT2A": "#e67e22",
"KMT2A-like": "#f39c12",
# Yellows
"ZNF384 Group": "#ffd32a",
"ZNF384": "#ffd32a",
"ZNF384-like": "#ffdd59",
# Others
"DUX4": "#1e272e", # Grey
"HLF": "#FDA7DF", # light pink
"TCF3-PBX1": "#40407a", # dark purple
"IKZF1 N159Y": "#2c2c54", # darkest purple
"BCL2/MYC": "#22a6b3", # dark cyan
"NUTM1": "#B33771", # light mauve
"MEF2D": "#6D214F", # dark mauve
"IL3-IGH": "#000000", # black
"Unclassified": "#dddddd",
"Other": "#ffffff"
}
''' --------------------------------------------------------------------------------------------------------------------
Classes
---------------------------------------------------------------------------------------------------------------------'''
class ALLSorts(Pipeline):
"""
Fundamentally, ALLSorts is just a pipeline, consisting of stages that need to be executed sequentially.
This prepares the input for training or prediction. This ALLSorts class extends the Scikit Learn pipeline
class and contains all the sequential stages needed to run ALLSorts. It is ALLSorts!
...
Attributes
__________
Pipeline : Scikit-Learn pipeline class
Inherit from this class.
Methods
-------
transform(X)
Execute every stage of the pipeline, transforming the initial input with each step. This does not include the
final classification.
predict_proba(X, parents=True)
Prior to classification, transform the raw input appropriately.
predict_dist(predicted_proba, return_plot=False, plot_height=7, plot_width=20)
Generate probability distributions for the results derived from predict_proba.
predict_waterfall(predicted_proba, compare=False, return_plot=False):
Generate waterfall plots data for the results derived from predict_proba and comparisons.
plot_waterfall(prediction_order)
Generate waterfall plots from data generated in predict_waterfall.
predict_plot(X, return_plot=False)
Use UMAP to plot samples onto a predefined manifold.
clone
Create an empty clone of this pipeline
save(path="models/allsorts.pkl.gz")
Save pipeline in pickle format to the supplied path.
"""
def __init__(self, steps, memory=None, verbose=False):
"""
Initialise the class
Attributes
__________
steps : list
A list of all steps to be used in the pipeline (generally objects)
memory : str or object
Used to cache the fitted transformers. Default "None".
verbose : bool
Include extra messaging during the training of ALLSorts.
"""
self.steps = steps
self.memory = memory
self.verbose = verbose
self._validate_steps()
def _get_flat_hierarchy(self, hierarchy):
return _flat_hierarchy(hierarchy, flat_hierarchy={})
def _get_parents(self, f_hierarchy):
parents = []
for subtype in f_hierarchy:
if f_hierarchy[subtype]:
parents.append(subtype)
return parents
def transform(self, X):
"""
Transform the input through the sequence of stages provided in self.steps.
The final step (the classification stage) will NOT be executed here.
This merely gets the raw input into the correct format.
...
Parameters
__________
X : Pandas DataFrame
Pandas DataFrame that represents the raw counts of your samples (rows) x genes (columns)).
Returns
__________
Xt : Pandas DataFrame
Transformed counts.
"""
Xt = X
for _, name, transform in self._iter(with_final=False):
Xt = transform.transform(Xt)
return Xt
def predict_proba(self, X, parents=True):
"""
Given a set of samples, return the probabilities of the classification attempt.
...
Parameters
__________
X : Pandas DataFrame
Pandas DataFrame that represents the raw counts of your samples (rows) x genes (columns)).
parents : bool
True/False as to whether to include parents in the hierarchy in the output, i.e. Ph Group.
Returns
__________
probabilities: Pandas DataFrame
Probabilities returned by ALLSorts for each prediction - samples (rows) x subtype/meta-subtype (columns)
Note: These do not have to add to 1 column-wise - see paper (when it is released!)
"""
Xt = self.transform(X)
return self.steps[-1][-1].predict_proba(Xt, parents=parents)
def predict(self, X, probabilities=False, parents=True):
"""
Given a set of samples, return the predictions of the classification attempt.
...
Parameters
__________
X : Pandas DataFrame
Pandas DataFrame that represents either:
- The raw counts of your samples (rows) x genes (columns))
- The probabilities as provided by predict_proba
probabilities : bool
True/False as to whether to indicate X is probabilities vs. raw counts
parents : bool
True/False as to whether to include parents in the hierarchy in the output, i.e. Ph Group.
Returns
__________
predictions : Pandas DataFrame
Predictions as made by ALLSorts given the input: a 1 x n-samples DataFrame.
"""
if not probabilities:
return self.steps[-1][-1].predict(self.transform(X), probabilities=probabilities, parents=parents)
else:
return self.steps[-1][-1].predict(X, probabilities=probabilities, parents=parents)
def predict_dist(self, predicted_proba, return_plot=False, plot_height=7, plot_width=20):
"""
Given a set of predicted probabilities, generate a figure displaying distributions of probabilities.
See https://github.com/Oshlack/AllSorts/ for examples.
...
Parameters
__________
predicted_proba : Pandas DataFrame
Calculated probabilities via predict_proba.
return_plot : bool
Rather than showing the plot through whatever IDE is being used, send it back to the function call.
Likely so it can be saved.
plot_height : int, default 7
Height in inches of the final image.
plot_width : int, default 20
Width in inches of the final image.
Returns
__________
Matplotlib object containing the drawn figure
Output
__________
Probability distribution figure.
"""
'''These should not be hard coded here, but... c'mon. I'm doing OK, give me a break.'''
order = ["High Sig", "High hyperdiploid", 'Low hyperdiploid', "Near haploid", 'Low hypodiploid', 'Ph Group',
"Ph-like", "Ph", "PAX5alt", 'ETV6-RUNX1 Group', 'ETV6-RUNX1', 'ETV6-RUNX1-like', 'KMT2A Group',
'TCF3-PBX1', 'DUX4', 'iAMP21', 'NUTM1', 'BCL2/MYC', 'MEF2D', 'HLF', 'IKZF1 N159Y', 'PAX5 P80R',
'ZNF384 Group']
parents = {"Ph Group": ["Ph", "Ph-like"],
"ETV6-RUNX1 Group": ["ETV6-RUNX1", "ETV6-RUNX1-like"],
"High Sig": ["Low hyperdiploid", "High hyperdiploid", "Near haploid"]}
'''Now, on with the show!'''
thresholds = self.steps[-1][-1].thresholds
probabilities = predicted_proba.copy()
x = []
y = []
c = []
sample_id = []
true = []
pred = []
if "True" not in probabilities.columns:
probabilities["True"] = ""
for i, values in probabilities.drop(["Pred", "True"], axis=1).iteritems():
y += list(values)
x += [i] * values.shape[0]
sample_id += list(values.index)
if i in parents.keys():
labels = probabilities["True"].isin(parents[i])
else:
labels = probabilities["True"] == i
true += list(probabilities["True"])
pred += list(probabilities["Pred"])
c += list(labels)
title = "Probability Distributions (" + str(probabilities.shape[0]) + " Samples)"
fig = px.strip(x=x, y=y, color=c,
color_discrete_map={True: "#DD4075", False: "#164EB6"},
hover_data={"sample": sample_id, "new_pred": pred},
labels=dict(x="Subtype", y="Probability"), title=title).update_traces(
jitter=1, marker={"size": 11, "line": {"color": "rgba(0,0,0,0.4)", "width":1}})
fig.update_layout(font_size=42, title_font_size=72, yaxis={"dtick": 0.1},
plot_bgcolor='rgba(0,0,0,0)')
fig.update_yaxes(showgrid=True, gridwidth=1, gridcolor='#dadada')
for i in range(0, len(order)):
fig.add_shape({'type': "line", 'x0': i - 0.5, 'x1': i + 0.5,
'y0': thresholds[order[i]], 'y1': thresholds[order[i]],
'line': {'color': '#03CEA4', 'width': 3}})
if return_plot:
return fig
else:
fig.show()
def predict_waterfall(self, predicted_proba, compare=False, return_plot=False):
"""
Given a set of predicted probabilities, generate a figure displaying the decreasing probabilities per sample.
This depiction is useful for comparing probabilities more directly, in an ordered way, to judge the efficacy
of the classification attempt.
See https://github.com/Oshlack/AllSorts/ for examples.
...
Parameters
__________
predicted_proba : Pandas DataFrame
Calculated probabilities via predict_proba.
return_plot : bool
Rather than showing the plot through whatever IDE is being used, send it back to the function call.
Likely so it can be saved.
compare : bool or Pandas DataFrame
Samples to compare newly predicted samples to
Returns
__________
Matplotlib object containing the drawn figure
Output
__________
Waterfalls figure.
"""
probabilities = predicted_proba.copy()
''' Have true labels been assigned? Are comparison samples provided? '''
if "True" not in probabilities.columns:
probabilities["True"] = "Other"
if isinstance(compare, pd.DataFrame):
probabilities = | pd.concat([probabilities, compare], join="inner") | pandas.concat |
from dataclasses import replace
import datetime as dt
from functools import partial
import inspect
from pathlib import Path
import re
import types
import uuid
import pandas as pd
from pandas.testing import assert_frame_equal
import pytest
from solarforecastarbiter import datamodel
from solarforecastarbiter.io import api, nwp, utils
from solarforecastarbiter.reference_forecasts import main, models
from solarforecastarbiter.conftest import default_forecast, default_observation
BASE_PATH = Path(nwp.__file__).resolve().parents[0] / 'tests/data'
@pytest.mark.parametrize('model', [
models.gfs_quarter_deg_hourly_to_hourly_mean,
models.gfs_quarter_deg_to_hourly_mean,
models.hrrr_subhourly_to_hourly_mean,
models.hrrr_subhourly_to_subhourly_instantaneous,
models.nam_12km_cloud_cover_to_hourly_mean,
models.nam_12km_hourly_to_hourly_instantaneous,
models.rap_cloud_cover_to_hourly_mean,
models.gefs_half_deg_to_hourly_mean
])
def test_run_nwp(model, site_powerplant_site_type, mocker):
""" to later patch the return value of load forecast, do something like
def load(*args, **kwargs):
return load_forecast_return_value
mocker.patch.object(inspect.unwrap(model), '__defaults__',
(partial(load),))
"""
mocker.patch.object(inspect.unwrap(model), '__defaults__',
(partial(nwp.load_forecast, base_path=BASE_PATH),))
mocker.patch(
'solarforecastarbiter.reference_forecasts.utils.get_init_time',
return_value=pd.Timestamp('20190515T0000Z'))
site, site_type = site_powerplant_site_type
fx = datamodel.Forecast('Test', dt.time(5), pd.Timedelta('1h'),
pd.Timedelta('1h'), pd.Timedelta('6h'),
'beginning', 'interval_mean', 'ghi', site)
run_time = pd.Timestamp('20190515T1100Z')
issue_time = pd.Timestamp('20190515T1100Z')
out = main.run_nwp(fx, model, run_time, issue_time)
for var in ('ghi', 'dni', 'dhi', 'air_temperature', 'wind_speed',
'ac_power'):
if site_type == 'site' and var == 'ac_power':
assert out.ac_power is None
else:
ser = getattr(out, var)
assert len(ser) >= 6
assert isinstance(ser, (pd.Series, pd.DataFrame))
assert ser.index[0] == pd.Timestamp('20190515T1200Z')
assert ser.index[-1] < pd.Timestamp('20190515T1800Z')
@pytest.fixture
def obs_5min_begin(site_metadata):
observation = default_observation(
site_metadata,
interval_length=pd.Timedelta('5min'), interval_label='beginning')
return observation
@pytest.fixture
def observation_values_text():
"""JSON text representation of test data"""
tz = 'UTC'
data_index = pd.date_range(
start='20190101', end='20190112', freq='5min', tz=tz, closed='left')
# each element of data is equal to the hour value of its label
data = pd.DataFrame({'value': data_index.hour, 'quality_flag': 0},
index=data_index)
text = utils.observation_df_to_json_payload(data)
return text.encode()
@pytest.fixture
def session(requests_mock, observation_values_text):
session = api.APISession('')
matcher = re.compile(f'{session.base_url}/observations/.*/values')
requests_mock.register_uri('GET', matcher, content=observation_values_text)
return session
@pytest.mark.parametrize('interval_label', ['beginning', 'ending'])
def test_run_persistence_scalar(session, site_metadata, obs_5min_begin,
interval_label, mocker):
run_time = pd.Timestamp('20190101T1945Z')
# intraday, index=False
forecast = default_forecast(
site_metadata,
issue_time_of_day=dt.time(hour=23),
lead_time_to_start=pd.Timedelta('1h'),
interval_length=pd.Timedelta('1h'),
run_length=pd.Timedelta('1h'),
interval_label=interval_label)
issue_time = pd.Timestamp('20190101T2300Z')
mocker.spy(main.persistence, 'persistence_scalar')
out = main.run_persistence(session, obs_5min_begin, forecast, run_time,
issue_time)
assert isinstance(out, pd.Series)
assert len(out) == 1
assert main.persistence.persistence_scalar.call_count == 1
@pytest.mark.parametrize('interval_label', ['beginning', 'ending'])
def test_run_persistence_scalar_index(session, site_metadata, obs_5min_begin,
interval_label, mocker):
run_time = pd.Timestamp('20190101T1945Z')
forecast = default_forecast(
site_metadata,
issue_time_of_day=dt.time(hour=23),
lead_time_to_start=pd.Timedelta('1h'),
interval_length=pd.Timedelta('1h'),
run_length=pd.Timedelta('1h'),
interval_label=interval_label)
issue_time = pd.Timestamp('20190101T2300Z')
# intraday, index=True
mocker.spy(main.persistence, 'persistence_scalar_index')
out = main.run_persistence(session, obs_5min_begin, forecast, run_time,
issue_time, index=True)
assert isinstance(out, pd.Series)
assert len(out) == 1
assert main.persistence.persistence_scalar_index.call_count == 1
def test_run_persistence_interval(session, site_metadata, obs_5min_begin,
mocker):
run_time = pd.Timestamp('20190102T1945Z')
# day ahead, index = False
forecast = default_forecast(
site_metadata,
issue_time_of_day=dt.time(hour=23),
lead_time_to_start=pd.Timedelta('1h'),
interval_length=pd.Timedelta('1h'),
run_length= | pd.Timedelta('24h') | pandas.Timedelta |
from pprint import pprint
import json
import requests
import pandas as pd
import os
import datetime as dt
from datetime import datetime
from configparser import ConfigParser
import base64
path_to_batches = "batches/"
batch_files = ['ct_river_area.json', 'ledgelight.json', 'lyme_oldlyme.json']
add_style = "yes"
export_html = "yes"
update_detailed_report = "no"
update_summary = "yes"
create_blog = "no"
## External sources
opening = "source/opening.txt"
data_intro = "source/data_intro.txt"
style_declaration = "source/style.txt"
creds = "creds/creds_batch.ini"
population = "source/ct_population.csv"
### Case-specific information
case_initial = 14 # number of most recent reports to show
# interval is measured in number of reports. In general, 5 reports come out
# every 7 days, so to go back 28 days enter 20
case_interval = [10, 20, 64, 128]
### Vax specific information
vax_initial = 3 # number of most recent vax level reports to show
# set output directories
report_dir = 'report/'
###########################################
# Use care if adjusting values below here #
###########################################
dir_list = [report_dir]
for d in dir_list:
try:
os.makedirs(d)
except FileExistsError:
pass
#Read creds.ini file
config = ConfigParser()
config.read(creds)
#Get options for details to include in summaries, and file format export options
creds = config["WORDPRESS"]
url_page = creds["url_page"]
url_post = creds["url_post"]
user = creds["username"]
password = creds["password"]
df_vax = | pd.DataFrame(columns=['town', 'reported_date', 'age_group', 'initiated', 'vaccinated', 'change']) | pandas.DataFrame |
# -*- coding: utf-8 -*-
"""
Created on Wed Apr 11 10:22:36 2018
@author: Prodipta
"""
import sys
import pandas as pd
import os
import datetime
import requests
import json
import shutil
## TODO: This is a hack, install the correct version
#zp_path = "C:/Users/academy.academy-72/Documents/python/zipline/"
#sys.path.insert(0, zp_path)
## TODO: End of hack part
from zipline.data.bundles import register
from zipline.data.bundles.algoseek import algoseek_minutedata
from zipline.data.bundles.ingest_utilities import touch, unzip_to_directory, clean_up, download_spx_changes
from zipline.data import bundles as bundles_module
class IngestLoop:
def __init__(self,configpath):
with open(configpath) as configfile:
config = json.load(configfile)
self.config_path = configpath
self.bundle_name=config["BUNDLE_NAME"]
self.input_path=config["INPUT_PATH"]
self.data_path=config["DATA_PATH"]
self.meta_path=config["META_PATH"]
self.daily_path=config["DAILY_PATH"]
self.benchmar_symbol=config["BENCHMARK_SYM"]
self.benchmark_file=config["BENCHMARKDATA"]
self.bizdays_file=config["BIZDAYLIST"]
self.symlist_file=config["SYMLIST"]
self.calendar_name=config["CALENDAR_NAME"]
self.sym_directory=config["SYM_DIRECTORY"]
self.wiki_url=config["WIKI_URL"]
self.spx_data = download_spx_changes(self.wiki_url)
self.known_missing_syms = pd.read_csv(os.path.join(self.meta_path,'missing_syms.csv'))
self.known_missing_syms.date = pd.to_datetime(self.known_missing_syms.date)
def update_benchmark(self):
if not os.path.isfile(os.path.join(self.meta_path, self.benchmark_file)):
raise IOError("Benchmark file is missing")
df0 = pd.read_csv(os.path.join(self.meta_path,
self.benchmark_file),parse_dates=[0],index_col=0).sort_index()
df0 = df0.dropna()
r = requests.get(
'https://api.iextrading.com/1.0/stock/{}/chart/5y'.format(self.benchmar_symbol)
)
data = json.loads(r.text)
df1 = pd.DataFrame(data)
df1.index = pd.DatetimeIndex(df1['date'])
df1 = df1[['open','high','low','close','volume']]
df1 = df1.sort_index()
df = pd.concat([df0,df1])
df = df[~df.index.duplicated(keep='last')]
df.to_csv(os.path.join(self.meta_path,self.benchmark_file),
index_label = 'date')
def update_bizdays(self, strdate):
strpathmeta = os.path.join(self.meta_path,self.bizdays_file)
dts = []
if os.path.isfile(strpathmeta):
dts = pd.read_csv(strpathmeta)
dts = pd.to_datetime(dts['dates']).tolist()
dts = dts+ [pd.to_datetime(strdate,format='%Y%m%d')]
bizdays = pd.DataFrame(sorted(set(dts)),columns=['dates'])
bizdays.to_csv(strpathmeta,index=False)
def _validate_dropouts(self, syms, spx_changes, cutoff=10):
if not syms:
return True
if not spx_changes:
spx_changes = download_spx_changes(self.wiki_url)
current_sym_list = spx_changes['tickers']['symbol'].tolist()
deleted_sym_list = spx_changes['change']['delete'].tolist()
added_sym_list = spx_changes['change']['add'].tolist()
validation_exists = [True if s in current_sym_list else False for s in syms]
validation_added = [True if s in added_sym_list else False for s in syms]
validation_deleted = [False if s in deleted_sym_list else True for s in syms]
if len(syms) > cutoff:
validation_results = [validation_exists[i] or validation_added[i] or validation_deleted[i] for i, e in enumerate(syms)]
else:
validation_results = [validation_exists[i] or validation_added[i] for i,e in enumerate(syms)]
return validation_results
def manage_symlist(self, symbols, date):
fname = 'symbols_'+date+'.csv'
if not os.path.isfile(os.path.join(self.meta_path,self.symlist_file)):
pd.DataFrame(symbols,columns=['symbol']).to_csv(os.path.join(self.meta_path,self.symlist_file),index=False)
pd.DataFrame(symbols,columns=['symbol']).to_csv(os.path.join(self.meta_path,self.sym_directory,fname),index=False)
return
symlist = pd.read_csv(os.path.join(self.meta_path,self.symlist_file))
symlist = symlist['symbol'].tolist()
extra_syms = [s for s in symbols if s not in symlist]
print("extra symbols {}".format(extra_syms))
missing_syms = [s for s in symlist if s not in symbols]
print("missing symbols {}".format(missing_syms))
date = pd.to_datetime(date)
known_missing_symbols = self.known_missing_syms[self.known_missing_syms.date==date]
known_missing_symbols = known_missing_symbols.symbol[known_missing_symbols.keep==1].tolist()
if missing_syms and len(missing_syms) - len(extra_syms) > 1:
spx_tickers = self.spx_data['tickers']
change_data = self.spx_data['change']
change_data = change_data[change_data['date'] <= | pd.to_datetime(date) | pandas.to_datetime |
#!/usr/bin python3
"""
<Description of the programme>
Author: <NAME> <<EMAIL>>
Created: 05 Nov 2020
License: MIT
Contributors: <NAME>
"""
# Imports
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Python:
import logging
# 3rd party:
from pandas import DataFrame, to_datetime
from numpy import NaN
# Internal:
try:
from __app__.utilities import func_logger
except ImportError:
from utilities import func_logger
from utilities.latest_data import get_latest_csv
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Header
__author__ = "<NAME>"
__copyright__ = "Copyright (c) 2020, Public Health England"
__license__ = "MIT"
__version__ = "0.0.1"
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
__all__ = [
'change_by_sum'
]
def col2datetime(d, col, format):
d.loc[:, col] = | to_datetime(d.loc[:, col], format=format) | pandas.to_datetime |
import unittest
from pathlib import Path
import sys
import tkinter
import numpy as np
import pandas as pd
sys.path.append(str(Path(__file__).parent.parent))
sys.path.append(str(Path(__file__).parent.parent / "src"))
import src.score as score
import src.database as database
import src.const as const
class TestDatabase(unittest.TestCase):
def setUp(self):  # set up save_temp_db
print("setUp")
self.pattern = const.PATTERN
self.db_name = "./data/test.db"
self.score = score.Score(0)
self.db = database.Database(self.db_name, self.score)
def create_temp_data_score(self):
# score data for 2 points
self.db.arrayFrameStart = [0, 100, 200]
self.db.arrayFrameEnd = [10, 150, 230]
self.db.arraySet = ["1", "1", "1"]
self.db.arrayGame = ["0-0", "0-0", "0-0"]
self.db.arrayScore = ["0-0", "15-0", "15-0"]
self.db.arrayScoreResult = ["15-0", "15-15", "15-15"]
self.db.arrayFirstSecond = [0, 1, 2]
self.db.arrayServer = ["A", "B", "B"]
self.db.arrayPointWinner = ["A", "B", "B"]
self.db.pointWin = [[0, 1, 1], [1, 0, 0]] # pointwin [[0], [0]] [[A],[B]]
self.db.arrayFault = [0, 1, 1]
self.db.arrayPointPattern = [self.pattern[0], self.pattern[1], self.pattern[3]]
self.db.arrayForeBack = ["Fore", "Back", "Back"]
self.db.arrayContactServe = [[0, 0], [0, 0], [0, 0]] # 使っている?使っていなければ消す
self.db.arrayCourt = [
[[0, 1], [2, 3], [2, 3]],
[[4, 5], [6, 7], [2, 3]],
[[8, 9], [10, 11], [2, 3]],
[[12, 13], [14, 15], [2, 3]],
]
def create_temp_data_shot(self):
self.db.shot_frame = [1296, 1730, 1742]
self.db.array_ball_position_shot_x = ["5.42", "9.26", "13.96"]
self.db.array_ball_position_shot_y = ["17.15", "16.21", "24.5"]
self.db.arrayPlayerAPosition_x = ["4.57", "4.57", "4.57"]
self.db.arrayPlayerAPosition_y = ["-1.97", "-1.97", "-1.97"]
self.db.arrayPlayerBPosition_x = ["1.27", "1.27", "1.27"]
self.db.arrayPlayerBPosition_y = ["-8.95", "-8.95", "-8.95"]
self.db.arrayHitPlayer = ["Nishioka", "Nishioka", "Nishioka"]
self.db.arrayBounceHit = ["Bounce", "Hit", "Bounce"]
self.db.arrayForeBack = ["", "Fore", ""]
self.db.arrayDirection = ["Cross", "Cross", "Cross"]
self.db.array_x1 = [2, 3, 4]
self.db.array_y1 = [2, 3, 4]
self.db.array_x2 = [2, 3, 4]
self.db.array_y2 = [2, 3, 4]
self.db.array_x3 = [2, 3, 4]
self.db.array_y3 = [2, 3, 4]
self.db.array_x4 = [2, 3, 4]
self.db.array_y4 = [2, 3, 4]
# shot data
# self.db.array_ball_position_shot = [
# [],
# [[1, 1296.0, "5.42", "17.15"]],
# [[2, 1730.0, "9.26", "16.21"], [2, 1742.0, "13.96", "24.5"]],
# ] # point frame bx by
# self.db.arrayPlayerAPosition = [
# [],
# [[1, 861.0, "4.57", "-1.97"]],
# [[2, 861.0, "4.57", "-1.97"], [2, 861.0, "4.57", "-1.97"]],
# ]
# self.db.arrayPlayerBPosition = [
# [],
# [[1, 1016.0, "1.27", "-8.95"]],
# [[2, 1016.0, "1.27", "-8.95"], [2, 1016.0, "1.27", "-8.95"]],
# ] #
# self.db.arrayHitPlayer = [
# [],
# ["Nishioka"],
# ["Nishioka", "Nishioka", "Nishioka"],
# ]
# self.db.arrayBounceHit = [[], ["Hit"], ["Bounce", "Hit", "Bounce"]]
# self.db.arrayForeBack = [[], ["Fore"], ["", "Fore", ""]]
# self.db.arrayDirection = [[], ["Cross"], ["Cross", "Cross", "Cross"]]
# self.db.array_x1 = [[], [1], [2, 3, 4]]
# self.db.array_y1 = [[], [1], [2, 3, 4]]
# self.db.array_x2 = [[], [1], [2, 3, 4]]
# self.db.array_y2 = [[], [1], [2, 3, 4]]
# self.db.array_x3 = [[], [1], [2, 3, 4]]
# self.db.array_y3 = [[], [1], [2, 3, 4]]
# self.db.array_x4 = [[], [1], [2, 3, 4]]
# self.db.array_y4 = [[], [1], [2, 3, 4]]
def create_temp_data_basic(self):
self.db.playerA = "player_A"
self.db.playerB = "player_B"
self.db.number = 2
self.db.totalGame = 5
self.db.faultFlug = 1
def test_save_database_score(self):
self.assertEqual(1, self.db.save_database_score(self.db_name))  # initial data: 1
self.create_temp_data_score()
self.assertEqual(3, self.db.save_database_score(self.db_name))  # temporary data: 3
def test_save_database_shot(self):
"""calc length of saved dataframe"""
self.assertEqual(0, self.db.save_database_shot(self.db_name))  # initial data: 0
self.create_temp_data_shot()
self.assertEqual(3, self.db.save_database_shot(self.db_name))  # temporary data: 3
def test_save_database_basic(self):
self.assertEqual(1, self.db.save_database_basic(self.db_name))  # initial data: 1
self.create_temp_data_basic()
self.assertEqual(1, self.db.save_database_basic(self.db_name))  # temporary data: 1
def test_array2arrays(self):
print("test_array2arrays")
frame, ballx, bally = [], [], []
self.assertEqual([], self.db.array2arrays(frame, ballx, bally))  # initial data
# point = [1, 2, 3, 4]
frame = [861, 1296, 1730, 1742]
ballx = [12.2, 5.42, 9.26, 13.96]
bally = [23.47, 17.15, 16.21, 24.5]
# r = self.db.array2arrays(frame, ballx, bally)
# for i in range(len(point)):
# self.assertEqual(
# [[point[i], frame[i], ballx[i], bally[i]]], r[i + 1]
# ) # temporary data
self.assertEqual(
[
[861, 12.2, 23.47],
[1296, 5.42, 17.15],
[1730, 9.26, 16.21],
[1742, 13.96, 24.5],
],
self.db.array2arrays(frame, ballx, bally),
)
def test_array2arrays2(self):
point = [1, 2, 3, 3, 4, 4, 4, 4]
hit = ["A", "B", "A", "B", "A", "B", "A", "B"]
bh = ["Hit", "Hit", "Hit", "Bounce", "Hit", "Bounce", "Hit", "Bounce"]
fb = ["", "", "Fore", "", "", "Back", "", ""]
d = ["Cross", "Cross", "Cross", "Cross", "Cross", "Cross", "Cross", "Cross"]
x1 = [1, 2, 3, 4, 5, 6, 7, 8]
y1 = [1, 2, 3, 4, 5, 6, 7, 8]
x2 = [1, 2, 3, 4, 5, 6, 7, 8]
y2 = [1, 2, 3, 4, 5, 6, 7, 8]
x3 = [1, 2, 3, 4, 5, 6, 7, 8]
y3 = [1, 2, 3, 4, 5, 6, 7, 8]
x4 = [1, 2, 3, 4, 5, 6, 7, 8]
y4 = [1, 2, 3, 4, 5, 6, 7, 8]
(
array_hit,
array_bouncehit,
array_foreback,
array_direction,
array_x1,
array_y1,
array_x2,
array_y2,
array_x3,
array_y3,
array_x4,
array_y4,
) = self.db.array2arrays2(point, hit, bh, fb, d, x1, y1, x2, y2, x3, y3, x4, y4)
self.assertEqual(
[[], ["A"], ["B"], ["A", "B"], ["A", "B", "A", "B"]], array_hit
)
self.assertEqual(
[
[],
["Hit"],
["Hit"],
["Hit", "Bounce"],
["Hit", "Bounce", "Hit", "Bounce"],
],
array_bouncehit,
)
self.assertEqual(
[[], [""], [""], ["Fore", ""], ["", "Back", "", ""]], array_foreback
)
self.assertEqual(
[
[],
["Cross"],
["Cross"],
["Cross", "Cross"],
["Cross", "Cross", "Cross", "Cross"],
],
array_direction,
)
self.assertEqual([[], [1], [2], [3, 4], [5, 6, 7, 8]], array_x1)
def test_load_database_score_init(self):
sc = score.Score(0)
db_name = "./tests/temp.db"
db = database.Database(db_name, sc)
db.save_database_score(db_name)
self.assertEqual(1, db.load_database_score(db_name))  # initial score table
def test_load_database_score(self):
db_name = "./data/test.db"
self.assertEqual(3, self.db.load_database_score(db_name))
def test_load_database_shot_init(self):
sc = score.Score(0)
db_name = "./tests/temp.db"
db = database.Database(db_name, sc)
db.save_database_shot(db_name)
self.assertEqual(0, db.load_database_shot(db_name))
def test_load_database_shot(self):
# data with 3 records
db_name = "./data/test.db"
self.assertEqual(3, self.db.load_database_shot(db_name))
def test_load_database_basic(self):
sc = score.Score(0)
db_name = "./data/test.db"
db = database.Database(db_name, sc)
db.save_database_basic(db_name)
self.assertEqual(1, self.db.load_database_basic(db_name))  # initial basic table
def test_pop_array_from_df(self):
df = | pd.DataFrame({"x1": [1, 2, 3]}) | pandas.DataFrame |
# Copyright (c) 2019-2021 - for information on the respective copyright owner
# see the NOTICE file and/or the repository
# https://github.com/boschresearch/pylife
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
import numpy as np
import pandas as pd
import pylife.mesh.meshsignal
def test_plain_mesh_3d():
df = pd.DataFrame({'x': [1.0], 'y': [2.0], 'z': [3.0], 'a': [9.9]})
pd.testing.assert_frame_equal(df.plain_mesh.coordinates,
pd.DataFrame({'x': [1.0], 'y': [2.0], 'z': [3.0]}))
def test_plain_mesh_2d():
df = pd.DataFrame({'x': [1.0], 'y': [2.0], 'b': [3.0], 'a': [9.9]})
pd.testing.assert_frame_equal(df.plain_mesh.coordinates,
pd.DataFrame({'x': [1.0], 'y': [2.0]}))
def test_plain_mesh_3d_dims():
df = pd.DataFrame({'x': [1.0, 2.0], 'y': [2.0, 3.0], 'z': [3.0, 4.0], 'b': [3.0, 3.0]})
assert df.plain_mesh.dimensions == 3
assert df.plain_mesh.dimensions == 3
def test_plain_mesh_2d_dims():
df = pd.DataFrame({'x': [1.0, 2.0], 'y': [2.0, 3.0], 'b': [3.0, 3.0]})
assert df.plain_mesh.dimensions == 2
assert df.plain_mesh.dimensions == 2
def test_plain_mesh_pseudeo_2d_dims():
df = pd.DataFrame({'x': [1.0, 2.0], 'y': [2.0, 3.0], 'z': [3.0, 3.0], 'b': [3.0, 3.0]})
assert df.plain_mesh.dimensions == 2
assert df.plain_mesh.dimensions == 2
def test_plain_mesh_fail():
df = pd.DataFrame({'x': [1.0], 't': [2.0], 'b': [3.0], 'a': [9.9]})
with pytest.raises(AttributeError, match=r'PlainMesh.*Missing y'):
df.plain_mesh.coordinates
def test_mesh_3d():
mi = pd.MultiIndex.from_tuples([(1, 1)], names=['element_id', 'node_id'])
df = pd.DataFrame({'x': [1.0], 'y': [2.0], 'z': [3.0], 'a': [9.9]}).set_index(mi)
pd.testing.assert_frame_equal(df.mesh.coordinates,
| pd.DataFrame({'x': [1.0], 'y': [2.0], 'z': [3.0]}) | pandas.DataFrame |
from skyfield.api import load
import numpy as np
import math
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
from skyfield.api import utc
from scipy.optimize import brentq # machine learning
from datetime import timedelta, datetime
import pytz
# Custom helper functions
from definitions import *
from whereIsData import *
# convert binary to elemental representation
b_to_q = lambda x: [from_binary_to_element_symbols[x[i*2:i*2+2]] for i in [0,1,2]]
b_to_q_el = lambda x: [from_binary_to_element_ix[x[i*2:i*2+2]] for i in [0,1,2]]
b_to_ching = lambda x: [b for b in x]
# index elemental composition of the I Ching - binary becomes a 'triplet' with one of the four elements (air - fire - water - earth)
iching_ix = [b_to_q_el(str(x['binary']))for x in iching]
# binary position of the I Ching - binary becomes a string
iching_binary = [b_to_q(str(x['binary']))for x in iching]
# binary position of the I Ching - binary becomes an array of [1 and 0]
iching_binary_full = [b_to_ching(str(x['binary']))for x in iching]
def get_color_map():
water = "#0E61B0"
air = "#C29F17"
earth = "#55A349"
fire = "#C9280C"
return [air, fire, water, earth]
def __test_hex_binary_to_element():
print ([x for x in '10000'])
print( from_binary_to_element_symbols['00'] )
print ( b_to_q('000000'))
print ( b_to_q_el('101010'))
print ( b_to_ching('111000'))
def neutron_stream_pos(planet_position):
""" returns mandala position (base 64) given planet position"""
return ( (planet_position + (2*line_width - 1*color_width - 2*tone_width) ) / (2*math.pi) * 64) % 64
def map_on_hexagram(df, include=[]):
""" maps df planet positions onto position onto a hexagram and line """
# convert dataframe to numpy array
neutron_stream = df.to_numpy()
hexagram_bin = np.floor(neutron_stream) # rounded up downwards
# map bin number onto 'hexagram' (neutron stream is sequential order, hexagram is King Wen Sequence)
strong = np.array(iching_map)
flat = hexagram_bin.astype(int).flatten()
previous_shape = neutron_stream.shape
mapped = strong[flat]
hexagram = mapped.reshape(previous_shape)
hexagram_fraction = neutron_stream - hexagram_bin
    line = hexagram_fraction // (1/6) + 1  # count which sixth of the hexagram this neutron-stream position falls in
line_fraction = (hexagram_fraction - (line - 1)*1/6 ) / (1/6)
color = line_fraction // (1/6) + 1
color_fraction = (line_fraction - (color -1) * 1/6) / (1/6)
tone = color_fraction // (1/6) + 1
return_info = [hexagram]
if 'lines' in include:
return_info += [line.astype(int)]
return return_info
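# A minimal worked example of map_on_hexagram (illustrative sketch; the actual
# King Wen number depends on iching_map, which comes from `definitions`): a
# neutron-stream position of 10.35 falls in sequential bin 10, and its
# fractional part 0.35 lies in the third sixth of that bin, i.e. line 3.
def _example_map_on_hexagram():
    df_demo = pd.DataFrame({'sun': [10.35]})
    hexagram, line = map_on_hexagram(df_demo, include=['lines'])
    # hexagram[0, 0] == iching_map[10]; line[0, 0] == 3
    return hexagram, line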
def get_elemental_ching_lines_map(df_planets):
# input = ephemeris planets for a certain time period
# map 'neutron' stream, aka influences of the probability field (the planets in the solar system physical space)
# position to index on a wheel
df_planets['earth'] = df_planets['sun'] - math.pi
df_angles = neutron_stream_pos(df_planets)
# index on a wheel to specific binary I-Ching Sequence - King Wen Version
z = map_on_hexagram(df_angles, include=['lines'])
hexagrams = z[0]
lines = z[1]
# many_2_b = np.array(iching_binary) # strong
many_2 = np.array(iching_binary_full) # strong
one_2 = hexagrams.astype(int).flatten() - 1 # flat
# binary el
#el_b = many_2_b[one_2]
# normal el (0 -> 3)
el = many_2[one_2]
finish = el.reshape((df_angles.shape[0], df_angles.shape[1]*6))
return finish.astype(int), lines
def get_elemental_ching_map(df_planets):
# input = ephemeris planets for a certain time period
# map 'neutron' stream, aka influences of the probability field (the planets in the solar system physical space)
# position to index on a wheel
df_planets['earth'] = df_planets['sun'] - math.pi
df_angles = neutron_stream_pos(df_planets)
# index on a wheel to specific binary I-Ching Sequence - King Wen Version
    z = map_on_hexagram(df_angles)[0]  # map_on_hexagram returns a list; take the hexagram array
# many_2_b = np.array(iching_binary) # strong
many_2 = np.array(iching_binary_full) # strong
one_2 = z.astype(int).flatten() - 1 # flat
# binary el
#el_b = many_2_b[one_2]
# normal el (0 -> 3)
el = many_2[one_2]
finish = el.reshape((df_angles.shape[0], df_angles.shape[1]*6))
return finish.astype(int)
def get_elemental_map(df_planets):
# map 'neutron' stream, aka influences of the probability field (the planets in the solar system physical space)
# position to index on a wheel
df_planets['earth'] = df_planets['sun'] - math.pi
df_angles = neutron_stream_pos(df_planets)
# index on a wheel to specific binary I-Ching Sequence - King Wen Version
    z = map_on_hexagram(df_angles)[0]  # map_on_hexagram returns a list; take the hexagram array
many_2_b = np.array(iching_binary) # strong
many_2 = np.array(iching_ix) # strong
one_2 = z.astype(int).flatten() - 1 # flat
# binary el
el_b = many_2_b[one_2]
# normal el (0 -> 3)
el = many_2[one_2]
finish = el.reshape((df_angles.shape[0], df_angles.shape[1]*3))
return finish.astype(int)
def __test_neutron_stream_and_mapping(df_planets):
# map 'neutron' stream, aka influences of the probability field (the planets in the solar system physical space)
df_angles = neutron_stream_pos(df_planets.iloc[:, 1:6])
    z = map_on_hexagram(df_angles)[0]  # take the hexagram array out of the returned list
print (z)
many_2_b = np.array(iching_binary) # strong
many_2 = np.array(iching_ix) # strong
one_2 = z.astype(int).flatten() - 1 # flat
print (many_2_b[63])
# binary el
el_b = many_2_b[one_2]
# normal el (0 -> 3)
el = many_2[one_2]
print (el)
def get_crypto_planet_data(size, mapping_type='elemental'):
""" Returns bitstamp data with size = # of ticks (in minutes for this dataset)
params:
size: # of ticks (seconds in this case)
mapping_type:
- 'elemental' (3-compound elements)
- 'elemntal_ching' (I-Ching Plain Binary)
- 'elemental_ching_lines' (I-Ching Plain Binary + Lines Dummified [1-6 lines -> 6 columns with 0 or 1])
"""
# get planetary positions
_planets, ts = get_planetary_ephemeris()
color_map = get_color_map()
df_c = pd.read_csv('bitstampUSD_1-min_data_2012-01-01_to_2020-09-14.csv', parse_dates=True)
# make data timestamp
df_c['date'] = pd.to_datetime(df_c['Timestamp'], unit='s')
# cast down to hourly data
groupkey = pd.to_datetime(df_c[-size:].date.dt.strftime('%Y-%m-%d %H'))
df_hourly = df_c[-size:].groupby(groupkey).agg({'Close':'last','Volume_(BTC)':'sum'})
df_hourly.head()
first_date = df_hourly.iloc[0].name
print ( first_date )
# generate ephemerial elements
h = first_date.hour
    hours_in_trading_code = len(df_hourly)  # number of hourly ticks to generate ephemeris entries for
t_time_array = ts.utc(first_date.year, first_date.month, first_date.day, range(h,h+hours_in_trading_code), 0) # -3000 BC to 3000 BC, increments in hours
# generate empheremis for time period
df_crypto_planets = generate_planets(_planets, ts, t_time_array) # can take a while
# selected desired planets for attribution
r = ['earth','moon','mercury','venus','sun', 'mars', 'jupiter','saturn', 'uranus','neptune']
r.reverse()
# create elemental data map
if mapping_type == 'elemental':
data_tmp = get_elemental_map(df_crypto_planets.loc[:,r])
elif mapping_type == 'elemental_ching':
data_tmp = get_elemental_ching_map(df_crypto_planets.loc[:,r])
    elif mapping_type == 'elemental_ching_lines':
data_tmp, lines = get_elemental_ching_lines_map(df_crypto_planets.loc[:,r])
# return data_tmp, lines
# plot data map
fig, ax = plt.subplots(figsize=(5,5))
sns.heatmap(data_tmp.transpose(), ax=ax, cmap=color_map, cbar=False)
    if mapping_type == 'elemental_ching_lines':
        # the line heatmap only exists when lines were computed
        fig, ax = plt.subplots(figsize=(5, 5))
        sns.heatmap(lines.transpose(), ax=ax, cmap=color_map, cbar=False)
if mapping_type == 'elemental' or mapping_type == 'elemental_ching':
# create the training dataset [Close, Solar System Time]
df_solar = | pd.DataFrame(data_tmp) | pandas.DataFrame |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import pandas as pd
from datetime import datetime, timedelta
import numpy as np
from scipy.stats import pearsonr
# from mpl_toolkits.axes_grid1 import host_subplot
# import mpl_toolkits.axisartist as AA
# import matplotlib
import matplotlib.pyplot as plt
import matplotlib.ticker as tck
import matplotlib.font_manager as fm
import math as m
import matplotlib.dates as mdates
import netCDF4 as nc
from netCDF4 import Dataset
import itertools
import datetime
from scipy.stats import ks_2samp
Estacion = '6001'
df1 = pd.read_table('/home/nacorreasa/Maestria/Datos_Tesis/Piranometro/6001Historico.txt', parse_dates=[2])
Theoric_rad_method = 'GIS_Model'   ##-->> MUST BE 'GIS_Model' TO USE THE GIS MODEL
resolucion = 'diaria'              ##-->> OPTIONS ARE 'diaria' (daily) OR 'horaria' (hourly)
#-----------------------------------------------------------------------------
# Paths for the fonts --------------------------------------------------------
prop = fm.FontProperties(fname='/home/nacorreasa/SIATA/Cod_Califi/AvenirLTStd-Heavy.otf' )
prop_1 = fm.FontProperties(fname='/home/nacorreasa/SIATA/Cod_Califi/AvenirLTStd-Book.otf')
prop_2 = fm.FontProperties(fname='/home/nacorreasa/SIATA/Cod_Califi/AvenirLTStd-Black.otf')
## ---THEORETICAL RADIATION CALCULATION--- ##
def daterange(start_date, end_date):
    'Adjusts the dates for the Kumar model every 10 min. The start and end dates are strings in %Y-%m-%d format.'
start_date = datetime.datetime.strptime(start_date, '%Y-%m-%d')
end_date = datetime.datetime.strptime(end_date, '%Y-%m-%d')
delta = timedelta(minutes=10)
while start_date <= end_date:
yield start_date
start_date += delta
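# Quick usage sketch of daterange (the dates below are arbitrary examples): the
# generator yields datetimes every 10 minutes from the start date to the end
# date inclusive, e.g. 145 steps for a single day.
def _example_daterange():
    steps = list(daterange('2018-01-01', '2018-01-02'))
    # steps[0] == datetime.datetime(2018, 1, 1, 0, 0); len(steps) == 145
    return steps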
def serie_Kumar_Model_hora(estacion):
    'Returns an hourly dataframe with the theoretical radiation following the Kumar recommendations, prepared by <NAME> ' \
    'for the AMVA and their thesis. The original dataframe is sorted into 12 ascending months (2018), although they may ' \
    'belong to different years. The result is for the selected point, using the Total_Timeseries.csv file.'
data_Model = pd.read_csv('/home/nacorreasa/Maestria/Datos_Tesis/Radiacion_GIS/Teoricos_nati/Total_Timeseries.csv',
sep=',')
fecha_hora = [pd.to_datetime(data_Model['Unnamed: 0'], format="%Y-%m-%d %H:%M:%S")[i].to_pydatetime() for i in
range(len(data_Model['Unnamed: 0']))]
data_Model.index = fecha_hora
data_Model = data_Model.sort_index()
data_Model['Month'] = np.array(data_Model.index.month)
data_Model = data_Model.sort_values(by="Month")
fechas = []
for i in daterange('2018-01-01', '2019-01-01'):
fechas.append(i)
fechas = fechas[0:-1]
if estacion == '6001':
punto = data_Model['TS_kumar']
elif estacion == '6002':
punto = data_Model['CI_kumar']
elif estacion == '6003':
punto = data_Model['JV_kumar']
Rad_teorica = []
for i in range(len(fechas)):
mes = fechas[i].month
hora = fechas[i].hour
mint = fechas[i].minute
rad = \
np.where((data_Model.index.month == mes) & (data_Model.index.hour == hora) & (data_Model.index.minute == mint))[
0]
if len(rad) == 0:
Rad_teorica.append(np.nan)
else:
Rad_teorica.append(punto.iloc[rad].values[0])
data_Theorical = pd.DataFrame()
data_Theorical['fecha_hora'] = fechas
data_Theorical['Radiacion_Teo'] = Rad_teorica
data_Theorical.index = data_Theorical['fecha_hora']
df_hourly_theoric = data_Theorical.groupby(pd.Grouper(freq="H")).mean()
df_hourly_theoric = df_hourly_theoric[df_hourly_theoric['Radiacion_Teo'] > 0]
return df_hourly_theoric
def Elevation_RadiationTA(n, lat, lon, start):
    'Obtains the radiation in W/m2 and the solar elevation angle in degrees, hourly, for a number "n" of ' \
    'days at a point with a given latitude and longitude ("lat-lon" as floats), starting from a start date ' \
    '"start" such as datetime.datetime(2018, 1, 1, 8).'
import pysolar
import pytz
import datetime
timezone = pytz.timezone("America/Bogota")
start_aware = timezone.localize(start)
# Calculate radiation every hour for 365 days
nhr = 24*n
dates, altitudes_deg, radiations = list(), list(), list()
for ihr in range(nhr):
date = start_aware + datetime.timedelta(hours=ihr)
altitude_deg = pysolar.solar.get_altitude(lat, lon, date)
if altitude_deg <= 0:
radiation = 0.
else:
radiation = pysolar.radiation.get_radiation_direct(date, altitude_deg)
dates.append(date)
altitudes_deg.append(altitude_deg)
radiations.append(radiation)
days = [ihr/24 for ihr in range(nhr)]
return days, altitudes_deg, radiations
if Theoric_rad_method != 'GIS_Model' and Estacion == '6001':
days, altitudes_deg, Io_hora = Elevation_RadiationTA(365, 6.259, -75.588, datetime.datetime(2018, 1, 1, 0))
print('Teorica con pysolar')
elif Theoric_rad_method != 'GIS_Model' and Estacion == '6002':
days, altitudes_deg, Io_hora = Elevation_RadiationTA(365, 6.168, -75.644, datetime.datetime(2018, 1, 1, 0))
print('Teorica con pysolar')
elif Theoric_rad_method != 'GIS_Model' and Estacion == '6003':
days, altitudes_deg, Io_hora = Elevation_RadiationTA(365, 6.255, -75.542, datetime.datetime(2018, 1, 1, 0))
print('Teorica con pysolar')
elif Theoric_rad_method == 'GIS_Model':
Io_hora = serie_Kumar_Model_hora(Estacion)
print('Teorica con el modelo de KUMAR')
###############################################################################
##---------THEORETICAL EFFICIENCIES AS A PROXY FOR ATMOSPHERIC TRANSPARENCY---##
###############################################################################
'Calculation of the theoretical efficiency as a proxy for the transparency of the atmosphere.'
'This uses the pyranometer information and the theoretical radiation'
'from <NAME>; the aim is to obtain the characteristics derived from the'
'stochastic analysis, similar to <NAME> in their doctoral thesis.'
##---------------------READING THE EXPERIMENT DATA----------------------------##
df_P975 = pd.read_csv('/home/nacorreasa/Maestria/Datos_Tesis/Experimentos_Panel/Panel975.txt', sep=',', index_col =0)
df_P350 = pd.read_csv('/home/nacorreasa/Maestria/Datos_Tesis/Experimentos_Panel/Panel350.txt', sep=',', index_col =0)
df_P348 = pd.read_csv('/home/nacorreasa/Maestria/Datos_Tesis/Experimentos_Panel/Panel348.txt', sep=',', index_col =0)
df_P975['Fecha_hora'] = df_P975.index
df_P350['Fecha_hora'] = df_P350.index
df_P348['Fecha_hora'] = df_P348.index
df_P975.index = pd.to_datetime(df_P975.index, format="%Y-%m-%d %H:%M:%S", errors='coerce')
df_P350.index = pd.to_datetime(df_P350.index, format="%Y-%m-%d %H:%M:%S", errors='coerce')
df_P348.index = pd.to_datetime(df_P348.index, format="%Y-%m-%d %H:%M:%S", errors='coerce')
## ---------------RESTRICTING THE DATA TO VALID VALUES----------------- ##
'Since radiation is what matters in this case, the data filtering will keep'
'power values greater than or equal to 0, which appear to be generated one'
'hour after radiation starts to arrive.'
df_P975 = df_P975[(df_P975['radiacion'] > 0) & (df_P975['strength'] >= 0) & (df_P975['NI'] >= 0)]
df_P350 = df_P350[(df_P350['radiacion'] > 0) & (df_P350['strength'] >= 0) & (df_P350['NI'] >= 0)]
df_P348 = df_P348[(df_P348['radiacion'] > 0) & (df_P348['strength'] >= 0) & (df_P348['NI'] >= 0)]
df_P975_h = df_P975.groupby(pd.Grouper(level='fecha_hora', freq='1H')).mean()
df_P350_h = df_P350.groupby(pd.Grouper(level='fecha_hora', freq='1H')).mean()
df_P348_h = df_P348.groupby(pd.Grouper(level='fecha_hora', freq='1H')).mean()
df_P975_h = df_P975_h.between_time('06:00', '17:00')
df_P350_h = df_P350_h.between_time('06:00', '17:00')
df_P348_h = df_P348_h.between_time('06:00', '17:00')
##----ADJUSTING THE THEORETICAL RADIATION DATA TO THE DESIRED DATE RANGE------##
def daterange(start_date, end_date):
    'Adjusts the dates for the Kumar model every hour. The start and end'
    'dates are strings in %Y-%m-%d format.'
start_date = datetime.datetime.strptime(start_date, '%Y-%m-%d')
end_date = datetime.datetime.strptime(end_date, '%Y-%m-%d')
delta = timedelta(minutes=60)
while start_date <= end_date:
yield start_date
start_date += delta
Io_hora_975 = serie_Kumar_Model_hora('6001')
Io_hora_350 = serie_Kumar_Model_hora('6002')
Io_hora_348 = serie_Kumar_Model_hora('6003')
fechas_975 = []
for i in daterange(df_P975.index[0].date().strftime("%Y-%m-%d"), (df_P975.index[-1].date() + timedelta(days=1)).strftime("%Y-%m-%d")):
fechas_975.append(i)
fechas_350 = []
for i in daterange(df_P350.index[0].date().strftime("%Y-%m-%d"), (df_P350.index[-1].date() + timedelta(days=1)).strftime("%Y-%m-%d")):
fechas_350.append(i)
fechas_348 = []
for i in daterange(df_P348.index[0].date().strftime("%Y-%m-%d"), (df_P348.index[-1].date() + timedelta(days=1)).strftime("%Y-%m-%d")):
fechas_348.append(i)
Io_hora_975 = Io_hora_975.loc[(Io_hora_975.index >= '2018-03-20') & (Io_hora_975.index <= '2018-'+str(df_P975.index[-1].month)+'-'+str(df_P975.index[-1].day+1))]
Io_hora_350 = Io_hora_350.loc[(Io_hora_350.index >= '2018-03-22') & (Io_hora_350.index <= '2018-'+str(df_P350.index[-1].month)+'-'+str(df_P350.index[-1].day+1))]
Io_hora_348 = Io_hora_348.loc[(Io_hora_348.index >= '2018-03-23') & (Io_hora_348.index <= '2018-'+str(df_P348.index[-1].month)+'-'+str(df_P348.index[-1].day+1))]
Io_hora_975 = Io_hora_975.between_time('06:00', '17:00')
Io_hora_975.index = [Io_hora_975.index[i].replace(year=2019) for i in range(len(Io_hora_975.index))]
Io_hora_350 = Io_hora_350.between_time('06:00', '17:00')
Io_hora_350.index = [Io_hora_350.index[i].replace(year=2019) for i in range(len(Io_hora_350.index))]
Io_hora_348 = Io_hora_348.between_time('06:00', '17:00')
Io_hora_348.index = [Io_hora_348.index[i].replace(year=2019) for i in range(len(Io_hora_348.index))]
df_Rad_P975 = pd.concat([Io_hora_975, df_P975_h], axis = 1)
df_Rad_P350 = pd.concat([Io_hora_350, df_P350_h], axis = 1)
df_Rad_P348 = pd.concat([Io_hora_348, df_P348_h], axis = 1)
df_Rad_P975 = df_Rad_P975.drop(['NI','strength'], axis=1)
df_Rad_P350 = df_Rad_P350.drop(['NI','strength'], axis=1)
df_Rad_P348 = df_Rad_P348.drop(['NI','strength'], axis=1)
##-----------------REAL EFFICIENCY AS A PROXY FOR TRANSPARENCY---------------##
df_Rad_P975['Efi_Transp'] = df_Rad_P975['radiacion'] / df_Rad_P975['Radiacion_Teo']
df_Rad_P350['Efi_Transp'] = df_Rad_P350['radiacion'] / df_Rad_P350['Radiacion_Teo']
df_Rad_P348['Efi_Transp'] = df_Rad_P348['radiacion'] / df_Rad_P348['Radiacion_Teo']
##------------HOURS AT WHICH THE HIGHEST EFFICIENCY OCCURS AND THEIR HISTOGRAM----------------##
'The frequency of the hours that exceeded the efficiency maximum (1) is shown in the histogram'
'below. The result shows that the highest frequencies occur at 6 and 7 in the morning,'
'which is attributable to shortcomings of the radiation model under clear-sky conditions'
'at those points.'
Hour_Max_Efi_975 = df_Rad_P975[df_Rad_P975['Efi_Transp']>1].index.hour
Hour_Max_Efi_350 = df_Rad_P350[df_Rad_P350['Efi_Transp']>1].index.hour
Hour_Max_Efi_348 = df_Rad_P348[df_Rad_P348['Efi_Transp']>1].index.hour
fig = plt.figure(figsize=[10, 6])
plt.rc('axes', edgecolor='gray')
ax1 = fig.add_subplot(1, 3, 1)
ax1.spines['top'].set_visible(False)
ax1.spines['right'].set_visible(False)
ax1.hist(Hour_Max_Efi_348, bins='auto', alpha = 0.5)
ax1.set_title(u'Distribución horas de excedencia \n de la eficiencia en JV', fontproperties=prop, fontsize = 8)
ax1.set_ylabel(u'Frecuencia', fontproperties=prop_1)
ax1.set_xlabel(u'Horas', fontproperties=prop_1)
ax1.legend()
ax2 = fig.add_subplot(1, 3, 2)
ax2.spines['top'].set_visible(False)
ax2.spines['right'].set_visible(False)
ax2.hist(Hour_Max_Efi_350, bins='auto', alpha = 0.5)
ax2.set_title(u'Distribución horas de excedencia \n de la eficiencia en CI', fontproperties=prop, fontsize = 8)
ax2.set_ylabel(u'Frecuencia', fontproperties=prop_1)
ax2.set_xlabel(u'Horas', fontproperties=prop_1)
ax2.legend()
ax3 = fig.add_subplot(1, 3, 3)
ax3.spines['top'].set_visible(False)
ax3.spines['right'].set_visible(False)
ax3.hist(Hour_Max_Efi_975, bins='auto', alpha = 0.5)
ax3.set_title(u'Distribución horas de excedencia \n de la eficiencia en TS', fontproperties=prop, fontsize = 8)
ax3.set_ylabel(u'Frecuencia', fontproperties=prop_1)
ax3.set_xlabel(u'Horas', fontproperties=prop_1)
ax3.legend()
plt.savefig('/home/nacorreasa/Escritorio/Figuras/HistoHoraExceEfi.png')
plt.show()
##--------DISCRIMINATING BETWEEN RAINY AND DRY DAYS BY RADIATION PERCENTILES-------------##
'To deal with days on which the pyranometers only measured during a fraction of the day,'
'because of possible damage or disturbances, only days with at least 6 hours of'
'measurements are considered.'
df_Rad_P975_count_h_pira = df_Rad_P975.groupby(pd.Grouper(freq="D")).count()['radiacion']>6
df_Rad_P350_count_h_pira = df_Rad_P350.groupby(pd.Grouper(freq="D")).count()['radiacion']>6
df_Rad_P348_count_h_pira = df_Rad_P348.groupby(pd.Grouper(freq="D")).count()['radiacion']>6
days_P975_count_h_pira = df_Rad_P975_count_h_pira.index[df_Rad_P975_count_h_pira == True]
days_P350_count_h_pira = df_Rad_P350_count_h_pira.index[df_Rad_P350_count_h_pira == True]
days_P348_count_h_pira = df_Rad_P348_count_h_pira.index[df_Rad_P348_count_h_pira == True]
'Thresholds were established empirically to select the markedly cloudy and'
'markedly clear days within the recording period, following the procedures in the'
'Umbrales_Radiacion_Piranometro.py program.'
Sum_df_Rad_P975 = df_Rad_P975.groupby(pd.Grouper(freq='1D')).sum()
Sum_df_Rad_P350 = df_Rad_P350.groupby(pd.Grouper(freq='1D')).sum()
Sum_df_Rad_P348 = df_Rad_P348.groupby(pd.Grouper(freq='1D')).sum()
Sum_df_Rad_P975 = Sum_df_Rad_P975[Sum_df_Rad_P975['radiacion']>0]
Sum_df_Rad_P350 = Sum_df_Rad_P350[Sum_df_Rad_P350['radiacion']>0]
Sum_df_Rad_P348 = Sum_df_Rad_P348[Sum_df_Rad_P348['radiacion']>0]
lista_days_975 = []
for i in range(len(Sum_df_Rad_P975)):
if Sum_df_Rad_P975.index[i] in days_P975_count_h_pira:
lista_days_975.append(1)
else:
lista_days_975.append(0)
Sum_df_Rad_P975['days'] = lista_days_975
Sum_df_Rad_P975 = Sum_df_Rad_P975[Sum_df_Rad_P975['days'] == 1]
Sum_df_Rad_P975 = Sum_df_Rad_P975.drop(['days'], axis = 1)
lista_days_350 = []
for i in range(len(Sum_df_Rad_P350)):
if Sum_df_Rad_P350.index[i] in days_P350_count_h_pira:
lista_days_350.append(1)
else:
lista_days_350.append(0)
Sum_df_Rad_P350['days'] = lista_days_350
Sum_df_Rad_P350 = Sum_df_Rad_P350[Sum_df_Rad_P350['days'] == 1]
Sum_df_Rad_P350 = Sum_df_Rad_P350.drop(['days'], axis = 1)
lista_days_348 = []
for i in range(len(Sum_df_Rad_P348)):
if Sum_df_Rad_P348.index[i] in days_P348_count_h_pira:
lista_days_348.append(1)
else:
lista_days_348.append(0)
Sum_df_Rad_P348['days'] = lista_days_348
Sum_df_Rad_P348 = Sum_df_Rad_P348[Sum_df_Rad_P348['days'] == 1]
Sum_df_Rad_P348 = Sum_df_Rad_P348.drop(['days'], axis = 1)
Desp_Pira_975 = Sum_df_Rad_P975[Sum_df_Rad_P975.radiacion>=(Sum_df_Rad_P975.Radiacion_Teo)*0.85]
Desp_Pira_350 = Sum_df_Rad_P350[Sum_df_Rad_P350.radiacion>=(Sum_df_Rad_P350.Radiacion_Teo)*0.78]
Desp_Pira_348 = Sum_df_Rad_P348[Sum_df_Rad_P348.radiacion>=(Sum_df_Rad_P348.Radiacion_Teo)*0.80]
Nuba_Pira_975 = Sum_df_Rad_P975[Sum_df_Rad_P975.radiacion<=(Sum_df_Rad_P975.Radiacion_Teo)*0.25]
Nuba_Pira_350 = Sum_df_Rad_P350[Sum_df_Rad_P350.radiacion<=(Sum_df_Rad_P350.Radiacion_Teo)*0.25]
Nuba_Pira_348 = Sum_df_Rad_P348[Sum_df_Rad_P348.radiacion<=(Sum_df_Rad_P348.Radiacion_Teo)*0.22]
Appended_data_desp_975 = []
for i in range(len(Desp_Pira_975.index.values)):
Appended_data_desp_975.append(df_P975_h[df_P975_h.index.date == Desp_Pira_975.index.date[i]])
Appended_data_desp_975 = pd.concat(Appended_data_desp_975)
Appended_data_desp_350 = []
for i in range(len(Desp_Pira_350.index.values)):
Appended_data_desp_350.append(df_P350_h[df_P350_h.index.date == Desp_Pira_350.index.date[i]])
Appended_data_desp_350 = pd.concat(Appended_data_desp_350)
Appended_data_desp_348 = []
for i in range(len(Desp_Pira_348.index.values)):
Appended_data_desp_348.append(df_P348_h[df_P348_h.index.date == Desp_Pira_348.index.date[i]])
Appended_data_desp_348 = pd.concat(Appended_data_desp_348)
Appended_data_nuba_975 = []
for i in range(len(Nuba_Pira_975.index.values)):
Appended_data_nuba_975.append(df_P975_h[df_P975_h.index.date == Nuba_Pira_975.index.date[i]])
Appended_data_nuba_975 = pd.concat(Appended_data_nuba_975)
Appended_data_nuba_350 = []
for i in range(len(Nuba_Pira_350.index.values)):
Appended_data_nuba_350.append(df_P350_h[df_P350_h.index.date == Nuba_Pira_350.index.date[i]])
Appended_data_nuba_350 = pd.concat(Appended_data_nuba_350)
Appended_data_nuba_348 = []
for i in range(len(Nuba_Pira_348.index.values)):
Appended_data_nuba_348.append(df_P348_h[df_P348_h.index.date == Nuba_Pira_348.index.date[i]])
Appended_data_nuba_348 = pd.concat(Appended_data_nuba_348)
#-----------------RADIATION HISTOGRAMS FOR EACH POINT IN THE TWO CASES----------------------##
fig = plt.figure(figsize=[10, 6])
plt.rc('axes', edgecolor='gray')
ax1 = fig.add_subplot(1, 3, 1)
ax1.spines['top'].set_visible(False)
ax1.spines['right'].set_visible(False)
ax1.hist(Appended_data_desp_348['radiacion'], bins='auto', alpha = 0.5, color = 'orange', label = 'Desp')
ax1.hist(Appended_data_nuba_348['radiacion'], bins='auto', alpha = 0.5, color = 'blue', label = 'Nub')
ax1.set_title(u'Distribución de la radiación \n en dias dispejados y nublados en JV', fontproperties=prop, fontsize = 8)
ax1.set_ylabel(u'Frecuencia', fontproperties=prop_1)
ax1.set_xlabel(u'Radiación $[W/m^{2}]$', fontproperties=prop_1)
ax1.legend()
ax2 = fig.add_subplot(1, 3, 2)
ax2.spines['top'].set_visible(False)
ax2.spines['right'].set_visible(False)
ax2.hist(Appended_data_desp_350['radiacion'], bins='auto', alpha = 0.5, color = 'orange', label = 'Desp')
ax2.hist(Appended_data_nuba_350['radiacion'], bins='auto', alpha = 0.5, color = 'blue', label = 'Nub')
ax2.set_title(u'Distribución de la radiación \n en dias dispejados y nublados en CI', fontproperties=prop, fontsize = 8)
ax2.set_ylabel(u'Frecuencia', fontproperties=prop_1)
ax2.set_xlabel(u'Radiación $[W/m^{2}]$', fontproperties=prop_1)
ax2.legend()
ax3 = fig.add_subplot(1, 3, 3)
ax3.spines['top'].set_visible(False)
ax3.spines['right'].set_visible(False)
ax3.hist(Appended_data_desp_975['radiacion'], bins='auto', alpha = 0.5, color = 'orange', label = 'Desp')
ax3.hist(Appended_data_nuba_975['radiacion'], bins='auto', alpha = 0.5, color = 'blue', label = 'Nub')
ax3.set_title(u'Distribución de la radiación \n en dias dispejados y nublados en TS', fontproperties=prop, fontsize = 8)
ax3.set_ylabel(u'Frecuencia', fontproperties=prop_1)
ax3.set_xlabel(u'Radiación $[W/m^{2}]$', fontproperties=prop_1)
ax3.legend()
plt.savefig('/home/nacorreasa/Escritorio/Figuras/HistoRadiacionNubaDespTotal.png')
plt.show()
#------------------KOLMOGOROV-SMIRNOV GOODNESS-OF-FIT TEST------------------------------##
'The KOLMOGOROV-SMIRNOV goodness-of-fit test is applied to the cloudy-day and clear-day data'
'against the overall data series, to evaluate whether they belong to the same probability'
'distribution function. A significance level of 5% is used. This test is more sensitive to'
'values near the mean than to the extremes, so in general it can be used to avoid outliers.'
'The null hypothesis is that the data of both series follow the same distribution; the'
'alternative hypothesis suggests that they do not follow the same distribution.'
Significancia = 0.05
SK_desp_348 = ks_2samp(Appended_data_desp_348['radiacion'].values,df_P348_h['radiacion'].values)
stat_348_desp = SK_desp_348[0]
pvalue_348_desp = SK_desp_348[1]
SK_nuba_348 = ks_2samp(Appended_data_nuba_348['radiacion'].values,df_P348_h['radiacion'].values)
stat_348_nuba = SK_nuba_348[0]
pvalue_348_nuba = SK_nuba_348[1]
if pvalue_348_nuba <= Significancia:
print ('los dias nublados en JV no pertenecen a la misma distribución')
else:
print ('los dias nublados en JV pertenecen a la misma distribución')
if pvalue_348_desp <= Significancia:
print ('los dias despejados en JV no pertenecen a la misma distribución')
else:
print ('los dias despejados en JV pertenecen a la misma distribución')
SK_desp_350 = ks_2samp(Appended_data_desp_350['radiacion'].values,df_P350_h['radiacion'].values)
stat_350_desp = SK_desp_350[0]
pvalue_350_desp = SK_desp_350[1]
SK_nuba_350 = ks_2samp(Appended_data_nuba_350['radiacion'].values,df_P350_h['radiacion'].values)
stat_350_nuba = SK_nuba_350[0]
pvalue_350_nuba = SK_nuba_350[1]
if pvalue_350_nuba <= Significancia:
print ('los dias nublados en CI no pertenecen a la misma distribución')
else:
print ('los dias nublados en CI pertenecen a la misma distribución')
if pvalue_350_desp <= Significancia:
print ('los dias despejados en CI no pertenecen a la misma distribución')
else:
print ('los dias despejados en CI pertenecen a la misma distribución')
SK_desp_975 = ks_2samp(Appended_data_desp_975['radiacion'].values,df_P975_h['radiacion'].values)
stat_975_desp = SK_desp_975[0]
pvalue_975_desp = SK_desp_975[1]
SK_nuba_975 = ks_2samp(Appended_data_nuba_975['radiacion'].values,df_P975_h['radiacion'].values)
stat_975_nuba = SK_nuba_975[0]
pvalue_975_nuba = SK_nuba_975[1]
if pvalue_975_nuba <= Significancia:
print ('los dias nublados en TS no pertenecen a la misma distribución')
else:
print ('los dias nublados en TS pertenecen a la misma distribución')
if pvalue_975_desp <= Significancia:
print ('los dias despejados en TS no pertenecen a la misma distribución')
else:
print ('los dias despejados en TS pertenecen a la misma distribución')
#-----------------EFFICIENCY HISTOGRAMS FOR EACH POINT IN THE TWO CASES---------------------##
Desp_Efi_348 = []
for i in range(len(Desp_Pira_348.index.values)):
Desp_Efi_348.append(df_Rad_P348[df_Rad_P348.index.date == Desp_Pira_348.index.date[i]])
Desp_Efi_348 = pd.concat(Desp_Efi_348)
Desp_Efi_350 = []
for i in range(len(Desp_Pira_350.index.values)):
Desp_Efi_350.append(df_Rad_P350[df_Rad_P350.index.date == Desp_Pira_350.index.date[i]])
Desp_Efi_350 = pd.concat(Desp_Efi_350)
Desp_Efi_975 = []
for i in range(len(Desp_Pira_975.index.values)):
Desp_Efi_975.append(df_Rad_P975[df_Rad_P975.index.date == Desp_Pira_975.index.date[i]])
Desp_Efi_975 = pd.concat(Desp_Efi_975)
Nuba_Efi_348 = []
for i in range(len(Nuba_Pira_348.index.values)):
Nuba_Efi_348.append(df_Rad_P348[df_Rad_P348.index.date == Nuba_Pira_348.index.date[i]])
Nuba_Efi_348 = | pd.concat(Nuba_Efi_348) | pandas.concat |
import numpy as np
import pandas as pd
from pandas import DataFrame
from sklearn import preprocessing
from sklearn.base import BaseEstimator
def calc_canceling_fund(estimated_vacation_time,
cancelling_policy_code,
original_selling_amount,
normalize=True):
policy_options = cancelling_policy_code.split("_")
cost_sum = 0
for option in policy_options:
if "D" in option:
if "P" in option:
charge = int(option[option.find("D") + 1:option.find("P")])
charge /= 100
cost_sum += original_selling_amount * charge
if "N" in option:
charge = int(option[option.find("D") + 1:option.find("N")])
charge /= estimated_vacation_time
cost_sum += original_selling_amount * charge
elif "P" in option:
charge = int(option[option.find("D") + 1:option.find("P")])
charge /= 100
cost_sum += original_selling_amount * charge
if normalize:
return ((cost_sum / len(policy_options)) / original_selling_amount) * 100
return cost_sum / len(policy_options)
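# Illustrative sketch of the policy-code format this parser assumes: options are
# separated by "_", "30D50P" reads as "a 50% charge within 30 days of check-in"
# and "100P" as a flat 100% charge. With normalize=True the result is the average
# charge expressed as a percentage of the selling price.
def _example_calc_canceling_fund():
    # (50% + 100%) / 2 == 75.0
    return calc_canceling_fund(estimated_vacation_time=4,
                               cancelling_policy_code="30D50P_100P",
                               original_selling_amount=1000.0)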
def add_hour_of_day(full_data: DataFrame):
full_data["hour"] = pd.to_datetime(full_data["booking_datetime"]).dt.hour
def add_month_of_booking(full_data: DataFrame):
full_data["month_of_booking"] = pd.to_datetime(full_data["booking_datetime"]).dt.month
def add_month_of_cheking(full_data):
full_data["month_of_checkin"] = pd.to_datetime(full_data["checkin_date"]).dt.month
# def payment_type_to_hashcode(full_data):
# payment_type = set(full_data["original_payment_type"])
# payment_type_dict = {k: v for v, k in enumerate(payment_type)}
# full_data.replace({"original_payment_type": payment_type_dict},
# inplace=True)
# bool_dict = {True: 0, False: 1}
# full_data.replace({"is_first_booking": bool_dict}, inplace=True)
# def accommodation_to_hashcode(full_data):
# accommodation_type = set(full_data["accommadation_type_name"])
# accommodation_type_dict = {k: v for v, k in enumerate(accommodation_type)}
# full_data.replace({"accommadation_type_name": accommodation_type_dict},
# inplace=True)
# def country_to_hashcode(full_data):
# countries_code = set(full_data["origin_country_code"]).union(
# set(full_data["hotel_country_code"]))
#
# countries_code_dict = {k: v for v, k in enumerate(countries_code)}
# full_data.replace({"origin_country_code": countries_code_dict},
# inplace=True)
# full_data.replace({"hotel_country_code": countries_code_dict},
# inplace=True)
def calculate_canceling_fund_present(full_data):
temp = full_data[["cancellation_policy_code", "original_selling_amount", "estimated_stay_time"]]
res = temp.apply(lambda x: calc_canceling_fund(x['estimated_stay_time'], x['cancellation_policy_code'],
x['original_selling_amount']), axis=1)
# res_ = temp.apply(lambda x: calc_canceling_fund(x['estimated_stay_time'], x['cancellation_policy_code'],
# x['original_selling_amount'], False), axis=1)
full_data["avg_cancelling_fund_percent"] = res
# full_data["total_cancelling_fund"] = res_
def calc_booking_checking(full_data):
checking_date = pd.to_datetime(full_data["checkin_date"])
booking_date = | pd.to_datetime(full_data["booking_datetime"]) | pandas.to_datetime |
from warnings import filterwarnings
filterwarnings("ignore")
from nltk.tokenize import RegexpTokenizer
from nltk.stem import WordNetLemmatizer,PorterStemmer
from nltk.corpus import stopwords
import nltk
import json
import urllib
import re
import pandas as pd
from vaderSentiment.vaderSentiment import SentimentIntensityAnalyzer
analyzer = SentimentIntensityAnalyzer()
lemmatizer = WordNetLemmatizer()
stemmer = PorterStemmer()
#If you get stopwords error pleasew uncomment the following two lines.
# nltk.download('stopwords')
# nltk.download('wordnet')
def get_compund_score(text):
    # Compute the VADER compound polarity score for a piece of text.
    score = analyzer.polarity_scores(str(text))
    return score['compound']
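# Small sanity-check sketch: VADER returns a compound polarity in [-1, 1]; the
# exact value depends on the installed vaderSentiment lexicon, but a clearly
# positive sentence should score well above 0.
def _example_get_compund_score():
    return get_compund_score("I really love this product")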
def preprocess(sentence):
sentence=str(sentence)
sentence = sentence.lower()
sentence=sentence.replace('{html}',"")
cleanr = re.compile('<.*?>')
cleantext = re.sub(cleanr, '', sentence)
rem_url=re.sub(r'http\S+', '',cleantext)
rem_num = re.sub('[0-9]+', '', rem_url)
tokenizer = RegexpTokenizer(r'\w+')
tokens = tokenizer.tokenize(rem_num)
filtered_words = [w for w in tokens if len(w) > 2 if not w in stopwords.words('english')]
stem_words=[stemmer.stem(w) for w in filtered_words]
lemma_words=[lemmatizer.lemmatize(w) for w in stem_words]
return " ".join(filtered_words)
def get_score_by_comment_id(id):
x=[]
html = urllib.request.urlopen(
'https://hacker-news.firebaseio.com/v0/item/' + str(id) + '.json')
x.append(json.loads(html.read()))
df = pd.DataFrame.from_dict(x)
df_comments = df[df['type'] == 'comment']
df_comments['clean_text']= df_comments['text'].map(lambda s:preprocess(s))
df_comments['clean_vader_score'] = df_comments['clean_text'].apply(
get_compund_score)
return (df_comments['clean_vader_score'][0])
def get_score_for_entries(entries):
data = []
max_entries = 25
count = 0
for id in entries:
html = urllib.request.urlopen('https://hacker-news.firebaseio.com/v0/item/' + str(id) + '.json')
data.append(json.loads(html.read()))
count += 1
if count > max_entries:
break
data = [i for i in data if i is not None]
df = | pd.DataFrame.from_dict(data) | pandas.DataFrame.from_dict |
import numpy as np
import pandas as pd
import dstk
import random
# Create test data
# Class creation test dataset
df_create = | pd.DataFrame() | pandas.DataFrame |
__version__ = '0.0.1'
__author__ = '<NAME>, 2020'
import re
import numpy as np
import pandas as pd
fields = {
'AC': 'activating compound',
'AP': 'application',
'CF': 'cofactor',
'CL': 'cloned',
'CR': 'crystallization',
'EN': 'engineering',
'EXP': 'expression',
'GI': 'general information on enzyme',
'GS': 'general stability',
'IC50': 'IC-50 Value',
'ID': 'EC-class',
'IN': 'inhibitors',
'KKM': 'Kcat/KM-Value substrate in {...}',
'KI': 'Ki-value, inhibitor in {...}',
'KM': 'KM-value, substrate in {...}',
'LO': 'localization',
'ME': 'metals/ions',
'MW': 'molecular weight',
'NSP': 'natural substrates/products reversibilty information in {...}',
'OS': 'oxygen stability',
'OSS': 'organic solvent stability',
'PHO': 'pH-optimum',
'PHR': 'pH-range',
'PHS': 'pH stability',
'PI': 'isoelectric point',
'PM': 'posttranslation modification',
'PR': 'protein',
'PU': 'purification',
'RE': 'reaction catalyzed',
'RF': 'references',
'REN': 'renatured',
'RN': 'accepted name (IUPAC)',
'RT': 'reaction type',
'SA': 'specific activity',
'SN': 'synonyms',
'SP': 'substrates/products, reversibilty information in {...}',
'SS': 'storage stability',
'ST': 'source/tissue',
'SU': 'subunits',
'SY': 'systematic name',
'TN': 'turnover number, substrate in {...}',
'TO': 'temperature optimum',
'TR': 'temperature range',
'TS': 'temperature stability'
}
units = {
'KM': 'mM',
'KI': 'mM',
'TN': '$s^{-1}$',
'SA': '$µmol.min^{-1}.mg^{-1}$',
'KKM': '$mM^{-1}.s^{-1}$',
'TO': '${}^oC$',
'TR': '${}^oC$',
'TS': '${}^oC$',
'MW': 'Da'
}
class BRENDA:
"""
Provides methods to parse the BRENDA database (https://www.brenda-enzymes.org/)
"""
def __init__(self, path_to_database):
with open(path_to_database, encoding="iso-8859-1") as file:
self.__data = file.read()
self.__ec_numbers = [ec.group(1)
for ec in re.finditer('(?<=ID\\t)(.*)(?=\\n)', self.__data)]
self.__reactions = self.__initializeReactionObjects()
self.__copyright = ("""Copyrighted by <NAME>, Techn. University
Braunschweig, GERMANY. Distributed under the License as stated
at http:/www.brenda-enzymes.org""")
self.__fields = fields
self.__units = units
def _repr_html_(self):
"""This method is executed automatically by Jupyter to print html!"""
return """
<table>
<tr>
<td><strong>Number of Enzymes</strong></td><td>{n_ec}</td>
</tr><tr>
<td><strong>BRENDA copyright</strong></td><td>{cr}</td>
</tr><tr>
<td><strong>Parser version</strong></td><td>{parser}</td>
</tr><tr>
<td><strong>Author</strong></td><td>{author}</td>
</tr>
</table>
""".format(n_ec=len(self.__reactions),
cr=self.__copyright,
parser=__version__,
author=__author__)
def __getRxnData(self):
rxn_data = [r.group(0)
for r in re.finditer('ID\\t(.+?)///', self.__data, flags=re.DOTALL)]
del self.__data
return rxn_data
def __initializeReactionObjects(self):
return [Reaction(datum) for datum in self.__getRxnData()]
@property
def fields(self):
return self.__fields
@property
def units(self):
return self.__units
@property
def reactions(self):
return ReactionList(self.__reactions)
@property
def copyright(self):
return self.__copyright
def getOrganisms(self) -> list:
"""
Get list of all represented species in BRENDA
"""
species = set()
for rxn in self.__reactions:
species.update([s['name'] for s in rxn.proteins.values()])
species.remove('')
species = list(set([s for s in species if 'no activity' not in s]))
return species
def getKMcompounds(self) -> list:
"""
Get list of all substrates in BRENDA with KM data
"""
cpds = set()
for rxn in self.__reactions:
cpds.update([s for s in rxn.KMvalues.keys()])
try:
cpds.remove('')
except Exception:
pass
return list(cpds)
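# Hypothetical usage sketch (the database path is an assumption; the BRENDA flat
# file must be downloaded separately from https://www.brenda-enzymes.org/):
def _example_brenda_usage(path_to_database='./brenda_download.txt'):
    brenda = BRENDA(path_to_database)
    # EC 1.1.1.1 is alcohol dehydrogenase; also restrict the reaction list to one organism.
    adh = brenda.reactions.get_by_id('1.1.1.1')
    e_coli_reactions = brenda.reactions.filter_by_organism('Escherichia coli')
    return adh, e_coli_reactions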
class ReactionList(list):
# Make ReactionList slicing return ReactionList object
def __init__(self, seq=None):
super(self.__class__, self).__init__(seq)
def __getslice__(self, start, stop):
return self.__class__(super(self.__class__, self).__getslice__(start, stop))
def __getitem__(self, key):
if isinstance(key, slice):
return self.__class__(super(self.__class__, self).__getitem__(key))
else:
return super(self.__class__, self).__getitem__(key)
def get_by_id(self, id: str):
try:
return [rxn for rxn in self if rxn.ec_number == id][0]
except Exception:
raise ValueError(f'Enzyme with EC {id} not found in database')
def get_by_name(self, name: str):
try:
return [rxn for rxn in self if rxn.name.lower() == name.lower()][0]
except Exception:
raise ValueError(f'Enzyme {name} not found in database')
def filter_by_organism(self, species: str):
def is_contained(p, S): return any([p in s.lower() for s in S])
return self.__class__(
[rxn for rxn in self if is_contained(species.lower(), rxn.organisms)]
)
class EnzymeDict(dict):
def filter_by_organism(self, species: str):
# filtered_dict = {species: []}
filtered_dict = {}
def is_contained(p, S): return any([p in s for s in S])
for k in self.keys():
filtered_values = [v for v in self[k] if is_contained(species, v['species'])]
if len(filtered_values) > 0:
filtered_dict[k] = filtered_values
return self.__class__(filtered_dict)
def get_values(self):
return [v['value'] for k in self.keys() for v in self[k]]
class EnzymePropertyDict(EnzymeDict):
def filter_by_compound(self, compound: str):
try:
return self.__class__({compound: self[compound]})
except Exception:
return self.__class__({compound: []})
# raise KeyError(f'Invalid compound, valid compounds are: {", ".join(list(self.keys()))}')
class EnzymeConditionDict(EnzymeDict):
def filter_by_condition(self, condition: str):
try:
return self.__class__({condition: self[condition]})
except Exception:
raise KeyError(f'Invalid condition, valid conditions are: {", ".join(list(self.keys()))}')
class Reaction:
def __init__(self, reaction_data):
self.__reaction_data = reaction_data
self.__ec_number = self.__extractRegexPattern('(?<=ID\t)(.*)(?=\n)')
self.__systematic_name = self.__extractRegexPattern('(?<=SN\t)(.*)(?=\n)')
self.__name = self.__extractRegexPattern('(?<=RN\t)(.*)(?=\n)').capitalize()
self.__mechanism_str = (self.__extractRegexPattern('(?<=RE\t)(.*)(?=\n\nREACTION_)',
dotall=True).replace('=', '<=>')
.replace('\n\t', ''))
self.__reaction_type = self.__extractRegexPattern('(?<=RT\t)(.*)(?=\n)').capitalize()
self.__proteins = self.__getSpeciesDict()
self.__references = self.__getReferencesDict()
def __getSpeciesDict(self) -> dict:
"""
Returns a dict listing all proteins for given EC number
"""
species = {}
lines = self.__getDataLines('PR')
for line in lines:
res = self.__extractDataLineInfo(line)
species_name, protein_ID = self.__splitSpeciesFromProteinID(res['value'])
species[res['species'][0]] = {'name': species_name,
'proteinID': protein_ID,
'refs': res['refs']}
return species
def __getReferencesDict(self):
"""
Returns a dict listing the bibliography cited for the given EC number
"""
references = {}
lines = self.__getDataLines('RF')
for line in lines:
line = self.__removeTabs(line)
line, refs = self.__extractDataField(line, ('<', '>'))
references[refs[0]] = line
return references
def __printReactionSummary(self):
data = {'EC number': self.__ec_number,
'Name': self.__name,
'Systematic name': self.__systematic_name,
'Reaction type': self.__reaction_type,
                'Mechanism': self.__mechanism_str}
return | pd.DataFrame.from_dict(data, orient='index', columns=['']) | pandas.DataFrame.from_dict |
from copy import deepcopy
import datetime
import inspect
import pydoc
import numpy as np
import pytest
from pandas.compat import PY37
from pandas.util._test_decorators import async_mark, skip_if_no
import pandas as pd
from pandas import Categorical, DataFrame, Series, compat, date_range, timedelta_range
import pandas._testing as tm
class TestDataFrameMisc:
@pytest.mark.parametrize("attr", ["index", "columns"])
def test_copy_index_name_checking(self, float_frame, attr):
# don't want to be able to modify the index stored elsewhere after
# making a copy
ind = getattr(float_frame, attr)
ind.name = None
cp = float_frame.copy()
getattr(cp, attr).name = "foo"
assert getattr(float_frame, attr).name is None
def test_getitem_pop_assign_name(self, float_frame):
s = float_frame["A"]
assert s.name == "A"
s = float_frame.pop("A")
assert s.name == "A"
s = float_frame.loc[:, "B"]
assert s.name == "B"
s2 = s.loc[:]
assert s2.name == "B"
def test_get_value(self, float_frame):
for idx in float_frame.index:
for col in float_frame.columns:
result = float_frame._get_value(idx, col)
expected = float_frame[col][idx]
tm.assert_almost_equal(result, expected)
def test_add_prefix_suffix(self, float_frame):
with_prefix = float_frame.add_prefix("foo#")
expected = pd.Index([f"foo#{c}" for c in float_frame.columns])
| tm.assert_index_equal(with_prefix.columns, expected) | pandas._testing.assert_index_equal |
import numpy as np
import pandas as pd
from mip import Model, xsum, minimize, CONTINUOUS, OptimizationStatus, BINARY, CBC, GUROBI, LP_Method
class InterfaceToSolver:
"""A wrapper for the mip model class, allows interaction with mip using pd.DataFrames."""
def __init__(self, solver_name='CBC'):
self.variables = {}
self.linear_mip_variables = {}
self.solver_name = solver_name
if solver_name == 'CBC':
self.mip_model = Model("market", solver_name=CBC)
self.linear_mip_model = Model("market", solver_name=CBC)
elif solver_name == 'GUROBI':
self.mip_model = Model("market", solver_name=GUROBI)
self.linear_mip_model = Model("market", solver_name=GUROBI)
else:
raise ValueError("Solver '{}' not recognised.")
self.mip_model.verbose = 0
self.mip_model.solver.set_mip_gap_abs(1e-10)
self.mip_model.solver.set_mip_gap(1e-20)
self.mip_model.lp_method = LP_Method.DUAL
self.linear_mip_model.verbose = 0
self.linear_mip_model.solver.set_mip_gap_abs(1e-10)
self.linear_mip_model.solver.set_mip_gap(1e-20)
self.linear_mip_model.lp_method = LP_Method.DUAL
def add_variables(self, decision_variables):
"""Add decision variables to the model.
Examples
--------
>>> decision_variables = pd.DataFrame({
... 'variable_id': [0, 1],
... 'lower_bound': [0.0, 0.0],
... 'upper_bound': [6.0, 1.0],
... 'type': ['continuous', 'binary']})
>>> si = InterfaceToSolver()
>>> si.add_variables(decision_variables)
The underlying mip_model should now have 2 variables.
>>> print(si.mip_model.num_cols)
2
The first one should have the following properties.
>>> print(si.mip_model.var_by_name('0').var_type)
C
>>> print(si.mip_model.var_by_name('0').lb)
0.0
>>> print(si.mip_model.var_by_name('0').ub)
6.0
The second one should have the following properties.
>>> print(si.mip_model.var_by_name('1').var_type)
B
>>> print(si.mip_model.var_by_name('1').lb)
0.0
>>> print(si.mip_model.var_by_name('1').ub)
1.0
"""
# Create a mapping between the nempy level names for variable types and the mip representation.
variable_types = {'continuous': CONTINUOUS, 'binary': BINARY}
# Add each variable to the mip model.
for variable_id, lower_bound, upper_bound, variable_type in zip(
list(decision_variables['variable_id']), list(decision_variables['lower_bound']),
list(decision_variables['upper_bound']), list(decision_variables['type'])):
self.variables[variable_id] = self.mip_model.add_var(lb=lower_bound, ub=upper_bound,
var_type=variable_types[variable_type],
name=str(variable_id))
self.linear_mip_variables[variable_id] = self.linear_mip_model.add_var(lb=lower_bound, ub=upper_bound,
var_type=variable_types[
variable_type],
name=str(variable_id))
def add_sos_type_2(self, sos_variables, sos_id_columns, position_column):
"""Add groups of special ordered sets of type 2 two the mip model.
Examples
--------
>>> decision_variables = pd.DataFrame({
... 'variable_id': [0, 1, 2, 3, 4, 5],
... 'lower_bound': [0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
... 'upper_bound': [5.0, 5.0, 5.0, 5.0, 5.0, 5.0],
... 'type': ['continuous', 'continuous', 'continuous',
... 'continuous', 'continuous', 'continuous']})
>>> sos_variables = pd.DataFrame({
... 'variable_id': [0, 1, 2, 3, 4, 5],
... 'sos_id': ['A', 'A', 'A', 'B', 'B', 'B'],
... 'position': [0, 1, 2, 0, 1, 2]})
>>> si = InterfaceToSolver()
>>> si.add_variables(decision_variables)
>>> si.add_sos_type_2(sos_variables, 'sos_id', 'position')
"""
# Function that adds sets to mip model.
def add_sos_vars(sos_group):
self.mip_model.add_sos(list(zip(sos_group['vars'], sos_group[position_column])), 2)
# For each variable_id get the variable object from the mip model
sos_variables['vars'] = sos_variables['variable_id'].apply(lambda x: self.variables[x])
# Break up the sets based on their id and add them to the model separately.
sos_variables.groupby(sos_id_columns).apply(add_sos_vars)
# This is a hack to make sure mip knows there are binary constraints.
self.mip_model.add_var(var_type=BINARY, obj=0.0)
def add_sos_type_1(self, sos_variables):
# Function that adds sets to mip model.
def add_sos_vars(sos_group):
            self.mip_model.add_sos(list(zip(sos_group['vars'], [1.0 for _ in range(len(sos_group['vars']))])), 1)
# For each variable_id get the variable object from the mip model
sos_variables['vars'] = sos_variables['variable_id'].apply(lambda x: self.variables[x])
# Break up the sets based on their id and add them to the model separately.
sos_variables.groupby('sos_id').apply(add_sos_vars)
        # This is a hack to make sure mip knows there are binary constraints.
self.mip_model.add_var(var_type=BINARY, obj=0.0)
def add_objective_function(self, objective_function):
"""Add the objective function to the mip model.
Examples
--------
>>> decision_variables = pd.DataFrame({
... 'variable_id': [0, 1, 2, 3, 4, 5],
... 'lower_bound': [0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
... 'upper_bound': [5.0, 5.0, 5.0, 5.0, 5.0, 5.0],
... 'type': ['continuous', 'continuous', 'continuous',
... 'continuous', 'continuous', 'continuous']})
>>> objective_function = pd.DataFrame({
... 'variable_id': [0, 1, 3, 4, 5],
... 'cost': [1.0, 2.0, -1.0, 5.0, 0.0]})
>>> si = InterfaceToSolver()
>>> si.add_variables(decision_variables)
>>> si.add_objective_function(objective_function)
>>> print(si.mip_model.var_by_name('0').obj)
1.0
>>> print(si.mip_model.var_by_name('5').obj)
0.0
"""
objective_function = objective_function.sort_values('variable_id')
objective_function = objective_function.set_index('variable_id')
obj = minimize(xsum(objective_function['cost'][i] * self.variables[i] for i in
list(objective_function.index)))
self.mip_model.objective = obj
self.linear_mip_model.objective = obj
def add_constraints(self, constraints_lhs, constraints_type_and_rhs):
"""Add constraints to the mip model.
Examples
--------
>>> decision_variables = pd.DataFrame({
... 'variable_id': [0, 1, 2, 3, 4, 5],
... 'lower_bound': [0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
... 'upper_bound': [5.0, 5.0, 10.0, 10.0, 5.0, 5.0],
... 'type': ['continuous', 'continuous', 'continuous',
... 'continuous', 'continuous', 'continuous']})
>>> constraints_lhs = pd.DataFrame({
... 'constraint_id': [1, 1, 2, 2],
... 'variable_id': [0, 1, 3, 4],
... 'coefficient': [1.0, 0.5, 1.0, 2.0]})
>>> constraints_type_and_rhs = pd.DataFrame({
... 'constraint_id': [1, 2],
... 'type': ['<=', '='],
... 'rhs': [10.0, 20.0]})
>>> si = InterfaceToSolver()
>>> si.add_variables(decision_variables)
>>> si.add_constraints(constraints_lhs, constraints_type_and_rhs)
>>> print(si.mip_model.constr_by_name('1'))
1: +1.0 0 +0.5 1 <= 10.0
>>> print(si.mip_model.constr_by_name('2'))
2: +1.0 3 +2.0 4 = 20.0
"""
constraints_lhs = constraints_lhs.groupby(['constraint_id', 'variable_id'], as_index=False).agg(
{'coefficient': 'sum'})
rows = constraints_lhs.groupby(['constraint_id'], as_index=False)
# Make a dictionary so constraint rhs values can be accessed using the constraint id.
rhs = dict(zip(constraints_type_and_rhs['constraint_id'], constraints_type_and_rhs['rhs']))
# Make a dictionary so constraint type can be accessed using the constraint id.
enq_type = dict(zip(constraints_type_and_rhs['constraint_id'], constraints_type_and_rhs['type']))
var_ids = constraints_lhs['variable_id'].to_numpy()
vars = np.asarray(
[self.variables[k] if k in self.variables.keys() else None for k in range(0, max(var_ids) + 1)])
coefficients = constraints_lhs['coefficient'].to_numpy()
for row_id, row in rows.indices.items():
# Use the variable_ids to get mip variable objects present in the constraints
lhs_variables = vars[var_ids[row]]
# Use the positions of the non nan values to the lhs coefficients.
lhs = coefficients[row]
# Multiply and the variables by their coefficients and sum to create the lhs of the constraint.
exp = lhs_variables * lhs
exp = exp.tolist()
exp = xsum(exp)
# Add based on inequality type.
if enq_type[row_id] == '<=':
new_constraint = exp <= rhs[row_id]
elif enq_type[row_id] == '>=':
new_constraint = exp >= rhs[row_id]
elif enq_type[row_id] == '=':
new_constraint = exp == rhs[row_id]
else:
raise ValueError("Constraint type not recognised should be one of '<=', '>=' or '='.")
self.mip_model.add_constr(new_constraint, name=str(row_id))
self.linear_mip_model.add_constr(new_constraint, name=str(row_id))
def optimize(self):
"""Optimize the mip model.
If an optimal solution cannot be found and the investigate_infeasibility flag is set to True then remove
constraints until a feasible solution is found.
Examples
--------
>>> decision_variables = pd.DataFrame({
... 'variable_id': [0, 1, 2, 3, 4, 5],
... 'lower_bound': [0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
... 'upper_bound': [5.0, 5.0, 10.0, 10.0, 5.0, 5.0],
... 'type': ['continuous', 'continuous', 'continuous',
... 'continuous', 'continuous', 'continuous']})
>>> constraints_lhs = pd.DataFrame({
... 'constraint_id': [1, 1, 2, 2],
... 'variable_id': [0, 1, 3, 4],
... 'coefficient': [1.0, 0.5, 1.0, 2.0]})
>>> constraints_type_and_rhs = pd.DataFrame({
... 'constraint_id': [1, 2],
... 'type': ['<=', '='],
... 'rhs': [10.0, 20.0]})
>>> si = InterfaceToSolver()
>>> si.add_variables(decision_variables)
>>> si.add_constraints(constraints_lhs, constraints_type_and_rhs)
>>> si.optimize()
>>> decision_variables['value'] = si.get_optimal_values_of_decision_variables(decision_variables)
>>> print(decision_variables)
variable_id lower_bound upper_bound type value
0 0 0.0 5.0 continuous 0.0
1 1 0.0 5.0 continuous 0.0
2 2 0.0 10.0 continuous 0.0
3 3 0.0 10.0 continuous 10.0
4 4 0.0 5.0 continuous 5.0
5 5 0.0 5.0 continuous 0.0
"""
status = self.mip_model.optimize()
if status != OptimizationStatus.OPTIMAL:
            # Attempt to find the constraint causing the infeasibility.
            print('Model infeasible attempting to find problem constraint.')
            con_index = find_problem_constraint(self.mip_model)
            print('Couldn\'t find an optimal solution, but removing con {} fixed INFEASIBILITY'.format(con_index))
raise ValueError('Linear program infeasible')
def get_optimal_values_of_decision_variables(self, variable_definitions):
"""Get the optimal values for each decision variable.
Examples
--------
>>> decision_variables = pd.DataFrame({
... 'variable_id': [0, 1, 2, 3, 4, 5],
... 'lower_bound': [0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
... 'upper_bound': [5.0, 5.0, 10.0, 10.0, 5.0, 5.0],
... 'type': ['continuous', 'continuous', 'continuous',
... 'continuous', 'continuous', 'continuous']})
>>> constraints_lhs = pd.DataFrame({
... 'constraint_id': [1, 1, 2, 2],
... 'variable_id': [0, 1, 3, 4],
... 'coefficient': [1.0, 0.5, 1.0, 2.0]})
>>> constraints_type_and_rhs = pd.DataFrame({
... 'constraint_id': [1, 2],
... 'type': ['<=', '='],
... 'rhs': [10.0, 20.0]})
>>> si = InterfaceToSolver()
>>> si.add_variables(decision_variables)
>>> si.add_constraints(constraints_lhs, constraints_type_and_rhs)
>>> si.optimize()
>>> decision_variables['value'] = si.get_optimal_values_of_decision_variables(decision_variables)
>>> print(decision_variables)
variable_id lower_bound upper_bound type value
0 0 0.0 5.0 continuous 0.0
1 1 0.0 5.0 continuous 0.0
2 2 0.0 10.0 continuous 0.0
3 3 0.0 10.0 continuous 10.0
4 4 0.0 5.0 continuous 5.0
5 5 0.0 5.0 continuous 0.0
"""
        values = variable_definitions['variable_id'].apply(lambda x: self.mip_model.var_by_name(str(x)).x)
return values
def get_optimal_values_of_decision_variables_lin(self, variable_definitions):
        values = variable_definitions['variable_id'].apply(lambda x: self.linear_mip_model.var_by_name(str(x)).x)
return values
def get_slack_in_constraints(self, constraints_type_and_rhs):
"""Get the slack values in each constraint.
Examples
--------
>>> decision_variables = pd.DataFrame({
... 'variable_id': [0, 1, 2, 3, 4, 5],
... 'lower_bound': [0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
... 'upper_bound': [5.0, 5.0, 10.0, 10.0, 5.0, 5.0],
... 'type': ['continuous', 'continuous', 'continuous',
... 'continuous', 'continuous', 'continuous']})
>>> constraints_lhs = pd.DataFrame({
... 'constraint_id': [1, 1, 2, 2],
... 'variable_id': [0, 1, 3, 4],
... 'coefficient': [1.0, 0.5, 1.0, 2.0]})
>>> constraints_type_and_rhs = pd.DataFrame({
... 'constraint_id': [1, 2],
... 'type': ['<=', '='],
... 'rhs': [10.0, 20.0]})
>>> si = InterfaceToSolver()
>>> si.add_variables(decision_variables)
>>> si.add_constraints(constraints_lhs, constraints_type_and_rhs)
>>> si.optimize()
>>> constraints_type_and_rhs['slack'] = si.get_slack_in_constraints(constraints_type_and_rhs)
>>> print(constraints_type_and_rhs)
constraint_id type rhs slack
0 1 <= 10.0 10.0
1 2 = 20.0 0.0
"""
        slack = constraints_type_and_rhs['constraint_id'].apply(lambda x: self.mip_model.constr_by_name(str(x)).slack)
return slack
def price_constraints(self, constraint_ids_to_price):
"""For each constraint_id find the marginal value of the constraint.
This is done by incrementing the constraint by a value of 1.0 and re-optimizing the model, the marginal cost
of the constraint is increase in the objective function value between model runs.
Examples
--------
>>> decision_variables = pd.DataFrame({
... 'variable_id': [0, 1, 2, 3, 4, 5],
... 'lower_bound': [0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
... 'upper_bound': [5.0, 5.0, 10.0, 10.0, 5.0, 5.0],
... 'type': ['continuous', 'continuous', 'continuous',
... 'continuous', 'continuous', 'continuous']})
>>> objective_function = pd.DataFrame({
... 'variable_id': [0, 1, 2, 3, 4, 5],
... 'cost': [1.0, 3.0, 10.0, 8.0, 9.0, 7.0]})
>>> constraints_lhs = pd.DataFrame({
... 'constraint_id': [1, 1, 1, 1],
... 'variable_id': [0, 1, 3, 4],
... 'coefficient': [1.0, 1.0, 1.0, 1.0]})
>>> constraints_type_and_rhs = pd.DataFrame({
... 'constraint_id': [1],
... 'type': ['='],
... 'rhs': [20.0]})
>>> si = InterfaceToSolver()
>>> si.add_variables(decision_variables)
>>> si.add_constraints(constraints_lhs, constraints_type_and_rhs)
>>> si.add_objective_function(objective_function)
>>> si.optimize()
>>> si.linear_mip_model.optimize()
<OptimizationStatus.OPTIMAL: 0>
>>> prices = si.price_constraints([1])
>>> print(prices)
{1: 8.0}
>>> decision_variables['value'] = si.get_optimal_values_of_decision_variables(decision_variables)
>>> print(decision_variables)
variable_id lower_bound upper_bound type value
0 0 0.0 5.0 continuous 5.0
1 1 0.0 5.0 continuous 5.0
2 2 0.0 10.0 continuous 0.0
3 3 0.0 10.0 continuous 10.0
4 4 0.0 5.0 continuous 0.0
5 5 0.0 5.0 continuous 0.0
"""
costs = {}
for id in constraint_ids_to_price:
costs[id] = self.linear_mip_model.constr_by_name(str(id)).pi
return costs
def update_rhs(self, constraint_id, violation_degree):
constraint = self.linear_mip_model.constr_by_name(str(constraint_id))
constraint.rhs += violation_degree
def update_variable_bounds(self, new_bounds):
for variable_id, lb, ub in zip(new_bounds['variable_id'], new_bounds['lower_bound'], new_bounds['upper_bound']):
self.mip_model.var_by_name(str(variable_id)).lb = lb
self.mip_model.var_by_name(str(variable_id)).ub = ub
def disable_variables(self, variables):
for var_id in variables['variable_id']:
var = self.linear_mip_model.var_by_name(str(var_id))
var.lb = 0.0
var.ub = 0.0
def find_problem_constraint(base_prob):
cons = []
test_prob = base_prob.copy()
for con in [con.name for con in base_prob.constrs]:
[test_prob.remove(c) for c in test_prob.constrs if c.name == con]
status = test_prob.optimize()
cons.append(con)
if status == OptimizationStatus.OPTIMAL:
return cons
return []
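# Hypothetical usage sketch: find_problem_constraint is a debugging helper that copies a model and
# removes constraints one by one (cumulatively), re-optimizing after each removal, until the copy
# solves to optimality; it returns the names of the constraints removed up to that point.
# The variable `si` below is an assumed InterfaceToSolver instance whose model failed to solve.
# removed = find_problem_constraint(si.mip_model)
# print('Constraints removed before the model became optimal:', removed)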
def create_lhs(constraints, decision_variables, join_columns):
"""Combine constraints with general definitions of lhs with variables to give an explicit lhs definition.
Both constraints and decision_variables can have a coefficient; the coefficient used in the actual lhs will
be the product of the two coefficients.
Examples
--------
>>> decision_variables = pd.DataFrame({
... 'variable_id': [0, 1, 2, 3, 4, 5],
... 'region': ['NSW', 'NSW', 'VIC',
... 'VIC', 'VIC', 'VIC'],
... 'service': ['energy', 'energy','energy',
... 'energy','energy','energy',],
... 'coefficient': [0.9, 0.8, 1.0, 0.95, 1.1, 1.01]})
>>> constraints = pd.DataFrame({
... 'constraint_id': [1, 2],
... 'region': ['NSW', 'VIC'],
... 'service': ['energy', 'energy'],
... 'coefficient': [1.0, 1.0]})
>>> lhs = create_lhs(decision_variables, constraints, ['region', 'service'])
>>> print(lhs)
constraint_id variable_id coefficient
0 1 0 0.90
1 1 1 0.80
2 2 2 1.00
3 2 3 0.95
4 2 4 1.10
5 2 5 1.01
Parameters
----------
constraints : pd.DataFrame
============= ===============================================================
Columns: Description:
constraint_id the unique identifier of the constraint (as `np.int64`)
join_columns one or more columns defining the types of variables that should
be on the lhs (as `str`)
coefficient the constraint level contribution to the lhs coefficient (as `np.float64`)
============= ===============================================================
decision_variables : pd.DataFrame
============= ===============================================================
Columns: Description:
variable_id the unique identifier of the variable (as `np.int64`)
join_columns one or more columns defining the types of variables that should
be on the lhs (as `str`)
coefficient the variable level contribution to the lhs coefficient (as `np.float64`)
============= ===============================================================
Returns
-------
lhs : pd.DataFrame
============= ===============================================================
Columns: Description:
constraint_id the unique identifier of the constraint (as `np.int64`)
variable_id the unique identifier of the variable (as `np.int64`)
coefficient the constraint level contribution to the lhs coefficient (as `np.float64`)
============= ===============================================================
"""
constraints = pd.merge(constraints, decision_variables, 'inner', on=join_columns)
constraints['coefficient'] = constraints['coefficient_x'] * constraints['coefficient_y']
lhs = constraints.loc[:, ['constraint_id', 'variable_id', 'coefficient']]
return lhs
def create_mapping_of_generic_constraint_sets_to_constraint_ids(constraints, market_constraints):
"""Combine generic constraints and fcas market constraints to get the full set of generic constraints.
Returns None if there are no generic or fcas market constraints.
Examples
--------
>>> constraints = {
... 'generic': pd.DataFrame({
... 'constraint_id': [0, 1],
... 'set': ['A', 'B']})
... }
>>> market_constraints = {
... 'fcas': pd.DataFrame({
... 'constraint_id': [2, 3],
... 'set': ['C', 'D']})
... }
>>> generic_constraints = create_mapping_of_generic_constraint_sets_to_constraint_ids(
... constraints, market_constraints)
>>> print(generic_constraints)
constraint_id set
0 0 A
1 1 B
0 2 C
1 3 D
Parameters
----------
constraints : dict{str : pd.DataFrame}
The pd.DataFrame stored under the key 'generic', if it exists, should have the structure.
============= ===============================================================
Columns: Description:
constraint_id the unique identifier of the constraint (as `np.int64`)
set the constraint set that the id refers to (as `str`)
============= ===============================================================
market_constraints : dict{str : pd.DataFrame}
The pd.DataFrame stored under the key 'fcas', if it exists, should have the structure.
============= ===============================================================
Columns: Description:
constraint_id the unique identifier of the constraint (as `np.int64`)
set the constraint set that the id refers to (as `str`)
============= ===============================================================
Returns
-------
pd.DataFrame or None
If pd.DataFrame
============= ===============================================================
Columns: Description:
constraint_id the unique identifier of the constraint (as `np.int64`)
set the constraint set that the id refers to (as `str`)
============= ===============================================================
"""
generic_constraints = []
if 'generic' in constraints:
generic_constraints.append(constraints['generic'].loc[:, ['constraint_id', 'set']])
if 'fcas' in market_constraints:
generic_constraints.append(market_constraints['fcas'].loc[:, ['constraint_id', 'set']])
if len(generic_constraints) > 0:
return | pd.concat(generic_constraints) | pandas.concat |
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from xgboost import XGBRegressor, plot_importance
from sklearn.preprocessing import MinMaxScaler, MaxAbsScaler, StandardScaler, Normalizer
from sklearn.feature_selection import SelectKBest, f_regression
from model import ElasticNetModel, PCAModel, LassoModel, KernelPCAModel, RidgeModel, SimulatedAnnealing
from tools.train import xgb_parameters_search
# Load the data and drop meaningless variables
def load_data(dir_path, drop_list=['id', '时间']):
data = pd.read_excel(dir_path)
for col in drop_list:
data.drop(col, axis=1, inplace=True)
return data
def nan_data_rate(df, n, ascending_=False, origin=True):
"""
[Function] Missing-rate statistics function nan_data_rate
:param df: dataframe to process
:param n: number of variables to display
:param ascending_: sort by missing rate in ascending (True) or descending (False) order
:param origin: whether to also display variables without missing values
:return: missing rates of the first n variables
"""
if n > len(df.columns): # if the requested number exceeds the total number of variables, show all of them by default
print('Requested number of variables exceeds the total of %i, showing all variables' % (len(df.columns)))
n = len(df.columns)
na_rate = df.isnull().sum() / len(df) * 100 # missing rate of each variable
if origin: # if True, variables without missing values are kept as well
na_rate = na_rate.sort_values(ascending=ascending_)
missing_data = | pd.DataFrame({'Missing_Ratio': na_rate}) | pandas.DataFrame |
import numpy as np
import pandas as pd
def declat_mine(df, minsup):
frequent = {'support': [], 'itemset': []}
prefix = []
for col in df.columns:
d_col = set(df[df[col] == 0].index)
support = df.shape[0] - len(d_col)
if support >= minsup:
prefix.append((set(col), d_col, support))
declat(prefix, minsup, frequent)
return | pd.DataFrame(frequent) | pandas.DataFrame |
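# The recursive helper declat() is called above but not defined in this snippet. Below is a minimal
# sketch of the diffset-based dEclat recursion it is assumed to implement, using the
# (itemset, diffset, support) tuples built in declat_mine. This is an assumption, not necessarily
# the original author's implementation.
def declat(prefix, minsup, frequent):
    for i, (itemset_a, diff_a, sup_a) in enumerate(prefix):
        # every itemset that reaches this point is frequent, so record it
        frequent['support'].append(sup_a)
        frequent['itemset'].append(itemset_a)
        suffix = []
        for itemset_b, diff_b, sup_b in prefix[i + 1:]:
            # diffset of the union: transactions lost when adding b to a
            new_diff = diff_b - diff_a
            new_sup = sup_a - len(new_diff)
            if new_sup >= minsup:
                suffix.append((itemset_a | itemset_b, new_diff, new_sup))
        if suffix:
            declat(suffix, minsup, frequent)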
# -*- coding: utf-8 -*-
import sys, os
sys.path.append('H:/cloud/cloud_data/Projects/DL/Code/src')
sys.path.append('H:/cloud/cloud_data/Projects/DL/Code/src/ct')
import pandas as pd
from tqdm import tqdm
filepath_hist = 'H:/cloud/cloud_data/Projects/DISCHARGEMaster/data/discharge_master/discharge_master_01092020/discharge_sources_01092020/discharge_hist_01092020.pkl'
filepath_dublicates = 'H:/cloud/cloud_data/Projects/DISCHARGEMaster/data/discharge_master/discharge_master_01092020/tmp/discharge_dublicates.pkl'
# Load data
df_hist = | pd.read_pickle(filepath_hist) | pandas.read_pickle |
#
# extract and plot GMSL rate vs T values from AR5 and SROCC
#
# <NAME> 2021
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import hadcrut5
import pickle
from scipy.stats.stats import pearsonr
#--------------read google sheet:
sheet_id = '1b2CXW2D9ZFfJ4HDD42WpccF8xSzGVzzsEGie5yZBHCw'
sheet_name = "Sheet1"
url = f"https://docs.google.com/spreadsheets/d/{sheet_id}/gviz/tq?tqx=out:csv&sheet={sheet_name}"
#print(url)
#url = 'c:/users/ag/downloads/data.csv'
df = | pd.read_csv(url, error_bad_lines=False) | pandas.read_csv |
import warnings
warnings.simplefilter(action='ignore', category=FutureWarning)
warnings.simplefilter(action='ignore', category=UserWarning) # to suppress future warnings
import pandas as pd
import sys
import textstat
import numpy as numpy
import math
import gensim
from pprint import pprint
from string import ascii_lowercase
#import Use_NN as nn
import re
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_extraction.text import TfidfTransformer
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.model_selection import train_test_split , KFold , LeaveOneOut , LeavePOut , ShuffleSplit , StratifiedKFold , GridSearchCV
from sklearn.linear_model import LogisticRegression
from sklearn.ensemble import RandomForestRegressor , VotingClassifier , RandomTreesEmbedding, ExtraTreesClassifier , RandomForestClassifier , AdaBoostClassifier , GradientBoostingClassifier
from sklearn.model_selection import cross_val_score, cross_val_predict
from sklearn.svm import LinearSVC , SVC
from sklearn import preprocessing
from sklearn.neural_network import MLPClassifier
from sklearn.linear_model import TheilSenRegressor , SGDClassifier
from sklearn.naive_bayes import GaussianNB , BernoulliNB, MultinomialNB , ComplementNB
from sklearn.linear_model import LogisticRegressionCV , PassiveAggressiveClassifier, HuberRegressor
from sklearn.metrics import f1_score , recall_score , accuracy_score , precision_score , jaccard_score , balanced_accuracy_score, confusion_matrix
from mlxtend.plotting import plot_decision_regions, plot_confusion_matrix
from matplotlib import pyplot as plt
from sklearn.neighbors import KNeighborsClassifier , RadiusNeighborsClassifier
import nltk
from nltk.tokenize import RegexpTokenizer
from gensim.models.doc2vec import Doc2Vec, TaggedDocument
from imblearn.pipeline import make_pipeline
from collections import Counter
from imblearn.over_sampling import SMOTE
from imblearn.under_sampling import NearMiss
import string
import xgboost as xgb
from pushover import Client
removeUnWanted = re.compile('[\W_]+') #strip off the damn characters
isClassify = False #to run classification on test data
isCreationMode = False
isWord2Vec = False
isEmbeddings = True
isBOW = False
doc2VecFileName ="doc2vec"
useSMOTE = True
searchParams = False
STATE = 21
#logistic , nb , svm , xgboost, rf
DETERMINER = 'xgboost'
embedType = 'bert' #or bert
# Takes any text and converts it into a vector. Requires the trained set (original vector) and the text we plan to infer (shall be known as test)
def vectorize(train,test):
token = RegexpTokenizer(r'[a-zA-Z0-9]+')
vectorizer = CountVectorizer(ngram_range=(2,3),min_df=0, lowercase=True, analyzer='char_wb',tokenizer = token.tokenize, stop_words='english') #this is working
#vectorizer = CountVectorizer(min_df=0, lowercase=True)
# vectorizer = TfidfTransformer(use_idf=True,smooth_idf=True)
x = vectorizer.fit(train)
x = vectorizer.transform(test)
return x
def loadEmbeddings(filename):
embeddings = numpy.load(filename,allow_pickle=True)
print(embeddings.shape)
return embeddings
# Pandas Method to read our CSV to make it easier
def read_csv(filepath):
#parseDate = ['review_date']
#dateparse = lambda x: pd.datetime.strptime(x, '%Y-%m-%d')
#colName = ['customer_id','product_category', 'review_id', 'star_rating','helpful_votes','total_votes','vine','verified_purchase','review_body','review_date']
colName = ['ID','Comment','Prediction']
column_dtypes = {
'ID': 'uint8',
'Comment' : 'str',
'Prediction' : 'uint8'
}
#df_chunk = pd.read_csv(filepath, sep='\t', header=0, chunksize=500000, error_bad_lines=False,parse_dates=parseDate, dtype=column_dtypes, usecols=colName, date_parser=dateparse)
df_chunk = pd.read_csv(filepath, sep=',', header=0, dtype=column_dtypes,usecols=colName,encoding = "ISO-8859-1")
#df_chuck = df_chuck.fillna(0)
return df_chunk
def read_csv2(filepath):
#parseDate = ['review_date']
#dateparse = lambda x: pd.datetime.strptime(x, '%Y-%m-%d')
#colName = ['customer_id','product_category', 'review_id', 'star_rating','helpful_votes','total_votes','vine','verified_purchase','review_body','review_date']
colName = ['ID','Comment']
column_dtypes = {
'ID': 'uint8',
'Comment' : 'str'
}
#df_chunk = pd.read_csv(filepath, sep='\t', header=0, chunksize=500000, error_bad_lines=False,parse_dates=parseDate, dtype=column_dtypes, usecols=colName, date_parser=dateparse)
df_chunk = pd.read_csv(filepath, sep=',', header=0, dtype=column_dtypes,usecols=colName)
#df_chuck = df_chuck.fillna(0)
return df_chunk
#Classify Sarcasm Based on the Neural Network That Was Trained for it - TO DO
def detectSarcasm(text):
#text = re.sub('[^A-Za-z0-9]+', '', text)
# print(text)
# return ("3")
return nn.use_neural_network(text)
def calcSyllableCount(text):
return textstat.syllable_count(text, lang='en_US')
def calcLexCount(text):
return textstat.lexicon_count(text)
def commentCleaner(df):
df['Comment'] = df['Comment'].str.lower()
# df['Comment'] = df['Comment'].str.replace("[^abcdefghijklmnopqrstuvwxyz1234567890' ]", "")
def get_good_tokens(sentence):
replaced_punctation = list(map(lambda token: re.sub('[^0-9A-Za-z!?]+', '', token), sentence))
removed_punctation = list(filter(lambda token: token, replaced_punctation))
return removed_punctation
# Converts to POS Tags that can be used
def tag(sent):
words=nltk.word_tokenize(sent)
tagged=nltk.pos_tag(words)
return tagged
#Checks for Nouns, to implement the method found in Cindy Chung's Psych paper (Search for <NAME> and <NAME> and cite here)
def checkForNouns(text,method='None'):
counter = 0
counter2 = 0
if "aa" in text: #Dummy variable to inform that it is outside , so we dont' track them
return counter
else:
wrb = tag(text)
index = 0
for row in wrb:
POSTag = wrb[index][1]
# print(POSTag)
if (POSTag in "IN") or (POSTag in "PRP") or (POSTag in "DT") or (POSTag in "CC") or (POSTag in "VB") or (POSTag in "VB") or (POSTag in "PRP$") or (POSTag is "RB"):
counter = counter+1
else:
counter2 = counter2+1
index = index + 1
if "function" in method:
return counter
elif "ratio" in method:
return abs(counter2/counter)
else:
return counter2
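# Hypothetical illustration of checkForNouns: the sentence is POS-tagged with nltk and, depending
# on `method`, the function returns the count of function words ('function'), the content-to-function
# word ratio ('ratio'), or the count of content words (default). The nltk 'punkt' and
# 'averaged_perceptron_tagger' resources are assumed to be available.
# example_sentence = "she walked to the store and bought bread"
# print(checkForNouns(example_sentence, 'function')) # count of function words
# print(checkForNouns(example_sentence, 'ratio')) # content / function ratio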
#Given an un-seen dataframe and [TO DO - the column] , it will convert it into Matrix
def convertToVectorFromDataframe(df):
matrix = []
targets = list(df['tokenized_sents'])
for i in range(len(targets)):
matrix.append(model.infer_vector(targets[i])) # A lot of tutorials use the model directly , we will do some improvement over it
targets_out = numpy.asarray(matrix)
return (matrix)
#A simple method which takes in the tokenized_sents and the tags and yields TaggedDocument objects.
def make_tagged_document(df,train):
# taggeddocs = []
for doc, tanda in zip(df['tokenized_sents'], train):
yield(TaggedDocument(doc,[tanda]))
def calculateScoresVariousAlphaValues(predicted_data,truth_data,threshold_list=[0.00,0.05,0.1,0.15,0.2,0.25,0.3,0.35,0.4,0.45,0.5,0.55,0.6,0.65,0.7,0.75,0.8,0.85,0.9,0.95,0.99,1.00]):
for i in threshold_list:
squarer = (lambda x: 1 if x>=i else 0)
fucd = numpy.vectorize(squarer)
vfunc = fucd(predicted_data)
f1score = f1_score(y_true=truth_data,y_pred=vfunc)
print(str(i)+","+str(perf_measure(truth_data,vfunc)))
#print(confusion_matrix(vfunc, truth_data))
#print(str(i)+","+ str(f1score))
# Creates a Doc2Vec Model by giving an input of documents [String]. It's much of an easier way. It then saves to disk , so it can be used later :)
def createDoc2VecModel(documents,tag):
docObj = list(make_tagged_document(documents,tag)) # document that we will use to train our model for
model = Doc2Vec(documents=docObj,vector_size=500,
# window=2,
alpha=.025,
epochs=100,
min_alpha=0.00025,
sample=0.335,
ns_exponent=0.59,
dm_concat=0,
dm_mean=1,
# negative=2,
seed=10000,
min_count=2,
dm=0,
workers=4)
model.save(doc2VecFileName) #our file name
return model
# Loads Doc2Vec model based on the filename given
def loadDoc2VecModel(filepath=doc2VecFileName):
model = Doc2Vec.load(filepath)
return model
# Implements Class Weight to ensure that fair distribution of the classes
def get_class_weights(y):
counter = Counter(y)
majority = max(counter.values())
return {cls: round(float(majority)/float(count), 2) for cls, count in counter.items()}
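# Worked illustration (hypothetical labels): Counter([0, 0, 0, 1]) gives {0: 3, 1: 1}, the majority
# count is 3, so the weights come out as {0: 1.0, 1: 3.0}, i.e. each class is weighted by
# majority_count / class_count and the minority class is up-weighted.
# print(get_class_weights([0, 0, 0, 1]))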
# Selects a Classifier to perform the task
def selectClassifier(weights='balanced',classifymethod='logistic'):
#classifier = RandomForestRegressor(n_estimators=100)
#clf = svm.NuSVC(kernel='rbf',decision_function_shape='ovo',probability=True)
#classifier = LinearSVC(random_state=21, tol=1e-4,C=1000,fit_intercept=False)
if 'logistic' in classifymethod:
#cy = LogisticRegression(fit_intercept=True, max_iter=8000,solver='newton-cg',random_state=STATE,class_weight=weights)
cy = LogisticRegression(C=5.0, fit_intercept=False, max_iter=100, penalty= 'l2', solver='sag', tol=0.0001)
return cy
elif 'nb' in classifymethod:
cy = GaussianNB()
return cy
elif 'xgboost' in classifymethod:
cy = xgb.XGBClassifier(colsample_bytree= 0.6, gamma= 2, max_depth= 5, min_child_weight= 5, n_estimators= 100, subsample= 0.8)
return cy
elif 'svm' in classifymethod:
cy = SVC(random_state=STATE,probability=True,C= 10, gamma= 0.001, max_iter= 500, tol= 0.001)
return cy
elif 'rf' in classifymethod:
cy = RandomForestClassifier(bootstrap = True, class_weight= 'balanced_subsample', criterion= 'entropy', max_depth= 8, max_features = 'log2', min_samples_split= 30, min_weight_fraction_leaf= 0.0, n_estimators = 300,random_state=STATE)
return cy
elif 'kn' in classifymethod:
cy = MLPClassifier(hidden_layer_sizes=50,learning_rate='adaptive',random_state=STATE,solver='lbfgs')
return cy
else:
return null
def gridParameters(classifyMethod):
if 'rf' in classifyMethod:
grid_param = {
'n_estimators': [100, 300, 500, 800, 900,1000],
'criterion': ['gini', 'entropy'],
'max_features': ['auto','sqrt','log2'],
'min_samples_split' : [2,4,8,10,15,30],
'class_weight': ['balanced','balanced_subsample'],
'min_weight_fraction_leaf' : [0.0,0.1,0.3,0.5],
'max_depth': [1,3,5,8,10,15,20],
'bootstrap': [True, False]
}
elif 'logistic' in classifyMethod:
grid_param = {
'penalty' : ['l2'],
'tol': [1e-4, 1e-5],
'C': [0.5,1.0,5.0],
'fit_intercept': [True,False],
'solver': ['newton-cg','lbfgs','sag'],
'max_iter': [100,200]
}
elif 'xgboost' in classifyMethod:
grid_param = {
'n_estimators': [100,200,600,800],
'min_child_weight': [1, 5, 10],
'gamma': [0.5, 1, 1.5, 2, 5],
'subsample': [0.6, 0.8, 1.0],
'colsample_bytree': [0.6, 0.8, 1.0],
'max_depth': [3, 4, 5]
}
elif 'svm' in classifyMethod:
grid_param = {
# 0.01, 0.1, 1, 10,100,1000,3000 , 1e-4, 1e-5, 5e-4 , 5e-5,5e-10 , 500,1000,8000
'C' : [0.001,0.1,1,10,100,1000,3000],
'tol': [1e-3,1e-4,1e-5,5e-4,5e-5,5e-10],
'max_iter': [100,500,1000],
'gamma': [0.001,0.005,0.010],
}
return grid_param
def getChars(s):
count = lambda l1,l2: sum([1 for x in l1 if x in l2])
return (count(s,set(string.punctuation)))
def mergeMatrix(matrixa,matrixb):
print(matrixa.shape)
print(matrixb.shape)
print(matrixb)
return(numpy.concatenate((matrixa, matrixb[:,None]), axis=1))
def w2v_preprocessing(df):
df['Comment'] = df['Comment'].str.lower()
df['nouns'] = df['Comment'].apply(checkForNouns, args=('function',))
df['tokenized_sents'] = df.apply(lambda row: nltk.word_tokenize(row['Comment']), axis=1)
df['uppercase'] = df['Comment'].str.findall(r'[A-Z]').str.len() # get upper case
df['punct'] = df['Comment'].apply(getChars)
def FoldValidate(original,truth,classifier,iter=3):
Val = StratifiedKFold(n_splits=iter, random_state=STATE, shuffle=True) # DO OUR FOLD here , maybe add the iteration
scores = []
tns = [] # true neg
fps = [] # false positive
fns = [] # false negative
tps = [] # true positive
for train_index,test_index in Val.split(original,truth):
model2 = classifier
model2.fit(original[train_index], truth[train_index])
x_output = model2.predict(original[test_index])
# print(x_output.shape)
# print(truth[test_index].shape)
#scores.append(classifier.score(x_output, truth[test_index]))
tn, fp , fn , tp = confusion_matrix(x_output, truth[test_index]).ravel()
score = accuracy_score(x_output,truth[test_index])
tns.append(tn)
fps.append(fp)
fns.append(fn)
tps.append(tp)
scores.append(score)
print("TP is,",numpy.mean(tps))
print("FP is,",numpy.mean(fps))
print("FN is,",numpy.mean(fns))
print("TN is,",numpy.mean(tns))
print("Avg Accuracy is,",numpy.mean(scores))
#score = classifier.score(original[train_index], truth[train_index])
#print("Linear Regression Accuracy (using Weighted Avg):", score)
# tester = classifier.predict_proba(original[test_index])
# tester = tester[:,1]
# calculateScoresVariousAlphaValues(tester,truth[test_index])
# scores = numpy.asarray(scores)
#print("Accuracy Score Is:", numpy.mean(scores))
# print("Valuesdfs for train are ", train_index)
# print("Values for test index are ",test_index)
# print("Testing with the values",original[train_index])
# print("Testing it with the values",truth[train_index])
#weights = get_class_weights(truth_data[test_index]) # implement the weights
#model2.fit(classifer_data, truth_data, class_weight=weights)
#unseendata = convertToVectorFromDataframe(test)
#tester = classifier.predict_proba(unseendata)
#tester = tester[:,1]
#calculateScoresVariousAlphaValues(tester,truth_data)
def showGraph(model):
xgb.plot_importance(model, importance_type='gain',max_num_features=10)
plt.show()
# Performs a Grid Search
def gridSearch(model,params,x_train,y_train):
params = gridParameters(params)
gd_sr = GridSearchCV(estimator=model,
param_grid=params,
scoring='accuracy',
cv=3,
n_jobs=5)
gd_sr.fit(x_train, y_train)
best_parameters = gd_sr.best_params_
score2 = ("Best Performing Parameters",best_parameters)
best_score = gd_sr.best_score_
score1 = ("Best Score",best_score)
#output = best_parameters + " " + best_score
return (score2,score1)
def perf_measure(y_actual, y_hat):
TP = 0
FP = 0
TN = 0
FN = 0
for i in range(len(y_hat)):
if y_actual[i]==y_hat[i]==1:
TP += 1
if y_hat[i]==1 and y_actual[i]!=y_hat[i]:
FP += 1
if y_actual[i]==y_hat[i]==0:
TN += 1
if y_hat[i]==0 and y_actual[i]!=y_hat[i]:
FN += 1
return(TP,FP)
#return(TP, FP, TN, FN)
#Main Method
if __name__ == '__main__':
#train_classifier_3
df = read_csv("train_classifier.csv") #Read CSV
w2v_preprocessing(df) # process our junk here by converting it into tokens
scaler = preprocessing.MinMaxScaler()
label = df['Prediction'].values # take the values for prediction for our model
#df['scores']
# df['Comment'] = df.apply(lambda row: nltk.word_tokenize(row['Comment']), axis=1)
# df['Comment'] = df['Comment'].apply(get_good_tokens)
#tagged_train = list(make_tagged_document(sentences_test,y_train))
# df['nounratio'] = df['Comment'].apply(checkForNouns,ratio)
# commentCleaner(df) # clean the comments
# sent = df['Comment'].values # take the comments instead , but in our case we are gonna split them up
if isEmbeddings is True:
if embedType == 'guse':
sent = loadEmbeddings('embeddings.npy')
if embedType == 'bert':
sent = loadEmbeddings('output_alta.npy')
# nouns = df['nouns'].to_numpy()
# caps = df['uppercase'].to_numpy()
# punt = df['punct'].to_numpy()
#nouns = preprocessing.normalize(nouns)
# nouns = preprocessing.minmax_scale(nouns)
# caps = preprocessing.minmax_scale(caps)
# punt = preprocessing.minmax_scale(punt)
# sent = mergeMatrix(sent,nouns)
# sent = mergeMatrix(sent,caps)
# sent = mergeMatrix(sent,punt)
# print(sent.shape)
full_comment = sent
else:
sent = df
# sent = df.drop(['Prediction'],axis=1)
sentences_train, sentences_test, y_train, y_test = train_test_split(
sent, label, test_size=0.25, random_state=10000)
if isBOW is True:
train_x = vectorize(sentences_train['Comment'],sentences_train['Comment'])
test_x = vectorize(sentences_train['Comment'],sentences_test['Comment'])
full_comment = vectorize(sent['Comment'],sent['Comment'])
full_comment = full_comment.toarray() # convert sparse matrix to dense
train_x = train_x.todense()
test_x = test_x.todense()
if isWord2Vec is True:
if isCreationMode is True:
print("Creating Doc2Vec Model")
model = createDoc2VecModel(sentences_train,y_train)
else:
print("Loading Doc2Vec Model")
model = loadDoc2VecModel()
train_x = convertToVectorFromDataframe(sentences_train)
test_x = convertToVectorFromDataframe(sentences_test)
full_comment = convertToVectorFromDataframe(df)
full_comment = numpy.array(full_comment)
#We need to do the split for being consistent esle the naming runs
if isEmbeddings is True:
train_x = sentences_train
test_x = sentences_test
#scaler.fit(train_x)
#train_x = scaler.transform(train_x)
#test_x = scaler.transform(test_x)
title = "GridSearch"
weights = get_class_weights(label)
client = Client("<KEY>", api_token="am9vr<PASSWORD>gm<PASSWORD>azfzude<PASSWORD>")
client.send_message("Hello!", title=title)
#smt = SMOTE()
if useSMOTE is True:
print("USING SMOTE TO BOOST THE IMBALANCED DATA")
smt = SMOTE() # Boost the samples to improve the classification
train_x, y_train = smt.fit_sample(train_x, y_train)
if searchParams is True:
classifiers = ['rf','svm','logistic','xgboost']
for classify in classifiers:
client.send_message(("Running ",classify), title=title)
classifier = selectClassifier(classifymethod=classify)
scoresA, scoreB = gridSearch(classifier,classify,sent,label)
f = open("gridSearchOutput.txt", "a")
f.write(classify)
f.write(str(scoresA))
f.write(str(scoreB))
f.close()
else:
classifier = selectClassifier(classifymethod=DETERMINER)
# classifier = AdaBoostClassifier(random_state=STATE,n_estimators=50, base_estimator=old)
#hasher = RandomTreesEmbedding(n_estimators=10, random_state=0, max_depth=3)
#train_x = hasher.fit_transform(train_x)
#classifer = RandomTreesEmbedding
# print(full_comment.mean(axis=0))
# full_comment = preprocessing.scale(full_comment)
# train_x = preprocessing.scale(train_x)
# test_x = preprocessing.scale(test_x)
#print("SEPERATOR")
#print(full_comment_test.mean(axis=0))
#print(full_comment.shape)
FoldValidate(full_comment,label,classifier)
# y_crossfold = CrossFoldData(test_x,y_test,classifier)
# y_crossfold = y_crossfold[:,1] # keep the 1 values only
#weights = [0.05, 0.10 , 0.15]
#pipe = make_pipeline(
#SMOTE(),
#LogisticRegression(fit_intercept=True, max_iter=1000,solver='newton-cg',random_state=STATE)
#)
#gsc = GridSearchCV(
#estimator=pipe,
#param_grid={
#'smote__ratio': [{0: int(num_neg), 1: int(num_neg * w) } for w in weights]
# 'smote__ratio': weights
#},
#scoring='f1',
#cv=3
#)
#grid_result = gsc.fit(train_x, y_train)
#print("Best parameters : %s" % grid_result.best_params_)
# vector_trained = vectorize(sentences_train,sentences_train)
# vector_test = vectorize(sentences_train,sentences_test)
# skfold = StratifiedKFold(n_splits=3, random_state=100) # split into 3
# results_skfold = cross_val_score(classifier, full_comment, label, cv=skfold)
# print("Results Scores Are:" + str(results_skfold))
# print(scores)
# voter.fit(train_x, y_train)
# classifier.fit(vector_trained, y_train)
# clf.fit(vector_trained, y_train)
# implement the weights
classifier.fit(train_x, y_train)
score = cross_val_score(estimator=classifier,X=sent,y=label,cv=3)
# clf.fit(train_x, y_train)
# clf2.fit(train_x, y_train)
# classifier2.fit(train_x, y_train)
# score = classifier.score(vector_test, y_test)
#score = classifier.score(test_x, y_test)
print("Mean Accuracy Score:", score.mean())
print("Std Dev Accuracy Score:", score.std())
# score = classifier.score(full_comment, label)
#print("Linear Regression Accuracy (using FULL Avg):", score)
tester = classifier.predict_proba(test_x)
tester = tester[:,1]
print("*******NORMAL STARTS HERE*****")
calculateScoresVariousAlphaValues(tester,y_test)
#xgb.plot_importance(classifier, importance_type='gain',max_num_features=10)
#plt.show() # matplotlib plot
# score = clf.score(vector_test, y_test)
# score = clf.score(test_x, y_test)
#score = clf.score(vector_test, y_test)
# score = clf2.score(test_x, y_test)
# print("NB Accuracy (BOW):", score)
# f1score = f1_score(test_x, y_test)
# print("F1 Score:", score)
# unseendata = convertToVectorFromDataframe(sentences_test)
# tester = classifier.predict_proba(unseendata)
# tester = tester[:,1]
# print(tester.shape)
# print(y_test.shape)
# df3 = pd.DataFrame(tester)
# print("*******NORMAL STARTS HERE*****")
# calculateScoresVariousAlphaValues(tester,y_test)
# print("*******K-FOLD 3 SCORES STARTS HERE*****")
# calculateScoresVariousAlphaValues(y_crossfold,y_test)
# df3.to_csv('output_classifer_linear_reg_prob_test.csv',index=False)
# sentences_test.to_csv('output_junk',index=False)
# df3.to_csv('output_classifer_linear_reg_prob_test.csv',index=False)
# score = voter.score(test_x, y_test)
#score = clf.score(vector_test, y_test)
#Calculate Prediction for it
# print("Voter Accuracy:", score)
# print("MLP Accuracy:", score)
# score = classifier2.score(test_x, y_test)
#score = clf.score(vector_test, y_test)
# print("MLP Accuracy:", score)
## New One Starts here
if isClassify == True:
df2 = read_csv2("test_noannotations.csv")
w2v_preprocessing(df2) # process our junk here by converting it into tokens
if isBOW is True:
unseendata = vectorize(sentences_train['Comment'],sentences_test['Comment'])
elif isWord2Vec is True:
unseendata = convertToVectorFromDataframe(df2)
elif isEmbeddings is True:
unseendata = loadEmbeddings('embeddings_prod.npy')
#nouns = df2['nouns'].to_numpy()
#nouns = preprocessing.minmax_scale(nouns)
#unseendata = mergeMatrix(unseendata,nouns)
tester = classifier.predict_proba(unseendata)
tester = tester[:,1]
i = 0.425
squarer = (lambda x: 1 if x>=i else 0)
fucd = numpy.vectorize(squarer) #our function to calculate
tester = fucd(tester)
# print(tester)
# print(type(tester))
df3 = | pd.DataFrame(tester) | pandas.DataFrame |
from timeit import default_timer as timer
from collections import defaultdict
from tqdm import tqdm
import pandas as pd
#from evaluation_config import eval_runs
tqdm.pandas(desc="progress: ")
def add_scores(scores, list_of_param_dicts):
for param_dict in list_of_param_dicts:
for key, value in zip(param_dict, param_dict.values()):
scores[key] = value
return scores
# parameters
#eval_run = eval_runs["WEP_RNNs"]
def eval_unsup(eval_run) -> None:
# initialize variables
eval_run.init_result_path()
eval_run.init_iter_counter()
result_df = | pd.DataFrame() | pandas.DataFrame |
from pyulog import ULog
import pandas as pd
def getVioData(ulog: ULog) -> pd.DataFrame:
vehicle_visual_odometry = ulog.get_dataset("vehicle_visual_odometry").data
vio = | pd.DataFrame({'timestamp': vehicle_visual_odometry['timestamp'],
'sensor' : 'vio',
'x': vehicle_visual_odometry["x"],
'y': vehicle_visual_odometry["y"],
'z': vehicle_visual_odometry["z"],
'qw': vehicle_visual_odometry["q[0]"],
'qx': vehicle_visual_odometry["q[1]"],
'qy': vehicle_visual_odometry["q[2]"],
'qz': vehicle_visual_odometry["q[3]"],
'vx': vehicle_visual_odometry["vx"],
'vy': vehicle_visual_odometry["vy"],
'vz': vehicle_visual_odometry["vz"]
}) | pandas.DataFrame |
# coding: utf-8
# Copyright 2020 <NAME>
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pandas as pd
import numpy as np
def test_learning(probs, returns):
"""Function to calculate total returns
Args:
probs, returns: probabilities, returns
Returns:
gain, outcome: total returns, classes with the greatest returns for each instance
"""
outcome = np.argmax(probs, 1)
gain = sum(returns[np.arange(len(returns)), np.argmax(probs,1)])
return gain, outcome
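# Hypothetical usage sketch: with two instances and two classes, the class with the highest
# probability is chosen per row and the corresponding returns are summed.
# probs = np.array([[0.7, 0.3], [0.2, 0.8]])
# returns = np.array([[1.0, -1.0], [0.5, 2.0]])
# gain, outcome = test_learning(probs, returns) # gain = 1.0 + 2.0 = 3.0, outcome = [0, 1]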
class write_results:
"""A helper class to simulate data for Cost Sensitive Learning"
Attributes:
formatter: formatter of the specified experiment
test_df: dataframe to hold test results
train_df: dataframe to hold train results
val_df: dataframe to hold validation results
mip_perf: dataframe to hold mip and mip-wi results seperately
average_results: dataframe to hold average results
mip_avg_perf: dataframe to hold average results of mip and mip-wi
methods: list to store the methods
validation: whether there are results of the validation folds
"""
def __init__(self, formatter):
self.formatter = formatter
self.test_df = pd.DataFrame()
self.train_df = pd.DataFrame()
self.val_df = pd.DataFrame()
self.mip_perf = pd.DataFrame()
self.average_results = | pd.DataFrame() | pandas.DataFrame |
import os
import numpy as np
import pandas as pd
import tsplib95
import networkx as nx
from tqdm import tqdm
import sys
import re
def prepare_testset_FINDER(data_dir, scale_factor=0.000001):
graph_list = []
atoi = lambda text : int(text) if text.isdigit() else text
natural_keys = lambda text : [atoi(c) for c in re.split('(\d+)', text)]
fnames = os.listdir(data_dir)
fnames.sort(key=natural_keys)
print("Loading test graphs...")
for fname in tqdm(fnames):
try:
if not '.tsp' in fname or '.sol' in fname:
continue
problem = tsplib95.load(data_dir + fname)
g = problem.get_graph()
except:
print('Error, while loading file {}'.format(fname))
# remove edges from one node to itself
ebunch=[(k,k) for k in g.nodes]
g.remove_edges_from(ebunch)
# reset node index to start at zero
mapping = {k:i for i,k in enumerate(g.nodes)}
g = nx.relabel_nodes(g, mapping)
# scale size of the graphs such that it fits into 0,1 square
for node in g.nodes:
g.nodes[node]['coord'] = np.array(g.nodes[node]['coord']) * scale_factor
for edge in g.edges:
g.edges[edge]['weight'] = g.edges[edge]['weight'] * scale_factor
graph_list.append(g)
print("Number of loaded test graphs:",len(graph_list))
return graph_list, fnames
def prepare_testset_S2VDQN(folder, scale_factor=0.000001):
if folder[-1] == '/':
folder = folder[0:-1]
graph_list = []
fnames = []
print("Loading test graphs...")
with open(f'{folder}/paths.txt', 'r') as f:
for line in tqdm(f):
fname = line.split('/')[-1].strip()
file_path = '%s/%s' % (folder, fname)
try:
if not '.tsp' in fname or '.sol' in fname:
continue
problem = tsplib95.load(file_path)
g = problem.get_graph()
except:
print('Error, while loading file {}'.format(fname))
# remove edges from one node to itself
ebunch=[(k,k) for k in g.nodes]
g.remove_edges_from(ebunch)
# reset node index to start at zero
mapping = {k:i for i,k in enumerate(g.nodes)}
g = nx.relabel_nodes(g, mapping)
# scale size of the graphs such that it fits into 0,1 square
for node in g.nodes:
g.nodes[node]['coord'] = np.array(g.nodes[node]['coord']) * scale_factor
for edge in g.edges:
g.edges[edge]['weight'] = g.edges[edge]['weight'] * scale_factor
graph_list.append(g)
fnames.append(fname)
# print("Number of loaded test graphs:",len(graph_list))
return graph_list, fnames
def get_approx_ratios(data_dir, test_lengths):
fnames = get_fnames(data_dir)
true_lengths = []
len_dict = get_len_dict(data_dir)
for fname in fnames:
true_lengths.append(len_dict[fname])
approx_ratios = [length[0]/length[1] for length in zip(test_lengths, true_lengths)]
mean_approx_ratio = np.mean([length[0]/length[1] for length in zip(test_lengths, true_lengths)])
return approx_ratios, mean_approx_ratio
def get_fnames(dir, search_phrase='tsp'):
atoi = lambda text : int(text) if text.isdigit() else text
natural_keys = lambda text : [atoi(c) for c in re.split('(\d+)', text)]
try:
fnames = [f for f in os.listdir(dir) if os.path.isfile(f'{dir}/{f}')]
fnames.sort(key=natural_keys)
except:
print('\nBad directory!')
fnames = [fname for fname in fnames if search_phrase in fname]
return fnames
def get_len_dict(folder):
# get lengths
with open(f'{folder}/lengths.txt', 'r') as f:
lines = f.readlines()
file_names = [line.split(':')[0].strip() for k, line in enumerate(lines)]
test_lens = [float(line.split(':')[-1].strip()) for k, line in enumerate(lines)]
len_dict = dict(zip(file_names, test_lens))
return len_dict
def save_solutions(data_dir, solutions, model_name, suffix=''):
fnames = get_fnames(data_dir)
sol_df = pd.DataFrame()
idx = 0
tqdm.write("Saving solutions...")
for fname in tqdm(fnames):
if not '.tsp' in fname or '.sol' in fname:
continue
tmp_df = pd.DataFrame()
tmp_df[fname] = solutions[idx]
sol_df = pd.concat([sol_df,tmp_df.astype(int)], ignore_index=False, axis=1)
idx += 1
test_set_folder = data_dir.split("/")[-2]
test_set_name = data_dir.split("/")[-1]
result_path = f'results/{model_name}/{test_set_folder}/{test_set_name}'
model_name_short = '_'.join(model_name.split('_')[0:-4])
create_dir(result_path)
if suffix:
sol_df.to_csv(f'{result_path}/solutions_{model_name_short}_{suffix}.csv')
else:
sol_df.to_csv(f'{result_path}/solutions_{model_name_short}.csv')
def save_lengths(data_dir, lengths, model_name, suffix=''):
fnames = get_fnames(data_dir)
lens_df = pd.DataFrame()
idx = 0
tqdm.write("Saving solution lengths...")
for fname in tqdm(fnames):
if not '.tsp' in fname or '.sol' in fname:
continue
tmp_df = | pd.DataFrame() | pandas.DataFrame |
#!/usr/bin/env python
"""Convert EURECOM data dump file into a train sets.
"""
import ast
import os
import numpy as np
import pandas as pd
from collections import defaultdict
import langid
RANDOM_NUMBER = 621323849
RANDOM_NUMBER2 = 581085259
FNAME = "data/total_post.csv"
COLS = [
"obj",
"museum",
"place_country_code",
"time_label",
"technique_group",
"material_group",
"category_group",
]
LABEL_COLS = [
"place_country_code",
"time_label",
"technique_group",
"material_group",
]
MIN_LABEL_COUNT = 120
def rec_is_nan(x):
"""Check if a record has a NaN equivalent value.
I don't think this first block of checks is necessary in the latest
version of the data export.
"""
if | pd.isna(x) | pandas.isna |
import importlib
import json
import os
import pdb
import sys
import fnet
import pandas as pd
import tifffile
import numpy as np
from fnet.transforms import normalize
def pearson_loss(x, y):
#x = output
#y = target
vx = x - torch.mean(x)
vy = y - torch.mean(y)
cost = torch.sum(vx * vy) / (torch.sqrt(torch.sum(vx ** 2)) * torch.sqrt(torch.sum(vy ** 2)))
return cost
# code retrieved on 21.05.21 from: https://github.com/pytorch/pytorch/issues/1254
def pearsonr(x, y):
"""
Mimics `scipy.stats.pearsonr`
Arguments
---------
x : 1D torch.Tensor
y : 1D torch.Tensor
Returns
-------
r_val : float
pearsonr correlation coefficient between x and y
Scipy docs ref:
https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.pearsonr.html
Scipy code ref:
https://github.com/scipy/scipy/blob/v0.19.0/scipy/stats/stats.py#L2975-L3033
Example:
>>> x = np.random.randn(100)
>>> y = np.random.randn(100)
>>> sp_corr = scipy.stats.pearsonr(x, y)[0]
>>> th_corr = pearsonr(torch.from_numpy(x), torch.from_numpy(y))
>>> np.allclose(sp_corr, th_corr)
"""
x = x.detach().cpu().numpy().flatten() #pred
y = y.detach().cpu().numpy().flatten() #target
pearson_img = np.corrcoef(x,y)
r_val = pearson_img[0,1]
return r_val
def load_model(path_model, gpu_ids=0, module='fnet_model', in_channels=1, out_channels=1):
module_fnet_model = importlib.import_module('fnet.' + module)
if os.path.isdir(path_model):
path_model = os.path.join(path_model, 'model.p')
model = module_fnet_model.Model(in_channels=in_channels, out_channels=out_channels)
model.load_state(path_model, gpu_ids=gpu_ids)
return model
def load_model_from_dir(path_model_dir, gpu_ids=0, in_channels=1, out_channels=1):
assert os.path.isdir(path_model_dir)
path_model_state = os.path.join(path_model_dir, 'model.p')
model = fnet.fnet_model.Model(in_channels=in_channels, out_channels=out_channels)
model.load_state(path_model_state, gpu_ids=gpu_ids)
return model
def compute_dataset_min_max_ranges(train_path, val_path=None, norm=False):
df_train = pd.read_csv(train_path)
if val_path is not None:
df_val = | pd.read_csv(val_path) | pandas.read_csv |
# -*- coding: utf-8 -*-
"""
Spyder Editor
This is a temporary script file.
"""
from __future__ import division
from datetime import datetime
from sklearn import linear_model
import pandas as pd
import numpy as np
import scipy.stats as st
import statsmodels.distributions.empirical_distribution as edis
import seaborn as sns; sns.set(color_codes=True)
import matplotlib.pyplot as plt
#########################################################################
# The purpose of this script is to use historical temperature and streamflow data
# to calculate synthetic time series of daily flows at each of the stream gages
# used in the hydropower production models.
# Regression and vector-autoregressive errors are used to simulate total annual
# streamflows, and these are then paired with daily streamflow fractions tied
# to daily temperature dynamics
#########################################################################
# Import historical tmeperature data
df_temp = pd.read_excel('Synthetic_streamflows/hist_temps_1953_2007.xlsx')
his_temp_matrix = df_temp.values
# Import calender
calender=pd.read_excel('Synthetic_streamflows/BPA_hist_streamflow.xlsx',sheet_name='Calender',header= None)
calender=calender.values
julian=calender[:,2]
###############################
# Synthetic HDD CDD calculation
# Simulation data
sim_weather=pd.read_csv('Synthetic_weather/synthetic_weather_data.csv',header=0)
# Load temperature data only
cities = ['SALEM_T','EUGENE_T','SEATTLE_T','BOISE_T','PORTLAND_T','SPOKANE_T','FRESNO_T','LOS ANGELES_T','SAN DIEGO_T','SACRAMENTO_T','SAN JOSE_T','SAN FRANCISCO_T','TUCSON_T','PHOENIX_T','LAS VEGAS_T']
sim_temperature=sim_weather[cities]
# Convert temperatures to Fahrenheit
sim_temperature= (sim_temperature*(9/5))+32
sim_temperature=sim_temperature.values
num_cities = len(cities)
num_sim_days = len(sim_temperature)
HDD_sim = np.zeros((num_sim_days,num_cities))
CDD_sim = np.zeros((num_sim_days,num_cities))
# calculate daily records of heating (HDD) and cooling (CDD) degree days
for i in range(0,num_sim_days):
for j in range(0,num_cities):
HDD_sim[i,j] = np.max((0,65-sim_temperature[i,j]))
CDD_sim[i,j] = np.max((0,sim_temperature[i,j] - 65))
# calculate annual totals of heating and cooling degree days for each city
annual_HDD_sim=np.zeros((int(len(HDD_sim)/365),num_cities))
annual_CDD_sim=np.zeros((int(len(CDD_sim)/365),num_cities))
for i in range(0,int(len(HDD_sim)/365)):
for j in range(0,num_cities):
annual_HDD_sim[i,j]=np.sum(HDD_sim[0+(i*365):365+(i*365),j])
annual_CDD_sim[i,j]=np.sum(CDD_sim[0+(i*365):365+(i*365),j])
########################################################################
#Calculate HDD and CDD for historical temperature data
num_cities = len(cities)
num_days = len(his_temp_matrix)
# daily records
HDD = np.zeros((num_days,num_cities))
CDD = np.zeros((num_days,num_cities))
for i in range(0,num_days):
for j in range(0,num_cities):
HDD[i,j] = np.max((0,65-his_temp_matrix[i,j+1]))
CDD[i,j] = np.max((0,his_temp_matrix[i,j+1] - 65))
# annual sums
annual_HDD=np.zeros((int(len(HDD)/365),num_cities))
annual_CDD=np.zeros((int(len(CDD)/365),num_cities))
for i in range(0,int(len(HDD)/365)):
for j in range(0,num_cities):
annual_HDD[i,j]=np.sum(HDD[0+(i*365):365+(i*365),j])
annual_CDD[i,j]=np.sum(CDD[0+(i*365):365+(i*365),j])
###########################################################################################
#This section is used for calculating total hydro
# Load relevant streamflow data (1953-2007)
BPA_streamflow=pd.read_excel('Synthetic_streamflows/BPA_hist_streamflow.xlsx',sheet_name='Inflows',header=0)
Hoover_streamflow=pd.read_csv('Synthetic_streamflows/Hoover_hist_streamflow.csv',header=0)
CA_streamflow=pd.read_excel('Synthetic_streamflows/CA_hist_streamflow.xlsx',header=0)
Willamette_streamflow= | pd.read_csv('Synthetic_streamflows/Willamette_hist_streamflow.csv',header=0) | pandas.read_csv |
import pandas as pd
from tabulate import tabulate
from sklearn.model_selection import train_test_split
def beautiful_nan_table(dataframe):
nans = dataframe.isna().sum().to_frame().rename(columns={0:"Number of Null Values"}).T
print(tabulate(nans, nans.columns, tablefmt="fancy_grid"))
def train_valiadate_test_split(df, train_size, validate_size, test_size):
test_fraction = (test_size) / (train_size + validate_size + test_size)
X_train_validate, X_test = train_test_split(df, test_size=test_fraction)
validate_fraction = (validate_size) / (train_size + validate_size)
X_train, X_validate = train_test_split(X_train_validate, test_size=validate_fraction)
return X_train, X_validate, X_test
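# Hypothetical usage sketch: the sizes act as relative weights, so (0.6, 0.2, 0.2) first splits off
# 20% of the rows for test, then splits the remaining 80% into 75%/25%, i.e. roughly 60%/20%/20%
# of the original dataframe.
# df = pd.DataFrame({'x': range(100)})
# train, validate, test = train_valiadate_test_split(df, 0.6, 0.2, 0.2)
# print(len(train), len(validate), len(test)) # roughly 60, 20, 20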
def list_to_new_col(df, input_list, col_name):
df = df.copy()
df[col_name] = | pd.Series(input_list) | pandas.Series |
"""
concavity_automator comprises multiple scripts automating the concavity-constraining method for landscapes
"""
import lsdtopytools as lsd
import numpy as np
import numba as nb
import pandas as pd
from matplotlib import pyplot as plt
import sys
import matplotlib
from matplotlib.patches import Polygon
from matplotlib.collections import PatchCollection
import math
from lsdtopytools.numba_tools import travelling_salesman_algortihm, remove_outliers_in_drainage_divide
import random
import matplotlib.gridspec as gridspec
from multiprocessing import Pool, current_process
from scipy import spatial,stats
import numba as nb
import copy
from pathlib import Path
import pylab as pl
from scipy.signal import savgol_filter
from scipy.interpolate import interp1d
def norm_by_row(A):
"""
Subfunction used for vectorised normalisation of disorder by the max of each row using the apply_along_axis function
B.G
"""
return A/A.max()
def norm_by_row_by_range(A):
"""
Subfunction used for vectorised normalisation of disorder by the range of concavity using the apply_along_axis function
B.G
"""
return (A - A.min())/(A.max() - A.min())
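# Hypothetical usage sketch: both helpers are meant to be applied row-wise to the
# (n_combinations x n_tested_theta) disorder array saved by process_basin, e.g.
# all_data = np.load("mybasin_disorder_tot.npy") # illustrative file name
# normed_by_max = np.apply_along_axis(norm_by_row, 1, all_data)
# normed_by_range = np.apply_along_axis(norm_by_row_by_range, 1, all_data)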
def numfmt(x, pos):
"""
Plotting subfunction to automate tick formatting from metres to kilometres
B.G
"""
s = '{:d}'.format(int(round(x / 1000.0)))
return s
def get_best_bit_and_err_from_Dstar(thetas, medD, fstD, thdD):
"""
Takes output from the concavity calculation to calculate the best-fit theta and its error
"""
# Calculating the index of minimum median disorder to get the best-fit
index_of_BF = np.argmin(medD)
# Getting the Dstar value of the best-fit
dstar_val = medD[index_of_BF]
# Getting the actual best-fit
BF = thetas[index_of_BF]
# Preformatting 2 arrays for calculating the error: I am just interested in the first half for the first error and the second half for the second
A = np.copy(fstD)
A[index_of_BF+1:] = 9999
B = np.copy(fstD)
B[:index_of_BF] = 9999
# calculating the error by extracting the closest theta with a Dstar close to the median best fit ones
err = ( thetas[np.abs(A - dstar_val).argmin()] , thetas[np.abs(B - dstar_val).argmin()] )
# Returning a tuple with [0] being the best fit and [1] another tuple of errors
return BF,err
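# Hypothetical usage sketch: thetas is the vector of tested concavities and medD/fstD are the
# median and first-quartile D* curves returned by get_median_first_quartile_Dstar; the thdD
# argument is not used by the function body.
# thetas = np.arange(0.05, 1.0, 0.025) # illustrative tested range
# best_fit, (err_low, err_high) = get_best_bit_and_err_from_Dstar(thetas, medD, fstD, None)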
def process_basin(ls, **kwargs):
"""
Main function processing the concavity. It looks a bit convoluted but it is required for clean multiprocessing.
Takes at least one argument: ls, which is a list of arguments
ls[0] -> the number of the basin (heavily used by automatic multiprocessing)
ls[1] -> the X coordinate of the basin outlet
ls[2] -> the Y coordinate of the basin outlet
ls[3] -> area_threshold used for the analysis
ls[4] -> prefix before the number of the basin to read the file input
Also takes optional kwargs arguments:
ignore_numbering: just use the prefix as name for the DEM
extension: if your extension is not .tif, you can give it here WITHOUT THE DOT
overwrite_dem_name: used if you want to use the function from outside the automations: you need to provide the dem name WITH THE EXTENSION
"""
number = ls[0]
X = ls[1]
Y = ls[2]
area_threshold = ls[3]
prefix = ls[4]
print("Processing basin ", number, " with proc ", current_process())
if("ignore_numbering" not in kwargs):
kwargs["ignore_numbering"] = False
if("extension" not in kwargs):
kwargs["extension"] = "tif"
if("n_tribs_by_combo" not in kwargs):
kwargs["n_tribs_by_combo"] = 4
if(kwargs["ignore_numbering"] == True):
name = prefix
else:
name = prefix + "%s"%(number)
if(kwargs["precipitation_raster"] == ""):
precipitation = False
else:
precipitation = True
# I spent a significant amount of time preprocessing it, see SM
n_rivers = 0
dem_name ="%s.%s"%(name,kwargs["extension"])
if("overwrite_dem_name" in kwargs):
dem_name = kwargs["overwrite_dem_name"]
MD = lsd.LSDDEM(file_name = dem_name, already_preprocessed = True)
# Extracting basins
if(precipitation):
MD.CommonFlowRoutines( ingest_precipitation_raster = kwargs["precipitation_raster"], precipitation_raster_multiplier = 1, discharge = True)
else:
MD.CommonFlowRoutines()
MD.ExtractRiverNetwork( method = "area_threshold", area_threshold_min = area_threshold)
print("River extracted")
MD.DefineCatchment( method="from_XY", X_coords = [X], Y_coords = [Y], coord_search_radius_nodes = 10 )#, X_coords = [X_coordinates_outlets[7]], Y_coords = [Y_coordinates_outlets[7]])
print("CAtchment defined")
MD.GenerateChi(theta = 0.4, A_0 = 1)
print("River_network_generated")
n_rivers = MD.df_base_river.source_key.unique().shape[0]
print("You have", n_rivers, "rivers and",MD.df_base_river.shape[0],"river pixels")
MD.df_base_river.to_feather("%s_rivers.feather"%(name))
print("Starting the movern calculation")
MD.cppdem.calculate_movern_disorder(0.05, 0.025, 38, 1, area_threshold, kwargs["n_tribs_by_combo"])
print("DONE with movern, let's format the output")
OVR_dis = MD.cppdem.get_disorder_dict()[0]
OVR_tested = MD.cppdem.get_disorder_vec_of_tested_movern()
pd.DataFrame({"overall_disorder":OVR_dis, "tested_movern":OVR_tested }).to_feather("%s_overall_test.feather"%(name))
normalizer = MD.cppdem.get_n_pixels_by_combinations()[0]
np.save("%s_disorder_normaliser.npy"%(name), normalizer)
all_disorder = MD.cppdem.get_best_fits_movern_per_BK()
np.save("%s_concavity_tot.npy"%(name), all_disorder[0])
print("Getting results")
results = np.array(MD.cppdem.get_all_disorder_values()[0])
np.save("%s_disorder_tot.npy"%(name), results)
XY = MD.cppdem.query_xy_for_each_basin()["0"]
tdf = pd.DataFrame(XY)
tdf.to_feather("%s_XY.feather"%(name))
return 0
def theta_quick_constrain_single_basin(MD,X_coordinate_outlet = 0, Y_coordinate_outlet = 0, area_threshold = 1500):
"""
Quickly constrains the concavity (theta) of a single basin with the disorder method.
Takes the following arguments:
MD -> an lsdtopytools LSDDEM object with the flow routines already processed
X_coordinate_outlet -> the X coordinate of the basin outlet
Y_coordinate_outlet -> the Y coordinate of the basin outlet
area_threshold -> minimum drainage area (in pixels) used to extract the river network
"""
# number = ls[0]
# X = ls[1]
# Y = ls[2]
# area_threshold = ls[3]
# prefix = ls[4]
# print("Processing basin ", number, " with proc ", current_process())
# if("ignore_numbering" not in kwargs):
# kwargs["ignore_numbering"] = False
# if("extension" not in kwargs):
# kwargs["extension"] = "tif"
# if("n_tribs_by_combo" not in kwargs):
# kwargs["n_tribs_by_combo"] = 4
# if(kwargs["ignore_numbering"] == True):
# name = prefix
# else:
# name = prefix + "%s"%(number)
# if(kwargs["precipitation_raster"] == ""):
# precipitation = False
# else:
# precipitation = True
# I spent a significant amount of time preprocessing it, see SM
n_rivers = 0
# dem_name ="%s.%s"%(name,kwargs["extension"])
# if("overwrite_dem_name" in kwargs):
# dem_name = kwargs["overwrite_dem_name"]
# MD = lsd.LSDDEM(file_name = dem_name, already_preprocessed = True)
# # Extracting basins
# if(precipitation):
# MD.CommonFlowRoutines( ingest_precipitation_raster = kwargs["precipitation_raster"], precipitation_raster_multiplier = 1, discharge = True)
# else:
# MD.CommonFlowRoutines()
# print("Experimental function (Gailleton et al., submitted), if it crashes restart from a clean LSDDEM object with only the flow routines processed.")
MD.ExtractRiverNetwork( method = "area_threshold", area_threshold_min = area_threshold)
# print("River pre-extracted")
MD.DefineCatchment( method="from_XY", X_coords = X_coordinate_outlet, Y_coords = Y_coordinate_outlet, coord_search_radius_nodes = 10 )#, X_coords = [X_coordinates_outlets[7]], Y_coords = [Y_coordinates_outlets[7]])
# print("CAtchment defined")
MD.GenerateChi(theta = 0.4, A_0 = 1)
# print("River_network_generated")
n_rivers = MD.df_base_river.source_key.unique().shape[0]
print("DEBUG::You have", n_rivers, "rivers and",MD.df_base_river.shape[0],"river pixels \n")
# MD.df_base_river.to_feather("%s_rivers.feather"%(name))
# print("Starting the movern calculation")
MD.cppdem.calculate_movern_disorder(0.05, 0.025, 38, 1, area_threshold, 4)
# print("DONE with movern, let's format the output")
OVR_dis = MD.cppdem.get_disorder_dict()[0]
OVR_tested = MD.cppdem.get_disorder_vec_of_tested_movern()
# pd.DataFrame({"overall_disorder":OVR_dis, "tested_movern":OVR_tested }).to_feather("%s_overall_test.feather"%(name))
normalizer = MD.cppdem.get_n_pixels_by_combinations()[0]
# np.save("%s_disorder_normaliser.npy"%(name), normalizer)
all_disorder = MD.cppdem.get_best_fits_movern_per_BK()
# np.save("%s_concavity_tot.npy"%(name), all_disorder[0])
# print("Getting results")
results = np.array(MD.cppdem.get_all_disorder_values()[0])
# np.save("%s_disorder_tot.npy"%(name), results)
# XY = MD.cppdem.query_xy_for_each_basin()["0"]
# tdf = pd.DataFrame(XY)
# tdf.to_feather("%s_XY.feather"%(name))
# print("\n\n")
try:
from IPython.display import display, Markdown, Latex
todusplay = r"""
**Thanks for constraining** $\theta$ with the disorder algorithm from _Mudd et al., 2018_ and _Gailleton et al, submitted_.
Keep in mind that it is not straightforward and that the "best fit" we suggest is most of the time the "least worst" value maximising the collinearity in $\chi$ space.
Especially in large, complex basin, several $\theta$ actually fit different areas and the best fit is just a try to make everyone happy where it is not necessarily possible.
$\theta$ constraining results:
median $\theta$ | $1^{st}$ Q | $3^{rd}$ Q
--- | --- | ---
%s | %s | %s
"""%(round(np.nanmedian(all_disorder[0]),3), round(np.nanpercentile(all_disorder[0],25),3), round(np.nanpercentile(all_disorder[0],75),3))
display(Markdown(todusplay))
except:
pass
return all_disorder
def get_median_first_quartile_Dstar(ls):
"""
Function which post-processes results from one analysis to return the median and first-quartile curves of all best-fits
param:
ls: full prefix (= including basin number if needed)
B.G
"""
print("Normalising D* for ", ls)
name_to_load = ls
# loading the file containng ALL the data
all_data = np.load(name_to_load + "_disorder_tot.npy")
if(all_data.shape[0]>1):
# normalise by max each row
all_data = np.apply_along_axis(norm_by_row,1,all_data)
# Median by column
ALLDmed = np.apply_along_axis(np.median,0,all_data)
# Percentile by column
ALLDfstQ = np.apply_along_axis(lambda z: np.percentile(z,25),0,all_data)
else:
return name_to_load
return ALLDmed, ALLDfstQ, ls
def get_median_first_quartile_Dstar_r(ls):
"""
Function which post-processes results from one analysis to return the median and first-quartile curves of all best-fits
param:
ls: full prefix (= including basin number if needed)
B.G
"""
print("Normalising D*_r for ", ls)
name_to_load = ls
# loading the file containng ALL the data
all_data = np.load(name_to_load + "_disorder_tot.npy")
if(all_data.shape[0]>1):
# normalise by max each row
all_data = np.apply_along_axis(norm_by_row_by_range,1,all_data)
# Median by column
ALLDmed = np.apply_along_axis(np.median,0,all_data)
# Percentile by column
ALLDfstQ = np.apply_along_axis(lambda z: np.percentile(z,25),0,all_data)
else:
return name_to_load
return ALLDmed, ALLDfstQ, ls
def plot_single_theta(ls, **kwargs):
"""
For a multiple-basin analysis on the same DEM, this plots a global map with each basin colored by D^*.
Needs the post-processing functions to pre-analyse the outputs.
The layout of this function might seem a bit convoluted, but it makes multiprocessing easy, as these plots take time to produce.
param: ls, a list where ls[0] is the theta value to plot and ls[1] is the file prefix.
"""
this_theta = ls[0]
prefix = ls[1]
# Loading the small summary df
df = pd.read_csv(prefix +"summary_results.csv")
# Loading the HillShade
HS = lsd.raster_loader.load_raster(prefix + "HS.tif")
# Formatting ticks
import matplotlib.ticker as tkr # has classes for tick-locating and -formatting
yfmt = tkr.FuncFormatter(numfmt)
xfmt = tkr.FuncFormatter(numfmt)
print("plotting D* for theta", this_theta)
# Getting the Figure and the ticks right
fig,ax = plt.subplots()
ax.yaxis.set_major_formatter(yfmt)
ax.xaxis.set_major_formatter(xfmt)
# Normalising the Hillshade and taking care of the no data
HS["array"] = HS["array"]/HS["array"].max()
HS["array"][HS["array"]<0] = np.nan
# Plotting the hillshade
ax.imshow(HS["array"], extent = HS["extent"], cmap = "gray", vmin= 0.2, vmax = 0.8)
# Building the array of concavity
A = np.zeros(HS["array"].shape)
A[:,:] = np.nan
# For each raster, I am reading rows and cols corresponding to the main raster and plotting it with the requested value
for name in df["raster_name"]:
row = np.load(name + "_row.npy")
col = np.load(name + "_col.npy")
val = df["D*_%s"%this_theta][df["raster_name"] == name].values[0] # A wee convoluted but it work and it is fast so...
A[row,col] = val
# PLOTTING THE D*
ax.imshow(A, extent = HS["extent"], cmap= "gnuplot2", zorder = 2, alpha = 0.75, vmin = 0.1, vmax = 0.9)
# You may want to change the extents of the plot
if("xlim" in kwargs):
ax.set_xlim(kwargs["xlim"])
if("ylim" in kwargs):
ax.set_ylim(kwargs["ylim"])
ax.set_xlabel("Easting (km)")
ax.set_ylabel("Northing (km)")
# Saving the figure
plt.tight_layout()#
plt.savefig(prefix + "MAP_disorder_%s.png"%(this_theta), dpi = 500)
plt.close(fig)
print("plotting D*_r for theta", this_theta)
# Getting the Figure and the ticks right
fig,ax = plt.subplots()
ax.yaxis.set_major_formatter(yfmt)
ax.xaxis.set_major_formatter(xfmt)
# Plotting the hillshade
ax.imshow(HS["array"], extent = HS["extent"], cmap = "gray", vmin= 0.2, vmax = 0.8)
# Building the array of concavity
A = np.zeros(HS["array"].shape)
A[:,:] = np.nan
# For each raster, I am reading rows and cols corresponding to the main raster and plotting it with the requested value
for name in df["raster_name"]:
row = np.load(name + "_row.npy")
col = np.load(name + "_col.npy")
val = df["D*_r_%s"%this_theta][df["raster_name"] == name].values[0] # A wee convoluted but it work and it is fast so...
A[row,col] = val
# PLOTTING THE D*
ax.imshow(A, extent = HS["extent"], cmap= "gnuplot2", zorder = 2, alpha = 0.75, vmin = 0.1, vmax = 0.9)
# You may want to change the extents of the plot
if("xlim" in kwargs):
ax.set_xlim(kwargs["xlim"])
if("ylim" in kwargs):
ax.set_ylim(kwargs["ylim"])
ax.set_xlabel("Easting (km)")
ax.set_ylabel("Northing (km)")
# Saving the figure
plt.tight_layout()#
plt.savefig(prefix + "MAP_disorder_by_range_%s.png"%(this_theta), dpi = 500)
plt.close(fig)
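# Hedged usage sketch: plot_single_theta expects a (theta, prefix) pair, where
# theta is one of the tested concavities written to the summary csv and prefix
# matches the files produced by post_process_analysis_for_Dstar. The values
# below ("my_dem_", 0.45) are hypothetical placeholders.
def _example_plot_single_theta_call():
    plot_single_theta((0.45, "my_dem_"), xlim=[200000, 400000])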
def plot_min_D_star_map(ls, **kwargs):
"""
For a multiple-basin analysis on the same DEM, this plots the global map with each basin colored by D^*
Needs the post-processing function to pre-analyse the outputs.
The layout of this function might seem a bit convoluted, but it makes multiprocessing easy, as these plots take time to produce.
param
"""
this_theta = ls[0]
prefix = ls[1]
# Loading the small summary df
df = pd.read_csv(prefix +"summary_results.csv")
# Loading the HillShade
HS = lsd.raster_loader.load_raster(prefix + "HS.tif")
# Formatting ticks
import matplotlib.ticker as tkr # has classes for tick-locating and -formatting
yfmt = tkr.FuncFormatter(numfmt)
xfmt = tkr.FuncFormatter(numfmt)
print("plotting D* for theta", this_theta)
# Getting the Figure and the ticks right
fig,ax = plt.subplots()
ax.yaxis.set_major_formatter(yfmt)
ax.xaxis.set_major_formatter(xfmt)
# Normalising the Hillshade and taking care of the no data
HS["array"] = HS["array"]/HS["array"].max()
HS["array"][HS["array"]<0] = np.nan
# Plotting the hillshade
ax.imshow(HS["array"], extent = HS["extent"], cmap = "gray", vmin= 0.2, vmax = 0.8)
# Building the array of concavity
A = np.zeros(HS["array"].shape)
A[:,:] = np.nan
df_theta = pd.read_csv(prefix + "all_raster_names.csv")
thetas = np.round(pd.read_feather(df["raster_name"].iloc[0] + "_overall_test.feather")["tested_movern"].values,decimals = 3)
# For each raster, I am reading rows and cols corresponding to the main raster and plotting it with the requested value
for name in df["raster_name"]:
row = np.load(name + "_row.npy")
col = np.load(name + "_col.npy")
val = 1e12
for tval in thetas:
valtest = df["D*_%s"%tval][df["raster_name"] == name].values[0] # A wee convoluted but it work and it is fast so...
if(valtest<val):
val=valtest
A[row,col] = val
# PLOTTING THE D*
ax.imshow(A, extent = HS["extent"], cmap= "gnuplot2", zorder = 2, alpha = 0.75, vmin = 0.1, vmax = 0.9)
# You may want to change the extents of the plot
if("xlim" in kwargs):
ax.set_xlim(kwargs["xlim"])
if("ylim" in kwargs):
ax.set_ylim(kwargs["ylim"])
ax.set_xlabel("Easting (km)")
ax.set_ylabel("Northing (km)")
# Saving the figure
plt.tight_layout()#
plt.savefig(prefix + "MAP_minimum_disorder_across_theta_%s.png"%(this_theta), dpi = 500)
plt.close(fig)
def post_process_analysis_for_Dstar(prefix, n_proc = 1, base_raster_full_name = "SEC_PP.tif"):
# Loading the list of raster
df = pd.read_csv(prefix + "all_raster_names.csv")
# Preparing the multiprocessing
d_of_med = {}
d_of_fst = {}
d_of_med_r = {}
d_of_fst_r = {}
params = df["raster_name"].tolist()
ras_to_ignore = {}
ras_to_ignore_list = []
for i in params:
ras_to_ignore[i] = False
# running the multiprocessing
with Pool(n_proc) as p:
fprocesses = []
for i in params:
fprocesses.append(p.apply_async(get_median_first_quartile_Dstar, args = (i,)))
for gut in fprocesses:
gut.wait()
# getting the results in the right dictionaries
for gut in fprocesses:
# print(gut.get())
if(isinstance(gut.get(),tuple)):
d_of_med[gut.get()[2]] = gut.get()[0]
d_of_fst[gut.get()[2]] = gut.get()[1]
else:
# print("IGNORING",gut.get() )
ras_to_ignore[gut.get()] = True
ras_to_ignore_list.append(gut.get())
# running the multiprocessing
with Pool(n_proc) as p:
fprocesses = []
for i in params:
fprocesses.append(p.apply_async(get_median_first_quartile_Dstar_r, args = (i,)))
for gut in fprocesses:
gut.wait()
# getting the results in the right dictionaries
for gut in fprocesses:
# print(gut.get())
if(isinstance(gut.get(),tuple)):
d_of_med_r[gut.get()[2]] = gut.get()[0]
d_of_fst_r[gut.get()[2]] = gut.get()[1]
else:
# print("IGNORING",gut.get() )
ras_to_ignore[gut.get()] = True
ras_to_ignore_list.append(gut.get())
# Getting the list of thetas tested
thetas = np.round(pd.read_feather(params[0] + "_overall_test.feather")["tested_movern"].values,decimals = 3)
df["best_fit"] = pd.Series(np.zeros(df.shape[0]), index = df.index)
df["err_neg"] = pd.Series(np.zeros(df.shape[0]), index = df.index)
df["err_pos"] = pd.Series(np.zeros(df.shape[0]), index = df.index)
df["best_fit_norm_by_range"] = pd.Series(np.zeros(df.shape[0]), index = df.index)
df["err_neg_norm_by_range"] = pd.Series(np.zeros(df.shape[0]), index = df.index)
df["err_pos_norm_by_range"] = pd.Series(np.zeros(df.shape[0]), index = df.index)
# Preparing my dataframe to ingest
for t in thetas:
df["D*_%s"%t] = pd.Series(np.zeros(df.shape[0]), index = df.index)
df["D*_r_%s"%t] = pd.Series(np.zeros(df.shape[0]), index = df.index)
# Ingesting the results
for i in range(df.shape[0]):
if(ras_to_ignore[df["raster_name"].iloc[i]]):
continue
BF,err = get_best_bit_and_err_from_Dstar(thetas, d_of_med[df["raster_name"].iloc[i]], d_of_fst[df["raster_name"].iloc[i]], 10)
BF_r,err_r = get_best_bit_and_err_from_Dstar(thetas, d_of_med_r[df["raster_name"].iloc[i]], d_of_fst_r[df["raster_name"].iloc[i]], 10)
df["best_fit"].iloc[i] = BF
df["err_neg"].iloc[i] = err[0]
df["err_pos"].iloc[i] = err[1]
df["best_fit_norm_by_range"].iloc[i] = BF_r
df["err_neg_norm_by_range"].iloc[i] = err_r[0]
df["err_pos_norm_by_range"].iloc[i] = err_r[1]
for t in range(thetas.shape[0]):
df["D*_%s"%thetas[t]].iloc[i] = d_of_med[df["raster_name"].iloc[i]][t]
df["D*_r_%s"%thetas[t]].iloc[i] = d_of_med_r[df["raster_name"].iloc[i]][t]
# Getting the hillshade
mydem = lsd.LSDDEM(file_name = base_raster_full_name,already_preprocessed = True)
HS = mydem.get_hillshade(altitude = 45, angle = 315, z_exageration = 1)
mydem.save_array_to_raster_extent( HS, name = prefix + "HS", save_directory = "./")
# will add X-Y to the summary dataframe
df["X_median"] = pd.Series(np.zeros(df.shape[0]), index = df.index)
df["X_firstQ"] = pd.Series(np.zeros(df.shape[0]), index = df.index)
df["X_thirdtQ"] = pd.Series(np.zeros(df.shape[0]), index = df.index)
df["Y_median"] = pd.Series(np.zeros(df.shape[0]), index = df.index)
df["Y_firstQ"] = pd.Series(np.zeros(df.shape[0]), index = df.index)
df["Y_thirdtQ"] = pd.Series(np.zeros(df.shape[0]), index = df.index)
# I do not multiprocess here: it would require loading the mother raster for each process and would eat a lot of memory
for i in params:
if(ras_to_ignore[i]):
continue
XY = pd.read_feather(i + "_XY.feather")
row,col = mydem.cppdem.query_rowcol_from_xy(XY["X"].values, XY["Y"].values)
np.save(i + "_row.npy", row)
np.save(i + "_col.npy", col)
df["X_median"][df["raster_name"] == i] = XY["X"].median()
df["X_firstQ"][df["raster_name"] == i] = XY["X"].quantile(0.25)
df["X_thirdtQ"][df["raster_name"] == i] = XY["X"].quantile(0.75)
df["Y_median"][df["raster_name"] == i] = XY["Y"].median()
df["Y_firstQ"][df["raster_name"] == i] = XY["Y"].quantile(0.25)
df["Y_thirdtQ"][df["raster_name"] == i] = XY["Y"].quantile(0.75)
#Removing the unwanted
df = df[~df["raster_name"].isin(ras_to_ignore_list)]
# Saving the DataFrame
df.to_csv(prefix +"summary_results.csv", index = False)
print("Done with the post processing")
def plot_main_figures(prefix, **kwargs):
# Loading the list of raster
dfrast = pd.read_csv(prefix + "all_raster_names.csv")
df = pd.read_csv(prefix +"summary_results.csv")
# Creating the folder
Path("./%s_figures"%(prefix)).mkdir(parents=True, exist_ok=True)
print("Printing your histograms first")
fig, ax = plt.subplots()
ax.grid(ls = "--")
ax.hist(df["best_fit"], bins = 19, histtype = "stepfilled", edgecolor = "k", facecolor = "orange", lw = 2)
ax.set_xlabel(r"$\theta$")
plt.tight_layout()
plt.savefig("./%s_figures/%shistogram_all_fits.png"%(prefix, prefix), dpi = 500)
plt.close(fig)
print("Building the IQ CDF")
IQR,bin_edge = np.histogram(df["err_pos"].values - df["err_neg"].values)
fig, ax = plt.subplots()
CSIQR = np.cumsum(IQR)
CSIQR = CSIQR/np.nanmax(CSIQR)*100
bin_edge = bin_edge[1:] - np.diff(bin_edge)
ax.plot(bin_edge, CSIQR, lw = 2, color = "k", alpha = 1)
# ax.axhspan(np.percentile(CSIQR,25),np.percentile(CSIQR,75), lw = 0, color = "r", alpha = 0.2)
ax.fill_between(bin_edge,0,CSIQR, lw = 0, color = "k", alpha = 0.1)
ax.set_xlabel(r"IQR $\theta$ best-fit")
ax.set_ylabel(r"%")
ax.grid(ls = "--", lw = 1)
plt.savefig("./%s_figures/%sCDF_IQR.png"%(prefix, prefix), dpi = 500)
plt.close(fig)
print("plotting the map of best-fit")
# Loading the small summary df
df = pd.read_csv(prefix +"summary_results.csv")
# Loading the HillShade
HS = lsd.raster_loader.load_raster(prefix + "HS.tif")
# Formatting ticks
import matplotlib.ticker as tkr # has classes for tick-locating and -formatting
yfmt = tkr.FuncFormatter(numfmt)
xfmt = tkr.FuncFormatter(numfmt)
print("plotting best-fit")
# Getting the Figure and the ticks right
fig,ax = plt.subplots()
ax.yaxis.set_major_formatter(yfmt)
ax.xaxis.set_major_formatter(xfmt)
# Normalising the Hillshade and taking care of the no data
HS["array"] = HS["array"]/HS["array"].max()
HS["array"][HS["array"]<0] = np.nan
# Plotting the hillshade
ax.imshow(HS["array"], extent = HS["extent"], cmap = "gray", vmin= 0.2, vmax = 0.8)
# Building the array of concavity
A = np.zeros(HS["array"].shape)
A[:,:] = np.nan
# For each raster, I am reading rows and cols corresponding to the main raster and plotting it with the requested value
for name in df["raster_name"]:
row = np.load(name + "_row.npy")
col = np.load(name + "_col.npy")
val = df["best_fit"][df["raster_name"] == name].values[0] # A wee convoluted but it work and it is fast so...
A[row,col] = val
# PLOTTING THE D*
ax.imshow(A, extent = HS["extent"], cmap= "RdYlBu_r", zorder = 2, alpha = 0.75, vmin = 0.1, vmax = 0.9)
# You may want to change the extents of the plot
if("xlim" in kwargs):
ax.set_xlim(kwargs["xlim"])
if("ylim" in kwargs):
ax.set_ylim(kwargs["ylim"])
ax.set_xlabel("Easting (km)")
ax.set_ylabel("Northing (km)")
# Saving the figure
plt.tight_layout()#
plt.savefig("./%s_figures/"%(prefix) +prefix + "MAP_best_fit.png", dpi = 500)
plt.close(fig)
a = np.array([[0,1]])
fig = pl.figure(figsize=(9, 1.5))
img = pl.imshow(a, cmap="RdYlBu_r")
pl.gca().set_visible(False)
cax = pl.axes([0.1, 0.2, 0.8, 0.6])
pl.colorbar(orientation="horizontal", cax=cax)
pl.title(r"$\theta$ best-fit")
pl.savefig("./%s_figures/"%(prefix) +"colorbar_mapbest_fit.png")
pl.close(fig)
# Formatting ticks
import matplotlib.ticker as tkr # has classes for tick-locating and -formatting
yfmt = tkr.FuncFormatter(numfmt)
xfmt = tkr.FuncFormatter(numfmt)
print("plotting min theta")
# Getting the Figure and the ticks right
fig,ax = plt.subplots()
ax.yaxis.set_major_formatter(yfmt)
ax.xaxis.set_major_formatter(xfmt)
# Plotting the hillshade
ax.imshow(HS["array"], extent = HS["extent"], cmap = "gray", vmin= 0.2, vmax = 0.8)
# Building the array of concavity
A = np.zeros(HS["array"].shape)
A[:,:] = np.nan
df_theta = pd.read_csv(prefix + "all_raster_names.csv")
thetas = np.round(pd.read_feather(df["raster_name"].iloc[0] + "_overall_test.feather")["tested_movern"].values,decimals = 3)
# For each raster, I am reading rows and cols corresponding to the main raster and plotting it with the requested value
for name in df["raster_name"]:
row = np.load(name + "_row.npy")
col = np.load(name + "_col.npy")
val = 1e12
for tval in thetas:
valtest = df["D*_%s"%tval][df["raster_name"] == name].values[0] # A wee convoluted but it work and it is fast so...
if(valtest<val):
val=valtest
A[row,col] = val
# PLOTTING THE D*
ax.imshow(A, extent = HS["extent"], cmap= "gnuplot2", zorder = 2, alpha = 0.75, vmin = 0.05, vmax = 0.65)
# You may want to change the extents of the plot
if("xlim" in kwargs):
ax.set_xlim(kwargs["xlim"])
if("ylim" in kwargs):
ax.set_ylim(kwargs["ylim"])
ax.set_xlabel("Easting (km)")
ax.set_ylabel("Northing (km)")
# Saving the figure
plt.tight_layout()#
plt.savefig("./%s_figures/"%(prefix) +prefix + "min_Dstar_for_each_basins.png", dpi = 500)
plt.close(fig)
a = np.array([[0,1]])
fig = pl.figure(figsize=(9, 1.5))
img = pl.imshow(a, cmap="gnuplot2", vmin = 0.05, vmax = 0.65)
pl.gca().set_visible(False)
cax = pl.axes([0.1, 0.2, 0.8, 0.6])
pl.colorbar(orientation="horizontal", cax=cax, label = r"Min. $D^{*}$")
pl.title(r"Min. $D^{*}$")
pl.savefig("./%s_figures/"%(prefix) +"colorbar_map_minDstar.png")
pl.close(fig)
# Formatting ticks
import matplotlib.ticker as tkr # has classes for tick-locating and -formatting
yfmt = tkr.FuncFormatter(numfmt)
xfmt = tkr.FuncFormatter(numfmt)
print("plotting best-fit theta range yo")
min_theta = 99999
min_Dsum = 1e36
for this_theta in thetas:
this_sum = np.sum(df["D*_r_%s"%this_theta].values)
if(this_sum < min_Dsum):
min_theta = this_theta
min_Dsum = this_sum
this_theta = min_theta
print("Which is ", this_theta)
# Getting the Figure and the ticks right
fig,ax = plt.subplots()
ax.yaxis.set_major_formatter(yfmt)
ax.xaxis.set_major_formatter(xfmt)
# Plotting the hillshade
ax.imshow(HS["array"], extent = HS["extent"], cmap = "gray", vmin= 0.2, vmax = 0.8)
# Building the array of concavity
A = np.zeros(HS["array"].shape)
A[:,:] = np.nan
# For each raster, I am reading rows and cols corresponding to the main raster and plotting it with the requested value
for name in df["raster_name"]:
row = np.load(name + "_row.npy")
col = np.load(name + "_col.npy")
val = df["D*_r_%s"%this_theta][df["raster_name"] == name].values[0] # A wee convoluted but it work and it is fast so... A[row,col] = val
A[row,col] = val
# PLOTTING THE D*
ax.imshow(A, extent = HS["extent"], cmap= "gnuplot2", zorder = 2, alpha = 0.75, vmin = 0.05, vmax = 0.65)
# You may want to change the extents of the plot
if("xlim" in kwargs):
ax.set_xlim(kwargs["xlim"])
if("ylim" in kwargs):
ax.set_ylim(kwargs["ylim"])
ax.set_xlabel("Easting (km)")
ax.set_ylabel("Northing (km)")
# Saving the figure
plt.tight_layout()#
plt.savefig("./%s_figures/"%(prefix) +prefix + "MAP_D_star_range_theta_%s.png" % this_theta, dpi = 500)
plt.close(fig)
a = np.array([[0,1]])
fig = pl.figure(figsize=(9, 1.5))
img = pl.imshow(a, cmap="gnuplot2", vmin = 0.05, vmax = 0.65)
pl.gca().set_visible(False)
cax = pl.axes([0.1, 0.2, 0.8, 0.6])
pl.colorbar(orientation="horizontal", cax=cax, label = r"$D^{*}_{r}$")
pl.title(r"$D^{*}_{r}$")
pl.savefig("./%s_figures/"%(prefix) +"colorbar_map_Dstar_range.png")
pl.close(fig)
def plot_Dstar_maps_for_all_concavities(prefix, n_proc = 1):
# Loading the list of raster
df = pd.read_csv(prefix + "all_raster_names.csv")
params = df["raster_name"].tolist()
thetas = np.round(pd.read_feather(params[0] + "_overall_test.feather")["tested_movern"].values,decimals = 3) # running the multiprocessing
params = []
for t in thetas:
params.append((t,prefix))
# plot_single_theta(params[0])
with Pool(n_proc) as p:
fprocesses = []
for i in params:
fprocesses.append(p.apply_async(plot_single_theta, args = (i,)))
for gut in fprocesses:
gut.wait()
plot_min_D_star_map(params[0])
def plot_basin(ls, **kwargs):
number = ls[0]
X = ls[1]
Y = ls[2]
area_threshold = ls[3]
prefix = ls[4]
nbins = None
if("nbins" in kwargs):
nbins = kwargs["nbins"]
print("Plotting basin ", number, " with proc ", current_process())
if("ignore_numbering" not in kwargs):
kwargs["ignore_numbering"] = False
if(kwargs["ignore_numbering"]):
name = prefix
else:
name = prefix + "%s"%(number)
# Alright, loading the previous datasets
df_rivers = pd.read_feather("%s_rivers.feather"%(name))
df_overall = pd.read_feather("%s_overall_test.feather"%(name))
all_concavity_best_fits = np.load("%s_concavity_tot.npy"%(name))
all_disorders = np.load("%s_disorder_tot.npy"%(name))
XY = pd.read_feather("%s_XY.feather"%(name))
import streamlit as st
import pandas as pd
from utils import *
from modules import *
import os
import numpy as np
import altair as alt
import plotly.graph_objects as go
absolute_path = os.path.abspath(__file__)
path = os.path.dirname(absolute_path)
ipl_ball = pd.read_csv(path+'/2008_2021_updated_ball.csv')
ipl_match = pd.read_csv(path+'/2008_2021_data_matches.csv')
season_list = ['2007/08','2009','2009/10','2011','2012','2013','2014','2015','2016','2017','2018','2019','2020/21','2021']
season_dict = {2008:'2007/08',2009:'2009',2010:'2009/10',2011:'2011',2012:'2012',2013:'2013',2014:'2014',2015:'2015',2016:'2016',2017:'2017',2018:'2018',2019:'2019',2020:'2020/21',2021:'2021'}
team_dict = { 'Delhi Capitals':'Delhi Daredevils' , 'Punjab Kings':'Kings XI Punjab' }
GRID = True
WIDTH = 0
def formatt(df):
temp = []
for i in df.columns:
if i in ['SR', 'Avg', 'Eco','Win Percent']:
temp.append(i)
return df.style.format(subset=temp, formatter="{:.2f}")
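# Hedged usage sketch: formatt only changes how a few numeric columns are
# displayed, so it is applied right before handing a DataFrame to st.table.
# The DataFrame below is a hypothetical example.
def _example_formatt_usage():
    demo = pd.DataFrame({"Runs": [530], "Avg": [44.16666], "SR": [136.45678]})
    st.table(formatt(demo))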
def player_career():
st.title('Player Career')
player = st.sidebar.selectbox('Player', get_player_name(ipl_ball))
bat = pd.DataFrame(get_run( ipl_ball, [player] ))
bat = bat.drop(['batsman'], axis = 1)
bat['M'] = 'IPL'
bat = bat.set_index('M')
bowl = pd.DataFrame(get_wicket( ipl_ball, [player] ))
bowl = bowl.drop(['bowler'], axis = 1)
bowl['M'] = 'IPL'
bowl = bowl.set_index('M')
st.subheader('Batting Career')
bat['Runs'] = bat.apply(lambda x: "{:,}".format(x['Runs']), axis=1)
st.table(formatt(bat))
st.subheader('Bowling Career')
st.table(formatt(bowl))
result = pd.DataFrame()
for i in season_list:
match = ipl_match[ipl_match['season'] == i]
id = list(match['id'].unique())
ball = ipl_ball[ipl_ball['id'].isin(id)]
temp = get_run(ball, batsman = [player], choice = ['Innings','Runs','HS'])
temp['year'] = i
result = pd.concat([result,pd.DataFrame(temp)])
st.subheader('Yearly Performance')
result = result.drop(['batsman'], axis = 1)
c = alt.Chart(result).mark_trail().encode(
x='year:T',
y='Runs:Q',
size = 'Runs:Q',
tooltip=['Runs:Q']
).configure_axis(
grid= GRID
).configure_view(
strokeWidth= WIDTH
).interactive()
st.altair_chart(c, use_container_width=True)
result_bat = result.set_index('year')
result = pd.DataFrame()
for i in season_list:
match = ipl_match[ipl_match['season'] == i]
id = list(match['id'].unique())
ball = ipl_ball[ipl_ball['id'].isin(id)]
temp = get_wicket(ball, bowler= [player], choice = ['Innings','Wickets','BBI'])
temp['year'] = i
result = pd.concat([result,pd.DataFrame(temp)])
result = result.drop(['bowler'], axis = 1)
c = alt.Chart(result).mark_trail().encode(
x='year:T',
y='Wickets:Q',
size = 'Wickets:Q',
tooltip = ['Wickets:Q'],
color=alt.value("#FFAA00")
).configure_axis(
grid= GRID
).configure_view(
strokeWidth= WIDTH
).interactive()
st.altair_chart(c, use_container_width=True)
result_bowl = result.set_index('year')
result = pd.merge(result_bat, result_bowl, how = 'outer', left_on = ['year'], right_on = ['year'])
result = result[ ~ ((result['Innings_x'] == 0) & (result['Innings_y'] == 0))]
result = result.rename(columns = {'Innings_x':'Innings Bat' ,'Innings_y':'Innings Bowl' })
st.table(formatt(result))
def sesonal_stat():
st.title('Seasonal Stats')
result = pd.DataFrame()
choice = ['Innings']
player_type = st.sidebar.selectbox('Player Type', ['Batsman','Bowler'] )
if player_type == 'Batsman':
option = ['Runs','Six','Four','Hundered','Fifty','BF','Avg','SR']
flag = st.sidebar.selectbox('Category', option )
choice.append('Runs')
if flag != 'Runs':
choice.append(flag)
else:
option = ['Wickets','Balls','Runs','BBI','Avg','SR','Eco','5W']
flag = st.sidebar.selectbox('Category', option )
choice.append('Wickets')
if flag != 'Wickets':
choice.append(flag)
limit = st.sidebar.slider('Top', 1, 10)
start_year, end_year = st.sidebar.select_slider("Year", options = [2008,2009,2010,2011,2012,2013,2014,2015,2016,2017,2018,2019,2020,2021] , value = (2008,2021))
temp = []
for i in option:
if i not in choice:
temp.append(i)
column = st.sidebar.multiselect('Columns',temp)
for i in column:
choice.append(i)
innings = st.sidebar.number_input('Innings', step = 0)
innings = None if innings == 0 else innings
if 'Wickets' in choice:
wickets = st.sidebar.number_input('Wickets', step = 0)
wickets = None if wickets == 0 else wickets
else:
runs = st.sidebar.number_input('Runs', step = 0)
runs = None if runs == 0 else runs
for i in range(start_year, end_year+1):
i = season_dict[i]
match = ipl_match[ipl_match['season'] == i]
id = list(match['id'].unique())
ball = ipl_ball[ipl_ball['id'].isin(id)]
if player_type == 'Batsman' :
temp = get_run(ball,flag = flag, choice = choice, limit = limit, innings = innings, runs = runs)
else:
asc = True if flag in ['SR','Eco','Avg'] else False
temp = get_wicket(ball,flag = flag, choice = choice, limit = limit, innings = innings, wickets = wickets, asc = asc )
temp['year'] = i
result = pd.concat([result,pd.DataFrame(temp)])
result = result.reset_index()
result = result.rename(columns = {'index':'Position'})
result['Position'] += 1
if player_type == 'Batsman':
choice.insert(0,'batsman')
else:
if 'BBI' in choice:
choice.remove('BBI')
choice.insert(0,'bowler')
if flag == 'BBI':
flag = 'Wickets'
c = alt.Chart(result).mark_bar().encode(
x=alt.X(flag),
y='year',
tooltip = choice,
color = alt.Color('Position', scale=alt.Scale(scheme='redyellowgreen'))
).configure_axis(
grid=False
).configure_view(
strokeWidth= WIDTH
)
st.altair_chart(c,use_container_width=True )
result = result.set_index('year')
st.table(formatt(result))
def overall_stat():
st.title('Overall Stats')
result = pd.DataFrame()
choice = ['Innings']
player_type = st.sidebar.selectbox('Player Type', ['Batsman','Bowler'] )
if player_type == 'Batsman':
option = ['Runs','Six','Four','Hundered','Fifty','BF','Avg','SR']
flag = st.sidebar.selectbox('Category', option )
choice.append('Runs')
if flag != 'Runs':
choice.append(flag)
else:
option = ['Wickets','Balls','Runs','BBI','Avg','SR','Eco','5W']
flag = st.sidebar.selectbox('Category', option )
choice.append('Wickets')
if flag != 'Wickets':
choice.append(flag)
limit = st.sidebar.slider('Top', 1, 20,10)
temp = []
for i in option:
if i not in choice:
temp.append(i)
column = st.sidebar.multiselect('Columns',temp)
for i in column:
choice.append(i)
innings = st.sidebar.number_input('Innings', step = 0)
innings = None if innings == 0 else innings
if 'Wickets' in choice:
wickets = st.sidebar.number_input('Wickets', step = 0)
wickets = None if wickets == 0 else wickets
else:
runs = st.sidebar.number_input('Runs', step = 0)
runs = None if runs == 0 else runs
if player_type == 'Batsman' :
temp = get_run(ipl_ball,flag = flag, choice = choice, limit = limit, innings = innings, runs = runs)
get_player_runs(temp['batsman'].unique())
else:
asc = True if flag in ['SR','Eco','Avg'] else False
temp = get_wicket(ipl_ball,flag = flag, choice = choice, limit = limit, innings = innings, wickets = wickets, asc = asc )
get_player_wickets(temp['bowler'].unique())
result = pd.concat([result,pd.DataFrame(temp)])
result = result.reset_index()
result = result.rename(columns = {'index':'Position'})
result['Position'] += 1
result = result.set_index('Position')
st.table(formatt(result))
if player_type == 'Batsman':
c = alt.Chart(result).mark_circle( color='#EA484E').encode(
alt.X('batsman', scale=alt.Scale(zero=False)),
alt.Y('Innings', scale=alt.Scale(zero=False, padding=1)),
tooltip = ['Runs']
).configure_axis(
grid=GRID
).configure_view(
strokeWidth= WIDTH
).encode(
size=alt.Size('Runs', scale=alt.Scale(domain=[3000,5000]))
).interactive()
st.altair_chart(c, use_container_width=True)
else:
c = alt.Chart(result).mark_circle( color='#EA484E').encode(
alt.X('bowler', scale=alt.Scale(zero=False)),
alt.Y('Innings', scale=alt.Scale(zero=False, padding=1)),
tooltip = ['Wickets']
).configure_axis(
grid=GRID
).configure_view(
strokeWidth= WIDTH
).encode(
size=alt.Size('Wickets', scale=alt.Scale(domain=[100,150]))
).interactive()
st.altair_chart(c, use_container_width=True)
def one_vs_one():
st.title('One Vs One')
all_player = get_player_name(ipl_ball)
player = st.sidebar.selectbox('Player', all_player)
type_ = st.sidebar.selectbox('Vs', ['Team','Player'])
if type_ == 'Player':
player_removed = []
for i in all_player:
if i != player:
player_removed.append(i)
vs = st.sidebar.selectbox(type_,player_removed)
vs_data = []
vs_data.append(vs)
else:
team = []
for i in list(ipl_ball['batting_team'].unique()):
if i not in team_dict.values():
team.append(i)
vs = st.sidebar.selectbox(type_,team)
vs_data = []
vs_data.append(vs)
if vs in team_dict.keys():
vs_data.append(team_dict[vs])
if type_ == 'Team':
df_bat = get_team_data(vs_data, ipl_ball, 'batsman')
df_bat = pd.DataFrame(get_run(df_bat,[player] ))
df_bat = df_bat.drop(['batsman'], axis = 1)
df_bat['M'] = 'IPL'
df_bat = df_bat.set_index('M')
st.subheader('Batting Stats')
st.caption(player+' Vs '+vs_data[0])
st.table(formatt(df_bat))
df_bowl = get_team_data(vs_data, ipl_ball, 'bowler')
df_bowl = pd.DataFrame(get_wicket(df_bowl,[player] ))
df_bowl = df_bowl.drop(['bowler'], axis = 1)
df_bowl['M'] = 'IPL'
df_bowl = df_bowl.set_index('M')
st.subheader('Bowling Stats')
st.caption(player+' Vs '+vs_data[0])
st.table(formatt(df_bowl))
else:
df_bat = get_player_data(vs_data, ipl_ball, 'bowler')
df_bat = pd.DataFrame(get_run(df_bat,[player] ))
df_bat = df_bat.drop(['batsman'], axis = 1)
df_bat['M'] = 'IPL'
df_bat = df_bat.set_index('M')
st.subheader('Batting Stats')
st.caption(player+' Vs '+vs_data[0])
st.table(formatt(df_bat))
df_bowl = get_player_data([player], ipl_ball, 'batsman')
df_bowl = pd.DataFrame(get_wicket(df_bowl,vs_data ))
df_bowl = df_bowl.drop(['bowler'], axis = 1)
df_bowl['M'] = 'IPL'
df_bowl = df_bowl.set_index('M')
st.subheader('Bowling Stats')
st.caption(vs_data[0]+' Vs '+player)
st.table(formatt(df_bowl))
def over_stats():
type_ = st.sidebar.selectbox('Type', ['Runs','Wickets','Six','Four','SR'])
balls = 1
if type_ in ['SR']:
balls = st.sidebar.number_input('BF', step = 0, min_value = 1)
data = best_in_over(ipl_ball, type_, balls)
data = data.reset_index()
data = data.rename(columns = {'index':'Over'})
data['over'] += 1
st.title('Over Stats')
hide_table_row_index = """
<style>
tbody th {display:none}
.blank {display:none}
</style>
"""
st.markdown(hide_table_row_index, unsafe_allow_html=True)
choice = []
choice.append('bowler' if type_ == 'Wickets' else 'batsman')
choice.append(type_)
c = alt.Chart(data).mark_area(
line={'color':'darkgreen'},
color=alt.Gradient(
gradient='linear',
stops=[alt.GradientStop(color='white', offset=0),
alt.GradientStop(color='darkgreen', offset=1)],
x1=1,
x2=1,
y1=1.2,
y2=0
)
).encode(
x = alt.X('over',scale = alt.Scale(zero=False)),
y = '{0}'.format(type_),
tooltip = choice
).configure_axis(
grid=False
).configure_view(
strokeWidth= WIDTH
)
st.altair_chart(c,use_container_width=True)
st.table(formatt(data))
def win_predict_player():
type_ = st.sidebar.selectbox('Type', ['Batting','Bowling'])
team = []
for i in list(ipl_ball['batting_team'].unique()):
if i not in team_dict.values():
team.append(i)
team_1 = st.sidebar.selectbox('Team', team)
team_data = []
team_data.append(team_1)
if team_1 in team_dict.keys():
team_data.append(team_dict[team_1])
if type_ == 'Batting':
runs = st.sidebar.number_input('Runs', step = 0, min_value = 30)
batsman = list(ipl_ball[ipl_ball['batting_team'].isin(team_data)]['batsman'].unique())
batsman.insert(0,None)
player = st.sidebar.selectbox('Player',batsman)
opp = []
for i in list(ipl_ball['batting_team'].unique()):
if i not in team_dict.values() and i not in team_data:
opp.append(i)
opp.insert(0,None)
opp_inp = st.sidebar.selectbox('Opponent',opp)
opp_team = []
opp_team.append(opp_inp)
if opp_inp in team_dict.keys():
opp_team.append(team_dict[opp_inp])
venue = list(ipl_match['venue'].unique())
venue.insert(0,None)
venue_inp = st.sidebar.selectbox('Venue',venue)
innings = st.sidebar.slider('Minimum Innings',1,10)
win,result = decide_batsman(ipl_ball, ipl_match, team_data, player = player, runs = runs, opp = opp_team ,venue = venue_inp, thres = innings)
else:
wickets = st.sidebar.number_input('Wickets', step = 0, min_value = 2 )
bowler = list(ipl_ball[ipl_ball['bowling_team'].isin(team_data)]['bowler'].unique())
bowler.insert(0,None)
player = st.sidebar.selectbox('Player',bowler)
opp = []
for i in list(ipl_ball['bowling_team'].unique()):
if i not in team_dict.values() and i not in team_data:
opp.append(i)
opp.insert(0,None)
opp_inp = st.sidebar.selectbox('Opponent',opp)
opp_team = []
opp_team.append(opp_inp)
if opp_inp in team_dict.keys():
opp_team.append(team_dict[opp_inp])
venue = list(ipl_match['venue'].unique())
venue.insert(0,None)
venue_inp = st.sidebar.selectbox('Venue',venue)
innings = st.sidebar.slider('Minimum Innings',1,10)
win,result = decide_bowler(ipl_ball, ipl_match, team_data, player = player, wickets = wickets, opp = opp_team ,venue = venue_inp, thres = innings)
source = pd.DataFrame({"category": ['Win', 'Lost'], "value": [win, 1- win]})
st.title('Win Percent')
fig = go.Figure(go.Indicator(
mode = "gauge+number",
value = int(win*100),
domain = {'x': [0, 0.8], 'y': [0, 1]},
gauge = {'axis':{'range':[0,100]}, }))
st.plotly_chart(fig)
st.table(formatt(result))
fig.update_layout(xaxis = {'range': [0, 100]})
def get_player_runs(batsman):
result = pd.DataFrame()
"""
This script creates a boolean mask based on rules
1. is it boreal forest zone
2. In 2000, was there sufficent forest
"""
#==============================================================================
__title__ = "Hansen Active fire"
__author__ = "<NAME>"
__version__ = "v1.0(20.11.2019)"
__email__ = "<EMAIL>"
#==============================================================================
# +++++ Check the paths and set ex path to fireflies folder +++++
import os
import sys
if not os.getcwd().endswith("fireflies"):
if "fireflies" in os.getcwd():
p1, p2, _ = os.getcwd().partition("fireflies")
os.chdir(p1+p2)
else:
raise OSError(
"This script was called from an unknown path. CWD can not be set"
)
sys.path.append(os.getcwd())
#==============================================================================
# Import packages
#==============================================================================
import numpy as np
import pandas as pd
import argparse
import datetime as dt
from collections import OrderedDict
import warnings as warn
from netCDF4 import Dataset, num2date, date2num
from scipy import stats
import rasterio
import xarray as xr
from dask.diagnostics import ProgressBar
from numba import jit
import bottleneck as bn
import scipy as sp
from scipy import stats
import shutil
# Import plotting and colorpackages
import matplotlib.pyplot as plt
import matplotlib.colors as mpc
import matplotlib as mpl
import palettable
import seaborn as sns
import cartopy.crs as ccrs
import cartopy.feature as cpf
from cartopy.mpl.gridliner import LONGITUDE_FORMATTER, LATITUDE_FORMATTER
import itertools
# Import debugging packages
import ipdb
# ========== Import specific packages ==========
# from rasterio.warp import transform
from shapely.geometry import Polygon
import geopandas as gpd
from rasterio import features
from affine import Affine
# import fiona as fi
# import regionmask as rm
# import matplotlib.path as mplPath
from rasterio import features
from affine import Affine
# +++++ Import my packages +++++
import myfunctions.corefunctions as cf
# import MyModules.PlotFunctions as pf
# import MyModules.NetCDFFunctions as ncf
#==============================================================================
def main():
# ==========
force = True#False
# ========== Get the path ==========
spath = pathfinder()
# ========== Make the files ==========
fnames = ActiveFireMask(spath, force)
# ========== Loop over the datasets ==========
HansenMasker(fnames, force, spath, ymin=2001, ymax=2019, )
ipdb.set_trace()
#==============================================================================
def HansenMasker(fnames, force, spath, ymin=2001, ymax=2019, **kwargs):
"""Takes a list of file names and masks the hansen data"""
# ========== load the hansen ==========\
region = "SIBERIA"
# ========== Setup the loop ==========
ppath = spath+"BurntArea/HANSEN"
ft = "lossyear"
fn_out = ppath +"/HansenMODIS_activefiremask.nc"
if not os.path.isfile(fn_out) or force:
tpath = "./data/tmp/"
cf.pymkdir(tpath)
# ========== Create the outfile name ==========
fpath = "%s/%s/" % (ppath, ft)
fnout = "%sHansen_GFC-2018-v1.6_%s_%s.nc" % (fpath, ft, region)
# ========== Open the dataset ==========
ds = xr.open_dataset(fnout, chunks={'latitude': 10000, 'longitude':10000})
lmax = int(np.round(ds.latitude.values.max()))
lmin = int(np.round(ds.latitude.values.min()))
window = -1
Annfn = []
for yr, fn in zip(range(ymin, ymax), fnames):
Fyout = fntmp = tpath+"HansenActiveFire_%d.nc" % (yr)
if not os.path.isfile(Fyout) or force:
date = datefixer(yr, 12, 31)
# ========== Load the results ==========
afr = gpd.read_file(fn)
# maskre = rm.Regions_cls("AFY",[0],["activefire"], ["af"], afr.geometry)
shapes = [(shape, n+1) for n, shape in enumerate(afr.geometry)]
# ========== empty container for the files ==========
filenames = []
# ========== Chunk specific sections ==========
for lm in range(lmax, lmin, window):
print(yr, lm, pd.Timestamp.now())
fntmp = tpath+"HansenActiveFire_%d_%d.nc" % (yr, lm)
if not os.path.isfile(fntmp):
def _dsSlice(fnout, yr, lm, window, shapes, afr):
# ========== open the file ==========
ds_in = xr.open_dataset(fnout, chunks={'latitude': 1000}).sel(
dict(latitude =slice(int(lm), int(lm)+window)))#.compute()
# ========== build a mask ==========
# mask = maskre.mask(dsbool.longitude.values, dsbool.latitude.values)
# ipdb.set_trace()
transform = transform_from_latlon(ds_in['latitude'], ds_in['longitude'])
out_shape = (len(ds_in['latitude']), len(ds_in['longitude']))
raster = features.rasterize(shapes, out_shape=out_shape,
fill=0, transform=transform,
dtype="int16", **kwargs)
# ========== build a boolean array ==========
raster = raster.astype(bool)
with ProgressBar():
dsbool = ((ds_in == (yr-2000)) | (ds_in == (yr-2000+1))).compute()
dsbool *= raster
# ========== Save the file out ==========
encoding = ({"lossyear":{'shuffle':True,'zlib':True,'complevel':5}})
dsbool.to_netcdf(fntmp, format = 'NETCDF4',encoding=encoding, unlimited_dims = ["time"])
_dsSlice(fnout, yr, lm, window, shapes, afr)
filenames.append(fntmp)
# ========== open multiple files at once ==========
dsout = xr.open_mfdataset(filenames, concat_dim="latitude")
# ========== Set the date ==========
dsout["time"] = date["time"]
# ========== rename the variable to somehting sensible ==========
dsout = dsout.rename({"lossyear":"fireloss"})
# Check it's the same size as the sliced-up ds
# ========== Save the file out ==========
encoding = ({"fireloss":{'shuffle':True,'zlib':True,'complevel':5}})
print ("Starting write of combined data for %d at:" % yr, pd.Timestamp.now())
with ProgressBar():
dsout.to_netcdf(Fyout, format = 'NETCDF4',encoding=encoding, unlimited_dims = ["time"])
# cleanup the excess files
for fnr in filenames:
if os.path.isfile(fnr):
os.remove(fnr)
Annfn.append(Fyout)
# with ProgressBar():
# dsbool = dsbool.compute()
# ipdb.set_trace()
dsfin = xr.open_mfdataset(
Annfn, concat_dim="time",
chunks={'latitude': 10000, 'longitude':10000})#.any(dim="time")
# date = datefixer(2018, 12, 31)
attr = dsfin.attrs
# ++++++++++ Highly recomended ++++++++++
attr["FileName"] = fn_out
attr["title"] = "ActiveFireMask"
attr["summary"] = "Activefire forestloss mask"
attr["Conventions"] = "CF-1.7"
# ++++++++++ Data Provinance ++++++++++
attr["history"] = "%s: Netcdf file created using %s (%s):%s by %s" % (
str(pd.Timestamp.now()), __title__, __file__, __version__, __author__)
attr["history"] += dsfin.history
ds_out = dsfin.any(dim="time")
ds_out = ds_out.expand_dims({"time":[pd.Timestamp("2018-12-31")]})
ds_out.attrs = attr
encoding = ({"fireloss":{'shuffle':True,'zlib':True,'complevel':5}})
delayed_obj = ds_out.to_netcdf(
fn_out,
format = 'NETCDF4',
encoding = encoding,
unlimited_dims = ["time"],
compute=False)
print("Starting write of ActiveFireMask data at", pd.Timestamp.now())
with ProgressBar():
results = delayed_obj.compute()
ipdb.set_trace()
return fn_out
#==============================================================================
def datefixer(year, month, day):
"""
Builds a dictionary of date and calendar information for the given
year, month and day, including the CF-style numeric time used when
writing netCDF time coordinates.
args:
year, month, day : int
components of the date to encode
return
dates: OrderedDict
dictionary with "time", "calendar", "units" and "CFTime" entries
"""
# ========== create the new dates ==========
# +++++ set up the list of dates +++++
dates = OrderedDict()
tm = [dt.datetime(int(year) , int(month), int(day))]
dates["time"] = pd.to_datetime(tm)
dates["calendar"] = 'standard'
dates["units"] = 'days since 1900-01-01 00:00'
dates["CFTime"] = date2num(
tm, calendar=dates["calendar"], units=dates["units"])
return dates
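# Hedged worked example of datefixer: for 2016-12-31 the returned dictionary
# holds a one-element pandas DatetimeIndex plus the matching CF-style encoding
# (days since 1900-01-01) used when writing netCDF time coordinates.
def _example_datefixer_usage():
    dates = datefixer(2016, 12, 31)
    # dates["time"][0] -> Timestamp('2016-12-31 00:00:00')
    # dates["CFTime"][0] -> days since 1900-01-01 for that date
    return dates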
def transform_from_latlon(lat, lon):
lat = np.asarray(lat)
lon = np.asarray(lon)
trans = Affine.translation(lon[0], lat[0])
scale = Affine.scale(lon[1] - lon[0], lat[1] - lat[0])
return trans * scale
def ActiveFireMask(spath, force, ymin=2001, ymax=2019):
def _annualfire(actfire, yr):
# ========== Convert to an equal area projection ==========
print("starting equal area reprojection at: ", pd.Timestamp.now())
actfire = actfire.to_crs({'init': 'epsg:3174'})
# ========== Add a 4km buffer ==========
print("starting buffer at: ", pd.Timestamp.now())
actfire["geometry"] = actfire.geometry.buffer(4000)
# ========== Convert back to projection ==========
print("starting latlon reprojection at: ", | pd.Timestamp.now() | pandas.Timestamp.now |
# -*- coding: utf-8 -*-
# Run this app with `python app.py` and
# visit http://127.0.0.1:8050/ in your web browser.
#AppAutomater.py has App graphs and data
#Graphs.py has all graphs
#Data.py has all data processing stuff
#Downloader.py is used to download files daily
import dash
import dash_core_components as dcc
import dash_html_components as html
import dash_bootstrap_components as dbc
from dash.dependencies import Input, Output
#from apscheduler.schedulers.background import BackgroundScheduler
#import atexit
import plotly.express as px
import json
import numpy as np
import pandas as pd
from pymongo import MongoClient
#Scheduler to update data
###########################################################################
###########################################################################
def g():
#client =
#db =
#collection =
#Read Only Needed Data
###########################################################################
###########################################################################
grouped_daily_cities = collection.find_one({"index":"grouped_daily_cities"})
grouped_daily_cities = pd.DataFrame(grouped_daily_cities["data"])
grouped_cumulative_cities = collection.find_one({"index":"grouped_cumulative_cities"})
grouped_cumulative_cities = pd.DataFrame(grouped_cumulative_cities["data"])
g.grouped_daily_weekly = collection.find_one({"index":"grouped_daily_weekly"})
g.grouped_daily_weekly = pd.DataFrame(g.grouped_daily_weekly["data"])
df = collection.find_one({"index":"df"})
df = | pd.DataFrame(df["data"]) | pandas.DataFrame |
from __future__ import print_function
import os
import datetime
import sys
import pandas as pd
import numpy as np
import requests
import copy
# import pytz
import seaborn as sns
from urllib.parse import quote
import monetio.obs.obs_util as obs_util
"""
NAME: cems_api.py
PGRMMER: <NAME> ORG: ARL
This code written at the NOAA air resources laboratory
Python 3
#################################################################
The key and url for the epa api should be stored in a file called
.epaapirc in the $HOME directory.
The contents should be
key: apikey
url: https://api.epa.gov/FACT/1.0/
TO DO
-----
Date is in local time (not daylight savings)
Need to convert to UTC. This will require an extra package or api.
Classes:
----------
EpaApiObject - Base class
EmissionsCall
FacilitiesData
MonitoringPlan
Emissions
CEMS
Functions:
----------
addquarter
get_datelist
findquarter
sendrequest
getkey
"""
def test_end(endtime, current):
# if endtime None return True
if isinstance(endtime, pd._libs.tslibs.nattype.NaTType):
return True
elif not endtime:
return True
# if endtime greater than current return true
elif endtime >= current:
return True
# if endtime less than current time return true
elif endtime < current:
return False
else:
return True
def get_filename(fname, prompt):
"""
determines if file exists. If prompt is True then will prompt for
new filename if file does not exist.
"""
if fname:
done = False
iii = 0
while not done:
if iii > 2:
done = True
iii += 1
if os.path.isfile(fname):
done = True
elif prompt:
istr = "\n" + fname + " is not a valid name for Facilities Data \n"
istr += "Please enter a new filename \n"
istr += "enter None to load from the api \n"
istr += "enter x to exit program \n"
fname = input(istr)
# print('checking ' + fname)
if fname == "x":
sys.exit()
if fname.lower() == "none":
fname = None
done = True
else:
fname = None
done = True
return fname
# def get_timezone_offset(latitude, longitude):
# """
# uses geonames API
# must store username in the $HOME/.epaapirc file
# geousername: username
# """
# username = getkey()
# print(username)
# username = username["geousername"]
# url = "http://api.geonames.org/timezoneJSON?lat="
# request = url + str(latitude)
# request += "&lng="
# request += str(longitude)
# request += "&username="
# request += username
# try:
# data = requests.get(request)
# except BaseException:
# data = -99
#
# jobject = data.json()
# print(jobject)
# print(data)
# # raw offset should give standard time offset.
# if data == -99:
# return 0
# else:
# offset = jobject["rawOffset"]
# return offset
def getkey():
"""
key and url should be stored in $HOME/.epaapirc
"""
dhash = {}
homedir = os.environ["HOME"]
fname = "/.epaapirc"
if os.path.isfile(homedir + fname):
with open(homedir + fname) as fid:
lines = fid.readlines()
for temp in lines:
temp = temp.split(" ")
dhash[temp[0].strip().replace(":", "")] = temp[1].strip()
else:
dhash["key"] = None
dhash["url"] = None
dhash["geousername"] = None
return dhash
def sendrequest(rqq, key=None, url=None):
"""
Method for sending requests to the EPA API
Inputs :
--------
rqq : string
request string.
Returns:
--------
data : response object
"""
if not key or not url:
keyhash = getkey()
apiurl = keyhash["url"]
key = keyhash["key"]
else:
apiurl = url
if key:
# apiurl = "https://api.epa.gov/FACT/1.0/"
rqq = apiurl + rqq + "?api_key=" + key
print("Request: ", rqq)
data = requests.get(rqq)
print("Status Code", data.status_code)
if data.status_code == 429:
print("Too many requests Please Wait before trying again.")
sys.exit()
else:
print("WARNING: your api key for EPA data was not found")
print("Please obtain a key from")
print("https://www.epa.gov/airmarkets/field-audit-checklist_tool-fact-api")
print("The key should be placed in $HOME/.epaapirc")
print("Contents of the file should be as follows")
print("key: apikey")
print("url: https://api.epa.gov/FACT/1.0/")
sys.exit()
return data
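# Hedged usage sketch: sendrequest takes only the part of the FACT endpoint
# after the base url; key and url normally come from $HOME/.epaapirc. The
# endpoint fragment below is a hypothetical example, not a verified path.
def _example_sendrequest_usage():
    data = sendrequest("facilities")
    if data.status_code == 200:
        return data.json()
    return None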
def get_lookups():
"""
Request to get lookups - descriptions of various codes.
"""
getstr = "emissions/lookUps"
# rqq = self.apiurl + "emissions/" + getstr
# rqq += "?api_key=" + self.key
data = sendrequest(getstr)
jobject = data.json()
dstr = unpack_response(jobject)
return dstr
# According to lookups MODC values
# 01 primary monitoring system
# 02 backup monitoring system
# 03 alternative monitoring system
# 04 backup monitoring system
# 06 average hour before/hour after
# 07 average hourly
# 21 negative value replaced with 0.
# 08 90th percentile value in Lookback Period
# 09 95th precentile value in Lookback Period
# etc.
# it looks like values between 1-4 ok
# 6-7 probably ok
# higher values should be flagged.
def quarter2date(year, quarter):
if quarter == 1:
dt = datetime.datetime(year, 1, 1)
elif quarter == 2:
dt = datetime.datetime(year, 4, 1)
elif quarter == 3:
dt = datetime.datetime(year, 7, 1)
elif quarter == 4:
dt = datetime.datetime(year, 10, 1)
return dt
def addquarter(rdate):
"""
INPUT
rdate : datetime object
RETURNS
newdate : datetime object
requests for emissions are made per quarter.
Returns first date in the next quarter from the input date.
"""
quarter = findquarter(rdate)
quarter += 1
year = rdate.year
if quarter > 4:
quarter = 1
year += 1
month = 3 * quarter - 2
newdate = datetime.datetime(year, month, 1, 0)
return newdate
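# Hedged worked example: a date in the second quarter maps to the first day of
# the third quarter (quarter 2 -> quarter 3 -> month 3 * 3 - 2 = 7).
def _example_addquarter_usage():
    newdate = addquarter(datetime.datetime(2016, 5, 15))
    # newdate -> datetime.datetime(2016, 7, 1, 0, 0)
    return newdate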
def get_datelist_sub(r1, r2):
rlist = []
qt1 = findquarter(r1)
yr1 = r1.year
qt2 = findquarter(r2)
yr2 = r2.year
done = False
iii = 0
while not done:
rlist.append(quarter2date(yr1, qt1))
if yr1 > yr2:
done = True
elif yr1 == yr2 and qt1 == qt2:
done = True
qt1 += 1
if qt1 > 4:
qt1 = 1
yr1 += 1
iii += 1
if iii > 30:
break
return rlist
def get_datelist(rdate):
"""
INPUT
rdate : tuple of datetime objects
(start date, end date)
RETURNS:
rdatelist : list of datetimes covering range specified by rdate by quarter.
Return list of first date in each quarter from
startdate to end date.
"""
if isinstance(rdate, list):
rdatelist = get_datelist_sub(rdate[0], rdate[1])
else:
rdatelist = [rdate]
return rdatelist
def findquarter(idate):
if idate.month <= 3:
qtr = 1
elif idate.month <= 6:
qtr = 2
elif idate.month <= 9:
qtr = 3
elif idate.month <= 12:
qtr = 4
return qtr
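# Hedged worked example: for a range spanning the first three quarters of one
# year, get_datelist returns the first day of each quarter in that range.
def _example_get_datelist_usage():
    rdate = [datetime.datetime(2016, 1, 15), datetime.datetime(2016, 8, 1)]
    rdatelist = get_datelist(rdate)
    # rdatelist -> [datetime(2016, 1, 1), datetime(2016, 4, 1), datetime(2016, 7, 1)]
    return rdatelist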
def keepcols(df, keeplist):
tcols = df.columns.values
klist = []
for ttt in keeplist:
if ttt in tcols:
# if ttt not in tcols:
# print("NOT IN ", ttt)
# print('Available', tcols)
# else:
klist.append(ttt)
tempdf = df[klist]
return tempdf
def get_so2(df):
"""
drop columns that are not in keep.
"""
keep = [
# "DateHour",
"time local",
# "time",
"OperatingTime",
# "HourLoad",
# "u so2_lbs",
"so2_lbs",
# "AdjustedFlow",
# "UnadjustedFlow",
# "FlowMODC",
"SO2MODC",
"unit",
"stackht",
"oris",
"latitude",
"longitude",
]
df = keepcols(df, keep)
if not df.empty:
df = df[df["oris"] != "None"]
return df
class EpaApiObject:
def __init__(self, fname=None, save=True, prompt=False, fdir=None):
"""
Base class for all classes that send request to EpaApi.
to avoid sending repeat requests to the api, the default option
is to save the data in a file - specified by fname.
fname : str
fdir : str
save : boolean
prompt : boolean
"""
# fname is name of file that data would be saved to.
self.status_code = None
self.df = pd.DataFrame()
self.fname = fname
self.datefmt = "%Y %m %d %H:%M"
if fdir:
self.fdir = fdir
else:
self.fdir = "./apifiles/"
if self.fdir[-1] != "/":
self.fdir += "/"
# returns None if filename does not exist.
# if prompt True then will ask for new filename if does not exist.
fname2 = get_filename(self.fdir + fname, prompt)
self.getstr = self.create_getstr()
# if the file exists load data from it.
getboolean = True
if fname2:
print("Loading from file ", self.fdir + self.fname)
self.fname = fname2
self.df, getboolean = self.load()
elif fname:
self.fname = self.fdir + fname
# if it doesn't load then get it from the api.
# if save is True then save.
if self.df.empty and getboolean:
# get sends request to api and processes data received.
self.df = self.get()
if save:
self.save()
def set_filename(self, fname):
self.fname = fname
def load(self):
chash = {"mid": str, "oris": str}
df = pd.read_csv(self.fname, index_col=[0], converters=chash, parse_dates=True)
# df = pd.read_csv(self.fname, index_col=[0])
return df, True
def save(self):
"""
save to a csv file.
"""
print("saving here", self.fname)
if not self.df.empty:
self.df.to_csv(self.fname, date_format=self.datefmt)
else:
with open(self.fname, "w") as fid:
fid.write("no data")
def create_getstr(self):
# each derived class should have
# its own create_getstr method.
return "placeholder" + self.fname
def printall(self):
data = sendrequest(self.getstr)
jobject = data.json()
rstr = self.getstr + "\n"
rstr += unpack_response(jobject)
return rstr
def return_empty(self):
return pd.DataFrame()
def get_raw_data(self):
data = sendrequest(self.getstr)
if data.status_code != 200:
return self.return_empty()
else:
return data
def get(self):
data = self.get_raw_data()
try:
self.status_code = data.status_code
except:
self.status_code = "None"
try:
jobject = data.json()
except BaseException:
return data
df = self.unpack(jobject)
return df
def unpack(self, data):
# each derived class should have
# its own unpack method.
return pd.DataFrame()
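# Hedged sketch of how EpaApiObject is meant to be subclassed: a derived class
# supplies its own create_getstr and unpack methods and the base class handles
# the csv caching and reloading. The endpoint string and json layout assumed
# below are hypothetical examples, not verified FACT service paths.
class _ExampleApiObject(EpaApiObject):
    def __init__(self, oris, fname="example.csv", save=True, prompt=False):
        self.oris = oris
        super().__init__(fname, save, prompt)

    def create_getstr(self):
        # hypothetical endpoint built from the inputs stored before super().__init__
        return quote("/".join(["facilities", str(self.oris)]))

    def unpack(self, data):
        # assumes the json response can be turned directly into a DataFrame
        return pd.DataFrame(data)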
class EmissionsCall(EpaApiObject):
"""
class that represents data returned by one emissions/hourlydata call to the restapi.
Attributes
"""
def __init__(self, oris, mid, year, quarter, fname=None, calltype='CEM',
save=True, prompt=False):
self.oris = oris # oris code of facility
self.mid = mid # monitoring location id.
self.year = str(year)
self.quarter = str(quarter)
calltype = calltype.upper().strip()
if calltype=='F23': calltype='AD'
if not fname:
fname = "Emissions." + self.year + ".q" + self.quarter
if calltype=='AD':
fname += '.AD'
fname += "." + str(self.mid) + "." + str(oris) + ".csv"
self.dfall = pd.DataFrame()
self.calltype= calltype
if calltype.upper().strip() == "AD":
self.so2name = "SO2ADReportedSO2MassRate"
elif calltype.upper().strip() == "CEM":
self.so2name = "SO2CEMReportedSO2MassRate"
elif calltype.upper().strip() == "LME":
# this should probably be so2mass??? TO DO.
self.so2name = "LMEReportedSO2Mass"
else:
self.so2name = "SO2CEMReportedSO2MassRate"
self.so2nameB = "UnadjustedSO2"
super().__init__(fname, save, prompt)
# if 'DateHour' in df.columns:
# df = df.drop(['DateHour'], axis=1)
def create_getstr(self):
# for locationID in unitra:
# efile = "efile.txt"
if self.calltype.upper().strip() == "AD":
estr = "emissions/hourlyFuelData/csv"
elif self.calltype.upper().strip() == "LME":
estr = "emissions/hourlyData/csv"
else:
estr = "emissions/hourlyData/csv"
getstr = quote(
"/".join([estr, str(self.oris), str(self.mid), self.year, self.quarter])
)
return getstr
def load(self):
# Emissions call
# datefmt = "%Y %m %d %H:%M"
datefmt = self.datefmt
datefmt2 = "%Y %m %d %H:%M:%S"
chash = {"mid": str, "oris": str, "unit": str}
df = pd.read_csv(self.fname, index_col=[0], converters=chash, parse_dates=False)
# if not df.empty:
if not df.empty:
self.status_code = 200
print("SO2 DATA EXISTS")
temp = df[df['so2_lbs']>0]
if temp.empty:
print('SO2 lbs all zero')
# check for two date formats.
# -----------------------------------------
def newdate(x):
rval = x["time local"]
if isinstance(rval, float):
if np.isnan(rval):
return pd.NaT
rval = rval.replace("-", " ")
rval = rval.strip()
fail = 0
try:
rval = datetime.datetime.strptime(rval, datefmt)
except:
fail = 1
if fail == 1:
try:
rval = datetime.datetime.strptime(rval, datefmt2)
except:
fail = 2
print(self.fname)
print("WARNING: Could not parse date " + rval)
return rval
# -----------------------------------------
df["time local"] = df.apply(newdate, axis=1)
# if 'DateHour' in df.columns:
# df = df.drop(['DateHour'], axis=1)
# df = pd.read_csv(self.fname, index_col=[0])
else:
print("NO SO2 DATA in FILE")
return df, False
def return_empty(self):
return None
def get(self):
data = self.get_raw_data()
try:
self.status_code = data.status_code
except:
self.status_code = None
if data:
df = self.unpack(data)
else:
df = pd.DataFrame()
return df
def unpack(self, data):
logfile = "warnings.emit.txt"
iii = 0
cols = []
tra = []
print('----UNPACK-----------------')
for line in data.iter_lines(decode_unicode=True):
#if iii < 5:
#print('LINE')
#print(line)
# 1. Process First line
temp = line.split(',')
if temp[-1] and self.calltype=='LME':
print(line)
if iii == 0:
tcols = line.split(",")
# add columns for unit id and oris code
tcols.append("unit")
tcols.append("oris")
# add columns for other info (stack height, latitude etc).
# for edata in data2add:
# tcols.append(edata[0])
# 1a write column headers to a file.
verbose = True
if verbose:
with open("headers.txt", "w") as fid:
for val in tcols:
fid.write(val + "\n")
# print('press a key to continue ')
# input()
# 1b check to see if desired emission variable is in the file.
if self.so2name not in tcols:
with open(logfile, "a") as fid:
rstr = "ORIS " + str(self.oris)
rstr += " mid " + str(self.mid) + "\n"
rstr += "NO adjusted SO2 data \n"
if self.so2name not in tcols:
rstr += "NO SO2 data \n"
rstr += "------------------------\n"
fid.write(rstr)
print("--------------------------------------")
print("ORIS " + str(self.oris))
print("UNIT " + str(self.mid) + " no SO2 data")
print(self.fname)
print("--------------------------------------")
# return empty dataframe
return pd.DataFrame()
else:
cols = tcols
print("--------------------------------------")
print("ORIS " + str(self.oris))
print("UNIT " + str(self.mid) + " YES SO2 data")
print(self.fname)
print("--------------------------------------")
# 2. Process rest of lines
else:
lt = line.split(",")
# add input info to line.
lt.append(str(self.mid))
lt.append(str(self.oris))
# for edata in data2add:
# lt.append(edata[1])
tra.append(lt)
iii += 1
# with open(efile, "a") as fid:
# fid.write(line)
# ----------------------------------------------------
df = pd.DataFrame(tra, columns=cols)
df = df.apply(pd.to_numeric, errors="ignore")
df = self.manage_date(df)
if self.calltype == 'AD':
df['SO2MODC'] = -8
if self.calltype == 'LME':
df['SO2MODC'] = -9
df = self.convert_cols(df)
df = self.manage_so2modc(df)
df = get_so2(df)
# the LME data sometimes has duplicate rows.
# causing emissions to be over-estimated.
if self.calltype == 'LME':
df = df.drop_duplicates()
return df
# ----------------------------------------------------------------------------------------------
def manage_date(self, df):
"""DateHour field is originally in string form 4/1/2016 02:00:00 PM
Here, change to a datetime object.
# also need to change to UTC.
# time is local standard time (never daylight savings)
"""
# Using the %I for the hour field and %p for AM/Pm converts time
# correctly.
def newdate(xxx):
fmt = "%m/%d/%Y %I:%M:%S %p"
try:
rdt = datetime.datetime.strptime(xxx["DateHour"], fmt)
except BaseException:
# print("LINE WITH NO DATE :", xxx["DateHour"], ":")
rdt = pd.NaT
return rdt
df["time local"] = df.apply(newdate, axis=1)
df = df.drop(["DateHour"], axis=1)
return df
def manage_so2modc(self,df):
if "SO2CEMSO2FormulaCode" not in df.columns.values:
return df
def checkmodc(formula, so2modc, so2_lbs):
# if F-23 is the formula code and
# so2modc is Nan then change so2modc to -7.
if not so2_lbs or so2_lbs==0:
return so2modc
if so2modc!=0 or not formula:
return so2modc
else:
if 'F-23' in str(formula):
return -7
else:
return -10
df["SO2MODC"] = df.apply(lambda row:
checkmodc(row["SO2CEMSO2FormulaCode"],
row['SO2MODC'],
row['so2_lbs']),
axis=1)
return df
def convert_cols(self, df):
"""
All columns are read in as strings and must be converted to the
appropriate units. NaNs or empty values may be present in the columns.
OperatingTime : fraction of the clock hour during which the unit
combusted any fuel. If unit, stack or pipe did not
operate report 0.00.
"""
# three different ways to convert columns
# def toint(xxx):
# try:
# rt = int(xxx)
# except BaseException:
# rt = -99
# return rt
def tostr(xxx):
try:
rt = str(xxx)
except BaseException:
rt = "none"
return rt
def simpletofloat(xxx):
try:
rt = float(xxx)
except BaseException:
rt = 0
return rt
# calculate lbs of so2 by multiplying rate by operating time.
# checked this with FACTS
def getmass(optime, cname):
# if operating time is zero then emissions are zero.
if float(optime) < 0.0001:
rval = 0
else:
try:
rval = float(cname) * float(optime)
except BaseException:
rval = np.NaN
return rval
def lme_getmass(cname):
try:
rval = float(cname)
except BaseException:
rval = np.NaN
return rval
df["SO2MODC"] = df["SO2MODC"].map(simpletofloat)
# map OperatingTime to a float
df["OperatingTime"] = df["OperatingTime"].map(simpletofloat)
# map Adjusted Flow to a float
#df["AdjustedFlow"] = df["AdjustedFlow"].map(simpletofloat)
# df["oris"] = df["oris"].map(toint)
df["oris"] = df.apply(lambda row: tostr(row["oris"]), axis=1)
# map SO2 data to a float
# if operating time is zero then map to 0 (it is '' in file)
optime = "OperatingTime"
cname = self.so2name
if self.calltype=='LME':
df["so2_lbs"] = df.apply(lambda row: lme_getmass(row[cname]), axis=1)
else:
df["so2_lbs"] = df.apply(lambda row: getmass(row[optime], row[cname]), axis=1)
temp = df[["time local", "so2_lbs", cname, optime]]
temp = df[df["OperatingTime"] > 1.0]
if not temp.empty:
print("Operating Time greater than 1 ")
print(
temp[
["oris", "unit", "OperatingTime", "time local", "so2_lbs", self.so2name]
]
)
# -------------------------------------------------------------
# these were checks to see what values the fields were holding.
# temp is values that are not valid
# temp = temp[temp["OperatingTime"] > 0]
# print("Values that cannot be converted to float")
# print(temp[cname].unique())
# print("MODC ", temp["SO2MODC"].unique())
# ky = "MATSSstartupshutdownflat"
# if ky in temp.keys():
# print("MATSSstartupshutdownflat", temp["MATSStartupShutdownFlag"].unique())
# print(temp['date'].unique())
# ky = "Operating Time"
# if ky in temp.keys():
# print("Operating Time", temp["OperatingTime"].unique())
# if ky in df.keys():
# print("All op times", df["OperatingTime"].unique())
# for line in temp.iterrows():
# print(line)
# -------------------------------------------------------------
return df
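# Hedged usage sketch for EmissionsCall (oris/unit/year/quarter values below
# are hypothetical; the constructor is called this way in Emissions.add):
# ec = EmissionsCall(3788, "1", 2016, 2, calltype="CEM")
# hourly = ec.df          # columns include time local, so2_lbs, SO2MODC, oris, unit
# print(ec.status_code)   # request status (200 when data was returned)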
class Emissions:
"""
class that represents data returned by emissions/hourlydata call to the restapi.
Attributes
self.df : DataFrame
Methods
__init__
add
see
https://www.epa.gov/airmarkets/field-audit-checklist-tool-fact-field-references#EMISSION
# NOTES
# BAF - bias adjustment factor
# MEC - maximum expected concentration
# MPF - maximum potential stack gas flow rate
# monitoring plan specified monitor range.
# FlowPMA % of time flow monitoring system available.
# SO2CEMReportedAdjustedSO2 - average adjusted so2 concentration
# SO2CEMReportedSO2MassRate - average adjusted so2 rate (lbs/hr)
# AdjustedFlow - average volumetric flow rate for the hour. adjusted for
# bias.
# It looks like MassRate is calculated from concentration of SO2 and flow
# rate. So flow rate should be rate of all gasses coming out of stack.
"""
def __init__(self):
self.df = pd.DataFrame()
self.orislist = []
self.unithash = {}
# self.so2name = "SO2CEMReportedAdjustedSO2"
self.so2name = "SO2CEMReportedSO2MassRate"
self.so2nameB = "UnadjustedSO2"
def add(self, oris, locationID, year, quarter, method, logfile="warnings.emit.txt",
):
"""
oris : int
locationID : str
year : int
quarter : int
ifile : str
data2add : list of tuples (str, value)
str is name of column. value to add to column.
"""
if oris not in self.orislist:
self.orislist.append(oris)
if oris not in self.unithash.keys():
self.unithash[oris] = []
self.unithash[oris].append(locationID)
with open(logfile, "w") as fid:
dnow = datetime.datetime.now()
fid.write(dnow.strftime("%Y %m %d %H:%M\n"))
# if locationID == None:
# unitra = self.get_units(oris)
# else:
# unitra = [locationID]
if int(quarter) > 4:
print("Warning: quarter greater than 4")
sys.exit()
# for locationID in unitra:
locationID = str(locationID)
#print('call type :', method)
ec = EmissionsCall(oris, locationID, year, quarter, calltype=method)
df = ec.df
# print('EMISSIONS CALL to DF', year, quarter, locationID)
# print(df[0:10])
if self.df.empty:
self.df = df
elif not df.empty:
self.df = self.df.append(df)
# self.df.to_csv(efile)
return ec.status_code
def save(self):
efile = "efile.txt"
self.df.to_csv(efile)
def merge_facilities(self, dfac):
dfnew = pd.merge(
self.df,
dfac,
how="left",
left_on=["oris", "unit"],
right_on=["oris", "unit"],
)
return dfnew
def plot(self):
import matplotlib.pyplot as plt
df = self.df.copy()
temp1 = df[df["date"].dt.year != 1700]
sns.set()
for unit in df["unit"].unique():
temp = temp1[temp1["unit"] == unit]
temp = temp[temp["SO2MODC"].isin(["01", "02", "03", "04"])]
plt.plot(temp["date"], temp["so2_lbs"], label=str(unit))
print("UNIT", str(unit))
print(temp["SO2MODC"].unique())
# for unit in df["unit"].unique():
# temp = temp1[temp1["unit"] == unit]
# temp = temp[temp["SO2MODC"].isin(
# ["01", "02", "03", "04"]) == False]
# plt.plot(temp["date"], temp["so2_lbs"], label="bad " + str(unit))
# print("UNIT", str(unit))
# print(temp["SO2MODC"].unique())
ax = plt.gca()
handles, labels = ax.get_legend_handles_labels()
ax.legend(handles, labels)
plt.show()
for unit in df["unit"].unique():
temp = temp1[temp1["unit"] == unit]
print("BAF", temp["FlowBAF"].unique())
print("MODC", temp["FlowMODC"].unique())
print("PMA", temp["FlowPMA"].unique())
#plt.plot(temp["date"], temp["AdjustedFlow"], label=str(unit))
plt.show()
class MonitoringPlan(EpaApiObject):
"""
Stack height is converted to meters.
Request to get monitoring plans for an oris code and locationID.
locationIDs for an oris code can be found with FacilitiesData.get_units.
The monitoring plan has locationAttributes which
include the stackHeight, crossAreaExit, crossAreaFlow.
It also includes monitoringSystems which includes
systemTypeDescription (such as SO2 Concentration)
QuarterlySummaries gives so2Mass each quarter.
# currently stack height is the only information
# we want to get from monitoring plan
# request string
# date which indicates quarter of request
# oris
# mid
# stack height
------------------------------------------------------------------------------
6.0 Monitoring Method Data (March 11, 2015)
Environmental Protection Agency Monitoring Plan Reporting Instructions, page 37:
If a location which has an SO2 monitor combusts both high sulfur fuel (e.g.,
coal or oil) and a low sulfur fuel, and uses a default SO2 emission rate in
conjunction with Equation F-23 for hours in which very low sulfur fuel is
combusted (see §75.11(e)(1)), report one monitor method record for parameter
SO2 with a monitoring methodology code CEMF23. If only low-sulfur fuel is
combusted and the F-23 calculation is used for every hour, report the SO2
monitoring method as F23.
------------------------------------------------------------------------------
"""
def __init__(self, oris, mid, date, fname="Mplans.csv", save=True, prompt=False):
self.oris = oris # oris code of facility
self.mid = mid # monitoring location id.
self.date = date # date
self.dfall = pd.DataFrame()
self.dfmt="%Y-%m-%dT%H:%M:%S"
super().__init__(fname, save, prompt)
def to_dict(self, unit=None):
if self.df.empty:
return None
if unit:
df = self.df[self.df["name"] == unit]
else:
df = self.df.copy()
try:
mhash = df.reset_index().to_dict("records")
except:
mhash = None
return mhash
def get_stackht(self, unit):
#print(self.df)
df = self.df[self.df["name"] == unit]
#print(df)
stackhts = df['stackht'].unique()
#print('get stackht', stackhts)
return stackhts
def get_method(self, unit, daterange):
# TO DO. pick method code based on dates.
temp = self.df[self.df["name"] == unit]
sdate = daterange[0]
edate = daterange[1]
temp = temp[temp["beginDateHour"] <= sdate]
if temp.empty:
return None
temp["testdate"] = temp.apply(
lambda row: test_end(row["endDateHour"], edate), axis=1
)
temp = temp[temp["testdate"] == True]
method = temp['methodCode'].unique()
return method
def load(self):
# Multiple mplans may be saved to the same csv file.
# so this may return an empty dataframe
# returns empty dataframe and flag to send request.
# return pd.DataFrame(), True
# df = super().load()
chash = {"mid": str, "oris": str, "name": str}
def parsedate(x, sfmt):
if not x:
return pd.NaT
elif x=='None':
return pd.NaT
else:
try:
return pd.to_datetime(x, format=sfmt)
except:
print('time value', x)
return pd.NaT
df = pd.read_csv(self.fname, index_col=[0], converters=chash,
parse_dates=['beginDateHour','endDateHour'],
date_parser=lambda x: parsedate(x, self.dfmt))
self.dfall = df.copy()
df = df[df["oris"] == self.oris]
df = df[df["mid"] == self.mid]
if not df.empty:
self.status_code = 200
return df, True
def save(self):
# do not want to overwrite other mplans in the file.
df = pd.DataFrame()
subset=["oris","name","request_date","methodCode","beginDateHour","endDateHour"]
try:
df, bval = self.load()
except BaseException:
pass
if not self.dfall.empty:
df = pd.concat([self.dfall, self.df], sort=True)
df = df.drop_duplicates(subset=subset)
df.to_csv(self.fname)
elif not self.df.empty:
self.df.to_csv(self.fname)
def create_getstr(self):
oris = self.oris
mid = self.mid
dstr = self.date.strftime("%Y-%m-%d")
mstr = "monitoringplan"
getstr = quote("/".join([mstr, str(oris), str(mid), dstr]))
return getstr
def unpack(self, data):
"""
Returns:
Information for one oris code and monitoring location.
columns
stackname, unit, stackheight, crossAreaExit,
crossAreaFlow, locID, isunit
Example ORIS 1571 unit 2 has no stack heigth.
"""
ihash = data["data"]
ft2m = 0.3048
dlist = []
# The stackname may contain multiple 'units'
stackname = ihash["unitStackName"]
stackhash = {}
shash = {}
# first go through the unitStackConfigurations
# sometimes a unit can have more than one unitStack.
# TODO - not sure how to handle this.
# y2009 3788 oris has this issue but both unit stacks have
# the same stack height so it is not an issue.
# the api seems to do emissions by the stack and not by
# the unit. so this may be a non-issue for api data.
# oris 1305 y2017 has unitStack CP001 and unitID GT1 and GT3.
# height is given for GT1 and GT3 and NOT CP001.
for stackconfig in ihash["unitStackConfigurations"]:
# this maps the unitid to the stack id.
# after reading in the data, go back and assign
# stack height to the unit based on the stackconfig.
if "unitId" in stackconfig.keys():
name = stackconfig["unitId"]
if name in shash.keys():
wstr = "-----------------------------\n"
wstr += "WARNING: unit " + name + "\n"
wstr += " oris; " + self.oris + "\n"
wstr += "has multiple unitStacks \n"
wstr += shash[name] + " " + stackconfig["unitStack"] + "\n"
wstr += "-----------------------------\n"
print(wstr)
shash[name] = stackconfig["unitStack"]
else:
print("STACKconfig")
print(stackconfig)
# next through the monitoringLocations
for unithash in ihash["monitoringLocations"]:
dhash = {}
name = unithash["name"]
print('NAME ', name)
dhash["name"] = name
if name in shash.keys():
dhash["stackunit"] = shash[name]
else:
dhash["stackunit"] = name
dhash["isunit"] = unithash["isUnit"]
dhash["stackname"] = stackname
for att in unithash["locationAttributes"]:
if "stackHeight" in att.keys():
print("stackheight " + name)
print(att["stackHeight"])
try:
dhash["stackht"] = float(att["stackHeight"]) * ft2m
except:
dhash["stackht"] = np.NaN
else:
dhash["stackht"] = np.NaN
# dhash["crossAreaExit"] = att["crossAreaExit"]
# dhash["crossAreaFlow"] = att["crossAreaFlow"]
# dhash["locID"] = att["locId"]
# dhash["isunit"] = att["isUnit"]
# dlist.append(dhash)
# each monitoringLocation has list of monitoringMethods
iii=0
for method in unithash["monitoringMethods"]:
#print('METHOD LIST', method)
if 'SO2' in method["parameterCode"]:
print('SO2 data')
dhash["parameterCode"] = method["parameterCode"]
dhash["methodCode"] = method["methodCode"]
dhash["beginDateHour"] =\
pd.to_datetime(method["beginDateHour"], format=self.dfmt)
dhash["endDateHour"] = \
pd.to_datetime(method["endDateHour"], format=self.dfmt)
dhash["oris"] = self.oris
dhash["mid"] = self.mid
dhash["request_date"] = self.date
print('Monitoring Location ------------------')
print(dhash)
print('------------------')
dlist.append(copy.deepcopy(dhash))
iii+=1
# if there is no monitoring method for SO2
if iii==0:
dhash["parameterCode"] = 'None'
dhash["methodCode"] = 'None'
dhash["beginDateHour"] = pd.NaT
dhash["endDateHour"] = pd.NaT
dhash["oris"] = self.oris
dhash["mid"] = self.mid
dhash["request_date"] = self.date
print('Monitoring Location ------------------')
print(dhash)
print('------------------')
dlist.append(copy.deepcopy(dhash))
#print(dlist)
df = pd.DataFrame(dlist)
#print('DF1 ------------------')
#print(df[['oris','name','methodCode','beginDateHour']])
nseries = df.set_index("name")
nseries = nseries["stackht"]
nhash = nseries.to_dict()
def find_stackht(name, stackht, shash, nhash):
if pd.isna(stackht):
# this handles case when height is specified for the stackId
# and not the unitId
if name in shash.keys():
sid = shash[name]
stackht = nhash[sid]
# this handles case when height is specified for the unitId
# and not the stackId
else:
ahash = dict((y, x) for x, y in shash.items())
if name in ahash.keys():
sid = ahash[name]
stackht = nhash[sid]
return stackht
df["stackht"] = df.apply(
lambda row: find_stackht(row["name"], row["stackht"], shash, nhash), axis=1
)
df["stackht_unit"] = "m"
print('DF2 ------------------')
print(df)
return df
# Then have list of dicts
# unitOperations
# unitPrograms
# unitFuels
# TO DO need to change this so don't overwrite if more than one fuel.
# for fuel in unit['unitFuels']:
# chash={}
# chash['fuel'] = fuel['fuelCode']
# chash['fuelname'] = fuel['fuelDesc']
# chash['fuelindCode'] = fuel['indCode']
# unitControls
# monitoringMethods
# for method in unit['monitoringMethods']:
# bhash={}
# if method['parameterCode'] == 'SO2':
# bhash['methodCode'] = method['methodCode']
# bhash['subDataCode'] = method['subDataCode']
# mercuryToxicsStandardsMethods
# spanDetails
# systemFlows
# analyzerRanges
# emissionsFormulas
# for method in unit['emissionsFormulas']:
# if method['parameterCode'] == 'SO2':
# bhash['equationCode'] = method['equationCode']
# rectangularDuctWallEffectAdjustmentFactors
# loadlevels (load levels for different date ranges)
# monitoringDefaults
# ******
# monitoringSystems
# some systems may be more accurate than others.
# natural gas plants emissions may have less uncertainty.
# this is complicated because entires for multiple types of equipment.
# monitoringQualifications
# quarterlySummaries
# emissionSummaries
# owners
# qaTestSummaries
# reportingFrequencies
# unitStackConfigurations
# comments
# contacts
# responsibilities
class FacilitiesData(EpaApiObject):
"""
class that represents data returned by facilities call to the restapi.
Attributes:
self.fname : filename for reading and writing df to csv file.
self.df : dataframe
columns are
begin time,
end time,
isunit (boolean),
latitude,
longitude,
facility_name,
oris,
unit
Methods:
__init__
printall : returns a string with the unpacked data.
get : sends request to the restapi and calls unpack.
oris_by_area : returns list of oris codes in an area
get_units : returns a list of units for an oris code
set_filename : set filename to save and load from.
load : load datafraem from csv file
save : save dateframe to csv file
get : request facilities information from api
unpack : process the data sent back from the api
and put relevant info in a dataframe.
"""
def __init__(self, fname="Fac.csv", prompt=False, save=True):
self.plan_hash = {}
super().__init__(fname, save, prompt)
def process_time_fields(self, df):
"""
time fields give year and quarter.
This converts them to a datetime object with
date at beginning of quarter.
"""
def process_unit_time(instr):
instr = str(instr)
# there are many None in end time field.
try:
year = int(instr[0:4])
except:
return None
quarter = int(instr[4])
if quarter == 1:
dt = datetime.datetime(year, 1, 1)
elif quarter == 2:
dt = datetime.datetime(year, 4, 1)
elif quarter == 3:
dt = datetime.datetime(year, 7, 1)
elif quarter == 4:
dt = datetime.datetime(year, 10, 1)
return dt
df["begin time"] = df.apply(
lambda row: process_unit_time(row["begin time"]), axis=1
)
df["end time"] = df.apply(
lambda row: process_unit_time(row["end time"]), axis=1
)
return df
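# Example of the conversion above: the string "20163" (year 2016, quarter 3)
# becomes datetime.datetime(2016, 7, 1); an unparseable value such as "None"
# is mapped to None.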
def __str__(self):
cols = self.df.columns
rstr = ", ".join(cols)
return rstr
def create_getstr(self):
"""
used to send the request.
"""
return "facilities"
def oris_by_area(self, llcrnr, urcrnr):
"""
llcrnr : tuple (float,float)
urcrnr : tuple (float,float)
returns list of oris codes in box defined by
llcrnr and urcrnr
"""
dftemp = obs_util.latlonfilter(self.df, llcrnr, urcrnr)
orislist = dftemp["oris"].unique()
return orislist
def state_from_oris(self, orislist):
"""
orislist : list of oris codes
Returns
list of state abbreviations
"""
statelist = []
temp = self.df[self.df["oris"].isin(orislist)]
return temp["state"].unique()
def get_units(self, oris):
"""
oris : int
Returns list of monitoring location ids
for a particular oris code.
"""
oris = str(oris)
# if self.df.empty:
# self.facdata()
temp = self.df[self.df["oris"] == oris]
units = temp["unit"].unique()
return units
def process_unit_time(self, instr):
instr = str(instr)
year = int(instr[0:4])
quarter = int(instr[4])
if quarter == 1:
dt = datetime.datetime(year, 1, 1)
elif quarter == 2:
dt = datetime.datetime(year, 4, 1)
elif quarter == 3:
dt = datetime.datetime(year, 7, 1)
elif quarter == 4:
dt = datetime.datetime(year, 10, 1)
return dt
def get_unit_start(self, oris, unit):
oris = str(oris)
temp = self.df[self.df["oris"] == oris]
temp = temp[temp["unit"] == unit]
start = temp["begin time"].unique()
end = temp["end time"].unique()
sdate = []
for sss in start:
sdate.append(self.process_unit_time(sss))
return sdate
def get_unit_request(self, oris, unit, sdate):
oris = str(oris)
temp = self.df[self.df["oris"] == oris]
temp = temp[temp["unit"] == unit]
temp = self.process_time_fields(temp)
temp = temp[temp["begin time"] <= sdate]
if temp.empty:
return None
temp["testdate"] = temp.apply(
lambda row: test_end(row["end time"], sdate), axis=1
)
print("--------------------------------------------")
print("Monitoring Plans available")
klist = ["testdate", "begin time", "end time", "unit", "oris", "request_string"]
print(temp[klist])
print("--------------------------------------------")
temp = temp[temp["testdate"] == True]
rstr = temp["request_string"].unique()
return rstr
def unpack(self, data):
"""
iterates through a response which contains nested dictionaries and lists.
# facilties 'data' is a list of dictionaries.
# there is one dictionary for each oris code.
# Each dictionary has a list under the key monitoringLocations.
# each monitoryLocation has a name which is what is needed
# for the locationID input into the get_emissions.
return is a dataframe with following fields.
oris
facility name
latitude
longitude
status
begin time
end time
unit id
isunit
"""
# dlist is a list of dictionaries.
dlist = []
# originally just used one dictionary but if doing
# a['dog'] = 1
# dlist.append(a)
# a['dog'] = 2
# dlist.append(a)
# for some reason dlist will then update the dictionary and will get
# dlist = [{'dog': 2}, {'dog':2}] instead of
# dlist = [{'dog': 1}, {'dog':2}]
slist = []
for val in data["data"]:
ahash = {}
ahash["oris"] = str(val["orisCode"])
ahash["facility_name"] = val["name"]
ahash["latitude"] = val["geographicLocation"]["latitude"]
ahash["longitude"] = val["geographicLocation"]["longitude"]
state1 = val["state"]
ahash["state"] = state1["abbrev"]
# ahash['time_offset'] = get_timezone_offset(ahash['latitude'],
# ahash['longitude'])
# keep track of which locations belong to a plan
plan_number = 1
# list 2
for sid in val["units"]:
unithash = {}
unitid = sid["unitId"]
unithash["unit"] = unitid
unithash["oris"] = ahash["oris"]
for gid in sid["generators"]:
capacity = gid["nameplateCapacity"]
# capacity_hash['unitid'] = capacity
unithash["capacity"] = capacity
for gid in sid["fuels"]:
if gid["indicatorDescription"].strip() == "Primary":
fuel = gid["fuelCode"]
# fuel_hash['unitid'] = fuel
unithash["primary_fuel"] = fuel
else:
unithash["primary_fuel"] = np.NaN
for gid in sid["controls"]:
if gid["parameterCode"].strip() == "SO2":
control = gid["controlCode"]
# control_hash['unitid'] = control
unithash["so2_contro"] = control
else:
unithash["so2_contro"] = np.NaN
slist.append(unithash)
unitdf = pd.DataFrame(slist)
for sid in val["monitoringPlans"]:
bhash = {}
# if sid["status"] == "Active":
bhash["plan number"] = plan_number
bhash["status"] = sid["status"]
bhash["begin time"] = str(sid["beginYearQuarter"])
bhash["end time"] = str(sid["endYearQuarter"])
# bhash["state"] = str(sid["abbrev"])
plan_name = []
blist = []
for unit in sid["monitoringLocations"]:
chash = {}
chash["unit"] = unit["name"]
chash["isunit"] = unit["isUnit"]
chash.update(ahash)
chash.update(bhash)
blist.append(chash)
# dlist.append(chash)
plan_name.append(chash["unit"])
plan_name.sort()
request_string = quote(str.join(",", plan_name))
self.plan_hash[plan_number] = request_string
for hhh in blist:
hhh["request_string"] = request_string
plan_number += 1
dlist.extend(blist)
df = pd.DataFrame(dlist)
df = pd.merge(
df, unitdf, how="left", left_on=["unit", "oris"], right_on=["unit", "oris"]
)
return df
def unpack_response(dhash, deep=100, pid=0):
"""
iterates through a response which contains nested dictionaries and lists.
dhash: dictionary which may be nested.
deep: int
indicates how deep to print out nested levels.
pid : int
"""
rstr = ""
for k2 in dhash.keys():
iii = pid
spc = " " * iii
rstr += spc + str(k2) + " " + str(type(dhash[k2])) + " : "
# UNPACK DICTIONARY
if iii < deep and isinstance(dhash[k2], dict):
rstr += "\n"
iii += 1
rstr += spc
rstr += unpack_response(dhash[k2], pid=iii)
rstr += "\n"
# UNPACK LIST
elif isinstance(dhash[k2], list):
iii += 1
rstr += "\n---BEGIN LIST---" + str(iii) + "\n"
for val in dhash[k2]:
if isinstance(val, dict):
rstr += unpack_response(val, deep=deep, pid=iii)
rstr += "\n"
else:
rstr += spc + "listval " + str(val) + str(type(val)) + "\n"
rstr += "---END LIST---" + str(iii) + "\n"
elif isinstance(dhash[k2], str):
rstr += spc + dhash[k2] + "\n"
elif isinstance(dhash[k2], int):
rstr += spc + str(dhash[k2]) + "\n"
elif isinstance(dhash[k2], float):
rstr += spc + str(dhash[k2]) + "\n"
else:
rstr += "\n"
return rstr
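# Minimal sketch of what unpack_response produces (the input dictionary here is
# made up):
# print(unpack_response({"status": "ok", "data": [{"name": "unit1"}]}))
# prints each key with its python type, recursing into nested dictionaries and
# expanding lists between ---BEGIN LIST--- / ---END LIST--- markers.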
def get_monitoring_plan(oris, mid, mrequest, date1, dflist):
"""
oris : oris code
mid : unit id
mrequest : list of strings: request to send
date1 : date to request
dflist : in/out list of tuples [(oris, mid, stackht)]
"""
# adds to list of oris, mid, stackht which will later be turned into
# a dataframe with that information.
status_code = 204
iii = 0
mhash = None
for mr in mrequest:
print("Get Monitoring Plan " + mr)
plan = MonitoringPlan(str(oris), mr, date1)
status_code = plan.status_code
stackht = plan.get_stackht(mid)
if len(stackht) == 1:
print(len(stackht))
stackht = stackht[0]
print(stackht)
# mhash = plan.to_dict(mid)
#if mhash:
# if len(mhash) > 1:
# print(
# "CEMS class WARNING: more than one \
# Monitoring location for this unit\n"
# )
# print(str(oris) + " " + str(mid) + "---")
# for val in mhash.keys():
# print(val, mhash[val])
# sys.exit()
# else:
# mhash = mhash[0]
# stackht = float(mhash["stackht"])
else:
print('Stack height not determined ', stackht)
stackht = None
istr = "\n" + "Could not retrieve stack height from monitoring plan \n"
istr += "Please enter stack height (in meters) \n"
istr += "for oris " + str(oris) + " and unit" + str(mid) + " \n"
test = input(istr)
try:
stackht = float(test)
except:
stackht = None
method = plan.get_method(mid, [date1, date1])
print('METHODS returned', method, mid, str(oris))
# catchall so do not double count.
# currently CEM and CEMF23 result in same EmissionCall request string.
if method:
if 'CEM' in method and 'CEMF23' in method:
method = 'CEM'
dflist.append((str(oris), mid, stackht))
return dflist, method
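# Hedged usage sketch (values are hypothetical; the strings in mrequest are
# normally the quoted unit-name strings built by FacilitiesData.get_unit_request):
# dflist, method = get_monitoring_plan(3788, "1", ["1"], datetime.datetime(2016, 4, 1), [])
# dflist now ends with an (oris, unit, stackht) tuple and method holds the SO2
# method code(s) found for that date, or None.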
def add_data_area(rdate, area, verbose=True):
fac = FacilitiesData()
llcrnr = (area[0], area[1])
urcrnr = (area[2], area[3])
orislist = fac.oris_by_area(llcrnr, urcrnr)
return orislist
class CEMS:
"""
Class for data from continuous emission monitoring systems (CEMS).
Data from power plants can be downloaded from
ftp://newftp.epa.gov/DMDNLoad/emissions/
Attributes
----------
efile : string
filename of the emissions file.
url : string
url of the data source.
info : type string
Information about data.
df : pandas DataFrame
dataframe containing emissions data.
Methods
----------
__init__(self)
add_data(self, rdate, states=['md'], download=False, verbose=True):
"""
def __init__(self):
self.efile = None
self.lb2kg = 0.453592 # number of kilograms per pound.
self.info = "Data from continuous emission monitoring systems (CEMS)\n"
self.df = pd.DataFrame()
# Copyright (C) 2016 <NAME> <<EMAIL>>
# All rights reserved.
# This file is part of the Python Automatic Forecasting (PyAF) library and is made available under
# the terms of the 3 Clause BSD license
import pandas as pd
import numpy as np
import datetime
from datetime import date
# from memory_profiler import profile
import os.path
from . import stocks_symbol_list as symlist
class cTimeSeriesDatasetSpec:
def __init__(self):
self.mSignalFrame = pd.DataFrame()
self.mExogenousDataFrame = None;
self.mExogenousVariables = None;
self.mHierarchy = None;
def getName(self):
return self.mName;
def getTimeVar(self):
return self.mTimeVar;
def getSignalVar(self):
return self.mSignalVar;
def getDescription(self):
return self.mDescription;
def getPastData(self):
return self.mPastData;
def getFutureData(self):
return self.mFutureData;
def getHorizon(self):
return self.mHorizon;
# @profile
def load_airline_passengers() :
tsspec = cTimeSeriesDatasetSpec();
tsspec.mName = "AirLine_Passengers"
tsspec.mDescription = "AirLine Passengers"
trainfile = "https://vincentarelbundock.github.io/Rdatasets/csv/datasets/AirPassengers.csv"
cols = ["ID" , "time", "AirPassengers"];
df_train = pd.read_csv(trainfile, names = cols, sep=r',', index_col='ID', engine='python', skiprows=1);
tsspec.mTimeVar = "time";
tsspec.mSignalVar = "AirPassengers";
tsspec.mHorizon = 12;
tsspec.mPastData = df_train[:-tsspec.mHorizon];
tsspec.mFutureData = df_train.tail(tsspec.mHorizon);
return tsspec
# @profile
def load_cashflows() :
tsspec = cTimeSeriesDatasetSpec();
tsspec.mName = "CashFlows"
tsspec.mDescription = "CashFlows dataset"
trainfile = "https://raw.githubusercontent.com/antoinecarme/pyaf/master/data/CashFlows.txt"
tsspec.mFullDataset = pd.read_csv(trainfile, sep=r'\t', engine='python');
tsspec.mFullDataset['Date'] = tsspec.mFullDataset['Date'].apply(lambda x : datetime.datetime.strptime(x, "%Y-%m-%d"))
tsspec.mFullDataset = tsspec.mFullDataset.head(251)
tsspec.mTimeVar = "Date";
tsspec.mSignalVar = "Cash";
tsspec.mHorizon = 21;
tsspec.mPastData = tsspec.mFullDataset[:-tsspec.mHorizon];
tsspec.mFutureData = tsspec.mFullDataset.tail(tsspec.mHorizon);
return tsspec
#ozone-la.txt
#https://datamarket.com/data/set/22u8/ozon-concentration-downtown-l-a-1955-1972#
def to_date(idatetime):
d = datetime.date(idatetime.year, idatetime.month, idatetime.day);
return d;
# @profile
def load_ozone() :
tsspec = cTimeSeriesDatasetSpec();
tsspec.mName = "Ozone"
tsspec.mDescription = "https://datamarket.com/data/set/22u8/ozon-concentration-downtown-l-a-1955-1972"
#trainfile = "data/ozone-la.csv"
trainfile = "https://raw.githubusercontent.com/antoinecarme/TimeSeriesData/master/ozone-la.csv"
cols = ["Month" , "Ozone"];
df_train = pd.read_csv(trainfile, names = cols, sep=r',', engine='python', skiprows=1);
df_train['Time'] = df_train['Month'].apply(lambda x : datetime.datetime.strptime(x, "%Y-%m"))
lType = 'datetime64[D]';
# df_train['Time'] = df_train['Time'].apply(to_date).astype(lType);
print(df_train.head());
tsspec.mTimeVar = "Time";
tsspec.mSignalVar = "Ozone";
tsspec.mHorizon = 12;
tsspec.mPastData = df_train[:-tsspec.mHorizon];
tsspec.mFutureData = df_train.tail(tsspec.mHorizon);
return tsspec
# @profile
def load_ozone_exogenous() :
tsspec = cTimeSeriesDatasetSpec();
tsspec.mName = "Ozone"
tsspec.mDescription = "https://datamarket.com/data/set/22u8/ozon-concentration-downtown-l-a-1955-1972"
# trainfile = "data/ozone-la-exogenous.csv"
trainfile = "https://raw.githubusercontent.com/antoinecarme/pyaf/master/data/ozone-la-exogenous.csv"
# "https://raw.githubusercontent.com/antoinecarme/TimeSeriesData/master/ozone-la.csv"
cols = ["Date", "Exog2", "Exog3", "Exog4", "Ozone"];
df_train = pd.read_csv(trainfile, names = cols, sep=',', engine='python', skiprows=1);
df_train['Time'] = df_train['Date'].apply(lambda x : datetime.datetime.strptime(x, "%Y-%m"))
tsspec.mTimeVar = "Time";
tsspec.mSignalVar = "Ozone";
tsspec.mExogenousVariables = ["Exog2", "Exog3", "Exog4"];
# this is the full dataset. It must contain future exogenous data
tsspec.mExogenousDataFrame = df_train;
# tsspec.mExogenousVariables = ["Exog2"];
tsspec.mHorizon = 12;
tsspec.mPastData = df_train[:-tsspec.mHorizon];
tsspec.mFutureData = df_train.tail(tsspec.mHorizon);
print(df_train.head())
return tsspec
def load_ozone_exogenous_categorical() :
tsspec = cTimeSeriesDatasetSpec();
tsspec.mName = "Ozone"
tsspec.mDescription = "https://datamarket.com/data/set/22u8/ozon-concentration-downtown-l-a-1955-1972"
# trainfile = "data/ozone-la-exogenous.csv"
trainfile = "https://raw.githubusercontent.com/antoinecarme/pyaf/master/data/ozone-la-exogenous.csv"
# "https://raw.githubusercontent.com/antoinecarme/TimeSeriesData/master/ozone-la.csv"
cols = ["Date", "Exog2", "Exog3", "Exog4", "Ozone"];
df_train = pd.read_csv(trainfile, names = cols, sep=r',', engine='python', skiprows=1);
df_train['Time'] = df_train['Date'].apply(lambda x : datetime.datetime.strptime(x, "%Y-%m"))
for col in ["Exog2", "Exog3", "Exog4"]:
categs = sorted(df_train[col].unique())
cat_type = pd.api.types.CategoricalDtype(categories=categs, ordered=True)
df_train[col] = df_train[col].astype(cat_type)
ozone_shifted_2 = df_train.shift(2)
ozone_shifted_1 = df_train.shift(1)
lSig1 = df_train['Ozone'] * (ozone_shifted_2['Exog3'] == 'AW') + df_train['Ozone'] * (ozone_shifted_1['Exog3'] == 'AX')
lSig2 = df_train['Ozone'] * (ozone_shifted_1['Exog2'] >= 4)
lSig3 = df_train['Ozone'] * (ozone_shifted_1['Exog4'] <= 'P_S')
df_train['Ozone2'] = lSig1 + lSig2 + lSig3
tsspec.mTimeVar = "Time";
tsspec.mSignalVar = "Ozone2";
tsspec.mExogenousVariables = ["Exog2", "Exog3", "Exog4"];
# this is the full dataset. It must contain future exogenous data
tsspec.mExogenousDataFrame = df_train;
# tsspec.mExogenousVariables = ["Exog2"];
tsspec.mHorizon = 12;
tsspec.mPastData = df_train[:-tsspec.mHorizon];
tsspec.mFutureData = df_train.tail(tsspec.mHorizon);
print(df_train.head())
return tsspec
def add_some_noise(x , p , min_sig, max_sig, e , f):
if(max_sig > min_sig):
delta = (x - min_sig) / (max_sig - min_sig);
if( (delta >= e) and (delta <= f) ):
if(np.random.random() < p):
return "A";
return "0";
def gen_trend(N , trendtype):
lTrend = pd.Series(dtype='float64');
a = (2 * np.random.random() - 1);
b = (2 * np.random.random() - 1);
c = (2 * np.random.random() - 1);
print("TREND" , a , b ,c);
lTrend = 0
if(trendtype == "ConstantTrend"):
lTrend = a
elif(trendtype == "LinearTrend"):
x = np.arange(0,N) / N ;
lTrend = a * x + b;
elif(trendtype == "PolyTrend"):
x = np.arange(0,N) / N;
lTrend = a * x * x + b * x + c;
# lTrend.plot();
return lTrend;
def gen_cycle(N , cycle_length):
lCycle = pd.Series(dtype='float64');
if(cycle_length > 0):
lCycle = np.arange(0,N) % cycle_length;
lValues = np.random.randint(0, cycle_length, size=(cycle_length, 1)) /cycle_length;
lCycle = pd.Series(lCycle).apply(lambda x : lValues[int(x)][0]);
if(cycle_length == 0):
lCycle = 0;
return lCycle;
def gen_ar(N , ar_order):
lAR = pd.Series(dtype='float64');
if(ar_order > 0):
lSig = pd.Series(np.arange(0, N) / N);
lAR = 0;
a_p = 1;
for p in range(1 , ar_order+1):
a_p = a_p * np.random.uniform();
lAR = lSig.shift(p).fillna(0) * a_p + lAR;
if(ar_order == 0):
lAR = 0;
return lAR;
def apply_old_transform(signal , transform):
transformed = None
if(transform == "exp"):
transformed = np.exp(-signal)
if(transform == "log"):
transformed = np.log(signal)
if(transform == "sqrt"):
transformed = np.sqrt(signal)
if(transform == "sqr"):
transformed = np.power(signal , 2)
if(transform == "pow3"):
transformed = np.power(signal , 3)
if(transform == "inv"):
transformed = 1.0 / (signal)
if(transform == "diff"):
transformed = signal - signal.shift(1).fillna(0.0);
if(transform == "cumsum"):
transformed = signal.cumsum();
return transformed
def apply_transform(signal , transform):
import pyaf.TS.Signal_Transformation as tstransf
arg = None
if(transform == "Quantization"):
arg = 10
if(transform == "BoxCox"):
arg = 0
tr = tstransf.create_tranformation(transform , arg)
transformed = None
if(tr is None):
transformed = apply_old_transform(signal, transform)
else :
tr.fit(signal)
transformed = tr.invert(signal)
# print(signal.head())
# print(transformed.head())
return transformed
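# Sketch of the two paths above (signal values are arbitrary):
# apply_old_transform(pd.Series([1.0, 2.0, 4.0]), "log") applies np.log element-wise,
# while apply_transform(signal, "Quantization") is routed through
# pyaf.TS.Signal_Transformation with arg=10 instead of the hard-coded numpy path.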
def generate_random_TS(N , FREQ, seed, trendtype, cycle_length, transform, sigma = 1.0, exog_count = 20, ar_order = 0) :
tsspec = cTimeSeriesDatasetSpec();
tsspec.mName = "Signal_" + str(N) + "_" + str(FREQ) + "_" + str(seed) + "_" + str(trendtype) + "_" + str(cycle_length) + "_" + str(transform) + "_" + str(sigma) + "_" + str(exog_count) ;
print("GENERATING_RANDOM_DATASET" , tsspec.mName);
tsspec.mDescription = "Random generated dataset";
np.random.seed(seed);
df_train = pd.DataFrame();
#df_train['Date'] = np.arange(0,N)
'''
http://pandas.pydata.org/pandas-docs/stable/timeseries.html
DateOffset objects
In the preceding examples, we created DatetimeIndex objects at various frequencies by passing in frequency strings
like "M", "W", and "BM" to the freq keyword. Under the hood, these frequency strings are being translated into an
instance of pandas DateOffset, which represents a regular frequency increment.
Specific offset logic like "month", "business day", or "one hour" is represented in its various subclasses.
'''
df_train['Date'] = pd.date_range('2000-1-1', periods=N, freq=FREQ)
df_train['GeneratedTrend'] = gen_trend(N , trendtype);
df_train['GeneratedCycle'] = gen_cycle(N , cycle_length);
df_train['GeneratedAR'] = gen_ar(N , ar_order);
df_train['Noise'] = np.random.randn(N, 1) * sigma;
df_train['Signal'] = 100 * df_train['GeneratedTrend'] + 10 * df_train['GeneratedCycle'] + 1 * df_train['GeneratedAR'] + df_train['Noise']
min_sig = df_train['Signal'].min();
max_sig = df_train['Signal'].max();
# print(df_train.info())
tsspec.mExogenousVariables = [];
tsspec.mExogenousDataFrame = pd.DataFrame();
tsspec.mExogenousDataFrame['Date'] = df_train['Date']
for e in range(exog_count):
label = "exog_" + str(e+1);
tsspec.mExogenousDataFrame[label] = df_train['Signal'].apply(
lambda x : add_some_noise(x , 0.1 ,
min_sig,
max_sig,
e/exog_count ,
(e+3)/exog_count ));
tsspec.mExogenousVariables = tsspec.mExogenousVariables + [ label ];
# print(tsspec.mExogenousDataFrame.info())
# this is the full dataset. It must contain future exogenous data
pos_signal = df_train['Signal'] - min_sig + 1.0;
df_train['Signal'] = apply_transform(pos_signal , transform)
# df_train.to_csv(tsspec.mName + ".csv");
tsspec.mTimeVar = "Date";
tsspec.mSignalVar = "Signal";
lHorizon = min(12, max(1, N // 30));
tsspec.mHorizon = {}
tsspec.mHorizon[tsspec.mSignalVar] = lHorizon
tsspec.mHorizon[tsspec.mName] = lHorizon
tsspec.mFullDataset = df_train;
tsspec.mFullDataset[tsspec.mName] = tsspec.mFullDataset['Signal'];
tsspec.mPastData = df_train[:-lHorizon];
tsspec.mFutureData = df_train.tail(lHorizon);
return tsspec
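# Hedged usage sketch (every argument below is illustrative):
# ds = generate_random_TS(N=365, FREQ='D', seed=0, trendtype='LinearTrend',
#                         cycle_length=12, transform='diff', sigma=0.5,
#                         exog_count=4, ar_order=2)
# ds.mPastData and ds.mFutureData then hold the training part and the final
# horizon rows of the generated signal.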
# @profile
def load_NN5():
tsspec = cTimeSeriesDatasetSpec();
tsspec.mName = "NN5";
tsspec.mDescription = "NN5 competition final dataset";
trainfile = "https://raw.githubusercontent.com/antoinecarme/pyaf/master/data/NN5-Final-Dataset.csv"
tsspec.mFullDataset = pd.read_csv(trainfile, sep='\t', header=0, engine='python');
tsspec.mFullDataset['Day'] = tsspec.mFullDataset['Day'].apply(lambda x : datetime.datetime.strptime(x, "%d-%b-%y"))
# tsspec.mFullDataset = tsspec.mFullDataset
# tsspec.mFullDataset.fillna(method = "ffill", inplace = True);
tsspec.mHorizon = {};
for sig in tsspec.mFullDataset.columns:
tsspec.mHorizon[sig] = 56
# df_test = tsspec.mFullDataset.tail(tsspec.mHorizon);
df_train = tsspec.mFullDataset;
tsspec.mTimeVar = "Day";
tsspec.mSignalVar = "Signal";
# tsspec.mPastData = df_train[:-tsspec.mHorizon];
# tsspec.mFutureData = df_train.tail(tsspec.mHorizon);
return tsspec
# @profile
def load_NN3_part1():
tsspec = cTimeSeriesDatasetSpec();
tsspec.mName = "NN3_Part1";
tsspec.mDescription = "NN3 competition final dataset - part 1";
trainfile = "https://raw.githubusercontent.com/antoinecarme/pyaf/master/data/NN3-Final-Dataset-part1.csv"
tsspec.mFullDataset = pd.read_csv(trainfile, sep='\t', header=0, engine='python');
tsspec.mFullDataset['Date'] = np.arange(0, tsspec.mFullDataset.shape[0])
# tsspec.mFullDataset.fillna(method = "ffill", inplace = True);
tsspec.mHorizon = {};
for sig in tsspec.mFullDataset.columns:
tsspec.mHorizon[sig] = 18
#df_test = tsspec.mFullDataset.tail(tsspec.mHorizon);
df_train = tsspec.mFullDataset;
#.head(tsspec.mFullDataset.shape[0] - tsspec.mHorizon);
tsspec.mTimeVar = "Date";
tsspec.mSignalVar = "Signal";
return tsspec;
# @profile
def load_NN3_part2():
tsspec = cTimeSeriesDatasetSpec();
tsspec.mName = "NN3_Part2";
tsspec.mDescription = "NN3 competition final dataset - part 2";
trainfile = "data/NN3-Final-Dataset-part2.csv"
tsspec.mFullDataset = pd.read_csv(trainfile, sep='\t', header=0, engine='python');
tsspec.mFullDataset['Date'] = np.arange(0, tsspec.mFullDataset.shape[0])
# tsspec.mFullDataset.fillna(method = "ffill", inplace = True);
#df_test = tsspec.mFullDataset.tail(tsspec.mHorizon);
df_train = tsspec.mFullDataset
tsspec.mHorizon = {};
for sig in tsspec.mFullDataset.columns:
tsspec.mHorizon[sig] = 18
#.head(tsspec.mFullDataset.shape[0] - tsspec.mHorizon);
tsspec.mTimeVar = "Date";
tsspec.mSignalVar = "Signal";
return tsspec;
# @profile
def load_MWH_dataset(name):
tsspec = cTimeSeriesDatasetSpec();
tsspec.mName = "MWH " + name;
tsspec.mDescription = "MWH dataset ... " + name;
lSignal = name;
lTime = 'Time';
print("LAODING_MWH_DATASET" , name);
trainfile = "https://raw.githubusercontent.com/antoinecarme/TimeSeriesData/master/FMA/" + name +".csv";
# trainfile = "data/FMA/" + name + ".csv"
df_train = pd.read_csv(trainfile, sep=r',', header=None, engine='python', skipinitialspace=True);
# print(df_train.head(3));
type1 = np.dtype(df_train[df_train.columns[0]])
if(type1.kind == 'O'):
# there is (probably) a header, re-read the csv file
df_train = pd.read_csv(trainfile, sep=r',', header=0, engine='python', skipinitialspace=True);
if(df_train.shape[1] == 1):
# add dome fake date column
df_train2 = pd.DataFrame();
df_train2[lTime] = range(0, df_train.shape[0]);
df_train2[lSignal] = df_train[df_train.columns[0]];
df_train = df_train2.copy();
# keep only the first two columns (as date and signal)
df_train = df_train[[df_train.columns[0] , df_train.columns[1]]].dropna();
# rename the first two columns (as date and signal)
df_train.columns = [lTime , lSignal];
# print("MWH_SIGNAL_DTYPE", df_train[lSignal].dtype)
# print(df_train.head())
# df_train.to_csv("mwh-" + name + ".csv");
# print(df_train.info())
if(df_train[lSignal].dtype == object):
df_train[lSignal] = df_train[lSignal].astype(np.float64); ## apply(lambda x : float(str(x).replace(" ", "")));
# df_train[lSignal] = df_train[lSignal].apply(float)
tsspec.mFullDataset = df_train;
# print(tsspec.mFullDataset.info())
tsspec.mTimeVar = lTime;
tsspec.mSignalVar = lSignal;
tsspec.mHorizon = {};
lHorizon = 1
tsspec.mHorizon[lSignal] = lHorizon
tsspec.mPastData = df_train[:-lHorizon];
tsspec.mFutureData = df_train.tail(lHorizon);
return tsspec
# @profile
def load_M1_comp() :
tsspec = cTimeSeriesDatasetSpec();
tsspec.mName = "M1_COMP";
trainfile = "https://raw.githubusercontent.com/antoinecarme/pyaf/master/data/M1.csv"
tsspec.mFullDataset = pd.read_csv(trainfile, sep='\t', header=0, engine='python');
tsspec.mHorizons = tsspec.mFullDataset[['NF' , 'Series']].copy();
tsspec.mHorizons['Series'] = tsspec.mHorizons['Series'].apply(lambda x : x.replace(" ", ""))
tsspec.mFullDataset['Index'] = tsspec.mFullDataset['Series']
tsspec.mFullDataset['Index'] = tsspec.mFullDataset['Index'].apply(lambda x : x.replace(" ", ""))
tsspec.mFullDataset.drop(['Series', 'N Obs', 'Seasonality', 'NF', 'Type', 'Starting date', 'Category'], axis=1, inplace=True);
tsspec.mFullDataset.set_index(['Index'], inplace=True)
tsspec.mFullDataset = tsspec.mFullDataset.T
tsspec.mFullDataset.reindex()
tsspec.mFullDataset['Date'] = range(0 , tsspec.mFullDataset.shape[0])
tsspec.mTimeVar = 'Date';
tsspec.mHorizon = {};
for i in range(0, tsspec.mHorizons.shape[0]):
tsspec.mHorizon[tsspec.mHorizons['Series'][i]] = tsspec.mHorizons['NF'][i];
del tsspec.mHorizons;
return tsspec
# @profile
def load_M2_comp() :
tsspec = cTimeSeriesDatasetSpec();
tsspec.mName = "M2_COMP";
trainfile = "https://raw.githubusercontent.com/antoinecarme/pyaf/master/data/M2.csv"
tsspec.mFullDataset = pd.read_csv(trainfile, sep='\t', header=0, engine='python');
tsspec.mHorizons = tsspec.mFullDataset[['NF' , 'Series']].copy();
tsspec.mHorizons['Series'] = tsspec.mHorizons['Series'].apply(lambda x : x.replace(" ", ""))
tsspec.mFullDataset['Index'] = tsspec.mFullDataset['Series']
tsspec.mFullDataset['Index'] = tsspec.mFullDataset['Index'].apply(lambda x : x.replace(" ", ""))
tsspec.mFullDataset.drop(['Series', 'N', 'Seasonality', 'Starting Date', 'Ending Date'], axis=1, inplace=True);
tsspec.mFullDataset.set_index(['Index'], inplace=True)
tsspec.mFullDataset = tsspec.mFullDataset.T
tsspec.mFullDataset.reindex()
tsspec.mFullDataset['Date'] = range(0 , tsspec.mFullDataset.shape[0])
tsspec.mTimeVar = 'Date';
tsspec.mHorizon = {};
for i in range(0, tsspec.mHorizons.shape[0]):
tsspec.mHorizon[tsspec.mHorizons['Series'][i]] = tsspec.mHorizons['NF'][i];
#del tsspec.mHorizons;
return tsspec
# @profile
def load_M3_Y_comp() :
tsspec = cTimeSeriesDatasetSpec();
tsspec.mName = "M3_Y_COMP";
trainfile = "https://raw.githubusercontent.com/antoinecarme/pyaf/master/data/M3_Yearly.csv"
tsspec.mFullDataset = pd.read_csv(trainfile, sep='\t', header=0, engine='python');
tsspec.mHorizons = tsspec.mFullDataset[['NF' , 'Series']].copy();
tsspec.mHorizons['Series'] = tsspec.mHorizons['Series'].apply(lambda x : x.replace(" ", ""))
tsspec.mFullDataset['Index'] = tsspec.mFullDataset['Series']
tsspec.mFullDataset['Index'] = tsspec.mFullDataset['Index'].apply(lambda x : x.replace(" ", ""))
tsspec.mFullDataset.drop(['Series', 'N', 'NF', 'Starting Year' , 'Category' , 'Unnamed: 5'], axis=1, inplace=True);
tsspec.mFullDataset.set_index(['Index'], inplace=True)
tsspec.mFullDataset = tsspec.mFullDataset.T
tsspec.mFullDataset.reindex()
tsspec.mFullDataset['Date'] = range(0 , tsspec.mFullDataset.shape[0])
tsspec.mTimeVar = 'Date';
tsspec.mHorizon = {};
for i in range(0, tsspec.mHorizons.shape[0]):
tsspec.mHorizon[tsspec.mHorizons['Series'][i]] = tsspec.mHorizons['NF'][i];
#del tsspec.mHorizons;
return tsspec
# @profile
def load_M3_Q_comp() :
tsspec = cTimeSeriesDatasetSpec();
tsspec.mName = "M3_Q_COMP";
trainfile = "https://raw.githubusercontent.com/antoinecarme/pyaf/master/data/M3_Quarterly.csv"
tsspec.mFullDataset = pd.read_csv(trainfile, sep='\t', header=0, engine='python');
tsspec.mHorizons = tsspec.mFullDataset[['NF' , 'Series']].copy();
tsspec.mHorizons['Series'] = tsspec.mHorizons['Series'].apply(lambda x : x.replace(" ", ""))
tsspec.mFullDataset['Index'] = tsspec.mFullDataset['Series']
tsspec.mFullDataset['Index'] = tsspec.mFullDataset['Index'].apply(lambda x : x.replace(" ", ""))
tsspec.mFullDataset.drop(['Series', 'N', 'NF', 'Starting Year' , 'Category' , 'Starting Quarter'], axis=1, inplace=True);
tsspec.mFullDataset.set_index(['Index'], inplace=True)
tsspec.mFullDataset = tsspec.mFullDataset.T
tsspec.mFullDataset.reindex()
tsspec.mFullDataset['Date'] = range(0 , tsspec.mFullDataset.shape[0])
tsspec.mTimeVar = 'Date';
tsspec.mHorizon = {};
for i in range(0, tsspec.mHorizons.shape[0]):
tsspec.mHorizon[tsspec.mHorizons['Series'][i]] = tsspec.mHorizons['NF'][i];
#del tsspec.mHorizons;
return tsspec
# @profile
def load_M3_M_comp() :
tsspec = cTimeSeriesDatasetSpec();
tsspec.mName = "M3_M_COMP";
trainfile = "https://raw.githubusercontent.com/antoinecarme/pyaf/master/data/M3_Monthly.csv"
tsspec.mFullDataset = pd.read_csv(trainfile, sep='\t', header=0, engine='python');
tsspec.mHorizons = tsspec.mFullDataset[['NF' , 'Series']].copy();
tsspec.mHorizons['Series'] = tsspec.mHorizons['Series'].apply(lambda x : x.replace(" ", ""))
tsspec.mFullDataset['Index'] = tsspec.mFullDataset['Series']
tsspec.mFullDataset['Index'] = tsspec.mFullDataset['Index'].apply(lambda x : x.replace(" ", ""))
tsspec.mFullDataset.drop(['Series', 'N', 'NF', 'Starting Year' , 'Category' , 'Starting Month'], axis=1, inplace=True);
tsspec.mFullDataset.set_index(['Index'], inplace=True)
tsspec.mFullDataset = tsspec.mFullDataset.T
tsspec.mFullDataset.reindex()
tsspec.mFullDataset['Date'] = range(0 , tsspec.mFullDataset.shape[0])
tsspec.mTimeVar = 'Date';
tsspec.mHorizon = {};
for i in range(0, tsspec.mHorizons.shape[0]):
tsspec.mHorizon[tsspec.mHorizons['Series'][i]] = tsspec.mHorizons['NF'][i];
#del tsspec.mHorizons;
return tsspec
# @profile
def load_M3_Other_comp() :
tsspec = cTimeSeriesDatasetSpec();
tsspec.mName = "M3_Other_COMP";
trainfile = "https://raw.githubusercontent.com/antoinecarme/pyaf/master/data/M3_Other.csv"
tsspec.mFullDataset = pd.read_csv(trainfile, sep='\t', header=0, engine='python');
tsspec.mHorizons = tsspec.mFullDataset[['NF' , 'Series']].copy();
tsspec.mHorizons['Series'] = tsspec.mHorizons['Series'].apply(lambda x : x.replace(" ", ""))
tsspec.mFullDataset['Index'] = tsspec.mFullDataset['Series']
tsspec.mFullDataset['Index'] = tsspec.mFullDataset['Index'].apply(lambda x : x.replace(" ", ""))
tsspec.mFullDataset.drop(['Series', 'N', 'NF', 'Category', 'Unnamed: 4', 'Unnamed: 5'], axis=1, inplace=True);
tsspec.mFullDataset.set_index(['Index'], inplace=True)
tsspec.mFullDataset = tsspec.mFullDataset.T
tsspec.mFullDataset.reindex()
tsspec.mFullDataset['Date'] = range(0 , tsspec.mFullDataset.shape[0])
tsspec.mTimeVar = 'Date';
tsspec.mHorizon = {};
for i in range(0, tsspec.mHorizons.shape[0]):
tsspec.mHorizon[tsspec.mHorizons['Series'][i]] = tsspec.mHorizons['NF'][i];
#del tsspec.mHorizons;
return tsspec
# @profile
def load_M4_comp(iType = None) :
"""
generated by script data/m4comp.R using the excellent M4Comp R package.
"""
tsspecs = {};
trainfile = "https://github.com/antoinecarme/pyaf/blob/master/data/M4Comp/M4Comp_" + iType + ".csv.gz?raw=true"
# trainfile = "data/M4Comp/M4Comp_" + iType + ".csv.gz"
df_full = pd.read_csv(trainfile, sep=',', header=0, engine='python', compression='gzip');
lHorizons = df_full[['H' , 'ID']].copy();
lHorizons['ID'] = lHorizons['ID'].apply(lambda x : x.replace(" ", ""));
lHorizons['H'] = lHorizons['H'].apply(lambda x : int(x))
for i in range(0, df_full.shape[0]):
tsspec = cTimeSeriesDatasetSpec();
series_name = lHorizons['ID'][i]
tsspec.mName = series_name;
if(((i+1) % 500) == 0):
print("loading ", i+1 , "/" , df_full.shape[0] , series_name);
tsspec.mPastData = pd.Series(df_full['PAST'][i].split(",")).apply(float);
tsspec.mFutureData = pd.Series(df_full['FUTURE'][i].split(",")).apply(float);
tsspec.mFullDataset = pd.DataFrame();
tsspec.mFullDataset[series_name] = tsspec.mPastData.append(tsspec.mFutureData).reindex();
tsspec.mFullDataset['Date'] = range(0 , tsspec.mFullDataset.shape[0])
tsspec.mTimeVar = "Date";
tsspec.mSignalVar = series_name;
tsspec.mFullDataset.reindex()
tsspec.mHorizon = {};
tsspec.mHorizon[series_name] = lHorizons['H'][i];
tsspec.mCategory = "M4Comp";
tsspecs[tsspec.mName] = tsspec;
return tsspecs
def get_stock_web_link():
YAHOO_LINKS_DATA = {}
lines = []
with open('data/yahoo_list.txt') as data_file:
lines = data_file.readlines()
lines = [line.rstrip('\n') for line in lines]
import re
for line in lines:
csv = line.replace('.csv', '')
csv = re.sub(r"^(.*)yahoo_", "", csv);
# print("YAHOO_LINKS_DATA" , csv, line)
YAHOO_LINKS_DATA[csv] = line;
print("ACQUIRED_YAHOO_LINKS" , len(YAHOO_LINKS_DATA));
return YAHOO_LINKS_DATA;
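# Hedged usage sketch ('AAPL' is only an example ticker and must exist as a key
# in the dictionary built above):
# links = get_stock_web_link()
# tsspec = load_yahoo_stock_price('AAPL', iLocal=False, YAHOO_LINKS_DATA=links)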
def load_yahoo_stock_price( stock , iLocal = True, YAHOO_LINKS_DATA = None) :
filename = YAHOO_LINKS_DATA.get(stock);
if(filename is None):
raise Exception("MISSING " + stock)
# print("YAHOO_DATA_LINK" , stock, filename);
tsspec = cTimeSeriesDatasetSpec();
tsspec.mName = "Yahoo_Stock_Price_" + stock
tsspec.mDescription = "Yahoo Stock Price using yahoo-finance package"
df_train = pd.DataFrame();
if(iLocal):
filename = "data/yahoo/" + filename
else:
base_uri = "https://raw.githubusercontent.com/antoinecarme/TimeSeriesData/master/YahooFinance/";
filename = base_uri + filename;
print("YAHOO_DATA_LINK_URI" , stock, filename);
if(os.path.isfile(filename)):
# print("already downloaded " + stock , "reloading " , filename);
df_train = pd.read_csv(filename)
# MIT License
#
# Copyright (c) 2020-2021 <NAME>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
'''
File: load_all_data.py
Author: <NAME>
Date: 2020
This file contains the necessary functions to load and process all of the CSV
data files.
'''
import pandas as pd
import numpy as np
import os
try:
import cathode.constants as cc
except ImportError:
### Ad-hoc solution if we don't have the cathode package
### Just define the constants...
class cc:
class M:
Ar = 39.948
Xe = 131.293
Hg = 200.59
atomic_mass = 1.66053904e-27
Boltzmann = 1.38064852e-23
e = 1.6021766208e-19
kB = 1.38064852e-23
mu0 = 4 * np.pi * 1e-6
sccm2eqA = 0.07174496294893724
Torr = 133.32236842105263
from import_db import dtypes,from_np_array,fileDictionary,cathodeList
def special_cases(df,cat):
'''
Deals with special cases on cathode-by-cathode basis.
'''
if cat == 'NEXIS':
### Fill two pressures
# 1. For 25 A, 5.5 sccm, we can arguably get the average pressure from
# the two closest cases (24 A and 26 A)
# /!\ This is an estimate to get an idea of the total pressure /!\
# It allows to plot a variety of things vs. pressure-diameter
# bcond: the location where we should put new data
bcond = (df.cathode == cat)
bcond &= (df.massFlowRate_sccm == 5.5)
bcond &= (df.orificeDiameter == 3.0)
bcond &= (df.dischargeCurrent == 25.0)
# datacond: the location we use as the data source
datacond = (df.cathode == cat)
datacond &= (df.massFlowRate_sccm == 5.5)
datacond &= (df.orificeDiameter == 2.5)
tdf = df[datacond]
ddf = tdf[np.isclose(tdf.dischargeCurrent,25,atol=1)]
averagePressure = np.nanmean(ddf.totalPressure)
df.loc[df[bcond].index,'totalPressure'] = averagePressure
# 2. For 25 A, 10 sccm, use the closest case (22 A, 10 sccm)
# /!\ This is an estimate to get an idea of the total pressure /!\
# It allows to plot a variety of things vs. pressure-diameter
bcond = df.cathode == cat
bcond &= (df.massFlowRate_sccm == 10.0)
bcond &= (df.orificeDiameter == 2.75)
bcond &= (df.dischargeCurrent == 25.0)
datacond = (df.cathode == cat)
datacond &= (df.massFlowRate_sccm == 10.0)
datacond &= (df.orificeDiameter == 2.5)
datacond &= (df.dischargeCurrent == 22.0)
averagePressure = df.at[df[datacond].index[0],'totalPressure']
df.loc[df[bcond].index,'totalPressure'] = averagePressure
elif (cat == 'JPL-1.5cm-3mm') or (cat == 'JPL-1.5cm-5mm'):
### The case 13.1 sccm, 25 A is pretty much the same as 13.0 sccm, 25 A
### Same with 13.1 sccm, 25.1 A is pretty much the same as 13.0 sccm, 25 A
### Use that value for the total pressure is pretty much the same as 13.0 sccm, 25 A
bcond = df.cathode == cat
bcond &= (df.massFlowRate_sccm == 13.1)
bcond &= ((df.dischargeCurrent == 25.0) | (df.dischargeCurrent == 25.1))
datacond = (df.cathode == cat)
datacond &= (df.massFlowRate_sccm == 13.0)
datacond &= (df.dischargeCurrent == 25.0)
averagePressure = df.at[df[datacond].index[0],'totalPressure']
df.loc[df[bcond].index,'totalPressure'] = averagePressure
elif (cat == 'AR3') or (cat == 'EK6'):
### Consider the emitter temperature to be that of the orifice
bcond = df.cathode == cat
df.loc[df[bcond].index,'insertTemperatureAverage'] = \
df.loc[df[bcond].index,'orificeTemperature']
return df
def generate_dataframe():
'''
Loads the CSV data for all the cathodes specified by name in cathodeList.
Stores the corresponding results in a dataframe.
Inputs: None
Outputs:
- df: the filled dataframe
'''
### Pandas representation of the list of files to load
df_Files = pd.DataFrame(fileDictionary,
columns = np.sort(list(fileDictionary)),
index = cathodeList)
### Pandas representation of the data that will be loaded
# Empty dataframe
df = np.empty(0, dtype=dtypes)
df = pd.DataFrame(df)
### Iterate through all the pressure files
root_folder = os.path.dirname(__file__)
root_folder = os.path.join(root_folder,'..')
for index, row in df_Files.iterrows():
if pd.isnull(row['folder']):
continue
else:
### Load data
df = load_single_cathode(df,row)
### Fill further geometry info
Lemitter, Lupstream = row['additionalGeometry']
df.loc[(df.cathode==row['cathode']),'insertLength'] = Lemitter
df.loc[(df.cathode==row['cathode']),'upstreamPressurePoint'] = Lupstream
df.loc[(df.cathode==row['cathode']),'reference'] = row['reference']
df.loc[(df.cathode==row['cathode']),'note'] = row['note']
df = populate_gas_info(df)
df = split_by_name(df)
df = calculate_electron_density_average(df)
df = convert_flow_rates(df)
### Deal with any special cases
for cat in cathodeList:
df = special_cases(df,cat)
return df
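# Hedged usage sketch:
# df = generate_dataframe()              # loads every cathode listed in cathodeList
# nexis = df[df.cathode == 'NEXIS']      # inspect a single cathode's rows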
def load_pressure_data(df,datafile,cathode):
df_from_csv = pd.read_csv(datafile,comment='#',delimiter=',')
import os
import pandas as pd
import requests
import mixpanel as mp
MIXPANEL_API_KEY = os.environ.get('MIXPANEL_API_KEY')
MIXPANEL_API_SECRET = os.environ.get('MIXPANEL_API_SECRET')
keys = (MIXPANEL_API_KEY, MIXPANEL_API_SECRET)
DATA_LOCATION = './data/ppe-responses.csv'
HOSPITALS_LOCATION = './data/hospital_locations.csv'
FINAL_LOCATION = './data/ppe-merged-responses.csv'
START_DATE = '22 April, 2020'
def main():
remote_df = mp.read_events(
keys,
events='ppe-survey-1',
start=START_DATE,
exclude_mp=False,
columns=['time', 'mp_country_code', 'hospital', 'sufficient-supply', 'distinct_id']
)
remote_df.to_csv(DATA_LOCATION)
local_df = pd.read_csv(DATA_LOCATION,
index_col='distinct_id',
parse_dates=['time']
)
gb_df = local_df[local_df["mp_country_code"] == 'GB'][6:]
gb_df['year'] = pd.DatetimeIndex(gb_df['time']).year
gb_df['month'] = pd.DatetimeIndex(gb_df['time']).month
gb_df['day'] = pd.DatetimeIndex(gb_df['time']).day
#%%
import os
from pyteomics import mzid, mzml
import pandas as pd
import numpy as np
import glob
"""
Just as we did with the training data set, we randomly divided the test files into different
folders, then generated a data frame for each folder and stored all of them in one single hdf
file as our validation data set.
"""
#%%
os.chdir('./test')
mzid_files=glob.glob('*.mzid')
indexed_mzid = mzid.chain.from_iterable(mzid_files,use_index=True)
def _parse_mzid_entry(entry):
spectrum_id = str(entry['spectrumID'])
seq = str(entry['SpectrumIdentificationItem'][0]['PeptideSequence'])
try:
mods = entry['SpectrumIdentificationItem'][0]['Modification']
except:
mods = None
rank = int(entry['SpectrumIdentificationItem'][0]['rank'])
file_location = str(entry['name'])
return file_location,spectrum_id,seq,mods,rank
all_mzid = []
for entry in(indexed_mzid):
all_mzid.append(_parse_mzid_entry(entry))
file_location,spectrum_ids,seq,mods,rank = zip(*all_mzid)
mzid_df = pd.DataFrame({'file':file_location,'id':spectrum_ids,'seq':seq})
def _parse_mzml_entry(entry):
ID = str(entry['id'])
mz = np.array(entry['m/z array'])
intensities = np.array(entry['intensity array'])
return ID, mz, intensities
all_spectra = []
for file in np.unique(file_location):
print(file)
indexed = mzml.MzML(file)
for i,entry in enumerate(indexed.map(_parse_mzml_entry)):
tupl = (file,)+entry
all_spectra.append(tupl)
mzml_location, ids, mz, intensities = zip(*all_spectra)
spectra_df = pd.DataFrame({'file':mzml_location,'id':ids,'mz':mz,'intensities':intensities})
#### MERGE: mzid + mzml
merged_df = pd.merge(mzid_df,spectra_df,how='left',on=['file','id'])
merged_df = merged_df[['id','seq','mz','intensities']]
#%%
hdf_test = pd.HDFStore('/home/ubuntu/data/jiahao/files/test.hdf5', mode='w')
#%%
hdf_test.put(value=merged_df, key="df")
#%%
os.chdir('./test_1')
mzid_files_1 = glob.glob('*.mzid')
indexed_mzid_1 = mzid.chain.from_iterable(mzid_files_1,use_index=True)
def _parse_mzid_entry(entry):
spectrum_id = str(entry['spectrumID'])
seq = str(entry['SpectrumIdentificationItem'][0]['PeptideSequence'])
try:
mods = entry['SpectrumIdentificationItem'][0]['Modification']
except:
mods = None
rank = int(entry['SpectrumIdentificationItem'][0]['rank'])
file_location = str(entry['name'])
return file_location,spectrum_id,seq,mods,rank
all_mzid_1 = []
for entry in(indexed_mzid_1):
all_mzid_1.append(_parse_mzid_entry(entry))
file_location,spectrum_ids,seq,mods,rank = zip(*all_mzid_1)
mzid_df_1 = pd.DataFrame({'file':file_location,'id':spectrum_ids,'seq':seq})
def _parse_mzml_entry(entry):
ID = str(entry['id'])
mz = np.array(entry['m/z array'])
intensities = np.array(entry['intensity array'])
return ID, mz, intensities
all_spectra_1 = []
for file in np.unique(file_location):
print(file)
indexed = mzml.MzML(file)
for i,entry in enumerate(indexed.map(_parse_mzml_entry)):
tupl = (file,)+entry
all_spectra_1.append(tupl)
mzml_location, ids, mz, intensities = zip(*all_spectra_1)
spectra_df_1 = pd.DataFrame({'file':mzml_location,'id':ids,'mz':mz,'intensities':intensities})
#### MERGE: mzid + mzml
merged_df_1 = pd.merge(mzid_df_1,spectra_df_1,how='left',on=['file','id'])
merged_df_1 = merged_df_1[['id','seq','mz','intensities']]
#%%
hdf_test.put(value=merged_df_1, key="df1")
#%%
os.chdir('./test_2')
mzid_files_2 = glob.glob('*.mzid')
indexed_mzid_2 = mzid.chain.from_iterable(mzid_files_2,use_index=True)
def _parse_mzid_entry(entry):
spectrum_id = str(entry['spectrumID'])
seq = str(entry['SpectrumIdentificationItem'][0]['PeptideSequence'])
try:
mods = entry['SpectrumIdentificationItem'][0]['Modification']
except:
mods = None
rank = int(entry['SpectrumIdentificationItem'][0]['rank'])
file_location = str(entry['name'])
return file_location,spectrum_id,seq,mods,rank
all_mzid_2 = []
for entry in(indexed_mzid_2):
all_mzid_2.append(_parse_mzid_entry(entry))
file_location,spectrum_ids,seq,mods,rank = zip(*all_mzid_2)
mzid_df_2 = pd.DataFrame({'file':file_location,'id':spectrum_ids,'seq':seq})
def _parse_mzml_entry(entry):
ID = str(entry['id'])
mz = np.array(entry['m/z array'])
intensities = np.array(entry['intensity array'])
return ID, mz, intensities
all_spectra_2 = []
for file in np.unique(file_location):
print(file)
indexed = mzml.MzML(file)
for i,entry in enumerate(indexed.map(_parse_mzml_entry)):
tupl = (file,)+entry
all_spectra_2.append(tupl)
mzml_location, ids, mz, intensities = zip(*all_spectra_2)
spectra_df_2 = pd.DataFrame({'file':mzml_location,'id':ids,'mz':mz,'intensities':intensities})
#### MERGE: mzid + mzml
merged_df_2 = pd.merge(mzid_df_2,spectra_df_2,how='left',on=['file','id'])
merged_df_2 = merged_df_2[['id','seq','mz','intensities']]
#%%
hdf_test.put(value=merged_df_2, key="df2")
#%%
os.chdir('./test_4')
mzid_files_4 = glob.glob('*.mzid')
indexed_mzid_4 = mzid.chain.from_iterable(mzid_files_4,use_index=True)
def _parse_mzid_entry(entry):
spectrum_id = str(entry['spectrumID'])
seq = str(entry['SpectrumIdentificationItem'][0]['PeptideSequence'])
try:
mods = entry['SpectrumIdentificationItem'][0]['Modification']
except:
mods = None
rank = int(entry['SpectrumIdentificationItem'][0]['rank'])
file_location = str(entry['name'])
return file_location,spectrum_id,seq,mods,rank
all_mzid_4 = []
for entry in(indexed_mzid_4):
all_mzid_4.append(_parse_mzid_entry(entry))
file_location,spectrum_ids,seq,mods,rank = zip(*all_mzid_4)
mzid_df_4 = pd.DataFrame({'file':file_location,'id':spectrum_ids,'seq':seq})
def _parse_mzml_entry(entry):
ID = str(entry['id'])
mz = np.array(entry['m/z array'])
intensities = np.array(entry['intensity array'])
return ID, mz, intensities
all_spectra_4 = []
for file in np.unique(file_location):
print(file)
indexed = mzml.MzML(file)
for i,entry in enumerate(indexed.map(_parse_mzml_entry)):
tupl = (file,)+entry
all_spectra_4.append(tupl)
mzml_location, ids, mz, intensities = zip(*all_spectra_4)
spectra_df_4 = pd.DataFrame({'file':mzml_location,'id':ids,'mz':mz,'intensities':intensities})
#### MERGE: mzid + mzml
merged_df_4 = pd.merge(mzid_df_4,spectra_df_4,how='left',on=['file','id'])
merged_df_4 = merged_df_4[['id','seq','mz','intensities']]
#%%
hdf_test.put(value=merged_df_4, key="df4")
#%%
os.chdir('./test_5')
mzid_files_5 = glob.glob('*.mzid')
indexed_mzid_5 = mzid.chain.from_iterable(mzid_files_5,use_index=True)
def _parse_mzid_entry(entry):
spectrum_id = str(entry['spectrumID'])
seq = str(entry['SpectrumIdentificationItem'][0]['PeptideSequence'])
try:
mods = entry['SpectrumIdentificationItem'][0]['Modification']
except:
mods = None
rank = int(entry['SpectrumIdentificationItem'][0]['rank'])
file_location = str(entry['name'])
return file_location,spectrum_id,seq,mods,rank
all_mzid_5 = []
for entry in(indexed_mzid_5):
all_mzid_5.append(_parse_mzid_entry(entry))
file_location,spectrum_ids,seq,mods,rank = zip(*all_mzid_5)
mzid_df_5 = pd.DataFrame({'file':file_location,'id':spectrum_ids,'seq':seq})
def _parse_mzml_entry(entry):
ID = str(entry['id'])
mz = np.array(entry['m/z array'])
intensities = np.array(entry['intensity array'])
return ID, mz, intensities
all_spectra_5 = []
for file in np.unique(file_location):
print(file)
indexed = mzml.MzML(file)
for i,entry in enumerate(indexed.map(_parse_mzml_entry)):
tupl = (file,)+entry
all_spectra_5.append(tupl)
mzml_location, ids, mz, intensities = zip(*all_spectra_5)
spectra_df_5 = pd.DataFrame({'file':mzml_location,'id':ids,'mz':mz,'intensities':intensities})
#### MERGE: mzid + mzml
merged_df_5 = pd.merge(mzid_df_5,spectra_df_5,how='left',on=['file','id'])
merged_df_5 = merged_df_5[['id','seq','mz','intensities']]
#%%
hdf_test.put(value=merged_df_5, key="df5")
#%%
os.chdir('./test_6')
mzid_files_6 = glob.glob('*.mzid')
indexed_mzid_6 = mzid.chain.from_iterable(mzid_files_6,use_index=True)
def _parse_mzid_entry(entry):
spectrum_id = str(entry['spectrumID'])
seq = str(entry['SpectrumIdentificationItem'][0]['PeptideSequence'])
try:
mods = entry['SpectrumIdentificationItem'][0]['Modification']
except:
mods = None
rank = int(entry['SpectrumIdentificationItem'][0]['rank'])
file_location = str(entry['name'])
return file_location,spectrum_id,seq,mods,rank
all_mzid_6 = []
for entry in(indexed_mzid_6):
all_mzid_6.append(_parse_mzid_entry(entry))
file_location,spectrum_ids,seq,mods,rank = zip(*all_mzid_6)
mzid_df_6 = pd.DataFrame({'file':file_location,'id':spectrum_ids,'seq':seq})
def _parse_mzml_entry(entry):
ID = str(entry['id'])
mz = np.array(entry['m/z array'])
intensities = np.array(entry['intensity array'])
return ID, mz, intensities
all_spectra_6 = []
for file in np.unique(file_location):
print(file)
indexed = mzml.MzML(file)
for i,entry in enumerate(indexed.map(_parse_mzml_entry)):
tupl = (file,)+entry
all_spectra_6.append(tupl)
mzml_location, ids, mz, intensities = zip(*all_spectra_6)
spectra_df_6 = pd.DataFrame({'file':mzml_location,'id':ids,'mz':mz,'intensities':intensities})
#### MERGE: mzid + mzml
merged_df_6 = pd.merge(mzid_df_6,spectra_df_6,how='left',on=['file','id'])
merged_df_6 = merged_df_6[['id','seq','mz','intensities']]
#%%
hdf_test.put(value=merged_df_6, key="df6")
# %%
os.chdir('./test_7')
mzid_files_7 = glob.glob('*.mzid')
indexed_mzid_7 = mzid.chain.from_iterable(mzid_files_7,use_index=True)
def _parse_mzid_entry(entry):
spectrum_id = str(entry['spectrumID'])
seq = str(entry['SpectrumIdentificationItem'][0]['PeptideSequence'])
try:
mods = entry['SpectrumIdentificationItem'][0]['Modification']
except:
mods = None
rank = int(entry['SpectrumIdentificationItem'][0]['rank'])
file_location = str(entry['name'])
return file_location,spectrum_id,seq,mods,rank
all_mzid_7 = []
for entry in(indexed_mzid_7):
all_mzid_7.append(_parse_mzid_entry(entry))
file_location,spectrum_ids,seq,mods,rank = zip(*all_mzid_7)
mzid_df_7 = pd.DataFrame({'file':file_location,'id':spectrum_ids,'seq':seq})
def _parse_mzml_entry(entry):
ID = str(entry['id'])
mz = np.array(entry['m/z array'])
intensities = np.array(entry['intensity array'])
return ID, mz, intensities
all_spectra_7 = []
for file in np.unique(file_location):
print(file)
indexed = mzml.MzML(file)
for i,entry in enumerate(indexed.map(_parse_mzml_entry)):
tupl = (file,)+entry
all_spectra_7.append(tupl)
mzml_location, ids, mz, intensities = zip(*all_spectra_7)
spectra_df_7 = | pd.DataFrame({'file':mzml_location,'id':ids,'mz':mz,'intensities':intensities}) | pandas.DataFrame |
import glob, pandas as pd, time, datetime
| pd.set_option('mode.chained_assignment', None) | pandas.set_option |
"""
data_curation_functions.py
Extract Kevin's functions for curation of public datasets
Modify them to match Jonathan's curation methods in notebook
01/30/2020
"""
import os
import sys
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from matplotlib_venn import venn3
import seaborn as sns
import pdb
from atomsci.ddm.utils.struct_utils import base_smiles_from_smiles
import atomsci.ddm.utils.datastore_functions as dsf
#from atomsci.ddm.utils import datastore_functions as dsf
from atomsci.ddm.utils import curate_data as curate
import atomsci.ddm.utils.struct_utils as struct_utils
import atomsci.ddm.utils.curate_data as curate_data, imp
def set_data_root(dir):
global data_root, data_dirs
data_root = dir
#data_dirs = dict(ChEMBL = '%s/ChEMBL' % data_root, DTC = '%s/DTC' % data_root,
# Excape = '%s/Excape' % data_root)
data_dirs = dict(DTC = '%s/dtc' % data_root,
Excape = '%s/excape' % data_root)
log_var_map = {
'IC50': 'pIC50',
'AC50': 'pIC50',
'Solubility': 'logSolubility',
'CL': 'logCL'
}
pub_dsets = dict(
CYP2D6 = dict(IC50='cyp2d6'),
CYP3A4 = dict(IC50='cyp3a4'),
JAK1 = dict(IC50="jak1"),
JAK2 = dict(IC50="jak2"),
JAK3 = dict(IC50="jak3"),
)
# ----------------------------------------------------------------------------------------------------------------------
# Generic functions for all datasets
# ----------------------------------------------------------------------------------------------------------------------
# Note: Functions freq_table and labeled_freq_table have been moved to ddm.utils.curate_data module.
# ----------------------------------------------------------------------------------------------------------------------
def standardize_relations(dset_df, db='DTC'):
"""
Standardize the censoring operators to =, < or >, and remove any rows whose operators
don't map to a standard one.
"""
relation_cols = dict(ChEMBL='Standard Relation', DTC='standard_relation')
rel_col = relation_cols[db]
dset_df[rel_col].fillna('=', inplace=True)
ops = dset_df[rel_col].values
if db == 'ChEMBL':
# Remove annoying quotes around operators
ops = [op.lstrip("'").rstrip("'") for op in ops]
op_dict = {
">": ">",
">=": ">",
"<": "<",
"<=": "<",
"=": "="
}
ops = np.array([op_dict.get(op, "@") for op in ops])
dset_df[rel_col] = ops
dset_df = dset_df[dset_df[rel_col] != "@"]
return dset_df
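# Illustrative usage sketch with toy data (hypothetical rows, not a real DTC extract):
# '>=' collapses to '>', a missing relation defaults to '=', and an unmappable operator
# such as '~' drops the row.
def _example_standardize_relations():
    toy = pd.DataFrame({'standard_relation': ['>=', '~', None],
                        'standard_value': [100.0, 50.0, 10.0]})
    return standardize_relations(toy, db='DTC')  # keeps only the '>' and '=' rows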
# ----------------------------------------------------------------------------------------------------------------------
# DTC-specific curation functions
# ----------------------------------------------------------------------------------------------------------------------
"""
Upload a raw dataset to the datastore from the given file path.
Returns the datastore OID of the uploaded dataset.
"""
def upload_file_dtc_raw_data(dset_name, title, description, tags,
functional_area,
target, target_type, activity, assay_category,file_path,
data_origin='journal', species='human',
force_update=False):
bucket = 'public'
filename = '%s.csv' % dset_name
dataset_key = 'dskey_' + filename
kv = { 'file_category': 'experimental',
'activity': activity,
'assay_category':assay_category,
'assay_endpoint' : 'multiple values',
'curation_level': 'raw',
'data_origin' : data_origin,
'functional_area' : functional_area,
'matrix' : 'multiple values',
'journal_doi' : 'https://doi.org/10.1016/j.chembiol.2017.11.009',
'sample_type' : 'in_vitro',
'species' : species,
'target' : target,
'target_type' : target_type,
'id_col' : 'compound_id'
}
#uploaded_file = dsf.upload_file_to_DS(bucket=bucket, filepath=file_path, filename=filename, title = title, description=description, tags=tags, key_values=kv, client=None, dataset_key=dataset_key, override_check=False, return_metadata=True)
ds_client = dsf.config_client()
if force_update or not dsf.dataset_key_exists(dataset_key, bucket, ds_client):
#uploaded_file = dsf.upload_df_to_DS(dset_df, bucket, filename=filename, title=title,
# description=description,
# tags=tags, key_values=kv, client=None, dataset_key=dataset_key,
# override_check=True, return_metadata=True)
uploaded_file = dsf.upload_file_to_DS(bucket=bucket, filepath=file_path, filename=filename, title = title, description=description, tags=tags, key_values=kv, client=None, dataset_key=dataset_key, override_check=False, return_metadata=True)
print("Uploaded raw dataset with key %s" % dataset_key)
else:
uploaded_file = dsf.retrieve_dataset_by_datasetkey(dataset_key, bucket, ds_client, return_metadata=True)
print("Raw dataset %s is already in datastore, skipping upload." % dataset_key)
raw_dset_oid = uploaded_file['dataset_oid']
return raw_dset_oid
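# Hypothetical call sketch: every argument value below is a placeholder, and running it for
# real requires a configured datastore client (dsf.config_client()) plus an existing CSV file.
def _example_upload_call():
    return upload_file_dtc_raw_data(
        'jak2_dtc_raw', title='JAK2 raw DTC bioactivity data',
        description='Raw IC50 records pulled from Drug Target Commons',
        tags=['public', 'raw'], functional_area='target_selectivity',
        target='JAK2', target_type='enzyme', activity='inhibition',
        assay_category='binding', file_path='/path/to/jak2_dtc.csv')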
'''
# ----------------------------------------------------------------------------------------------------------------------
def get_dtc_jak_smiles():
"""
Use PubChem REST API to download SMILES strings for InChi strings in DTC JAK123 data table
"""
jak_file = "%s/jak123_dtc.csv" % data_dirs['DTC']
dset_df = pd.read_csv(jak_file, index_col=False)
jak_dtc_df = jak_dtc_df[~jak_dtc_df.standard_inchi_key.isna()]
inchi_keys = sorted(set(jak_dtc_df.standard_inchi_key.values))
smiles_df, fail_list, discard_list = pu.download_smiles(inchi_keys)
smiles_df.to_csv('%s/jak123_inchi_smiles.csv' % data_dirs['DTC'], index=False)
# ----------------------------------------------------------------------------------------------------------------------
'''
def filter_dtc_data(orig_df,geneNames):
"""
Extract JAK1, 2 and 3 datasets from Drug Target Commons database, filtered for data usability.
"""
# filter criteria:
# gene_names == JAK1 | JAK2 | JAK3
# InChi key not missing
# standard_type IC50
# units NM
# standard_relation mappable to =, < or >
# wildtype_or_mutant != 'mutated'
# valid SMILES
# maps to valid RDKit base SMILES
# standard_value not missing
# pIC50 > 3
#--------------------------------------------------
# Filter dataset on existing columns
dset_df = orig_df[orig_df.gene_names.isin(geneNames) &
~(orig_df.standard_inchi_key.isna()) &
(orig_df.standard_type == 'IC50') &
(orig_df.standard_units == 'NM') &
~orig_df.standard_value.isna() &
~orig_df.compound_id.isna() &
(orig_df.wildtype_or_mutant != 'mutated') ]
return dset_df
def ic50topic50(x) :
return -np.log10((x/1000000000.0))
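# Worked example for the unit conversion above: an IC50 of 1000 nM (1 uM) corresponds to a
# pIC50 of 6, because -log10(1000/1e9) = -log10(1e-6) = 6.
def _check_ic50topic50():
    assert abs(ic50topic50(1000.0) - 6.0) < 1e-9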
def down_select(df,kv_lst) :
for k,v in kv_lst :
df=df[df[k]==v]
return df
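# Minimal illustration of down_select() on toy data (hypothetical rows, not the DTC table):
# each (column, value) pair is applied as an equality filter in sequence.
def _example_down_select():
    toy = pd.DataFrame({'gene_names': ['JAK1', 'JAK2'], 'standard_type': ['IC50', 'IC50']})
    return down_select(toy, [('gene_names', 'JAK1'), ('standard_type', 'IC50')])  # one JAK1 row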
def get_smiles_dtc_data(nm_df,targ_lst,save_smiles_df):
save_df={}
for targ in targ_lst :
        lst1= [ ('gene_names',targ),('standard_type','IC50'),('standard_relation','=') ]  # exact ('=') measurements only
        lst1_tmp= [ ('gene_names',targ),('standard_type','IC50')]  # also keeps censored ('<', '>') records
jak1_df=down_select(nm_df,lst1)
jak1_df_tmp=down_select(nm_df,lst1_tmp)
print(targ,"distinct compounds = only",jak1_df['standard_inchi_key'].nunique())
print(targ,"distinct compounds <,>,=",jak1_df_tmp['standard_inchi_key'].nunique())
#to ignore censored data
#save_df[targ]=jak1_df
#to include censored data
save_df[targ]=jak1_df_tmp
prev_targ=targ_lst[0]
shared_inchi_keys=save_df[prev_targ]['standard_inchi_key']
for it in range(1,len(targ_lst),1) :
curr_targ=targ_lst[it]
df=save_df[curr_targ]
shared_inchi_keys=df[df['standard_inchi_key'].isin(shared_inchi_keys)]['standard_inchi_key']
print("num shared compounds",shared_inchi_keys.nunique())
lst=[]
for targ in targ_lst :
df=save_df[targ]
#print(aurka_df.shape,aurkb_df.shape, shared_inchi_keys.shape)
lst.append(df[df['standard_inchi_key'].isin(shared_inchi_keys)])
shared_df= | pd.concat(lst) | pandas.concat |
import requests
import json
import traceback
import sqlite3
import server.app.decode_fbs as decode_fbs
import scanpy as sc
import anndata as ad
import pandas as pd
import numpy as np
import diffxpy.api as de
import matplotlib
matplotlib.use('Agg')
from matplotlib import pyplot as plt
import seaborn as sns
import matplotlib.patches as mpatches
from matplotlib import rcParams
import plotly.graph_objects as go
import plotly.io as plotIO
import base64
import math
from io import BytesIO
import sys
import time
import os
import re
import glob
import subprocess
strExePath = os.path.dirname(os.path.abspath(__file__))
import pprint
ppr = pprint.PrettyPrinter(depth=6)
import server.compute.diffexp_generic as diffDefault
import pickle
from pyarrow import feather
sys.setrecursionlimit(10000)
sc.settings.verbosity = 2
rcParams.update({'figure.autolayout': True})
api_version = "/api/v0.2"
import threading
jobLock = threading.Lock()
def getLock(lock):
while not lock.acquire():
time.sleep(1.0)
def freeLock(lock):
lock.release()
def route(data,appConfig):
#ppr.pprint("current working dir:%s"%os.getcwd())
data = initialization(data,appConfig)
#ppr.pprint(data)
try:
getLock(jobLock)
taskRes = distributeTask(data["method"])(data)
freeLock(jobLock)
return taskRes
except Exception as e:
freeLock(jobLock)
return 'ERROR @server: '+traceback.format_exc() # 'ERROR @server: {}, {}'.format(type(e),str(e))
#return distributeTask(data["method"])(data)
import server.app.app as app
def initialization(data,appConfig):
# obtain the server host information
data = json.loads(str(data,encoding='utf-8'))
# update the environment information
data.update(VIPenv)
# updatting the hosting data information
if appConfig.is_multi_dataset():
data["url_dataroot"]=appConfig.server_config.multi_dataset__dataroot['d']['base_url']
data['h5ad']=os.path.join(appConfig.server_config.multi_dataset__dataroot['d']['dataroot'], data["dataset"])
else:
data["url_dataroot"]=None
data["dataset"]=None
data['h5ad']=appConfig.server_config.single_dataset__datapath
# setting the plotting options
if 'figOpt' in data.keys():
setFigureOpt(data['figOpt'])
# get the var (gene) and obv index
with app.get_data_adaptor(url_dataroot=data['url_dataroot'],dataset=data['dataset']) as scD:
data['obs_index'] = scD.get_schema()["annotations"]["obs"]["index"]
data['var_index'] = scD.get_schema()["annotations"]["var"]["index"]
return data
def setFigureOpt(opt):
sc.set_figure_params(dpi_save=int(opt['dpi']),fontsize= float(opt['fontsize']),vector_friendly=(opt['vectorFriendly'] == 'Yes'),transparent=(opt['transparent'] == 'Yes'),color_map=opt['colorMap'])
rcParams.update({'savefig.format':opt['img']})
def getObs(data):
selC = list(data['cells'].values())
cNames = ["cell%d" %i for i in selC]
## obtain the category annotation
with app.get_data_adaptor(url_dataroot=data['url_dataroot'],dataset=data['dataset']) as scD:
selAnno = [data['obs_index']]+data['grp']
dAnno = list(scD.get_obs_keys())
anno = []
sel = list(set(selAnno)&set(dAnno))
if len(sel)>0:
tmp = scD.data.obs.loc[selC,sel].astype('str')
tmp.index = cNames
anno += [tmp]
sel = list(set(selAnno)-set(dAnno))
if len(sel)>0:
annotations = scD.dataset_config.user_annotations
if annotations:
labels = annotations.read_labels(scD)
tmp = labels.loc[list(scD.data.obs.loc[selC,data['obs_index']]),sel]
tmp.index = cNames
anno += [tmp]
obs = pd.concat(anno,axis=1)
#ppr.pprint(obs)
## update the annotation Abbreviation
combUpdate = cleanAbbr(data)
if 'abb' in data.keys():
for i in data['grp']:
obs[i] = obs[i].map(data['abb'][i])
return combUpdate, obs
def getObsNum(data):
selC = list(data['cells'].values())
cNames = ["cell%d" %i for i in selC]
## obtain the category annotation
obs = pd.DataFrame()
with app.get_data_adaptor(url_dataroot=data['url_dataroot'],dataset=data['dataset']) as scD:
selAnno = data['grpNum']
dAnno = list(scD.get_obs_keys())
sel = list(set(selAnno)&set(dAnno))
if len(sel)>0:
obs = scD.data.obs.loc[selC,sel]
obs.index = cNames
return obs
def getVar(data):
## obtain the gene annotation
with app.get_data_adaptor(url_dataroot=data['url_dataroot'],dataset=data['dataset']) as scD:
gInfo = scD.data.var
gInfo.index = list(gInfo[data['var_index']])
gInfo = gInfo.drop([data['var_index']],axis=1)
return gInfo
def collapseGeneSet(data,expr,gNames,cNames,fSparse):
Y = expr
if 'geneGrpColl' in data.keys() and not data['geneGrpColl']=='No' and 'geneGrp' in data.keys() and len(data['geneGrp'])>0:
data['grpLoc'] = []
data['grpID'] = []
if fSparse:
Y = pd.DataFrame.sparse.from_spmatrix(Y,columns=gNames,index=cNames)
for aN in data['geneGrp'].keys():
if data['geneGrpColl']=='mean':
Y = pd.concat([Y,Y[data['geneGrp'][aN]].mean(axis=1).rename(aN)],axis=1,sort=False)
if data['geneGrpColl']=='median':
Y = pd.concat([Y,Y[data['geneGrp'][aN]].median(axis=1).rename(aN)],axis=1,sort=False)
for gene in data['geneGrp'][aN]:
if gene in data['genes']:
data['genes'].remove(gene)
data['genes'] += [aN]
gNames = list(Y.columns)
return Y,gNames
def createData(data):
selC = list(data['cells'].values())
cNames = ["cell%d" %i for i in selC]
## onbtain the expression matrix
gNames = []
expr = []
fSparse = False
X = []
if 'genes' in data.keys():
with app.get_data_adaptor(url_dataroot=data['url_dataroot'],dataset=data['dataset']) as scD:
if not type(scD.data.X) is np.ndarray:
fSparse = True
if len(data['genes'])>0:
fullG = list(scD.data.var[data['var_index']])
selG = sorted([fullG.index(i) for i in data['genes']]) #when data loaded backed, incremental is required
X = scD.data.X[:,selG]
gNames = [fullG[i] for i in selG] #data['genes']
else:
X = scD.data.X
gNames = list(scD.data.var[data['var_index']])
if 'figOpt' in data.keys() and data['figOpt']['scale'] == 'Yes':
X = sc.pp.scale(X,zero_center=(data['figOpt']['scaleZero'] == 'Yes'),max_value=(float(data['figOpt']['scaleMax']) if data['figOpt']['clipValue']=='Yes' else None))
X = X[selC]
if fSparse:
expr = X
else:
expr = pd.DataFrame(X,columns=gNames,index=cNames)
expr,gNames = collapseGeneSet(data,expr,gNames,cNames,fSparse)
#ppr.pprint("finished expression ...")
## obtain the embedding
embed = {}
if 'layout' in data.keys():
layout = data['layout']
if isinstance(layout,str):
layout = [layout]
if len(layout)>0:
for one in layout:
with app.get_data_adaptor(url_dataroot=data['url_dataroot'],dataset=data['dataset']) as scD:
embed['X_%s'%one] = pd.DataFrame(scD.data.obsm['X_%s'%one][selC][:,[0,1]],columns=['%s1'%one,'%s2'%one],index=cNames)
#ppr.pprint("finished layout ...")
## obtain the category annotation
combUpdate, obs = getObs(data)
## create a custom annotation category and remove cells which are not in the selected annotation
if combUpdate and len(data['grp'])>1:
newGrp = 'Custom_combine'
combineGrp = list(data['combine'].keys());
obs[newGrp] = obs[combineGrp[0]]
for i in combineGrp:
if not i==combineGrp[0]:
obs[newGrp] += ":"+obs[i]
selC = ~obs[newGrp].str.contains("Other").to_numpy()
expr = expr[selC]
for i in embed.keys():
embed[i] = embed[i][selC]
obs = obs[selC].astype('category')
obs[newGrp].cat.set_categories(data['combineOrder'],inplace=True)
data['grp'] = [newGrp]
obs = obs.astype('category')
## empty selection
if expr.shape[0]==0 or expr.shape[1]==0:
return []
#ppr.pprint("finished obv ...")
return sc.AnnData(expr,obs,var=pd.DataFrame([],index=gNames),obsm={layout:embed[layout].to_numpy() for layout in embed.keys()})
def cleanAbbr(data):
updated = False
if 'abb' in data.keys() and 'combine' in data.keys():
if len(data['combine'])>0:
updated = True
for cate in data['abb'].keys():
if cate in data['combine'].keys():
for anName in data['abb'][cate].keys():
if not anName in data['combine'][cate]:
data['abb'][cate][anName] = "Other";
else:
if not data['abb'][cate][anName]==anName:
data['combineOrder'] = [one.replace(anName,data['abb'][cate][anName]) for one in data['combineOrder']]
else:
data['abb'][cate] = {key:"Other" for key in data['abb'][cate].keys()}
return updated
def errorTask(data):
raise ValueError('Error task!')
def distributeTask(aTask):
return {
'SGV':SGV,
'SGVcompare':SGVcompare,
'PGV':PGV,
'VIOdata':VIOdata,
'HEATplot':pHeatmap,
'HEATdata':HeatData,
'GD':GD,
'DEG':DEG,
'DOT':DOT,
'EMBED':EMBED,
'TRAK':TRACK,
'DUAL':DUAL,
'MARK': MARK,
'MINX':MINX,
'DENS':DENS,
'DENS2D':DENS2D,
'SANK':SANK,
'STACBAR':STACBAR,
'HELLO':HELLO,
'CLI':CLI,
'preDEGname':getPreDEGname,
'preDEGvolcano':getPreDEGvolcano,
'preDEGmulti':getPreDEGbubble,
'mergeMeta': mergeMeta,
'isMeta': isMeta,
'testVIPready':testVIPready,
'Description':getDesp,
'GSEAgs':getGSEA,
'SPATIAL':SPATIAL,
'saveTest':saveTest,
'getBWinfo':getBWinfo,
'plotBW':plotBW
}.get(aTask,errorTask)
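# Small sanity sketch of the dispatch table above: known task names resolve to their handler,
# and anything unknown falls back to errorTask, which raises ValueError when invoked.
def _example_distributeTask():
    assert distributeTask('HELLO') is HELLO
    assert distributeTask('NO_SUCH_TASK') is errorTask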
def HELLO(data):
return 'Hi, connected.'
def iostreamFig(fig):
#getLock(iosLock)
figD = BytesIO()
#ppr.pprint('io located at %d'%int(str(figD).split(" ")[3].replace(">",""),0))
fig.savefig(figD,bbox_inches="tight")
#ppr.pprint(sys.getsizeof(figD))
#ppr.pprint('io located at %d'%int(str(figD).split(" ")[3].replace(">",""),0))
imgD = base64.encodebytes(figD.getvalue()).decode("utf-8")
figD.close()
#ppr.pprint("saved Fig")
#freeLock(iosLock)
if 'matplotlib' in str(type(fig)):
plt.close(fig)#'all'
return imgD
def Msg(msg):
fig = plt.figure(figsize=(5,2))
plt.text(0,0.5,msg)
ax = plt.gca()
ax.axis('off')
return iostreamFig(fig)
def SPATIAL(data):
with app.get_data_adaptor(url_dataroot=data['url_dataroot'],dataset=data['dataset']) as scD:
#ppr.pprint(vars(scD.data.uns["spatial"]))
spatial=scD.data.uns["spatial"]
if (data['embedding'] == "get_spatial_list"):
return json.dumps({'list':list(spatial)})
library_id=list(spatial)[0]
if (data['embedding'] in list(spatial)):
library_id=data['embedding']
height, width, depth = spatial[library_id]["images"][data['resolution']].shape
embedding = 'X_'+data['embedding']
spatialxy = scD.data.obsm[embedding]
tissue_scalef = spatial[library_id]['scalefactors']['tissue_' + data['resolution'] + '_scalef']
i = data['spots']['spoti_i']
x = 0
y = 1
# from original embedding to (0,1) coordinate system (cellxgene embedding)
scalex = (data['spots']['spot0_x'] - data['spots']['spoti_x']) / (spatialxy[0][x] - spatialxy[i][x])
scaley = (data['spots']['spot0_y'] - data['spots']['spoti_y']) / (spatialxy[0][y] - spatialxy[i][y])
# image is in (-1,0,1) coordinate system, so multiplied by 2
translatex = (spatialxy[i][x]*scalex - data['spots']['spoti_x']) * 2
translatey = (spatialxy[i][y]*scaley - data['spots']['spoti_y']) * 2
scale = 1/tissue_scalef * scalex * 2
# Addtional translate in Y due to flipping of the image if needed
ppr.pprint(scalex)
ppr.pprint(scaley)
ppr.pprint(translatex)
ppr.pprint(translatey)
# from (-1,0,1) (image layer) to (0,1) coordinate system (cellxgene embedding). Overlapping (0,0) origins of both.
translatex = -(1+translatex)
if (translatey > -0.1):
flip = True
translatey = -(1+translatey) + height*scale
else:
flip = False
translatey = -(1+translatey)
returnD = [{'translatex':translatex,'translatey':translatey,'scale':scale}]
dpi=100
figsize = width / float(dpi), height / float(dpi)
fig = plt.figure(figsize=figsize)
ax = fig.add_axes([0, 0, 1, 1])
ax.axis('off')
if (flip):
ax.imshow(np.flipud(spatial[library_id]["images"][data['resolution']]), interpolation='nearest')
else:
ax.imshow(spatial[library_id]["images"][data['resolution']], interpolation='nearest')
figD = BytesIO()
plt.savefig(figD, dpi=dpi)
ppr.pprint(sys.getsizeof(figD))
imgD = base64.encodebytes(figD.getvalue()).decode("utf-8")
figD.close()
plt.close(fig)
return json.dumps([returnD, imgD])
def MINX(data):
with app.get_data_adaptor(url_dataroot=data['url_dataroot'],dataset=data['dataset']) as scD:
minV = min(scD.data.X[0])
return '%.1f'%minV
def geneFiltering(adata,cutoff,opt):
## 1. remove cells if the max expression of all genes is lower than the cutoff
if opt==1:
#sT = time.time()
#ix = adata.to_df().apply(lambda x: max(x)>float(cutoff),axis=1)
#ppr.pprint(time.time()-sT)
#sT=time.time()
df = adata.to_df()
ix = df[df>float(cutoff)].count(axis=1)>0
#ppr.pprint(time.time()-sT)
#sT = time.time()
#ix = pd.DataFrame((adata.X>float(cutoff)).sum(1)>0,index=list(adata.obs.index)).iloc[:,0]
#ppr.pprint(time.time()-sT)
adata = adata[ix,]
## 2. Set all expression level smaller than the cutoff to be NaN not for plotting without removing any cells
elif opt==2:
        # A nested helper named 'cutoff' would shadow the numeric argument and make
        # float(cutoff) fail at call time, so capture the threshold under another name.
        threshold = float(cutoff)
        def apply_cutoff(x):
            return x if x > threshold else None
        X = adata.to_df()
        X = X.applymap(apply_cutoff)
adata = sc.AnnData(X,adata.obs)
return adata
def SGV(data):
# figure width and heights depends on number of unique categories
# characters of category names, gene number
#ppr.pprint("SGV: creating data ...")
adata = createData(data)
#ppr.pprint("SGV: data created ...")
adata = geneFiltering(adata,data['cutoff'],1)
if len(adata)==0:
raise ValueError('No cells in the condition!')
a = list(set(list(adata.obs[data['grp'][0]])))
ncharA = max([len(x) for x in a])
w = len(a)/4+1
h = ncharA/6+2.5
ro = math.acos(10/max([15,ncharA]))/math.pi*180
##
fig = plt.figure(figsize=[w,h])
sc.pl.violin(adata,data['genes'],groupby=data['grp'][0],ax=fig.gca(),show=False)
fig.autofmt_xdate(bottom=0.2,rotation=ro,ha='right')
return iostreamFig(fig)
def SGVcompare(data):
adata = createData(data)
#adata = geneFiltering(adata,data['cutoff'],1)
if len(adata)==0:
raise ValueError('No cells in the condition!')
# plot in R
strF = ('%s/SGV%f.csv' % (data["CLItmp"],time.time()))
X=pd.concat([adata.to_df(),adata.obs[data['grp']]],axis=1,sort=False)
X[X.iloc[:,0]>=float(data['cellCutoff'])].to_csv(strF,index=False)
strCMD = " ".join(["%s/Rscript"%data['Rpath'],strExePath+'/violin.R',strF,str(data['cutoff']),data['figOpt']['img'],str(data['figOpt']['fontsize']),str(data['figOpt']['dpi']),data['Rlib']])
#ppr.pprint(strCMD)
res = subprocess.run([strExePath+'/violin.R',strF,str(data['cutoff']),data['figOpt']['img'],str(data['figOpt']['fontsize']),str(data['figOpt']['dpi']),data['Rlib']],capture_output=True)#
img = res.stdout.decode('utf-8')
os.remove(strF)
if 'Error' in res.stderr.decode('utf-8'):
raise SyntaxError("in R: "+res.stderr.decode('utf-8'))
return img
def VIOdata(data):
adata = createData(data)
adata = geneFiltering(adata,data['cutoff'],1)
if len(adata)==0:
raise ValueError('No cells in the condition!')
return pd.concat([adata.to_df(),adata.obs], axis=1, sort=False).to_csv()
def unique(seq):
seen = set()
seen_add = seen.add
return [x for x in seq if not (x in seen or seen_add(x))]
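# unique() keeps the first occurrence of each element and preserves input order, which a
# plain set() would not guarantee; illustrative check:
def _example_unique():
    assert unique(['b', 'a', 'b', 'c']) == ['b', 'a', 'c']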
def updateGene(data):
grpID = []
grpLoc=[]
allG = []
if 'geneGrp' in data.keys():
for aN in data['geneGrp'].keys():
grpLoc += [(len(allG),len(allG)+len(data['geneGrp'][aN])-1)]
allG += data['geneGrp'][aN]
grpID += [aN]
data['genes'] = unique(allG+data['genes'])
data['grpLoc'] = grpLoc
data['grpID'] = grpID
def PGV(data):
# figure width and heights depends on number of unique categories
# characters of category names, gene number #pecam1 pdpn
updateGene(data)
#ppr.pprint("PGV: creating data ...")
adata = createData(data)
#ppr.pprint("PGV: data created ...")
adata = geneFiltering(adata,data['cutoff'],1)
if adata.shape[0]==0 or adata.shape[1]==0:
return Msg('No cells in the condition!')
a = list(set(list(adata.obs[data['grp'][0]])))
ncharA = max([len(x) for x in a])
w = max([3,ncharA/8])+len(data['genes'])/2+1.5
h = len(a)+0.5
swapAx = False
##
if data['by']=='Columns':
a = w
w = h
h = a
swapAx = True
if 'split_show' in data['figOpt']['scanpybranch']: #.dev140+ge9cbc5f
vp = sc.pl.stacked_violin(adata,data['genes'],groupby=data['grp'][0],return_fig=True,figsize=(w,h),swap_axes=swapAx,var_group_positions=data['grpLoc'],var_group_labels=data['grpID'])
vp.add_totals().style(yticklabels=True, cmap=data['color']).show()
#vp.add_totals().show()
fig = vp#plt.gcf()
else:
fig = plt.figure(figsize=[w,h])
axes = sc.pl.stacked_violin(adata,data['genes'],groupby=data['grp'][0],show=False,ax=fig.gca(),swap_axes=swapAx,
var_group_positions=data['grpLoc'],var_group_labels=data['grpID'])
return iostreamFig(fig)
def pHeatmap(data):
# figure width is depends on the number of categories was choose to show
# and the character length of each category term
# if the number of element in a category is smaller than 10, "Set1" or "Set3" is choosen
# if the number of element in a category is between 10 and 20, default is choosen
# if the number of element in a category is larger than 20, husl is choosen
#Xsep = createData(data,True)
#adata = sc.AnnData(Xsep['expr'],Xsep['obs'])
#sT = time.time()
adata = createData(data)
data['grp'] += data['addGrp']
#Xdata = pd.concat([adata.to_df(),adata.obs], axis=1, sort=False).to_csv()
#ppr.pprint('HEAT data reading cost %f seconds' % (time.time()-sT) )
#sT = time.time()
exprOrder = True
if data['order']!="Expression":
exprOrder = False;
adata = adata[adata.obs.sort_values(data['order']).index,]
#s = adata.obs[data['order']]
#ix = sorted(range(len(s)), key=lambda k: s[k])
#adata = adata[ix,]
colCounter = 0
colName =['Set1','Set3']
grpCol = list()
grpLegend = list()
grpWd = list()
grpLen = list()
h = 8
w = len(data['genes'])/3+0.3
for gID in data['grp']:
grp = adata.obs[gID]
Ugrp = grp.unique()
if len(Ugrp)<10:
lut = dict(zip(Ugrp,sns.color_palette(colName[colCounter%2],len(Ugrp)).as_hex()))
colCounter += 1
elif len(Ugrp)<20:
lut = dict(zip(Ugrp,sns.color_palette(n_colors=len(Ugrp)).as_hex()))
else:
lut = dict(zip(Ugrp,sns.color_palette("husl",len(Ugrp)).as_hex()))
grpCol.append(grp.map(lut))
grpLegend.append([mpatches.Patch(color=v,label=k) for k,v in lut.items()])
grpWd.append(max([len(x) for x in Ugrp]))#0.02*fW*max([len(x) for x in Ugrp])
grpLen.append(len(Ugrp)+2)
w += 2
Zscore=None
heatCol=data['color']
heatCenter=None
colTitle="Expression"
if data['norm']=='zscore':
Zscore=1
#heatCol="vlag"
heatCenter=0
colTitle="Z-score"
#ppr.pprint('HEAT data preparing cost %f seconds' % (time.time()-sT) )
#sT = time.time()
try:
g = sns.clustermap(adata.to_df(),
method="ward",row_cluster=exprOrder,z_score=Zscore,cmap=heatCol,center=heatCenter,
row_colors=pd.concat(grpCol,axis=1).astype('str'),yticklabels=False,xticklabels=True,
figsize=(w,h),colors_ratio=0.05,
cbar_pos=(.3, .95, .55, .02),
cbar_kws={"orientation": "horizontal","label": colTitle,"shrink": 0.5})
except Exception as e:
return 'ERROR: Z score calculation failed for 0 standard diviation. '+traceback.format_exc() # 'ERROR @server: {}, {}'.format(type(e),str(e))
#ppr.pprint('HEAT plotting cost %f seconds' % (time.time()-sT) )
#sT = time.time()
g.ax_col_dendrogram.set_visible(False)
#g.ax_row_dendrogram.set_visible(False)
plt.setp(g.ax_heatmap.xaxis.get_majorticklabels(), rotation=90)
grpW = [1.02]
grpH = [1.2]
cumulaN = 0
cumulaMax = 0
characterW=1/40 # a character is 1/40 of heatmap width
characterH=1/40 # a character is 1/40 of heatmap height
for i in sorted(range(len(grpLen)),key=lambda k:grpLen[k]):#range(5):#
cumulaN += grpLen[i]
if cumulaN>(10+1/characterH):
grpW.append(grpW[-1]+cumulaMax)
grpH = [1.2]
cumulaN =0
cumulaMax=0
leg = g.ax_heatmap.legend(handles=grpLegend[i],frameon=True,title=data['grp'][i],loc="upper left",
bbox_to_anchor=(grpW[-1],grpH[-1]),fontsize=5)#grpW[i],0.5,0.3
#leg = g.ax_heatmap.legend(handles=grpLegend[0],frameon=True,title=data['grp'][0],loc="upper left",
# bbox_to_anchor=(1.02,1-i*0.25),fontsize=5)#grpW[i],0.5,0.
cumulaMax = max([cumulaMax,grpWd[i]*characterW])
grpH.append(grpH[-1]-grpLen[i]*characterH)
leg.get_title().set_fontsize(6)#min(grpSize)+2
g.ax_heatmap.add_artist(leg)
#ppr.pprint('HEAT post plotting cost %f seconds' % (time.time()-sT) )
return iostreamFig(g)#json.dumps([iostreamFig(g),Xdata])#)#
def HeatData(data):
adata = createData(data)
Xdata = pd.concat([adata.to_df(),adata.obs], axis=1, sort=False).to_csv()
return Xdata
def GD(data):
adata = None;
for one in data['cells'].keys():
#sT = time.time()
oneD = data.copy()
oneD.update({'cells':data['cells'][one],
'genes':[],
'grp':[]})
D = createData(oneD)
#ppr.pprint("one grp aquire data cost %f seconds" % (time.time()-sT))
D.obs['cellGrp'] = one
if adata is None:
adata = D
else:
#sT =time.time()
adata = adata.concatenate(D)
#ppr.pprint("Concatenate data cost %f seconds" % (time.time()-sT))
if adata is None:
        return Msg("No cells satisfied the condition!")
##
adata.obs.astype('category')
cutOff = 'geneN_cutoff'+data['cutoff']
#sT = time.time()
#adata.obs[cutOff] = adata.to_df().apply(lambda x: sum(x>float(data['cutoff'])),axis=1)
#ppr.pprint(time.time()-sT)
#sT = time.time()
#df = adata.to_df()
#adata.obs[cutOff] = df[df>float(data['cutoff'])].count(axis=1)
#ppr.pprint(time.time()-sT)
sT = time.time()
adata.obs[cutOff] = (adata.X >float(data['cutoff'])).sum(1)
ppr.pprint(time.time()-sT)
##
w = 3
if len(data['cells'])>1:
w += 3
fig = plt.figure(figsize=[w,4])
sc.pl.violin(adata,cutOff,groupby='cellGrp',ax=fig.gca(),show=False,rotation=0,size=2)
return iostreamFig(fig)
def getGSEA(data):
strGSEA = '%s/gsea/'%strExePath
return json.dumps(sorted([os.path.basename(i).replace(".symbols.gmt","") for i in glob.glob(strGSEA+"*.symbols.gmt")]))
def DEG(data):
adata = None;
genes = data['genes']
data['genes'] = []
comGrp = 'cellGrp'
if 'combine' in data.keys():
if data['DEmethod']=='default':
combUpdate, obs = getObs(data)
if combUpdate and len(data['grp'])>1:
obs[comGrp] = obs[data['grp'][0]]
for i in data['grp']:
if i!=data['grp'][0]:
obs[comGrp] += ":"+obs[i]
mask = [obs[comGrp].isin([data['comGrp'][i]]) for i in [0,1]]
else:
data['figOpt']['scale'] = 'No'
adata = createData(data)
comGrp = data['grp'][0]
adata = adata[adata.obs[comGrp].isin(data['comGrp'])]
else:
mask = [pd.Series(range(data['cellN'])).isin(data['cells'][one].values()) for one in data['comGrp']]
for one in data['comGrp']:
oneD = data.copy()
oneD['cells'] = data['cells'][one]
oneD['genes'] = []
oneD['grp'] = []
oneD['figOpt']['scale']='No'
#oneD = {'cells':data['cells'][one],
# 'genes':[],
# 'grp':[],
# 'figOpt':{'scale':'No'},
# 'url':data['url']}
D = createData(oneD)
D.obs[comGrp] = one
if adata is None:
adata = D
else:
adata = adata.concatenate(D)
if data['DEmethod']=='default':
if sum(mask[0]==True)<10 or sum(mask[1]==True)<10:
raise ValueError('Less than 10 cells in a group!')
with app.get_data_adaptor(url_dataroot=data['url_dataroot'],dataset=data['dataset']) as scD:
res = diffDefault.diffexp_ttest(scD,mask[0].to_numpy(),mask[1].to_numpy(),scD.data.shape[1])# shape[cells as rows, genes as columns]
gNames = list(scD.data.var[data['var_index']])
deg = pd.DataFrame(res,columns=['gID','log2fc','pval','qval'])
gName = pd.Series([gNames[i] for i in deg['gID']],name='gene')
deg = pd.concat([deg,gName],axis=1).loc[:,['gene','log2fc','pval','qval']]
else:
if not 'AnnData' in str(type(adata)):
raise ValueError('No data extracted by user selection')
adata.obs.astype('category')
nm = None
if data['DEmethod']=='wald':
nm = 'nb'
if data['DEmethod']=='wald':
res = de.test.wald(adata,formula_loc="~1+"+comGrp,factor_loc_totest=comGrp)
elif data['DEmethod']=='t-test':
res = de.test.t_test(adata,grouping=comGrp)
elif data['DEmethod']=='rank':
res = de.test.rank_test(adata,grouping=comGrp)
else:
raise ValueError('Unknown DE methods:'+data['DEmethod'])
#res = de.test.two_sample(adata,comGrp,test=data['DEmethod'],noise_model=nm)
deg = res.summary()
deg = deg.sort_values(by=['qval']).loc[:,['gene','log2fc','pval','qval']]
deg['log2fc'] = -1 * deg['log2fc']
## plot in R
#strF = ('/tmp/DEG%f.csv' % time.time())
strF = ('%s/DEG%f.csv' % (data["CLItmp"],time.time()))
deg.to_csv(strF,index=False)
#ppr.pprint([strExePath+'/volcano.R',strF,'"%s"'%';'.join(genes),data['figOpt']['img'],str(data['figOpt']['fontsize']),str(data['figOpt']['dpi']),str(data['logFC']),data['comGrp'][1],data['comGrp'][0]])
res = subprocess.run([strExePath+'/volcano.R',strF,';'.join(genes),data['figOpt']['img'],str(data['figOpt']['fontsize']),str(data['figOpt']['dpi']),str(data['logFC']),data['comGrp'][1],data['comGrp'][0],str(data['sigFDR']),str(data['sigFC']),data['Rlib']],capture_output=True)#
if 'Error' in res.stderr.decode('utf-8'):
raise SyntaxError("in volcano.R: "+res.stderr.decode('utf-8'))
img = res.stdout.decode('utf-8')
# GSEA
GSEAimg=""
GSEAtable=pd.DataFrame()
if data['gsea']['enable']:
res = subprocess.run([strExePath+'/fgsea.R',
strF,
'%s/gsea/%s.symbols.gmt'%(strExePath,data['gsea']['gs']),
str(data['gsea']['gsMin']),
str(data['gsea']['gsMax']),
str(data['gsea']['padj']),
data['gsea']['up'],
data['gsea']['dn'],
str(data['gsea']['collapse']),
data['figOpt']['img'],
str(data['figOpt']['fontsize']),
str(data['figOpt']['dpi']),
data['Rlib']],capture_output=True)#
if 'Error' in res.stderr.decode('utf-8'):
raise SyntaxError("in fgsea.R: "+res.stderr.decode('utf-8'))
GSEAimg = res.stdout.decode('utf-8')
GSEAtable = pd.read_csv(strF)
GSEAtable['leadingEdge'] = GSEAtable['leadingEdge'].apply(lambda x:'|'.join(x.split('|')[:10]))
os.remove(strF)
#####
gInfo = getVar(data)
deg.index = deg['gene']
deg = pd.concat([deg,gInfo],axis=1,sort=False)
#return deg.to_csv()
if not data['topN']=='All':
deg = deg.iloc[range(int(data['topN'])),]
#deg.loc[:,'log2fc'] = deg.loc[:,'log2fc'].apply(lambda x: '%.2f'%x)
#deg.loc[:,'pval'] = deg.loc[:,'pval'].apply(lambda x: '%.4E'%x)
#deg.loc[:,'qval'] = deg.loc[:,'qval'].apply(lambda x: '%.4E'%x)
#ppr.pprint(GSEAtable)
#ppr.pprint(GSEAtable.sort_values('pval'))
return json.dumps([deg.to_csv(index=False),img,GSEAtable.to_csv(index=False),GSEAimg])#json.dumps([deg.values.tolist(),img])
def DOT(data):
#ppr.pprint("DOT, starting ...")
updateGene(data)
    # Dot plot: the dotplot visualization provides a compact way of showing, per group, the
    # fraction of cells expressing a gene (dot size) and the mean expression of the gene in
    # those cells (color scale). The dotplot is only meaningful when the counts matrix contains
    # zeros representing no gene counts; it does not work for scaled or corrected matrices in
    # which zero counts have been replaced by other values, see
    # http://scanpy-tutorials.readthedocs.io/en/multiomics/visualizing-marker-genes.html
data['figOpt']['scale'] = 'No';
#ppr.pprint("DOT: creating data ...")
adata = createData(data)
#ppr.pprint("DOT: data created!")
if len(adata)==0:
return Msg('No cells in the condition!')
#return adata
grp = adata.obs[data['grp'][0]].unique()
if len(grp)<10:
col = np.array(sns.color_palette('Set1',len(grp)).as_hex())
elif len(grp)<20:
col = np.array(sns.color_palette(n_colors=len(grp)).as_hex())
else:
col = np.array(sns.color_palette("husl",len(grp)).as_hex())
adata.uns[data['grp'][0]+'_colors'] = col
#ppr.pprint(sc.__version__)
if 'split_show' in data['figOpt']['scanpybranch']:#.dev140+ge9cbc5f
dp = sc.pl.dotplot(adata,data['genes'],groupby=data['grp'][0],expression_cutoff=float(data['cutoff']),mean_only_expressed=(data['mean_only_expressed'] == 'Yes'),
var_group_positions=data['grpLoc'],var_group_labels=data['grpID'],
return_fig=True)#
dp = dp.add_totals(size=1.2).legend(show_size_legend=True,width=float(data['legendW'])).style(cmap=data['color'], dot_edge_color='black', dot_edge_lw=1, size_exponent=1.5)
dp.show()
fig = dp.get_axes()['mainplot_ax'].figure
else:
sc.pl.dotplot(adata,data['genes'],groupby=data['grp'][0],show=False,expression_cutoff=float(data['cutoff']),mean_only_expressed=(data['mean_only_expressed'] == 'Yes'),var_group_positions=data['grpLoc'],var_group_labels=data['grpID'], color_map=data['color'])
fig = plt.gcf()
#ppr.pprint(adata)
return iostreamFig(fig)
def EMBED(data):
adata = createData(data)
if len(data['grpNum'])>0:
adata.obs = pd.concat([adata.obs,getObsNum(data)],axis=1)
subSize = 4
ncol = int(data['ncol'])
ngrp = len(data['grp'])
ngrpNum = len(data['grpNum'])
ngene = len(data['genes'])
nrow = ngrp+math.ceil(ngrpNum/ncol)+math.ceil(ngene/ncol)
if 'splitGrp' in data.keys():
splitName = list(adata.obs[data['splitGrp']].unique())
nsplitRow = math.ceil(len(splitName)/ncol)
nrow = ngrp+math.ceil(ngrpNum/ncol)+ngene*nsplitRow
step =11
grpCol = {gID:math.ceil(len(list(adata.obs[gID].unique()))/step) for gID in data['grp']}
rcParams['figure.constrained_layout.use'] = False
fig = plt.figure(figsize=(ncol*subSize,subSize*nrow))
gs = fig.add_gridspec(nrow,ncol,wspace=0.2)
for i in range(ngrp):
grpName = adata.obs[data['grp'][i]].value_counts().to_dict()
grpPalette = None
plotOrder = None
dotSize = None
if len(grpName)==2 and max(grpName.values())/min(grpName.values())>10:
grpPalette = {max(grpName,key=grpName.get):'#c0c0c030',min(grpName,key=grpName.get):'#de2d26ff'}
plotOrder = min(grpName,key=grpName.get) #list(grpPalette.keys()) #
grpPalette = [grpPalette[k] for k in list(adata.obs[data['grp'][i]].cat.categories)]
dotSize = adata.obs.apply(lambda x: 360000/adata.shape[1] if x['HIVcell']==plotOrder else 120000/adata.shape[1],axis=1).tolist()
ax = sc.pl.embedding(adata,data['layout'],color=data['grp'][i],ax=fig.add_subplot(gs[i,0]),show=False,palette=grpPalette,groups=plotOrder,size=dotSize)
if grpCol[data['grp'][i]]>1:
ax.legend(ncol=grpCol[data['grp'][i]],loc=6,bbox_to_anchor=(1,0.5),frameon=False)
ax.set_xlabel('%s1'%data['layout'])
ax.set_ylabel('%s2'%data['layout'])
for i in range(ngrpNum):
x = int(i/ncol)+ngrp
y = i % ncol
ax = sc.pl.embedding(adata,data['layout'],color=data['grpNum'][i],ax=fig.add_subplot(gs[x,y]),show=False)#,wspace=0.25
ax.set_xlabel('%s1'%data['layout'])
ax.set_ylabel('%s2'%data['layout'])
if 'splitGrp' in data.keys():
vMax = adata.to_df().apply(lambda x: max(x))
vMin = adata.to_df().apply(lambda x: min(x))
dotSize = 120000 / adata.n_obs
for i in range(ngene):
for j in range(len(splitName)):
x = ngrp + math.ceil(ngrpNum/ncol) + i*nsplitRow+int(j/ncol)
y = j % ncol
ax = sc.pl.embedding(adata,data['layout'],ax=fig.add_subplot(gs[x,y]),show=False)#color=data['genes'][i],wspace=0.25,
ax = sc.pl.embedding(adata[adata.obs[data['splitGrp']]==splitName[j]],data['layout'],color=data['genes'][i],
vmin=vMin[data['genes'][i]],vmax=vMax[data['genes'][i]],ax=ax,show=False,
size=dotSize,title='{} in {}'.format(data['genes'][i],splitName[j]))
ax.set_xlabel('%s1'%data['layout'])
ax.set_ylabel('%s2'%data['layout'])
else:
for i in range(ngene):
x = int(i/ncol)+ngrp+math.ceil(ngrpNum/ncol)
y = i % ncol
ax = sc.pl.embedding(adata,data['layout'],color=data['genes'][i],ax=fig.add_subplot(gs[x,y]),show=False)
ax.set_xlabel('%s1'%data['layout'])
ax.set_ylabel('%s2'%data['layout'])
return iostreamFig(fig)
def TRACK(data):
updateGene(data)
adata = createData(data)
if len(adata)==0:
return Msg('No cells in the condition!')
w = math.log2(adata.n_obs)
h = adata.n_vars/2
## a bug in scanpy reported: https://github.com/theislab/scanpy/issues/1265, if resolved the following code is not needed
if len(data['grpLoc'])>0 and data['grpLoc'][len(data['grpLoc'])-1][1] < (len(data['genes'])-1):
data['grpLoc'] += [(data['grpLoc'][len(data['grpLoc'])-1][1]+1,len(data['genes'])-1)]
data['grpID'] += ['others']
##############
#ppr.pprint(data['grpLoc'])
#ppr.pprint(data['grpID'])
ax = sc.pl.tracksplot(adata,data['genes'],groupby=data['grp'][0],figsize=(w,h),
var_group_positions=data['grpLoc'],var_group_labels=data['grpID'],
show=False)
fig=ax['track_axes'][0].figure
return iostreamFig(fig)
def cut(x,cutoff,anno):
iC = x[x>cutoff].count()
if iC ==0:
return "None"
elif iC==2:
return "Both"
elif x[0]>cutoff:
return anno[0]
elif x[1]>cutoff:
return anno[1]
return "ERROR"
def dualExp(df,cutoff,anno):
label = ['None']+list(anno)+['Both']
a = df.iloc[:,0]>cutoff
b = df.iloc[:,1]>cutoff
return pd.Series([label[i] for i in list(a+2*b)],index=df.index,dtype='category')
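# Illustrative sketch of the label arithmetic in dualExp(): a + 2*b indexes into
# ['None', gene1, gene2, 'Both'] (toy data, cutoff 0).
def _example_dualExp():
    toy = pd.DataFrame({'geneA': [1.0, 2.0, 0.0], 'geneB': [0.0, 3.0, 0.0]},
                       index=['cell1', 'cell2', 'cell3'])
    return dualExp(toy, 0.0, toy.columns)  # -> 'geneA', 'Both', 'None'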
def DUAL(data):
adata = createData(data)
adata.obs['Expressed'] = dualExp(adata.to_df(),float(data['cutoff']),adata.var_names)
sT = time.time()
pCol = {"None":"#AAAAAA44","Both":"#EDDF01AA",data['genes'][0]:"#1CAF82AA",data['genes'][1]:"#FA2202AA"}
adata.uns["Expressed_colors"]=[pCol[i] for i in adata.obs['Expressed'].cat.categories]
rcParams['figure.figsize'] = 4.5, 4
fig = sc.pl.embedding(adata,data['layout'],color='Expressed',return_fig=True,show=False,legend_fontsize="small")
plt.xlabel('%s1'%data['layout'])
plt.ylabel('%s2'%data['layout'])
rcParams['figure.figsize'] = 4, 4
return iostreamFig(fig)
def MARK(data):
adata = createData(data)
if len(adata)==0:
return Msg('No cells in the condition!')
    ## remove annotation groups with 2 or fewer cells to avoid division by zero downstream
vCount = adata.obs[data["grp"][0]].value_counts()
keepG = [key for key,val in vCount.items() if val>2]
adata = adata[adata.obs[data["grp"][0]].isin(keepG),:]
if len(adata.obs[data['grp'][0]].unique())<3:
return 'ERROR @server: {}'.format('Less than 3 groups in selected cells! Please use DEG for 2 groups')
#return json.dumps([[['name','scores'],['None','0']],Msg('Less than 3 groups in selected cells!Please use DEG for 2 groups')])
sc.tl.rank_genes_groups(adata,groupby=data["grp"][0],n_genes=int(data['geneN']),method=data['markMethod'])#
ppr.pprint(int(data['geneN']))
sc.pl.rank_genes_groups(adata,n_genes=int(data['geneN']),ncols=min([3,len(adata.obs[data['grp'][0]].unique())]),show=False)
fig =plt.gcf()
gScore = adata.uns['rank_genes_groups']
#ppr.pprint(gScore)
pKeys = [i for i in ['names','scores','logfoldchanges','pvals','pvals_adj'] if i in gScore.keys()]
scoreM = [pKeys+['Group']]
for i in gScore['scores'].dtype.names:
for j in range(len(gScore['scores'][i])):
one = []
for k in pKeys:
if k=='logfoldchanges':
one += ['%.2f' % gScore[k][i][j]]
elif k in ['pvals','pvals_adj']:
one += ['%.4E' % gScore[k][i][j]]
elif k=='scores':
one += ['%.4f' % gScore[k][i][j]]
else:
one += [gScore[k][i][j]]
scoreM += [one+[i]]
return json.dumps([scoreM,iostreamFig(fig)])
def DENS(data):
#sT = time.time()
adata = createData(data)
#ppr.pprint("read data cost: %f seconds" % (time.time()-sT))
#sT = time.time()
adata.obs['None'] = pd.Categorical(['all']*adata.shape[0])
bw=float(data['bw'])
sGrp = data['category'][0]
cGrp = data['category'][1]
defaultFontsize = 16
if 'figOpt' in data.keys():
defaultFontsize = float(data['figOpt']['fontsize'])
subSize = 4
#split = list(adata.obs[sGrp].unique())
split = sorted(list(adata.obs[sGrp].cat.categories))
genes = sorted(list(adata.var.index))
#colGrp = list(adata.obs[cGrp].unique())
colGrp = sorted(list(adata.obs[cGrp].cat.categories))
legendCol = math.ceil(len(colGrp)/(len(split)*11))
fig = plt.figure(figsize=(len(genes)*subSize,len(split)*(subSize-1)))
plt.xlabel("Expression",labelpad=20,fontsize=defaultFontsize+1)
#plt.ylabel(sGrp,labelpad=50,fontsize=defaultFontsize+1)
plt.xticks([])
plt.yticks([])
plt.box(on=None)
#plt.xlabel("Expression")
#plt.ylabel(sGrp)
gs = fig.add_gridspec(len(split),len(genes),wspace=0.2)#
#dataT = 0
#plotT = 0
for i in range(len(split)):
#resT = time.time()
Dobs = adata[adata.obs[sGrp]==split[i]].obs[cGrp]
D = adata[adata.obs[sGrp]==split[i]].to_df()
#dataT += (time.time()-resT)
for j in range(len(genes)):
ax = fig.add_subplot(gs[i,j])
#resT = time.time()
for one in colGrp:
if sum(Dobs==one)<1:
sns.kdeplot([0],label=one)
else:
sns.kdeplot(D[Dobs==one][genes[j]].to_numpy(),bw_method=bw,label=one)
ax.set_ylabel("",fontsize=defaultFontsize)
if i==0:
ax.set_title(genes[j],fontsize=defaultFontsize+2)
if j==0:
ax.set_ylabel(split[i],fontsize=defaultFontsize)
if i==0 and j==(len(genes)-1):
ax.legend(prop={'size': 10},title = cGrp,loc=2,bbox_to_anchor=(1,1),ncol=legendCol,frameon=False)#
else:
leg = ax.get_legend()
if not leg==None:
leg.remove()
#fig.text(0.6,0.09,"Expression",ha='center')
#ppr.pprint("plotting data cost: %f seconds" % dataT)
#ppr.pprint("plotting plot cost: %f seconds" % plotT)
#ppr.pprint("plotting total cost: %f seconds" % (time.time()-sT))
return iostreamFig(fig)
def SANK(data):
updateGene(data)
if len(data['genes'])==0:
tmp, D = getObs(data)
D = D.apply(lambda x:x.apply(lambda y:x.name+":"+y))
else:
adata = createData(data)
D = pd.concat([adata.obs.apply(lambda x:x.apply(lambda y:x.name+":"+y)),
adata.to_df().apply(lambda x:pd.cut(x,int(data['sankBin'])).apply(lambda y:x.name+":"+'%.1f_%.1f'%(y.left,y.right)))],
axis=1,sort=False)
D = D.astype('str').astype('category')
if data['obs_index'] in D.columns:
del D[data['obs_index']]
colName =['Set1','Set3','viridis']
labels = []
cols = []
colindex = 0
for gID in D.columns:
gNames = list(D[gID].unique())
labels += gNames
if len(gNames) <10:
cols += sns.color_palette(colName[colindex%2],len(gNames)).as_hex()
colindex += 1
else:
cols += sns.color_palette(colName[2],len(gNames)).as_hex()
sIDs =[]
dIDs =[]
v=[]
Dnames = data['sankOrder']#list(D.columns)
#maxGrp = 0
#ppr.pprint(Dnames)
for i in range(len(Dnames)-1):
oneName = Dnames[i:i+2]
#maxGrp = max(maxGrp,len(D[oneName[0]].unique()))
summaryOne = D.groupby(oneName).size().reset_index(name='Count')
summaryOne=summaryOne[summaryOne['Count']>0]
sIDs += list(summaryOne[oneName[0]].apply(lambda x: labels.index(x)))
dIDs += list(summaryOne[oneName[1]].apply(lambda x: labels.index(x)))
v += list(summaryOne['Count'])
data_trace = dict(
type='sankey',
domain=dict(x=[0,1],y=[0,1]),
orientation='h',
valueformat = ".0f",
node = dict(
pad = 10,
thickness = 15,
line = dict(
color = "black",
width = 0.5
),
label = labels,
color = cols
),
link = dict(
source = sIDs,
target = dIDs,
value = v
)
)
## if the image is requested
if 'imgSave' in data.keys():
layout = dict(
font = dict(size=int(data['figOpt']['fontsize'])),
height= int(data['imgH']),
width = int(data['imgW'])*D.shape[1]
)
fig = go.Figure(data=[go.Sankey(data_trace)],layout=layout)
img = plotIO.to_image(fig,data['imgSave'])
return base64.encodebytes(img).decode('utf-8')
layout = dict(
font = dict(size=int(data['figOpt']['fontsize'])),
height= int(data['imgH']),
width = int(data['imgW'])*D.shape[1],
updatemenus= [
dict(
y=0.9,
buttons=[
dict(
label='Thick',
method='restyle',
args=['node.thickness', 15]
),
dict(
label='Thin',
method='restyle',
args=['node.thickness', 8]
)
]
),
dict(
y=0.8,
buttons=[
dict(
label='Small gap',
method='restyle',
args=['node.pad', 15]
),
dict(
label='Large gap',
method='restyle',
args=['node.pad', 20]
)
]
),
dict(
y=0.7,
buttons=[
dict(
label='Snap',
method='restyle',
args=['arrangement', 'snap']
),
dict(
label='Perpendicular',
method='restyle',
args=['arrangement', 'perpendicular']
),
dict(
label='Freeform',
method='restyle',
args=['arrangement', 'freeform']
),
dict(
label='Fixed',
method='restyle',
args=['arrangement', 'fixed']
)
]
),
dict(
y=0.6,
buttons=[
dict(
label='Horizontal',
method='restyle',
args=['orientation','h']#{,'height':700,'width':250*D.shape[1]}
),
dict(
label='Vertical',
method='restyle',
args=['orientation','v']#{'orientation': 'v','height':250*D.shape[1],'width':700}
)
]
)
]
)
fig = go.Figure(data=[go.Sankey(data_trace)],layout=layout)
div = plotIO.to_html(fig)
return div#[div.find('<div>'):(div.find('</div>')+6)]
def DENS2D(data):
adata = createData(data)
## plot in R
strF = ('%s/DENS2D%f.csv' % (data["CLItmp"],time.time()))
adata.to_df().to_csv(strF)#
res = subprocess.run([strExePath+'/Density2D.R',strF,data['figOpt']['img'],str(data['cutoff']),str(data['bandwidth']),data['figOpt']['colorMap'],str(data['figOpt']['fontsize']),str(data['figOpt']['dpi']),data['Rlib']],capture_output=True)#
img = res.stdout.decode('utf-8')
os.remove(strF)
if 'Error' in res.stderr.decode('utf-8'):
raise SyntaxError("in R: "+res.stderr.decode('utf-8'))
return img
def toInt(x):
if len(x)==0:
return 0
return int(x)
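# toInt() guards the count lookups below: a (colour group, x category) combination with no
# cells yields an empty selection, and int() of an empty Series would fail, so 0 is returned.
def _example_toInt():
    assert toInt(pd.Series([], dtype='int64')) == 0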
def STACBAR(data):
if len(data['genes'])==0:
tmp, D = getObs(data)
D = D.apply(lambda x:x.apply(lambda y:y))
else:
adata = createData(data)
D = pd.concat([adata.obs.apply(lambda x:x.apply(lambda y:y)),
adata.to_df().apply(lambda x:pd.cut(x,int(data['Nbin'])).apply(lambda y:'%s:%.1f_%.1f'%(x.name,y.left,y.right)))],
axis=1,sort=False)
D = D.astype('str').astype('category')
if data['obs_index'] in D.columns:
del D[data['obs_index']]
cellN = D.groupby(list(D.columns)).size().reset_index(name="Count")
strCol = data['colorBy']
tmp = list(D.columns)
tmp.remove(strCol)
strX = tmp[0]
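    # The nested structure below is what the stacked-bar front-end presumably consumes; the
    # generic 'name'/'sales'/'year'/'profit' keys are kept for that component and here hold
    # the colour group, the per-x-category entries, the x category label and the cell count.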
returnD = [{'name':i,
'sales':[{'year':j,#.replace(strX+':',''),
'profit':toInt(cellN[(cellN[strCol]==i) & (cellN[strX]==j)]['Count'])}
for j in cellN[strX].unique()]}
for i in cellN[strCol].unique()]
return json.dumps(returnD)
def CLI(data):
strPath = data["CLItmp"]+('/CLI%f' % time.time())
script = data['script']
del data['script']
adata = createData(data)
strData = strPath + '.h5ad'
adata.write(strData)
#with open(strData,'wb') as f:
#pickle.dump(adata,f)
ppr.pprint(len(re.findall(r'```',script)))
if (len(re.findall(r'```',script)) >0):
strScript = strPath + '.Rmd'
with open(strScript,'w') as f:
f.writelines(['---\noutput:\n html_document:\n code_folding: hide\n---\n\n```{r}\nstrPath <- "%s"\n```\n\n'%strPath])
f.write(script)
#ppr.pprint(subprocess.run('which Rscript',capture_output=True,shell=True).stdout.decode('utf-8'))
res = subprocess.run('Rscript -e \'rmarkdown::render("%s", output_file="%s.html")\''%(strScript,strPath),capture_output=True,shell=True)
if (os.path.exists('%s.html'%strPath)):
with open('%s.html'%strPath,'r') as file:
html = file.read()
else:
html = ''
ppr.pprint(res.stdout.decode('utf-8'))
ppr.pprint(res.stderr.decode('utf-8'))
else:
strScript = strPath + '.py'
with open(strScript,'w') as f:
f.writelines(['%load_ext rpy2.ipython\n','from anndata import read_h5ad\n','adata=read_h5ad("%s")\n'%strData, 'strPath="%s"\n\n'%strPath])
#f.writelines(['%load_ext rpy2.ipython\n','import pickle\n','with open("%s","rb") as f:\n'%strData,' adata=pickle.load(f)\n','strPath="%s"\n\n'%strPath])
f.writelines(['%%R\n','strPath="%s"\n\n'%strPath])
f.write(script)
ppr.pprint(subprocess.run('which Rscript',capture_output=True,shell=True).stdout.decode('utf-8'))
ppr.pprint(subprocess.run('which pandoc',capture_output=True,shell=True).stdout.decode('utf-8'))
ppr.pprint(subprocess.run("Rscript -e 'reticulate::py_config()'",capture_output=True,shell=True).stdout.decode('utf-8'))
res = subprocess.run('jupytext --to notebook --output - %s | jupyter nbconvert --ExecutePreprocessor.timeout=1800 --to html --execute --stdin --stdout'%strScript,capture_output=True,shell=True)
html = res.stdout.decode('utf-8')
h,s,e = html.partition('<div class="cell border-box-sizing code_cell rendered">')
h1,s,e = e.partition('<div class="cell border-box-sizing code_cell rendered">') ## remove the first cell
h1,s,e = e.partition('<div class="cell border-box-sizing code_cell rendered">') ## remove the second cell
html = h+s+e
if 'Error' in res.stderr.decode('utf-8'):
html = 'ERROR @server:\nstderr:\n' + res.stderr.decode('utf-8') + '\nstdout:\n' + res.stdout.decode('utf-8')
for f in glob.glob(strPath+"*"):
try:
os.remove(f)
except:
continue
return html
def getDesp(data):
strF = re.sub("h5ad$","txt",data["h5ad"])
if not os.path.isfile(strF):
return ""
txt = ""
with open(strF,'r') as fp:
for line in fp:
txt = "%s<br>%s"%(txt,line)
return txt
def getPreDEGname(data):
strF = re.sub("h5ad$","db",data["h5ad"])
if not os.path.isfile(strF):
#ppr.pprint(strF+" is NOT found!")
return ""
conn = sqlite3.connect(strF)
df = pd.read_sql_query("select DISTINCT contrast,tags from DEG;", conn)
conn.close()
return json.dumps(list(df['contrast']+"::"+df['tags']))
def getPreDEGvolcano(data):
strF = re.sub("h5ad$","db",data["h5ad"])
comGrp = data["compSel"].split("::")
conn = sqlite3.connect(strF)
    df = pd.read_sql_query("select gene,log2fc,pval,qval from DEG where contrast=? and tags=?;", conn, params=comGrp)
# -*- coding: utf-8 -*-
"""Copy of final.ipynb
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/drive/1JsZAdNd67Fcn-S5prbt1w33R4wxE_9ep
"""
# Commented out IPython magic to ensure Python compatibility.
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import warnings
warnings.filterwarnings('ignore')
from sklearn.preprocessing import LabelEncoder
from tqdm import tqdm_notebook as tqdm
# %matplotlib inline
"""## Data loading """
application_train = pd.read_csv("/content/drive/MyDrive/Home Credit/Data/application_train.csv")
application_test = pd.read_csv("/content/drive/MyDrive/Home Credit/Data/application_test.csv")
# pos_cash = pd.read_csv("/content/drive/MyDrive/Home Credit/Data/POS_CASH_balance.csv")
# installments = pd.read_csv("/content/drive/MyDrive/Home Credit/Data/installments_payments.csv")
# credit_df = pd.read_csv("/content/drive/MyDrive/Home Credit/Data/credit_card_balance.csv");
# b=pd.read_csv("/content/drive/MyDrive/Home Credit/Data/bureau.csv")
# bur=pd.read_csv("/content/drive/MyDrive/Home Credit/Data/bureau_balance.csv")
# prev=pd.read_csv("/content/drive/MyDrive/Home Credit/Data/previous_application.csv")
print("application_train.shape:",application_train.shape)
print("application_test.shape :",application_test.shape)
train_id = application_train["SK_ID_CURR"]
train_target = application_train["TARGET"]
test_id = application_test["SK_ID_CURR"]
application_train.head()
application_test.head()
"""we have one extra column in the application_train data , i.e TARGET """
application_train['TARGET'].value_counts()
fig = plt.figure(figsize =(15, 5))
plt.subplot(1,2,1)
plt.pie(application_train["TARGET"].value_counts(),labels = ["TARGET=0","TARGET=1"],autopct='%1.2f%%')
plt.subplot(1,2,2)
sns.countplot(x="TARGET",palette ="Set2",data=application_train)
plt.tight_layout()
plt.show()
"""Imbalanced dataset"""
application_train.dtypes.value_counts()
obj_type = application_train.dtypes[application_train.dtypes=='object'].index
float_type = application_train.dtypes[application_train.dtypes=='float64'].index
int_type = application_train.dtypes[application_train.dtypes=='int64'].index
def missing_data(data):
total = data.isnull().sum().sort_values(ascending = False)
percent = (data.isnull().sum()/data.isnull().count()*100).sort_values(ascending = False)
    return pd.concat([total, percent], axis=1, keys=['Total', 'Percent'])
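# Example usage of the helper above (a minimal sketch, kept commented so the
# script's behaviour is unchanged):
# missing_data(application_train).head(10)   # top-10 columns by missing share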
import numpy as np
import pandas as pd
from sklearn.model_selection import train_test_split
import tensorflow_datasets as tfds
tfds.disable_progress_bar()
def prepare_titanic(test_size=0.3, random_state=123):
print('Download or read from disk.')
ds = tfds.load('titanic', split='train')
# Turn DataSet adapter into DataFrame
print('Convert to pandas.DataFrame')
X = []
y = []
for ex in tfds.as_numpy(ds):
x_i, y_i = ex['features'], ex['survived']
X.append(x_i)
y.append(y_i)
df_X = pd.DataFrame(X)
features = list(df_X.columns)
y = pd.Series(y, name='survived')
print('Partition into Train and Test')
print(f' Test size = {test_size}')
print(f' random_state = {random_state}')
df_train, df_test, y_train, y_test = train_test_split(
df_X, y, test_size=test_size, random_state=random_state
)
return df_train, df_test, y_train, y_test
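# Example usage (a minimal sketch; the first call downloads the TFDS 'titanic' split):
# df_train, df_test, y_train, y_test = prepare_titanic(test_size=0.3, random_state=123)
# print(df_train.shape, df_test.shape, y_train.mean())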
def encode_features(df):
df = df.copy()
# Columns requiring special handling
df['cabin'] = np.where(df['cabin'] == b'Unknown', 1, 0)
    df['ticket'] = df['ticket'].astype(str).str.extract(r'(\d+)', expand=False).fillna(-1)
    embarked_dummies = pd.get_dummies(df['embarked'], prefix='embarked')
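# The source file is truncated at this point. A plausible continuation (an assumption,
# not the author's original code) would attach the dummies and return the frame:
# df = pd.concat([df.drop(columns=['embarked']), embarked_dummies], axis=1)
# return df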
import pandas as pd
import numpy as np
import pickle
from .utils import *
def predNextDays(optmod_name, opt_mod, var_name, pred_days):
pred = (opt_mod[optmod_name]['mod_data'][var_name])[opt_mod[optmod_name]['i_start'] + opt_mod[optmod_name]['period'] -1 :opt_mod[optmod_name]['i_start'] + opt_mod[optmod_name]['period']+pred_days]
print("Mod: %s \t Next days: %s: \t %s" %(optmod_name, var_name, str([int(x) for x in pred])))
print("Mod: %s \t Variation: %s: \t %s" %(optmod_name, var_name, str([int(x) for x in (pred[1:len(pred)] - pred[0:len(pred)-1])])))
class ModelStats:
def __init__(self, model, act_data, pred_days = 10):
self.model = model
self.act_data = act_data
self.data = pd.DataFrame(self.calcData())
self.data.set_index("date", inplace=True)
def printKpi(self, date, kpi_name, title, num_format = 'd', bperc = False):
var_uff = "uff_" + kpi_name
var_mod = "mod_" + kpi_name
if "uff_" + kpi_name in self.data.columns.tolist():
#print(("%30s: %7" + num_format + " vs %7" + num_format + " (%5" + num_format + " vs %5" + num_format + "), errore: %" + num_format + "") %(
print(("%30s: %7s vs %7s (%5s vs %5s), errore: %s") %(
title,
format_number(self.data[var_uff][np.datetime64(date, 'D')], bperc = bperc),
format_number(self.data[var_mod][np.datetime64(date, 'D')], bperc = bperc),
format_number(self.data[var_uff][np.datetime64(date, 'D')] - self.data[var_uff][np.datetime64(date, 'D') - np.timedelta64(1, 'D')], bperc = bperc),
format_number(self.data[var_mod][np.datetime64(date, 'D')] - self.data[var_mod][np.datetime64(date, 'D') - np.timedelta64(1, 'D')], bperc = bperc),
format_number(self.data[var_uff][np.datetime64(date, 'D')] - self.data[var_mod][np.datetime64(date, 'D')], bperc = bperc)
))
else:
#print(("%30s: %7" + num_format + " (%5" + num_format + ")") %(
print(("%30s: %7s (%5s)") %(
title,
format_number(self.data[var_mod][np.datetime64(date, 'D')], bperc = bperc),
format_number(self.data[var_mod][np.datetime64(date, 'D')] - self.data[var_mod][np.datetime64(date, 'D') - np.timedelta64(1, 'D')], bperc = bperc)
))
def printKpis(self, date):
self.printKpi(date, 'Igc_cum', "Tot Infected")
self.printKpi(date, 'Igc', "Currently Infected")
self.printKpi(date, 'Igci_t', "Currently in Int. Care")
self.printKpi(date, 'Gc_cum', "Tot Recovered")
self.printKpi(date, 'M_cum', "Tot Dead")
print()
self.printKpi(date, 'Igc_cum_pinc', "% Increase, Infected", num_format=".3f", bperc = True)
self.printKpi(date, 'ratio_Gc_Igc', "% Mortality Rate", num_format=".3f", bperc = True)
self.printKpi(date, 'ratio_M_Igc', "% Known Recovery Rate", num_format=".3f", bperc = True)
print()
self.printKpi(date, 'ratio_Gccum_Igccum', "% Recovered / Tot", num_format=".3f", bperc = True)
self.printKpi(date, 'ratio_Mcum_Igccum', "% Dead / Tot", num_format=".3f", bperc = True)
self.printKpi(date, 'ratio_Igci_Igc', "% Intensive Care", num_format=".3f", bperc = True)
self.printKpi(date, 'ratio_Igcn_Igc', "% Non Intensive Care", num_format=".3f", bperc = True)
self.printKpi(date, 'ratio_I_Igc', "% Total Infected / Known Infected", num_format=".3f", bperc = True)
self.printKpi(date, 'R0_t', "R0", num_format=".3f")
print()
print()
print("*** 7 days ahead predictions ***")
        self.printPredict(date, 'Igc_cum', "Tot Infected", pred_step = 7, bperc = False)
        print()
        self.printPredict(date, 'Igc', "Currently Infected", pred_step = 7, bperc = False)
        print()
        self.printPredict(date, 'Igci_t', "Currently in Int. Care", pred_step = 7, bperc = False)
        print()
        self.printPredict(date, 'Gc_cum', "Tot Recovered", pred_step = 7, bperc = False)
        print()
        self.printPredict(date, 'M_cum', "Tot Dead", pred_step = 7, bperc = False)
def printPredict(self, curr_date, kpi_name, title, pred_step = 7, bperc = False):
var_mod = "mod_" + kpi_name
data = self.data[var_mod][np.datetime64(curr_date, 'D') : np.datetime64(np.datetime64(curr_date, 'D') + np.timedelta64(pred_step, 'D'))]
data_delta = pd.Series(data).diff(1)
data_str = "["
for val in data:
data_str = " " + data_str + " {:7s}".format(format_number(val)) + " "
data_str = data_str + "]"
data_delta_str = "["
for val in data_delta:
#data_delta_str = " " + data_delta_str + " {:7s}".format(format_number(val)) + " "
#print(val)
#if math.isfinite(val):
data_delta_str = " " + data_delta_str + " {:7s}".format(str(format_number(val))) + " "
#else:
# data_delta_str = " " + data_delta_str + " {:7s}".format("0") + " "
data_delta_str = data_delta_str + "]"
print(("%30s: %60s") %(
title,
data_str
))
print(("%30s: %60s") %(
"Var.",
data_delta_str
))
def calcData(self):
def calcDataVar(data):
istart = self.model['i_start']
#iend = istart + len(data)
mod_len = len(self.model['mod_data']['dat_Igc'])
#return [np.NaN for i in range (0, istart)] + data.tolist() + [np.NaN for i in range(istart + len(data) -1, mod_len-1)]
return [np.NaN for i in range (0, istart)] + data.tolist()[self.act_data.i_start:] + [np.NaN for i in range(istart + len(data[self.act_data.i_start:]) -1, mod_len-1)]
def calcDataVarDate(data):
istart = self.model['i_start']
mod_len = len(self.model['mod_data']['dat_Igc'])
#first_date = data[0] - np.timedelta64(istart, 'D')
first_date = data[self.act_data.i_start] - np.timedelta64(istart, 'D')
return [np.datetime64(first_date + np.timedelta64(i, 'D'), 'D') for i in range (0, mod_len)]
uff_Igci_t = calcDataVar(self.act_data.dat_Igci_t)
uff_Igcn_t = calcDataVar(self.act_data.dat_Igcn_t)
uff_Igc = calcDataVar(self.act_data.dat_Igc)
uff_Igc_cum = calcDataVar(self.act_data.dat_Igc_cum)
uff_Gc_cum = calcDataVar(self.act_data.dat_Gc_cum)
uff_M_cum = calcDataVar(self.act_data.dat_M_cum)
uff_Gc = [np.NaN] + np.diff(uff_Gc_cum).tolist()
uff_M = [np.NaN] + np.diff(uff_M_cum).tolist()
uff_Igc_cum_pinc = (pd.Series(uff_Igc_cum)/pd.Series(uff_Igc_cum).shift(1)) - 1
uff_ratio_Gc_Igc = (pd.Series(uff_Gc)/pd.Series(uff_Igc).shift(1))
uff_ratio_M_Igc = (pd.Series(uff_M)/pd.Series(uff_Igc).shift(1))
uff_ratio_Gccum_Igccum = (np.array(uff_Gc_cum)/np.array(uff_Igc_cum)).tolist()
uff_ratio_Mcum_Igccum = (np.array(uff_M_cum)/np.array(uff_Igc_cum)).tolist()
uff_ratio_Igci_Igc = (np.array(uff_Igci_t)/np.array(uff_Igc)).tolist()
uff_ratio_Igcn_Igc = (np.array(uff_Igcn_t)/np.array(uff_Igc)).tolist()
mod_Igci_t = self.model['mod_data']['dat_Igci_t']
mod_Igcn_t = self.model['mod_data']['dat_Igcn_t']
mod_Ias_t = self.model['mod_data']['dat_Ias_t']
mod_Igs_t = self.model['mod'].Igs_t
mod_Igc = self.model['mod_data']['dat_Igc']
mod_Igc_cum = self.model['mod_data']['dat_Igc_cum']
mod_I = self.model['mod_data']['dat_I']
#mod_NIs_t = self.model['mod_data']['dat_NIs']
mod_G = self.model['mod_data']['dat_G']
mod_Gc = self.model['mod_data']['dat_Gc']
mod_M = self.model['mod_data']['dat_M']
mod_G_cum = self.model['mod_data']['dat_G_cum']
mod_Gc_cum = self.model['mod_data']['dat_Gc_cum']
mod_M_cum = self.model['mod_data']['dat_M_cum']
mod_Popi_t = self.model['mod_data']['dat_Popi_t']
mod_R0_t = self.model['mod_data']['dat_R0_t']
mod_Igc_cum_pinc = (pd.Series(mod_Igc_cum)/pd.Series(mod_Igc_cum).shift(1)) - 1
mod_ratio_M_Igc = (pd.Series(mod_M)/pd.Series(mod_Igc).shift(1))
        mod_ratio_Gc_Igc = (pd.Series(mod_Gc)/pd.Series(mod_Igc).shift(1))
import os
import math
import copy
import random
import calendar
import csv
import pandas as pd
import numpy as np
import networkx as nx
import matplotlib
import matplotlib.pyplot as plt
import matplotlib.dates as mdates
import matplotlib.ticker as ticker
import sqlite3
import seaborn as sns
#from atnresilience import atn_analysis as atn
import atn_analysis
import db_tools
# Set global styles for plots
plt.rcParams["font.family"] = "Times New Roman"
sns.set_palette("colorblind")
matplotlib.rc('xtick', labelsize=8)
matplotlib.rc('ytick', labelsize=8)
line_type = {1:'-',2:'--',3:':',4:'-.'}
def remove_frequency(db_path, file, airline, include_data, can_limit, zs_limit, processed_direc):
"""
Creates a dictionary of airports and their removal frequency for a given airline
Parameters
----------
file: int
Year of selected data
airline: string
Airline to get data from
include_data: string
Type of airline data to query from csv
can_limit: int
Cancellation limit
zs_limit: int
The z-score limit
Returns
-------
Returns a dictionary containing airport removal frequency values
Notes
-----
"""
df_net_tuple = pd.DataFrame()
df_net = atn_analysis.raw_query(db_path, file, airline)
df_net_tuple["Origin"] = df_net.Origin_Airport_Code
df_net_tuple["Destination"] = df_net.Destination_Airport_Code
graph = [tuple(x) for x in df_net_tuple.to_records(index=False)]
G = nx.Graph()
G.add_edges_from(graph)
tempG = G.copy()
Airport_Dict = {}
for i in G.nodes():
Airport_Dict[i] = 0
Total_List = get_remove_list(db_path, file,include_data, airline, can_limit, zs_limit, processed_direc)
if int(file)%4 == 0:
total_day = 366
else:
total_day = 365
for j in range(total_day):
airport_list = Total_List[j]
for l in airport_list:
tempG.remove_node(l)
Airport_Dict[l] = Airport_Dict[l] + 1
tempG = G.copy()
return(Airport_Dict)
def weighted_edge(db_path, file, airline):
"""
Creates a data frame of origin airports, destination airports and weights for each route
Parameters
----------
file: int
Year of selected data
airline: string
Airline to get data from
include_data: string
Type of airline data to query from csv
can_limit: int
Cancellation limit
zs_limit: int
The z-score limit
Returns
-------
Returns a data frame containing each respective weighted route from an origin airport to a destination
Notes
-----
"""
df = atn_analysis.raw_query(db_path, file, airline)
by_origin = df.groupby([df.Origin_Airport_Code]).Can_Status.count()
airport_list = by_origin.index.tolist()
df = df[df['Destination_Airport_Code'].isin(airport_list)]
df_tuple = pd.DataFrame()
df_weighted = df.groupby([df.Origin_Airport_Code, df.Destination_Airport_Code]).Can_Status.count().reset_index()
df_tuple["Origin"] = df_weighted.Origin_Airport_Code
df_tuple["Destination"] = df_weighted.Destination_Airport_Code
file_str = int(str(file)[:4])
if calendar.isleap(file_str) == 1:
days = 366
else:
days = 365
df_tuple["Weight"] = df_weighted.Can_Status
weight_values = [math.log(y, 10) for y in df_tuple.Weight.values]
for i in range(0, len(weight_values)):
df_tuple.Weight.values[i] = weight_values[i]
return(df_tuple)
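# Example usage (a minimal sketch; assumes the processed atn_db.sqlite database is
# available at db_path as set up elsewhere in this module):
# edge_df = weighted_edge(db_path, 2015, "DL")
# edge_df.head()   # Origin / Destination / log10(flight count) weight per route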
def get_remove_list(db_path, file, include_data, airline, can_limit, zs_limit, processed_direc):
"""
    Return a remove_list in a year (airline specific, include_data specific) based on the cancellation limit and z-score limit.
Parameters
----------
file: int
Year of selected data
include_data: string
Specify what kind of data to include in processed flight data. See drop_flights in M-D File. Possible parameters are:
CC: Cancellations only
ADD: Arrival delays including diversions
ADM: Purely arrival delays excluding cancellations or diversions
DCC: Combined delay. If arrival delay is greater than a set threshold, the flight is considered cancelled
DD: Departure delays. Does not include cancelled or diverted flights.
airline: string
Airline to get data from. This is the 2 letter airline code (ex: AA, UA, DL, WN)
can_limit: float
Cancellation Limit. Between 0 and 1
zs_limit: float
z-score limit. Between 0 and 1
Returns
-------
Pandas df
Notes
-----
"""
z_score_path = '%s%s_%s_Zdata_%s.csv'%(processed_direc, file,airline,include_data)
#df_score = pd.read_csv(raw_file_drop, index_col="Date")
df_score = pd.read_csv(z_score_path, index_col = "Day_of_Year")
df_score.index = pd.to_datetime(df_score.index)
airport_list = df_score.columns.tolist()
df = atn_analysis.raw_query(db_path,file,airline)
df = df[df['Origin_Airport_Code'].isin(airport_list)] # Filtering to make sure airports are equal in both directions
df = df[df['Destination_Airport_Code'].isin(airport_list)]
by_origin_count = df.groupby(['Flight_Date', 'Origin_Airport_Code'], as_index=False)[['Can_Status']].count()
by_origin = df.groupby(['Flight_Date', 'Origin_Airport_Code'], as_index=False)[['Can_Status']].sum()
by_origin.Can_Status = by_origin.Can_Status / by_origin_count.Can_Status
#print(by_origin)
df_score["idx"] = df_score.index
df_score = pd.melt(df_score, id_vars='idx', value_vars=airport_list)
df_score = df_score.sort_values(['idx', 'variable'], ascending=[True, True])
df_score.columns = ["Date", "Airports", "Z_Score"]
df_score.set_index('Date')
df_score["Cancellations"] = by_origin.Can_Status
### Creating the or conditions. First is the percentage of delayed flights and the second is the z-score
df_score["Z_score_9901"] = np.where((df_score['Cancellations'] > can_limit) | (df_score['Z_Score'] > zs_limit), 1, 0)
#print(df_score)
### Creating pivot table for easy manipulation. This creates the date as the index with the properties corresponding to
### it and finally repeats this trend for all airports being considered.
df_pivot = df_score.pivot_table('Z_score_9901', ['Date'], 'Airports')
#print(df_pivot)
s = np.asarray(np.where(df_pivot == 1, ['{}'.format(x) for x in df_pivot.columns], '')).tolist()
s_nested = []
for k in s:
p = list(filter(None,k))
#p = filter(None,k)
s_nested.append(p)
#s_nested.extend(p)
return s_nested
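# Shape of the returned value (a hypothetical illustration, not real output):
# one inner list of affected airport codes per day of the year, e.g.
# [['ORD', 'DEN'], [], ['ATL'], ...]   # day 1, day 2, day 3, ...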
def inv_average_shortest_path_length(graph, weight=None):
"""
    Computes the unweighted inverse average path length (IAPL) of a graph
Parameters
----------
graph: python graph object
weight: default
Returns
-------
    Returns the unweighted IAPL value
Notes
-----
"""
avg = 0.0
if weight is None:
for node in graph:
avg_path_length = nx.single_source_shortest_path_length(graph, node) # get the shortest path lengths from source to all reachable nodes (unweighted)
del avg_path_length[node] # Deletes source node from the list to avoid division by 0
inv_avg_path_length = copy.deepcopy(avg_path_length)
inv_avg_path_length.update((x, 1/y) for x, y in avg_path_length.items())
avg += sum(inv_avg_path_length.values())
n = len(graph)
if n == 1 or n == 0:
return 0
else:
return avg/(n*(n-1))
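# Example usage on a toy graph (a minimal sketch, kept commented; nx is already imported above):
# G_toy = nx.path_graph(4)                      # 0-1-2-3
# inv_average_shortest_path_length(G_toy)       # ~0.72 for this path graph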
def inv_average_shortest_path_length_W(graph, weight=None):
"""
    Computes the weighted inverse average path length (IAPL) of a graph using Dijkstra shortest paths
Parameters
----------
graph: python graph object
weight: default
Returns
-------
    Returns the weighted IAPL value
Notes
-----
"""
avg = 0.0
if weight is None:
for node in graph:
avg_path_length = nx.single_source_dijkstra_path_length(graph, node) # get the shortest path lengths from source to all reachable nodes (weighted)
del avg_path_length[node] # Deletes source node from the list to avoid division by 0
inv_avg_path_length = copy.deepcopy(avg_path_length)
inv_avg_path_length.update((x, 1/y) for x, y in avg_path_length.items())
avg += sum(inv_avg_path_length.values())
n = len(graph)
if n == 1 or n == 0:
return 0
else:
return avg/(n*(n-1))
def Data_Driven_W(file_list, airline_list, include_data, can_limit, zs_limit, processed_direc, graph_direc):
"""
Calculate the cluster size and IAPL for each day in a year after removal based on data-driven method.
Parameters
----------
file_list: list
        List containing years to process
    airline_list: list
        List containing airlines to process
include_data: string
Specify what kind of data to include in processed flight data. See drop_flights in M-D File. Possible parameters are:
CC: Cancellations only
ADD: Arrival delays including diversions
ADM: Purely arrival delays excluding cancellations or diversions
DCC: Combined delay. If arrival delay is greater than a set threshold, the flight is considered cancelled
DD: Departure delays. Does not include cancelled or diverted flights.
can_limit: float
Cancellation threshold
zs_limit: float
z-score threshold
Returns
-------
The cluster size and IAPL for each day of the year after removal based on data-driven method.
Notes
-----
"""
for file in file_list:
## iteration of years first
figure_num = 1
CSV_df = pd.DataFrame(columns = airline_list)
for airline in airline_list:
# CSV_df[airline] = [1,2,3,4]
# CSV_file = "%s_DD_IAPL.csv" %(file)
# CSV_df.to_csv(CSV_file, index=False)
## Get the directory path
script_dir = os.path.dirname(os.getcwd())
db_local_path = "data/processed/atn_db.sqlite"
## df set up from Keshav (NO CHANGE) (Weighted Graph)
df = pd.DataFrame()
db_path = os.path.join(script_dir, db_local_path)
fields = ["Origin_Airport_Code", "Destination_Airport_Code", "Can_Status"]
df_net = atn_analysis.raw_query(db_path,file,airline)
df["Origin_Airport_Code"] = df_net.Origin_Airport_Code
df["Destination_Airport_Code"] = df_net.Destination_Airport_Code
df["Can_Status"] = df_net.Can_Status
by_origin = df.groupby([df.Origin_Airport_Code]).Can_Status.count()
airport_list = by_origin.index.tolist()
df = df[df['Destination_Airport_Code'].isin(airport_list)]
#print(df)
df_tuple = pd.DataFrame()
df_weighted = df.groupby([df.Origin_Airport_Code, df.Destination_Airport_Code]).Can_Status.count().reset_index()
df_tuple["Origin"] = df_weighted.Origin_Airport_Code
df_tuple["Destination"] = df_weighted.Destination_Airport_Code
if int(file)%4 == 0:
days = 366
else:
days = 365
df_tuple["Weight"] = df_weighted.Can_Status/days
df_tuple.Weight = 1/df_tuple.Weight
## Output lists initialization:
#day_IAPL = 0
day_CS = 0
#output_IAPL = []
output_CS = []
NoD = []
## Graph object initialization
graph = [tuple(x) for x in df_tuple.to_records(index=False)]
G = nx.Graph()
## Set up the weighted graph
G.add_weighted_edges_from(graph)
#print(G.nodes())
tempG = G.copy() #use temporary graph for the loop
## Remove list for the whole year
Total_Remove_List = get_remove_list(db_path,file,include_data, airline, can_limit, zs_limit,processed_direc)
if int(file)%4 == 0:
total_day = 366
else:
total_day = 365
for j in range(total_day):
## Remove the nodes in each day and get the CS and IAPL data
#day_IAPL = 0
Day_Remove_List = Total_Remove_List[j]
NoD.append(j)
for l in Day_Remove_List:
tempG.remove_node(l)
#largest_component_b = max(nx.connected_components(tempG), key=len)
#day_IAPL =(inv_average_shortest_path_length_W(tempG))
largest_component_b = max(nx.connected_components(tempG), key=len)
day_CS = len(largest_component_b)
#len(largest_component_b) = cluster size
#cluster fraction = cluster size/number of nodes
#output_IAPL.append(day_IAPL)
output_CS.append(day_CS)
#sum_IAPL = sum_IAPL + (inv_average_shortest_path_length(tempG))
tempG = G.copy()
## plotting command
plt.figure(figure_num)
#line = plt.plot(NoD,output_IAPL, label="{}".format(airline))
line = plt.plot(NoD,output_CS, label="{}".format(airline))
plt.legend()
#CSV_df[airline] = output_IAPL
CSV_df[airline] = output_CS
#CSV_file = "%s_DD_IAPL.csv" %(file)
CSV_file = "%s%s_DD_CS.csv" %(graph_direc,file)
CSV_df.to_csv(CSV_file, index=False)
#plt.title("{} Data Driven IAPL".format(str(file)))
plt.xlabel("Day")
#plt.ylabel("IAPL")
plt.ylabel("Cluster Size")
#plt.savefig("{}_Data_Driven_IAPL.png".format(str(file)))
plt.savefig("%s%s_Data_Driven_CS.png"%(graph_direc,file))
plt.show()
figure_num = figure_num + 1
def Pure_Graph_W_Shu(file_list, airline_list, include_data, processed_direc, rep_num):
"""
Calculate the linear algebraic connectivity, cluster size and IAPL for each day in a year after random removal based on Pure Graph method.
Random Removal set up by shuffle function
Parameters
----------
file_list: list
        List containing years to process
    airline_list: list
        List containing airlines to process
include_data: string
Specify what kind of data to include in processed flight data. See drop_flights in M-D File. Possible parameters are:
CC: Cancellations only
ADD: Arrival delays including diversions
ADM: Purely arrival delays excluding cancellations or diversions
DCC: Combined delay. If arrival delay is greater than a set threshold, the flight is considered cancelled
DD: Departure delays. Does not include cancelled or diverted flights.
rep_num: int
        Number of repetitions
Returns
-------
csv with the cluster size and IAPL for each day of the year after removal based on data-driven method.
Notes
-----
"""
for airline in airline_list:
rep_ite = 1
Total_AC = []
Total_Cluster_Size = []
Total_IAPL = []
for i in range(len(file_list)):
## initialize the output lists
Total_AC.append(0)
Total_Cluster_Size.append(0)
Total_IAPL.append(0)
## Save the data in csv
filename1 = "%s%s_ACR.csv" %(processed_direc,airline)
with open(filename1, 'w') as myfile1:
wr1 = csv.writer(myfile1, quoting=csv.QUOTE_ALL)
wr1.writerow(file_list)
filename2 = "%s%s_IAPLR.csv" %(processed_direc,airline)
with open(filename2, 'w') as myfile2:
wr2 = csv.writer(myfile2, quoting=csv.QUOTE_ALL)
wr2.writerow(file_list)
filename3 = "%s%s_CSR.csv" %(processed_direc,airline)
with open(filename3, 'w') as myfile3:
wr3 = csv.writer(myfile3, quoting=csv.QUOTE_ALL)
wr3.writerow(file_list)
while rep_ite < rep_num+1:
## start the reptition
year_IAPL = []
year_Cluster_Size = []
year_AC = []
for file in file_list:
## Get the directory path
script_dir = os.path.dirname(os.getcwd())
db_local_path = "data/processed/atn_db.sqlite"
## df set up from Keshav (NO CHANGE)
                df = pd.DataFrame()
# -*- coding: utf-8 -*-
"""
Created on Tue Mar 23 19:41:56 2021
@author: u0117123
"""
#Import modules
import pandas as pd
import numpy as np
import sklearn
from sklearn.linear_model import LogisticRegression
#Input variables
Validation_Area="Tervuren"
#Reference objects with features path
refObjectPath = r'C:\Users\u0117123\Box Sync\FWO\WP1\Point-cloud-extractions\processing\5_Clustering_classification\Reference'
ClusteredObjectPath = r'C:\Users\u0117123\Box Sync\FWO\WP1\Point-cloud-extractions\processing\5_Clustering_classification\3SA\r10\RF_all'
#%% LOGISTIC REGRESSION MODEL
### STEP 1 ### IMPORT DATA
data_density_loop_all = pd.read_csv(refObjectPath + "\data_density_loop_Reference.csv", sep=";", index_col=(0))
data_density_loop = data_density_loop_all.loc[data_density_loop_all['location'] != Validation_Area]
data_density_loop['height7_1'] = data_density_loop['height7']/data_density_loop['height1']
data_density_loop['height7_2'] = data_density_loop['height7']/data_density_loop['height2']
data_density_loop['height5_1'] = data_density_loop['height5']/data_density_loop['height1']
data_density_loop['height10_2'] = data_density_loop['height10']/data_density_loop['height2']
data_density_loop['height10_1'] = data_density_loop['height10']/data_density_loop['height1']
columns_x = ["min_z", "max_z", "min_slope_rel", "max_slope_rel", "area",
"m_z_chm","m_nr_returns", "3D_dens","height7_1", "height5_1",
"height10_2", "height10_1", "height7_2"]
data_density_loop_x = data_density_loop[columns_x] #independent variables
data_density_loop_ground_p_density = data_density_loop[["ground_p_density"]]
data_density_loop_y = data_density_loop[["Type"]] #Response variable
#Convert response variable to binary values (shrub = 1; tree = 0)
shrub = ["shrub"]
data_density_loop_y["y"] = np.where(data_density_loop_y["Type"].isin(shrub), "1", "0")
data_density_loop_y = data_density_loop_y.drop(['Type'], axis=1)
# convert dataframe response variable to matrix
conv_arr = data_density_loop_y.values
y_array = conv_arr.ravel()
#%%## STEP 2 ### Check for correlations
import matplotlib.pyplot as plt
import seaborn as sns
# Create correlation matrix & selecting upper triangle
cor_matrix = data_density_loop_x.corr().abs()
plt.figure(figsize = (20,10)) # Size of the figure
sns.heatmap(data_density_loop_x.corr().abs(),annot = True)
plt.show()
upper_tri = cor_matrix.where(np.triu(np.ones(cor_matrix.shape), k=1).astype(bool))
#print(upper_tri)
# Droping the column with correlation > 95%
to_drop = [column for column in upper_tri.columns if any(upper_tri[column] > 0.95)] #height5_1, height10_1
#print(); print(to_drop)
data_density_loop_x_dropCorr = data_density_loop_x.drop(to_drop, axis=1)
#print(); print(data_density_loop_x_dropCorr.head())
#%%## STEP 3 ### Cross validation loop
#merge independent variables and dependent variable
data_density_loop_xy_dropCorr = pd.concat([data_density_loop_x_dropCorr,data_density_loop_y], axis=1)
data_density_loop_xy_dropCorr = data_density_loop_xy_dropCorr.reset_index(drop=True)
#split in 10 parts
data_density_loop_xy_dropCorr_shuffled = data_density_loop_xy_dropCorr.sample(frac=1, random_state=1) #shuffle dataframe
data_density_loop_xy_dropCorr_shuffled_List = np.array_split(data_density_loop_xy_dropCorr_shuffled, 10)
#Empty dataframes
rfe_features_append = []
sp_features_append = []
accuracy_append = []
#for loop cross validation
for x in range(10):
trainList = []
for y in range(10):
if y == x :
testdf = data_density_loop_xy_dropCorr_shuffled_List[y]
else:
trainList.append(data_density_loop_xy_dropCorr_shuffled_List[y])
traindf = pd.concat(trainList)
#independent variables and response variable
X_train = traindf.drop(columns=['y'])
y_train = traindf['y']
X_test = testdf.drop(columns=['y'])
y_test = testdf['y']
### STEP 3.1 ### Create scaler
from sklearn import preprocessing
import numpy as np
scaler = preprocessing.StandardScaler().fit(X_train)
X_train_scaled = scaler.transform(X_train)
X_train_scaled = pd.DataFrame(data = X_train_scaled, columns=X_train.columns)
X_test_scaled = scaler.transform(X_test)
X_test_scaled = pd.DataFrame(data = X_test_scaled, columns=X_test.columns)
### STEP 3.2 ### Feature selection
### Step 3.2.1 Recursive Feature Elimination
from sklearn.feature_selection import RFE
from sklearn.linear_model import LogisticRegression
logreg = LogisticRegression()
rfe = RFE(logreg, n_features_to_select = 5) # running RFE with 5 variables as output
rfe = rfe.fit(X_train_scaled, y_train)
#create training and testing dataframe with selected features
col_rfe = X_train_scaled.columns[rfe.support_]
X_train_scaled_rfe = X_train_scaled[col_rfe]
X_test_scaled_rfe = X_test_scaled[col_rfe]
#create dataframe with selected features per fold
rfe_features_columns = ["fold", "features"]
rfe_features = pd.DataFrame(columns = rfe_features_columns)
rfe_features["features"] = X_train_scaled_rfe.columns
rfe_features["fold"] = x
rfe_features_append.append(rfe_features)
    ### STEP 3.2.2 Select Percentile (ANOVA F-value, retain features in the top 70% of scores)
from sklearn.feature_selection import SelectPercentile, SelectKBest, f_classif
sp = SelectPercentile(f_classif, percentile=70).fit(X_train_scaled, y_train)
index_selfeat = (sp.get_support(indices=True)).tolist()
X_train_scaled_sp = X_train_scaled.iloc[:,index_selfeat]
X_test_scaled_sp = X_test_scaled.iloc[:,index_selfeat]
#create dataframe with selected features per fold
sp_features_columns = ["fold", "features"]
sp_features = pd.DataFrame(columns = sp_features_columns)
sp_features["features"] = X_train_scaled_sp.columns
sp_features["fold"] = x
sp_features_append.append(sp_features)
### STEP 4 ### Build models using all or selected features
### STEP 4.1 Full model
logreg_Full = LogisticRegression(random_state=0).fit(X_train_scaled, y_train)
# print('Logistic Regression score for training set: %f' % logreg_Full.score(X_train_scaled, y_train))
y_pred_full = logreg_Full.predict(X_test_scaled)
score_full = logreg_Full.score(X_test_scaled, y_test) # Use score method to get accuracy of model
### STEP 4.2 Recursive Feature Elimination
logreg_RFE = LogisticRegression(random_state=0).fit(X_train_scaled_rfe, y_train)
# print('Logistic Regression score for training set: %f' % logreg_RFE.score(X_train_scaled_rfe, y_train))
y_pred_rfe = logreg_RFE.predict(X_test_scaled_rfe)
score_rfe = logreg_RFE.score(X_test_scaled_rfe, y_test) # Use score method to get accuracy of model
### STEP 4.3 Select Percentile
logreg_SP = LogisticRegression(random_state=0).fit(X_train_scaled_sp, y_train)
# print('Logistic Regression score for training set: %f' % logreg_SP.score(X_train_scaled_sp, y_train))
y_pred_sp = logreg_SP.predict(X_test_scaled_sp)
score_sp = logreg_SP.score(X_test_scaled_sp, y_test) # Use score method to get accuracy of model
#create dataframe with scores per fold
accuracy_columns = ["fold", "accuracy_full", "accuracy_rfe", "accuracy_sp"]
accuracy = pd.DataFrame(columns = accuracy_columns)
new_row = {'accuracy_full':score_full, 'accuracy_rfe':score_rfe, 'accuracy_sp':score_sp, 'fold':x}
accuracy = accuracy.append(new_row, ignore_index=True)
accuracy_append.append(accuracy)
rfe_features_append = pd.concat(rfe_features_append)
# coding:utf-8
#
# The MIT License (MIT)
#
# Copyright (c) 2016-2020
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import re
from datetime import datetime
import json
import logging
import webbrowser
import numpy as np
import pandas as pd
from czsc.Data.data_fq import data_stock_to_fq
from czsc.Fetch.mongo import FACTOR_DATABASE
from czsc.Fetch.tdx import get_bar
from czsc.Indicator import IndicatorSet
from czsc.Utils.echarts_plot import kline_pro
from czsc.Utils.logs import util_log_info
from czsc.Utils.trade_date import TradeDate, util_get_real_date, util_get_next_day
from czsc.Utils.transformer import DataEncoder
def identify_direction(v1, v2):
    if v1 > v2:  # the first few bars may all be containing bars; assign the initial value directly here, an uptrend is positive
direction = 1
else:
direction = -1
return direction
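# Example (a minimal sketch): identify_direction(10.5, 10.2) -> 1 (up),
# identify_direction(9.8, 10.2) -> -1 (down).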
def update_fx(bars, new_bars: list, fx_list: list, trade_date: list):
"""更新分型序列
k线中有direction,fx中没有direction字段
分型记对象样例:
{
'date': Timestamp('2020-11-26 00:00:00'),
'fx_mark': -1, 低点用—1表示
'value': 138.0,
'fx_start': Timestamp('2020-11-25 00:00:00'),
'fx_end': Timestamp('2020-11-27 00:00:00'),
}
{
'date': Timestamp('2020-11-26 00:00:00'),
'fx_mark': +1, 高点用+1表示
'value': 150.67,
'fx_start': Timestamp('2020-11-25 00:00:00'),
'fx_end': Timestamp('2020-11-27 00:00:00'),
}
"""
assert len(bars) > 0
bar = bars[-1].copy()
if len(trade_date) > 1:
if TradeDate(bar['date']) < TradeDate(trade_date[-1]):
util_log_info('{} data is older than {} !'.format(bar['date'], trade_date[-1]))
return
trade_date.append(bar['date'])
    # the first bar has no direction; nothing to do
if len(bars) < 2:
new_bars.append(bar)
return False
last_bar = new_bars[-1]
cur_h, cur_l = bar['high'], bar['low']
last_h, last_l, last_dt = last_bar['high'], last_bar['low'], last_bar['date']
    # containing relations have already been handled, so a single value is enough to identify the trend
    direction = identify_direction(cur_h, last_h)
    # the second bar only needs its direction updated
if len(bars) < 3:
bar.update(direction=direction)
new_bars.append(bar)
return False
last_direction = last_bar.get('direction')
    # no containing relation: run fractal detection, the trend may change
if (cur_h > last_h and cur_l > last_l) or (cur_h < last_h and cur_l < last_l):
new_bars.append(bar)
        # fractal detection
if last_direction * direction < 0:
bar.update(direction=direction)
if direction < 0:
fx = {
"date": last_bar['date'],
"fx_mark": 1,
"value": last_bar['high'],
"fx_start": new_bars[-3]['date'], # 记录分型的开始和结束时间
"fx_end": bar['date'],
# "direction": bar['direction'],
}
else:
fx = {
"date": last_bar['date'],
"fx_mark": -1,
"value": last_bar['low'],
"fx_start": new_bars[-3]['date'], # 记录分型的开始和结束时间
"fx_end": bar['date'],
# "direction": bar['direction'],
}
fx_list.append(fx)
return True
bar.update(direction=last_direction + np.sign(last_direction))
return False
    # containing relation: no fractal detection needed, the trend is unchanged and |direction| keeps growing
    bar.update(direction=last_direction + np.sign(last_direction))
    new_bars.pop(-1)  # the previous (contained) bar is dropped here; this is a small trick
    # containing relation: handle each direction separately and also update the date
if last_direction > 0:
if cur_h < last_h:
bar.update(high=last_h, date=last_dt)
if cur_l < last_l:
bar.update(low=last_l)
elif last_direction < 0:
if cur_l > last_l:
bar.update(low=last_l, date=last_dt)
if cur_h > last_h:
bar.update(high=last_h)
else:
logging.error('{} last_direction: {} is wrong'.format(last_dt, last_direction))
raise ValueError
new_bars.append(bar)
return False
class XdList(object):
"""存放线段"""
def __init__(self, bars, indicators, trade_date):
# 传入的是地址,不要修改
self.bars = bars
self.indicators = indicators
self.trade_date = trade_date
# item存放数据元素
self.xd_list = [] # 否则指向同一个地址
# 低级别的中枢
self.zs_list = []
self.sig_list = []
# next是低一级别的线段
self.next = None
# prev 指向高一级别的线段
self.prev = None
def __len__(self):
return len(self.xd_list)
def __getitem__(self, item):
return self.xd_list[item]
def __setitem__(self, key, value):
self.xd_list[key] = value
def append(self, value):
self.xd_list.append(value)
def update_zs(self):
"""
{
'zs_start': 进入段的起点
'zs_end': 离开段的终点
'ZG': 中枢高点,
'ZD': 中枢低点,
'GG': 中枢最低点,
'DD': 中枢最高点,
'xd_list': list[dict]
'location': 中枢位置
}
"""
xd_list = self.xd_list
if len(xd_list) < 3:
return False
zs_list = self.zs_list
if len(zs_list) < 1:
assert len(xd_list) < 4
zg = xd_list[0] if xd_list[0]['fx_mark'] > 0 else xd_list[1]
zd = xd_list[0] if xd_list[0]['fx_mark'] < 0 else xd_list[1]
zs = {
'ZG': zg,
'ZD': zd,
                'GG': [zg],  # stored as a list to record how the extremes evolve; may be rolled back when the pivot completes
                'DD': [zd],  # the evolution of the extremes identifies expanding, converging, upward or downward shapes
                'xd_list': xd_list[:2],
                'weight': 1,  # number of segments inside the pivot
                'location': 0,  # 0 means no direction yet; -1 marks the 1st pivot of a decline, +2 the 2nd pivot of a rise
                'real_loc': 0  # location excluding pivots that contain only a single segment
}
zs_list.append(zs)
return False
        # only confirmed strokes take part in building the pivot
last_zs = zs_list[-1]
xd = xd_list[-2]
if TradeDate(last_zs['xd_list'][-1]['date']) >= TradeDate(xd['date']):
            # this pivot has already been computed
return False
if xd['fx_mark'] > 0:
            # third sell (lagging); in practice a first-buy signal has already appeared
if xd['value'] < last_zs['ZD']['value']:
zs_end = last_zs['xd_list'].pop(-1)
if zs_end['date'] == last_zs['DD'][-1]['date']:
last_zs['DD'].pop(-1)
last_zs.update(
zs_end=zs_end,
weight=last_zs['weight'] - 1,
DD=last_zs['DD'],
real_loc=last_zs['real_loc'] + 1 if last_zs['weight'] == 2 else last_zs['real_loc']
)
zs = {
'zs_start': xd_list[-4],
'ZG': xd,
'ZD': zs_end,
'GG': [xd],
'DD': [zs_end],
'xd_list': [zs_end, xd],
'weight': 1,
'location': -1 if last_zs['location'] >= 0 else last_zs['location'] - 1,
'real_loc': -1 if last_zs['real_loc'] >= 0 else last_zs['real_loc'] - 1,
}
zs_list.append(zs)
return True
elif xd['value'] < last_zs['ZG']['value']:
last_zs.update(ZG=xd)
            # may become the leaving segment
elif xd['value'] > last_zs['GG'][-1]['value']:
last_zs['GG'].append(xd)
elif xd['fx_mark'] < 0:
            # third buy (lagging); in practice a first-sell signal has already appeared
if xd['value'] > last_zs['ZG']['value']:
zs_end = last_zs['xd_list'].pop(-1)
if zs_end['date'] == last_zs['GG'][-1]['date']:
last_zs['GG'].pop(-1)
last_zs.update(
zs_end=zs_end,
weight=last_zs['weight'] - 1,
GG=last_zs['GG'],
real_loc=last_zs['real_loc'] - 1 if last_zs['weight'] == 2 else last_zs['real_loc']
)
zs = {
'zs_start': xd_list[-4],
'ZG': zs_end,
'ZD': xd,
'GG': [zs_end],
'DD': [xd],
'xd_list': [zs_end, xd],
'weight': 1,
'location': 1 if last_zs['location'] <= 0 else last_zs['location'] + 1,
'real_loc': 1 if last_zs['real_loc'] <= 0 else last_zs['real_loc'] + 1,
}
zs_list.append(zs)
return True
elif xd['value'] > last_zs['ZD']['value']:
last_zs.update(ZD=xd)
            # may become the leaving segment
elif xd['value'] < last_zs['DD'][-1]['value']:
last_zs['DD'].append(xd)
else:
raise ValueError
last_zs['xd_list'].append(xd)
last_zs['weight'] = last_zs['weight'] + 1
return False
def update_xd_eigenvalue(self):
trade_date = self.trade_date
xd = self.xd_list[-1]
last_xd = self.xd_list[-2]
# xd.update(pct_change=(xd['value'] - last_xd['value']) / last_xd['value'])
#
start = trade_date.index(last_xd['date'])
end = trade_date.index(xd['date'])
kn = end - start + 1
fx_mark = kn * np.sign(xd.get('fx_mark', xd.get('direction', 0)))
dif = self.indicators.macd[end]['dif']
macd = sum([x['macd'] for x in self.indicators.macd[start: end + 1] if fx_mark * x['macd'] > 0])
xd.update(fx_mark=fx_mark, dif=dif, macd=macd)
# xd.update(fx_mark=fx_mark, dif=dif, avg_macd=macd/kn)
def update_xd(self):
"""更新笔分型序列
分型记对象样例:
{
'date': Timestamp('2020-11-26 00:00:00'),
'fx_mark': -8, 低点,负数,表示下降趋势持续的K线根数
'value': 138.0,
'fx_start': Timestamp('2020-11-25 00:00:00'),
'fx_end': Timestamp('2020-11-27 00:00:00'),
}
{
'date': Timestamp('2020-11-26 00:00:00'),
'fx_mark': 7, 高点, 正数,表示上升趋势持续的根数
'value': 150.67,
'fx_start': Timestamp('2020-11-25 00:00:00'),
'fx_end': Timestamp('2020-11-27 00:00:00'),
}
"""
        # at least 3 fractals of the same type are needed before a segment can appear;
        # the last stroke is unconfirmed, so the last segment is unconfirmed as well
        if self.next is None:
            self.next = XdList(self.bars, self.indicators, self.trade_date)
        bi_list = self.xd_list
        xd_list = self.next
        if len(bi_list) < 4:
            return False
        if len(xd_list) < 1:
            # no segment yet: initialize one from the highest and lowest of the first 4 points
bi_list = bi_list[:-1].copy()
bi_list = sorted(bi_list, key=lambda x: x['value'], reverse=False)
if TradeDate(bi_list[0]['date']) < TradeDate(bi_list[-1]['date']):
xd_list.append(bi_list[0])
xd_list.append(bi_list[-1])
else:
xd_list.append(bi_list[-1])
xd_list.append(bi_list[0])
xd_list.update_xd_eigenvalue()
return True
bi3 = bi_list[-3]
xd = bi_list[-1].copy()
last_xd = xd_list[-1]
xd2 = xd_list[-2]
        # if xd['date'] > pd.to_datetime('2016-07-12'):
        #     print('test')
        # segment not ending on a fractal: replace it with the fractal directly; no new segment
        # is added and nothing else needs processing, the same endpoint is confirmed
        if 'direction' in last_xd or xd['date'] == last_xd['date']:
            xd_list[-1] = xd  # when the dates are equal, has it already been modified in memory?
xd_list.update_xd_eigenvalue()
return True
# assert xd['date'] > last_xd['date']
if TradeDate(xd['date']) <= TradeDate(last_xd['date']):
util_log_info('The {} quotes bar input maybe wrong!'.format(xd['date']))
        if bi3['fx_mark'] > 0:  # strokes -1 and -3 share the same direction; stroke -1 is unconfirmed and may lack the fx_mark field
            # continuation in the same direction
if last_xd['fx_mark'] > 0 and xd['value'] > last_xd['value']:
xd_list[-1] = xd
xd_list.update_xd_eigenvalue()
return True
            # opposite-direction checks
elif last_xd['fx_mark'] < 0:
                # price confirmation
if xd['value'] > xd2['value']:
xd_list.append(xd)
xd_list.update_xd_eigenvalue()
return True
                # three strokes break the segment: two consecutive strokes, each one higher, so look for the highest point between the segments
elif TradeDate(bi3['date']) > TradeDate(last_xd['date']) and xd['value'] > bi3['value']:
index = -5
bi = bi_list[index]
                    # # two consecutive highs did not reach the previous low of the segment
# try:
# if TradeDate(bi['date']) < TradeDate(last_xd['date']) and \
# bi_list[index - 1]['value'] > bi3['value'] and \
# bi_list[index]['value'] > xd['value']:
# return False
# except Exception as err:
# pass
# # util_log_info('Last xd {}:{}'.format(last_xd['date'], err))
while TradeDate(bi['date']) > TradeDate(last_xd['date']):
if xd['value'] < bi['value']:
xd = bi
index = index - 2
bi = bi_list[index]
xd_list.append(xd)
xd_list.update_xd_eigenvalue()
return True
elif bi3['fx_mark'] < 0:
            # continuation in the same direction
if last_xd['fx_mark'] < 0 and xd['value'] < last_xd['value']:
xd_list[-1] = xd
xd_list.update_xd_eigenvalue()
return True
            # opposite-direction checks
elif last_xd['fx_mark'] > 0:
                # price confirmation
if xd['value'] < xd2['value']:
xd_list.append(xd)
xd_list.update_xd_eigenvalue()
return True
                # three strokes break the segment: two consecutive strokes, each one lower; take the lowest stroke as the segment start to avoid an endpoint that is not the actual low
elif TradeDate(bi3['date']) > TradeDate(last_xd['date']) and xd['value'] < bi3['value']:
index = -5
bi = bi_list[index]
                    # two consecutive lows did not reach the previous high of the segment
# try:
# if TradeDate(bi['date']) < TradeDate(last_xd['date']) and \
# bi_list[index - 1]['value'] < bi3['value'] and \
# bi_list[index]['value'] < xd['value']:
# return False
# except Exception as err:
# pass
# # util_log_info('Last xd {}:{}'.format(last_xd['date'], err))
while TradeDate(bi['date']) > TradeDate(last_xd['date']):
if xd['value'] > bi['value']:
xd = bi
index = index - 2
bi = bi_list[index]
xd_list.append(xd)
xd_list.update_xd_eigenvalue()
return True
return False
def update_sig(self):
"""
        Called after the segment list is updated; decides whether a buy/sell point has appeared.
"""
if len(self.zs_list) < 1:
return False
zs = self.zs_list[-1]
xd = self.xd_list[-1]
xd_list = zs['xd_list'].copy()
if 'zs_start' in zs:
xd_list.insert(0, zs['zs_start'])
sig = {
'date': self.bars[-1]['date'],
'real_loc': zs['real_loc'],
'location': zs['location'],
'weight': zs['weight'],
# 'fx_mark': xd['fx_mark'],
# 'last_mark': last_xd['fx_mark'],
# 'time_ratio': abs(xd['fx_mark'] / last_xd['fx_mark']) * 100,
# 'pct_change': xd['pct_change'] * 100,
# 'macd': xd['macd'],
# 'avg_macd': xd['avg_macd'],
}
# if sig['date'] >= pd.to_datetime('2021-07-28'):
# print(sig['date'])
        if xd['fx_mark'] > 0:  # uptrend
# sig.update(GG_macd=zs['GG'][-1].get('macd', np.nan), GG_avg_macd=zs['GG'][-1].get('avg_macd', np.nan))
# if zs['location'] > 0 and zs.get('zs_start', False):
# sig.update(start_macd=zs['zs_start']['macd'], start_avg_macd=zs['zs_start']['avg_macd'])
sig.update(boll=self.indicators.boll[-1].get('UB', np.nan) / self.bars[-1]['high'] * 100 - 100)
if xd['value'] > zs['GG'][-1]['value']:
                xd_mark = -1  # if weight == 1 and there is divergence, possibly a first sell
# resistance = np.nan
# support = zs['GG'][-1]['value'] / xd['value'] - 1
elif xd['value'] > zs['ZG']['value']:
                xd_mark = -2  # if weight == 1 and there is divergence, possibly a second sell
# resistance = zs['GG'][-1]['value'] / xd['value'] - 1
# support = zs['ZG']['value'] / xd['value'] - 1
elif xd['value'] > zs['ZD']['value']:
if sig['weight'] == 1:
xd_mark = -2
else:
xd_mark = -2.5
# resistance = zs['ZG']['value'] / xd['value'] - 1
# support = zs['ZD']['value'] / xd['value'] - 1
elif xd['value'] > zs['DD'][-1]['value']:
                xd_mark = -3  # third sell
# resistance = zs['ZD']['value'] / xd['value'] - 1
# support = zs['DD'][-1]['value'] / xd['value'] - 1
else:
                xd_mark = -4  # third sell
# resistance = zs['DD'][-1]['value'] / xd['value'] - 1
# support = np.nan
        elif xd['fx_mark'] < 0:  # downtrend
# sig.update(DD_macd=zs['DD'][-1].get('macd', np.nan), DD_avg_macd=zs['DD'][-1].get('avg_macd', np.nan))
# if zs['location'] < 0 and zs.get('zs_start', False):
# sig.update(start_macd=zs['zs_start']['macd'], start_avg_macd=zs['zs_start']['avg_macd'])
sig.update(boll=100 - self.indicators.boll[-1].get('LB', np.nan) / self.bars[-1]['low'] * 100)
            if xd['value'] > zs['GG'][-1]['value']:  # the > GG case should not occur: while the 3rd buy is unconfirmed, the leaving segment's high still belongs to the current pivot
                xd_mark = 4  # third buy
# resistance = np.nan
# support = zs['GG'][-1]['value'] / xd['value'] - 1
elif xd['value'] > zs['ZG']['value']:
xd_mark = 3
# resistance = zs['GG'][-1]['value'] / xd['value'] - 1
# support = zs['ZG']['value'] / xd['value'] - 1
elif xd['value'] > zs['ZD']['value']:
if sig['weight'] == 1:
xd_mark = 2
else:
xd_mark = 2.5
# resistance = zs['ZG']['value'] / xd['value'] - 1
# support = zs['ZD']['value'] / xd['value'] - 1
            elif xd['value'] >= zs['DD'][-1]['value']:  # if equal to the pivot's lowest value, classify as a second buy because the segment has not upgraded
                xd_mark = 2  # if weight == 1 and there is divergence, possibly a second buy
# resistance = zs['ZD']['value'] / xd['value'] - 1
# support = zs['DD'][-1]['value'] / xd['value'] - 1
else:
                xd_mark = 1  # if weight == 1 and there is divergence, possibly a first buy
# resistance = zs['DD'][-1]['value'] / xd['value'] - 1
# support = np.nan
else:
raise ValueError
# sig.update(xd_mark=xd_mark, support=support * 100, resistance=resistance * 100)
sig.update(xd_mark=xd_mark)
start_xd = xd_list[-1]
        # duration and magnitude of the current segment, i.e. how far a downtrend has retraced
        sig.update(valueback=(self.bars[-1]['close'] / start_xd['value'] - 1) * 100)
        sig.update(timeback=xd['fx_mark'])
        if xd_mark in [3, -3, 4, -4]:  # for third buy/sell points comparing the MACD is not meaningful
sig.update(start=start_xd['fx_start'], dif=0, macd=0)
self.sig_list.append(sig)
return
direction = np.sign(xd['fx_mark'])
xd_list.reverse()
        # find the segment start used for the divergence comparison, usually the extreme of the pivot plus the entering segment
        for idx, _xd in enumerate(xd_list[1:]):
            if idx % 2 == 0:  # same-direction segment
if _xd['value'] * direction > xd['value'] * direction:
break
else:
if _xd['value'] * direction < start_xd['value'] * direction:
start_xd = _xd
# break
sig.update(start=start_xd['fx_start'])
index = xd_list.index(start_xd) - 1
        if index < 0:  # only the current stroke exists, nothing to compare against
sig.update(dif=0, macd=0)
self.sig_list.append(sig)
return
cmp_xd = xd_list[index]
compare_dif = cmp_xd.get('dif')
compare_macd = cmp_xd.get('macd')
dif = xd.get('dif')
macd = xd.get('macd')
if compare_dif and dif:
if dif * direction > compare_dif * direction:
sig.update(dif=-1)
else:
sig.update(dif=1)
if compare_macd and macd:
if macd * direction > compare_macd * direction:
sig.update(macd=-1)
else:
sig.update(macd=1)
self.sig_list.append(sig)
def update(self):
self.update_zs()
        # compute the corresponding buy/sell points
self.update_sig()
return self.update_xd()
def update_bi(new_bars: list, fx_list: list, bi_list: XdList, trade_date: list):
"""更新笔序列
笔标记对象样例:和分型标记序列结构一样
{
'date': Timestamp('2020-11-26 00:00:00'),
'code': code,
'fx_mark': 'd',
'value': 138.0,
'fx_start': Timestamp('2020-11-25 00:00:00'),
'fx_end': Timestamp('2020-11-27 00:00:00'),
}
{
'date': Timestamp('2020-11-26 00:00:00'),
'code': code,
'fx_mark': 'g',
'value': 150.67,
'fx_start': Timestamp('2020-11-25 00:00:00'),
'fx_end': Timestamp('2020-11-27 00:00:00'),
}
return: True 笔的数据出现更新,包括新增笔或者笔的延续
"""
    # every bar must be evaluated against the stroke (bi)
bar = new_bars[-1].copy()
if TradeDate(bar['date']) < TradeDate(trade_date[-1]):
        # a contained bar cannot change the stroke state, so nothing to do
return False
if len(fx_list) < 2:
return False
bi = fx_list[-1].copy()
    # no stroke yet: the first two fractals form the first stroke; on incremental updates the
    # stroke built from the two endpoints fetched from the database is already confirmed
if len(bi_list) < 1:
bi2 = fx_list[-2].copy()
bi_list.append(bi2)
bi_list.append(bi)
bi_list.update_xd_eigenvalue()
return False
last_bi = bi_list[-1]
bar.update(value=bar['high'] if bar['direction'] > 0 else bar['low'])
# if bar['date'] > pd.to_datetime('2020-09-08'):
# print('error')
    # bar-confirmation mode: the current bar is later than the fractal bar, so the incoming data is a raw bar
if TradeDate(bar['date']) > TradeDate(bi['fx_end']):
        if 'direction' not in last_bi:  # the stroke ends on a fractal
            # trend continuation: first check whether the trend continues; because containing bars
            # were merged the highs/lows may be off, and counter-trend extremes are ignored
            # if the next bar continues the trend the endpoint moves forward; if it goes against the trend the point is ignored
            # todo for bars with containing relations handled, one of these checks is redundant; bar['value'] could be used directly
if (last_bi['fx_mark'] > 0 and bar['high'] > last_bi['value']) \
or (last_bi['fx_mark'] < 0 and bar['low'] < last_bi['value']):
bi_list[-1] = bar
bi_list.update_xd_eigenvalue()
return True
try:
kn_inside = trade_date.index(bar['date']) - trade_date.index(last_bi['fx_end']) - 1
except:
print('error')
            # todo needs at least 2 bars; the time confirmation must be opposite to the previous stroke, which can produce an endpoint that is not the actual extreme
if kn_inside > 1 and bar['direction'] * last_bi['fx_mark'] < 0:
                # find the first fractal in the same direction
index = -1
while TradeDate(bi['date']) > TradeDate(last_bi['date']):
if bar['direction'] * bi['fx_mark'] > 0:
break
index = index - 1
bi = fx_list[index]
if (bar['direction'] * bi['fx_mark'] > 0) \
and (np.sign(bar['direction']) * bar['value'] < bi['fx_mark'] * bi['value']):
                    bi['fx_end'] = bar['date']  # seems to have little effect?
bi_list.append(bi)
else:
bi_list.append(bar)
bi_list.update_xd_eigenvalue()
return True
            # only one endpoint exists, so there is no price confirmation yet
if len(bi_list) < 2:
return False
            # price confirmation
            # todo for bars with containing relations handled, one of these checks is redundant; bar['value'] could be used directly
if (last_bi['fx_mark'] < 0 and bar['high'] > bi_list[-2]['value']) \
or (last_bi['fx_mark'] > 0 and bar['low'] < bi_list[-2]['value']):
bi_list.append(bar)
bi_list.update_xd_eigenvalue()
return True
        else:  # continuation of an existing stroke that has not yet ended on a fractal
assert bar['direction'] * last_bi['direction'] > 0
# if bar['direction'] * last_bi['direction'] < 0:
# print('error')
# return False
bi_list[-1] = bar
bi_list.update_xd_eigenvalue()
return True
return False
    # stroke not ending on a fractal: replace it with the fractal directly; no new stroke is added, the same endpoint is confirmed
if 'direction' in last_bi or bi['date'] == last_bi['date']:
bi_list[-1] = bi
bi_list.update_xd_eigenvalue()
return True
    # fx_end handling: after the fractal is processed, fx_end (the current bar) still needs handling
    # because fractal confirmation lags; otherwise strokes can be missed or recognized late
    # since this is a fractal, only continuation matters, so the bar direction must match the previous stroke
def handle_fx_end():
assert bar['date'] == bi['fx_end']
if bar['direction'] * last_bi['fx_mark'] < 0:
return False
if last_bi['fx_mark'] * bar['value'] > last_bi['fx_mark'] * last_bi['value']:
bi_list[-1] = bar
bi_list.update_xd_eigenvalue()
return True
    # fractal handling for consecutive highs/lows: only decide whether the endpoint moves forward, no new stroke is added
    # the stroke's fx_mark is not necessarily +1 or -1, which is why np.sign is used to take the sign
    # todo why does joining the two if conditions with `and` give a wrong result?
if last_bi['fx_mark'] * bi['fx_mark'] > 0:
if np.sign(last_bi['fx_mark']) * last_bi['value'] < bi['fx_mark'] * bi['value']:
bi_list[-1] = bi
bi_list.update_xd_eigenvalue()
return True
else:
        # a stroke is confirmed by 1) time: at least one bar between the two fractals, and 2) price: e.g. a downward stroke breaks the low of the previous stroke
kn_inside = trade_date.index(bi['fx_start']) - trade_date.index(last_bi['fx_end']) - 1
        if kn_inside > 0:  # at least one bar between the two fractals; the endpoint may not be the actual high/low
index = -2
while TradeDate(fx_list[index]['date']) > TradeDate(last_bi['date']):
                # the fractal's fx_mark takes the values -1 and +1
if (bi['fx_mark'] * fx_list[index]['fx_mark'] > 0) \
and (bi['fx_mark'] * bi['value'] < fx_list[index]['fx_mark'] * fx_list[index]['value']):
bi = fx_list[index].copy()
                    # the fractal end stays unchanged
bi['fx_end'] = fx_list[-1]['fx_end']
index = index - 1
bi_list.append(bi)
bi_list.update_xd_eigenvalue()
return True
            # only one endpoint exists, so there is no price confirmation yet
if len(bi_list) < 2:
return False
            # price confirmation
            # todo for bars with containing relations handled, one of these checks is redundant; bar['value'] could be used directly
if (bi['fx_mark'] > 0 and bi['value'] > bi_list[-2]['value']) \
or (bi['fx_mark'] < 0 and bi['value'] < bi_list[-2]['value']):
bi_list.append(bi)
bi_list.update_xd_eigenvalue()
return True
return handle_fx_end()
class CzscBase:
def __init__(self):
# self.freq = freq
# assert isinstance(code, str)
# self.code = code.upper()
        self.trade_date = []  # used for index lookups
self.bars = []
self.indicators = IndicatorSet(self.bars)
# self.indicators = None
self.new_bars = []
self.fx_list = []
        self.xd_list = XdList(self.bars, self.indicators, self.trade_date)  # the stroke list acts as the head of the segment chain
self.sig_list = []
def update(self):
        # while a containing relation exists no fractal can appear; this is only needed when no fractal forms
self.indicators.update()
try:
update_fx(bars=self.bars, new_bars=self.new_bars, fx_list=self.fx_list, trade_date=self.trade_date)
except:
print('error')
if not update_bi(
new_bars=self.new_bars, fx_list=self.fx_list, bi_list=self.xd_list, trade_date=self.trade_date
):
return
        # only process segments when a new confirmed stroke has been added
xd_list = self.xd_list
result = True
index = 0
while result:
result = xd_list.update()
            # compute the corresponding buy/sell points
if len(xd_list.sig_list) > 0:
signal = xd_list.sig_list[-1]
# signal.update(xd=index)
# self.sig_list.append(signal)
if index == 0:
signal.update(xd=0)
self.sig_list.append(signal)
else:
                    # there is a trend, or the pivot/segment has upgraded a level
if xd_list.zs_list[-1]['location'] != 0 or xd_list.zs_list[-1]['weight'] > 7:
last_sig = self.sig_list[-1]
last_sig.update(xd=index, xd_mark=signal['xd_mark'])
last_sig['real_loc'] = signal['real_loc']
last_sig['location'] = signal['location']
last_sig['weight'] = signal['weight']
last_sig['valueback'] = signal['valueback']
last_sig['timeback'] = signal['timeback']
# if signal['xd_mark'] in [1, -1]:
last_sig['dif{}'.format(index)] = signal.get('dif')
last_sig['macd{}'.format(index)] = signal.get('macd')
# else:
# util_log_info('High level xd {} == low level xd {}'.format(index, index - 1))
temp_list = xd_list
xd_list = xd_list.next
xd_list.prev = temp_list
index = index + 1
    # must be implemented by subclasses: feed in one quote bar at a time, then call update to refresh the state
def on_bar(self, bar):
"""
        Input data format:
        Index(['open', 'high', 'low', 'close', 'amount', 'volume', 'date', 'code'], dtype='object')
        'date' is a Timestamp; 'volume' is used for plotting
"""
raise NotImplementedError
class CzscMongo(CzscBase):
def __init__(self, code='rul8', data=None, start=None, end=None, freq='day', exchange=None):
        # handles a single instrument only
super().__init__()
self.code = code
self.freq = freq
self.exchange = exchange
# self._bi_list = fetch_future_bi_day(self.code, limit=2, format='dict')
self._bi_list = []
self.old_count = len(self._bi_list)
if len(self._bi_list) > 0:
# self.fx_list = self._bi_list
start = self._bi_list[-1]['fx_end']
elif start is None:
start = '1990-01-01'
if data is None:
self.data = get_bar(code, start=start, end=end, freq=freq, exchange=exchange)
# self.data = get_bar(code, start, end='2020-12-09', freq=freq, exchange=exchange)
else:
self.data = data
def draw(self, chart_path=None):
if len(self.bars) < 1:
return
chart = kline_pro(
kline=self.bars, fx=self.fx_list,
bs=[], xd=self.xd_list,
# title=self.code + '_' + self.freq, width='1520px', height='580px'
title=self.code + '_' + self.freq, width='2540px', height='850px'
)
if not chart_path:
chart_path = 'E:\\signal\\{}_{}.html'.format(self.code, self.freq)
chart.render(chart_path)
webbrowser.open(chart_path)
def on_bar(self, bar):
"""
bar format:
'date' defaults to a Timestamp and is mainly used by the plotting function
"""
bar = bar.to_dict()
# if 'trade' in bar:
# bar['vol'] = bar.pop('trade')
# bar['date'] = pd.to_datetime(bar['date'])
self.bars.append(bar)
try:
self.update()
except Exception as error:
util_log_info(error)
def run(self, start=None, end=None):
if self.data is None or self.data.empty:
util_log_info('{} {} quote data is empty'.format(self.code, self.freq))
return
self.data.apply(self.on_bar, axis=1)
# self.save()
def save(self, collection=FACTOR_DATABASE.future_bi_day):
try:
logging.info('Now Saving Future_BI_DAY==== {}'.format(str(self.code)))
code = self.code
old_count = self.old_count
new_count = len(self._bi_list)
# newly added data; the last element is still unconfirmed
update_count = new_count - old_count
if update_count < 2:
return
bi_list = self._bi_list[old_count:new_count - 1]
start = bi_list[0]['date']
end = bi_list[-1]['date']
logging.info(
'UPDATE_Future_BI_DAY \n Trying updating {} from {} to {}'.format(code, start, end),
)
collection.insert_many(bi_list)
except Exception as error:
print(error)
def save_sig(self, collection=FACTOR_DATABASE.czsz_sig_day):
try:
logging.info('Now Saving CZSC_SIG_DAY==== {}'.format(str(self.code)))
code = self.code
xd = self.xd_list
index = 0
sig = []
while xd:
df = pd.DataFrame(xd.sig_list)
df['xd'] = index
df['code'] = code
df['exchange'] = self.exchange
sig.append(df)
xd = xd.next
index = index + 1
sig_df = pd.concat(sig).set_index(['date', 'xd']).sort_index()
old_count = self.old_count
new_count = len(self._bi_list)
# newly added data; the last element is still unconfirmed
update_count = new_count - old_count
if update_count < 2:
return
bi_list = self._bi_list[old_count:new_count - 1]
start = bi_list[0]['date']
end = bi_list[-1]['date']
logging.info(
'UPDATE_Future_BI_DAY \n Trying updating {} from {} to {}'.format(code, start, end),
)
collection.insert_many(bi_list)
except Exception as error:
print(error)
def to_csv(self):
if len(self.sig_list) < 1:
return
sig_df = pd.DataFrame(self.sig_list).set_index('date')
filename = 'E:\\signal\\{}_{}_{}.csv'.format(self.code, self.freq, sig_df.index[-1].strftime('%Y-%m-%d'))
sig_df.to_csv(filename)
def to_df(self):
xd = self.xd_list
index = 0
sig = []
while xd:
df = pd.DataFrame(xd.sig_list)
df['xd'] = index
df['code'] = self.code
df['exchange'] = self.exchange
sig.append(df)
xd = xd.next
index = index + 1
try:
sig_df = pd.concat(sig).set_index(['date', 'xd']).sort_index()
return sig_df
except:
util_log_info("{} signal is empty!".format(self.code))
return pd.DataFrame()
def to_json(self):
xd = self.xd_list
if len(xd) < 1:
return
index = 0
data = []
while xd:
data.append(
{
'xd{}'.format(index): xd.xd_list,
'zs{}'.format(index): xd.zs_list,
'sig{}'.format(index): xd.sig_list
}
)
xd = xd.next
index = index + 1
with open("{}_{}.json".format(self.code, self.freq), "w") as write_file:
json.dump(data, write_file, indent=4, sort_keys=True, cls=DataEncoder)
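# A minimal usage sketch (an added illustration, not part of the original
# workflow): build a daily CzscMongo for one instrument, replay its quotes and
# return the signal dataframe. The default code/freq values are placeholders.
def demo_czsc_mongo(code='rul8', freq='day'):
    czsc = CzscMongo(code=code, freq=freq)
    czsc.run()
    return czsc.to_df()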
def calculate_bs_signals(security_df: pd.DataFrame, last_trade_date=None):
sig_list = []
if len(security_df) < 1:
util_log_info("=============Security list is empty!==========")
return
class_name = security_df.iloc[0]['class']
if last_trade_date is None:
last_trade_date = util_get_real_date(datetime.today().strftime('%Y-%m-%d'))
# last_trade_time = pd.to_datetime(util_get_next_day(last_trade_date))
last_trade_date = pd.to_datetime(last_trade_date)
index = 0
for code, item in security_df.iterrows():
exchange = item['exchange']
util_log_info("============={} {} Signal==========".format(code, exchange))
try:
hq = get_bar(code, end=last_trade_date, freq='day', exchange=exchange)
except:
util_log_info("============={} {} read hq incorrectly!==========".format(code, exchange))
continue
# liquidity filter for futures: filter on trading volume
if class_name == 'future':
amount = hq.iloc[-1]['volume']
if amount < 10000:
util_log_info(
"===={} {} volume is few!====".format(code, exchange)
)
continue
else:
amount = hq.iloc[-1]['amount']
# price filter for convertible bonds
if class_name == 'convertible':
if hq.iloc[-1]['close'] > 130:
util_log_info(
"===={} {} price is too high!====".format(code, exchange)
)
continue
if amount < 1000000:  # 1 million
util_log_info(
"===={} {} amount is few!====".format(code, exchange)
)
continue
elif class_name == 'hkconnect':
try:
amount = hq.iloc[-1]['hk_stock_amount']
except:
util_log_info(
"===={} {} KeyError: 'hk_stock_amount'====".format(code, exchange)
)
amount = hq.iloc[-1]['volume'] * hq.iloc[-1]['close'] * 100
if amount < 10000000:  # 10 million
util_log_info(
"===={} {} amount is few!====".format(code, exchange)
)
continue
else:
if amount < 10000000:  # 10 million
util_log_info(
"===={} {} amount is few!====".format(code, exchange)
)
continue
try:
# price adjustment (forward-adjusted, qfq)
if class_name in ['stock', 'ETF']:
hq = data_stock_to_fq(hq, fqtype='qfq')
czsc_day = CzscMongo(code=code, data=hq, freq='day', exchange=exchange)
except Exception as error:
util_log_info("{} : {}".format(code, error))
continue
if len(czsc_day.data) < 1:
util_log_info("==========={} {} 0 Quotes==========".format(code, exchange))
continue
if czsc_day.data.iloc[-1]['date'] < last_trade_date:
util_log_info(
"=={} {} last trade date {}==".format(
code, exchange, czsc_day.data.iloc[-1]['date'].strftime('%Y-%m-%d'))
)
continue
czsc_day.run()
sig_day_list = czsc_day.sig_list
if len(sig_day_list) < 1:
continue
last_day_sig = sig_day_list[-1]
if last_day_sig['date'] < last_trade_date:
util_log_info(
"===={} {} last Signal {}====".format(code, exchange, last_day_sig['date'].strftime('%Y-%m-%d'))
)
continue
# Start of the bi-pivot move: for a buy point in an uptrend, start from the high of the current pivot; for a sell point, start from the beginning of the uptrend
xd_list = czsc_day.xd_list
zs_list = xd_list.zs_list
if len(zs_list) < 1:
continue
xd_mark = last_day_sig['xd_mark']
if xd_mark < 0:  # only consider the long side
continue
# if xd_mark < 0:
# xd = zs_list[-1]['DD'][-1]
# else:
# xd = zs_list[-1]['GG'][-1]
#
# start = xd.get('fx_start')
last_day_sig.update(deviation=last_day_sig.get('dif') + last_day_sig.get('macd', 0))
for idx in range(1, last_day_sig['xd'] + 1):
dif = 0 if np.isnan(last_day_sig.get('dif{}'.format(idx))) else last_day_sig.get('dif{}'.format(idx))
macd = 0 if np.isnan(last_day_sig.get('macd{}'.format(idx))) else last_day_sig.get('macd{}'.format(idx))
deviation = last_day_sig.get('deviation', 0)
last_day_sig.update(deviation=deviation + dif + macd)
start = xd_list.sig_list[-1]['start']
hq = get_bar(code, start=start, end=last_trade_date, freq='5min', exchange=exchange)
# price adjustment (forward-adjusted, qfq)
if class_name in ['stock', 'ETF']:
hq = data_stock_to_fq(hq, fqtype='qfq')
czsc_min = CzscMongo(code=code, data=hq, freq='5min', exchange=exchange)
try:
if len(czsc_min.data) < 1:
util_log_info("========={} {} 0 5min Quotes========".format(code, exchange))
continue
except:
util_log_info("========={} {} 5min Quotes file is not exists!========".format(code, exchange))
continue
if czsc_min.data.iloc[-1]['date'] < last_trade_date:
util_log_info(
"==Please Update {} {} 5min Quotes from {}==".format(
code, exchange, czsc_day.data.iloc[-1]['date'].strftime('%Y-%m-%d'))
)
continue
czsc_min.run()
sig_min_list = czsc_min.sig_list
if len(sig_min_list) < 1:
continue
last_min_sig = sig_min_list[-1]
if last_min_sig['date'] < last_trade_date:
continue
df = pd.DataFrame(sig_min_list).set_index('date')
bar_df = pd.DataFrame(czsc_min.bars).set_index('date')
bar_df = bar_df[bar_df.index > last_trade_date]
if xd_mark > 0:
idx = bar_df['low'].idxmin()
else:
idx = bar_df['high'].idxmax()
if df.empty:
util_log_info("===Please Download {} {} 5min Data===".format(code, exchange))
continue
try:
last_min_sig = df.loc[idx].to_dict()
except:
util_log_info("{} {} Have a opposite Signal=======".format(code, exchange))
continue
if last_min_sig['xd_mark'] * xd_mark < 0:  # the intraday high/low is not necessarily a higher-level buy/sell point
util_log_info("{} {} Have a opposite Signal=======".format(code, exchange))
continue
# Trend-following buy/sell points are 1/-1; a counter-trend signal needs a higher xd level; a value < 0 means counter-trend, or the point is not a bi fractal
# (xd_mark * zs_list[-1]['location'] <= 0 and last_min_sig['xd'] >= 2)
# (xd_mark * zs_list[-1]['location'] >= 0 and last_min_sig['xd_mark'] in [1, -1])
# ('fx_start' in xd_list[-1])
if not (
(xd_mark * zs_list[-1]['location'] <= 0 and last_min_sig['xd'] >= 2)
or (xd_mark * zs_list[-1]['location'] >= 0 and last_min_sig['xd_mark'] in [1, -1])
or ('fx_start' in xd_list[-1])
):
util_log_info("==={} xd:{}, xd_mark:{}===".format(code, last_min_sig['xd'], last_min_sig['xd_mark']))
continue
try:
dif = 0 if np.isnan(last_min_sig.get('dif')) else last_min_sig.get('dif')
macd = 0 if np.isnan(last_min_sig.get('macd')) else last_min_sig.get('macd')
last_min_sig.update(macd=dif + macd)
except TypeError:
util_log_info("{} {} has no macd value=======".format(code, exchange))
# if code in ['515120', '510310', '512500', '515380', '515390', '515800', '159905']:
# print('ok')
for idx in range(1, last_min_sig['xd'] + 1):
dif = 0 if np.isnan(last_min_sig.get('dif{}'.format(idx))) else last_min_sig.get('dif{}'.format(idx))
macd = 0 if np.isnan(last_min_sig.get('macd{}'.format(idx))) else last_min_sig.get('macd{}'.format(idx))
last_min_sig.update(macd=last_min_sig.get('macd') + dif + macd)
for key in last_min_sig:
last_day_sig[key + '_min'] = last_min_sig[key]
last_day_sig.update(deviation=last_day_sig['deviation'] + last_day_sig['macd_min'])
# last_day_sig['start'] = start
last_day_sig.update(amount=amount, code=code, exchange=exchange)
sig_list.append(last_day_sig)
index = index + 1
util_log_info("==={:=>4d}. {} {} Have a Signal=======".format(index, code, exchange))
if len(sig_list) < 1:
util_log_info("========There are 0 Signal=======")
return None
df = pd.DataFrame(sig_list)
import pandas as pd  # import necessary packages
import statsmodels.api as sms
from sklearn.model_selection import train_test_split
from sklearn.metrics import mean_squared_error
import numpy as np
import pickle
df = pd.read_csv('us_bank_wages/us_bank_wages.txt', delimiter="\t") #read the csv-file
df.drop('Unnamed: 0', axis = 1, inplace = True) # drop the unnecessary index column
educ_dummies = pd.get_dummies(df['EDUC'], prefix='edu', drop_first=True)
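# A hedged sketch of how the dummies might feed a statsmodels OLS fit; the
# target column name 'SALARY' is an assumption about us_bank_wages.txt and may
# need to be adapted to the actual file.
X = pd.concat([df.drop(columns=['EDUC', 'SALARY']), educ_dummies], axis=1)
X = sms.add_constant(X)
y = df['SALARY']
ols_model = sms.OLS(y, X).fit()
print(ols_model.summary())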
# -*- coding: utf-8 -*-
"""
Created on Tue Jul 27 10:23:59 2021
@author: alber
"""
import re
import os
import pandas as pd
import numpy as np
import spacy
import pickle
import lightgbm as lgb
import imblearn
from sklearn import preprocessing
from sklearn.semi_supervised import (
LabelPropagation,
LabelSpreading,
SelfTrainingClassifier,
)
from sklearn import metrics
from sklearn.dummy import DummyClassifier
from sklearn.metrics import classification_report
# from nltk.corpus import stopwords
# from nltk import ngrams
from nltk.stem.snowball import SnowballStemmer
# from sentence_transformers import SentenceTransformer, util
from imblearn.over_sampling import SMOTE, BorderlineSMOTE, ADASYN
from statsmodels.stats.inter_rater import cohens_kappa
from common.tools import get_files, file_presistance
from common.config import (
PATH_POEMS, PATH_RESULTS, PATH_AFF_LEXICON, PATH_GROUND_TRUTH
)
nlp = spacy.load("es_core_news_md")
stemmer = SnowballStemmer("spanish")
def _getReport(
y_test, y_pred, y_pred_proba, target_names, using_affective = "yes",
semantic_model = "", classification_model = ""
):
"""
Compute evaluation metrics for one binary category: per-class precision/recall/F1,
weighted averages, Cohen's kappa and ROC AUC, returned as a one-row dataframe.
Parameters
----------
y_test : array-like
    Ground-truth binary labels.
y_pred : array-like
    Predicted binary labels.
y_pred_proba : array-like
    Predicted probabilities for the positive class (used for the AUC).
target_names : list of str
    Class names, expected as [f"{category}_0", f"{category}_1"].
using_affective : str, optional
    Whether affective features were used. The default is "yes".
semantic_model : str, optional
    Name of the semantic model. The default is "".
classification_model : str, optional
    Name of the classification model. The default is "".
Returns
-------
df_metrics_iter : pd.DataFrame
    One-row dataframe with the metrics of this iteration.
"""
### 1. Standard Metrics
report = classification_report(
y_test, y_pred, target_names = target_names, output_dict = True
)
df_metrics_iter = pd.DataFrame(
{
'category': [category],
'using_affective': [using_affective],
'semantic_model': [semantic_model],
'classification_model': [classification_model],
'n_class_0': [report[f'{category}_0']['support']],
'n_class_1': [report[f'{category}_1']['support']],
'precision_class_0': [report[f'{category}_0']['precision']],
'precision_class_1': [report[f'{category}_1']['precision']],
'recall_class_0': [report[f'{category}_0']['recall']],
'recall_class_1': [report[f'{category}_1']['recall']],
'f1_class_0': [report[f'{category}_0']['f1-score']],
'f1_class_1': [report[f'{category}_1']['f1-score']],
'precision_weighted': [report['weighted avg']['precision']],
'recall_weighted': [report['weighted avg']['recall']],
'f1_weighted': [report['weighted avg']['f1-score']]
}
)
### 2. Cohen's Kappa
# Make Dataframe
df = pd.DataFrame({"A": y_test, "B": y_pred})
# Switch it to three columns A's answer, B's answer and count of that combination
df = df.value_counts().reset_index()
# Check compliance
if len(df) < 4:
df_aux = pd.DataFrame({'A': [0.0, 1.0, 0.0, 1.0],
'B': [0.0, 0.0, 1.0, 1.0]
})
df = df.merge(df_aux, how="outer").fillna(0)
# Make square
square = df.pivot(columns="A",index="B").values
# Get Kappa
dct_kappa = cohens_kappa(square)
kappa_max = dct_kappa['kappa_max']
kappa = dct_kappa['kappa']
df_metrics_iter['kappa'] = [kappa]
df_metrics_iter['kappa_max'] = [kappa_max]
### 3. AUC
y_pred_proba = np.asarray([x if str(x) != 'nan' else 0.0 for x in y_pred_proba])
fpr, tpr, thresholds = metrics.roc_curve(
y_test, y_pred_proba, pos_label=1
)
auc = metrics.auc(fpr, tpr)
df_metrics_iter['auc'] = [auc]
return df_metrics_iter
# =============================================================================
# 1. Prepare Data
# =============================================================================
### Load Sonnets Features
# Load Data
file_to_read = open(f"{PATH_RESULTS}/dct_sonnets_input_v5", "rb")
dct_sonnets = pickle.load(file_to_read)
file_to_read.close()
# Only DISCO
if False:
dct_sonnets = {x:y for x,y in dct_sonnets.items() if x <= 4085}
# Sonnet Matrix
list_original_sentence = [
'enc_text_model1',
'enc_text_model2',
'enc_text_model3',
'enc_text_model4',
'enc_text_model5'
]
list_semantic_models = [
'enc_text_model1',
'enc_text_model2',
'enc_text_model3',
'enc_text_model4',
'enc_text_model5',
# 'enc_text_model_hg_bert_max',
# 'enc_text_model_hg_bert_span',
# 'enc_text_model_hg_bert_median',
'enc_text_model_hg_bert_avg_w',
# 'enc_text_model_hg_bert_sp_max',
# 'enc_text_model_hg_bert_sp_span',
# 'enc_text_model_hg_bert_sp_median',
'enc_text_model_hg_bert_sp_avg_w',
# 'enc_text_model_hg_ro_max',
# 'enc_text_model_hg_ro_span',
# 'enc_text_model_hg_ro_median',
# 'enc_text_model_hg_ro_avg_w'
]
# General Variables
dct_metrics_all_models = {}
df_meta = pd.concat(
[
pd.DataFrame({"index": [item["index"]], "text": [item["text"]]})
for key, item in dct_sonnets.items()
]
)
df_affective = pd.concat([item["aff_features"] for key, item in dct_sonnets.items()]).fillna(0)
# Load psycho names
df_names = pd.read_csv(f"{PATH_GROUND_TRUTH}/variable_names_en.csv", encoding="latin-1")
list_names = list(df_names["es_name"].values)
list_aff = [
"concreteness",
"context availability",
"anger",
"arousal",
"disgust",
"fear",
"happinness",
"imageability",
"sadness",
"valence",
]
### Load Ground Truth
if False:
df_gt = pd.read_csv(f"{PATH_GROUND_TRUTH}/poems_corpus_all.csv")
df_gt = df_gt[df_gt['index'].isin(list(dct_sonnets.keys()))]
df_gt = df_gt.rename(columns={"text": "text_original"})
df_gt.columns = [str(x).rstrip().lstrip() for x in list(df_gt.columns)]
### Get Subsample from GT
df_add = pd.DataFrame()
import pandas as pd
import numpy as np
import cvxpy as cvx
#### File that simulates the people whose energy response we work with
class Person():
""" Person (parent?) class -- will define how the person takes in a points signal and puts out an energy signal
baseline_energy = a list or dataframe of values. This is data from SinBerBEST
points_multiplier = an int which describes how sensitive each person is to points
"""
def __init__(self, baseline_energy_df, points_multiplier = 1):
self.baseline_energy_df = baseline_energy_df
self.baseline_energy = np.array(self.baseline_energy_df["net_energy_use"])
self.points_multiplier = points_multiplier
baseline_min = self.baseline_energy.min()
baseline_max = self.baseline_energy.max()
baseline_range = baseline_max - baseline_min
self.min_demand = np.maximum(0, baseline_min + baseline_range * .05)
self.max_demand = np.maximum(0, baseline_min + baseline_range * .95)
def energy_output_simple_linear(self, points):
"""Determines the energy output of the person, based on the formula:
y[n] = -sum_{rolling window of 5} points + baseline_energy + noise
inputs: points - list or dataframe of points values. Assumes that the
list will be in the same time increment that energy_output will be.
For now, that's in 1 hour increments
"""
points_df = pd.DataFrame(points)
points_effect = (
points_df
.rolling(
window = 5,
min_periods = 1)
.mean()
)
time = points_effect.shape[0]
energy_output= []
for t in range(time):
temp_energy = self.baseline_energy[t] - points_effect.iloc[t]*self.points_multiplier + \
np.random.normal(1)
energy_output.append(temp_energy)
return pd.DataFrame(energy_output)
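# A minimal usage sketch (added illustration): it assumes a toy baseline frame
# with a "net_energy_use" column and a flat points signal, just to show how
# energy_output_simple_linear is meant to be called.
if __name__ == "__main__":
    toy_baseline = pd.DataFrame({"net_energy_use": np.random.uniform(20.0, 30.0, 24)})
    person = Person(toy_baseline, points_multiplier=2)
    toy_points = [1] * 24
    print(person.energy_output_simple_linear(toy_points).head())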
#!/usr/bin/env python
# coding: utf-8
# # 🏁 Wrap-up quiz
#
# **This quiz requires some programming to be answered.**
#
# Open the dataset `bike_rides.csv` with the following commands:
# In[1]:
import pandas as pd
cycling = pd.read_csv("../datasets/bike_rides.csv", index_col=0,
parse_dates=True)
cycling.index.name = ""
target_name = "power"
data, target = cycling.drop(columns=target_name), cycling[target_name]
data.head()
# A detailed description of this dataset is given in the appendix. As a reminder,
# the problem we are trying to solve with this dataset is to use measurements
# from cheap sensors (GPS, heart-rate monitor, etc.) in order to predict a
# cyclist power. Power can indeed be recorded via a cycling power meter device,
# but this device is rather expensive.
#
# Instead of blindly using machine learning, we will first introduce some flavor of
# classical mechanics: Newton's second law.
#
# $P_{meca} = (\frac{1}{2} \rho . SC_x . V_{a}^{2} + C_r . mg . \cos \alpha + mg . \sin \alpha + ma) V_d$
#
# where $\rho$ is the air density in kg.m$^{-3}$, $S$ is frontal surface of the
# cyclist in m$^{2}$, $C_x$ is the drag coefficient, $V_a$ is the air speed in
# m.s$^{-1}$, $C_r$ is the rolling coefficient, $m$ is the mass of the rider and
# bicycle in kg, $g$ is the standard acceleration due to gravity which is equal
# to 9.81 m.s$^{-2}$, $\alpha$ is the slope in radian, $V_d$ is the rider speed
# in m.s$^{-1}$, and $a$ is the rider acceleration in m.s$^{-2}$.
#
# This equation might look a bit complex at first but we can explain with words
# what the different terms within the parenthesis are:
#
# - the first term is the power that a cyclist is required to produce to fight wind
# - the second term is the power that a cyclist is required to produce to fight
# the rolling resistance created by the tires on the floor
# - the third term is the power that a cyclist is required to produce to go up a hill if the
# slope is positive. If the slope is negative the cyclist does not need to
# produce any power to go forward
# - the fourth and last term is the power that a cyclist requires to change his
# speed (i.e. acceleration).
#
# We can simplify the model above by using the data that we have at hand. It
# would look like the following.
#
# $P_{meca} = \beta_{1} V_{d}^{3} + \beta_{2} V_{d} + \beta_{3} \sin(\alpha) V_{d} + \beta_{4} a V_{d}$
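#
# Under the simplifying assumptions of no wind (so $V_a = V_d$), a small slope
# ($\cos \alpha \approx 1$) and positive acceleration only, the coefficients can
# be read off the physical model as $\beta_{1} \approx \frac{1}{2} \rho S C_x$,
# $\beta_{2} \approx C_r m g$, $\beta_{3} \approx m g$ and $\beta_{4} \approx m$.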
#
# This model is closer to what we saw previously: it is a linear model trained
# on a non-linear feature transformation. We will build, train and evaluate
# such a model as part of this exercise. Thus, you need to:
#
# - create a new data matrix containing the cube of the speed, the speed, the
# speed multiplied by the sine of the angle of the slope, and the speed
# multiplied by the acceleration. To compute the angle of the slope, you need
# to take the arc tangent of the slope (`alpha = np.arctan(slope)`). In
# addition, we can limit ourself to positive acceleration only by clipping to 0
# the negative acceleration values (they would correspond to some power created
# by the braking that we are not modeling here).
# - using the new data matrix, create a linear predictive model based on a
# [`sklearn.preprocessing.StandardScaler`](https://scikit-learn.org/stable/modules/generated/sklearn.preprocessing.StandardScaler.html)
# and a
# [`sklearn.linear_model.RidgeCV`](https://scikit-learn.org/stable/modules/generated/sklearn.linear_model.RidgeCV.html);
# - use a
# [`sklearn.model_selection.ShuffleSplit`](https://scikit-learn.org/stable/modules/generated/sklearn.model_selection.ShuffleSplit.html)
# cross-validation strategy with only 4 splits (`n_splits=4`) to evaluate the
# statistical performance of the model. Use the mean absolute error (MAE) as a
# statistical performance metric. Also, pass the parameter
# `return_estimator=True` and `return_train_score=True` to answer the
# subsequent questions. Be aware that the `ShuffleSplit` strategy is a naive
# strategy and we will investigate the consequence of making this choice in the
# subsequent questions.
# In[7]:
import numpy as np
data['cube_speed']=data['speed']**3
data['speed_sin_alpha']=data['speed']*np.sin(np.arctan(data['slope']))
data['fixed_acc']=np.maximum(0,data['acceleration'])*data['speed']
sub_data=data[['cube_speed', 'speed', 'speed_sin_alpha', 'fixed_acc']]
# In[9]:
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import StandardScaler
from sklearn.linear_model import RidgeCV
pipeline = make_pipeline(StandardScaler(), RidgeCV())
# In[12]:
from sklearn.model_selection import ShuffleSplit, cross_validate
cv = ShuffleSplit(n_splits=4)
cv_result = cross_validate(pipeline, sub_data, target, cv=cv, return_estimator=True, return_train_score=True, scoring='neg_mean_absolute_error')
cv_result
# In[11]:
sub_data['speed_sin_alpha'].mean()
# # Question 1
# What is the mean value of the column containing the information of
# $\sin(\alpha) V_{d}$?
#
# - a) about -3
# - b) about -0.3
# - c) about -0.03
# - d) about -0.003
#
# ```
# In[14]:
-cv_result['test_score'].mean()
# # Question 2
# On average, the Mean Absolute Error on the test sets obtained through
# cross-validation is closest to:
#
# - a) 20 Watts
# - b) 50 Watts
# - c) 70 Watts
# - d) 90 Watts
#
# _Select a single answer_
#
# Hint: pass `scoring="neg_mean_absolute_error"` to the `cross_validate`
# function to compute the (negative of) the requested metric.
# Hint: it is possible to replace the negative acceleration values by 0 using
# `data["acceleration"].clip(lower=0)`
# In[21]:
for est in cv_result['estimator']:
print(est['ridgecv'].coef_)
# # Question 3
# Given the model
# $P_{meca} = \beta_{1} V_{d}^{3} + \beta_{2} V_{d} + \beta_{3} \sin(\alpha) V_{d} + \beta_{4} a V_{d}$
# that you created, inspect the weights of the linear models fitted during
# cross-validation and select the correct statements:
#
# - a) $\beta_{1} < \beta_{2} < \beta_{3}$
# - b) $\beta_{3} < \beta_{1} < \beta_{2}$
# - c) $\beta_{2} < \beta_{3} < \beta_{1}$
# - d) $\beta_{1} < 0$
# - e) $\beta_{2} < 0$
# - f) $\beta_{3} < 0$
# - g) $\beta_{4} < 0$
# - h) All $\beta$s are $> 0$
#
# _Select several answers_
# +++
#
# Now, we will create a predictive model that uses all available sensor
# measurements such as cadence (the speed at which a cyclist turns pedals
# measured in rotation per minute) and heart-rate (the number of beat per minute
# of the heart of the cyclist while exercising). Also, we will use a non-linear
# regressor, a
# [`sklearn.ensemble.HistGradientBoostingRegressor`](https://scikit-learn.org/stable/modules/generated/sklearn.ensemble.HistGradientBoostingRegressor.html).
# Fix the number of maximum iterations to 1000 (`max_iter=1_000`) and activate
# the early stopping (`early_stopping=True`). Repeat the previous evaluation
# using this regressor.
# In[23]:
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import StandardScaler
from sklearn.experimental import enable_hist_gradient_boosting
from sklearn.ensemble import HistGradientBoostingRegressor
pipelineHGB = make_pipeline(StandardScaler(), HistGradientBoostingRegressor(max_iter=1000, early_stopping=True))
from sklearn.model_selection import ShuffleSplit, cross_validate
cv = ShuffleSplit(n_splits=4)
cv_result = cross_validate(pipelineHGB, data, target, cv=cv, return_estimator=True, return_train_score=True, scoring='neg_mean_absolute_error')
cv_result
# # Question 4
# On average, the Mean Absolute Error on the test sets obtained through
# cross-validation is closest to:
#
# - a) 20 Watts
# - b) 40 Watts
# - c) 60 Watts
# - d) 80 Watts
#
# _Select a single answer_
# # Question 5
# Comparing both the linear model and the histogram gradient boosting model and
# taking into consideration the train and test MAE obtained via cross-validation,
# select the correct statements:
#
# - a) the statistical performance of the histogram gradient-boosting model is
# limited by its underfitting
# - b) the statistical performance of the histogram gradient-boosting model is
# limited by its overfitting
# - c) the statistical performance of the linear model is limited by its
# underfitting
# - d) the statistical performance of the linear model is limited by its
# overfitting
#
# _Select several answers_
# Hint: look at the values of the `train_score` and the `test_score` collected
# in the dictionaries returned by the `cross_validate` function.
#
# +++
#
# In the previous cross-validation, we made the choice of using a `ShuffleSplit`
# cross-validation strategy. It means that randomly selected samples were
# selected as a testing test ignoring any time dependency between the lines of
# the dataframe.
#
# We would like to have a cross-validation strategy that evaluates the capacity
# of our model to predict on a completely new bike ride: the samples in the
# validation set should only come from rides not present in the training set.
# In[25]:
data.plot()
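# A quick numeric check (added sketch): count the distinct calendar days in the
# index, assuming one bike ride per day as stated in the hint.
print(len(np.unique(data.index.date)))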
# # Question 6
# How many bike rides are stored in the dataframe `data`? Do not hesitate to
# look at the hints.
#
# - a) 2
# - b) 3
# - c) 4
# - d) 5
#
# _Select a single answer_
#
# Hint: You can check the unique day in the `DatetimeIndex` (the index of the
# dataframe `data`). Indeed, we assume that on a given day the rider went cycling
# at most once per day.
# Hint: You can access to the date and time of a `DatetimeIndex` using
# `df.index.date` and `df.index.time`, respectively.
# +++
#
# Instead of using the naive `ShuffleSplit` strategy, we will use a strategy that
# takes into account the group defined by each individual date. It corresponds to
# a bike ride. We would like to have a cross-validation strategy that evaluates
# the capacity of our model to predict on a completely new bike ride: the samples
# in the validation set should only come from rides not present in the training
# set. Therefore, we can use a `LeaveOneGroupOut` strategy: at each iteration of
# the cross-validation, we will keep a bike ride for the evaluation and use all
# other bike rides to train our model.
#
# Thus, you concretely need to:
#
# - create a variable called `group` that is a 1D numpy array containing the
# index of each ride present in the dataframe. Therefore, the length of `group`
# will be equal to the number of samples in `data`. If we had 2 bike
# rides, we would expect the indices 0 and 1 in `group` to differentiate the
# bike ride. You can use
# [`pd.factorize`](https://pandas.pydata.org/docs/reference/api/pandas.factorize.html)
# to encode any Python types into integer indices.
# - create a cross-validation object named `cv` using the
# [`sklearn.model_selection.LeaveOneGroupOut`](https://scikit-learn.org/stable/modules/generated/sklearn.model_selection.LeaveOneGroupOut.html#sklearn.model_selection.LeaveOneGroupOut)
# strategy.
# - evaluate both the linear and histogram gradient boosting models with this
# strategy.
# In[48]:
# Encode each ride (one bike ride per calendar day) as an integer group label,
# without adding it as a feature column so it cannot leak into the models.
codes, uniques = pd.factorize(data.index.date)
data
from sklearn.model_selection import LeaveOneGroupOut
cv = LeaveOneGroupOut()
cv_result_HGB = cross_validate(pipelineHGB, data, target, cv=cv, groups=codes, return_estimator=True, return_train_score=True, scoring='neg_mean_absolute_error')
print(cv_result_HGB)
cv_result_LR = cross_validate(pipeline, sub_data, target, cv=cv, groups=codes, return_estimator=True, return_train_score=True, scoring='neg_mean_absolute_error')
print(cv_result_LR)
# # Question 7
# Using the previous evaluations (with both `ShuffleSplit` and
# `LeaveOneGroupOut`) and looking at the train and test errors for both models,
# select the correct statements:
#
# - a) the statistical performance of the gradient-boosting model is
# limited by its underfitting
# - b) the statistical performance of the gradient-boosting model is
# limited by its overfitting
# - c) the statistical performance of the linear model is limited by its
# underfitting
# - d) the statistical performance of the linear model is limited by its
# overfitting
#
# _Select several answers_
# ```
# In[53]:
print(f'test score for linear model : {-cv_result_LR["test_score"].mean():0.02f} +/- {-cv_result_LR["test_score"].std():0.02f}')
print(f'test score for HGB model : {-cv_result_HGB["test_score"].mean():0.02f} +/- {-cv_result_HGB["test_score"].std():0.02f}')
# # Question 8
# Using the previous evaluations (with both `ShuffleSplit` and
# `LeaveOneGroupOut`) and looking at the train and test errors for both models,
# select the correct statements:
#
# - a) `ShuffleSplit` is giving over-optimistic results for the linear model
# - b) `LeaveOneGroupOut` is giving over-optimistic results for the linear model
# - c) both cross-validation strategies are equivalent for the linear model
# - d) `ShuffleSplit` is giving over-optimistic results for the gradient-boosting model
# - e) `LeaveOneGroupOut` is giving over-optimistic results for the gradient-boosting model
# - f) both cross-validation strategies are equivalent for the gradient-boosting model
#
# _Select several answer_
# # Question 9
# Compare more precisely the errors estimated through cross-validation and select
# the correct statement:
#
# - a) in general, the standard deviation of the train and test errors increased
# using the `LeaveOneGroupOut` cross-validation
# - b) in general, the standard deviation of the train and test errors decreased
# using the `LeaveOneGroupOut` cross-validation
#
# _Select a single answer_
# +++
#
# Now, we will go in details by picking a single ride for the testing and analyse
# the predictions of the models for this test ride. To do so, we can reuse the
# `LeaveOneGroupOut` cross-validation object in the following manner:
# In[56]:
cv = LeaveOneGroupOut()
groups=codes
data_linear_model=sub_data.copy()
train_indices, test_indices = list(cv.split(data, target, groups=groups))[0]
data_linear_model_train = data_linear_model.iloc[train_indices]
data_linear_model_test = data_linear_model.iloc[test_indices]
data_train = data.iloc[train_indices]
data_test = data.iloc[test_indices]
target_train = target.iloc[train_indices]
target_test = target.iloc[test_indices]
# Now, fit both the linear model and the histogram gradient boosting regressor
# models on the training data and collect the predictions on the testing data.
# Make a scatter plot where on the x-axis, you will plot the measured powers
# (true target) and on the y-axis, you will plot the predicted powers
# (predicted target). Do two separated plots for each model.
# In[65]:
pipeline.fit(data_linear_model_train, target_train)
linear_pred = pipeline.predict(data_linear_model_test)
import matplotlib.pyplot as plt
import seaborn as sns
linear_df = pd.DataFrame( { 'Prediction':pd.Series(linear_pred), 'True':pd.Series(target_test.to_numpy())} )
plt.axline((0, 0), slope=1, label="Perfect fit", color='black')
sns.scatterplot(data=linear_df, x='Prediction', y='True')
_ = plt.title("Regression using a linear model")
# In[67]:
pipelineHGB.fit(data_train, target_train)
HGB_pred = pipelineHGB.predict(data_test)
import matplotlib.pyplot as plt
import seaborn as sns
HGB_df = pd.DataFrame( { 'Prediction':pd.Series(HGB_pred), 'True':pd.Series(target_test.to_numpy())} )
plt.axline((0, 0), slope=1, label="Perfect fit", color='black')
sns.scatterplot(data=HGB_df, x='Prediction', y='True')
_ = plt.title("Regression using a HGB model")
# # Question 10
# By analysing the plots, select the correct statements:
#
# - a) the linear regressor tends to under-predict samples with high power
# - b) the linear regressor tends to over-predict samples with high power
# - c) the linear regressor makes catastrophic predictions for samples with low
# power
# - d) the histogram gradient boosting regressor tends to under-predict samples
# with high power
# - e) the histogram gradient boosting regressor tends to over-predict samples
# with high power
# - f) the histogram gradient boosting makes catastrophic predictions for samples
# with low power
#
# _Select several answers_
#
# +++
#
# Now select a portion of the testing data using the following code:
# In[68]:
time_slice = slice("2020-08-18 17:00:00", "2020-08-18 17:05:00")
data_test_linear_model_subset = data_linear_model_test[time_slice]
data_test_subset = data_test[time_slice]
target_test_subset = target_test[time_slice]
# It allows to select data from 5.00 pm until 5.05 pm. Used the previous fitted
# models (linear and gradient-boosting regressor) to predict on this portion
# of the test data. Draw on the same plot the true targets and the predictions
# of each model.
# In[69]:
linear_pred = pipeline.predict(data_test_linear_model_subset)
HGB_pred = pipelineHGB.predict(data_test_subset)
linear_df = pd.DataFrame( { 'Prediction':pd.Series(linear_pred), 'True':pd.Series(target_test_subset.to_numpy())} )
plt.axline((0, 0), slope=1, label="Perfect fit", color='black')
sns.scatterplot(data=linear_df, x='Prediction', y='True')
_ = plt.title("Regression using a linear model - subset")
# In[70]:
HGB_df = pd.DataFrame( { 'Prediction':pd.Series(HGB_pred), 'True':pd.Series(target_test_subset.to_numpy())} )
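# Completing the subset plot for the gradient-boosting model, following the same
# pattern as the linear-model subset plot above (added continuation).
plt.axline((0, 0), slope=1, label="Perfect fit", color='black')
sns.scatterplot(data=HGB_df, x='Prediction', y='True')
_ = plt.title("Regression using a HGB model - subset")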
#!/usr/local/bin/python
import argparse
import os
import sys
import pandas as pd
import numpy as np
import time
pd.options.mode.chained_assignment = None
parser = argparse.ArgumentParser(prog='snvScore')
parser.add_argument('SampleBED',type=str,help='Path to the mosdepth per-base BED output')
parser.add_argument('SNVGermlineTXT',type=str,help='Path to ClinVar-generated table with pathogenic germline SNVs')
parser.add_argument('SNVSomaticTXT',type=str,help='Path to ClinVar-generated table with pathogenic somatic SNVs')
parser.add_argument('Threshold',type=int,nargs='?',help='SNV coverage quality threshold (optional, positive)',default=0)
args = parser.parse_args()
sample_name = args.SampleBED
while sample_name.find('/')!=-1:
sample_name = sample_name[sample_name.find('/')+1:]
def snv_coverage(snv,chrom_cover):
snv = snv.dropna()
snv['coverage']=0.0
snv=snv.drop_duplicates()
snv = snv.reset_index(drop=True)
cover_reg = chrom_cover[(chrom_cover.end>snv.position.iloc[0]) & (chrom_cover.start<=snv.position.iloc[-1])]
cover_reg = cover_reg.reset_index(drop=True)
for ind in snv.index:
buf = cover_reg[(cover_reg.end>snv.position[ind]) & (cover_reg.start<=snv.position[ind])]
snv.coverage[ind] = buf.coverage
return snv
def CatchChromoRegs(BED_fname,chrom_names):
BED = open(BED_fname, 'rt')
# chrom_names = ['chr1', 'chr2', 'chr3', 'chr4', 'chr5', 'chr6', 'chr7', 'chr8',
# 'chr9', 'chr10', 'chr11', 'chr12', 'chr13', 'chr14', 'chr15',
# 'chr16', 'chr17', 'chr18', 'chr19', 'chr20', 'chr21', 'chr22',
# 'chrX', 'chrY','chrM']
chrom_start_pos = np.zeros(len(chrom_names)+1,dtype='int32')
line_num = 0
for chrom,i in zip(chrom_names,np.arange(len(chrom_names))):
pos_catched = False
while not pos_catched:
line = BED.readline()
line = line[:line.find('\t')]
if line == chrom:
pos_catched = True
chrom_start_pos[i] = line_num
line_num+=1
while line =='chrM':
line = BED.readline()
line = line[:line.find('\t')]
line_num+=1
chrom_start_pos[-1]=line_num-1
return chrom_start_pos
def ExecuteClinicalCoverageDepthCalc(chrom_names,SNVG,SNVS,SampleBED):
snv_cov = pd.DataFrame(columns=['chr','position','coverage','type'])
all_cov = np.array([])
# start = time.time()
res = CatchChromoRegs(SampleBED,chrom_names)
rows = ['' for i in range(24)]
for chrom,chr_num in zip(chrom_names[:-1],np.arange(24)):
# for chrom,chr_num in zip(chrom_names[:3],np.arange(3)):
chrom_cover = pd.read_csv(SampleBED,delimiter='\t',header=None,names=['chr','start','end','coverage'],skiprows=res[chr_num],nrows=res[chr_num+1]-res[chr_num])
all_cov = np.append(all_cov,chrom_cover.coverage.values,axis=0)
snvg_part = SNVG[SNVG.chr==chrom]
snvs_part = SNVS[SNVS.chr==chrom]
if snvg_part.size>0:
snvg_part = snv_coverage(snvg_part,chrom_cover)
snvg_part['type'] = 'germline'
snv_cov = pd.concat([snv_cov, snvg_part])
"""
Import as:
import core.test.test_statistics as cttsta
"""
import logging
from typing import List
import numpy as np
import pandas as pd
import pytest
import core.artificial_signal_generators as casgen
import core.finance as cfinan
import core.signal_processing as csproc
import core.statistics as cstati
import helpers.printing as hprint
import helpers.unit_test as hut
_LOG = logging.getLogger(__name__)
class TestComputeMoments(hut.TestCase):
def test1(self) -> None:
series = self._get_series(seed=1)
actual = cstati.compute_moments(series)
actual_string = hut.convert_df_to_string(actual, index=True)
self.check_string(actual_string)
def test2(self) -> None:
series = self._get_series(seed=1)
actual = cstati.compute_moments(series, prefix="moments_")
actual_string = hut.convert_df_to_string(actual, index=True)
self.check_string(actual_string)
# Smoke test for empty input.
def test3(self) -> None:
series = pd.Series([])
cstati.compute_moments(series)
def test4(self) -> None:
series = self._get_series(seed=1)
# Place some `NaN` values in the series.
series[:5] = np.nan
series[8:10] = np.nan
actual = cstati.compute_moments(series)
actual_string = hut.convert_df_to_string(actual, index=True)
self.check_string(actual_string)
def test5(self) -> None:
series = self._get_series(seed=1)
# Place some `NaN` values in the series.
series[:5] = np.nan
series[8:10] = np.nan
actual = cstati.compute_moments(series, nan_mode="ffill_and_drop_leading")
actual_string = hut.convert_df_to_string(actual, index=True)
self.check_string(actual_string)
# Smoke test for input of `np.nan`s.
def test6(self) -> None:
series = pd.Series([np.nan for i in range(10)])
cstati.compute_moments(series)
def test7(self) -> None:
"""
Test series with `inf`.
"""
series = self._get_series(seed=1)
# Place some `NaN` values in the series.
series[4] = np.inf
actual = cstati.compute_moments(series, nan_mode="ffill_and_drop_leading")
actual_string = hut.convert_df_to_string(actual, index=True)
self.check_string(actual_string)
@staticmethod
def _get_series(seed: int) -> pd.Series:
arparams = np.array([0.75, -0.25])
maparams = np.array([0.65, 0.35])
arma_process = casgen.ArmaProcess(arparams, maparams)
date_range = {"start": "1/1/2010", "periods": 40, "freq": "M"}
series = arma_process.generate_sample(
date_range_kwargs=date_range, seed=seed
)
return series
class TestComputeFracZero(hut.TestCase):
def test1(self) -> None:
data = [0.466667, 0.2, 0.13333, 0.2, 0.33333]
index = [0, 1, 2, 3, 4]
expected = pd.Series(data=data, index=index)
actual = cstati.compute_frac_zero(self._get_df(seed=1))
pd.testing.assert_series_equal(actual, expected, check_less_precise=3)
def test2(self) -> None:
data = [
0.4,
0.0,
0.2,
0.4,
0.4,
0.2,
0.4,
0.0,
0.6,
0.4,
0.6,
0.2,
0.0,
0.0,
0.2,
]
index = pd.date_range(start="1-04-2018", periods=15, freq="30T")
expected = pd.Series(data=data, index=index)
actual = cstati.compute_frac_zero(self._get_df(seed=1), axis=1)
pd.testing.assert_series_equal(actual, expected, check_less_precise=3)
def test3(self) -> None:
# Equals 20 / 75 = num_zeros / num_points.
expected = 0.266666
actual = cstati.compute_frac_zero(self._get_df(seed=1), axis=None)
np.testing.assert_almost_equal(actual, expected, decimal=3)
def test4(self) -> None:
series = self._get_df(seed=1)[0]
expected = 0.466667
actual = cstati.compute_frac_zero(series)
np.testing.assert_almost_equal(actual, expected, decimal=3)
def test5(self) -> None:
series = self._get_df(seed=1)[0]
expected = 0.466667
actual = cstati.compute_frac_zero(series, axis=0)
np.testing.assert_almost_equal(actual, expected, decimal=3)
# Smoke test for empty input.
def test6(self) -> None:
series = pd.Series([])
cstati.compute_frac_zero(series)
@staticmethod
def _get_df(seed: int) -> pd.DataFrame:
nrows = 15
ncols = 5
num_nans = 15
num_infs = 5
num_zeros = 20
#
np.random.seed(seed=seed)
mat = np.random.randn(nrows, ncols)
mat.ravel()[np.random.choice(mat.size, num_nans, replace=False)] = np.nan
mat.ravel()[np.random.choice(mat.size, num_infs, replace=False)] = np.inf
mat.ravel()[np.random.choice(mat.size, num_infs, replace=False)] = -np.inf
mat.ravel()[np.random.choice(mat.size, num_zeros, replace=False)] = 0
#
index = pd.date_range(start="01-04-2018", periods=nrows, freq="30T")
df = pd.DataFrame(data=mat, index=index)
return df
class TestComputeFracNan(hut.TestCase):
def test1(self) -> None:
data = [0.4, 0.133333, 0.133333, 0.133333, 0.2]
index = [0, 1, 2, 3, 4]
expected = pd.Series(data=data, index=index)
actual = cstati.compute_frac_nan(self._get_df(seed=1))
pd.testing.assert_series_equal(actual, expected, check_less_precise=3)
def test2(self) -> None:
data = [
0.4,
0.0,
0.2,
0.4,
0.2,
0.2,
0.2,
0.0,
0.4,
0.2,
0.6,
0.0,
0.0,
0.0,
0.2,
]
index = pd.date_range(start="1-04-2018", periods=15, freq="30T")
expected = pd.Series(data=data, index=index)
actual = cstati.compute_frac_nan(self._get_df(seed=1), axis=1)
pd.testing.assert_series_equal(actual, expected, check_less_precise=3)
def test3(self) -> None:
# Equals 15 / 75 = num_nans / num_points.
expected = 0.2
actual = cstati.compute_frac_nan(self._get_df(seed=1), axis=None)
np.testing.assert_almost_equal(actual, expected, decimal=3)
def test4(self) -> None:
series = self._get_df(seed=1)[0]
expected = 0.4
actual = cstati.compute_frac_nan(series)
np.testing.assert_almost_equal(actual, expected, decimal=3)
def test5(self) -> None:
series = self._get_df(seed=1)[0]
expected = 0.4
actual = cstati.compute_frac_nan(series, axis=0)
np.testing.assert_almost_equal(actual, expected, decimal=3)
# Smoke test for empty input.
def test6(self) -> None:
series = pd.Series([])
cstati.compute_frac_nan(series)
@staticmethod
def _get_df(seed: int) -> pd.DataFrame:
nrows = 15
ncols = 5
num_nans = 15
num_infs = 5
num_zeros = 20
#
np.random.seed(seed=seed)
mat = np.random.randn(nrows, ncols)
mat.ravel()[np.random.choice(mat.size, num_infs, replace=False)] = np.inf
mat.ravel()[np.random.choice(mat.size, num_infs, replace=False)] = -np.inf
mat.ravel()[np.random.choice(mat.size, num_zeros, replace=False)] = 0
mat.ravel()[np.random.choice(mat.size, num_nans, replace=False)] = np.nan
#
index = pd.date_range(start="01-04-2018", periods=nrows, freq="30T")
df = pd.DataFrame(data=mat, index=index)
return df
class TestComputeNumFiniteSamples(hut.TestCase):
@staticmethod
# Smoke test for empty input.
def test1() -> None:
series = pd.Series([])
cstati.count_num_finite_samples(series)
class TestComputeNumUniqueValues(hut.TestCase):
@staticmethod
# Smoke test for empty input.
def test1() -> None:
series = pd.Series([])
cstati.count_num_unique_values(series)
class TestComputeDenominatorAndPackage(hut.TestCase):
@staticmethod
# Smoke test for empty input.
def test1() -> None:
series = pd.Series([])
cstati._compute_denominator_and_package(reduction=1, data=series)
class TestTTest1samp(hut.TestCase):
# Smoke test for empty input.
def test1(self) -> None:
series = pd.Series([])
cstati.ttest_1samp(series)
def test2(self) -> None:
series = self._get_series(seed=1)
# Place some `NaN` values in the series.
series[:5] = np.nan
series[8:10] = np.nan
actual = cstati.ttest_1samp(series)
actual_string = hut.convert_df_to_string(actual, index=True)
self.check_string(actual_string)
def test3(self) -> None:
series = self._get_series(seed=1)
# Place some `NaN` values in the series.
series[:5] = np.nan
series[8:10] = np.nan
actual = cstati.ttest_1samp(series, nan_mode="ffill_and_drop_leading")
actual_string = hut.convert_df_to_string(actual, index=True)
self.check_string(actual_string)
# Smoke test for input of `np.nan`s.
def test4(self) -> None:
series = pd.Series([np.nan for i in range(10)])
cstati.ttest_1samp(series)
@staticmethod
def _get_series(seed: int) -> pd.Series:
arparams = np.array([0.75, -0.25])
maparams = np.array([0.65, 0.35])
arma_process = casgen.ArmaProcess(arparams, maparams)
date_range = {"start": "1/1/2010", "periods": 40, "freq": "M"}
series = arma_process.generate_sample(
date_range_kwargs=date_range, seed=seed
)
return series
class TestMultipleTests(hut.TestCase):
# Smoke test for empty input.
def test1(self) -> None:
series = pd.Series([])
cstati.multipletests(series)
# Test if error is raised with default arguments when input contains NaNs.
@pytest.mark.xfail()
def test2(self) -> None:
series_with_nans = self._get_series(seed=1)
series_with_nans[0:5] = np.nan
actual = cstati.multipletests(series_with_nans)
actual_string = hut.convert_df_to_string(actual, index=True)
self.check_string(actual_string)
def test3(self) -> None:
series_with_nans = self._get_series(seed=1)
series_with_nans[0:5] = np.nan
actual = cstati.multipletests(series_with_nans, nan_mode="drop")
actual_string = hut.convert_df_to_string(actual, index=True)
self.check_string(actual_string)
@staticmethod
def _get_series(seed: int) -> pd.Series:
date_range = {"start": "1/1/2010", "periods": 40, "freq": "M"}
series = hut.get_random_df(
num_cols=1,
seed=seed,
**date_range,
)[0]
return series
class TestMultiTTest(hut.TestCase):
# Smoke test for empty input.
def test1(self) -> None:
df = pd.DataFrame(columns=["series_name"])
cstati.multi_ttest(df)
def test2(self) -> None:
df = self._get_df_of_series(seed=1)
actual = cstati.multi_ttest(df)
actual_string = hut.convert_df_to_string(actual, index=True)
self.check_string(actual_string)
def test3(self) -> None:
df = self._get_df_of_series(seed=1)
actual = cstati.multi_ttest(df, prefix="multi_ttest_")
actual_string = hut.convert_df_to_string(actual, index=True)
self.check_string(actual_string)
def test4(self) -> None:
df = self._get_df_of_series(seed=1)
actual = cstati.multi_ttest(df, popmean=1)
actual_string = hut.convert_df_to_string(actual, index=True)
self.check_string(actual_string)
def test5(self) -> None:
df = self._get_df_of_series(seed=1)
actual = cstati.multi_ttest(df, nan_mode="fill_with_zero")
actual_string = hut.convert_df_to_string(actual, index=True)
self.check_string(actual_string)
def test6(self) -> None:
df = self._get_df_of_series(seed=1)
actual = cstati.multi_ttest(df, method="sidak")
actual_string = hut.convert_df_to_string(actual, index=True)
self.check_string(actual_string)
@pytest.mark.xfail()
def test7(self) -> None:
df = self._get_df_of_series(seed=1)
df.iloc[:, 0] = np.nan
actual = cstati.multi_ttest(df)
actual_string = hut.convert_df_to_string(actual, index=True)
self.check_string(actual_string)
@staticmethod
def _get_df_of_series(seed: int) -> pd.DataFrame:
n_series = 7
arparams = np.array([0.75, -0.25])
maparams = np.array([0.65, 0.35])
arma_process = casgen.ArmaProcess(arparams, maparams)
date_range = {"start": "1/1/2010", "periods": 40, "freq": "M"}
# Generating a dataframe from different series.
df = pd.DataFrame(
[
arma_process.generate_sample(
date_range_kwargs=date_range, seed=seed + i
)
for i in range(n_series)
],
index=["series_" + str(i) for i in range(n_series)],
).T
return df
class TestApplyNormalityTest(hut.TestCase):
def test1(self) -> None:
series = self._get_series(seed=1)
actual = cstati.apply_normality_test(series)
actual_string = hut.convert_df_to_string(actual, index=True)
self.check_string(actual_string)
def test2(self) -> None:
series = self._get_series(seed=1)
actual = cstati.apply_normality_test(series, prefix="norm_test_")
actual_string = hut.convert_df_to_string(actual, index=True)
self.check_string(actual_string)
# Smoke test for empty input.
def test3(self) -> None:
series = pd.Series([])
cstati.apply_normality_test(series)
def test4(self) -> None:
series = self._get_series(seed=1)
# Place some `NaN` values in the series.
series[:5] = np.nan
series[8:10] = np.nan
actual = cstati.apply_normality_test(series)
actual_string = hut.convert_df_to_string(actual, index=True)
self.check_string(actual_string)
def test5(self) -> None:
series = self._get_series(seed=1)
# Place some `NaN` values in the series.
series[:5] = np.nan
series[8:10] = np.nan
actual = cstati.apply_normality_test(
series, nan_mode="ffill_and_drop_leading"
)
actual_string = hut.convert_df_to_string(actual, index=True)
self.check_string(actual_string)
# Smoke test for input of `np.nan`s.
def test6(self) -> None:
series = pd.Series([np.nan for i in range(10)])
cstati.apply_normality_test(series)
@staticmethod
def _get_series(seed: int) -> pd.Series:
arparams = np.array([0.75, -0.25])
maparams = np.array([0.65, 0.35])
arma_process = casgen.ArmaProcess(arparams, maparams)
date_range = {"start": "1/1/2010", "periods": 40, "freq": "M"}
series = arma_process.generate_sample(
date_range_kwargs=date_range, seed=seed
)
return series
class TestApplyAdfTest(hut.TestCase):
def test1(self) -> None:
series = self._get_series(seed=1)
actual = cstati.apply_adf_test(series)
actual_string = hut.convert_df_to_string(actual, index=True)
self.check_string(actual_string)
def test2(self) -> None:
series = self._get_series(seed=1)
actual = cstati.apply_adf_test(series, regression="ctt")
actual_string = hut.convert_df_to_string(actual, index=True)
self.check_string(actual_string)
def test3(self) -> None:
series = self._get_series(seed=1)
actual = cstati.apply_adf_test(series, maxlag=5)
actual_string = hut.convert_df_to_string(actual, index=True)
self.check_string(actual_string)
def test4(self) -> None:
series = self._get_series(seed=1)
actual = cstati.apply_adf_test(series, autolag="t-stat")
actual_string = hut.convert_df_to_string(actual, index=True)
self.check_string(actual_string)
def test5(self) -> None:
series = self._get_series(seed=1)
actual = cstati.apply_adf_test(series, prefix="adf_")
actual_string = hut.convert_df_to_string(actual, index=True)
self.check_string(actual_string)
# Smoke test for empty input.
def test6(self) -> None:
series = pd.Series([])
cstati.apply_adf_test(series)
def test7(self) -> None:
series = self._get_series(seed=1)
series[3:5] = np.nan
actual = cstati.apply_adf_test(series, nan_mode="fill_with_zero")
actual_string = hut.convert_df_to_string(actual, index=True)
self.check_string(actual_string)
# Smoke test for input of `np.nan`s.
def test8(self) -> None:
series = pd.Series([np.nan for i in range(10)])
cstati.apply_adf_test(series)
@staticmethod
def _get_series(seed: int) -> pd.Series:
arparams = np.array([0.75, -0.25])
maparams = np.array([0.65, 0.35])
arma_process = casgen.ArmaProcess(arparams, maparams)
date_range = {"start": "1/1/2010", "periods": 40, "freq": "M"}
series = arma_process.generate_sample(
date_range_kwargs=date_range, seed=seed
)
return series
class TestApplyKpssTest(hut.TestCase):
def test1(self) -> None:
series = self._get_series(seed=1)
actual = cstati.apply_kpss_test(series)
actual_string = hut.convert_df_to_string(actual, index=True)
self.check_string(actual_string)
def test2(self) -> None:
series = self._get_series(seed=1)
actual = cstati.apply_kpss_test(series, regression="ct")
actual_string = hut.convert_df_to_string(actual, index=True)
self.check_string(actual_string)
def test3(self) -> None:
series = self._get_series(seed=1)
actual = cstati.apply_kpss_test(series, nlags="auto")
actual_string = hut.convert_df_to_string(actual, index=True)
self.check_string(actual_string)
def test4(self) -> None:
series = self._get_series(seed=1)
actual = cstati.apply_kpss_test(series, nlags=5)
actual_string = hut.convert_df_to_string(actual, index=True)
self.check_string(actual_string)
def test5(self) -> None:
series = self._get_series(seed=1)
actual = cstati.apply_kpss_test(series, prefix="kpss_")
actual_string = hut.convert_df_to_string(actual, index=True)
self.check_string(actual_string)
# Smoke test for empty input.
def test6(self) -> None:
series = pd.Series([])
cstati.apply_kpss_test(series)
def test7(self) -> None:
series = self._get_series(seed=1)
series[3:5] = np.nan
actual = cstati.apply_kpss_test(series, nan_mode="fill_with_zero")
actual_string = hut.convert_df_to_string(actual, index=True)
self.check_string(actual_string)
# Smoke test for input of `np.nan`s.
def test8(self) -> None:
series = pd.Series([np.nan for i in range(10)])
cstati.apply_kpss_test(series)
@staticmethod
def _get_series(seed: int) -> pd.Series:
arparams = np.array([0.75, -0.25])
maparams = np.array([0.65, 0.35])
arma_process = casgen.ArmaProcess(arparams, maparams)
date_range = {"start": "1/1/2010", "periods": 40, "freq": "M"}
series = arma_process.generate_sample(
date_range_kwargs=date_range, seed=seed
)
return series
class TestApplyLjungBoxTest(hut.TestCase):
def test1(self) -> None:
series = self._get_series(seed=1)
actual = cstati.apply_ljung_box_test(series)
actual_string = hut.convert_df_to_string(actual, index=True)
self.check_string(actual_string)
def test2(self) -> None:
series = self._get_series(seed=1)
actual = cstati.apply_ljung_box_test(series, lags=3)
actual_string = hut.convert_df_to_string(actual, index=True)
self.check_string(actual_string)
def test3(self) -> None:
series = self._get_series(seed=1)
actual = cstati.apply_ljung_box_test(series, model_df=3)
actual_string = hut.convert_df_to_string(actual, index=True)
self.check_string(actual_string)
def test4(self) -> None:
series = self._get_series(seed=1)
actual = cstati.apply_ljung_box_test(series, period=5)
actual_string = hut.convert_df_to_string(actual, index=True)
self.check_string(actual_string)
def test5(self) -> None:
series = self._get_series(seed=1)
actual = cstati.apply_ljung_box_test(series, prefix="lb_")
actual_string = hut.convert_df_to_string(actual, index=True)
self.check_string(actual_string)
def test6(self) -> None:
series = self._get_series(seed=1)
actual = cstati.apply_ljung_box_test(series, return_df=False)
actual_string = hut.convert_df_to_string(actual, index=True)
self.check_string(actual_string)
# Smoke test for empty input.
def test7(self) -> None:
series = pd.Series([])
cstati.apply_ljung_box_test(series)
def test8(self) -> None:
series = self._get_series(seed=1)
series[3:5] = np.nan
actual = cstati.apply_ljung_box_test(series, nan_mode="fill_with_zero")
actual_string = hut.convert_df_to_string(actual, index=True)
self.check_string(actual_string)
# Smoke test for input of `np.nan`s.
def test9(self) -> None:
series = pd.Series([np.nan for i in range(10)])
cstati.apply_ljung_box_test(series)
@staticmethod
def _get_series(seed: int) -> pd.Series:
arparams = np.array([0.75, -0.25])
maparams = np.array([0.65, 0.35])
arma_process = casgen.ArmaProcess(arparams, maparams)
date_range = {"start": "1/1/2010", "periods": 40, "freq": "M"}
series = arma_process.generate_sample(
date_range_kwargs=date_range, seed=seed
)
return series
class TestComputeSpecialValueStats(hut.TestCase):
def test1(self) -> None:
"""
Test for default arguments.
"""
series = self._get_messy_series(seed=1)
actual = cstati.compute_special_value_stats(series)
actual_string = hut.convert_df_to_string(actual, index=True)
self.check_string(actual_string)
def test2(self) -> None:
"""
Test for prefix.
"""
series = self._get_messy_series(seed=1)
actual = cstati.compute_special_value_stats(series, prefix="data_")
actual_string = hut.convert_df_to_string(actual, index=True)
self.check_string(actual_string)
# Smoke test for empty input.
def test3(self) -> None:
series = pd.Series([])
cstati.compute_special_value_stats(series)
@staticmethod
def _get_messy_series(seed: int) -> pd.Series:
arparams = np.array([0.75, -0.25])
maparams = np.array([0.65, 0.35])
arma_process = casgen.ArmaProcess(arparams, maparams)
date_range = {"start": "1/1/2010", "periods": 40, "freq": "M"}
series = arma_process.generate_sample(
date_range_kwargs=date_range, seed=seed
)
series[:5] = 0
series[-5:] = np.nan
series[10:13] = np.inf
series[13:16] = -np.inf
return series
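# The tests above exercise `cstati.compute_special_value_stats`. As a hedged
# sketch of what they check (assumed layout only; the exact columns cstati
# produces may differ), the statistics reduce to counts of special values:
def _special_value_counts_sketch(series: pd.Series) -> pd.Series:
    # Count total elements, zeros, NaNs and +/-inf values in the series.
    return pd.Series(
        {
            "num_elems": len(series),
            "num_zeros": int((series == 0).sum()),
            "num_nans": int(series.isna().sum()),
            "num_infs": int(np.isinf(series).sum()),
        }
    )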
class TestCalculateHitRate(hut.TestCase):
def test1(self) -> None:
"""
Test for default parameters.
Expected outcome: 0
hit_rate_point_est_(%) 55.5556
hit_rate_97.50%CI_lower_bound_(%) 25.4094
hit_rate_97.50%CI_upper_bound_(%) 82.7032
"""
series = self._get_test_series()
actual = cstati.calculate_hit_rate(series)
actual_string = hut.convert_df_to_string(actual, index=True)
self.check_string(actual_string)
def test2(self) -> None:
"""
        Test the case when NaNs make up half of the input.
Expected outcome: 0
hit_rate_point_est_(%) 55.5556
hit_rate_97.50%CI_lower_bound_(%) 25.4094
hit_rate_97.50%CI_upper_bound_(%) 82.7032
"""
series = self._get_test_series()
nan_series = pd.Series([np.nan for i in range(len(series))])
series = pd.concat([series, nan_series])
actual = cstati.calculate_hit_rate(series)
actual_string = hut.convert_df_to_string(actual, index=True)
self.check_string(actual_string)
def test3(self) -> None:
"""
        Test the case when np.inf values make up half of the input.
Expected outcome: 0
hit_rate_point_est_(%) 55.5556
hit_rate_97.50%CI_lower_bound_(%) 25.4094
hit_rate_97.50%CI_upper_bound_(%) 82.7032
"""
series = self._get_test_series()
inf_series = pd.Series([np.inf for i in range(len(series))])
inf_series[:5] = -np.inf
series = pd.concat([series, inf_series])
actual = cstati.calculate_hit_rate(series)
actual_string = hut.convert_df_to_string(actual, index=True)
self.check_string(actual_string)
def test4(self) -> None:
"""
        Test the case when zeros make up half of the input.
Expected outcome: 0
hit_rate_point_est_(%) 55.5556
hit_rate_97.50%CI_lower_bound_(%) 25.4094
hit_rate_97.50%CI_upper_bound_(%) 82.7032
"""
series = self._get_test_series()
zero_series = pd.Series([0 for i in range(len(series))])
series = pd.concat([series, zero_series])
actual = cstati.calculate_hit_rate(series)
actual_string = hut.convert_df_to_string(actual, index=True)
self.check_string(actual_string)
def test5(self) -> None:
"""
Test threshold.
Expected outcome: 0
hit_rate_point_est_(%) 57.1429
hit_rate_97.50%CI_lower_bound_(%) 23.4501
hit_rate_97.50%CI_upper_bound_(%) 86.1136
"""
series = self._get_test_series()
actual = cstati.calculate_hit_rate(series, threshold=10e-3)
actual_string = hut.convert_df_to_string(actual, index=True)
self.check_string(actual_string)
def test6(self) -> None:
"""
Test alpha.
Expected outcome: 0
hit_rate_point_est_(%) 55.5556
hit_rate_95.00%CI_lower_bound_(%) 29.6768
hit_rate_95.00%CI_upper_bound_(%) 79.1316
"""
series = self._get_test_series()
actual = cstati.calculate_hit_rate(series, alpha=0.1)
actual_string = hut.convert_df_to_string(actual, index=True)
self.check_string(actual_string)
def test7(self) -> None:
"""
Test prefix.
Expected outcome: 0
hit_hit_rate_point_est_(%) 55.5556
hit_hit_rate_97.50%CI_lower_bound_(%) 25.4094
hit_hit_rate_97.50%CI_upper_bound_(%) 82.7032
"""
series = self._get_test_series()
actual = cstati.calculate_hit_rate(series, prefix="hit_")
actual_string = hut.convert_df_to_string(actual, index=True)
self.check_string(actual_string)
def test8(self) -> None:
"""
Test method.
Expected outcome: 0
hit_rate_point_est_(%) 55.5556
hit_rate_97.50%CI_lower_bound_(%) 26.6651
hit_rate_97.50%CI_upper_bound_(%) 81.1221
"""
series = self._get_test_series()
actual = cstati.calculate_hit_rate(series, method="wilson")
self.check_string(hut.convert_df_to_string(actual, index=True))
# Smoke test for empty input.
def test_smoke(self) -> None:
series = pd.Series([])
cstati.calculate_hit_rate(series)
# Smoke test for input of `np.nan`s.
def test_nan(self) -> None:
series = pd.Series([np.nan] * 10)
cstati.calculate_hit_rate(series)
@staticmethod
def _get_test_series() -> pd.Series:
series = pd.Series([0, -0.001, 0.001, -0.01, 0.01, -0.1, 0.1, -1, 1, 10])
return series
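# A hedged sketch of the interval behind the hit-rate tests above. It assumes
# (not verified against cstati) that `calculate_hit_rate` reduces to a
# binomial-proportion interval such as statsmodels' `proportion_confint`; the
# helper name and output labels below are illustrative only.
def _hit_rate_ci_sketch(signs: pd.Series, alpha: float = 0.05) -> pd.Series:
    from statsmodels.stats.proportion import proportion_confint
    # Drop NaNs/infs and zeros, then count strictly positive values as hits.
    clean = signs.replace([np.inf, -np.inf], np.nan).dropna()
    clean = clean[clean != 0]
    hits = int((clean > 0).sum())
    low, high = proportion_confint(hits, len(clean), alpha=alpha, method="jeffreys")
    return pd.Series(
        {
            "hit_rate_point_est_(%)": 100 * hits / len(clean),
            "ci_lower_bound_(%)": 100 * low,
            "ci_upper_bound_(%)": 100 * high,
        }
    )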
class Test_compute_jensen_ratio(hut.TestCase):
def test1(self) -> None:
signal = self._get_signal(seed=1)
actual = cstati.compute_jensen_ratio(
signal,
)
actual_string = hut.convert_df_to_string(actual, index=True)
self.check_string(actual_string)
def test2(self) -> None:
signal = self._get_signal(seed=1)
actual = cstati.compute_jensen_ratio(
signal,
p_norm=3,
)
actual_string = hut.convert_df_to_string(actual, index=True)
self.check_string(actual_string)
def test3(self) -> None:
signal = self._get_signal(seed=1)
signal[5:8] = np.inf
actual = cstati.compute_jensen_ratio(
signal,
inf_mode="drop",
)
actual_string = hut.convert_df_to_string(actual, index=True)
self.check_string(actual_string)
def test4(self) -> None:
signal = self._get_signal(seed=1)
actual = cstati.compute_jensen_ratio(
signal,
nan_mode="ffill",
)
actual_string = hut.convert_df_to_string(actual, index=True)
self.check_string(actual_string)
def test5(self) -> None:
signal = self._get_signal(seed=1)
actual = cstati.compute_jensen_ratio(
signal,
prefix="commodity_",
)
actual_string = hut.convert_df_to_string(actual, index=True)
self.check_string(actual_string)
# Smoke test for empty input.
def test6(self) -> None:
signal = pd.Series([])
cstati.compute_jensen_ratio(signal)
@staticmethod
def _get_signal(seed: int) -> pd.Series:
np.random.seed(seed)
n = 1000
signal = pd.Series(np.random.randn(n))
signal[30:50] = np.nan
return signal
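# A hedged sketch of one natural "Jensen ratio" (an assumption, not necessarily
# the definition cstati uses): the ratio of the L1 mean to the Lp mean of the
# signal, which Jensen's inequality bounds above by 1 for p >= 1.
def _jensen_ratio_sketch(signal: pd.Series, p_norm: float = 2.0) -> float:
    clean = signal.dropna()
    l1_mean = np.abs(clean).mean()
    lp_mean = (np.abs(clean) ** p_norm).mean() ** (1.0 / p_norm)
    return float(l1_mean / lp_mean)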
class Test_compute_forecastability(hut.TestCase):
def test1(self) -> None:
signal = self._get_signal(seed=1)
actual = cstati.compute_forecastability(
signal,
)
actual_string = hut.convert_df_to_string(actual, index=True)
self.check_string(actual_string)
def test2(self) -> None:
signal = self._get_signal(seed=1)
actual = cstati.compute_forecastability(
signal,
mode="periodogram",
)
actual_string = hut.convert_df_to_string(actual, index=True)
self.check_string(actual_string)
def test3(self) -> None:
signal = self._get_signal(seed=1)
actual = cstati.compute_forecastability(
signal,
nan_mode="ffill_and_drop_leading",
)
actual_string = hut.convert_df_to_string(actual, index=True)
self.check_string(actual_string)
def test4(self) -> None:
signal = self._get_signal(seed=1)
actual = cstati.compute_forecastability(
signal,
prefix="commodity_",
)
actual_string = hut.convert_df_to_string(actual, index=True)
self.check_string(actual_string)
# Smoke test for empty input.
def test5(self) -> None:
signal = self._get_signal(seed=1)
cstati.compute_forecastability(signal)
@staticmethod
def _get_signal(seed: int) -> pd.Series:
np.random.seed(seed)
n = 1000
signal = pd.Series(np.random.randn(n))
signal[30:50] = np.nan
return signal
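# A hedged sketch of a spectral notion of forecastability (an assumption, not
# necessarily what cstati implements): one minus the normalized spectral
# entropy of the periodogram, so white noise scores near 0 and a pure tone
# scores near 1.
def _spectral_forecastability_sketch(signal: pd.Series) -> float:
    import scipy.signal as ssig
    _, power = ssig.periodogram(signal.dropna().to_numpy())
    density = power / power.sum()
    entropy = -np.sum(density * np.log(density + 1e-12))
    return float(1.0 - entropy / np.log(len(density)))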
class Test_compute_annualized_return_and_volatility(hut.TestCase):
def test1(self) -> None:
"""
Test for default parameters.
"""
series = self._get_series(seed=1)
actual = cstati.compute_annualized_return_and_volatility(series)
actual_string = hut.convert_df_to_string(actual, index=True)
self.check_string(actual_string)
def test2(self) -> None:
"""
Test prefix.
"""
series = self._get_series(seed=1)
actual = cstati.compute_annualized_return_and_volatility(
series, prefix="test_"
)
actual_string = hut.convert_df_to_string(actual, index=True)
self.check_string(actual_string)
# Smoke test for empty input
def test3(self) -> None:
series = pd.Series([])
cstati.compute_annualized_return_and_volatility(series)
@staticmethod
def _get_series(seed: int) -> pd.Series:
arma_process = casgen.ArmaProcess([0], [0])
date_range = {"start": "1/1/2010", "periods": 40, "freq": "M"}
series = arma_process.generate_sample(
date_range_kwargs=date_range, scale=0.1, seed=seed
)
return series
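# A hedged sketch of the quantities tested above, using the usual convention
# for monthly returns (an assumption; cstati may infer the frequency and use
# different column names):
def _annualize_monthly_returns_sketch(returns: pd.Series) -> pd.Series:
    ppy = 12  # periods per year for monthly data
    return pd.Series(
        {
            "annualized_mean_return": ppy * returns.mean(),
            "annualized_volatility": np.sqrt(ppy) * returns.std(),
        }
    )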
class TestComputeMaxDrawdown(hut.TestCase):
def test1(self) -> None:
series = self._get_series(seed=1)
actual = cstati.compute_max_drawdown(series)
actual_string = hut.convert_df_to_string(actual, index=True)
self.check_string(actual_string)
def test2(self) -> None:
series = self._get_series(seed=1)
actual = cstati.compute_max_drawdown(series, prefix="new_")
actual_string = hut.convert_df_to_string(actual, index=True)
self.check_string(actual_string)
def test3(self) -> None:
"""
Smoke test for empty input.
"""
        series = pd.Series([])
        cstati.compute_max_drawdown(series)
# Licensed to Modin Development Team under one or more contributor license agreements.
# See the NOTICE file distributed with this work for additional information regarding
# copyright ownership. The Modin Development Team licenses this file to you under the
# Apache License, Version 2.0 (the "License"); you may not use this file except in
# compliance with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under
# the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific language
# governing permissions and limitations under the License.
import time
import logging
from typing import Dict, Optional
from multiprocessing import cpu_count
import xgboost as xgb
import ray
from ray.services import get_node_ip_address
import numpy as np
import pandas
from modin.distributed.dataframe.pandas import unwrap_partitions
from .utils import RabitContext, RabitContextManager
LOGGER = logging.getLogger("[modin.xgboost]")
@ray.remote
class ModinXGBoostActor:
def __init__(self, ip, nthread=cpu_count()):
self._evals = []
self._dpredict = []
self._ip = ip
self._nthreads = nthread
LOGGER.info(f"Actor <{self._ip}>, nthread = {self._nthreads} was initialized.")
def _get_dmatrix(self, X_y):
s = time.time()
X = X_y[: len(X_y) // 2]
y = X_y[len(X_y) // 2 :]
assert (
len(X) == len(y) and len(X) > 0
), "X and y should have the equal length more than 0"
X = pandas.concat(X, axis=0)
        y = pandas.concat(y, axis=0)
import os
import pandas as pd
import pytest
from pandas.testing import assert_frame_equal
from .. import read_sql
@pytest.fixture(scope="module") # type: ignore
def postgres_url() -> str:
conn = os.environ["POSTGRES_URL"]
return conn
@pytest.mark.xfail
def test_on_non_select(postgres_url: str) -> None:
query = "CREATE TABLE non_select(id INTEGER NOT NULL)"
df = read_sql(postgres_url, query)
def test_aggregation(postgres_url: str) -> None:
query = "SELECT test_bool, SUM(test_float) FROM test_table GROUP BY test_bool"
df = read_sql(postgres_url, query)
expected = pd.DataFrame(
index=range(3),
data={
"test_bool": pd.Series([None, False, True], dtype="boolean"),
"sum": pd.Series([10.9, 5.2, -10.0], dtype="float64")
}
)
assert_frame_equal(df, expected, check_names=True)
def test_partition_on_aggregation(postgres_url: str) -> None:
query = "SELECT test_bool, SUM(test_int) AS test_int FROM test_table GROUP BY test_bool"
df = read_sql(postgres_url, query,
partition_on="test_int", partition_num=2)
expected = pd.DataFrame(
index=range(3),
data={
"test_bool": pd.Series([None, False, True], dtype="boolean"),
"test_int": pd.Series([4, 5, 1315], dtype="Int64")
}
)
    assert_frame_equal(df, expected, check_names=True)
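# A hedged usage sketch of the partitioned read exercised above (the connection
# string is a placeholder): connector-x splits the query on the numeric
# `partition_on` column into `partition_num` ranges and fetches them in
# parallel, e.g.
#     read_sql(
#         "postgresql://user:pass@host:5432/db",
#         "SELECT * FROM test_table",
#         partition_on="test_int",
#         partition_num=4,
#     )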
# coding: utf-8
"""Main estimation code.
"""
import re
import numpy as np
import pandas as pd
from scipy.stats.mstats import gmean
from statsmodels.base.model import GenericLikelihoodModel
from numba import jit
_norm_pdf_C = np.sqrt(2 * np.pi)
@jit(nopython=True)
def _norm_pdf(x):
return np.exp(-x ** 2 / 2) / _norm_pdf_C
@jit(nopython=True)
def _kde_local(loc, data, bw, lmbda):
"""Return the locally smoothed kernel density estimate at *loc*
based on *data* with locally smoothed bandwidth *bw x lmbda*,
where *lmbda* is either a scalar or a vector of the same length
as *data*.
"""
l_s_bw = bw * lmbda
d = (loc - data).T / l_s_bw
s = (_norm_pdf(d) / l_s_bw).T
kde = 0.0
for r in range(s.shape[0]):
kde += s[r].prod()
return kde
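# In formula form, _kde_local evaluates
#     f_hat(loc) = sum_j prod_d phi((loc_d - data[j, d]) / (bw * lmbda_j)) / (bw * lmbda_j),
# i.e. a product Gaussian kernel with an observation-specific bandwidth
# bw * lmbda_j; the callers below divide the sum by the number of observations.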
@jit(nopython=True)
def _kde_local_array_core(index_std, locs_std, leave_one_out_locs, other_locs, nobs, h, lmbda):
# Loop over leave-one-out variables and others.
loo_shape = (index_std.shape[0] - 1, index_std.shape[1])
loo_index = np.empty(loo_shape, dtype=np.double)
loo_lmbda = np.empty(loo_shape[0], dtype=np.double)
out = np.empty(len(locs_std), dtype=np.double) * np.nan
i = 0
for j in leave_one_out_locs:
k_loo = 0
for k in range(index_std.shape[0]):
if not k == i:
loo_index[k_loo, 0] = index_std[k, 0]
loo_index[k_loo, 1] = index_std[k, 1]
loo_lmbda[k_loo] = lmbda[k]
k_loo += 1
out[j] = _kde_local(locs_std[j], loo_index, h, loo_lmbda) / (nobs - 1)
i += 1
for j in other_locs:
out[j] = _kde_local(locs_std[j], index_std, h, lmbda) / nobs
return out
def _kde_local_array(locs, index, leave_one_out_locs, other_locs, nobs, h, lmbda):
"""Return locally smoothed density of *index* evaluated
at each element of *locs*.
Further parameters:
* *h* - the baseline bandwidth
* *lmbda* - the local smoothing parameter adjusting the bandwidth
    In KV (2009), this corresponds to the :math:`\hat{f}_s, s \in \{0, 1\}`
in D1 (but for all observations instead of one ω).
"""
# Standardise data and locs s.t. the product kernel can be used easily.
Sigma = np.cov(index.T)
if len(Sigma.shape) == 0:
Sigma_inv = Sigma ** -1
sqrt_det = np.sqrt(Sigma_inv)
chol_Sigma_inv = sqrt_det
elif len(Sigma.shape) == 2:
Sigma_inv = np.linalg.inv(Sigma)
sqrt_det = np.sqrt(np.linalg.det(Sigma_inv))
chol_Sigma_inv = np.linalg.cholesky(Sigma_inv)
index_std = index.dot(chol_Sigma_inv)
locs_std = locs.dot(chol_Sigma_inv)
return sqrt_det * _kde_local_array_core(
index_std,
locs_std,
leave_one_out_locs,
other_locs,
nobs,
h,
lmbda
)
class KleinVellaDoubleIndex(GenericLikelihoodModel):
def __init__(self, data, y_name, index_names, index_colnames):
"""Set up the data and basic model. Arguments:
* *data*: A pandas dataframe with all dependent and explanatory
variables
* *y_name*: The name of the dependent variable (string)
* *index_names*: A 2-element list/tuple with the names of the indices.
E.g.: ['Structural Equation', 'Control Function']
* *index_colnames*: A 2-element list of iterables with the names of
the independent variables (strings). E.g.:
[
['age', 'female', 'income'],
['wealth', 'female', 'income']
]
Both should contain a dedicated continuous
variable as the first element (responsibility of the user).
*y_name* and the elements of *index[k]_names* must be present in the
columns of *data*.
"""
cols = data.columns
assert y_name in cols
self.y_name = y_name
assert len(index_names) == 2
assert len(index_colnames) == 2
self.index_names = tuple(index_names)
self.index_colnames = []
self.index_colnames_all = []
        self.index_ncoeffs = np.zeros(2, dtype=int)
for i in range(2):
for i_n in index_colnames[i]:
assert i_n in cols, "'{}' not in data columns!".format(i_n)
self.index_colnames.append(tuple(index_colnames[i]))
self.index_ncoeffs[i] = len(self.index_colnames[i]) - 1
for v0 in self.index_colnames[0]:
if v0 not in self.index_colnames[1]:
self.index_colnames_all.append(v0)
for v1 in self.index_colnames[1]:
self.index_colnames_all.append(v1)
self.coeffs = [None, None]
# Retain only data without missings in all relevant variables
self._data = data.dropna(subset=[y_name] + self.index_colnames_all)
self._nobs = len(self._data)
self._data = self._data.set_index(np.arange(self._nobs))
# Trimming is done ex post, so we can set the data here already.
super(KleinVellaDoubleIndex, self).__init__(
endog=self._data[self.y_name],
exog=self._data[self.index_colnames_all]
)
self.endog = self._data[self.y_name]
self.exog = self._data[self.index_colnames_all]
# Consistency check - binary dependent variable?
assert set(self._data[self.y_name].unique()) == {0, 1}, (
"\n\nY is not a binary variable: {}\n\n".format(set(self._data[self.y_name].unique()))
)
def coeffs_from_vec(self, coeffs_vec):
"""Set the attribute *coeffs* based on *coeffs_vec*."""
coeffs = [self.coeffs[0].copy(), self.coeffs[1].copy()]
coeffs[0].iloc[1:] = coeffs_vec[:self.index_ncoeffs[0]].copy()
coeffs[1].iloc[1:] = coeffs_vec[self.index_ncoeffs[0]:].copy()
return coeffs
def _coeff_series_to_vec(self, coeffs):
        vec = np.zeros(self.index_ncoeffs.sum(), dtype=float)
vec[:self.index_ncoeffs[0]] = coeffs[0].iloc[1:].values.copy()
vec[self.index_ncoeffs[0]:] = coeffs[1].iloc[1:].values.copy()
return vec
def get_index(self, coeffs):
"""Return the based on a 2-element list of *coeffs* and the data in *self.exog*.
"""
return pd.DataFrame(
data=[
self.exog[coeffs[0].index].dot(coeffs[0]),
self.exog[coeffs[1].index].dot(coeffs[1])
],
index=[0, 1]
).T
def τ(self, z, a):
"""Return smooth trimming weights, formula in D2 of KV (2009)."""
return 1 / (1 + np.exp(z * self._nobs ** a))
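    # Note: as N grows, τ(z, a) tends to 1 for z < 0 and to 0 for z > 0, so it
    # is a smooth approximation of the indicator 1{z < 0} used for trimming.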
def _λ(self, f):
"""Return the estimated local smoothing parameter, formula in D3 of KV (2009)."""
γ = f / gmean(f)
d = self.τ(z=1 / np.log(self._nobs) - γ, a=0.01)
return (d * γ + (1 - d) / np.log(self._nobs)) ** (-1 / 2)
def λ_multi_stage(self, index, n_stages, h1=None, h2=None):
"""Return the vector of estimated local smoothing parameters in D3/D4 of KV (2009)
for each element of *index*.
The parameter *n_stages ∊ {1, 2, 3}* controls the number of stages:
* 1 just returns a vector of ones
* 2 returns a vector of parameters from a single smoothing step
* 3 returns a vector of parameters from two smoothing steps
"""
if len(index.shape) == 1:
index = index.reshape((len(index), 1))
n = len(index)
all_obs = np.arange(n)
no_obs = np.array([], dtype=np.int64)
λ1 = np.ones(n, dtype=np.double)
if n_stages == 1:
return λ1
elif n_stages in {2, 3}:
assert h1 is not None
λ2 = self._λ(_kde_local_array(index, index, all_obs, no_obs, self._nobs, h1, λ1))
if n_stages == 2:
return λ2
else:
assert h2 is not None, "3-stage smoothing currently not implemented."
return self._λ(_kde_local_array(index, index, all_obs, no_obs, self._nobs, h2, λ2))
else:
raise ValueError(n_stages)
def _xtrim(self, lower, upper):
"""Return trimming indicator series, where trimming is based on
the covariates directly (and the quantiles to be trimmed at, i.e.
*lower* and *upper*).
"""
trm = pd.Series(data=True, index=self._data.index)
for c in self.index_colnames_all:
l_limit = np.percentile(self._data[c], 100 * lower)
u_limit = np.percentile(self._data[c], 100 * upper)
trm &= self._data[c].apply(lambda x: True if l_limit <= x <= u_limit else False)
return trm
def f_s_pilot(self, s, index):
"""Return a pilot density estimate (potentially locally smoothed)
conditional on the outcome of the dependent variable, as defined
in D1-D4 of KV (2009).
In theory (see the paper), the local smoothing step is not needed.
In practice, it is used in the code by the authors.
"""
assert s in {0, 1}
index_s = index[self.endog == s].values
leave_one_out_locs = index[self.endog == s].index.values
other_locs = index[self.endog == 1 - s].index.values
λ = self.λ_multi_stage(index_s, n_stages=self._n_smoothing_stages_pilot, h1=self._h_pilot)
return _kde_local_array(
index.values,
index_s,
leave_one_out_locs,
other_locs,
self._nobs,
self._h_pilot,
λ
)
def semiparametric_probability_function_pilot(self, index):
f0 = self.f_s_pilot(0, index)
f1 = self.f_s_pilot(1, index)
return f1 / (f1 + f0)
def _bin_loglikeobs(self, P):
Y = self.endog
return Y * np.log(P) + (1 - Y) * np.log(1 - P)
def _loglikeobs_pilot(self, coeffs_vec):
"""Return the pilot estimator of the log likelihood function, i.e. the Q
in D6 of KV (2009).
"""
self.coeffs = self.coeffs_from_vec(coeffs_vec)
index = self.get_index(self.coeffs)
P = self.semiparametric_probability_function_pilot(index)
return self._xtrim_series * self._bin_loglikeobs(P)
def fit_pilot(
self,
coeffs_start=[None, None],
trim_lower=0.01,
trim_upper=0.99,
n_smoothing_stages_pilot=1,
maxiter=500
):
"""Fit the initial model, where trimming is based on the covariates
directly (as opposed to the index).
Arguments: *coeffs_start* a 2-element list of start values for the
coefficient vectors of both indices. The order must be the same as
the order of *self.index_colnames* and the initial element of each start
vector must be unity. If the start values are set to *None*, a vector
of ones will be used.
"""
for i in range(2):
if coeffs_start[i] is None:
coeffs_start[i] = pd.Series(data=1.0, index=self.index_colnames[i])
else:
assert tuple(coeffs_start[i].index) == self.index_colnames[i]
assert coeffs_start[i].iloc[0] in [-1.0, 1.0]
self.coeffs[i] = coeffs_start[i].copy()
vec_coeffs_start = self._coeff_series_to_vec(coeffs_start)
self._xtrim_series = self._xtrim(lower=trim_lower, upper=trim_upper)
self._h_pilot = self._nobs ** - (1 / 11)
self._n_smoothing_stages_pilot = n_smoothing_stages_pilot
self.loglikeobs = self._loglikeobs_pilot
print("Starting pilot fit.")
self.results_pilot = self.fit(
start_params=vec_coeffs_start,
method='bfgs',
maxiter=maxiter,
full_output=1,
disp=1,
callback=None,
retall=1,
tol=0.001
)
self.coeffs = self.coeffs_from_vec(self.results_pilot.params)
self._coeffs_pilot_vec = self.results_pilot.params.copy()
self.coeffs_pilot = [self.coeffs[0].copy(), self.coeffs[1].copy()]
self.index_pilot = self.get_index(self.coeffs_pilot)
def _itrim(self, coeffs, lower, upper):
"""Return trimmming vector based on product of trimming vectors
for individual indices.
"""
index = self.get_index(coeffs)
trm = pd.Series(data=1, index=self._data.index, dtype=np.double)
for i in 0, 1:
l_limit = np.percentile(index[i], 100 * lower)
u_limit = np.percentile(index[i], 100 * upper)
trm_l = self.τ(z=l_limit - index[i], a=1 / 12)
trm_u = 1 - self.τ(z=u_limit - index[i], a=1 / 12)
trm *= trm_l * trm_u
return trm
def f_s(self, index, index_s, leave_one_out_locs, other_locs):
"""Return a locally smoothed density estimate conditional on the outcome
of the dependent variable, as defined in D1-D4 of KV (2009).
Usually, *index* should be the index regardless of the outcome, *index_s*
should be the index for those observations with outcome s ∊ {0, 1},
*leave_one_out_locs* the integer locations of these outcomes, and *other_locs*
the integer locations of the outcome 1 - s.
However, this might be different for calculations such as the ASF.
"""
λ3 = self.λ_multi_stage(index_s, n_stages=3, h1=self._h1, h2=self._h2)
return _kde_local_array(
index,
index_s,
leave_one_out_locs,
other_locs,
self._nobs,
self._h3,
λ3
)
def f(self, eval_grid, index_data):
"""Syntactic sugar for local density estimation at a grid for marginal
or joint densities.
Both *eval_grid* and *index_data* must be NumPy arrays.
"""
# Make sure we have 2-d arrays throughout.
if len(eval_grid.shape) == 1:
eval_grid = np.reshape(eval_grid, (len(eval_grid), 1))
elif len(eval_grid.shape) > 2:
raise ValueError(eval_grid.shape)
if len(index_data.shape) == 1:
index_data = np.reshape(index_data, (len(index_data), 1))
elif len(index_data.shape) > 2:
raise ValueError(index_data.shape)
return self.f_s(
index=eval_grid,
index_s=index_data,
leave_one_out_locs=np.array([], dtype=np.int64),
other_locs=np.arange(len(eval_grid))
)
def Δ(self, f, s, ε=0.9):
"""Return the adjustment factors for the probability function defined in D5 of KV (2009).
"""
N = self._nobs
c = self._f_pilot_perc1[s]
α1 = ε * self._r3 / 4
α2 = ε * self._r3 / 5
return c * self._h3 ** ε / (1 + np.exp(N ** α1 * (f - N ** -α2)))
def semiparametric_probability_function(self, index, eval_locs=None):
"""Return the semiparametric probability function defined in D5 of KV (2009).
If *eval_locs* is *None*, go for estimation mode and evaluate the
function for each data point. Else evaluate it at *eval_locs*.
"""
index0 = index[self.endog == 0].values
index1 = index[self.endog == 1].values
if eval_locs is None:
eval_locs = index.values
f0_leave_one_out_locs = index[self.endog == 0].index.values
f1_leave_one_out_locs = index[self.endog == 1].index.values
f0_other_locs = f1_leave_one_out_locs
f1_other_locs = f0_leave_one_out_locs
else:
f0_leave_one_out_locs = np.array([], dtype=np.int64)
f1_leave_one_out_locs = np.array([], dtype=np.int64)
f0_other_locs = np.arange(len(eval_locs))
f1_other_locs = np.arange(len(eval_locs))
# Density estimates conditional on the outcome.
f0 = self.f_s(
index=eval_locs,
index_s=index0,
leave_one_out_locs=f0_leave_one_out_locs,
other_locs=f0_other_locs
)
f1 = self.f_s(
index=eval_locs,
index_s=index1,
leave_one_out_locs=f1_leave_one_out_locs,
other_locs=f1_other_locs
)
Δ0 = self.Δ(f=f0, s=0)
Δ1 = self.Δ(f=f1, s=1)
return (f1 + Δ1) / (f0 + f1 + Δ0 + Δ1)
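    # Note: f0 and f1 are both scaled by the full sample size, so by Bayes'
    # rule f1 / (f0 + f1) estimates P(Y = 1 | index); the Δ terms are the D5
    # adjustment factors that keep the ratio well behaved where the estimated
    # densities are tiny.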
def _loglikeobs_final(self, coeffs_vec_scaled):
coeffs_vec = coeffs_vec_scaled * self._coeffs_pilot_vec
self.coeffs = self.coeffs_from_vec(coeffs_vec)
P = self.semiparametric_probability_function(self.get_index(self.coeffs))
return self._itrim_series * self._bin_loglikeobs(P)
def _set_constants_itrim(self, r3, δ, trim_lower, trim_upper):
# Preliminaries: Set various parameters for local smoothing
r1 = (r3 - δ) / 4
r2 = (r3 - δ / 2) / 2
self._h1 = self._nobs ** -r1
self._h2 = self._nobs ** -r2
self._h3 = self._nobs ** -r3
self._r3 = r3
# Needed for Δ0, Δ1
self._f_pilot_perc1 = np.zeros(2)
self._f_pilot_perc1[0] = np.percentile(
self.f_s_pilot(s=0, index=self.index_pilot) / (1 - self.endog.mean()), 1
)
self._f_pilot_perc1[1] = np.percentile(
self.f_s_pilot(s=1, index=self.index_pilot) / self.endog.mean(), 1
)
# Re-use trimming bounds for ASF, so keep here.
self.trim_lower = trim_lower
self.trim_upper = trim_upper
self._itrim_series = self._itrim(
coeffs=self.coeffs_pilot,
lower=trim_lower,
upper=trim_upper
)
def fit_final(
self,
r3=1 / 11, δ=0.04,
trim_lower=0.01,
trim_upper=0.99,
maxiter=1000
):
"""Fit the final model, where trimming is based on the two indices.
.. note::
This routine assumes that *fit_pilot* has been run and that the
resulting first-step coefficients / index values are stored in
*self.coeffs_pilot* and *self.index_pilot*, respectively.
In order to improve numerical precision, we scale the coefficient
vector with the pilot estimates.
"""
vec_coeffs_start_scaled = np.ones(self.index_ncoeffs.sum())
self._set_constants_itrim(r3, δ, trim_lower, trim_upper)
self.loglikeobs = self._loglikeobs_final
print("Starting final fit.")
self.results_final_scaled = self.fit(
start_params=vec_coeffs_start_scaled,
method='bfgs',
maxiter=maxiter,
full_output=1,
disp=1,
callback=None,
retall=1,
gtol=1e-5
)
self.coeffs = self.coeffs_from_vec(
self.results_final_scaled.params * self._coeffs_pilot_vec
)
self.coeffs_final = [self.coeffs[0].copy(), self.coeffs[1].copy()]
self.index_final = self.get_index(self.coeffs_final)
self.std_err_final = self.coeffs_from_vec(
self.results_final_scaled.bse * np.abs(self._coeffs_pilot_vec)
)
def average_structural_function(self, asf_index_loc, asf_loc, r=None, ε=1e-3):
"""Return the value of the average structural function and its
standard error for *asf_index_loc* ∊ {0, 1}, evaluated at the
point *asf_loc*.
I.e. if *asf_index_loc=0*, the index=1 is integrated out.
"""
index0 = self.index_final[0].values
index1 = self.index_final[1].values
endog = self.endog.values
n_grid = 200
# Set up mesh.
if asf_index_loc == 0:
asf_index = index0
other_index = index1
elif asf_index_loc == 1:
asf_index = index1
other_index = index0
else:
raise ValueError('asf_index_loc = {} ∉ {{0, 1}}'.format(asf_index_loc))
# Calculate the ASF.
other_grid = np.linspace(other_index.min(), other_index.max(), n_grid)
eval_grid = pd.DataFrame({asf_index_loc: asf_loc, 1 - asf_index_loc: other_grid}).values
pred_grid = self.semiparametric_probability_function(
index=self.index_final,
eval_locs=eval_grid
)
dens_other_est = self.f(eval_grid=other_grid, index_data=other_index)
# And now the integral (note we're using an equally spaced grid).
asf = dens_other_est.dot(pred_grid) * (other_grid[1] - other_grid[0])
# Set the bandwidth (note the bandwidth is always relative to the standardised index).
if r is None:
h = self._h3 * asf_index.std()
else:
h = self._nobs ** -r * asf_index.std()
# Variance of the ASF - Start with squared error.
        eval_n = pd.DataFrame({asf_index_loc: asf_loc, 1 - asf_index_loc: other_index})
import numpy as np
import matplotlib.pyplot as plt
import pyvista as pv
import pandas as pd
from skimage import measure
from scipy.integrate import simps
from scipy.interpolate import griddata
import geopandas as gpd
from shapely.geometry import MultiPolygon, Polygon
from zmapio import ZMAPGrid
def poly_area(x,y):
return 0.5*np.abs(np.dot(x,np.roll(y,1))-np.dot(y,np.roll(x,1)))
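# poly_area implements the shoelace formula for a simple closed polygon:
#     area = 0.5 * |sum_i (x_i * y_{i+1} - x_{i+1} * y_i)|.
# Minimal usage sketch (illustrative values only): the unit square has area 1.
def _poly_area_unit_square_example() -> float:
    x = np.array([0.0, 1.0, 1.0, 0.0])
    y = np.array([0.0, 0.0, 1.0, 1.0])
    return poly_area(x, y)  # -> 1.0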
class Surface:
def __init__(self, **kwargs):
self.x = kwargs.pop('x',None)
self.y = kwargs.pop('y',None)
self.z = kwargs.pop('z',None)
self.crs = kwargs.pop('crs',4326)
#Properties
@property
def x(self):
return self._x
@x.setter
def x(self,value):
if value is not None:
assert isinstance(value,np.ndarray)
assert value.ndim == 2
self._x = value
@property
def y(self):
return self._y
@y.setter
def y(self,value):
if value is not None:
assert isinstance(value,np.ndarray)
assert value.ndim == 2
self._y = value
@property
def z(self):
return self._z
@z.setter
def z(self,value):
if value is not None:
assert isinstance(value,np.ndarray)
assert value.ndim == 2
self._z = value
@property
def crs(self):
return self._crs
@crs.setter
def crs(self,value):
        assert isinstance(value,(int,str,type(None))), f"{type(value)} not accepted. crs must be an int EPSG code or a string like 'EPSG:3117'"
if isinstance(value,int):
value = f'EPSG:{value}'
elif isinstance(value,str):
            assert value.startswith('EPSG:'), "a string crs must start with 'EPSG:'; an integer must be the EPSG coordinate reference system code, see http://epsg.io/"
self._crs = value
def contour(self,ax=None,**kwargs):
        # Create the Axes
        cax = ax or plt.gca()
return cax.contour(self.x,self.y,self.z,**kwargs)
def contourf(self,ax=None,**kwargs):
        # Create the Axes
        cax = ax or plt.gca()
return cax.contourf(self.x,self.y,self.z,**kwargs)
def structured_surface_vtk(self):
#Get a Pyvista Object StructedGrid
grid = pv.StructuredGrid(self.x, self.y, self.z).elevation()
return grid
def get_contours_bound(self,levels=None,zmin=None,zmax=None,n=10):
#define levels
if levels is not None:
assert isinstance(levels,(np.ndarray,list))
levels = np.atleast_1d(levels)
assert levels.ndim==1
else:
zmin = zmin if zmin is not None else np.nanmin(self.z)
zmax = zmax if zmax is not None else np.nanmax(self.z)
levels = np.linspace(zmin,zmax,n)
xmax = np.nanmax(self.x)
ymax = np.nanmax(self.y)
xmin = np.nanmin(self.x)
ymin = np.nanmin(self.y)
        # Iterate over the levels
contours = self.structured_surface_vtk().contour(isosurfaces=levels.tolist())
contours.points[:,2] = contours['Elevation']
df = pd.DataFrame(contours.points, columns=['x','y','z'])
        # Organize the points according to their angle with respect to the centroid.
        # This is done so that the bounds can be plotted continuously.
list_df_sorted = []
for i in df['z'].unique():
df_z = df.loc[df['z']==i,['x','y','z']]
centroid = df_z[['x','y']].mean(axis=0).values
df_z[['delta_x','delta_y']] = df_z[['x','y']] - centroid
df_z['angle'] = np.arctan2(df_z['delta_y'],df_z['delta_x'])
df_z.sort_values(by='angle', inplace=True)
list_df_sorted.append(df_z)
        return pd.concat(list_df_sorted, axis=0)
"""Tests for the sdv.constraints.tabular module."""
import uuid
from datetime import datetime
from unittest.mock import Mock
import numpy as np
import pandas as pd
import pytest
from sdv.constraints.errors import MissingConstraintColumnError
from sdv.constraints.tabular import (
Between, ColumnFormula, CustomConstraint, GreaterThan, Negative, OneHotEncoding, Positive,
Rounding, Unique, UniqueCombinations)
def dummy_transform_table(table_data):
return table_data
def dummy_reverse_transform_table(table_data):
return table_data
def dummy_is_valid_table(table_data):
return [True] * len(table_data)
def dummy_transform_table_column(table_data, column):
return table_data
def dummy_reverse_transform_table_column(table_data, column):
return table_data
def dummy_is_valid_table_column(table_data, column):
return [True] * len(table_data[column])
def dummy_transform_column(column_data):
return column_data
def dummy_reverse_transform_column(column_data):
return column_data
def dummy_is_valid_column(column_data):
return [True] * len(column_data)
class TestCustomConstraint():
def test___init__(self):
"""Test the ``CustomConstraint.__init__`` method.
The ``transform``, ``reverse_transform`` and ``is_valid`` methods
should be replaced by the given ones, importing them if necessary.
Setup:
- Create dummy functions (created above this class).
Input:
- dummy transform and revert_transform + is_valid FQN
Output:
- Instance with all the methods replaced by the dummy versions.
"""
is_valid_fqn = __name__ + '.dummy_is_valid_table'
# Run
instance = CustomConstraint(
transform=dummy_transform_table,
reverse_transform=dummy_reverse_transform_table,
is_valid=is_valid_fqn
)
# Assert
assert instance._transform == dummy_transform_table
assert instance._reverse_transform == dummy_reverse_transform_table
assert instance._is_valid == dummy_is_valid_table
def test__run_transform_table(self):
"""Test the ``CustomConstraint._run`` method.
        The ``_run`` method executes ``transform`` and ``reverse_transform``
based on the signature of the functions. In this test, we evaluate
the execution of "table" based functions.
Setup:
- Pass dummy transform function with ``table_data`` argument.
Side Effects:
- Run transform function once with ``table_data`` as input.
Output:
- applied identity transformation "table_data = transformed".
"""
# Setup
table_data = pd.DataFrame({'a': [1, 2, 3]})
dummy_transform_mock = Mock(side_effect=dummy_transform_table,
return_value=table_data)
# Run
instance = CustomConstraint(transform=dummy_transform_mock)
transformed = instance.transform(table_data)
# Asserts
called = dummy_transform_mock.call_args
dummy_transform_mock.assert_called_once()
pd.testing.assert_frame_equal(called[0][0], table_data)
pd.testing.assert_frame_equal(transformed, dummy_transform_mock.return_value)
def test__run_reverse_transform_table(self):
"""Test the ``CustomConstraint._run`` method.
        The ``_run`` method executes ``transform`` and ``reverse_transform``
based on the signature of the functions. In this test, we evaluate
the execution of "table" based functions.
Setup:
- Pass dummy reverse transform function with ``table_data`` argument.
Side Effects:
- Run reverse transform function once with ``table_data`` as input.
Output:
- applied identity transformation "table_data = reverse_transformed".
"""
# Setup
table_data = pd.DataFrame({'a': [1, 2, 3]})
dummy_reverse_transform_mock = Mock(side_effect=dummy_reverse_transform_table,
return_value=table_data)
# Run
instance = CustomConstraint(reverse_transform=dummy_reverse_transform_mock)
reverse_transformed = instance.reverse_transform(table_data)
# Asserts
called = dummy_reverse_transform_mock.call_args
dummy_reverse_transform_mock.assert_called_once()
pd.testing.assert_frame_equal(called[0][0], table_data)
pd.testing.assert_frame_equal(
reverse_transformed, dummy_reverse_transform_mock.return_value)
def test__run_is_valid_table(self):
"""Test the ``CustomConstraint._run_is_valid`` method.
        The ``_run_is_valid`` method executes ``is_valid`` based on
the signature of the functions. In this test, we evaluate
the execution of "table" based functions.
Setup:
- Pass dummy is valid function with ``table_data`` argument.
Side Effects:
- Run is valid function once with ``table_data`` as input.
Output:
- Return a list of [True] of length ``table_data``.
"""
# Setup
table_data = pd.DataFrame({'a': [1, 2, 3]})
dummy_is_valid_mock = Mock(side_effect=dummy_is_valid_table)
# Run
instance = CustomConstraint(is_valid=dummy_is_valid_mock)
is_valid = instance.is_valid(table_data)
# Asserts
expected_out = [True] * len(table_data)
called = dummy_is_valid_mock.call_args
dummy_is_valid_mock.assert_called_once()
pd.testing.assert_frame_equal(called[0][0], table_data)
np.testing.assert_array_equal(is_valid, expected_out)
def test__run_transform_table_column(self):
"""Test the ``CustomConstraint._run`` method.
        The ``_run`` method executes ``transform`` and ``reverse_transform``
based on the signature of the functions. In this test, we evaluate
the execution of "table" and "column" based functions.
Setup:
- Pass dummy transform function with ``table_data`` and ``column`` arguments.
Side Effects:
- Run transform function once with ``table_data`` and ``column`` as input.
Output:
- applied identity transformation "table_data = transformed".
"""
# Setup
table_data = pd.DataFrame({'a': [1, 2, 3]})
dummy_transform_mock = Mock(side_effect=dummy_transform_table_column,
return_value=table_data)
# Run
instance = CustomConstraint(columns='a', transform=dummy_transform_mock)
transformed = instance.transform(table_data)
# Asserts
called = dummy_transform_mock.call_args
assert called[0][1] == 'a'
dummy_transform_mock.assert_called_once()
pd.testing.assert_frame_equal(called[0][0], table_data)
pd.testing.assert_frame_equal(transformed, dummy_transform_mock.return_value)
def test__run_reverse_transform_table_column(self):
"""Test the ``CustomConstraint._run`` method.
        The ``_run`` method executes ``transform`` and ``reverse_transform``
based on the signature of the functions. In this test, we evaluate
the execution of "table" and "column" based functions.
Setup:
- Pass dummy reverse transform function with ``table_data`` and ``column`` arguments.
Side Effects:
- Run reverse transform function once with ``table_data`` and ``column`` as input.
Output:
- applied identity transformation "table_data = transformed".
"""
# Setup
table_data = pd.DataFrame({'a': [1, 2, 3]})
dummy_reverse_transform_mock = Mock(side_effect=dummy_reverse_transform_table_column,
return_value=table_data)
# Run
instance = CustomConstraint(columns='a', reverse_transform=dummy_reverse_transform_mock)
reverse_transformed = instance.reverse_transform(table_data)
# Asserts
called = dummy_reverse_transform_mock.call_args
assert called[0][1] == 'a'
dummy_reverse_transform_mock.assert_called_once()
pd.testing.assert_frame_equal(called[0][0], table_data)
pd.testing.assert_frame_equal(
reverse_transformed, dummy_reverse_transform_mock.return_value)
def test__run_is_valid_table_column(self):
"""Test the ``CustomConstraint._run_is_valid`` method.
        The ``_run_is_valid`` method executes ``is_valid`` based on
the signature of the functions. In this test, we evaluate
the execution of "table" and "column" based functions.
Setup:
- Pass dummy is valid function with ``table_data`` and ``column`` argument.
Side Effects:
- Run is valid function once with ``table_data`` and ``column`` as input.
Output:
- Return a list of [True] of length ``table_data``.
"""
# Setup
table_data = pd.DataFrame({'a': [1, 2, 3]})
dummy_is_valid_mock = Mock(side_effect=dummy_is_valid_table_column)
# Run
instance = CustomConstraint(columns='a', is_valid=dummy_is_valid_mock)
is_valid = instance.is_valid(table_data)
# Asserts
expected_out = [True] * len(table_data)
called = dummy_is_valid_mock.call_args
assert called[0][1] == 'a'
dummy_is_valid_mock.assert_called_once()
pd.testing.assert_frame_equal(called[0][0], table_data)
np.testing.assert_array_equal(is_valid, expected_out)
def test__run_transform_column(self):
"""Test the ``CustomConstraint._run`` method.
        The ``_run`` method executes ``transform`` and ``reverse_transform``
based on the signature of the functions. In this test, we evaluate
the execution of "column" based functions.
Setup:
- Pass dummy transform function with ``column_data`` argument.
Side Effects:
- Run transform function twice, once with the attempt of
``table_data`` and ``column`` and second with ``column_data`` as input.
Output:
- applied identity transformation "table_data = transformed".
"""
# Setup
table_data = pd.DataFrame({'a': [1, 2, 3]})
dummy_transform_mock = Mock(side_effect=dummy_transform_column,
return_value=table_data)
# Run
instance = CustomConstraint(columns='a', transform=dummy_transform_mock)
transformed = instance.transform(table_data)
# Asserts
called = dummy_transform_mock.call_args_list
assert len(called) == 2
# call 1 (try)
assert called[0][0][1] == 'a'
pd.testing.assert_frame_equal(called[0][0][0], table_data)
# call 2 (catch TypeError)
pd.testing.assert_series_equal(called[1][0][0], table_data['a'])
pd.testing.assert_frame_equal(transformed, dummy_transform_mock.return_value)
def test__run_reverse_transform_column(self):
"""Test the ``CustomConstraint._run`` method.
        The ``_run`` method executes ``transform`` and ``reverse_transform``
based on the signature of the functions. In this test, we evaluate
the execution of "column" based functions.
Setup:
- Pass dummy reverse transform function with ``column_data`` argument.
Side Effects:
- Run reverse transform function twice, once with the attempt of
``table_data`` and ``column`` and second with ``column_data`` as input.
Output:
- Applied identity transformation "table_data = transformed".
"""
# Setup
table_data = pd.DataFrame({'a': [1, 2, 3]})
dummy_reverse_transform_mock = Mock(side_effect=dummy_reverse_transform_column,
return_value=table_data)
# Run
instance = CustomConstraint(columns='a', reverse_transform=dummy_reverse_transform_mock)
reverse_transformed = instance.reverse_transform(table_data)
# Asserts
called = dummy_reverse_transform_mock.call_args_list
assert len(called) == 2
# call 1 (try)
assert called[0][0][1] == 'a'
pd.testing.assert_frame_equal(called[0][0][0], table_data)
# call 2 (catch TypeError)
pd.testing.assert_series_equal(called[1][0][0], table_data['a'])
pd.testing.assert_frame_equal(
reverse_transformed, dummy_reverse_transform_mock.return_value)
def test__run_is_valid_column(self):
"""Test the ``CustomConstraint._run_is_valid`` method.
        The ``_run_is_valid`` method executes ``is_valid`` based on
the signature of the functions. In this test, we evaluate
the execution of "column" based functions.
Setup:
- Pass dummy is valid function with ``column_data`` argument.
Side Effects:
- Run is valid function twice, once with the attempt of
``table_data`` and ``column`` and second with ``column_data`` as input.
Output:
- Return a list of [True] of length ``table_data``.
"""
# Setup
table_data = pd.DataFrame({'a': [1, 2, 3]})
dummy_is_valid_mock = Mock(side_effect=dummy_is_valid_column)
# Run
instance = CustomConstraint(columns='a', is_valid=dummy_is_valid_mock)
is_valid = instance.is_valid(table_data)
# Asserts
expected_out = [True] * len(table_data)
called = dummy_is_valid_mock.call_args_list
assert len(called) == 2
# call 1 (try)
assert called[0][0][1] == 'a'
pd.testing.assert_frame_equal(called[0][0][0], table_data)
# call 2 (catch TypeError)
pd.testing.assert_series_equal(called[1][0][0], table_data['a'])
np.testing.assert_array_equal(is_valid, expected_out)
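# The UniqueCombinations tests below rely on the transform strategy implied by
# their assertions: the constraint columns are joined with a separator into a
# single column (e.g. 'b#c'), each observed combination is mapped to a UUID on
# transform, and reverse_transform maps the UUIDs back to the original columns.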
class TestUniqueCombinations():
def test___init__(self):
"""Test the ``UniqueCombinations.__init__`` method.
        It is expected to create a new Constraint instance, receiving the names
        of the columns that need to produce unique combinations.
Side effects:
        - instance._columns == columns
"""
# Setup
columns = ['b', 'c']
# Run
instance = UniqueCombinations(columns=columns)
# Assert
assert instance._columns == columns
def test___init__sets_rebuild_columns_if_not_reject_sampling(self):
"""Test the ``UniqueCombinations.__init__`` method.
The rebuild columns should only be set if the ``handling_strategy``
is not ``reject_sampling``.
Side effects:
- instance.rebuild_columns are set
"""
# Setup
columns = ['b', 'c']
# Run
instance = UniqueCombinations(columns=columns, handling_strategy='transform')
# Assert
assert instance.rebuild_columns == tuple(columns)
def test___init__does_not_set_rebuild_columns_reject_sampling(self):
"""Test the ``UniqueCombinations.__init__`` method.
The rebuild columns should not be set if the ``handling_strategy``
is ``reject_sampling``.
Side effects:
- instance.rebuild_columns are empty
"""
# Setup
columns = ['b', 'c']
# Run
instance = UniqueCombinations(columns=columns, handling_strategy='reject_sampling')
# Assert
assert instance.rebuild_columns == ()
def test___init__with_one_column(self):
"""Test the ``UniqueCombinations.__init__`` method with only one constraint column.
Expect a ``ValueError`` because UniqueCombinations requires at least two
constraint columns.
Side effects:
- A ValueError is raised
"""
# Setup
columns = ['c']
# Run and assert
with pytest.raises(ValueError):
UniqueCombinations(columns=columns)
def test_fit(self):
"""Test the ``UniqueCombinations.fit`` method.
The ``UniqueCombinations.fit`` method is expected to:
- Call ``UniqueCombinations._valid_separator``.
- Find a valid separator for the data and generate the joint column name.
Input:
- Table data (pandas.DataFrame)
"""
# Setup
columns = ['b', 'c']
instance = UniqueCombinations(columns=columns)
# Run
table_data = pd.DataFrame({
'a': ['a', 'b', 'c'],
'b': ['d', 'e', 'f'],
'c': ['g', 'h', 'i']
})
instance.fit(table_data)
# Asserts
expected_combinations = pd.DataFrame({
'b': ['d', 'e', 'f'],
'c': ['g', 'h', 'i']
})
assert instance._separator == '#'
assert instance._joint_column == 'b#c'
pd.testing.assert_frame_equal(instance._combinations, expected_combinations)
def test_is_valid_true(self):
"""Test the ``UniqueCombinations.is_valid`` method.
If the input data satisfies the constraint, result is a series of ``True`` values.
Input:
- Table data (pandas.DataFrame), satisfying the constraint.
Output:
- Series of ``True`` values (pandas.Series)
Side effects:
- Since the ``is_valid`` method needs ``self._combinations``, method ``fit``
must be called as well.
"""
# Setup
table_data = pd.DataFrame({
'a': ['a', 'b', 'c'],
'b': ['d', 'e', 'f'],
'c': ['g', 'h', 'i']
})
columns = ['b', 'c']
instance = UniqueCombinations(columns=columns)
instance.fit(table_data)
# Run
out = instance.is_valid(table_data)
expected_out = pd.Series([True, True, True], name='b#c')
pd.testing.assert_series_equal(expected_out, out)
def test_is_valid_false(self):
"""Test the ``UniqueCombinations.is_valid`` method.
If the input data doesn't satisfy the constraint, result is a series of ``False`` values.
Input:
- Table data (pandas.DataFrame), which does not satisfy the constraint.
Output:
- Series of ``False`` values (pandas.Series)
Side effects:
- Since the ``is_valid`` method needs ``self._combinations``, method ``fit``
must be called as well.
"""
# Setup
table_data = pd.DataFrame({
'a': ['a', 'b', 'c'],
'b': ['d', 'e', 'f'],
'c': ['g', 'h', 'i']
})
columns = ['b', 'c']
instance = UniqueCombinations(columns=columns)
instance.fit(table_data)
# Run
incorrect_table = pd.DataFrame({
'a': ['a', 'b', 'c'],
'b': ['D', 'E', 'F'],
'c': ['g', 'h', 'i']
})
out = instance.is_valid(incorrect_table)
# Assert
expected_out = pd.Series([False, False, False], name='b#c')
pd.testing.assert_series_equal(expected_out, out)
def test_is_valid_non_string_true(self):
"""Test the ``UniqueCombinations.is_valid`` method with non string columns.
If the input data satisfies the constraint, result is a series of ``True`` values.
Input:
- Table data (pandas.DataFrame), satisfying the constraint.
Output:
- Series of ``True`` values (pandas.Series)
Side effects:
- Since the ``is_valid`` method needs ``self._combinations``, method ``fit``
must be called as well.
"""
# Setup
table_data = pd.DataFrame({
'a': ['a', 'b', 'c'],
'b': [1, 2, 3],
'c': ['g', 'h', 'i'],
'd': [2.4, 1.23, 5.6]
})
columns = ['b', 'c', 'd']
instance = UniqueCombinations(columns=columns)
instance.fit(table_data)
# Run
out = instance.is_valid(table_data)
expected_out = pd.Series([True, True, True], name='b#c#d')
pd.testing.assert_series_equal(expected_out, out)
def test_is_valid_non_string_false(self):
"""Test the ``UniqueCombinations.is_valid`` method with non string columns.
If the input data doesn't satisfy the constraint, result is a series of ``False`` values.
Input:
- Table data (pandas.DataFrame), which does not satisfy the constraint.
Output:
- Series of ``False`` values (pandas.Series)
Side effects:
- Since the ``is_valid`` method needs ``self._combinations``, method ``fit``
must be called as well.
"""
# Setup
table_data = pd.DataFrame({
'a': ['a', 'b', 'c'],
'b': [1, 2, 3],
'c': ['g', 'h', 'i'],
'd': [2.4, 1.23, 5.6]
})
columns = ['b', 'c', 'd']
instance = UniqueCombinations(columns=columns)
instance.fit(table_data)
# Run
incorrect_table = pd.DataFrame({
'a': ['a', 'b', 'c'],
'b': [6, 7, 8],
'c': ['g', 'h', 'i'],
'd': [2.4, 1.23, 5.6]
})
out = instance.is_valid(incorrect_table)
# Assert
expected_out = pd.Series([False, False, False], name='b#c#d')
pd.testing.assert_series_equal(expected_out, out)
def test_transform(self):
"""Test the ``UniqueCombinations.transform`` method.
It is expected to return a Table data with the columns concatenated by the separator.
Input:
- Table data (pandas.DataFrame)
Output:
- Table data transformed, with the columns concatenated (pandas.DataFrame)
Side effects:
- Since the ``transform`` method needs ``self._joint_column``, method ``fit``
must be called as well.
"""
# Setup
table_data = pd.DataFrame({
'a': ['a', 'b', 'c'],
'b': ['d', 'e', 'f'],
'c': ['g', 'h', 'i']
})
columns = ['b', 'c']
instance = UniqueCombinations(columns=columns)
instance.fit(table_data)
# Run
out = instance.transform(table_data)
# Assert
assert instance._combinations_to_uuids is not None
assert instance._uuids_to_combinations is not None
expected_out_a = pd.Series(['a', 'b', 'c'], name='a')
pd.testing.assert_series_equal(expected_out_a, out['a'])
try:
[uuid.UUID(u) for c, u in out['b#c'].items()]
except ValueError:
assert False
def test_transform_non_string(self):
"""Test the ``UniqueCombinations.transform`` method with non strings.
It is expected to return a Table data with the columns concatenated by the separator.
Input:
- Table data (pandas.DataFrame)
Output:
- Table data transformed, with the columns as UUIDs.
Side effects:
- Since the ``transform`` method needs ``self._joint_column``, method ``fit``
must be called as well.
"""
# Setup
table_data = pd.DataFrame({
'a': ['a', 'b', 'c'],
'b': [1, 2, 3],
'c': ['g', 'h', 'i'],
'd': [2.4, 1.23, 5.6]
})
columns = ['b', 'c', 'd']
instance = UniqueCombinations(columns=columns)
instance.fit(table_data)
# Run
out = instance.transform(table_data)
# Assert
assert instance._combinations_to_uuids is not None
assert instance._uuids_to_combinations is not None
expected_out_a = pd.Series(['a', 'b', 'c'], name='a')
pd.testing.assert_series_equal(expected_out_a, out['a'])
try:
[uuid.UUID(u) for c, u in out['b#c#d'].items()]
except ValueError:
assert False
def test_transform_not_all_columns_provided(self):
"""Test the ``UniqueCombinations.transform`` method.
If some of the columns needed for the transform are missing, and
``fit_columns_model`` is False, it will raise a ``MissingConstraintColumnError``.
Input:
- Table data (pandas.DataFrame)
Output:
- Raises ``MissingConstraintColumnError``.
"""
# Setup
table_data = pd.DataFrame({
'a': ['a', 'b', 'c'],
'b': ['d', 'e', 'f'],
'c': ['g', 'h', 'i']
})
columns = ['b', 'c']
instance = UniqueCombinations(columns=columns, fit_columns_model=False)
instance.fit(table_data)
# Run/Assert
with pytest.raises(MissingConstraintColumnError):
instance.transform(pd.DataFrame({'a': ['a', 'b', 'c']}))
def test_reverse_transform(self):
"""Test the ``UniqueCombinations.reverse_transform`` method.
It is expected to return the original data separating the concatenated columns.
Input:
- Table data transformed (pandas.DataFrame)
Output:
- Original table data, with the concatenated columns separated (pandas.DataFrame)
Side effects:
- Since the ``transform`` method needs ``self._joint_column``, method ``fit``
must be called as well.
"""
# Setup
table_data = pd.DataFrame({
'a': ['a', 'b', 'c'],
'b': ['d', 'e', 'f'],
'c': ['g', 'h', 'i']
})
columns = ['b', 'c']
instance = UniqueCombinations(columns=columns)
instance.fit(table_data)
# Run
transformed_data = instance.transform(table_data)
out = instance.reverse_transform(transformed_data)
# Assert
assert instance._combinations_to_uuids is not None
assert instance._uuids_to_combinations is not None
expected_out = pd.DataFrame({
'a': ['a', 'b', 'c'],
'b': ['d', 'e', 'f'],
'c': ['g', 'h', 'i']
})
pd.testing.assert_frame_equal(expected_out, out)
def test_reverse_transform_non_string(self):
"""Test the ``UniqueCombinations.reverse_transform`` method with a non string column.
It is expected to return the original data separating the concatenated columns.
Input:
- Table data transformed (pandas.DataFrame)
Output:
- Original table data, with the concatenated columns separated (pandas.DataFrame)
Side effects:
- Since the ``transform`` method needs ``self._joint_column``, method ``fit``
must be called as well.
"""
# Setup
table_data = pd.DataFrame({
'a': ['a', 'b', 'c'],
'b': [1, 2, 3],
'c': ['g', 'h', 'i'],
'd': [2.4, 1.23, 5.6]
})
columns = ['b', 'c', 'd']
instance = UniqueCombinations(columns=columns)
instance.fit(table_data)
# Run
transformed_data = instance.transform(table_data)
out = instance.reverse_transform(transformed_data)
# Assert
assert instance._combinations_to_uuids is not None
assert instance._uuids_to_combinations is not None
expected_out = pd.DataFrame({
'a': ['a', 'b', 'c'],
'b': [1, 2, 3],
'c': ['g', 'h', 'i'],
'd': [2.4, 1.23, 5.6]
})
pd.testing.assert_frame_equal(expected_out, out)
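# The GreaterThan tests below cover the input validation implied by the
# docstrings: `low` and `high` may each be a column name, a list of column
# names, or a scalar; `scalar` says which side is the scalar, and
# `constraint_columns` collects whichever side(s) refer to real columns.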
class TestGreaterThan():
def test__validate_scalar(self):
"""Test the ``_validate_scalar`` method.
This method validates the inputs if and transforms them into
the correct format.
Input:
- scalar_column = 0
- column_names = 'b'
Output:
- column_names == ['b']
"""
# Setup
scalar_column = 0
column_names = 'b'
scalar = 'high'
# Run
out = GreaterThan._validate_scalar(scalar_column, column_names, scalar)
# Assert
        assert out == ['b']
def test__validate_scalar_list(self):
"""Test the ``_validate_scalar`` method.
This method validates the inputs if and transforms them into
the correct format.
Input:
- scalar_column = 0
- column_names = ['b']
Output:
- column_names == ['b']
"""
# Setup
scalar_column = 0
column_names = ['b']
scalar = 'low'
# Run
out = GreaterThan._validate_scalar(scalar_column, column_names, scalar)
# Assert
        assert out == ['b']
def test__validate_scalar_error(self):
"""Test the ``_validate_scalar`` method.
This method raises an error when the the scalar column is a list.
Input:
- scalar_column = 0
- column_names = 'b'
Side effect:
- Raise error since the scalar is a list
"""
# Setup
scalar_column = [0]
column_names = 'b'
scalar = 'high'
# Run / Assert
with pytest.raises(TypeError):
GreaterThan._validate_scalar(scalar_column, column_names, scalar)
def test__validate_inputs_high_is_scalar(self):
"""Test the ``_validate_inputs`` method.
This method checks ``scalar`` and formats the data based
on what is expected to be a list or not. In addition, it
returns the ``constraint_columns``.
Input:
- low = 'a'
- high = 3
- scalar = 'high'
Output:
- low == ['a']
- high == 3
- constraint_columns = ('a')
"""
# Setup / Run
low, high, constraint_columns = GreaterThan._validate_inputs(
low='a', high=3, scalar='high', drop=None)
# Assert
        assert low == ['a']
        assert high == 3
        assert constraint_columns == ('a',)
def test__validate_inputs_low_is_scalar(self):
"""Test the ``_validate_inputs`` method.
This method checks ``scalar`` and formats the data based
on what is expected to be a list or not. In addition, it
returns the ``constraint_columns``.
Input:
- low = 3
- high = 'b'
- scalar = 'low'
- drop = None
Output:
- low == 3
- high == ['b']
- constraint_columns = ('b')
"""
# Setup / Run
low, high, constraint_columns = GreaterThan._validate_inputs(
low=3, high='b', scalar='low', drop=None)
# Assert
        assert low == 3
        assert high == ['b']
        assert constraint_columns == ('b',)
def test__validate_inputs_scalar_none(self):
"""Test the ``_validate_inputs`` method.
This method checks ``scalar`` and formats the data based
on what is expected to be a list or not. In addition, it
returns the ``constraint_columns``.
Input:
- low = 'a'
- high = 3 # where 3 is a column name
- scalar = None
- drop = None
Output:
- low == ['a']
- high == [3]
- constraint_columns = ('a', 3)
"""
# Setup / Run
low, high, constraint_columns = GreaterThan._validate_inputs(
low='a', high=3, scalar=None, drop=None)
# Assert
        assert low == ['a']
        assert high == [3]
        assert constraint_columns == ('a', 3)
def test__validate_inputs_scalar_none_lists(self):
"""Test the ``_validate_inputs`` method.
This method checks ``scalar`` and formats the data based
on what is expected to be a list or not. In addition, it
returns the ``constraint_columns``.
Input:
- low = ['a']
- high = ['b', 'c']
- scalar = None
- drop = None
Output:
- low == ['a']
- high == ['b', 'c']
- constraint_columns = ('a', 'b', 'c')
"""
# Setup / Run
low, high, constraint_columns = GreaterThan._validate_inputs(
low=['a'], high=['b', 'c'], scalar=None, drop=None)
# Assert
        assert low == ['a']
        assert high == ['b', 'c']
        assert constraint_columns == ('a', 'b', 'c')
def test__validate_inputs_scalar_none_two_lists(self):
"""Test the ``_validate_inputs`` method.
This method checks ``scalar`` and formats the data based
on what is expected to be a list or not. In addition, it
returns the ``constraint_columns``.
Input:
- low = ['a', 0]
- high = ['b', 'c']
- scalar = None
- drop = None
Side effect:
- Raise error because both high and low are more than one column
"""
# Run / Assert
with pytest.raises(ValueError):
GreaterThan._validate_inputs(low=['a', 0], high=['b', 'c'], scalar=None, drop=None)
def test__validate_inputs_scalar_unknown(self):
"""Test the ``_validate_inputs`` method.
This method checks ``scalar`` and formats the data based
on what is expected to be a list or not. In addition, it
returns the ``constraint_columns``.
Input:
- low = 'a'
- high = 'b'
- scalar = 'unknown'
- drop = None
Side effect:
- Raise error because scalar is unknown
"""
# Run / Assert
with pytest.raises(ValueError):
GreaterThan._validate_inputs(low='a', high='b', scalar='unknown', drop=None)
def test__validate_inputs_drop_error_low(self):
"""Test the ``_validate_inputs`` method.
Make sure the method raises an error if ``drop``==``scalar``
when ``scalar`` is not ``None``.
Input:
- low = 2
- high = 'b'
- scalar = 'low'
- drop = 'low'
Side effect:
- Raise error because ``drop`` is the same as ``scalar``
"""
# Run / Assert
with pytest.raises(ValueError):
GreaterThan._validate_inputs(low=2, high='b', scalar='low', drop='low')
def test__validate_inputs_drop_error_high(self):
"""Test the ``_validate_inputs`` method.
Make sure the method raises an error if ``drop``==``scalar``
when ``scalar`` is not ``None``.
Input:
- low = 'a'
- high = 3
- scalar = 'high'
- drop = 'high'
Side effect:
- Raise error because ``drop`` is the same as ``scalar``
"""
# Run / Assert
with pytest.raises(ValueError):
GreaterThan._validate_inputs(low='a', high=3, scalar='high', drop='high')
def test__validate_inputs_drop_success(self):
"""Test the ``_validate_inputs`` method.
Make sure the method succeeds when ``drop`` and ``scalar``
refer to different ends of the constraint.
Input:
- low = 'a'
- high = 0
- scalar = 'high'
- drop = 'low'
Output:
- low = ['a']
- high = 0
- constraint_columns == ('a',)
"""
# Run / Assert
low, high, constraint_columns = GreaterThan._validate_inputs(
low='a', high=0, scalar='high', drop='low')
assert low == ['a']
assert high == 0
assert constraint_columns == ('a',)
def test___init___(self):
"""Test the ``GreaterThan.__init__`` method.
The passed arguments should be stored as attributes.
Input:
- low = 'a'
- high = 'b'
Side effects:
- instance._low == ['a']
- instance._high == ['b']
- instance._strict == False
"""
# Run
instance = GreaterThan(low='a', high='b')
# Asserts
assert instance._low == ['a']
assert instance._high == ['b']
assert instance._strict is False
assert instance._scalar is None
assert instance._drop is None
assert instance.constraint_columns == ('a', 'b')
def test___init__sets_rebuild_columns_if_not_reject_sampling(self):
"""Test the ``GreaterThan.__init__`` method.
The rebuild columns should only be set if the ``handling_strategy``
is not ``reject_sampling``.
Side effects:
- instance.rebuild_columns are set
"""
# Run
instance = GreaterThan(low='a', high='b', handling_strategy='transform')
# Assert
assert instance.rebuild_columns == ['b']
def test___init__does_not_set_rebuild_columns_reject_sampling(self):
"""Test the ``GreaterThan.__init__`` method.
The rebuild columns should not be set if the ``handling_strategy``
is ``reject_sampling``.
Side effects:
- instance.rebuild_columns are empty
"""
# Run
instance = GreaterThan(low='a', high='b', handling_strategy='reject_sampling')
# Assert
assert instance.rebuild_columns == ()
def test___init___high_is_scalar(self):
"""Test the ``GreaterThan.__init__`` method.
The passed arguments should be stored as attributes. Make sure ``scalar``
is set to ``'high'``.
Input:
- low = 'a'
- high = 0
- strict = True
- drop = 'low'
- scalar = 'high'
Side effects:
- instance._low == 'a'
- instance._high == 0
- instance._strict == True
- instance._drop = 'low'
- instance._scalar == 'high'
"""
# Run
instance = GreaterThan(low='a', high=0, strict=True, drop='low', scalar='high')
# Asserts
assert instance._low == ['a']
assert instance._high == 0
assert instance._strict is True
assert instance._scalar == 'high'
assert instance._drop == 'low'
assert instance.constraint_columns == ('a',)
def test___init___low_is_scalar(self):
"""Test the ``GreaterThan.__init__`` method.
The passed arguments should be stored as attributes. Make sure ``scalar``
is set to ``'low'``.
Input:
- low = 0
- high = 'a'
- strict = True
- drop = 'high'
- scalar = 'low'
Side effects:
- instance._low == 0
- instance._high == 'a'
- instance._strict == True
- instance._drop = 'high'
- instance._scalar == 'low'
"""
# Run
instance = GreaterThan(low=0, high='a', strict=True, drop='high', scalar='low')
# Asserts
assert instance._low == 0
assert instance._high == ['a']
assert instance._strict is True
assert instance._scalar == 'low'
assert instance._drop == 'high'
assert instance.constraint_columns == ('a',)
def test___init___strict_is_false(self):
"""Test the ``GreaterThan.__init__`` method.
Ensure that ``operator`` is set to ``np.greater_equal``
when ``strict`` is set to ``False``.
Input:
- low = 'a'
- high = 'b'
- strict = False
"""
# Run
instance = GreaterThan(low='a', high='b', strict=False)
# Assert
assert instance.operator == np.greater_equal
def test___init___strict_is_true(self):
"""Test the ``GreaterThan.__init__`` method.
Ensure that ``operator`` is set to ``np.greater``
when ``strict`` is set to ``True``.
Input:
- low = 'a'
- high = 'b'
- strict = True
"""
# Run
instance = GreaterThan(low='a', high='b', strict=True)
# Assert
assert instance.operator == np.greater
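# Plain numpy sanity check of the two operators chosen above (independent of
# GreaterThan itself):
#   np.greater([4, 2, 2], [1, 2, 3])        -> array([ True, False, False])
#   np.greater_equal([4, 2, 2], [1, 2, 3])  -> array([ True,  True, False])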
def test__init__get_columns_to_reconstruct_default(self):
"""Test the ``GreaterThan._get_columns_to_reconstruct`` method.
This method returns:
- ``_high`` if drop is "high"
- ``_low`` if drop is "low"
- ``_low`` if scalar is "high"
- ``_high`` otherwise
Setup:
- low = 'a'
- high = 'b'
Side effects:
- self._columns_to_reconstruct == ['b']
"""
# Setup
instance = GreaterThan(low='a', high='b')
assert instance._columns_to_reconstruct == ['b']
def test__init__get_columns_to_reconstruct_drop_high(self):
"""Test the ``GreaterThan._get_columns_to_reconstruct`` method.
This method returns:
- ``_high`` if drop is "high"
- ``_low`` if drop is "low"
- ``_low`` if scalar is "high"
- ``_high`` otherwise
Setup:
- low = 'a'
- high = 'b'
- drop = 'high'
Side effects:
- self._columns_to_reconstruct == ['b']
"""
# Setup
instance = GreaterThan(low='a', high='b', drop='high')
assert instance._columns_to_reconstruct == ['b']
def test__init__get_columns_to_reconstruct_drop_low(self):
"""Test the ``GreaterThan._get_columns_to_reconstruct`` method.
This method returns:
- ``_high`` if drop is "high"
- ``_low`` if drop is "low"
- ``_low`` if scalar is "high"
- ``_high`` otherwise
Setup:
- low = 'a'
- high = 'b'
- drop = 'low'
Side effects:
- self._columns_to_reconstruct == ['a']
"""
# Setup
instance = GreaterThan(low='a', high='b', drop='low')
assert instance._columns_to_reconstruct == ['a']
def test__init__get_columns_to_reconstruct_scalar_high(self):
"""Test the ``GreaterThan._get_columns_to_reconstruct`` method.
This method returns:
- ``_high`` if drop is "high"
- ``_low`` if drop is "low"
- ``_low`` if scalar is "high"
- ``_high`` otherwise
Setup:
- low = 'a'
- high = 0
- scalar = 'high'
Side effects:
- self._columns_to_reconstruct == ['a']
"""
# Setup
instance = GreaterThan(low='a', high=0, scalar='high')
assert instance._columns_to_reconstruct == ['a']
def test__get_value_column_list(self):
"""Test the ``GreaterThan._get_value`` method.
This method returns a scalar or a ndarray of values
depending on the type of the ``field``.
Input:
- Table with given data.
- field = 'low'
"""
# Setup
instance = GreaterThan(low='a', high='b')
table_data = pd.DataFrame({
'a': [1, 2, 4],
'b': [4, 5, 6],
'c': [7, 8, 9]
})
out = instance._get_value(table_data, 'low')
# Assert
expected = table_data[['a']].values
np.testing.assert_array_equal(out, expected)
def test__get_value_scalar(self):
"""Test the ``GreaterThan._get_value`` method.
This method returns a scalar or a ndarray of values
depending on the type of the ``field``.
Input:
- Table with given data.
- field = 'low'
- scalar = 'low'
"""
# Setup
instance = GreaterThan(low=3, high='b', scalar='low')
table_data = pd.DataFrame({
'a': [1, 2, 4],
'b': [4, 5, 6],
'c': [7, 8, 9]
})
out = instance._get_value(table_data, 'low')
# Assert
expected = 3
assert out == expected
def test__get_diff_columns_name_low_is_scalar(self):
"""Test the ``GreaterThan._get_diff_columns_name`` method.
The returned names should be the given column names with a
'#' token appended.
Input:
- Table with given data.
"""
# Setup
instance = GreaterThan(low=0, high=['a', 'b#'], scalar='low')
table_data = pd.DataFrame({
'a': [1, 2, 4],
'b#': [4, 5, 6]
})
out = instance._get_diff_columns_name(table_data)
# Assert
expected = ['a#', 'b##']
assert out == expected
def test__get_diff_columns_name_high_is_scalar(self):
"""Test the ``GreaterThan._get_diff_columns_name`` method.
The returned names should be the given column names with a
'#' token appended.
Input:
- Table with given data.
"""
# Setup
instance = GreaterThan(low=['a', 'b'], high=0, scalar='high')
table_data = pd.DataFrame({
'a': [1, 2, 4],
'b': [4, 5, 6]
})
out = instance._get_diff_columns_name(table_data)
# Assert
expected = ['a#', 'b#']
assert out == expected
def test__get_diff_columns_name_scalar_is_none(self):
"""Test the ``GreaterThan._get_diff_columns_name`` method.
The returned names should be the two column names joined
by a '#' token.
Input:
- Table with given data.
"""
# Setup
instance = GreaterThan(low='a', high='b#', scalar=None)
table_data = pd.DataFrame({
'a': [1, 2, 4],
'b#': [4, 5, 6]
})
out = instance._get_diff_columns_name(table_data)
# Assert
expected = ['b##a']
assert out == expected
def test__get_diff_columns_name_scalar_is_none_multi_column_low(self):
"""Test the ``GreaterThan._get_diff_columns_name`` method.
The returned names should be the two column names joined
by a '#' token.
Input:
- Table with given data.
"""
# Setup
instance = GreaterThan(low=['a#', 'c'], high='b', scalar=None)
table_data = pd.DataFrame({
'a#': [1, 2, 4],
'b': [4, 5, 6],
'c#': [7, 8, 9]
})
out = instance._get_diff_columns_name(table_data)
# Assert
expected = ['a##b', 'c#b']
assert out == expected
def test__get_diff_columns_name_scalar_is_none_multi_column_high(self):
"""Test the ``GreaterThan._get_diff_columns_name`` method.
The returned names should be the two column names joined
by a '#' token.
Input:
- Table with given data.
"""
# Setup
instance = GreaterThan(low=0, high=['b', 'c'], scalar=None)
table_data = pd.DataFrame({
0: [1, 2, 4],
'b': [4, 5, 6],
'c#': [7, 8, 9]
})
out = instance._get_diff_columns_name(table_data)
# Assert
expected = ['b#0', 'c#0']
assert out == expected
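# Naming convention exercised by the tests above: with a scalar on one side,
# each constrained column name simply gets a '#' token appended ('a' -> 'a#');
# with columns on both sides, the high and low names are joined by the token
# ('b' over 'a' -> 'b#a'), and names that already contain '#' keep it
# ('b#' over 'a' -> 'b##a').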
def test__check_columns_exist_success(self):
"""Test the ``GreaterThan._check_columns_exist`` method.
This method raises an error if the specified columns in
``low`` or ``high`` do not exist.
Input:
- Table with given data.
"""
# Setup
instance = GreaterThan(low='a', high='b')
# Run / Assert
table_data = pd.DataFrame({
'a': [1, 2, 4],
'b': [4, 5, 6]
})
instance._check_columns_exist(table_data, 'low')
instance._check_columns_exist(table_data, 'high')
def test__check_columns_exist_error(self):
"""Test the ``GreaterThan._check_columns_exist`` method.
This method raises an error if the specified columns in
``low`` or ``high`` do not exist.
Input:
- Table with given data.
"""
# Setup
instance = GreaterThan(low='a', high='c')
# Run / Assert
table_data = pd.DataFrame({
'a': [1, 2, 4],
'b': [4, 5, 6]
})
instance._check_columns_exist(table_data, 'low')
with pytest.raises(KeyError):
instance._check_columns_exist(table_data, 'high')
def test__fit_only_one_datetime_arg(self):
"""Test the ``Between._fit`` method by passing in only one arg as datetime.
If only one of the high / low args is a datetime type, expect a ValueError.
Input:
- low is an int column
- high is a datetime
Output:
- n/a
Side Effects:
- ValueError
"""
# Setup
instance = GreaterThan(low='a', high=pd.to_datetime('2021-01-01'), scalar='high')
# Run and assert
table_data = pd.DataFrame({
'a': [1., 2., 3.],
'b': [4, 5, 6]
})
with pytest.raises(ValueError):
instance._fit(table_data)
def test__fit__low_is_not_found_and_scalar_is_none(self):
"""Test the ``GreaterThan._fit`` method.
The ``GreaterThan._fit`` method should raise an error if
the ``low`` is set to a value not seen in ``table_data``.
Input:
- Table without ``low`` in columns.
Side Effect:
- KeyError.
"""
# Setup
instance = GreaterThan(low=3, high='b')
# Run / Assert
table_data = pd.DataFrame({
'a': [1., 2., 3.],
'b': [4, 5, 6]
})
with pytest.raises(KeyError):
instance._fit(table_data)
def test__fit__high_is_not_found_and_scalar_is_none(self):
"""Test the ``GreaterThan._fit`` method.
The ``GreaterThan._fit`` method should raise an error if
the ``high`` is set to a value not seen in ``table_data``.
Input:
- Table without ``high`` in columns.
Side Effect:
- KeyError.
"""
# Setup
instance = GreaterThan(low='a', high=3)
# Run / Assert
table_data = pd.DataFrame({
'a': [1., 2., 3.],
'b': [4, 5, 6]
})
with pytest.raises(KeyError):
instance._fit(table_data)
def test__fit__low_is_not_found_scalar_is_high(self):
"""Test the ``GreaterThan._fit`` method.
The ``GreaterThan._fit`` method should raise an error if
the ``low`` is set to a value not seen in ``table_data``.
Input:
- Table without ``low`` in columns.
Side Effect:
- KeyError.
"""
# Setup
instance = GreaterThan(low='c', high=3, scalar='high')
# Run / Assert
table_data = pd.DataFrame({
'a': [1., 2., 3.],
'b': [4, 5, 6]
})
with pytest.raises(KeyError):
instance._fit(table_data)
def test__fit__high_is_not_found_scalar_is_low(self):
"""Test the ``GreaterThan._fit`` method.
The ``GreaterThan._fit`` method should raise an error if
the ``high`` is set to a value not seen in ``table_data``.
Input:
- Table without ``high`` in columns.
Side Effect:
- KeyError.
"""
# Setup
instance = GreaterThan(low=3, high='c', scalar='low')
# Run / Assert
table_data = pd.DataFrame({
'a': [1., 2., 3.],
'b': [4, 5, 6]
})
with pytest.raises(KeyError):
instance._fit(table_data)
def test__fit__columns_to_reconstruct_drop_high(self):
"""Test the ``GreaterThan._fit`` method.
The ``GreaterThan._fit`` method should set ``_columns_to_reconstruct``
to ``instance._high`` if ``instance._drop`` is ``'high'``.
Input:
- Table with two columns.
Side Effect:
- ``_columns_to_reconstruct`` is ``instance._high``
"""
# Setup
instance = GreaterThan(low='a', high='b', drop='high')
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6]
})
instance._fit(table_data)
# Asserts
assert instance._columns_to_reconstruct == ['b']
def test__fit__columns_to_reconstruct_drop_low(self):
"""Test the ``GreaterThan._fit`` method.
The ``GreaterThan._fit`` method should set ``_columns_to_reconstruct``
to ``instance._low`` if ``instance._drop`` is ``'low'``.
Input:
- Table with two columns.
Side Effect:
- ``_columns_to_reconstruct`` is ``instance._low``
"""
# Setup
instance = GreaterThan(low='a', high='b', drop='low')
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6]
})
instance._fit(table_data)
# Asserts
assert instance._columns_to_reconstruct == ['a']
def test__fit__columns_to_reconstruct_default(self):
"""Test the ``GreaterThan._fit`` method.
The ``GreaterThan._fit`` method should set ``_columns_to_reconstruct``
to `high` by default.
Input:
- Table with two columns.
Side Effect:
- ``_columns_to_reconstruct`` is ``instance._high``
"""
# Setup
instance = GreaterThan(low='a', high='b')
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6]
})
instance._fit(table_data)
# Asserts
assert instance._columns_to_reconstruct == ['b']
def test__fit__columns_to_reconstruct_high_is_scalar(self):
"""Test the ``GreaterThan._fit`` method.
The ``GreaterThan._fit`` method should set ``_columns_to_reconstruct``
to `low` if ``instance._scalar`` is ``'high'``.
Input:
- Table with two columns.
Side Effect:
- ``_columns_to_reconstruct`` is ``instance._low``
"""
# Setup
instance = GreaterThan(low='a', high='b', scalar='high')
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6]
})
instance._fit(table_data)
# Asserts
assert instance._columns_to_reconstruct == ['a']
def test__fit__columns_to_reconstruct_low_is_scalar(self):
"""Test the ``GreaterThan._fit`` method.
The ``GreaterThan._fit`` method should set ``_columns_to_reconstruct``
to `high` if ``instance._scalar`` is ``'low'``.
Input:
- Table with two columns.
Side Effect:
- ``_columns_to_reconstruct`` is ``instance._high``
"""
# Setup
instance = GreaterThan(low='a', high='b', scalar='low')
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6]
})
instance._fit(table_data)
# Asserts
assert instance._columns_to_reconstruct == ['b']
def test__fit__diff_columns_one_column(self):
"""Test the ``GreaterThan._fit`` method.
The ``GreaterThan._fit`` method should set ``_diff_columns``
to the one column in ``instance.constraint_columns`` plus a
token if there is only one column in that set.
Input:
- Table with one column.
Side Effect:
- ``_diff_columns`` == ['a#']
"""
# Setup
instance = GreaterThan(low='a', high=3, scalar='high')
# Run
table_data = pd.DataFrame({'a': [1, 2, 3]})
instance._fit(table_data)
# Asserts
assert instance._diff_columns == ['a#']
def test__fit__diff_columns_multiple_columns(self):
"""Test the ``GreaterThan._fit`` method.
The ``GreaterThan._fit`` method should set ``_diff_columns``
to the two columns in ``instance.constraint_columns`` separated
by a token if both columns are in that set.
Input:
- Table with two columns.
Side Effect:
- ``_diff_columns`` == ['b#a']
"""
# Setup
instance = GreaterThan(low='a', high='b')
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6]
})
instance._fit(table_data)
# Asserts
assert instance._diff_columns == ['b#a']
def test__fit_int(self):
"""Test the ``GreaterThan._fit`` method.
The ``GreaterThan._fit`` method should only learn and store the
``dtype`` of the ``high`` column as the ``_dtype`` attribute
if ``_scalar`` is ``None``.
Input:
- Table that contains two constrained columns with the high one
being made of integers.
Side Effect:
- The _dtype attribute gets `int` as the value even if the low
column has a different dtype.
"""
# Setup
instance = GreaterThan(low='a', high='b')
# Run
table_data = pd.DataFrame({
'a': [1., 2., 3.],
'b': [4, 5, 6],
'c': [7, 8, 9]
})
instance._fit(table_data)
# Asserts
assert all([dtype.kind == 'i' for dtype in instance._dtype])
def test__fit_float(self):
"""Test the ``GreaterThan._fit`` method.
The ``GreaterThan._fit`` method should only learn and store the
``dtype`` of the ``high`` column as the ``_dtype`` attribute
if ``_scalar`` is ``None``.
Input:
- Table that contains two constrained columns with the high one
being made of float values.
Side Effect:
- The _dtype attribute gets `float` as the value even if the low
column has a different dtype.
"""
# Setup
instance = GreaterThan(low='a', high='b')
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4., 5., 6.],
'c': [7, 8, 9]
})
instance._fit(table_data)
# Asserts
assert all([dtype.kind == 'f' for dtype in instance._dtype])
def test__fit_datetime(self):
"""Test the ``GreaterThan._fit`` method.
The ``GreaterThan._fit`` method should only learn and store the
``dtype`` of the ``high`` column as the ``_dtype`` attribute
if ``_scalar`` is ``None``.
Input:
- Table that contains two constrained columns of datetimes.
Side Effect:
- The _dtype attribute gets `datetime` as the value.
"""
# Setup
instance = GreaterThan(low='a', high='b')
# Run
table_data = pd.DataFrame({
'a': pd.to_datetime(['2020-01-01']),
'b': pd.to_datetime(['2020-01-02'])
})
instance._fit(table_data)
# Asserts
assert all([dtype.kind == 'M' for dtype in instance._dtype])
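# The dtype assertions above use numpy's ``kind`` codes: 'i' marks signed
# integers, 'f' floats and 'M' datetime64 values, e.g.:
#   np.dtype('int64').kind    -> 'i'
#   np.dtype('float64').kind  -> 'f'
#   np.dtype('<M8[ns]').kind  -> 'M'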
def test__fit_type__high_is_scalar(self):
"""Test the ``GreaterThan._fit`` method.
The ``GreaterThan._fit`` method should learn and store the
``dtype`` of the ``low`` column as the ``_dtype`` attribute
if ``_scalar`` is ``'high'``.
Input:
- Table that contains two constrained columns with the low one
being made of floats.
Side Effect:
- The _dtype attribute gets `float` as the value.
"""
# Setup
instance = GreaterThan(low='a', high=3, scalar='high')
# Run
table_data = pd.DataFrame({
'a': [1., 2., 3.],
'b': [4, 5, 6],
'c': [7, 8, 9]
})
instance._fit(table_data)
# Asserts
assert all([dtype.kind == 'f' for dtype in instance._dtype])
def test__fit_type__low_is_scalar(self):
"""Test the ``GreaterThan._fit`` method.
The ``GreaterThan._fit`` method should learn and store the
``dtype`` of the ``high`` column as the ``_dtype`` attribute
if ``_scalar`` is ``'low'``.
Input:
- Table that contains two constrained columns with the high one
being made of floats.
Side Effect:
- The _dtype attribute gets `float` as the value.
"""
# Setup
instance = GreaterThan(low=3, high='b', scalar='low')
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4., 5., 6.],
'c': [7, 8, 9]
})
instance._fit(table_data)
# Asserts
assert all([dtype.kind == 'f' for dtype in instance._dtype])
def test__fit_high_is_scalar_multi_column(self):
"""Test the ``GreaterThan._fit`` method.
The ``GreaterThan._fit`` method should learn and store the
``dtype`` of the ``high`` column as the ``_dtype`` attribute.
Input:
- Table that contains two constrained columns with different dtype.
"""
# Setup
instance = GreaterThan(low=['a', 'b'], high=0, scalar='high')
dtype_int = pd.Series([1]).dtype
dtype_float = np.dtype('float')
table_data = pd.DataFrame({
'a': [1, 2, 4],
'b': [4., 5., 6.]
})
instance._fit(table_data)
# Assert
expected_diff_columns = ['a#', 'b#']
expected_dtype = pd.Series([dtype_int, dtype_float], index=table_data.columns)
assert instance._diff_columns == expected_diff_columns
pd.testing.assert_series_equal(instance._dtype, expected_dtype)
def test__fit_low_is_scalar_multi_column(self):
"""Test the ``GreaterThan._fit`` method.
The ``GreaterThan._fit`` method should learn and store the
``dtype`` of the ``high`` column as the ``_dtype`` attribute.
Input:
- Table that contains two constrained columns with different dtype.
"""
# Setup
instance = GreaterThan(low=0, high=['a', 'b'], scalar='low')
dtype_int = pd.Series([1]).dtype
dtype_float = np.dtype('float')
table_data = pd.DataFrame({
'a': [1, 2, 4],
'b': [4., 5., 6.]
})
instance._fit(table_data)
# Assert
expected_diff_columns = ['a#', 'b#']
expected_dtype = pd.Series([dtype_int, dtype_float], index=table_data.columns)
assert instance._diff_columns == expected_diff_columns
pd.testing.assert_series_equal(instance._dtype, expected_dtype)
def test_is_valid_strict_false(self):
"""Test the ``GreaterThan.is_valid`` method with strict False.
If strict is False, equal values should count as valid.
Input:
- Table with a strictly valid row, a strictly invalid row and
a row that has the same value for both high and low.
Output:
- False should be returned for the strictly invalid row and True
for the other two.
"""
# Setup
instance = GreaterThan(low='a', high='b', strict=False)
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 2, 2],
'c': [7, 8, 9]
})
out = instance.is_valid(table_data)
# Assert
expected_out = [True, True, False]
np.testing.assert_array_equal(expected_out, out)
def test_is_valid_strict_true(self):
"""Test the ``GreaterThan.is_valid`` method with strict True.
If strict is True, equal values should count as invalid.
Input:
- Table with a strictly valid row, a strictly invalid row and
a row that has the same value for both high and low.
Output:
- True should be returned for the strictly valid row and False
for the other two.
"""
# Setup
instance = GreaterThan(low='a', high='b', strict=True)
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 2, 2],
'c': [7, 8, 9]
})
out = instance.is_valid(table_data)
# Assert
expected_out = [True, False, False]
np.testing.assert_array_equal(expected_out, out)
def test_is_valid_low_is_scalar_high_is_column(self):
"""Test the ``GreaterThan.is_valid`` method.
If low is a scalar, and high is a column name, then
the values in that column should all be higher than
``instance._low``.
Input:
- Table with values above and below low.
Output:
- True should be returned for the rows where the high
column is above low.
"""
# Setup
instance = GreaterThan(low=3, high='b', strict=False, scalar='low')
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 2, 2],
'c': [7, 8, 9]
})
out = instance.is_valid(table_data)
# Assert
expected_out = [True, False, False]
np.testing.assert_array_equal(expected_out, out)
def test_is_valid_high_is_scalar_low_is_column(self):
"""Test the ``GreaterThan.is_valid`` method.
If high is a scalar, and low is a column name, then
the values in that column should all be lower than
``instance._high``.
Input:
- Table with values above and below high.
Output:
- True should be returned for the rows where the low
column is below high.
"""
# Setup
instance = GreaterThan(low='a', high=2, strict=False, scalar='high')
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 2, 2],
'c': [7, 8, 9]
})
out = instance.is_valid(table_data)
# Assert
expected_out = [True, True, False]
np.testing.assert_array_equal(expected_out, out)
def test_is_valid_high_is_scalar_multi_column(self):
"""Test the ``GreaterThan.is_valid`` method.
If high is a scalar, and low is multi column, then
the values in those columns should all be lower than
``instance._high``.
Input:
- Table with values above and below high.
Output:
- True should be returned for the rows where the low
column is below high.
"""
# Setup
instance = GreaterThan(low=['a', 'b'], high=2, strict=False, scalar='high')
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 2, 2],
'c': [7, 8, 9]
})
out = instance.is_valid(table_data)
# Assert
expected_out = [False, True, False]
np.testing.assert_array_equal(expected_out, out)
def test_is_valid_low_is_scalar_multi_column(self):
"""Test the ``GreaterThan.is_valid`` method.
If low is a scalar, and high is multi column, then
the values in those columns should all be higher than
``instance._low``.
Input:
- Table with values above and below low.
Output:
- True should be returned for the rows where the high
column is above low.
"""
# Setup
instance = GreaterThan(low=2, high=['a', 'b'], strict=False, scalar='low')
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 2, 2],
'c': [7, 8, 9]
})
out = instance.is_valid(table_data)
# Assert
expected_out = [False, True, True]
np.testing.assert_array_equal(expected_out, out)
def test_is_valid_scalar_is_none_multi_column(self):
"""Test the ``GreaterThan.is_valid`` method.
If scalar is none, and high is multi column, then
the values in those columns should all be higher than
the values in the low column.
Input:
- Table with values above and below low.
Output:
- True should be returned for the rows where the high
column is above low.
"""
# Setup
instance = GreaterThan(low='b', high=['a', 'c'], strict=False)
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 2, 2],
'c': [7, 8, 9]
})
# Run
out = instance.is_valid(table_data)
# Assert
expected_out = [False, True, True]
np.testing.assert_array_equal(expected_out, out)
def test_is_valid_high_is_datetime(self):
"""Test the ``GreaterThan.is_valid`` method.
If high is a datetime and low is a column,
the values in that column should all be lower than
``instance._high``.
Input:
- Table with values above and below `high`.
Output:
- True should be returned for the rows where the low
column is below `high`.
"""
# Setup
high_dt = pd.to_datetime('8/31/2021')
instance = GreaterThan(low='a', high=high_dt, strict=False, scalar='high')
table_data = pd.DataFrame({
'a': [datetime(2020, 5, 17), datetime(2020, 2, 1), datetime(2021, 9, 1)],
'b': [4, 2, 2],
})
# Run
out = instance.is_valid(table_data)
# Assert
expected_out = [True, True, False]
np.testing.assert_array_equal(expected_out, out)
def test_is_valid_low_is_datetime(self):
"""Test the ``GreaterThan.is_valid`` method.
If low is a datetime and high is a column,
the values in that column should all be higher than
``instance._low``.
Input:
- Table with values above and below `low`.
Output:
- True should be returned for the rows where the high
column is above `low`.
"""
# Setup
low_dt = pd.to_datetime('8/31/2021')
instance = GreaterThan(low=low_dt, high='a', strict=False, scalar='low')
table_data = pd.DataFrame({
'a': [datetime(2021, 9, 17), datetime(2021, 7, 1), datetime(2021, 9, 1)],
'b': [4, 2, 2],
})
# Run
out = instance.is_valid(table_data)
# Assert
expected_out = [True, False, True]
np.testing.assert_array_equal(expected_out, out)
def test_is_valid_two_cols_with_nans(self):
"""Test the ``GreaterThan.is_valid`` method with nan values.
If there is a NaN row, expect that `is_valid` returns True.
Input:
- Table with a NaN row
Output:
- True should be returned for the NaN row.
"""
# Setup
instance = GreaterThan(low='a', high='b', strict=True)
# Run
table_data = pd.DataFrame({
'a': [1, None, 3],
'b': [4, None, 2],
'c': [7, 8, 9]
})
out = instance.is_valid(table_data)
# Assert
expected_out = [True, True, False]
np.testing.assert_array_equal(expected_out, out)
def test_is_valid_two_cols_with_one_nan(self):
"""Test the ``GreaterThan.is_valid`` method with nan values.
If there is a row in which we compare one NaN value with one
non-NaN value, expect that `is_valid` returns True.
Input:
- Table with a row that contains only one NaN value.
Output:
- True should be returned for the row with the NaN value.
"""
# Setup
instance = GreaterThan(low='a', high='b', strict=True)
# Run
table_data = pd.DataFrame({
'a': [1, None, 3],
'b': [4, 5, 2],
'c': [7, 8, 9]
})
out = instance.is_valid(table_data)
# Assert
expected_out = [True, True, False]
np.testing.assert_array_equal(expected_out, out)
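# The two NaN tests above only pin down the expected behaviour: rows containing
# a NaN on either side count as valid. A plain pandas/numpy way to express that
# check, assuming nothing about the real implementation, would be:
#   valid = np.greater(table_data['b'], table_data['a'])
#   valid |= table_data[['a', 'b']].isnull().any(axis=1)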
def test__transform_int_drop_none(self):
"""Test the ``GreaterThan._transform`` method passing a high column of type int.
The ``GreaterThan._transform`` method is expected to compute the distance
between the high and low columns and create a diff column with the
logarithm of the distance + 1.
Setup:
- ``_drop`` is set to ``None``, so all original columns will be in output.
Input:
- Table with two constrained columns at a constant distance of
exactly 3 and one additional dummy column.
Output:
- Same table with a diff column of the logarithms of the distances + 1,
which is np.log(4).
"""
# Setup
instance = GreaterThan(low='a', high='b', strict=True)
instance._diff_columns = ['a#b']
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6],
'c': [7, 8, 9],
})
out = instance._transform(table_data)
# Assert
expected_out = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6],
'c': [7, 8, 9],
'a#b': [np.log(4)] * 3,
})
pd.testing.assert_frame_equal(out, expected_out)
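# Worked example of the diff column asserted above (a sketch of the transform
# arithmetic only, not the actual implementation):
#   diff = table_data['b'] - table_data['a']   # [3, 3, 3]
#   np.log(diff + 1)                           # [np.log(4)] * 3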
def test__transform_int_drop_high(self):
"""Test the ``GreaterThan._transform`` method passing a high column of type int.
The ``GreaterThan._transform`` method is expected to compute the distance
between the high and low columns and create a diff column with the
logarithm of the distance + 1. It should also drop the high column.
Setup:
- ``_drop`` is set to ``high``.
Input:
- Table with two constrained columns at a constant distance of
exactly 3 and one additional dummy column.
Output:
- Same table with a diff column of the logarithms of the distances + 1,
which is np.log(4) and the high column dropped.
"""
# Setup
instance = GreaterThan(low='a', high='b', strict=True, drop='high')
instance._diff_columns = ['a#b']
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6],
'c': [7, 8, 9],
})
out = instance._transform(table_data)
# Assert
expected_out = pd.DataFrame({
'a': [1, 2, 3],
'c': [7, 8, 9],
'a#b': [np.log(4)] * 3,
})
pd.testing.assert_frame_equal(out, expected_out)
def test__transform_int_drop_low(self):
"""Test the ``GreaterThan._transform`` method passing a high column of type int.
The ``GreaterThan._transform`` method is expected to compute the distance
between the high and low columns and create a diff column with the
logarithm of the distance + 1. It should also drop the low column.
Setup:
- ``_drop`` is set to ``low``.
Input:
- Table with two constrained columns at a constant distance of
exactly 3 and one additional dummy column.
Output:
- Same table with a diff column of the logarithms of the distances + 1,
which is np.log(4) and the low column dropped.
"""
# Setup
instance = GreaterThan(low='a', high='b', strict=True, drop='low')
instance._diff_columns = ['a#b']
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6],
'c': [7, 8, 9],
})
out = instance._transform(table_data)
# Assert
expected_out = pd.DataFrame({
'b': [4, 5, 6],
'c': [7, 8, 9],
'a#b': [np.log(4)] * 3,
})
pd.testing.assert_frame_equal(out, expected_out)
def test__transform_float_drop_none(self):
"""Test the ``GreaterThan._transform`` method passing a high column of type float.
The ``GreaterThan._transform`` method is expected to compute the distance
between the high and low columns and create a diff column with the
logarithm of the distance + 1.
Setup:
- ``_drop`` is set to ``None``, so all original columns will be in output.
Input:
- Table with two constrained columns at a constant distance of
exactly 3 and one additional dummy column.
Output:
- Same table with a diff column of the logarithms of the distances + 1,
which is np.log(4).
"""
# Setup
instance = GreaterThan(low='a', high='b', strict=True)
instance._diff_columns = ['a#b']
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4., 5., 6.],
'c': [7, 8, 9],
})
out = instance._transform(table_data)
# Assert
expected_out = pd.DataFrame({
'a': [1, 2, 3],
'b': [4., 5., 6.],
'c': [7, 8, 9],
'a#b': [np.log(4)] * 3,
})
pd.testing.assert_frame_equal(out, expected_out)
def test__transform_datetime_drop_none(self):
"""Test the ``GreaterThan._transform`` method passing a high column of type datetime.
If the columns are of type datetime, ``_transform`` is expected
to convert the timedelta distance into numeric before applying
the +1 and logarithm.
Setup:
- ``_drop`` is set to ``None``, so all original columns will be in output.
Input:
- Table with values at a distance of exactly 1 second.
Output:
- Same table with a diff column of the logarithms
of the distance in nanoseconds + 1, which is np.log(1_000_000_001).
"""
# Setup
instance = GreaterThan(low='a', high='b', strict=True)
instance._diff_columns = ['a#b']
instance._is_datetime = True
# Run
table_data = pd.DataFrame({
'a': pd.to_datetime(['2020-01-01T00:00:00', '2020-01-02T00:00:00']),
'b': pd.to_datetime(['2020-01-01T00:00:01', '2020-01-02T00:00:01']),
'c': [1, 2],
})
out = instance._transform(table_data)
# Assert
expected_out = pd.DataFrame({
'a': pd.to_datetime(['2020-01-01T00:00:00', '2020-01-02T00:00:00']),
'b': pd.to_datetime(['2020-01-01T00:00:01', '2020-01-02T00:00:01']),
'c': [1, 2],
'a#b': [np.log(1_000_000_001), np.log(1_000_000_001)],
})
pd.testing.assert_frame_equal(out, expected_out)
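# For datetime columns the same arithmetic runs on the timedelta expressed in
# nanoseconds, e.g. (sketch only):
#   delta = pd.Timestamp('2020-01-01T00:00:01') - pd.Timestamp('2020-01-01T00:00:00')
#   np.log(delta.value + 1)   # delta.value == 1_000_000_000 ns -> np.log(1_000_000_001)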
def test_transform_not_all_columns_provided(self):
"""Test the ``GreaterThan.transform`` method.
If some of the columns needed for the transform are missing, it will raise
a ``MissingConstraintColumnError``.
Input:
- Table data (pandas.DataFrame)
Output:
- Raises ``MissingConstraintColumnError``.
"""
# Setup
instance = GreaterThan(low='a', high='b', strict=True, fit_columns_model=False)
# Run/Assert
with pytest.raises(MissingConstraintColumnError):
instance.transform(pd.DataFrame({'a': ['a', 'b', 'c']}))
def test__transform_high_is_scalar(self):
"""Test the ``GreaterThan._transform`` method with high as scalar.
The ``GreaterThan._transform`` method is expected to compute the distance
between the high scalar value and the low column and create a diff column
with the logarithm of the distance + 1.
Setup:
- ``_high`` is set to 5 and ``_scalar`` is ``'high'``.
Input:
- Table with one low column and two dummy columns.
Output:
- Same table with a diff column of the logarithms of the distances + 1,
which are np.log(5), np.log(4) and np.log(3).
"""
# Setup
instance = GreaterThan(low='a', high=5, strict=True, scalar='high')
instance._diff_columns = ['a#b']
instance.constraint_columns = ['a']
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6],
'c': [7, 8, 9],
})
out = instance._transform(table_data)
# Assert
expected_out = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6],
'c': [7, 8, 9],
'a#b': [np.log(5), np.log(4), np.log(3)],
})
pd.testing.assert_frame_equal(out, expected_out)
def test__transform_low_is_scalar(self):
"""Test the ``GreaterThan._transform`` method with high as scalar.
The ``GreaterThan._transform`` method is expected to compute the distance
between the high scalar value and the low column and create a diff column
with the logarithm of the distance + 1.
Setup:
- ``_high`` is set to 5 and ``_scalar`` is ``'low'``.
Input:
- Table with one low column and two dummy columns.
Output:
- Same table with a diff column of the logarithms of the distances + 1,
which is np.log(4).
"""
# Setup
instance = GreaterThan(low=2, high='b', strict=True, scalar='low')
instance._diff_columns = ['a#b']
instance.constraint_columns = ['b']
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6],
'c': [7, 8, 9],
})
out = instance._transform(table_data)
# Assert
expected_out = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6],
'c': [7, 8, 9],
'a#b': [np.log(3), np.log(4), np.log(5)],
})
pd.testing.assert_frame_equal(out, expected_out)
def test__transform_high_is_scalar_multi_column(self):
"""Test the ``GreaterThan._transform`` method.
The ``GreaterThan._transform`` method is expected to compute the logarithm
of given columns + 1.
Input:
- Table with given data.
Output:
- Same table with additional columns of the logarithms + 1.
"""
# Setup
instance = GreaterThan(low=['a', 'b'], high=3, strict=True, scalar='high')
instance._diff_columns = ['a#', 'b#']
instance.constraint_columns = ['a', 'b']
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6],
'c': [7, 8, 9],
})
out = instance._transform(table_data)
# Assert
expected = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6],
'c': [7, 8, 9],
'a#': [np.log(3), np.log(2), np.log(1)],
'b#': [np.log(0), np.log(-1), np.log(-2)],
})
pd.testing.assert_frame_equal(out, expected)
def test__transform_low_is_scalar_multi_column(self):
"""Test the ``GreaterThan._transform`` method.
The ``GreaterThan._transform`` method is expected to compute the logarithm
of given columns + 1.
Input:
- Table with given data.
Output:
- Same table with additional columns of the logarithms + 1.
"""
# Setup
instance = GreaterThan(low=3, high=['a', 'b'], strict=True, scalar='low')
instance._diff_columns = ['a#', 'b#']
instance.constraint_columns = ['a', 'b']
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6],
'c': [7, 8, 9],
})
out = instance._transform(table_data)
# Assert
expected = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6],
'c': [7, 8, 9],
'a#': [np.log(-1), np.log(0), np.log(1)],
'b#': [np.log(2), np.log(3), np.log(4)],
})
pd.testing.assert_frame_equal(out, expected)
def test__transform_scalar_is_none_multi_column(self):
"""Test the ``GreaterThan._transform`` method.
The ``GreaterThan._transform`` method is expected to compute the logarithm
of given columns + 1.
Input:
- Table with given data.
Output:
- Same table with additional columns of the logarithms + 1.
"""
# Setup
instance = GreaterThan(low=['a', 'c'], high='b', strict=True)
instance._diff_columns = ['a#', 'c#']
instance.constraint_columns = ['a', 'c']
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6],
'c': [7, 8, 9],
})
out = instance._transform(table_data)
# Assert
expected = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6],
'c': [7, 8, 9],
'a#': [np.log(4)] * 3,
'c#': [np.log(-2)] * 3,
})
pd.testing.assert_frame_equal(out, expected)
def test_reverse_transform_int_drop_high(self):
"""Test the ``GreaterThan.reverse_transform`` method for dtype int.
The ``GreaterThan.reverse_transform`` method is expected to:
- apply an exponential to the input
- subtract 1
- add the low column
- convert the output to integers
- add back the dropped column
Setup:
- ``_drop`` is set to ``high``.
Input:
- Table with a diff column that contains the constant np.log(4).
Output:
- Same table with the high column replaced by the low one + 3, as int
and the diff column dropped.
"""
# Setup
instance = GreaterThan(low='a', high='b', strict=True, drop='high')
instance._dtype = [pd.Series([1]).dtype] # exact dtype (32 or 64) depends on OS
instance._diff_columns = ['a#b']
instance._columns_to_reconstruct = ['b']
# Run
transformed = pd.DataFrame({
'a': [1, 2, 3],
'c': [7, 8, 9],
'a#b': [np.log(4)] * 3,
})
out = instance.reverse_transform(transformed)
# Assert
expected_out = pd.DataFrame({
'a': [1, 2, 3],
'c': [7, 8, 9],
'b': [4, 5, 6],
})
pd.testing.assert_frame_equal(out, expected_out)
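# Worked example of the reconstruction asserted above (sketch of the reverse
# arithmetic only, not the actual implementation):
#   diff = np.exp(transformed['a#b']) - 1     # ~[3., 3., 3.]
#   transformed['a'] + diff                   # [4., 5., 6.], cast back to int afterwards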
def test_reverse_transform_float_drop_high(self):
"""Test the ``GreaterThan.reverse_transform`` method for dtype float.
The ``GreaterThan.reverse_transform`` method is expected to:
- apply an exponential to the input
- subtract 1
- add the low column
- convert the output to float values
- add back the dropped column
Setup:
- ``_drop`` is set to ``high``.
Input:
- Table with a diff column that contains the constant np.log(4).
Output:
- Same table with the high column replaced by the low one + 3, as float values
and the diff column dropped.
"""
# Setup
instance = GreaterThan(low='a', high='b', strict=True, drop='high')
instance._dtype = [np.dtype('float')]
instance._diff_columns = ['a#b']
instance._columns_to_reconstruct = ['b']
# Run
transformed = pd.DataFrame({
'a': [1.1, 2.2, 3.3],
'c': [7, 8, 9],
'a#b': [np.log(4)] * 3,
})
out = instance.reverse_transform(transformed)
# Assert
expected_out = pd.DataFrame({
'a': [1.1, 2.2, 3.3],
'c': [7, 8, 9],
'b': [4.1, 5.2, 6.3],
})
pd.testing.assert_frame_equal(out, expected_out)
def test_reverse_transform_datetime_drop_high(self):
"""Test the ``GreaterThan.reverse_transform`` method for dtype datetime.
The ``GreaterThan.reverse_transform`` method is expected to:
- apply an exponential to the input
- subtract 1
- convert the distance to a timedelta
- add the low column
- convert the output to datetimes
Setup:
- ``_drop`` is set to ``high``.
Input:
- Table with a diff column that contains the constant np.log(1_000_000_001).
Output:
- Same table with the high column replaced by the low one + one second
and the diff column dropped.
"""
# Setup
instance = GreaterThan(low='a', high='b', strict=True, drop='high')
instance._dtype = [np.dtype('<M8[ns]')]
instance._diff_columns = ['a#b']
instance._is_datetime = True
instance._columns_to_reconstruct = ['b']
# Run
transformed = pd.DataFrame({
'a': pd.to_datetime(['2020-01-01T00:00:00', '2020-01-02T00:00:00']),
'c': [1, 2],
'a#b': [np.log(1_000_000_001), np.log(1_000_000_001)],
})
out = instance.reverse_transform(transformed)
# Assert
expected_out = pd.DataFrame({
'a': pd.to_datetime(['2020-01-01T00:00:00', '2020-01-02T00:00:00']),
'c': [1, 2],
'b': pd.to_datetime(['2020-01-01T00:00:01', '2020-01-02T00:00:01'])
})
pd.testing.assert_frame_equal(out, expected_out)
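# Datetime reconstruction follows the same pattern, with the recovered number of
# nanoseconds cast back to a timedelta before being added to the low column
# (sketch only):
#   nanoseconds = np.exp(transformed['a#b']) - 1            # ~1_000_000_000
#   transformed['a'] + pd.to_timedelta(nanoseconds, unit='ns')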
def test_reverse_transform_int_drop_low(self):
"""Test the ``GreaterThan.reverse_transform`` method for dtype int.
The ``GreaterThan.reverse_transform`` method is expected to:
- apply an exponential to the input
- subtract 1
- subtract from the high column
- convert the output to integers
- add back the dropped column
Setup:
- ``_drop`` is set to ``low``.
Input:
- Table with a diff column that contains the constant np.log(4).
Output:
- Same table with the low column replaced by the high one - 3, as int
and the diff column dropped.
"""
# Setup
instance = GreaterThan(low='a', high='b', strict=True, drop='low')
instance._dtype = [pd.Series([1]).dtype] # exact dtype (32 or 64) depends on OS
instance._diff_columns = ['a#b']
instance._columns_to_reconstruct = ['a']
# Run
transformed = pd.DataFrame({
'b': [4, 5, 6],
'c': [7, 8, 9],
'a#b': [np.log(4)] * 3,
})
out = instance.reverse_transform(transformed)
# Assert
expected_out = pd.DataFrame({
'b': [4, 5, 6],
'c': [7, 8, 9],
'a': [1, 2, 3],
})
pd.testing.assert_frame_equal(out, expected_out)
def test_reverse_transform_datetime_drop_low(self):
"""Test the ``GreaterThan.reverse_transform`` method for dtype datetime.
The ``GreaterThan.reverse_transform`` method is expected to:
- apply an exponential to the input
- subtract 1
- convert the distance to a timedelta
- subtract from the high column
- convert the output to datetimes
Setup:
- ``_drop`` is set to ``low``.
Input:
- Table with a diff column that contains the constant np.log(1_000_000_001).
Output:
- Same table with the low column replaced by the high one - one second
and the diff column dropped.
"""
# Setup
instance = GreaterThan(low='a', high='b', strict=True, drop='low')
instance._dtype = [np.dtype('<M8[ns]')]
instance._diff_columns = ['a#b']
instance._is_datetime = True
instance._columns_to_reconstruct = ['a']
# Run
transformed = pd.DataFrame({
'b': pd.to_datetime(['2020-01-01T00:00:01', '2020-01-02T00:00:01']),
'c': [1, 2],
'a#b': [np.log(1_000_000_001), np.log(1_000_000_001)],
})
out = instance.reverse_transform(transformed)
# Assert
expected_out = pd.DataFrame({
'b': pd.to_datetime(['2020-01-01T00:00:01', '2020-01-02T00:00:01']),
'c': [1, 2],
'a': pd.to_datetime(['2020-01-01T00:00:00', '2020-01-02T00:00:00'])
})
pd.testing.assert_frame_equal(out, expected_out)
def test_reverse_transform_int_drop_none(self):
"""Test the ``GreaterThan.reverse_transform`` method for dtype int.
The ``GreaterThan.reverse_transform`` method is expected to:
- apply an exponential to the input
- subtract 1
- add the low column when the row is invalid
- convert the output to integers
Setup:
- ``_drop`` is set to ``None``.
Input:
- Table with a diff column that contains the constant np.log(4).
The table should have one invalid row where the low column is
higher than the high column.
Output:
- Same table with the high column replaced by the low one + 3 for all
invalid rows, as int and the diff column dropped.
"""
# Setup
instance = GreaterThan(low='a', high='b', strict=True)
instance._dtype = [pd.Series([1]).dtype] # exact dtype (32 or 64) depends on OS
instance._diff_columns = ['a#b']
instance._columns_to_reconstruct = ['b']
# Run
transformed = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 1, 6],
'c': [7, 8, 9],
'a#b': [np.log(4)] * 3,
})
out = instance.reverse_transform(transformed)
# Assert
expected_out = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6],
'c': [7, 8, 9],
})
pd.testing.assert_frame_equal(out, expected_out)
def test_reverse_transform_datetime_drop_none(self):
"""Test the ``GreaterThan.reverse_transform`` method for dtype datetime.
The ``GreaterThan.reverse_transform`` method is expected to:
- apply an exponential to the input
- subtract 1
- convert the distance to a timedelta
- add the low column when the row is invalid
- convert the output to datetimes
Setup:
- ``_drop`` is set to ``None``.
Input:
- Table with a diff column that contains the constant np.log(1_000_000_001).
The table should have one invalid row where the low column is
higher than the high column.
Output:
- Same table with the high column replaced by the low one + one second
for all invalid rows, and the diff column dropped.
"""
# Setup
instance = GreaterThan(low='a', high='b', strict=True)
instance._dtype = [np.dtype('<M8[ns]')]
instance._diff_columns = ['a#b']
instance._is_datetime = True
instance._columns_to_reconstruct = ['b']
# Run
transformed = pd.DataFrame({
'a': pd.to_datetime(['2020-01-01T00:00:00', '2020-01-02T00:00:00']),
'b': pd.to_datetime(['2020-01-01T00:00:01', '2020-01-01T00:00:01']),
'c': [1, 2],
'a#b': [np.log(1_000_000_001), np.log(1_000_000_001)],
})
out = instance.reverse_transform(transformed)
# Assert
expected_out = pd.DataFrame({
'a': pd.to_datetime(['2020-01-01T00:00:00', '2020-01-02T00:00:00']),
'b': pd.to_datetime(['2020-01-01T00:00:01', '2020-01-02T00:00:01']),
'c': [1, 2]
})
pd.testing.assert_frame_equal(out, expected_out)
def test_reverse_transform_low_is_scalar(self):
"""Test the ``GreaterThan.reverse_transform`` method with low as a scalar.
The ``GreaterThan.reverse_transform`` method is expected to:
- apply an exponential to the input
- subtract 1
- add the low value when the row is invalid
- convert the output to integers
Setup:
- ``_drop`` is set to ``None``.
- ``_low`` is set to an int and ``_scalar`` is ``'low'``.
Input:
- Table with a diff column that contains the constant np.log(4).
The table should have one invalid row where the low value is
higher than the high column.
Output:
- Same table with the high column replaced by the low value + 3 for all
invalid rows, as int and the diff column dropped.
"""
# Setup
instance = GreaterThan(low=3, high='b', strict=True, scalar='low')
instance._dtype = [pd.Series([1]).dtype] # exact dtype (32 or 64) depends on OS
instance._diff_columns = ['a#b']
instance._columns_to_reconstruct = ['b']
# Run
transformed = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 1, 6],
'c': [7, 8, 9],
'a#b': [np.log(4)] * 3,
})
out = instance.reverse_transform(transformed)
# Assert
expected_out = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 6, 6],
'c': [7, 8, 9],
})
pd.testing.assert_frame_equal(out, expected_out)
def test_reverse_transform_high_is_scalar(self):
"""Test the ``GreaterThan.reverse_transform`` method with high as a scalar.
The ``GreaterThan.reverse_transform`` method is expected to:
- apply an exponential to the input
- subtract 1
- subtract from the high value when the row is invalid
- convert the output to integers
Setup:
- ``_drop`` is set to ``None``.
- ``_high`` is set to an int and ``_scalar`` is ``'high'``.
Input:
- Table with a diff column that contains the constant np.log(4).
The table should have one invalid row where the low column is
higher than the high value.
Output:
- Same table with the low column replaced by the high one - 3 for all
invalid rows, as int and the diff column dropped.
"""
# Setup
instance = GreaterThan(low='a', high=3, strict=True, scalar='high')
instance._dtype = [pd.Series([1]).dtype] # exact dtype (32 or 64) depends on OS
instance._diff_columns = ['a#b']
instance._columns_to_reconstruct = ['a']
# Run
transformed = pd.DataFrame({
'a': [1, 2, 4],
'b': [4, 5, 6],
'c': [7, 8, 9],
'a#b': [np.log(4)] * 3,
})
out = instance.reverse_transform(transformed)
# Assert
expected_out = pd.DataFrame({
'a': [1, 2, 0],
'b': [4, 5, 6],
'c': [7, 8, 9],
})
pd.testing.assert_frame_equal(out, expected_out)
def test_reverse_transform_high_is_scalar_multi_column(self):
"""Test the ``GreaterThan.reverse_transform`` method with high as a scalar.
The ``GreaterThan.reverse_transform`` method is expected to:
- apply an exponential to the input
- subtract 1
- subtract from the high value when the row is invalid
- convert the output to integers
Setup:
- ``_drop`` is set to ``None``.
- ``_high`` is set to an int and ``_scalar`` is ``'high'``.
- ``_low`` is set to multiple columns.
Input:
- Table with a diff column that contains the constant np.log(4)/np.log(5).
The table should have one invalid row where the low column is
higher than the high value.
Output:
- Same table with the low column replaced by the high one - 3/-4 for all
invalid rows, as int and the diff column dropped.
"""
# Setup
instance = GreaterThan(low=['a', 'b'], high=3, strict=True, scalar='high')
dtype = pd.Series([1]).dtype # exact dtype (32 or 64) depends on OS
instance._dtype = [dtype, dtype]
instance._diff_columns = ['a#', 'b#']
instance._columns_to_reconstruct = ['a', 'b']
# Run
transformed = pd.DataFrame({
'a': [1, 2, 4],
'b': [0, 5, 6],
'c': [7, 8, 9],
'a#': [np.log(4)] * 3,
'b#': [np.log(5)] * 3,
})
out = instance.reverse_transform(transformed)
# Assert
expected_out = pd.DataFrame({
'a': [1, 0, 0],
'b': [0, -1, -1],
'c': [7, 8, 9],
})
pd.testing.assert_frame_equal(out, expected_out)
def test_reverse_transform_low_is_scalar_multi_column(self):
"""Test the ``GreaterThan.reverse_transform`` method with low as a scalar.
The ``GreaterThan.reverse_transform`` method is expected to:
- apply an exponential to the input
- subtract 1
- add the low value when the row is invalid
- convert the output to integers
Setup:
- ``_drop`` is set to ``None``.
- ``_low`` is set to an int and ``_scalar`` is ``'low'``.
- ``_high`` is set to multiple columns.
Input:
- Table with a diff column that contains the constant np.log(4)/np.log(5).
The table should have one invalid row where the low value is
higher than the high column.
Output:
- Same table with the high column replaced by the low value +3/+4 for all
invalid rows, as int and the diff column dropped.
"""
# Setup
instance = GreaterThan(low=3, high=['a', 'b'], strict=True, scalar='low')
dtype = pd.Series([1]).dtype # exact dtype (32 or 64) depends on OS
instance._dtype = [dtype, dtype]
instance._diff_columns = ['a#', 'b#']
instance._columns_to_reconstruct = ['a', 'b']
# Run
transformed = pd.DataFrame({
'a': [1, 2, 4],
'b': [4, 5, 6],
'c': [7, 8, 9],
'a#': [np.log(4)] * 3,
'b#': [np.log(5)] * 3,
})
out = instance.reverse_transform(transformed)
# Assert
expected_out = pd.DataFrame({
'a': [6, 6, 4],
'b': [7, 7, 6],
'c': [7, 8, 9],
})
pd.testing.assert_frame_equal(out, expected_out)
def test_reverse_transform_scalar_is_none_multi_column(self):
"""Test the ``GreaterThan.reverse_transform`` method with low as a scalar.
The ``GreaterThan.reverse_transform`` method is expected to:
- apply an exponential to the input
- subtract 1
- add the low value when the row is invalid
- convert the output to integers
Setup:
- ``_low`` = ['a', 'c'].
- ``_high`` = ['b'].
Input:
- Table with diff columns that contain the constant np.log(1),
so the reconstructed distances are zero.
Output:
- Same table with the diff columns dropped and the original
columns left unchanged.
"""
# Setup
instance = GreaterThan(low=['a', 'c'], high=['b'], strict=True)
dtype = pd.Series([1]).dtype # exact dtype (32 or 64) depends on OS
instance._dtype = [dtype, dtype]
instance._diff_columns = ['a#', 'c#']
instance._columns_to_reconstruct = ['a', 'c']
# Run
transformed = pd.DataFrame({
'a': [1, 2, 4],
'b': [4, 5, 6],
'c': [7, 8, 9],
'a#': [np.log(1)] * 3,
'c#': [np.log(1)] * 3,
})
out = instance.reverse_transform(transformed)
# Assert
expected_out = pd.DataFrame({
'a': [1, 2, 4],
'b': [4, 5, 6],
'c': [7, 8, 9],
})
pd.testing.assert_frame_equal(out, expected_out)
def test_reverse_transform_multi_column_positive(self):
"""Test the ``GreaterThan.reverse_transform`` method for positive constraint.
The ``GreaterThan.reverse_transform`` method is expected to:
- apply an exponential to the input
- subtract 1
- subtract from the high value when the row is invalid
- convert the output to integers
Input:
- Table with given data.
Output:
- Same table with replaced rows and dropped columns.
"""
# Setup
instance = GreaterThan(low=0, high=['a', 'b'], strict=True, scalar='low')
dtype = pd.Series([1]).dtype # exact dtype (32 or 64) depends on OS
instance._dtype = [dtype, dtype]
instance._diff_columns = ['a#', 'b#']
instance._columns_to_reconstruct = ['a', 'b']
# Run
transformed = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, -1],
'c': [7, 8, 9],
'a#': [np.log(2), np.log(3), np.log(4)],
'b#': [np.log(5), np.log(6), np.log(0)],
})
out = instance.reverse_transform(transformed)
# Assert
expected_out = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 0],
'c': [7, 8, 9],
})
pd.testing.assert_frame_equal(out, expected_out)
def test_reverse_transform_multi_column_negative(self):
"""Test the ``GreaterThan.reverse_transform`` method for negative constraint.
The ``GreaterThan.reverse_transform`` method is expected to:
- apply an exponential to the input
- subtract 1
- subtract from the high value when the row is invalid
- convert the output to integers
Input:
- Table with given data.
Output:
- Same table with replaced rows and dropped columns.
"""
# Setup
instance = GreaterThan(low=['a', 'b'], high=0, strict=True, scalar='high')
dtype = pd.Series([1]).dtype # exact dtype (32 or 64) depends on OS
instance._dtype = [dtype, dtype]
instance._diff_columns = ['a#', 'b#']
instance._columns_to_reconstruct = ['a', 'b']
# Run
transformed = pd.DataFrame({
'a': [-1, -2, 1],
'b': [-4, -5, -1],
'c': [7, 8, 9],
'a#': [np.log(2), np.log(3), np.log(0)],
'b#': [np.log(5), np.log(6), np.log(2)],
})
out = instance.reverse_transform(transformed)
# Assert
expected_out = pd.DataFrame({
'a': [-1, -2, 0],
'b': [-4, -5, -1],
'c': [7, 8, 9],
})
pd.testing.assert_frame_equal(out, expected_out)
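# Illustrative helper, not a test: the forward/backward arithmetic that the
# GreaterThan tests above exercise, written out with plain numpy/pandas.
# The column values and names used here are arbitrary examples.
def _greater_than_roundtrip_sketch():
    low = pd.Series([1, 2, 3])
    high = pd.Series([4, 5, 6])
    diff = np.log(high - low + 1)          # forward transform: log(distance + 1)
    recovered = low + (np.exp(diff) - 1)   # reverse transform: exp(diff) - 1 + low
    return recovered.round().astype(high.dtype)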
class TestPositive():
def test__init__(self):
"""
Test the ``Positive.__init__`` method.
The method is expected to set the ``_low`` instance variable
to 0, the ``_scalar`` variable to ``'low'``. The rest of the
parameters should be passed. Check that ``_drop`` is set to
``None`` when ``drop`` is ``False``.
Input:
- strict = True
- columns = 'a'
- drop = False
Side effects:
- instance._low == 0
- instance._high == ['a']
- instance._strict == True
- instance._scalar == 'low'
- instance._drop = None
"""
# Run
instance = Positive(columns='a', strict=True, drop=False)
# Asserts
assert instance._low == 0
assert instance._high == ['a']
assert instance._strict is True
assert instance._scalar == 'low'
assert instance._drop is None
def test__init__drop_true(self):
"""
Test the ``Positive.__init__`` method with drop is ``True``.
Check that ``_drop`` is set to 'high' when ``drop`` is ``True``.
Input:
- strict = True
- columns = 'a'
- drop = True
Side effects:
- instance._low == 0
- instance._high == ['a']
- instance._strict == True
- instance._scalar == 'low'
- instance._drop == 'high'
"""
# Run
instance = Positive(columns='a', strict=True, drop=True)
# Asserts
assert instance._low == 0
assert instance._high == ['a']
assert instance._strict is True
assert instance._scalar == 'low'
assert instance._drop == 'high'
class TestNegative():
def test__init__(self):
"""
Test the ``Negative.__init__`` method.
The method is expected to set the ``_high`` instance variable
to 0, the ``_scalar`` variable to ``'high'``. The rest of the
parameters should be passed. Check that ``_drop`` is set to
``None`` when ``drop`` is ``False``.
Input:
- strict = True
- columns = 'a'
- drop = False
Side effects:
- instance._low == ['a']
- instance._high == 0
- instance._strict == True
- instance._scalar == 'high'
- instance._drop is None
"""
# Run
instance = Negative(columns='a', strict=True, drop=False)
# Asserts
assert instance._low == ['a']
assert instance._high == 0
assert instance._strict is True
assert instance._scalar == 'high'
assert instance._drop is None
def test__init__drop_true(self):
"""
Test the ``Negative.__init__`` method with drop is ``True``.
Check that ``_drop`` is set to 'low' when ``drop`` is ``True``.
Input:
- strict = True
- columns = 'a'
- drop = True
Side effects:
- instance._low == ['a']
- instance._high == 0
- instance._strict == True
- instance._scalar == 'high'
- instance._drop == 'low'
"""
# Run
instance = Negative(columns='a', strict=True, drop=True)
# Asserts
assert instance._low == ['a']
assert instance._high == 0
assert instance._strict is True
assert instance._scalar == 'high'
assert instance._drop == 'low'
def new_column(data):
"""Formula to be used for the ``TestColumnFormula`` class."""
if data['a'] is None or data['b'] is None:
return None
return data['a'] + data['b']
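# Illustrative note (not part of the original test suite): with this formula the tests
# below expect column 'c' to equal 'a' + 'b', e.g. a row with a=1 and b=4 is valid only
# when c == 5; if either input is missing, the missing value propagates to the result.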
class TestColumnFormula():
def test___init__(self):
"""Test the ``ColumnFormula.__init__`` method.
It is expected to create a new Constraint instance,
import the formula to use for the computation, and
set the specified constraint column.
Input:
- column = 'col'
- formula = new_column
"""
# Setup
column = 'col'
# Run
instance = ColumnFormula(column=column, formula=new_column)
# Assert
assert instance._column == column
assert instance._formula == new_column
assert instance.constraint_columns == ('col', )
def test___init__sets_rebuild_columns_if_not_reject_sampling(self):
"""Test the ``ColumnFormula.__init__`` method.
The rebuild columns should only be set if the ``handling_strategy``
is not ``reject_sampling``.
Side effects:
- instance.rebuild_columns are set
"""
# Setup
column = 'col'
# Run
instance = ColumnFormula(column=column, formula=new_column, handling_strategy='transform')
# Assert
assert instance.rebuild_columns == (column,)
def test___init__does_not_set_rebuild_columns_reject_sampling(self):
"""Test the ``ColumnFormula.__init__`` method.
The rebuild columns should not be set if the ``handling_strategy``
is ``reject_sampling``.
Side effects:
- instance.rebuild_columns are empty
"""
# Setup
column = 'col'
# Run
instance = ColumnFormula(column=column, formula=new_column,
handling_strategy='reject_sampling')
# Assert
assert instance.rebuild_columns == ()
def test_is_valid_valid(self):
"""Test the ``ColumnFormula.is_valid`` method for a valid data.
If the data fulfills the formula, result is a series of ``True`` values.
Input:
- Table data fulfilling the formula (pandas.DataFrame)
Output:
- Series of ``True`` values (pandas.Series)
"""
# Setup
column = 'c'
instance = ColumnFormula(column=column, formula=new_column)
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6],
'c': [5, 7, 9]
})
out = instance.is_valid(table_data)
# Assert
expected_out = pd.Series([True, True, True])
pd.testing.assert_series_equal(expected_out, out)
def test_is_valid_non_valid(self):
"""Test the ``ColumnFormula.is_valid`` method for a non-valid data.
If the data does not fulfill the formula, result is a series of ``False`` values.
Input:
- Table data not fulfilling the formula (pandas.DataFrame)
Output:
- Series of ``False`` values (pandas.Series)
"""
# Setup
column = 'c'
instance = ColumnFormula(column=column, formula=new_column)
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6],
'c': [1, 2, 3]
})
instance = ColumnFormula(column=column, formula=new_column)
out = instance.is_valid(table_data)
# Assert
expected_out = pd.Series([False, False, False])
pd.testing.assert_series_equal(expected_out, out)
def test_is_valid_with_nans(self):
"""Test the ``ColumnFormula.is_valid`` method for with a formula that produces nans.
If the data fulfills the formula, result is a series of ``True`` values.
Input:
- Table data fulfilling the formula (pandas.DataFrame)
Output:
- Series of ``True`` values (pandas.Series)
"""
# Setup
column = 'c'
instance = ColumnFormula(column=column, formula=new_column)
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, None],
'c': [5, 7, None]
})
instance = ColumnFormula(column=column, formula=new_column)
out = instance.is_valid(table_data)
# Assert
expected_out = pd.Series([True, True, True])
pd.testing.assert_series_equal(expected_out, out)
def test__transform(self):
"""Test the ``ColumnFormula._transform`` method.
It is expected to drop the indicated column from the table.
Input:
- Table data (pandas.DataFrame)
Output:
- Table data without the indicated column (pandas.DataFrame)
"""
# Setup
column = 'c'
instance = ColumnFormula(column=column, formula=new_column)
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6],
'c': [5, 7, 9]
})
out = instance._transform(table_data)
# Assert
expected_out = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6],
})
pd.testing.assert_frame_equal(expected_out, out)
def test__transform_without_dropping_column(self):
"""Test the ``ColumnFormula._transform`` method without dropping the column.
If `drop_column` is false, expect to not drop the constraint column.
Input:
- Table data (pandas.DataFrame)
Output:
- Table data with the indicated column (pandas.DataFrame)
"""
# Setup
column = 'c'
instance = ColumnFormula(column=column, formula=new_column, drop_column=False)
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6],
'c': [5, 7, 9]
})
out = instance._transform(table_data)
# Assert
expected_out = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6],
'c': [5, 7, 9]
})
pd.testing.assert_frame_equal(expected_out, out)
def test__transform_missing_column(self):
"""Test the ``ColumnFormula._transform`` method when the constraint column is missing.
When ``_transform`` is called with data that does not contain the constraint column,
expect to return the data as-is.
Input:
- Table data (pandas.DataFrame)
Output:
- Table data, unchanged (pandas.DataFrame)
"""
# Setup
column = 'c'
instance = ColumnFormula(column=column, formula=new_column)
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6],
'd': [5, 7, 9]
})
out = instance._transform(table_data)
# Assert
expected_out = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6],
'd': [5, 7, 9]
})
pd.testing.assert_frame_equal(expected_out, out)
def test_reverse_transform(self):
"""Test the ``ColumnFormula.reverse_transform`` method.
It is expected to compute the indicated column by applying the given formula.
Input:
- Table data with the column with incorrect values (pandas.DataFrame)
Output:
- Table data with the computed column (pandas.DataFrame)
"""
# Setup
column = 'c'
instance = ColumnFormula(column=column, formula=new_column)
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6],
'c': [1, 1, 1]
})
out = instance.reverse_transform(table_data)
# Assert
expected_out = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6],
'c': [5, 7, 9]
})
pd.testing.assert_frame_equal(expected_out, out)
class TestRounding():
def test___init__(self):
"""Test the ``Rounding.__init__`` method.
It is expected to create a new Constraint instance
and set the rounding args.
Input:
- columns = ['b', 'c']
- digits = 2
"""
# Setup
columns = ['b', 'c']
digits = 2
# Run
instance = Rounding(columns=columns, digits=digits)
# Assert
assert instance._columns == columns
assert instance._digits == digits
def test___init__invalid_digits(self):
"""Test the ``Rounding.__init__`` method with an invalid argument.
Pass in an invalid ``digits`` argument, and expect a ValueError.
Input:
- columns = ['b', 'c']
- digits = 20
"""
# Setup
columns = ['b', 'c']
digits = 20
# Run
with pytest.raises(ValueError):
Rounding(columns=columns, digits=digits)
def test___init__invalid_tolerance(self):
"""Test the ``Rounding.__init__`` method with an invalid argument.
Pass in an invalid ``tolerance`` argument, and expect a ValueError.
Input:
- columns = ['b', 'c']
- digits = 2
- tolerance = 0.1
"""
# Setup
columns = ['b', 'c']
digits = 2
tolerance = 0.1
# Run
with pytest.raises(ValueError):
Rounding(columns=columns, digits=digits, tolerance=tolerance)
def test_is_valid_positive_digits(self):
"""Test the ``Rounding.is_valid`` method for a positive digits argument.
Input:
- Table data where only some rows match the desired decimal places (pandas.DataFrame)
Output:
- Series with ``True`` for the matching rows and ``False`` otherwise (pandas.Series)
"""
# Setup
columns = ['b', 'c']
digits = 2
tolerance = 1e-3
instance = Rounding(columns=columns, digits=digits, tolerance=tolerance)
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3, 4, 5],
'b': [4.12, 5.51, None, 6.941, 1.129],
'c': [5.315, 7.12, 1.12, 9.131, 12.329],
'd': ['a', 'b', 'd', 'e', None],
'e': [123.31598, -1.12001, 1.12453, 8.12129, 1.32923]
})
out = instance.is_valid(table_data)
# Assert
expected_out = pd.Series([False, True, False, True, True])
pd.testing.assert_series_equal(expected_out, out)
def test_is_valid_negative_digits(self):
"""Test the ``Rounding.is_valid`` method for a negative digits argument.
Input:
- Table data where only some rows match the desired decimal places (pandas.DataFrame)
Output:
- Series with ``True`` for the matching rows and ``False`` otherwise (pandas.Series)
"""
# Setup
columns = ['b']
digits = -2
tolerance = 1
instance = Rounding(columns=columns, digits=digits, tolerance=tolerance)
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3, 4, 5],
'b': [401, 500, 6921, 799, None],
'c': [5.3134, 7.1212, 9.1209, 101.1234, None],
'd': ['a', 'b', 'd', 'e', 'f']
})
out = instance.is_valid(table_data)
# Assert
expected_out = pd.Series([True, True, False, True, False])
pd.testing.assert_series_equal(expected_out, out)
def test_is_valid_zero_digits(self):
"""Test the ``Rounding.is_valid`` method for a zero digits argument.
Input:
- Table data where only some rows are rounded to whole numbers (pandas.DataFrame)
Output:
- Series with ``True`` for the matching rows and ``False`` otherwise (pandas.Series)
"""
# Setup
columns = ['b', 'c']
digits = 0
tolerance = 1e-4
instance = Rounding(columns=columns, digits=digits, tolerance=tolerance)
# Run
table_data = pd.DataFrame({
'a': [1, 2, None, 3, 4],
'b': [4, 5.5, 1.2, 6.0001, 5.99999],
'c': [5, 7.12, 1.31, 9.00001, 4.9999],
'd': ['a', 'b', None, 'd', 'e'],
'e': [2.1254, 17.12123, 124.12, 123.0112, -9.129434]
})
out = instance.is_valid(table_data)
# Assert
expected_out = pd.Series([True, False, False, True, True])
pd.testing.assert_series_equal(expected_out, out)
def test_reverse_transform_positive_digits(self):
"""Test the ``Rounding.reverse_transform`` method with positive digits.
Expect that the columns are rounded to the specified integer digit.
Input:
- Table data with the column with incorrect values (pandas.DataFrame)
Output:
- Table data with the computed column (pandas.DataFrame)
"""
# Setup
columns = ['b', 'c']
digits = 3
instance = Rounding(columns=columns, digits=digits)
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3, None, 4],
'b': [4.12345, None, 5.100, 6.0001, 1.7999],
'c': [1.1, 1.234, 9.13459, 4.3248, 6.1312],
'd': ['a', 'b', 'd', 'e', None]
})
out = instance.reverse_transform(table_data)
# Assert
expected_out = pd.DataFrame({
'a': [1, 2, 3, None, 4],
'b': [4.123, None, 5.100, 6.000, 1.800],
'c': [1.100, 1.234, 9.135, 4.325, 6.131],
'd': ['a', 'b', 'd', 'e', None]
})
pd.testing.assert_frame_equal(expected_out, out)
def test_reverse_transform_negative_digits(self):
"""Test the ``Rounding.reverse_transform`` method with negative digits.
Expect that the columns are rounded to the specified integer digit.
Input:
- Table data with the column with incorrect values (pandas.DataFrame)
Output:
- Table data with the computed column (pandas.DataFrame)
"""
# Setup
columns = ['b']
digits = -3
instance = Rounding(columns=columns, digits=digits)
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3, 4, 5],
'b': [41234.5, None, 5000, 6001, 5928],
'c': [1.1, 1.23423, 9.13459, 12.12125, 18.12152],
'd': ['a', 'b', 'd', 'e', 'f']
})
out = instance.reverse_transform(table_data)
# Assert
expected_out = pd.DataFrame({
'a': [1, 2, 3, 4, 5],
'b': [41000.0, None, 5000.0, 6000.0, 6000.0],
'c': [1.1, 1.23423, 9.13459, 12.12125, 18.12152],
'd': ['a', 'b', 'd', 'e', 'f']
})
pd.testing.assert_frame_equal(expected_out, out)
def test_reverse_transform_zero_digits(self):
"""Test the ``Rounding.reverse_transform`` method with zero digits.
Expect that the columns are rounded to the specified integer digit.
Input:
- Table data with the column with incorrect values (pandas.DataFrame)
Output:
- Table data with the computed column (pandas.DataFrame)
"""
# Setup
columns = ['b', 'c']
digits = 0
instance = Rounding(columns=columns, digits=digits)
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3, 4, 5],
'b': [4.12345, None, 5.0, 6.01, 7.9],
'c': [1.1, 1.0, 9.13459, None, 8.89],
'd': ['a', 'b', 'd', 'e', 'f']
})
out = instance.reverse_transform(table_data)
# Assert
expected_out = pd.DataFrame({
'a': [1, 2, 3, 4, 5],
'b': [4.0, None, 5.0, 6.0, 8.0],
'c': [1.0, 1.0, 9.0, None, 9.0],
'd': ['a', 'b', 'd', 'e', 'f']
})
pd.testing.assert_frame_equal(expected_out, out)
def transform(data, low, high):
"""Transform to be used for the TestBetween class."""
data = (data - low) / (high - low) * 0.95 + 0.025
return np.log(data / (1.0 - data))
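# Illustrative sketch (not used by the tests): the helper above rescales values from
# [low, high] into (0.025, 0.975) and then applies a logit, which is what the Between
# constraint's transformed column is expected to contain. The function below is only a
# worked example of that round trip; the inversion shown here is an assumption for
# illustration, not the constraint's actual reverse_transform implementation.
def _example_between_round_trip():
    data = pd.Series([0.1, 0.5, 0.9])
    low, high = 0.0, 1.0
    logit = transform(data, low, high)
    # Inverting the two steps (sigmoid, then un-scale) recovers the original values.
    sigmoid = 1.0 / (1.0 + np.exp(-logit))
    recovered = (sigmoid - 0.025) / 0.95 * (high - low) + low
    return np.allclose(recovered, data)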
class TestBetween():
def test___init__sets_rebuild_columns_if_not_reject_sampling(self):
"""Test the ``Between.__init__`` method.
The rebuild columns should only be set if the ``handling_strategy``
is not ``reject_sampling``.
Side effects:
- instance.rebuild_columns are set
"""
# Setup
column = 'col'
# Run
instance = Between(column=column, low=10, high=20, handling_strategy='transform')
# Assert
assert instance.rebuild_columns == (column,)
def test___init__does_not_set_rebuild_columns_reject_sampling(self):
"""Test the ``Between.__init__`` method.
The rebuild columns should not be set if the ``handling_strategy``
is ``reject_sampling``.
Side effects:
- instance.rebuild_columns are empty
"""
# Setup
column = 'col'
# Run
instance = Between(column=column, low=10, high=20, handling_strategy='reject_sampling')
# Assert
assert instance.rebuild_columns == ()
def test_fit_only_one_datetime_arg(self):
"""Test the ``Between.fit`` method by passing in only one arg as datetime.
If only one of the bound parameters is a datetime type, expect a ValueError.
Input:
- low is an int scalar
- high is a datetime
Output:
- n/a
Side Effects:
- ValueError
"""
# Setup
column = 'a'
low = 0.0
high = pd.to_datetime('2021-01-01')
instance = Between(column=column, low=low, high=high)
# Run and assert
table_data = pd.DataFrame({
'a': [0.1, 0.5, 0.9],
'b': [4, 5, 6],
})
with pytest.raises(ValueError):
instance.fit(table_data)
def test_transform_scalar_scalar(self):
"""Test the ``Between.transform`` method by passing ``low`` and ``high`` as scalars.
It is expected to create a new column similar to the constraint ``column``, and then
scale and apply a logit function to that column.
Input:
- Table data (pandas.DataFrame)
Output:
- Table data with an extra column containing the transformed ``column`` (pandas.DataFrame)
"""
# Setup
column = 'a'
low = 0.0
high = 1.0
instance = Between(column=column, low=low, high=high, high_is_scalar=True,
low_is_scalar=True)
# Run
table_data = pd.DataFrame({
'a': [0.1, 0.5, 0.9],
'b': [4, 5, 6],
})
instance.fit(table_data)
out = instance.transform(table_data)
# Assert
expected_out = pd.DataFrame({
'b': [4, 5, 6],
'a#0.0#1.0': transform(table_data[column], low, high)
})
pd.testing.assert_frame_equal(expected_out, out)
def test__transform_scalar_column(self):
"""Test the ``Between._transform`` method with ``low`` as scalar and ``high`` as a column.
It is expected to create a new column similar to the constraint ``column``, and then
scale and apply a logit function to that column.
Input:
- Table data (pandas.DataFrame)
Output:
- Table data with an extra column containing the transformed ``column`` (pandas.DataFrame)
"""
# Setup
column = 'a'
low = 0.0
high = 'b'
instance = Between(column=column, low=low, high=high, low_is_scalar=True)
# Run
table_data = pd.DataFrame({
'a': [0.1, 0.5, 0.9],
'b': [0.5, 1, 6],
})
instance.fit(table_data)
out = instance._transform(table_data)
# Assert
expected_out = pd.DataFrame({
'b': [0.5, 1, 6],
'a#0.0#b': transform(table_data[column], low, table_data[high])
})
pd.testing.assert_frame_equal(expected_out, out)
def test__transform_column_scalar(self):
"""Test the ``Between._transform`` method with ``low`` as a column and ``high`` as scalar.
It is expected to create a new column similar to the constraint ``column``, and then
scale and apply a logit function to that column.
Input:
- Table data (pandas.DataFrame)
Output:
- Table data with an extra column containing the transformed ``column`` (pandas.DataFrame)
"""
# Setup
column = 'a'
low = 'b'
high = 1.0
instance = Between(column=column, low=low, high=high, high_is_scalar=True)
# Run
table_data = pd.DataFrame({
'a': [0.1, 0.5, 0.9],
'b': [0, -1, 0.5],
})
instance.fit(table_data)
out = instance._transform(table_data)
# Assert
expected_out = pd.DataFrame({
'b': [0, -1, 0.5],
'a#b#1.0': transform(table_data[column], table_data[low], high)
})
pd.testing.assert_frame_equal(expected_out, out)
def test__transform_column_column(self):
"""Test the ``Between._transform`` method by passing ``low`` and ``high`` as columns.
It is expected to create a new column similar to the constraint ``column``, and then
scale and apply a logit function to that column.
Input:
- Table data (pandas.DataFrame)
Output:
- Table data with an extra column containing the transformed ``column`` (pandas.DataFrame)
"""
# Setup
column = 'a'
low = 'b'
high = 'c'
instance = Between(column=column, low=low, high=high)
# Run
table_data = pd.DataFrame({
'a': [0.1, 0.5, 0.9],
'b': [0, -1, 0.5],
'c': [0.5, 1, 6]
})
instance.fit(table_data)
out = instance._transform(table_data)
# Assert
expected_out = pd.DataFrame({
'b': [0, -1, 0.5],
'c': [0.5, 1, 6],
'a#b#c': transform(table_data[column], table_data[low], table_data[high])
})
pd.testing.assert_frame_equal(expected_out, out)
def test__transform_datetime_datetime(self):
"""Test the ``Between._transform`` method by passing ``low`` and ``high`` as datetimes.
It is expected to create a new column similar to the constraint ``column``, and then
scale and apply a logit function to that column.
Input:
- Table data (pandas.DataFrame)
- High and Low as datetimes
Output:
- Table data with an extra column containing the transformed ``column`` (pandas.DataFrame)
"""
# Setup
column = 'a'
low = pd.to_datetime('1900-01-01')
high = pd.to_datetime('2021-01-01')
instance = Between(column=column, low=low, high=high, high_is_scalar=True,
low_is_scalar=True)
# Run
table_data = pd.DataFrame({
'a': [
pd.to_datetime('2020-09-03'),
pd.to_datetime('2020-08-01'),
pd.to_datetime('2020-08-03'),
],
'b': [4, 5, 6],
})
instance.fit(table_data)
out = instance._transform(table_data)
# Assert
expected_out = pd.DataFrame({
'b': [4, 5, 6],
'a#1900-01-01T00:00:00.000000000#2021-01-01T00:00:00.000000000': transform(
table_data[column], low, high)
})
pd.testing.assert_frame_equal(expected_out, out)
def test__transform_datetime_column(self):
"""Test the ``Between._transform`` method with ``low`` as datetime and ``high`` as a column.
It is expected to create a new column similar to the constraint ``column``, and then
scale and apply a logit function to that column.
Input:
- Table data (pandas.DataFrame)
Output:
- Table data with an extra column containing the transformed ``column`` (pandas.DataFrame)
"""
# Setup
column = 'a'
low = pd.to_datetime('1900-01-01')
high = 'b'
instance = Between(column=column, low=low, high=high, low_is_scalar=True)
# Run
table_data = pd.DataFrame({
'a': [
pd.to_datetime('2020-09-03'),
pd.to_datetime('2020-08-01'),
pd.to_datetime('2020-08-03'),
],
'b': [
pd.to_datetime('2020-10-03'),
pd.to_datetime('2020-11-01'),
pd.to_datetime('2020-11-03'),
],
})
instance.fit(table_data)
out = instance._transform(table_data)
# Assert
expected_out = pd.DataFrame({
'b': [
pd.to_datetime('2020-10-03'),
pd.to_datetime('2020-11-01'),
pd.to_datetime('2020-11-03'),
],
'a#1900-01-01T00:00:00.000000000#b': transform(
table_data[column], low, table_data[high])
})
pd.testing.assert_frame_equal(expected_out, out)
def test__transform_column_datetime(self):
"""Test the ``Between._transform`` method with ``low`` as a column and ``high`` as datetime.
It is expected to create a new column similar to the constraint ``column``, and then
scale and apply a logit function to that column.
Input:
- Table data (pandas.DataFrame)
Output:
- Table data with an extra column containing the transformed ``column`` (pandas.DataFrame)
"""
# Setup
column = 'a'
low = 'b'
high = pd.to_datetime('2021-01-01')
instance = Between(column=column, low=low, high=high, high_is_scalar=True)
# Run
table_data = pd.DataFrame({
'a': [
pd.to_datetime('2020-09-03'),
pd.to_datetime('2020-08-01'),
pd.to_datetime('2020-08-03'),
],
'b': [
pd.to_datetime('2020-01-03'),
pd.to_datetime('2020-02-01'),
pd.to_datetime('2020-02-03'),
],
})
instance.fit(table_data)
out = instance._transform(table_data)
# Assert
expected_out = pd.DataFrame({
'b': [
pd.to_datetime('2020-01-03'),
pd.to_datetime('2020-02-01'),
# This example requires pandas, numpy, sklearn, scipy
# Inspired by an MLFlow tutorial:
# https://github.com/databricks/mlflow/blob/master/example/tutorial/train.py
import datetime
import itertools
import logging
import sys
from typing import Tuple
import numpy as np
import pandas as pd
from pandas import DataFrame
from sklearn.linear_model import ElasticNet
from sklearn.metrics import mean_absolute_error, mean_squared_error, r2_score
from sklearn.model_selection import train_test_split
from dbnd import (
dbnd_config,
dbnd_handle_errors,
log_dataframe,
log_metric,
output,
pipeline,
task,
)
from dbnd.utils import data_combine, period_dates
from dbnd_examples.data import data_repo
from dbnd_examples.pipelines.wine_quality.serving.docker import package_as_docker
from targets import target
from targets.types import PathStr
logger = logging.getLogger(__name__)
# dbnd run -m dbnd_examples predict_wine_quality --task-version now
# dbnd run -m dbnd_examples predict_wine_quality_parameter_search --task-version now
def calculate_metrics(actual, pred):
rmse = np.sqrt(mean_squared_error(actual, pred))
mae = mean_absolute_error(actual, pred)
r2 = r2_score(actual, pred)
return rmse, mae, r2
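# Quick sanity check (illustrative only, not executed): for actual=[3, 5] and pred=[2, 5]
# the squared errors are [1, 0], so rmse = sqrt(0.5) ~= 0.707 and mae = 0.5; the total
# sum of squares around the mean (4) is 2, so r2 = 1 - 1/2 = 0.5.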
@task(result="training_set, test_set, validation_set")
def prepare_data(raw_data: DataFrame) -> Tuple[DataFrame, DataFrame, DataFrame]:
""" Split data into train, test and validation """
train_df, test_df = train_test_split(raw_data)
test_df, validation_df = train_test_split(test_df, test_size=0.5)
sys.stderr.write("Running Prepare Data! You'll see this message in task log \n")
print("..and this one..\n")
logger.info("..and this one for sure!")
log_dataframe("raw", raw_data)
return train_df, test_df, validation_df
@task
def calculate_alpha(alpha: float = 0.5) -> float:
""" Calculates alpha for train_model """
alpha += 0.1
return alpha
@task
def train_model(
test_set: DataFrame,
training_set: DataFrame,
alpha: float = 0.5,
l1_ratio: float = 0.5,
) -> ElasticNet:
""" Train wine prediction model """
lr = ElasticNet(alpha=alpha, l1_ratio=l1_ratio)
lr.fit(training_set.drop(["quality"], 1), training_set[["quality"]])
prediction = lr.predict(test_set.drop(["quality"], 1))
(rmse, mae, r2) = calculate_metrics(test_set[["quality"]], prediction)
log_metric("alpha", alpha)
log_metric("rmse", rmse)
log_metric("mae", rmse)
log_metric("r2", r2)
logging.info(
"Elasticnet model (alpha=%f, l1_ratio=%f): rmse = %f, mae = %f, r2 = %f",
alpha,
l1_ratio,
rmse,
mae,
r2,
)
return lr
def _create_scatter_plot(actual, predicted):
import matplotlib.pyplot as plt
fig = plt.figure()
ax = fig.add_subplot(1, 1, 1)
ax.set_title("Actual vs. Predicted")
ax.set_xlabel("Actual Labels")
ax.set_ylabel("Predicted Values")
ax.scatter(actual, predicted)
return fig
@task
def validate_model(model: ElasticNet, validation_dataset: DataFrame) -> str:
""" Calculates metrics of wine prediction model """
log_dataframe("validation", validation_dataset)
# support for py3 parquet
validation_dataset = validation_dataset.rename(str, axis="columns")
validation_x = validation_dataset.drop(["quality"], 1)
validation_y = validation_dataset[["quality"]]
prediction = model.predict(validation_x)
(rmse, mae, r2) = calculate_metrics(validation_y, prediction)
# log_artifact(
# "prediction_scatter_plot", _create_scatter_plot(validation_y, prediction)
# )
log_metric("rmse", rmse)
log_metric("mae", rmse)
log_metric("r2", r2)
return "%s,%s,%s" % (rmse, mae, r2)
@pipeline(result=("model", "validation"))
def predict_wine_quality(
data: DataFrame = None,
alpha: float = 0.5,
l1_ratio: float = 0.5,
good_alpha: bool = False,
):
""" Entry point for wine quality prediction """
if data is None:
data = fetch_data()
training_set, test_set, validation_set = prepare_data(raw_data=data)
if good_alpha:
alpha = calculate_alpha(alpha)
model = train_model(
test_set=test_set, training_set=training_set, alpha=alpha, l1_ratio=l1_ratio
)
validation = validate_model(model=model, validation_dataset=validation_set)
return model, validation
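# Illustrative direct call (an assumption -- outside the dbnd CLI invocations shown above):
#     model, validation = predict_wine_quality(data=my_wine_df, alpha=0.4, l1_ratio=0.6)
# where `my_wine_df` is a hypothetical DataFrame with the same columns as the wine-quality
# CSV, including the 'quality' column.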
@pipeline(result=("model", "validation", "serving"))
def predict_wine_quality_package():
model, validation = predict_wine_quality()
serving = package_as_docker(model=model)
return model, validation, serving
@pipeline
def predict_wine_quality_parameter_search(
alpha_step: float = 0.3, l1_ratio_step: float = 0.4
):
result = {}
variants = list(
itertools.product(np.arange(0, 1, alpha_step), np.arange(0, 1, l1_ratio_step))
)
logger.info("All Variants: %s", variants)
for alpha_value, l1_ratio in variants:
exp_name = "Predict_%f_l1_ratio_%f" % (alpha_value, l1_ratio)
model, validation = predict_wine_quality(
alpha=alpha_value, l1_ratio=l1_ratio, task_name=exp_name
)
result[exp_name] = (model, validation)
return result
# DATA FETCHING
@pipeline
def wine_quality_day(
task_target_date: datetime.date, root_location: PathStr = data_repo.wines_per_date
) -> pd.DataFrame:
return target(root_location, task_target_date.strftime("%Y-%m-%d"), "wine.csv")
@task(result=output.prod_immutable[DataFrame])
def fetch_wine_quality(
task_target_date: datetime.date, data: pd.DataFrame = data_repo.wines_full
) -> pd.DataFrame:
# very simple implementation that just samples the data with seed = target date
return DataFrame.sample(data, frac=0.2, random_state=task_target_date.day)
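# Note (illustrative observation): random_state=task_target_date.day only varies with the
# day of the month, so two target dates sharing a day-of-month return the same sample; a
# seed derived from the full date, e.g. int(task_target_date.strftime("%Y%m%d")), would
# make every date distinct.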
from collections import abc, deque
from decimal import Decimal
from io import StringIO
from warnings import catch_warnings
import numpy as np
from numpy.random import randn
import pytest
from pandas.core.dtypes.dtypes import CategoricalDtype
import pandas as pd
from pandas import (
Categorical,
DataFrame,
DatetimeIndex,
Index,
MultiIndex,
Series,
concat,
date_range,
read_csv,
)
import pandas._testing as tm
from pandas.core.arrays import SparseArray
from pandas.core.construction import create_series_with_explicit_dtype
from pandas.tests.extension.decimal import to_decimal
@pytest.fixture(params=[True, False])
def sort(request):
"""Boolean sort keyword for concat and DataFrame.append."""
return request.param
class TestConcatenate:
def test_concat_copy(self):
df = DataFrame(np.random.randn(4, 3))
df2 = DataFrame(np.random.randint(0, 10, size=4).reshape(4, 1))
df3 = DataFrame({5: "foo"}, index=range(4))
# These are actual copies.
result = concat([df, df2, df3], axis=1, copy=True)
for b in result._mgr.blocks:
assert b.values.base is None
# These are the same.
result = concat([df, df2, df3], axis=1, copy=False)
for b in result._mgr.blocks:
if b.is_float:
assert b.values.base is df._mgr.blocks[0].values.base
elif b.is_integer:
assert b.values.base is df2._mgr.blocks[0].values.base
elif b.is_object:
assert b.values.base is not None
# Float block was consolidated.
df4 = DataFrame(np.random.randn(4, 1))
result = concat([df, df2, df3, df4], axis=1, copy=False)
for b in result._mgr.blocks:
if b.is_float:
assert b.values.base is None
elif b.is_integer:
assert b.values.base is df2._mgr.blocks[0].values.base
elif b.is_object:
assert b.values.base is not None
def test_concat_with_group_keys(self):
df = DataFrame(np.random.randn(4, 3))
df2 = DataFrame(np.random.randn(4, 4))
# axis=0
df = DataFrame(np.random.randn(3, 4))
df2 = DataFrame(np.random.randn(4, 4))
result = concat([df, df2], keys=[0, 1])
exp_index = MultiIndex.from_arrays(
[[0, 0, 0, 1, 1, 1, 1], [0, 1, 2, 0, 1, 2, 3]]
)
expected = DataFrame(np.r_[df.values, df2.values], index=exp_index)
tm.assert_frame_equal(result, expected)
result = concat([df, df], keys=[0, 1])
exp_index2 = MultiIndex.from_arrays([[0, 0, 0, 1, 1, 1], [0, 1, 2, 0, 1, 2]])
expected = DataFrame(np.r_[df.values, df.values], index=exp_index2)
tm.assert_frame_equal(result, expected)
# axis=1
df = DataFrame(np.random.randn(4, 3))
df2 = DataFrame(np.random.randn(4, 4))
result = concat([df, df2], keys=[0, 1], axis=1)
expected = DataFrame(np.c_[df.values, df2.values], columns=exp_index)
tm.assert_frame_equal(result, expected)
result = concat([df, df], keys=[0, 1], axis=1)
expected = DataFrame(np.c_[df.values, df.values], columns=exp_index2)
tm.assert_frame_equal(result, expected)
def test_concat_keys_specific_levels(self):
df = DataFrame(np.random.randn(10, 4))
pieces = [df.iloc[:, [0, 1]], df.iloc[:, [2]], df.iloc[:, [3]]]
level = ["three", "two", "one", "zero"]
result = concat(
pieces,
axis=1,
keys=["one", "two", "three"],
levels=[level],
names=["group_key"],
)
tm.assert_index_equal(result.columns.levels[0], Index(level, name="group_key"))
tm.assert_index_equal(result.columns.levels[1], Index([0, 1, 2, 3]))
assert result.columns.names == ["group_key", None]
def test_concat_dataframe_keys_bug(self, sort):
t1 = DataFrame(
{"value": Series([1, 2, 3], index=Index(["a", "b", "c"], name="id"))}
)
t2 = DataFrame({"value": Series([7, 8], index=Index(["a", "b"], name="id"))})
# it works
result = concat([t1, t2], axis=1, keys=["t1", "t2"], sort=sort)
assert list(result.columns) == [("t1", "value"), ("t2", "value")]
def test_concat_series_partial_columns_names(self):
# GH10698
foo = Series([1, 2], name="foo")
bar = Series([1, 2])
baz = Series([4, 5])
result = concat([foo, bar, baz], axis=1)
expected = DataFrame(
{"foo": [1, 2], 0: [1, 2], 1: [4, 5]}, columns=["foo", 0, 1]
)
tm.assert_frame_equal(result, expected)
result = concat([foo, bar, baz], axis=1, keys=["red", "blue", "yellow"])
expected = DataFrame(
{"red": [1, 2], "blue": [1, 2], "yellow": [4, 5]},
columns=["red", "blue", "yellow"],
)
tm.assert_frame_equal(result, expected)
result = concat([foo, bar, baz], axis=1, ignore_index=True)
expected = DataFrame({0: [1, 2], 1: [1, 2], 2: [4, 5]})
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("mapping", ["mapping", "dict"])
def test_concat_mapping(self, mapping, non_dict_mapping_subclass):
constructor = dict if mapping == "dict" else non_dict_mapping_subclass
frames = constructor(
{
"foo": DataFrame(np.random.randn(4, 3)),
"bar": DataFrame(np.random.randn(4, 3)),
"baz": DataFrame(np.random.randn(4, 3)),
"qux": DataFrame(np.random.randn(4, 3)),
}
)
sorted_keys = list(frames.keys())
result = concat(frames)
expected = concat([frames[k] for k in sorted_keys], keys=sorted_keys)
tm.assert_frame_equal(result, expected)
result = concat(frames, axis=1)
expected = concat([frames[k] for k in sorted_keys], keys=sorted_keys, axis=1)
tm.assert_frame_equal(result, expected)
keys = ["baz", "foo", "bar"]
result = concat(frames, keys=keys)
expected = concat([frames[k] for k in keys], keys=keys)
tm.assert_frame_equal(result, expected)
def test_concat_ignore_index(self, sort):
frame1 = DataFrame(
{"test1": ["a", "b", "c"], "test2": [1, 2, 3], "test3": [4.5, 3.2, 1.2]}
)
frame2 = DataFrame({"test3": [5.2, 2.2, 4.3]})
frame1.index = Index(["x", "y", "z"])
frame2.index = Index(["x", "y", "q"])
v1 = concat([frame1, frame2], axis=1, ignore_index=True, sort=sort)
nan = np.nan
expected = DataFrame(
[
[nan, nan, nan, 4.3],
["a", 1, 4.5, 5.2],
["b", 2, 3.2, 2.2],
["c", 3, 1.2, nan],
],
index=Index(["q", "x", "y", "z"]),
)
if not sort:
expected = expected.loc[["x", "y", "z", "q"]]
tm.assert_frame_equal(v1, expected)
@pytest.mark.parametrize(
"name_in1,name_in2,name_in3,name_out",
[
("idx", "idx", "idx", "idx"),
("idx", "idx", None, None),
("idx", None, None, None),
("idx1", "idx2", None, None),
("idx1", "idx1", "idx2", None),
("idx1", "idx2", "idx3", None),
(None, None, None, None),
],
)
def test_concat_same_index_names(self, name_in1, name_in2, name_in3, name_out):
# GH13475
indices = [
Index(["a", "b", "c"], name=name_in1),
Index(["b", "c", "d"], name=name_in2),
Index(["c", "d", "e"], name=name_in3),
]
frames = [
DataFrame({c: [0, 1, 2]}, index=i) for i, c in zip(indices, ["x", "y", "z"])
]
result = pd.concat(frames, axis=1)
exp_ind = Index(["a", "b", "c", "d", "e"], name=name_out)
expected = DataFrame(
{
"x": [0, 1, 2, np.nan, np.nan],
"y": [np.nan, 0, 1, 2, np.nan],
"z": [np.nan, np.nan, 0, 1, 2],
},
index=exp_ind,
)
tm.assert_frame_equal(result, expected)
def test_concat_multiindex_with_keys(self):
index = MultiIndex(
levels=[["foo", "bar", "baz", "qux"], ["one", "two", "three"]],
codes=[[0, 0, 0, 1, 1, 2, 2, 3, 3, 3], [0, 1, 2, 0, 1, 1, 2, 0, 1, 2]],
names=["first", "second"],
)
frame = DataFrame(
np.random.randn(10, 3),
index=index,
columns=Index(["A", "B", "C"], name="exp"),
)
result = concat([frame, frame], keys=[0, 1], names=["iteration"])
assert result.index.names == ("iteration",) + index.names
tm.assert_frame_equal(result.loc[0], frame)
tm.assert_frame_equal(result.loc[1], frame)
assert result.index.nlevels == 3
def test_concat_multiindex_with_none_in_index_names(self):
# GH 15787
index = pd.MultiIndex.from_product([[1], range(5)], names=["level1", None])
df = DataFrame({"col": range(5)}, index=index, dtype=np.int32)
result = concat([df, df], keys=[1, 2], names=["level2"])
index = pd.MultiIndex.from_product(
[[1, 2], [1], range(5)], names=["level2", "level1", None]
)
expected = DataFrame({"col": list(range(5)) * 2}, index=index, dtype=np.int32)
tm.assert_frame_equal(result, expected)
result = concat([df, df[:2]], keys=[1, 2], names=["level2"])
level2 = [1] * 5 + [2] * 2
level1 = [1] * 7
no_name = list(range(5)) + list(range(2))
tuples = list(zip(level2, level1, no_name))
index = pd.MultiIndex.from_tuples(tuples, names=["level2", "level1", None])
expected = DataFrame({"col": no_name}, index=index, dtype=np.int32)
tm.assert_frame_equal(result, expected)
def test_concat_keys_and_levels(self):
df = DataFrame(np.random.randn(1, 3))
df2 = DataFrame(np.random.randn(1, 4))
levels = [["foo", "baz"], ["one", "two"]]
names = ["first", "second"]
result = concat(
[df, df2, df, df2],
keys=[("foo", "one"), ("foo", "two"), ("baz", "one"), ("baz", "two")],
levels=levels,
names=names,
)
expected = concat([df, df2, df, df2])
exp_index = MultiIndex(
levels=levels + [[0]],
codes=[[0, 0, 1, 1], [0, 1, 0, 1], [0, 0, 0, 0]],
names=names + [None],
)
expected.index = exp_index
tm.assert_frame_equal(result, expected)
# no names
result = concat(
[df, df2, df, df2],
keys=[("foo", "one"), ("foo", "two"), ("baz", "one"), ("baz", "two")],
levels=levels,
)
assert result.index.names == (None,) * 3
# no levels
result = concat(
[df, df2, df, df2],
keys=[("foo", "one"), ("foo", "two"), ("baz", "one"), ("baz", "two")],
names=["first", "second"],
)
assert result.index.names == ("first", "second", None)
tm.assert_index_equal(
result.index.levels[0], Index(["baz", "foo"], name="first")
)
def test_concat_keys_levels_no_overlap(self):
# GH #1406
df = DataFrame(np.random.randn(1, 3), index=["a"])
df2 = DataFrame(np.random.randn(1, 4), index=["b"])
msg = "Values not found in passed level"
with pytest.raises(ValueError, match=msg):
concat([df, df], keys=["one", "two"], levels=[["foo", "bar", "baz"]])
msg = "Key one not in level"
with pytest.raises(ValueError, match=msg):
concat([df, df2], keys=["one", "two"], levels=[["foo", "bar", "baz"]])
def test_concat_rename_index(self):
a = DataFrame(
np.random.rand(3, 3),
columns=list("ABC"),
index=Index(list("abc"), name="index_a"),
)
b = DataFrame(
np.random.rand(3, 3),
columns=list("ABC"),
index=Index(list("abc"), name="index_b"),
)
result = concat([a, b], keys=["key0", "key1"], names=["lvl0", "lvl1"])
exp = concat([a, b], keys=["key0", "key1"], names=["lvl0"])
names = list(exp.index.names)
names[1] = "lvl1"
exp.index.set_names(names, inplace=True)
tm.assert_frame_equal(result, exp)
assert result.index.names == exp.index.names
def test_crossed_dtypes_weird_corner(self):
columns = ["A", "B", "C", "D"]
df1 = DataFrame(
{
"A": np.array([1, 2, 3, 4], dtype="f8"),
"B": np.array([1, 2, 3, 4], dtype="i8"),
"C": np.array([1, 2, 3, 4], dtype="f8"),
"D": np.array([1, 2, 3, 4], dtype="i8"),
},
columns=columns,
)
df2 = DataFrame(
{
"A": np.array([1, 2, 3, 4], dtype="i8"),
"B": np.array([1, 2, 3, 4], dtype="f8"),
"C": np.array([1, 2, 3, 4], dtype="i8"),
"D": np.array([1, 2, 3, 4], dtype="f8"),
},
columns=columns,
)
appended = df1.append(df2, ignore_index=True)
expected = DataFrame(
np.concatenate([df1.values, df2.values], axis=0), columns=columns
)
tm.assert_frame_equal(appended, expected)
df = DataFrame(np.random.randn(1, 3), index=["a"])
df2 = DataFrame(np.random.randn(1, 4), index=["b"])
result = concat([df, df2], keys=["one", "two"], names=["first", "second"])
assert result.index.names == ("first", "second")
def test_dups_index(self):
# GH 4771
# single dtypes
df = DataFrame(
np.random.randint(0, 10, size=40).reshape(10, 4),
columns=["A", "A", "C", "C"],
)
result = concat([df, df], axis=1)
tm.assert_frame_equal(result.iloc[:, :4], df)
tm.assert_frame_equal(result.iloc[:, 4:], df)
result = concat([df, df], axis=0)
tm.assert_frame_equal(result.iloc[:10], df)
tm.assert_frame_equal(result.iloc[10:], df)
# multi dtypes
df = concat(
[
DataFrame(np.random.randn(10, 4), columns=["A", "A", "B", "B"]),
DataFrame(
np.random.randint(0, 10, size=20).reshape(10, 2), columns=["A", "C"]
),
],
axis=1,
)
result = concat([df, df], axis=1)
tm.assert_frame_equal(result.iloc[:, :6], df)
tm.assert_frame_equal(result.iloc[:, 6:], df)
result = concat([df, df], axis=0)
tm.assert_frame_equal(result.iloc[:10], df)
tm.assert_frame_equal(result.iloc[10:], df)
# append
result = df.iloc[0:8, :].append(df.iloc[8:])
tm.assert_frame_equal(result, df)
result = df.iloc[0:8, :].append(df.iloc[8:9]).append(df.iloc[9:10])
tm.assert_frame_equal(result, df)
expected = concat([df, df], axis=0)
result = df.append(df)
tm.assert_frame_equal(result, expected)
def test_with_mixed_tuples(self, sort):
# 10697
# columns have mixed tuples, so handle properly
df1 = DataFrame({"A": "foo", ("B", 1): "bar"}, index=range(2))
df2 = DataFrame({"B": "foo", ("B", 1): "bar"}, index=range(2))
# it works
concat([df1, df2], sort=sort)
def test_handle_empty_objects(self, sort):
df = DataFrame(np.random.randn(10, 4), columns=list("abcd"))
baz = df[:5].copy()
baz["foo"] = "bar"
empty = df[5:5]
frames = [baz, empty, empty, df[5:]]
concatted = concat(frames, axis=0, sort=sort)
expected = df.reindex(columns=["a", "b", "c", "d", "foo"])
expected["foo"] = expected["foo"].astype("O")
expected.loc[0:4, "foo"] = "bar"
tm.assert_frame_equal(concatted, expected)
# empty as first element with time series
# GH3259
df = DataFrame(
dict(A=range(10000)), index=date_range("20130101", periods=10000, freq="s")
)
empty = DataFrame()
result = concat([df, empty], axis=1)
tm.assert_frame_equal(result, df)
result = concat([empty, df], axis=1)
tm.assert_frame_equal(result, df)
result = concat([df, empty])
tm.assert_frame_equal(result, df)
result = concat([empty, df])
tm.assert_frame_equal(result, df)
def test_concat_mixed_objs(self):
# concat mixed series/frames
# G2385
# axis 1
index = date_range("01-Jan-2013", periods=10, freq="H")
arr = np.arange(10, dtype="int64")
s1 = Series(arr, index=index)
s2 = Series(arr, index=index)
df = DataFrame(arr.reshape(-1, 1), index=index)
expected = DataFrame(
np.repeat(arr, 2).reshape(-1, 2), index=index, columns=[0, 0]
)
result = concat([df, df], axis=1)
tm.assert_frame_equal(result, expected)
expected = DataFrame(
np.repeat(arr, 2).reshape(-1, 2), index=index, columns=[0, 1]
)
result = concat([s1, s2], axis=1)
tm.assert_frame_equal(result, expected)
expected = DataFrame(
np.repeat(arr, 3).reshape(-1, 3), index=index, columns=[0, 1, 2]
)
result = concat([s1, s2, s1], axis=1)
tm.assert_frame_equal(result, expected)
expected = DataFrame(
np.repeat(arr, 5).reshape(-1, 5), index=index, columns=[0, 0, 1, 2, 3]
)
result = concat([s1, df, s2, s2, s1], axis=1)
tm.assert_frame_equal(result, expected)
# with names
s1.name = "foo"
expected = DataFrame(
np.repeat(arr, 3).reshape(-1, 3), index=index, columns=["foo", 0, 0]
)
result = concat([s1, df, s2], axis=1)
tm.assert_frame_equal(result, expected)
s2.name = "bar"
expected = DataFrame(
np.repeat(arr, 3).reshape(-1, 3), index=index, columns=["foo", 0, "bar"]
)
result = concat([s1, df, s2], axis=1)
tm.assert_frame_equal(result, expected)
# ignore index
expected = DataFrame(
np.repeat(arr, 3).reshape(-1, 3), index=index, columns=[0, 1, 2]
)
result = concat([s1, df, s2], axis=1, ignore_index=True)
tm.assert_frame_equal(result, expected)
# axis 0
expected = DataFrame(
np.tile(arr, 3).reshape(-1, 1), index=index.tolist() * 3, columns=[0]
)
result = concat([s1, df, s2])
tm.assert_frame_equal(result, expected)
expected = DataFrame(np.tile(arr, 3).reshape(-1, 1), columns=[0])
result = concat([s1, df, s2], ignore_index=True)
tm.assert_frame_equal(result, expected)
def test_empty_dtype_coerce(self):
# xref to #12411
# xref to #12045
# xref to #11594
# see below
# 10571
df1 = DataFrame(data=[[1, None], [2, None]], columns=["a", "b"])
df2 = DataFrame(data=[[3, None], [4, None]], columns=["a", "b"])
result = concat([df1, df2])
expected = df1.dtypes
tm.assert_series_equal(result.dtypes, expected)
def test_dtype_coerceion(self):
# 12411
df = DataFrame({"date": [pd.Timestamp("20130101").tz_localize("UTC"), pd.NaT]})
result = concat([df.iloc[[0]], df.iloc[[1]]])
tm.assert_series_equal(result.dtypes, df.dtypes)
# 12045
import datetime
df = DataFrame(
{"date": [datetime.datetime(2012, 1, 1), datetime.datetime(1012, 1, 2)]}
)
result = concat([df.iloc[[0]], df.iloc[[1]]])
tm.assert_series_equal(result.dtypes, df.dtypes)
# 11594
df = DataFrame({"text": ["some words"] + [None] * 9})
result = concat([df.iloc[[0]], df.iloc[[1]]])
tm.assert_series_equal(result.dtypes, df.dtypes)
def test_concat_series(self):
ts = tm.makeTimeSeries()
ts.name = "foo"
pieces = [ts[:5], ts[5:15], ts[15:]]
result = concat(pieces)
tm.assert_series_equal(result, ts)
assert result.name == ts.name
result = concat(pieces, keys=[0, 1, 2])
expected = ts.copy()
ts.index = DatetimeIndex(np.array(ts.index.values, dtype="M8[ns]"))
exp_codes = [np.repeat([0, 1, 2], [len(x) for x in pieces]), np.arange(len(ts))]
exp_index = MultiIndex(levels=[[0, 1, 2], ts.index], codes=exp_codes)
expected.index = exp_index
tm.assert_series_equal(result, expected)
def test_concat_series_axis1(self, sort):
ts = tm.makeTimeSeries()
pieces = [ts[:-2], ts[2:], ts[2:-2]]
result = concat(pieces, axis=1)
expected = DataFrame(pieces).T
tm.assert_frame_equal(result, expected)
result = concat(pieces, keys=["A", "B", "C"], axis=1)
expected = DataFrame(pieces, index=["A", "B", "C"]).T
tm.assert_frame_equal(result, expected)
# preserve series names, #2489
s = Series(randn(5), name="A")
s2 = Series(randn(5), name="B")
result = concat([s, s2], axis=1)
expected = DataFrame({"A": s, "B": s2})
tm.assert_frame_equal(result, expected)
s2.name = None
result = concat([s, s2], axis=1)
tm.assert_index_equal(result.columns, Index(["A", 0], dtype="object"))
# must reindex, #2603
s = Series(randn(3), index=["c", "a", "b"], name="A")
s2 = Series(randn(4), index=["d", "a", "b", "c"], name="B")
result = concat([s, s2], axis=1, sort=sort)
expected = DataFrame({"A": s, "B": s2})
tm.assert_frame_equal(result, expected)
def test_concat_series_axis1_names_applied(self):
# ensure names argument is not ignored on axis=1, #23490
s = Series([1, 2, 3])
s2 = Series([4, 5, 6])
result = concat([s, s2], axis=1, keys=["a", "b"], names=["A"])
expected = DataFrame(
[[1, 4], [2, 5], [3, 6]], columns=Index(["a", "b"], name="A")
)
tm.assert_frame_equal(result, expected)
result = concat([s, s2], axis=1, keys=[("a", 1), ("b", 2)], names=["A", "B"])
expected = DataFrame(
[[1, 4], [2, 5], [3, 6]],
columns=MultiIndex.from_tuples([("a", 1), ("b", 2)], names=["A", "B"]),
)
tm.assert_frame_equal(result, expected)
def test_concat_single_with_key(self):
df = DataFrame(np.random.randn(10, 4))
result = concat([df], keys=["foo"])
expected = concat([df, df], keys=["foo", "bar"])
tm.assert_frame_equal(result, expected[:10])
def test_concat_exclude_none(self):
df = DataFrame(np.random.randn(10, 4))
pieces = [df[:5], None, None, df[5:]]
result = concat(pieces)
tm.assert_frame_equal(result, df)
with pytest.raises(ValueError, match="All objects passed were None"):
concat([None, None])
def test_concat_timedelta64_block(self):
from pandas import to_timedelta
rng = to_timedelta(np.arange(10), unit="s")
df = DataFrame({"time": rng})
result = concat([df, df])
assert (result.iloc[:10]["time"] == rng).all()
assert (result.iloc[10:]["time"] == rng).all()
def test_concat_keys_with_none(self):
# #1649
df0 = DataFrame([[10, 20, 30], [10, 20, 30], [10, 20, 30]])
result = concat(dict(a=None, b=df0, c=df0[:2], d=df0[:1], e=df0))
expected = concat(dict(b=df0, c=df0[:2], d=df0[:1], e=df0))
tm.assert_frame_equal(result, expected)
result = concat(
[None, df0, df0[:2], df0[:1], df0], keys=["a", "b", "c", "d", "e"]
)
expected = concat([df0, df0[:2], df0[:1], df0], keys=["b", "c", "d", "e"])
tm.assert_frame_equal(result, expected)
def test_concat_bug_1719(self):
ts1 = tm.makeTimeSeries()
ts2 = tm.makeTimeSeries()[::2]
# to join with union
# these two are of different length!
left = concat([ts1, ts2], join="outer", axis=1)
right = concat([ts2, ts1], join="outer", axis=1)
assert len(left) == len(right)
def test_concat_bug_2972(self):
ts0 = Series(np.zeros(5))
ts1 = Series(np.ones(5))
ts0.name = ts1.name = "same name"
result = concat([ts0, ts1], axis=1)
expected = DataFrame({0: ts0, 1: ts1})
expected.columns = ["same name", "same name"]
tm.assert_frame_equal(result, expected)
def test_concat_bug_3602(self):
# GH 3602, duplicate columns
df1 = DataFrame(
{
"firmNo": [0, 0, 0, 0],
"prc": [6, 6, 6, 6],
"stringvar": ["rrr", "rrr", "rrr", "rrr"],
}
)
df2 = DataFrame(
{"C": [9, 10, 11, 12], "misc": [1, 2, 3, 4], "prc": [6, 6, 6, 6]}
)
expected = DataFrame(
[
[0, 6, "rrr", 9, 1, 6],
[0, 6, "rrr", 10, 2, 6],
[0, 6, "rrr", 11, 3, 6],
[0, 6, "rrr", 12, 4, 6],
]
)
expected.columns = ["firmNo", "prc", "stringvar", "C", "misc", "prc"]
result = concat([df1, df2], axis=1)
tm.assert_frame_equal(result, expected)
def test_concat_inner_join_empty(self):
# GH 15328
df_empty = DataFrame()
df_a = DataFrame({"a": [1, 2]}, index=[0, 1], dtype="int64")
df_expected = DataFrame({"a": []}, index=[], dtype="int64")
for how, expected in [("inner", df_expected), ("outer", df_a)]:
result = pd.concat([df_a, df_empty], axis=1, join=how)
tm.assert_frame_equal(result, expected)
def test_concat_series_axis1_same_names_ignore_index(self):
dates = date_range("01-Jan-2013", "01-Jan-2014", freq="MS")[0:-1]
s1 = Series(randn(len(dates)), index=dates, name="value")
s2 = Series(randn(len(dates)), index=dates, name="value")
result = concat([s1, s2], axis=1, ignore_index=True)
expected = Index([0, 1])
tm.assert_index_equal(result.columns, expected)
def test_concat_iterables(self):
# GH8645 check concat works with tuples, list, generators, and weird
# stuff like deque and custom iterables
df1 = DataFrame([1, 2, 3])
df2 = DataFrame([4, 5, 6])
expected = DataFrame([1, 2, 3, 4, 5, 6])
tm.assert_frame_equal(concat((df1, df2), ignore_index=True), expected)
tm.assert_frame_equal(concat([df1, df2], ignore_index=True), expected)
tm.assert_frame_equal(
concat((df for df in (df1, df2)), ignore_index=True), expected
)
tm.assert_frame_equal(concat(deque((df1, df2)), ignore_index=True), expected)
class CustomIterator1:
def __len__(self) -> int:
return 2
def __getitem__(self, index):
try:
return {0: df1, 1: df2}[index]
except KeyError as err:
raise IndexError from err
tm.assert_frame_equal(pd.concat(CustomIterator1(), ignore_index=True), expected)
class CustomIterator2(abc.Iterable):
def __iter__(self):
yield df1
yield df2
tm.assert_frame_equal(pd.concat(CustomIterator2(), ignore_index=True), expected)
def test_concat_invalid(self):
# trying to concat a ndframe with a non-ndframe
df1 = tm.makeCustomDataframe(10, 2)
for obj in [1, dict(), [1, 2], (1, 2)]:
msg = (
f"cannot concatenate object of type '{type(obj)}'; "
"only Series and DataFrame objs are valid"
)
with pytest.raises(TypeError, match=msg):
concat([df1, obj])
def test_concat_invalid_first_argument(self):
df1 = tm.makeCustomDataframe(10, 2)
df2 = tm.makeCustomDataframe(10, 2)
msg = (
"first argument must be an iterable of pandas "
'objects, you passed an object of type "DataFrame"'
)
with pytest.raises(TypeError, match=msg):
concat(df1, df2)
# generator ok though
concat(DataFrame(np.random.rand(5, 5)) for _ in range(3))
# text reader ok
# GH6583
data = """index,A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
qux,12,13,14,15
foo2,12,13,14,15
bar2,12,13,14,15
"""
reader = read_csv(StringIO(data), chunksize=1)
result = concat(reader, ignore_index=True)
expected = read_csv(StringIO(data))
tm.assert_frame_equal(result, expected)
def test_concat_empty_series(self):
# GH 11082
s1 = Series([1, 2, 3], name="x")
s2 = Series(name="y", dtype="float64")
res = pd.concat([s1, s2], axis=1)
exp = DataFrame(
{"x": [1, 2, 3], "y": [np.nan, np.nan, np.nan]},
index=Index([0, 1, 2], dtype="O"),
)
tm.assert_frame_equal(res, exp)
s1 = Series([1, 2, 3], name="x")
s2 = Series(name="y", dtype="float64")
res = pd.concat([s1, s2], axis=0)
# name will be reset
exp = Series([1, 2, 3])
tm.assert_series_equal(res, exp)
# empty Series with no name
s1 = Series([1, 2, 3], name="x")
s2 = Series(name=None, dtype="float64")
res = pd.concat([s1, s2], axis=1)
exp = DataFrame(
{"x": [1, 2, 3], 0: [np.nan, np.nan, np.nan]},
columns=["x", 0],
index=Index([0, 1, 2], dtype="O"),
)
tm.assert_frame_equal(res, exp)
@pytest.mark.parametrize("tz", [None, "UTC"])
@pytest.mark.parametrize("values", [[], [1, 2, 3]])
def test_concat_empty_series_timelike(self, tz, values):
# GH 18447
first = Series([], dtype="M8[ns]").dt.tz_localize(tz)
dtype = None if values else np.float64
second = Series(values, dtype=dtype)
expected = DataFrame(
{
0: Series([pd.NaT] * len(values), dtype="M8[ns]").dt.tz_localize(tz),
1: values,
}
)
result = concat([first, second], axis=1)
tm.assert_frame_equal(result, expected)
def test_default_index(self):
# is_series and ignore_index
s1 = Series([1, 2, 3], name="x")
s2 = Series([4, 5, 6], name="y")
res = pd.concat([s1, s2], axis=1, ignore_index=True)
assert isinstance(res.columns, pd.RangeIndex)
exp = DataFrame([[1, 4], [2, 5], [3, 6]])
# use check_index_type=True to check the result have
# RangeIndex (default index)
tm.assert_frame_equal(res, exp, check_index_type=True, check_column_type=True)
# is_series and all inputs have no names
s1 = Series([1, 2, 3])
s2 = Series([4, 5, 6])
res = pd.concat([s1, s2], axis=1, ignore_index=False)
assert isinstance(res.columns, pd.RangeIndex)
exp = DataFrame([[1, 4], [2, 5], [3, 6]])
exp.columns = pd.RangeIndex(2)
tm.assert_frame_equal(res, exp, check_index_type=True, check_column_type=True)
# is_dataframe and ignore_index
df1 = DataFrame({"A": [1, 2], "B": [5, 6]})
df2 = DataFrame({"A": [3, 4], "B": [7, 8]})
res = pd.concat([df1, df2], axis=0, ignore_index=True)
exp = DataFrame([[1, 5], [2, 6], [3, 7], [4, 8]], columns=["A", "B"])
tm.assert_frame_equal(res, exp, check_index_type=True, check_column_type=True)
res = pd.concat([df1, df2], axis=1, ignore_index=True)
exp = DataFrame([[1, 5, 3, 7], [2, 6, 4, 8]])
tm.assert_frame_equal(res, exp, check_index_type=True, check_column_type=True)
def test_concat_multiindex_rangeindex(self):
# GH13542
# when multi-index levels are RangeIndex objects
# there is a bug in concat with objects of len 1
df = DataFrame(np.random.randn(9, 2))
df.index = MultiIndex(
levels=[pd.RangeIndex(3), pd.RangeIndex(3)],
codes=[np.repeat(np.arange(3), 3), np.tile(np.arange(3), 3)],
)
res = concat([df.iloc[[2, 3, 4], :], df.iloc[[5], :]])
exp = df.iloc[[2, 3, 4, 5], :]
tm.assert_frame_equal(res, exp)
def test_concat_multiindex_dfs_with_deepcopy(self):
# GH 9967
from copy import deepcopy
example_multiindex1 = pd.MultiIndex.from_product([["a"], ["b"]])
example_dataframe1 = DataFrame([0], index=example_multiindex1)
example_multiindex2 = pd.MultiIndex.from_product([["a"], ["c"]])
example_dataframe2 = DataFrame([1], index=example_multiindex2)
example_dict = {"s1": example_dataframe1, "s2": example_dataframe2}
expected_index = pd.MultiIndex(
levels=[["s1", "s2"], ["a"], ["b", "c"]],
codes=[[0, 1], [0, 0], [0, 1]],
names=["testname", None, None],
)
expected = DataFrame([[0], [1]], index=expected_index)
result_copy = pd.concat(deepcopy(example_dict), names=["testname"])
tm.assert_frame_equal(result_copy, expected)
result_no_copy = pd.concat(example_dict, names=["testname"])
tm.assert_frame_equal(result_no_copy, expected)
class Preprocessing:
#Assumption 1 - Data Columns For Train & Test Will Be Same
#Assumption 2 - Ordinal & Bit Switches Will Not Be Pushed In Nominal Function
#Assumption 3 - Train Categorical Will Be SuperSet & Test Will Be SubSet, Else Model To Be ReCreated
def LoadData(self, FileName, HeaderMissing="No"):
# Supports excel,csv,tsv,xml,json,orc,parquet,avro
import pandas as pd
FileType = FileName.split(".")
FileType = FileType[len(FileType)-1].lower()
if FileType == 'xls':
if HeaderMissing =="Yes":
return pd.read_excel(FileName, header=None)
else:
return pd.read_excel(FileName)
if FileType == 'xlsx':
if HeaderMissing =="Yes":
return pd.read_excel(FileName, header=None)
else:
return pd.read_excel(FileName)
if FileType == 'csv':
if HeaderMissing =="Yes":
return pd.read_csv(FileName, header=None)
else:
return pd.read_csv(FileName)
if FileType == 'tsv':
if HeaderMissing =="Yes":
return pd.read_csv(FileName, header=None, sep='\t')
else:
return pd.read_csv(FileName, sep='\t')
if FileType == 'orc':
import pyarrow.orc as orc
return orc.ORCFile(FileName).read().to_pandas()
if FileType == 'parquet':
import pyarrow.parquet as parquet
return parquet.ParquetFile(FileName).read().to_pandas()
if FileType == 'avro':
import pandavro as pdx
return pdx.read_avro(FileName)
if FileType == 'json':
import json
from flatten_json import flatten
from pandas.io.json import json_normalize
with open(FileName) as RequiredFile:
    LoadedJson = json.load(RequiredFile)
if isinstance(LoadedJson, dict):
    if len(LoadedJson) > 1:
        DataFrame = json_normalize(flatten(LoadedJson))
    else:
        DataFrame = json_normalize(list(LoadedJson.values())[0])
else:
    FlattenedData = (flatten(_json) for _json in LoadedJson)
    DataFrame = pd.DataFrame(FlattenedData)
return DataFrame
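# Illustrative usage of the loader above (an assumption -- the file names are made up):
#     prep = Preprocessing()
#     df = prep.LoadData("insurance.csv")
#     df_no_header = prep.LoadData("raw_data.tsv", HeaderMissing="Yes")
# Note that the xml format mentioned in the comment at the top of the class is not
# handled in the code shown here.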
import pandas as pd
from sklearn import model_selection as skl
from sklearn.linear_model import LinearRegression
from sklearn.preprocessing import PolynomialFeatures
data = pd.read_csv('insurance.csv')
dframe = data.copy()
dframe['region'].fillna(method='bfill', inplace=True)
bmi_median_val = round(dframe['bmi'].median(), 2)
dframe['bmi'].fillna(bmi_median_val, inplace=True)
dframe['children'].fillna(0, inplace=True)
dframe = dframe[dframe['expenses'] < 51240]
dframe = dframe[dframe['expenses'] != 0]  # drop records with expense 0, i.e. the rows whose expense is to be predicted
dframe_dummy = pd.get_dummies(dframe)
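# get_dummies one-hot encodes the categorical columns, e.g. 'region' becomes indicator
# columns such as 'region_northeast', 'region_southwest', ... (the exact names depend on
# the values present), while numeric columns like bmi and children pass through unchanged.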
y = dframe_dummy.pop('expenses')
x = dframe_dummy
x_train, x_test, y_train, y_test = skl.train_test_split(x, y, test_size=0.0000001,
random_state=30)  # training with effectively the complete data (no meaningful test split)
# start of polynomial regression
poly_reg = PolynomialFeatures(degree=3)
x_poly = poly_reg.fit_transform(x_train)
reg_2 = LinearRegression()
reg_2.fit(x_poly, y_train)
# end of polynomial regression
# getting data for which expenses needs to be predicted
sample = pd.read_csv('insurance.csv')
dsample_dummy = pd.get_dummies(sample)
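# Sketch of the remaining prediction step (an assumption, not part of the original script):
# align the dummy-encoded sample with the training design matrix, then reuse the fitted
# degree-3 polynomial model. Assumes the sample rows carry no missing values; otherwise
# apply the same imputation as above first. The 'predicted_expenses' column name is illustrative.
dsample_dummy = dsample_dummy.reindex(columns=x.columns, fill_value=0)
sample['predicted_expenses'] = reg_2.predict(poly_reg.transform(dsample_dummy))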
import csv
from io import StringIO
import os
import numpy as np
import pytest
from pandas.errors import ParserError
import pandas as pd
from pandas import (
DataFrame,
Index,
MultiIndex,
NaT,
Series,
Timestamp,
date_range,
read_csv,
to_datetime,
)
import pandas._testing as tm
import pandas.core.common as com
from pandas.io.common import get_handle
MIXED_FLOAT_DTYPES = ["float16", "float32", "float64"]
MIXED_INT_DTYPES = [
"uint8",
"uint16",
"uint32",
"uint64",
"int8",
"int16",
"int32",
"int64",
]
class TestDataFrameToCSV:
def read_csv(self, path, **kwargs):
params = {"index_col": 0, "parse_dates": True}
params.update(**kwargs)
return read_csv(path, **params)
def test_to_csv_from_csv1(self, float_frame, datetime_frame):
with tm.ensure_clean("__tmp_to_csv_from_csv1__") as path:
float_frame["A"][:5] = np.nan
float_frame.to_csv(path)
float_frame.to_csv(path, columns=["A", "B"])
float_frame.to_csv(path, header=False)
float_frame.to_csv(path, index=False)
# test roundtrip
# freq does not roundtrip
datetime_frame.index = datetime_frame.index._with_freq(None)
datetime_frame.to_csv(path)
recons = self.read_csv(path)
tm.assert_frame_equal(datetime_frame, recons)
datetime_frame.to_csv(path, index_label="index")
recons = self.read_csv(path, index_col=None)
assert len(recons.columns) == len(datetime_frame.columns) + 1
# no index
datetime_frame.to_csv(path, index=False)
recons = self.read_csv(path, index_col=None)
tm.assert_almost_equal(datetime_frame.values, recons.values)
# corner case
dm = DataFrame(
{
"s1": Series(range(3), index=np.arange(3)),
"s2": Series(range(2), index=np.arange(2)),
}
)
dm.to_csv(path)
recons = self.read_csv(path)
tm.assert_frame_equal(dm, recons)
def test_to_csv_from_csv2(self, float_frame):
with tm.ensure_clean("__tmp_to_csv_from_csv2__") as path:
# duplicate index
df = DataFrame(
np.random.randn(3, 3), index=["a", "a", "b"], columns=["x", "y", "z"]
)
df.to_csv(path)
result = self.read_csv(path)
tm.assert_frame_equal(result, df)
midx = MultiIndex.from_tuples([("A", 1, 2), ("A", 1, 2), ("B", 1, 2)])
df = DataFrame(np.random.randn(3, 3), index=midx, columns=["x", "y", "z"])
df.to_csv(path)
result = self.read_csv(path, index_col=[0, 1, 2], parse_dates=False)
tm.assert_frame_equal(result, df, check_names=False)
# column aliases
col_aliases = Index(["AA", "X", "Y", "Z"])
float_frame.to_csv(path, header=col_aliases)
rs = self.read_csv(path)
xp = float_frame.copy()
xp.columns = col_aliases
tm.assert_frame_equal(xp, rs)
msg = "Writing 4 cols but got 2 aliases"
with pytest.raises(ValueError, match=msg):
float_frame.to_csv(path, header=["AA", "X"])
def test_to_csv_from_csv3(self):
with tm.ensure_clean("__tmp_to_csv_from_csv3__") as path:
df1 = DataFrame(np.random.randn(3, 1))
df2 = DataFrame(np.random.randn(3, 1))
df1.to_csv(path)
df2.to_csv(path, mode="a", header=False)
xp = pd.concat([df1, df2])
rs = read_csv(path, index_col=0)
rs.columns = [int(label) for label in rs.columns]
xp.columns = [int(label) for label in xp.columns]
tm.assert_frame_equal(xp, rs)
def test_to_csv_from_csv4(self):
with tm.ensure_clean("__tmp_to_csv_from_csv4__") as path:
# GH 10833 (TimedeltaIndex formatting)
dt = pd.Timedelta(seconds=1)
df = DataFrame(
{"dt_data": [i * dt for i in range(3)]},
index=Index([i * dt for i in range(3)], name="dt_index"),
)
df.to_csv(path)
result = read_csv(path, index_col="dt_index")
result.index = pd.to_timedelta(result.index)
result["dt_data"] = pd.to_timedelta(result["dt_data"])
tm.assert_frame_equal(df, result, check_index_type=True)
def test_to_csv_from_csv5(self, timezone_frame):
# tz, 8260
with tm.ensure_clean("__tmp_to_csv_from_csv5__") as path:
timezone_frame.to_csv(path)
result = read_csv(path, index_col=0, parse_dates=["A"])
converter = (
lambda c: to_datetime(result[c])
.dt.tz_convert("UTC")
.dt.tz_convert(timezone_frame[c].dt.tz)
)
result["B"] = converter("B")
result["C"] = converter("C")
tm.assert_frame_equal(result, timezone_frame)
def test_to_csv_cols_reordering(self):
# GH3454
chunksize = 5
N = int(chunksize * 2.5)
df = tm.makeCustomDataframe(N, 3)
cs = df.columns
cols = [cs[2], cs[0]]
with tm.ensure_clean() as path:
df.to_csv(path, columns=cols, chunksize=chunksize)
rs_c = read_csv(path, index_col=0)
tm.assert_frame_equal(df[cols], rs_c, check_names=False)
def test_to_csv_new_dupe_cols(self):
def _check_df(df, cols=None):
with tm.ensure_clean() as path:
df.to_csv(path, columns=cols, chunksize=chunksize)
rs_c = read_csv(path, index_col=0)
# we wrote them in a different order
# so compare them in that order
if cols is not None:
if df.columns.is_unique:
rs_c.columns = cols
else:
indexer, missing = df.columns.get_indexer_non_unique(cols)
rs_c.columns = df.columns.take(indexer)
for c in cols:
obj_df = df[c]
obj_rs = rs_c[c]
if isinstance(obj_df, Series):
tm.assert_series_equal(obj_df, obj_rs)
else:
tm.assert_frame_equal(obj_df, obj_rs, check_names=False)
# wrote in the same order
else:
rs_c.columns = df.columns
tm.assert_frame_equal(df, rs_c, check_names=False)
chunksize = 5
N = int(chunksize * 2.5)
# dupe cols
df = tm.makeCustomDataframe(N, 3)
df.columns = ["a", "a", "b"]
_check_df(df, None)
# dupe cols with selection
cols = ["b", "a"]
_check_df(df, cols)
@pytest.mark.slow
def test_to_csv_dtnat(self):
# GH3437
def make_dtnat_arr(n, nnat=None):
if nnat is None:
nnat = int(n * 0.1) # 10%
s = list(date_range("2000", freq="5min", periods=n))
if nnat:
for i in np.random.randint(0, len(s), nnat):
s[i] = NaT
i = np.random.randint(100)
s[-i] = NaT
s[i] = NaT
return s
chunksize = 1000
# N=35000
s1 = make_dtnat_arr(chunksize + 5)
s2 = make_dtnat_arr(chunksize + 5, 0)
# s3=make_dtnjat_arr(chunksize+5,0)
with tm.ensure_clean("1.csv") as pth:
df = DataFrame({"a": s1, "b": s2})
df.to_csv(pth, chunksize=chunksize)
recons = self.read_csv(pth).apply(to_datetime)
tm.assert_frame_equal(df, recons, check_names=False)
@pytest.mark.slow
def test_to_csv_moar(self):
def _do_test(
df, r_dtype=None, c_dtype=None, rnlvl=None, cnlvl=None, dupe_col=False
):
kwargs = {"parse_dates": False}
if cnlvl:
if rnlvl is not None:
kwargs["index_col"] = list(range(rnlvl))
kwargs["header"] = list(range(cnlvl))
with tm.ensure_clean("__tmp_to_csv_moar__") as path:
df.to_csv(path, encoding="utf8", chunksize=chunksize)
recons = self.read_csv(path, **kwargs)
else:
kwargs["header"] = 0
with tm.ensure_clean("__tmp_to_csv_moar__") as path:
df.to_csv(path, encoding="utf8", chunksize=chunksize)
recons = self.read_csv(path, **kwargs)
def _to_uni(x):
if not isinstance(x, str):
return x.decode("utf8")
return x
if dupe_col:
                # read_csv disambiguates the columns by
# labeling them dupe.1,dupe.2, etc'. monkey patch columns
recons.columns = df.columns
if rnlvl and not cnlvl:
delta_lvl = [recons.iloc[:, i].values for i in range(rnlvl - 1)]
ix = MultiIndex.from_arrays([list(recons.index)] + delta_lvl)
recons.index = ix
recons = recons.iloc[:, rnlvl - 1 :]
type_map = {"i": "i", "f": "f", "s": "O", "u": "O", "dt": "O", "p": "O"}
if r_dtype:
if r_dtype == "u": # unicode
r_dtype = "O"
recons.index = np.array(
[_to_uni(label) for label in recons.index], dtype=r_dtype
)
df.index = np.array(
[_to_uni(label) for label in df.index], dtype=r_dtype
)
elif r_dtype == "dt": # unicode
r_dtype = "O"
recons.index = np.array(
[Timestamp(label) for label in recons.index], dtype=r_dtype
)
df.index = np.array(
[Timestamp(label) for label in df.index], dtype=r_dtype
)
elif r_dtype == "p":
r_dtype = "O"
idx_list = to_datetime(recons.index)
recons.index = np.array(
[Timestamp(label) for label in idx_list], dtype=r_dtype
)
df.index = np.array(
list(map(Timestamp, df.index.to_timestamp())), dtype=r_dtype
)
else:
r_dtype = type_map.get(r_dtype)
recons.index = np.array(recons.index, dtype=r_dtype)
df.index = np.array(df.index, dtype=r_dtype)
if c_dtype:
if c_dtype == "u":
c_dtype = "O"
recons.columns = np.array(
[_to_uni(label) for label in recons.columns], dtype=c_dtype
)
df.columns = np.array(
[_to_uni(label) for label in df.columns], dtype=c_dtype
)
elif c_dtype == "dt":
c_dtype = "O"
recons.columns = np.array(
[Timestamp(label) for label in recons.columns], dtype=c_dtype
)
df.columns = np.array(
[Timestamp(label) for label in df.columns], dtype=c_dtype
)
elif c_dtype == "p":
c_dtype = "O"
col_list = to_datetime(recons.columns)
recons.columns = np.array(
[Timestamp(label) for label in col_list], dtype=c_dtype
)
col_list = df.columns.to_timestamp()
df.columns = np.array(
[Timestamp(label) for label in col_list], dtype=c_dtype
)
else:
c_dtype = type_map.get(c_dtype)
recons.columns = np.array(recons.columns, dtype=c_dtype)
df.columns = np.array(df.columns, dtype=c_dtype)
tm.assert_frame_equal(df, recons, check_names=False)
N = 100
chunksize = 1000
ncols = 4
base = chunksize // ncols
for nrows in [
2,
10,
N - 1,
N,
N + 1,
N + 2,
2 * N - 2,
2 * N - 1,
2 * N,
2 * N + 1,
2 * N + 2,
base - 1,
base,
base + 1,
]:
_do_test(
tm.makeCustomDataframe(nrows, ncols, r_idx_type="dt", c_idx_type="s"),
"dt",
"s",
)
for r_idx_type, c_idx_type in [("i", "i"), ("s", "s"), ("u", "dt"), ("p", "p")]:
for ncols in [1, 2, 3, 4]:
base = chunksize // ncols
for nrows in [
2,
10,
N - 1,
N,
N + 1,
N + 2,
2 * N - 2,
2 * N - 1,
2 * N,
2 * N + 1,
2 * N + 2,
base - 1,
base,
base + 1,
]:
_do_test(
tm.makeCustomDataframe(
nrows, ncols, r_idx_type=r_idx_type, c_idx_type=c_idx_type
),
r_idx_type,
c_idx_type,
)
for ncols in [1, 2, 3, 4]:
base = chunksize // ncols
for nrows in [
10,
N - 2,
N - 1,
N,
N + 1,
N + 2,
2 * N - 2,
2 * N - 1,
2 * N,
2 * N + 1,
2 * N + 2,
base - 1,
base,
base + 1,
]:
_do_test(tm.makeCustomDataframe(nrows, ncols))
for nrows in [10, N - 2, N - 1, N, N + 1, N + 2]:
df = tm.makeCustomDataframe(nrows, 3)
cols = list(df.columns)
cols[:2] = ["dupe", "dupe"]
cols[-2:] = ["dupe", "dupe"]
ix = list(df.index)
ix[:2] = ["rdupe", "rdupe"]
ix[-2:] = ["rdupe", "rdupe"]
df.index = ix
df.columns = cols
_do_test(df, dupe_col=True)
_do_test(DataFrame(index=np.arange(10)))
_do_test(
tm.makeCustomDataframe(chunksize // 2 + 1, 2, r_idx_nlevels=2), rnlvl=2
)
for ncols in [2, 3, 4]:
base = int(chunksize // ncols)
for nrows in [
10,
N - 2,
N - 1,
N,
N + 1,
N + 2,
2 * N - 2,
2 * N - 1,
2 * N,
2 * N + 1,
2 * N + 2,
base - 1,
base,
base + 1,
]:
_do_test(tm.makeCustomDataframe(nrows, ncols, r_idx_nlevels=2), rnlvl=2)
_do_test(tm.makeCustomDataframe(nrows, ncols, c_idx_nlevels=2), cnlvl=2)
_do_test(
tm.makeCustomDataframe(
nrows, ncols, r_idx_nlevels=2, c_idx_nlevels=2
),
rnlvl=2,
cnlvl=2,
)
def test_to_csv_from_csv_w_some_infs(self, float_frame):
# test roundtrip with inf, -inf, nan, as full columns and mix
float_frame["G"] = np.nan
f = lambda x: [np.inf, np.nan][np.random.rand() < 0.5]
float_frame["H"] = float_frame.index.map(f)
with tm.ensure_clean() as path:
float_frame.to_csv(path)
recons = self.read_csv(path)
tm.assert_frame_equal(float_frame, recons)
tm.assert_frame_equal(np.isinf(float_frame), np.isinf(recons))
def test_to_csv_from_csv_w_all_infs(self, float_frame):
# test roundtrip with inf, -inf, nan, as full columns and mix
float_frame["E"] = np.inf
float_frame["F"] = -np.inf
with tm.ensure_clean() as path:
float_frame.to_csv(path)
recons = self.read_csv(path)
tm.assert_frame_equal(float_frame, recons)
tm.assert_frame_equal(np.isinf(float_frame), np.isinf(recons))
def test_to_csv_no_index(self):
# GH 3624, after appending columns, to_csv fails
with tm.ensure_clean("__tmp_to_csv_no_index__") as path:
df = DataFrame({"c1": [1, 2, 3], "c2": [4, 5, 6]})
df.to_csv(path, index=False)
result = read_csv(path)
tm.assert_frame_equal(df, result)
df["c3"] = Series([7, 8, 9], dtype="int64")
df.to_csv(path, index=False)
result = read_csv(path)
tm.assert_frame_equal(df, result)
def test_to_csv_with_mix_columns(self):
# gh-11637: incorrect output when a mix of integer and string column
# names passed as columns parameter in to_csv
df = DataFrame({0: ["a", "b", "c"], 1: ["aa", "bb", "cc"]})
df["test"] = "txt"
assert df.to_csv() == df.to_csv(columns=[0, 1, "test"])
def test_to_csv_headers(self):
# GH6186, the presence or absence of `index` incorrectly
# causes to_csv to have different header semantics.
from_df = DataFrame([[1, 2], [3, 4]], columns=["A", "B"])
to_df = DataFrame([[1, 2], [3, 4]], columns=["X", "Y"])
with tm.ensure_clean("__tmp_to_csv_headers__") as path:
from_df.to_csv(path, header=["X", "Y"])
recons = self.read_csv(path)
tm.assert_frame_equal(to_df, recons)
from_df.to_csv(path, index=False, header=["X", "Y"])
recons = self.read_csv(path)
return_value = recons.reset_index(inplace=True)
assert return_value is None
tm.assert_frame_equal(to_df, recons)
def test_to_csv_multiindex(self, float_frame, datetime_frame):
frame = float_frame
old_index = frame.index
arrays = np.arange(len(old_index) * 2).reshape(2, -1)
new_index = MultiIndex.from_arrays(arrays, names=["first", "second"])
frame.index = new_index
with tm.ensure_clean("__tmp_to_csv_multiindex__") as path:
frame.to_csv(path, header=False)
frame.to_csv(path, columns=["A", "B"])
# round trip
frame.to_csv(path)
df = self.read_csv(path, index_col=[0, 1], parse_dates=False)
# TODO to_csv drops column name
tm.assert_frame_equal(frame, df, check_names=False)
assert frame.index.names == df.index.names
# needed if setUp becomes a class method
float_frame.index = old_index
# try multiindex with dates
tsframe = datetime_frame
old_index = tsframe.index
new_index = [old_index, np.arange(len(old_index))]
tsframe.index = MultiIndex.from_arrays(new_index)
tsframe.to_csv(path, index_label=["time", "foo"])
recons = self.read_csv(path, index_col=[0, 1])
# TODO to_csv drops column name
tm.assert_frame_equal(tsframe, recons, check_names=False)
# do not load index
tsframe.to_csv(path)
recons = self.read_csv(path, index_col=None)
assert len(recons.columns) == len(tsframe.columns) + 2
# no index
tsframe.to_csv(path, index=False)
recons = self.read_csv(path, index_col=None)
tm.assert_almost_equal(recons.values, datetime_frame.values)
# needed if setUp becomes class method
datetime_frame.index = old_index
with tm.ensure_clean("__tmp_to_csv_multiindex__") as path:
# GH3571, GH1651, GH3141
def _make_frame(names=None):
if names is True:
names = ["first", "second"]
return DataFrame(
np.random.randint(0, 10, size=(3, 3)),
columns=MultiIndex.from_tuples(
[("bah", "foo"), ("bah", "bar"), ("ban", "baz")], names=names
),
dtype="int64",
)
# column & index are multi-index
df = tm.makeCustomDataframe(5, 3, r_idx_nlevels=2, c_idx_nlevels=4)
df.to_csv(path)
result = read_csv(path, header=[0, 1, 2, 3], index_col=[0, 1])
tm.assert_frame_equal(df, result)
# column is mi
df = tm.makeCustomDataframe(5, 3, r_idx_nlevels=1, c_idx_nlevels=4)
df.to_csv(path)
result = read_csv(path, header=[0, 1, 2, 3], index_col=0)
tm.assert_frame_equal(df, result)
# dup column names?
df = tm.makeCustomDataframe(5, 3, r_idx_nlevels=3, c_idx_nlevels=4)
df.to_csv(path)
result = read_csv(path, header=[0, 1, 2, 3], index_col=[0, 1, 2])
tm.assert_frame_equal(df, result)
# writing with no index
df = _make_frame()
df.to_csv(path, index=False)
result = read_csv(path, header=[0, 1])
tm.assert_frame_equal(df, result)
# we lose the names here
df = _make_frame(True)
df.to_csv(path, index=False)
result = read_csv(path, header=[0, 1])
assert com.all_none(*result.columns.names)
result.columns.names = df.columns.names
tm.assert_frame_equal(df, result)
# whatsnew example
df = _make_frame()
df.to_csv(path)
result = read_csv(path, header=[0, 1], index_col=[0])
tm.assert_frame_equal(df, result)
df = _make_frame(True)
df.to_csv(path)
result = read_csv(path, header=[0, 1], index_col=[0])
tm.assert_frame_equal(df, result)
# invalid options
df = _make_frame(True)
df.to_csv(path)
for i in [6, 7]:
msg = f"len of {i}, but only 5 lines in file"
with pytest.raises(ParserError, match=msg):
read_csv(path, header=list(range(i)), index_col=0)
# write with cols
msg = "cannot specify cols with a MultiIndex"
with pytest.raises(TypeError, match=msg):
df.to_csv(path, columns=["foo", "bar"])
with tm.ensure_clean("__tmp_to_csv_multiindex__") as path:
# empty
tsframe[:0].to_csv(path)
recons = self.read_csv(path)
exp = tsframe[:0]
exp.index = []
tm.assert_index_equal(recons.columns, exp.columns)
assert len(recons) == 0
def test_to_csv_interval_index(self):
# GH 28210
df = DataFrame({"A": list("abc"), "B": range(3)}, index=pd.interval_range(0, 3))
with tm.ensure_clean("__tmp_to_csv_interval_index__.csv") as path:
df.to_csv(path)
result = self.read_csv(path, index_col=0)
# can't roundtrip intervalindex via read_csv so check string repr (GH 23595)
expected = df.copy()
expected.index = expected.index.astype(str)
tm.assert_frame_equal(result, expected)
def test_to_csv_float32_nanrep(self):
df = DataFrame(np.random.randn(1, 4).astype(np.float32))
df[1] = np.nan
with tm.ensure_clean("__tmp_to_csv_float32_nanrep__.csv") as path:
df.to_csv(path, na_rep=999)
with open(path) as f:
lines = f.readlines()
assert lines[1].split(",")[2] == "999"
def test_to_csv_withcommas(self):
# Commas inside fields should be correctly escaped when saving as CSV.
df = DataFrame({"A": [1, 2, 3], "B": ["5,6", "7,8", "9,0"]})
with tm.ensure_clean("__tmp_to_csv_withcommas__.csv") as path:
df.to_csv(path)
df2 = self.read_csv(path)
tm.assert_frame_equal(df2, df)
def test_to_csv_mixed(self):
def create_cols(name):
return [f"{name}{i:03d}" for i in range(5)]
df_float = DataFrame(
np.random.randn(100, 5), dtype="float64", columns=create_cols("float")
)
df_int = DataFrame(
np.random.randn(100, 5).astype("int64"),
dtype="int64",
columns=create_cols("int"),
)
df_bool = DataFrame(True, index=df_float.index, columns=create_cols("bool"))
df_object = DataFrame(
"foo", index=df_float.index, columns=create_cols("object")
)
df_dt = DataFrame(
Timestamp("20010101"), index=df_float.index, columns=create_cols("date")
)
# add in some nans
df_float.iloc[30:50, 1:3] = np.nan
# ## this is a bug in read_csv right now ####
# df_dt.loc[30:50,1:3] = np.nan
df = pd.concat([df_float, df_int, df_bool, df_object, df_dt], axis=1)
# dtype
dtypes = {}
for n, dtype in [
("float", np.float64),
("int", np.int64),
("bool", np.bool_),
("object", object),
]:
for c in create_cols(n):
dtypes[c] = dtype
with tm.ensure_clean() as filename:
df.to_csv(filename)
rs = read_csv(
filename, index_col=0, dtype=dtypes, parse_dates=create_cols("date")
)
tm.assert_frame_equal(rs, df)
def test_to_csv_dups_cols(self):
df = DataFrame(
np.random.randn(1000, 30),
columns=list(range(15)) + list(range(15)),
dtype="float64",
)
with tm.ensure_clean() as filename:
df.to_csv(filename) # single dtype, fine
result = read_csv(filename, index_col=0)
result.columns = df.columns
tm.assert_frame_equal(result, df)
df_float = DataFrame(np.random.randn(1000, 3), dtype="float64")
df_int = DataFrame(np.random.randn(1000, 3)).astype("int64")
df_bool = DataFrame(True, index=df_float.index, columns=range(3))
df_object = DataFrame("foo", index=df_float.index, columns=range(3))
df_dt = DataFrame(Timestamp("20010101"), index=df_float.index, columns=range(3))
df = pd.concat(
[df_float, df_int, df_bool, df_object, df_dt], axis=1, ignore_index=True
)
df.columns = [0, 1, 2] * 5
with tm.ensure_clean() as filename:
df.to_csv(filename)
result = read_csv(filename, index_col=0)
# date cols
for i in ["0.4", "1.4", "2.4"]:
result[i] = to_datetime(result[i])
result.columns = df.columns
tm.assert_frame_equal(result, df)
# GH3457
N = 10
df = tm.makeCustomDataframe(N, 3)
df.columns = ["a", "a", "b"]
with tm.ensure_clean() as filename:
df.to_csv(filename)
# read_csv will rename the dups columns
result = read_csv(filename, index_col=0)
result = result.rename(columns={"a.1": "a"})
tm.assert_frame_equal(result, df)
def test_to_csv_chunking(self):
aa = DataFrame({"A": range(100000)})
aa["B"] = aa.A + 1.0
aa["C"] = aa.A + 2.0
aa["D"] = aa.A + 3.0
for chunksize in [10000, 50000, 100000]:
with tm.ensure_clean() as filename:
aa.to_csv(filename, chunksize=chunksize)
rs = read_csv(filename, index_col=0)
tm.assert_frame_equal(rs, aa)
@pytest.mark.slow
def test_to_csv_wide_frame_formatting(self):
# Issue #8621
df = DataFrame(np.random.randn(1, 100010), columns=None, index=None)
with tm.ensure_clean() as filename:
df.to_csv(filename, header=False, index=False)
rs = read_csv(filename, header=None)
tm.assert_frame_equal(rs, df)
def test_to_csv_bug(self):
f1 = StringIO("a,1.0\nb,2.0")
df = self.read_csv(f1, header=None)
newdf = DataFrame({"t": df[df.columns[0]]})
with tm.ensure_clean() as path:
newdf.to_csv(path)
recons = read_csv(path, index_col=0)
# don't check_names as t != 1
tm.assert_frame_equal(recons, newdf, check_names=False)
def test_to_csv_unicode(self):
df = DataFrame({"c/\u03c3": [1, 2, 3]})
with tm.ensure_clean() as path:
df.to_csv(path, encoding="UTF-8")
df2 = read_csv(path, index_col=0, encoding="UTF-8")
tm.assert_frame_equal(df, df2)
df.to_csv(path, encoding="UTF-8", index=False)
df2 = read_csv(path, index_col=None, encoding="UTF-8")
tm.assert_frame_equal(df, df2)
def test_to_csv_unicode_index_col(self):
buf = StringIO("")
df = DataFrame(
[["\u05d0", "d2", "d3", "d4"], ["a1", "a2", "a3", "a4"]],
columns=["\u05d0", "\u05d1", "\u05d2", "\u05d3"],
index=["\u05d0", "\u05d1"],
)
df.to_csv(buf, encoding="UTF-8")
buf.seek(0)
df2 = read_csv(buf, index_col=0, encoding="UTF-8")
tm.assert_frame_equal(df, df2)
def test_to_csv_stringio(self, float_frame):
buf = StringIO()
float_frame.to_csv(buf)
buf.seek(0)
recons = read_csv(buf, index_col=0)
tm.assert_frame_equal(recons, float_frame)
def test_to_csv_float_format(self):
df = DataFrame(
[[0.123456, 0.234567, 0.567567], [12.32112, 123123.2, 321321.2]],
index=["A", "B"],
columns=["X", "Y", "Z"],
)
with tm.ensure_clean() as filename:
df.to_csv(filename, float_format="%.2f")
rs = read_csv(filename, index_col=0)
xp = DataFrame(
[[0.12, 0.23, 0.57], [12.32, 123123.20, 321321.20]],
index=["A", "B"],
columns=["X", "Y", "Z"],
)
tm.assert_frame_equal(rs, xp)
def test_to_csv_unicodewriter_quoting(self):
df = DataFrame({"A": [1, 2, 3], "B": ["foo", "bar", "baz"]})
buf = StringIO()
df.to_csv(buf, index=False, quoting=csv.QUOTE_NONNUMERIC, encoding="utf-8")
result = buf.getvalue()
expected_rows = ['"A","B"', '1,"foo"', '2,"bar"', '3,"baz"']
expected = tm.convert_rows_list_to_csv_str(expected_rows)
assert result == expected
def test_to_csv_quote_none(self):
# GH4328
df = DataFrame({"A": ["hello", '{"hello"}']})
for encoding in (None, "utf-8"):
buf = StringIO()
df.to_csv(buf, quoting=csv.QUOTE_NONE, encoding=encoding, index=False)
result = buf.getvalue()
expected_rows = ["A", "hello", '{"hello"}']
expected = tm.convert_rows_list_to_csv_str(expected_rows)
assert result == expected
def test_to_csv_index_no_leading_comma(self):
df = DataFrame({"A": [1, 2, 3], "B": [4, 5, 6]}, index=["one", "two", "three"])
buf = StringIO()
df.to_csv(buf, index_label=False)
expected_rows = ["A,B", "one,1,4", "two,2,5", "three,3,6"]
expected = tm.convert_rows_list_to_csv_str(expected_rows)
assert buf.getvalue() == expected
def test_to_csv_line_terminators(self):
# see gh-20353
df = DataFrame({"A": [1, 2, 3], "B": [4, 5, 6]}, index=["one", "two", "three"])
with tm.ensure_clean() as path:
# case 1: CRLF as line terminator
df.to_csv(path, line_terminator="\r\n")
expected = b",A,B\r\none,1,4\r\ntwo,2,5\r\nthree,3,6\r\n"
with open(path, mode="rb") as f:
assert f.read() == expected
with tm.ensure_clean() as path:
# case 2: LF as line terminator
df.to_csv(path, line_terminator="\n")
expected = b",A,B\none,1,4\ntwo,2,5\nthree,3,6\n"
with open(path, mode="rb") as f:
assert f.read() == expected
with tm.ensure_clean() as path:
# case 3: The default line terminator(=os.linesep)(gh-21406)
df.to_csv(path)
os_linesep = os.linesep.encode("utf-8")
expected = (
b",A,B"
+ os_linesep
+ b"one,1,4"
+ os_linesep
+ b"two,2,5"
+ os_linesep
+ b"three,3,6"
+ os_linesep
)
with open(path, mode="rb") as f:
assert f.read() == expected
def test_to_csv_from_csv_categorical(self):
# CSV with categoricals should result in the same output
# as when one would add a "normal" Series/DataFrame.
s = Series(pd.Categorical(["a", "b", "b", "a", "a", "c", "c", "c"]))
s2 = Series(["a", "b", "b", "a", "a", "c", "c", "c"])
res = StringIO()
s.to_csv(res, header=False)
exp = StringIO()
s2.to_csv(exp, header=False)
assert res.getvalue() == exp.getvalue()
df = DataFrame({"s": s})
df2 = DataFrame({"s": s2})
res = StringIO()
df.to_csv(res)
exp = StringIO()
df2.to_csv(exp)
assert res.getvalue() == exp.getvalue()
def test_to_csv_path_is_none(self, float_frame):
# GH 8215
# Make sure we return string for consistency with
# Series.to_csv()
csv_str = float_frame.to_csv(path_or_buf=None)
assert isinstance(csv_str, str)
recons = read_csv(StringIO(csv_str), index_col=0)
tm.assert_frame_equal(float_frame, recons)
@pytest.mark.parametrize(
"df,encoding",
[
(
DataFrame(
[[0.123456, 0.234567, 0.567567], [12.32112, 123123.2, 321321.2]],
index=["A", "B"],
columns=["X", "Y", "Z"],
),
None,
),
# GH 21241, 21118
(DataFrame([["abc", "def", "ghi"]], columns=["X", "Y", "Z"]), "ascii"),
(DataFrame(5 * [[123, "你好", "世界"]], columns=["X", "Y", "Z"]), "gb2312"),
(
DataFrame(5 * [[123, "Γειά σου", "Κόσμε"]], columns=["X", "Y", "Z"]),
"cp737",
),
],
)
def test_to_csv_compression(self, df, encoding, compression):
with tm.ensure_clean() as filename:
df.to_csv(filename, compression=compression, encoding=encoding)
# test the round trip - to_csv -> read_csv
result = read_csv(
filename, compression=compression, index_col=0, encoding=encoding
)
tm.assert_frame_equal(df, result)
# test the round trip using file handle - to_csv -> read_csv
with get_handle(
filename, "w", compression=compression, encoding=encoding
) as handles:
df.to_csv(handles.handle, encoding=encoding)
assert not handles.handle.closed
result = read_csv(
filename,
compression=compression,
encoding=encoding,
index_col=0,
).squeeze("columns")
tm.assert_frame_equal(df, result)
# explicitly make sure file is compressed
with tm.decompress_file(filename, compression) as fh:
text = fh.read().decode(encoding or "utf8")
for col in df.columns:
assert col in text
with tm.decompress_file(filename, compression) as fh:
tm.assert_frame_equal(df, read_csv(fh, index_col=0, encoding=encoding))
def test_to_csv_date_format(self, datetime_frame):
with tm.ensure_clean("__tmp_to_csv_date_format__") as path:
dt_index = datetime_frame.index
datetime_frame = DataFrame(
{"A": dt_index, "B": dt_index.shift(1)}, index=dt_index
)
datetime_frame.to_csv(path, date_format="%Y%m%d")
# Check that the data was put in the specified format
test = read_csv(path, index_col=0)
datetime_frame_int = datetime_frame.applymap(
lambda x: int(x.strftime("%Y%m%d"))
)
datetime_frame_int.index = datetime_frame_int.index.map(
lambda x: int(x.strftime("%Y%m%d"))
)
tm.assert_frame_equal(test, datetime_frame_int)
datetime_frame.to_csv(path, date_format="%Y-%m-%d")
# Check that the data was put in the specified format
test = read_csv(path, index_col=0)
datetime_frame_str = datetime_frame.applymap(
lambda x: x.strftime("%Y-%m-%d")
)
datetime_frame_str.index = datetime_frame_str.index.map(
lambda x: x.strftime("%Y-%m-%d")
)
tm.assert_frame_equal(test, datetime_frame_str)
# Check that columns get converted
datetime_frame_columns = datetime_frame.T
datetime_frame_columns.to_csv(path, date_format="%Y%m%d")
test = read_csv(path, index_col=0)
datetime_frame_columns = datetime_frame_columns.applymap(
lambda x: int(x.strftime("%Y%m%d"))
)
# Columns don't get converted to ints by read_csv
datetime_frame_columns.columns = datetime_frame_columns.columns.map(
lambda x: x.strftime("%Y%m%d")
)
tm.assert_frame_equal(test, datetime_frame_columns)
# test NaTs
nat_index = to_datetime(
["NaT"] * 10 + ["2000-01-01", "1/1/2000", "1-1-2000"]
)
nat_frame = DataFrame({"A": nat_index}, index=nat_index)
nat_frame.to_csv(path, date_format="%Y-%m-%d")
test = read_csv(path, parse_dates=[0, 1], index_col=0)
tm.assert_frame_equal(test, nat_frame)
def test_to_csv_with_dst_transitions(self):
with tm.ensure_clean("csv_date_format_with_dst") as path:
# make sure we are not failing on transitions
times = date_range(
"2013-10-26 23:00",
"2013-10-27 01:00",
tz="Europe/London",
freq="H",
ambiguous="infer",
)
for i in [times, times + pd.Timedelta("10s")]:
i = i._with_freq(None) # freq is not preserved by read_csv
time_range = np.array(range(len(i)), dtype="int64")
df = DataFrame({"A": time_range}, index=i)
df.to_csv(path, index=True)
# we have to reconvert the index as we
# don't parse the tz's
result = read_csv(path, index_col=0)
result.index = to_datetime(result.index, utc=True).tz_convert(
"Europe/London"
)
tm.assert_frame_equal(result, df)
# GH11619
idx = date_range("2015-01-01", "2015-12-31", freq="H", tz="Europe/Paris")
idx = idx._with_freq(None) # freq does not round-trip
idx._data._freq = None # otherwise there is trouble on unpickle
df = DataFrame({"values": 1, "idx": idx}, index=idx)
with tm.ensure_clean("csv_date_format_with_dst") as path:
df.to_csv(path, index=True)
result = read_csv(path, index_col=0)
result.index = to_datetime(result.index, utc=True).tz_convert(
"Europe/Paris"
)
result["idx"] = to_datetime(result["idx"], utc=True).astype(
"datetime64[ns, Europe/Paris]"
)
tm.assert_frame_equal(result, df)
# assert working
df.astype(str)
with tm.ensure_clean("csv_date_format_with_dst") as path:
df.to_pickle(path)
result = pd.read_pickle(path)
tm.assert_frame_equal(result, df)
def test_to_csv_quoting(self):
df = DataFrame(
{
"c_bool": [True, False],
"c_float": [1.0, 3.2],
"c_int": [42, np.nan],
"c_string": ["a", "b,c"],
}
)
expected_rows = [
",c_bool,c_float,c_int,c_string",
"0,True,1.0,42.0,a",
'1,False,3.2,,"b,c"',
]
expected = tm.convert_rows_list_to_csv_str(expected_rows)
result = df.to_csv()
assert result == expected
result = df.to_csv(quoting=None)
assert result == expected
expected_rows = [
",c_bool,c_float,c_int,c_string",
"0,True,1.0,42.0,a",
'1,False,3.2,,"b,c"',
]
expected = tm.convert_rows_list_to_csv_str(expected_rows)
result = df.to_csv(quoting=csv.QUOTE_MINIMAL)
assert result == expected
expected_rows = [
'"","c_bool","c_float","c_int","c_string"',
'"0","True","1.0","42.0","a"',
'"1","False","3.2","","b,c"',
]
expected = tm.convert_rows_list_to_csv_str(expected_rows)
result = df.to_csv(quoting=csv.QUOTE_ALL)
assert result == expected
# see gh-12922, gh-13259: make sure changes to
# the formatters do not break this behaviour
expected_rows = [
'"","c_bool","c_float","c_int","c_string"',
'0,True,1.0,42.0,"a"',
'1,False,3.2,"","b,c"',
]
expected = tm.convert_rows_list_to_csv_str(expected_rows)
result = df.to_csv(quoting=csv.QUOTE_NONNUMERIC)
assert result == expected
msg = "need to escape, but no escapechar set"
with pytest.raises(csv.Error, match=msg):
df.to_csv(quoting=csv.QUOTE_NONE)
with pytest.raises(csv.Error, match=msg):
df.to_csv(quoting=csv.QUOTE_NONE, escapechar=None)
expected_rows = [
",c_bool,c_float,c_int,c_string",
"0,True,1.0,42.0,a",
"1,False,3.2,,b!,c",
]
expected = tm.convert_rows_list_to_csv_str(expected_rows)
result = df.to_csv(quoting=csv.QUOTE_NONE, escapechar="!")
assert result == expected
expected_rows = [
",c_bool,c_ffloat,c_int,c_string",
"0,True,1.0,42.0,a",
"1,False,3.2,,bf,c",
]
expected = tm.convert_rows_list_to_csv_str(expected_rows)
result = df.to_csv(quoting=csv.QUOTE_NONE, escapechar="f")
assert result == expected
# see gh-3503: quoting Windows line terminators
# presents with encoding?
text_rows = ["a,b,c", '1,"test \r\n",3']
text = tm.convert_rows_list_to_csv_str(text_rows)
df = read_csv(StringIO(text))
buf = StringIO()
df.to_csv(buf, encoding="utf-8", index=False)
assert buf.getvalue() == text
# xref gh-7791: make sure the quoting parameter is passed through
# with multi-indexes
df = DataFrame({"a": [1, 2], "b": [3, 4], "c": [5, 6]})
df = df.set_index(["a", "b"])
expected_rows = ['"a","b","c"', '"1","3","5"', '"2","4","6"']
expected = tm.convert_rows_list_to_csv_str(expected_rows)
assert df.to_csv(quoting=csv.QUOTE_ALL) == expected
def test_period_index_date_overflow(self):
# see gh-15982
dates = ["1990-01-01", "2000-01-01", "3005-01-01"]
index = pd.PeriodIndex(dates, freq="D")
df = DataFrame([4, 5, 6], index=index)
result = df.to_csv()
expected_rows = [",0", "1990-01-01,4", "2000-01-01,5", "3005-01-01,6"]
expected = tm.convert_rows_list_to_csv_str(expected_rows)
assert result == expected
date_format = "%m-%d-%Y"
result = df.to_csv(date_format=date_format)
expected_rows = [",0", "01-01-1990,4", "01-01-2000,5", "01-01-3005,6"]
expected = tm.convert_rows_list_to_csv_str(expected_rows)
assert result == expected
# Overflow with pd.NaT
dates = ["1990-01-01", NaT, "3005-01-01"]
index = pd.PeriodIndex(dates, freq="D")
df = DataFrame([4, 5, 6], index=index)
result = df.to_csv()
expected_rows = [",0", "1990-01-01,4", ",5", "3005-01-01,6"]
expected = tm.convert_rows_list_to_csv_str(expected_rows)
assert result == expected
def test_multi_index_header(self):
# see gh-5539
columns = MultiIndex.from_tuples([("a", 1), ("a", 2), ("b", 1), ("b", 2)])
df = DataFrame([[1, 2, 3, 4], [5, 6, 7, 8]])
df.columns = columns
header = ["a", "b", "c", "d"]
result = df.to_csv(header=header)
expected_rows = [",a,b,c,d", "0,1,2,3,4", "1,5,6,7,8"]
expected = tm.convert_rows_list_to_csv_str(expected_rows)
assert result == expected
def test_to_csv_single_level_multi_index(self):
# see gh-26303
index = Index([(1,), (2,), (3,)])
df = DataFrame([[1, 2, 3]], columns=index)
df = df.reindex(columns=[(1,), (3,)])
expected = ",1,3\n0,1,3\n"
result = df.to_csv(line_terminator="\n")
tm.assert_almost_equal(result, expected)
def test_gz_lineend(self):
# GH 25311
df = DataFrame({"a": [1, 2]})
expected_rows = ["a", "1", "2"]
expected = tm.convert_rows_list_to_csv_str(expected_rows)
with tm.ensure_clean("__test_gz_lineend.csv.gz") as path:
df.to_csv(path, index=False)
with tm.decompress_file(path, compression="gzip") as f:
result = f.read().decode("utf-8")
assert result == expected
def test_to_csv_numpy_16_bug(self):
        frame = DataFrame({"a": date_range("1/1/2000", periods=10)})
        buf = StringIO()
        frame.to_csv(buf)
        result = buf.getvalue()
        assert "2000-01-01" in result
import matplotlib.pyplot as plt
import seaborn as sns
import pandas as pd
import numpy as np
import scipy.stats as stats
import os
import matplotlib.pyplot as plt
import traceback
import statsmodels.api as sm
import statsmodels.formula.api as smf
import statsmodels
import bambi as bmb
import arviz as az
import sklearn
from datasets.models import RawFlower, RawUNM, RawDAR
from django.contrib.auth.models import User
from api.dilutionproc import predict_dilution
from api import adapters
def getCorrelationPerVisit(data, x_cols, y_cols, corr_method):
    'return correlations for pairs of features per time period / visit'
    for col in list(x_cols) + list(y_cols):
        try:
            data[col] = data[col].astype(float)
            data.loc[data[col] < 0, col] = np.nan
        except:
            data[col] = data[col]
df1 = data
rez = []
seen = []
N = None
for x in x_cols:
for y in y_cols:
if x!=y:
for visit in df1['TimePeriod'].unique():
df_visit = df1[df1['TimePeriod']== visit]
try:
temp = df_visit[(~df_visit[x].isna()) & (~df_visit[y].isna()) ]
N = temp.shape[0]
if corr_method == 'spearman':
spearman = stats.spearmanr(temp[x], temp[y])
rez.append([x,y,N,visit,spearman.correlation,spearman.pvalue])
else:
spearman = stats.pearsonr(temp[x], temp[y])
rez.append([x,y,N,visit,spearman[0],spearman[1]])
except:
print('err')
return pd.DataFrame(rez, columns = ['x','y','N','visit','corr','pval']).sort_values(by = 'pval')
def getCorrelation(data, x_cols, y_cols, corr_method):
    for col in list(x_cols) + list(y_cols):
        try:
            data[col] = data[col].astype(float)
            data.loc[data[col] < 0, col] = np.nan
        except:
            data[col] = data[col]
df1 = data
rez = []
seen = []
N = None
for x in x_cols:
for y in y_cols:
if x!=y:
try:
temp = df1[(~df1[x].isna()) & (~df1[y].isna())]
N = temp.shape[0]
if corr_method == 'spearman':
spearman = stats.spearmanr(temp[x], temp[y])
rez.append([x,y,N,spearman.correlation,spearman.pvalue])
else:
spearman = stats.pearsonr(temp[x], temp[y])
rez.append([x,y,N,spearman[0],spearman[1]])
except:
print('err')
return pd.DataFrame(rez, columns = ['x','y','N','corr','pval']).sort_values(by = 'pval')
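# Illustrative calls (a sketch; the exact feature lists depend on the merged cohort frame,
# the column names below are just examples drawn from this module):
#   per_visit = getCorrelationPerVisit(df_all, ['UASB', 'UDMA'], ['Outcome_weeks'], 'spearman')
#   overall = getCorrelation(df_all, ['UASB', 'UDMA'], ['Outcome_weeks'], 'spearman')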
def corr_sig(df=None):
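    'return a square matrix of pairwise Spearman p-values for the columns of df; cells with fewer than 3 paired observations are set to 1'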
p_matrix = np.zeros(shape=(df.shape[1],df.shape[1]))
for col in df.columns:
for col2 in df.drop(col,axis=1).columns:
df_temp = df[(~df[col].isna()) & (~df[col2].isna())]
if df_temp.shape[0] > 2:
spearman = stats.spearmanr(df_temp[col], df_temp[col2])
p_matrix[df.columns.to_list().index(col),df.columns.to_list().index(col2)] = spearman.pvalue
else:
p_matrix[df.columns.to_list().index(col),df.columns.to_list().index(col2)] = 1
return p_matrix
def getCorrelationHeatmap(data, to_corr_cols):
for col in to_corr_cols:
try:
data[col] = data[col].astype(float)
            data.loc[data[col] < 0, col] = np.nan
except:
data[col] = data[col]
#sns.set_theme(style="white",font_scale=1.75)
# Compute the correlation matrix
corr = data[to_corr_cols].corr(method = 'spearman').round(4)
# Generate a mask for the upper triangle
p_values = corr_sig(data[to_corr_cols])
mask = np.invert(np.tril(p_values<0.05))
# Set up the matplotlib figure
f, ax = plt.subplots(figsize=(40, 30))
# Generate a custom diverging colormap
cmap = sns.diverging_palette(0, 230, as_cmap=True)
g = sns.heatmap(corr,
cmap = cmap, vmax=.3, center=0, annot = True,
square=True, linewidths=.5, annot_kws={"size": 35}, mask=mask)
#g.ax_heatmap.set_xticklabels(g.ax_heatmap.get_xmajorticklabels(), fontsize = 40)
g.set_xticklabels(g.get_xmajorticklabels(), fontsize = 30, rotation = 90)
g.set_yticklabels(g.get_ymajorticklabels(), fontsize = 30, rotation = 0)
# Draw the heatmap with the mask and correct aspect ratio
return g
def cohortdescriptive(df_all):
    'function that returns count, mean, and std per cohort'
df_all = df_all.drop_duplicates(['CohortType','PIN_Patient','TimePeriod'])
b = df_all.groupby(['CohortType']).agg(['count','mean','std']).transpose().reset_index()
df2 = b.pivot(index='level_0', columns='level_1', values=['NEU','DAR','UNM'])
df2.columns = list(map("_".join, [[str(x[0]),x[1]] for x in list(df2.columns)]))
return df2
def q1(x):
return x.quantile(0.25)
def q2(x):
return x.median()
def q3(x):
return x.quantile(0.75)
def cohortdescriptive_all(df_all):
    'summary: count, mean, std, minimum, quartile 1, median, quartile 3, and maximum'
#df_all = df_all.drop_duplicates(['CohortType','PIN_Patient','TimePeriod'])
df_all = df_all.select_dtypes(include=['float64'])
categorical = ['CohortType','TimePeriod','Member_c','Outcome','folic_acid_supp', 'PIN_Patient',
'ethnicity','race','smoking','preg_complications','babySex','LGA','SGA','education']
df_all = df_all.loc[:, ~df_all.columns.isin(categorical)]
#b = df_all.agg(['count','mean','std',lambda x: x.quantile(0.25), lambda x: x.quantile(0.50)])
df_all[df_all < 0 ] = np.nan
b = df_all.agg(['count','mean','std','min', q1, 'median', q3, 'max']).transpose().round(4)
return b
def cohortdescriptiveOverall(data):
for col in data.columns:
try:
data[col] = data[col].astype(float)
except:
data[col] = data[col]
df_all = data
cohort = df_all['CohortType'].unique()[0]
b = df_all.groupby(['CohortType']).agg(['count','mean','std']).transpose().reset_index()
df2 = b.pivot(index='level_0', columns='level_1', values=[cohort])
df2.columns = list(map("_".join, [[str(x[0]),x[1]] for x in list(df2.columns)]))
return df2
def cohortDescriptiveByOutcome(data):
for col in data.columns:
try:
data[col] = data[col].astype(float)
except:
data[col] = data[col]
b = data.groupby(['Outcome']).agg(['count','mean','std']).transpose().reset_index()
df2 = b.pivot(index='level_0', columns='level_1', values=[0.0,1.0])
df2.columns = list(map("_".join, [[str(x[0]),x[1]] for x in list(df2.columns)]))
return df2
def oneHotEncoding(df, toencode):
    #TODO: add one-hot encoding for race, gender, etc.
    dums = [pd.get_dummies(df[var], prefix=var) for var in toencode]
    return pd.concat(dums, axis=1)
def merge3CohortFrames(df1,df2,df3):
'merge on feature intersections'
#only consider visit 2 for NEU
df2 = df2[df2['TimePeriod'] == 2]
for as_feature in ['UASB', 'UDMA', 'UAS5', 'UIAS', 'UAS3', 'UMMA']:
if as_feature not in df1.columns:
df1[as_feature] = np.nan
if as_feature not in df2.columns:
df2[as_feature] = np.nan
if as_feature not in df3.columns:
df3[as_feature] = np.nan
s1 = set(df1.columns)
s2 = set(df2.columns)
s3 = set(df3.columns)
    cc = list(set.intersection(s1, s2, s3))
df_all = pd.concat([df1[cc],df2[cc],df3[cc]])
for x in df_all:
try:
df_all[x] = df_all[x].astype(float)
except:
pass
return df_all
def merge2CohortFrames(df1,df2):
'merge on feature intersections'
for as_feature in ['UASB', 'UDMA', 'UAS5', 'UIAS', 'UAS3', 'UMMA']:
if as_feature not in df1.columns:
df1[as_feature] = np.nan
if as_feature not in df2.columns:
df2[as_feature] = np.nan
s1 = set(df1.columns)
s2 = set(df2.columns)
    cc = list(set.intersection(s1, s2))
df_all = pd.concat([df1[cc],df2[cc]])
for x in df_all:
try:
df_all[x] = df_all[x].astype(float)
except:
pass
return df_all
def categoricalCounts(df):
    #each participant should only have 1 measurement per variable
cohort = df['CohortType'].unique()
categorical1 = ['CohortType','TimePeriod','Member_c','Outcome','folic_acid_supp', 'PIN_Patient',
'ethnicity','race','smoking','preg_complications','babySex','LGA','SGA','education']
categorical2 = ['CohortType','TimePeriod','Member_c','Outcome','folic_acid_supp', 'PIN_Patient',
'ethnicity','race','smoking','preg_complications','babySex','LGA','SGA','education','GDMtest1','GDMtest2']
#TODO: fix this should detect the dataset type
try:
df22 = df[categorical1].drop_duplicates(['PIN_Patient'])
categorical1.remove('PIN_Patient')
df22 = df22[categorical1]
melted = pd.melt(df22,id_vars=['CohortType'])
df33 = melted.groupby(['variable','value'])['value'].count()
df33.index.names = ['variable', 'cat']
except:
df22 = df[categorical2].drop_duplicates(['PIN_Patient'])
categorical2.remove('PIN_Patient')
df22 = df22[categorical2]
melted = pd.melt(df22,id_vars=['CohortType'])
df33 = melted.groupby(['variable','value'])['value'].count()
df33.index.names = ['variable', 'cat']
return df33.reset_index()
def turntofloat(df):
for col in df.columns:
try:
df[col] = df[col].astype(float)
except:
pass
return df
def crude_reg(df_merged, x_feature, y_feature, covars, adjust_dilution, output, encode_cats):
    # crude linear regression: y ~ log(x) + covariates + intercept; reports the full OLS results table
df_merged = df_merged.replace(-9,np.nan).replace('-9',np.nan).replace(999,np.nan).replace(888,np.nan)
df_merged = df_merged[(~df_merged[x_feature].isna()) & (~df_merged[y_feature].isna())]
    #make sure all concentrations are above 0 - assumption is OK because the lowest concentration should be at the LOD
df_merged = df_merged[(df_merged[x_feature]> 0) & (~df_merged[x_feature].isna()) ]
split_covars = covars.split('|')
print(split_covars)
print(len(split_covars))
## adjust dilution
if adjust_dilution == 'True':
df_merged[x_feature] = df_merged[x_feature] / df_merged['UDR']
    if len(split_covars) > 1 and encode_cats == True:
        data = add_confound(df_merged, x_feature, y_feature, split_covars)
    elif len(split_covars) > 1 and encode_cats == False:
        data = df_merged[[x_feature] + [y_feature] + split_covars + ['CohortType']]
        data = data.dropna(axis='rows')
        data = turntofloat(data)
    else:
        data = df_merged[[x_feature] + [y_feature] + ['CohortType']]
        data = data.dropna(axis='rows')
        data = turntofloat(data)
## problem - if we are using z_score to predict might be an issue
data = data[(data[x_feature]> 0) & (~data[x_feature].isna()) ]
data_copy = data.copy()
data.drop(['CohortType'], inplace = True, axis = 1)
data['intercept'] = 1
#TODO: clean up - sometimes these variables are in the data, sometimes not (depends on selection )
try:
data['babySex'] = data['babySex'].astype(float)
except:
pass
try:
data['parity'] = data['parity'].astype(float)
except:
pass
data = data.select_dtypes(include = ['float','integer'])
#X and Y features TODO: clean up
X = data[[x for x in data.columns if x !=y_feature and x!= 'PIN_Patient']]
Y = data[y_feature]
X[x_feature]= np.log(X[x_feature])
if df_merged.shape[0] > 2:
reg = sm.OLS(Y, X).fit()
ret = reg.summary()
else:
ret = 'error'
# model string
fit_string = y_feature + '~'
for x in X.columns:
if x == x_feature:
fit_string += ' + log(' + str(x) +')'
else:
fit_string += ' + ' + str(x)
fit_string = fit_string.replace('~ +','~')
header = ''
# info to display on webpage
for cohort in data_copy['CohortType'].unique():
cohort_data = data_copy[data_copy['CohortType'] == cohort]
header += '<div> <b> ' + cohort +' Number samples :</b> ' + str(cohort_data.shape[0]) + '</div>'
header += '<div> <b> Total Number samples :</b> ' + str(X.shape[0]) + '</div>'
header += '<div> <b> Model: </b>' + fit_string + '</div>'
header += '<div> ===================================================</div>'
htmls = header + ret.tables[0].as_html() + ret.tables[1].as_html()
# depending where we are calling it from
if output == 'csv':
final_return = ret
if output == 'html':
final_return = htmls
return final_return
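# Illustrative call (a sketch): covariates are passed as a '|'-separated string, and the
# dilution-adjustment flag and output format are strings, matching the parsing above.
# The feature names are examples drawn from this module, not a prescribed model:
#   html_table = crude_reg(df_all, 'UASB', 'birthWt', 'babySex|parity', 'True', 'html', True)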
def crude_logreg(df_merged, x_feature, y_feature, covars, adjust_dilution, output, encode_cats):
    # crude logistic regression: log(p/(1-p)) ~ log(x) + covariates + intercept; reports the full Logit results table
    # y_feature has to be binary (i.e. 0, 1)
df_merged = df_merged.replace(-9,np.nan).replace('-9',np.nan).replace(999,np.nan).replace(888,np.nan)
df_merged = df_merged[(~df_merged[x_feature].isna()) & (~df_merged[y_feature].isna()) & \
(df_merged[y_feature].isin([0.0,1.0,0,1, '0', '1', '0.0', '1.0']))]
    #make sure all concentrations are above 0 - assumption is OK because the lowest concentration should be at the LOD
#df_merged = df_merged[df_merged[x_feature]> 0]
#split the variables in the checkboxes
split_covars = covars.split('|')
print(split_covars)
print(len(split_covars))
##adjust dilution
if adjust_dilution == 'True':
df_merged[x_feature] = df_merged[x_feature] / df_merged['UDR']
    if len(split_covars) > 1 and encode_cats == True:
        data = add_confound(df_merged, x_feature, y_feature, split_covars)
    elif len(split_covars) > 1 and encode_cats == False:
        data = df_merged[[x_feature] + [y_feature] + split_covars + ['CohortType']]
        data = data.dropna(axis='rows')
        data = turntofloat(data)
    else:
        data = df_merged[[x_feature] + [y_feature] + ['CohortType']]
        data = data.dropna(axis='rows')
        data = turntofloat(data)
data = data[(data[x_feature]> 0) & (~data[x_feature].isna()) ]
data = data.dropna(how = 'any')
data.drop(['CohortType'], inplace = True, axis = 1)
# set intercept to 1
data['intercept'] = 1
#TODO: clean up
try:
data['babySex'] = data['babySex'].astype(float)
except:
pass
try:
data['parity'] = data['parity'].astype(float)
except:
pass
data = data.select_dtypes(include = ['float','integer'])
print('Data shape after intselect')
#independent
X = data[[x for x in data.columns if x !=y_feature and x!= 'PIN_Patient']]
#target
Y = data[y_feature]
#log of the exposure
X[x_feature]= np.log(X[x_feature])
# fit the model
if df_merged.shape[0] > 1:
log_reg = sm.Logit(Y, X).fit()
ret = log_reg.summary()
else:
ret = 'error'
# fit string for site
fit_string = y_feature + '~'
for x in X.columns:
if x == x_feature:
fit_string += ' + log(' + str(x) +')'
else:
fit_string += ' + ' + str(x)
fit_string = fit_string.replace('~ +',' ~')
header = ' <div><b> Logistic Regression </b> </div>'
header += '<div><b> Number samples: </b> ' + str(X.shape[0]) + '</div>'
header += '<div><b> Model: </b>' + fit_string + '</div>'
header += '<div><b> Group: </b> CohortType '
htmls = header + ret.tables[0].as_html() + ret.tables[1].as_html()
# depending where we are calling it from
if output == 'csv':
final_return = ret
if output == 'html':
final_return = htmls
return final_return
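# Illustrative call (a sketch): same argument conventions as crude_reg, with a binary outcome.
# Feature names are examples drawn from this module:
#   html_table = crude_logreg(df_all, 'UASB', 'Outcome', 'babySex|parity', 'True', 'html', True)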
def crude_mixedML2(df_merged, x_feature, y_feature, covars):
    #TODO: Replace covars variable with actual selection of individual features
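    # Linear mixed model with a random intercept, grouped by CohortType; covariates arrive as a '|'-separated string.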
df_merged = df_merged.replace(-9,np.nan).replace('-9',np.nan).replace(999,np.nan).replace(888,np.nan)
split_covars = covars.split('|')
data = add_confound(df_merged, x_feature, y_feature, split_covars)
data['intercept'] = 1
#data = data.select_dtypes(include = ['float','integer'])
X = data[[x for x in data.columns if x !=y_feature and x!= 'CohortType']]
Y = data[y_feature]
if X.shape[0] > 2:
reg = sm.MixedLM(Y, X, groups=data["CohortType"], exog_re=X["intercept"]).fit()
ret = reg.summary()
else:
ret = 'error'
fit_string = y_feature + '~'
for x in X.columns:
fit_string += ' + ' + str(x)
fit_string = fit_string.replace('~ +','~') + ' + (1|CohortType)'
    header = '<div> <b> Linear Mixed Model with Random Intercept </b> </div>'
header += '<div> <b> Number samples: </b> ' + str(X.shape[0]) + '</div>'
header += '<div> <b> Model: </b>' + fit_string + '</div>'
header += '<div> <b> Group: </b> CohortType '
htmls = header + ret.tables[0].to_html() + ret.tables[1].to_html()
return htmls
def crude_binomial_mixedML(df_merged, x_feature, y_feature,covars):
df_merged = df_merged.replace(-9,np.nan).replace('-9',np.nan).replace(999,np.nan).replace(888,np.nan)
if covars == 'False':
data = df_merged[[x_feature,y_feature,'CohortType']].dropna(how = 'any', axis='rows')
data[x_feature] = data[x_feature] + 1
data[y_feature] = data[y_feature].astype(int)
random = {"a": '0 + C(CohortType)'}
fit_string = y_feature + '~' + x_feature
if covars == 'True':
random = {"a": '0 + C(CohortType)'}
data = add_confound(df_merged, x_feature, y_feature)
## create the model string for
fit_string = y_feature + '~'
cnt = 0
## filter out target, at birth, and reference dummy variables in model
for x in data.columns:
#data.drop(['education'], inplace = True, axis = 0)
if x != 'birthWt' and x !='Outcome_weeks' and x!= 'Outcome' and x != 'PIN_Patient' and x != 'SGA' and x != 'LGA' \
and x !='birthLen' and x != 'CohortType' and x != 'race' and x!='race_1' and x!= 'smoking' and x != 'smoking_3' \
and x != 'education_5' and x != 'education':
if cnt == 0:
fit_string += ' ' + x + ' '
else:
fit_string += ' + ' + x + ' '
cnt+=1
data[y_feature] = data[y_feature].astype(int)
## miced linear model with group variable = CohortType
md = statsmodels.genmod.bayes_mixed_glm.BinomialBayesMixedGLM.from_formula(
fit_string, random, data)
##fit the model
mdf = md.fit_vb()
return mdf.summary()
def crude_mixedMLbayse(df_merged, x_feature, y_feature, covars='False', logit = False):
    #TODO: Replace covars variable with actual selection of individual features
df_merged = df_merged.replace(-9,np.nan).replace('-9',np.nan).replace(999,np.nan).replace(888,np.nan)
if covars == 'False':
data = df_merged[[x_feature,y_feature,'CohortType']].dropna(how = 'any', axis='rows')
fit_string = y_feature + '~' + x_feature
if covars == 'True':
data = add_confound(df_merged, x_feature, y_feature)
## create the model string for
fit_string = y_feature + '~'
cnt = 0
## filter out target, at birth, and reference dummy variables in model
for x in data.columns:
#data.drop(['education'], inplace = True, axis = 0)
if x != 'birthWt' and x !='Outcome_weeks' and x!= 'Outcome' and x != 'PIN_Patient' and x != 'SGA' and x != 'LGA' \
and x !='birthLen' and x != 'CohortType' and x != 'race' and x!='race_1' and x!= 'smoking' and x != 'smoking_3' \
and x != 'education_5' and x != 'education':
if cnt == 0:
fit_string += ' ' + x + ' '
else:
fit_string += ' + ' + x + ' '
cnt+=1
fit_string += '+ (1|CohortType)'
if logit == False:
model = bmb.Model(data)
results = model.fit(fit_string)
else:
model = bmb.Model(data)
results = model.fit(fit_string, family='bernoulli',link = 'logit')
## miced linear model with group variable = CohortType
mdf = az.summary(results)
return mdf
def verifyclean(df):
df = df.replace(-9,np.nan).replace('-9',np.nan).replace(999,np.nan).replace(888,np.nan)
return df
def add_confound(df_merged, x_feature, y_feature, conf=()):
print(df_merged.shape)
# check if confounders are added
if len(conf) > 1:
cols_to_mix = [x_feature, y_feature, 'PIN_Patient', 'CohortType'] + conf
else:
cols_to_mix = [x_feature, y_feature, 'PIN_Patient', 'CohortType']
# drop any missing values as mixed model requires complete data
df_nonan = df_merged[cols_to_mix].dropna(axis='rows')
#df_nonan['smoking'] = df_nonan['smoking'].astype(int)
print(df_nonan.shape)
## dummy race annd smoking varible
def add_cats(name, df_nonan, ref_val):
df_nonan[name] = df_nonan[name].astype('float').astype(int)
df = pd.concat([df_nonan, pd.get_dummies(df_nonan[name], prefix = name)], axis = 1)
#print(df.columns)
try:
df.drop([name,name + '_' + ref_val], inplace = True, axis = 1)
except:
pass
return df
if 'race' in conf: df_nonan = add_cats('race', df_nonan, '1')
if 'smoking' in conf: df_nonan = add_cats('smoking', df_nonan, '0')
if 'education' in conf: df_nonan = add_cats('education', df_nonan, '5')
return df_nonan
## main analysis
## with categories encoded
def runcustomanalysis1():
pd.set_option('display.max_columns', None)
    pd.set_option('display.max_rows', None)
import calendar
from datetime import date, datetime, time
import locale
import unicodedata
import numpy as np
import pytest
import pytz
from pandas._libs.tslibs.timezones import maybe_get_tz
from pandas.core.dtypes.common import is_integer_dtype, is_list_like
import pandas as pd
from pandas import (
DataFrame, DatetimeIndex, Index, PeriodIndex, Series, TimedeltaIndex,
bdate_range, date_range, period_range, timedelta_range)
from pandas.core.arrays import PeriodArray
import pandas.core.common as com
import pandas.util.testing as tm
from pandas.util.testing import assert_series_equal
class TestSeriesDatetimeValues:
def test_dt_namespace_accessor(self):
# GH 7207, 11128
# test .dt namespace accessor
ok_for_period = PeriodArray._datetimelike_ops
ok_for_period_methods = ['strftime', 'to_timestamp', 'asfreq']
ok_for_dt = DatetimeIndex._datetimelike_ops
ok_for_dt_methods = ['to_period', 'to_pydatetime', 'tz_localize',
'tz_convert', 'normalize', 'strftime', 'round',
'floor', 'ceil', 'day_name', 'month_name']
ok_for_td = TimedeltaIndex._datetimelike_ops
ok_for_td_methods = ['components', 'to_pytimedelta', 'total_seconds',
'round', 'floor', 'ceil']
def get_expected(s, name):
result = getattr(Index(s._values), prop)
if isinstance(result, np.ndarray):
if is_integer_dtype(result):
result = result.astype('int64')
elif not is_list_like(result):
return result
return Series(result, index=s.index, name=s.name)
def compare(s, name):
a = getattr(s.dt, prop)
b = get_expected(s, prop)
if not (is_list_like(a) and is_list_like(b)):
assert a == b
else:
tm.assert_series_equal(a, b)
# datetimeindex
cases = [Series(date_range('20130101', periods=5), name='xxx'),
Series(date_range('20130101', periods=5, freq='s'),
name='xxx'),
Series(date_range('20130101 00:00:00', periods=5, freq='ms'),
name='xxx')]
for s in cases:
for prop in ok_for_dt:
# we test freq below
if prop != 'freq':
compare(s, prop)
for prop in ok_for_dt_methods:
getattr(s.dt, prop)
result = s.dt.to_pydatetime()
assert isinstance(result, np.ndarray)
assert result.dtype == object
result = s.dt.tz_localize('US/Eastern')
exp_values = DatetimeIndex(s.values).tz_localize('US/Eastern')
expected = Series(exp_values, index=s.index, name='xxx')
tm.assert_series_equal(result, expected)
tz_result = result.dt.tz
assert str(tz_result) == 'US/Eastern'
freq_result = s.dt.freq
assert freq_result == DatetimeIndex(s.values, freq='infer').freq
# let's localize, then convert
result = s.dt.tz_localize('UTC').dt.tz_convert('US/Eastern')
exp_values = (DatetimeIndex(s.values).tz_localize('UTC')
.tz_convert('US/Eastern'))
expected = Series(exp_values, index=s.index, name='xxx')
tm.assert_series_equal(result, expected)
# datetimeindex with tz
s = Series(date_range('20130101', periods=5, tz='US/Eastern'),
name='xxx')
for prop in ok_for_dt:
# we test freq below
if prop != 'freq':
compare(s, prop)
for prop in ok_for_dt_methods:
getattr(s.dt, prop)
result = s.dt.to_pydatetime()
assert isinstance(result, np.ndarray)
assert result.dtype == object
result = s.dt.tz_convert('CET')
expected = Series(s._values.tz_convert('CET'),
index=s.index, name='xxx')
tm.assert_series_equal(result, expected)
tz_result = result.dt.tz
assert str(tz_result) == 'CET'
freq_result = s.dt.freq
        assert freq_result == DatetimeIndex(s.values, freq='infer').freq
import re
import os
import pandas as pd
import numpy as np
def readGas(DataPath, building, building_num, write_data, datafile, floor_area):
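    """Read the building's gas CSV; for Central House (building_num == 1) the multi-year monthly means are re-indexed onto Sep 2016 - Sep 2017 to align with the metered electricity."""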
dateparse = lambda x: pd.datetime.strptime(x, '%d-%b-%y')
    print('importing gas data from:', DataPath + building + '/Data/' + datafile + '_GasData.csv')
if building_num == 1: # Central House
df = pd.read_csv(DataPath + building + '/Data/' + datafile + '_GasData.csv', date_parser=dateparse,
header=0, index_col=0)
df = df.loc['2013-01-01':'2016-10-01'] # ['2015-09-31':'2016-10-01'] ['2012-01-24':'2016-10-01']
df = df.groupby(df.index.month).mean() # get the monthly mean over multiple years
df = pd.concat([df[9:], df[:9]]) # reorder the months to align with the submetered data...
rng = pd.date_range(start='09/2016', end='09/2017', freq='M')
df = df.set_index(rng) # set new index to align mean monthly gas data with metered electricity
df.rename(columns={df.columns[0]: 'Gas'}, inplace=True)
return df
def readSTM(DataPathSTM, building, building_num, write_data, datafile, floor_area):
""" Short Term Monitoring """
if building_num in {0}:
dateparseSTM = lambda x: pd.datetime.strptime(x, '%d-%m-%y %H:%M')
elif building_num in {1}:
dateparseSTM = lambda x: pd.datetime.strptime(x, '%d/%m/%Y %H:%M')
if building_num in {0,1}:
df_stm = pd.read_csv(DataPathSTM + datafile + '/' + datafile + '_combined.csv', date_parser=dateparseSTM, header=0,index_col=0)
else:
df_stm = pd.DataFrame()
cols = df_stm.columns.tolist()
if building_num == 0: # MaletPlaceEngineering
cols_new = ['Server GF [GP3]', 'Lighting 2nd', 'Power 2nd', 'Lighting 3rd', 'Power 3rd', 'DB1', 'Lighting 6th',
'Power 6th', 'Power 7th', 'Lighting 7th']
for i, v in enumerate(cols):
df_stm.rename(columns={cols[i]: cols_new[i]}, inplace=True)
if building_num == 1: # CentralHouse
cols_new = ['MCP01', 'B2', 'PV', '3A', '3D', 'B41']
for i, v in enumerate(cols):
df_stm.rename(columns={cols[i]: cols_new[i]}, inplace=True)
""" Manipulate """
    # Create average and standard deviation profiles for weekdays and weekend days from the short-term monitoring, and interpolate the 2-hourly metered values to 15-minute resolution (the divide-by-8 below keeps the kWh totals consistent).
df_stm = df_stm.divide(8) # because it's kWh each value needs to be divided by 8 if we go from 2h to 15min frequency
df_stm = df_stm[~df_stm.index.duplicated(keep='first')]
df_stm = df_stm.reindex(pd.date_range(start=df_stm.index.min(), end=df_stm.index.max(), freq='15Min'))
df_stm = df_stm.interpolate(method='linear')
return df_stm
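# Added sketch (not original code) of the resampling step used in readSTM above:
# one 2-hourly kWh reading is split into eight 15-minute values (divide by 8) and
# the gaps are filled by linear interpolation, so daily totals are preserved.
def _example_stm_resample():
    idx = pd.date_range('2017-01-01', periods=3, freq='2H')
    two_hourly = pd.DataFrame({'meter_kwh': [8.0, 8.0, 8.0]}, index=idx)
    quarter_hourly = two_hourly.divide(8).reindex(
        pd.date_range(idx.min(), idx.max(), freq='15Min')).interpolate(method='linear')
    return quarter_hourly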
def readSubmetering(DataPath, building, building_num, building_abr, write_data, datafile, df_stm, floor_area):
print(building_abr)
print('importing submetering data from:', DataPath + building + '/Data/' + datafile + '_SubmeteringData.csv')
if building_abr == 'MPEB': # Malet Place Engineering
dateparse = lambda x: pd.datetime.strptime(x, '%Y-%b-%d %H:%M:%S.000')
df = pd.read_csv(DataPath + building + '/Data/' + datafile + '_SubmeteringData.csv', date_parser=dateparse,
header=0, index_col=0)
# Check if there are duplicate index values (of which there are in CH data) and remove them...
df = df[~df.index.duplicated(keep='first')]
# There are missing indices in the data, reindex the missing indices of which there are only a few and backfill them
df = df.reindex(pd.date_range(df.index.min(), df.index.max(), freq='15Min'), method='backfill')
df_realweather = pd.DataFrame()
cols = df.columns.tolist()
df = df.loc['2016-09-01 00:15:00':'2017-08-31'] # ['2016-09-01':'2017-04-30']
cols_new = ['B10', 'B11', 'B12', 'B14', 'B15', 'B16', 'B17', 'B8', 'B9', 'BB4', 'BB3',
'CH1', 'CH2', 'DB5MS', 'GP2', 'Dynamo', 'Fire Lift', 'Lift Panel',
'Lift P1', 'LV1', 'LV2', 'LV3', 'MCCB03', 'MCCB01', 'BB2', 'BB1']
# print(pd.DataFrame([df.columns, cols_new]).T)
# stm cols [server excluded...,'2L','2P','3L','3P','DB1','6L','6P','7P','7L']
for i, v in enumerate(cols):
df.rename(columns={cols[i]: cols_new[i]}, inplace=True)
df_stm = pd.concat([df_stm], axis=1, join_axes=[df.index]) # set the short term monitoring to the same axis
df = pd.concat([df, df_stm], axis=1)
# df = df.convert_objects(convert_numeric=True).fillna(0)
df[df < 0] = 0 # set negative values (LV3) to 0
df_MPEB = pd.concat([df[['LV1', 'LV2']]], axis=1)
df_MPEB = df_MPEB.sum(axis=1)
df_MPEB = df_MPEB.sum(axis=0)
print('df_MPEB total kWh/m2a:', df_MPEB / floor_area)
# real LV metered
df_LV1_real = df['LV1']
df_LV2_real = df['LV2']
df_mains = pd.DataFrame(pd.concat([df_LV1_real, df_LV2_real], axis=1).sum(axis=1), columns=['Mains'])
df_workshops = pd.DataFrame(pd.concat([df[['BB4', 'BB3', 'LV3', 'GP2']]], axis=1).sum(axis=1),
columns=['Workshops'])
df_lifts = pd.DataFrame(pd.concat([df[['Fire Lift', 'Lift Panel']]], axis=1).sum(axis=1), columns=['Lifts'])
df_mech = pd.DataFrame(pd.concat([df[['MCCB03', 'Dynamo', 'MCCB01']]], axis=1).sum(axis=1), columns=['Systems'])
df_chillers = pd.DataFrame(pd.concat([df[['CH1', 'CH2']]]).sum(axis=1), columns=['Chillers'])
# Lighting and Power
df_BB2 = pd.DataFrame(pd.concat([df[['Lighting 6th', 'Power 6th', 'Lighting 7th', 'Power 7th']].sum(axis=1),
pd.DataFrame(df[['Lighting 7th', 'Power 6th']].sum(axis=1) * 3)], axis=1).sum(
axis=1), columns=['BB2 L&P'])
df_BB1 = pd.DataFrame(pd.concat([df[['Lighting 2nd', 'Power 2nd', 'Lighting 3rd', 'Power 3rd']].sum(axis=1),
pd.DataFrame(df[['Lighting 6th', 'Power 6th']].sum(axis=1) * 2)], axis=1).sum(
axis=1), columns=['BB1 L&P'])
df_BB1_surplus = df['BB1'] - df['DB1'] - df['Server GF [GP3]'] - df_BB1['BB1 L&P']
df_BB2_surplus = df['BB2'] - df_BB2['BB2 L&P']
print('Busbar 1')
print('BB1', df['BB1'].sum(axis=0) / floor_area)
print('DB1', df['DB1'].sum(axis=0) / floor_area)
print('GP3', df['Server GF [GP3]'].sum(axis=0) / floor_area)
print('BB1 L&P', df_BB1['BB1 L&P'].sum(axis=0) / floor_area)
print('BB1remaining', df_BB1_surplus.sum(axis=0) / floor_area)
print('LP on 6th', pd.DataFrame(df[['Lighting 6th', 'Power 6th']]).sum(axis=1).sum(axis=0) / floor_area)
print('LP on 2 and 3rd',
df[['Lighting 2nd', 'Power 2nd', 'Lighting 3rd', 'Power 3rd']].sum(axis=1).sum(axis=0) / floor_area)
print('Busbar 2')
print('BB2', df['BB2'].sum(axis=0) / floor_area)
print('BB2 L&P', df_BB2['BB2 L&P'].sum(axis=0) / floor_area)
print('BB2remaining', df_BB2_surplus.sum(axis=0) / floor_area)
print(((df_BB1_surplus.sum(axis=0) / floor_area) + (df_BB2_surplus.sum(axis=0) / floor_area)) / (
df['DB1'].sum(axis=0) / floor_area))
print(((df_BB1_surplus.sum(axis=0) / floor_area) + (df_BB2_surplus.sum(axis=0) / floor_area)) / df['DB1'].sum(
axis=0) / floor_area)
df_lp = pd.DataFrame(pd.concat([df_BB1['BB1 L&P'], df_BB2['BB2 L&P']], axis=1).sum(axis=1),
columns=['floors L&P'])
surplus_basedonDB1 = df['DB1'] * ((((df_BB1_surplus.sum(axis=0) / floor_area) + (
df_BB2_surplus.sum(axis=0) / floor_area)) / (df['DB1'].sum(axis=0) / floor_area)) / 10)
# keep within 20% of the mean.
        surplus_basedonDB1[
            surplus_basedonDB1 < surplus_basedonDB1.mean() - 0.2 * surplus_basedonDB1.mean()] = surplus_basedonDB1.mean()  # clamp values more than 20% below the mean
        surplus_basedonDB1[
            surplus_basedonDB1 > surplus_basedonDB1.mean() + 0.2 * surplus_basedonDB1.mean()] = surplus_basedonDB1.mean()  # clamp values more than 20% above the mean
df_BB1and2 = pd.DataFrame(
df[['BB1', 'BB2']].sum(axis=1) - surplus_basedonDB1 - df['Server GF [GP3]'] - df['DB1'], columns=['L&P'])
# scaled_daily(df_BB1and2.resample('30Min').sum(), building_label='MPEB', building_abr='MPEB', day_type='three', scale=False, time_interval='30Min')
surplus = pd.concat([df_BB1_surplus + df_BB2_surplus], axis=1)
        # determine the server load from the difference between metered LV2 and the disaggregated LV2 total.
df_LV2_aggregate = pd.concat([df[['BB1', 'BB2', 'CH2', 'MCCB01', 'GP2']]],
axis=1) # LV2, missing Fire alam and DB409 (big server)
df_LV2_aggregate = df_LV2_aggregate.sum(axis=1)
df_bigserver = pd.DataFrame(df_LV2_real - df_LV2_aggregate, columns=[
            'DB409'])  # the gap between metered LV2 and the disaggregated LV2 total should be the big server (DB409).
df_bigserver[df_bigserver < 0] = 0 # remove negative values...
df_bigserver = pd.DataFrame(
pd.concat([df_bigserver, surplus_basedonDB1, df['Server GF [GP3]'], df['DB1']], axis=1).sum(axis=1),
columns=['DB409'])
print(df_bigserver.sum(axis=0) / floor_area, 'kWh/m2a')
df_floorsLP = pd.DataFrame(pd.concat([df[['BB1', 'BB2']]], axis=1).sum(axis=1), columns=['L&P'])
df_floorsLP['L&P'] = df_floorsLP['L&P'] - df['Server GF [GP3]']
df_floorsLP = pd.DataFrame(pd.concat([df_BB1, df_BB2], axis=1).sum(axis=1), columns=['L&P'])
df_servers = pd.DataFrame(pd.concat([df_bigserver, df[['Server GF [GP3]']]], axis=1).sum(axis=1),
columns=['Servers'])
print("Average kWh per day for the server DB409 = " + str(df_bigserver.mean()))
df_LVL1 = pd.concat([df_BB1and2, df_chillers, df_mech, df_servers, df_workshops],
axis=1) # LV1, missing LV1A, PF LV1
print('Workshops', df_workshops.values.sum() / floor_area, 'servers', df_servers.values.sum() / floor_area,
'Lifts', df_lifts.values.sum() / floor_area)
print('lift', df['Lift P1'].values.sum() / floor_area)
print('GP2', df['GP2'].values.sum() / floor_area)
print('DB5MS', df['DB5MS'].values.sum() / floor_area)
# diff between BB3 aggregated and separate
df_BB3 = df[['B9', 'B10', 'B14', 'B15', 'B8']] # these combined form Busbar-2 (BB3)
df_BB4 = df[['B12', 'B16', 'B17', 'B11']] # these combined form Busbar-1 (BB4) # excludes B13
df_BB3and4 = pd.concat([df_BB3, df_BB4], axis=1)
df_BB3and4 = df_BB3and4.sum(axis=1)
df_BB3and4real = pd.concat([df['BB2'], df['BB1']], axis=1)
df = pd.concat([df, df_bigserver], axis=1)
if building_abr == 'CH': # CentralHouse
dateparse = lambda x: pd.datetime.strptime(x, '%Y-%b-%d %H:%M:%S.000')
df = pd.read_csv(DataPath + building + '/Data/' + datafile + '_SubmeteringData.csv', date_parser=dateparse,
header=0, index_col=0)
# Check if there are duplicate index values (of which there are in CH data) and remove them...
df = df[~df.index.duplicated(keep='first')]
# There are missing indices in the data, reindex the missing indices of which there are only a few and backfill them
df = df.reindex(pd.date_range(df.index.min(), df.index.max(), freq='15Min'), method='backfill')
df_realweather = pd.DataFrame()
df = df.loc['2016-09-01':'2017-08-31']
# Naming
# cols_new = ['BB2', 'LIFT1', 'LIFT2', 'LIFT3', 'B1', 'B4', 'BB1', 'LIFT4', 'DB21', 'R1', 'Server [B5]', 'R2',
# 'G2', 'G1', 'B31', 'B32', 'B44', 'B52', 'B54', 'B53',
# 'B62', 'B64', 'B11', 'B12', 'B21', 'B22', 'B43', 'B42', 'B51',
# 'B61', 'MP1']
cols = df.columns.tolist()
# STM ['MCP01', 'B2', 'PV', '3A', '3D', 'B41']
cols_new = ['BB2', 'LIFT1', 'LIFT2', 'LIFT3', 'B1', 'B4', 'BB1', 'LIFT4', 'DB21', 'R1', 'Server', 'R2',
'G2', 'G1', 'B31', 'B32', 'B44', 'B52', 'B54', 'B53', 'B62', 'B64', 'B11', 'B12', 'B21', 'B22',
'B43', 'B42', 'B51',
'B61', 'MP1']
for i, v in enumerate(cols):
df.rename(columns={cols[i]: cols_new[i]}, inplace=True)
df_m = df.resample('M').sum()
df_m_sum = df_m.mean(axis=0)
# combine B1 and B2 and B4 (boiler house L&P) as Basement L&P
df_basementLP = pd.concat([df[['B1', 'B4']], df_stm[['B2']]], axis=1, join_axes=[df.index])
df_basementLP = pd.DataFrame(df_basementLP.sum(axis=1), columns=['L&P Basement'])
# combine L&P per floor
df_groundLP = df[['G1', 'G2']]
df_groundLP = pd.DataFrame(df_groundLP.sum(axis=1), columns=['L&P Ground floor'])
# first floor lighting and power
df_firstLP = df[['B12', 'B11']]
df_firstLP = pd.DataFrame(df_firstLP.sum(axis=1), columns=['L&P 1st floor'])
# second floor lighting and power
df_secondLP = df[['B21', 'B22']]
df_secondLP = pd.DataFrame(df_secondLP.sum(axis=1), columns=['L&P 2nd floor'])
# third floor lighting and power
df_thirdLP = pd.concat([df[['B31', 'B32']], df_stm[['3A', '3D']]], axis=1, join_axes=[df.index])
df_thirdLP = pd.DataFrame(df_thirdLP.sum(axis=1), columns=['L&P 3rd floor'])
# fourth floor lighting and power
df_fourthLP = pd.concat([df[['B42', 'B43', 'B44']], df_stm[['B41']]], axis=1, join_axes=[df.index])
df_fourthLP = pd.DataFrame(df_fourthLP.sum(axis=1), columns=['L&P 4th floor']) # [B41, B42]
# fifth floor lighting and power
df_fifthLP = df[['B51', 'B53', 'B54']]
df_fifthLP = pd.DataFrame(df_fifthLP.sum(axis=1), columns=['L&P 5th floor'])
# sixth floor lighting and power
df_sixthLP = df[['B61', 'B62']]
df_sixthLP = pd.DataFrame(df_sixthLP.sum(axis=1), columns=['L&P 6th floor'])
# combine Lifts 1-4
df_lifts = pd.DataFrame(df[['LIFT1', 'LIFT2', 'LIFT3', 'LIFT4']].sum(axis=1), columns=['Lifts'])
# combine R1, R2 and MCP01 as systems
df_mech = pd.concat([df[['R1', 'R2']], df_stm[['MCP01']]], axis=1, join_axes=[df.index])
df_mech = pd.DataFrame(df_mech.sum(axis=1), columns=['Systems'])
df_BBs = pd.concat([df[['BB1', 'BB2']], df_basementLP], axis=1, join_axes=[df.index])
df_BBs = df_BBs.sum(axis=1)
df_BBs = pd.DataFrame(df_BBs)
df_BBs.rename(columns={df_BBs.columns[0]: 'L&P'}, inplace=True) # R1, R2', MCP01
df_BB1 = df[['G1', 'B11', 'B21', 'B61', 'B42']]
df_BB2 = pd.concat([df[['G2', 'B12', 'B22', 'B51', 'B62']], df_stm[['B41']]], axis=1, join_axes=[df.index])
df_lighting = pd.concat([df[['B31', 'B62']], df_stm[['B41']]], axis=1, join_axes=[df.index]) # is this correct?
df_MP1_real = df['MP1']
df_floorsLP = pd.concat(
[df_basementLP, df_groundLP, df_firstLP, df_secondLP, df_thirdLP, df_fourthLP, df_fifthLP, df_sixthLP],
axis=1) # B3 is not measured... (should be small)
df_floorsLP_sum = pd.concat([df_floorsLP, df[['Server']], df_lifts], axis=1)
df_floorsLP_sum = pd.DataFrame(df_floorsLP_sum.sum(axis=1), columns=['L&P'])
df_LVL1 = pd.concat([df_floorsLP_sum, df_mech], axis=1,
join_axes=[df.index]) # B3 is not measured... (should be small)
df_stm = pd.concat([df_stm], axis=1, join_axes=[df.index])
df_mains = pd.DataFrame(
pd.concat([df[['MP1']], df[['B31', 'B32']], df_stm[['3A', '3D']]], axis=1, join_axes=[df.index]).sum(
axis=1), columns=['Mains'])
if building_abr == '17': # 17
dateparse = lambda x: pd.datetime.strptime(x, '%d-%m-%y %H:%M')
# for pilot study
# df = pd.read_csv(DataPath + building + '/Data/17_actual_clean.csv', date_parser=dateparse, header=0, index_col=0)
df = pd.read_csv(DataPath + building + '/Data/17_SubmeteringData.csv', date_parser=dateparse, header=0,
index_col=0)
# Check if there are duplicate index values (of which there are in CH data) and remove them...
df = df[~df.index.duplicated(keep='first')]
# There are missing indices in the data, reindex the missing indices of which there are only a few and backfill them
df = df.reindex(pd.date_range(df.index.min(), df.index.max(), freq='30Min'), method='backfill')
df_realweather = pd.DataFrame()
df = df[:-1]
cols = df.columns.tolist()
print(df.columns)
cols_new = ['Gas', 'B_Power', 'B_Lights', 'B_AC', 'Print', 'Canteen', 'GF_Lights', 'Servers', 'GF_Power',
'1st_Lights', 'GF_AC', 'Lift', '1st_Power', '2nd_Lights', '2nd_Power']
for i, v in enumerate(cols):
df.rename(columns={cols[i]: cols_new[i]}, inplace=True)
# ['Gas', 'B_Power', 'B_Lights', 'B_AC', 'Print', 'Canteen', 'Server', 'GF_Power', 'GF_Lights', 'GF_AC', 'Lift', '1st_Power', '1st_Lights', '2nd_Power', '2nd_Lights']
df_lights = pd.concat([df[['B_Lights', 'GF_Lights', '1st_Lights', '2nd_Lights']]], axis=1)
df_lights = pd.DataFrame(df_lights.sum(axis=1), columns=['Lights'])
df_mech = pd.concat([df[['B_AC', 'GF_AC']]], axis=1)
df_mech = pd.DataFrame(df_mech.sum(axis=1), columns=['AC'])
df_power = pd.concat([df[['B_Power', 'GF_Power', '1st_Power', '2nd_Power']]], axis=1)
df_power = pd.DataFrame(df_power.sum(axis=1), columns=['Power'])
# L&P
df_floorsLP = pd.concat([df[['B_Power', 'B_Lights', 'GF_Power', 'GF_Lights', '1st_Power', '1st_Lights',
'2nd_Power', '2nd_Lights']]], axis=1) # B3 is not measured... (should be small)
df_LVL1 = pd.concat([df_lights, df_power, df[['Gas', 'Servers', 'Canteen', 'Print']]], axis=1)
df_mains = pd.DataFrame(
pd.concat([df_lights, df_power, df[['Servers', 'Canteen', 'Print']]], axis=1).sum(axis=1),
columns=['Mains'])
if building_abr == '71': # 71
dateparse = lambda x: pd.datetime.strptime(x, '%d-%m-%y %H:%M')
df = pd.read_csv(DataPath + building + '/Data/' + datafile + '_SubmeteringData.csv', date_parser=dateparse,
header=0, index_col=0)
# Check if there are duplicate index values (of which there are in CH data) and remove them...
df = df[~df.index.duplicated(keep='first')]
# There are missing indices in the data, reindex the missing indices of which there are only a few and backfill them
df = df.reindex(pd.date_range(df.index.min(), df.index.max(), freq='30Min'), method='backfill')
df_realweather = pd.DataFrame()
df = df[:-1]
df = df.loc['2012-04-01':'2017-10-31']
cols = df.columns.tolist()
# df = df.loc['2014-01-01':'2014-12-31'] # ['2016-09-01':'2017-04-30']
cols_new = ['Gas', 'B_Power', 'Canteen', 'Lifts', 'GF_Power', 'GF_Lights', 'GF_AC', '1st_Power', '1st_Lights',
'1st_AC', '2nd_Power', '2nd_Lights', '2nd_AC', '3rd_Power', '3rd_Lights', '3rd_AC']
# print(pd.DataFrame([df.columns, cols_new]).T)
for i, v in enumerate(cols):
df.rename(columns={cols[i]: cols_new[i]}, inplace=True)
"""Multiply gas by 100!"""
df = pd.concat([df.iloc[:, 0].mul(100), df.iloc[:, 1:]], axis=1)
df_lights = pd.concat([df[['GF_Lights', '1st_Lights', '2nd_Lights', '3rd_Lights']]], axis=1)
df_lights = pd.DataFrame(df_lights.sum(axis=1), columns=['Lights'])
df_mech = | pd.concat([df[['GF_AC', '1st_AC', '2nd_AC', '3rd_AC']]], axis=1) | pandas.concat |
import pandas as pd
import networkx as nx
import pytest
from kgextension.feature_selection import hill_climbing_filter, hierarchy_based_filter, tree_based_filter
from kgextension.generator import specific_relation_generator, direct_type_generator
class TestHillCLimbingFilter:
def test1_high_beta(self):
input_df = | pd.read_csv("test/data/feature_selection/hill_climbing_test1_input.csv") | pandas.read_csv |
# -*- coding: utf-8 -*-
# Copyright (c) 2018-2021, earthobservations developers.
# Distributed under the MIT License. See LICENSE for more info.
import logging
import operator
from abc import abstractmethod
from enum import Enum
from typing import Dict, Generator, List, Tuple, Union
import numpy as np
import pandas as pd
from pint import Quantity
from pytz import timezone
from tqdm import tqdm
from wetterdienst.core.scalar.result import StationsResult, ValuesResult
from wetterdienst.metadata.columns import Columns
from wetterdienst.metadata.resolution import Resolution
from wetterdienst.metadata.timezone import Timezone
from wetterdienst.metadata.unit import REGISTRY, OriginUnit, SIUnit
from wetterdienst.util.enumeration import parse_enumeration_from_template
from wetterdienst.util.logging import TqdmToLogger
log = logging.getLogger(__name__)
class ScalarValuesCore:
""" Core for sources of point data where data is related to a station """
# Fields for type coercion, needed for separation from fields with actual data
# that have to be parsed differently when having data in tabular form
@property
def _meta_fields(self) -> List[str]:
"""
Metadata fields that are independent of actual values and should be parsed
differently
:return: list of strings representing the metadata fields/columns
"""
if not self.stations.stations.tidy:
fields = [
Columns.STATION_ID.value,
Columns.DATE.value,
]
else:
fields = [
Columns.STATION_ID.value,
Columns.DATASET.value,
Columns.PARAMETER.value,
Columns.DATE.value,
Columns.VALUE.value,
Columns.QUALITY.value,
]
return fields
# Fields for date coercion
_date_fields = [Columns.DATE.value, Columns.FROM_DATE.value, Columns.TO_DATE.value]
# TODO: add data type (forecast, observation, ...)
# @property
# @abstractmethod
# def _has_quality(self) -> bool:
# """Attribute that tells if a weather service has quality, which otherwise will
# have to be set to NaN"""
# pass
@property
def data_tz(self) -> timezone:
""" Timezone of the published data """
return timezone(self._data_tz.value)
@property
@abstractmethod
def _data_tz(self) -> Timezone:
""" Timezone enumeration of published data. """
pass
@property
@abstractmethod
def _irregular_parameters(self) -> Tuple[str]:
"""Declaration of irregular parameters which will have to be parsed differently
then others e.g. when a parameter is a date."""
pass
@property
@abstractmethod
def _integer_parameters(self) -> Tuple[str]:
""" Integer parameters that will be parsed to integers. """
pass
@property
@abstractmethod
def _string_parameters(self) -> Tuple[str]:
""" String parameters that will be parsed to integers. """
pass
@property
def _complete_dates(self) -> pd.DatetimeIndex:
"""
Complete datetime index for the requested start and end date, used for
building a complementary pandas DataFrame with the date column on which
other DataFrames can be joined on
:return: pandas.DatetimeIndex
"""
start_date, end_date = self.stations.start_date, self.stations.end_date
if self.stations.stations.resolution == Resolution.MONTHLY:
end_date += pd.Timedelta(days=31)
elif self.stations.stations.resolution == Resolution.ANNUAL:
            end_date += pd.Timedelta(days=366)
date_range = pd.date_range(
start_date,
end_date,
freq=self.stations.frequency.value,
tz=self.data_tz,
)
return date_range
@property
def _base_df(self) -> pd.DataFrame:
"""
Base dataframe which is used for creating empty dataframes if no data is
found or for merging other dataframes on the full dates
:return: pandas DataFrame with a date column with complete dates
"""
return pd.DataFrame({Columns.DATE.value: self._complete_dates})
def convert_values_to_si(self, df: pd.DataFrame, dataset) -> pd.DataFrame:
"""
Function to convert values to metric units with help of conversion factors
:param df: pandas DataFrame that should be converted to SI units
:param dataset: dataset for which the conversion factors are created
:return: pandas DataFrame with converted (SI) values
"""
def _convert_values_to_si(series):
"""
Helper function to apply conversion factors column wise to a pandas DataFrame
:param series: pandas Series that should be converted
:return: converted pandas Series
"""
op, factor = conversion_factors.get(series.name, (None, None))
if not op or not factor:
return series
return op(series, factor)
conversion_factors = self._create_conversion_factors(dataset)
df = df.apply(_convert_values_to_si, axis=0)
return df
def _create_conversion_factors(
self, dataset
) -> Dict[str, Tuple[Union[operator.add, operator.mul], float]]:
"""
Function to create conversion factors based on a given dataset
:param dataset: dataset for which conversion factors are created
:return: dictionary with conversion factors for given parameter name
"""
dataset = dataset.name
dataset_accessor = self.stations.stations._dataset_accessor
if self.stations.stations._unique_dataset:
units = self.stations.stations._unit_tree[dataset_accessor]
else:
units = self.stations.stations._unit_tree[dataset_accessor][dataset]
conversion_factors = {}
# TODO eventually we may split this into smaller functions
for parameter in units:
origin_unit, si_unit = parameter.value
# Get parameter name
parameter = parameter.name
if self.stations.stations._unique_dataset:
parameter_value = self.stations.stations._dataset_tree[
dataset_accessor
][parameter].value
else:
parameter_value = self.stations.stations._dataset_tree[
dataset_accessor
][dataset][parameter].value
if si_unit == SIUnit.KILOGRAM_PER_SQUARE_METER.value:
# Fixed conversion factors to kg / m², as it only applies
# for water with density 1 g / cm³
if origin_unit == OriginUnit.MILLIMETER.value:
conversion_factors[parameter_value] = (operator.mul, 1)
else:
raise ValueError(
"manually set conversion factor for precipitation unit"
)
elif si_unit == SIUnit.DEGREE_KELVIN.value:
# Apply offset addition to temperature measurements
# Take 0 as this is appropriate for adding on other numbers
# (just the difference)
degree_offset = Quantity(0, origin_unit).to(si_unit).magnitude
conversion_factors[parameter_value] = (operator.add, degree_offset)
elif si_unit == SIUnit.PERCENT.value:
factor = REGISTRY(str(origin_unit)).to(str(si_unit)).magnitude
conversion_factors[parameter_value] = (operator.mul, factor)
else:
# For multiplicative units we need to use 1 as quantity to apply the
# appropriate factor
conversion_factors[parameter_value] = (
operator.mul,
Quantity(1, origin_unit).to(si_unit).magnitude,
)
return conversion_factors
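    @staticmethod
    def _example_conversion(series: pd.Series) -> pd.Series:
        """Illustrative sketch (not part of the original class): apply one
        (operator, factor) pair as produced by _create_conversion_factors,
        here an assumed Celsius -> Kelvin offset of (operator.add, 273.15)."""
        op, factor = operator.add, 273.15
        return op(series, factor)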
def __init__(self, stations: StationsResult) -> None:
self.stations = stations
@classmethod
def from_stations(cls, stations: StationsResult):
return cls(stations)
def __eq__(self, other):
""" Equal method of request object """
return (
self.stations.station_id == other.stations.station_id
and self.stations.parameter == other.stations.parameter
and self.stations.start_date == other.stations.start_date
and self.stations.end_date == other.stations.end_date
)
def __str__(self):
""" Str representation of request object """
# TODO: include source
# TODO: include data type
station_ids_joined = "& ".join(
[str(station_id) for station_id in self.stations.station_id]
)
parameters_joined = "& ".join(
[
parameter.value
for parameter, parameter_set in self.stations.stations.parameter
]
)
return ", ".join(
[
f"station_ids {station_ids_joined}",
f"parameters {parameters_joined}",
str(self.stations.start_date),
str(self.stations.end_date),
]
)
def _create_empty_station_parameter_df(
self, station_id: str, parameter: Enum, dataset: Enum
) -> pd.DataFrame:
"""
Function to create an empty DataFrame
:param station_id:
:param parameter:
:return:
"""
dataset_tree = self.stations.stations._dataset_tree
resolution = self.stations.stations.resolution
# if parameter is a whole dataset, take every parameter from the dataset instead
if parameter == dataset:
if self.stations.stations._unique_dataset:
parameter = [*dataset_tree[resolution.name]]
else:
parameter = [*dataset_tree[resolution.name][dataset.name]]
if self.stations.stations.tidy:
if not self.stations.stations.start_date:
return | pd.DataFrame(None, columns=self._meta_fields) | pandas.DataFrame |
'''
BSD 3-Clause License
Copyright (c) 2021, <NAME>
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
3. Neither the name of the copyright holder nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
'''
import geopandas as gpd
import pandas as pd
from .grids import (
GPS_to_grid,
area_to_params,
grid_to_centre,
grid_to_polygon
)
from .coordinates import getdistance
def clean_same(data, col=['VehicleNum', 'Time', 'Lng', 'Lat']):
'''
    Drop records that are identical to both the preceding and the following
    record (apart from the timestamp) to reduce the data volume. For example,
    if an individual has several consecutive records that differ only in time,
    only the first and the last of them are kept
Parameters
-------
data : DataFrame
Data
col : List
        Column names, in the order of ['VehicleNum', 'Time', 'Lng', 'Lat'],
        optionally followed by extra columns to compare. The data is sorted
        by time and records are compared on every column except the time
Returns
-------
data1 : DataFrame
Cleaned data
'''
[VehicleNum, Time, Lng, Lat] = col[:4]
extra = col[4:]
data1 = data.copy()
data1 = data1.drop_duplicates(subset=[VehicleNum, Time])
data1 = data1.sort_values(by=[VehicleNum, Time])
data1['issame'] = 0
for i in [VehicleNum, Lng, Lat]+extra:
data1['issame'] += (data1[i].shift() == data1[i]
) & (data1[i].shift(-1) == data1[i])
data1 = data1[-(data1['issame'] == len([VehicleNum, Lng, Lat]+extra))]
data1 = data1.drop('issame', axis=1)
return data1
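def _example_clean_same():
    # Illustrative usage (added, not original code): the file name and column
    # names are assumptions for a typical taxi GPS dataset.
    data = pd.read_csv('TaxiData.csv')
    return clean_same(data, col=['VehicleNum', 'Time', 'Lng', 'Lat'])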
def clean_drift(data, col=['VehicleNum', 'Time', 'Lng', 'Lat'],
speedlimit=80, dislimit=1000):
'''
    Delete drift points from trajectory data. A point is treated as drift if
    the speed (or distance) between it and both of its neighbouring points
    exceeds the speed limit (or distance limit), while the speed (or distance)
    between those two neighbouring points stays below the limit. The time
    column is processed more efficiently if it is already in datetime format.
Parameters
-------
data : DataFrame
Data
col : List
Column names, in the order of [‘VehicleNum’, ‘Time’, ‘Lng’, ‘Lat’]
    speedlimit : number
        Speed limit in km/h; pass None to skip the speed-based check
    dislimit : number
        Distance limit in meters; pass None to skip the distance-based check
Returns
-------
data1 : DataFrame
Cleaned data
'''
[VehicleNum, Time, Lng, Lat] = col
data1 = data.copy()
data1 = data1.drop_duplicates(subset=[VehicleNum, Time])
data1[Time+'_dt'] = pd.to_datetime(data1[Time])
data1 = data1.sort_values(by=[VehicleNum, Time])
for i in [VehicleNum, Lng, Lat, Time+'_dt']:
data1[i+'_pre'] = data1[i].shift()
data1[i+'_next'] = data1[i].shift(-1)
data1['dis_pre'] = getdistance(
data1[Lng],
data1[Lat],
data1[Lng+'_pre'],
data1[Lat+'_pre'])
data1['dis_next'] = getdistance(
data1[Lng],
data1[Lat],
data1[Lng+'_next'],
data1[Lat+'_next'])
data1['dis_prenext'] = getdistance(
data1[Lng+'_pre'],
data1[Lat+'_pre'],
data1[Lng+'_next'],
data1[Lat+'_next'])
data1['timegap_pre'] = data1[Time+'_dt'] - data1[Time+'_dt_pre']
data1['timegap_next'] = data1[Time+'_dt_next'] - data1[Time+'_dt']
data1['timegap_prenext'] = data1[Time+'_dt_next'] - data1[Time+'_dt_pre']
data1['speed_pre'] = data1['dis_pre'] / \
data1['timegap_pre'].dt.total_seconds()*3.6
data1['speed_next'] = data1['dis_next'] / \
data1['timegap_next'].dt.total_seconds()*3.6
data1['speed_prenext'] = data1['dis_prenext'] / \
data1['timegap_prenext'].dt.total_seconds()*3.6
if speedlimit:
data1 = data1[
-((data1[VehicleNum+'_pre'] == data1[VehicleNum]) &
(data1[VehicleNum+'_next'] == data1[VehicleNum]) &
(data1['speed_pre'] > speedlimit) &
(data1['speed_next'] > speedlimit) &
(data1['speed_prenext'] < speedlimit))]
if dislimit:
data1 = data1[
-((data1[VehicleNum+'_pre'] == data1[VehicleNum]) &
(data1[VehicleNum+'_next'] == data1[VehicleNum]) &
(data1['dis_pre'] > dislimit) &
(data1['dis_next'] > dislimit) &
(data1['dis_prenext'] < dislimit))]
data1 = data1[data.columns]
return data1
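def _example_clean_drift():
    # Illustrative usage (added, not original code): drop points that imply
    # speeds above 80 km/h or jumps above 1000 m towards both neighbours;
    # the file name is an assumption.
    data = pd.read_csv('TaxiData.csv')
    return clean_drift(data, col=['VehicleNum', 'Time', 'Lng', 'Lat'],
                       speedlimit=80, dislimit=1000)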
def clean_outofbounds(data, bounds, col=['Lng', 'Lat']):
'''
    Exclude data outside the study area, given the longitude and latitude
    of the lower-left and upper-right corners of that area
Parameters
-------
data : DataFrame
Data
bounds : List
Latitude and longitude of the lower left and upper right of
the study area, in the order of [lon1, lat1, lon2, lat2]
col : List
Column name of longitude and latitude
Returns
-------
data1 : DataFrame
Data within the scope of the study
'''
lon1, lat1, lon2, lat2 = bounds
if (lon1 > lon2) | (lat1 > lat2) | (abs(lat1) > 90) | (
abs(lon1) > 180) | (abs(lat2) > 90) | (abs(lon2) > 180):
raise Exception(
'Bounds error. The input bounds should be in the order \
of [lon1,lat1,lon2,lat2]. (lon1,lat1) is the lower left corner and \
(lon2,lat2) is the upper right corner.')
Lng, Lat = col
data1 = data.copy()
data1 = data1[(data1[Lng] > bounds[0]) & (data1[Lng] < bounds[2]) & (
data1[Lat] > bounds[1]) & (data1[Lat] < bounds[3])]
return data1
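def _example_clean_outofbounds():
    # Illustrative usage (added, not original code): keep only points inside an
    # assumed bounding box given as [lon1, lat1, lon2, lat2].
    data = pd.read_csv('TaxiData.csv')
    return clean_outofbounds(data, bounds=[113.75, 22.4, 114.62, 22.86],
                             col=['Lng', 'Lat'])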
def clean_outofshape(data, shape, col=['Lng', 'Lat'], accuracy=500):
'''
Input the GeoDataFrame of the study area and exclude the data beyond
the study area
Parameters
-------
data : DataFrame
Data
shape : GeoDataFrame
The GeoDataFrame of the study area
col : List
Column name of longitude and latitude
accuracy : number
        Grid size in meters. The data is gridded first and then cleaned per
        grid cell, so a smaller grid size gives higher accuracy at the cost
        of more computation
Returns
-------
data1 : DataFrame
Data within the scope of the study
'''
Lng, Lat = col
shape_unary = shape.unary_union
bounds = shape_unary.bounds
params = area_to_params(bounds, accuracy)
data1 = data.copy()
data1['LONCOL'], data1['LATCOL'] = GPS_to_grid(
data1[Lng], data1[Lat], params)
data1_gdf = data1[['LONCOL', 'LATCOL']].drop_duplicates()
data1_gdf['geometry'] = grid_to_polygon(
[data1_gdf['LONCOL'], data1_gdf['LATCOL']], params)
data1_gdf = gpd.GeoDataFrame(data1_gdf)
data1_gdf = data1_gdf[data1_gdf.intersects(shape_unary)]
data1 = pd.merge(data1, data1_gdf[['LONCOL', 'LATCOL']]).drop(
['LONCOL', 'LATCOL'], axis=1)
return data1
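def _example_clean_outofshape():
    # Illustrative usage (added, not original code): file names are assumptions.
    # A 500 m grid is built over the polygon bounds and only points whose grid
    # cell intersects the polygon are kept.
    data = pd.read_csv('TaxiData.csv')
    area = gpd.read_file('study_area.geojson')
    return clean_outofshape(data, area, col=['Lng', 'Lat'], accuracy=500)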
def clean_traj(data, col=['uid', 'str_time', 'lon', 'lat'], tripgap=1800,
disgap=50000, speedlimit=80):
'''
A combo for trajectory data cleaning, including defining the the time
length threshold considered as a new trip, and the distance threshold
considered as a new trip
Parameters
-------
data : DataFrame
Trajectory data
col : List
Column names, in the order of [‘VehicleNum’, ‘Time’, ‘Lng’, ‘Lat’]
tripgap : number
The time length threshold considered as a new trip
disgap : number
The distance threshold considered as a new trip
speedlimit : number
Speed limit
Returns
-------
data1 : DataFrame
Cleaned data
'''
uid, timecol, lon, lat = col
data[timecol] = | pd.to_datetime(data[timecol]) | pandas.to_datetime |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""Analyze CSV file into scores.
Created on Sat Feb 12 22:15:29 2022 // @hk_nien
"""
from pathlib import Path
import os
import re
import sys
import pandas as pd
import numpy as np
PCODES = dict([
# Regio Noord
(1011, 'Amsterdam'),
(1625, 'Hoorn|Zwaag'),
(1811, 'Alkmaar'),
(7471, 'Goor'),
(7556, 'Hengelo'),
(7903, 'Hoogeveen'),
(7942, 'Meppel'),
(8011, 'Zwolle'),
(8232, 'Lelystad'),
(8442, 'Heerenveen'),
(8911, 'Leeuwarden'),
(9291, 'Kollum'),
(9501, 'Stadskanaal'),
(9726, 'Groningen'),
# Regio Midden
(2406, '<NAME>/<NAME>'),
(2515, '<NAME>'),
(3013, 'Rotterdam'),
(3511, 'Utrecht'),
(3901, 'Veenendaal'),
((7137, 7131), 'Lichtenvoorde|Groenlo'),
(7311, 'Apeldoorn'),
# Regio Zuid
(4325, 'Renesse'),
(4462, 'Goes'),
(4701, 'Roosendaal'),
(5038, 'Tilburg'),
(5401, 'Uden'),
(5611, 'Eindhoven'),
(5801, 'Oostrum'),
(6101, 'Echt'),
(6229, 'Maastricht'),
(6541, 'Nijmegen'),
])
def get_bad_scan_times():
"""Return list of Timestamps with bad scan times, from CSV data."""
df = pd.read_csv('data-ggd/ggd_bad_scans.txt', comment='#')
tstamps = pd.to_datetime(df['Timestamp']).to_list()
return tstamps
def _mean_time(ts_list):
"""Return mean timestamp value from list of timestamps."""
ts0 = ts_list[0]
delta_sum = pd.Timedelta(0)
for ts in ts_list:
delta_sum += (ts -ts0)
ts_mean = ts0 + delta_sum / len(ts_list)
return ts_mean
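def _example_mean_time():
    # Added sanity check (not original code): the mean of 10:00 and 12:00 is 11:00.
    ts = [pd.Timestamp('2022-02-12 10:00'), pd.Timestamp('2022-02-12 12:00')]
    assert _mean_time(ts) == pd.Timestamp('2022-02-12 11:00')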
def _delta_time_hhmm(hm):
"""Convert 'hh:mm' string to TimeDelta."""
return | pd.Timedelta(f'{hm}:00') | pandas.Timedelta |
import numpy as np
import pandas as pd
cjxx1 = | pd.read_csv('../SourceData/bks_cjxx_out1-1.csv',usecols = ['xh','xn','xqm','ksrq','kch','kxh','kccj','xf','kcsxdm','xdfsdm']) | pandas.read_csv |
import os
import matplotlib.cm as mcm
import matplotlib.pyplot as plt
import pandas as pd
import pytest
from bevel.plotting import _DivergentBarPlotter
from bevel.plotting import divergent_stacked_bar
from pandas.testing import assert_frame_equal
@pytest.fixture
def sample_data_even():
a, b, c = 'a', 'b', 'c'
return pd.DataFrame.from_dict({
'group': [a, a, b, b, b, b, c, c, c, c],
'resps': [1, 2, 3, 4, 1, 2, 3, 2, 2, 3],
})
@pytest.fixture
def sample_data_odd():
a, b, c = 'a', 'b', 'c'
return pd.DataFrame.from_dict({
'group': [a, a, b, b, b, b, c, c, c, c],
'resps': [1, 2, 2, 3, 1, 1, 2, 3, 1, 2],
})
@pytest.fixture
def sample_dbp_odd(sample_data_odd):
return _DivergentBarPlotter(sample_data_odd, 'group', 'resps')
@pytest.fixture
def sample_dbp_even(sample_data_even):
return _DivergentBarPlotter(sample_data_even, 'group', 'resps')
class TestDivergentBarPlotter():
def test_midpoint_default_even(self, sample_dbp_even):
assert sample_dbp_even.midpoint == 2.5
def test_midpoint_default_odd(self, sample_dbp_odd):
assert sample_dbp_odd.midpoint == 2.0
def test_response_label_default(self, sample_dbp_even):
        assert sample_dbp_even.response_labels == {1: 1, 2: 2, 3: 3, 4: 4}
def test_compute_bar_sizes_even(self, sample_dbp_even):
actual = sample_dbp_even._compute_bar_sizes()
expected = pd.DataFrame([
{'resps': 1, 'a': -1.00, 'b': -0.50, 'c': -0.50},
{'resps': 2, 'a': -0.50, 'b': -0.25, 'c': -0.50},
{'resps': 4, 'a': +0.00, 'b': +0.50, 'c': +0.50},
{'resps': 3, 'a': +0.00, 'b': +0.25, 'c': +0.50},
])
expected = expected.set_index('resps', drop=True).rename_axis('group', axis='columns')
assert_frame_equal(actual, expected)
def test_compute_bar_sizes_odd(self, sample_dbp_odd):
actual = sample_dbp_odd._compute_bar_sizes()
expected = pd.DataFrame([
{'resps': 1, 'a': -0.75, 'b': -0.625, 'c': -0.50},
{'resps': 2, 'a': -0.25, 'b': -0.125, 'c': -0.25},
{'resps': 3, 'a': +0.25, 'b': +0.375, 'c': +0.50},
{'resps': 2, 'a': +0.25, 'b': +0.125, 'c': +0.25},
])
expected = expected.set_index('resps', drop=True).rename_axis('group', axis='columns')
| assert_frame_equal(actual, expected) | pandas.testing.assert_frame_equal |
import datetime
import pandas as pd
import plotly.express as px
import streamlit as st
def clean_dataframe(df):
df = df.drop(columns=[0])
df.rename(
columns={
1: "errand_date",
2: "scrape_time",
3: "rekyl_id",
4: "status",
5: "reporter",
6: "apartment",
7: "kategori",
8: "detaljer",
},
inplace=True,
)
return df
def reformat_dataframe(cleaned_df):
reformat_df = (
cleaned_df.groupby(["rekyl_id", "status", "kategori", "reporter", "detaljer"])
.agg({"scrape_time": "min", "errand_date": "min"})
.sort_values(by=["scrape_time"], ascending=False)
.reset_index()
)
reformat_df["scrape_time"] = pd.to_datetime(reformat_df["scrape_time"])
reformat_df["errand_date"] = pd.to_datetime(reformat_df["errand_date"])
return reformat_df
def add_info_flags(reform_df):
pivoted = reform_df.pivot(
values=["scrape_time"],
index=["rekyl_id", "errand_date", "kategori", "reporter", "detaljer"],
columns=["status"],
).reset_index()
pivoted["time_to_complete"] = (
pivoted["scrape_time"]["Avslutad"] - pivoted["errand_date"]
).dt.days
pivoted["is_completed"] = pivoted.apply(
lambda row: "No" if | pd.isnull(row.scrape_time.Avslutad) | pandas.isnull |
import scanpy as sc
import numpy as np
import scipy as sp
from skmisc.loess import loess
from statsmodels.stats.multitest import multipletests
from scipy.stats import rankdata
import pandas as pd
import time
def score_cell(data,
gene_list,
gene_weight=None,
suffix='',
ctrl_opt='mean_match',
trs_opt='vst',
bc_opt='empi',
ctrlgene_list=None,
n_ctrl=1,
n_genebin=200,
cov_list=None,
random_seed=0,
verbose=False,
copy=False,
return_list=['trs_ep', 'trs_ez']):
"""Score cells based on the trait gene set
Args
----
data (n_cell, n_gene) : AnnData
adata.X should contain size-normalized log1p transformed count data
gene_list (n_trait_gene) : list
Trait gene list
gene_weight (n_trait_gene) : list/np.ndarray
Gene weights for genes in the gene_list.
If gene_weight=None, the weigts are set to be one.
suffix : str
The name of the added cell-level annotations would be
['trs', 'trs_z', 'trs_tp', 'trs_ep', 'trs_ez']+suffix
ctrl_opt : str
Option for selecting the null geneset
None: not using a null geneset
'random': size matched random geneset
'mean_match' size-and-mean-matched random geneset
'mean_bvar_match': size-and-mean-and-bvar-matched random geneset. bvar means biological variance.
trs_opt : str
Option for computing TRS
'mean': average over the genes in the gene_list
'vst': weighted average with weights equal to 1/sqrt(technical_variance_of_logct)
'inv_std': weighted average with weights equal to 1/std
bc_opt : str
Option for cell-wise background correction
None: no correction.
'recipe_vision': normalize by cell-wise mean&var computed using all genes.
'empi': normalize by cell-wise mean&var stratified by mean bins.
ctrlgene_list (n_ctrl_gene) : list
List of control genes to use
n_ctrl : int
Number of control genesets
n_genebin : int
Number of gene bins (to divide the genes by expression)
Only useful when ctrl_opt is not None
cov_list : list of str
Covariates to control for.
The covariates are first centered and then regressed out.
Elements in cov_list should be present in data.obs.columns
random_seed : int
Random seed
copy : bool
If to make copy of the AnnData object
return_list : list
Items to return
Should be a subset of ['trs', 'trs_z', 'trs_tp', 'trs_ep', 'trs_ez']
Returns
-------
adata (n_cell, n_gene) : AnnData
Columns added to data.obs as specified by return_list
"""
np.random.seed(random_seed)
adata = data.copy() if copy else data
# Pre-compute statistics
var_set = set(['mean','var','var_tech'])
obs_set = set(['mean','var'])
if (len(var_set-set(adata.var.columns))>0) | (len(obs_set-set(adata.obs.columns))>0):
if verbose: print('# score_cell: recompute statistics using method.compute_stats')
compute_stats(adata)
# Check options
ctrl_opt_list = [None, 'given', 'random', 'mean_match', 'mean_bvar_match']
trs_opt_list = ['mean', 'vst', 'inv_std']
bc_opt_list = [None, 'recipe_vision', 'empi']
if ctrl_opt not in ctrl_opt_list:
raise ValueError('# score_cell: ctrl_opt not in [%s]'%', '.join([str(x) for x in ctrl_opt_list]))
if trs_opt not in trs_opt_list:
raise ValueError('# score_cell: trs_opt not in [%s]'%', '.join([str(x) for x in trs_opt_list]))
if bc_opt not in bc_opt_list:
raise ValueError('# score_cell: bc_opt not in [%s]'%', '.join([str(x) for x in bc_opt_list]))
if cov_list is not None:
temp_list = list(set(cov_list) - set(adata.obs.columns))
if len(temp_list)>0:
raise ValueError('# score_cell: covariates %s not in data.obs.columns'%','.join(temp_list))
if (len(cov_list)>0) & ('mean' not in cov_list):
raise ValueError('# score_cell: mean needs to be in cov_list')
if verbose:
print('# score_cell: suffix=%s, ctrl_opt=%s, trs_opt=%s, bc_opt=%s'%(suffix, ctrl_opt, trs_opt, bc_opt))
print('# score_cell: n_ctrl=%d, n_genebin=%d'%(n_ctrl, n_genebin))
# Gene-wise statistics
df_gene = pd.DataFrame(index=adata.var_names)
df_gene['gene'] = df_gene.index
df_gene['mean'] = adata.var['mean']
df_gene['var'] = adata.var['var'].values
df_gene['tvar'] = adata.var['var_tech'].values
df_gene['bvar'] = df_gene['var'].values - df_gene['tvar'].values
df_gene.drop_duplicates(subset='gene', inplace=True)
# Update gene_list
gene_list = list(gene_list)
n_gene_old = len(gene_list)
df_trait_gene = pd.DataFrame(index=gene_list, columns=['gene', 'gene_weight'], data=0)
df_trait_gene['gene'] = df_trait_gene.index
df_trait_gene['gene_weight'] = 1 if gene_weight is None else np.array(gene_weight)
df_trait_gene.drop_duplicates(subset='gene', inplace=True)
gene_list = list(set(df_gene['gene'].values) & set(gene_list))
gene_list.sort()
df_trait_gene = df_trait_gene.loc[gene_list].copy()
gene_weight = df_trait_gene['gene_weight'].values.copy()
if verbose:
print('# score_cell: %-15s %-15s %-20s'
%('trait geneset,', '%d/%d genes,'%(len(gene_list),n_gene_old),
'mean_exp=%0.2e'%df_gene.loc[gene_list, 'mean'].mean()))
# Select control genes: put all methods in _select_ctrl_geneset
dic_ctrl_list,dic_ctrl_weight = _select_ctrl_geneset(df_gene,
gene_list, gene_weight,
ctrl_opt, ctrlgene_list,
n_ctrl, n_genebin,
random_seed, verbose)
# Compute TRS: put all methods in _compute_trs
dic_trs = {}
dic_trs['trs'] = _compute_trs(adata, gene_list, gene_weight, trs_opt, cov_list=cov_list)
for i_list in dic_ctrl_list.keys():
dic_trs['trs_ctrl%d'%i_list] = _compute_trs(adata,
dic_ctrl_list[i_list],
dic_ctrl_weight[i_list],
trs_opt, cov_list=cov_list)
# Correct cell-specific and geneset-specific background: put all methods in _correct_background
_correct_background(adata, dic_trs, bc_opt)
# Get p-value
if 'trs_tp' in return_list:
dic_trs['trs_tp'] = 1 - sp.stats.norm.cdf(dic_trs['trs_z'])
if len(dic_ctrl_list.keys())>0:
v_ctrl_trs_z = []
for i_list in dic_ctrl_list.keys():
v_ctrl_trs_z += list(dic_trs['trs_ctrl%d_z'%i_list])
dic_trs['trs_ep'] = get_p_from_empi_null(dic_trs['trs_z'], v_ctrl_trs_z)
if 'trs_ez' in return_list:
dic_trs['trs_ez'] = -sp.stats.norm.ppf(dic_trs['trs_ep'])
dic_trs['trs_ez'] = dic_trs['trs_ez'].clip(min=-10,max=10)
for term in return_list:
if term in dic_trs.keys():
adata.obs['%s%s'%(term,suffix)] = dic_trs[term].copy()
else:
print('# score_cell: %s not computed'%term)
return adata if copy else None
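def _example_score_cell(adata, trait_genes):
    # Illustrative usage (added, not original code): argument values are assumptions.
    # Score cells for trait_genes against 20 mean/bvar-matched control gene sets with
    # empirical cell-wise background correction; results are written to adata.obs.
    score_cell(adata, trait_genes, suffix='_trait', ctrl_opt='mean_bvar_match',
               trs_opt='vst', bc_opt='empi', n_ctrl=20,
               return_list=['trs_ep', 'trs_ez'])
    return adata.obs[['trs_ep_trait', 'trs_ez_trait']]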
def _select_ctrl_geneset(input_df_gene, gene_list, gene_weight,
ctrl_opt, ctrlgene_list,
n_ctrl, n_genebin, random_seed, verbose):
"""Subroutine for score_cell, select control genesets
Args
----
input_df_gene (adata.shape[1], n_statistic) : pd.DataFrame
Gene-wise statistics
gene_list (n_trait_gene) : list
Trait gene list
gene_weight (n_trait_gene) : list/np.ndarray
Gene weights for genes in the gene_list.
ctrl_opt : str
Option for selecting the null geneset
None: not using a null geneset
'random': size matched random geneset
'mean_match' size-and-mean-matched random geneset
'mean_bvar_match': size-and-mean-and-bvar-matched random geneset. bvar means biological variance.
ctrlgene_list (n_ctrl_gene) : list
List of control genes to use
n_ctrl : int
Number of control genesets
n_genebin : int
Number of gene bins (to divide the genes by expression)
Only useful when ctrl_opt is not None
random_seed : int
Random seed
Returns
-------
dic_ctrl_list : dictionary
dic_ctrl_list[i]: the i-th control gene list (a list)
dic_ctrl_weight : dictionary
dic_ctrl_weight[i]: weights for the i-th control gene list (a list)
"""
np.random.seed(random_seed)
df_gene = input_df_gene.copy()
gene_list = list(gene_list)
df_trait_gene = pd.DataFrame(index=gene_list, columns=['gene', 'gene_weight'], data=0)
df_trait_gene['gene'] = df_trait_gene.index
df_trait_gene['gene_weight'] = list(gene_weight)
dic_ctrl_list = {}
dic_ctrl_weight = {}
if ctrl_opt=='given':
dic_ctrl_list[0] = ctrlgene_list
dic_ctrl_weight[0] = np.ones(len(ctrlgene_list))
if ctrl_opt=='random':
for i_list in np.arange(n_ctrl):
ind_select = np.random.permutation(df_gene.shape[0])[:len(gene_list)]
dic_ctrl_list[i_list] = list(df_gene['gene'].values[ind_select])
dic_ctrl_weight[i_list] = df_trait_gene['gene_weight'].values.copy()
if ctrl_opt=='mean_match':
# Divide genes into bins based on their rank of mean expression
df_gene['qbin'] = pd.qcut(df_gene['mean'], q=n_genebin, labels=False)
df_gene_bin = df_gene.groupby('qbin').agg({'gene':set})
gene_list_as_set = set(gene_list)
for i_list in np.arange(n_ctrl):
dic_ctrl_list[i_list] = []
dic_ctrl_weight[i_list] = []
for bin_ in df_gene_bin.index:
temp_overlap_list = list(df_gene_bin.loc[bin_,'gene'] & gene_list_as_set)
temp_overlap_list.sort()
n_gene_in_bin = len(temp_overlap_list)
if n_gene_in_bin>0:
temp_list = list(df_gene_bin.loc[bin_, 'gene'])
temp_list.sort()
v_gene_bin = np.array(temp_list)
ind_select = np.random.permutation(v_gene_bin.shape[0])[0:n_gene_in_bin]
dic_ctrl_list[i_list] += list(v_gene_bin[ind_select])
dic_ctrl_weight[i_list] += list(df_trait_gene.loc[temp_overlap_list,'gene_weight'].values)
if ctrl_opt=='mean_bvar_match':
# Divide genes into bins based on their rank of mean expression and biological variance
n_qbin = int(np.ceil(np.sqrt(n_genebin)))
df_gene['mean_qbin'] = pd.qcut(df_gene['mean'], q=n_qbin, labels=False)
df_gene['qbin'] = ''
for bin_ in set(df_gene['mean_qbin']):
ind_select = (df_gene['mean_qbin']==bin_)
df_gene.loc[ind_select,'qbin'] = ['%d.%d'%(bin_,x) for x in pd.qcut(df_gene.loc[ind_select,'bvar'],
q=n_qbin, labels=False)]
df_gene_bin = df_gene.groupby('qbin').agg({'gene':set})
gene_list_as_set = set(gene_list)
for i_list in np.arange(n_ctrl):
dic_ctrl_list[i_list] = []
dic_ctrl_weight[i_list] = []
for bin_ in df_gene_bin.index:
temp_overlap_list = list(df_gene_bin.loc[bin_,'gene'] & gene_list_as_set)
temp_overlap_list.sort()
n_gene_in_bin = len(temp_overlap_list)
if n_gene_in_bin>0:
temp_list = list(df_gene_bin.loc[bin_, 'gene'])
temp_list.sort()
v_gene_bin = np.array(temp_list)
ind_select = np.random.permutation(v_gene_bin.shape[0])[0:n_gene_in_bin]
dic_ctrl_list[i_list] += list(v_gene_bin[ind_select])
dic_ctrl_weight[i_list] += list(df_trait_gene.loc[temp_overlap_list,'gene_weight'].values)
if verbose:
for i_list in dic_ctrl_list.keys():
print('# score_cell: %-15s %-15s %-20s'
%('ctrl%d geneset,'%i_list, '%d genes,'%len(dic_ctrl_list[i_list]),
'mean_exp=%0.2e'%df_gene.loc[dic_ctrl_list[i_list], 'mean'].mean()))
return dic_ctrl_list,dic_ctrl_weight
def _compute_trs(adata, gene_list, gene_weight, trs_opt, cov_list=None):
"""Compute TRS
Args
----
adata (n_cell, n_gene) : AnnData
adata.X should contain size-normalized log1p transformed count data
gene_list (n_trait_gene) : list
Trait gene list
gene_weight (n_trait_gene) : list/np.ndarray
Gene weights for genes in the gene_list
trs_opt : str
Option for computing TRS
'mean': average over the genes in the gene_list
'vst': weighted average with weights equal to 1/sqrt(technical_variance_of_logct)
'inv_std': weighted average with weights equal to 1/std
Returns
-------
v_trs (n_cell,) : np.ndarray
Raw TRS
"""
gene_list = list(gene_list)
gene_weight = np.ones(len(gene_list)) if gene_weight is None else np.array(gene_weight)
if trs_opt=='mean':
v_trs_weight = np.ones(len(gene_list))
v_trs_weight *= gene_weight
v_trs_weight /= v_trs_weight.sum()
temp_v = adata[:, gene_list].X.dot(v_trs_weight)
v_trs = np.array(temp_v, dtype=np.float64).reshape([-1])
if trs_opt=='vst':
# v_trs_weight = 1 / np.sqrt(adata.var.loc[gene_list,'var_tech'].values.clip(min=1e-1))
v_trs_weight = 1 / np.sqrt(adata.var.loc[gene_list,'var_tech'].values.clip(min=1e-2))
v_trs_weight *= gene_weight
v_trs_weight /= v_trs_weight.sum()
temp_v = adata[:, gene_list].X.dot(v_trs_weight)
v_trs = np.array(temp_v, dtype=np.float64).reshape([-1])
if trs_opt=='inv_std':
# v_trs_weight = 1 / np.sqrt(adata.var.loc[gene_list,'var'].values.clip(min=1e-1))
v_trs_weight = 1 / np.sqrt(adata.var.loc[gene_list,'var'].values.clip(min=1e-2))
v_trs_weight *= gene_weight
v_trs_weight /= v_trs_weight.sum()
temp_v = adata[:, gene_list].X.dot(v_trs_weight)
v_trs = np.array(temp_v, dtype=np.float64).reshape([-1])
# Regress out covariates if needed
if cov_list is not None:
mat_X = adata.obs[cov_list].values.copy()
mat_X = mat_X - mat_X.mean(axis=0)
v_trs = _reg_out(v_trs, mat_X)
return v_trs
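def _example_vst_weights(adata, gene_list):
    # Added sketch (not original code) of the 'vst' weighting used above: weights are
    # 1/sqrt(technical variance), clipped at 1e-2, then normalised to sum to one.
    w = 1 / np.sqrt(adata.var.loc[gene_list, 'var_tech'].values.clip(min=1e-2))
    return w / w.sum()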
def _reg_out(mat_Y, mat_X):
"""Regress mat_X out of mat_Y
Args
----
mat_Y (n_sample, n_response) : np.ndarray
Response variable
mat_X (n_sample, n_covariates) : np.ndarray
Covariates
Returns
-------
mat_Y_resid (n_sample, n_response) : np.ndarray
Response variable residual
"""
mat_X = np.array(mat_X)
if len(mat_X.shape)==1:
mat_X = mat_X.reshape([-1,1])
mat_Y = np.array(mat_Y)
if len(mat_Y.shape)==1:
mat_Y = mat_Y.reshape([-1,1])
n_sample = mat_Y.shape[0]
mat_xtx = np.dot(mat_X.T, mat_X)/n_sample
mat_xty = np.dot(mat_X.T, mat_Y)/n_sample
mat_coef = np.linalg.solve(mat_xtx, mat_xty)
mat_Y_resid = mat_Y - mat_X.dot(mat_coef)
if mat_Y_resid.shape[1]==1:
mat_Y_resid = mat_Y_resid.reshape([-1])
return mat_Y_resid
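def _example_reg_out():
    # Added sanity check (not original code): regressing a centered covariate x out of
    # y = 2*x + noise leaves residuals that are essentially uncorrelated with x.
    x = np.random.randn(1000, 1)
    y = 2 * x[:, 0] + 0.1 * np.random.randn(1000)
    resid = _reg_out(y, x - x.mean(axis=0))
    return np.corrcoef(resid, x[:, 0])[0, 1]  # close to 0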
def _correct_background(adata, dic_trs, bc_opt):
"""Cell-wise and gene-wise background correction
Args
----
adata (n_cell, n_gene) : AnnData
adata.X should contain size-normalized log1p transformed count data
dic_trs : dictionary
Each element has dimension (n_cell,)
Trait TRS and control TRSs
bc_opt : str
Option for cell-wise background correction
None: no correction.
'recipe_vision': normalize by cell-wise mean&var computed using all genes.
'empi': normalize by cell-wise mean&var stratified by mean bins.
Returns
-------
Add trs_z and trs_ctrl%d_z to dic_trs (n_cell,) : np.ndarray
Normalized TRS z_score
"""
# Cell-specific background correction
trs_ctrl_list = [x for x in dic_trs if 'ctrl' in x]
v_mean,v_std = adata.obs['mean'].values,np.sqrt(adata.obs['var'].values)
n_cell = adata.shape[0]
if bc_opt is None:
for trs_name in ['trs']+trs_ctrl_list:
dic_trs['%s_z'%trs_name] = dic_trs[trs_name]
if bc_opt == 'recipe_vision':
for trs_name in ['trs']+trs_ctrl_list:
dic_trs['%s_z'%trs_name] = (dic_trs[trs_name] - v_mean) / v_std
if bc_opt == 'empi':
# Using TRSs to estimate empirical cell-specific background TRS mean&std
if len(trs_ctrl_list)==0:
raise ValueError('# score_cell: bc_opt=%s only works when n_ctrl>0'%bc_opt)
df_cell = None
for trs_name in ['trs']+trs_ctrl_list:
temp_df = pd.DataFrame()
temp_df['mean'] = v_mean
temp_df['trs'] = dic_trs[trs_name]
if df_cell is None:
df_cell = temp_df.copy()
else:
df_cell = | pd.concat([df_cell, temp_df], axis=0) | pandas.concat |
from flask import Flask, render_template, request, Response, send_file
import matplotlib
import io
import base64
from PIL import Image
from textwrap import wrap
import pandas as pd
import geopandas as gpd
import matplotlib.pyplot as plt
import matplotlib.colors as pltcolors
import matplotlib.ticker as ticker
from mpl_toolkits.axes_grid1.inset_locator import zoomed_inset_axes, mark_inset
from mpl_toolkits.axes_grid1 import make_axes_locatable
matplotlib.use('Agg')
app = Flask(__name__)
@app.route('/')
def hello():
return render_template('index.html')
@app.route('/process', methods=['POST'])
def mapping():
content = request.json
# files
file_sin_antartida = 'data/provincias_sin_antartida.geojson'
file_con_antartida = 'data/provincias_con_antartida.geojson'
# dataframes
df_data = pd.DataFrame(content['data'])
df_data['id'] = df_data[content['provincia']].astype(str)
df_data[content['datos']] = df_data[content['datos']].astype(int)
gdf_sin_antartida = gpd.read_file(file_sin_antartida)
# si tengo antártida como option lo levanto
if content['antartida']:
gdf_con_antartida = gpd.read_file(file_con_antartida)
df_data_con_antartidad = pd.merge(
gdf_con_antartida, df_data, right_on='id', left_on='id')
df_data = pd.merge(gdf_sin_antartida, df_data, right_on='id', left_on='id')
# color settings
cmap_colors = pltcolors.LinearSegmentedColormap.from_list(
"", content['colors'])
# big plot
f, ax = plt.subplots(figsize=(30, 13))
df_data.plot(ax=ax, column=content['datos'],
cmap=cmap_colors,
figsize=(30, 13),
edgecolor="grey",
linewidth=0.4,
legend=content['legend'],
scheme="userdefined",
classification_kwds={'bins': [float(i) for i in content['classification']]})
plt.title(content['title'])
# legend
if content['legend']:
leg = ax.get_legend()
leg.set_bbox_to_anchor((1.4, 0.1))
# datatable
if content['datatable']:
col_labels = ['Provincia', "\n".join(wrap(content['datos'], 20))]
df_table = df_data[['name', content['datos']]].sort_values(
by='name', ascending=True).head(25)
table_vals = df_table.values.tolist()
the_table = plt.table(cellText=table_vals,
colWidths=[0]*len(table_vals),
colLabels=col_labels,
loc='right', zorder=3)
the_table.auto_set_column_width(col=list(range(len(table_vals))))
the_table.set_fontsize(14)
the_table.scale(1, 1.7)
# CABA PLOT
ax_caba = zoomed_inset_axes(ax, 10, loc=7)
minx,miny,maxx,maxy = df_data.query('id == "02"').total_bounds
ax_caba.set_xlim(minx, maxx)
ax_caba.set_ylim(miny, maxy)
df_data.plot(
ax=ax_caba,
column=content['datos'],
cmap=cmap_colors,
figsize=(30, 13),
edgecolor="black",
linewidth=0.4,
scheme="userdefined",
classification_kwds={'bins': [int(float(i)) for i in content['classification']]}
)
dual_ax = mark_inset(ax, ax_caba, loc1=1, loc2=1, fc="none", ec="0.5")
# chart settings
ax.axes.xaxis.set_ticklabels([])
ax.axes.yaxis.set_ticklabels([])
ax_caba.axes.xaxis.set_ticklabels([])
ax_caba.axes.yaxis.set_ticklabels([])
    # save the figure to an in-memory bytes buffer
arg_file = io.BytesIO()
plt.savefig(arg_file, format='png', bbox_inches='tight')
if content['antartida']:
f, ax_tdf = plt.subplots(figsize=(2, 2))
df_data_con_antartidad.plot(
ax=ax_tdf,
column=content['datos'],
cmap=cmap_colors,
figsize=(2, 2),
edgecolor="grey",
linewidth=0.4,
scheme="userdefined",
classification_kwds={'bins': [int(float(i))
for i in content['classification']]}
)
minx, miny, maxx, maxy = df_data_con_antartidad.query('id == "94"').total_bounds
ax_tdf.set_xlim(minx, maxx)
ax_tdf.set_ylim(miny, maxy)
plt.setp(ax_tdf.get_xticklabels(), visible=False)
plt.setp(ax_tdf.get_yticklabels(), visible=False)
# save file
tdf_file = io.BytesIO()
plt.savefig(tdf_file, format='png', bbox_inches='tight')
# merge pngs
background = Image.open(arg_file)
foreground = Image.open(tdf_file)
background.paste(foreground, (350, 823), foreground)
arg_file = io.BytesIO()
background.save(arg_file, format='PNG')
data = base64.encodebytes(arg_file.getvalue())
# freewilly
del df_data
del gdf_sin_antartida
try:
del gdf_con_antartida
del df_data_con_antartidad
except:
pass
return data
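# Illustrative client call (added, not part of the app): the /process route expects a
# JSON body with the fields read above; the payload values and the locally running
# server URL are assumptions.
def _example_request():
    import requests  # assumed to be available in the client environment
    payload = {
        'data': [{'provincia': '02', 'casos': 10}, {'provincia': '06', 'casos': 250}],
        'provincia': 'provincia', 'datos': 'casos',
        'colors': ['#ffffff', '#ff0000'], 'classification': ['50', '100'],
        'title': 'Casos por provincia', 'legend': True, 'datatable': False,
        'antartida': False,
    }
    return requests.post('http://localhost:5000/process', json=payload)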
@app.route('/process-old', methods=['POST'])
def mapping_old():
content = request.json
# dataframes
df = pd.DataFrame(content['data'])
df['id'] = df[content['provincia']]
file = 'data/provincias_sin_antartida.geojson'
gdf = gpd.read_file(file)
df = | pd.merge(gdf, df, right_on='id', left_on='id') | pandas.merge |
# -*- coding: utf-8 -*-
from spider.https import Http
from spider.jsonparse import JsonParse
from spider.setting import headers
from spider.setting import cookies
import time
import logging
import pandas as pd
from bs4 import BeautifulSoup
class Spider:
def __init__(self,kdList, cityList):
self.kdList = kdList
self.cityList = cityList
self.url = 'https://www.lagou.com/jobs/positionAjax.json'
self.df = pd.DataFrame()
def getInfo(self, url, para):
"""
获取信息
"""
generalHttp = Http()
htmlCode = generalHttp.post(url, para=para, headers=headers, cookies=cookies)
generalParse = JsonParse(htmlCode)
pageCount = generalParse.parsePage()
for i in range(1, pageCount + 1):
            print('Page %s' % i)
para['pn'] = str(i)
htmlCode = generalHttp.post(url, para=para, headers=headers, cookies=cookies)
generalParse = JsonParse(htmlCode)
df2 = generalParse.parseInfo()
self.df = | pd.concat([self.df, df2], ignore_index=True) | pandas.concat |
# -*- coding: utf-8 -*-
"""
Created on Thu Dec 30 10:31:31 2021
@author: Administrator
"""
# -*- coding: utf-8 -*-
"""
Created on Wed Dec 22 11:25:22 2021
@author: Administrator
"""
import h5py
# from pyram.PyRAM import PyRAM
from scipy import interpolate
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import datetime as dt
import glob
import os
import sys
os.chdir(r'D:\passive_acoustics\propagation_modelling')
import gsw
from netCDF4 import Dataset
import cartopy
import cartopy.crs as ccrs
from scipy.ndimage import gaussian_filter
import arlpy.uwapm as pm
modelfrec=500
# load data and slice out region of interest
# read mapdata
latlim=[-62,-56]
lonlim=[-(46+5),-(46-5)]
spacer=1
gebcofile=r"C:\Users\a5278\Documents\gebco_2020_netcdf\GEBCO_2020.nc"
gebco = Dataset(gebcofile, mode='r')
g_lons = gebco.variables['lon'][:]
g_lon_inds = np.where((g_lons>=lonlim[0]) & (g_lons<=lonlim[1]))[0]
# jump over entries to reduce data
g_lon_inds=g_lon_inds[::spacer]
g_lons = g_lons[g_lon_inds]
g_lats = gebco.variables['lat'][:]
g_lat_inds = np.where((g_lats>=latlim[0]) & (g_lats<=latlim[1]))[0]
# jump over entries to reduce data
g_lat_inds=g_lat_inds[::spacer]
g_lats = g_lats[g_lat_inds]
d = gebco.variables['elevation'][g_lat_inds, g_lon_inds]
gebco.close()
#%% get bathymetry slices
import pyresample
lo,la=np.meshgrid(g_lons, g_lats)
grid = pyresample.geometry.GridDefinition(lats=la, lons=lo)
m_loc=[-( 45+57.548/60) , -(60+24.297/60)]
from pyproj import Geod
geod = Geod("+ellps=WGS84")
bearings=np.arange(360)
bathy_dict={}
points_lat=pd.DataFrame()
points_lon=pd.DataFrame()
for b in bearings:
print(b)
points = geod.fwd_intermediate(lon1=m_loc[0],lat1=m_loc[1],azi1=b,npts=500,del_s=1000 )
p_lon=points[3]
p_lat=points[4]
points_lat=pd.concat( [points_lat,pd.DataFrame(p_lat)],ignore_index=True,axis=1 )
points_lon=pd.concat( [points_lon,pd.DataFrame(p_lon)],ignore_index=True,axis=1 )
swath = pyresample.geometry.SwathDefinition(lons=p_lon, lats=p_lat)
# Determine nearest (w.r.t. great circle distance) neighbour in the grid.
_, _, index_array, distance_array = pyresample.kd_tree.get_neighbour_info(
source_geo_def=grid, target_geo_def=swath, radius_of_influence=500000,
neighbours=1)
# get_neighbour_info() returns indices in the flattened lat/lon grid. Compute
# the 2D grid indices:
index_array_2d = np.unravel_index(index_array, grid.shape)
value = d[index_array_2d[0],index_array_2d[1]]
dvec=np.arange(0,1000*500,1000)
bb=np.transpose(np.array([dvec,-value.data]))
bathy_dict[b]= bb.tolist()
timevec = pd.Series( pd.date_range(start=pd.Timestamp('2016-01-01'),end=pd.Timestamp('2017-01-01'),freq='M') )
#%%
datestr=timevec[7]
tl_mat_dict={}
tl_map_dict={}
for datestr in timevec:
ncfile=r"D:\copernicus_data\\" + datestr.strftime('%Y-%m-%d') + r"_ocean_reanalysis.nc"
nc = Dataset(ncfile)
la,lo=np.meshgrid(nc['latitude'][:].data, nc['longitude'][:].data)
grid = pyresample.geometry.GridDefinition(lats=la, lons=lo)
m_loc=[-( 45+57.548/60) , -(60+24.297/60)]
geod = Geod("+ellps=WGS84")
bearings=np.arange(360)
z_ss_dict={}
rp_ss_dict={}
cw_dict={}
points_lat=pd.DataFrame()
points_lon=pd.DataFrame()
tl_mat_ray=pd.DataFrame()
lat_mat_ray=pd.DataFrame()
lon_mat_ray=pd.DataFrame()
for b in bearings:
print(b)
points = geod.fwd_intermediate(lon1=m_loc[0],lat1=m_loc[1],azi1=b,npts=500,del_s=1000 )
p_lon=points[3]
p_lat=points[4]
points_lat=pd.concat( [points_lat,pd.DataFrame(p_lat)],ignore_index=True,axis=1 )
points_lon=pd.concat( [points_lon,pd.DataFrame(p_lon)],ignore_index=True,axis=1 )
swath = pyresample.geometry.SwathDefinition(lons=p_lon, lats=p_lat)
# Determine nearest (w.r.t. great circle distance) neighbour in the grid.
_, _, index_array, distance_array = pyresample.kd_tree.get_neighbour_info(
source_geo_def=grid, target_geo_def=swath, radius_of_influence=500000,
neighbours=1)
# get_neighbour_info() returns indices in the flattened lat/lon grid. Compute
# the 2D grid indices:
index_array_2d = np.unravel_index(index_array, grid.shape)
temp = nc['thetao'][:][0,:,index_array_2d[1],index_array_2d[0]]
sal = nc['so'][:][0,:,index_array_2d[1],index_array_2d[0] ]
depth=nc['depth'][:]
depth_mat=np.tile( depth, [sal.shape[0],1] )
# depth.shape
sound_speed = gsw.sound_speed(sal,temp,depth_mat)
sound_speed = pd.DataFrame( sound_speed.data )
sound_speed=sound_speed.fillna(axis=1,method='ffill')
# fig=plt.figure(num=6)
# plt.clf()
# plt.imshow(np.transpose(sound_speed.values[:,:]),aspect='auto')
# plt.pcolormesh(dvec,-depth,np.transpose(sound_speed.values))
# plt.boxplot((sound_speed.values))
dvec=np.arange(0,1000*500,1000)
# ssp2 = sound_speed.astype('int')
sspdic={}
i=0
dd=dvec.copy()
dd[-1]=dvec[-1]*10
for rang in dd:
sspdic[rang]= sound_speed.iloc[i,:].values
i=i+1
ssp2= | pd.DataFrame(sspdic) | pandas.DataFrame |
# -*- coding: utf-8 -*-
import glob
import os
import pandas as pd
import numpy as np
#import statsmodels as stat
#import statsmodels.formula.api as smf
#import statsmodels.api as sm
#import matplotlib.pyplot as plt
#import nibabel.gifti as gio
#from statsmodels.stats.outliers_influence import OLSInfluence
from itertools import product,combinations
from numpy import isnan, nan, logical_not, logical_or
from collections import Counter
#from graphpype.utils_stats import compute_oneway_anova_fwe,compute_pairwise_ttest_fdr
####################################### utils
def glob_natural_sorted(reg_exp):
print(reg_exp)
files = glob.glob(reg_exp)
print(len(files))
natural_sorted_files = [reg_exp.replace('*',str(i),-1) for i in range(len(files))]
return natural_sorted_files,list(range(len(files)))
######################################## gather rada ######################################
def compute_rada_df(iter_path,df,radatools_version = "3.2", mapflow = [],mapflow_name = ""):
from graphpype.utils_mod import get_modularity_value_from_lol_file
from graphpype.utils_mod import get_values_from_global_info_file
from graphpype.utils_mod import get_path_length_from_info_dists_file
if radatools_version == "3.2":
net_prop_dir = "net_prop"
elif radatools_version == "4.0":
net_prop_dir = "prep_rada"
else:
print("Warning, could not find radatools_version {}".format(radatools_version))
return
########### modularity
if len(mapflow) == 0:
modularity_file = os.path.join(iter_path,"community_rada","Z_List.lol")
print(modularity_file)
if os.path.exists(modularity_file):
mod_val = get_modularity_value_from_lol_file(modularity_file)
#else:
#mod_val = np.nan
print(mod_val)
df['Modularity'] = mod_val
print(df)
#################### info_global
global_info_file = os.path.join(iter_path,net_prop_dir,"Z_List-info_global.txt")
print(global_info_file)
if os.path.exists(global_info_file):
global_info_values = get_values_from_global_info_file(global_info_file)
print(global_info_values)
df.update(global_info_values)
print(df)
##################### info_dists
path_length_file = os.path.join(iter_path,net_prop_dir,"Z_List-info_dists.txt")
print(path_length_file)
if os.path.exists(path_length_file):
mean_path_length,diameter,global_efficiency = get_path_length_from_info_dists_file(path_length_file)
print(mean_path_length,diameter)
df['Mean_path_length'] = str(mean_path_length)
df['Diameter'] = str(diameter)
df['Global_efficiency'] = str(global_efficiency)
else:
df['Modularity'] = []
df[mapflow_name] = []
df['Mean_path_length'] = []
df['Diameter'] = []
df['Global_efficiency'] = []
for i,cond in enumerate(mapflow):
df[mapflow_name].append(cond)
modularity_file = os.path.join(iter_path,"community_rada","mapflow","_community_rada"+str(i),"Z_List.lol")
print(modularity_file)
if os.path.exists(modularity_file):
mod_val = get_modularity_value_from_lol_file(modularity_file)
#else:
#mod_val = np.nan
print(mod_val)
df['Modularity'].append(mod_val)
else:
df['Modularity'].append(np.nan)
print(df)
#################### info_global
global_info_file = os.path.join(iter_path,net_prop_dir,"mapflow","_" + net_prop_dir+str(i),"Z_List-info_global.txt")
print(global_info_file)
if os.path.exists(global_info_file):
global_info_values = get_values_from_global_info_file(global_info_file)
print(global_info_values)
print(global_info_values.items())
for key,value in global_info_values.items():
print(key,value)
if not key in list(df.keys()):
df[key] = []
df[key].append(value)
print(df)
##################### info_dists
path_length_file = os.path.join(iter_path,net_prop_dir,"mapflow","_" + net_prop_dir+str(i),"Z_List-info_dists.txt")
print(path_length_file)
if os.path.exists(path_length_file):
mean_path_length,diameter,global_efficiency = get_path_length_from_info_dists_file(path_length_file)
print(mean_path_length,diameter)
df['Mean_path_length'].append(str(mean_path_length))
df['Diameter'].append(str(diameter))
df['Global_efficiency'].append(str(global_efficiency))
else:
df['Mean_path_length'].append(str(np.nan))
df['Diameter'].append(str(np.nan))
df['Global_efficiency'].append(str(np.nan))
print(df)
def compute_nodes_rada_df(local_dir,gm_coords,coords_file,labels_file,radatools_version = "3.2"):
from graphpype.utils_net import read_lol_file,read_Pajek_corres_nodes
from graphpype.utils_dtype_coord import where_in_coords
import glob
if radatools_version == "3.2":
net_prop_dir = "net_prop"
elif radatools_version == "4.0":
net_prop_dir = "prep_rada"
#### Z_List
#Pajek_files = glob.glob(os.path.join(local_dir,net_prop_dir,"*.net"))
#assert len(Pajek_files) == 1, "Error, no .net file found in {} prep_rada".format(local_dir)
#if len(Pajek_files) == 1:
#Pajek_file = Pajek_files[0]
list_df = []
Pajek_file = os.path.join(local_dir,"prep_rada","Z_List.net")
if os.path.exists(coords_file) and os.path.exists(Pajek_file) and os.path.exists(labels_file):
#### labels
labels = np.array([line.strip() for line in open(labels_file)], dtype = str)
#### MNI coordinates
coords = np.array(np.loadtxt(coords_file),dtype = int)
print(coords.shape)
#### nodes in the connected graph
node_corres = read_Pajek_corres_nodes(Pajek_file)
print(np.min(node_corres),np.max(node_corres))
print(node_corres.shape)
### node_coords
node_coords = coords[node_corres,:]
print(node_coords.shape)
node_labels = labels[node_corres].reshape(-1,1)
print(node_labels.shape)
### where_in_gm_mask
where_in_gm_mask = where_in_coords(node_coords,gm_coords)
where_in_gm_mask = where_in_gm_mask.reshape(where_in_gm_mask.shape[0],1)
#print where_in_gm_mask
print(where_in_gm_mask.shape)
list_df.append(pd.DataFrame(np.concatenate((where_in_gm_mask,node_labels,node_coords),axis = 1),columns = ['Where_in_GM_mask','labels','MNI_x','MNI_y','MNI_z']))
else:
#print "Missing {},{} or {}".format(Pajek_file,coords_file,labels_file)
if not os.path.exists(coords_file):
print("Missing {}".format(coords_file))
if not os.path.exists(Pajek_file):
print("Missing {}".format(Pajek_file))
if not os.path.exists(labels_file):
print("Missing {}".format(labels_file))
#and os.path.exists(Pajek_file) and os.path.exists(labels_file):
#### info nodes
#info_nodes_files = glob.glob(os.path.join(local_dir,net_prop_dir,"*-info_nodes.txt"))
#if len(info_nodes_files) == 1:
#info_nodes_file = info_nodes_files[0]
info_nodes_file = os.path.join(local_dir,net_prop_dir,"Z_List-info_nodes.txt")
print(info_nodes_file)
if os.path.exists(info_nodes_file) :
## loading info_nodes
df_node_info = pd.read_table(info_nodes_file)
print("Info nodes:")
print(df_node_info.shape)
list_df.append(df_node_info)
else:
print("Info nodes not found:")
print(info_nodes_file)
#### modules /community_vect
#partition_files = glob.glob(os.path.join(local_dir,"community_rada","*.lol"))
#if len(partition_files) == 1:
#partition_file = partition_files[0]
partition_file = os.path.join(local_dir,"community_rada","Z_List.lol")
if os.path.exists(partition_file) :
##loading partition_file
community_vect = read_lol_file(partition_file)
print("community_vect:")
print(community_vect.shape)
list_df.append(pd.DataFrame(community_vect,columns = ['Module']))
#### node roles
roles_file = os.path.join(local_dir,"node_roles","node_roles.txt")
part_coeff_file = os.path.join(local_dir,"node_roles","all_participation_coeff.txt")
Z_com_degree_file = os.path.join(local_dir,"node_roles","all_Z_com_degree.txt")
if os.path.exists(roles_file) and os.path.exists(part_coeff_file) and os.path.exists(Z_com_degree_file):
#### loding node roles
node_roles = np.array(np.loadtxt(roles_file),dtype = int)
print("node_roles:")
print(node_roles.shape)
print("part_coeff:")
part_coeff = np.loadtxt(part_coeff_file)
part_coeff = part_coeff.reshape(part_coeff.shape[0],1)
print(part_coeff.shape)
print("Z_com_degree:")
Z_com_degree = np.loadtxt(Z_com_degree_file)
Z_com_degree = Z_com_degree.reshape(Z_com_degree.shape[0],1)
print(Z_com_degree.shape)
list_df.append(pd.DataFrame(np.concatenate((node_roles,part_coeff,Z_com_degree),axis = 1),columns = ['Role_quality','Role_quantity','Participation_coefficient','Z_community_degree']))
return list_df
######################################## computing permutation-based stats per nodes, over several sheetnames #########################################
def compute_signif_permuts(permut_df, permut_col = "Seed",session_col = "Session", start_col = 0, stop_col = 0, columns = []):
"""
args:
compute significance of permutation over a df generated by gather_permuts
permut_df: original permutation results (pandas Dataframe)
stop_col: last column to be included (in fact, excluded except if value is 0, in this case goes to the last column of the df
return:
all_p_higher, all_p_lower: "vector of p_values obtained for 1 tail t-test in both direction, first session - second session"
"""
#print permut_df
    ################################################## check if permut_col exists and is consistent with permutation indexing:
seed_index = np.unique(permut_df[permut_col].values)
print(seed_index)
### should start with -1
if not seed_index[0] == -1:
print("Error, permut_col {} should start with -1".format(permut_col))
#0/0
return pd.DataFrame()
expected_permut_indexes = list(range(len(seed_index)-1))
print(expected_permut_indexes)
### should start at 0 and have all values in between
if not all(x in seed_index[1:] for x in expected_permut_indexes):
print("Error, permut indexes should be consecutive and start with 0: {} ".format(expected_permut_indexes))
#0/0
#return pd.DataFrame()
nb_permuts = len(expected_permut_indexes)
print(nb_permuts)
################# selecting columns
if len(columns) != 0:
data_cols = columns
else:
if stop_col == 0:
data_cols = permut_df.columns[start_col:]
else:
data_cols = permut_df.columns[start_col:stop_col]
print(data_cols)
################## looping over selected columns
all_p_higher = np.zeros(shape = (len(data_cols)), dtype = 'float64') -1
all_p_lower = np.zeros(shape = (len(data_cols)), dtype = 'float64') -1
cols = []
if session_col == -1 or len(permut_df[session_col].unique()) == 1:
for index_col,col in enumerate(data_cols):
print(index_col,col)
sum_higher = np.sum((permut_df[col].iloc[1:] > permut_df[col].iloc[0]).values.astype(int))
all_p_higher[index_col] = (sum_higher+1)/float(permut_df[col].shape[0])
sum_lower = np.sum((permut_df[col].iloc[1:] < permut_df[col].iloc[0]).values.astype(int))
all_p_lower[index_col] = (sum_lower+1)/float(permut_df[col].shape[0])
#print permut_df[col]
#print permut_df[col].shape[0]
print(all_p_higher[index_col])
cols.append(str(col))
print(all_p_higher)
print(cols)
#df_res = pd.DataFrame(all_p_higher.reshape(1,-1),columns = cols)
#df_res.index = ["Higher"]
#print df_res
#return df_res
else:
### all unique values should have 2 different samples
count_elements = Counter(permut_df[permut_col].values)
print(count_elements)
        ### -1 should be represented two times:
        if not count_elements[-1] == 2:
            print("-1 should be represented two times")
return pd.DataFrame()
if not all(val == 2 for val in list(count_elements.values())):
print("Error, all permut indexes should have 2 and only 2 lines: {}".format(count_elements))
#0/0
#return pd.DataFrame()
################################################## computing diff df
for index_col,col in enumerate(data_cols):
print(index_col,col)
#print permut_df
print(permut_col,session_col)
df_col = permut_df.pivot(index = permut_col, columns = session_col, values = col)
print(df_col)
df_col["Diff"] = pd.to_numeric(df_col.iloc[:,0]) - pd.to_numeric(df_col.iloc[:,1])
print(df_col["Diff"])
non_nan_indexes, = np.where(np.isnan(df_col["Diff"]) == False)
print(non_nan_indexes)
diff_col = df_col["Diff"].values[non_nan_indexes]
if not all(val == 2 for val in list(count_elements.values())):
print("Error, all permut indexes should have 2 and only 2 lines: {}".format(count_elements))
print(diff_col)
print(diff_col.shape)
#assert df_col.shape[0] == 201, "Error with shape {}".format(df_col.shape)
if diff_col.shape[0] == 0:
all_p_higher[index_col] = np.nan
all_p_lower[index_col] = np.nan
cols.append(col)
continue
if diff_col[0] > 0:
sum_higher = np.sum(np.array(diff_col[1:] > diff_col[0],dtype = int))
print("sum_higher:",sum_higher)
all_p_higher[index_col] = (sum_higher+1)/float(df_col.shape[0])
elif diff_col[0] < 0 :
sum_lower = np.sum(np.array(diff_col[1:] < diff_col[0],dtype = int))
print("sum_lower:",sum_lower)
all_p_lower[index_col] = (sum_lower+1)/float(df_col.shape[0])
else :
print("not able to do diff")
cols.append(col)
#print df_col["Diff"] < df_col["Diff"][0]
print(all_p_higher)
print(all_p_lower)
print(all_p_higher, all_p_lower)
df_res = pd.DataFrame([all_p_higher, all_p_lower],columns= cols)
df_res.index = ["Higher","Lower"]
print(df_res)
return df_res
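# Minimal sketch with invented toy data: Seed == -1 is the observed value, Seeds 0..3
# are permutations, and a single Session value triggers the one-sample branch above;
# the result holds one-tailed p-values per selected column.
def _example_compute_signif_permuts():
    toy_df = pd.DataFrame({
        "Seed": [-1, 0, 1, 2, 3],
        "Session": ["1"] * 5,
        "Degree": [10.0, 4.0, 6.0, 11.0, 3.0],
    })
    return compute_signif_permuts(toy_df, permut_col="Seed",
                                  session_col="Session", columns=["Degree"])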
def compute_signif_node_prop(orig_df, list_permut_df, columns):
permut_df = pd.concat(list_permut_df,axis = 0)
print(permut_df['Seed'])
all_frac_higher = []
for col in columns:
print(col)
assert col in orig_df.columns, "Error, {} not in orig columns {}".format(col, orig_df.columns)
assert col in permut_df.columns, "Error, {} not in permut columns {}".format(col, permut_df.columns)
def sum_higher(a,b):
print(a)
print(b)
print(a[:,None])
def func(el):
print(el[0])
print(b.values)
print(el[0]<b.values)
return np.sum(el[0]<b.values)
return np.apply_along_axis(func, 1, a[:,None])
print(orig_df[col])
print(permut_df[col])
frac_higher = np.array(sum_higher(orig_df[col],permut_df[col])+1,dtype = float)/float(len(permut_df.index) +1)
print(frac_higher)
all_frac_higher.append(frac_higher)
df_signif = pd.DataFrame(np.transpose(np.array(all_frac_higher)),columns = columns)
return df_signif
########################################################################################################################
######################################## gather con values ################################
def gather_diff_con_values(res_path, cond, nb_permuts, labels):
import os
if isinstance(cond,tuple):
## si plusieurs conditions = IRMf
df_filename = os.path.join(res_path, "permuts_" + ".".join(cond) + '_con_values.csv')
else:
## si une seule valeur
df_filename = os.path.join(res_path, "permuts_" + cond + '_con_values.csv')
if not os.path.exists(df_filename):
############ pair of labels and tri triu_indices
triu_indices_i,triu_indices_j = np.triu_indices(len(labels),k=1)
pair_labels = [labels[i] + "_" + labels[j] for i,j in zip(triu_indices_i.tolist(),triu_indices_j.tolist())]
print(pair_labels)
print(len(pair_labels))
############ creating dataframe
all_vect_cormats = []
all_global_info_values = []
for seed in range(-1,nb_permuts):
print(seed)
for sess in ['1','2']:
print(sess)
dict_global_info_values = {'Session':sess, 'Seed':seed}
all_global_info_values.append(dict_global_info_values)
########### avg_cormat
if isinstance(cond,tuple):
iter_dir = "_cond_" + ".".join(cond) + "_permut_" + str(seed)
else:
iter_dir = "_freq_band_name_" + cond + "_permut_" + str(seed)
avg_cormat_file = os.path.join(res_path,iter_dir,"prepare_mean_correl" + sess,"avg_cormat.npy")
print(avg_cormat_file)
if os.path.exists(avg_cormat_file):
avg_cormat = np.load(avg_cormat_file)
print(avg_cormat)
vect_avg_cormat = avg_cormat[triu_indices_i,triu_indices_j]
print(vect_avg_cormat.shape)
all_vect_cormats.append(vect_avg_cormat)
df_info = | pd.DataFrame(all_global_info_values) | pandas.DataFrame |
import pandas as pd
import os
csvfile = os.path.join(os.path.dirname(__file__),
"../../data/penguins_lter.csv")
main_db = | pd.read_csv(csvfile, sep=";") | pandas.read_csv |
import pathlib
import numpy as np
import pandas as pd
from ..designMethods.en_13001_3_3 import ENComputation, LoadCollectivePrediction, MARSInput
from .output import ResultWriter
from ..designMethods.en_13001_3_3.input_error_check import InputFileError
class MainApplication():
def __init__(self) -> None:
self.input = MARSInput()
self.prediction = LoadCollectivePrediction()
self.computation: ENComputation
self.input_file_path: pathlib.Path
self.output_file_path: pathlib.Path
self.result_writer: ResultWriter
self.sc_direction: int
self.config: str
def read_input_file(self, filename: pathlib.Path) -> None:
# load data for load collective prediction
self.input.clear_inputs()
try:
self.input.read_input_df(filename)
self.input.check_input_df()
if len(self.input.input_df.columns) == 1:
raise InputFileError("More than 3 empty cells in all configurations")
self.input.load_gp_input("Stacker Crane (SC) And Rack Configuration")
# load en 13001 parameters
self.input.load_parameter_input("EN-13001-3-3")
# load materials
self.input.materials.read(filename, "rail_materials", "wheel_materials")
# load geometries
self.input.geometries.read(filename, "rail_geometries", "wheel_geometries")
# check materials and geometries
self.input.geometry_and_material_error_check()
# # load materials
# self.input.load_material_input_check(filename, "rail_materials", "wheel_materials")
# # load rail and wheel geometries
# self.input.load_geometry_input_check(filename, "rail_geometries", "wheel_geometries")
# check for input errors and drop faulty configurations
self.input.perform_error_checks()
self.input.drop_error_configs()
if len(self.input.parameters.gen_params) == 0:
raise InputFileError("At least one error in all configurations")
except InputFileError as e:
raise e
except ValueError as e:
if "Worksheet" in str(e):
raise InputFileError("Broken input file: one or more required input sheets were missing. required sheets are: Input_variables, rail_materials, wheel_materials, wheel_geometries, rail_geometries") from e
raise InputFileError("Unknown fatal error with input file, please redownload") from e
except Exception as e:
if "sheet" in str(e):
raise InputFileError("Broken input file: one or more required input sheets were missing. required sheets are: Input_variables, rail_materials, wheel_materials, wheel_geometries, rail_geometries") from e
raise InputFileError("Unknown fatal error with input file, please redownload") from e
def prepare_gp_input(self):
self.input.recompute_gp_data(self.config)
# check gp input variables for values outside expected intervals
self.input.perform_gp_input_warning_check()
def run_computation(self) -> None:
self.input.clear_computed_inputs()
self.input.set_materials_and_geometry()
self.input.parameters.compute_f_f3()
self.input.parameters.compute_contact_and_f_1()
self.prediction.clear_prediction_results()
# assign f_sd_s
self.prediction.load_f_sd_s(self.input.parameters.gen_params["F_sd_s_w"], self.input.parameters.gen_params["F_sd_s_r"])
self.prediction.predict_kc(self.input.gp_input.norm)
self.prediction.compute_F_sd_f_all(self.input.gp_input.raw, self.config, self.sc_direction)
self.prediction.recompute_kc(self.input.parameters.gen_params["F_sd_f_w"], "wf")
self.prediction.recompute_kc(self.input.parameters.gen_params["F_sd_f_w"], "wr")
self.prediction.recompute_kc(self.input.parameters.gen_params["F_sd_f_r"], "r")
self.prediction.predict_travelled_dist(self.input.gp_input.raw["cycle_mode"], self.input.gp_input.raw["num_cycles_wheel"], self.input.gp_input.raw["r_l"])
# create computation instance and compute configs
self.computation = ENComputation()
self.computation.load_data(self.input, self.prediction)
self.computation.compute_pre_F_rd_all()
self.computation.compute_F_rd_all()
self.computation.compute_proofs_all()
def initialize_result_writer(self):
# pick a filename that doesn't exist yet
self.result_writer = ResultWriter(self.computation, self.input, self.output_file_path)
self.result_writer.create_summary()
def computation_mode_1(self) -> None:
self.prepare_gp_input()
self.run_computation()
# reults output
self.input.prepare_for_output()
self.computation.load_results_all()
self.initialize_result_writer()
self.result_writer.write()
# create_output_file(self.computation, self.input, self.output_file_path)
def computation_mode_2(self) -> None:
self.prepare_gp_input()
# sort wheel geometries by diameter
self.input.geometries.wheel.sort_values("D", inplace=True)
wheel_geometries = list(self.input.geometries.wheel.index)
proof_results = np.empty((len(wheel_geometries), len(self.input.parameters.gen_params)))
for idx, wheel_geometry in enumerate(wheel_geometries):
self.input.parameters.gen_params.loc[:, "wheel_geometry"] = wheel_geometry
self.run_computation()
            # check if all proofs are fulfilled
proof_results[idx, :] = np.logical_and.reduce((
self.computation.wheel_f.proofs["static"], self.computation.wheel_f.proofs["fatigue"].loc[:, "preds"],
self.computation.wheel_r.proofs["static"], self.computation.wheel_r.proofs["fatigue"].loc[:, "preds"],
self.computation.rail.proofs["static"], self.computation.rail.proofs["fatigue"].loc[:, "preds"]
))
wheel_geometries_min_d = | pd.Series(wheel_geometries) | pandas.Series |
"""
Written by <NAME> and contributed to by <NAME>.

This python module builds on the NOAA rubrics Dr Habermann created and on his
work conceptualizing the documentation language, so that rubrics using
recommendations from other earth science communities can be applied to
multiple metadata dialects as part of the USGeo BEDI and NSF DIBBs projects.
As an outcome of DIBBs, this module allows a user to initiate an evaluation of
valid XML and assess the degree to which a collection of records is likely to
meet a community information need.

The basic workflow is to retrieve records, evaluate them for xpaths that
contain text, run occurrence functions on the csv output of the evaluation,
and create reports from the outputs. To compare between collections, combine
the csv outputs with the appropriate combination functions and create an
organization spreadsheet. Finally, run WriteToGoogle on any outputs you want
to share to get a viewable/downloadable link.
"""
import pandas as pd
import csv
import gzip
import os
import requests
import xlsxwriter
from pydrive.auth import GoogleAuth
from pydrive.drive import GoogleDrive
from lxml import etree
import sys
import logging
from IPython.core.display import display, HTML
import itertools
from plotly import tools
import plotly.plotly
from _plotly_future_ import v4
import plotly.graph_objs as go
import plotly.io as pio
from plotly.offline import iplot, init_notebook_mode
pio.orca.config.use_xvfb = True
init_notebook_mode(connected=True)
from PIL import Image
Image.MAX_IMAGE_PIXELS = None
lggr = logging.getLogger(__name__)
csv.field_size_limit(sys.maxsize)
# function to download metadata
def get_records(urls, xml_files, well_formed=True):
"""Download metadata records. Metadata records are download from the
supplied ``urls`` and stored in files whose names are found on
``xml_files``. When ``well_formed`` is ``True`` downloaded XML will
be saved to a file only if well-formed.
"""
""" if we used a function
like this to collect xml, it would be the root of any processing steps
"""
if len(urls) != len(xml_files):
raise ValueError('Different number of URLs and record file names')
for url, fname in zip(urls, xml_files):
try:
r = requests.get(url)
r.raise_for_status()
except Exception:
print('There was an error downloading from {}'.format(url))
if well_formed:
try:
etree.fromstring(r.text)
except Exception:
print('Metadata record from {} not well-formed'.format(url))
if fname[-4:] != '.xml':
fname += '.xml'
with open(fname, 'wt') as f:
f.write(r.text)
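# Hedged usage sketch: the URL below is a placeholder, not a real project endpoint;
# get_records() simply pairs each URL with an output filename and optionally checks
# well-formedness before writing.
def _example_get_records():
    urls = ["https://example.org/metadata/record1.xml"]  # placeholder URL
    xml_files = ["record1.xml"]
    get_records(urls, xml_files, well_formed=True)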
def recordXpathContent(EvaluatedMetadataDF):
"""requires a dataframe with elements. Creates a vertical view of
concept content for each record in the collection. Useful in the
creation of json.
"""
EvaluatedMetadataDF = EvaluatedMetadataDF.applymap(str)
group_name = EvaluatedMetadataDF.groupby([
'Collection', 'Record', 'XPath'], as_index=False)
occurrenceMatrix = group_name['Content'].apply(
lambda x: '%s' % ', '.join(x)).unstack().reset_index()
occurrenceMatrix.columns.names = ['']
#FILLvalues = 'No Content'
#occurrenceMatrix = occurrenceMatrix.fillna(value=FILLvalues)
occurrenceMatrix.reset_index()
return(occurrenceMatrix)
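# Minimal usage sketch with assumed toy data (the values below are invented for
# illustration): each record becomes one row, with one column per XPath holding the
# joined content strings. The exact return shape depends on the pandas behavior this
# module was written against.
def _example_recordXpathContent():
    toy = pd.DataFrame({
        "Collection": ["demo", "demo"],
        "Record": ["rec1.xml", "rec1.xml"],
        "XPath": ["/metadata/title", "/metadata/keyword"],
        "Content": ["Toy title", "ocean"],
    })
    return recordXpathContent(toy)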
def applyRecommendation(recElements, recommendationName, collection):
# places for all the evaluated and analyzed data
XpathEvaluated = os.path.join("..","data", recommendationName, collection + "_XpathEvaluated.csv.gz")
EvaluatedDF = pd.read_csv(XpathEvaluated)
# Use above dataframe and apply the xpathCounts and xpathOccurrence functions from MDeval for each recommendation
RecommendationEvaluated = os.path.join("..","data", recommendationName, collection + '_' + recommendationName + 'Evaluated.csv.gz')
#RecommendationCounts = os.path.join("../data/", collection + '_' + recommendationName + 'Counts.csv')
RecommendationOccurrence = os.path.join("..","data", recommendationName, collection + '_' + recommendationName + 'Occurrence.csv')
# Use the output of the evaluation transform and the piped string
# of all root paths to create a dataframe of just recommendation elements
recElementsPattern = '|'.join(recElements)
RecommendationDF = EvaluatedDF[EvaluatedDF['XPath'].str.contains(recElementsPattern)]
RecommendationDF.to_csv(RecommendationEvaluated, index=False, compression='gzip')
#XpathCounts(RecommendationDF, RecommendationCounts)
# change order of rows to be meaningful for recommendation
#RecommendationCountsDF = pd.read_csv(RecommendationCounts)
CollectionRecRows = []
CollectionRecRows.append(["Number of Records"])
CollectionRecColumns = []
CollectionRecColumns.append(["Collection","Record"])
XpathOccurrence(RecommendationDF, collection, RecommendationOccurrence)
RecommendationOccurrenceDF = pd.read_csv(RecommendationOccurrence)
for element in recElements:
# find the rows that match each element
CollectionElements = list(RecommendationOccurrenceDF['XPath'])[1:]
matchingElements = [CollectionElement for CollectionElement in CollectionElements if element in CollectionElement]
#append the list to a master list that will be used to order the chart
CollectionRecRows.append(matchingElements)
CollectionRecColumns.append(matchingElements)
CollectionRecRows = [item for sublist in CollectionRecRows for item in sublist]
CollectionRecColumns = [item for sublist in CollectionRecColumns for item in sublist]
from collections import OrderedDict
CollectionRecRows = list(OrderedDict.fromkeys(CollectionRecRows))
CollectionRecColumns = list(OrderedDict.fromkeys(CollectionRecColumns))
#RecommendationCountsDF = RecommendationCountsDF[CollectionRecColumns]
# write over the previous csv
#RecommendationCountsDF.to_csv(RecommendationCounts, index=False, mode='w')
# change order of rows to be meaningful for recommendation
RecommendationOccurrenceDF = RecommendationOccurrenceDF.set_index('XPath')
RecommendationOccurrenceDF = RecommendationOccurrenceDF.loc[CollectionRecRows]
RecommendationOccurrenceDF = RecommendationOccurrenceDF.reset_index()
def XpathCounts(EvaluatedMetadataDF,
DataDestination, to_csv=True):
"""XpathCounts requires a dataframe with xpath.The DF
can created be localAllNodesEval, XMLeval(not accurate), or
a simpleXpath. It is required for combineXpathCounts"""
group_name = EvaluatedMetadataDF.groupby(
['Collection', 'Record', 'XPath'], as_index=False)
XpathCountsDF = group_name.size().unstack().reset_index()
XpathCountsDF = XpathCountsDF.fillna(0)
pd.options.display.float_format = '{:,.0f}'.format
if to_csv:
lggr.info('Saving Xpath counts report to %s' % DataDestination)
XpathCountsDF.to_csv(DataDestination, mode='w', index=False)
return XpathCountsDF
def XpathOccurrence(EvaluatedMetadataDF, Collection,
DataDestination, to_csv=True):
    # xpath occurrence data product
    """Computes per-collection XPath occurrence statistics from an evaluated
    metadata dataframe and (optionally) writes them to csv. Its output is
    required for CombineXPathOccurrence and CombinationSpreadsheet.
    """
DataDestinationDirectory = DataDestination[:DataDestination.rfind('/') + 1]
os.makedirs(DataDestinationDirectory, exist_ok=True)
group_name = EvaluatedMetadataDF.groupby(
['Record', 'XPath'], as_index=False)
occurrenceMatrix = group_name.size().unstack().reset_index()
occurrenceMatrix = occurrenceMatrix.fillna(0)
occurrenceSum = occurrenceMatrix.sum()
occurrenceCount = occurrenceMatrix[occurrenceMatrix != 0].count()
result = pd.concat([occurrenceSum, occurrenceCount], axis=1).reset_index()
result.insert(
1, 'Collection', Collection)
result.insert(4, 'CollectionOccurrence%', Collection)
result.insert(4, 'AverageOccurrencePerRecord', Collection)
result.columns = [
'XPath', 'Collection', 'XPathCount', 'RecordCount',
'AverageOccurrencePerRecord', 'CollectionOccurrence%'
]
NumberOfRecords = result.at[0, 'XPathCount'].count('.xml')
result['CollectionOccurrence%'] = result['RecordCount'] / NumberOfRecords
result.at[0, 'XPathCount'] = NumberOfRecords
result.at[0, 'XPath'] = 'Number of Records'
result.at[0, 'CollectionOccurrence%'] = NumberOfRecords
result['AverageOccurrencePerRecord'] = (
result['XPathCount'] / NumberOfRecords)
result[['AverageOccurrencePerRecord', 'CollectionOccurrence%']] = (
result[['AverageOccurrencePerRecord',
'CollectionOccurrence%']].astype(float)
)
result[["XPathCount", "RecordCount"]] = (
result[["XPathCount", "RecordCount"]].astype(int)
)
result['AverageOccurrencePerRecord'] = (pd.Series([
"{0:.2f}".format(val) for val in result['AverageOccurrencePerRecord']
], index=result.index))
result.at[0, 'AverageOccurrencePerRecord'] = NumberOfRecords
if to_csv:
lggr.info('Saving XPath occurrence report to %s' % DataDestination)
result.to_csv(DataDestination, mode='w', index=False)
return result
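# Hedged usage sketch with toy evaluated-metadata rows (invented values). The '.xml'
# suffix on Record matters because the function counts records by counting '.xml' in
# the concatenated record names; the output path is a placeholder whose directory is
# created even with to_csv=False. Assumes the pandas behavior this module targets,
# where the groupby(...).size() above returns a Series that can be unstacked.
def _example_XpathOccurrence():
    toy = pd.DataFrame({
        "Record": ["rec1.xml", "rec1.xml", "rec2.xml"],
        "XPath": ["/metadata/title", "/metadata/keyword", "/metadata/title"],
    })
    return XpathOccurrence(toy, "DemoCollection",
                           "demo/DemoCollection_XpathOccurrence.csv",
                           to_csv=False)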
def CombineXPathOccurrence(CollectionComparisons,
DataDestination, to_csv=True):
"""Using xpath occurrence data products, combine them and produce a
collection occurrence% table with collections for columns and
concepts for rows requires a list of xpathOccurrence csv.
It is required for CombinationSpreadsheet
"""
DataDestinationDirectory = DataDestination[:DataDestination.rfind('/') + 1]
os.makedirs(DataDestinationDirectory, exist_ok=True)
CombinedDF = pd.concat((pd.read_csv(f) for f in CollectionComparisons))
CombinedPivotDF = CombinedDF.pivot(
index='XPath', columns='Collection', values='CollectionOccurrence%')
ConceptCountsDF = CombinedPivotDF.fillna(0)
ConceptCountsDF.columns.names = ['']
ConceptCountsDF = ConceptCountsDF.reset_index()
line = ConceptCountsDF[ConceptCountsDF['XPath'] == 'Number of Records']
ConceptCountsDF = pd.concat(
[ConceptCountsDF[0:0], line,
ConceptCountsDF[0:]]).reset_index(drop=True)
ConceptCountsDF.drop(
ConceptCountsDF.tail(1).index, inplace=True)
if to_csv:
lggr.info('Saving concept count report to %s' % DataDestination)
ConceptCountsDF.to_csv(DataDestination, mode='w', index=False)
return ConceptCountsDF
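# Hedged sketch: the csv paths below are placeholders for per-collection
# XpathOccurrence outputs written earlier in the workflow; the destination
# directory ('combined/') is created even when to_csv=False.
def _example_CombineXPathOccurrence():
    occurrence_csvs = ["demo/CollectionA_XpathOccurrence.csv",
                       "demo/CollectionB_XpathOccurrence.csv"]  # placeholders
    return CombineXPathOccurrence(occurrence_csvs,
                                  "combined/XpathOccurrence.csv",
                                  to_csv=False)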
def CombinationSpreadsheet(xpathOccurrence, recommendationOccurrence,
RecommendationConcept, RecommendationGraph,
RecGraphLink,
DataDestination, AVGxpathOccurrence=None,
AVGrecommendationOccurrence=None,
recommendationCounts=None, xpathCounts=None,
recommendationOccurrence2=None,
RecommendationConcept2=None, RecommendationGraph2=None,
RecGraphLink2=None, AVGrecommendationOccurrence2=None,
recommendationCounts2=None):
# create spreadsheet for an organization
"""requires each xpath and concept occurrence,
csv for a organization
(or any group of collections you want to compare)
"""
lggr.info('Saving spreadsheet %s' % DataDestination)
workbook = xlsxwriter.Workbook(DataDestination,
{'strings_to_numbers': True})
workbook.use_zip64()
cell_format11 = workbook.add_format()
cell_format11.set_num_format('0%')
cell_format04 = workbook.add_format()
cell_format04.set_num_format('0')
cell_format05 = workbook.add_format()
cell_format05.set_num_format('0.00')
formatGreen = workbook.add_format(
{'bg_color': '#C6EFCE', 'font_color': '#006100'})
formatRed = workbook.add_format(
{'bg_color': '#FFC7CE', 'font_color': '#9C0006'})
formatYellow = workbook.add_format(
{'bg_color': '#FFEB9C', 'font_color': '#9C6500'})
RecommendationConceptWS = workbook.add_worksheet(
'BestPractices2004_Concepts')
# RecommendationGraphWS = workbook.add_worksheet(
# 'RecommendationGraph')
# Insert an image with scaling.
RecommendationConceptWS.write('A29', "Full Image")
RecommendationConceptWS.write('B29', RecGraphLink)
RecommendationConceptWS.insert_image('A30', RecommendationGraph, {'x_scale': .07, 'y_scale': .07})
Reader = csv.reader(
open(RecommendationConcept, 'r'), delimiter=',', quotechar='"')
row_count = 0
RecommendationConceptWS.set_row(0, None, cell_format04)
RecommendationConceptWS.set_row(2, None, cell_format04)
for row in Reader:
for col in range(len(row)):
RecommendationConceptWS.write(row_count, col, row[col])
RecommendationConceptWS.set_column(col, col, 7, cell_format11)
row_count += 1
RecommendationConceptWS.set_column(0, 0, 20)
RecommendationConceptWS.set_column(1, 1, 15)
RecommendationConceptWS.set_column(2, 2, 20)
RecommendationAnalysisWS = workbook.add_worksheet(
'BestPractices2004_Elements')
RecommendationAnalysisWS.set_column(2, 4, 12)
recommendationoccurrenceWS = workbook.add_worksheet(
'BestPractices2004_Occurrence')
avgRecommendationOccurWS = workbook.add_worksheet(
'BestPractices2004_AVGoccurrence')
if recommendationCounts is not None:
recommendationcounts = workbook.add_worksheet('BestPractices2004_Counts')
###################################################################
# if a second recommendation
if recommendationOccurrence2 is not None:
RecommendationConcept2WS = workbook.add_worksheet(
'BestPractices2011_Concepts')
# RecommendationGraphWS = workbook.add_worksheet(
# 'RecommendationGraph')
# Insert an image with scaling.
RecommendationConcept2WS.write('A31', "Full Image")
RecommendationConcept2WS.write('B31', RecGraphLink2)
RecommendationConcept2WS.insert_image('A33', RecommendationGraph2, {'x_scale': .07, 'y_scale': .07})
Reader = csv.reader(
open(RecommendationConcept2, 'r'), delimiter=',', quotechar='"')
row_count = 0
RecommendationConcept2WS.set_row(0, None, cell_format04)
RecommendationConcept2WS.set_row(2, None, cell_format04)
for row in Reader:
for col in range(len(row)):
RecommendationConcept2WS.write(row_count, col, row[col])
RecommendationConcept2WS.set_column(col, col, 7, cell_format11)
row_count += 1
RecommendationConcept2WS.set_column(0, 0, 20)
RecommendationConcept2WS.set_column(1, 1, 15)
RecommendationConcept2WS.set_column(2, 2, 20)
RecommendationAnalysis2WS = workbook.add_worksheet(
'BestPractices2011_Elements')
RecommendationAnalysis2WS.set_column(2, 4, 12)
recommendationoccurrence2WS = workbook.add_worksheet(
'BestPractices2011_Occurrence')
avgRecommendationOccur2WS = workbook.add_worksheet(
'BestPractices2011_AVGoccurrence')
if recommendationCounts2 is not None:
            recommendationcounts2 = workbook.add_worksheet('BestPractices2011_Counts')
#######################################################################
RecommendationAnalysis2WS.set_column('A:A', 70)
RecommendationAnalysis2WS.set_column('B:B', 20)
recommendationoccurrence2WS.set_column('A:A', 70)
recommendationoccurrence2WS.hide()
Reader = csv.reader(
open(recommendationOccurrence2, 'r'), delimiter=',', quotechar='"')
row_count = 0
recommendationoccurrence2WS.set_row(1, None, cell_format04)
for row in Reader:
for col in range(len(row)):
recommendationoccurrence2WS.write(
row_count, col, row[col])
recommendationoccurrence2WS.set_column(
col, col, 15, cell_format11)
row_count += 1
Reader = csv.reader(
open(recommendationOccurrence2, 'r'), delimiter=',', quotechar='"')
row_count = 0
for row in Reader:
if Reader.line_num != 2:
for col in range(1, len(row)):
RecommendationAnalysis2WS.write(
row_count + 9, col + 4, row[col], cell_format11
)
for col in range(0, 1):
RecommendationAnalysis2WS.write(
row_count + 9, col, row[col], cell_format11)
Recommendationcell = xlsxwriter.utility.xl_rowcol_to_cell(
row_count + 9, 0)
formulaElementSimplifier = (
'=MID(' + Recommendationcell +
',1+FIND("|",SUBSTITUTE(' + Recommendationcell +
',"/","|",LEN(' + Recommendationcell + ')-LEN(SUBSTITUTE(' +
Recommendationcell + ',"/","")))),100)'
)
RecommendationAnalysis2WS.write(
row_count + 9, col + 1, formulaElementSimplifier, cell_format11
)
row_count += 1
avgRecommendationOccur2WS.set_column('A:A', 70)
avgRecommendationOccur2WS.hide()
if AVGrecommendationOccurrence2 is not None:
Reader = csv.reader(
open(AVGrecommendationOccurrence2, 'r'), delimiter=',', quotechar='"')
row_count = 0
avgRecommendationOccur2WS.set_row(1, None, cell_format04)
for row in Reader:
for col in range(len(row)):
avgRecommendationOccur2WS.write(
row_count, col, row[col])
avgRecommendationOccur2WS.set_column(col, col, 15, cell_format05)
RecommendationAnalysis2WS.write('A2', 'Number of records')
RecommendationAnalysis2WS.write('A3', 'Number of elements')
RecommendationAnalysis2WS.write(
'A4',
'Number of recommendation elements'
)
RecommendationAnalysis2WS.write('A5', 'Recommendation focus')
RecommendationAnalysis2WS.write('A6', 'Complete elements in the collection')
RecommendationAnalysis2WS.write('A7', 'Complete recommendation elements in the collection')
RecommendationAnalysis2WS.write(
'A8', 'Recommendation completeness focus')
RecommendationAnalysis2WS.write('A9', 'Upload Date')
RecommendationAnalysis2WS.write('B1', 'Formulas')
RecommendationAnalysis2WS.write('C1', 'MIN')
RecommendationAnalysis2WS.write('D1', 'MAX')
RecommendationAnalysis2WS.write('E1', 'AVG')
RecommendationAnalysis2WS.write('B10', 'Element Name')
RecommendationAnalysis2WS.write('C10', 'Collections')
RecommendationAnalysis2WS.write('D10', 'Complete')
RecommendationAnalysis2WS.write('E10', 'Partial')
Reader = csv.reader(
open(recommendationOccurrence2, 'r'), delimiter=',', quotechar='"')
row_count = 0
for row in Reader:
for col in range(len(row) - 1):
ElementTotal = xlsxwriter.utility.xl_rowcol_to_cell(5, col + 5)
RecommendationElementTotal = xlsxwriter.utility.xl_rowcol_to_cell(6, col + 5)
cell2 = xlsxwriter.utility.xl_rowcol_to_cell(0, col + 1)
cell3 = xlsxwriter.utility.xl_rowcol_to_cell(2, col + 5)
cell4 = xlsxwriter.utility.xl_rowcol_to_cell(3, col + 5)
colRange = xlsxwriter.utility.xl_range(2, col + 1, 5000, col + 1)
colRange2 = xlsxwriter.utility.xl_range(2, 5, 2, len(row) + 3)
formula2 = '=COUNTIF(XpathOccurrence!' + colRange + ',">"&0)'
RecommendationAnalysis2WS.write(2, col + 5, formula2)
formula3 = '=COUNTIF(BestPractices2011_Occurrence!' + colRange + ',">"&0)'
RecommendationAnalysis2WS.write(3, col + 5, formula3)
formula4 = '='+cell4+'/'+cell3
RecommendationAnalysis2WS.write(4, col + 5, formula4, cell_format11)
formula5 = '=COUNTIF(XpathOccurrence!' + colRange + ',"=1")/'+cell3
RecommendationAnalysis2WS.write(5, col + 5, formula5, cell_format11)
formula6 = '=COUNTIF(BestPractices2011_Occurrence!' + colRange + ',"=1")/'+cell3
RecommendationAnalysis2WS.write(6, col + 5, formula6, cell_format11)
formula7 = '='+RecommendationElementTotal+'/'+ElementTotal
RecommendationAnalysis2WS.write(7, col + 5, formula7, cell_format11)
formula1 = (
'=VLOOKUP("Number of Records",BestPractices2011_Occurrence!1:1048576,' +
str(col + 2) + ', False)'
)
RecommendationAnalysis2WS.write(1, col + 5, formula1, cell_format04)
formula = '=BestPractices2011_Occurrence!' + '%s' % cell2
RecommendationAnalysis2WS.write(0, col + 5, formula)
dateFormula = (
'=LEFT(RIGHT(BestPractices2011_Occurrence!' + '%s' % cell2 +
',LEN(BestPractices2011_Occurrence!' + '%s' % cell2 +
')-FIND("_", BestPractices2011_Occurrence!' +
'%s' % cell2 + ')-1),FIND("_",BestPractices2011_Occurrence!' +
'%s' % cell2 + ')+1)'
)
RecommendationAnalysis2WS.write(8, col + 5, dateFormula)
collectFormula = (
'=LEFT(BestPractices2011_Occurrence!' + '%s' % cell2 +
',FIND("_",BestPractices2011_Occurrence!' + '%s' % cell2 + ')-1)'
)
RecommendationAnalysis2WS.write(9, col + 5, collectFormula)
row_count += 1
#######################################################################
if recommendationCounts2 is not None:
Reader = csv.reader(
open(recommendationCounts2, 'r'), delimiter=',', quotechar='"')
row_count = 0
for row in Reader:
for col in range(len(row)):
                    recommendationcounts2.write(
row_count, col, row[col], cell_format04)
row_count += 1
Reader = csv.reader(
open(recommendationCounts2, 'r'), delimiter=',', quotechar='"')
row_count = 0
absRowCount = sum(1 for row in Reader)
absColCount = len(next(csv.reader(
open(recommendationCounts2, 'r'), delimiter=',', quotechar='"')))
            recommendationcounts2.autofilter(0, 0, absRowCount - 1, absColCount - 1)
for row in range(1, 4):
absColCount = len(next(csv.reader(
open(recommendationOccurrence, 'r'), delimiter=',', quotechar='"'
)))
colRange4 = xlsxwriter.utility.xl_range(row, 5, row, 3 + absColCount)
miniFormula = '=MIN(' + colRange4 + ')'
RecommendationAnalysis2WS.write(row, 2, miniFormula, cell_format04)
maxiFormula = '=MAX(' + colRange4 + ')'
RecommendationAnalysis2WS.write(row, 3, maxiFormula, cell_format04)
avgFormula = '=AVERAGE(' + colRange4 + ')'
RecommendationAnalysis2WS.write(row, 4, avgFormula, cell_format04)
for row in range(4, 8):
absColCount = len(next(csv.reader(
open(recommendationOccurrence, 'r'), delimiter=',', quotechar='"'
)))
colRange4 = xlsxwriter.utility.xl_range(row, 5, row, 3 + absColCount)
miniFormula = '=MIN(' + colRange4 + ')'
RecommendationAnalysis2WS.write(row, 2, miniFormula, cell_format11)
maxiFormula = '=MAX(' + colRange4 + ')'
RecommendationAnalysis2WS.write(row, 3, maxiFormula, cell_format11)
avgFormula = '=AVERAGE(' + colRange4 + ')'
RecommendationAnalysis2WS.write(row, 4, avgFormula, cell_format11)
Reader = csv.reader(
open(recommendationOccurrence2, 'r'), delimiter=',', quotechar='"'
)
absRowCount = sum(1 for row in Reader)
absColCount = len(next(csv.reader(
open(recommendationOccurrence2, 'r'), delimiter=',', quotechar='"'
)))
RecommendationAnalysis2WS.autofilter(9, 0, absRowCount + 7, absColCount + 3)
recommendationoccurrence2WS.autofilter(
0, 0, absRowCount - 2, absColCount - 1)
avgRecommendationOccur2WS.autofilter(0, 0, absRowCount - 2, absColCount - 1)
RecommendationAnalysis2WS.conditional_format(
10, 5, absRowCount + 8, absColCount +
3,
{'type': 'cell', 'criteria': '>=', 'value': 1, 'format': formatGreen}
)
RecommendationAnalysis2WS.conditional_format(
10, 5, absRowCount + 8, absColCount + 3,
{'type': 'cell', 'criteria': '=', 'value': 0, 'format': formatYellow}
)
RecommendationAnalysis2WS.conditional_format(
10, 5, absRowCount + 8, absColCount + 3,
{'type': 'cell', 'criteria': '=', 'value': -1, 'format': formatRed}
)
recommendationoccurrence2WS.conditional_format(
2, 1, absRowCount - 1, absColCount - 1,
{'type': 'cell', 'criteria': '>=', 'value': 1, 'format': formatGreen}
)
recommendationoccurrence2WS.conditional_format(
2, 1, absRowCount - 1, absColCount - 1,
{'type': 'cell', 'criteria': '=', 'value': 0, 'format': formatYellow}
)
recommendationoccurrence2WS.conditional_format(
2, 1, absRowCount - 1, absColCount - 1,
{'type': 'cell', 'criteria': '=', 'value': -1, 'format': formatRed}
)
avgRecommendationOccur2WS.conditional_format(
2, 1, absRowCount - 1, absColCount - 1,
{'type': 'cell', 'criteria': '>=', 'value': 1, 'format': formatGreen})
avgRecommendationOccur2WS.conditional_format(
2, 1, absRowCount - 1, absColCount - 1,
{'type': 'cell', 'criteria': '=', 'value': 0, 'format': formatYellow})
avgRecommendationOccur2WS.conditional_format(
2, 1, absRowCount - 1, absColCount - 1,
{'type': 'cell', 'criteria': '=', 'value': -1, 'format': formatRed})
RecommendationConcept2WS.conditional_format(
3, 3, 28, absColCount + 1,
{'type': 'cell', 'criteria': '>=', 'value': 1, 'format': formatGreen}
)
RecommendationConcept2WS.conditional_format(
3, 3, 28, absColCount + 1,
{'type': 'cell', 'criteria': '=', 'value': 0, 'format': formatYellow}
)
RecommendationConcept2WS.conditional_format(
3, 3, 28, absColCount + 1,
{'type': 'cell', 'criteria': '=', 'value': -1, 'format': formatRed}
)
RecommendationConcept2WS.conditional_format(
1, 3, 1, absColCount - 1,
{'type': 'cell', 'criteria': '>=', 'value': 1, 'format': formatGreen}
)
RecommendationConcept2WS.conditional_format(
1, 3, 1, absColCount - 1,
{'type': 'cell', 'criteria': '=', 'value': 0, 'format': formatYellow}
)
RecommendationConcept2WS.conditional_format(
1, 3, 1, absColCount - 1,
{'type': 'cell', 'criteria': '=', 'value': -1, 'format': formatRed}
)
for row in range(10, absRowCount + 8):
colRange5 = xlsxwriter.utility.xl_range(row, 5, row, absColCount + 3)
numbCollectFormula = '=COUNTIF(' + colRange5 + ',">"&0)'
CompleteCollectFormula = '=COUNTIF(' + colRange5 + ',"="&1)'
GreatCollectFormula = '=COUNTIF(' + colRange5 + ',"<"&1)-COUNTIF('+ colRange5 + ',"=0")'
RecommendationAnalysis2WS.write(row, 2, numbCollectFormula)
RecommendationAnalysis2WS.write(row, 3, CompleteCollectFormula)
RecommendationAnalysis2WS.write(row, 4, GreatCollectFormula)
###################################################################
XpathAnalysisWS = workbook.add_worksheet('AllXpaths')
xpathoccurrenceWS = workbook.add_worksheet('XpathOccurrence')
avgXpathOccurWS = workbook.add_worksheet('AVGxpathOccurrence')
if xpathCounts is not None:
xpathcounts = workbook.add_worksheet('XpathCounts')
XpathAnalysisWS.set_column('A:A', 70)
XpathAnalysisWS.set_column('B:B', 20)
recommendationoccurrenceWS.hide()
xpathoccurrenceWS.hide()
avgXpathOccurWS.hide()
avgRecommendationOccurWS.hide()
xpathoccurrenceWS.set_column('A:A', 70)
Reader = csv.reader(
open(xpathOccurrence, 'r'), delimiter=',', quotechar='"')
row_count = 0
xpathoccurrenceWS.set_row(1, None, cell_format04)
for row in Reader:
for col in range(len(row)):
xpathoccurrenceWS.write(row_count, col, row[col])
xpathoccurrenceWS.set_column(col, col, 15, cell_format11)
row_count += 1
Reader = csv.reader(
open(xpathOccurrence, 'r'), delimiter=',', quotechar='"')
row_count = 0
for row in Reader:
if Reader.line_num != 2:
for col in range(1, len(row)):
XpathAnalysisWS.write(
row_count + 9, col + 4, row[col], cell_format11
)
for col in range(0, 1):
XpathAnalysisWS.write(row_count + 9, col, row[col], cell_format11)
Xpathcell = xlsxwriter.utility.xl_rowcol_to_cell(row_count + 9, 0)
formulaElementSimplifier = (
'=MID(' + Xpathcell +
',1+FIND("|",SUBSTITUTE(' + Xpathcell +
',"/","|",LEN(' + Xpathcell + ')-LEN(SUBSTITUTE(' +
Xpathcell + ',"/","")))),100)'
)
XpathAnalysisWS.write(
row_count + 9, col + 1, formulaElementSimplifier, cell_format11
)
row_count += 1
if AVGxpathOccurrence is not None:
avgXpathOccurWS.set_column('A:A', 70)
Reader = csv.reader(
open(AVGxpathOccurrence, 'r'), delimiter=',', quotechar='"')
row_count = 0
avgXpathOccurWS.set_row(1, None, cell_format04)
for row in Reader:
for col in range(len(row)):
avgXpathOccurWS.write(row_count, col, row[col])
avgXpathOccurWS.set_column(col, col, 15, cell_format05)
for col in range(len(row) - 1):
cell2 = xlsxwriter.utility.xl_rowcol_to_cell(0, col + 1)
cell3 = xlsxwriter.utility.xl_rowcol_to_cell(2, col + 5)
colRange = xlsxwriter.utility.xl_range(2, col + 1, 5000, col + 1)
colRange2 = xlsxwriter.utility.xl_range(2, 5, 2, len(row) + 3)
formula2 = '=COUNTIF(xpathOccurrence!' + colRange + ',">"&0)'
XpathAnalysisWS.write(2, col + 5, formula2)
formula6 = (
'=COUNTIF(xpathOccurrence!' +
colRange + ',">="&1)/' + '%s' % cell3
)
XpathAnalysisWS.write(6, col + 5, formula6, cell_format11)
formula7 = (
'=COUNTIFS(xpathOccurrence!' +
colRange + ',">"&0,xpathOccurrence!' +
colRange + ',"<"&1)/' + '%s' % cell3
)
XpathAnalysisWS.write(7, col + 5, formula7, cell_format11)
formula1 = (
'=VLOOKUP("Number of Records",xpathOccurrence!1:1048576,' +
str(col + 2) + ', False)'
)
XpathAnalysisWS.write(1, col + 5, formula1, cell_format04)
cell2 = xlsxwriter.utility.xl_rowcol_to_cell(0, col + 1)
formula4 = '=SUM(xpathOccurrence!' + colRange + ')/' + '%s' % cell3
XpathAnalysisWS.write(4, col + 5, formula4, cell_format11)
formula5 = '=' + '%s' % cell3 + '/MAX(' + colRange2 + ')'
XpathAnalysisWS.write(5, col + 5, formula5, cell_format11)
formula = '=xpathOccurrence!' + '%s' % cell2
XpathAnalysisWS.write(0, col + 5, formula)
dateFormula = (
'=LEFT(RIGHT(xpathOccurrence!' + '%s' % cell2 +
',LEN(xpathOccurrence!' + '%s' % cell2 +
')-FIND("_", xpathOccurrence!' +
'%s' % cell2 + ')-1),FIND("_",xpathOccurrence!' +
'%s' % cell2 + ')+1)'
)
XpathAnalysisWS.write(8, col + 5, dateFormula)
collectFormula = (
'=LEFT(xpathOccurrence!' + '%s' % cell2 +
',FIND("_",xpathOccurrence!' + '%s' % cell2 + ')-1)'
)
XpathAnalysisWS.write(9, col + 5, collectFormula)
row_count += 1
#######################################################################
if xpathCounts is not None:
Reader = csv.reader(
open(xpathCounts, 'r'), delimiter=',', quotechar='"')
row_count = 0
for row in Reader:
for col in range(len(row)):
xpathcounts.write(row_count, col, row[col], cell_format04)
row_count += 1
Reader = csv.reader(
open(xpathCounts, 'r'), delimiter=',', quotechar='"')
row_count = 0
absRowCount = sum(1 for row in Reader)
absColCount = len(next(csv.reader(
open(xpathCounts, 'r'), delimiter=',', quotechar='"')))
xpathcounts.autofilter(0, 0, absRowCount - 1, absColCount - 1)
XpathAnalysisWS.write('A2', 'Number of Records')
XpathAnalysisWS.write('A3', 'Number of Elements / Attributes')
#XpathAnalysisWS.write(
# 'A4',
# 'Coverage w/r to Repository (CR): \
#number of elements / total number of elements'
#)
#XpathAnalysisWS.write('A5', 'Average Occurrence Rate')
#XpathAnalysisWS.write('A6', 'Repository Completeness: Number of elements \
#/ number of elements in most complete collection in repository')
XpathAnalysisWS.write('A7', 'Complete Elements')
#/ Total Number of elements in the collection')
XpathAnalysisWS.write('A8', 'Partially Complete Elements')
XpathAnalysisWS.write('A9', 'Upload Date')
XpathAnalysisWS.write('C1', 'MIN')
XpathAnalysisWS.write('D1', 'MAX')
XpathAnalysisWS.write('E1', 'AVG')
XpathAnalysisWS.write('B10', 'Element Name')
XpathAnalysisWS.write('C10', 'Collections')
XpathAnalysisWS.write('D10', 'Complete')
XpathAnalysisWS.write('E10', 'Partial')
for row in range(1, 3):
absColCount = len(next(csv.reader(
open(xpathOccurrence, 'r'), delimiter=',', quotechar='"'
)))
colRange4 = xlsxwriter.utility.xl_range(row, 5, row, 3 + absColCount)
miniFormula = '=MIN(' + colRange4 + ')'
XpathAnalysisWS.write(row, 2, miniFormula, cell_format04)
maxiFormula = '=MAX(' + colRange4 + ')'
XpathAnalysisWS.write(row, 3, maxiFormula, cell_format04)
avgFormula = '=AVERAGE(' + colRange4 + ')'
XpathAnalysisWS.write(row, 4, avgFormula, cell_format04)
for row in range(6, 8):
absColCount = len(next(csv.reader(
open(xpathOccurrence, 'r'), delimiter=',', quotechar='"'
)))
colRange4 = xlsxwriter.utility.xl_range(row, 5, row, 3 + absColCount)
miniFormula = '=MIN(' + colRange4 + ')'
XpathAnalysisWS.write(row, 2, miniFormula, cell_format11)
maxiFormula = '=MAX(' + colRange4 + ')'
XpathAnalysisWS.write(row, 3, maxiFormula, cell_format11)
avgFormula = '=AVERAGE(' + colRange4 + ')'
XpathAnalysisWS.write(row, 4, avgFormula, cell_format11)
Reader = csv.reader(
open(xpathOccurrence, 'r'), delimiter=',', quotechar='"'
)
absRowCount = sum(1 for row in Reader)
absColCount = len(next(csv.reader(
open(xpathOccurrence, 'r'), delimiter=',', quotechar='"'
)))
XpathAnalysisWS.autofilter(9, 0, absRowCount + 7, absColCount + 3)
xpathoccurrenceWS.autofilter(0, 0, absRowCount - 2, absColCount - 1)
avgXpathOccurWS.autofilter(0, 0, absRowCount - 2, absColCount - 1)
XpathAnalysisWS.conditional_format(
10, 5, absRowCount + 8, absColCount +
3,
{'type': 'cell', 'criteria': '>=', 'value': 1, 'format': formatGreen}
)
XpathAnalysisWS.conditional_format(
10, 5, absRowCount + 8, absColCount + 3,
{'type': 'cell', 'criteria': '=', 'value': 0, 'format': formatYellow}
)
XpathAnalysisWS.conditional_format(
10, 5, absRowCount + 8, absColCount + 3,
{'type': 'cell', 'criteria': '=', 'value': -1, 'format': formatRed}
)
xpathoccurrenceWS.conditional_format(
2, 1, absRowCount - 1, absColCount - 1,
{'type': 'cell', 'criteria': '>=', 'value': 1, 'format': formatGreen}
)
xpathoccurrenceWS.conditional_format(
2, 1, absRowCount - 1, absColCount - 1,
{'type': 'cell', 'criteria': '=', 'value': 0, 'format': formatYellow}
)
xpathoccurrenceWS.conditional_format(
2, 1, absRowCount - 1, absColCount - 1,
{'type': 'cell', 'criteria': '=', 'value': -1, 'format': formatRed}
)
xpathoccurrenceWS.conditional_format(
2, 1, absRowCount - 1, absColCount - 1,
{'type': 'cell', 'criteria': '>=', 'value': 1, 'format': formatGreen}
)
xpathoccurrenceWS.conditional_format(
2, 1, absRowCount - 1, absColCount - 1,
{'type': 'cell', 'criteria': '=', 'value': 0, 'format': formatYellow}
)
xpathoccurrenceWS.conditional_format(
2, 1, absRowCount - 1, absColCount - 1,
{'type': 'cell', 'criteria': '=', 'value': -1, 'format': formatRed}
)
avgXpathOccurWS.conditional_format(
2, 1, absRowCount - 1, absColCount - 1,
{'type': 'cell', 'criteria': '>=', 'value': 1, 'format': formatGreen})
avgXpathOccurWS.conditional_format(
2, 1, absRowCount - 1, absColCount - 1,
{'type': 'cell', 'criteria': '=', 'value': 0, 'format': formatYellow})
avgXpathOccurWS.conditional_format(
2, 1, absRowCount - 1, absColCount - 1,
{'type': 'cell', 'criteria': '=', 'value': -1, 'format': formatRed})
for row in range(10, absRowCount + 8):
colRange5 = xlsxwriter.utility.xl_range(row, 5, row, absColCount + 3)
numbCollectFormula = '=COUNTIF(' + colRange5 + ',">"&0)'
CompleteCollectFormula = '=COUNTIF(' + colRange5 + ',"="&1)'
GreatCollectFormula = '=COUNTIF(' + colRange5 + ',"<"&1)-COUNTIF('+ colRange5 + ',"=0")'
XpathAnalysisWS.write(row, 2, numbCollectFormula)
XpathAnalysisWS.write(row, 3, CompleteCollectFormula)
XpathAnalysisWS.write(row, 4, GreatCollectFormula)
#######################################################################
Reader = csv.reader(
open(xpathOccurrence, 'r'), delimiter=',', quotechar='"')
row_count = 0
for row in Reader:
for col in range(len(row) - 1):
cell2 = xlsxwriter.utility.xl_rowcol_to_cell(0, col + 1)
cell3 = xlsxwriter.utility.xl_rowcol_to_cell(2, col + 5)
colRange = xlsxwriter.utility.xl_range(2, col + 1, 5000, col + 1)
colRange2 = xlsxwriter.utility.xl_range(2, 5, 2, len(row) + 3)
formula1 = (
'=VLOOKUP("Number of Records",xpathOccurrence!1:1048576,' +
str(col + 2) + ', False)'
)
XpathAnalysisWS.write(1, col + 5, formula1, cell_format04)
cell2 = xlsxwriter.utility.xl_rowcol_to_cell(0, col + 1)
formula2 = '=COUNTIF(xpathOccurrence!' + colRange + ',">"&0)'
XpathAnalysisWS.write(2, col + 5, formula2)
formula = '=xpathOccurrence!' + '%s' % cell2
XpathAnalysisWS.write(0, col + 5, formula)
formula6 = (
'=COUNTIF(xpathOccurrence!' +
colRange + ',">="&1)/' + '%s' % cell3
)
XpathAnalysisWS.write(6, col + 5, formula6, cell_format11)
formula7 = (
'=COUNTIFS(xpathOccurrence!' +
colRange + ',">"&0,xpathOccurrence!' +
colRange + ',"<"&1)/' + '%s' % cell3
)
XpathAnalysisWS.write(7, col + 5, formula7, cell_format11)
dateFormula = (
'=LEFT(RIGHT(xpathOccurrence!' + '%s' % cell2 +
',LEN(xpathOccurrence!' + '%s' % cell2 +
')-FIND("_", xpathOccurrence!' +
'%s' % cell2 + ')-1),FIND("__",xpathOccurrence!' +
'%s' % cell2 + ')+1)'
)
XpathAnalysisWS.write(8, col + 5, dateFormula)
collectFormula = (
'=LEFT(xpathOccurrence!' + '%s' % cell2 +
',FIND("_",xpathOccurrence!' + '%s' % cell2 + ')-1)'
)
XpathAnalysisWS.write(9, col + 5, collectFormula)
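# Note: the LEFT/FIND formulas above assume column headers of the form
# "<collection>_<uploaddate>__...", so the collection name is everything before the
# first "_" and the upload date sits between that "_" and the "__" separator.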
#######################################################################
RecommendationAnalysisWS.set_column('A:A', 70)
RecommendationAnalysisWS.set_column('B:B', 20)
recommendationoccurrenceWS.set_column('A:A', 70)
Reader = csv.reader(
open(recommendationOccurrence, 'r'), delimiter=',', quotechar='"')
row_count = 0
recommendationoccurrenceWS.set_row(1, None, cell_format04)
for row in Reader:
for col in range(len(row)):
recommendationoccurrenceWS.write(
row_count, col, row[col])
recommendationoccurrenceWS.set_column(
col, col, 15, cell_format11)
row_count += 1
Reader = csv.reader(
open(recommendationOccurrence, 'r'), delimiter=',', quotechar='"')
row_count = 0
for row in Reader:
if Reader.line_num != 2:
for col in range(1, len(row)):
RecommendationAnalysisWS.write(
row_count + 9, col + 4, row[col], cell_format11
)
for col in range(0, 1):
RecommendationAnalysisWS.write(
row_count + 9, col, row[col], cell_format11)
Recommendationcell = xlsxwriter.utility.xl_rowcol_to_cell(
row_count + 9, 0)
formulaElementSimplifier = (
'=MID(' + Recommendationcell +
',1+FIND("|",SUBSTITUTE(' + Recommendationcell +
',"/","|",LEN(' + Recommendationcell + ')-LEN(SUBSTITUTE(' +
Recommendationcell + ',"/","")))),100)'
)
RecommendationAnalysisWS.write(
row_count + 9, col + 1, formulaElementSimplifier, cell_format11
)
row_count += 1
avgRecommendationOccurWS.set_column('A:A', 70)
if AVGrecommendationOccurrence is not None:
Reader = csv.reader(
open(AVGrecommendationOccurrence, 'r'), delimiter=',', quotechar='"')
row_count = 0
avgRecommendationOccurWS.set_row(1, None, cell_format04)
for row in Reader:
for col in range(len(row)):
avgRecommendationOccurWS.write(
row_count, col, row[col])
avgRecommendationOccurWS.set_column(col, col, 15, cell_format05)
RecommendationAnalysisWS.write('A2', 'Number of records')
RecommendationAnalysisWS.write('A3', 'Number of elements')
RecommendationAnalysisWS.write(
'A4',
'Number of recommendation elements'
)
RecommendationAnalysisWS.write('A5', 'Recommendation focus')
RecommendationAnalysisWS.write('A6', 'Complete elements in the collection')
RecommendationAnalysisWS.write('A7', 'Complete recommendation elements in the collection')
RecommendationAnalysisWS.write(
'A8', 'Recommendation completeness focus')
RecommendationAnalysisWS.write('A9', 'Upload Date')
RecommendationAnalysisWS.write('B1', 'Formulas')
RecommendationAnalysisWS.write('C1', 'MIN')
RecommendationAnalysisWS.write('D1', 'MAX')
RecommendationAnalysisWS.write('E1', 'AVG')
RecommendationAnalysisWS.write('B10', 'Element Name')
RecommendationAnalysisWS.write('C10', 'Collections')
RecommendationAnalysisWS.write('D10', 'Complete')
RecommendationAnalysisWS.write('E10', 'Partial')
Reader = csv.reader(
open(recommendationOccurrence, 'r'), delimiter=',', quotechar='"')
row_count = 0
for row in Reader:
for col in range(len(row) - 1):
ElementTotal = xlsxwriter.utility.xl_rowcol_to_cell(5, col + 5)
RecommendationElementTotal = xlsxwriter.utility.xl_rowcol_to_cell(6, col + 5)
cell2 = xlsxwriter.utility.xl_rowcol_to_cell(0, col + 1)
cell3 = xlsxwriter.utility.xl_rowcol_to_cell(2, col + 5)
cell4 = xlsxwriter.utility.xl_rowcol_to_cell(3, col + 5)
colRange = xlsxwriter.utility.xl_range(2, col + 1, 5000, col + 1)
colRange2 = xlsxwriter.utility.xl_range(2, 5, 2, len(row) + 3)
formula2 = '=COUNTIF(XpathOccurrence!' + colRange + ',">"&0)'
RecommendationAnalysisWS.write(2, col + 5, formula2)
formula3 = '=COUNTIF(BestPractices2004_Occurrence!' + colRange + ',">"&0)'
RecommendationAnalysisWS.write(3, col + 5, formula3)
formula4 = '='+cell4+'/'+cell3
RecommendationAnalysisWS.write(4, col + 5, formula4, cell_format11)
formula5 = '=COUNTIF(XpathOccurrence!' + colRange + ',"=1")/'+cell3
RecommendationAnalysisWS.write(5, col + 5, formula5, cell_format11)
formula6 = '=COUNTIF(BestPractices2004_Occurrence!' + colRange + ',"=1")/'+cell3
RecommendationAnalysisWS.write(6, col + 5, formula6, cell_format11)
formula7 = '='+RecommendationElementTotal+'/'+ElementTotal
RecommendationAnalysisWS.write(7, col + 5, formula7, cell_format11)
formula1 = (
'=VLOOKUP("Number of Records",BestPractices2004_Occurrence!1:1048576,' +
str(col + 2) + ', False)'
)
RecommendationAnalysisWS.write(1, col + 5, formula1, cell_format04)
formula = '=BestPractices2004_Occurrence!' + '%s' % cell2
RecommendationAnalysisWS.write(0, col + 5, formula)
dateFormula = (
'=LEFT(RIGHT(BestPractices2004_Occurrence!' + '%s' % cell2 +
',LEN(BestPractices2004_Occurrence!' + '%s' % cell2 +
')-FIND("_", BestPractices2004_Occurrence!' +
'%s' % cell2 + ')-1),FIND("_",BestPractices2004_Occurrence!' +
'%s' % cell2 + ')+1)'
)
RecommendationAnalysisWS.write(8, col + 5, dateFormula)
collectFormula = (
'=LEFT(BestPractices2004_Occurrence!' + '%s' % cell2 +
',FIND("_",BestPractices2004_Occurrence!' + '%s' % cell2 + ')-1)'
)
RecommendationAnalysisWS.write(9, col + 5, collectFormula)
row_count += 1
#######################################################################
if recommendationCounts is not None:
Reader = csv.reader(
open(recommendationCounts, 'r'), delimiter=',', quotechar='"')
row_count = 0
for row in Reader:
for col in range(len(row)):
recommendationcounts.write(
row_count, col, row[col], cell_format04)
row_count += 1
Reader = csv.reader(
open(recommendationCounts, 'r'), delimiter=',', quotechar='"')
row_count = 0
absRowCount = sum(1 for row in Reader)
absColCount = len(next(csv.reader(
open(recommendationCounts, 'r'), delimiter=',', quotechar='"')))
recommendationcounts.autofilter(0, 0, absRowCount - 1, absColCount - 1)
for row in range(1, 4):
absColCount = len(next(csv.reader(
open(recommendationOccurrence, 'r'), delimiter=',', quotechar='"'
)))
colRange4 = xlsxwriter.utility.xl_range(row, 5, row, 3 + absColCount)
miniFormula = '=MIN(' + colRange4 + ')'
RecommendationAnalysisWS.write(row, 2, miniFormula, cell_format04)
maxiFormula = '=MAX(' + colRange4 + ')'
RecommendationAnalysisWS.write(row, 3, maxiFormula, cell_format04)
avgFormula = '=AVERAGE(' + colRange4 + ')'
RecommendationAnalysisWS.write(row, 4, avgFormula, cell_format04)
for row in range(4, 8):
absColCount = len(next(csv.reader(
open(recommendationOccurrence, 'r'), delimiter=',', quotechar='"'
)))
colRange4 = xlsxwriter.utility.xl_range(row, 5, row, 3 + absColCount)
miniFormula = '=MIN(' + colRange4 + ')'
RecommendationAnalysisWS.write(row, 2, miniFormula, cell_format11)
maxiFormula = '=MAX(' + colRange4 + ')'
RecommendationAnalysisWS.write(row, 3, maxiFormula, cell_format11)
avgFormula = '=AVERAGE(' + colRange4 + ')'
RecommendationAnalysisWS.write(row, 4, avgFormula, cell_format11)
Reader = csv.reader(
open(recommendationOccurrence, 'r'), delimiter=',', quotechar='"'
)
absRowCount = sum(1 for row in Reader)
absColCount = len(next(csv.reader(
open(recommendationOccurrence, 'r'), delimiter=',', quotechar='"'
)))
RecommendationAnalysisWS.autofilter(9, 0, absRowCount + 7, absColCount + 3)
recommendationoccurrenceWS.autofilter(
0, 0, absRowCount - 2, absColCount - 1)
avgRecommendationOccurWS.autofilter(0, 0, absRowCount - 2, absColCount - 1)
RecommendationAnalysisWS.conditional_format(
10, 5, absRowCount + 8, absColCount + 3,
{'type': 'cell', 'criteria': '>=', 'value': 1, 'format': formatGreen}
)
RecommendationAnalysisWS.conditional_format(
10, 5, absRowCount + 8, absColCount + 3,
{'type': 'cell', 'criteria': '=', 'value': 0, 'format': formatYellow}
)
RecommendationAnalysisWS.conditional_format(
10, 5, absRowCount + 8, absColCount + 3,
{'type': 'cell', 'criteria': '=', 'value': -1, 'format': formatRed}
)
recommendationoccurrenceWS.conditional_format(
2, 1, absRowCount - 1, absColCount - 1,
{'type': 'cell', 'criteria': '>=', 'value': 1, 'format': formatGreen}
)
recommendationoccurrenceWS.conditional_format(
2, 1, absRowCount - 1, absColCount - 1,
{'type': 'cell', 'criteria': '=', 'value': 0, 'format': formatYellow}
)
recommendationoccurrenceWS.conditional_format(
2, 1, absRowCount - 1, absColCount - 1,
{'type': 'cell', 'criteria': '=', 'value': -1, 'format': formatRed}
)
avgRecommendationOccurWS.conditional_format(
2, 1, absRowCount - 1, absColCount - 1,
{'type': 'cell', 'criteria': '>=', 'value': 1, 'format': formatGreen})
avgRecommendationOccurWS.conditional_format(
2, 1, absRowCount - 1, absColCount - 1,
{'type': 'cell', 'criteria': '=', 'value': 0, 'format': formatYellow})
avgRecommendationOccurWS.conditional_format(
2, 1, absRowCount - 1, absColCount - 1,
{'type': 'cell', 'criteria': '=', 'value': -1, 'format': formatRed})
RecommendationConceptWS.conditional_format(
3, 3, 28, absColCount + 1,
{'type': 'cell', 'criteria': '>=', 'value': 1, 'format': formatGreen}
)
RecommendationConceptWS.conditional_format(
3, 3, 28, absColCount + 1,
{'type': 'cell', 'criteria': '=', 'value': 0, 'format': formatYellow}
)
RecommendationConceptWS.conditional_format(
3, 3, 28, absColCount + 1,
{'type': 'cell', 'criteria': '=', 'value': -1, 'format': formatRed}
)
RecommendationConceptWS.conditional_format(
1, 3, 1, absColCount - 1,
{'type': 'cell', 'criteria': '>=', 'value': 1, 'format': formatGreen}
)
RecommendationConceptWS.conditional_format(
1, 3, 1, absColCount - 1,
{'type': 'cell', 'criteria': '=', 'value': 0, 'format': formatYellow}
)
RecommendationConceptWS.conditional_format(
1, 3, 1, absColCount - 1,
{'type': 'cell', 'criteria': '=', 'value': -1, 'format': formatRed}
)
for row in range(10, absRowCount + 8):
colRange5 = xlsxwriter.utility.xl_range(row, 5, row, absColCount + 3)
numbCollectFormula = '=COUNTIF(' + colRange5 + ',">"&0)'
CompleteCollectFormula = '=COUNTIF(' + colRange5 + ',"="&1)'
GreatCollectFormula = '=COUNTIF(' + colRange5 + ',"<"&1)-COUNTIF('+ colRange5 + ',"=0")'
RecommendationAnalysisWS.write(row, 2, numbCollectFormula)
RecommendationAnalysisWS.write(row, 3, CompleteCollectFormula)
RecommendationAnalysisWS.write(row, 4, GreatCollectFormula)
#######################################################################
#######################################################################
workbook.close()
def WriteToGoogle(SpreadsheetLocation, folderID=None, Convert=None, Link=None):
"""
Upload files to Google Drive. Requires PyDrive, a client_secrets.json file, and a saved credentials file (mycreds.txt).
"""
client_json = '../scripts/client_secrets.json'
GoogleAuth.DEFAULT_SETTINGS['client_config_file'] = (client_json)
gauth = GoogleAuth()
# Try to load saved client credentials
mycred_file = '../scripts/mycreds.txt'
gauth.LoadCredentialsFile(mycred_file)
# if not creds or creds.invalid:
if gauth.credentials is None:
# Authenticate if they're not there
gauth.LocalWebserverAuth()
elif gauth.access_token_expired:
# Refresh them if expired
gauth.Refresh()
else:
# Initialize the saved creds
gauth.Authorize()
# Save the current credentials to a file
gauth.SaveCredentialsFile(mycred_file)
drive = GoogleDrive(gauth)
SpreadsheetName = SpreadsheetLocation.rsplit('/', 1)[-1]
SpreadsheetName = SpreadsheetName[:-5]
if folderID is not None:
test_file = drive.CreateFile({'title': SpreadsheetName,
"parents": [{"kind": "drive#fileLink", "id": folderID}]})
else:
test_file = drive.CreateFile(
{'title': SpreadsheetName, "parents": [{"kind": "drive#fileLink"}]})
test_file.SetContentFile(SpreadsheetLocation)
if Convert is None:
test_file.Upload({'convert': False})
else:
test_file.Upload({'convert': True})
# Insert the permission.
permission = test_file.InsertPermission(
{'type': 'anyone', 'value': 'anyone', 'role': 'reader'})
hyperlink = (test_file['alternateLink']) # Display the sharable link.
if Link is True:
return hyperlink
else:
ReportURLstring = '<a href="' + str(hyperlink) + '">' + SpreadsheetName + '</a>'
display(HTML(ReportURLstring)) # Display the sharable link.
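# Example call (hypothetical path and folder ID):
# WriteToGoogle('../reports/MetadataReport.xlsx', folderID='0BexampleFolderId', Convert=True)
# uploads the workbook, converts it to a Google Sheet, and displays a sharable link.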
def crop(image_path, coords, saved_location):
"""
@param image_path: The path to the image to edit
@param coords: A tuple of x/y coordinates (x1, y1, x2, y2)
@param saved_location: Path to save the cropped image
"""
image_obj = Image.open(image_path)
cropped_image = image_obj.crop(coords)
cropped_image.save(saved_location)
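# Example (illustrative values): crop('radar_full.png', (0, 0, 1200, 1200), 'radar_top.png')
# saves only the region bounded by (x1, y1, x2, y2) = (0, 0, 1200, 1200).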
def Site_ttConceptAnalysis(Site, recommendationName, RecDict, LevelOrder, ConceptOrder, ElementOrder, YearsInvestigated):
recMD = ['RecConcept',
'RecLevel',
'RecElement']
# use a site's recommendation elements occurrence table, and add some columns for metadata about the recommendation
recOccurDF = pd.read_csv(os.path.join("..","data", recommendationName, Site+"_" + recommendationName + "Occurrence.csv"))
recOccurDF.insert(0, "RecElement", 0, allow_duplicates=False)
recOccurDF.insert(0, "RecLevel", 0, allow_duplicates=False)
recOccurDF.insert(0, "RecConcept", 0, allow_duplicates=False)
'''
use the RecDict to look at the XPath column and for each key that matches part of any cell,
write the value into the same row in the recOccurDF
'''
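# RecDict is assumed to map XPath fragments to recommendation element names,
# e.g. {'/metadata/idinfo/citation/title': 'Title'} (illustrative mapping only);
# the first key found inside each XPath supplies the value.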
recOccurDF['RecElement'] = recOccurDF['XPath'].apply(lambda x: [value for key, value in RecDict.items() if key in x][0] )
# create a list to order the columns with
columnOrder = list(recOccurDF)
# don't need xpaths any more
columnOrder.remove('XPath')
'''
create a pivot table that leverages the dataframe recOccurDF's column for recommendation elements
and assigns the highest percentage among any of the XPaths of the child elements to that row for a particular year
'''
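# aggfunc='max' keeps, for each RecElement, the highest percentage among its child XPaths in every year column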
radarElements = pd.pivot_table(recOccurDF, index='RecElement', columns=None, aggfunc='max').reindex(ElementOrder).reset_index()
radarElements = radarElements[columnOrder]
# fill in the metadata about concepts and recommendation levels
radarElements['RecConcept'] = pd.Series(ConceptOrder)
radarElements['RecLevel'] = pd.Series(LevelOrder)
radarElements = radarElements.fillna(value=0.0)
lineConcepts = radarElements
# remove the site name from the column
radarElements = radarElements.rename(columns={col: col.split('__')[-1] for col in radarElements.columns})
# create recommendation concept csv
#lineConcepts = lineConcepts.drop(['RecElement','RecLevel'], axis=1)
lineConcepts.loc[-1] = lineConcepts.iloc[1:,:].mean(axis=0, numeric_only=True)
lineConcepts.index = lineConcepts.index + 1 # shifting index
lineConcepts.fillna('Average Completeness', inplace=True)
lineConcepts = lineConcepts.sort_index()
lineConcepts.to_csv(os.path.join('..','data', recommendationName, Site+'_' + recommendationName + 'Complete.csv'), index=False)
# remove the site name from the column
lineConcepts = lineConcepts.rename(columns={col: col.split('__')[-1] for col in lineConcepts.columns})
lineConcepts.to_csv(os.path.join('..','data', recommendationName, Site+'_' + recommendationName + 'Completeness.csv'), index=False)
# create new version of concept occurrence table
radarList = list(radarElements)
difference = list(set(YearsInvestigated) - set(radarList[3:]))
for year in difference:
radarElements.insert(0, year, 0, allow_duplicates=False)
RecOccurDFcols = recMD + YearsInvestigated
radarElements = radarElements[RecOccurDFcols]
'''
Take the occurrence of the conceptual elements from each site's pivot table and plot each year's output
on a radar chart from 0 to 1, with each RecElement as an axis and the occurrence of records shown as the percentage of color along that axis.
'''
# create a structure to add data to.
data = []
fig = tools.make_subplots(rows=14, cols=1, print_grid=False)
count = 0
# add the data from each year to a subplot.
for year in YearsInvestigated: #collectionsToProcess
count = count + 1
data.append(go.Scatterpolar(
name = year,
mode = 'lines',
r = radarElements[year].tolist()[1:],
theta = radarElements['RecElement'].tolist()[1:],
line = dict(width = 50), #, shape = 'spline', smoothing = 1.3),
#opacity = .75,
fill = 'toself',
#fillcolor = '',
connectgaps = False,
subplot = 'polar'+year
))
fig.add_traces(data)
layout = {
'polar2005': dict(
radialaxis = dict(
angle = 0
),
angularaxis = dict(
direction = "clockwise",
period = 6
)
),
'polar2006': dict(
radialaxis = dict(
angle = 0
),
angularaxis = dict(
direction = "clockwise",
period = 6
)
),
'polar2007': dict(
radialaxis = dict(
angle = 0
),
angularaxis = dict(
direction = "clockwise",
period = 6
)
),
'polar2008': dict(
radialaxis = dict(
angle = 0
),
angularaxis = dict(
direction = "clockwise",
period = 6
)
),
'polar2009': dict(
radialaxis = dict(
angle = 0
),
angularaxis = dict(
direction = "clockwise",
period = 6
)
),
'polar2010': dict(
radialaxis = dict(
angle = 0
),
angularaxis = dict(
direction = "clockwise",
period = 6
)
),
'polar2011': dict(
radialaxis = dict(
angle = 0
),
angularaxis = dict(
direction = "clockwise",
period = 6
)
),
'polar2012': dict(
radialaxis = dict(
angle = 0
),
angularaxis = dict(
direction = "clockwise",
period = 6
)
),
'polar2013': dict(
radialaxis = dict(
angle = 0
),
angularaxis = dict(
direction = "clockwise",
period = 6
)
),
'polar2014': dict(
radialaxis = dict(
angle = 0
),
angularaxis = dict(
direction = "clockwise",
period = 6
)
),
'polar2015': dict(
radialaxis = dict(
angle = 0
),
angularaxis = dict(
direction = "clockwise",
period = 6
)
),
'polar2016': dict(
radialaxis = dict(
angle = 0
),
angularaxis = dict(
direction = "clockwise",
period = 6
)
),
'polar2017': dict(
radialaxis = dict(
angle = 0
),
angularaxis = dict(
direction = "clockwise",
period = 6
)
),
'polar2018': dict(
radialaxis = dict(
angle = 0
),
angularaxis = dict(
direction = "clockwise",
period = 6
)
),
'showlegend': False,
"height": 1200, "width": 16800, "autosize": False, "title": Site + recommendationName + 'Completeness 2005-2018'
}
# create a description of the placement of each subplot
layout2 = {
'polar2005': dict(
domain = dict(
x = [0, 1],
y = [.96, 1]
),
radialaxis = dict(
angle = 0
),
angularaxis = dict(
direction = "clockwise",
period = 6
)
),
'polar2006': dict(
domain = dict(
x = [0, 1],
y = [0.89, 1]
),
radialaxis = dict(
angle = 0
),
angularaxis = dict(
direction = "clockwise",
period = 6
)
),
'polar2007': dict(
domain = dict(
x = [0, 1],
y = [0.818, 1]
),
radialaxis = dict(
angle = 0
),
angularaxis = dict(
direction = "clockwise",
period = 6
)
),
'polar2008': dict(
domain = dict(
x = [0, 1],
y = [0.746, 1]
),
radialaxis = dict(
angle = 0
),
angularaxis = dict(
direction = "clockwise",
period = 6
)
),
'polar2009': dict(
domain = dict(
x = [0, 1],
y = [0.675, 1]
),
radialaxis = dict(
angle = 0
),
angularaxis = dict(
direction = "clockwise",
period = 6
)
),
'polar2010': dict(
domain = dict(
x = [0, 1],
y = [0.603, 1]
),
radialaxis = dict(
angle = 0
),
angularaxis = dict(
direction = "clockwise",
period = 6
)
),
'polar2011': dict(
domain = dict(
x = [0, 1],
y = [0.531, 1]
),
radialaxis = dict(
angle = 0
),
angularaxis = dict(
direction = "clockwise",
period = 6
)
),
'polar2012': dict(
domain = dict(
x = [0, 1],
y = [0.460, 1]
),
radialaxis = dict(
angle = 0
),
angularaxis = dict(
direction = "clockwise",
period = 6
)
),
'polar2013': dict(
domain = dict(
x = [0, 1],
y = [0.388, 1]
),
radialaxis = dict(
angle = 0
),
angularaxis = dict(
direction = "clockwise",
period = 6
)
),
'polar2014': dict(
domain = dict(
x = [0, 1],
y = [0.317, 1]
),
radialaxis = dict(
angle = 0
),
angularaxis = dict(
direction = "clockwise",
period = 6
)
),
'polar2015': dict(
domain = dict(
x = [0, 1],
y = [0.245, 1]
),
radialaxis = dict(
angle = 0
),
angularaxis = dict(
direction = "clockwise",
period = 6
)
),
'polar2016': dict(
domain = dict(
x = [0, 1],
y = [0.174, 1]
),
radialaxis = dict(
angle = 0
),
angularaxis = dict(
direction = "clockwise",
period = 6
)
),
'polar2017': dict(
domain = dict(
x = [0, 1],
y = [0.103, 1]
),
radialaxis = dict(
angle = 0
),
angularaxis = dict(
direction = "clockwise",
period = 6
)
),
'polar2018': dict(
domain = dict(
x = [0, 1],
y = [0.029, 1]
),
radialaxis = dict(
angle = 0
),
angularaxis = dict(
direction = "clockwise",
period = 6
)
),
'showlegend': False,
"height": 32700, "width": 1200, "autosize": False
}
fig2 = {'data':data,'layout':layout2}
pio.write_image(fig2, os.path.join('..','data', recommendationName, Site + recommendationName + '_bigPict_.png'))
fig = {'data':data,'layout':layout}
pio.write_image(fig, os.path.join('..','data', recommendationName, Site + '_' + recommendationName + '_.png'))
crop(os.path.join('..','data', recommendationName, Site+ recommendationName + '_bigPict_.png'), (0, 0, 1200, 16600), os.path.join('..','data', recommendationName, Site+ recommendationName + '_bigPicture_.png'))
os.remove(os.path.join('..','data', recommendationName, Site+ recommendationName + '_bigPict_.png'))
def CombineAppliedRecommendation(Site, recElements, recommendationName, RecommendationOccurrenceToCombine, RecommendationcountsToCombine=None):
# places for all the combined data
RecommendationOccurrence = os.path.join("..", "data", recommendationName, "combinedCollections" + '_' + recommendationName + 'Occurrence.csv')
RecommendationConcept = os.path.join('..','data', recommendationName, "combinedCollections" + '_' + recommendationName + 'Completeness.csv')
#RecommendationGraph = os.path.join('..','data', recommendationName, "combinedCollections" + '_' + recommendationName + '_.png')
if RecommendationcountsToCombine is not None:
RecommendationCounts = os.path.join("..", "data", recommendationName, Site + '_' + recommendationName + 'Counts.csv')
CombineXPathCounts(RecommendationcountsToCombine, RecommendationCounts)
# combine xpathoccurrence from a specific site for each year
RecommendationCountsDF = pd.read_csv(RecommendationCounts)
CombineXPathOccurrence(RecommendationOccurrenceToCombine,
RecommendationOccurrence, to_csv=True)
RecommendationOccurrenceDF = pd.read_csv(RecommendationOccurrence)
# change order of rows to be meaningful for recommendation
CollectionRecRows = []
CollectionRecRows.append(["Number of Records"])
CollectionRecColumns = []
CollectionRecColumns.append(["Collection","Record"])
for element in recElements:
# find the rows that match each element
CollectionElements = list(RecommendationOccurrenceDF['XPath'])[1:]
matchingElements = [CollectionElement for CollectionElement in CollectionElements if element in CollectionElement]
#append the list to a master list that will be used to order the chart
CollectionRecRows.append(matchingElements)
CollectionRecColumns.append(matchingElements)
CollectionRecRows = [item for sublist in CollectionRecRows for item in sublist]
CollectionRecColumns = [item for sublist in CollectionRecColumns for item in sublist]
from collections import OrderedDict
CollectionRecRows = list(OrderedDict.fromkeys(CollectionRecRows))
CollectionRecColumns = list(OrderedDict.fromkeys(CollectionRecColumns))
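# OrderedDict.fromkeys() removes duplicate XPaths while preserving the recommendation ordering built above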
if RecommendationcountsToCombine is not None:
RecommendationCountsDF = RecommendationCountsDF[CollectionRecColumns]
# change order of rows to be meaningful for recommendation
RecommendationOccurrenceDF = RecommendationOccurrenceDF.set_index('XPath')
RecommendationOccurrenceDF = RecommendationOccurrenceDF.loc[CollectionRecRows]
RecommendationOccurrenceDF = RecommendationOccurrenceDF.reset_index()
# write over the previous csv
RecommendationOccurrenceDF.to_csv(RecommendationOccurrence, index=False, mode='w')
def Collection_ConceptAnalysis(Site, recommendationName, RecDict, LevelOrder, ConceptOrder, ElementOrder, YearsInvestigated):
recMD = ['RecConcept',
'RecLevel',
'RecElement']
# use a site's recommendation elements occurrence table, and add some columns for metadata about the recommendation
recOccurDF = pd.read_csv(os.path.join("..","data", recommendationName, "combinedCollections"+"_" + recommendationName + "Occurrence.csv"))
recOccurDF.insert(0, "RecElement", 0, allow_duplicates=False)
recOccurDF.insert(0, "RecLevel", 0, allow_duplicates=False)
recOccurDF.insert(0, "RecConcept", '', allow_duplicates=False)
'''
use the RecDict to look at the XPath column and for each key that matches part of any cell,
write the value into the same row in the recOccurDF
'''
recOccurDF['RecElement'] = recOccurDF['XPath'].apply(lambda x: [value for key, value in RecDict.items() if key in x][0] )
# create a list to order the columns with
columnOrder = list(recOccurDF)
# don't need xpaths any more
columnOrder.remove('XPath')
'''
create a pivot table that leverages the dataframe recOccurDF's column for recommendation elements
and assigns the highest percentage among any of the XPaths of the child elements to that row for a particular year
'''
radarElements = pd.pivot_table(recOccurDF, index='RecElement', columns=None, aggfunc='max').reindex(ElementOrder).reset_index()
radarElements = radarElements[columnOrder]
# fill in the metadata about concepts and recommendation levels
radarElements['RecConcept'] = pd.Series(ConceptOrder)
radarElements['RecLevel'] = | pd.Series(LevelOrder) | pandas.Series |
def load_gene_exp_to_df(inst_path):
'''
Loads gene expression data from 10x in sparse matrix format and returns a
Pandas dataframe
'''
import pandas as pd
from scipy import io
from scipy import sparse
from ast import literal_eval as make_tuple
# matrix
Matrix = io.mmread( inst_path + 'matrix.mtx')
mat = Matrix.todense()
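# the 10x matrix.mtx file is a sparse genes-x-barcodes count matrix; densify it before building the DataFrame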
# genes
filename = inst_path + 'genes.tsv'
f = open(filename, 'r')
lines = f.readlines()
f.close()
# # add unique id to all genes
# genes = []
# unique_id = 0
# for inst_line in lines:
# inst_line = inst_line.strip().split()
# if len(inst_line) > 1:
# inst_gene = inst_line[1]
# else:
# inst_gene = inst_line[0]
# genes.append(inst_gene + '_' + str(unique_id))
# unique_id = unique_id + 1
# add unique id only to duplicate genes
ini_genes = []
for inst_line in lines:
inst_line = inst_line.strip().split()
if len(inst_line) > 1:
inst_gene = inst_line[1]
else:
inst_gene = inst_line[0]
ini_genes.append(inst_gene)
gene_name_count = | pd.Series(ini_genes) | pandas.Series |
"""
Static data imports
Written by <NAME> <EMAIL>
(C) 2014-2017 <NAME>
Released under Apache 2.0 license. More info at http://www.apache.org/licenses/LICENSE-2.0
"""
import pandas
from pandas import read_csv
#import os
#print os.path.dirname(os.path.abspath(__file__))
# Main folders
#UATPATH = 'O:\\Global Markets\\Credit~2\\Credit~1\\FlowTr~1\\Tools\\FlowTr~1\\'
APPPATH = 'O:\\Global Markets\\Credit~3\\FlowTr~1\\'
MYPATH = APPPATH + 'source\\development\\'
UATPATH = MYPATH
TEMPPATH = APPPATH + 'temp\\'
DEFPATH = APPPATH + 'definitions\\'
THPATH = APPPATH + 'TradeH~1\\'
MAPATH = APPPATH + 'ma_logs\\'
BBGPATH = APPPATH + 'bbg_logs\\'
PHPATH = APPPATH + 'PriceH~1\\'
STAGINGPATH = 'Z:\\GlobalMarkets\\Credit Trading\\PROD\\Staging\\'
# User definitions
gs = read_csv(DEFPATH+'genericSettings.csv')
logoFile = gs['logoFile'].iloc[0]
LDNFLOWBOOKS = list(gs['LDNFLOWBOOKS'][gs['LDNFLOWBOOKS'].notnull()]) # excludes stlbk atm
TRADERS = list(gs['TRADERS'][gs['TRADERS'].notnull()])
frontToEmail = dict(zip(TRADERS,list(gs['EMAIL'][gs['EMAIL'].notnull()])))
traderLogins = dict(zip(list(gs['WINLOGIN'][gs['WINLOGIN'].notnull()]),TRADERS))
tabList = read_csv(DEFPATH+'TabListByTrader.csv')
columnListByTrader = read_csv(DEFPATH+'ColumnListByTrader.csv')
# Chart definitions
xls = pandas.ExcelFile(DEFPATH+'chart_definitions.xls')
#BONDCHARTS = xls.parse('groups')
#BONDCHARTCOLORS = xls.parse('colors')
BONDCHARTS = pandas.read_excel(xls,'groups')
BONDCHARTCOLORS = pandas.read_excel(xls,'colors')
# Bond universe
bonds = pandas.read_excel(DEFPATH+'bonduniverse.xls', sheetname='list',index_col=0)
regsToBondName = {v: k for k, v in dict(bonds['REGS']).items()}
countries = read_csv(DEFPATH+'countrycodes.csv')
isinsregs = pandas.Series(bonds.index,index=bonds['REGS'])
isins144a = pandas.Series(bonds.index,index=bonds['144A'])
allisins = isinsregs.append(isins144a)
allisins.name = 'Bond'
allisins = allisins.drop(allisins.index.get_duplicates())
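# allisins maps every ISIN (both REGS and 144A) back to its bond name, with duplicated ISINs dropped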
SPECIALBONDS = list(gs['SPECIALBONDS'][gs['SPECIALBONDS'].notnull()]) # just 'TNZNIA' atm
SINKABLEBONDS = list(gs['SINKABLEBONDS'][gs['SINKABLEBONDS'].notnull()]) # will have different z-spread rule
bonduniverseexclusionsdf = read_csv(DEFPATH+'bonduniverseexclusions.csv', header=None)
bonduniverseexclusionsList = list(bonduniverseexclusionsdf[0])
ratingsScale = read_csv(DEFPATH+'RatingsScale.csv',index_col=0)
# Pricer
bbgToBdmDic = read_csv(DEFPATH+'bbgToBdmDic.csv',index_col=0)['BondDataModel'].to_dict()
bondRuns = read_csv(DEFPATH+'runs.csv',index_col=0)
grid_labels = list(read_csv(DEFPATH+'TabList.csv',header=None)[0])
colFormats = read_csv(DEFPATH+'colFormats.csv',index_col=0)
runTitleStr = gs['runTitleStr'].iloc[0]
# Trade history
ccy = | read_csv(DEFPATH+'CCY.csv',index_col=0) | pandas.read_csv |
# Licensed to Modin Development Team under one or more contributor license agreements.
# See the NOTICE file distributed with this work for additional information regarding
# copyright ownership. The Modin Development Team licenses this file to you under the
# Apache License, Version 2.0 (the "License"); you may not use this file except in
# compliance with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under
# the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific language
# governing permissions and limitations under the License.
import pytest
import numpy as np
import pandas
import matplotlib
import modin.pandas as pd
from modin.utils import to_pandas
from modin.pandas.test.utils import (
random_state,
df_equals,
arg_keys,
name_contains,
test_data_values,
test_data_keys,
numeric_dfs,
axis_keys,
axis_values,
bool_arg_keys,
bool_arg_values,
test_data,
generate_multiindex,
eval_general,
)
from modin.config import NPartitions
NPartitions.put(4)
# Force matplotlib to not use any Xwindows backend.
matplotlib.use("Agg")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_combine(data):
pandas_df = pandas.DataFrame(data)
modin_df = pd.DataFrame(data)
modin_df.combine(modin_df + 1, lambda s1, s2: s1 if s1.count() < s2.count() else s2)
pandas_df.combine(
pandas_df + 1, lambda s1, s2: s1 if s1.count() < s2.count() else s2
)
@pytest.mark.parametrize(
"test_data, test_data2",
[
(
np.random.uniform(0, 100, size=(2 ** 6, 2 ** 6)),
np.random.uniform(0, 100, size=(2 ** 7, 2 ** 6)),
),
(
np.random.uniform(0, 100, size=(2 ** 7, 2 ** 6)),
np.random.uniform(0, 100, size=(2 ** 6, 2 ** 6)),
),
(
np.random.uniform(0, 100, size=(2 ** 6, 2 ** 6)),
np.random.uniform(0, 100, size=(2 ** 6, 2 ** 7)),
),
(
np.random.uniform(0, 100, size=(2 ** 6, 2 ** 7)),
np.random.uniform(0, 100, size=(2 ** 6, 2 ** 6)),
),
],
)
def test_join(test_data, test_data2):
modin_df = pd.DataFrame(
test_data,
columns=["col{}".format(i) for i in range(test_data.shape[1])],
index=pd.Index([i for i in range(1, test_data.shape[0] + 1)], name="key"),
)
pandas_df = pandas.DataFrame(
test_data,
columns=["col{}".format(i) for i in range(test_data.shape[1])],
index=pandas.Index([i for i in range(1, test_data.shape[0] + 1)], name="key"),
)
modin_df2 = pd.DataFrame(
test_data2,
columns=["col{}".format(i) for i in range(test_data2.shape[1])],
index=pd.Index([i for i in range(1, test_data2.shape[0] + 1)], name="key"),
)
pandas_df2 = pandas.DataFrame(
test_data2,
columns=["col{}".format(i) for i in range(test_data2.shape[1])],
index=pandas.Index([i for i in range(1, test_data2.shape[0] + 1)], name="key"),
)
hows = ["inner", "left", "right", "outer"]
ons = ["col33", "col34"]
sorts = [False, True]
for i in range(4):
for j in range(2):
modin_result = modin_df.join(
modin_df2,
how=hows[i],
on=ons[j],
sort=sorts[j],
lsuffix="_caller",
rsuffix="_other",
)
pandas_result = pandas_df.join(
pandas_df2,
how=hows[i],
on=ons[j],
sort=sorts[j],
lsuffix="_caller",
rsuffix="_other",
)
df_equals(modin_result, pandas_result)
frame_data = {
"col1": [0, 1, 2, 3],
"col2": [4, 5, 6, 7],
"col3": [8, 9, 0, 1],
"col4": [2, 4, 5, 6],
}
modin_df = pd.DataFrame(frame_data)
pandas_df = pandas.DataFrame(frame_data)
frame_data2 = {"col5": [0], "col6": [1]}
modin_df2 = pd.DataFrame(frame_data2)
pandas_df2 = pandas.DataFrame(frame_data2)
join_types = ["left", "right", "outer", "inner"]
for how in join_types:
modin_join = modin_df.join(modin_df2, how=how)
pandas_join = pandas_df.join(pandas_df2, how=how)
df_equals(modin_join, pandas_join)
frame_data3 = {"col7": [1, 2, 3, 5, 6, 7, 8]}
modin_df3 = pd.DataFrame(frame_data3)
pandas_df3 = pandas.DataFrame(frame_data3)
join_types = ["left", "outer", "inner"]
for how in join_types:
modin_join = modin_df.join([modin_df2, modin_df3], how=how)
pandas_join = pandas_df.join([pandas_df2, pandas_df3], how=how)
df_equals(modin_join, pandas_join)
@pytest.mark.parametrize(
"test_data, test_data2",
[
(
np.random.uniform(0, 100, size=(2 ** 6, 2 ** 6)),
np.random.uniform(0, 100, size=(2 ** 7, 2 ** 6)),
),
(
np.random.uniform(0, 100, size=(2 ** 7, 2 ** 6)),
np.random.uniform(0, 100, size=(2 ** 6, 2 ** 6)),
),
(
np.random.uniform(0, 100, size=(2 ** 6, 2 ** 6)),
np.random.uniform(0, 100, size=(2 ** 6, 2 ** 7)),
),
(
np.random.uniform(0, 100, size=(2 ** 6, 2 ** 7)),
np.random.uniform(0, 100, size=(2 ** 6, 2 ** 6)),
),
],
)
def test_merge(test_data, test_data2):
modin_df = pd.DataFrame(
test_data,
columns=["col{}".format(i) for i in range(test_data.shape[1])],
index=pd.Index([i for i in range(1, test_data.shape[0] + 1)], name="key"),
)
pandas_df = pandas.DataFrame(
test_data,
columns=["col{}".format(i) for i in range(test_data.shape[1])],
index=pandas.Index([i for i in range(1, test_data.shape[0] + 1)], name="key"),
)
modin_df2 = pd.DataFrame(
test_data2,
columns=["col{}".format(i) for i in range(test_data2.shape[1])],
index=pd.Index([i for i in range(1, test_data2.shape[0] + 1)], name="key"),
)
pandas_df2 = pandas.DataFrame(
test_data2,
columns=["col{}".format(i) for i in range(test_data2.shape[1])],
index=pandas.Index([i for i in range(1, test_data2.shape[0] + 1)], name="key"),
)
hows = ["left", "inner"]
ons = ["col33", ["col33", "col34"]]
sorts = [False, True]
for i in range(2):
for j in range(2):
modin_result = modin_df.merge(
modin_df2, how=hows[i], on=ons[j], sort=sorts[j]
)
pandas_result = pandas_df.merge(
pandas_df2, how=hows[i], on=ons[j], sort=sorts[j]
)
df_equals(modin_result, pandas_result)
modin_result = modin_df.merge(
modin_df2,
how=hows[i],
left_on="key",
right_on="key",
sort=sorts[j],
)
pandas_result = pandas_df.merge(
pandas_df2,
how=hows[i],
left_on="key",
right_on="key",
sort=sorts[j],
)
df_equals(modin_result, pandas_result)
# Test for issue #1771
modin_df = pd.DataFrame({"name": np.arange(40)})
modin_df2 = pd.DataFrame({"name": [39], "position": [0]})
pandas_df = pandas.DataFrame({"name": np.arange(40)})
pandas_df2 = pandas.DataFrame({"name": [39], "position": [0]})
modin_result = modin_df.merge(modin_df2, on="name", how="inner")
pandas_result = pandas_df.merge(pandas_df2, on="name", how="inner")
df_equals(modin_result, pandas_result)
frame_data = {
"col1": [0, 1, 2, 3],
"col2": [4, 5, 6, 7],
"col3": [8, 9, 0, 1],
"col4": [2, 4, 5, 6],
}
modin_df = pd.DataFrame(frame_data)
pandas_df = pandas.DataFrame(frame_data)
frame_data2 = {"col1": [0, 1, 2], "col2": [1, 5, 6]}
modin_df2 = pd.DataFrame(frame_data2)
pandas_df2 = pandas.DataFrame(frame_data2)
join_types = ["outer", "inner"]
for how in join_types:
# Defaults
modin_result = modin_df.merge(modin_df2, how=how)
pandas_result = pandas_df.merge(pandas_df2, how=how)
df_equals(modin_result, pandas_result)
# left_on and right_index
modin_result = modin_df.merge(
modin_df2, how=how, left_on="col1", right_index=True
)
pandas_result = pandas_df.merge(
pandas_df2, how=how, left_on="col1", right_index=True
)
df_equals(modin_result, pandas_result)
# left_index and right_on
modin_result = modin_df.merge(
modin_df2, how=how, left_index=True, right_on="col1"
)
pandas_result = pandas_df.merge(
pandas_df2, how=how, left_index=True, right_on="col1"
)
df_equals(modin_result, pandas_result)
# left_on and right_on col1
modin_result = modin_df.merge(
modin_df2, how=how, left_on="col1", right_on="col1"
)
pandas_result = pandas_df.merge(
pandas_df2, how=how, left_on="col1", right_on="col1"
)
df_equals(modin_result, pandas_result)
# left_on and right_on col2
modin_result = modin_df.merge(
modin_df2, how=how, left_on="col2", right_on="col2"
)
pandas_result = pandas_df.merge(
pandas_df2, how=how, left_on="col2", right_on="col2"
)
df_equals(modin_result, pandas_result)
# left_index and right_index
modin_result = modin_df.merge(
modin_df2, how=how, left_index=True, right_index=True
)
pandas_result = pandas_df.merge(
pandas_df2, how=how, left_index=True, right_index=True
)
df_equals(modin_result, pandas_result)
# Named Series promoted to DF
s = pd.Series(frame_data2.get("col1"))
with pytest.raises(ValueError):
modin_df.merge(s)
s = pd.Series(frame_data2.get("col1"), name="col1")
df_equals(modin_df.merge(s), modin_df.merge(modin_df2[["col1"]]))
with pytest.raises(TypeError):
modin_df.merge("Non-valid type")
@pytest.mark.parametrize("axis", [0, 1])
@pytest.mark.parametrize(
"ascending", bool_arg_values, ids=arg_keys("ascending", bool_arg_keys)
)
@pytest.mark.parametrize("na_position", ["first", "last"], ids=["first", "last"])
def test_sort_index(axis, ascending, na_position):
data = test_data["float_nan_data"]
modin_df, pandas_df = pd.DataFrame(data), pandas.DataFrame(data)
# Change index value so sorting will actually make a difference
if axis == 0:
length = len(modin_df.index)
for df in [modin_df, pandas_df]:
df.index = [(i - length / 2) % length for i in range(length)]
# Add NaNs to sorted index
for df in [modin_df, pandas_df]:
sort_index = df.axes[axis]
df.set_axis(
[np.nan if i % 2 == 0 else sort_index[i] for i in range(len(sort_index))],
axis=axis,
inplace=True,
)
eval_general(
modin_df,
pandas_df,
lambda df: df.sort_index(
axis=axis, ascending=ascending, na_position=na_position
),
)
@pytest.mark.parametrize("axis", ["rows", "columns"])
def test_sort_index_inplace(axis):
data = test_data["int_data"]
modin_df, pandas_df = pd.DataFrame(data), | pandas.DataFrame(data) | pandas.DataFrame |
# -*- coding: utf-8 -*-
#
from cached_property import cached_property
from functools import lru_cache
from .backend import DataBackend
from ..utils import get_str_date_from_int, get_int_date
import pymongo
import QUANTAXIS as qa
from QUANTAXIS.QAFetch import QATdx as QATdx
import pandas as pd
import datetime
# XSHG - Shanghai Stock Exchange
# XSHE - Shenzhen Stock Exchange
class MongodbBackend(DataBackend):
skip_suspended = True
def __init__(self):
client = pymongo.MongoClient('localhost', 27017)
self.db = client['quantaxis']
@lru_cache(maxsize=4096)
def get_price(self, order_book_id, start, end, freq):
"""
:param order_book_id: e.g. 000002.XSHE
:param start: 20160101
:param end: 20160201
:param freq: 1m 1d 5m 15m ...
:returns:
:rtype: numpy.rec.array
"""
s = get_str_date_from_int(start)
e = get_str_date_from_int(end)
if freq != '1d':
raise NotImplementedError
is_index = False
if ((order_book_id.startswith("0") and order_book_id.endswith(".XSHG")) or
(order_book_id.startswith("3") and order_book_id.endswith(".XSHE"))):
is_index = True
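# Shanghai codes starting with "0" and Shenzhen codes starting with "3" are treated as indices rather than stocks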
if order_book_id.endswith(".XSHG") or order_book_id.endswith(".XSHE"):
order_book_id = order_book_id[:6]
L = list(self.db[is_index and 'index_day' or 'stock_day']
.find({'code': order_book_id, 'date': {'$gte': s,'$lte': e}},{'_id':0,'date_stamp':0}).sort('date',1))
df = | pd.DataFrame(L) | pandas.DataFrame |
from transformer_rankers.eval import results_analyses_tools
from transformer_rankers.utils import utils
from IPython import embed
import pandas as pd
import numpy as np
import scipy.stats
import argparse
import logging
import json
import traceback
import os
import sys
logging.basicConfig(
level=logging.INFO,
format="%(asctime)s [%(levelname)s] %(message)s",
handlers=[
logging.StreamHandler()
]
)
METRICS = ['R_10@1', 'R_10@2', 'R_10@5', 'R_2@1', 'ndcg_cut_10', 'recip_rank', 'map']
pd.set_option('display.max_columns', None)
def main():
parser = argparse.ArgumentParser()
parser.add_argument("--model_outputs_folder", default=None,
type=str, required=True, help="the folder containing all results in sacred format.")
parser.add_argument("--identifier_columns", default=None,
type=str, required=True, help="The columns that uniquely identify a model and should be aggregated, comma separated.")
parser.add_argument("--output_folder", default=None, type=str, required=True,
help="the folder to write aggregated results and analyses.")
args = parser.parse_args()
identifier_cols = args.identifier_columns.split(",")
all_metrics = []
all_logits = []
folders = [args.model_outputs_folder+name for name in os.listdir(args.model_outputs_folder) if os.path.isdir(args.model_outputs_folder+name)]
for run_folder in folders:
try:
with open(run_folder+"/config.json") as f:
config = json.load(f)['args']
config['seed'] = str(config['seed'])
logging.info("Run %s" % (run_folder))
logging.info("Seed %s" % config['seed'])
logging.info("Task %s" % (config['task']))
predictions = | pd.read_csv(run_folder+"/predictions.csv") | pandas.read_csv |
import json
import dash_core_components as dcc
import dash_html_components as html
import pandas as pd
import plotly.graph_objs as go
def update_graph(
graph_id,
graph_title,
y_train_index,
y_val_index,
run_log_json,
yaxis_title,
):
def smooth(scalars, weight=0.6):
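# exponential moving average: blend each point with the running value using `weight` (higher = smoother)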
last = scalars[0]
smoothed = list()
for point in scalars:
smoothed_val = last * weight + (1 - weight) * point
smoothed.append(smoothed_val)
last = smoothed_val
return smoothed
trace_train = go.Scatter()
trace_val = go.Scatter()
layout = go.Layout(
template="plotly_dark",
title_text=graph_title,
paper_bgcolor="rgb(16, 24, 32)",
plot_bgcolor="rgb(16, 24, 32)",
margin=dict(l=40, r=25, b=40, t=40),
)
fig = go.Figure(data=[trace_train, trace_val], layout=layout)
if run_log_json:
run_log_df = pd.read_json(run_log_json, orient="split")
if len(run_log_df["batch"]) != 0:
step = run_log_df["step"]
y_train = run_log_df[y_train_index]
if y_val_index in run_log_df:
y_val = run_log_df[y_val_index]
else:
y_val = pd.Series(dtype=object)
if not y_train.isnull().values.any():
y_train = smooth(y_train)
trace_train = go.Scatter(
x=step,
y=y_train,
mode="lines",
name="Training",
showlegend=True,
)
if y_val.isnull().values.any():
y_val = y_val.dropna()
# y_val = smooth(y_val)
trace_val = go.Scatter(
x=y_val.index,
y=y_val,
mode="lines",
name="Validation",
showlegend=True,
)
fig = go.Figure(data=[trace_train, trace_val], layout=layout)
fig.update_xaxes(range=[0, step.iloc[-1] * 1.1])
if len(y_train) > 1:
fig.update_yaxes(
range=[
max(min(y_train[max(-10, -len(y_train)) : -1]) - 0.1, -0.01),
y_train[-1] + 0.1,
]
)
fig.add_shape(
type="line",
x0=0,
y0=y_train[-1],
x1=step.iloc[-1] * 1.1,
y1=y_train[-1],
line=dict(color="blue", dash="dot", width=1),
xref="x",
yref="y",
)
fig.add_annotation(
x=0,
y=y_train[-1],
text=f"{y_train[-1]:.4f}",
showarrow=False,
yshift=11,
xshift=22,
font=dict(),
bgcolor="rgb(50,50,150)",
)
if not y_val.empty:
fig.update_yaxes(
range=[
max(min(y_train[-1], y_val.iloc[-1]) - 0.1, -0.01),
min(max(y_train[-1], y_val.iloc[-1]) + 0.1, 1.01),
]
)
fig.add_shape(
type="line",
x0=0,
y0=y_val.iloc[-1],
x1=step.iloc[-1] * 1.1,
y1=y_val.iloc[-1],
line=dict(color="red", dash="dot", width=1),
xref="x",
yref="y",
)
fig.add_annotation(
x=0,
y=y_val.iloc[-1],
text=f"{y_val.iloc[-1]:.4f}",
showarrow=False,
yshift=-11,
xshift=22,
font=dict(),
bgcolor="rgb(150,50,50)",
)
return dcc.Graph(
id=graph_id,
config={
"displayModeBar": False,
"scrollZoom": True,
},
figure=fig,
)
return dcc.Graph(
id=graph_id,
config={
"displayModeBar": False,
"scrollZoom": True,
},
figure=fig,
)
def update_current_value(value_train, value_validation, value_title, run_log_json):
if run_log_json:
run_log_df = pd.read_json(run_log_json, orient="split")
if run_log_df["epoch"].last_valid_index():
last_val_index = run_log_df["epoch"].last_valid_index()
val_div = (
html.Div(
f"Validation: \
{run_log_df[value_validation].iloc[last_val_index]:.4f}"
),
)
return [
html.P(
f"Current {value_title}:",
style={
"font-weight": "bold",
"margin-top": "10px",
"margin-bottom": "0px",
},
),
html.Div(f"Training: {run_log_df[value_train].iloc[-1]:.4f}"),
val_div[0],
]
if len(run_log_df["batch"]) != 0:
return [
html.P(
f"Current {value_title}:",
style={
"font-weight": "bold",
"margin-top": "10px",
"margin-bottom": "0px",
},
),
html.Div(f"Training: {run_log_df[value_train].iloc[-1]:.4f}"),
]
def get_input_layer_info(summary):
if "0" in summary["config"]:
config = json.loads(summary["config"]["0"])
layer_info = {
"class_name": config["layers"][0]["class_name"],
"name": config["layers"][0]["config"]["name"],
"input_shape": config["layers"][0]["config"]["batch_input_shape"],
}
return layer_info
def get_layers(summary):
layers = []
if "0" in summary["config"]:
config = json.loads(summary["config"]["0"])
def get_layer_info(layer):
layer_info = {
"Type": layer["class_name"],
"name": layer["config"]["name"],
}
if layer["class_name"] == "Dense":
layer_info["units"] = layer["config"]["units"]
layer_info["activation"] = layer["config"]["activation"]
return layer_info
for i, layer in enumerate(config["layers"]):
layers.append(get_layer_info(layer))
return layers
def update_interval_log(interval_rate):
if interval_rate == "fast":
return 500
elif interval_rate == "regular":
return 1000
elif interval_rate == "slow":
return 5 * 1000
elif interval_rate == "no":
return 24 * 60 * 60 * 1000
def update_progress_bars(run_log_json, model_params):
if run_log_json:
run_log_df = pd.read_json(run_log_json, orient="split")
if len(run_log_df["batch"]) != 0 and model_params:
batch_prog = (
(run_log_df["batch"].iloc[-1])
* 100
/ model_params["max_batch_step"]["0"]
)
step_prog = (
run_log_df["step"].iloc[-1]
* 100
/ model_params["no_tracked_steps"]["0"]
)
return (
batch_prog,
f"{batch_prog:.0f} %" if batch_prog >= 5 else "",
step_prog,
f"{step_prog:.0f} %" if step_prog >= 5 else "",
)
return 0, 0, 0, 0
def update_progress_display(run_log_json, model_params):
steps_div = ()
if run_log_json:
run_log_df = | pd.read_json(run_log_json, orient="split") | pandas.read_json |
import pandas as pd
from tornado.ioloop import IOLoop
import yaml
from jinja2 import Template
from bokeh.application.handlers import FunctionHandler
from bokeh.application import Application
from bokeh.layouts import column
from bokeh.models import ColumnDataSource, Slider, Div
from bokeh.plotting import figure
from bokeh.server.server import Server
from bokeh.themes import Theme
from bokeh.client import push_session
import os
# if running locally, listen on port 5000
PORT = int(os.getenv('PORT', '5000'))
HOST = "0.0.0.0"
try:
# We are running on Cloud Foundry; this variable is set in the manifest.yml
ALLOW_WEBSOCKET_ORIGIN = os.getenv("ALLOW_WEBSOCKET_ORIGIN").split(',')
except:
# We are not running on cloud foundry so we must be running locally
ALLOW_WEBSOCKET_ORIGIN = [ 'localhost:{0}'.format(PORT) ]
io_loop = IOLoop.current()
# This example simulates reading from a stream such as kafka
def modify_doc(doc):
df_all = | pd.read_csv('data.csv') | pandas.read_csv |
import pandas as pd
import numpy as np
#########################################################################################################
'''
Feature Engineering
'''
def create_name_feat(train, test):
for i in [train, test]:
i['Name_Len'] = i['Name'].apply(lambda x: len(x))
i['Name_Title'] = i['Name'].apply(lambda x: x.split(',')[1]).apply(lambda x: x.split()[0])
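# e.g. a Name like "Braund, Mr. Owen Harris" yields Name_Title "Mr." (illustrative record)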
del i['Name']
return train, test
# There are 177 null values for Age, and those ones have a 10% lower survival rate than the non-nulls.
# Before imputing values for the nulls, we are including an Age_null flag just to make
# sure we can account for this characteristic of the data.
def age_impute(train, test):
for i in [train, test]:
i['Age_Null_Flag'] = i['Age'].apply(lambda x: 1 if | pd.isnull(x) | pandas.isnull |
"""
make_allvar_report
allvar_periodogram_checkplot
allvar_plot_timeseries_vecs
plot_rotationcheck
"""
from glob import glob
import os, pickle, shutil, multiprocessing
import numpy as np, pandas as pd, matplotlib.pyplot as plt
from matplotlib.backends.backend_pdf import PdfPages
from numpy import array as nparr
from datetime import datetime
from astropy.coordinates import SkyCoord
from astropy import units as u
from astropy.wcs import WCS
from astropy.table import Table
from astroquery.vizier import Vizier
from astroquery.mast import Catalogs
from astrobase import periodbase, checkplot
from astrobase.plotbase import skyview_stamp
from cdips.plotting import vetting_pdf as vp
from cdips.paths import DATADIR
from cdips.vetting import (
centroid_analysis as cdva,
initialize_neighborhood_information as ini
)
nworkers = multiprocessing.cpu_count()
APSIZEDICT = {
1: 1,
2: 1.5,
3: 2.25
}
def make_allvar_report(allvardict, plotdir):
"""
allvardict = {
'source_id': source_id,
'ap': ap,
'TMID_BJD': time,
f'PCA{ap}': flux,
f'IRE{ap}': fluxerr,
'STIME': s_time,
f'SPCA{ap}': s_flux
f'SPCAE{ap}': s_flux
'dtr_infos': dtr_infos
}
Each dtr_infos tuple entry contains
primaryhdr, data, ap, dtrvecs, eigenvecs, smooth_eigenvecs
"""
source_id = allvardict['source_id']
outpath = os.path.join(plotdir, f'{source_id}_allvar_report.pdf')
with PdfPages(outpath) as pdf:
##########
# page 1
##########
fig, lsp, spdm, objectinfo = allvar_periodogram_checkplot(
allvardict
)
pdf.savefig(fig, bbox_inches='tight')
plt.close()
if | pd.isnull(lsp) | pandas.isnull |
from .base import GenericPreprocessor
import numpy as np
import pandas as pd
class ZTFLightcurvePreprocessor(GenericPreprocessor):
def __init__(self, stream=False):
super().__init__()
self.not_null_columns = [
'mjd',
'fid',
'magpsf',
'sigmapsf',
'magpsf_ml',
'sigmapsf_ml',
'ra',
'dec',
'rb'
]
self.stream = stream
if not self.stream:
self.not_null_columns.append('sgscore1')
self.column_translation = {
'mjd': 'time',
'fid': 'band',
'magpsf_ml': 'magnitude',
'sigmapsf_ml': 'error'
}
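# rename survey-specific columns (mjd, fid, magpsf_ml, sigmapsf_ml) to the generic time/band/magnitude/error schema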
self.max_sigma = 1.0
self.rb_threshold = 0.55
def has_necessary_columns(self, dataframe):
"""
:param dataframe:
:return:
"""
input_columns = set(dataframe.columns)
constraint = set(self.not_null_columns)
difference = constraint.difference(input_columns)
return len(difference) == 0
def discard_invalid_value_detections(self, detections):
"""
:param detections:
:return:
"""
detections = detections.replace([np.inf, -np.inf], np.nan)
valid_alerts = detections[self.not_null_columns].notna().all(axis=1)
detections = detections[valid_alerts.values]
detections[self.not_null_columns] = detections[self.not_null_columns].apply(
lambda x: | pd.to_numeric(x, errors='coerce') | pandas.to_numeric |
import os
import lmfit
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from ImagingReso._utilities import ev_to_s
from cycler import cycler
from lmfit import Model
from lmfit.models import LinearModel
from scipy.interpolate import interp1d
import ResoFit._utilities as fit_util
from ResoFit.model import cole_windsor
from ResoFit.model import cole_windsor_jparc
from ResoFit.model import ikeda_carpenter
from ResoFit.model import ikeda_carpenter_jparc
from ResoFit.model import loglog_linear
from ResoFit.model import pseudo_voigt
# import ImagingReso._utilities as reso_util
__file_path = os.path.abspath(os.path.dirname(__file__))
_rel_path_to_proton = 'data/_data_for_tutorial/proton_pulse/waveform_20170901.txt'
proton_path = os.path.join(__file_path, _rel_path_to_proton)
t_min_us = 5e-2
t_max_us = 183.7
t_step_us = 0.01
t_nbr = round((t_max_us - t_min_us) / t_step_us) + 1
mcnp_plot_lim_dict = {'x_min': 0,
'x_max': 5,
'y_min': -0.05,
'y_max': 1.05}
plot_x_range = mcnp_plot_lim_dict['x_max'] - mcnp_plot_lim_dict['x_min']
default_cycler = cycler('color',
['#1f77b4', '#ff7f0e', '#2ca02c', '#d62728', '#9467bd', '#8c564b', '#e377c2', '#7f7f7f',
'#bcbd22', '#17becf'])
# fmt_list = ['o--', 'v--', 's--', 'p--', 'D--', 'h--', '^--', '<--', '>--', 'o--']
marker_list = ['o', 'v', 's', '^', 'D', 'h', '<', '>', 'x', 'p']
class NeutronPulse(object):
def __init__(self, path, model_index=1):
"""
Load total neutron pulse shape from .dat file
:param path: path to the '.dat' file
:type path: str
"""
self.shape_total_df = _load_neutron_total_shape(path)
self.shape_dict_mcnp = None
self.shape_df_mcnp = None
self.shape_df_mcnp_norm = None
self.shape_df_interp = None
self.shape_tof_df_interp = None
self.shape_tof_df_dir = None
self.result_shape_fit = None
self.param_df_dir = None
self.param_df = None
self.linear_df = None
self.linear_df_dir = None
self.model_param_names = None
self.e_min = None
self.e_max = None
self.t_us_mcnp = None
self.t_us_conv_proton = np.linspace(t_min_us, t_max_us, t_nbr).round(3)
self.result_neutron_folder = None
self._energy_list = None
self._energy_list_dropped = None
self.model_map = {1: 'ikeda_carpenter',
2: 'cole_windsor',
3: 'pseudo_voigt',
4: 'ikeda_carpenter_jparc',
5: 'cole_windsor_jparc',
}
self.model_index = None
self.model_used = None
self.model = None
self.__set_model(model_index)
if self.result_neutron_folder is None:
self.result_neutron_folder = self._check_and_make_subdir('result', 'neutron_pulse', self.model_used)
self.proton_pulse = ProtonPulse(path=proton_path)
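# Typical usage (file names are assumptions):
#   pulse = NeutronPulse('neutron_total.dat', model_index=1)
#   pulse.load_shape_each('neutron_each_eV.dat')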
def load_shape_each(self, path, save_each=False):
"""
Load each eV neutron pulse shape from .dat file
:param save_each:
:type save_each:
:param path: path to the '.dat' file
:type path: str
"""
self.shape_dict_mcnp = _load_neutron_each_shape(path, export=save_each)
self.shape_df_mcnp, self.shape_df_mcnp_norm = _shape_dict_to_dfs(self.shape_dict_mcnp,
t_max=t_max_us)
self._energy_list = list(self.shape_df_mcnp.set_index('t_us').columns)
self.t_us_mcnp = np.array(self.shape_df_mcnp['t_us']).round(5)
def _form_energy_list_dropped(self, e_min, e_max):
assert self._energy_list is not None
_energy_list_dropped = []
for _each_e in self._energy_list:
if e_min <= _each_e <= e_max:
_energy_list_dropped.append(_each_e)
_energy_list_dropped.sort()
self._energy_list_dropped = _energy_list_dropped
return _energy_list_dropped
def plot_shape_total(self, x1_type='energy', x2_type='lambda', source_to_detector_m=None):
"""
Plot the total beam shape obtained from MCNPX simulation
:param x1_type:
:type x1_type:
:param x2_type:
:type x2_type:
:param source_to_detector_m:
:type source_to_detector_m:
:return: plot
:rtype: matplotlib
"""
x_type_list = ['energy', 'lambda', 'time', 'none']
if x1_type not in x_type_list:
raise ValueError("Please specify the x-axis type using one from '{}'.".format(x_type_list))
if x2_type not in x_type_list:
raise ValueError("Please specify the x-axis type using one from '{}'.".format(x_type_list))
if x1_type == 'time' or x2_type == 'time':
if source_to_detector_m is None:
raise ValueError("Please specify the source-to-detector distance in m.")
if x1_type == x2_type:
x2_type = 'none'
fig, ax1 = plt.subplots()
if x1_type == 'energy':
ax1.loglog(self.shape_total_df['E_eV'], self.shape_total_df['f(E)'], 'b.')
ax1.set_xlabel('Energy (eV)', color='b')
elif x1_type == 'lambda':
ax1.loglog(self.shape_total_df['l_angstrom'], self.shape_total_df['f(l)'], 'b.')
ax1.set_xlabel(u"Wavelength (\u212B)", color='b')
ax1.invert_xaxis()
elif x1_type == 'time':
ax1.loglog(ev_to_s(array=self.shape_total_df['E_eV'],
offset_us=0,
source_to_detector_m=source_to_detector_m) * 1e6,
self.shape_total_df['f(l)'], 'b.')
ax1.set_xlabel(u"Time-of-flight (\u03BCs)", color='b')
ax1.invert_xaxis()
ax1.set_ylabel('Flux (n/sterad/pulse)')
ax1.tick_params('x', colors='b', which='both')
ax1.grid(axis='x', which='both', color='b', alpha=0.3)
ax1.grid(axis='y', which='major', alpha=0.3)
if x2_type != 'none':
ax2 = ax1.twiny()
if x2_type == 'energy':
ax2.loglog(self.shape_total_df['E_eV'], self.shape_total_df['f(E)'], 'rx')
ax2.set_xlabel('Energy (eV)', color='r')
elif x2_type == 'lambda':
ax2.loglog(self.shape_total_df['l_angstrom'], self.shape_total_df['f(l)'], 'rx')
ax2.set_xlabel(u"Wavelength (\u212B)", color='r')
ax2.invert_xaxis()
elif x2_type == 'time':
ax2.loglog(ev_to_s(array=self.shape_total_df['E_eV'],
offset_us=0,
source_to_detector_m=source_to_detector_m) * 1e6,
self.shape_total_df['f(l)'], 'rx')
ax2.set_xlabel(u"Time-of-flight (\u03BCs)", color='r')
ax2.invert_xaxis()
ax2.grid(axis='x', which='both', color='r', alpha=0.3)
ax2.tick_params('x', colors='r', which='both')
# ax1.set_title('Neutron total flux', y=1.08, loc='left')
return fig
def plot_shape_mcnp(self, e_min, e_max, logy=False, norm=False, marker='o', ax_mpl=None, plt_arrow=True):
"""
Plot each eV beam shape obtained from MCNPX simulation
:param e_min:
:type e_min:
:param e_max:
:type e_max:
:param logy:
:type logy:
:param norm:
:type norm:
:param marker:
:type marker:
:param ax_mpl:
:type ax_mpl:
:param plt_arrow:
:type plt_arrow:
:return:
:rtype:
"""
assert self.shape_dict_mcnp is not None
if norm:
_shape_df = self.shape_df_mcnp_norm
_y_label = 'Ratio out of max flux of each energy'
else:
_shape_df = self.shape_df_mcnp
_y_label = 'Flux (n/sterad/pulse)'
# Only the energy provided by MCNPX simulation will be filtered
_energy_list_dropped = self._form_energy_list_dropped(e_min=e_min, e_max=e_max)
if ax_mpl is None:
fig, ax_mpl = plt.subplots()
ax_mpl.set_prop_cycle(default_cycler)
for each in _energy_list_dropped:
if logy:
ax_mpl.semilogy(_shape_df['t_us'],
_shape_df[each],
linestyle='-',
marker=marker,
# fmt=marker,
label=str(each) + ' eV (MCNPX)')
else:
ax_mpl.plot(_shape_df['t_us'],
_shape_df[each],
linestyle='-',
marker=marker,
# fmt=marker,
label=str(each) + ' eV (MCNPX)')
ax_mpl.set_ylabel(_y_label)
ax_mpl.set_xlabel(u'Time (\u03BCs)')
ax_mpl.grid()
if len(_energy_list_dropped) <= 7:
ax_mpl.legend()
else:
if plt_arrow:
_plot_ev_arrow_as_legend(ax=ax_mpl, ev_list=_energy_list_dropped)
ax_mpl.set_title('Energy dependent neutron pulse shape (MCNPX)')
ax_mpl.set_xlim(left=mcnp_plot_lim_dict['x_min'], right=mcnp_plot_lim_dict['x_max'])
return ax_mpl
def plot_shape_interp(self, e_ev, source_to_detector_m, conv_proton, proton_params={},
t_interp=None, logy=False, norm=False, for_sum=False,
marker='o', ax_mpl=None, plt_arrow=True):
"""
Plot each eV beam shape obtained from the fitting approach
:param e_ev:
:type e_ev:
:param source_to_detector_m:
:type source_to_detector_m:
:param conv_proton:
:type conv_proton:
:param proton_params:
:type proton_params:
:param t_interp:
:type t_interp:
:param logy:
:type logy:
:param norm:
:type norm:
:param for_sum:
:type for_sum:
:param marker:
:type marker:
:param ax_mpl:
:type ax_mpl:
:param plt_arrow:
:type plt_arrow:
:return:
:rtype:
"""
if t_interp is None:
t_interp = self.t_us_mcnp
self._make_shape(e_ev=e_ev, t_interp=t_interp, norm=norm, for_sum=for_sum,
source_to_detector_m=source_to_detector_m, print_tof=False,
conv_proton=conv_proton, proton_params=proton_params)
_shape_df_interp = self.shape_df_interp
_y_label = 'Flux (n/sterad/pulse)'
if norm:
_y_label = 'Ratio out of max flux of each energy'
_for_sum_s = ''
if for_sum:
_for_sum_s = ' for_sum'
_norm_s = ''
if norm:
_norm_s = ' norm'
_conv_proton_s = ''
if conv_proton:
_conv_proton_s = ' proton'
_proton_param_s = ''
for _param in proton_params.keys():
_proton_param_s = _proton_param_s + '_' + _param + '_' + str(proton_params[_param])
_details_s = _norm_s + _conv_proton_s + _proton_param_s + _for_sum_s
_title_s = 'Energy dependent neutron pulse shape (interp.{})'.format(_details_s)
_energy_interp_list = list(_shape_df_interp.set_index('t_us').columns)
_energy_interp_list.sort()
if ax_mpl is None:
fig, ax_mpl = plt.subplots()
ax_mpl.set_prop_cycle(default_cycler)
for each in _energy_interp_list:
if logy:
ax_mpl.semilogy(_shape_df_interp['t_us'],
_shape_df_interp[each],
linestyle='--',
marker=marker,
# fmt=fmt,
mfc='none',
label=str(each) + ' eV (interp.)')
else:
ax_mpl.plot(_shape_df_interp['t_us'],
_shape_df_interp[each],
linestyle='--',
marker=marker,
# fmt=fmt,
mfc='none',
label=str(each) + ' eV (interp.)')
ax_mpl.set_ylabel(_y_label)
ax_mpl.set_xlabel(u'Time (\u03BCs)')
ax_mpl.grid()
if len(_energy_interp_list) <= 7:
ax_mpl.legend()
else:
if plt_arrow:
_plot_ev_arrow_as_legend(ax=ax_mpl, ev_list=_energy_interp_list)
ax_mpl.set_title(_title_s)
return ax_mpl
def plot_shape_each_compare(self, e_min, e_max, source_to_detector_m, conv_proton, proton_params={},
t_interp=None, logy=False, norm=False, for_sum=False):
"""
Plot each eV beam shape obtained from MCNPX simulation and current fitting approach to compare
:param e_min:
:type e_min:
:param e_max:
:type e_max:
:param source_to_detector_m:
:type source_to_detector_m:
:param conv_proton:
:type conv_proton:
:param proton_params:
:type proton_params:
:param t_interp:
:type t_interp:
:param logy:
:type logy:
:param norm:
:type norm:
:param for_sum:
:type for_sum:
:return:
:rtype:
"""
if t_interp is None:
t_interp = self.t_us_mcnp
_energy_list_dropped = self._form_energy_list_dropped(e_min=e_min, e_max=e_max)
ax_mcnp = self.plot_shape_mcnp(e_min=e_min, e_max=e_max, norm=norm, logy=logy, plt_arrow=False)
ax = self.plot_shape_interp(e_ev=_energy_list_dropped,
source_to_detector_m=source_to_detector_m,
conv_proton=conv_proton, proton_params=proton_params,
t_interp=t_interp, logy=logy, norm=norm, for_sum=for_sum, ax_mpl=ax_mcnp)
ax.set_title('Energy dependent neutron pulse shape MCNPX vs. interp.')
if len(_energy_list_dropped) <= 7:
ax.legend()
ax.grid()
return ax
def plot_tof_shape_interp(self, e_ev, source_to_detector_m, conv_proton, proton_params={},
t_interp=None, for_sum=False, logy=False, norm=False, marker='o', ax_mpl=None,
plt_arrow=True):
"""
Plot each eV beam shape obtained from the fitting approach
:param e_ev:
:type e_ev:
:param source_to_detector_m:
:type source_to_detector_m:
:param conv_proton:
:type conv_proton:
:param proton_params:
:type proton_params:
:param t_interp:
:type t_interp:
:param for_sum:
:type for_sum:
:param logy:
:type logy:
:param norm:
:type norm:
:param marker:
:type marker:
:param ax_mpl:
:type ax_mpl:
:param plt_arrow:
:type plt_arrow:
:return:
:rtype:
"""
if isinstance(e_ev, int) or isinstance(e_ev, float):
e_ev = [e_ev]
e_ev.sort()
if t_interp is None:
t_interp = self.t_us_mcnp
self._make_shape(e_ev=e_ev, t_interp=t_interp, for_sum=for_sum, norm=norm,
source_to_detector_m=source_to_detector_m, print_tof=False,
conv_proton=conv_proton, proton_params=proton_params)
_shape_tof_df_interp = self.shape_tof_df_interp
_y_label = 'Flux (n/sterad/pulse)'
if norm:
_y_label = 'Ratio out of max flux of each energy'
_x_tag = 'tof_us'
_for_sum_s = ''
if for_sum:
_for_sum_s = ' for_sum'
_norm_s = ''
if norm:
_norm_s = ' norm'
_conv_proton_s = ''
if conv_proton:
_conv_proton_s = ' proton'
_proton_param_s = ''
for _param in proton_params.keys():
_proton_param_s = _proton_param_s + '_' + _param + '_' + str(proton_params[_param])
_details_s = _norm_s + _conv_proton_s + _proton_param_s + _for_sum_s
_title_s = 'Energy dependent neutron pulse shape (interp.{})'.format(_details_s)
if ax_mpl is None:
fig, ax_mpl = plt.subplots()
ax_mpl.set_prop_cycle(default_cycler)
for each_e in e_ev:
if not for_sum:
_x_tag = str(each_e) + '_tof_us'
if logy:
ax_mpl.semilogy(_shape_tof_df_interp[_x_tag],
_shape_tof_df_interp[str(each_e)],
linestyle='--',
marker=marker,
mfc='none',
label=str(each_e) + ' eV (interp.{})'.format(_proton_param_s))
else:
ax_mpl.plot(_shape_tof_df_interp[_x_tag],
_shape_tof_df_interp[str(each_e)],
linestyle='--',
marker=marker,
mfc='none',
label=str(each_e) + ' eV (interp.{})'.format(_proton_param_s))
if len(e_ev) <= 7:
ax_mpl.legend()
else:
if plt_arrow:
_plot_ev_arrow_as_legend(ax=ax_mpl, ev_list=e_ev)
ax_mpl.set_ylabel(_y_label)
ax_mpl.set_xlabel(u'Time (\u03BCs)')
ax_mpl.grid()
ax_mpl.set_title(_title_s)
return ax_mpl
def plot_proton_conv(self, e_ev, source_to_detector_m, conv_proton, sigma_list, tof=True,
t_interp=None, for_sum=False, logy=False, norm=False, plt_arrow=True):
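        """
        Plot the interpolated pulse shapes (TOF or emission time) for a list of proton-pulse
        sigma values, to visualize how the proton pulse width affects the convolved neutron shape.
        """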
_list_of_dicts = _break_proton_param_list_to_dict(proton_param_list=sigma_list, proton_param_name='sigma')
fig, ax1 = plt.subplots()
for i, _e_param_dict in enumerate(_list_of_dicts):
if tof:
ax1 = self.plot_tof_shape_interp(e_ev=e_ev, source_to_detector_m=source_to_detector_m,
conv_proton=conv_proton, proton_params=_e_param_dict,
t_interp=t_interp, for_sum=for_sum, logy=logy, norm=norm, ax_mpl=ax1,
plt_arrow=plt_arrow, marker=marker_list[i])
else:
ax1 = self.plot_shape_interp(e_ev=e_ev, source_to_detector_m=source_to_detector_m,
conv_proton=conv_proton, proton_params=_e_param_dict,
t_interp=t_interp, for_sum=for_sum, logy=logy, norm=norm, ax_mpl=ax1,
plt_arrow=plt_arrow, marker=marker_list[i])
_for_sum_s = ''
if for_sum:
_for_sum_s = ' for_sum'
_title_s = 'Energy dependent neutron pulse shape (interp.{}) with proton convolved'.format(_for_sum_s)
ax1.set_title(_title_s)
return ax1
def make_shape(self, e_ev, source_to_detector_m, conv_proton, proton_params={},
t_interp=None, for_sum=False, norm=False, overwrite_csv=False):
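        """
        Generate the energy-dependent TOF neutron pulse shapes and store them in
        self.shape_tof_df_interp. The result is cached as a .csv file in the result folder;
        an existing file is loaded instead of regenerated unless overwrite_csv is True.
        """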
assert self.linear_df is not None
assert self.model is not None
if isinstance(e_ev, int) or isinstance(e_ev, float):
e_ev = [e_ev]
e_ev.sort()
if t_interp is None:
t_interp = self.t_us_mcnp
# t_interp = self.t_us
if isinstance(t_interp, int) or isinstance(t_interp, float):
raise ValueError("'t_interp' must be a list or array.")
t_interp.sort()
# construct the str for the file name
_distance_s = '_' + str(source_to_detector_m) + 'm'
_for_sum_s = ''
if for_sum:
_for_sum_s = '_for_sum'
_norm_s = ''
if norm:
_norm_s = '_norm'
_conv_proton_s = ''
if conv_proton:
_conv_proton_s = '_proton'
_proton_param_s = ''
for _param in proton_params.keys():
_proton_param_s = _proton_param_s + '_' + _param + '_' + str(proton_params[_param])
_e_min = e_ev[0]
_e_max = e_ev[-1]
        _e_nbr = len(e_ev) - 1
        # guard against a single-energy input to avoid division by zero
        _e_step = (_e_max - _e_min) / _e_nbr if _e_nbr > 0 else 0
_e_str = '_eV_' + str(_e_min) + '_' + str(_e_max) + '_' + str(_e_step)
_t_min = t_min_us
_t_max = t_max_us
_t_step = t_step_us
_t_str = '_us_' + str(_t_min) + '_' + str(_t_max) + '_' + str(_t_step)
assert self.model_used is not None
_model_s = '_' + self.model_used + '.csv'
_filename = 'TOF_shape' + _e_str + _t_str + _norm_s + _conv_proton_s + _proton_param_s \
+ _for_sum_s + _distance_s + _model_s
_shape_tof_df_dir = os.path.join(self.result_neutron_folder, _filename)
self.shape_tof_df_dir = _shape_tof_df_dir
# File exists
if os.path.isfile(_shape_tof_df_dir):
print("\u2705 '{}' exists...".format(_shape_tof_df_dir))
if overwrite_csv:
                # overwrite_csv=True: regenerate the beam shape and overwrite the existing .csv file
print("File overwriting...")
print("New beam shape generation starts...")
# Making starts
self._make_shape(e_ev=e_ev, t_interp=t_interp, for_sum=for_sum, norm=norm,
source_to_detector_m=source_to_detector_m,
save_dir=_shape_tof_df_dir,
print_tof=True,
conv_proton=conv_proton,
proton_params=proton_params,
)
print("File overwritten.")
else:
                # overwrite_csv=False: load the existing .csv file instead of regenerating
self.shape_tof_df_interp = pd.read_csv(_shape_tof_df_dir)
print("TOF neutron beam shape file loaded.")
self.proton_pulse.make_new_shape(proton_params=proton_params) # Making sure proton shape params updated
# File not exists, perform fitting
else:
print("\u274C No previous TOF neutron beam shape file named '{}' detected.".format(_shape_tof_df_dir))
print("Beam shape generation starts...")
# Making starts
self._make_shape(e_ev=e_ev, t_interp=t_interp, for_sum=for_sum, norm=norm,
source_to_detector_m=source_to_detector_m,
save_dir=_shape_tof_df_dir,
print_tof=True,
conv_proton=conv_proton,
proton_params=proton_params,
)
def _make_shape(self, e_ev, t_interp, for_sum, norm, source_to_detector_m, print_tof,
conv_proton, proton_params={}, save_dir=None):
assert self.linear_df is not None
assert self.model is not None
if isinstance(e_ev, int) or isinstance(e_ev, float):
e_ev = [e_ev]
e_ev.sort()
if t_interp is not None:
t_interp.sort()
else:
t_interp = self.t_us_mcnp
if isinstance(t_interp, int) or isinstance(t_interp, float):
raise ValueError("'t_interp' must be a list or array.")
# _t_shift = 0
# if conv_proton:
# _t_shift = self.proton_pulse._t_shift
# # t used to shift convolution from 'full' to 'same'
_param_df_interp = self._interpolate_param(e_ev=e_ev).set_index('E_eV')
_shape_df_interp = pd.DataFrame()
_shape_df_interp['t_us'] = t_interp
# _shape_df_interp['t_us'] = t_interp - _t_shift
_shape_tof_df_interp = pd.DataFrame()
_tof_us_dict = {}
_tof_total_us_array = []
self.proton_pulse.make_new_shape(proton_params=proton_params) # Making sure proton shape params updated
if print_tof:
print('For {} (m)'.format(source_to_detector_m))
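        # For each energy: evaluate the shape on t_interp from the interpolated fit parameters,
        # rescale by the interpolated peak flux unless norm=True, and record its TOF offset.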
for _each_e in e_ev:
_t_us, _array = self._make_single_shape(e_ev=_each_e,
t_us=t_interp,
param_df=_param_df_interp,
conv_proton=conv_proton,
)
if not norm:
_array = _array * _param_df_interp['f_max'][_each_e]
_array[_array < 0] = 0
_array = _array.round(5)
_shape_df_interp[_each_e] = _array
_tof_diff_us = ev_to_s(offset_us=0, source_to_detector_m=source_to_detector_m, array=_each_e) * 1e6
if print_tof:
print('{} (eV) neutron spent {} (us)'.format(_each_e, _tof_diff_us))
_tof_us_dict[_each_e] = _tof_diff_us
_current_tof_us = t_interp + _tof_diff_us
_tof_total_us_array = np.append(_tof_total_us_array, _current_tof_us)
if not for_sum:
# _shape_tof_df_interp[str(_each_e) + '_tof_us'] = _current_tof_us - _t_shift
_shape_tof_df_interp[str(_each_e) + '_tof_us'] = _t_us + _tof_diff_us
_shape_tof_df_interp[str(_each_e)] = _array
self.shape_df_interp = _shape_df_interp
self.tof_us_dict = _tof_us_dict
_tof_total_us_array.sort() # list of all time that exist in all energy
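        # When for_sum=True, re-evaluate every energy on the union of all TOF time points so the
        # resulting columns share a single common 'tof_us' axis and can be summed directly.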
if for_sum:
# _shape_tof_df_interp['tof_us'] = _tof_total_us_array - _t_shift
if print_tof is True:
print('Making shape for:')
for _each_e in e_ev:
__tof_diff_us = _tof_us_dict[_each_e]
_current_t_without_tof = _tof_total_us_array - __tof_diff_us
if print_tof is True:
print('{} (eV) neutron ...'.format(_each_e))
_t_us, _array = self._make_single_shape(e_ev=_each_e,
t_us=_current_t_without_tof,
param_df=_param_df_interp,
conv_proton=conv_proton,
)
if not norm:
_array = _array * _param_df_interp['f_max'][_each_e]
_array[_array < 0] = 0
_array = _array.round(5)
_shape_tof_df_interp['tof_us'] = _t_us + __tof_diff_us
_shape_tof_df_interp[str(_each_e)] = _array
self.shape_tof_df_interp = _shape_tof_df_interp
# Save shape_tof_df_interp as .csv
if save_dir is not None:
self.shape_tof_df_interp.to_csv(save_dir, index=False)
print("TOF neutron beam shape file has been saved at '{}'".format(save_dir))
def _make_single_shape(self, e_ev, t_us, param_df, conv_proton):
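        """
        Evaluate the fitted pulse-shape model for a single energy 'e_ev' on the time axis 't_us'
        using the interpolated parameters in 'param_df'. If 'conv_proton' is True, the model is
        convolved with the proton pulse shape. Returns the (shift-corrected) time axis and the
        evaluated flux array.
        """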
# if not isinstance(e_ev, int) or isinstance(e_ev, float):
# raise ValueError("'e_ev' must be a number for single shape generation.")
if isinstance(t_us, int) or isinstance(t_us, float):
raise ValueError("'t_us' must be a list or array for shape interpolation.")
# if e_ev < 1 or e_ev > 500:
# raise ValueError()
t_us = np.array(t_us)
_my_model = self.model
for _each_param in self.model_param_names:
_my_model.set_param_hint(_each_param, value=param_df[_each_param][e_ev])
_params = _my_model.make_params()
_t_shift = 0
if not conv_proton:
_array = _my_model.eval(_params, t=t_us) # lmfit.model.eval() returns np.ndarray
else:
_t_shift = self.proton_pulse._t_shift
# t used to shift convolution from 'full' to 'same'
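            # Convolve with the proton pulse: evaluate the model on the fixed fine grid
            # self.t_us_conv_proton, convolve it with the proton pulse shape, then interpolate
            # the convolved curve back onto the requested time axis 't_us'.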
_array_for_conv_proton = _my_model.eval(_params, t=self.t_us_conv_proton)
_proton_x = np.array(self.proton_pulse.new_shape_df['t_ns'] / 1e3 + self.t_us_conv_proton[-1])
_proton_y = np.array(self.proton_pulse.new_shape_df['intensity'])
_conv_y = np.convolve(_array_for_conv_proton, _proton_y, mode='full')
_conv_x = np.append(self.t_us_conv_proton, _proton_x[1:])
_array_function = interp1d(x=_conv_x, y=_conv_y, kind='cubic', bounds_error=False, fill_value=0)
_array = _array_function(t_us)
# print(t_us)
# print(len(t_us))
assert len(t_us) == len(_array)
return t_us - _t_shift, _array
def _interpolate_param(self, e_ev):
_linear_df = self.linear_df.set_index('param_name')
_param_df_interp = | pd.DataFrame() | pandas.DataFrame |
# read content function
## read content based on user & task inputs
## NOTE: might need to think of some parallel solutions for this function
import json

import pandas as pd
from sika.task_bypass.tasktypes.read.http_request import http_request, http_request_dynamic
from IPython import embed
def read_content(db, stage_name, task_id, inputs, function, _from_output = None):
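    """
    Dispatch a 'read' task. For now this runs HTTP requests (plain or dynamic) built from the
    stage/task inputs or from user_input, and returns {task_id: list of result dataframes}.
    """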
    # For now, read_content only handles http_request-related jobs.
concurrent = False
if 'concurrent' in inputs:
concurrent = inputs['concurrent']
# if has input dataframes
dataframe_length = 1
if _from_output:
dataframe_length = len(_from_output)
    # Concurrent tasks require the input to be a list containing exactly one dataframe;
    # otherwise an error is raised asking the user to concat the dataframes first.
if concurrent and dataframe_length != 1:
raise ValueError(f"You can not run concurrent http request tasks on `list that contains over 1 dataframe`, please concat your dataframes first. #ref: {task_id}")
task_input = None
if 'stage_inputs' in inputs:
task_input = inputs['stage_inputs'][0]
if 'task_inputs' in inputs:
task_input = inputs['task_inputs'][0]
if task_input:
if function == 'http-request':
result_lists = []
extract_field = 0
if 'extract_field' in task_input:
extract_field = task_input['extract_field']
preserve_origin_data = None
if 'preserve_origin_data' in task_input:
preserve_origin_data = task_input['preserve_origin_data']
for single_df in _from_output:
result_df = http_request(db, stage_name, task_id, single_df, extract_field, preserve_origin_data, concurrent)
# add dataframe into lists (produce list of dataframes)
result_lists.append(result_df)
return {
task_id: result_lists
}
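        # 'http-request-dynamic': build a request-parameter dataframe by mapping columns of the
        # upstream dataframe onto request params (params_dynamic) plus fixed params (params_fixed).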
if function == "http-request-dynamic":
user_input = inputs['user_input']
params_df = pd.DataFrame({
'base_url': [user_input['base_url']],
})
            mapping_items = []  # default to an empty list so the mapping loop below is a no-op
if 'params_dynamic' in user_input:
mapping_items = user_input['params_dynamic']
fixed_items = user_input['params_fixed']
param_dict = {}
preserve_fields = []
result_lists = []
mapping_fields = {}
for single_df in _from_output:
for item in mapping_items:
param_dict[item['name']] = list(single_df[item['value']])
preserve_fields.append(item['name'])
mapping_fields[item['name']] = item['value']
if param_dict:
params_df = pd.DataFrame(param_dict)
for item in fixed_items:
params_df[item['name']] = item['value']
params_df['base_url'] = user_input['base_url']
if 'headers' in user_input:
params_df['headers'] = json.dumps(user_input['headers'])
page_name = None
if 'pagination' in user_input:
page_name = user_input['pagination']['name']
till = user_input['pagination']['till']
params_df[page_name] = till
result_df = http_request_dynamic(db, stage_name, task_id, params_df, preserve_fields, mapping_fields, page_name, concurrent)
result_lists.append(result_df)
return {
task_id: result_lists
}
result_lists = []
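    # No upstream task output to consume: read the request targets directly from user_input
    # (e.g. a CSV file of URLs/fields or a base_url).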
if 'user_input' in inputs:
user_input = inputs['user_input']
extract_field = 0
if 'extract_field' in user_input:
extract_field = user_input['extract_field']
file_format = None
if 'file_format' in user_input:
file_format = user_input['file_format']
file_name = user_input['file_name']
base_url = None
if 'base_url' in user_input:
base_url = user_input['base_url']
if function == 'http-request':
if file_format == 'csv':
if extract_field:
input_df = pd.read_csv(file_name)
rows = input_df[extract_field]
else:
input_df = pd.read_csv(file_name, header=None)
# default take index 0 column as input
rows = input_df[0]
## NOTE
for row in rows:
# each of url df will produce a str df in return
row_df = pd.DataFrame([row])
result_df = http_request(db, stage_name, task_id, row_df, concurrent=concurrent)
# add dataframe into lists (produce list of dataframes)
result_lists.append(result_df)
return {
task_id: result_lists
}
if base_url:
params_df = | pd.DataFrame([base_url]) | pandas.DataFrame |
import numpy as np
import pandas as pd
import pytest
from sktime.transformers.series_as_features.summarize import PlateauFinder
@pytest.mark.parametrize("value", [np.nan, -10, 10, -0.5, 0.5])
def test_PlateauFinder(value):
    # generate test data (the parametrized `value` marks the plateaus)
    X = pd.DataFrame(pd.Series([
        pd.Series([value, 1, 2, 3, value, 2, 2, 3]),      # plateau at start
        pd.Series([value, value, 3, 3, value, 2, 2, 3]),  # plateau of length 2 at start
    ]))
    # NOTE: the original test body was truncated here; the lines below are a minimal,
    # assumed completion that only checks basic properties of the transformer output.
    transformer = PlateauFinder(value=value)
    Xt = transformer.fit_transform(X)
    assert isinstance(Xt, pd.DataFrame)
    assert Xt.shape[0] == X.shape[0]