| prompt (stringlengths 19–1.03M) | completion (stringlengths 4–2.12k) | api (stringlengths 8–90) |
|---|---|---|
import sys, math, sqlite3
import datetime, time, requests
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
from math import sqrt
from sklearn.metrics import mean_squared_error
from sklearn.preprocessing import MinMaxScaler
# import os; os.environ['KERAS_BACKEND'] = 'theano'
from keras.models import Sequential
from keras.layers import Dense
from keras.layers import LSTM
from keras.models import load_model
def getKlines(symbol, interval="1d", startTime=None, endTime=None, limit=30):
url = "https://api.binance.com/api/v3/klines?symbol="+symbol+"&interval="+interval
if startTime:
url += "&startTime="+str(startTime)
if endTime:
url += "&endTime="+str(endTime)
r_json = requests.get(url+"&limit="+str(limit))
print("The current weight is : "+str(r_json.headers['X-MBX-USED-WEIGHT']))
if r_json.status_code == 429 or r_json.status_code == 418:
print("Weight limit of the API excedeed with code"+str(r_json.status_code))
sys.exit()
if r_json.status_code != 200:
print("Exiting because of unhandled status code from http header:")
print("Status Code : "+str(r_json.status_code))
sys.exit()
return r_json.json()
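# Illustrative usage sketch (not part of the original script): fetch the last
# 30 daily candles for a symbol and pull out the close prices. Index 4 follows
# the documented Binance klines array layout; "BTCUSDT" is just an example symbol.
def example_fetch_closes(symbol="BTCUSDT"):
    klines = getKlines(symbol, interval="1d", limit=30)
    return [float(k[4]) for k in klines]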
# make a one-step forecast
def forecast_lstm(model, batch_size, X):
X = X.reshape(1, 1, len(X))
yhat = model.predict(X, batch_size=batch_size)
return yhat[0,0]
# inverse scaling for a forecasted value
def invert_scale(scaler, X, value):
new_row = [x for x in X] + [value]
array = np.array(new_row)
array = array.reshape(1, len(array))
inverted = scaler.inverse_transform(array)
return inverted[0, -1]
# scale train and test data to [-1, 1]
def scale(train, test):
# fit scaler
scaler = MinMaxScaler(feature_range=(-1, 1))
scaler = scaler.fit(train)
# transform train
train = train.reshape(train.shape[0], train.shape[1])
train_scaled = scaler.transform(train)
# transform test
if test.size > 0:
test = test.reshape(test.shape[0], test.shape[1])
test_scaled = scaler.transform(test)
else:
test_scaled = []
return scaler, train_scaled, test_scaled
def fit_lstm(train, batch_size, nb_epoch, neurons, loadedModel=None):
X, y = train[:, 0:-1], train[:, -1]
X = X.reshape(X.shape[0], 1, X.shape[1])
model = loadedModel
if not loadedModel:
model = Sequential()
model.add(LSTM(neurons, batch_input_shape=(batch_size, X.shape[1], X.shape[2]), stateful=True))
model.add(Dense(1))
model.compile(loss='mean_squared_error', optimizer='adam')
for i in range(nb_epoch):
model.fit(X, y, epochs=1, batch_size=batch_size, verbose=0, shuffle=False)
model.reset_states()
return model
# invert differenced value
def inverse_difference(history, yhat, interval=1):
return yhat + history[-interval]
# create a differenced series
def difference(dataset, interval=1):
diff = list()
for i in range(interval, len(dataset)):
value = dataset[i] - dataset[i - interval]
diff.append(value)
return pd.Series(diff)
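# Minimal sketch (added for illustration): differencing and then applying
# inverse_difference with the matching history recovers the original
# observation; the numbers below are arbitrary.
def example_difference_roundtrip():
    raw = [10.0, 12.0, 15.0, 14.0]
    diffed = difference(raw, interval=1)                   # pd.Series([2.0, 3.0, -1.0])
    restored = inverse_difference(raw[:-1], diffed.iloc[-1], interval=1)
    return diffed, restored                                # restored == raw[-1] == 14.0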
# frame a sequence as a supervised learning problem
def timeseries_to_supervised(data, lag=1):
df = pd.DataFrame(data)
columns = [df.shift(i) for i in range(1, lag+1)]
columns.append(df)
df = | pd.concat(columns, axis=1) | pandas.concat |
from __future__ import annotations
import math
import re
from decimal import Decimal, ROUND_HALF_UP
from dateutil.parser._parser import ParserError
from typing import Dict, Hashable, Union
import json
import numpy
import pandas
from pandas import Series
from .utils import to_utf8_bytes
from .errors import InvalidRedshiftType
Dtype = Union[str, "RedshiftType"]
DtypeArg = Union[Dtype, Dict[Hashable, Dtype]]
_TYPE_REGEX = re.compile(r"^([a-zA-Z0-9 ]*)(\(([0-9, ]*?)\))?$")
def get_redshift_type(type_str):
m = _TYPE_REGEX.match(type_str)
if not m:
raise InvalidRedshiftType(
"Redshift type not found for '{}'".format(type_str)
)
type_name = m.group(1)
type_args = m.group(3)
type_name = type_name.upper().strip()
type_dict = {
"SMALLINT": SmallInt,
"INT2": SmallInt,
"INTEGER": Integer,
"INT": Integer,
"INT4": Integer,
"BIGINT": BigInt,
"INT8": BigInt,
"DECIMAL": Numeric,
"NUMERIC": Numeric,
"REAL": Real,
"FLOAT4": Real,
"DOUBLE PRECISION": DoublePrecision,
"FLOAT8": DoublePrecision,
"FLOAT": DoublePrecision,
"BOOLEAN": Boolean,
"BOOL": Boolean,
"CHAR": Char,
"CHARACTER": Char,
"NCHAR": Char,
"BPCHAR": BPChar,
"VARCHAR": VarChar,
"CHARACTER VARYING": VarChar,
"NVARCHAR": VarChar,
"TEXT": Text,
"DATE": Date,
"TIMESTAMP": TimeStamp,
"TIMESTAMP WITHOUT TIME ZONE": TimeStamp,
"TIMESTAMPTZ": TimeStampTz,
"TIMESTAMP WITH TIME ZONE": TimeStampTz,
"TIME": Time,
"TIME WITHOUT TIME ZONE": Time,
"TIMETZ": TimeTz,
"TIME WITH TIME ZONE": TimeTz,
"GEOMETRY": Geometry,
"SUPER": Super,
}
if type_name not in type_dict:
raise InvalidRedshiftType(
"Redshift type not found for '{}'".format(type_str)
)
redshift_type = type_dict[type_name]
if type_args:
type_args = [int(elm.strip()) for elm in type_args.split(",")]
else:
type_args = []
return redshift_type(*type_args)
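# Illustrative examples (added): the parser accepts bare type names, aliases
# and parameterized forms, e.g.
#   get_redshift_type("INT4")            -> Integer()
#   get_redshift_type("NUMERIC(10, 2)")  -> Numeric(10, 2)
#   get_redshift_type("VARCHAR(256)")    -> VarChar(256)
# while an unknown name such as "UUID" raises InvalidRedshiftType.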
class RedshiftType(object):
"""An abstracttype for Redshift types.
Each type has encoder and decoder.
Methods
-------
encode :
Encode objects stored in a column for pandas.DataFrame
to ``str``-typed notations for Redshift DMLs.
decode :
Map raw values from Redshift Data API to the proper
type, for ease of dealing.
"""
_ESCAPES = [
("\\", "\\\\"),
("'", "\\'"),
("\n", "\\n"),
("\t", "\\t"),
("\b", "\\b"),
("\f", "\\f"),
]
def _check(self, text, ubytes):
pass
def _encode_text(self, text):
if pandas.isnull(text) or pandas.isna(text):
return "NULL"
ubytes = to_utf8_bytes(str(text))
encoded_text = ubytes.decode("utf-8")
self._check(encoded_text, ubytes)
encoded_text = "\n".join(encoded_text.splitlines())
for old, new in self._ESCAPES:
encoded_text = encoded_text.replace(old, new)
return "'{}'".format(encoded_text)
def encode(self, col: Series) -> Series:
"""Encode objects stored in a column for pandas.DataFrame to
``str``-typed Redshift notations, which are used in DMLs.
First, values are cast to string. Next, the character encoding is
changed to ``utf-8``, which Redshift supports as a multibyte
character set. Next, strings are checked in terms of length or
multibyte characters to avoid errors when running ``INSERT``
statements. Then, escapes are replaced. Finally, the string is quoted.
Parameters
----------
col : pandas.Series
The column storing original objects in pandas.DataFrame.
Returns
-------
encoded_col : pandas.Series
Column storing Redshift notations.
"""
encoded_col = col.fillna(numpy.nan)
encoded_col = encoded_col.map(self._encode_text)
return encoded_col
def decode(self, col: Series) -> Series:
"""Decode response from Redshift data api to Python ojects. See
comments on each Redshift type class to confirm what type or class
is used.
Parameters
----------
col :
Column storing raw values in response from Redshift Data API.
Returns
-------
col :
Column storing Python objects.
"""
return col
def __str__(self):
return self.__redshift_name__
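# Small sketch of the escaping behaviour (illustrative only): missing values
# become the literal NULL, quotes and backslashes are escaped, and the result
# is single-quoted.
def example_encode_text():
    col = pandas.Series(["O'Reilly", None, "line1\nline2"])
    return RedshiftType().encode(col)
    # -> 0    'O\'Reilly'
    #    1    NULL
    #    2    'line1\nline2'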
class DoublePrecision(RedshiftType):
"""A type for Redshift ``DOUBLE PRECISION`` type.
This type is decoded to numpy ``float64`` type.
The encoder for this type accepts any values which can be
cast to numpy ``float64`` type.
Methods
-------
encode :
Encode objects stored in a column for pandas.DataFrame
to ``str``-typed notations for Redshift DMLs.
decode :
Map raw values from Redshift Data API to the proper
numpy float type, for ease of dealing.
"""
__np_type__ = "float64"
__redshift_name__ = "DOUBLE PRECISION"
__min_abs__ = 2.22507385850721e-308
__max_abs__ = 1.79769313486231e308
__to_be_checked__ = True
def _check_range(self, val):
if pandas.isna(val) or val == 0.0:
return val
val_abs = abs(val)
if val_abs < self.__min_abs__ or self.__max_abs__ < val_abs:
raise TypeError(
"'{}' is out of range for type '{}'".format(val, str(self))
)
return val
def encode(self, col: Series) -> Series:
"""Encode objects stored in a column for pandas.DataFrame
to ``str``-typed notations for Redshift DMLs.
First, values are cast to the numpy float type. Next, the value
range is checked to avoid overflow or underflow errors
when running ``INSERT`` statements. Finally, the numeric
types are casted to str.
Parameters
----------
col : pandas.Series
The column storing original objects in pandas.DataFrame.
Returns
-------
encoded_col : pandas.Series
Column storing Redshift notations.
"""
encoded_col = col.astype(self.__np_type__)
encoded_col = encoded_col.fillna(numpy.nan)
if self.__to_be_checked__:
encoded_col.map(self._check_range)
encoded_col = encoded_col.replace([numpy.nan], ["NULL"])
encoded_col = encoded_col.astype(str)
return encoded_col
def decode(self, col: Series) -> Series:
"""Raw values in response from Redshift data api are represented
in str or float types. This decoder will map these raw values to
the proper numpy float type, for ease of dealing.
Parameters
----------
col :
Column storing raw values in response from Redshift Data API.
Returns
-------
col :
Column storing numpy float values.
"""
return col.astype(self.__np_type__)
class Real(DoublePrecision):
"""A type for Redshift ``REAL`` type.
This type is decoded to numpy ``float64`` since decimal
inaccuracy is observed when using numpy ``float32``.
The encoder for this type accepts any values which can be
cast to numpy ``float64`` type and do not cause overflow or
underflow for the Redshift ``REAL`` type.
Methods
-------
encode :
Encode objects stored in a column for pandas.DataFrame
to ``str``-typed notations for Redshift DMLs.
decode :
Map raw values from Redshift Data API to the proper
numpy float type, for ease of dealing.
"""
__redshift_name__ = "REAL"
__min_abs__ = 1.1755e-38
__max_abs__ = 3.40282e38
class Numeric(DoublePrecision):
"""A type for Redshift ``DECIMAL`` type.
In this library, the alias ``NUMERIC`` is used instead to avoid
conflict with Python ``decimal.Decimal`` type.
There are no fixed-point types in numpy, so the decoder casts
values from the Redshift Data API to Python ``decimal.Decimal``.
Hence, the output of the decoder is an ``object``-typed Series.
The encoder for this type accepts any values which can be
cast to numpy ``float128`` type and do not cause overflow
for the decimal with the specific precision and scale.
Methods
-------
encode :
Encode objects stored in a column for pandas.DataFrame
to ``str``-typed notations for Redshift DMLs.
decode :
Map raw values from Redshift Data API to the proper
``decimal.Decimal`` type, for ease of dealing.
"""
__np_type__ = "float128"
__redshift_name__ = "NUMERIC"
def __init__(self, precision: int = 18, scale: int = 0):
"""Construct the Redshift ``NUMERIC`` type.
Parameters
----------
precision :
the numeric precision for use in DDL ``CREATE TABLE``.
scale :
the numeric scale for use in DDL ``CREATE TABLE``.
"""
if precision != 18 or scale != 0:
self.__redshift_name__ = "NUMERIC({},{})".format(precision, scale)
self.__max_abs__ = Decimal(str(math.pow(10.0, precision - scale)))
self.__exp_to_quantize__ = Decimal(
"1.{}".format("".join(["0" for i in range(scale)]))
)
def _encode_numeric(self, val):
if | pandas.isna(val) | pandas.isna |
import methods
import MACROS as M
from sklearn.metrics import mean_squared_error, r2_score, mean_absolute_error
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import train_test_split
import numpy as np
import pandas as pd
import random
from time import time
import xgboost
from collections.abc import Iterable
import warnings
import sys
import os
if not sys.warnoptions:
warnings.simplefilter("ignore")
os.environ["PYTHONWARNINGS"] = "ignore"
# TODO: documentation
def convert_to_iterable(var):
vars = var
if isinstance(vars, str) or not isinstance(vars, Iterable):
vars = [var]
return vars
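# e.g. convert_to_iterable('qbc') -> ['qbc'], while convert_to_iterable(['qbc', 'emcm'])
# is returned unchanged (added note; strings are deliberately not iterated character-wise).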
def run_method(regressor, method, list_type, X_train, y_train, X_pool, n_instances):
"""
:param regressor: regressor init function (for example, LinearRegression from sklearn.linear_model).
The fitting (training) is executed inside this function.
:param method: one of the follows: qbc, emcm, greedy_distances, greedy_predictions, cluster_uncertainty,
mse_uncertainty, discretization_uncertainty, pareto, greedy_clustering. otherwise - random.
:param list_type: models or bootstrap. The use is only for qbc and emcm methods.
:param X_train:
:param y_train:
:param X_pool:
:param n_instances: int. number of instances from the pool to be returned.
:return: query_idx - indices of the selected instances from the pool. Use X_pool[query_idx].
"""
learner_list = []
if list_type == 'models':
learner_list = methods.list_by_models(X_train, y_train)
elif list_type == 'bootstrap':
learner_list = methods.list_by_bootstrap(X_train, y_train, regressor, 5)
if method == 'qbc':
query_idx = methods.query_by_committee(learner_list, X_pool, n_instances=n_instances)
elif method == 'emcm':
regressor = regressor.fit(X_train, y_train)
query_idx = methods.expected_model_change_maximization(regressor, learner_list, X_pool, n_instances=n_instances)
elif method == 'greedy_distances':
query_idx = methods.greedy_distances(X_train, X_pool, n_instances=n_instances)
elif method == 'greedy_predictions':
regressor = regressor.fit(X_train, y_train)
query_idx = methods.greedy_predictions(regressor, y_train, X_pool, n_instances=n_instances)
elif method == 'cluster_uncertainty':
regressor = regressor.fit(X_train, y_train)
query_idx = methods.cluster_uncertainty(regressor, X_pool, n_clusters=M.N_CLUSTERS, n_instances=n_instances)
elif method == 'mse_uncertainty':
regressor = regressor.fit(X_train, y_train)
query_idx = methods.mse_uncertainty(regressor, X_train, y_train, X_pool, n_instances=n_instances)
elif method == 'discretization_uncertainty':
classifier = LogisticRegression()
query_idx = methods.discretization_uncertainty(classifier, X_train, y_train, X_pool,
bins=M.BINS, n_instances=n_instances)
elif method == 'greedy_paretos':
g_features = [i for i, col in enumerate(M.FEATURES) if col in M.AL_G_FEATURES]
b_features = [i for i, col in enumerate(M.FEATURES) if col in M.AL_B_FEATURES]
query_idx = methods.greedy_paretos(X_train, X_pool, g_features, b_features, n_instances=n_instances)
elif method == 'pareto':
g_features = [i for i, col in enumerate(M.FEATURES) if col in M.AL_G_FEATURES]
b_features = [i for i, col in enumerate(M.FEATURES) if col in M.AL_B_FEATURES]
query_idx = methods.paretos(X_pool, g_features, b_features, n_instances=n_instances)
elif method == 'greedy_clustering':
query_idx = methods.greedy_clustering(X_train, X_pool, n_clusters=M.N_CLUSTERS, n_instances=n_instances)
else:
query_idx = random.choices(list(range(X_pool.shape[0])), k=n_instances)
return query_idx
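# Illustrative sketch (hypothetical shapes and random data): pick 5 pool rows with
# the QBC strategy using a committee built by the local `methods` module.
# LinearRegression is only an example; any sklearn-style regressor should fit here.
def example_run_method():
    from sklearn.linear_model import LinearRegression
    rng = np.random.default_rng(0)
    X_train, y_train = rng.random((50, 4)), rng.random(50)
    X_pool = rng.random((200, 4))
    query_idx = run_method(LinearRegression(), 'qbc', 'models', X_train, y_train, X_pool, n_instances=5)
    return X_pool[query_idx]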
def simulation(regressor, method, list_type, X_train, y_train, X_pool, y_pool, X_test, y_test):
query_idx = run_method(regressor, method, list_type, X_train, y_train, X_pool, M.N_INSTANCES)
X_new = X_pool[query_idx]
y_new = y_pool[query_idx]
regressor.fit(np.concatenate([X_train, X_new], axis=0), np.concatenate([y_train, y_new], axis=0))
y_pred = regressor.predict(X_test)
mse = mean_squared_error(y_test, y_pred)
mae = mean_absolute_error(y_test, y_pred)
r2 = r2_score(y_test, y_pred)
return query_idx, mse, mae, r2
def create_init_train(data_train):
init = data_train.groupby(M.DATE_COLUMN).apply(lambda x: x.sample(n=M.N_INIT, random_state=42))
X_train, y_train = init[M.FEATURES], init[M.Y_COLUMN]
return X_train, y_train
def create_datasets(data):
train_date = data[M.DATE_COLUMN].min()
data_train = data[data[M.DATE_COLUMN] <= train_date]
X_train, y_train = create_init_train(data_train)
data_val, data_test = train_test_split(data[data[M.DATE_COLUMN] > train_date],
test_size=M.TEST_SIZE, random_state=42)
X_test = data_test[M.FEATURES]
y_test = data_test[M.Y_COLUMN]
return X_train.values, y_train.values, data_val.reset_index(drop=True), X_test.values, y_test.values
def run_methods_simulations(regressor, method, list_type, X_train, y_train, data_val, X_test, y_test, return_train=False):
results = []
dates = data_val[M.DATE_COLUMN].unique()
dates.sort()
for date in dates:
filtered_data = data_val[data_val[M.DATE_COLUMN] == date]
X_pool = filtered_data[M.FEATURES].values
y_pool = filtered_data[M.Y_COLUMN].values
t0 = time()
query_idx, mse, mae, r2 = simulation(regressor, method, list_type, X_train, y_train,
X_pool, y_pool, X_test, y_test)
method_time = (time() - t0) / 60
X_new = X_pool[query_idx]
y_new = y_pool[query_idx]
X_train = np.concatenate([X_train, X_new], axis=0)
y_train = np.concatenate([y_train, y_new], axis=0)
query_idx = filtered_data.iloc[query_idx].index.tolist()
results.append({'regressor': regressor.__class__.__name__ ,'method': method, 'list_type': list_type,
'date': date, 'query_idx': query_idx, 'MSE': mse, 'MAE': mae, 'r2': r2, 'time': method_time})
results = pd.DataFrame(results)
if return_train:
return results, X_train, y_train
else:
return results
def init_phase(regressor, X_train, y_train, data_val, X_test, y_test, method, steps):
methods = convert_to_iterable(method)
train_days = steps
dates = data_val[M.DATE_COLUMN].unique()
dates.sort()
if len(dates) < train_days:
raise ValueError("Not enough data for init phase - data_val should contain at least " +
f"{train_days} dates.")
results = []
method_chunk_size = int(np.ceil(train_days / len(methods)))
for i, method in enumerate(methods):
method_dates = dates[i * method_chunk_size: min((i+1) * method_chunk_size, train_days)]
method_data_val = data_val[data_val[M.DATE_COLUMN].isin(method_dates)]
method_results, X_train, y_train = run_methods_simulations(regressor, method, 'init', X_train, y_train,
method_data_val, X_test, y_test, return_train=True)
results.append(method_results)
results = pd.concat(results, axis=0)
used_dates = dates[:train_days]
return results, X_train, y_train, data_val[data_val[M.DATE_COLUMN].isin(used_dates) == False], X_test, y_test
def run_all_methods(regressor_alg, X_train, y_train, data_val, X_test, y_test):
results = []
for method in ['greedy_distances', 'greedy_predictions', 'cluster_uncertainty', 'greedy_paretos',
'mse_uncertainty', 'discretization_uncertainty', 'greedy_clustering', 'pareto']:
regressor = regressor_alg()
print('\t\t', method)
results.append(run_methods_simulations(regressor, method, '', X_train,
y_train, data_val, X_test, y_test))
for method in ['qbc', 'emcm']:
for list_type in ['bootstrap', 'models']:
regressor = regressor_alg()
print('\t\t', method, list_type)
results.append(run_methods_simulations(regressor, method, list_type, X_train,
y_train, data_val, X_test, y_test))
for i in range(15):
regressor = regressor_alg()
results.append(run_methods_simulations(regressor, 'random', str(i), X_train,
y_train, data_val, X_test, y_test))
results = pd.concat(results, axis=0)
return results
def run_methods_with_init(data, regressors, init_method=None, init_steps=None):
results = []
init_method_string = '_'.join([str(x) for x in convert_to_iterable(init_method)])
for regressor_alg in regressors:
print(regressor_alg.__name__)
X_train, y_train, data_val, X_test, y_test = create_datasets(data)
print('\t', init_method_string)
if init_method is not None and init_steps is not None:
steps_list = convert_to_iterable(init_steps)
prev_steps = 0
prev_steps_results = None
for steps in steps_list:
print('\t', steps)
steps_results = []
regressor = regressor_alg()
init_results, X_train, y_train, data_val, X_test, y_test = init_phase(regressor, X_train, y_train,
data_val, X_test, y_test,
init_method, steps - prev_steps)
if prev_steps_results is not None:
init_results = pd.concat([prev_steps_results, init_results], axis=0)
prev_steps = steps
prev_steps_results = init_results
steps_results.append(init_results)
all_methods_results = run_all_methods(regressor_alg, X_train, y_train, data_val, X_test, y_test)
steps_results.append(all_methods_results)
steps_results = pd.concat(steps_results, axis=0)
steps_results['init'] = init_method_string + '_' + str(steps)
results.append(steps_results)
else:
all_methods_results = run_all_methods(regressor_alg, X_train, y_train, data_val, X_test, y_test)
all_methods_results['init'] = 'no_init'
results.append(all_methods_results)
results = pd.concat(results, axis=0)
return results
def random_forest():
return methods.RandomForestRegressor(n_estimators=40, min_samples_leaf=5 ,random_state=42)
def xgboost_regressor():
return xgboost.XGBRegressor(objective='reg:squarederror', max_depth=20, n_estimators=40, random_state=42)
def mlp_regressor():
return methods.MLPRegressor(hidden_layer_sizes=(16, 8, ))
def main():
init_methods = [None, ['greedy_distances'], ['greedy_paretos'], ['greedy_clustering'], ['random']]
regressors = [LogisticRegression, random_forest, xgboost_regressor]
files = []
os.makedirs(M.RESULTS_FOLDER, exist_ok=True)
for i in range(len(regressors)):
print(regressors[i].__name__, regressors[i]())
dfs = []
for init_method in init_methods:
data = | pd.read_csv(M.DATA_FILE, parse_dates=[M.DATE_COLUMN]) | pandas.read_csv |
# Need options to upload one to 3 files so I can handle the case where some of them are None
# merge datasets on sch line item using whichever merge is needed; it should probably be efficient
# highlight yellow if status is "Under Review with CSAM" or action SDM is "Not order created on tool" / "Not processed on LTSI tool"
import pandas as pd
from datetime import datetime, timedelta
import io
import streamlit as st
import numpy as np
# ideas
# join the all the columns then merge ?
def app():
# st.set_page_config(page_title='LTSI Feedback Form')
st.write("""
# LTSI Feedback
### Instructions: \n
- Used when SDM feedback is in separate files
- Upload excel feedback files (up to 3)
- Upload Open Orders File """)
st.write("## Upload 1 to 3 Feedback Files")
feedback1 = st.file_uploader("Upload Feedback File 1", type="xlsx")
feedback2 = st.file_uploader("Upload Feedback File 2", type="xlsx")
feedback3 = st.file_uploader("Upload Feedback File 3", type="xlsx")
st.write("## Upload Open Orders File")
open_orders = st.file_uploader("Upload Open Order File if feedback does not contain all open order rows",
type="xlsx")
if st.button("Create Feedback"):
def download_file(merged):
action_sdm = merged.columns[34]
merged[action_sdm] = merged[action_sdm].str.lower()
merged[action_sdm] = merged[action_sdm].fillna("0")
merged['Status (SS)'] = np.where(merged[action_sdm].str.contains('cancel', regex=False),
'To be cancelled / reduced', merged['Status (SS)'])
merged['Status (SS)'] = np.where(merged[action_sdm].str.contains('block', regex=False),
'Blocked', merged['Status (SS)'])
merged[action_sdm] = merged[action_sdm].astype(str)
merged[action_sdm].replace(['0', '0.0'], '', inplace=True)
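# Note (added): the column at index 34 is assumed to hold the SDM action text;
# 'cancel' maps the status to 'To be cancelled / reduced' (grey) and 'block' to
# 'Blocked' (red), matching the conditional formats applied further below.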
# Writing df to Excel Sheet
buffer = io.BytesIO()
with pd.ExcelWriter(buffer, engine='xlsxwriter') as writer:
merged.to_excel(writer, sheet_name='Sheet1', index=False)
workbook = writer.book
worksheet = writer.sheets['Sheet1']
formatdict = {'num_format': 'dd/mm/yyyy'}
fmt = workbook.add_format(formatdict)
worksheet.set_column('K:K', None, fmt)
worksheet.set_column('L:L', None, fmt)
# Light yellow fill with dark yellow text.
number_rows = len(merged.index) + 1
yellow_format = workbook.add_format({'bg_color': '#FFEB9C'})
worksheet.conditional_format('A2:AH%d' % (number_rows),
{'type': 'formula',
'criteria': '=$AG2="Under Review with CSAM"',
'format': yellow_format})
grey_format = workbook.add_format({'bg_color': '#C0C0C0'})
worksheet.conditional_format('A2:AH%d' % (number_rows),
{'type': 'formula',
'criteria': '=$AG2="To be cancelled / reduced"',
'format': grey_format})
worksheet.conditional_format('A2:AH%d' % (number_rows),
{'type': 'formula',
'criteria': '=$AG2="Under Review with C-SAM"',
'format': yellow_format})
red_format = workbook.add_format({'bg_color': '#ffc7ce'})
worksheet.conditional_format('A2:AH%d' % (number_rows),
{'type': 'formula',
'criteria': '=$AG2="Blocked"',
'format': red_format})
green_format = workbook.add_format({'bg_color': '#c6efce'})
worksheet.conditional_format('A2:AH%d' % (number_rows),
{'type': 'formula',
'criteria': '=$AG2="Shippable"',
'format': green_format})
worksheet.conditional_format('A2:AH%d' % (number_rows),
{'type': 'formula',
'criteria': '=$AG2="Scheduled Out"',
'format': green_format})
# COL MIGHT BE AH
grey_format = workbook.add_format({'bg_color': '#C0C0C0'})
worksheet.conditional_format('A2:AH%d' % (number_rows),
{'type': 'formula',
'criteria': '=$AH2="To be cancelled / reduced"',
'format': grey_format})
worksheet.conditional_format('A2:AH%d' % (number_rows),
{'type': 'formula',
'criteria': '=$AH2="Under Review with CSAM"',
'format': yellow_format})
worksheet.conditional_format('A2:AH%d' % (number_rows),
{'type': 'formula',
'criteria': '=$AH2="Under Review with C-SAM"',
'format': yellow_format})
red_format = workbook.add_format({'bg_color': '#ffc7ce'})
worksheet.conditional_format('A2:AH%d' % (number_rows),
{'type': 'formula',
'criteria': '=$AH2="Blocked"',
'format': red_format})
green_format = workbook.add_format({'bg_color': '#c6efce'})
worksheet.conditional_format('A2:AH%d' % (number_rows),
{'type': 'formula',
'criteria': '=$AH2="Shippable"',
'format': green_format})
worksheet.conditional_format('A2:AH%d' % (number_rows),
{'type': 'formula',
'criteria': '=$AH2="Scheduled Out"',
'format': green_format})
for column in merged:
column_width = max(merged[column].astype(str).map(len).max(), len(column))
col_idx = merged.columns.get_loc(column)
writer.sheets['Sheet1'].set_column(col_idx, col_idx, column_width)
worksheet.autofilter(0, 0, merged.shape[0], merged.shape[1])
worksheet.set_column(11, 12, 20)
worksheet.set_column(12, 13, 20)
worksheet.set_column(13, 14, 20)
header_format = workbook.add_format({'bold': True,
'bottom': 2,
'bg_color': '#0AB2F7'})
# Write the column headers with the defined format.
for col_num, value in enumerate(merged.columns.values):
worksheet.write(0, col_num, value, header_format)
my_format = workbook.add_format()
my_format.set_align('left')
worksheet.set_column('N:N', None, my_format)
writer.save()
today = datetime.today()
d1 = today.strftime("%d/%m/%Y")
st.write("Download Completed File:")
st.download_button(
label="Download Excel worksheets",
data=buffer,
file_name="LTSI_file_" + d1 + ".xlsx",
mime="application/vnd.ms-excel"
)
def columns_to_keep():
cols = ['sales_org', 'country', 'cust_num', 'customer_name', 'sales_dis', 'rtm', 'sd_line_item',
'order_method', 'del_blk', 'cust_req_date', 'ord_entry_date',
'cust_po_num', 'ship_num', 'ship_cust', 'ship_city', 'plant',
'material_num', 'brand', 'lob', 'project_code', 'material_desc',
'mpn_desc', 'ord_qty', 'shpd_qty', 'delivery_qty', 'remaining_qty',
'delivery_priority', 'opt_delivery_qt', 'rem_mod_opt_qt',
'sch_line_blocked_for_delv']
return cols
def old_feedback_getter(df):
cols = [8]
col_count = 37
if df.shape[1] >= 39:
while col_count < df.shape[1]:
cols.append(col_count)
col_count += 1
return df.iloc[:, cols]
def new_feedback_getter(df):
return df.iloc[:, [8, 34, 35, 36]]
def open_new_feedback_merge(open, new_feedback):
return open.merge(new_feedback, how="left", on="Sales Order and Line Item")
def case2(feedback, open_orders):
feed1 = pd.read_excel(feedback, sheet_name=0, engine="openpyxl")
openOrders = pd.read_excel(open_orders, sheet_name=0, engine="openpyxl")
old_feedback = old_feedback_getter(feed1)
new_feedback = new_feedback_getter(feed1)
open = openOrders.iloc[:, :34]
combined_feedback = open.merge(new_feedback, how="left", on="Sales Order and Line Item")
final = combined_feedback.merge(old_feedback, how="left", on="Sales Order and Line Item")
cols = columns_to_keep()
final.drop_duplicates(subset=cols, keep='first', inplace=True)
download_file(final)
def case3(feedback1, feedback2, open_orders):
feed1 = pd.read_excel(feedback1, sheet_name=0, engine="openpyxl")
feed2 = pd.read_excel(feedback2, sheet_name=0, engine="openpyxl")
openOrders = pd.read_excel(open_orders, sheet_name=0, engine="openpyxl")
old_feedback1 = old_feedback_getter(feed1)
new_feedback1 = new_feedback_getter(feed1)
old_feedback2 = old_feedback_getter(feed2)
new_feedback2 = new_feedback_getter(feed2)
open = openOrders.iloc[:, :34]
joined_new_feedback = pd.concat([new_feedback1, new_feedback2], ignore_index=True)
joined_old_feedback = pd.concat([old_feedback1, old_feedback2], ignore_index=True)
combined_feedback = open.merge(joined_new_feedback, how="left", on="Sales Order and Line Item")
final = combined_feedback.merge(joined_old_feedback, how="left", on="Sales Order and Line Item")
cols = columns_to_keep()
final.drop_duplicates(subset=cols, keep='first', inplace=True)
download_file(final)
def case4(feedback1, feedback2, feedback3, open_orders):
feed1 = pd.read_excel(feedback1, sheet_name=0, engine="openpyxl")
feed2 = pd.read_excel(feedback2, sheet_name=0, engine="openpyxl")
feed3 = pd.read_excel(feedback3, sheet_name=0, engine="openpyxl")
openOrders = pd.read_excel(open_orders, sheet_name=0, engine="openpyxl")
old_feedback1 = old_feedback_getter(feed1)
new_feedback1 = new_feedback_getter(feed1)
old_feedback2 = old_feedback_getter(feed2)
new_feedback2 = new_feedback_getter(feed2)
old_feedback3 = old_feedback_getter(feed3)
new_feedback3 = new_feedback_getter(feed3)
open = openOrders.iloc[:, :34]
joined_new_feedback = | pd.concat([new_feedback1, new_feedback2, new_feedback3], ignore_index=True) | pandas.concat |
import math
import pandas as pd
import pandas.testing as pdt
from pyam import IamDataFrame, IAMC_IDX
from pyam.testing import assert_iamframe_equal
from pyam.timeseries import growth_rate
import pytest
from conftest import META_DF
EXP_DF = IamDataFrame(
pd.DataFrame(
[
["model_a", "scen_a", "World", "Growth Rate", "", 0.430969],
["model_a", "scen_b", "World", "Growth Rate", "", 0.284735],
],
columns=IAMC_IDX + [2005],
),
meta=META_DF,
)
@pytest.mark.parametrize("append", (False, True))
def test_growth_rate(test_df_year, append):
"""Check computing the growth rate from an IamDataFrame"""
if append:
obs = test_df_year.copy()
obs.compute.growth_rate({"Primary Energy": "Growth Rate"}, append=True)
assert_iamframe_equal(test_df_year.append(EXP_DF), obs)
else:
obs = test_df_year.compute.growth_rate({"Primary Energy": "Growth Rate"})
assert_iamframe_equal(EXP_DF, obs)
@pytest.mark.parametrize("append", (False, True))
def test_growth_rate_empty(test_df_year, append):
"""Assert that computing the growth rate with invalid variables returns empty"""
if append:
obs = test_df_year.copy()
obs.compute.growth_rate({"foo": "bar"}, append=True)
assert_iamframe_equal(test_df_year, obs) # assert that no data was added
else:
obs = test_df_year.compute.growth_rate({"foo": "bar"})
assert obs.empty
@pytest.mark.parametrize("x2010", (1, 27, -3))
@pytest.mark.parametrize("rates", ([0.05, 1.25], [0.5, -0.5]))
def test_growth_rate_timeseries(x2010, rates):
"""Check several combinations of growth rates directly on the timeseries"""
x2013 = x2010 * math.pow(1 + rates[0], 3) # 3 years: 2010 - 2013
x2017 = x2013 * math.pow(1 + rates[1], 4) # 4 years: 2013 - 2017
pdt.assert_series_equal(
growth_rate(pd.Series([x2010, x2013, x2017], index=[2010, 2013, 2017])),
pd.Series(rates, index=[2010, 2013]),
)
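# Added example (not from the original suite): with a single interval, the rate
# is recovered directly from x2013 = x2010 * (1 + rate)**3.
def test_growth_rate_timeseries_single_interval():
    pdt.assert_series_equal(
        growth_rate(pd.Series([27.0, 27.0 * 1.05**3], index=[2010, 2013])),
        pd.Series([0.05], index=[2010]),
    )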
@pytest.mark.parametrize("value", (0, -1))
def test_growth_rate_timeseries_fails(value):
"""Check that a timeseries reaching/crossing 0 raises"""
with pytest.raises(ValueError, match="Cannot compute growth rate when*."):
growth_rate( | pd.Series([1.0, value]) | pandas.Series |
import requests
import re
from bs4 import BeautifulSoup
import pandas as pd
import sys
from PyQt4.QtGui import QApplication
from PyQt4.QtCore import QUrl
from PyQt4.QtWebKit import QWebPage
import bs4 as bs
import urllib.request
import time
import random
import urllib3
import datetime
import os
today=datetime.date.today()
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
user_agent = {'User-agent':'Mozilla/5.0'}
base_url = 'https://www.gsmarena.com/makers.php3'
ur='https://www.gsmarena.com/'
country = 'INDIA'
company = 'YU'
model_list = []
usp = []
display_list = []
memory_list = []
processor_list = []
camera_list = []
battery_list = []
thickness_list = []
extras_links = []
records = []
href = []
st_list_heads=[]
st_list_dets=[]
hr=[]
spec_url=[]
HREF=[]
BRANDS=[]
device_num=[]
price_list=[]
launch_date_list=[]
company_name_list=[]
devnum=[]
r=requests.get(base_url)
soup=BeautifulSoup(r.text,'html.parser')
results=soup.find_all('div',attrs={'class':'st-text'})
URL=''
PAGES=[]
for a in range(len(results)):
sa=results[a].find_all('table')
#print(len(sa))
for b in range(len(sa)):
sb=sa[b].find_all('tr')
#print(len(sb))
for c in range(len(sb)):
sc=sb[c].find_all('td')
for d in range(len(sc)):
sd=sc[d].find('a')
if 'yu' in sd.text.lower():
URL=sc[d].find('a')['href']
URL=ur+URL
#print(URL)
R=requests.get(URL)
soup=BeautifulSoup(R.text,'html5lib')
results1=soup.find('div',attrs={'class':'nav-pages'})
PAGES.append(URL)
print(results1)
if (results1) is not None:
sa=results1.find_all('a')
for a in range(len(sa)):
PAGES.append(ur+ sa[a]['href'])
# PAGES entries are already absolute URLs (built with ur above), so just print them
for i in range(len(PAGES)):
print(PAGES[i])
for a in range(len(PAGES)):
r=requests.get(PAGES[a])
soup=BeautifulSoup(r.text,'html.parser')
results=soup.find_all('div',attrs={'class':'makers'})
for b in range(len(results)):
sb=results[b].find_all('ul')
for c in range(len(sb)):
sc=sb[c].find_all('li')
for d in range(len(sc)):
href.append(sc[d].find('a')['href'])
usp.append(sc[d].find('img')['title'])
model_list.append(sc[d].find('strong').text.strip())
print('href:-',end='')
print(len(href))
print('USP:-',end='')
print(len(usp))
print('model list:-',end='')
print(len(model_list))
for i in range(len(href)):
href[i]=ur+href[i]
c1=''
d1=''
r=requests.get(href[i])
soup = BeautifulSoup(r.text,'html5lib')
results=soup.find_all('div',attrs={'id':'specs-list'})
for a in range(len(results)):
sa=results[a].find_all('table',attrs={'cellspacing':'0'})
for b in range(len(sa)):
sb=sa[b].find_all('tbody')
for c in range(len(sb)):
sc=sb[c].find('th').text
if 'body' in sc.lower():
sd=sb[c].find_all('tr')
for d in range(len(sd)):
se=sd[d].find_all('td',attrs={'class':'ttl'})
sf=sd[d].find_all('td',attrs={'class':'nfo'})
for e in range(len(sf)):
if 'dimension' in se[e].text.lower():
thickness_list.append(sf[e].text)
if 'platform' in sc.lower():
sd=sb[c].find_all('tr')
for d in range(len(sd)):
se=sd[d].find_all('td',attrs={'class':'ttl'})
sf=sd[d].find_all('td',attrs={'class':'nfo'})
for e in range(len(sf)):
if 'cpu' in se[e].text.lower():
processor_list.append(sf[e].text)
if 'memory' in sc.lower():
sd=sb[c].find_all('tr')
for d in range(len(sd)):
se=sd[d].find_all('td',attrs={'class':'ttl'})
sf=sd[d].find_all('td',attrs={'class':'nfo'})
for e in range(len(sf)):
if 'internal' in se[e].text.lower():
memory_list.append(sf[e].text)
if 'camera' in sc.lower():
sd=sb[c].find_all('tr')
for d in range(len(sd)):
se=sd[d].find_all('td',attrs={'class':'ttl'})
sf=sd[d].find_all('td',attrs={'class':'nfo'})
for e in range(len(sf)):
if 'primary' in se[e].text.lower() or 'secondary' in se[e].text.lower():
c1=c1+se[e].text+':- '+sf[e].text+' || '
if 'display' in sc.lower():
sd=sb[c].find_all('tr')
for d in range(len(sd)):
se=sd[d].find_all('td',attrs={'class':'ttl'})
sf=sd[d].find_all('td',attrs={'class':'nfo'})
for e in range(len(sf)):
if 'type' in se[e].text.lower() or 'size' in se[e].text.lower():
d1=d1+se[e].text+':- '+sf[e].text+' || '
if 'battery' in sc.lower():
sd=sb[c].find_all('tr')
for d in range(len(sd)):
se=sd[d].find_all('td',attrs={'class':'ttl'})
sf=sd[d].find_all('td',attrs={'class':'nfo'})
for e in range(len(sf)):
if 'mah' in sf[e].text.lower():
battery_list.append(sf[e].text)
if 'launch' in sc.lower():
sd=sb[c].find_all('tr')
for d in range(len(sd)):
se=sd[d].find_all('td',attrs={'class':'ttl'})
sf=sd[d].find_all('td',attrs={'class':'nfo'})
for e in range(len(sf)):
if 'announce' in se[e].text.lower():
launch_date_list.append(sf[e].text.strip())
if 'misc' in sc.lower():
sd=sb[c].find_all('tr')
for d in range(len(sd)):
se=sd[d].find_all('td',attrs={'class':'ttl'})
sf=sd[d].find_all('td',attrs={'class':'nfo'})
for e in range(len(sf)):
if 'price' in se[e].text.lower():
price_list.append(sf[e].text.strip())
if d1!='':
display_list.append(d1)
if c1!='':
camera_list.append(c1)
if len(battery_list)==i:
battery_list.append('Not Available')
if len(memory_list)==i:
memory_list.append('Not Available')
if len(processor_list)==i:
processor_list.append('Not Available')
if len(display_list)==i:
display_list.append('Not Available')
if len(thickness_list)==i:
thickness_list.append('Not Available')
if len(camera_list)==i:
camera_list.append('Not Available')
if len(price_list)==i:
price_list.append('Not Available')
if len(usp)==i:
usp.append('Not Available')
if len(launch_date_list)==i:
launch_date_list.append('Not Available')
## if var==500:
## break
print('DISPLAY LIST:- ')
print(len(display_list))
##for i in display_list:
## print(i)
##print('-------------------------------------------------------------------------------------------------------------------------------------------------------------')
print('PROCESSOR LIST:- ')
print(len(processor_list))
##for i in processor_list:
## print(i)
##print('-------------------------------------------------------------------------------------------------------------------------------------------------------------')
print('MEMORY LIST:- ')
print(len(memory_list))
##for i in memory_list:
## print(i)
##print('-------------------------------------------------------------------------------------------------------------------------------------------------------------')
print('CAMERA LIST:- ')
print(len(camera_list))
##for i in camera_list:
## print(i)
##print('-------------------------------------------------------------------------------------------------------------------------------------------------------------')
print('BATTERY LIST:- ')
print(len(battery_list))
##for i in battery_list:
## print(i)
##print('-------------------------------------------------------------------------------------------------------------------------------------------------------------')
##print('-------------------------------------------------------------------------------------------------------------------------------------------------------------')
print('THICKNESS LIST:-')
print(len(thickness_list))
##for i in thickness_list:
## print(i)
##print('-------------------------------------------------------------------------------------------------------------------------------------------------------------')
print('PRICE LIST:_')
print(len(price_list))
##for i in price_list:
## print(i)
##print('-------------------------------------------------------------------------------------------------------------------------------------------------------------')
print('LAUNCH DATE:-')
print(len(launch_date_list))
##for i in launch_date_list:
## print(i)
extras_links = href
for i in range(len(model_list)):
records.append((country,company,model_list[i],price_list[i],launch_date_list[i], usp[i], display_list[i], camera_list[i], memory_list[i], battery_list[i], thickness_list[i], processor_list[i], extras_links[i]))
path='C:\\LavaWebScraper\\GSMARENA\\'
df = | pd.DataFrame(records, columns = ['COUNTRY','COMPANY', 'MODEL', 'PRICE','LAUNCH DATE', 'USP', 'DISPLAY', 'CAMERA', 'MEMORY', 'BATTERY', 'THICKNESS', 'PROCESSOR', 'EXTRAS/ LINKS']) | pandas.DataFrame |
"""
THIS IS A SAMPLE CUSTOM ALGORITHM to provide a skeleton to develop your own
custom algorithms. The algorithm itself, although viable, is not recommended for
production or general use, it is simply a toy algorithm here to demonstrate the
structure of a simple custom algorithm that has ``algorithm_parameters`` passed
as an empty dict {}.
It is documented via comments #
"""
# REQUIRED Skyline imports. All custom algorithms MUST have the following two
# imports. These are required for exception handling and to record algorithm
# errors regardless of debug_logging setting for the custom_algorithm
import logging
import traceback
from custom_algorithms import record_algorithm_error
# Import ALL modules that the custom algorithm requires. Remember that if a
# requirement is not one that is provided by the Skyline requirements.txt you
# must ensure it is installed in the Skyline virtualenv
import pandas as pd
# To test max_execution_time import sleep
# from time import sleep
from functions.numpy.percent_different import get_percent_different
# Define your simple algorithm.
# The name of the function MUST be the same as the name declared in
# settings.CUSTOM_ALGORITHMS.
# It MUST have 3 parameters:
# current_skyline_app, timeseries, algorithm_parameters
# See https://earthgecko-skyline.readthedocs.io/en/latest/algorithms/custom-algorithms.html
# for a full explanation about each.
# ALWAYS WRAP YOUR ALGORITHM IN try and except
def significant_change_window_percent_sustained(
current_skyline_app, parent_pid, timeseries, algorithm_parameters):
"""
A data point is anomalous if it is x percent different from the median of the
window (seconds resample) of the last p period (seconds). A few examples,
If the value is 10% different from the median value of the 10min windows of
the last hour.
algorithm_parameters: {'window': 600, 'percent': 10.0, 'period': 3600}
If the value is 50% different from the median value of the 10min windows of
the last day.
algorithm_parameters: {'window': 600, 'percent': 50.0, 'period': 86400}
:param current_skyline_app: the Skyline app executing the algorithm. This
will be passed to the algorithm by Skyline. This is **required** for
error handling and logging. You do not have to worry about handling the
argument in the scope of the custom algorithm itself, but the algorithm
must accept it as the first argument.
:param parent_pid: the parent pid which is executing the algorithm, this is
**required** for error handling and logging. You do not have to worry
about handling this argument in the scope of algorithm, but the
algorithm must accept it as the second argument.
:param timeseries: the time series as a list e.g. ``[[1578916800.0, 29.0],
[1578920400.0, 55.0], ... [1580353200.0, 55.0]]``
:param algorithm_parameters: a dictionary of any parameters and their
arguments you wish to pass to the algorithm.
:type current_skyline_app: str
:type parent_pid: int
:type timeseries: list
:type algorithm_parameters: dict
:return: True, False or None
:rtype: boolean
"""
# You MUST define the algorithm_name
algorithm_name = 'significant_change_window_percent_sustained'
# If you wanted to log, you can but this should only be done during
# testing and development
def get_log(current_skyline_app):
current_skyline_app_logger = current_skyline_app + 'Log'
current_logger = logging.getLogger(current_skyline_app_logger)
return current_logger
# Define the default state of None and None, anomalous does not default to
# False as that is not correct, False is only correct if the algorithm
# determines the data point is not anomalous. The same is true for the
# anomalyScore.
anomalous = None
anomalyScore = None
# Use the algorithm_parameters to determine the sample_period
debug_logging = None
try:
debug_logging = algorithm_parameters['debug_logging']
except:
debug_logging = False
if debug_logging:
try:
current_logger = get_log(current_skyline_app)
current_logger.debug('debug :: %s :: debug_logging enabled with algorithm_parameters - %s' % (
algorithm_name, str(algorithm_parameters)))
except:
# This except pattern MUST be used in ALL custom algorithms to
# facilitate the traceback from any errors. We want the algorithm to
# run super fast and without spamming the log with lots of errors.
# But we do not want the function returning and not reporting
# anything to the log, so the pythonic except is used to "sample" any
# algorithm errors to a tmp file and report once per run rather than
# spewing tons of errors into the log e.g. analyzer.log
record_algorithm_error(current_skyline_app, parent_pid, algorithm_name, traceback.format_exc())
# Return None and None as the algorithm could not determine True or False
return (anomalous, anomalyScore)
# window in seconds
window = 600
try:
window = algorithm_parameters['window']
except:
pass
resample_window = '%sS' % str(window)
# Allow the LevelShiftAD c parameter to be passed in the
# algorithm_parameters
percent = 10.0
try:
percent = algorithm_parameters['percent']
except:
pass
period = 3600
try:
period = algorithm_parameters['period']
except:
pass
return_percent_as_anomalyScore = False
try:
return_percent_as_anomalyScore = algorithm_parameters['return_percent_as_anomalyScore']
except:
pass
times_in_a_row = 0
try:
times_in_a_row = algorithm_parameters['times_in_a_row']
except:
pass
# To test max_execution_time enable a sleep
# sleep(1)
# ALWAYS WRAP YOUR ALGORITHM IN try and the BELOW except
try:
timestamp = int(timeseries[-1][0])
value = timeseries[-1][1]
values = []
if times_in_a_row:
values = [item[1] for item in timeseries[-times_in_a_row:]]
from_timestamp = timestamp - period
applicable_timeseries = [item for item in timeseries if int(item[0] >= from_timestamp)]
del timeseries
df = pd.DataFrame(applicable_timeseries, columns=['date', 'value'])
df['date'] = pd.to_datetime(df['date'], unit='s')
datetime_index = pd.DatetimeIndex(df['date'].values)
df = df.set_index(datetime_index)
df.drop('date', axis=1, inplace=True)
resampled_df = df.resample(resample_window, origin='epoch').median()
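# Illustrative note (added): with window=600 and period=3600 the last hour of
# data collapses into six 10-minute median buckets; per the docstring above, the
# latest value is then compared against the window median(s), and a difference
# larger than `percent` (e.g. 55 vs. 50 is 10%) marks the point anomalous.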
del df
resampled_values = resampled_df['value'].values.tolist()
series = | pd.Series([x for x in resampled_values]) | pandas.Series |
# SPDX-License-Identifier: Apache-2.0
#
# Copyright (C) 2015, ARM Limited and contributors.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
""" Latency Analysis Module """
import matplotlib.gridspec as gridspec
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import pylab as pl
import re
import os
from collections import namedtuple
from analysis_module import AnalysisModule
from devlib.utils.misc import memoized
from trappy.utils import listify
# Tuple representing all IDs data of a Task
TaskData = namedtuple('TaskData', ['pid', 'names', 'label'])
CDF = namedtuple('CDF', ['df', 'threshold', 'above', 'below'])
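# CDF.df holds the cumulative distribution as a DataFrame, while 'below' and
# 'above' are the fractions of samples under/over 'threshold' (see the
# _getCDF usage in plotLatency below).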
class LatencyAnalysis(AnalysisModule):
"""
Support for plotting Latency Analysis data
:param trace: input Trace object
:type trace: :class:`trace.Trace`
"""
def __init__(self, trace):
super(LatencyAnalysis, self).__init__(trace)
###############################################################################
# DataFrame Getter Methods
###############################################################################
@memoized
def _dfg_latency_df(self, task):
"""
DataFrame of task's wakeup/suspend events
The returned DataFrame index is the time, in seconds, an event related
to `task` happened.
The DataFrame has these columns:
- target_cpu: the CPU where the task has been scheduled
reported only for wakeup events
- curr_state: the current task state:
A letter which corresponds to the standard events reported by the
prev_state field of a sched_switch event.
Only exception is 'A', which is used to represent active tasks,
i.e. tasks RUNNING on a CPU
- next_state: the next status for the task
- t_start: the time when the current state started; it matches the Time index
- t_delta: the interval of time after which the task will switch to the
next_state
:param task: the task to report wakeup latencies for
:type task: int or str
"""
if not self._trace.hasEvents('sched_wakeup'):
self._log.warning('Events [sched_wakeup] not found, '
'cannot compute CPU active signal!')
return None
if not self._trace.hasEvents('sched_switch'):
self._log.warning('Events [sched_switch] not found, '
'cannot compute CPU active signal!')
return None
# Get task data
td = self._getTaskData(task)
if not td:
return None
wk_df = self._dfg_trace_event('sched_wakeup')
sw_df = self._dfg_trace_event('sched_switch')
# Filter Task's WAKEUP events
task_wakeup = wk_df[wk_df.pid == td.pid][['target_cpu', 'pid']]
# Filter Task's START events
task_events = (sw_df.prev_pid == td.pid) | (sw_df.next_pid == td.pid)
task_switches_df = sw_df[task_events]\
[['__cpu', 'prev_pid', 'next_pid', 'prev_state']]
# Unset prev_state for switch_in events, i.e.
# we don't care about the status of a task we are replacing
task_switches_df.prev_state = task_switches_df.apply(
lambda r : np.nan if r['prev_pid'] != td.pid
else self._taskState(r['prev_state']),
axis=1)
# Rename prev_state
task_switches_df.rename(columns={'prev_state' : 'curr_state'}, inplace=True)
# Fill in Running status
# We've just set curr_state (a.k.a prev_state) to nan where td.pid was
# switching in, so set the state to 'A' ("active") in those places.
task_switches_df.curr_state = task_switches_df.curr_state.fillna(value='A')
# Join Wakeup and SchedSwitch events
task_latency_df = task_wakeup.join(task_switches_df, how='outer',
lsuffix='_wkp', rsuffix='_slp')
# Remove not required columns
task_latency_df = task_latency_df[['target_cpu', '__cpu', 'curr_state']]
# Set Wakeup state on each Wakeup event
task_latency_df.curr_state = task_latency_df.curr_state.fillna(value='W')
# Sanity check for all task states to be mapped to a char
numbers = 0
for value in task_switches_df.curr_state.unique():
if type(value) is not str:
self._log.warning('The [sched_switch] events contain "prev_state" value [%s]',
value)
numbers += 1
if numbers:
verb = 'is' if numbers == 1 else 'are'
self._log.warning(' which %s not currently mapped into a task state.',
verb)
self._log.warning('Check mappings in:')
self._log.warning(' %s::%s _taskState()',
__file__, self.__class__.__name__)
# Forward annotate task state
task_latency_df['next_state'] = task_latency_df.curr_state.shift(-1)
# Forward account for previous state duration
task_latency_df['t_start'] = task_latency_df.index
task_latency_df['t_delta'] = (
task_latency_df['t_start'].shift(-1)
- task_latency_df['t_start']
)
# Fix the last entry, which will have a NaN state duration
# Set duration to trace_end - last_event
task_latency_df.loc[task_latency_df.index[-1], 't_delta'] = (
self._trace.start_time +
self._trace.time_range -
task_latency_df.index[-1]
)
return task_latency_df
# Select Wakeup latency
def _dfg_latency_wakeup_df(self, task):
"""
DataFrame of task's wakeup latencies
The returned DataFrame index is the time, in seconds, at which `task` woke up.
The DataFrame has just one column:
- wakeup_latency: the time the task waited before getting a CPU
:param task: the task to report wakeup latencies for
:type task: int or str
"""
task_latency_df = self._dfg_latency_df(task)
if task_latency_df is None:
return None
df = task_latency_df[
(task_latency_df.curr_state == 'W') &
(task_latency_df.next_state == 'A')][['t_delta']]
df.rename(columns={'t_delta' : 'wakeup_latency'}, inplace=True)
return df
# Select Preemption latency
def _dfg_latency_preemption_df(self, task):
"""
DataFrame of task's preemption latencies
The returned DataFrame index is the time, in seconds, `task` has been
preempted.
The DataFrame has just one column:
- preemption_latency: the time the task waited before getting again a CPU
:param task: the task to report wakeup latencies for
:type task: int or str
"""
task_latency_df = self._dfg_latency_df(task)
if task_latency_df is None:
return None
df = task_latency_df[
(task_latency_df.curr_state.isin([0, 'R', 'R+'])) &
(task_latency_df.next_state == 'A')][['t_delta']]
df.rename(columns={'t_delta' : 'preempt_latency'}, inplace=True)
return df
@memoized
def _dfg_activations_df(self, task):
"""
DataFrame of task's wakeup intervals
The returned DataFrame index is the time, in seconds, at which `task`
woke up.
The DataFrame has just one column:
- activation_interval: the time since the previous wakeup events
:param task: the task to report runtimes for
:type task: int or str
"""
# Select all wakeup events
wkp_df = self._dfg_latency_df(task)
wkp_df = wkp_df[wkp_df.curr_state == 'W'].copy()
# Compute delta between successive wakeup events
wkp_df['activation_interval'] = (
wkp_df['t_start'].shift(-1) - wkp_df['t_start'])
wkp_df['activation_interval'] = wkp_df['activation_interval'].shift(1)
# Return the activation period each time the task wakes up
wkp_df = wkp_df[['activation_interval']].shift(-1)
return wkp_df
@memoized
def _dfg_runtimes_df(self, task):
"""
DataFrame of task's runtime each time the task blocks
The returned DataFrame index is the time, in seconds, `task` completed
an activation (i.e. sleep or exit)
The DataFrame has just one column:
- running_time: the time the task spent RUNNING since its last wakeup
:param task: the task to report runtimes for
:type task: int or str
"""
# Select all wakeup events
run_df = self._dfg_latency_df(task)
# Filter function to add up RUNNING intervals of each activation
def cr(row):
if row['curr_state'] in ['S']:
return cr.runtime
if row['curr_state'] in ['W']:
if cr.spurious_wkp:
cr.runtime += row['t_delta']
cr.spurious_wkp = False
return cr.runtime
cr.runtime = 0
return cr.runtime
if row['curr_state'] != 'A':
return cr.runtime
if row['next_state'] in ['R', 'R+', 'S', 'x', 'D']:
cr.runtime += row['t_delta']
return cr.runtime
# This is required to capture strange trace sequences where
# a switch_in event is followed by a wakeup_event.
# This sequence is not expected, but we found it in some traces.
# Possible reasons could be:
# - misplaced sched_wakeup events
# - trace buffer artifacts
# TO BE BETTER investigated in kernel space.
# For the time being, we account this interval as RUNNING time,
# which is what kernelshark does.
if row['next_state'] in ['W']:
cr.runtime += row['t_delta']
cr.spurious_wkp = True
return cr.runtime
if row['next_state'] in ['n']:
return cr.runtime
self._log.warning("Unexpected next state: %s @ %f",
row['next_state'], row['t_start'])
return 0
# cr's static variables initialization
cr.runtime = 0
cr.spurious_wkp = False
# Add up RUNNING intervals of each activation
run_df['running_time'] = run_df.apply(cr, axis=1)
# Return RUNTIME computed for each activation,
# each time the task blocks or terminate
run_df = run_df[run_df.next_state.isin(['S', 'x'])][['running_time']]
return run_df
@memoized
def _dfg_task_residency(self, task):
"""
DataFrame of a task's execution time on each CPU
The returned DataFrame index is the CPU indexes
The DataFrame has just one column:
- runtime: the time the task spent being active on a given CPU,
in seconds.
:param task: the task to report runtimes for
:type task: int or str
"""
cpus = range(self._platform['cpus_count'])
runtimes = {cpu : 0.0 for cpu in cpus}
df = self._dfg_latency_df(task)
# Exclude sleep time
df = df[df.curr_state != 'S']
for time, data in df.iterrows():
cpu = data['__cpu']
# When waking up, '__cpu' is NaN but 'target_cpu' is populated instead
if np.isnan(cpu):
if data['curr_state'] == 'W':
cpu = data['target_cpu']
else:
raise RuntimeError('No CPU data for latency_df @{}'.format(time))
runtimes[cpu] += data['t_delta']
data = [(cpu, time) for cpu, time in runtimes.iteritems()]
return pd.DataFrame(data, columns=['CPU', 'runtime']).set_index('CPU')
###############################################################################
# Plotting Methods
###############################################################################
def plotLatency(self, task, kind='all', tag=None, threshold_ms=1, bins=64):
"""
Generate a set of plots to report the WAKEUP and PREEMPT latencies the
specified task has been subject to. A WAKEUP latency is the time from
when a task becomes RUNNABLE until the first time it gets a CPU.
A PREEMPT latency is the time from when a RUNNING task is suspended
because its CPU is assigned to another task until the task
gets the CPU again.
:param task: the task to report latencies for
:type task: int or list(str)
:param kind: the kind of latencies to report (WAKEUP and/or PREEMPT)
:type kind: str
:param tag: a string to add to the plot title
:type tag: str
:param threshold_ms: the minimum acceptable [ms] value to report
graphically in the generated plots
:type threshold_ms: int or float
:param bins: number of bins to be used for the runtime's histogram
:type bins: int
:returns: a DataFrame with statistics on plotted latencies
"""
if not self._trace.hasEvents('sched_switch'):
self._log.warning('Event [sched_switch] not found, '
'plot DISABLED!')
return
if not self._trace.hasEvents('sched_wakeup'):
self._log.warning('Event [sched_wakeup] not found, '
'plot DISABLED!')
return
# Get task data
td = self._getTaskData(task)
if not td:
return None
# Load wakeup latencies (if required)
wkp_df = None
if 'all' in kind or 'wakeup' in kind:
wkp_df = self._dfg_latency_wakeup_df(td.pid)
if wkp_df is not None:
wkp_df.rename(columns={'wakeup_latency' : 'latency'}, inplace=True)
self._log.info('Found: %5d WAKEUP latencies', len(wkp_df))
# Load preempt latencies (if required)
prt_df = None
if 'all' in kind or 'preempt' in kind:
prt_df = self._dfg_latency_preemption_df(td.pid)
if prt_df is not None:
prt_df.rename(columns={'preempt_latency' : 'latency'}, inplace=True)
self._log.info('Found: %5d PREEMPT latencies', len(prt_df))
if wkp_df is None and prt_df is None:
self._log.warning('No Latency info for task [%s]', td.label)
return
# Join the two data frames
df = wkp_df.append(prt_df)
ymax = 1.1 * df.latency.max()
self._log.info('Total: %5d latency events', len(df))
# Build the series for the CDF
cdf = self._getCDF(df.latency, (threshold_ms / 1000.))
self._log.info('%.1f %% samples below %d [ms] threshold',
100. * cdf.below, threshold_ms)
# Setup plots
gs = gridspec.GridSpec(2, 2, height_ratios=[2,1], width_ratios=[1,1])
plt.figure(figsize=(16, 8))
plot_title = "[{}]: {} latencies".format(td.label, kind.upper())
if tag:
plot_title = "{} [{}]".format(plot_title, tag)
plot_title = "{}, threshold @ {} [ms]".format(plot_title, threshold_ms)
# Latency events duration over time
axes = plt.subplot(gs[0,0:2])
axes.set_title(plot_title)
try:
wkp_df.rename(columns={'latency': 'wakeup'}, inplace=True)
wkp_df.plot(style='b+', logy=True, ax=axes)
except: pass
try:
prt_df.rename(columns={'latency' : 'preempt'}, inplace=True)
prt_df.plot(style='r+', logy=True, ax=axes)
except: pass
axes.axhline(threshold_ms / 1000., linestyle='--', color='g')
self._trace.analysis.status.plotOverutilized(axes)
axes.legend(loc='lower center', ncol=2)
axes.set_xlim(self._trace.x_min, self._trace.x_max)
# Cumulative distribution of latencies samples
axes = plt.subplot(gs[1,0])
cdf.df.plot(ax=axes, legend=False, xlim=(0,None),
title='Latencies CDF ({:.1f}% within {} [ms] threshold)'\
.format(100. * cdf.below, threshold_ms))
axes.axvspan(0, threshold_ms / 1000., facecolor='g', alpha=0.5);
axes.axhline(y=cdf.below, linewidth=1, color='r', linestyle='--')
# Histogram of all latencies
axes = plt.subplot(gs[1,1])
df.latency.plot(kind='hist', bins=bins, ax=axes,
xlim=(0,ymax), legend=False,
title='Latency histogram ({} bins, {} [ms] green threshold)'\
.format(bins, threshold_ms));
axes.axvspan(0, threshold_ms / 1000., facecolor='g', alpha=0.5);
# Save generated plots into datadir
task_name = re.sub('[\ :/]', '_', td.label)
figname = '{}/{}task_latencies_{}_{}.png'\
.format(self._trace.plots_dir, self._trace.plots_prefix,
td.pid, task_name)
pl.savefig(figname, bbox_inches='tight')
# Return statistics
stats_df = df.describe(percentiles=[0.95, 0.99])
label = '{:.1f}%'.format(100. * cdf.below)
stats = { label : cdf.threshold }
return stats_df.append(pd.DataFrame(
stats.values(), columns=['latency'], index=stats.keys()))
def plotLatencyBands(self, task, axes=None):
"""
Draw a plot that shows intervals of time when the execution of a
RUNNABLE task has been delayed. The plot reports:
WAKEUP latencies as RED colored bands
PREEMPTION latencies as BLUE colored bands
The optional axes parameter allows plotting the signal on an existing
graph.
:param task: the task to report latencies for
:type task: str
:param axes: axes on which to plot the signal
:type axes: :mod:`matplotlib.axes.Axes`
"""
if not self._trace.hasEvents('sched_switch'):
self._log.warning('Event [sched_switch] not found, '
'plot DISABLED!')
return
if not self._trace.hasEvents('sched_wakeup'):
self._log.warning('Event [sched_wakeup] not found, '
'plot DISABLED!')
return
# Get task PID
td = self._getTaskData(task)
if not td:
return None
wkl_df = self._dfg_latency_wakeup_df(td.pid)
prt_df = self._dfg_latency_preemption_df(td.pid)
if wkl_df is None and prt_df is None:
self._log.warning('No latency info for task [%s]', td.label)
return
# If not axis provided: generate a standalone plot
if not axes:
gs = gridspec.GridSpec(1, 1)
plt.figure(figsize=(16, 2))
axes = plt.subplot(gs[0, 0])
axes.set_title('Latencies on [{}] '
'(red: WAKEUP, blue: PREEMPT)'\
.format(td.label))
axes.set_xlim(self._trace.x_min, self._trace.x_max)
axes.set_yticklabels([])
axes.set_xlabel('Time [s]')
axes.grid(True)
# Draw WAKEUP latencies
try:
bands = [(t, wkl_df['wakeup_latency'][t]) for t in wkl_df.index]
for (start, duration) in bands:
end = start + duration
axes.axvspan(start, end, facecolor='r', alpha=0.1)
axes.set_xlim(self._trace.x_min, self._trace.x_max)
except: pass
# Draw PREEMPTION latencies
try:
bands = [(t, prt_df['preempt_latency'][t]) for t in prt_df.index]
for (start, duration) in bands:
end = start + duration
axes.axvspan(start, end, facecolor='b', alpha=0.1)
axes.set_xlim(self._trace.x_min, self._trace.x_max)
except: pass
def plotActivations(self, task, tag=None, threshold_ms=16, bins=64):
"""
Plots "activation intervals" for the specified task
An "activation interval" is time incurring between two consecutive
wakeups of a task. A set of plots is generated to report:
- Activations interval at wakeup time: every time a task wakeups a
point is plotted to represent the time interval since the previous
wakeup.
- Activations interval cumulative function: reports the cumulative
function of the activation intervals.
- Activations intervals histogram: reports a 64 bins histogram of
the activation intervals.
All plots are parameterized based on the value of threshold_ms, which
can be used to filter out activation intervals bigger than 2 times this
value.
Such a threshold is useful to filter outliers out of the plots, thus
focusing the analysis on the most critical periodicity under analysis.
The number and percentage of discarded samples are reported in output.
A default threshold of 16 [ms] is used, which is useful for example
to analyze a 60Hz rendering pipeline.
A PNG of the generated plots is generated and saved in the same folder
where the trace is.
:param task: the task to report latencies for
:type task: int or list(str)
:param tag: a string to add to the plot title
:type tag: str
:param threshold_ms: the minimum acceptable [ms] value to report
graphically in the generated plots
:type threshold_ms: int or float
:param bins: number of bins to be used for the runtime's histogram
:type bins: int
:returns: a DataFrame with statistics on plotted activation intervals
"""
if not self._trace.hasEvents('sched_switch'):
self._log.warning('Event [sched_switch] not found, '
'plot DISABLED!')
return
if not self._trace.hasEvents('sched_wakeup'):
self._log.warning('Event [sched_wakeup] not found, '
'plot DISABLED!')
return
# Get task data
td = self._getTaskData(task)
if not td:
return None
# Load activation data
wkp_df = self._dfg_activations_df(td.pid)
if wkp_df is None:
return None
self._log.info('Found: %5d activations for [%s]',
len(wkp_df), td.label)
# Disregard data above two times the specified threshold
y_max = (2 * threshold_ms) / 1000.
len_tot = len(wkp_df)
wkp_df = wkp_df[wkp_df.activation_interval <= y_max]
len_plt = len(wkp_df)
if len_plt < len_tot:
len_dif = len_tot - len_plt
len_pct = 100. * len_dif / len_tot
self._log.warning('Discarding {} activation intervals (above 2 x threshold_ms, '
'{:.1f}% of the overall activations)'\
.format(len_dif, len_pct))
ymax = 1.1 * wkp_df.activation_interval.max()
# Build the series for the CDF
cdf = self._getCDF(wkp_df.activation_interval, (threshold_ms / 1000.))
self._log.info('%.1f %% samples below %d [ms] threshold',
100. * cdf.below, threshold_ms)
# Setup plots
gs = gridspec.GridSpec(2, 2, height_ratios=[2,1], width_ratios=[1,1])
plt.figure(figsize=(16, 8))
plot_title = "[{}]: activaton intervals (@ wakeup time)".format(td.label)
if tag:
plot_title = "{} [{}]".format(plot_title, tag)
plot_title = "{}, threshold @ {} [ms]".format(plot_title, threshold_ms)
# Activations intervals over time
axes = plt.subplot(gs[0,0:2])
axes.set_title(plot_title)
wkp_df.plot(style='g+', logy=False, ax=axes)
axes.axhline(threshold_ms / 1000., linestyle='--', color='g')
self._trace.analysis.status.plotOverutilized(axes)
axes.legend(loc='lower center', ncol=2)
axes.set_xlim(self._trace.x_min, self._trace.x_max)
# Cumulative distribution of all activations intervals
axes = plt.subplot(gs[1,0])
cdf.df.plot(ax=axes, legend=False, xlim=(0,None),
title='Activations CDF ({:.1f}% within {} [ms] threshold)'\
.format(100. * cdf.below, threshold_ms))
axes.axvspan(0, threshold_ms / 1000., facecolor='g', alpha=0.5);
axes.axhline(y=cdf.below, linewidth=1, color='r', linestyle='--')
# Histogram of all activations intervals
axes = plt.subplot(gs[1,1])
wkp_df.plot(kind='hist', bins=bins, ax=axes,
xlim=(0,ymax), legend=False,
title='Activation intervals histogram ({} bins, {} [ms] green threshold)'\
.format(bins, threshold_ms));
axes.axvspan(0, threshold_ms / 1000., facecolor='g', alpha=0.5);
# Save generated plots into datadir
task_name = re.sub('[\ :/]', '_', td.label)
figname = '{}/{}task_activations_{}_{}.png'\
.format(self._trace.plots_dir, self._trace.plots_prefix,
td.pid, task_name)
pl.savefig(figname, bbox_inches='tight')
# Return statistics
stats_df = wkp_df.describe(percentiles=[0.95, 0.99])
label = '{:.1f}%'.format(100. * cdf.below)
stats = { label : cdf.threshold }
return stats_df.append(pd.DataFrame(
stats.values(), columns=['activation_interval'], index=stats.keys()))
def plotRuntimes(self, task, tag=None, threshold_ms=8, bins=64):
"""
Plots "running times" for the specified task
A "running time" is the sum of all the time intervals a task executed
in between a wakeup and the next sleep (or exit).
A set of plots is generated to report:
- Running times at block time: every time a task blocks, a
point is plotted to represent the cumulative time the task has been
running since its last wakeup
- Running time cumulative function: reports the cumulative
function of the running times.
- Running times histogram: reports a 64-bin histogram of
the running times.
All plots are parameterized based on the value of threshold_ms, which
can be used to filter running times bigger than 2 times this value.
Such a threshold is useful to filter outliers out of the plots, thus
focusing the analysis on the most critical periodicity under analysis.
The number and percentage of discarded samples are reported in output.
A default threshold of 8 [ms] is used, which is useful for example to
analyze a 60Hz rendering pipeline.
A PNG of the generated plots is generated and saved in the same folder
where the trace is.
:param task: the task to report latencies for
:type task: int or list(str)
:param tag: a string to add to the plot title
:type tag: str
:param threshold_ms: the minimum acceptable [ms] value to report
graphically in the generated plots
:type threshold_ms: int or float
:param bins: number of bins to be used for the runtime's histogram
:type bins: int
:returns: a DataFrame with statistics on plotted running times
"""
if not self._trace.hasEvents('sched_switch'):
self._log.warning('Event [sched_switch] not found, '
'plot DISABLED!')
return
if not self._trace.hasEvents('sched_wakeup'):
self._log.warning('Event [sched_wakeup] not found, '
'plot DISABLED!')
return
# Get task data
td = self._getTaskData(task)
if not td:
return None
# Load runtime data
run_df = self._dfg_runtimes_df(td.pid)
if run_df is None:
return None
self._log.info('Found: %5d activations for [%s]',
len(run_df), td.label)
# Disregard data above two times the specified threshold
y_max = (2 * threshold_ms) / 1000.
len_tot = len(run_df)
run_df = run_df[run_df.running_time <= y_max]
len_plt = len(run_df)
if len_plt < len_tot:
len_dif = len_tot - len_plt
len_pct = 100. * len_dif / len_tot
self._log.warning('Discarding {} running times (above 2 x threshold_ms, '
'{:.1f}% of the overall activations)'\
.format(len_dif, len_pct))
ymax = 1.1 * run_df.running_time.max()
# Build the series for the CDF
cdf = self._getCDF(run_df.running_time, (threshold_ms / 1000.))
self._log.info('%.1f %% samples below %d [ms] threshold',
100. * cdf.below, threshold_ms)
# Setup plots
gs = gridspec.GridSpec(2, 2, height_ratios=[2,1], width_ratios=[1,1])
plt.figure(figsize=(16, 8))
plot_title = "[{}]: running times (@ block time)".format(td.label)
if tag:
plot_title = "{} [{}]".format(plot_title, tag)
plot_title = "{}, threshold @ {} [ms]".format(plot_title, threshold_ms)
# Running time over time
axes = plt.subplot(gs[0,0:2])
axes.set_title(plot_title)
run_df.plot(style='g+', logy=False, ax=axes)
axes.axhline(threshold_ms / 1000., linestyle='--', color='g')
self._trace.analysis.status.plotOverutilized(axes)
axes.legend(loc='lower center', ncol=2)
axes.set_xlim(self._trace.x_min, self._trace.x_max)
# Cumulative distribution of all running times
axes = plt.subplot(gs[1,0])
cdf.df.plot(ax=axes, legend=False, xlim=(0,None),
title='Runtime CDF ({:.1f}% within {} [ms] threshold)'\
.format(100. * cdf.below, threshold_ms))
axes.axvspan(0, threshold_ms / 1000., facecolor='g', alpha=0.5);
axes.axhline(y=cdf.below, linewidth=1, color='r', linestyle='--')
# Histogram of all running times
axes = plt.subplot(gs[1,1])
run_df.plot(kind='hist', bins=bins, ax=axes,
xlim=(0,ymax), legend=False,
title='Runtime histogram ({} bins, {} [ms] green threshold)'\
.format(bins, threshold_ms));
axes.axvspan(0, threshold_ms / 1000., facecolor='g', alpha=0.5);
# Save generated plots into datadir
task_name = re.sub('[\ :/]', '_', td.label)
figname = '{}/{}task_runtimes_{}_{}.png'\
.format(self._trace.plots_dir, self._trace.plots_prefix,
td.pid, task_name)
pl.savefig(figname, bbox_inches='tight')
# Return statistics
stats_df = run_df.describe(percentiles=[0.95, 0.99])
label = '{:.1f}%'.format(100. * cdf.below)
stats = { label : cdf.threshold }
return stats_df.append(pd.DataFrame(
stats.values(), columns=['running_time'], index=stats.keys()))
def plotTaskResidency(self, task):
"""
Plot CPU residency of the specified task
This will show an overview of how much time that task spent being
active on each available CPU, in seconds.
:param task: the task to report runtimes for
:type task: int or str
"""
df = self._dfg_task_residency(task)
ax = df.plot(kind='bar', figsize=(16, 6))
ax.set_title('CPU residency of task {}'.format(task))
figname = os.path.join(
self._trace.plots_dir,
'{}task_cpu_residency_{}.png'.format(
self._trace.plots_prefix, task
)
)
pl.savefig(figname, bbox_inches='tight')
###############################################################################
# Utility Methods
###############################################################################
@memoized
def _getTaskData(self, task):
# Get task PID
if isinstance(task, str):
task_pids = self._trace.getTaskByName(task)
if len(task_pids) == 0:
self._log.warning('No tasks found with name [%s]', task)
return None
task_pid = task_pids[0]
if len(task_pids) > 1:
self._log.warning('Multiple PIDs for task named [%s]', task)
for pid in task_pids:
self._log.warning(' %5d : %s', pid,
','.join(self._trace.getTaskByPid(pid)))
self._log.warning('Returning stats only for PID: %d',
task_pid)
task_name = self._trace.getTaskByPid(task_pid)
# Get task name
elif isinstance(task, int):
task_pid = task
task_name = self._trace.getTaskByPid(task_pid)
if task_name is None:
self._log.warning('No tasks found with PID [%s]', task)
return None
else:
raise ValueError("Task must be either an int or str")
task_label = "{}: {}".format(task_pid, task_name)
return TaskData(task_pid, task_name, task_label)
@memoized
def _taskState(self, state):
try:
state = int(state)
except ValueError:
# State already converted to symbol
return state
# Tasks STATE flags (Linux 3.18)
TASK_STATES = {
0: "R", # TASK_RUNNING
1: "S", # TASK_INTERRUPTIBLE
2: "D", # TASK_UNINTERRUPTIBLE
4: "T", # __TASK_STOPPED
8: "t", # __TASK_TRACED
16: "X", # EXIT_DEAD
32: "Z", # EXIT_ZOMBIE
64: "x", # TASK_DEAD
128: "K", # TASK_WAKEKILL
256: "W", # TASK_WAKING
512: "P", # TASK_PARKED
1024: "N", # TASK_NOLOAD
}
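# Worked example: a raw state of 130 has bits 2 (D, uninterruptible)
# and 128 (K, wakekill) set, so it decodes to "D|K" (symbol order
# depends on dict iteration order).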
kver = self._trace.platform['kernel']['parts']
if kver is None:
kver = (3, 18)
self._log.info('Parsing sched_switch states assuming kernel v%d.%d',
kver[0], kver[1])
if kver >= (4, 8):
TASK_STATES[2048] = "n" # TASK_NEW
TASK_MAX_STATE = 2 * max(TASK_STATES)
res = "R"
if state & (TASK_MAX_STATE - 1) != 0:
res = ""
for key in TASK_STATES.keys():
if key & state:
res += TASK_STATES[key]
if state & TASK_MAX_STATE:
res += "+"
else:
res = '|'.join(res)
return res
def _getCDF(self, data, threshold):
"""
Build the "Cumulative Distribution Function" (CDF) for the given data
"""
# Build the series of sorted values
ser = data.sort_values()
if len(ser) < 1000:
# Append again the last (and largest) value.
# This step is important especially for small sample sizes
# in order to get an unbiased CDF
ser = ser.append( | pd.Series(ser.iloc[-1]) | pandas.Series |
from __future__ import print_function
import os
import pandas as pd
import xgboost as xgb
import time
import shutil
from sklearn import preprocessing
from sklearn.cross_validation import train_test_split
import numpy as np
from sklearn.utils import shuffle
from sklearn import metrics
import sys
def archive_results(filename,results,algo,script):
"""
:type algo: basestring
:type script: basestring
:type results: DataFrame
"""
#assert results == pd.DataFrame
now=time.localtime()[0:5]
dirname='../archive'
subdirfmt='%4d-%02d-%02d-%02d-%02d'
subdir=subdirfmt %now
if not os.path.exists(os.path.join(dirname,str(algo))):
os.mkdir(os.path.join(dirname,str(algo)))
dir_to_create=os.path.join(dirname,str(algo),subdir)
if not os.path.exists(dir_to_create):
os.mkdir(dir_to_create)
os.chdir(dir_to_create)
results.to_csv(filename,index=False,float_format='%.6f')
shutil.copy2(script,'.')
return
###############################################################################################
# support class to redirect stderr
class flushfile():
def __init__(self, f):
self.f = f
def __getattr__(self,name):
return object.__getattribute__(self.f, name)
def write(self, x):
self.f.write(x)
self.f.flush()
def flush(self):
self.f.flush()
# Stderr
oldstderr = sys.stderr # global
def capture_stderr(log):
oldstderr = sys.stderr
sys.stderr = open(log, 'w')
sys.stderr = flushfile(sys.stderr)
return log
def restore_stderr():
sys.stderr = oldstderr
def parse_xgblog(xgblog):
import re
pattern = re.compile(r'^\[(?P<round>\d+)\]\s*\D+:(?P<validation>\d+.\d+)\s*\D+:(?P<train>\d+.\d+)')
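# Matches xgboost evaluation lines of the (assumed) form:
#   [12]    eval-logloss:0.123456   train-logloss:0.098765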
xgb_list = []
with open(xgblog, "r") as ins:
next(ins)
for line in ins:
match = pattern.match(line)
if match:
idx = int(match.group("round"))
validation = float(match.group("validation"))
training = float(match.group("train"))
xgb_list.append([idx, validation, training])
else:
pass # raise Exception("Failed to parse!")
return xgb_list
def preprocess_data(train,test):
id_test=test['patient_id']
train=train.drop(['patient_id'],axis=1)
test=test.drop(['patient_id'],axis=1)
y=train['is_screener']
train=train.drop(['is_screener'],axis=1)
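# Label-encode every string column, fitting on train+test together so
# both frames share a consistent category-to-integer mapping.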
for f in train.columns:
if train[f].dtype == 'object':
print(f)
lbl = preprocessing.LabelEncoder()
lbl.fit(list(train[f].values) + list(test[f].values))
train[f] = lbl.transform(list(train[f].values))
test[f] = lbl.transform(list(test[f].values))
return id_test,test,train,y
os.chdir(os.getcwd())
train_file = '../input/patients_train.csv.gz'
test_file = '../input/patients_test.csv.gz'
train = pd.read_csv(train_file)
test = pd.read_csv(test_file)
train.drop( 'patient_gender', axis = 1, inplace = True )
test.drop( 'patient_gender', axis = 1, inplace = True )
########## last activity files
activity_file=('../input/activity_selected_last.csv.gz')
diagnosis_file=('../input/diagnosis_selected_last.csv.gz')
procedure_file=('../input/procedure_selected_last.csv.gz')
surgical_file=('../input/surgical_selected_last.csv.gz')
prescription_file=('../input/prescription_selected_last.csv.gz')
physicians_file=('../input/physicians.csv.gz')
drugs_file=('../input/drugs.csv.gz')
############ first activity files
activity_file_first=('../input/activity_selected_last.csv.gz')
diagnosis_file_first=('../input/diagnosis_selected_last.csv.gz')
procedure_file_first=('../input/procedure_selected_last.csv.gz')
surgical_file_first=('../input/surgical_selected_last.csv.gz')
prescription_file=('../input/prescription_selected_last.csv.gz')
activity=pd.read_csv(activity_file )
#Fa=pd.read_csv(activity_file_first,usecols=['activity_year'])
#print(Fa)
#activity['activity_first_year']=Fa['activity_year']
#activity['delta_time_activity']=activity['activity_year']-activity['activity_first_year']
#print(activity[activity['delta_time_activity']!=0,'delta_time_activity'])
train=pd.merge(train,activity, on='patient_id',how='left')
test=pd.merge(test,activity, on='patient_id',how='left')
print('after merging activity')
print(train.shape,test.shape)
procedure=pd.read_csv(procedure_file )
diagnosis=pd.read_csv(diagnosis_file)
diagnosis=pd.merge(diagnosis,procedure,on=['patient_id','claim_id'],how='left')
train=pd.merge(train,diagnosis, on='patient_id',how='left')
test=pd.merge(test,diagnosis, on='patient_id',how='left')
print('after merging diagnosis ')
print(train.shape,test.shape)
prescription=pd.read_csv(prescription_file)
drugs=pd.read_csv(drugs_file)
physicians=pd.read_csv(physicians_file)
prescription=pd.merge(prescription,drugs ,on='drug_id',how='left')
prescription=pd.merge(prescription,physicians,on='practitioner_id',how='left')
train=pd.merge(train,prescription,on='patient_id',how='left')
test= | pd.merge(test,prescription,on='patient_id',how='left') | pandas.merge |
import pandas as pd
def prep_events(events: pd.DataFrame, sort_by_date: bool = True):
events_p = events.copy()
events_p['asPitcher'] = 1
events_p['playerId'] = events_p['pitcherId']
events_p['teamId'] = events_p['pitcherTeamId']
events_h = events.copy()
events_h['asPitcher'] = 0
events_h['playerId'] = events_p['hitterId']
events_h['teamId'] = events_p['hitterTeamId']
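# Stack the pitcher-view and hitter-view copies so every event yields
# one row per involved player (asPitcher flags which role the row is).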
if sort_by_date:
events_stacked = pd.concat([events_p, events_h]).sort_values(
by=['dailyDataDate', 'gamePk', 'inning']).reset_index(
drop=True)
events_stacked['dailyDataDate'] = pd.to_datetime(events_stacked['dailyDataDate'], format='%Y%m%d')
else:
events_stacked = | pd.concat([events_p, events_h]) | pandas.concat |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from __future__ import division
from __future__ import absolute_import
from __future__ import print_function
import warnings
from functools import reduce
import numpy as np
import pandas as pd
from pandas.io import pytables
from .measures.transformation import time_interpolate as time_interpolate_
from .measures.transformation import transformations_matrix
from ..utils import print_progress
import logging
log = logging.getLogger(__name__)
__all__ = []
class Trajectories(pd.DataFrame):
"""
This class is a subclass of the class :class:`pandas.DataFrame`.
It is mainly here to provide utility attributes and syntactic sugar.
Attributes
----------
t_stamps : ndarray
unique values of the `t_stamps` index of `self.trajs`
labels : ndarray
unique values of the `labels` index of `self.trajs`
iter_segments : iterator
yields a `(label, segment)` pair where `label` is iterated over `self.labels`
and `segment` is a chunk of `self.trajs`
segment_idxs : dictionnary
Keys are the segment label and values are a list
of `(t_stamp, label)` tuples for each time point of the segment
Parameters
----------
trajs : :class:`pandas.DataFrame`
Examples
--------
>>> from sktracker import data
>>> from sktracker.trajectories import Trajectories
>>>
>>> trajs = data.with_gaps_df()
>>> trajs = Trajectories(trajs)
>>>
>>> # One of the available method can display trajectories with matplotlib.
>>> trajs.show(xaxis='t', yaxis='x')
<matplotlib.axes.AxesSubplot at 0x7f027ecc2cf8>
"""
def __init__(self, *args, **kwargs):
"""
"""
super(self.__class__, self).__init__(*args, **kwargs)
@classmethod
def empty_trajs(cls, columns=['x', 'y', 'z']):
empty_index = pd.MultiIndex.from_arrays(np.empty((2, 0)),
names=['t_stamp', 'label'])
empty_trajs = pd.DataFrame(np.empty((0, len(columns))),
index=empty_index,
columns=columns)
return cls(empty_trajs)
def check_trajs_df_structure(self, index=None, columns=None):
"""Check wether trajectories contains a specified structure.
Parameters
----------
index : list
Index names (order is important)
columns : list
Column names (order does not matter here)
Raises
------
ValueError in both case
"""
error_mess = "Trajectories does not contain correct indexes : {}"
if index and self.index.names != index:
raise ValueError(error_mess.format(index))
error_mess = "Trajectories does not contain correct columns : {}"
if columns:
columns = set(columns)
if not columns.issubset(set(self.columns)):
raise ValueError(error_mess.format(columns))
# Trajs getter methods
@property
def t_stamps(self):
return self.index.get_level_values('t_stamp').unique()
@property
def labels(self):
if 'label' in self.columns:
return self['label'].unique()
else:
return self.index.get_level_values('label').unique()
@property
def segment_idxs(self):
return self.groupby(level='label').groups
@property
def iter_segments(self):
for lbl, idxs in self.segment_idxs.items():
yield lbl, self.loc[idxs]
def get_bounds(self, column=None):
"""Get bounds of all segments.
Parameters
----------
column : string
By default the method will return bounds as 't_stamp'. If you want another value from
column ('t' for example), you can put the column's name here.
asarray : bool
Return bounds as dict or as array when it's True
Returns
-------
bounds as dict or ndarray
"""
all_segs = self.segment_idxs.items()
if column:
bounds = {k: (self.loc[v[0], column], self.loc[v[-1], column]) for k, v in all_segs}
else:
bounds = {k: (v[0][0], v[-1][0]) for k, v in all_segs}
return bounds
def get_segments(self):
A segment contains all the data from `self.trajs` sharing the same label.
Returns
-------
A dict with labels as keys and segments as values
"""
return {key: segment for key, segment
in self.iter_segments}
def get_longest_segments(self, n):
"""Get the n th longest segments label indexes.
Parameters
----------
n : int
"""
idxs = self.segment_idxs
return list(dict(sorted(idxs.items(), key=lambda x: len(x[1]))[-n:]).keys())
def get_shortest_segments(self, n):
"""Get the n th shortest segments label indexes.
Parameters
----------
n : int
"""
idxs = self.segment_idxs
return list(dict(sorted(idxs.items(), key=lambda x: len(x[1]))[:n]).keys())
def copy(self):
"""
"""
trajs = super(self.__class__, self).copy()
return Trajectories(trajs)
def get_colors(self, cmap="hsv", alpha=None, rgba=False):
'''Get color for each label.
Parameters
----------
cmap : string
See http://matplotlib.org/examples/color/colormaps_reference.html for a list of
available colormaps.
alpha : float
Between 0 and 1 to add transparency on color.
rgba : bool
If True return RGBA tuple for each color. If False return HTML color code.
Returns
-------
dict of `label : color` pairs for each segment.
'''
import matplotlib.pyplot as plt
cmap = plt.cm.get_cmap(name=cmap)
n = len(self.labels)
ite = zip(np.linspace(0, 0.9, n), self.labels)
colors = {label: cmap(i, alpha=alpha) for i, label in ite}
if not rgba:
def get_hex(rgba):
rgba = np.round(np.array(rgba) * 255).astype('int')
if not alpha:
rgba = rgba[:3]
return "#" + "".join(['{:02X}'.format(a) for a in rgba])
colors = {label: get_hex(color) for label, color in colors.items()}
return colors
def get_t_stamps_correspondences(self, data_values, column):
"""For a given column return 't_stamp' values corresponding to 1d vector of this column.
Parameters
----------
data_values : 1d np.ndarray
Data found in self[column]
column : str
Which column to use.
Returns
-------
np.ndarray with the same length as 'data_values'.
"""
t_stamps = self.t_stamps
values = self[column].unique()
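# argsort + searchsorted maps each requested value to its position in
# the unique-value array; that position then indexes the matching
# t_stamp (assumes column values and t_stamps correspond one-to-one).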
index = np.argsort(values)
values_sorted = values[index]
sorted_index = np.searchsorted(values_sorted, data_values)
yindex = np.take(index, sorted_index)
return t_stamps[yindex]
# Segment / spot modification methods
def remove_spots(self, spots, inplace=False):
"""Remove spots identified by (t_stamp, label).
Parameters
----------
spots : tuple or list of tuple
Each tuple must contain (t_stamp, label) to remove.
Returns
-------
Copy of modified trajectories or None when inplace is True.
"""
return Trajectories(self.drop(spots, inplace=inplace))
def remove_segments(self, segments_idx, inplace=False):
"""Remove segments from trajectories.
Parameters
----------
segments_idx : list
List of label to remove
"""
return Trajectories(self.drop(segments_idx, level='label', inplace=inplace))
def merge_segments(self, labels, inplace=False):
"""Merge segments from a list of labels. If spots have the same t_stamp, only the first spot
for the t_stamp is keept (we may want to reconsider that behaviour later).
Parameters
----------
labels : list
Labels to merge.
Returns
-------
Copy of modified trajectories or None when inplace is True.
"""
if inplace:
trajs = self
else:
trajs = self.copy()
new_label = labels[0]
trajs.sortlevel(inplace=True)
trajs.loc[:, 'new_label'] = trajs.index.get_level_values('label').values
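# Relabelling strategy: write the merged label into a temporary
# 'new_label' column, then rebuild the (t_stamp, label) MultiIndex
# from it below.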
idx = pd.IndexSlice
for label in labels[1:]:
trajs.loc[idx[:, label], 'new_label'] = new_label
trajs.reset_index('label', inplace=True)
trajs.drop('label', axis=1, inplace=True)
trajs.rename(columns={'new_label': 'label'}, inplace=True)
trajs.set_index('label', append=True, inplace=True)
trajs.sortlevel(inplace=True)
# Remove duplicate spots from the same t_stamp
gps = trajs.groupby(level=['t_stamp', 'label'])
trajs = Trajectories(gps.apply(lambda x: x.iloc[0]))
if inplace:
return None
else:
return trajs
def cut_segments(self, spot, inplace=False):
"""Cut segment. All spots with same label as `spot` and with `t_stamp` greater than
`spot` will have a new label.
Parameters
----------
spot : tuple
Must contain (t_stamp, label)
Returns
-------
Copy of modified trajectories or None when inplace is True.
"""
if inplace:
trajs = self
else:
trajs = self.copy()
trajs.sortlevel(inplace=True)
t_stamp, label = spot
new_label = trajs.index.get_level_values('label').max() + 1
trajs.loc[:, 'new_label'] = trajs.index.get_level_values('label').values
idxs = (trajs.index.get_level_values('t_stamp') > t_stamp) & (trajs.index.get_level_values('label') == label)
trajs.loc[idxs, 'new_label'] = new_label
trajs.reset_index('label', inplace=True)
trajs.drop('label', axis=1, inplace=True)
trajs.rename(columns={'new_label': 'label'}, inplace=True)
trajs.set_index('label', append=True, inplace=True)
trajs.sortlevel(inplace=True)
if inplace:
return None
else:
return trajs
def duplicate_segments(self, label):
"""Duplicate segment.
Parameters
----------
label : int
Label index.
Returns
-------
Copy of modified :class:`sktracker.trajectories.Trajectories` or None when inplace is
True.
"""
trajs = self.copy()
new_label = trajs.labels.max() + 1
index_names = trajs.index.names
trajs.reset_index(inplace=True)
new_segment = trajs[trajs['label'] == label].copy()
new_segment.loc[:, 'label'] = new_label
trajs = Trajectories( | pd.concat([trajs, new_segment]) | pandas.concat |
"""
Cleaning and transforming the single pandas dataframe
"""
import os
import numpy as np
import pandas as pd
from scipy import stats
def clean_and_wrangle(_df, station_info_path=os.path.join('references', 'nyc_subway_station_info.csv')):
# Create datetime columns
_df['DATETIME'] = pd.to_datetime(_df['DATE'] + ' ' + _df['TIME']) # Create Datetime variable
_df['DATE'] = | pd.to_datetime(_df['DATE'], infer_datetime_format=True) | pandas.to_datetime |
# ============================================================================ #
# IMPORTS #
# ============================================================================ #
import numpy as np
import time
import tqdm
import cv2
import pandas as pd
import os
from collections import defaultdict
from sklearn.metrics import confusion_matrix
from sklearn.utils.multiclass import unique_labels
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
# ============================================================================ #
# TOOLS #
# ============================================================================ #
def display_imgs(imgs):
columns = 3
rows = 3
img_nums = np.random.choice(len(imgs), columns * rows)
img_data = imgs[img_nums]
fig = plt.figure(figsize=(columns * 5, rows * 4))
for i in range(rows):
for j in range(columns):
idx = i + j * columns
fig.add_subplot(rows, columns, idx + 1)
plt.axis('off')
# img = img_data[idx].astype(np.float32)
img = img_data[idx]
plt.imshow(img)
plt.tight_layout()
plt.show()
def plot_history(history):
plt.figure(figsize=(8, 10))
ax_loss = plt.subplot(311)
plt.plot(history.history['loss'])
plt.plot(history.history['regression_loss'])
plt.plot(history.history['classification_loss'])
ax_loss.set_yscale('log')
plt.title('model loss')
plt.ylabel('loss')
plt.xlabel('epoch')
plt.legend(['loss', 'regression_loss', 'classification_loss'])
plt.subplot(312)
plt.plot(history.history['mAP'])
plt.title('mAP')
plt.xlabel('epoch')
ax_lr = plt.subplot(313)
plt.plot(history.history['lr'])
plt.title('learning rate')
plt.xlabel('epoch')
ax_lr.set_yscale('log')
plt.subplots_adjust(hspace=0.5)
plt.show()
# ============================================================================ #
# LOAD DATASETS #
# ============================================================================ #
# ------------------------------ helper functions ------------------------------
def load_images(paths, N_max=1000, alpha_chnl=False):
'''
Purpose: load images from paths.
For drone images, use the alpha channel.
'''
if len(paths) > N_max:
paths = paths[0: N_max]
# imgs = np.zeros((len(paths), *img_shape, 3), dtype=np.uint8)
imgs = []
for num, img_path in enumerate(paths):
if alpha_chnl:
img = cv2.imread(img_path, cv2.IMREAD_UNCHANGED)
imgs.append(cv2.cvtColor(img, cv2.COLOR_BGRA2RGBA))
else:
img = cv2.imread(img_path, cv2.IMREAD_COLOR)
imgs.append(cv2.cvtColor(img, cv2.COLOR_BGR2RGB))
return np.array(imgs)
def read_file_paths(file_name):
'''
Purpose: Read file paths from data frame
'''
data = | pd.read_csv(file_name) | pandas.read_csv |
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from pathlib import Path
# function for loading data from disk
def load_data():
"""
this function is responsible for loading traing data from disk.
and performs some basic opertaions like
- one-hot encoding
- feature scaling
- reshaping data
Parameters:
(no-parameters)
Returns:
X : numpy array (contains all features of training data)
y : numpy array (contains all targets of training data)
"""
path = "../data/train.csv"
if(not Path(path).is_file()):
print("[util]: train data not found at '",path,"'")
#quit()
print("[util]: Loading '",path,"'")
train = pd.read_csv(path)
y = np.array(pd.get_dummies(train['label']))
X = train.drop(['label'], axis=1)
X = np.array(X/255)
X = X.reshape(X.shape + (1,))
y = y.reshape(y.shape + (1,))
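# Resulting shapes: X is (n_samples, 784, 1) scaled to [0, 1],
# y is (n_samples, 10, 1) one-hot encoded.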
del train
return X, y
# sigmoid activation function with derivative
def sigmoid(x, derivative=False):
if(derivative):
return sigmoid(x) * (1 - sigmoid(x))
return 1.0/(1.0 + np.exp(-x))
# relu activation function with derivative
def relu(x, derivative=False):
if(derivative):
return x > 0
return np.maximum(x, 0)
# softmax activation function
def softmax(z):
return np.exp(z) / np.sum(np.exp(z))
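# Note: this plain form can overflow for large z; subtracting z.max()
# from z before exponentiating is the usual numerically stable variant.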
# function for viewing digit from numpy array
def view_digit(x, title):
"""
function for viewing one sample
Parameters:
x : numpy array (contains one sample of all features)
title: string (a predicted digit)
Returns:
(no-returns)
"""
plt.matshow(x.reshape(28,28))
plt.suptitle(" predicted as "+title)
plt.show()
# function for shuffling the features and labels
def shuffle(X, y):
"""
function for shuffling both features and targets.
Parameters:
X : numpy array (contains all features)
y : numpy array (contains all targets)
Returns:
(no-returns)
"""
n = np.random.randint(1, 100)
np.random.seed(n)
np.random.shuffle(X)
np.random.seed(n)
np.random.shuffle(y)
# custom function for loading kaggle test data
def load_test_data():
"""
this function is responsible for loading test data from disk.
Parameters:
(no-parameters)
Returns:
kt : numpy array (contains all features of test data)
"""
path = "../data/test.csv"
if(not Path(path).is_file()):
print("[util]: test data not found at '",path,"'")
#quit()
print("[util]: Loading test data from: '",path,"'")
test = | pd.read_csv(path) | pandas.read_csv |
import torch
import json
from foobar.model.lstm import LSTM
from foobar.model.model_loader import download_model
from foobar.db_utils.cassandra_utils import query_table
from foobar.prediction.predictor import prediction
import os
import pandas as pd
import numpy as np
import boto3
class FinnHubPredictor:
def __init__(self,
gamestop_table : str,
bucket_name : str = None,
bucket=None) :
self.MODEL_FILE = "m1.pth"
self.LOCAL_FILE = self.MODEL_FILE
self.TIMESTAMP_COLUMN = "hour"
self.GAMESTOP_TABLE = gamestop_table
self.BUCKET_NAME = bucket_name
if bucket is None:
session = boto3.Session(
aws_access_key_id=os.environ['AWS_ACCESS_KEY_ID'],
aws_secret_access_key=os.environ['AWS_SECRET_KEY'],
region_name=os.environ['REGION_NAME'])
s3 = session.resource('s3')
self.bucket = s3.Bucket(bucket_name)
else:
self.bucket = bucket
def predictNewData(self, newdata_df=None):
# if historicaldata_ is None:
# historicaldata_ = query_table(self.GAMESTOP_TABLE)
print("queried gamestop table")
if newdata_df is None:
print("Cant continue without data")
return None
data_ = newdata_df
newpredictionids = data_[data_['prediction_finn'] == -1]
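# Rows still carrying the -1 sentinel have no stored prediction yet;
# only those hours are kept from the freshly produced predictions below.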
predictions = self.producePredictions(data_)
if predictions is None : return None
newpredictions = | pd.merge(predictions, newpredictionids[['hour']], on='hour') | pandas.merge |
import numpy as np
import pytest
import pandas as pd
from pandas import (
Categorical,
DataFrame,
Index,
Series,
)
import pandas._testing as tm
dt_data = [
pd.Timestamp("2011-01-01"),
pd.Timestamp("2011-01-02"),
pd.Timestamp("2011-01-03"),
]
tz_data = [
pd.Timestamp("2011-01-01", tz="US/Eastern"),
pd.Timestamp("2011-01-02", tz="US/Eastern"),
pd.Timestamp("2011-01-03", tz="US/Eastern"),
]
td_data = [
pd.Timedelta("1 days"),
pd.Timedelta("2 days"),
pd.Timedelta("3 days"),
]
period_data = [
pd.Period("2011-01", freq="M"),
pd.Period("2011-02", freq="M"),
pd.Period("2011-03", freq="M"),
]
data_dict = {
"bool": [True, False, True],
"int64": [1, 2, 3],
"float64": [1.1, np.nan, 3.3],
"category": Categorical(["X", "Y", "Z"]),
"object": ["a", "b", "c"],
"datetime64[ns]": dt_data,
"datetime64[ns, US/Eastern]": tz_data,
"timedelta64[ns]": td_data,
"period[M]": period_data,
}
class TestConcatAppendCommon:
"""
Test common dtype coercion rules between concat and append.
"""
@pytest.fixture(params=sorted(data_dict.keys()))
def item(self, request):
key = request.param
return key, data_dict[key]
item2 = item
def _check_expected_dtype(self, obj, label):
"""
Check whether obj has expected dtype depending on label
considering not-supported dtypes
"""
if isinstance(obj, Index):
assert obj.dtype == label
elif isinstance(obj, Series):
if label.startswith("period"):
assert obj.dtype == "Period[M]"
else:
assert obj.dtype == label
else:
raise ValueError
def test_dtypes(self, item):
# to confirm test case covers intended dtypes
typ, vals = item
self._check_expected_dtype(Index(vals), typ)
self._check_expected_dtype(Series(vals), typ)
def test_concatlike_same_dtypes(self, item):
# GH 13660
typ1, vals1 = item
vals2 = vals1
vals3 = vals1
if typ1 == "category":
exp_data = Categorical(list(vals1) + list(vals2))
exp_data3 = Categorical(list(vals1) + list(vals2) + list(vals3))
else:
exp_data = vals1 + vals2
exp_data3 = vals1 + vals2 + vals3
# ----- Index ----- #
# index.append
res = Index(vals1).append(Index(vals2))
exp = Index(exp_data)
tm.assert_index_equal(res, exp)
# 3 elements
res = Index(vals1).append([Index(vals2), Index(vals3)])
exp = Index(exp_data3)
tm.assert_index_equal(res, exp)
# index.append name mismatch
i1 = Index(vals1, name="x")
i2 = Index(vals2, name="y")
res = i1.append(i2)
exp = Index(exp_data)
tm.assert_index_equal(res, exp)
# index.append name match
i1 = Index(vals1, name="x")
i2 = Index(vals2, name="x")
res = i1.append(i2)
exp = Index(exp_data, name="x")
tm.assert_index_equal(res, exp)
# cannot append non-index
with pytest.raises(TypeError, match="all inputs must be Index"):
Index(vals1).append(vals2)
with pytest.raises(TypeError, match="all inputs must be Index"):
Index(vals1).append([Index(vals2), vals3])
# ----- Series ----- #
# series.append
res = Series(vals1)._append(Series(vals2), ignore_index=True)
exp = Series(exp_data)
tm.assert_series_equal(res, exp, check_index_type=True)
# concat
res = pd.concat([Series(vals1), Series(vals2)], ignore_index=True)
tm.assert_series_equal(res, exp, check_index_type=True)
# 3 elements
res = Series(vals1)._append([Series(vals2), Series(vals3)], ignore_index=True)
exp = Series(exp_data3)
tm.assert_series_equal(res, exp)
res = pd.concat(
[Series(vals1), Series(vals2), Series(vals3)],
ignore_index=True,
)
tm.assert_series_equal(res, exp)
# name mismatch
s1 = Series(vals1, name="x")
s2 = Series(vals2, name="y")
res = s1._append(s2, ignore_index=True)
exp = Series(exp_data)
tm.assert_series_equal(res, exp, check_index_type=True)
res = pd.concat([s1, s2], ignore_index=True)
tm.assert_series_equal(res, exp, check_index_type=True)
# name match
s1 = Series(vals1, name="x")
s2 = Series(vals2, name="x")
res = s1._append(s2, ignore_index=True)
exp = Series(exp_data, name="x")
tm.assert_series_equal(res, exp, check_index_type=True)
res = pd.concat([s1, s2], ignore_index=True)
tm.assert_series_equal(res, exp, check_index_type=True)
# cannot append non-index
msg = (
r"cannot concatenate object of type '.+'; "
"only Series and DataFrame objs are valid"
)
with pytest.raises(TypeError, match=msg):
Series(vals1)._append(vals2)
with pytest.raises(TypeError, match=msg):
Series(vals1)._append([Series(vals2), vals3])
with pytest.raises(TypeError, match=msg):
pd.concat([Series(vals1), vals2])
with pytest.raises(TypeError, match=msg):
pd.concat([Series(vals1), Series(vals2), vals3])
def test_concatlike_dtypes_coercion(self, item, item2, request):
# GH 13660
typ1, vals1 = item
typ2, vals2 = item2
vals3 = vals2
# basically infer
exp_index_dtype = None
exp_series_dtype = None
if typ1 == typ2:
# same dtype is tested in test_concatlike_same_dtypes
return
elif typ1 == "category" or typ2 == "category":
# The `vals1 + vals2` below fails bc one of these is a Categorical
# instead of a list; we have separate dedicated tests for categorical
return
warn = None
# specify expected dtype
if typ1 == "bool" and typ2 in ("int64", "float64"):
# series coerces to numeric based on numpy rule
# index doesn't because bool is object dtype
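# (e.g. for bool + int64 the Series result is expected to be int64,
# while the corresponding Index.append result stays object dtype)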
exp_series_dtype = typ2
mark = pytest.mark.xfail(reason="GH#39187 casting to object")
request.node.add_marker(mark)
warn = FutureWarning
elif typ2 == "bool" and typ1 in ("int64", "float64"):
exp_series_dtype = typ1
mark = pytest.mark.xfail(reason="GH#39187 casting to object")
request.node.add_marker(mark)
warn = FutureWarning
elif (
typ1 == "datetime64[ns, US/Eastern]"
or typ2 == "datetime64[ns, US/Eastern]"
or typ1 == "timedelta64[ns]"
or typ2 == "timedelta64[ns]"
):
exp_index_dtype = object
exp_series_dtype = object
exp_data = vals1 + vals2
exp_data3 = vals1 + vals2 + vals3
# ----- Index ----- #
# index.append
with tm.assert_produces_warning(warn, match="concatenating bool-dtype"):
# GH#39817
res = Index(vals1).append(Index(vals2))
exp = Index(exp_data, dtype=exp_index_dtype)
tm.assert_index_equal(res, exp)
# 3 elements
res = Index(vals1).append([Index(vals2), Index(vals3)])
exp = Index(exp_data3, dtype=exp_index_dtype)
tm.assert_index_equal(res, exp)
# ----- Series ----- #
# series._append
with tm.assert_produces_warning(warn, match="concatenating bool-dtype"):
# GH#39817
res = Series(vals1)._append(Series(vals2), ignore_index=True)
exp = Series(exp_data, dtype=exp_series_dtype)
tm.assert_series_equal(res, exp, check_index_type=True)
# concat
with tm.assert_produces_warning(warn, match="concatenating bool-dtype"):
# GH#39817
res = pd.concat([Series(vals1), Series(vals2)], ignore_index=True)
tm.assert_series_equal(res, exp, check_index_type=True)
# 3 elements
with tm.assert_produces_warning(warn, match="concatenating bool-dtype"):
# GH#39817
res = Series(vals1)._append(
[Series(vals2), Series(vals3)], ignore_index=True
)
exp = Series(exp_data3, dtype=exp_series_dtype)
tm.assert_series_equal(res, exp)
with tm.assert_produces_warning(warn, match="concatenating bool-dtype"):
# GH#39817
res = pd.concat(
[Series(vals1), Series(vals2), Series(vals3)],
ignore_index=True,
)
tm.assert_series_equal(res, exp)
def test_concatlike_common_coerce_to_pandas_object(self):
# GH 13626
# result must be Timestamp/Timedelta, not datetime.datetime/timedelta
dti = pd.DatetimeIndex(["2011-01-01", "2011-01-02"])
tdi = pd.TimedeltaIndex(["1 days", "2 days"])
exp = Index(
[
pd.Timestamp("2011-01-01"),
pd.Timestamp("2011-01-02"),
pd.Timedelta("1 days"),
pd.Timedelta("2 days"),
]
)
res = dti.append(tdi)
tm.assert_index_equal(res, exp)
assert isinstance(res[0], pd.Timestamp)
assert isinstance(res[-1], pd.Timedelta)
dts = Series(dti)
tds = Series(tdi)
res = dts._append(tds)
tm.assert_series_equal(res, Series(exp, index=[0, 1, 0, 1]))
assert isinstance(res.iloc[0], pd.Timestamp)
assert isinstance(res.iloc[-1], pd.Timedelta)
res = pd.concat([dts, tds])
tm.assert_series_equal(res, Series(exp, index=[0, 1, 0, 1]))
assert isinstance(res.iloc[0], pd.Timestamp)
assert isinstance(res.iloc[-1], pd.Timedelta)
def test_concatlike_datetimetz(self, tz_aware_fixture):
tz = tz_aware_fixture
# GH 7795
dti1 = pd.DatetimeIndex(["2011-01-01", "2011-01-02"], tz=tz)
dti2 = pd.DatetimeIndex(["2012-01-01", "2012-01-02"], tz=tz)
exp = pd.DatetimeIndex(
["2011-01-01", "2011-01-02", "2012-01-01", "2012-01-02"], tz=tz
)
res = dti1.append(dti2)
tm.assert_index_equal(res, exp)
dts1 = Series(dti1)
dts2 = Series(dti2)
res = dts1._append(dts2)
tm.assert_series_equal(res, Series(exp, index=[0, 1, 0, 1]))
res = pd.concat([dts1, dts2])
tm.assert_series_equal(res, Series(exp, index=[0, 1, 0, 1]))
@pytest.mark.parametrize("tz", ["UTC", "US/Eastern", "Asia/Tokyo", "EST5EDT"])
def test_concatlike_datetimetz_short(self, tz):
# GH#7795
ix1 = pd.date_range(start="2014-07-15", end="2014-07-17", freq="D", tz=tz)
ix2 = pd.DatetimeIndex(["2014-07-11", "2014-07-21"], tz=tz)
df1 = DataFrame(0, index=ix1, columns=["A", "B"])
df2 = DataFrame(0, index=ix2, columns=["A", "B"])
exp_idx = pd.DatetimeIndex(
["2014-07-15", "2014-07-16", "2014-07-17", "2014-07-11", "2014-07-21"],
tz=tz,
)
exp = DataFrame(0, index=exp_idx, columns=["A", "B"])
tm.assert_frame_equal(df1._append(df2), exp)
tm.assert_frame_equal(pd.concat([df1, df2]), exp)
def test_concatlike_datetimetz_to_object(self, tz_aware_fixture):
tz = tz_aware_fixture
# GH 13660
# different tz coerces to object
dti1 = pd.DatetimeIndex(["2011-01-01", "2011-01-02"], tz=tz)
dti2 = pd.DatetimeIndex(["2012-01-01", "2012-01-02"])
exp = Index(
[
pd.Timestamp("2011-01-01", tz=tz),
pd.Timestamp("2011-01-02", tz=tz),
pd.Timestamp("2012-01-01"),
pd.Timestamp("2012-01-02"),
],
dtype=object,
)
res = dti1.append(dti2)
tm.assert_index_equal(res, exp)
dts1 = Series(dti1)
dts2 = Series(dti2)
res = dts1._append(dts2)
tm.assert_series_equal(res, Series(exp, index=[0, 1, 0, 1]))
res = pd.concat([dts1, dts2])
tm.assert_series_equal(res, Series(exp, index=[0, 1, 0, 1]))
# different tz
dti3 = pd.DatetimeIndex(["2012-01-01", "2012-01-02"], tz="US/Pacific")
exp = Index(
[
pd.Timestamp("2011-01-01", tz=tz),
pd.Timestamp("2011-01-02", tz=tz),
pd.Timestamp("2012-01-01", tz="US/Pacific"),
pd.Timestamp("2012-01-02", tz="US/Pacific"),
],
dtype=object,
)
res = dti1.append(dti3)
tm.assert_index_equal(res, exp)
dts1 = Series(dti1)
dts3 = Series(dti3)
res = dts1._append(dts3)
tm.assert_series_equal(res, Series(exp, index=[0, 1, 0, 1]))
res = pd.concat([dts1, dts3])
tm.assert_series_equal(res, Series(exp, index=[0, 1, 0, 1]))
def test_concatlike_common_period(self):
# GH 13660
pi1 = pd.PeriodIndex(["2011-01", "2011-02"], freq="M")
pi2 = pd.PeriodIndex(["2012-01", "2012-02"], freq="M")
exp = pd.PeriodIndex(["2011-01", "2011-02", "2012-01", "2012-02"], freq="M")
res = pi1.append(pi2)
tm.assert_index_equal(res, exp)
ps1 = Series(pi1)
ps2 = Series(pi2)
res = ps1._append(ps2)
tm.assert_series_equal(res, Series(exp, index=[0, 1, 0, 1]))
res = pd.concat([ps1, ps2])
tm.assert_series_equal(res, Series(exp, index=[0, 1, 0, 1]))
def test_concatlike_common_period_diff_freq_to_object(self):
# GH 13221
pi1 = pd.PeriodIndex(["2011-01", "2011-02"], freq="M")
pi2 = pd.PeriodIndex(["2012-01-01", "2012-02-01"], freq="D")
exp = Index(
[
pd.Period("2011-01", freq="M"),
pd.Period("2011-02", freq="M"),
pd.Period("2012-01-01", freq="D"),
pd.Period("2012-02-01", freq="D"),
],
dtype=object,
)
res = pi1.append(pi2)
tm.assert_index_equal(res, exp)
ps1 = Series(pi1)
ps2 = Series(pi2)
res = ps1._append(ps2)
tm.assert_series_equal(res, Series(exp, index=[0, 1, 0, 1]))
res = pd.concat([ps1, ps2])
tm.assert_series_equal(res, Series(exp, index=[0, 1, 0, 1]))
def test_concatlike_common_period_mixed_dt_to_object(self):
# GH 13221
# different datetimelike
pi1 = pd.PeriodIndex(["2011-01", "2011-02"], freq="M")
tdi = pd.TimedeltaIndex(["1 days", "2 days"])
exp = Index(
[
pd.Period("2011-01", freq="M"),
pd.Period("2011-02", freq="M"),
pd.Timedelta("1 days"),
| pd.Timedelta("2 days") | pandas.Timedelta |
import pandas as pd
import numpy as np
from sklearn import preprocessing
from sklearn.decomposition import PCA
from sklearn.preprocessing import MinMaxScaler
from sklearn.preprocessing import StandardScaler
def generate_model_df(Ion, CrystalSystem, Spacegroup, Charge, Discharge):
"""Generates dataframe to input into the neural network model. The dataframe must contain
the working ion, crystal system, spacegroup number, charge formula, and discharge formula"""
import pandas as pd
bat_dataframe = | pd.DataFrame() | pandas.DataFrame |
from __future__ import division
import pandas as pd
from base.uber_model import UberModel, ModelSharedInputs
from .rice_functions import RiceFunctions
class RiceInputs(ModelSharedInputs):
"""
Input class for Rice.
"""
def __init__(self):
"""Class representing the inputs for Rice"""
super(RiceInputs, self).__init__()
self.mai = pd.Series([], dtype="float")
self.dsed = pd.Series([], dtype="float")
self.area = pd.Series([], dtype="float")
self.pb = pd.Series([], dtype="float")
self.dw = pd.Series([], dtype="float")
self.osed = pd.Series([], dtype="float")
self.kd = pd.Series([], dtype="float")
class RiceOutputs(object):
"""
Output class for Rice.
"""
def __init__(self):
"""Class representing the outputs for Rice"""
super(RiceOutputs, self).__init__()
self.out_msed = pd.Series(name="out_msed", dtype="object")
self.out_vw = pd.Series(name="out_vw", dtype="object")
self.out_mass_area = pd.Series(name="out_mass_area", dtype="object")
self.out_cw = | pd.Series(name="out_cw", dtype="object") | pandas.Series |
import os
import sys
import gc
import numpy as np
import pandas as pd
import math
import scipy.stats as scst
import scipy as sp
import scipy.linalg as la
from bgen_reader import read_bgen
import qtl_loader_utils
import pdb
from glimix_core.lmm import LMM
def run_QTL_analysis_load_intersect_phenotype_covariates_kinship_sample_mapping(pheno_filename, anno_filename, geno_prefix,
plinkGenotype, minimum_test_samples= 10, relatedness_score=None, cis_mode=True, skipAutosomeFiltering = False, snps_filename=None,
feature_filename=None, snp_feature_filename=None, selection='all', covariates_filename=None, randomeff_filename=None,
sample_mapping_filename=None, extended_anno_filename=None, feature_variant_covariate_filename=None):
# pheno_filename = "/Users/chaaya/dhonveli_dkfz/hipsci_pipeline/geuvadis_CEU_test_data/Expression/Geuvadis_CEU_YRI_Expr.txt.gz"
# anno_filename = "/Users/chaaya/dhonveli_dkfz/hipsci_pipeline/geuvadis_CEU_test_data/Expression/Geuvadis_CEU_Annot.txt"
# geno_prefix = "/Users/chaaya/dhonveli_dkfz/limix_qtl/limix_qtl-master/Limix_QTL/test_data/Genotypes/Geuvadis"
# plinkGenotype = "/Users/chaaya/dhonveli_dkfz/limix_qtl/limix_qtl-master/Limix_QTL/test_data/Genotypes/Geuvadis"
# minimum_test_samples = 10
# relatedness_score = 0.95
# cis_mode = True
# skipAutosomeFiltering = False
# snps_filename = None
# feature_filename = None
# snp_feature_filename = None
# selection = 'all'
# covariates_filename = "/Users/chaaya/dhonveli_dkfz/limix_qtl/limix_qtl-master/Limix_QTL/test_data/Expression/Geuvadis_CEU_YRI_covariates.txt"
# randomeff_filename = "/Users/chaaya/dhonveli_dkfz/limix_qtl/limix_qtl-master/Limix_QTL/test_data/Genotypes/Geuvadis_chr1_kinship.normalized.txt,/Users/chaaya/dhonveli_dkfz/limix_qtl/limix_qtl-master/Limix_QTL/test_data/Genotypes/Geuvadis_readdepth.txt"
# sample_mapping_filename = "/Users/chaaya/dhonveli_dkfz/limix_qtl/limix_qtl-master/Limix_QTL/test_data/Geuvadis_CEU_gte.txt"
# extended_anno_filename = None
# feature_variant_covariate_filename = None
# output_dir = "/Users/chaaya/dhonveli_dkfz/limix_qtl/limix_qtl-master/Limix_QTL/test_data/Output2/"
# window_size = 250000
# min_maf = 0.05
# min_hwe_P = 0.001
# min_call_rate = 0.95
# blocksize = 1000
# gaussianize_method = None
# genetic_range = "all"
# seed = np.random.randint(40000)
# n_perm = 0
# write_permutations = False
# regressCovariatesUpfront = False
# write_feature_top_permutations = False
# selection based on coordinates
selectionStart = None
selectionEnd = None
if(":" in selection):
parts = selection.split(":")
if("-" not in parts[1]):
print("No correct sub selection.")
print("Given in: "+selection)
print("Expected format: (chr number):(start location)-(stop location)")
sys.exit()
chromosome = parts[0]
if("-" in parts[1]):
parts2 = parts[1].split("-")
selectionStart = int(parts2[0])
selectionEnd = int(parts2[1])
else :
chromosome=selection
''' function to take input and intersect sample and genotype.'''
#Load input data files & filter for relevant data
#Load input data files
# loading phenotype and annotation files
phenotype_df = qtl_loader_utils.get_phenotype_df(pheno_filename)
annotation_df = qtl_loader_utils.get_annotation_df(anno_filename)
phenotype_df.columns = phenotype_df.columns.astype("str")
phenotype_df.index = phenotype_df.index.astype("str")
annotation_df.columns = annotation_df.columns.astype("str")
annotation_df.index = annotation_df.index.astype("str")
# loading genotype
if(plinkGenotype):
bim,fam,bed = qtl_loader_utils.get_genotype_data(geno_prefix)
bgen=None
else :
bgen = read_bgen(geno_prefix+'.bgen', verbose=False)
bed=None
fam =bgen['samples']
fam = fam.to_frame("iid")
fam.index=fam["iid"]
bim = bgen['variants'].compute()
bim = bim.assign(i = range(bim.shape[0]))
bim['id'] = bim['rsid']
bim = bim.rename(index=str, columns={"id": "snp"})
bim['a1'] = bim['allele_ids'].str.split(",", expand=True)[0]
bim.index = bim["snp"].astype(str).values
bim.index.name = "candidate"
##Fix chromosome ids
bim['chrom'].replace('^chr','',regex=True,inplace=True)
bim['chrom'].replace(['X', 'Y', 'XY', 'MT'], ['23', '24', '25', '26'],inplace=True)
##Remove non-biallelic & non-ploidy 2 (to be sure).
print("Warning, the current software only supports biallelic SNPs and ploidy 2")
bim = bim.loc[np.logical_and(bim['nalleles']<3,bim['nalleles']>0),:]
# converting chromsome names
annotation_df.replace(['X', 'Y', 'XY', 'MT'], ['23', '24', '25', '26'],inplace=True)
if chromosome=='X' :
chromosome = '23'
elif chromosome=='Y':
chromosome = '24'
elif chromosome=='XY':
chromosome='25'
elif chromosome=='MT':
chromosome='26'
print("Intersecting data.")
if(annotation_df.shape[0] != annotation_df.groupby(annotation_df.index).first().shape[0]):
print("Only one location per feature supported. If multiple locations are needed please look at: --extended_anno_file")
sys.exit()
##Make sure that there is only one entry per feature id!
sample2individual_df = qtl_loader_utils.get_samplemapping_df(sample_mapping_filename,list(phenotype_df.columns),'sample')
sample2individual_df.index = sample2individual_df.index.astype('str')
sample2individual_df = sample2individual_df.astype('str')
sample2individual_df['sample']=sample2individual_df.index
sample2individual_df = sample2individual_df.drop_duplicates();
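# sample2individual_df maps each phenotype sample (index) to its
# genotype individual id ('iid'); the filters below shrink it to the
# samples present in every provided data source.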
##Filter first the linking files!
#Subset linking to relevant genotypes.
orgSize = sample2individual_df.shape[0]
sample2individual_df = sample2individual_df.loc[sample2individual_df['iid'].map(lambda x: x in list(map(str, fam.index))),:]
diff = orgSize- sample2individual_df.shape[0]
orgSize = sample2individual_df.shape[0]
print("Dropped: "+str(diff)+" samples because they are not present in the genotype file.")
#Subset linking to relevant phenotypes.
sample2individual_df = sample2individual_df.loc[np.intersect1d(sample2individual_df.index,phenotype_df.columns),:]
diff = orgSize- sample2individual_df.shape[0]
orgSize = sample2individual_df.shape[0]
print("Dropped: "+str(diff)+" samples because they are not present in the phenotype file.")
#Subset linking vs kinship.
kinship_df = None
readdepth_df = None
if randomeff_filename is not None:
kinship_df,readdepth_df = qtl_loader_utils.get_randeff_df(randomeff_filename)
if kinship_df is not None:
#Filter from individual2sample_df & sample2individual_df since we don't want to filter from the genotypes.
sample2individual_df = sample2individual_df[sample2individual_df['iid'].map(lambda x: x in list(map(str, kinship_df.index)))]
diff = orgSize- sample2individual_df.shape[0]
orgSize = sample2individual_df.shape[0]
print("Dropped: "+str(diff)+" samples because they are not present in the kinship file.")
if readdepth_df is not None:
#This needs to come from the covariate site not the genotype side!
#Filter from individual2sample_df & sample2individual_df since we don't want to filter from the genotypes.
sample2individual_df = sample2individual_df[sample2individual_df['sample'].map(lambda x: x in list(map(str, readdepth_df.index)))]
diff = orgSize- sample2individual_df.shape[0]
orgSize = sample2individual_df.shape[0]
print("Dropped: "+str(diff)+" samples because they are not present in the second random effect file.")
#Subset linking vs covariates.
covariate_df = qtl_loader_utils.get_covariate_df(covariates_filename)
if covariate_df is not None:
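        # add an intercept column of ones unless one of the covariates is already a constant column of ones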
if np.nansum(covariate_df==1,0).max()<covariate_df.shape[0]: covariate_df.insert(0, 'ones', np.ones(covariate_df.shape[0]))
sample2individual_df = sample2individual_df.loc[list(set(sample2individual_df.index) & set(covariate_df.index)),:]
diff = orgSize- sample2individual_df.shape[0]
orgSize = sample2individual_df.shape[0]
print("Dropped: "+str(diff)+" samples because they are not present in the covariate file.")
###
print("Number of samples with genotype & phenotype data: " + str(sample2individual_df.shape[0]))
if(sample2individual_df.shape[0]<minimum_test_samples):
print("Not enough samples with both genotype & phenotype data.")
sys.exit()
##Filter now the actual data!
#Filter phenotype data based on the linking files.
phenotype_df = phenotype_df.loc[list(set(phenotype_df.index)&set(annotation_df.index)),sample2individual_df.index.values]
#Filter kinship data based on the linking files.
genetically_unique_individuals = None
if kinship_df is not None:
kinship_df = kinship_df.loc[np.intersect1d(kinship_df.index,sample2individual_df['iid']),np.intersect1d(kinship_df.index,sample2individual_df['iid'])]
if (kinship_df is not None) and (relatedness_score is not None):
genetically_unique_individuals = get_unique_genetic_samples(kinship_df, relatedness_score);
#Filter covariate data based on the linking files.
snp_feature_filter_df= qtl_loader_utils.get_snp_feature_df(snp_feature_filename)
try:
feature_filter_df = qtl_loader_utils.get_snp_df(feature_filename)
except:
if feature_filename is not None:
feature_filter_df= | pd.DataFrame(index=feature_filename) | pandas.DataFrame |
import time
from pprint import pprint
import requests
import json
from shapely.geometry import Point
import geopandas as gpd
import pandas as pd
from geopandas import GeoDataFrame
import matplotlib.pyplot as plt
import numpy as np
API_KEY = '<KEY>' # normal account
FILENAME = 'crawled_ips.txt'
RESPONSE_FILE = 'responses.json'
def get_ips(filename: str) -> list:
    """ Reads IPs from the given file, one IP per line """
ips = []
with open(filename, 'r') as f:
ips = f.read().splitlines()
return ips
def fetch_locations(ips: list) -> list:
    """ Use IPs given in list format to make API calls to ipstack
to get locations
"""
info = []
count = 1
responses = []
for ip in ips:
api = 'http://api.ipstack.com/' + ip + '?access_key=' + API_KEY
response = requests.get(api).json()
print('({}/{}) Found location for {} at {}, {} ({}, {})'.format( \
count,
len(ips),
ip,
response['country_name'],
response['city'],
response['latitude'],
response['longitude']))
info.append((response['latitude'], response['longitude'], \
response['country_name'], response['city'], \
response['connection']['isp']))
count += 1
responses.append(response)
save_response_to_file(responses)
return info
def load_from_file(filename: str) -> list:
"""Can load the files provided they are in the same json format as
what is given in the free version of ipstack
"""
with open(filename, 'r') as f:
responses = json.load(f)
info = []
for response in responses:
info.append((response['latitude'], response['longitude'], \
response['country_name'], response['city'], \
response['connection']['isp']))
return info
def save_response_to_file(responses: dict):
""" Saves the responses from fetching to a file"""
with open(RESPONSE_FILE, 'w') as f:
j = json.dumps(responses, indent=4)
print(j, file=f)
def format_pct(df_column):
    """Used for formatting the numbers on the pie chart; the pie plot accepts autopct as a
    keyword arg that requires a function reference like the one returned here"""
def format(val):
a = np.round(val/100*sum(df_column.tolist()), 0)
return '%d(%.2f%s)' % (int(a), val, '%')
return format
def country_breakdown(info:list, countries: list):
    """Plots an ISP breakdown pie chart for each of the given countries"""
d = {'isp': [], 'country': []}
for coord in info:
d['isp'].append(coord[-1])
d['country'].append(coord[2])
df = pd.DataFrame(data=d)
mask1 = df['country'].isin(countries)
others = df[~mask1]
print(others)
others = others.groupby('country', as_index=False)['country'].agg({'isp_country_count':'count'}).set_index('country')
others = others.sort_values(by='isp_country_count', ascending=False)
for country in countries:
print('####' + country + '####')
mask = df['country'] == country
df_isp = df.loc[mask]
df_isp = df_isp.groupby('isp', as_index=False)['isp'].agg({'isp_country_count':'count'}).set_index('isp')
df_isp = df_isp.sort_values(by='isp_country_count', ascending=False)
if len(df_isp) > 7:
            tmp = df_isp[:7].copy() # Take the top 7 ISPs
new_row = pd.DataFrame(data={
'isp': ['others'],
'isp_country_count': [df_isp['isp_country_count'][7:].sum()]
}).set_index('isp')
df_isp = pd.concat([tmp, new_row])
df_isp.plot.pie(y='isp_country_count', autopct=format_pct(df_isp['isp_country_count']), pctdistance=0.8, colors=plt.cm.tab20.colors)
legend = plt.legend()
legend.remove()
plt.axis('off')
plt.savefig('breakdown_for_' + country + '.png', bbox_inches='tight')
def breakdown_isp_cities(info: list, cities: list):
    """ Plots, for each given city, a pie chart of the ISPs hosting nodes in that city"""
d = {'isp': [], 'city': []}
for coord in info:
d['isp'].append(coord[-1])
d['city'].append(coord[3])
df = pd.DataFrame(data=d)
for city in cities:
print('####' + city + '####')
mask = df['city'] == city
df_isp = df.loc[mask]
df_isp = df_isp.groupby('isp', as_index=False)['isp'].agg({'isp_city_count':'count'}).set_index('isp')
df_isp = df_isp.sort_values(by='isp_city_count', ascending=False)
if len(df_isp) > 8:
            tmp = df_isp[:8].copy() # Take the top 8 ISPs
new_row = pd.DataFrame(data={
'isp': ['others'],
'isp_city_count': [df_isp['isp_city_count'][8:].sum()]
}).set_index('isp')
df_isp = pd.concat([tmp, new_row])
df_isp.plot.pie(y='isp_city_count', autopct=format_pct(df_isp['isp_city_count']), pctdistance=0.7, colors=plt.cm.tab20.colors)
legend = plt.legend()
legend.remove()
plt.axis('off')
plt.savefig('isp_breakdown_for_' + city + '.png')
def breakdown_isp_countries(info: list, isps: list):
""" Provides a visualization of all ISPs on where their nodes are located by country"""
d = {'isp': [], 'country': []}
for coord in info:
d['isp'].append(coord[-1])
d['country'].append(coord[2])
df = pd.DataFrame(data=d)
mask1 = df['isp'].isin(isps)
for isp in isps:
print('####' + isp + '####')
mask = df['isp'] == isp
df_isp = df.loc[mask]
df_isp = df_isp.groupby('country', as_index=False)['country'].agg({'isp_country_count':'count'}).set_index('country')
df_isp = df_isp.sort_values(by='isp_country_count', ascending=False)
if len(df_isp) > 8:
            tmp = df_isp[:8].copy() # Take the top 8 countries
new_row = pd.DataFrame(data={
'country': ['others'],
'isp_country_count': [df_isp['isp_country_count'][8:].sum()]
}).set_index('country')
df_isp = pd.concat([tmp, new_row])
if isp == 'Alibaba (Us) Technology Co. Ltd.':
            tmp = df_isp[:5].copy() # Take the top 5 countries
new_row = pd.DataFrame(data={
'country': ['others'],
'isp_country_count': [df_isp['isp_country_count'][5:].sum()]
}).set_index('country')
df_isp = pd.concat([tmp, new_row])
df_isp.plot.pie(y='isp_country_count', autopct=format_pct(df_isp['isp_country_count']), pctdistance=0.7, colors=plt.cm.tab20.colors)
legend = plt.legend()
legend.remove()
plt.axis('off')
plt.savefig('breakdown_for_' + isp + '.png', bbox_inches='tight')
def breakdown_isp_alt(info: list):
    """ Pie chart comparing nodes hosted with the top-10 ISPs ('Cloud hosted') against all remaining nodes ('Individually hosted')"""
d = {'isp': []}
for coord in info:
d['isp'].append(coord[-1])
df = pd.DataFrame(data=d)
df = df.groupby('isp', as_index=False)['isp'].agg({'isp_count':'count'}).set_index('isp')
df = df.sort_values(by='isp_count', ascending=False)
alt_row = pd.DataFrame(data={
'isp': ['Cloud hosted'],
'isp_count': [df['isp_count'][:10].sum()]
}).set_index('isp')
new_row = pd.DataFrame(data={
'isp': ['Individually hosted'],
'isp_count': [df['isp_count'][10:].sum()]
}).set_index('isp')
df_final = pd.concat([alt_row, new_row])
df_final.plot.pie(y='isp_count', autopct=format_pct(df['isp_count']), colors=plt.cm.tab20.colors)
legend = plt.legend()
legend.remove()
plt.axis('off')
plt.savefig('isp_count_alt.png', bbox_inches='tight')
def breakdown_isp(info: list):
""" Break down of ISPs for all nodes across the world and graphed on a
pie chart
"""
d = {'isp': []}
for coord in info:
d['isp'].append(coord[-1])
df = pd.DataFrame(data=d)
df_country = | pd.DataFrame(data=d) | pandas.DataFrame |
# -*- coding: utf-8 -*-
#
#################
# This script takes as an input the supplementary data
# from Saunois et al. (2020), imported in csv format from
# the original .xlsx file, and returns a csv file with
# data for the 2008-2017 period for the different regions
# defined in Human Impacts by the Numbers. Data is provided
# for both the Top-Down (TD) and Bottom-Up (BU) estimates.
#
# Last updated: Nov 2020
# Author: <NAME>
#
#################
import numpy as np
import pandas as pd
def add_agg_col(df, col_inds, type_ = float):
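    # sums the listed columns (cast to float) and returns the aggregate Series; the type_ argument is currently unused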
df_agg_col = df[col_inds[0]].astype(float)
for i in range(1,len(col_inds)):
df_agg_col = df_agg_col + df[col_inds[i]].astype(float)
return df_agg_col
######### Get TD 2008-2017 data #########
data_ = pd.read_csv('../source/TD_2008_2017_clean.csv', header=0)
# Drop spurious row
data_ = data_.drop([0], axis=0)
# Generate standard regional data
data_['North America'] = add_agg_col(data_, ["Canada", "USA", "Central America"])
data_['Africa'] = add_agg_col(data_, ['Northern Africa',
'Equatorial Africa', 'Southern Africa'])
data_['Europe and Russia'] = add_agg_col(data_, ['Europe', 'Russia'])
data_['Asia'] = add_agg_col(data_, ['South Asia',
'Southeast Asia', 'Central Asia', 'Middle East',
'China', 'Korean Japan'])
data_['South America'] = add_agg_col(data_, ['Northern South America',
'Brazil', 'Southwest South America'])
# Create new DataFrame to store regional data
cols = ['Africa', 'Europe and Russia', 'Asia',
'South America', 'North America', 'Oceania']
sectors_ = ['Wetlands', 'Other Natural Sources',
'Agriculture and Waste', 'Fossil Fuels',
'Biomass and Biofuel Burning',
'Total Anthropogenic']
source_range = np.linspace(0, 132, 7).astype(int)
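# boundaries for the six sector blocks: each sector's estimates occupy 22 consecutive rows of the cleaned TD table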
# Add sample mean of all available references
td_mean = pd.DataFrame(data_[cols].iloc[
source_range[0]:source_range[1]].astype(float).mean(),
columns=[sectors_[0]])
for j in range(1, len(source_range)-1):
td_mean[sectors_[j]] = data_[cols].iloc[
source_range[j]:source_range[j+1]].astype(float).mean()
td_mean["Measure"] = "Sample Mean"
# Add standard deviation of sample mean
td_std = pd.DataFrame(data_[cols].iloc[
source_range[0]:source_range[1]].astype(float).std()/
np.sqrt(source_range[1]-source_range[0]),
columns=[sectors_[0]])
for j in range(len(source_range)-1):
td_std[sectors_[j]] = data_[cols].iloc[
source_range[j]:source_range[j+1]].astype(float).std(
)/np.sqrt(source_range[j+1]-source_range[j])
td_std["Measure"] = "Std of Sample Mean"
# Concatenate mean and std values
td_total = pd.concat([td_mean, td_std], axis=0, ignore_index=False)
td_total["Region"] = td_total.index
td_total["Estimate type"] = "Top-Down"
td_total["Period"] = "2008-2017"
# Rearrange columns
td_total = td_total[["Region", "Period", "Estimate type", "Measure",
'Total Anthropogenic', 'Wetlands', 'Other Natural Sources',
'Agriculture and Waste', 'Fossil Fuels',
'Biomass and Biofuel Burning']]
######### Get BU 2008-2017 data #########
data_ = | pd.read_csv('../source/BU_2008_2017_clean.csv', header=0) | pandas.read_csv |
from PIL import Image
from jieba.analyse import extract_tags
from os import path
import pandas as pd
import wordcloud as wc
import jieba
# Short-term bullish
df = | pd.read_excel('closes_acc.xlsx', index_col=0) | pandas.read_excel |
import pandas
import matplotlib.pyplot as plt
import pickle
import numpy as np
import warnings
warnings.filterwarnings("ignore")
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import train_test_split, GridSearchCV
from sklearn.linear_model import Ridge, Lasso
from sklearn.ensemble import RandomForestRegressor
class MovingTimeRegression:
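    # Trains Ridge/Lasso/RandomForest regressors that predict an activity's moving time from three feature sets (all, reduced, base).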
def __init__(self, cleanedRoute, modelsRoute, scalerRoute, paramsRoute, cleanedFileName): # define data routes and csv names!
self.cleanedRoute = cleanedRoute
self.modelsRoute = modelsRoute
self.scalerRoute = scalerRoute
self.paramsRoute = paramsRoute
self.dataset = pandas.read_csv(cleanedRoute+cleanedFileName, sep="|")
self.allTrainX = None
self.allTestX = None
self.allTrainy = None
self.allTesty = None
self.reducedTrainX = None
self.reducedTestX = None
self.reducedTrainy = None
self.reducedTesty = None
self.baseTrainX = None
self.baseTestX = None
self.baseTrainy = None
self.baseTesty = None
self.allRidgeModel = None
self.reducedRidgeModel = None
self.baseRidgeModel = None
self.allLassoModel = None
self.reducedLassoModel = None
self.baseLassoModel = None
self.allRandomForestModel = None
self.reducedRandomForestModel = None
self.baseRandomForestModel = None
self.allStandardScaler = None
self.reducedStandardScaler = None
self.baseStandardScaler = None
self.CatCols8 = ['#003f5c', '#2f4b7c', '#665191', '#a05195', '#d45087', '#f95d6a', '#ff7c43', '#ffa600']
self.CatCols5 = ['#003f5c', '#3d61f4', '#bc5090', '#ff6361', '#ffa600']
self.CatCols3 = ['#003f5c', '#bc5090', '#ffa600']
print('call .dataset to see the cleaned dataset')
def prepareData(self):
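        # builds the all/reduced/base feature matrices, fits a StandardScaler on each and creates train/test splits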
# all data
X = self.dataset.copy()
Y = X['moving_time'].copy()
X.drop(columns=['moving_time', 'elapsed_time', 'average_speed'], inplace=True)
names = X.columns
self.allStandardScaler = StandardScaler()
scaledX = self.allStandardScaler.fit_transform(X)
scaledX = pandas.DataFrame(scaledX, columns=names)
self.allTrainX, self.allTestX, self.allTrainy, self.allTesty = train_test_split(scaledX, Y, random_state=42)
# reduced data
X = self.dataset[['age_0.0', 'age_1.0', 'age_2.0', 'distance', 'elev_high', 'elev_low', 'hashed_id',
'total_elevation_gain', 'trainer_onehot', 'workout_type_11.0', 'workout_type_10.0', 'workout_type_12.0']].copy()
Y = self.dataset['moving_time'].copy()
names = X.columns
self.reducedStandardScaler = StandardScaler()
scaledX = self.reducedStandardScaler.fit_transform(X)
scaledX = pandas.DataFrame(scaledX, columns=names)
self.reducedTrainX, self.reducedTestX, self.reducedTrainy, self.reducedTesty = train_test_split(scaledX, Y,
random_state=42)
# base data
X = self.dataset[['distance', 'elev_high', 'elev_low', 'total_elevation_gain', 'trainer_onehot']].copy()
Y = self.dataset['moving_time'].copy()
names = X.columns
self.baseStandardScaler = StandardScaler()
scaledX = self.baseStandardScaler.fit_transform(X)
scaledX = pandas.DataFrame(scaledX, columns=names)
self.baseTrainX, self.baseTestX, self.baseTrainy, self.baseTesty = train_test_split(scaledX, Y, random_state=42)
# TODO: print info about the 3 dataset and the train-test pars
def trainModels(self, verbose=False, writeToFile=False):
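        # fits Ridge, Lasso and RandomForest models on each feature set; Ridge/Lasso hyper-parameters are grid-searched via helper functions, random-forest hyper-parameters are loaded from pre-computed pickle files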
# fitting models on all data
print('-- Fitting models on all data -- ')
if verbose:
print('Calculating best params for Ridge...')
ridgeParams = getBestParamsForRidge(self.allTrainX, self.allTestX, self.allTrainy, self.allTesty)
self.allRidgeModel = Ridge(alpha=ridgeParams['alpha'], solver=ridgeParams['solver'])
self.allRidgeModel.fit(self.allTrainX, self.allTrainy)
if verbose:
print('Done. Params: ')
print(ridgeParams)
print('')
if verbose:
print('Calculating best params for Lasso...')
lassoParams = getBestParamsForLasso(self.allTrainX, self.allTestX, self.allTrainy, self.allTesty)
self.allLassoModel = Lasso(alpha=lassoParams['alpha'])
self.allLassoModel.fit(self.allTrainX, self.allTrainy)
if verbose:
print('Done. Params: ')
print(lassoParams)
print('')
# TODO: calc best params for Random Forest
if verbose:
print('Loading best params for RandomForest...')
forestParams = pickle.load(open(self.paramsRoute+'moving_time_all_random_forest_params.p', 'rb'))
self.allRandomForestModel = RandomForestRegressor(n_estimators=forestParams['n_estimators'],
max_features=forestParams['max_features'],
min_samples_leaf=forestParams['min_samples_leaf'] )
self.allRandomForestModel.fit(self.allTrainX, self.allTrainy)
if verbose:
print('Done. Params: ')
print(forestParams)
print('')
print('Scores on allTesty data: ')
print(' - Ridge: '+str(self.allRidgeModel.score(self.allTestX, self.allTesty)))
print(' - Lasso: '+str(self.allLassoModel.score(self.allTestX, self.allTesty)))
print(' - RandomForest: '+str(self.allRandomForestModel.score(self.allTestX, self.allTesty)))
print('')
# fitting models on reduced data
print('-- Fitting models on reduced data --')
if verbose:
print('Calculating best params for Ridge...')
ridgeParams = getBestParamsForRidge(self.reducedTrainX, self.reducedTestX, self.reducedTrainy, self.reducedTesty)
self.reducedRidgeModel = Ridge(alpha=ridgeParams['alpha'], solver=ridgeParams['solver'])
self.reducedRidgeModel.fit(self.reducedTrainX, self.reducedTrainy)
if verbose:
print('Done. Params: ')
print(ridgeParams)
print('')
if verbose:
print('Calculating best params for Lasso...')
lassoParams = getBestParamsForLasso(self.reducedTrainX, self.reducedTestX, self.reducedTrainy, self.reducedTesty)
self.reducedLassoModel = Lasso(alpha=lassoParams['alpha'])
self.reducedLassoModel.fit(self.reducedTrainX, self.reducedTrainy)
if verbose:
print('Done. Params: ')
print(lassoParams)
print('')
# TODO: calc best params for Random Forest
if verbose:
print('Loading best params for RandomForest...')
forestParams = pickle.load(open(self.paramsRoute + 'moving_time_reduced_random_forest_params.p', 'rb'))
self.reducedRandomForestModel = RandomForestRegressor(n_estimators=forestParams['n_estimators'],
max_features=forestParams['max_features'],
min_samples_leaf=forestParams['min_samples_leaf'])
self.reducedRandomForestModel.fit(self.reducedTrainX, self.reducedTrainy)
if verbose:
print('Done. Params: ')
print(forestParams)
print('')
print('Scores on reudcedTesty data: ')
print(' - Ridge: ' + str(self.reducedRidgeModel.score(self.reducedTestX, self.reducedTesty)))
print(' - Lasso: ' + str(self.reducedLassoModel.score(self.reducedTestX, self.reducedTesty)))
print(' - RandomForest: ' + str(self.reducedRandomForestModel.score(self.reducedTestX, self.reducedTesty)))
print('')
# fitting models on base data
print('-- Fitting models on base data --')
if verbose:
print('Calculating best params for Ridge...')
ridgeParams = getBestParamsForRidge(self.baseTrainX, self.baseTestX, self.baseTrainy,
self.baseTesty)
self.baseRidgeModel = Ridge(alpha=ridgeParams['alpha'], solver=ridgeParams['solver'])
self.baseRidgeModel.fit(self.baseTrainX, self.baseTrainy)
if verbose:
print('Done. Params: ')
print(ridgeParams)
print('')
if verbose:
print('Calculating best params for Lasso...')
lassoParams = getBestParamsForLasso(self.baseTrainX, self.baseTestX, self.baseTrainy,
self.baseTesty)
self.baseLassoModel = Lasso(alpha=lassoParams['alpha'])
self.baseLassoModel.fit(self.baseTrainX, self.baseTrainy)
if verbose:
print('Done. Params: ')
print(lassoParams)
print('')
if verbose:
print('Loading best params for RandomForest...')
forestParams = pickle.load(open(self.paramsRoute + 'moving_time_base_random_forest_params.p', 'rb'))
self.baseRandomForestModel = RandomForestRegressor(n_estimators=forestParams['n_estimators'],
max_features=forestParams['max_features'],
min_samples_leaf=forestParams['min_samples_leaf'])
self.baseRandomForestModel.fit(self.baseTrainX, self.baseTrainy)
if verbose:
print('Done. Params: ')
print(forestParams)
print('')
print('Scores on baseTesty data: ')
print(' - Ridge: ' + str(self.baseRidgeModel.score(self.baseTestX, self.baseTesty)))
print(' - Lasso: ' + str(self.baseLassoModel.score(self.baseTestX, self.baseTesty)))
print(' - RandomForest: ' + str(self.baseRandomForestModel.score(self.baseTestX, self.baseTesty)))
print('')
if writeToFile:
writeRegressionModels(self.allRidgeModel, self.allLassoModel, self.allRandomForestModel, 'all', 'moving',
self.modelsRoute)
writeRegressionModels(self.reducedRidgeModel, self.reducedLassoModel, self.reducedRandomForestModel, 'reduced',
'moving', self.modelsRoute)
writeRegressionModels(self.baseRidgeModel, self.baseLassoModel, self.baseRandomForestModel, 'base', 'moving',
self.modelsRoute)
writeScalerModel(self.allStandardScaler, 'all', 'moving', self.scalerRoute)
writeScalerModel(self.reducedStandardScaler, 'reduced', 'moving', self.scalerRoute)
writeScalerModel(self.baseStandardScaler, 'base', 'moving', self.scalerRoute)
print('Get predictions: ')
print(' - based on all data: call getPredictionWithAllModels(list) func., where list has 27 elements ')
print(' - based on reduced data: call getPredictionWithReducedModels(list) func., where list has 9 elements ')
print(' - based on all data: call getPredictionWithBaseModels(list) func., where list has 5 elements ')
def calulateBestParamsForRandomForest(self):
        print('Warning: this function will take several hours: the results are already available in the ./results/params folder')
print('Consider interrupting the kernel')
GridSearchForRandomForest(pandas.concat([self.allTrainX, self.allTestX]),
pandas.concat([self.allTrainy, self.allTesty]), 'moving', 'all')
GridSearchForRandomForest(pandas.concat([self.reducedTrainX, self.reducedTestX]),
pandas.concat([self.reducedTrainy, self.reducedTesty]), 'moving', 'reduced')
GridSearchForRandomForest(pandas.concat([self.baseTrainX, self.baseTestX]),
pandas.concat([self.baseTrainy, self.baseTesty]), 'moving', 'base')
def getPredictionWithAllModels(self, X):
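        # scales a single raw feature vector with the fitted scaler and prints the moving-time prediction from each trained model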
if len(X) != 26:
            print('Shape mismatch: X should contain 26 values like the allTrainX dataset')
return
#X.append(0.0)
scaled = self.allStandardScaler.transform(np.reshape(X, (1,-1)))
# TODO: scale X before calling predict func
ridgeResult = self.allRidgeModel.predict(scaled)
lassoResult = self.allLassoModel.predict(scaled)
forestResult = self.allRandomForestModel.predict(scaled)
print(' - ridge: '+ str(ridgeResult))
print(' - lasso: '+ str(lassoResult))
print(' - random forest: '+ str(forestResult))
# TODO: create graph
def getPredictionWithReducedModels(self, X):
if len(X) != 9:
            print('Shape mismatch: X should contain 9 values like the reducedTrainX dataset')
return
# TODO: scale X before calling predict func
scaled = self.reducedStandardScaler.transform(np.reshape(X, (1,-1)))
ridgeResult = self.reducedRidgeModel.predict(scaled)
lassoResult = self.reducedLassoModel.predict(scaled)
forestResult = self.reducedRandomForestModel.predict(scaled)
print(' - ridge: ' + str(ridgeResult))
print(' - lasso: ' + str(lassoResult))
print(' - random forest: ' + str(forestResult))
# TODO: create graph
def getPredictionWithBaseModels(self, X):
if len(X) != 5:
            print('Shape mismatch: X should contain 5 values like the baseTrainX dataset')
return
# TODO: scale X before calling predict func
scaled = self.baseStandardScaler.transform(np.reshape(X, (1,-1)))
ridgeResult = self.baseRidgeModel.predict(scaled)
lassoResult = self.baseLassoModel.predict(scaled)
forestResult = self.baseRandomForestModel.predict(scaled)
print(' - ridge: ' + str(ridgeResult))
print(' - lasso: ' + str(lassoResult))
print(' - random forest: ' + str(forestResult))
# TODO: create graph
def loadTrainedModelsAndScalers(self):
self.loadRegressionModels()
self.loadScalers()
print('Regression models and scalers are loaded')
print('Use the following functions to get predictions:')
print(' - getPredictionWithAllModels')
print(' - getPredictionWithReducedModels')
print(' - getPredictionWithBaseModels')
def loadRegressionModels(self):
# loading models based on all dataset
self.allRidgeModel = pickle.load(
open(self.modelsRoute + 'moving_time_' + 'all' + '_' + 'ridge.p', 'rb'))
self.allLassoModel = pickle.load(
open(self.modelsRoute + 'moving_time_' + 'all' + '_' + 'lasso.p', 'rb'))
self.allRandomForestModel = pickle.load(
open(self.modelsRoute + 'moving_time_' + 'all' + '_' + 'random_forest.p', 'rb'))
# loading models based on reduced dataset
self.reducedRidgeModel = pickle.load(
open(self.modelsRoute + 'moving_time_' + 'reduced' + '_' + 'ridge.p', 'rb'))
self.reducedLassoModel = pickle.load(
open(self.modelsRoute + 'moving_time_' + 'reduced' + '_' + 'lasso.p', 'rb'))
self.reducedRandomForestModel = pickle.load(
open(self.modelsRoute + 'moving_time_' + 'reduced' + '_' + 'random_forest.p', 'rb'))
# loading models based on base dataset
self.baseRidgeModel = pickle.load(
open(self.modelsRoute + 'moving_time_' + 'base' + '_' + 'ridge.p', 'rb'))
self.baseLassoModel = pickle.load(
open(self.modelsRoute + 'moving_time_' + 'base' + '_' + 'lasso.p', 'rb'))
self.baseRandomForestModel = pickle.load(
open(self.modelsRoute + 'moving_time_' + 'base' + '_' + 'random_forest.p', 'rb'))
def loadScalers(self):
# load fitted scaler models
self.allStandardScaler = pickle.load(open(self.scalerRoute + 'moving_time_all_scaler.p', 'rb'))
self.reducedStandardScaler = pickle.load(open(self.scalerRoute + 'moving_time_reduced_scaler.p', 'rb'))
self.baseStandardScaler = pickle.load(open(self.scalerRoute + 'moving_time_base_scaler.p', 'rb'))
def mps_to_kmph(self, m_per_s):
return m_per_s * 3.6
def kmph_to_mps(self, km_h):
return km_h / 3.6
## END OF MOVING TIME CLASS
## ELAPSED TIME
class ElapsedTimeRegression():
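    # Trains regressors that predict elapsed time, with separate model slots per age group, distance bucket and user subset.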
def __init__(self, cleanedRoute, modelsRoute, scalerRoute, paramsRoute, cleanedFileName): # define data routes and csv names!
self.cleanedRoute = cleanedRoute
self.modelsRoute = modelsRoute
self.scalerRoute = scalerRoute
self.paramsRoute = paramsRoute
self.dataset = pandas.read_csv(cleanedRoute + cleanedFileName, sep="|")
# all data
self.allTrainX = None
self.allTestX = None
self.allTrainy = None
self.allTesty = None
# reduced data
self.reducedTrainX = None
self.reducedTestX = None
self.reducedTrainy = None
self.reducedTesty = None
# age group null data
self.ageNullTrainX = None
self.ageNullTestX = None
self.ageNullTrainy = None
self.ageNullTesty = None
# age group one data
self.ageOneTrainX = None
self.ageOneTestX = None
self.ageOneTrainy = None
self.ageOneTesty = None
# age group two data
self.ageTwoTrainX = None
self.ageTwoTestX = None
self.ageTwoTrainy = None
self.ageTwoTesty = None
# distance small data
self.distanceSmallTrainX = None
self.distanceSmallTestX = None
self.distanceSmallTrainy = None
self.distanceSmallTesty = None
# distance big data
self.distanceBigTrainX = None
self.distanceBigTestX = None
self.distanceBigTrainy = None
self.distanceBigTesty = None
# user data
self.userTrainX = None
self.userTestX = None
self.userTrainy = None
self.userTesty = None
# regression model initialization
# ridge
self.allRidgeModel = None
self.reducedRidgeModel = None
self.ageNullRidgeModel = None
self.ageOneRidgeModel = None
self.ageTwoRidgeModel = None
self.distanceSmallRidgeModel = None
self.distanceBigRidgeModel = None
self.userRidgeModel = None
# lasso
self.allLassoModel = None
self.reducedLassoModel = None
self.ageNullLassoModel = None
self.ageOneLassoModel = None
self.ageTwoLassoModel = None
self.distanceSmallLassoModel = None
self.distanceBigLassoModel = None
self.userLassoModel = None
# random forest
self.allForestModel = None
self.reducedForestModel = None
self.ageNullForestModel = None
self.ageOneForestModel = None
self.ageTwoForestModel = None
self.distanceSmallForestModel = None
self.distanceBigForestModel = None
self.userForestModel = None
self.allStandardScaler = None
self.reducedStandardScaler = None
self.ageNullStandardScaler = None
self.ageOneStandardScaler = None
self.ageTwoStandardScaler = None
self.distanceSmallStandardScaler = None
self.distanceBigStandardScaler = None
self.userStandardScaler = None
self.CatCols8 = ['#003f5c', '#2f4b7c', '#665191', '#a05195', '#d45087', '#f95d6a', '#ff7c43', '#ffa600']
self.CatCols5 = ['#003f5c', '#3d61f4', '#bc5090', '#ff6361', '#ffa600']
self.CatCols3 = ['#003f5c', '#bc5090', '#ffa600']
print('call .dataset to see the cleaned dataset')
def prepareData(self):
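        # builds the per-subset feature matrices (all, reduced, per age group, ...), fits a scaler on each and creates train/test splits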
# all data
X = self.dataset.copy()
Y = X['elapsed_time'].copy()
X.drop(columns=['elapsed_time', 'average_speed'], inplace=True)
names = X.columns
self.allStandardScaler = StandardScaler()
scaledX = self.allStandardScaler.fit_transform(X)
scaledX = pandas.DataFrame(scaledX, columns=names)
self.allTrainX, self.allTestX, self.allTrainy, self.allTesty = train_test_split(scaledX, Y, random_state=42)
# reduced data
X = self.dataset[['age_0.0', 'age_1.0', 'age_2.0', 'distance', 'elev_high', 'elev_low', 'hashed_id',
'total_elevation_gain', 'moving_time', 'trainer_onehot', 'workout_type_11.0', 'workout_type_10.0', 'workout_type_12.0']].copy()
Y = self.dataset['elapsed_time'].copy()
names = X.columns
self.reducedStandardScaler = StandardScaler()
scaledX = self.reducedStandardScaler.fit_transform(X)
scaledX = pandas.DataFrame(scaledX, columns=names)
self.reducedTrainX, self.reducedTestX, self.reducedTrainy, self.reducedTesty = train_test_split(scaledX, Y,
random_state=42)
# age group: null data
ageNull = self.dataset[self.dataset['age_0.0'] == 1].copy()
Y = ageNull['elapsed_time'].copy()
ageNull.drop(columns=['age_0.0', 'age_1.0', 'age_2.0', 'elapsed_time', 'average_speed'], inplace=True)
names = ageNull.columns
self.ageNullStandardScaler = StandardScaler()
scaledX = self.ageNullStandardScaler.fit_transform(ageNull)
scaledX = pandas.DataFrame(scaledX, columns=names)
self.ageNullTrainX, self.ageNullTestX, self.ageNullTrainy, self.ageNullTesty = train_test_split(scaledX, Y,
random_state=42)
# age group: one data
ageOne = self.dataset[self.dataset['age_1.0'] == 1].copy()
Y = ageOne['elapsed_time'].copy()
ageOne.drop(columns=['age_0.0', 'age_1.0', 'age_2.0', 'elapsed_time', 'average_speed'], inplace=True)
names = ageOne.columns
self.ageOneStandardScaler = StandardScaler()
        scaledX = self.ageOneStandardScaler.fit_transform(ageOne)
scaledX = pandas.DataFrame(scaledX, columns=names)
self.ageOneTrainX, self.ageOneTestX, self.ageOneTrainy, self.ageOneTesty = train_test_split(scaledX, Y,
random_state=42)
# age group: two data
ageTwo = self.dataset[self.dataset['age_2.0'] == 1].copy()
Y = ageTwo['elapsed_time'].copy()
ageTwo.drop(columns=['age_0.0', 'age_1.0', 'age_2.0', 'elapsed_time', 'average_speed'], inplace=True)
names = ageTwo.columns
self.ageTwoStandardScaler = StandardScaler()
scaledX = self.ageTwoStandardScaler.fit_transform(ageTwo)
scaledX = | pandas.DataFrame(scaledX, columns=names) | pandas.DataFrame |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import io
import json
import numpy as np
import pytest
import pyarrow as pa
from pyarrow.fs import LocalFileSystem, SubTreeFileSystem
from pyarrow.tests.parquet.common import (
parametrize_legacy_dataset, parametrize_legacy_dataset_not_supported)
from pyarrow.util import guid
from pyarrow.vendored.version import Version
try:
import pyarrow.parquet as pq
from pyarrow.tests.parquet.common import (_read_table, _test_dataframe,
_write_table)
except ImportError:
pq = None
try:
import pandas as pd
import pandas.testing as tm
from pyarrow.tests.parquet.common import (_roundtrip_pandas_dataframe,
alltypes_sample)
except ImportError:
pd = tm = None
@pytest.mark.pandas
def test_pandas_parquet_custom_metadata(tempdir):
df = alltypes_sample(size=10000)
filename = tempdir / 'pandas_roundtrip.parquet'
arrow_table = pa.Table.from_pandas(df)
assert b'pandas' in arrow_table.schema.metadata
_write_table(arrow_table, filename, version='2.6', coerce_timestamps='ms')
metadata = pq.read_metadata(filename).metadata
assert b'pandas' in metadata
js = json.loads(metadata[b'pandas'].decode('utf8'))
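    # a default RangeIndex is stored only in the pandas metadata, not as a physical column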
assert js['index_columns'] == [{'kind': 'range',
'name': None,
'start': 0, 'stop': 10000,
'step': 1}]
@pytest.mark.pandas
def test_merging_parquet_tables_with_different_pandas_metadata(tempdir):
# ARROW-3728: Merging Parquet Files - Pandas Meta in Schema Mismatch
schema = pa.schema([
pa.field('int', pa.int16()),
pa.field('float', pa.float32()),
pa.field('string', pa.string())
])
df1 = pd.DataFrame({
'int': np.arange(3, dtype=np.uint8),
'float': np.arange(3, dtype=np.float32),
'string': ['ABBA', 'EDDA', 'ACDC']
})
df2 = pd.DataFrame({
'int': [4, 5],
'float': [1.1, None],
'string': [None, None]
})
table1 = pa.Table.from_pandas(df1, schema=schema, preserve_index=False)
table2 = pa.Table.from_pandas(df2, schema=schema, preserve_index=False)
assert not table1.schema.equals(table2.schema, check_metadata=True)
assert table1.schema.equals(table2.schema)
writer = pq.ParquetWriter(tempdir / 'merged.parquet', schema=schema)
writer.write_table(table1)
writer.write_table(table2)
@pytest.mark.pandas
@parametrize_legacy_dataset
def test_pandas_parquet_column_multiindex(tempdir, use_legacy_dataset):
df = alltypes_sample(size=10)
df.columns = pd.MultiIndex.from_tuples(
list(zip(df.columns, df.columns[::-1])),
names=['level_1', 'level_2']
)
filename = tempdir / 'pandas_roundtrip.parquet'
arrow_table = pa.Table.from_pandas(df)
assert arrow_table.schema.pandas_metadata is not None
_write_table(arrow_table, filename, version='2.6', coerce_timestamps='ms')
table_read = pq.read_pandas(
filename, use_legacy_dataset=use_legacy_dataset)
df_read = table_read.to_pandas()
tm.assert_frame_equal(df, df_read)
@pytest.mark.pandas
@parametrize_legacy_dataset
def test_pandas_parquet_2_0_roundtrip_read_pandas_no_index_written(
tempdir, use_legacy_dataset
):
df = alltypes_sample(size=10000)
filename = tempdir / 'pandas_roundtrip.parquet'
arrow_table = pa.Table.from_pandas(df, preserve_index=False)
js = arrow_table.schema.pandas_metadata
assert not js['index_columns']
# ARROW-2170
    # While index_columns should be empty, columns still needs to be filled.
assert js['columns']
_write_table(arrow_table, filename, version='2.6', coerce_timestamps='ms')
table_read = pq.read_pandas(
filename, use_legacy_dataset=use_legacy_dataset)
js = table_read.schema.pandas_metadata
assert not js['index_columns']
read_metadata = table_read.schema.metadata
assert arrow_table.schema.metadata == read_metadata
df_read = table_read.to_pandas()
tm.assert_frame_equal(df, df_read)
# TODO(dataset) duplicate column selection actually gives duplicate columns now
@pytest.mark.pandas
@parametrize_legacy_dataset_not_supported
def test_pandas_column_selection(tempdir, use_legacy_dataset):
size = 10000
np.random.seed(0)
df = pd.DataFrame({
'uint8': np.arange(size, dtype=np.uint8),
'uint16': np.arange(size, dtype=np.uint16)
})
filename = tempdir / 'pandas_roundtrip.parquet'
arrow_table = pa.Table.from_pandas(df)
_write_table(arrow_table, filename)
table_read = _read_table(
filename, columns=['uint8'], use_legacy_dataset=use_legacy_dataset)
df_read = table_read.to_pandas()
tm.assert_frame_equal(df[['uint8']], df_read)
# ARROW-4267: Selection of duplicate columns still leads to these columns
# being read uniquely.
table_read = _read_table(
filename, columns=['uint8', 'uint8'],
use_legacy_dataset=use_legacy_dataset)
df_read = table_read.to_pandas()
tm.assert_frame_equal(df[['uint8']], df_read)
@pytest.mark.pandas
@parametrize_legacy_dataset
def test_pandas_parquet_native_file_roundtrip(tempdir, use_legacy_dataset):
df = _test_dataframe(10000)
arrow_table = pa.Table.from_pandas(df)
imos = pa.BufferOutputStream()
_write_table(arrow_table, imos, version='2.6')
buf = imos.getvalue()
reader = pa.BufferReader(buf)
df_read = _read_table(
reader, use_legacy_dataset=use_legacy_dataset).to_pandas()
tm.assert_frame_equal(df, df_read)
@pytest.mark.pandas
@parametrize_legacy_dataset
def test_read_pandas_column_subset(tempdir, use_legacy_dataset):
df = _test_dataframe(10000)
arrow_table = pa.Table.from_pandas(df)
imos = pa.BufferOutputStream()
_write_table(arrow_table, imos, version='2.6')
buf = imos.getvalue()
reader = pa.BufferReader(buf)
df_read = pq.read_pandas(
reader, columns=['strings', 'uint8'],
use_legacy_dataset=use_legacy_dataset
).to_pandas()
tm.assert_frame_equal(df[['strings', 'uint8']], df_read)
@pytest.mark.pandas
@parametrize_legacy_dataset
def test_pandas_parquet_empty_roundtrip(tempdir, use_legacy_dataset):
df = _test_dataframe(0)
arrow_table = pa.Table.from_pandas(df)
imos = pa.BufferOutputStream()
_write_table(arrow_table, imos, version='2.6')
buf = imos.getvalue()
reader = pa.BufferReader(buf)
df_read = _read_table(
reader, use_legacy_dataset=use_legacy_dataset).to_pandas()
tm.assert_frame_equal(df, df_read)
@pytest.mark.pandas
def test_pandas_can_write_nested_data(tempdir):
data = {
"agg_col": [
{"page_type": 1},
{"record_type": 1},
{"non_consecutive_home": 0},
],
"uid_first": "1001"
}
df = pd.DataFrame(data=data)
arrow_table = pa.Table.from_pandas(df)
imos = pa.BufferOutputStream()
# This succeeds under V2
_write_table(arrow_table, imos)
@pytest.mark.pandas
@parametrize_legacy_dataset
def test_pandas_parquet_pyfile_roundtrip(tempdir, use_legacy_dataset):
filename = tempdir / 'pandas_pyfile_roundtrip.parquet'
size = 5
df = pd.DataFrame({
'int64': np.arange(size, dtype=np.int64),
'float32': np.arange(size, dtype=np.float32),
'float64': np.arange(size, dtype=np.float64),
'bool': np.random.randn(size) > 0,
'strings': ['foo', 'bar', None, 'baz', 'qux']
})
arrow_table = pa.Table.from_pandas(df)
with filename.open('wb') as f:
_write_table(arrow_table, f, version="1.0")
data = io.BytesIO(filename.read_bytes())
table_read = _read_table(data, use_legacy_dataset=use_legacy_dataset)
df_read = table_read.to_pandas()
tm.assert_frame_equal(df, df_read)
@pytest.mark.pandas
@parametrize_legacy_dataset
def test_pandas_parquet_configuration_options(tempdir, use_legacy_dataset):
size = 10000
np.random.seed(0)
df = pd.DataFrame({
'uint8': np.arange(size, dtype=np.uint8),
'uint16': np.arange(size, dtype=np.uint16),
'uint32': np.arange(size, dtype=np.uint32),
'uint64': np.arange(size, dtype=np.uint64),
'int8': np.arange(size, dtype=np.int16),
'int16': np.arange(size, dtype=np.int16),
'int32': np.arange(size, dtype=np.int32),
'int64': np.arange(size, dtype=np.int64),
'float32': np.arange(size, dtype=np.float32),
'float64': np.arange(size, dtype=np.float64),
'bool': np.random.randn(size) > 0
})
filename = tempdir / 'pandas_roundtrip.parquet'
arrow_table = pa.Table.from_pandas(df)
for use_dictionary in [True, False]:
_write_table(arrow_table, filename, version='2.6',
use_dictionary=use_dictionary)
table_read = _read_table(
filename, use_legacy_dataset=use_legacy_dataset)
df_read = table_read.to_pandas()
tm.assert_frame_equal(df, df_read)
for write_statistics in [True, False]:
_write_table(arrow_table, filename, version='2.6',
write_statistics=write_statistics)
table_read = _read_table(filename,
use_legacy_dataset=use_legacy_dataset)
df_read = table_read.to_pandas()
tm.assert_frame_equal(df, df_read)
for compression in ['NONE', 'SNAPPY', 'GZIP', 'LZ4', 'ZSTD']:
if (compression != 'NONE' and
not pa.lib.Codec.is_available(compression)):
continue
_write_table(arrow_table, filename, version='2.6',
compression=compression)
table_read = _read_table(
filename, use_legacy_dataset=use_legacy_dataset)
df_read = table_read.to_pandas()
tm.assert_frame_equal(df, df_read)
@pytest.mark.pandas
def test_spark_flavor_preserves_pandas_metadata():
df = _test_dataframe(size=100)
df.index = np.arange(0, 10 * len(df), 10)
df.index.name = 'foo'
result = _roundtrip_pandas_dataframe(df, {'version': '2.0',
'flavor': 'spark'})
tm.assert_frame_equal(result, df)
@pytest.mark.pandas
@parametrize_legacy_dataset
def test_index_column_name_duplicate(tempdir, use_legacy_dataset):
data = {
'close': {
pd.Timestamp('2017-06-30 01:31:00'): 154.99958999999998,
pd.Timestamp('2017-06-30 01:32:00'): 154.99958999999998,
},
'time': {
pd.Timestamp('2017-06-30 01:31:00'): pd.Timestamp(
'2017-06-30 01:31:00'
),
pd.Timestamp('2017-06-30 01:32:00'): pd.Timestamp(
'2017-06-30 01:32:00'
),
}
}
path = str(tempdir / 'data.parquet')
dfx = pd.DataFrame(data).set_index('time', drop=False)
tdfx = pa.Table.from_pandas(dfx)
_write_table(tdfx, path)
arrow_table = _read_table(path, use_legacy_dataset=use_legacy_dataset)
result_df = arrow_table.to_pandas()
tm.assert_frame_equal(result_df, dfx)
@pytest.mark.pandas
@parametrize_legacy_dataset
def test_multiindex_duplicate_values(tempdir, use_legacy_dataset):
num_rows = 3
numbers = list(range(num_rows))
index = pd.MultiIndex.from_arrays(
[['foo', 'foo', 'bar'], numbers],
names=['foobar', 'some_numbers'],
)
df = pd.DataFrame({'numbers': numbers}, index=index)
table = pa.Table.from_pandas(df)
filename = tempdir / 'dup_multi_index_levels.parquet'
_write_table(table, filename)
result_table = _read_table(filename, use_legacy_dataset=use_legacy_dataset)
assert table.equals(result_table)
result_df = result_table.to_pandas()
tm.assert_frame_equal(result_df, df)
@pytest.mark.pandas
@parametrize_legacy_dataset
def test_backwards_compatible_index_naming(datadir, use_legacy_dataset):
expected_string = b"""\
carat cut color clarity depth table price x y z
0.23 Ideal E SI2 61.5 55.0 326 3.95 3.98 2.43
0.21 Premium E SI1 59.8 61.0 326 3.89 3.84 2.31
0.23 Good E VS1 56.9 65.0 327 4.05 4.07 2.31
0.29 Premium I VS2 62.4 58.0 334 4.20 4.23 2.63
0.31 Good J SI2 63.3 58.0 335 4.34 4.35 2.75
0.24 Very Good J VVS2 62.8 57.0 336 3.94 3.96 2.48
0.24 Very Good I VVS1 62.3 57.0 336 3.95 3.98 2.47
0.26 Very Good H SI1 61.9 55.0 337 4.07 4.11 2.53
0.22 Fair E VS2 65.1 61.0 337 3.87 3.78 2.49
0.23 Very Good H VS1 59.4 61.0 338 4.00 4.05 2.39"""
expected = pd.read_csv(io.BytesIO(expected_string), sep=r'\s{2,}',
index_col=None, header=0, engine='python')
table = _read_table(
datadir / 'v0.7.1.parquet', use_legacy_dataset=use_legacy_dataset)
result = table.to_pandas()
tm.assert_frame_equal(result, expected)
@pytest.mark.pandas
@parametrize_legacy_dataset
def test_backwards_compatible_index_multi_level_named(
datadir, use_legacy_dataset
):
expected_string = b"""\
carat cut color clarity depth table price x y z
0.23 Ideal E SI2 61.5 55.0 326 3.95 3.98 2.43
0.21 Premium E SI1 59.8 61.0 326 3.89 3.84 2.31
0.23 Good E VS1 56.9 65.0 327 4.05 4.07 2.31
0.29 Premium I VS2 62.4 58.0 334 4.20 4.23 2.63
0.31 Good J SI2 63.3 58.0 335 4.34 4.35 2.75
0.24 Very Good J VVS2 62.8 57.0 336 3.94 3.96 2.48
0.24 Very Good I VVS1 62.3 57.0 336 3.95 3.98 2.47
0.26 Very Good H SI1 61.9 55.0 337 4.07 4.11 2.53
0.22 Fair E VS2 65.1 61.0 337 3.87 3.78 2.49
0.23 Very Good H VS1 59.4 61.0 338 4.00 4.05 2.39"""
expected = pd.read_csv(
io.BytesIO(expected_string), sep=r'\s{2,}',
index_col=['cut', 'color', 'clarity'],
header=0, engine='python'
).sort_index()
table = _read_table(datadir / 'v0.7.1.all-named-index.parquet',
use_legacy_dataset=use_legacy_dataset)
result = table.to_pandas()
tm.assert_frame_equal(result, expected)
@pytest.mark.pandas
@parametrize_legacy_dataset
def test_backwards_compatible_index_multi_level_some_named(
datadir, use_legacy_dataset
):
expected_string = b"""\
carat cut color clarity depth table price x y z
0.23 Ideal E SI2 61.5 55.0 326 3.95 3.98 2.43
0.21 Premium E SI1 59.8 61.0 326 3.89 3.84 2.31
0.23 Good E VS1 56.9 65.0 327 4.05 4.07 2.31
0.29 Premium I VS2 62.4 58.0 334 4.20 4.23 2.63
0.31 Good J SI2 63.3 58.0 335 4.34 4.35 2.75
0.24 Very Good J VVS2 62.8 57.0 336 3.94 3.96 2.48
0.24 Very Good I VVS1 62.3 57.0 336 3.95 3.98 2.47
0.26 Very Good H SI1 61.9 55.0 337 4.07 4.11 2.53
0.22 Fair E VS2 65.1 61.0 337 3.87 3.78 2.49
0.23 Very Good H VS1 59.4 61.0 338 4.00 4.05 2.39"""
expected = pd.read_csv(
io.BytesIO(expected_string),
sep=r'\s{2,}', index_col=['cut', 'color', 'clarity'],
header=0, engine='python'
).sort_index()
expected.index = expected.index.set_names(['cut', None, 'clarity'])
table = _read_table(datadir / 'v0.7.1.some-named-index.parquet',
use_legacy_dataset=use_legacy_dataset)
result = table.to_pandas()
tm.assert_frame_equal(result, expected)
@pytest.mark.pandas
@parametrize_legacy_dataset
def test_backwards_compatible_column_metadata_handling(
datadir, use_legacy_dataset
):
expected = pd.DataFrame(
{'a': [1, 2, 3], 'b': [.1, .2, .3],
'c': pd.date_range("2017-01-01", periods=3, tz='Europe/Brussels')})
expected.index = pd.MultiIndex.from_arrays(
[['a', 'b', 'c'],
| pd.date_range("2017-01-01", periods=3, tz='Europe/Brussels') | pandas.date_range |
"""High-level functions to help perform complex tasks
"""
from __future__ import print_function, division
import os
import multiprocessing as mp
import warnings
from datetime import datetime
import platform
import struct
import shutil
import copy
import time
from ast import literal_eval
import traceback
import sys
import numpy as np
import pandas as pd
pd.options.display.max_colwidth = 100
from ..pyemu_warnings import PyemuWarning
try:
import flopy
except:
pass
import pyemu
from pyemu.utils.os_utils import run, start_workers
def geostatistical_draws(
pst, struct_dict, num_reals=100, sigma_range=4, verbose=True, scale_offset=True
):
"""construct a parameter ensemble from a prior covariance matrix
implied by geostatistical structure(s) and parameter bounds.
Args:
pst (`pyemu.Pst`): a control file (or the name of control file). The
parameter bounds in `pst` are used to define the variance of each
parameter group.
struct_dict (`dict`): a dict of GeoStruct (or structure file), and list of
pilot point template files pairs. If the values in the dict are
`pd.DataFrames`, then they must have an 'x','y', and 'parnme' column.
If the filename ends in '.csv', then a pd.DataFrame is loaded,
otherwise a pilot points file is loaded.
num_reals (`int`, optional): number of realizations to draw. Default is 100
sigma_range (`float`): a float representing the number of standard deviations
implied by parameter bounds. Default is 4.0, which implies 95% confidence parameter bounds.
        verbose (`bool`, optional): flag to control output to stdout. Default is True.
scale_offset (`bool`,optional): flag to apply scale and offset to parameter bounds
when calculating variances - this is passed through to `pyemu.Cov.from_parameter_data()`.
Default is True.
Returns
`pyemu.ParameterEnsemble`: the realized parameter ensemble.
Note:
parameters are realized by parameter group. The variance of each
parameter group is used to scale the resulting geostatistical
covariance matrix Therefore, the sill of the geostatistical structures
in `struct_dict` should be 1.0
Example::
pst = pyemu.Pst("my.pst")
sd = {"struct.dat":["hkpp.dat.tpl","vka.dat.tpl"]}
        pe = pyemu.helpers.geostatistical_draws(pst,struct_dict=sd)
pe.to_csv("my_pe.csv")
"""
if isinstance(pst, str):
pst = pyemu.Pst(pst)
assert isinstance(pst, pyemu.Pst), "pst arg must be a Pst instance, not {0}".format(
type(pst)
)
if verbose:
print("building diagonal cov")
full_cov = pyemu.Cov.from_parameter_data(
pst, sigma_range=sigma_range, scale_offset=scale_offset
)
full_cov_dict = {n: float(v) for n, v in zip(full_cov.col_names, full_cov.x)}
# par_org = pst.parameter_data.copy # not sure about the need or function of this line? (BH)
par = pst.parameter_data
par_ens = []
pars_in_cov = set()
keys = list(struct_dict.keys())
keys.sort()
for gs in keys:
items = struct_dict[gs]
if verbose:
print("processing ", gs)
if isinstance(gs, str):
gss = pyemu.geostats.read_struct_file(gs)
if isinstance(gss, list):
warnings.warn(
"using first geostat structure in file {0}".format(gs), PyemuWarning
)
gs = gss[0]
else:
gs = gss
if gs.sill != 1.0:
warnings.warn("GeoStruct {0} sill != 1.0 - this is bad!".format(gs.name))
if not isinstance(items, list):
items = [items]
# items.sort()
for iitem, item in enumerate(items):
if isinstance(item, str):
assert os.path.exists(item), "file {0} not found".format(item)
if item.lower().endswith(".tpl"):
df = pyemu.pp_utils.pp_tpl_to_dataframe(item)
                elif item.lower().endswith(".csv"):
df = pd.read_csv(item)
else:
df = item
if "pargp" in df.columns:
if verbose:
print("working on pargroups {0}".format(df.pargp.unique().tolist()))
for req in ["x", "y", "parnme"]:
if req not in df.columns:
raise Exception("{0} is not in the columns".format(req))
missing = df.loc[df.parnme.apply(lambda x: x not in par.parnme), "parnme"]
if len(missing) > 0:
warnings.warn(
"the following parameters are not "
+ "in the control file: {0}".format(",".join(missing)),
PyemuWarning,
)
df = df.loc[df.parnme.apply(lambda x: x not in missing)]
if df.shape[0] == 0:
warnings.warn(
"geostatistical_draws(): empty parameter df at position {0} items for geostruct {1}, skipping...".format(
iitem, gs
)
)
continue
if "zone" not in df.columns:
df.loc[:, "zone"] = 1
zones = df.zone.unique()
aset = set(pst.adj_par_names)
for zone in zones:
df_zone = df.loc[df.zone == zone, :].copy()
df_zone = df_zone.loc[df_zone.parnme.apply(lambda x: x in aset), :]
if df_zone.shape[0] == 0:
warnings.warn(
"all parameters in zone {0} tied and/or fixed, skipping...".format(
zone
),
PyemuWarning,
)
continue
# df_zone.sort_values(by="parnme",inplace=True)
df_zone.sort_index(inplace=True)
if verbose:
print("build cov matrix")
cov = gs.covariance_matrix(df_zone.x, df_zone.y, df_zone.parnme)
if verbose:
print("done")
if verbose:
print("getting diag var cov", df_zone.shape[0])
# tpl_var = np.diag(full_cov.get(list(df_zone.parnme)).x).max()
tpl_var = max([full_cov_dict[pn] for pn in df_zone.parnme])
if verbose:
print("scaling full cov by diag var cov")
# cov.x *= tpl_var
for i in range(cov.shape[0]):
cov.x[i, :] *= tpl_var
# no fixed values here
pe = pyemu.ParameterEnsemble.from_gaussian_draw(
pst=pst, cov=cov, num_reals=num_reals, by_groups=False, fill=False
)
# df = pe.iloc[:,:]
par_ens.append(pe._df)
pars_in_cov.update(set(pe.columns))
if verbose:
print("adding remaining parameters to diagonal")
fset = set(full_cov.row_names)
diff = list(fset.difference(pars_in_cov))
if len(diff) > 0:
name_dict = {name: i for i, name in enumerate(full_cov.row_names)}
vec = np.atleast_2d(np.array([full_cov.x[name_dict[d]] for d in diff]))
cov = pyemu.Cov(x=vec, names=diff, isdiagonal=True)
# cov = full_cov.get(diff,diff)
# here we fill in the fixed values
pe = pyemu.ParameterEnsemble.from_gaussian_draw(
pst, cov, num_reals=num_reals, fill=False
)
par_ens.append(pe._df)
par_ens = pd.concat(par_ens, axis=1)
par_ens = pyemu.ParameterEnsemble(pst=pst, df=par_ens)
return par_ens
def geostatistical_prior_builder(
pst, struct_dict, sigma_range=4, verbose=False, scale_offset=False
):
"""construct a full prior covariance matrix using geostastical structures
and parameter bounds information.
Args:
pst (`pyemu.Pst`): a control file instance (or the name of control file)
struct_dict (`dict`): a dict of GeoStruct (or structure file), and list of
pilot point template files pairs. If the values in the dict are
`pd.DataFrames`, then they must have an 'x','y', and 'parnme' column.
If the filename ends in '.csv', then a pd.DataFrame is loaded,
otherwise a pilot points file is loaded.
sigma_range (`float`): a float representing the number of standard deviations
implied by parameter bounds. Default is 4.0, which implies 95% confidence parameter bounds.
        verbose (`bool`, optional): flag to control output to stdout. Default is True.
scale_offset (`bool`): a flag to apply scale and offset to parameter upper and lower bounds
before applying log transform. Passed to pyemu.Cov.from_parameter_data(). Default
is False
Returns:
`pyemu.Cov`: a covariance matrix that includes all adjustable parameters in the control
file.
Note:
The covariance of parameters associated with geostatistical structures is defined
as a mixture of GeoStruct and bounds. That is, the GeoStruct is used to construct a
pyemu.Cov, then the entire pyemu.Cov is scaled by the uncertainty implied by the bounds and
        sigma_range. Most users will want the sill of the geostruct to sum to 1.0 so that the resulting
covariance matrices have variance proportional to the parameter bounds. Sounds complicated...
Example::
pst = pyemu.Pst("my.pst")
sd = {"struct.dat":["hkpp.dat.tpl","vka.dat.tpl"]}
        cov = pyemu.helpers.geostatistical_prior_builder(pst,struct_dict=sd)
cov.to_binary("prior.jcb")
"""
if isinstance(pst, str):
pst = pyemu.Pst(pst)
assert isinstance(pst, pyemu.Pst), "pst arg must be a Pst instance, not {0}".format(
type(pst)
)
if verbose:
print("building diagonal cov")
full_cov = pyemu.Cov.from_parameter_data(
pst, sigma_range=sigma_range, scale_offset=scale_offset
)
full_cov_dict = {n: float(v) for n, v in zip(full_cov.col_names, full_cov.x)}
# full_cov = None
par = pst.parameter_data
for gs, items in struct_dict.items():
if verbose:
print("processing ", gs)
if isinstance(gs, str):
gss = pyemu.geostats.read_struct_file(gs)
if isinstance(gss, list):
warnings.warn(
"using first geostat structure in file {0}".format(gs), PyemuWarning
)
gs = gss[0]
else:
gs = gss
if gs.sill != 1.0:
warnings.warn(
"geostatistical_prior_builder() warning: geostruct sill != 1.0, user beware!"
)
if not isinstance(items, list):
items = [items]
for item in items:
if isinstance(item, str):
assert os.path.exists(item), "file {0} not found".format(item)
if item.lower().endswith(".tpl"):
df = pyemu.pp_utils.pp_tpl_to_dataframe(item)
                elif item.lower().endswith(".csv"):
df = pd.read_csv(item)
else:
df = item
for req in ["x", "y", "parnme"]:
if req not in df.columns:
raise Exception("{0} is not in the columns".format(req))
missing = df.loc[df.parnme.apply(lambda x: x not in par.parnme), "parnme"]
if len(missing) > 0:
warnings.warn(
"the following parameters are not "
+ "in the control file: {0}".format(",".join(missing)),
PyemuWarning,
)
df = df.loc[df.parnme.apply(lambda x: x not in missing)]
if "zone" not in df.columns:
df.loc[:, "zone"] = 1
zones = df.zone.unique()
aset = set(pst.adj_par_names)
for zone in zones:
df_zone = df.loc[df.zone == zone, :].copy()
df_zone = df_zone.loc[df_zone.parnme.apply(lambda x: x in aset), :]
if df_zone.shape[0] == 0:
warnings.warn(
"all parameters in zone {0} tied and/or fixed, skipping...".format(
zone
),
PyemuWarning,
)
continue
# df_zone.sort_values(by="parnme",inplace=True)
df_zone.sort_index(inplace=True)
if verbose:
print("build cov matrix")
cov = gs.covariance_matrix(df_zone.x, df_zone.y, df_zone.parnme)
if verbose:
print("done")
# find the variance in the diagonal cov
if verbose:
print("getting diag var cov", df_zone.shape[0])
# tpl_var = np.diag(full_cov.get(list(df_zone.parnme)).x).max()
tpl_var = max([full_cov_dict[pn] for pn in df_zone.parnme])
# if np.std(tpl_var) > 1.0e-6:
# warnings.warn("pars have different ranges" +\
# " , using max range as variance for all pars")
# tpl_var = tpl_var.max()
if verbose:
print("scaling full cov by diag var cov")
cov *= tpl_var
if verbose:
print("test for inversion")
try:
ci = cov.inv
except:
df_zone.to_csv("prior_builder_crash.csv")
raise Exception("error inverting cov {0}".format(cov.row_names[:3]))
if verbose:
print("replace in full cov")
full_cov.replace(cov)
# d = np.diag(full_cov.x)
# idx = np.argwhere(d==0.0)
# for i in idx:
# print(full_cov.names[i])
return full_cov
def _rmse(v1, v2):
"""return root mean squared error between v1 and v2
Args:
v1 (iterable): one vector
v2 (iterable): another vector
Returns:
scalar: root mean squared error of v1,v2
"""
return np.sqrt(np.mean(np.square(v1 - v2)))
def calc_observation_ensemble_quantiles(
ens, pst, quantiles, subset_obsnames=None, subset_obsgroups=None
):
"""Given an observation ensemble, and requested quantiles, this function calculates the requested
quantile point-by-point in the ensemble. This resulting set of values does not, however, correspond
to a single realization in the ensemble. So, this function finds the minimum weighted squared
distance to the quantile and labels it in the ensemble. Also indicates which realizations
correspond to the selected quantiles.
Args:
ens (pandas DataFrame): DataFrame of an observation ensemble
pst (pyemu.Pst object): needed to obtain observation weights
quantiles (iterable): quantiles ranging from 0-1.0 for which results requested
subset_obsnames (iterable): list of observation names to include in calculations
subset_obsgroups (iterable): list of observation groups to include in calculations
Returns:
ens (pandas DataFrame): same ens object that was input but with quantile realizations
appended as new rows labelled 'q<quantile>' (e.g. 'q0.5') for each selected quantile
quantile_idx (dictionary): dictionary with keys being quantiles and values being the
realization index corresponding to each quantile
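A minimal usage sketch is shown below; "my.pst" and "obs_ensemble.csv" are placeholder file names, not files created by this module.
Example::
import pandas as pd
import pyemu
pst = pyemu.Pst("my.pst")
ens = pd.read_csv("obs_ensemble.csv", index_col=0)
ens, quantile_idx = pyemu.helpers.calc_observation_ensemble_quantiles(
ens, pst, [0.05, 0.5, 0.95])
print(quantile_idx)  # maps e.g. "q0.5" to the closest realization's position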
"""
# TODO: handle zero weights due to PDC
quantile_idx = {}
# make sure quantiles and subset names and groups are lists
if not isinstance(quantiles, list):
quantiles = list(quantiles)
if not isinstance(subset_obsnames, list) and subset_obsnames is not None:
subset_obsnames = list(subset_obsnames)
if not isinstance(subset_obsgroups, list) and subset_obsgroups is not None:
subset_obsgroups = list(subset_obsgroups)
if "real_name" in ens.columns:
ens = ens.set_index("real_name")
# if 'base' real was lost, then the index is of type int. needs to be string later so set here
ens.index = [str(i) for i in ens.index]
if not isinstance(pst, pyemu.Pst):
raise Exception("pst object must be of type pyemu.Pst")
# get the observation data
obs = pst.observation_data.copy()
# confirm that the indices and weights line up
if False in np.unique(ens.columns == obs.index):
raise Exception("ens and pst observation names do not align")
# deal with any subsetting of observations that isn't handled through weights
trimnames = obs.index.values
if subset_obsgroups is not None and subset_obsnames is not None:
raise Exception(
"can only specify information in one of subset_obsnames of subset_obsgroups. not both"
)
if subset_obsnames is not None:
trimnames = subset_obsnames
if len(set(trimnames) - set(obs.index.values)) != 0:
raise Exception(
"the following names in subset_obsnames are not in the ensemble:\n"
+ ["{}\n".format(i) for i in (set(trimnames) - set(obs.index.values))]
)
if subset_obsgroups is not None:
if len((set(subset_obsgroups) - set(pst.obs_groups))) != 0:
raise Exception(
"the following groups in subset_obsgroups are not in pst:\n"
+ "".join(
"{}\n".format(i)
for i in (set(subset_obsgroups) - set(pst.obs_groups))
)
)
trimnames = obs.loc[obs.obgnme.isin(subset_obsgroups)].obsnme.tolist()
if len((set(trimnames) - set(obs.index.values))) != 0:
raise Exception(
"the following names in subset_obsnames are not in the ensemble:\n"
+ ["{}\n".format(i) for i in (set(trimnames) - set(obs.index.values))]
)
# trim the data to subsets (or complete )
ens_eval = ens[trimnames].copy()
weights = obs.loc[trimnames].weight.values
for cq in quantiles:
# calculate the point-wise quantile values
qfit = np.quantile(ens_eval, cq, axis=0)
# calculate the weighted distance between all reals and the desired quantile
qreal = np.argmin(
np.linalg.norm([(i - qfit) * weights for i in ens_eval.values], axis=1)
)
quantile_idx["q{}".format(cq)] = qreal
ens = ens.append(ens.iloc[qreal])
idx = ens.index.values
idx[-1] = "q{}".format(cq)
ens.set_index(idx, inplace=True)
return ens, quantile_idx
def calc_rmse_ensemble(ens, pst, bygroups=True, subset_realizations=None):
"""Calculates RMSE (without weights) to quantify fit to observations for ensemble members
Args:
ens (pandas DataFrame): DataFrame of an observation ensemble
pst (pyemu.Pst object): needed to obtain observation weights
bygroups (Bool): Flag to summarize by groups or not. Defaults to True.
subset_realizations (iterable, optional): Subset of realizations for which
to report RMSE. Defaults to None which returns all realizations.
Returns:
rmse (pandas DataFrame object): rows are realizations. Columns are groups. Content is RMSE
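A minimal usage sketch is shown below; the file names are placeholders.
Example::
import pandas as pd
import pyemu
pst = pyemu.Pst("my.pst")
ens = pd.read_csv("obs_ensemble.csv", index_col=0)
rmse = pyemu.helpers.calc_rmse_ensemble(ens, pst, bygroups=True)
print(rmse["total"].sort_values().head())  # best-fitting realizations first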
"""
# TODO: handle zero weights due to PDC
# make sure subset_realizations is a list
if not isinstance(subset_realizations, list) and subset_realizations is not None:
subset_realizations = list(subset_realizations)
if "real_name" in ens.columns:
ens = ens.set_index("real_name")
if not isinstance(pst, pyemu.Pst):
raise Exception("pst object must be of type pyemu.Pst")
# get the observation data
obs = pst.observation_data.copy()
# confirm that the indices and observations line up
if False in np.unique(ens.columns == obs.index):
raise Exception("ens and pst observation names do not align")
rmse = pd.DataFrame(index=ens.index)
if subset_realizations is not None:
rmse = rmse.loc[subset_realizations]
# calculate the rmse total first
rmse["total"] = [_rmse(ens.loc[i], obs.obsval) for i in rmse.index]
# if bygroups, do the groups as columns
if bygroups is True:
for cg in obs.obgnme.unique():
cnames = obs.loc[obs.obgnme == cg].obsnme
rmse[cg] = [
_rmse(ens.loc[i][cnames], obs.loc[cnames].obsval) for i in rmse.index
]
return rmse
def _condition_on_par_knowledge(cov, par_knowledge_dict):
"""experimental function to include conditional prior information
for one or more parameters in a full covariance matrix
"""
missing = []
for parnme in par_knowledge_dict.keys():
if parnme not in cov.row_names:
missing.append(parnme)
if len(missing):
raise Exception(
"par knowledge dict parameters not found: {0}".format(",".join(missing))
)
# build the selection matrix and sigma epsilon
# sel = pyemu.Cov(x=np.identity(cov.shape[0]),names=cov.row_names)
sel = cov.zero2d
sel = cov.to_pearson()
new_cov_diag = pyemu.Cov(x=np.diag(cov.as_2d.diagonal()), names=cov.row_names)
# new_cov_diag = cov.zero2d
for parnme, var in par_knowledge_dict.items():
idx = cov.row_names.index(parnme)
# sel.x[idx,:] = 1.0
# sel.x[idx,idx] = var
new_cov_diag.x[idx, idx] = var # cov.x[idx,idx]
new_cov_diag = sel * new_cov_diag * sel.T
for _ in range(2):
for parnme, var in par_knowledge_dict.items():
idx = cov.row_names.index(parnme)
# sel.x[idx,:] = 1.0
# sel.x[idx,idx] = var
new_cov_diag.x[idx, idx] = var # cov.x[idx,idx]
new_cov_diag = sel * new_cov_diag * sel.T
print(new_cov_diag)
return new_cov_diag
def kl_setup(
num_eig,
sr,
struct,
prefixes,
factors_file="kl_factors.dat",
islog=True,
basis_file=None,
tpl_dir=".",
):
"""setup a karhuenen-Loeve based parameterization for a given
geostatistical structure.
Args:
num_eig (`int`): the number of basis vectors to retain in the
reduced basis
sr (`flopy.reference.SpatialReference`): a spatial reference instance
struct (`str`): a PEST-style structure file. Can also be a
`pyemu.geostats.Geostruct` instance.
prefixes ([`str`]): a list of parameter prefixes to generate KL
parameterization for.
factors_file (`str`, optional): name of the PEST-style interpolation
factors file to write (can be processed with FAC2REAL).
Default is "kl_factors.dat".
islog (`bool`, optional): flag to indicate if the parameters are log transformed.
Default is True
basis_file (`str`, optional): the name of the PEST-style binary (e.g. jco)
file to write the reduced basis vectors to. Default is None (not saved).
tpl_dir (`str`, optional): the directory to write the resulting
template files to. Default is "." (current directory).
Returns:
`pandas.DataFrame`: a dataframe of parameter information.
Note:
This is the companion function to `helpers.apply_kl()`
Example::
m = flopy.modflow.Modflow.load("mymodel.nam")
prefixes = ["hk","vka","ss"]
df = pyemu.helpers.kl_setup(10,m.sr,"struct.dat",prefixes)
"""
try:
import flopy
except Exception as e:
raise Exception("error import flopy: {0}".format(str(e)))
assert isinstance(sr, flopy.utils.SpatialReference)
# for name,array in array_dict.items():
# assert isinstance(array,np.ndarray)
# assert array.shape[0] == sr.nrow
# assert array.shape[1] == sr.ncol
# assert len(name) + len(str(num_eig)) <= 12,"name too long:{0}".\
# format(name)
if isinstance(struct, str):
assert os.path.exists(struct)
gs = pyemu.utils.read_struct_file(struct)
else:
gs = struct
names = []
for i in range(sr.nrow):
names.extend(["i{0:04d}j{1:04d}".format(i, j) for j in range(sr.ncol)])
cov = gs.covariance_matrix(
sr.xcentergrid.flatten(), sr.ycentergrid.flatten(), names=names
)
eig_names = ["eig_{0:04d}".format(i) for i in range(cov.shape[0])]
trunc_basis = cov.u
trunc_basis.col_names = eig_names
# trunc_basis.col_names = [""]
if basis_file is not None:
trunc_basis.to_binary(basis_file)
trunc_basis = trunc_basis[:, :num_eig]
eig_names = eig_names[:num_eig]
pp_df = pd.DataFrame({"name": eig_names}, index=eig_names)
pp_df.loc[:, "x"] = -1.0 * sr.ncol
pp_df.loc[:, "y"] = -1.0 * sr.nrow
pp_df.loc[:, "zone"] = -999
pp_df.loc[:, "parval1"] = 1.0
pyemu.pp_utils.write_pp_file(os.path.join("temp.dat"), pp_df)
_eigen_basis_to_factor_file(
sr.nrow, sr.ncol, trunc_basis, factors_file=factors_file, islog=islog
)
dfs = []
for prefix in prefixes:
tpl_file = os.path.join(tpl_dir, "{0}.dat_kl.tpl".format(prefix))
df = pyemu.pp_utils.pilot_points_to_tpl("temp.dat", tpl_file, prefix)
shutil.copy2("temp.dat", tpl_file.replace(".tpl", ""))
df.loc[:, "tpl_file"] = tpl_file
df.loc[:, "in_file"] = tpl_file.replace(".tpl", "")
df.loc[:, "prefix"] = prefix
df.loc[:, "pargp"] = "kl_{0}".format(prefix)
dfs.append(df)
# arr = pyemu.geostats.fac2real(df,factors_file=factors_file,out_file=None)
df = pd.concat(dfs)
df.loc[:, "parubnd"] = 10.0
df.loc[:, "parlbnd"] = 0.1
return df
# back_array_dict = {}
# f = open(tpl_file,'w')
# f.write("ptf ~\n")
# f.write("name,org_val,new_val\n")
# for name,array in array_dict.items():
# mname = name+"mean"
# f.write("{0},{1:20.8E},~ {2} ~\n".format(mname,0.0,mname))
# #array -= array.mean()
# array_flat = pyemu.Matrix(x=np.atleast_2d(array.flatten()).transpose()
# ,col_names=["flat"],row_names=names,
# isdiagonal=False)
# factors = trunc_basis * array_flat
# enames = ["{0}{1:04d}".format(name,i) for i in range(num_eig)]
# for n,val in zip(enames,factors.x):
# f.write("{0},{1:20.8E},~ {0} ~\n".format(n,val[0]))
# back_array_dict[name] = (factors.T * trunc_basis).x.reshape(array.shape)
# print(array_back)
# print(factors.shape)
#
# return back_array_dict
def _eigen_basis_to_factor_file(nrow, ncol, basis, factors_file, islog=True):
assert nrow * ncol == basis.shape[0]
with open(factors_file, "w") as f:
f.write("junk.dat\n")
f.write("junk.zone.dat\n")
f.write("{0} {1}\n".format(ncol, nrow))
f.write("{0}\n".format(basis.shape[1]))
[f.write(name + "\n") for name in basis.col_names]
t = 0
if islog:
t = 1
for i in range(nrow * ncol):
f.write("{0} {1} {2} {3:8.5e}".format(i + 1, t, basis.shape[1], 0.0))
[
f.write(" {0} {1:12.8g} ".format(i + 1, w))
for i, w in enumerate(basis.x[i, :])
]
f.write("\n")
def kl_apply(par_file, basis_file, par_to_file_dict, arr_shape):
"""Apply a KL parameterization transform from basis factors to model
input arrays.
Args:
par_file (`str`): the csv file to get factor values from. Must contain
the following columns: "name", "new_val", "org_val"
basis_file (`str`): the PEST-style binary file that contains the reduced
basis
par_to_file_dict (`dict`): a mapping from KL parameter prefixes to array
file names.
arr_shape (tuple): a length 2 tuple of number of rows and columns
the resulting arrays should have.
Note:
This is the companion function to kl_setup.
This function should be called during the forward run
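A sketch of a typical forward-run call is shown below; all file names and the "hk" prefix are placeholders that must match what `kl_setup()` produced for your model.
Example::
import pyemu
pyemu.helpers.kl_apply("hk.dat_kl", "basis.jco",
{"hk": "hk_layer_1.ref"}, (100, 100))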
"""
df = pd.read_csv(par_file)
assert "name" in df.columns
assert "org_val" in df.columns
assert "new_val" in df.columns
df.loc[:, "prefix"] = df.name.apply(lambda x: x[:-4])
for prefix in df.prefix.unique():
assert prefix in par_to_file_dict.keys(), "missing prefix:{0}".format(prefix)
basis = pyemu.Matrix.from_binary(basis_file)
assert basis.shape[1] == arr_shape[0] * arr_shape[1]
arr_min = 1.0e-10 # a temp hack
# means = df.loc[df.name.apply(lambda x: x.endswith("mean")),:]
# print(means)
df = df.loc[df.name.apply(lambda x: not x.endswith("mean")), :]
for prefix, filename in par_to_file_dict.items():
factors = pyemu.Matrix.from_dataframe(df.loc[df.prefix == prefix, ["new_val"]])
factors.autoalign = False
basis_prefix = basis[: factors.shape[0], :]
arr = (factors.T * basis_prefix).x.reshape(arr_shape)
# arr += means.loc[means.prefix==prefix,"new_val"].values
arr[arr < arr_min] = arr_min
np.savetxt(filename, arr, fmt="%20.8E")
def zero_order_tikhonov(pst, parbounds=True, par_groups=None, reset=True):
"""setup preferred-value regularization in a pest control file.
Args:
pst (`pyemu.Pst`): the control file instance
parbounds (`bool`, optional): flag to weight the new prior information
equations according to parameter bound width - approx the KL
transform. Default is True
par_groups (`list`): a list of parameter groups to build PI equations for.
If None, all adjustable parameters are used. Default is None
reset (`bool`): a flag to remove any existing prior information equations
in the control file. Default is True
Example::
pst = pyemu.Pst("my.pst")
pyemu.helpers.zero_order_tikhonov(pst)
pst.write("my_reg.pst")
"""
if par_groups is None:
par_groups = pst.par_groups
pilbl, obgnme, weight, equation = [], [], [], []
for idx, row in pst.parameter_data.iterrows():
pt = row["partrans"].lower()
try:
pt = pt.decode()
except:
pass
if pt not in ["tied", "fixed"] and row["pargp"] in par_groups:
pilbl.append(row["parnme"])
weight.append(1.0)
ogp_name = "regul" + row["pargp"]
obgnme.append(ogp_name[:12])
parnme = row["parnme"]
parval1 = row["parval1"]
if pt == "log":
parnme = "log(" + parnme + ")"
parval1 = np.log10(parval1)
eq = "1.0 * " + parnme + " ={0:15.6E}".format(parval1)
equation.append(eq)
if reset:
pst.prior_information = pd.DataFrame(
{"pilbl": pilbl, "equation": equation, "obgnme": obgnme, "weight": weight}
)
else:
pi = pd.DataFrame(
{"pilbl": pilbl, "equation": equation, "obgnme": obgnme, "weight": weight}
)
pst.prior_information = pst.prior_information.append(pi)
if parbounds:
_regweight_from_parbound(pst)
if pst.control_data.pestmode == "estimation":
pst.control_data.pestmode = "regularization"
def _regweight_from_parbound(pst):
"""sets regularization weights from parameter bounds
which approximates the KL expansion. Called by
zero_order_tikhonov().
"""
pst.parameter_data.index = pst.parameter_data.parnme
pst.prior_information.index = pst.prior_information.pilbl
for idx, parnme in enumerate(pst.prior_information.pilbl):
if parnme in pst.parameter_data.index:
row = pst.parameter_data.loc[parnme, :]
lbnd, ubnd = row["parlbnd"], row["parubnd"]
if row["partrans"].lower() == "log":
weight = 1.0 / (np.log10(ubnd) - np.log10(lbnd))
else:
weight = 1.0 / (ubnd - lbnd)
pst.prior_information.loc[parnme, "weight"] = weight
else:
print(
"prior information name does not correspond"
+ " to a parameter: "
+ str(parnme)
)
def first_order_pearson_tikhonov(pst, cov, reset=True, abs_drop_tol=1.0e-3):
"""setup preferred-difference regularization from a covariance matrix.
Args:
pst (`pyemu.Pst`): the PEST control file
cov (`pyemu.Cov`): a covariance matrix instance with
some or all of the parameters listed in `pst`.
reset (`bool`): a flag to remove any existing prior information equations
in the control file. Default is True
abs_drop_tol (`float`, optional): tolerance to control how many pi equations
are written. If the absolute value of the Pearson CC is less than
abs_drop_tol, the prior information equation will not be included in
the control file.
Note:
The weights on the prior information equations are the Pearson
correlation coefficients implied by covariance matrix.
Example::
pst = pyemu.Pst("my.pst")
cov = pyemu.Cov.from_ascii("my.cov")
pyemu.helpers.first_order_pearson_tikhonov(pst,cov)
pst.write("my_reg.pst")
"""
assert isinstance(cov, pyemu.Cov)
print("getting CC matrix")
cc_mat = cov.to_pearson()
# print(pst.parameter_data.dtypes)
try:
ptrans = pst.parameter_data.partrans.apply(lambda x: x.decode()).to_dict()
except:
ptrans = pst.parameter_data.partrans.to_dict()
pi_num = pst.prior_information.shape[0] + 1
pilbl, obgnme, weight, equation = [], [], [], []
sadj_names = set(pst.adj_par_names)
print("processing")
for i, iname in enumerate(cc_mat.row_names):
if iname not in sadj_names:
continue
for j, jname in enumerate(cc_mat.row_names[i + 1 :]):
if jname not in sadj_names:
continue
# print(i,iname,i+j+1,jname)
cc = cc_mat.x[i, j + i + 1]
if cc < abs_drop_tol:
continue
pilbl.append("pcc_{0}".format(pi_num))
iiname = str(iname)
if str(ptrans[iname]) == "log":
iiname = "log(" + iname + ")"
jjname = str(jname)
if str(ptrans[jname]) == "log":
jjname = "log(" + jname + ")"
equation.append("1.0 * {0} - 1.0 * {1} = 0.0".format(iiname, jjname))
weight.append(cc)
obgnme.append("regul_cc")
pi_num += 1
df = pd.DataFrame(
{"pilbl": pilbl, "equation": equation, "obgnme": obgnme, "weight": weight}
)
df.index = df.pilbl
if reset:
pst.prior_information = df
else:
pst.prior_information = pst.prior_information.append(df)
if pst.control_data.pestmode == "estimation":
pst.control_data.pestmode = "regularization"
def simple_tpl_from_pars(parnames, tplfilename="model.input.tpl"):
"""Make a simple template file from a list of parameter names.
Args:
parnames ([`str`]): list of parameter names to put in the
new template file
tplfilename (`str`): Name of the template file to create. Default
is "model.input.tpl"
Note:
writes a file `tplfilename` with each parameter name in `parnames` on a line
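Example (parameter names are arbitrary placeholders)::
pyemu.helpers.simple_tpl_from_pars(["hk1", "hk2", "rch1"], tplfilename="model.input.tpl")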
"""
with open(tplfilename, "w") as ofp:
ofp.write("ptf ~\n")
[ofp.write("~{0:^12}~\n".format(cname)) for cname in parnames]
def simple_ins_from_obs(obsnames, insfilename="model.output.ins"):
"""write a simple instruction file that reads the values named
in obsnames in order, one per line from a model output file
Args:
obsnames ([`str`]): list of observation names to put in the
new instruction file
insfilename (`str`): the name of the instruction file to
create. Default is "model.output.ins"
Note:
writes a file `insfilename` with each observation read off
of a single line
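Example (observation names are arbitrary placeholders)::
pyemu.helpers.simple_ins_from_obs(["h01", "h02", "flx01"], insfilename="model.output.ins")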
"""
with open(insfilename, "w") as ofp:
ofp.write("pif ~\n")
[ofp.write("!{0}!\n".format(cob)) for cob in obsnames]
def pst_from_parnames_obsnames(
parnames, obsnames, tplfilename="model.input.tpl", insfilename="model.output.ins"
):
"""Creates a Pst object from a list of parameter names and a list of observation names.
Args:
parnames ([`str`]): list of parameter names
obsnames ([`str`]): list of observation names
tplfilename (`str`): template filename. Default is "model.input.tpl"
insfilename (`str`): instruction filename. Default is "model.output.ins"
Returns:
`pyemu.Pst`: the generic control file
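Example (a minimal sketch with placeholder names)::
pst = pyemu.helpers.pst_from_parnames_obsnames(["p1", "p2"], ["o1", "o2"])
pst.control_data.noptmax = 0
pst.write("generic.pst")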
"""
simple_tpl_from_pars(parnames, tplfilename)
simple_ins_from_obs(obsnames, insfilename)
modelinputfilename = tplfilename.replace(".tpl", "")
modeloutputfilename = insfilename.replace(".ins", "")
return pyemu.Pst.from_io_files(
tplfilename, modelinputfilename, insfilename, modeloutputfilename
)
def read_pestpp_runstorage(filename, irun=0, with_metadata=False):
"""read pars and obs from a specific run in a pest++ serialized
run storage file into dataframes.
Args:
filename (`str`): the name of the run storage file
irun (`int`): the run id to process. If 'all', then all runs are
read. Default is 0
with_metadata (`bool`): flag to return run stats and info txt as well
Returns:
tuple containing
- **pandas.DataFrame**: parameter information
- **pandas.DataFrame**: observation information
- **pandas.DataFrame**: optionally run status and info txt.
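A minimal sketch is shown below; "pest.rns" is a placeholder run storage file name.
Example::
par_df, obs_df = pyemu.helpers.read_pestpp_runstorage("pest.rns", irun=0)
par_df, obs_df, meta = pyemu.helpers.read_pestpp_runstorage(
"pest.rns", irun="all", with_metadata=True)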
"""
header_dtype = np.dtype(
[
("n_runs", np.int64),
("run_size", np.int64),
("p_name_size", np.int64),
("o_name_size", np.int64),
]
)
try:
irun = int(irun)
except:
if irun.lower() == "all":
irun = irun.lower()
else:
raise Exception(
"unrecognized 'irun': should be int or 'all', not '{0}'".format(irun)
)
def status_str(r_status):
if r_status == 0:
return "not completed"
if r_status == 1:
return "completed"
if r_status == -100:
return "canceled"
else:
return "failed"
assert os.path.exists(filename)
f = open(filename, "rb")
header = np.fromfile(f, dtype=header_dtype, count=1)
p_name_size, o_name_size = header["p_name_size"][0], header["o_name_size"][0]
par_names = (
struct.unpack("{0}s".format(p_name_size), f.read(p_name_size))[0]
.strip()
.lower()
.decode()
.split("\0")[:-1]
)
obs_names = (
struct.unpack("{0}s".format(o_name_size), f.read(o_name_size))[0]
.strip()
.lower()
.decode()
.split("\0")[:-1]
)
n_runs, run_size = header["n_runs"][0], header["run_size"][0]
run_start = f.tell()
def _read_run(irun):
f.seek(run_start + (irun * run_size))
r_status = np.fromfile(f, dtype=np.int8, count=1)
info_txt = struct.unpack("41s", f.read(41))[0].strip().lower().decode()
par_vals = np.fromfile(f, dtype=np.float64, count=len(par_names) + 1)[1:]
obs_vals = np.fromfile(f, dtype=np.float64, count=len(obs_names) + 1)[:-1]
par_df = pd.DataFrame({"parnme": par_names, "parval1": par_vals})
par_df.index = par_df.pop("parnme")
obs_df = pd.DataFrame({"obsnme": obs_names, "obsval": obs_vals})
obs_df.index = obs_df.pop("obsnme")
return r_status, info_txt, par_df, obs_df
if irun == "all":
par_dfs, obs_dfs = [], []
r_stats, txts = [], []
for irun in range(n_runs):
# print(irun)
r_status, info_txt, par_df, obs_df = _read_run(irun)
par_dfs.append(par_df)
obs_dfs.append(obs_df)
r_stats.append(r_status)
txts.append(info_txt)
par_df = pd.concat(par_dfs, axis=1).T
par_df.index = np.arange(n_runs)
obs_df = pd.concat(obs_dfs, axis=1).T
obs_df.index = np.arange(n_runs)
meta_data = pd.DataFrame({"r_status": r_stats, "info_txt": txts})
meta_data.loc[:, "status"] = meta_data.r_status.apply(status_str)
else:
assert irun < n_runs
r_status, info_txt, par_df, obs_df = _read_run(irun)
meta_data = pd.DataFrame({"r_status": [r_status], "info_txt": [info_txt]})
meta_data.loc[:, "status"] = meta_data.r_status.apply(status_str)
f.close()
if with_metadata:
return par_df, obs_df, meta_data
else:
return par_df, obs_df
def jco_from_pestpp_runstorage(rnj_filename, pst_filename):
"""read pars and obs from a pest++ serialized run storage
file (e.g., .rnj) and return jacobian matrix instance
Args:
rnj_filename (`str`): the name of the run storage file
pst_filename (`str`): the name of the pst file
Note:
This can then be passed to Jco.to_binary or Jco.to_coo, etc., to write jco
file in a subsequent step to avoid memory resource issues associated
with very large problems.
Returns:
`pyemu.Jco`: a jacobian matrix constructed from the run results and
pest control file information.
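A minimal sketch is shown below; the file names are placeholders.
Example::
jco = pyemu.helpers.jco_from_pestpp_runstorage("my.rnj", "my.pst")
jco.to_binary("my.jco")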
"""
header_dtype = np.dtype(
[
("n_runs", np.int64),
("run_size", np.int64),
("p_name_size", np.int64),
("o_name_size", np.int64),
]
)
pst = pyemu.Pst(pst_filename)
par = pst.parameter_data
log_pars = set(par.loc[par.partrans == "log", "parnme"].values)
with open(rnj_filename, "rb") as f:
header = np.fromfile(f, dtype=header_dtype, count=1)
try:
base_par, base_obs = read_pestpp_runstorage(rnj_filename, irun=0)
except:
raise Exception("couldn't get base run...")
par = par.loc[base_par.index, :]
li = base_par.index.map(lambda x: par.loc[x, "partrans"] == "log")
base_par.loc[li] = base_par.loc[li].apply(np.log10)
jco_cols = {}
for irun in range(1, int(header["n_runs"])):
par_df, obs_df = read_pestpp_runstorage(rnj_filename, irun=irun)
par_df.loc[li] = par_df.loc[li].apply(np.log10)
obs_diff = base_obs - obs_df
par_diff = base_par - par_df
# check only one non-zero element per col(par)
if len(par_diff[par_diff.parval1 != 0]) > 1:
raise Exception(
"more than one par diff - looks like the file wasn't created during jco filling..."
)
parnme = par_diff[par_diff.parval1 != 0].index[0]
parval = par_diff.parval1.loc[parnme]
# derivatives
jco_col = obs_diff / parval
# some tracking, checks
print("processing par {0}: {1}...".format(irun, parnme))
print(
"%nzsens: {0}%...".format(
(jco_col[abs(jco_col.obsval) > 1e-8].shape[0] / jco_col.shape[0])
* 100.0
)
)
jco_cols[parnme] = jco_col.obsval
jco_cols = pd.DataFrame.from_records(
data=jco_cols, index=list(obs_diff.index.values)
)
jco_cols = pyemu.Jco.from_dataframe(jco_cols)
# write # memory considerations important here for very large matrices - break into chunks...
# jco_fnam = "{0}".format(filename[:-4]+".jco")
# jco_cols.to_binary(filename=jco_fnam, droptol=None, chunk=None)
return jco_cols
def parse_dir_for_io_files(d, prepend_path=False):
"""find template/input file pairs and instruction file/output file
pairs by extension.
Args:
d (`str`): directory to search for interface files
prepend_path (`bool`, optional): flag to prepend `d` to each file name.
Default is False
Note:
the return values from this function can be passed straight to
`pyemu.Pst.from_io_files()` classmethod constructor. Assumes the
template file names are <input_file>.tpl and instruction file names
are <output_file>.ins.
Returns:
tuple containing
- **[`str`]**: list of template files in d
- **[`str`]**: list of input files in d
- **[`str`]**: list of instruction files in d
- **[`str`]**: list of output files in d
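A minimal sketch is shown below; "template_ws" is a placeholder directory name.
Example::
tpl_files, in_files, ins_files, out_files = pyemu.helpers.parse_dir_for_io_files(
"template_ws", prepend_path=True)
pst = pyemu.Pst.from_io_files(tpl_files, in_files, ins_files, out_files)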
"""
files = os.listdir(d)
tpl_files = [f for f in files if f.endswith(".tpl")]
in_files = [f.replace(".tpl", "") for f in tpl_files]
ins_files = [f for f in files if f.endswith(".ins")]
out_files = [f.replace(".ins", "") for f in ins_files]
if prepend_path:
tpl_files = [os.path.join(d, item) for item in tpl_files]
in_files = [os.path.join(d, item) for item in in_files]
ins_files = [os.path.join(d, item) for item in ins_files]
out_files = [os.path.join(d, item) for item in out_files]
return tpl_files, in_files, ins_files, out_files
def pst_from_io_files(
tpl_files, in_files, ins_files, out_files, pst_filename=None, pst_path=None
):
"""create a Pst instance from model interface files.
Args:
tpl_files ([`str`]): list of template file names
in_files ([`str`]): list of model input file names (pairs with template files)
ins_files ([`str`]): list of instruction file names
out_files ([`str`]): list of model output file names (pairs with instruction files)
pst_filename (`str`): name of control file to write. If None, no file is written.
Default is None
pst_path (`str`): the path to append to the template_file and in_file in the control file. If
not None, then any existing path in front of the template or in file is split off
and pst_path is prepended. If python is being run in a directory other than where the control
file will reside, it is useful to pass `pst_path` as `.`. Default is None
Returns:
`Pst`: new control file instance with parameter and observation names
found in `tpl_files` and `ins_files`, respectively.
Note:
calls `pyemu.helpers.pst_from_io_files()`
Assigns generic values for parameter info. Tries to use INSCHEK
to set somewhat meaningful observation values.
All file paths are relative to where python is running.
Example::
tpl_files = ["my.tpl"]
in_files = ["my.in"]
ins_files = ["my.ins"]
out_files = ["my.out"]
pst = pyemu.Pst.from_io_files(tpl_files,in_files,ins_files,out_files)
pst.control_data.noptmax = 0
pst.write("my.pst)
"""
par_names = set()
if not isinstance(tpl_files, list):
tpl_files = [tpl_files]
if not isinstance(in_files, list):
in_files = [in_files]
assert len(in_files) == len(tpl_files), "len(in_files) != len(tpl_files)"
for tpl_file in tpl_files:
assert os.path.exists(tpl_file), "template file not found: " + str(tpl_file)
# new_names = [name for name in pyemu.pst_utils.parse_tpl_file(tpl_file) if name not in par_names]
# par_names.extend(new_names)
new_names = pyemu.pst_utils.parse_tpl_file(tpl_file)
par_names.update(new_names)
if not isinstance(ins_files, list):
ins_files = [ins_files]
if not isinstance(out_files, list):
out_files = [out_files]
assert len(ins_files) == len(out_files), "len(ins_files) != len(out_files)"
obs_names = []
for ins_file in ins_files:
assert os.path.exists(ins_file), "instruction file not found: " + str(ins_file)
obs_names.extend(pyemu.pst_utils.parse_ins_file(ins_file))
new_pst = pyemu.pst_utils.generic_pst(list(par_names), list(obs_names))
if "window" in platform.platform().lower() and pst_path == ".":
pst_path = ""
# new_pst.instruction_files = ins_files
# new_pst.output_files = out_files
new_pst.model_output_data = pd.DataFrame(
{"pest_file": ins_files, "model_file": out_files}, index=ins_files
)
# try to run inschek to find the observation values
# do this here with full paths to files
pyemu.pst_utils.try_process_output_pst(new_pst)
if pst_path is not None:
tpl_files = [
os.path.join(pst_path, os.path.split(tpl_file)[-1])
for tpl_file in tpl_files
]
in_files = [
os.path.join(pst_path, os.path.split(in_file)[-1]) for in_file in in_files
]
# now set the true path location to instruction files and output files
ins_files = [
os.path.join(pst_path, os.path.split(ins_file)[-1])
for ins_file in ins_files
]
out_files = [
os.path.join(pst_path, os.path.split(out_file)[-1])
for out_file in out_files
]
new_pst.model_input_data = pd.DataFrame(
{"pest_file": tpl_files, "model_file": in_files}, index=tpl_files
)
new_pst.model_output_data = pd.DataFrame(
{"pest_file": ins_files, "model_file": out_files}, index=ins_files
)
new_pst.try_parse_name_metadata()
if pst_filename:
new_pst.write(pst_filename)
return new_pst
wildass_guess_par_bounds_dict = {
"hk": [0.01, 100.0],
"vka": [0.1, 10.0],
"sy": [0.25, 1.75],
"ss": [0.1, 10.0],
"cond": [0.01, 100.0],
"flux": [0.25, 1.75],
"rech": [0.9, 1.1],
"stage": [0.9, 1.1],
}
class PstFromFlopyModel(object):
"""a monster helper class to setup a complex PEST interface around
an existing MODFLOW-2005-family model.
Args:
model (`flopy.mbase`): a loaded flopy model instance. If model is a str, it is treated as a
MODFLOW nam file (requires org_model_ws)
new_model_ws (`str`): a directory where the new version of MODFLOW input files and PEST(++)
files will be written
org_model_ws (`str`): directory to existing MODFLOW model files. Required if model argument
is a str. Default is None
pp_props ([[`str`,[`int`]]]): pilot point multiplier parameters for grid-based properties.
A nested list of grid-scale model properties to parameterize using
name, iterable pairs. For 3D properties, the iterable is zero-based
layer indices. For example, ["lpf.hk",[0,1,2,]] would setup pilot point multiplier
parameters for layer property file horizontal hydraulic conductivity for model
layers 1,2, and 3. For time-varying properties (e.g. recharge), the
iterable is for zero-based stress period indices. For example, ["rch.rech",[0,4,10,15]]
would setup pilot point multiplier parameters for recharge for stress
period 1,5,11,and 16.
const_props ([[`str`,[`int`]]]): constant (uniform) multiplier parameters for grid-based properties.
A nested list of grid-scale model properties to parameterize using
name, iterable pairs. For 3D properties, the iterable is zero-based
layer indices. For example, ["lpf.hk",[0,1,2,]] would setup constant (uniform) multiplier
parameters for layer property file horizontal hydraulic conductivity for model
layers 1,2, and 3. For time-varying properties (e.g. recharge), the
iterable is for zero-based stress period indices. For example, ["rch.rech",[0,4,10,15]]
would setup constant (uniform) multiplier parameters for recharge for stress
period 1,5,11,and 16.
temporal_list_props ([[`str`,[`int`]]]): list-type input stress-period level multiplier parameters.
A nested list of list-type input elements to parameterize using
name, iterable pairs. The iterable is zero-based stress-period indices.
For example, to setup multipliers for WEL flux and for RIV conductance,
temporal_list_props = [["wel.flux",[0,1,2]],["riv.cond",None]] would setup
multiplier parameters for well flux for stress periods 1,2 and 3 and
would setup one single river conductance multiplier parameter that is applied
to all stress periods
spatial_list_props ([[`str`,[`int`]]]): list-type input for spatial multiplier parameters.
A nested list of list-type elements to parameterize using
names (e.g. [["riv.cond",0],["wel.flux",1] to setup up cell-based parameters for
each list-type element listed. These multiplier parameters are applied across
all stress periods. For this to work, there must be the same number of entries
for all stress periods. If more than one list element of the same type is in a single
cell, only one parameter is used to multiply all lists in the same cell.
grid_props ([[`str`,[`int`]]]): grid-based (every active model cell) multiplier parameters.
A nested list of grid-scale model properties to parameterize using
name, iterable pairs. For 3D properties, the iterable is zero-based
layer indices (e.g., ["lpf.hk",[0,1,2,]] would setup a multiplier
parameter for layer property file horizontal hydraulic conductivity for model
layers 1,2, and 3 in every active model cell). For time-varying properties (e.g. recharge), the
iterable is for zero-based stress period indices. For example, ["rch.rech",[0,4,10,15]]
would setup grid-based multiplier parameters in every active model cell
for recharge for stress period 1,5,11,and 16.
sfr_pars (`bool`): setup parameters for the stream flow routing modflow package.
If list is passed it defines the parameters to set up.
temporal_sfr_pars (`bool`): flag to include stress-period level spatially-global multiplier parameters in addition to
the spatially-discrete `sfr_pars`. Requires `sfr_pars` to be passed. Default is False
grid_geostruct (`pyemu.geostats.GeoStruct`): the geostatistical structure to build the prior parameter covariance matrix
elements for grid-based parameters. If None, a generic GeoStruct is created
using an "a" parameter that is 10 times the max cell size. Default is None
pp_space (`int`): number of grid cells between pilot points. If None, use the default
in pyemu.pp_utils.setup_pilot_points_grid. Default is None
zone_props ([[`str`,[`int`]]]): zone-based multiplier parameters.
A nested list of zone-based model properties to parameterize using
name, iterable pairs. For 3D properties, the iterable is zero-based
layer indices (e.g., ["lpf.hk",[0,1,2,]] would setup a multiplier
parameter for layer property file horizontal hydraulic conductivity for model
layers 1,2, and 3 for unique zone values in the ibound array.
For time-varying properties (e.g. recharge), the iterable is for
zero-based stress period indices. For example, ["rch.rech",[0,4,10,15]]
would setup zone-based multiplier parameters for recharge for stress
period 1,5,11,and 16.
pp_geostruct (`pyemu.geostats.GeoStruct`): the geostatistical structure to use for building the prior parameter
covariance matrix for pilot point parameters. If None, a generic
GeoStruct is created using pp_space and grid-spacing information.
Default is None
par_bounds_dict (`dict`): a dictionary of model property/boundary condition name, upper-lower bound pairs.
For example, par_bounds_dict = {"hk":[0.01,100.0],"flux":[0.5,2.0]} would
set the bounds for horizontal hydraulic conductivity to
0.001 and 100.0 and set the bounds for flux parameters to 0.5 and
2.0. For parameters not found in par_bounds_dict,
`pyemu.helpers.wildass_guess_par_bounds_dict` is
used to set somewhat meaningful bounds. Default is None
temporal_list_geostruct (`pyemu.geostats.GeoStruct`): the geostatistical structure to
build the prior parameter covariance matrix
for time-varying list-type multiplier parameters. This GeoStruct
expresses the time correlation so that the 'a' parameter is the length of
time that boundary condition multiplier parameters are correlated across.
If None, then a generic GeoStruct is created that uses an 'a' parameter
of 3 stress periods. Default is None
spatial_list_geostruct (`pyemu.geostats.GeoStruct`): the geostatistical structure to
build the prior parameter covariance matrix
for spatially-varying list-type multiplier parameters.
If None, a generic GeoStruct is created using an "a" parameter that
is 10 times the max cell size. Default is None.
remove_existing (`bool`): a flag to remove an existing new_model_ws directory. If False and
new_model_ws exists, an exception is raised. If True and new_model_ws
exists, the directory is destroyed - user beware! Default is False.
k_zone_dict (`dict`): a dictionary of zero-based layer index, zone array pairs.
e.g. {lay: np.2darray} Used to
override using ibound zones for zone-based parameterization. If None,
use ibound values greater than zero as zones. Alternatively a dictionary of dictionaries
can be passed to allow different zones to be defined for different parameters.
e.g. {"upw.hk" {lay: np.2darray}, "extra.rc11" {lay: np.2darray}}
or {"hk" {lay: np.2darray}, "rc11" {lay: np.2darray}}
use_pp_zones (`bool`): a flag to use ibound zones (or k_zone_dict, see above) as pilot
point zones. If False, ibound values greater than zero are treated as
a single zone for pilot points. Default is False
obssim_smp_pairs ([[`str`,`str`]]): a list of observed-simulated PEST-type SMP file
pairs to get observations
from and include in the control file. Default is []
external_tpl_in_pairs ([[`str`,`str`]]): a list of existing template file, model input
file pairs to parse parameters
from and include in the control file. Default is []
external_ins_out_pairs ([[`str`,`str`]]): a list of existing instruction file,
model output file pairs to parse
observations from and include in the control file. Default is []
extra_pre_cmds ([`str`]): a list of preprocessing commands to add to the forward_run.py script
commands are executed with os.system() within forward_run.py. Default is None.
redirect_forward_output (`bool`): flag for whether to redirect forward model output to text files (True) or
allow model output to be directed to the screen (False). Default is True
extra_post_cmds ([`str`]): a list of post-processing commands to add to the forward_run.py script.
Commands are executed with os.system() within forward_run.py. Default is None.
tmp_files ([`str`]): a list of temporary files that should be removed at the start of the forward
run script. Default is [].
model_exe_name (`str`): binary name to run modflow. If None, a default from flopy is used,
which is dangerous because of the non-standard binary names
(e.g. MODFLOW-NWT_x64, MODFLOWNWT, mfnwt, etc). Default is None.
build_prior (`bool`): flag to build prior covariance matrix. Default is True
sfr_obs (`bool`): flag to include observations of flow and aquifer exchange from
the sfr ASCII output file
hfb_pars (`bool`): add HFB parameters. uses pyemu.gw_utils.write_hfb_template(). the resulting
HFB pars have parval1 equal to the values in the original file and use the
spatial_list_geostruct to build geostatistical covariates between parameters
kl_props ([[`str`,[`int`]]]): karhunen-loeve based multiplier parameters.
A nested list of KL-based model properties to parameterize using
name, iterable pairs. For 3D properties, the iterable is zero-based
layer indices (e.g., ["lpf.hk",[0,1,2,]] would setup a multiplier
parameter for layer property file horizontal hydraulic conductivity for model
layers 1,2, and 3 for unique zone values in the ibound array.
For time-varying properties (e.g. recharge), the iterable is for
zero-based stress period indices. For example, ["rch.rech",[0,4,10,15]]
would setup zone-based multiplier parameters for recharge for stress
period 1,5,11,and 16.
kl_num_eig (`int`): the number of KL-based eigenvector multiplier parameters to use for each
KL parameter set. default is 100
kl_geostruct (`pyemu.geostats.Geostruct`): the geostatistical structure
to build the prior parameter covariance matrix
elements for KL-based parameters. If None, a generic GeoStruct is created
using an "a" parameter that is 10 times the max cell size. Default is None
Note:
Sets up multiplier parameters for an existing MODFLOW model.
Does all kinds of coolness like building a
meaningful prior, assigning somewhat meaningful parameter groups and
bounds, writes a forward_run.py script with all the calls needed to
implement multiplier parameters, run MODFLOW and post-process.
Works a lot better if TEMPCHEK, INSCHEK and PESTCHEK are available in the
system path variable
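A hedged usage sketch is shown below; the nam file, workspace names and the "upw.hk" property are placeholders and assume a MODFLOW-2005/NWT model with a UPW package.
Example::
import flopy
m = flopy.modflow.Modflow.load("my.nam", model_ws="original_ws", check=False)
ph = PstFromFlopyModel(m, new_model_ws="template_ws",
grid_props=[["upw.hk", [0]]], remove_existing=True)
pst = ph.pst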
"""
def __init__(
self,
model,
new_model_ws,
org_model_ws=None,
pp_props=[],
const_props=[],
temporal_bc_props=[],
temporal_list_props=[],
grid_props=[],
grid_geostruct=None,
pp_space=None,
zone_props=[],
pp_geostruct=None,
par_bounds_dict=None,
sfr_pars=False,
temporal_sfr_pars=False,
temporal_list_geostruct=None,
remove_existing=False,
k_zone_dict=None,
mflist_waterbudget=True,
mfhyd=True,
hds_kperk=[],
use_pp_zones=False,
obssim_smp_pairs=None,
external_tpl_in_pairs=None,
external_ins_out_pairs=None,
extra_pre_cmds=None,
extra_model_cmds=None,
extra_post_cmds=None,
redirect_forward_output=True,
tmp_files=None,
model_exe_name=None,
build_prior=True,
sfr_obs=False,
spatial_bc_props=[],
spatial_list_props=[],
spatial_list_geostruct=None,
hfb_pars=False,
kl_props=None,
kl_num_eig=100,
kl_geostruct=None,
):
self.logger = pyemu.logger.Logger("PstFromFlopyModel.log")
self.log = self.logger.log
self.logger.echo = True
self.zn_suffix = "_zn"
self.gr_suffix = "_gr"
self.pp_suffix = "_pp"
self.cn_suffix = "_cn"
self.kl_suffix = "_kl"
self.arr_org = "arr_org"
self.arr_mlt = "arr_mlt"
self.list_org = "list_org"
self.list_mlt = "list_mlt"
self.forward_run_file = "forward_run.py"
self.remove_existing = remove_existing
self.external_tpl_in_pairs = external_tpl_in_pairs
self.external_ins_out_pairs = external_ins_out_pairs
self._setup_model(model, org_model_ws, new_model_ws)
self._add_external()
self.arr_mult_dfs = []
self.par_bounds_dict = par_bounds_dict
self.pp_props = pp_props
self.pp_space = pp_space
self.pp_geostruct = pp_geostruct
self.use_pp_zones = use_pp_zones
self.const_props = const_props
self.grid_props = grid_props
self.grid_geostruct = grid_geostruct
self.zone_props = zone_props
self.kl_props = kl_props
self.kl_geostruct = kl_geostruct
self.kl_num_eig = kl_num_eig
if len(temporal_bc_props) > 0:
if len(temporal_list_props) > 0:
self.logger.lraise(
"temporal_bc_props and temporal_list_props. "
+ "temporal_bc_props is deprecated and replaced by temporal_list_props"
)
self.logger.warn(
"temporal_bc_props is deprecated and replaced by temporal_list_props"
)
temporal_list_props = temporal_bc_props
if len(spatial_bc_props) > 0:
if len(spatial_list_props) > 0:
self.logger.lraise(
"spatial_bc_props and spatial_list_props. "
+ "spatial_bc_props is deprecated and replaced by spatial_list_props"
)
self.logger.warn(
"spatial_bc_props is deprecated and replaced by spatial_list_props"
)
spatial_list_props = spatial_bc_props
self.temporal_list_props = temporal_list_props
self.temporal_list_geostruct = temporal_list_geostruct
if self.temporal_list_geostruct is None:
v = pyemu.geostats.ExpVario(
contribution=1.0, a=180.0
) # 180 correlation length
self.temporal_list_geostruct = pyemu.geostats.GeoStruct(
variograms=v, name="temporal_list_geostruct"
)
self.spatial_list_props = spatial_list_props
self.spatial_list_geostruct = spatial_list_geostruct
if self.spatial_list_geostruct is None:
dist = 10 * float(
max(self.m.dis.delr.array.max(), self.m.dis.delc.array.max())
)
v = pyemu.geostats.ExpVario(contribution=1.0, a=dist)
self.spatial_list_geostruct = pyemu.geostats.GeoStruct(
variograms=v, name="spatial_list_geostruct"
)
self.obssim_smp_pairs = obssim_smp_pairs
self.hds_kperk = hds_kperk
self.sfr_obs = sfr_obs
self.frun_pre_lines = []
self.frun_model_lines = []
self.frun_post_lines = []
self.tmp_files = []
self.extra_forward_imports = []
if tmp_files is not None:
if not isinstance(tmp_files, list):
tmp_files = [tmp_files]
self.tmp_files.extend(tmp_files)
if k_zone_dict is None:
self.k_zone_dict = {
k: self.m.bas6.ibound[k].array for k in np.arange(self.m.nlay)
}
else:
# check if k_zone_dict is a dictionary of dictionaries
if np.all([isinstance(v, dict) for v in k_zone_dict.values()]):
# loop over outer keys
for par_key in k_zone_dict.keys():
for k, arr in k_zone_dict[par_key].items():
if k not in np.arange(self.m.nlay):
self.logger.lraise(
"k_zone_dict for par {1}, layer index not in nlay:{0}".format(
k, par_key
)
)
if arr.shape != (self.m.nrow, self.m.ncol):
self.logger.lraise(
"k_zone_dict arr for k {0} for par{2} has wrong shape:{1}".format(
k, arr.shape, par_key
)
)
else:
for k, arr in k_zone_dict.items():
if k not in np.arange(self.m.nlay):
self.logger.lraise(
"k_zone_dict layer index not in nlay:{0}".format(k)
)
if arr.shape != (self.m.nrow, self.m.ncol):
self.logger.lraise(
"k_zone_dict arr for k {0} has wrong shape:{1}".format(
k, arr.shape
)
)
self.k_zone_dict = k_zone_dict
# add any extra commands to the forward run lines
for alist, ilist in zip(
[self.frun_pre_lines, self.frun_model_lines, self.frun_post_lines],
[extra_pre_cmds, extra_model_cmds, extra_post_cmds],
):
if ilist is None:
continue
if not isinstance(ilist, list):
ilist = [ilist]
for cmd in ilist:
self.logger.statement("forward_run line:{0}".format(cmd))
alist.append("pyemu.os_utils.run('{0}')\n".format(cmd))
# add the model call
if model_exe_name is None:
model_exe_name = self.m.exe_name
self.logger.warn(
"using flopy binary to execute the model:{0}".format(model)
)
if redirect_forward_output:
line = "pyemu.os_utils.run('{0} {1} 1>{1}.stdout 2>{1}.stderr')".format(
model_exe_name, self.m.namefile
)
else:
line = "pyemu.os_utils.run('{0} {1} ')".format(
model_exe_name, self.m.namefile
)
self.logger.statement("forward_run line:{0}".format(line))
self.frun_model_lines.append(line)
self.tpl_files, self.in_files = [], []
self.ins_files, self.out_files = [], []
self._setup_mult_dirs()
self.mlt_files = []
self.org_files = []
self.m_files = []
self.mlt_counter = {}
self.par_dfs = {}
self.mlt_dfs = []
self._setup_list_pars()
self._setup_array_pars()
if not sfr_pars and temporal_sfr_pars:
self.logger.lraise("use of `temporal_sfr_pars` requires `sfr_pars`")
if sfr_pars:
if isinstance(sfr_pars, str):
sfr_pars = [sfr_pars]
if isinstance(sfr_pars, list):
self._setup_sfr_pars(sfr_pars, include_temporal_pars=temporal_sfr_pars)
else:
self._setup_sfr_pars(include_temporal_pars=temporal_sfr_pars)
if hfb_pars:
self._setup_hfb_pars()
self.mflist_waterbudget = mflist_waterbudget
self.mfhyd = mfhyd
self._setup_observations()
self.build_pst()
if build_prior:
self.parcov = self.build_prior()
else:
self.parcov = None
self.log("saving intermediate _setup_<> dfs into {0}".format(self.m.model_ws))
for tag, df in self.par_dfs.items():
df.to_csv(
os.path.join(
self.m.model_ws,
"_setup_par_{0}_{1}.csv".format(
tag.replace(" ", "_"), self.pst_name
),
)
)
for tag, df in self.obs_dfs.items():
df.to_csv(
os.path.join(
self.m.model_ws,
"_setup_obs_{0}_{1}.csv".format(
tag.replace(" ", "_"), self.pst_name
),
)
)
self.log("saving intermediate _setup_<> dfs into {0}".format(self.m.model_ws))
self.logger.statement("all done")
def _setup_sfr_obs(self):
"""setup sfr ASCII observations"""
if not self.sfr_obs:
return
if self.m.sfr is None:
self.logger.lraise("no sfr package found...")
org_sfr_out_file = os.path.join(
self.org_model_ws, "{0}.sfr.out".format(self.m.name)
)
if not os.path.exists(org_sfr_out_file):
self.logger.lraise(
"setup_sfr_obs() error: could not locate existing sfr out file: {0}".format(
org_sfr_out_file
)
)
new_sfr_out_file = os.path.join(
self.m.model_ws, os.path.split(org_sfr_out_file)[-1]
)
shutil.copy2(org_sfr_out_file, new_sfr_out_file)
seg_group_dict = None
if isinstance(self.sfr_obs, dict):
seg_group_dict = self.sfr_obs
df = pyemu.gw_utils.setup_sfr_obs(
new_sfr_out_file,
seg_group_dict=seg_group_dict,
model=self.m,
include_path=True,
)
if df is not None:
self.obs_dfs["sfr"] = df
self.frun_post_lines.append("pyemu.gw_utils.apply_sfr_obs()")
def _setup_sfr_pars(self, par_cols=None, include_temporal_pars=None):
"""setup multiplier parameters for sfr segment data
Adding support for reachinput (and isfropt = 1)"""
assert self.m.sfr is not None, "can't find sfr package..."
if isinstance(par_cols, str):
par_cols = [par_cols]
reach_pars = False # default to False
seg_pars = True
par_dfs = {}
df = pyemu.gw_utils.setup_sfr_seg_parameters(
self.m, par_cols=par_cols, include_temporal_pars=include_temporal_pars
) # now just pass model
# self.par_dfs["sfr"] = df
if df.empty:
warnings.warn("No sfr segment parameters have been set up", PyemuWarning)
par_dfs["sfr"] = []
seg_pars = False
else:
par_dfs["sfr"] = [df] # may need df for both segs and reaches
self.tpl_files.append("sfr_seg_pars.dat.tpl")
self.in_files.append("sfr_seg_pars.dat")
if include_temporal_pars:
self.tpl_files.append("sfr_seg_temporal_pars.dat.tpl")
self.in_files.append("sfr_seg_temporal_pars.dat")
if self.m.sfr.reachinput:
# if include_temporal_pars:
# raise NotImplementedError("temporal pars is not set up for reach data style")
df = pyemu.gw_utils.setup_sfr_reach_parameters(self.m, par_cols=par_cols)
if df.empty:
warnings.warn("No sfr reach parameters have been set up", PyemuWarning)
else:
self.tpl_files.append("sfr_reach_pars.dat.tpl")
self.in_files.append("sfr_reach_pars.dat")
reach_pars = True
par_dfs["sfr"].append(df)
if len(par_dfs["sfr"]) > 0:
self.par_dfs["sfr"] = pd.concat(par_dfs["sfr"])
self.frun_pre_lines.append(
"pyemu.gw_utils.apply_sfr_parameters(seg_pars={0}, reach_pars={1})".format(
seg_pars, reach_pars
)
)
else:
warnings.warn("No sfr parameters have been set up!", PyemuWarning)
def _setup_hfb_pars(self):
"""setup non-mult parameters for hfb (yuck!)"""
if self.m.hfb6 is None:
self.logger.lraise("couldn't find hfb pak")
tpl_file, df = pyemu.gw_utils.write_hfb_template(self.m)
self.in_files.append(os.path.split(tpl_file.replace(".tpl", ""))[-1])
self.tpl_files.append(os.path.split(tpl_file)[-1])
self.par_dfs["hfb"] = df
def _setup_mult_dirs(self):
"""setup the directories to use for multiplier parameterization. Directories
are made within the PstFromFlopyModel.m.model_ws directory
"""
# setup dirs to hold the original and multiplier model input quantities
set_dirs = []
# if len(self.pp_props) > 0 or len(self.zone_props) > 0 or \
# len(self.grid_props) > 0:
if (
self.pp_props is not None
or self.zone_props is not None
or self.grid_props is not None
or self.const_props is not None
or self.kl_props is not None
):
set_dirs.append(self.arr_org)
set_dirs.append(self.arr_mlt)
# if len(self.bc_props) > 0:
if len(self.temporal_list_props) > 0 or len(self.spatial_list_props) > 0:
set_dirs.append(self.list_org)
if len(self.spatial_list_props):
set_dirs.append(self.list_mlt)
for d in set_dirs:
d = os.path.join(self.m.model_ws, d)
self.log("setting up '{0}' dir".format(d))
if os.path.exists(d):
if self.remove_existing:
shutil.rmtree(d, onerror=remove_readonly)
else:
raise Exception("dir '{0}' already exists".format(d))
os.mkdir(d)
self.log("setting up '{0}' dir".format(d))
def _setup_model(self, model, org_model_ws, new_model_ws):
"""setup the flopy.mbase instance for use with multipler parameters.
Changes model_ws, sets external_path and writes new MODFLOW input
files
"""
split_new_mws = [i for i in os.path.split(new_model_ws) if len(i) > 0]
if len(split_new_mws) != 1:
self.logger.lraise(
"new_model_ws can only be 1 folder-level deep:{0}".format(
str(split_new_mws)
)
)
if isinstance(model, str):
self.log("loading flopy model")
try:
import flopy
except:
raise Exception("from_flopy_model() requires flopy")
# prepare the flopy model
self.org_model_ws = org_model_ws
self.new_model_ws = new_model_ws
self.m = flopy.modflow.Modflow.load(
model, model_ws=org_model_ws, check=False, verbose=True, forgive=False
)
self.log("loading flopy model")
else:
self.m = model
self.org_model_ws = str(self.m.model_ws)
self.new_model_ws = new_model_ws
self.log("updating model attributes")
self.m.array_free_format = True
self.m.free_format_input = True
self.m.external_path = "."
self.log("updating model attributes")
if os.path.exists(new_model_ws):
if not self.remove_existing:
self.logger.lraise("'new_model_ws' already exists")
else:
self.logger.warn("removing existing 'new_model_ws")
shutil.rmtree(new_model_ws, onerror=pyemu.os_utils._remove_readonly)
time.sleep(1)
self.m.change_model_ws(new_model_ws, reset_external=True)
self.m.exe_name = self.m.exe_name.replace(".exe", "")
self.m.exe = self.m.version
self.log("writing new modflow input files")
self.m.write_input()
self.log("writing new modflow input files")
def _get_count(self, name):
"""get the latest counter for a certain parameter type."""
if name not in self.mlt_counter:
self.mlt_counter[name] = 1
c = 0
else:
c = self.mlt_counter[name]
self.mlt_counter[name] += 1
# print(name,c)
return c
def _prep_mlt_arrays(self):
"""prepare multipler arrays. Copies existing model input arrays and
writes generic (ones) multiplier arrays
"""
par_props = [
self.pp_props,
self.grid_props,
self.zone_props,
self.const_props,
self.kl_props,
]
par_suffixs = [
self.pp_suffix,
self.gr_suffix,
self.zn_suffix,
self.cn_suffix,
self.kl_suffix,
]
# Need to remove props and suffixes for which no info was provided (e.g. still None)
del_idx = []
for i, cp in enumerate(par_props):
if cp is None:
del_idx.append(i)
for i in del_idx[::-1]:
del par_props[i]
del par_suffixs[i]
mlt_dfs = []
for par_prop, suffix in zip(par_props, par_suffixs):
if len(par_prop) == 2:
if not isinstance(par_prop[0], list):
par_prop = [par_prop]
if len(par_prop) == 0:
continue
for pakattr, k_org in par_prop:
attr_name = pakattr.split(".")[1]
pak, attr = self._parse_pakattr(pakattr)
ks = np.arange(self.m.nlay)
if isinstance(attr, flopy.utils.Transient2d):
ks = np.arange(self.m.nper)
try:
k_parse = self._parse_k(k_org, ks)
except Exception as e:
self.logger.lraise("error parsing k {0}:{1}".format(k_org, str(e)))
org, mlt, mod, layer = [], [], [], []
c = self._get_count(attr_name)
mlt_prefix = "{0}{1}".format(attr_name, c)
mlt_name = os.path.join(
self.arr_mlt, "{0}.dat{1}".format(mlt_prefix, suffix)
)
for k in k_parse:
# horrible kludge to avoid passing int64 to flopy
# this gift may give again...
if type(k) is np.int64:
k = int(k)
if isinstance(attr, flopy.utils.Util2d):
fname = self._write_u2d(attr)
layer.append(k)
elif isinstance(attr, flopy.utils.Util3d):
fname = self._write_u2d(attr[k])
layer.append(k)
elif isinstance(attr, flopy.utils.Transient2d):
fname = self._write_u2d(attr.transient_2ds[k])
layer.append(0) # big assumption here
mod.append(os.path.join(self.m.external_path, fname))
mlt.append(mlt_name)
org.append(os.path.join(self.arr_org, fname))
df = pd.DataFrame(
{
"org_file": org,
"mlt_file": mlt,
"model_file": mod,
"layer": layer,
}
)
df.loc[:, "suffix"] = suffix
df.loc[:, "prefix"] = mlt_prefix
df.loc[:, "attr_name"] = attr_name
mlt_dfs.append(df)
if len(mlt_dfs) > 0:
mlt_df = pd.concat(mlt_dfs, ignore_index=True)
return mlt_df
def _write_u2d(self, u2d):
"""write a flopy.utils.Util2D instance to an ASCII text file using the
Util2D filename
"""
filename = os.path.split(u2d.filename)[-1]
np.savetxt(
os.path.join(self.m.model_ws, self.arr_org, filename),
u2d.array,
fmt="%15.6E",
)
return filename
def _write_const_tpl(self, name, tpl_file, zn_array):
"""write a template file a for a constant (uniform) multiplier parameter"""
parnme = []
with open(os.path.join(self.m.model_ws, tpl_file), "w") as f:
f.write("ptf ~\n")
for i in range(self.m.nrow):
for j in range(self.m.ncol):
if zn_array[i, j] < 1:
pname = " 1.0 "
else:
pname = "{0}{1}".format(name, self.cn_suffix)
if len(pname) > 12:
self.logger.warn(
"zone pname too long for pest:{0}".format(pname)
)
parnme.append(pname)
pname = " ~ {0} ~".format(pname)
f.write(pname)
f.write("\n")
df = pd.DataFrame({"parnme": parnme}, index=parnme)
# df.loc[:,"pargp"] = "{0}{1}".format(self.cn_suffixname)
df.loc[:, "pargp"] = "{0}_{1}".format(self.cn_suffix.replace("_", ""), name)
df.loc[:, "tpl"] = tpl_file
return df
def _write_grid_tpl(self, name, tpl_file, zn_array):
"""write a template file a for grid-based multiplier parameters"""
parnme, x, y = [], [], []
with open(os.path.join(self.m.model_ws, tpl_file), "w") as f:
f.write("ptf ~\n")
for i in range(self.m.nrow):
for j in range(self.m.ncol):
if zn_array[i, j] < 1:
pname = " 1.0 "
else:
pname = "{0}{1:03d}{2:03d}".format(name, i, j)
if len(pname) > 12:
self.logger.warn(
"grid pname too long for pest:{0}".format(pname)
)
parnme.append(pname)
pname = " ~ {0} ~ ".format(pname)
x.append(self.m.sr.xcentergrid[i, j])
y.append(self.m.sr.ycentergrid[i, j])
f.write(pname)
f.write("\n")
df = pd.DataFrame({"parnme": parnme, "x": x, "y": y}, index=parnme)
df.loc[:, "pargp"] = "{0}{1}".format(self.gr_suffix.replace("_", ""), name)
df.loc[:, "tpl"] = tpl_file
return df
def _grid_prep(self):
"""prepare grid-based parameterizations"""
if len(self.grid_props) == 0:
return
if self.grid_geostruct is None:
self.logger.warn(
"grid_geostruct is None,"
" using ExpVario with contribution=1 and a=(max(delc,delr)*10"
)
dist = 10 * float(
max(self.m.dis.delr.array.max(), self.m.dis.delc.array.max())
)
v = pyemu.geostats.ExpVario(contribution=1.0, a=dist)
self.grid_geostruct = pyemu.geostats.GeoStruct(
variograms=v, name="grid_geostruct", transform="log"
)
def _pp_prep(self, mlt_df):
"""prepare pilot point based parameterization"""
if len(self.pp_props) == 0:
return
if self.pp_space is None:
self.logger.warn("pp_space is None, using 10...\n")
self.pp_space = 10
if self.pp_geostruct is None:
self.logger.warn(
"pp_geostruct is None,"
" using ExpVario with contribution=1 and a=(pp_space*max(delr,delc))"
)
pp_dist = self.pp_space * float(
max(self.m.dis.delr.array.max(), self.m.dis.delc.array.max())
)
v = pyemu.geostats.ExpVario(contribution=1.0, a=pp_dist)
self.pp_geostruct = pyemu.geostats.GeoStruct(
variograms=v, name="pp_geostruct", transform="log"
)
pp_df = mlt_df.loc[mlt_df.suffix == self.pp_suffix, :]
layers = pp_df.layer.unique()
layers.sort()
pp_dict = {
l: list(pp_df.loc[pp_df.layer == l, "prefix"].unique()) for l in layers
}
# big assumption here - if prefix is listed more than once, use the lowest layer index
pp_dict_sort = {}
for i, l in enumerate(layers):
p = set(pp_dict[l])
pl = list(p)
pl.sort()
pp_dict_sort[l] = pl
for ll in layers[i + 1 :]:
pp = set(pp_dict[ll])
d = list(pp - p)
d.sort()
pp_dict_sort[ll] = d
pp_dict = pp_dict_sort
pp_array_file = {p: m for p, m in zip(pp_df.prefix, pp_df.mlt_file)}
self.logger.statement("pp_dict: {0}".format(str(pp_dict)))
self.log("calling setup_pilot_point_grid()")
if self.use_pp_zones:
# check if k_zone_dict is a dictionary of dictionaries
if np.all([isinstance(v, dict) for v in self.k_zone_dict.values()]):
ib = {
p.split(".")[-1]: k_dict for p, k_dict in self.k_zone_dict.items()
}
for attr in pp_df.attr_name.unique():
if attr not in [p.split(".")[-1] for p in ib.keys()]:
if "general_zn" not in ib.keys():
warnings.warn(
"Dictionary of dictionaries passed as zones, {0} not in keys: {1}. "
"Will use ibound for zones".format(attr, ib.keys()),
PyemuWarning,
)
else:
self.logger.statement(
"Dictionary of dictionaries passed as pp zones, "
"using 'general_zn' for {0}".format(attr)
)
if "general_zn" not in ib.keys():
ib["general_zn"] = {
k: self.m.bas6.ibound[k].array for k in range(self.m.nlay)
}
else:
ib = {"general_zn": self.k_zone_dict}
else:
ib = {}
for k in range(self.m.nlay):
a = self.m.bas6.ibound[k].array.copy()
a[a > 0] = 1
ib[k] = a
for k, i in ib.items():
if np.any(i < 0):
u, c = np.unique(i[i > 0], return_counts=True)
counts = dict(zip(u, c))
mx = -1.0e10
imx = None
for u, c in counts.items():
if c > mx:
mx = c
imx = u
self.logger.warn(
"resetting negative ibound values for PP zone"
+ "array in layer {0} : {1}".format(k + 1, u)
)
i[i < 0] = imx
ib[k] = i
ib = {"general_zn": ib}
pp_df = pyemu.pp_utils.setup_pilotpoints_grid(
self.m,
ibound=ib,
use_ibound_zones=self.use_pp_zones,
prefix_dict=pp_dict,
every_n_cell=self.pp_space,
pp_dir=self.m.model_ws,
tpl_dir=self.m.model_ws,
shapename=os.path.join(self.m.model_ws, "pp.shp"),
)
self.logger.statement(
"{0} pilot point parameters created".format(pp_df.shape[0])
)
self.logger.statement(
"pilot point 'pargp':{0}".format(",".join(pp_df.pargp.unique()))
)
self.log("calling setup_pilot_point_grid()")
# calc factors for each layer
pargp = pp_df.pargp.unique()
pp_dfs_k = {}
fac_files = {}
pp_processed = set()
pp_df.loc[:, "fac_file"] = np.NaN
for pg in pargp:
ks = pp_df.loc[pp_df.pargp == pg, "k"].unique()
if len(ks) == 0:
self.logger.lraise(
"something is wrong in fac calcs for par group {0}".format(pg)
)
if len(ks) == 1:
if np.all(
[isinstance(v, dict) for v in ib.values()]
): # check is dict of dicts
if np.any([pg.startswith(p) for p in ib.keys()]):
p = next(p for p in ib.keys() if pg.startswith(p))
# get dict relating to parameter prefix
ib_k = ib[p][ks[0]]
else:
p = "general_zn"
ib_k = ib[p][ks[0]]
else:
ib_k = ib[ks[0]]
if len(ks) != 1: # TODO
# self.logger.lraise("something is wrong in fac calcs for par group {0}".format(pg))
self.logger.warn(
"multiple k values for {0},forming composite zone array...".format(
pg
)
)
ib_k = np.zeros((self.m.nrow, self.m.ncol))
for k in ks:
t = ib["general_zn"][k].copy()
t[t < 1] = 0
ib_k[t > 0] = t[t > 0]
k = int(ks[0])
kattr_id = "{}_{}".format(k, p)
kp_id = "{}_{}".format(k, pg)
if kp_id not in pp_dfs_k.keys():
self.log("calculating factors for p={0}, k={1}".format(pg, k))
fac_file = os.path.join(self.m.model_ws, "pp_k{0}.fac".format(kattr_id))
var_file = fac_file.replace(".fac", ".var.dat")
pp_df_k = pp_df.loc[pp_df.pargp == pg]
if kattr_id not in pp_processed:
self.logger.statement(
"saving krige variance file:{0}".format(var_file)
)
self.logger.statement(
"saving krige factors file:{0}".format(fac_file)
)
ok_pp = pyemu.geostats.OrdinaryKrige(self.pp_geostruct, pp_df_k)
ok_pp.calc_factors_grid(
self.m.sr,
var_filename=var_file,
zone_array=ib_k,
num_threads=10,
)
ok_pp.to_grid_factors_file(fac_file)
pp_processed.add(kattr_id)
fac_files[kp_id] = fac_file
self.log("calculating factors for p={0}, k={1}".format(pg, k))
pp_dfs_k[kp_id] = pp_df_k
for kp_id, fac_file in fac_files.items():
k = int(kp_id.split("_")[0])
pp_prefix = kp_id.split("_", 1)[-1]
# pp_files = pp_df.pp_filename.unique()
fac_file = os.path.split(fac_file)[-1]
# pp_prefixes = pp_dict[k]
# for pp_prefix in pp_prefixes:
self.log("processing pp_prefix:{0}".format(pp_prefix))
if pp_prefix not in pp_array_file.keys():
self.logger.lraise(
"{0} not in self.pp_array_file.keys()".format(
pp_prefix, ",".join(pp_array_file.keys())
)
)
out_file = os.path.join(
self.arr_mlt, os.path.split(pp_array_file[pp_prefix])[-1]
)
pp_files = pp_df.loc[
pp_df.pp_filename.apply(
lambda x: os.path.split(x)[-1].split(".")[0]
== "{0}pp".format(pp_prefix)
),
"pp_filename",
]
if pp_files.unique().shape[0] != 1:
self.logger.lraise(
"wrong number of pp_files found:{0}".format(",".join(pp_files))
)
pp_file = os.path.split(pp_files.iloc[0])[-1]
pp_df.loc[pp_df.pargp == pp_prefix, "fac_file"] = fac_file
pp_df.loc[pp_df.pargp == pp_prefix, "pp_file"] = pp_file
pp_df.loc[pp_df.pargp == pp_prefix, "out_file"] = out_file
pp_df.loc[:, "pargp"] = pp_df.pargp.apply(lambda x: "pp_{0}".format(x))
out_files = mlt_df.loc[
mlt_df.mlt_file.apply(lambda x: x.endswith(self.pp_suffix)), "mlt_file"
]
# mlt_df.loc[:,"fac_file"] = np.NaN
# mlt_df.loc[:,"pp_file"] = np.NaN
for out_file in out_files:
pp_df_pf = pp_df.loc[pp_df.out_file == out_file, :]
fac_files = pp_df_pf.fac_file
if fac_files.unique().shape[0] != 1:
self.logger.lraise(
"wrong number of fac files:{0}".format(str(fac_files.unique()))
)
fac_file = fac_files.iloc[0]
pp_files = pp_df_pf.pp_file
if pp_files.unique().shape[0] != 1:
self.logger.lraise(
"wrong number of pp files:{0}".format(str(pp_files.unique()))
)
pp_file = pp_files.iloc[0]
mlt_df.loc[mlt_df.mlt_file == out_file, "fac_file"] = fac_file
mlt_df.loc[mlt_df.mlt_file == out_file, "pp_file"] = pp_file
mlt_df.loc[mlt_df.mlt_file == out_file, "pp_fill_value"] = 1.0
mlt_df.loc[mlt_df.mlt_file == out_file, "pp_lower_limit"] = 1.0e-10
mlt_df.loc[mlt_df.mlt_file == out_file, "pp_upper_limit"] = 1.0e+10
self.par_dfs[self.pp_suffix] = pp_df
mlt_df.loc[mlt_df.suffix == self.pp_suffix, "tpl_file"] = np.NaN
def _kl_prep(self, mlt_df):
"""prepare KL based parameterizations"""
if len(self.kl_props) == 0:
return
if self.kl_geostruct is None:
self.logger.warn(
"kl_geostruct is None,"
" using ExpVario with contribution=1 and a=(10.0*max(delr,delc))"
)
kl_dist = 10.0 * float(
max(self.m.dis.delr.array.max(), self.m.dis.delc.array.max())
)
v = pyemu.geostats.ExpVario(contribution=1.0, a=kl_dist)
self.kl_geostruct = pyemu.geostats.GeoStruct(
variograms=v, name="kl_geostruct", transform="log"
)
kl_df = mlt_df.loc[mlt_df.suffix == self.kl_suffix, :]
layers = kl_df.layer.unique()
# kl_dict = {l:list(kl_df.loc[kl_df.layer==l,"prefix"].unique()) for l in layers}
# big assumption here - if prefix is listed more than once, use the lowest layer index
# for i,l in enumerate(layers):
# p = set(kl_dict[l])
# for ll in layers[i+1:]:
# pp = set(kl_dict[ll])
# d = pp - p
# kl_dict[ll] = list(d)
kl_prefix = list(kl_df.loc[:, "prefix"])
kl_array_file = {p: m for p, m in zip(kl_df.prefix, kl_df.mlt_file)}
self.logger.statement("kl_prefix: {0}".format(str(kl_prefix)))
fac_file = os.path.join(self.m.model_ws, "kl.fac")
self.log("calling kl_setup() with factors file {0}".format(fac_file))
kl_df = kl_setup(
self.kl_num_eig,
self.m.sr,
self.kl_geostruct,
kl_prefix,
factors_file=fac_file,
basis_file=fac_file + ".basis.jcb",
tpl_dir=self.m.model_ws,
)
self.logger.statement("{0} kl parameters created".format(kl_df.shape[0]))
self.logger.statement("kl 'pargp':{0}".format(",".join(kl_df.pargp.unique())))
self.log("calling kl_setup() with factors file {0}".format(fac_file))
kl_mlt_df = mlt_df.loc[mlt_df.suffix == self.kl_suffix]
for prefix in kl_df.prefix.unique():
prefix_df = kl_df.loc[kl_df.prefix == prefix, :]
in_file = os.path.split(prefix_df.loc[:, "in_file"].iloc[0])[-1]
assert prefix in mlt_df.prefix.values, "{0}:{1}".format(
prefix, mlt_df.prefix
)
mlt_df.loc[mlt_df.prefix == prefix, "pp_file"] = in_file
mlt_df.loc[mlt_df.prefix == prefix, "fac_file"] = os.path.split(fac_file)[
-1
]
mlt_df.loc[mlt_df.prefix == prefix, "pp_fill_value"] = 1.0
mlt_df.loc[mlt_df.prefix == prefix, "pp_lower_limit"] = 1.0e-10
mlt_df.loc[mlt_df.prefix == prefix, "pp_upper_limit"] = 1.0e+10
print(kl_mlt_df)
mlt_df.loc[mlt_df.suffix == self.kl_suffix, "tpl_file"] = np.NaN
self.par_dfs[self.kl_suffix] = kl_df
# calc factors for each layer
def _setup_array_pars(self):
"""main entry point for setting up array multipler parameters"""
mlt_df = self._prep_mlt_arrays()
if mlt_df is None:
return
mlt_df.loc[:, "tpl_file"] = mlt_df.mlt_file.apply(
lambda x: os.path.split(x)[-1] + ".tpl"
)
# mlt_df.loc[mlt_df.tpl_file.apply(lambda x:pd.notnull(x.pp_file)),"tpl_file"] = np.NaN
mlt_files = mlt_df.mlt_file.unique()
# for suffix,tpl_file,layer,name in zip(self.mlt_df.suffix,
# self.mlt_df.tpl,self.mlt_df.layer,
# self.mlt_df.prefix):
par_dfs = {}
for mlt_file in mlt_files:
suffixes = mlt_df.loc[mlt_df.mlt_file == mlt_file, "suffix"]
if suffixes.unique().shape[0] != 1:
self.logger.lraise("wrong number of suffixes for {0}".format(mlt_file))
suffix = suffixes.iloc[0]
tpl_files = mlt_df.loc[mlt_df.mlt_file == mlt_file, "tpl_file"]
if tpl_files.unique().shape[0] != 1:
self.logger.lraise("wrong number of tpl_files for {0}".format(mlt_file))
tpl_file = tpl_files.iloc[0]
layers = mlt_df.loc[mlt_df.mlt_file == mlt_file, "layer"]
# if layers.unique().shape[0] != 1:
# self.logger.lraise("wrong number of layers for {0}"\
# .format(mlt_file))
layer = layers.iloc[0]
names = mlt_df.loc[mlt_df.mlt_file == mlt_file, "prefix"]
if names.unique().shape[0] != 1:
self.logger.lraise("wrong number of names for {0}".format(mlt_file))
name = names.iloc[0]
attr_names = mlt_df.loc[mlt_df.mlt_file == mlt_file, "attr_name"]
if attr_names.unique().shape[0] != 1:
self.logger.lraise(
"wrong number of attr_names for {0}".format(mlt_file)
)
attr_name = attr_names.iloc[0]
# ib = self.k_zone_dict[layer]
df = None
if suffix == self.cn_suffix:
self.log("writing const tpl:{0}".format(tpl_file))
# df = self.write_const_tpl(name,tpl_file,self.m.bas6.ibound[layer].array)
try:
df = write_const_tpl(
name,
os.path.join(self.m.model_ws, tpl_file),
self.cn_suffix,
self.m.bas6.ibound[layer].array,
(self.m.nrow, self.m.ncol),
self.m.sr,
)
except Exception as e:
self.logger.lraise(
"error writing const template: {0}".format(str(e))
)
self.log("writing const tpl:{0}".format(tpl_file))
elif suffix == self.gr_suffix:
self.log("writing grid tpl:{0}".format(tpl_file))
# df = self.write_grid_tpl(name,tpl_file,self.m.bas6.ibound[layer].array)
try:
df = write_grid_tpl(
name,
os.path.join(self.m.model_ws, tpl_file),
self.gr_suffix,
self.m.bas6.ibound[layer].array,
(self.m.nrow, self.m.ncol),
self.m.sr,
)
except Exception as e:
self.logger.lraise(
"error writing grid template: {0}".format(str(e))
)
self.log("writing grid tpl:{0}".format(tpl_file))
elif suffix == self.zn_suffix:
self.log("writing zone tpl:{0}".format(tpl_file))
if np.all(
[isinstance(v, dict) for v in self.k_zone_dict.values()]
): # check is dict of dicts
if attr_name in [p.split(".")[-1] for p in self.k_zone_dict.keys()]:
k_zone_dict = next(
k_dict
for p, k_dict in self.k_zone_dict.items()
if p.split(".")[-1] == attr_name
) # get dict relating to parameter prefix
else:
assert (
"general_zn" in self.k_zone_dict.keys()
), "Neither {0} nor 'general_zn' are in k_zone_dict keys: {1}".format(
attr_name, self.k_zone_dict.keys()
)
k_zone_dict = self.k_zone_dict["general_zn"]
else:
k_zone_dict = self.k_zone_dict
# df = self.write_zone_tpl(self.m, name, tpl_file, self.k_zone_dict[layer], self.zn_suffix, self.logger)
try:
df = write_zone_tpl(
name,
os.path.join(self.m.model_ws, tpl_file),
self.zn_suffix,
k_zone_dict[layer],
(self.m.nrow, self.m.ncol),
self.m.sr,
)
except Exception as e:
self.logger.lraise(
"error writing zone template: {0}".format(str(e))
)
self.log("writing zone tpl:{0}".format(tpl_file))
if df is None:
continue
if suffix not in par_dfs:
par_dfs[suffix] = [df]
else:
par_dfs[suffix].append(df)
for suf, dfs in par_dfs.items():
self.par_dfs[suf] = pd.concat(dfs)
if self.pp_suffix in mlt_df.suffix.values:
self.log("setting up pilot point process")
self._pp_prep(mlt_df)
self.log("setting up pilot point process")
if self.gr_suffix in mlt_df.suffix.values:
self.log("setting up grid process")
self._grid_prep()
self.log("setting up grid process")
if self.kl_suffix in mlt_df.suffix.values:
self.log("setting up kl process")
self._kl_prep(mlt_df)
self.log("setting up kl process")
mlt_df.to_csv(os.path.join(self.m.model_ws, "arr_pars.csv"))
ones = np.ones((self.m.nrow, self.m.ncol))
for mlt_file in mlt_df.mlt_file.unique():
self.log("save test mlt array {0}".format(mlt_file))
np.savetxt(os.path.join(self.m.model_ws, mlt_file), ones, fmt="%15.6E")
self.log("save test mlt array {0}".format(mlt_file))
tpl_files = mlt_df.loc[mlt_df.mlt_file == mlt_file, "tpl_file"]
if tpl_files.unique().shape[0] != 1:
self.logger.lraise("wrong number of tpl_files for {0}".format(mlt_file))
tpl_file = tpl_files.iloc[0]
if pd.notnull(tpl_file):
self.tpl_files.append(tpl_file)
self.in_files.append(mlt_file)
# for tpl_file,mlt_file in zip(mlt_df.tpl_file,mlt_df.mlt_file):
# if pd.isnull(tpl_file):
# continue
# self.tpl_files.append(tpl_file)
# self.in_files.append(mlt_file)
os.chdir(self.m.model_ws)
try:
apply_array_pars()
except Exception as e:
os.chdir("..")
self.logger.lraise(
"error test running apply_array_pars():{0}".format(str(e))
)
os.chdir("..")
line = "pyemu.helpers.apply_array_pars()\n"
self.logger.statement("forward_run line:{0}".format(line))
self.frun_pre_lines.append(line)
def _setup_observations(self):
"""main entry point for setting up observations"""
obs_methods = [
self._setup_water_budget_obs,
self._setup_hyd,
self._setup_smp,
self._setup_hob,
self._setup_hds,
self._setup_sfr_obs,
]
obs_types = [
"mflist water budget obs",
"hyd file",
"external obs-sim smp files",
"hob",
"hds",
"sfr",
]
self.obs_dfs = {}
for obs_method, obs_type in zip(obs_methods, obs_types):
self.log("processing obs type {0}".format(obs_type))
obs_method()
self.log("processing obs type {0}".format(obs_type))
def draw(self, num_reals=100, sigma_range=6, use_specsim=False, scale_offset=True):
"""draw from the geostatistically-implied parameter covariance matrix
Args:
num_reals (`int`): number of realizations to generate. Default is 100
sigma_range (`float`): number of standard deviations represented by
the parameter bounds. Default is 6.
use_specsim (`bool`): flag to use spectral simulation for grid-based
parameters. Requires a regular grid but is wicked fast. Default is False
scale_offset (`bool`, optional): flag to apply scale and offset to parameter
bounds when calculating variances - this is passed through to
`pyemu.Cov.from_parameter_data`. Default is True.
Note:
operates on parameters by groups to avoid having to construct a very large
covariance matrix for problems with more than 30K parameters.
uses `helpers.geostatistical_draws()`
Returns:
`pyemu.ParameterEnsemble`: The realized parameter ensemble
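Example:
    a hypothetical usage sketch (assumes `ph` is an already-constructed
    `PstFromFlopyModel` helper and that `ph.build_pst()` has been called so
    `ph.pst` exists)::

        pe = ph.draw(num_reals=200, sigma_range=4)
        pe.to_csv("prior_pe.csv")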
"""
self.log("drawing realizations")
struct_dict = {}
gr_par_pe = None
if self.pp_suffix in self.par_dfs.keys():
pp_df = self.par_dfs[self.pp_suffix]
pp_dfs = []
for pargp in pp_df.pargp.unique():
gp_df = pp_df.loc[pp_df.pargp == pargp, :]
p_df = gp_df.drop_duplicates(subset="parnme")
pp_dfs.append(p_df)
# pp_dfs = [pp_df.loc[pp_df.pargp==pargp,:].copy() for pargp in pp_df.pargp.unique()]
struct_dict[self.pp_geostruct] = pp_dfs
if self.gr_suffix in self.par_dfs.keys():
gr_df = self.par_dfs[self.gr_suffix]
if not use_specsim:
gr_dfs = []
for pargp in gr_df.pargp.unique():
gp_df = gr_df.loc[gr_df.pargp == pargp, :]
p_df = gp_df.drop_duplicates(subset="parnme")
gr_dfs.append(p_df)
# gr_dfs = [gr_df.loc[gr_df.pargp==pargp,:].copy() for pargp in gr_df.pargp.unique()]
struct_dict[self.grid_geostruct] = gr_dfs
else:
if not pyemu.geostats.SpecSim2d.grid_is_regular(
self.m.dis.delr.array, self.m.dis.delc.array
):
self.logger.lraise(
"draw() error: can't use spectral simulation with irregular grid"
)
gr_df.loc[:, "i"] = gr_df.parnme.apply(lambda x: int(x[-6:-3]))
gr_df.loc[:, "j"] = gr_df.parnme.apply(lambda x: int(x[-3:]))
if gr_df.i.max() > self.m.nrow - 1 or gr_df.i.min() < 0:
self.logger.lraise(
"draw(): error parsing grid par names for 'i' index"
)
if gr_df.j.max() > self.m.ncol - 1 or gr_df.j.min() < 0:
self.logger.lraise(
"draw(): error parsing grid par names for 'j' index"
)
self.log("spectral simulation for grid-scale pars")
ss = pyemu.geostats.SpecSim2d(
delx=self.m.dis.delr.array,
dely=self.m.dis.delc.array,
geostruct=self.grid_geostruct,
)
gr_par_pe = ss.grid_par_ensemble_helper(
pst=self.pst,
gr_df=gr_df,
num_reals=num_reals,
sigma_range=sigma_range,
logger=self.logger,
)
self.log("spectral simulation for grid-scale pars")
if "temporal_list" in self.par_dfs.keys():
bc_df = self.par_dfs["temporal_list"]
bc_df.loc[:, "y"] = 0
bc_df.loc[:, "x"] = bc_df.timedelta.apply(lambda x: x.days)
bc_dfs = []
for pargp in bc_df.pargp.unique():
gp_df = bc_df.loc[bc_df.pargp == pargp, :]
p_df = gp_df.drop_duplicates(subset="parnme")
# print(p_df)
bc_dfs.append(p_df)
# bc_dfs = [bc_df.loc[bc_df.pargp==pargp,:].copy() for pargp in bc_df.pargp.unique()]
struct_dict[self.temporal_list_geostruct] = bc_dfs
if "spatial_list" in self.par_dfs.keys():
bc_df = self.par_dfs["spatial_list"]
bc_dfs = []
for pargp in bc_df.pargp.unique():
gp_df = bc_df.loc[bc_df.pargp == pargp, :]
# p_df = gp_df.drop_duplicates(subset="parnme")
# print(p_df)
bc_dfs.append(gp_df)
struct_dict[self.spatial_list_geostruct] = bc_dfs
pe = geostatistical_draws(
self.pst,
struct_dict=struct_dict,
num_reals=num_reals,
sigma_range=sigma_range,
scale_offset=scale_offset,
)
if gr_par_pe is not None:
pe.loc[:, gr_par_pe.columns] = gr_par_pe.values
self.log("drawing realizations")
return pe
def build_prior(
self, fmt="ascii", filename=None, droptol=None, chunk=None, sigma_range=6
):
"""build and optionally save the prior parameter covariance matrix.
Args:
fmt (`str`, optional): the format to save the cov matrix. Options are "ascii","binary","uncfile", "coo".
Default is "ascii". If "none" (lower case string, not None), then no file is created.
filename (`str`, optional): the filename to save the prior cov matrix to. If None, the name is formed using
model nam_file name. Default is None.
droptol (`float`, optional): tolerance for dropping near-zero values when writing compressed binary.
Default is None.
chunk (`int`, optional): chunk size to write in a single pass - for binary only. Default
is None (no chunking).
sigma_range (`float`): number of standard deviations represented by the parameter bounds. Default
is 6.
Returns:
`pyemu.Cov`: the full prior parameter covariance matrix, generated by processing parameters by
groups
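Example:
    a hypothetical sketch (assumes `ph` is a `PstFromFlopyModel` instance
    with `ph.pst` already built)::

        cov = ph.build_prior(fmt="coo", droptol=1.0e-7, sigma_range=4)
        print(cov.shape)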
"""
fmt = fmt.lower()
acc_fmts = ["ascii", "binary", "uncfile", "none", "coo"]
if fmt not in acc_fmts:
self.logger.lraise(
"unrecognized prior save 'fmt':{0}, options are: {1}".format(
fmt, ",".join(acc_fmts)
)
)
self.log("building prior covariance matrix")
struct_dict = {}
if self.pp_suffix in self.par_dfs.keys():
pp_df = self.par_dfs[self.pp_suffix]
pp_dfs = []
for pargp in pp_df.pargp.unique():
gp_df = pp_df.loc[pp_df.pargp == pargp, :]
p_df = gp_df.drop_duplicates(subset="parnme")
pp_dfs.append(p_df)
# pp_dfs = [pp_df.loc[pp_df.pargp==pargp,:].copy() for pargp in pp_df.pargp.unique()]
struct_dict[self.pp_geostruct] = pp_dfs
if self.gr_suffix in self.par_dfs.keys():
gr_df = self.par_dfs[self.gr_suffix]
gr_dfs = []
for pargp in gr_df.pargp.unique():
gp_df = gr_df.loc[gr_df.pargp == pargp, :]
p_df = gp_df.drop_duplicates(subset="parnme")
gr_dfs.append(p_df)
# gr_dfs = [gr_df.loc[gr_df.pargp==pargp,:].copy() for pargp in gr_df.pargp.unique()]
struct_dict[self.grid_geostruct] = gr_dfs
if "temporal_list" in self.par_dfs.keys():
bc_df = self.par_dfs["temporal_list"]
bc_df.loc[:, "y"] = 0
bc_df.loc[:, "x"] = bc_df.timedelta.apply(lambda x: x.days)
bc_dfs = []
for pargp in bc_df.pargp.unique():
gp_df = bc_df.loc[bc_df.pargp == pargp, :]
p_df = gp_df.drop_duplicates(subset="parnme")
# print(p_df)
bc_dfs.append(p_df)
# bc_dfs = [bc_df.loc[bc_df.pargp==pargp,:].copy() for pargp in bc_df.pargp.unique()]
struct_dict[self.temporal_list_geostruct] = bc_dfs
if "spatial_list" in self.par_dfs.keys():
bc_df = self.par_dfs["spatial_list"]
bc_dfs = []
for pargp in bc_df.pargp.unique():
gp_df = bc_df.loc[bc_df.pargp == pargp, :]
# p_df = gp_df.drop_duplicates(subset="parnme")
# print(p_df)
bc_dfs.append(gp_df)
struct_dict[self.spatial_list_geostruct] = bc_dfs
if "hfb" in self.par_dfs.keys():
if self.spatial_list_geostruct in struct_dict.keys():
struct_dict[self.spatial_list_geostruct].append(self.par_dfs["hfb"])
else:
struct_dict[self.spatial_list_geostruct] = [self.par_dfs["hfb"]]
if "sfr" in self.par_dfs.keys():
self.logger.warn("geospatial prior not implemented for SFR pars")
if len(struct_dict) > 0:
cov = pyemu.helpers.geostatistical_prior_builder(
self.pst, struct_dict=struct_dict, sigma_range=sigma_range
)
else:
cov = pyemu.Cov.from_parameter_data(self.pst, sigma_range=sigma_range)
if filename is None:
filename = os.path.join(self.m.model_ws, self.pst_name + ".prior.cov")
if fmt != "none":
self.logger.statement(
"saving prior covariance matrix to file {0}".format(filename)
)
if fmt == "ascii":
cov.to_ascii(filename)
elif fmt == "binary":
cov.to_binary(filename, droptol=droptol, chunk=chunk)
elif fmt == "uncfile":
cov.to_uncfile(filename)
elif fmt == "coo":
cov.to_coo(filename, droptol=droptol, chunk=chunk)
self.log("building prior covariance matrix")
return cov
def build_pst(self, filename=None):
"""build the pest control file using the parameters and
observations.
Args:
filename (`str`): the filename to save the control file to. If None, the
name is formed from the model namfile name. Default is None. The control
file is saved in the `PstFromFlopyModel.m.model_ws` directory.
Note:
calls pyemu.Pst.from_io_files
calls PESTCHEK
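Example:
    a hypothetical sketch (assumes `ph` is a constructed `PstFromFlopyModel`
    helper; the control file name defaults to `<model name>.pst` in
    `ph.m.model_ws`)::

        ph.build_pst()
        print(ph.pst.npar, ph.pst.nobs)  # quick check on problem size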
"""
self.logger.statement("changing dir in to {0}".format(self.m.model_ws))
os.chdir(self.m.model_ws)
tpl_files = copy.deepcopy(self.tpl_files)
in_files = copy.deepcopy(self.in_files)
try:
files = os.listdir(".")
new_tpl_files = [
f for f in files if f.endswith(".tpl") and f not in tpl_files
]
new_in_files = [f.replace(".tpl", "") for f in new_tpl_files]
tpl_files.extend(new_tpl_files)
in_files.extend(new_in_files)
ins_files = [f for f in files if f.endswith(".ins")]
out_files = [f.replace(".ins", "") for f in ins_files]
for tpl_file, in_file in zip(tpl_files, in_files):
if tpl_file not in self.tpl_files:
self.tpl_files.append(tpl_file)
self.in_files.append(in_file)
for ins_file, out_file in zip(ins_files, out_files):
if ins_file not in self.ins_files:
self.ins_files.append(ins_file)
self.out_files.append(out_file)
self.log("instantiating control file from i/o files")
self.logger.statement("tpl files: {0}".format(",".join(self.tpl_files)))
self.logger.statement("ins files: {0}".format(",".join(self.ins_files)))
pst = pyemu.Pst.from_io_files(
tpl_files=self.tpl_files,
in_files=self.in_files,
ins_files=self.ins_files,
out_files=self.out_files,
)
self.log("instantiating control file from i/o files")
except Exception as e:
os.chdir("..")
self.logger.lraise("error build Pst:{0}".format(str(e)))
os.chdir("..")
# more customization here
par = pst.parameter_data
for name, df in self.par_dfs.items():
if "parnme" not in df.columns:
continue
df.index = df.parnme
for col in par.columns:
if col in df.columns:
par.loc[df.parnme, col] = df.loc[:, col]
par.loc[:, "parubnd"] = 10.0
par.loc[:, "parlbnd"] = 0.1
for name, df in self.par_dfs.items():
if "parnme" not in df:
continue
df.index = df.parnme
for col in ["parubnd", "parlbnd", "pargp"]:
if col in df.columns:
par.loc[df.index, col] = df.loc[:, col]
for tag, [lw, up] in wildass_guess_par_bounds_dict.items():
par.loc[par.parnme.apply(lambda x: x.startswith(tag)), "parubnd"] = up
par.loc[par.parnme.apply(lambda x: x.startswith(tag)), "parlbnd"] = lw
if self.par_bounds_dict is not None:
for tag, [lw, up] in self.par_bounds_dict.items():
par.loc[par.parnme.apply(lambda x: x.startswith(tag)), "parubnd"] = up
par.loc[par.parnme.apply(lambda x: x.startswith(tag)), "parlbnd"] = lw
obs = pst.observation_data
for name, df in self.obs_dfs.items():
if "obsnme" not in df.columns:
continue
df.index = df.obsnme
for col in df.columns:
if col in obs.columns:
obs.loc[df.obsnme, col] = df.loc[:, col]
self.pst_name = self.m.name + ".pst"
pst.model_command = ["python forward_run.py"]
pst.control_data.noptmax = 0
self.log("writing forward_run.py")
self.write_forward_run()
self.log("writing forward_run.py")
if filename is None:
filename = os.path.join(self.m.model_ws, self.pst_name)
self.logger.statement("writing pst {0}".format(filename))
pst.write(filename)
self.pst = pst
self.log("running pestchek on {0}".format(self.pst_name))
os.chdir(self.m.model_ws)
try:
pyemu.os_utils.run("pestchek {0} >pestchek.stdout".format(self.pst_name))
except Exception as e:
self.logger.warn("error running pestchek:{0}".format(str(e)))
for line in open("pestchek.stdout"):
self.logger.statement("pestcheck:{0}".format(line.strip()))
os.chdir("..")
self.log("running pestchek on {0}".format(self.pst_name))
def _add_external(self):
"""add external (existing) template files and/or instruction files to the
Pst instance
"""
if self.external_tpl_in_pairs is not None:
if not isinstance(self.external_tpl_in_pairs, list):
self.external_tpl_in_pairs = [self.external_tpl_in_pairs]
for tpl_file, in_file in self.external_tpl_in_pairs:
if not os.path.exists(tpl_file):
self.logger.lraise(
"couldn't find external tpl file:{0}".format(tpl_file)
)
self.logger.statement("external tpl:{0}".format(tpl_file))
shutil.copy2(
tpl_file, os.path.join(self.m.model_ws, os.path.split(tpl_file)[-1])
)
if os.path.exists(in_file):
shutil.copy2(
in_file,
os.path.join(self.m.model_ws, os.path.split(in_file)[-1]),
)
if self.external_ins_out_pairs is not None:
if not isinstance(self.external_ins_out_pairs, list):
self.external_ins_out_pairs = [self.external_ins_out_pairs]
for ins_file, out_file in self.external_ins_out_pairs:
if not os.path.exists(ins_file):
self.logger.lraise(
"couldn't find external ins file:{0}".format(ins_file)
)
self.logger.statement("external ins:{0}".format(ins_file))
shutil.copy2(
ins_file, os.path.join(self.m.model_ws, os.path.split(ins_file)[-1])
)
if os.path.exists(out_file):
shutil.copy2(
out_file,
os.path.join(self.m.model_ws, os.path.split(out_file)[-1]),
)
self.logger.warn(
"obs listed in {0} will have values listed in {1}".format(
ins_file, out_file
)
)
else:
self.logger.warn("obs listed in {0} will have generic values")
def write_forward_run(self):
"""write the forward run script forward_run.py
Note:
This method can be called repeatedly, especially after any
changes to the pre- and/or post-processing routines.
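Example:
    a hypothetical sketch (assumes `ph` is a `PstFromFlopyModel` instance;
    extra post-processing lines are appended before rewriting)::

        ph.frun_post_lines.append("print('custom post-processing done')")
        ph.write_forward_run()  # regenerates forward_run.py with the new line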
"""
with open(os.path.join(self.m.model_ws, self.forward_run_file), "w") as f:
f.write(
"import os\nimport multiprocessing as mp\nimport numpy as np"
+ "\nimport pandas as pd\nimport flopy\n"
)
f.write("import pyemu\n")
f.write("def main():\n")
f.write("\n")
s = " "
for ex_imp in self.extra_forward_imports:
f.write(s + "import {0}\n".format(ex_imp))
for tmp_file in self.tmp_files:
f.write(s + "try:\n")
f.write(s + " os.remove('{0}')\n".format(tmp_file))
f.write(s + "except Exception as e:\n")
f.write(
s + " print('error removing tmp file:{0}')\n".format(tmp_file)
)
for line in self.frun_pre_lines:
f.write(s + line + "\n")
for line in self.frun_model_lines:
f.write(s + line + "\n")
for line in self.frun_post_lines:
f.write(s + line + "\n")
f.write("\n")
f.write("if __name__ == '__main__':\n")
f.write(" mp.freeze_support()\n main()\n\n")
def _parse_k(self, k, vals):
"""parse the iterable from a property or boundary condition argument"""
try:
k = int(k)
except:
pass
else:
assert k in vals, "k {0} not in vals".format(k)
return [k]
if k is None:
return vals
else:
try:
k_vals = vals[k]
except Exception as e:
raise Exception("error slicing vals with {0}:{1}".format(k, str(e)))
return k_vals
def _parse_pakattr(self, pakattr):
"""parse package-iterable pairs from a property or boundary condition
argument
"""
raw = pakattr.lower().split(".")
if len(raw) != 2:
self.logger.lraise("pakattr is wrong:{0}".format(pakattr))
pakname = raw[0]
attrname = raw[1]
pak = self.m.get_package(pakname)
if pak is None:
if pakname == "extra":
self.logger.statement("'extra' pak detected:{0}".format(pakattr))
ud = flopy.utils.Util3d(
self.m,
(self.m.nlay, self.m.nrow, self.m.ncol),
np.float32,
1.0,
attrname,
)
return "extra", ud
self.logger.lraise("pak {0} not found".format(pakname))
if hasattr(pak, attrname):
attr = getattr(pak, attrname)
return pak, attr
elif hasattr(pak, "stress_period_data"):
dtype = pak.stress_period_data.dtype
if attrname not in dtype.names:
self.logger.lraise(
"attr {0} not found in dtype.names for {1}.stress_period_data".format(
attrname, pakname
)
)
attr = pak.stress_period_data
return pak, attr, attrname
# elif hasattr(pak,'hfb_data'):
# dtype = pak.hfb_data.dtype
# if attrname not in dtype.names:
# self.logger.lraise('attr {0} not found in dtypes.names for {1}.hfb_data. Thanks for playing.'.\
# format(attrname,pakname))
# attr = pak.hfb_data
# return pak, attr, attrname
else:
self.logger.lraise("unrecognized attr:{0}".format(attrname))
def _setup_list_pars(self):
"""main entry point for setting up list multiplier
parameters
"""
tdf = self._setup_temporal_list_pars()
sdf = self._setup_spatial_list_pars()
if tdf is None and sdf is None:
return
os.chdir(self.m.model_ws)
try:
apply_list_pars()
except Exception as e:
os.chdir("..")
self.logger.lraise(
"error test running apply_list_pars():{0}".format(str(e))
)
os.chdir("..")
line = "pyemu.helpers.apply_list_pars()\n"
self.logger.statement("forward_run line:{0}".format(line))
self.frun_pre_lines.append(line)
def _setup_temporal_list_pars(self):
if len(self.temporal_list_props) == 0:
return
self.log("processing temporal_list_props")
bc_filenames = []
bc_cols = []
bc_pak = []
bc_k = []
bc_dtype_names = []
bc_parnme = []
if len(self.temporal_list_props) == 2:
if not isinstance(self.temporal_list_props[0], list):
self.temporal_list_props = [self.temporal_list_props]
for pakattr, k_org in self.temporal_list_props:
pak, attr, col = self._parse_pakattr(pakattr)
k_parse = self._parse_k(k_org, np.arange(self.m.nper))
c = self._get_count(pakattr)
for k in k_parse:
bc_filenames.append(self._list_helper(k, pak, attr, col))
bc_cols.append(col)
pak_name = pak.name[0].lower()
bc_pak.append(pak_name)
bc_k.append(k)
bc_dtype_names.append(",".join(attr.dtype.names))
bc_parnme.append("{0}{1}_{2:03d}".format(pak_name, col, c))
df = pd.DataFrame(
{
"filename": bc_filenames,
"col": bc_cols,
"kper": bc_k,
"pak": bc_pak,
"dtype_names": bc_dtype_names,
"parnme": bc_parnme,
}
)
tds = pd.to_timedelta(np.cumsum(self.m.dis.perlen.array), unit="d")
dts = pd.to_datetime(self.m._start_datetime) + tds
df.loc[:, "datetime"] = df.kper.apply(lambda x: dts[x])
df.loc[:, "timedelta"] = df.kper.apply(lambda x: tds[x])
df.loc[:, "val"] = 1.0
# df.loc[:,"kper"] = df.kper.apply(np.int)
# df.loc[:,"parnme"] = df.apply(lambda x: "{0}{1}_{2:03d}".format(x.pak,x.col,x.kper),axis=1)
df.loc[:, "tpl_str"] = df.parnme.apply(lambda x: "~ {0} ~".format(x))
df.loc[:, "list_org"] = self.list_org
df.loc[:, "model_ext_path"] = self.m.external_path
df.loc[:, "pargp"] = df.parnme.apply(lambda x: x.split("_")[0])
names = [
"filename",
"dtype_names",
"list_org",
"model_ext_path",
"col",
"kper",
"pak",
"val",
]
df.loc[:, names].to_csv(
os.path.join(self.m.model_ws, "temporal_list_pars.dat"), sep=" "
)
df.loc[:, "val"] = df.tpl_str
tpl_name = os.path.join(self.m.model_ws, "temporal_list_pars.dat.tpl")
# f_tpl = open(tpl_name,'w')
# f_tpl.write("ptf ~\n")
# f_tpl.flush()
# df.loc[:,names].to_csv(f_tpl,sep=' ',quotechar=' ')
# f_tpl.write("index ")
# f_tpl.write(df.loc[:,names].to_string(index_names=True))
# f_tpl.close()
_write_df_tpl(
tpl_name, df.loc[:, names], sep=" ", index_label="index", quotechar=" "
)
self.par_dfs["temporal_list"] = df
self.log("processing temporal_list_props")
return True
def _setup_spatial_list_pars(self):
if len(self.spatial_list_props) == 0:
return
self.log("processing spatial_list_props")
bc_filenames = []
bc_cols = []
bc_pak = []
bc_k = []
bc_dtype_names = []
bc_parnme = []
if len(self.spatial_list_props) == 2:
if not isinstance(self.spatial_list_props[0], list):
self.spatial_list_props = [self.spatial_list_props]
for pakattr, k_org in self.spatial_list_props:
pak, attr, col = self._parse_pakattr(pakattr)
k_parse = self._parse_k(k_org, np.arange(self.m.nlay))
if len(k_parse) > 1:
self.logger.lraise(
"spatial_list_pars error: each set of spatial list pars can only be applied "
+ "to a single layer (e.g. [wel.flux,0].\n"
+ "You passed [{0},{1}], implying broadcasting to layers {2}".format(
pakattr, k_org, k_parse
)
)
# # horrible special case for HFB since it cannot vary over time
# if type(pak) != flopy.modflow.mfhfb.ModflowHfb:
for k in range(self.m.nper):
bc_filenames.append(self._list_helper(k, pak, attr, col))
bc_cols.append(col)
pak_name = pak.name[0].lower()
bc_pak.append(pak_name)
bc_k.append(k_parse[0])
bc_dtype_names.append(",".join(attr.dtype.names))
info_df = pd.DataFrame(
{
"filename": bc_filenames,
"col": bc_cols,
"k": bc_k,
"pak": bc_pak,
"dtype_names": bc_dtype_names,
}
)
info_df.loc[:, "list_mlt"] = self.list_mlt
info_df.loc[:, "list_org"] = self.list_org
info_df.loc[:, "model_ext_path"] = self.m.external_path
# check that all files for a given package have the same number of entries
info_df.loc[:, "itmp"] = np.NaN
pak_dfs = {}
for pak in info_df.pak.unique():
df_pak = info_df.loc[info_df.pak == pak, :]
itmp = []
for filename in df_pak.filename:
names = df_pak.dtype_names.iloc[0].split(",")
# mif pak != 'hfb6':
fdf = pd.read_csv(
os.path.join(self.m.model_ws, filename),
delim_whitespace=True,
header=None,
names=names,
)
for c in ["k", "i", "j"]:
fdf.loc[:, c] -= 1
# else:
# # need to navigate the HFB file to skip both comments and header line
# skiprows = sum(
# [1 if i.strip().startswith('#') else 0
# for i in open(os.path.join(self.m.model_ws, filename), 'r').readlines()]) + 1
# fdf = pd.read_csv(os.path.join(self.m.model_ws, filename),
# delim_whitespace=True, header=None, names=names, skiprows=skiprows ).dropna()
#
# for c in ['k', 'irow1','icol1','irow2','icol2']:
# fdf.loc[:, c] -= 1
itmp.append(fdf.shape[0])
pak_dfs[pak] = fdf
info_df.loc[info_df.pak == pak, "itmp"] = itmp
if np.unique(np.array(itmp)).shape[0] != 1:
info_df.to_csv("spatial_list_trouble.csv")
self.logger.lraise(
"spatial_list_pars() error: must have same number of "
+ "entries for every stress period for {0}".format(pak)
)
# make the pak dfs have unique model indices
for pak, df in pak_dfs.items():
# if pak != 'hfb6':
df.loc[:, "idx"] = df.apply(
lambda x: "{0:02.0f}{1:04.0f}{2:04.0f}".format(x.k, x.i, x.j), axis=1
)
# else:
# df.loc[:, "idx"] = df.apply(lambda x: "{0:02.0f}{1:04.0f}{2:04.0f}{2:04.0f}{2:04.0f}".format(x.k, x.irow1, x.icol1,
# x.irow2, x.icol2), axis=1)
if df.idx.unique().shape[0] != df.shape[0]:
self.logger.warn(
"duplicate entries in list pak {0}...collapsing".format(pak)
)
df.drop_duplicates(subset="idx", inplace=True)
df.index = df.idx
pak_dfs[pak] = df
# write template files - find which cols are parameterized...
par_dfs = []
for pak, df in pak_dfs.items():
pak_df = info_df.loc[info_df.pak == pak, :]
# reset all non-index cols to 1.0
for col in df.columns:
if col not in [
"k",
"i",
"j",
"inode",
"irow1",
"icol1",
"irow2",
"icol2",
]:
df.loc[:, col] = 1.0
in_file = os.path.join(self.list_mlt, pak + ".csv")
tpl_file = os.path.join(pak + ".csv.tpl")
# save an all "ones" mult df for testing
df.to_csv(os.path.join(self.m.model_ws, in_file), sep=" ")
parnme, pargp = [], []
# if pak != 'hfb6':
x = df.apply(
lambda x: self.m.sr.xcentergrid[int(x.i), int(x.j)], axis=1
).values
y = df.apply(
lambda x: self.m.sr.ycentergrid[int(x.i), int(x.j)], axis=1
).values
# else:
# # note -- for HFB6, only row and col for node 1
# x = df.apply(lambda x: self.m.sr.xcentergrid[int(x.irow1),int(x.icol1)],axis=1).values
# y = df.apply(lambda x: self.m.sr.ycentergrid[int(x.irow1),int(x.icol1)],axis=1).values
for col in pak_df.col.unique():
col_df = pak_df.loc[pak_df.col == col]
k_vals = col_df.k.unique()
npar = col_df.k.apply(lambda x: x in k_vals).shape[0]
if npar == 0:
continue
names = df.index.map(lambda x: "{0}{1}{2}".format(pak[0], col[0], x))
df.loc[:, col] = names.map(lambda x: "~ {0} ~".format(x))
df.loc[df.k.apply(lambda x: x not in k_vals), col] = 1.0
par_df = pd.DataFrame(
{"parnme": names, "x": x, "y": y, "k": df.k.values}, index=names
)
par_df = par_df.loc[par_df.k.apply(lambda x: x in k_vals)]
if par_df.shape[0] == 0:
self.logger.lraise(
"no parameters found for spatial list k,pak,attr {0}, {1}, {2}".format(
k_vals, pak, col
)
)
par_df.loc[:, "pargp"] = df.k.apply(
lambda x: "{0}{1}_k{2:02.0f}".format(pak, col, int(x))
).values
par_df.loc[:, "tpl_file"] = tpl_file
par_df.loc[:, "in_file"] = in_file
par_dfs.append(par_df)
# with open(os.path.join(self.m.model_ws,tpl_file),'w') as f:
# f.write("ptf ~\n")
# f.flush()
# df.to_csv(f)
# f.write("index ")
# f.write(df.to_string(index_names=False)+'\n')
_write_df_tpl(
os.path.join(self.m.model_ws, tpl_file),
df,
sep=" ",
quotechar=" ",
index_label="index",
)
self.tpl_files.append(tpl_file)
self.in_files.append(in_file)
par_df = pd.concat(par_dfs)
self.par_dfs["spatial_list"] = par_df
info_df.to_csv(os.path.join(self.m.model_ws, "spatial_list_pars.dat"), sep=" ")
self.log("processing spatial_list_props")
return True
def _list_helper(self, k, pak, attr, col):
"""helper to setup list multiplier parameters for a given
k, pak, attr set.
"""
# special case for horrible HFB6 exception
# if type(pak) == flopy.modflow.mfhfb.ModflowHfb:
# filename = pak.file_name[0]
# else:
filename = attr.get_filename(k)
filename_model = os.path.join(self.m.external_path, filename)
shutil.copy2(
os.path.join(self.m.model_ws, filename_model),
os.path.join(self.m.model_ws, self.list_org, filename),
)
return filename_model
def _setup_hds(self):
"""setup modflow head save file observations for given kper (zero-based
stress period index) and k (zero-based layer index) pairs using the
kperk argument.
"""
if self.hds_kperk is None or len(self.hds_kperk) == 0:
return
from .gw_utils import setup_hds_obs
# if len(self.hds_kperk) == 2:
# try:
# if len(self.hds_kperk[0] == 2):
# pass
# except:
# self.hds_kperk = [self.hds_kperk]
oc = self.m.get_package("OC")
if oc is None:
raise Exception("can't find OC package in model to setup hds grid obs")
if not oc.savehead:
raise Exception("OC not saving hds, can't setup grid obs")
hds_unit = oc.iuhead
hds_file = self.m.get_output(unit=hds_unit)
assert os.path.exists(
os.path.join(self.org_model_ws, hds_file)
), "couldn't find existing hds file {0} in org_model_ws".format(hds_file)
shutil.copy2(
os.path.join(self.org_model_ws, hds_file),
os.path.join(self.m.model_ws, hds_file),
)
inact = None
if self.m.lpf is not None:
inact = self.m.lpf.hdry
elif self.m.upw is not None:
inact = self.m.upw.hdry
if inact is None:
skip = lambda x: np.NaN if x == self.m.bas6.hnoflo else x
else:
skip = lambda x: np.NaN if x == self.m.bas6.hnoflo or x == inact else x
print(self.hds_kperk)
frun_line, df = setup_hds_obs(
os.path.join(self.m.model_ws, hds_file),
kperk_pairs=self.hds_kperk,
skip=skip,
)
self.obs_dfs["hds"] = df
self.frun_post_lines.append(
"pyemu.gw_utils.apply_hds_obs('{0}')".format(hds_file)
)
self.tmp_files.append(hds_file)
def _setup_smp(self):
"""setup observations from PEST-style SMP file pairs"""
if self.obssim_smp_pairs is None:
return
if len(self.obssim_smp_pairs) == 2:
if isinstance(self.obssim_smp_pairs[0], str):
self.obssim_smp_pairs = [self.obssim_smp_pairs]
for obs_smp, sim_smp in self.obssim_smp_pairs:
self.log("processing {0} and {1} smp files".format(obs_smp, sim_smp))
if not os.path.exists(obs_smp):
self.logger.lraise("couldn't find obs smp: {0}".format(obs_smp))
if not os.path.exists(sim_smp):
self.logger.lraise("couldn't find sim smp: {0}".format(sim_smp))
new_obs_smp = os.path.join(self.m.model_ws, os.path.split(obs_smp)[-1])
shutil.copy2(obs_smp, new_obs_smp)
new_sim_smp = os.path.join(self.m.model_ws, os.path.split(sim_smp)[-1])
shutil.copy2(sim_smp, new_sim_smp)
pyemu.smp_utils.smp_to_ins(new_sim_smp)
def _setup_hob(self):
"""setup observations from the MODFLOW HOB package"""
if self.m.hob is None:
return
hob_out_unit = self.m.hob.iuhobsv
new_hob_out_fname = os.path.join(
self.m.model_ws, self.m.get_output_attribute(unit=hob_out_unit)
)
org_hob_out_fname = os.path.join(
self.org_model_ws, self.m.get_output_attribute(unit=hob_out_unit)
)
if not os.path.exists(org_hob_out_fname):
self.logger.warn(
"could not find hob out file: {0}...skipping".format(hob_out_fname)
)
return
shutil.copy2(org_hob_out_fname, new_hob_out_fname)
hob_df = pyemu.gw_utils.modflow_hob_to_instruction_file(new_hob_out_fname)
self.obs_dfs["hob"] = hob_df
self.tmp_files.append(os.path.split(new_hob_out_fname)[-1])
def _setup_hyd(self):
"""setup observations from the MODFLOW HYDMOD package"""
if self.m.hyd is None:
return
if self.mfhyd:
org_hyd_out = os.path.join(self.org_model_ws, self.m.name + ".hyd.bin")
if not os.path.exists(org_hyd_out):
self.logger.warn(
"can't find existing hyd out file:{0}...skipping".format(
org_hyd_out
)
)
return
new_hyd_out = os.path.join(self.m.model_ws, os.path.split(org_hyd_out)[-1])
shutil.copy2(org_hyd_out, new_hyd_out)
df = pyemu.gw_utils.modflow_hydmod_to_instruction_file(new_hyd_out)
df.loc[:, "obgnme"] = df.obsnme.apply(lambda x: "_".join(x.split("_")[:-1]))
line = "pyemu.gw_utils.modflow_read_hydmod_file('{0}')".format(
os.path.split(new_hyd_out)[-1]
)
self.logger.statement("forward_run line: {0}".format(line))
self.frun_post_lines.append(line)
self.obs_dfs["hyd"] = df
self.tmp_files.append(os.path.split(new_hyd_out)[-1])
def _setup_water_budget_obs(self):
"""setup observations from the MODFLOW list file for
volume and flux water budget information
"""
if self.mflist_waterbudget:
org_listfile = os.path.join(self.org_model_ws, self.m.lst.file_name[0])
if os.path.exists(org_listfile):
shutil.copy2(
org_listfile, os.path.join(self.m.model_ws, self.m.lst.file_name[0])
)
else:
self.logger.warn(
"can't find existing list file:{0}...skipping".format(org_listfile)
)
return
list_file = os.path.join(self.m.model_ws, self.m.lst.file_name[0])
flx_file = os.path.join(self.m.model_ws, "flux.dat")
vol_file = os.path.join(self.m.model_ws, "vol.dat")
df = pyemu.gw_utils.setup_mflist_budget_obs(
list_file,
flx_filename=flx_file,
vol_filename=vol_file,
start_datetime=self.m.start_datetime,
)
if df is not None:
self.obs_dfs["wb"] = df
# line = "try:\n os.remove('{0}')\nexcept:\n pass".format(os.path.split(list_file)[-1])
# self.logger.statement("forward_run line:{0}".format(line))
# self.frun_pre_lines.append(line)
self.tmp_files.append(os.path.split(list_file)[-1])
line = "pyemu.gw_utils.apply_mflist_budget_obs('{0}',flx_filename='{1}',vol_filename='{2}',start_datetime='{3}')".format(
os.path.split(list_file)[-1],
os.path.split(flx_file)[-1],
os.path.split(vol_file)[-1],
self.m.start_datetime,
)
self.logger.statement("forward_run line:{0}".format(line))
self.frun_post_lines.append(line)
def apply_list_and_array_pars(arr_par_file="mult2model_info.csv", chunk_len=50):
"""Apply multiplier parameters to list and array style model files
Args:
arr_par_file (`str`): path to the csv file relating multiplier files to
model input files (written by `PstFrom`). Default is "mult2model_info.csv".
chunk_len (`int`): the number of files to process per multiprocessing
chunk in apply_array_pars(). Default is 50.
Returns:
Note:
Used to implement the parameterization constructed by
PstFrom during a forward run
Should be added to the forward_run.py script
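Example:
    a sketch of the intended forward_run.py usage (the csv name is the
    default written by `PstFrom`)::

        import pyemu
        pyemu.helpers.apply_list_and_array_pars(
            arr_par_file="mult2model_info.csv", chunk_len=50
        )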
"""
df = pd.read_csv(arr_par_file, index_col=0)
arr_pars = df.loc[df.index_cols.isna()].copy()
list_pars = df.loc[df.index_cols.notna()].copy()
# extract lists from string in input df
list_pars["index_cols"] = list_pars.index_cols.apply(lambda x: literal_eval(x))
list_pars["use_cols"] = list_pars.use_cols.apply(lambda x: literal_eval(x))
list_pars["lower_bound"] = list_pars.lower_bound.apply(lambda x: literal_eval(x))
list_pars["upper_bound"] = list_pars.upper_bound.apply(lambda x: literal_eval(x))
# TODO check use_cols is always present
apply_genericlist_pars(list_pars, chunk_len=chunk_len)
apply_array_pars(arr_pars, chunk_len=chunk_len)
def _process_chunk_fac2real(chunk, i):
for args in chunk:
pyemu.geostats.fac2real(**args)
print("process", i, " processed ", len(chunk), "fac2real calls")
def _process_chunk_array_files(chunk, i, df):
for model_file in chunk:
_process_array_file(model_file, df)
print("process", i, " processed ", len(chunk), "process_array_file calls")
def _process_array_file(model_file, df):
# find all mults that need to be applied to this array
df_mf = df.loc[df.model_file == model_file, :]
results = []
org_file = df_mf.org_file.unique()
if org_file.shape[0] != 1:
raise Exception("wrong number of org_files for {0}".format(model_file))
org_arr = np.loadtxt(org_file[0])
if "mlt_file" in df_mf.columns:
for mlt in df_mf.mlt_file:
if pd.isna(mlt):
continue
mlt_data = np.loadtxt(mlt)
if org_arr.shape != mlt_data.shape:
raise Exception(
"shape of org file {}:{} differs from mlt file {}:{}".format(
org_file, org_arr.shape, mlt, mlt_data.shape
)
)
org_arr *= mlt_data
if "upper_bound" in df.columns:
ub_vals = df_mf.upper_bound.value_counts().dropna().to_dict()
if len(ub_vals) == 0:
pass
elif len(ub_vals) > 1:
print(ub_vals)
raise Exception("different upper bound values for {0}".format(org_file))
else:
ub = float(list(ub_vals.keys())[0])
org_arr[org_arr > ub] = ub
if "lower_bound" in df.columns:
lb_vals = df_mf.lower_bound.value_counts().dropna().to_dict()
if len(lb_vals) == 0:
pass
elif len(lb_vals) > 1:
raise Exception("different lower bound values for {0}".format(org_file))
else:
lb = float(list(lb_vals.keys())[0])
org_arr[org_arr < lb] = lb
np.savetxt(model_file, np.atleast_2d(org_arr), fmt="%15.6E", delimiter="")
def apply_array_pars(arr_par="arr_pars.csv", arr_par_file=None, chunk_len=50):
"""a function to apply array-based multipler parameters.
Args:
arr_par (`str` or `pandas.DataFrame`): if type `str`,
path to csv file detailing parameter array multipliers.
This file can be written by PstFromFlopyModel.
if type `pandas.DataFrame` is Dataframe with columns of
['mlt_file', 'model_file', 'org_file'] and optionally
['pp_file', 'fac_file'].
chunk_len (`int`) : the number of files to process per chunk
with multiprocessing - applies to both fac2real and process_
input_files. Default is 50.
Note:
Used to implement the parameterization constructed by
PstFromFlopyModel during a forward run
This function should be added to the forward_run.py script but can
be called on any correctly formatted csv
This function using multiprocessing, spawning one process for each
model input array (and optionally pp files). This speeds up
execution time considerably but means you need to make sure your
forward run script uses the proper multiprocessing idioms for
freeze support and main thread handling.
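Example:
    a sketch of typical forward-run usage ("arr_pars.csv" is the file written
    by `PstFromFlopyModel._setup_array_pars()`)::

        import pyemu
        pyemu.helpers.apply_array_pars(arr_par="arr_pars.csv", chunk_len=50)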
"""
if arr_par_file is not None:
warnings.warn(
"`arr_par_file` argument is deprecated and replaced "
"by arr_par. Method now support passing DataFrame as "
"arr_par arg.",
PyemuWarning,
)
arr_par = arr_par_file
if isinstance(arr_par, str):
df = pd.read_csv(arr_par, index_col=0)
elif isinstance(arr_par, pd.DataFrame):
df = arr_par
else:
raise TypeError(
"`arr_par` argument must be filename string or "
"Pandas DataFrame, "
"type {0} passed".format(type(arr_par))
)
# for fname in df.model_file:
# try:
# os.remove(fname)
# except:
# print("error removing mult array:{0}".format(fname))
if "pp_file" in df.columns:
print("starting fac2real", datetime.now())
pp_df = df.loc[df.pp_file.notna(), ["pp_file", "fac_file", "mlt_file",
"pp_fill_value","pp_lower_limit","pp_upper_limit"]].rename(
columns={"fac_file": "factors_file", "mlt_file": "out_file",
"pp_fill_value":"fill_value","pp_lower_limit":"lower_lim","pp_upper_limit":"upper_lim"}
)
# don't need to process all (e.g. if const. mults apply across kper...)
pp_args = pp_df.drop_duplicates().to_dict("records")
num_ppargs = len(pp_args)
num_chunk_floor = num_ppargs // chunk_len
main_chunks = (
np.array(pp_args)[: num_chunk_floor * chunk_len]
.reshape([-1, chunk_len])
.tolist()
)
remainder = np.array(pp_args)[num_chunk_floor * chunk_len :].tolist()
chunks = main_chunks + [remainder]
print("number of chunks to process:",len(chunks))
if len(chunks) == 1:
_process_chunk_fac2real(chunks[0], 0)
else:
pool = mp.Pool()
x = [
pool.apply_async(_process_chunk_fac2real, args=(chunk, i))
for i, chunk in enumerate(chunks)
]
[xx.get() for xx in x]
pool.close()
pool.join()
# procs = []
# for chunk in chunks:
# p = mp.Process(target=_process_chunk_fac2real, args=[chunk])
# p.start()
# procs.append(p)
# for p in procs:
# p.join()
print("finished fac2real", datetime.now())
print("starting arr mlt", datetime.now())
uniq = df.model_file.unique() # unique model input files to be produced
num_uniq = len(uniq) # number of input files to be produced
# number of files to send to each processor
# lazily splitting the files to be processed into even chunks
num_chunk_floor = num_uniq // chunk_len # number of whole chunks
main_chunks = (
uniq[: num_chunk_floor * chunk_len].reshape([-1, chunk_len]).tolist()
) # the list of files broken down into chunks
remainder = uniq[num_chunk_floor * chunk_len :].tolist() # remaining files
chunks = main_chunks + [remainder]
print("number of chunks to process:", len(chunks))
if len(chunks) == 1:
_process_chunk_array_files(chunks[0],0,df)
# procs = []
# for chunk in chunks: # now only spawn processor for each chunk
# p = mp.Process(target=_process_chunk_model_files, args=[chunk, df])
# p.start()
# procs.append(p)
# for p in procs:
# r = p.get(False)
# p.join()
else:
pool = mp.Pool()
x = [
pool.apply_async(_process_chunk_array_files, args=(chunk, i, df))
for i, chunk in enumerate(chunks)
]
[xx.get() for xx in x]
pool.close()
pool.join()
print("finished arr mlt", datetime.now())
def apply_list_pars():
"""a function to apply boundary condition multiplier parameters.
Note:
Used to implement the parameterization constructed by
PstFromFlopyModel during a forward run
Requires either "temporal_list_pars.csv" or "spatial_list_pars.csv"
Should be added to the forward_run.py script
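Example:
    a sketch of typical forward-run usage (assumes "temporal_list_pars.dat"
    and/or "spatial_list_pars.dat" written by `PstFromFlopyModel` are present
    in the current working directory)::

        import pyemu
        pyemu.helpers.apply_list_pars()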
"""
temp_file = "temporal_list_pars.dat"
spat_file = "spatial_list_pars.dat"
temp_df, spat_df = None, None
if os.path.exists(temp_file):
temp_df = pd.read_csv(temp_file, delim_whitespace=True)
temp_df.loc[:, "split_filename"] = temp_df.filename.apply(
lambda x: os.path.split(x)[-1]
)
org_dir = temp_df.list_org.iloc[0]
model_ext_path = temp_df.model_ext_path.iloc[0]
if os.path.exists(spat_file):
spat_df = pd.read_csv(spat_file, delim_whitespace=True)
spat_df.loc[:, "split_filename"] = spat_df.filename.apply(
lambda x: os.path.split(x)[-1]
)
mlt_dir = spat_df.list_mlt.iloc[0]
org_dir = spat_df.list_org.iloc[0]
model_ext_path = spat_df.model_ext_path.iloc[0]
if temp_df is None and spat_df is None:
raise Exception("apply_list_pars() - no key dfs found, nothing to do...")
# load the spatial mult dfs
sp_mlts = {}
if spat_df is not None:
for f in os.listdir(mlt_dir):
pak = f.split(".")[0].lower()
df = pd.read_csv(
os.path.join(mlt_dir, f), index_col=0, delim_whitespace=True
)
# if pak != 'hfb6':
df.index = df.apply(
lambda x: "{0:02.0f}{1:04.0f}{2:04.0f}".format(x.k, x.i, x.j), axis=1
)
# else:
# df.index = df.apply(lambda x: "{0:02.0f}{1:04.0f}{2:04.0f}{2:04.0f}{2:04.0f}".format(x.k, x.irow1, x.icol1,
# x.irow2, x.icol2), axis = 1)
if pak in sp_mlts.keys():
raise Exception("duplicate multiplier csv for pak {0}".format(pak))
if df.shape[0] == 0:
raise Exception("empty dataframe for spatial list file: {0}".format(f))
sp_mlts[pak] = df
org_files = os.listdir(org_dir)
# for fname in df.filename.unique():
for fname in org_files:
# need to get the PAK name to handle stupid horrible exceptions for HFB...
# try:
# pakspat = sum([True if fname in i else False for i in spat_df.filename])
# if pakspat:
# pak = spat_df.loc[spat_df.filename.str.contains(fname)].pak.values[0]
# else:
# pak = 'notHFB'
# except:
# pak = "notHFB"
names = None
if temp_df is not None and fname in temp_df.split_filename.values:
temp_df_fname = temp_df.loc[temp_df.split_filename == fname, :]
if temp_df_fname.shape[0] > 0:
names = temp_df_fname.dtype_names.iloc[0].split(",")
if spat_df is not None and fname in spat_df.split_filename.values:
spat_df_fname = spat_df.loc[spat_df.split_filename == fname, :]
if spat_df_fname.shape[0] > 0:
names = spat_df_fname.dtype_names.iloc[0].split(",")
if names is not None:
df_list = pd.read_csv(
os.path.join(org_dir, fname),
delim_whitespace=True,
header=None,
names=names,
)
df_list.loc[:, "idx"] = df_list.apply(
lambda x: "{0:02.0f}{1:04.0f}{2:04.0f}".format(
x.k - 1, x.i - 1, x.j - 1
),
axis=1,
)
df_list.index = df_list.idx
pak_name = fname.split("_")[0].lower()
if pak_name in sp_mlts:
mlt_df = sp_mlts[pak_name]
mlt_df_ri = mlt_df.reindex(df_list.index)
for col in df_list.columns:
if col in [
"k",
"i",
"j",
"inode",
"irow1",
"icol1",
"irow2",
"icol2",
"idx",
]:
continue
if col in mlt_df.columns:
# print(mlt_df.loc[mlt_df.index.duplicated(),:])
# print(df_list.loc[df_list.index.duplicated(),:])
df_list.loc[:, col] *= mlt_df_ri.loc[:, col].values
if temp_df is not None and fname in temp_df.split_filename.values:
temp_df_fname = temp_df.loc[temp_df.split_filename == fname, :]
for col, val in zip(temp_df_fname.col, temp_df_fname.val):
df_list.loc[:, col] *= val
fmts = ""
for name in names:
if name in ["i", "j", "k", "inode", "irow1", "icol1", "irow2", "icol2"]:
fmts += " %9d"
else:
fmts += " %9G"
np.savetxt(
os.path.join(model_ext_path, fname), df_list.loc[:, names].values, fmt=fmts
)
def calc_array_par_summary_stats(arr_par_file="mult2model_info.csv"):
"""read and generate summary statistics for the resulting model input arrays from
applying array par multipliers
Args:
arr_par_file (`str`): the array multiplier key file
Returns:
pd.DataFrame: dataframe of summary stats for each model_file entry
Note:
this function uses an optional "zone_file" column. If multiple zones
files are used, then zone arrays are aggregated to a single array
"dif" values are original array values minus model input array values
"""
df = pd.read_csv(arr_par_file, index_col=0)
df = df.loc[df.index_cols.isna(),:].copy()
if df.shape[0] == 0:
return None
model_input_files = df.model_file.unique()
model_input_files.sort()
records = dict()
stat_dict = {"mean":np.nanmean,"stdev":np.nanstd,"median":np.nanmedian,"min":np.nanmin,"max":np.nanmax}
quantiles = [0.05,0.25,0.75,0.95]
for stat in stat_dict.keys():
records[stat] = []
records[stat+"_org"] = []
records[stat + "_dif"] = []
for q in quantiles:
records["quantile_{0}".format(q)] = []
records["quantile_{0}_org".format(q)] = []
records["quantile_{0}_dif".format(q)] = []
records["upper_bound"] = []
records["lower_bound"] = []
records["upper_bound_org"] = []
records["lower_bound_org"] = []
records["upper_bound_dif"] = []
records["lower_bound_dif"] = []
for model_input_file in model_input_files:
arr = np.loadtxt(model_input_file)
org_file = df.loc[df.model_file==model_input_file,"org_file"].values
org_file = org_file[0]
org_arr = np.loadtxt(org_file)
if "zone_file" in df.columns:
zone_file = df.loc[df.model_file == model_input_file,"zone_file"].dropna().unique()
if len(zone_file) > 1:
zone_arr = np.zeros_like(arr)
for zf in zone_file:
za = np.loadtxt(zf)
zone_arr[za!=0] = 1
else:
zone_arr = np.loadtxt(zone_file[0])
arr[zone_arr==0] = np.NaN
org_arr[zone_arr==0] = np.NaN
for stat,func in stat_dict.items():
v = func(arr)
records[stat].append(v)
ov = func(org_arr)
records[stat+"_org"].append(ov)
records[stat+"_dif"].append(ov-v)
for q in quantiles:
v = np.nanquantile(arr,q)
ov = np.nanquantile(org_arr,q)
records["quantile_{0}".format(q)].append(v)
records["quantile_{0}_org".format(q)].append(ov)
records["quantile_{0}_dif".format(q)].append(ov-v)
ub = df.loc[df.model_file==model_input_file,"upper_bound"].max()
lb = df.loc[df.model_file == model_input_file, "lower_bound"].min()
if pd.isna(ub):
#!/usr/bin/env python3
"""
Author: <NAME>
Load data, extract feature set, train model with feature set, make predictions.
usage: ./predict-tm.py -d [Pfam expression profiles] -t [MMETSP training data] -l [Training labels] -o [Output path and file name for results]
-f [Feature set] -use-rf [make predictions with random forest model]
"""
import pandas as pd
import numpy as np
import warnings
import argparse
from sklearn.preprocessing import MinMaxScaler
from xgboost import XGBClassifier
from sklearn.ensemble import RandomForestClassifier
parser = argparse.ArgumentParser()
parser.add_argument('-d', action='store', dest='data', help="Path to pfam expression profiles for classification.")
parser.add_argument('-t', action='store', dest='train', help='Path to MMETSP training data.')
parser.add_argument('-f', action='store', dest='feat', help="Path to feature set file.")
parser.add_argument('-l', action='store', dest='labels', help="Path to training labels file.")
parser.add_argument('-o', action='store', dest='out', help="Results destination.")
parser.add_argument('-use-rf', action='store', dest='rf', type=bool, default=False, help="Use the random forest model for classification. [Not recommended]")
# Define a warning just to be a bit more specific.
class DataFormatWarning(Warning):
pass
class PfamLoader():
# Initialize the class with the path to profiles you want to classify, the path to the MMETSP training dataset,
# and the list of features (common, all selected features).
def __init__(self, data, train_data, train_labels, features):
self.pfam_path = data
self.train_path = train_data
self.feat_path = features
self.labels_path = train_labels
def load_pfam_profiles(self):
d = pd.read_csv(self.pfam_path)
# A common part of pfam annotation leaves you with pfam ids that have decimal + number. We want to
# get rid of those!
_cols = [col for col in d.columns if '.' in col]
if len(_cols) != 0:
d.columns = d.columns.str.split(".").str[0]
# Run simple checks on the data content. We want more than 800 non-zero pfams.
# Very simple check.
if d.shape[1] < 800:
warnings.warn("Pfam profiles have less than the suggested 800 non-zero columns.", DataFormatWarning)
# Even if the simple check passes, it's possible that some rows still have fewer than 800 non-zero elements.
counts = list(d.astype(bool).sum(axis=1))
if min(counts) < 800:
warnings.warn("Pfam profiles have less than the suggested 800 non-zero columns.", DataFormatWarning)
self.pfam_data = d
def get_subset_profiles(self):
feats = pd.read_csv(self.feat_path)
train = | pd.read_csv(self.train_path) | pandas.read_csv |
import argparse
from collections import defaultdict
from multiprocessing import Queue, Process
import os
import random
import numpy as np
import pandas as pd
import torch
from data_model.simple_continuous_model import SimpleContinuousModel
from direct_method.simple_direct import SimpleDirectModel
from direct_method.two_stage_direct import TwoStageDirectModel
from evaluation.estimate_policy_value import estimate_policy_value
from evaluation.estimate_policy_value_direct import \
get_mu_t_direct_train_test, get_dr_policy_value
from utils.hide_output import HideOutput
from weights_learning.quadprog_learning_x_continuous import \
BalancedWeightsLearningContinuousQuadprogX
from weights_learning.quadprog_learning_z_continuous import \
BalancedWeightsLearningContinuousQuadprog
def y_activation_cubic(y):
return y ** 3
def y_activation_sign(y):
return 3 * np.sign(1.0 * y)
def y_activation_exp(y):
return np.exp(1.0 * y)
def toy_continuous_policy(x, t):
# score = (x * np.array([-1, 1, -1, 2])).sum() / 10
score = (x * np.array([-1, 2, 2, -1, -1, -1, 1, 1, 1, -1])).sum() * 0.1
zero_probability = np.exp(score) / (np.exp(score) + np.exp(-score))
if t == 0:
return zero_probability
else:
return 1 - zero_probability
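# Hedged illustration (not from the original experiment script): toy_continuous_policy
# maps the linear score s = 0.1 * <x, w> to exp(s) / (exp(s) + exp(-s)) for treatment 0
# and to the complement for treatment 1, so the two probabilities always sum to one.
# The 10-dimensional covariate vector below is made up purely for demonstration.
def _demo_policy_probabilities():
    x = np.zeros(10)  # hypothetical covariate vector matching the policy's weight length
    p0 = toy_continuous_policy(x, 0)
    p1 = toy_continuous_policy(x, 1)
    assert abs((p0 + p1) - 1.0) < 1e-12  # the treatment probabilities form a distribution
    return p0, p1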
def main():
parser = argparse.ArgumentParser()
parser.add_argument("-r", "--random_seed", default=527, type=int,
help="initial random seed of first process "
"(i'th process starts with seed random_seed+i)")
parser.add_argument("-l", "--link_function", default="step",
help="link function to use with data model (available"
" options are: step, exp, cubic, linear)",
type=str)
parser.add_argument("-n", "--num_reps", default=64, type=int,
help="number of repetitions")
parser.add_argument("-p", "--num_procs", default=1, type=int,
help="number of parallel processes")
parser.add_argument("-d", "--num_data_range", default="2000,1000,500,200",
help="range of num data values")
parser.add_argument("-m", "--sigma_mul_range", default="0.001,0.2,1.0,5.0",
help="range of sigma matrix multipliers to test")
parser.add_argument("save_path", help="path to save results '.csv' file to")
# parser.add_argument()
args = parser.parse_args()
num_treatment = 2
if args.link_function == "step":
y_activation = y_activation_sign
elif args.link_function == "exp":
y_activation = y_activation_exp
elif args.link_function == "cubic":
y_activation = y_activation_cubic
elif args.link_function == "linear":
y_activation = None
else:
raise ValueError("Invalid link activation:", args.link_function)
data_model_class = SimpleContinuousModel
data_model_args = {"y_activation": y_activation}
kernel = "rbf"
policy = toy_continuous_policy
num_data_policy_estimate = 1000000
# num_data_range = (2000, 1000, 500, 200)
num_data_range = tuple([int(n) for n in args.num_data_range.split(",")])
sigma_mul_range = tuple([float(n) for n in args.sigma_mul_range.split(",")])
save_path = args.save_path
save_dir = os.path.dirname(save_path)
if save_dir:
os.makedirs(save_dir, exist_ok=True)
job_queue = Queue()
results_queue = Queue()
num_jobs = 0
for num_data in num_data_range:
for rep in range(args.num_reps):
num_jobs += 1
job_queue.put((num_data, rep))
procs = []
for p_i in range(args.num_procs):
job_queue.put("STOP")
config = {
"num_treatment": num_treatment,
"data_model_class": data_model_class,
"data_model_args": data_model_args,
"kernel": kernel,
"policy": policy,
"link_function": args.link_function,
"seed": args.random_seed + p_i,
"sigma_mul_range": sigma_mul_range,
}
p_args = (job_queue, results_queue, config)
p = Process(target=worker_loop, args=p_args)
p.start()
procs.append(p)
rows = []
all_results = defaultdict(lambda: defaultdict(list))
num_done = 0
while num_done < num_jobs:
num_data, rep, results = results_queue.get()
for method, tau in results.items():
row = {"method": method, "num_data": num_data, "rep": rep,
"tau": tau}
rows.append(row)
all_results[num_data][method].append(tau)
num_done += 1
for p in procs:
p.join()
data_model = data_model_class(**data_model_args)
policy_value = estimate_policy_value(data_model, policy, num_treatment,
num_data=num_data_policy_estimate)
print("")
print("true policy value with %s link function: %f"
% (args.link_function, policy_value))
for num_data, results in sorted(all_results.items()):
print("")
print("printing results for num-data=%d" % num_data)
for method, tau_list in sorted(results.items()):
mse = ((np.array(tau_list) - policy_value) ** 2).mean()
bias = np.abs(np.array(tau_list).mean() - policy_value)
bias_std = np.array(tau_list).std(ddof=1)
bias_std /= (args.num_reps ** 0.5)
variance = mse - bias ** 2
mse_se = ((np.array(tau_list) - policy_value) ** 2).std(ddof=1)
mse_se /= (args.num_reps ** 0.5)
print("method=%s, mse=%.3f±%.3f, bias=%.3f±%.3f, variance=%.3f"
% (method, mse, mse_se, bias, bias_std, variance))
print("")
for row in rows:
row["policy_value"] = policy_value
row["se"] = (row["tau"] - policy_value) ** 2
data = | pd.DataFrame(rows) | pandas.DataFrame |
import os
import math
import pandas as pd
import sklearn.metrics as metrics
from sklearn.ensemble import RandomForestClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import classification_report, confusion_matrix, roc_auc_score, accuracy_score
from sklearn.metrics import average_precision_score
from numpyencoder import NumpyEncoder
from get_data import read_params
import argparse
import joblib
import json
def train_and_evaluate(config_path):
"""
This function trains and evaluates a machine learning model on the dataset.
:param config_path: the path of the config file to use
"""
# Read configuration options
config = read_params(config_path)
model_type = config["train_and_evaluate"]["model"]
test_data_path = config["split_data"]["test_path"]
train_data_path = config["split_data"]["train_path"]
random_state = config["base"]["random_state"]
model_dir = config["model_dir"]
target = [config["base"]["target_col"]]
scores_file = config["reports"]["scores"]
prc_file = config["reports"]["prc"]
roc_file = config["reports"]["roc"]
# Load training and validation datasets
train = | pd.read_csv(train_data_path, sep=",") | pandas.read_csv |
from pathlib import Path
import pandas as pd
import numpy as np
import click
from ...exceptions import InvalidFileExtension
from ...utilities import safe_load
def merge_command(files, output, **kwargs):
verbose = kwargs.pop("verbose", False)
tscol = kwargs.pop("tscol", "timestamp_iso")
# create an array of timestamp column names to try
tscols = [tscol, "timestamp", "timestamp_local"]
# make sure the extension is either a csv or feather format
output = Path(output)
if output.suffix not in (".csv", ".feather"):
raise InvalidFileExtension("Invalid file extension")
save_as_csv = True if output.suffix == ".csv" else False
# concat everything in filepath
if verbose:
click.secho("Files to read: {}".format(files), fg='green')
df = pd.DataFrame()
with click.progressbar(files, label="Parsing files") as bar:
for f in bar:
tmp = safe_load(f)
# check for the column name and set to best guess
for c in tscols:
if c in tmp.columns:
tscol = c
break
if not tscol in tmp.columns:
click.secho("Time tscol was not found in the file; skipping file.", fg='red')
continue
# convert the timestamp column to a pandas datetime
if not pd.core.dtypes.common.is_datetime_or_timedelta_dtype(tmp[tscol]):
tmp[tscol] = tmp[tscol].apply(lambda x: pd.to_datetime(x, errors='coerce'))
# drop the bad rows
tmp = tmp.dropna(how='any', subset=[tscol])
# double-check the dtype in case the conversion above did not yield datetimes
if not | pd.core.dtypes.common.is_datetime_or_timedelta_dtype(tmp[tscol]) | pandas.core.dtypes.common.is_datetime_or_timedelta_dtype |
# -*- coding: utf-8 -*-
from __future__ import print_function
from distutils.version import LooseVersion
from numpy import nan, random
import numpy as np
from pandas.compat import lrange
from pandas import (DataFrame, Series, Timestamp,
date_range)
import pandas as pd
from pandas.util.testing import (assert_series_equal,
assert_frame_equal,
assertRaisesRegexp)
import pandas.util.testing as tm
from pandas.tests.frame.common import TestData, _check_mixed_float
def _skip_if_no_pchip():
try:
from scipy.interpolate import pchip_interpolate # noqa
except ImportError:
import nose
raise nose.SkipTest('scipy.interpolate.pchip missing')
class TestDataFrameMissingData(tm.TestCase, TestData):
_multiprocess_can_split_ = True
def test_dropEmptyRows(self):
N = len(self.frame.index)
mat = random.randn(N)
mat[:5] = nan
frame = DataFrame({'foo': mat}, index=self.frame.index)
original = Series(mat, index=self.frame.index, name='foo')
expected = original.dropna()
inplace_frame1, inplace_frame2 = frame.copy(), frame.copy()
smaller_frame = frame.dropna(how='all')
# check that original was preserved
assert_series_equal(frame['foo'], original)
inplace_frame1.dropna(how='all', inplace=True)
assert_series_equal(smaller_frame['foo'], expected)
assert_series_equal(inplace_frame1['foo'], expected)
smaller_frame = frame.dropna(how='all', subset=['foo'])
inplace_frame2.dropna(how='all', subset=['foo'], inplace=True)
assert_series_equal(smaller_frame['foo'], expected)
assert_series_equal(inplace_frame2['foo'], expected)
def test_dropIncompleteRows(self):
N = len(self.frame.index)
mat = random.randn(N)
mat[:5] = nan
frame = DataFrame({'foo': mat}, index=self.frame.index)
frame['bar'] = 5
original = Series(mat, index=self.frame.index, name='foo')
inp_frame1, inp_frame2 = frame.copy(), frame.copy()
smaller_frame = frame.dropna()
assert_series_equal(frame['foo'], original)
inp_frame1.dropna(inplace=True)
exp = Series(mat[5:], index=self.frame.index[5:], name='foo')
tm.assert_series_equal(smaller_frame['foo'], exp)
tm.assert_series_equal(inp_frame1['foo'], exp)
samesize_frame = frame.dropna(subset=['bar'])
assert_series_equal(frame['foo'], original)
self.assertTrue((frame['bar'] == 5).all())
inp_frame2.dropna(subset=['bar'], inplace=True)
self.assert_index_equal(samesize_frame.index, self.frame.index)
self.assert_index_equal(inp_frame2.index, self.frame.index)
def test_dropna(self):
df = DataFrame(np.random.randn(6, 4))
df[2][:2] = nan
dropped = df.dropna(axis=1)
expected = df.ix[:, [0, 1, 3]]
inp = df.copy()
inp.dropna(axis=1, inplace=True)
assert_frame_equal(dropped, expected)
assert_frame_equal(inp, expected)
dropped = df.dropna(axis=0)
expected = df.ix[lrange(2, 6)]
inp = df.copy()
inp.dropna(axis=0, inplace=True)
assert_frame_equal(dropped, expected)
assert_frame_equal(inp, expected)
# threshold
dropped = df.dropna(axis=1, thresh=5)
expected = df.ix[:, [0, 1, 3]]
inp = df.copy()
inp.dropna(axis=1, thresh=5, inplace=True)
assert_frame_equal(dropped, expected)
assert_frame_equal(inp, expected)
dropped = df.dropna(axis=0, thresh=4)
expected = df.ix[ | lrange(2, 6) | pandas.compat.lrange |
import os
import time
import json
import orjson
import pandas as pd
import numpy as np
import glob2
import pickle
from itertools import repeat
from multiprocessing import Pool, Process, current_process, cpu_count
def get_file_list(folder_path, extensions=['zip']):
''' Returns the list of files with any of the given extensions inside the specified folder (relative paths are resolved against the current working directory) '''
file_list = []
for extension in extensions:
file_list.extend(glob2.glob(os.path.join(folder_path, '*.' + extension)))
return file_list
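# Hedged usage sketch (the folder name and extension are assumptions, not part of the
# original pipeline): collect every Cuckoo JSON report under a hypothetical "reports"
# folder so the encoder functions below can be mapped over the resulting list.
def _demo_list_json_reports(folder="reports"):
    return get_file_list(folder, extensions=["json"])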
def json_to_dict(json_file):
'''This function returns the dictionary representation of the given JSON file'''
with open(json_file, 'r') as f:
json_report = orjson.loads(f.read())
return json_report
def get_index(json_report, index_value):
'''This function returns the requested index value from the "file_information" section of the JSON report'''
try:
return json_report['file_information'][index_value]
except Exception as e:
raise Exception('get_index ERROR', e)
def get_label(json_report, category):
'''This function is used to retrieve the sample label, e.g. malware, ransomware and goodware'''
try:
return json_report['file_class'][category + '_label']
except Exception as e:
raise Exception('get_label ERROR', e)
def set_metadata(selection, json_report, index_value):
'''This function sets metadata on a feature dict: the label is used for the malware-goodware
case and the sublabel for the ransomware-malware case'''
selection[index_value] = get_index(json_report, index_value)
selection['label'] = get_label(json_report, 'class')
selection['sublabel'] = get_label(json_report, 'subclass')
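# Hedged illustration (the feature dict below is hypothetical): after set_metadata runs,
# a feature dict gains the chosen index value plus "label" and "sublabel" keys, which the
# get_encoded_* functions later turn into the dataframe index and the two target columns.
def _demo_metadata_keys(json_report, index_value='md5'):
    features = {"NtCreateFile": 3}  # made-up API-call count
    set_metadata(features, json_report, index_value)
    return sorted(features.keys())  # e.g. ["NtCreateFile", "label", "md5", "sublabel"]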
def get_encoded_apistats(json_file, one_hot = False, index_value = 'md5'):
'''This function returns one-hot encoded API stats data from the JSON report'''
try:
print('Processing :', json_file)
json_report = json_to_dict(json_file)
apistats = json_report['api_calls']['apistats']
set_metadata(apistats, json_report, index_value)
encoded_apistats = pd.json_normalize(apistats, max_level=0).set_index(index_value)
if one_hot == True:
labels = encoded_apistats[['label', 'sublabel']]
features = encoded_apistats.drop(['label', 'sublabel'], axis=1)
features[features!=0] = 1
encoded_apistats = features.join(labels)
print('Processed :', json_file)
return encoded_apistats
except Exception as e:
print('KEY ERROR', e)
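# Hedged usage sketch (the report list is supplied by the caller, and any reports that
# failed to parse are skipped): one-hot encode the apistats of several JSON reports and
# stack them into a single feature matrix, filling APIs missing from a sample with zeros.
def _demo_stack_apistats(json_files):
    frames = [get_encoded_apistats(f, one_hot=True) for f in json_files]
    frames = [df for df in frames if df is not None]  # drop reports that failed to parse
    return pd.concat(frames, axis=0).fillna(0)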
def remove_dll(dlls, substring = ['virusshare', 'cucko'], unique=True):
'''This function returns the DLL feature list, optionally deduplicated, with entries containing any of the given substrings removed'''
occurences = []
if unique:
dlls = list(set([ os.path.splitext(dll.lower().replace(os.path.sep, '/'))[0] for dll in dlls ]))
for dll in dlls:
for target in substring:
if target in dll :
if dll not in occurences:
occurences.append(dll)
for dll in occurences:
dlls.remove(dll)
return dlls
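# Hedged illustration (the DLL paths are made up): remove_dll lowercases each path, strips
# the extension, deduplicates, and drops any entry containing one of the given substrings,
# here the sandbox marker "cucko".
def _demo_clean_dll_paths():
    raw = ["C:/Windows/System32/KERNEL32.DLL",
           "c:/windows/system32/kernel32.dll",
           "C:/cuckoo/agent.dll"]
    # expected: ["c:/windows/system32/kernel32"]; the duplicates collapse and the cuckoo entry is dropped
    return remove_dll(raw, substring=["virusshare", "cucko"], unique=True)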
def get_encoded_dll_loaded(json_file, index_value = 'md5'):
'''This function returns one-hot encoded DLL data'''
try:
print('Processing :', json_file)
json_report = json_to_dict(json_file)
dll_loaded = json_report['dll_loaded']['loaded_dll']
pruned_dll_loaded = remove_dll(dll_loaded, substring = ['virusshare', 'cucko'], unique=True)
dll_loaded_todict = { dll : 1 for dll in pruned_dll_loaded }
set_metadata(dll_loaded_todict, json_report, index_value)
encoded_dll_loaded = pd.json_normalize(dll_loaded_todict, max_level=0).set_index(index_value)
print('Processed :', json_file)
return encoded_dll_loaded
except Exception as e:
print('dll_loaded KEY ERROR', e)
def get_file_operations_counts(json_file, index_value = 'md5'):
'''This function retrieves file operations count features.'''
try:
print('Processing :', json_file)
json_report = json_to_dict(json_file)
file_operations_counts = json_report['file_operations']['files_counts']
set_metadata(file_operations_counts, json_report, index_value)
encoded_file_operations = pd.json_normalize(file_operations_counts, max_level=0).set_index(index_value)
print('Processed :', json_file)
return encoded_file_operations
except Exception as e:
print('file_operations_counts KEY ERROR', e)
def get_regkeys_counts(json_file, index_value = 'md5'):
'''This function retrieves registry keys operations count features.'''
try:
print('Processing :', json_file)
json_report = json_to_dict(json_file)
regkeys_counts = json_report['regkeys']['regkey_counts']
set_metadata(regkeys_counts, json_report, index_value)
encoded_regkeys_counts = pd.json_normalize(regkeys_counts, max_level=0).set_index(index_value)
print('Processed :', json_file)
return encoded_regkeys_counts
except Exception as e:
print('regkeys_counts KEY ERROR', e)
def get_encoded_pe_imports(json_file, dll_name = None, index_value = 'md5'):
'''This function retrieves one-hot encoded PE imports features'''
try:
print('Processing :', json_file)
json_report = json_to_dict(json_file)
pe_imports = json_report['pe_analysis']['pe_imports']
if dll_name is not None:
pe_imports_todict = { import_ : 1 for import_ in pe_imports[dll_name] }
else:
pe_imports_todict = { dll_name_ : 1 for dll_name_ in pe_imports.keys() }
set_metadata(pe_imports_todict, json_report, index_value)
encoded_pe_imports = pd.json_normalize(pe_imports_todict, max_level=0).set_index(index_value)
print('Processed :', json_file)
return encoded_pe_imports
except Exception as e:
print('pe_imports KEY ERROR', e)
def get_regkeys(json_file, category, index_value = 'md5'):
'''This function retrieves nested registry key features.'''
try:
print('Processing :', json_file)
json_report = json_to_dict(json_file)
regkeys = json_report['regkeys']['regkey_values'][category]
regkeys_todict = {k:1 for k in regkeys}
set_metadata(regkeys_todict, json_report, index_value)
encoded_regkeys = pd.json_normalize(regkeys_todict, max_level=0).set_index(index_value)
print('Processed :', json_file)
return encoded_regkeys
except Exception as e:
print(f'regkeys {category} KEY ERROR', e)
def get_key(nested_key, level=1, sep=os.path.sep):
'''This function deals with separating values in registry key features,
as they usually contain path information and other details'''
keys = [key.lower() for key in nested_key.split(sep)]
try:
return keys[:level]
except:
if level > 1:
return get_key(nested_key, level=level-1)
else:
pass
def get_all_keys(regkeys, level=1, unique=True, sep='/'):
'''This function returns a list of unique nested registry key values up to a certain level'''
results = []
for nested_key in regkeys:
results.extend(get_key(nested_key, level=level, sep=sep))
if unique:
results = list(set(results))
return results
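# Hedged illustration (the registry paths are hypothetical): get_all_keys splits every
# nested registry key on the separator, keeps the first `level` lower-cased components,
# and deduplicates them, which is how the nested regkey and file-operation features below
# are built before one-hot encoding.
def _demo_top_level_regkeys():
    keys = ["HKEY_LOCAL_MACHINE\\SOFTWARE\\Microsoft\\Windows",
            "HKEY_LOCAL_MACHINE\\SYSTEM\\ControlSet001"]
    # expected (order may vary): ["hkey_local_machine", "software", "system"]
    return get_all_keys(keys, level=2, unique=True, sep="\\")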
def remove_keys(keys, substring=None, numeric=True):
'''This function removes unwanted entries (numeric values and keys containing the given substrings) from nested registry key data'''
occurences = []
for key in keys:
if numeric:
if key.replace('.', '').replace('-', '').isnumeric():
occurences.append(key)
for target in substring:
if target in key:
occurences.append(key)
for key in occurences:
keys.remove(key)
return keys
def get_nested_regkeys(json_file, category, level = 15, index_value = 'md5'):
'''This function retrieves all nested registry keys from json, cleaned and encoded.'''
try:
print('Processing :', json_file)
json_report = json_to_dict(json_file)
regkeys = json_report['regkeys']['regkey_values'][category]
nested_regkeys = get_all_keys(regkeys, level=level, sep='\\')
pruned_nested_regkeys = remove_keys(nested_regkeys, substring=['virusshare', 'cucko', 'default'], numeric=True)
nested_regkeys_todict = {k:1 for k in pruned_nested_regkeys}
set_metadata(nested_regkeys_todict, json_report, index_value)
encoded_nested_regkeys = pd.json_normalize(nested_regkeys_todict, max_level=0).set_index(index_value)
print('Processed :', json_file)
return encoded_nested_regkeys
except Exception as e:
print(f'Nested Regkeys {category} KEY ERROR', e)
def get_nested_fileops(json_file, category, level = 15, index_value = 'md5'):
'''This function retrieves nested file operations from JSON, cleaned and encoded'''
try:
print('Processing :', json_file)
json_report = json_to_dict(json_file)
fileops = json_report['file_operations']['files_values'][category]
nested_files = get_all_keys(fileops, level=level, sep='\\')
pruned_nested_files = remove_keys(nested_files, substring=['virusshare', 'cucko', 'default'], numeric=True)
nested_files_todict = {k:1 for k in pruned_nested_files}
set_metadata(nested_files_todict, json_report, index_value)
encoded_nested_files = pd.json_normalize(nested_files_todict, max_level=0).set_index(index_value)
print('Processed :', json_file)
return encoded_nested_files
except Exception as e:
print(f'Nested File Operations {category} KEY ERROR', e)
def get_pe_entropy(json_file, index_value = 'md5'):
'''This function retrieves PE entropy features from json report.'''
try:
print('Processing :', json_file)
json_report = json_to_dict(json_file)
pe_entropy = json_report['pe_analysis']['pe_entropy']
pe_entropy_values = {k:v for k,v in zip(pe_entropy['names'], pe_entropy['entropy_values'])}
set_metadata(pe_entropy_values, json_report, index_value)
encoded_pe_entropy = | pd.json_normalize(pe_entropy_values, max_level=0) | pandas.json_normalize |
import pickle
import inflection
import pandas as pd
import numpy as np
import math
import datetime
class Rossmann(object):
def __init__(self):
state = 1
self.competition_distance_scaler = pickle.load(open('parameter/competition_distance_scaler.pkl', 'rb') )
self.competition_time_month_scaler = pickle.load(open('parameter/competition_time_month_scaler.pkl', 'rb') )
self.promo_time_week_scaler = pickle.load(open('parameter/promo_time_week_scaler.pkl', 'rb') )
self.year_scaler = pickle.load(open('parameter/year_scaler.pkl', 'rb') )
self.store_type_scaler = pickle.load(open('parameter/store_type_scaler.pkl', 'rb') )
self.year_scaler = pickle.load(open('parameter/year_scaler.pkl', 'rb') )
def data_cleaning(self,df1):
## 1.1. Rename Columns
cols_old = ['Store', 'DayOfWeek', 'Date', 'Open', 'Promo', 'StateHoliday', 'SchoolHoliday',
'StoreType', 'Assortment', 'CompetitionDistance', 'CompetitionOpenSinceMonth','CompetitionOpenSinceYear',
'Promo2', 'Promo2SinceWeek','Promo2SinceYear', 'PromoInterval']
snakecase = lambda x: inflection.underscore( x )
cols_new = list(map(snakecase, cols_old))
#Rename
df1.columns = cols_new
## 1.3. Data Type
# convert the date column to datetime
df1['date'] = | pd.to_datetime(df1['date']) | pandas.to_datetime |
# -*- coding: utf-8 -*-
# pylint: disable=E1101
# flake8: noqa
from datetime import datetime
import csv
import os
import sys
import re
import nose
import platform
from multiprocessing.pool import ThreadPool
from numpy import nan
import numpy as np
from pandas.io.common import DtypeWarning
from pandas import DataFrame, Series, Index, MultiIndex, DatetimeIndex
from pandas.compat import(
StringIO, BytesIO, PY3, range, long, lrange, lmap, u
)
from pandas.io.common import URLError
import pandas.io.parsers as parsers
from pandas.io.parsers import (read_csv, read_table, read_fwf,
TextFileReader, TextParser)
import pandas.util.testing as tm
import pandas as pd
from pandas.compat import parse_date
import pandas.lib as lib
from pandas import compat
from pandas.lib import Timestamp
from pandas.tseries.index import date_range
import pandas.tseries.tools as tools
from numpy.testing.decorators import slow
import pandas.parser
class ParserTests(object):
"""
Want to be able to test either C+Cython or Python+Cython parsers
"""
data1 = """index,A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
qux,12,13,14,15
foo2,12,13,14,15
bar2,12,13,14,15
"""
def read_csv(self, *args, **kwargs):
raise NotImplementedError
def read_table(self, *args, **kwargs):
raise NotImplementedError
def setUp(self):
import warnings
warnings.filterwarnings(action='ignore', category=FutureWarning)
self.dirpath = tm.get_data_path()
self.csv1 = os.path.join(self.dirpath, 'test1.csv')
self.csv2 = os.path.join(self.dirpath, 'test2.csv')
self.xls1 = os.path.join(self.dirpath, 'test.xls')
def construct_dataframe(self, num_rows):
df = DataFrame(np.random.rand(num_rows, 5), columns=list('abcde'))
df['foo'] = 'foo'
df['bar'] = 'bar'
df['baz'] = 'baz'
df['date'] = pd.date_range('20000101 09:00:00',
periods=num_rows,
freq='s')
df['int'] = np.arange(num_rows, dtype='int64')
return df
def generate_multithread_dataframe(self, path, num_rows, num_tasks):
def reader(arg):
start, nrows = arg
if not start:
return pd.read_csv(path, index_col=0, header=0, nrows=nrows,
parse_dates=['date'])
return pd.read_csv(path,
index_col=0,
header=None,
skiprows=int(start) + 1,
nrows=nrows,
parse_dates=[9])
tasks = [
(num_rows * i / num_tasks,
num_rows / num_tasks) for i in range(num_tasks)
]
pool = ThreadPool(processes=num_tasks)
results = pool.map(reader, tasks)
header = results[0].columns
for r in results[1:]:
r.columns = header
final_dataframe = pd.concat(results)
return final_dataframe
def test_converters_type_must_be_dict(self):
with tm.assertRaisesRegexp(TypeError, 'Type converters.+'):
self.read_csv(StringIO(self.data1), converters=0)
def test_empty_decimal_marker(self):
data = """A|B|C
1|2,334|5
10|13|10.
"""
self.assertRaises(ValueError, read_csv, StringIO(data), decimal='')
def test_empty_thousands_marker(self):
data = """A|B|C
1|2,334|5
10|13|10.
"""
self.assertRaises(ValueError, read_csv, StringIO(data), thousands='')
def test_multi_character_decimal_marker(self):
data = """A|B|C
1|2,334|5
10|13|10.
"""
self.assertRaises(ValueError, read_csv, StringIO(data), thousands=',,')
def test_empty_string(self):
data = """\
One,Two,Three
a,1,one
b,2,two
,3,three
d,4,nan
e,5,five
nan,6,
g,7,seven
"""
df = self.read_csv(StringIO(data))
xp = DataFrame({'One': ['a', 'b', np.nan, 'd', 'e', np.nan, 'g'],
'Two': [1, 2, 3, 4, 5, 6, 7],
'Three': ['one', 'two', 'three', np.nan, 'five',
np.nan, 'seven']})
tm.assert_frame_equal(xp.reindex(columns=df.columns), df)
df = self.read_csv(StringIO(data), na_values={'One': [], 'Three': []},
keep_default_na=False)
xp = DataFrame({'One': ['a', 'b', '', 'd', 'e', 'nan', 'g'],
'Two': [1, 2, 3, 4, 5, 6, 7],
'Three': ['one', 'two', 'three', 'nan', 'five',
'', 'seven']})
tm.assert_frame_equal(xp.reindex(columns=df.columns), df)
df = self.read_csv(
StringIO(data), na_values=['a'], keep_default_na=False)
xp = DataFrame({'One': [np.nan, 'b', '', 'd', 'e', 'nan', 'g'],
'Two': [1, 2, 3, 4, 5, 6, 7],
'Three': ['one', 'two', 'three', 'nan', 'five', '',
'seven']})
tm.assert_frame_equal(xp.reindex(columns=df.columns), df)
df = self.read_csv(StringIO(data), na_values={'One': [], 'Three': []})
xp = DataFrame({'One': ['a', 'b', np.nan, 'd', 'e', np.nan, 'g'],
'Two': [1, 2, 3, 4, 5, 6, 7],
'Three': ['one', 'two', 'three', np.nan, 'five',
np.nan, 'seven']})
tm.assert_frame_equal(xp.reindex(columns=df.columns), df)
# GH4318, passing na_values=None and keep_default_na=False yields
# 'None' as a na_value
data = """\
One,Two,Three
a,1,None
b,2,two
,3,None
d,4,nan
e,5,five
nan,6,
g,7,seven
"""
df = self.read_csv(
StringIO(data), keep_default_na=False)
xp = DataFrame({'One': ['a', 'b', '', 'd', 'e', 'nan', 'g'],
'Two': [1, 2, 3, 4, 5, 6, 7],
'Three': ['None', 'two', 'None', 'nan', 'five', '',
'seven']})
tm.assert_frame_equal(xp.reindex(columns=df.columns), df)
def test_read_csv(self):
if not compat.PY3:
if compat.is_platform_windows():
prefix = u("file:///")
else:
prefix = u("file://")
fname = prefix + compat.text_type(self.csv1)
# it works!
read_csv(fname, index_col=0, parse_dates=True)
def test_dialect(self):
data = """\
label1,label2,label3
index1,"a,c,e
index2,b,d,f
"""
dia = csv.excel()
dia.quoting = csv.QUOTE_NONE
df = self.read_csv(StringIO(data), dialect=dia)
data = '''\
label1,label2,label3
index1,a,c,e
index2,b,d,f
'''
exp = self.read_csv(StringIO(data))
exp.replace('a', '"a', inplace=True)
tm.assert_frame_equal(df, exp)
def test_dialect_str(self):
data = """\
fruit:vegetable
apple:brocolli
pear:tomato
"""
exp = DataFrame({
'fruit': ['apple', 'pear'],
'vegetable': ['brocolli', 'tomato']
})
dia = csv.register_dialect('mydialect', delimiter=':') # noqa
df = self.read_csv(StringIO(data), dialect='mydialect')
tm.assert_frame_equal(df, exp)
csv.unregister_dialect('mydialect')
def test_1000_sep(self):
data = """A|B|C
1|2,334|5
10|13|10.
"""
expected = DataFrame({
'A': [1, 10],
'B': [2334, 13],
'C': [5, 10.]
})
df = self.read_csv(StringIO(data), sep='|', thousands=',')
tm.assert_frame_equal(df, expected)
df = self.read_table(StringIO(data), sep='|', thousands=',')
tm.assert_frame_equal(df, expected)
def test_1000_sep_with_decimal(self):
data = """A|B|C
1|2,334.01|5
10|13|10.
"""
expected = DataFrame({
'A': [1, 10],
'B': [2334.01, 13],
'C': [5, 10.]
})
tm.assert_equal(expected.A.dtype, 'int64')
tm.assert_equal(expected.B.dtype, 'float')
tm.assert_equal(expected.C.dtype, 'float')
df = self.read_csv(StringIO(data), sep='|', thousands=',', decimal='.')
tm.assert_frame_equal(df, expected)
df = self.read_table(StringIO(data), sep='|',
thousands=',', decimal='.')
tm.assert_frame_equal(df, expected)
data_with_odd_sep = """A|B|C
1|2.334,01|5
10|13|10,
"""
df = self.read_csv(StringIO(data_with_odd_sep),
sep='|', thousands='.', decimal=',')
tm.assert_frame_equal(df, expected)
df = self.read_table(StringIO(data_with_odd_sep),
sep='|', thousands='.', decimal=',')
tm.assert_frame_equal(df, expected)
def test_separator_date_conflict(self):
# Regression test for issue #4678: make sure thousands separator and
# date parsing do not conflict.
data = '06-02-2013;13:00;1-000.215'
expected = DataFrame(
[[datetime(2013, 6, 2, 13, 0, 0), 1000.215]],
columns=['Date', 2]
)
df = self.read_csv(StringIO(data), sep=';', thousands='-',
parse_dates={'Date': [0, 1]}, header=None)
tm.assert_frame_equal(df, expected)
def test_squeeze(self):
data = """\
a,1
b,2
c,3
"""
idx = Index(['a', 'b', 'c'], name=0)
expected = Series([1, 2, 3], name=1, index=idx)
result = self.read_table(StringIO(data), sep=',', index_col=0,
header=None, squeeze=True)
tm.assertIsInstance(result, Series)
tm.assert_series_equal(result, expected)
def test_squeeze_no_view(self):
# GH 8217
# series should not be a view
data = """time,data\n0,10\n1,11\n2,12\n4,14\n5,15\n3,13"""
result = self.read_csv(StringIO(data), index_col='time', squeeze=True)
self.assertFalse(result._is_view)
def test_inf_parsing(self):
data = """\
,A
a,inf
b,-inf
c,Inf
d,-Inf
e,INF
f,-INF
g,INf
h,-INf
i,inF
j,-inF"""
inf = float('inf')
expected = Series([inf, -inf] * 5)
df = read_csv(StringIO(data), index_col=0)
tm.assert_almost_equal(df['A'].values, expected.values)
df = read_csv(StringIO(data), index_col=0, na_filter=False)
tm.assert_almost_equal(df['A'].values, expected.values)
def test_multiple_date_col(self):
# Can use multiple date parsers
data = """\
KORD,19990127, 19:00:00, 18:56:00, 0.8100, 2.8100, 7.2000, 0.0000, 280.0000
KORD,19990127, 20:00:00, 19:56:00, 0.0100, 2.2100, 7.2000, 0.0000, 260.0000
KORD,19990127, 21:00:00, 20:56:00, -0.5900, 2.2100, 5.7000, 0.0000, 280.0000
KORD,19990127, 21:00:00, 21:18:00, -0.9900, 2.0100, 3.6000, 0.0000, 270.0000
KORD,19990127, 22:00:00, 21:56:00, -0.5900, 1.7100, 5.1000, 0.0000, 290.0000
KORD,19990127, 23:00:00, 22:56:00, -0.5900, 1.7100, 4.6000, 0.0000, 280.0000
"""
def func(*date_cols):
return lib.try_parse_dates(parsers._concat_date_cols(date_cols))
df = self.read_csv(StringIO(data), header=None,
date_parser=func,
prefix='X',
parse_dates={'nominal': [1, 2],
'actual': [1, 3]})
self.assertIn('nominal', df)
self.assertIn('actual', df)
self.assertNotIn('X1', df)
self.assertNotIn('X2', df)
self.assertNotIn('X3', df)
d = datetime(1999, 1, 27, 19, 0)
self.assertEqual(df.ix[0, 'nominal'], d)
df = self.read_csv(StringIO(data), header=None,
date_parser=func,
parse_dates={'nominal': [1, 2],
'actual': [1, 3]},
keep_date_col=True)
self.assertIn('nominal', df)
self.assertIn('actual', df)
self.assertIn(1, df)
self.assertIn(2, df)
self.assertIn(3, df)
data = """\
KORD,19990127, 19:00:00, 18:56:00, 0.8100, 2.8100, 7.2000, 0.0000, 280.0000
KORD,19990127, 20:00:00, 19:56:00, 0.0100, 2.2100, 7.2000, 0.0000, 260.0000
KORD,19990127, 21:00:00, 20:56:00, -0.5900, 2.2100, 5.7000, 0.0000, 280.0000
KORD,19990127, 21:00:00, 21:18:00, -0.9900, 2.0100, 3.6000, 0.0000, 270.0000
KORD,19990127, 22:00:00, 21:56:00, -0.5900, 1.7100, 5.1000, 0.0000, 290.0000
KORD,19990127, 23:00:00, 22:56:00, -0.5900, 1.7100, 4.6000, 0.0000, 280.0000
"""
df = read_csv(StringIO(data), header=None,
prefix='X',
parse_dates=[[1, 2], [1, 3]])
self.assertIn('X1_X2', df)
self.assertIn('X1_X3', df)
self.assertNotIn('X1', df)
self.assertNotIn('X2', df)
self.assertNotIn('X3', df)
d = datetime(1999, 1, 27, 19, 0)
self.assertEqual(df.ix[0, 'X1_X2'], d)
df = read_csv(StringIO(data), header=None,
parse_dates=[[1, 2], [1, 3]], keep_date_col=True)
self.assertIn('1_2', df)
self.assertIn('1_3', df)
self.assertIn(1, df)
self.assertIn(2, df)
self.assertIn(3, df)
data = '''\
KORD,19990127 19:00:00, 18:56:00, 0.8100, 2.8100, 7.2000, 0.0000, 280.0000
KORD,19990127 20:00:00, 19:56:00, 0.0100, 2.2100, 7.2000, 0.0000, 260.0000
KORD,19990127 21:00:00, 20:56:00, -0.5900, 2.2100, 5.7000, 0.0000, 280.0000
KORD,19990127 21:00:00, 21:18:00, -0.9900, 2.0100, 3.6000, 0.0000, 270.0000
KORD,19990127 22:00:00, 21:56:00, -0.5900, 1.7100, 5.1000, 0.0000, 290.0000
'''
df = self.read_csv(StringIO(data), sep=',', header=None,
parse_dates=[1], index_col=1)
d = datetime(1999, 1, 27, 19, 0)
self.assertEqual(df.index[0], d)
def test_multiple_date_cols_int_cast(self):
data = ("KORD,19990127, 19:00:00, 18:56:00, 0.8100\n"
"KORD,19990127, 20:00:00, 19:56:00, 0.0100\n"
"KORD,19990127, 21:00:00, 20:56:00, -0.5900\n"
"KORD,19990127, 21:00:00, 21:18:00, -0.9900\n"
"KORD,19990127, 22:00:00, 21:56:00, -0.5900\n"
"KORD,19990127, 23:00:00, 22:56:00, -0.5900")
date_spec = {'nominal': [1, 2], 'actual': [1, 3]}
import pandas.io.date_converters as conv
# it works!
df = self.read_csv(StringIO(data), header=None, parse_dates=date_spec,
date_parser=conv.parse_date_time)
self.assertIn('nominal', df)
def test_multiple_date_col_timestamp_parse(self):
data = """05/31/2012,15:30:00.029,1306.25,1,E,0,,1306.25
05/31/2012,15:30:00.029,1306.25,8,E,0,,1306.25"""
result = self.read_csv(StringIO(data), sep=',', header=None,
parse_dates=[[0, 1]], date_parser=Timestamp)
ex_val = Timestamp('05/31/2012 15:30:00.029')
self.assertEqual(result['0_1'][0], ex_val)
def test_single_line(self):
# GH 6607
# Test currently only valid with python engine because sep=None and
# delim_whitespace=False. Temporarily copied to TestPythonParser.
# Test for ValueError with other engines:
with tm.assertRaisesRegexp(ValueError,
'sep=None with delim_whitespace=False'):
# sniff separator
buf = StringIO()
sys.stdout = buf
# printing warning message when engine == 'c' for now
try:
# it works!
df = self.read_csv(StringIO('1,2'), names=['a', 'b'],
header=None, sep=None)
tm.assert_frame_equal(DataFrame({'a': [1], 'b': [2]}), df)
finally:
sys.stdout = sys.__stdout__
def test_multiple_date_cols_with_header(self):
data = """\
ID,date,NominalTime,ActualTime,TDew,TAir,Windspeed,Precip,WindDir
KORD,19990127, 19:00:00, 18:56:00, 0.8100, 2.8100, 7.2000, 0.0000, 280.0000
KORD,19990127, 20:00:00, 19:56:00, 0.0100, 2.2100, 7.2000, 0.0000, 260.0000
KORD,19990127, 21:00:00, 20:56:00, -0.5900, 2.2100, 5.7000, 0.0000, 280.0000
KORD,19990127, 21:00:00, 21:18:00, -0.9900, 2.0100, 3.6000, 0.0000, 270.0000
KORD,19990127, 22:00:00, 21:56:00, -0.5900, 1.7100, 5.1000, 0.0000, 290.0000
KORD,19990127, 23:00:00, 22:56:00, -0.5900, 1.7100, 4.6000, 0.0000, 280.0000"""
df = self.read_csv(StringIO(data), parse_dates={'nominal': [1, 2]})
self.assertNotIsInstance(df.nominal[0], compat.string_types)
ts_data = """\
ID,date,nominalTime,actualTime,A,B,C,D,E
KORD,19990127, 19:00:00, 18:56:00, 0.8100, 2.8100, 7.2000, 0.0000, 280.0000
KORD,19990127, 20:00:00, 19:56:00, 0.0100, 2.2100, 7.2000, 0.0000, 260.0000
KORD,19990127, 21:00:00, 20:56:00, -0.5900, 2.2100, 5.7000, 0.0000, 280.0000
KORD,19990127, 21:00:00, 21:18:00, -0.9900, 2.0100, 3.6000, 0.0000, 270.0000
KORD,19990127, 22:00:00, 21:56:00, -0.5900, 1.7100, 5.1000, 0.0000, 290.0000
KORD,19990127, 23:00:00, 22:56:00, -0.5900, 1.7100, 4.6000, 0.0000, 280.0000
"""
def test_multiple_date_col_name_collision(self):
self.assertRaises(ValueError, self.read_csv, StringIO(self.ts_data),
parse_dates={'ID': [1, 2]})
data = """\
date_NominalTime,date,NominalTime,ActualTime,TDew,TAir,Windspeed,Precip,WindDir
KORD1,19990127, 19:00:00, 18:56:00, 0.8100, 2.8100, 7.2000, 0.0000, 280.0000
KORD2,19990127, 20:00:00, 19:56:00, 0.0100, 2.2100, 7.2000, 0.0000, 260.0000
KORD3,19990127, 21:00:00, 20:56:00, -0.5900, 2.2100, 5.7000, 0.0000, 280.0000
KORD4,19990127, 21:00:00, 21:18:00, -0.9900, 2.0100, 3.6000, 0.0000, 270.0000
KORD5,19990127, 22:00:00, 21:56:00, -0.5900, 1.7100, 5.1000, 0.0000, 290.0000
KORD6,19990127, 23:00:00, 22:56:00, -0.5900, 1.7100, 4.6000, 0.0000, 280.0000""" # noqa
self.assertRaises(ValueError, self.read_csv, StringIO(data),
parse_dates=[[1, 2]])
def test_index_col_named(self):
no_header = """\
KORD1,19990127, 19:00:00, 18:56:00, 0.8100, 2.8100, 7.2000, 0.0000, 280.0000
KORD2,19990127, 20:00:00, 19:56:00, 0.0100, 2.2100, 7.2000, 0.0000, 260.0000
KORD3,19990127, 21:00:00, 20:56:00, -0.5900, 2.2100, 5.7000, 0.0000, 280.0000
KORD4,19990127, 21:00:00, 21:18:00, -0.9900, 2.0100, 3.6000, 0.0000, 270.0000
KORD5,19990127, 22:00:00, 21:56:00, -0.5900, 1.7100, 5.1000, 0.0000, 290.0000
KORD6,19990127, 23:00:00, 22:56:00, -0.5900, 1.7100, 4.6000, 0.0000, 280.0000"""
h = "ID,date,NominalTime,ActualTime,TDew,TAir,Windspeed,Precip,WindDir\n"
data = h + no_header
rs = self.read_csv(StringIO(data), index_col='ID')
xp = self.read_csv(StringIO(data), header=0).set_index('ID')
tm.assert_frame_equal(rs, xp)
self.assertRaises(ValueError, self.read_csv, StringIO(no_header),
index_col='ID')
data = """\
1,2,3,4,hello
5,6,7,8,world
9,10,11,12,foo
"""
names = ['a', 'b', 'c', 'd', 'message']
xp = DataFrame({'a': [1, 5, 9], 'b': [2, 6, 10], 'c': [3, 7, 11],
'd': [4, 8, 12]},
index=Index(['hello', 'world', 'foo'], name='message'))
rs = self.read_csv(StringIO(data), names=names, index_col=['message'])
tm.assert_frame_equal(xp, rs)
self.assertEqual(xp.index.name, rs.index.name)
rs = self.read_csv(StringIO(data), names=names, index_col='message')
tm.assert_frame_equal(xp, rs)
self.assertEqual(xp.index.name, rs.index.name)
def test_usecols_index_col_False(self):
# Issue 9082
s = "a,b,c,d\n1,2,3,4\n5,6,7,8"
s_malformed = "a,b,c,d\n1,2,3,4,\n5,6,7,8,"
cols = ['a', 'c', 'd']
expected = DataFrame({'a': [1, 5], 'c': [3, 7], 'd': [4, 8]})
df = self.read_csv(StringIO(s), usecols=cols, index_col=False)
tm.assert_frame_equal(expected, df)
df = self.read_csv(StringIO(s_malformed),
usecols=cols, index_col=False)
tm.assert_frame_equal(expected, df)
def test_index_col_is_True(self):
# Issue 9798
self.assertRaises(ValueError, self.read_csv, StringIO(self.ts_data),
index_col=True)
def test_converter_index_col_bug(self):
# 1835
data = "A;B\n1;2\n3;4"
rs = self.read_csv(StringIO(data), sep=';', index_col='A',
converters={'A': lambda x: x})
xp = DataFrame({'B': [2, 4]}, index=Index([1, 3], name='A'))
tm.assert_frame_equal(rs, xp)
self.assertEqual(rs.index.name, xp.index.name)
def test_date_parser_int_bug(self):
# #3071
log_file = StringIO(
'posix_timestamp,elapsed,sys,user,queries,query_time,rows,'
'accountid,userid,contactid,level,silo,method\n'
'1343103150,0.062353,0,4,6,0.01690,3,'
'12345,1,-1,3,invoice_InvoiceResource,search\n'
)
def f(posix_string):
return datetime.utcfromtimestamp(int(posix_string))
# it works!
read_csv(log_file, index_col=0, parse_dates=0, date_parser=f)
def test_multiple_skts_example(self):
data = "year, month, a, b\n 2001, 01, 0.0, 10.\n 2001, 02, 1.1, 11."
pass
def test_malformed(self):
# all
data = """ignore
A,B,C
1,2,3 # comment
1,2,3,4,5
2,3,4
"""
try:
df = self.read_table(
StringIO(data), sep=',', header=1, comment='#')
self.assertTrue(False)
except Exception as inst:
self.assertIn('Expected 3 fields in line 4, saw 5', str(inst))
# skip_footer
data = """ignore
A,B,C
1,2,3 # comment
1,2,3,4,5
2,3,4
footer
"""
# GH 6607
# Test currently only valid with python engine because
# skip_footer != 0. Temporarily copied to TestPythonParser.
# Test for ValueError with other engines:
try:
with tm.assertRaisesRegexp(ValueError, 'skip_footer'): # XXX
df = self.read_table(
StringIO(data), sep=',', header=1, comment='#',
skip_footer=1)
self.assertTrue(False)
except Exception as inst:
self.assertIn('Expected 3 fields in line 4, saw 5', str(inst))
# first chunk
data = """ignore
A,B,C
skip
1,2,3
3,5,10 # comment
1,2,3,4,5
2,3,4
"""
try:
it = self.read_table(StringIO(data), sep=',',
header=1, comment='#',
iterator=True, chunksize=1,
skiprows=[2])
df = it.read(5)
self.assertTrue(False)
except Exception as inst:
self.assertIn('Expected 3 fields in line 6, saw 5', str(inst))
# middle chunk
data = """ignore
A,B,C
skip
1,2,3
3,5,10 # comment
1,2,3,4,5
2,3,4
"""
try:
it = self.read_table(StringIO(data), sep=',', header=1,
comment='#', iterator=True, chunksize=1,
skiprows=[2])
df = it.read(1)
it.read(2)
self.assertTrue(False)
except Exception as inst:
self.assertIn('Expected 3 fields in line 6, saw 5', str(inst))
# last chunk
data = """ignore
A,B,C
skip
1,2,3
3,5,10 # comment
1,2,3,4,5
2,3,4
"""
try:
it = self.read_table(StringIO(data), sep=',',
header=1, comment='#',
iterator=True, chunksize=1, skiprows=[2])
df = it.read(1)
it.read()
self.assertTrue(False)
except Exception as inst:
self.assertIn('Expected 3 fields in line 6, saw 5', str(inst))
def test_passing_dtype(self):
# GH 6607
# Passing dtype is currently only supported by the C engine.
# Temporarily copied to TestCParser*.
# Test for ValueError with other engines:
with tm.assertRaisesRegexp(ValueError,
"The 'dtype' option is not supported"):
df = DataFrame(np.random.rand(5, 2), columns=list(
'AB'), index=['1A', '1B', '1C', '1D', '1E'])
with tm.ensure_clean('__passing_str_as_dtype__.csv') as path:
df.to_csv(path)
# GH 3795
# passing 'str' as the dtype
result = self.read_csv(path, dtype=str, index_col=0)
tm.assert_series_equal(result.dtypes, Series(
{'A': 'object', 'B': 'object'}))
# we expect all object columns, so need to convert to test for
# equivalence
result = result.astype(float)
tm.assert_frame_equal(result, df)
# invalid dtype
self.assertRaises(TypeError, self.read_csv, path,
dtype={'A': 'foo', 'B': 'float64'},
index_col=0)
# valid but we don't support it (date)
self.assertRaises(TypeError, self.read_csv, path,
dtype={'A': 'datetime64', 'B': 'float64'},
index_col=0)
self.assertRaises(TypeError, self.read_csv, path,
dtype={'A': 'datetime64', 'B': 'float64'},
index_col=0, parse_dates=['B'])
# valid but we don't support it
self.assertRaises(TypeError, self.read_csv, path,
dtype={'A': 'timedelta64', 'B': 'float64'},
index_col=0)
with tm.assertRaisesRegexp(ValueError,
"The 'dtype' option is not supported"):
# empty frame
# GH12048
self.read_csv(StringIO('A,B'), dtype=str)
def test_quoting(self):
bad_line_small = """printer\tresult\tvariant_name
Klosterdruckerei\tKlosterdruckerei <Salem> (1611-1804)\tMuller, Jacob
Klosterdruckerei\tKlosterdruckerei <Salem> (1611-1804)\tMuller, Jakob
Klosterdruckerei\tKlosterdruckerei <Kempten> (1609-1805)\t"Furststiftische Hofdruckerei, <Kempten""
Klosterdruckerei\tKlosterdruckerei <Kempten> (1609-1805)\tGaller, Alois
Klosterdruckerei\tKlosterdruckerei <Kempten> (1609-1805)\tHochfurstliche Buchhandlung <Kempten>"""
self.assertRaises(Exception, self.read_table, StringIO(bad_line_small),
sep='\t')
good_line_small = bad_line_small + '"'
df = self.read_table(StringIO(good_line_small), sep='\t')
self.assertEqual(len(df), 3)
def test_non_string_na_values(self):
# GH3611, na_values that are not a string are an issue
with tm.ensure_clean('__non_string_na_values__.csv') as path:
df = DataFrame({'A': [-999, 2, 3], 'B': [1.2, -999, 4.5]})
df.to_csv(path, sep=' ', index=False)
result1 = read_csv(path, sep=' ', header=0,
na_values=['-999.0', '-999'])
result2 = read_csv(path, sep=' ', header=0,
na_values=[-999, -999.0])
result3 = read_csv(path, sep=' ', header=0,
na_values=[-999.0, -999])
tm.assert_frame_equal(result1, result2)
tm.assert_frame_equal(result2, result3)
result4 = read_csv(path, sep=' ', header=0, na_values=['-999.0'])
result5 = read_csv(path, sep=' ', header=0, na_values=['-999'])
result6 = read_csv(path, sep=' ', header=0, na_values=[-999.0])
result7 = read_csv(path, sep=' ', header=0, na_values=[-999])
tm.assert_frame_equal(result4, result3)
tm.assert_frame_equal(result5, result3)
tm.assert_frame_equal(result6, result3)
tm.assert_frame_equal(result7, result3)
good_compare = result3
# with an odd float format, so we can't match the string 999.0
# exactly, but need float matching
df.to_csv(path, sep=' ', index=False, float_format='%.3f')
result1 = read_csv(path, sep=' ', header=0,
na_values=['-999.0', '-999'])
result2 = read_csv(path, sep=' ', header=0,
na_values=[-999, -999.0])
result3 = read_csv(path, sep=' ', header=0,
na_values=[-999.0, -999])
tm.assert_frame_equal(result1, good_compare)
tm.assert_frame_equal(result2, good_compare)
tm.assert_frame_equal(result3, good_compare)
result4 = read_csv(path, sep=' ', header=0, na_values=['-999.0'])
result5 = read_csv(path, sep=' ', header=0, na_values=['-999'])
result6 = read_csv(path, sep=' ', header=0, na_values=[-999.0])
result7 = read_csv(path, sep=' ', header=0, na_values=[-999])
tm.assert_frame_equal(result4, good_compare)
tm.assert_frame_equal(result5, good_compare)
tm.assert_frame_equal(result6, good_compare)
tm.assert_frame_equal(result7, good_compare)
def test_default_na_values(self):
_NA_VALUES = set(['-1.#IND', '1.#QNAN', '1.#IND', '-1.#QNAN',
'#N/A', 'N/A', 'NA', '#NA', 'NULL', 'NaN',
'nan', '-NaN', '-nan', '#N/A N/A', ''])
self.assertEqual(_NA_VALUES, parsers._NA_VALUES)
nv = len(_NA_VALUES)
def f(i, v):
if i == 0:
buf = ''
elif i > 0:
buf = ''.join([','] * i)
buf = "{0}{1}".format(buf, v)
if i < nv - 1:
buf = "{0}{1}".format(buf, ''.join([','] * (nv - i - 1)))
return buf
data = StringIO('\n'.join([f(i, v) for i, v in enumerate(_NA_VALUES)]))
expected = DataFrame(np.nan, columns=range(nv), index=range(nv))
df = self.read_csv(data, header=None)
tm.assert_frame_equal(df, expected)
def test_custom_na_values(self):
data = """A,B,C
ignore,this,row
1,NA,3
-1.#IND,5,baz
7,8,NaN
"""
expected = [[1., nan, 3],
[nan, 5, nan],
[7, 8, nan]]
df = self.read_csv(StringIO(data), na_values=['baz'], skiprows=[1])
tm.assert_almost_equal(df.values, expected)
df2 = self.read_table(StringIO(data), sep=',', na_values=['baz'],
skiprows=[1])
tm.assert_almost_equal(df2.values, expected)
df3 = self.read_table(StringIO(data), sep=',', na_values='baz',
skiprows=[1])
tm.assert_almost_equal(df3.values, expected)
def test_nat_parse(self):
# GH 3062
df = DataFrame(dict({
'A': np.asarray(lrange(10), dtype='float64'),
'B': pd.Timestamp('20010101')}))
df.iloc[3:6, :] = np.nan
with tm.ensure_clean('__nat_parse_.csv') as path:
df.to_csv(path)
result = read_csv(path, index_col=0, parse_dates=['B'])
tm.assert_frame_equal(result, df)
expected = Series(dict(A='float64', B='datetime64[ns]'))
tm.assert_series_equal(expected, result.dtypes)
# test with NaT for the nan_rep
# we don't have a method to specify the Datetime na_rep (it defaults
# to '')
df.to_csv(path)
result = read_csv(path, index_col=0, parse_dates=['B'])
tm.assert_frame_equal(result, df)
def test_skiprows_bug(self):
# GH #505
text = """#foo,a,b,c
#foo,a,b,c
#foo,a,b,c
#foo,a,b,c
#foo,a,b,c
#foo,a,b,c
1/1/2000,1.,2.,3.
1/2/2000,4,5,6
1/3/2000,7,8,9
"""
data = self.read_csv(StringIO(text), skiprows=lrange(6), header=None,
index_col=0, parse_dates=True)
data2 = self.read_csv(StringIO(text), skiprows=6, header=None,
index_col=0, parse_dates=True)
expected = DataFrame(np.arange(1., 10.).reshape((3, 3)),
columns=[1, 2, 3],
index=[datetime(2000, 1, 1), datetime(2000, 1, 2),
datetime(2000, 1, 3)])
expected.index.name = 0
tm.assert_frame_equal(data, expected)
tm.assert_frame_equal(data, data2)
def test_deep_skiprows(self):
# GH #4382
text = "a,b,c\n" + \
"\n".join([",".join([str(i), str(i + 1), str(i + 2)])
for i in range(10)])
condensed_text = "a,b,c\n" + \
"\n".join([",".join([str(i), str(i + 1), str(i + 2)])
for i in [0, 1, 2, 3, 4, 6, 8, 9]])
data = self.read_csv(StringIO(text), skiprows=[6, 8])
condensed_data = self.read_csv(StringIO(condensed_text))
tm.assert_frame_equal(data, condensed_data)
def test_skiprows_blank(self):
# GH 9832
text = """#foo,a,b,c
#foo,a,b,c
#foo,a,b,c
#foo,a,b,c
1/1/2000,1.,2.,3.
1/2/2000,4,5,6
1/3/2000,7,8,9
"""
data = self.read_csv(StringIO(text), skiprows=6, header=None,
index_col=0, parse_dates=True)
expected = DataFrame(np.arange(1., 10.).reshape((3, 3)),
columns=[1, 2, 3],
index=[datetime(2000, 1, 1), datetime(2000, 1, 2),
datetime(2000, 1, 3)])
expected.index.name = 0
tm.assert_frame_equal(data, expected)
def test_detect_string_na(self):
data = """A,B
foo,bar
NA,baz
NaN,nan
"""
expected = [['foo', 'bar'],
[nan, 'baz'],
[nan, nan]]
df = self.read_csv(StringIO(data))
tm.assert_almost_equal(df.values, expected)
def test_unnamed_columns(self):
data = """A,B,C,,
1,2,3,4,5
6,7,8,9,10
11,12,13,14,15
"""
expected = [[1, 2, 3, 4, 5.],
[6, 7, 8, 9, 10],
[11, 12, 13, 14, 15]]
df = self.read_table(StringIO(data), sep=',')
tm.assert_almost_equal(df.values, expected)
self.assert_numpy_array_equal(df.columns,
['A', 'B', 'C', 'Unnamed: 3',
'Unnamed: 4'])
def test_string_nas(self):
data = """A,B,C
a,b,c
d,,f
,g,h
"""
result = self.read_csv(StringIO(data))
expected = DataFrame([['a', 'b', 'c'],
['d', np.nan, 'f'],
[np.nan, 'g', 'h']],
columns=['A', 'B', 'C'])
tm.assert_frame_equal(result, expected)
def test_duplicate_columns(self):
for engine in ['python', 'c']:
data = """A,A,B,B,B
1,2,3,4,5
6,7,8,9,10
11,12,13,14,15
"""
# check default behaviour
df = self.read_table(StringIO(data), sep=',', engine=engine)
self.assertEqual(list(df.columns), ['A', 'A.1', 'B', 'B.1', 'B.2'])
df = self.read_table(StringIO(data), sep=',',
engine=engine, mangle_dupe_cols=False)
self.assertEqual(list(df.columns), ['A', 'A', 'B', 'B', 'B'])
df = self.read_table(StringIO(data), sep=',',
engine=engine, mangle_dupe_cols=True)
self.assertEqual(list(df.columns), ['A', 'A.1', 'B', 'B.1', 'B.2'])
def test_csv_mixed_type(self):
data = """A,B,C
a,1,2
b,3,4
c,4,5
"""
df = self.read_csv(StringIO(data))
# TODO
def test_csv_custom_parser(self):
data = """A,B,C
20090101,a,1,2
20090102,b,3,4
20090103,c,4,5
"""
f = lambda x: datetime.strptime(x, '%Y%m%d')
df = self.read_csv(StringIO(data), date_parser=f)
expected = self.read_csv(StringIO(data), parse_dates=True)
tm.assert_frame_equal(df, expected)
def test_parse_dates_implicit_first_col(self):
data = """A,B,C
20090101,a,1,2
20090102,b,3,4
20090103,c,4,5
"""
df = self.read_csv(StringIO(data), parse_dates=True)
expected = self.read_csv(StringIO(data), index_col=0, parse_dates=True)
self.assertIsInstance(
df.index[0], (datetime, np.datetime64, Timestamp))
tm.assert_frame_equal(df, expected)
def test_parse_dates_string(self):
data = """date,A,B,C
20090101,a,1,2
20090102,b,3,4
20090103,c,4,5
"""
rs = self.read_csv(
StringIO(data), index_col='date', parse_dates='date')
idx = date_range('1/1/2009', periods=3)
idx.name = 'date'
xp = DataFrame({'A': ['a', 'b', 'c'],
'B': [1, 3, 4],
'C': [2, 4, 5]}, idx)
tm.assert_frame_equal(rs, xp)
def test_yy_format(self):
data = """date,time,B,C
090131,0010,1,2
090228,1020,3,4
090331,0830,5,6
"""
rs = self.read_csv(StringIO(data), index_col=0,
parse_dates=[['date', 'time']])
idx = DatetimeIndex([datetime(2009, 1, 31, 0, 10, 0),
datetime(2009, 2, 28, 10, 20, 0),
datetime(2009, 3, 31, 8, 30, 0)],
dtype=object, name='date_time')
xp = DataFrame({'B': [1, 3, 5], 'C': [2, 4, 6]}, idx)
tm.assert_frame_equal(rs, xp)
rs = self.read_csv(StringIO(data), index_col=0,
parse_dates=[[0, 1]])
idx = DatetimeIndex([datetime(2009, 1, 31, 0, 10, 0),
datetime(2009, 2, 28, 10, 20, 0),
datetime(2009, 3, 31, 8, 30, 0)],
dtype=object, name='date_time')
xp = DataFrame({'B': [1, 3, 5], 'C': [2, 4, 6]}, idx)
tm.assert_frame_equal(rs, xp)
def test_parse_dates_column_list(self):
from pandas.core.datetools import to_datetime
data = '''date;destination;ventilationcode;unitcode;units;aux_date
01/01/2010;P;P;50;1;12/1/2011
01/01/2010;P;R;50;1;13/1/2011
15/01/2010;P;P;50;1;14/1/2011
01/05/2010;P;P;50;1;15/1/2011'''
expected = self.read_csv(StringIO(data), sep=";", index_col=lrange(4))
lev = expected.index.levels[0]
levels = list(expected.index.levels)
levels[0] = lev.to_datetime(dayfirst=True)
# hack to get this to work - remove for final test
levels[0].name = lev.name
expected.index.set_levels(levels, inplace=True)
expected['aux_date'] = to_datetime(expected['aux_date'],
dayfirst=True)
expected['aux_date'] = lmap(Timestamp, expected['aux_date'])
tm.assertIsInstance(expected['aux_date'][0], datetime)
df = self.read_csv(StringIO(data), sep=";", index_col=lrange(4),
parse_dates=[0, 5], dayfirst=True)
tm.assert_frame_equal(df, expected)
df = self.read_csv(StringIO(data), sep=";", index_col=lrange(4),
parse_dates=['date', 'aux_date'], dayfirst=True)
tm.assert_frame_equal(df, expected)
def test_no_header(self):
data = """1,2,3,4,5
6,7,8,9,10
11,12,13,14,15
"""
df = self.read_table(StringIO(data), sep=',', header=None)
df_pref = self.read_table(StringIO(data), sep=',', prefix='X',
header=None)
names = ['foo', 'bar', 'baz', 'quux', 'panda']
df2 = self.read_table(StringIO(data), sep=',', names=names)
expected = [[1, 2, 3, 4, 5.],
[6, 7, 8, 9, 10],
[11, 12, 13, 14, 15]]
tm.assert_almost_equal(df.values, expected)
tm.assert_almost_equal(df.values, df2.values)
self.assert_numpy_array_equal(df_pref.columns,
['X0', 'X1', 'X2', 'X3', 'X4'])
self.assert_numpy_array_equal(df.columns, lrange(5))
self.assert_numpy_array_equal(df2.columns, names)
def test_no_header_prefix(self):
data = """1,2,3,4,5
6,7,8,9,10
11,12,13,14,15
"""
df_pref = self.read_table(StringIO(data), sep=',', prefix='Field',
header=None)
expected = [[1, 2, 3, 4, 5.],
[6, 7, 8, 9, 10],
[11, 12, 13, 14, 15]]
tm.assert_almost_equal(df_pref.values, expected)
self.assert_numpy_array_equal(df_pref.columns,
['Field0', 'Field1', 'Field2', 'Field3', 'Field4'])
def test_header_with_index_col(self):
data = """foo,1,2,3
bar,4,5,6
baz,7,8,9
"""
names = ['A', 'B', 'C']
df = self.read_csv(StringIO(data), names=names)
self.assertEqual(names, ['A', 'B', 'C'])
values = [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
expected = DataFrame(values, index=['foo', 'bar', 'baz'],
columns=['A', 'B', 'C'])
tm.assert_frame_equal(df, expected)
def test_read_csv_dataframe(self):
df = self.read_csv(self.csv1, index_col=0, parse_dates=True)
df2 = self.read_table(self.csv1, sep=',', index_col=0,
parse_dates=True)
self.assert_numpy_array_equal(df.columns, ['A', 'B', 'C', 'D'])
self.assertEqual(df.index.name, 'index')
self.assertIsInstance(
df.index[0], (datetime, np.datetime64, Timestamp))
self.assertEqual(df.values.dtype, np.float64)
tm.assert_frame_equal(df, df2)
def test_read_csv_no_index_name(self):
df = self.read_csv(self.csv2, index_col=0, parse_dates=True)
df2 = self.read_table(self.csv2, sep=',', index_col=0,
parse_dates=True)
self.assert_numpy_array_equal(df.columns, ['A', 'B', 'C', 'D', 'E'])
self.assertIsInstance(
df.index[0], (datetime, np.datetime64, Timestamp))
self.assertEqual(df.ix[:, ['A', 'B', 'C', 'D']
].values.dtype, np.float64)
tm.assert_frame_equal(df, df2)
def test_read_csv_infer_compression(self):
# GH 9770
expected = self.read_csv(self.csv1, index_col=0, parse_dates=True)
inputs = [self.csv1, self.csv1 + '.gz',
self.csv1 + '.bz2', open(self.csv1)]
for f in inputs:
df = self.read_csv(f, index_col=0, parse_dates=True,
compression='infer')
tm.assert_frame_equal(expected, df)
inputs[3].close()
def test_read_table_unicode(self):
fin = BytesIO(u('\u0141aski, Jan;1').encode('utf-8'))
df1 = read_table(fin, sep=";", encoding="utf-8", header=None)
tm.assertIsInstance(df1[0].values[0], compat.text_type)
def test_read_table_wrong_num_columns(self):
# too few!
data = """A,B,C,D,E,F
1,2,3,4,5,6
6,7,8,9,10,11,12
11,12,13,14,15,16
"""
self.assertRaises(Exception, self.read_csv, StringIO(data))
def test_read_table_duplicate_index(self):
data = """index,A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
qux,12,13,14,15
foo,12,13,14,15
bar,12,13,14,15
"""
result = self.read_csv(StringIO(data), index_col=0)
expected = self.read_csv(StringIO(data)).set_index('index',
verify_integrity=False)
tm.assert_frame_equal(result, expected)
def test_read_table_duplicate_index_implicit(self):
data = """A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
qux,12,13,14,15
foo,12,13,14,15
bar,12,13,14,15
"""
# it works!
result = self.read_csv(StringIO(data))
def test_parse_bools(self):
data = """A,B
True,1
False,2
True,3
"""
data = self.read_csv(StringIO(data))
self.assertEqual(data['A'].dtype, np.bool_)
data = """A,B
YES,1
no,2
yes,3
No,3
Yes,3
"""
data = self.read_csv(StringIO(data),
true_values=['yes', 'Yes', 'YES'],
false_values=['no', 'NO', 'No'])
self.assertEqual(data['A'].dtype, np.bool_)
data = """A,B
TRUE,1
FALSE,2
TRUE,3
"""
data = self.read_csv(StringIO(data))
self.assertEqual(data['A'].dtype, np.bool_)
data = """A,B
foo,bar
bar,foo"""
result = self.read_csv(StringIO(data), true_values=['foo'],
false_values=['bar'])
expected = DataFrame({'A': [True, False], 'B': [False, True]})
tm.assert_frame_equal(result, expected)
def test_int_conversion(self):
data = """A,B
1.0,1
2.0,2
3.0,3
"""
data = self.read_csv(StringIO(data))
self.assertEqual(data['A'].dtype, np.float64)
self.assertEqual(data['B'].dtype, np.int64)
def test_infer_index_col(self):
data = """A,B,C
foo,1,2,3
bar,4,5,6
baz,7,8,9
"""
data = self.read_csv(StringIO(data))
self.assertTrue(data.index.equals(Index(['foo', 'bar', 'baz'])))
def test_read_nrows(self):
df = self.read_csv(StringIO(self.data1), nrows=3)
expected = self.read_csv(StringIO(self.data1))[:3]
tm.assert_frame_equal(df, expected)
def test_read_chunksize(self):
reader = self.read_csv(StringIO(self.data1), index_col=0, chunksize=2)
df = self.read_csv(StringIO(self.data1), index_col=0)
chunks = list(reader)
tm.assert_frame_equal(chunks[0], df[:2])
tm.assert_frame_equal(chunks[1], df[2:4])
tm.assert_frame_equal(chunks[2], df[4:])
def test_read_chunksize_named(self):
reader = self.read_csv(
StringIO(self.data1), index_col='index', chunksize=2)
df = self.read_csv(StringIO(self.data1), index_col='index')
chunks = list(reader)
tm.assert_frame_equal(chunks[0], df[:2])
tm.assert_frame_equal(chunks[1], df[2:4])
tm.assert_frame_equal(chunks[2], df[4:])
def test_get_chunk_passed_chunksize(self):
data = """A,B,C
1,2,3
4,5,6
7,8,9
1,2,3"""
result = self.read_csv(StringIO(data), chunksize=2)
piece = result.get_chunk()
self.assertEqual(len(piece), 2)
def test_read_text_list(self):
data = """A,B,C\nfoo,1,2,3\nbar,4,5,6"""
as_list = [['A', 'B', 'C'], ['foo', '1', '2', '3'], ['bar',
'4', '5', '6']]
df = self.read_csv(StringIO(data), index_col=0)
parser = TextParser(as_list, index_col=0, chunksize=2)
chunk = parser.read(None)
tm.assert_frame_equal(chunk, df)
def test_iterator(self):
# GH 6607
# Test currently only valid with python engine because
# skip_footer != 0. Temporarily copied to TestPythonParser.
# Test for ValueError with other engines:
with tm.assertRaisesRegexp(ValueError, 'skip_footer'):
reader = self.read_csv(StringIO(self.data1), index_col=0,
iterator=True)
df = self.read_csv(StringIO(self.data1), index_col=0)
chunk = reader.read(3)
tm.assert_frame_equal(chunk, df[:3])
last_chunk = reader.read(5)
tm.assert_frame_equal(last_chunk, df[3:])
# pass list
lines = list(csv.reader(StringIO(self.data1)))
parser = TextParser(lines, index_col=0, chunksize=2)
df = self.read_csv(StringIO(self.data1), index_col=0)
chunks = list(parser)
tm.assert_frame_equal(chunks[0], df[:2])
tm.assert_frame_equal(chunks[1], df[2:4])
tm.assert_frame_equal(chunks[2], df[4:])
# pass skiprows
parser = TextParser(lines, index_col=0, chunksize=2, skiprows=[1])
chunks = list(parser)
tm.assert_frame_equal(chunks[0], df[1:3])
# test bad parameter (skip_footer)
reader = self.read_csv(StringIO(self.data1), index_col=0,
iterator=True, skip_footer=True)
self.assertRaises(ValueError, reader.read, 3)
treader = self.read_table(StringIO(self.data1), sep=',', index_col=0,
iterator=True)
tm.assertIsInstance(treader, TextFileReader)
# stopping iteration when on chunksize is specified, GH 3967
data = """A,B,C
foo,1,2,3
bar,4,5,6
baz,7,8,9
"""
reader = self.read_csv(StringIO(data), iterator=True)
result = list(reader)
expected = DataFrame(dict(A=[1, 4, 7], B=[2, 5, 8], C=[
3, 6, 9]), index=['foo', 'bar', 'baz'])
tm.assert_frame_equal(result[0], expected)
# chunksize = 1
reader = self.read_csv(StringIO(data), chunksize=1)
result = list(reader)
expected = DataFrame(dict(A=[1, 4, 7], B=[2, 5, 8], C=[
3, 6, 9]), index=['foo', 'bar', 'baz'])
self.assertEqual(len(result), 3)
tm.assert_frame_equal(pd.concat(result), expected)
def test_header_not_first_line(self):
data = """got,to,ignore,this,line
got,to,ignore,this,line
index,A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
"""
data2 = """index,A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
"""
df = self.read_csv(StringIO(data), header=2, index_col=0)
expected = self.read_csv(StringIO(data2), header=0, index_col=0)
tm.assert_frame_equal(df, expected)
def test_header_multi_index(self):
expected = tm.makeCustomDataframe(
5, 3, r_idx_nlevels=2, c_idx_nlevels=4)
data = """\
C0,,C_l0_g0,C_l0_g1,C_l0_g2
C1,,C_l1_g0,C_l1_g1,C_l1_g2
C2,,C_l2_g0,C_l2_g1,C_l2_g2
C3,,C_l3_g0,C_l3_g1,C_l3_g2
R0,R1,,,
R_l0_g0,R_l1_g0,R0C0,R0C1,R0C2
R_l0_g1,R_l1_g1,R1C0,R1C1,R1C2
R_l0_g2,R_l1_g2,R2C0,R2C1,R2C2
R_l0_g3,R_l1_g3,R3C0,R3C1,R3C2
R_l0_g4,R_l1_g4,R4C0,R4C1,R4C2
"""
df = self.read_csv(StringIO(data), header=[0, 1, 2, 3], index_col=[
0, 1], tupleize_cols=False)
tm.assert_frame_equal(df, expected)
# skipping lines in the header
df = self.read_csv(StringIO(data), header=[0, 1, 2, 3], index_col=[
0, 1], tupleize_cols=False)
tm.assert_frame_equal(df, expected)
#### invalid options ####
# no as_recarray
self.assertRaises(ValueError, self.read_csv, StringIO(data), header=[0, 1, 2, 3],
index_col=[0, 1], as_recarray=True, tupleize_cols=False)
# names
self.assertRaises(ValueError, self.read_csv, StringIO(data), header=[0, 1, 2, 3],
index_col=[0, 1], names=['foo', 'bar'], tupleize_cols=False)
# usecols
self.assertRaises(ValueError, self.read_csv, StringIO(data), header=[0, 1, 2, 3],
index_col=[0, 1], usecols=['foo', 'bar'], tupleize_cols=False)
# non-numeric index_col
self.assertRaises(ValueError, self.read_csv, StringIO(data), header=[0, 1, 2, 3],
index_col=['foo', 'bar'], tupleize_cols=False)
def test_header_multiindex_common_format(self):
df = DataFrame([[1, 2, 3, 4, 5, 6], [7, 8, 9, 10, 11, 12]],
index=['one', 'two'],
columns=MultiIndex.from_tuples([('a', 'q'), ('a', 'r'), ('a', 's'),
('b', 't'), ('c', 'u'), ('c', 'v')]))
# to_csv
data = """,a,a,a,b,c,c
,q,r,s,t,u,v
,,,,,,
one,1,2,3,4,5,6
two,7,8,9,10,11,12"""
result = self.read_csv(StringIO(data), header=[0, 1], index_col=0)
tm.assert_frame_equal(df, result)
# common
data = """,a,a,a,b,c,c
,q,r,s,t,u,v
one,1,2,3,4,5,6
two,7,8,9,10,11,12"""
result = self.read_csv(StringIO(data), header=[0, 1], index_col=0)
tm.assert_frame_equal(df, result)
# common, no index_col
data = """a,a,a,b,c,c
q,r,s,t,u,v
1,2,3,4,5,6
7,8,9,10,11,12"""
result = self.read_csv(StringIO(data), header=[0, 1], index_col=None)
tm.assert_frame_equal(df.reset_index(drop=True), result)
# malformed case 1
expected = DataFrame(np.array([[2, 3, 4, 5, 6],
[8, 9, 10, 11, 12]], dtype='int64'),
index=Index([1, 7]),
columns=MultiIndex(levels=[[u('a'), u('b'), u('c')], [u('r'), u('s'), u('t'), u('u'), u('v')]],
labels=[[0, 0, 1, 2, 2], [
0, 1, 2, 3, 4]],
names=[u('a'), u('q')]))
data = """a,a,a,b,c,c
q,r,s,t,u,v
1,2,3,4,5,6
7,8,9,10,11,12"""
result = self.read_csv(StringIO(data), header=[0, 1], index_col=0)
tm.assert_frame_equal(expected, result)
# malformed case 2
expected = DataFrame(np.array([[2, 3, 4, 5, 6],
[8, 9, 10, 11, 12]], dtype='int64'),
index=Index([1, 7]),
columns=MultiIndex(levels=[[u('a'), u('b'), u('c')], [u('r'), u('s'), u('t'), u('u'), u('v')]],
labels=[[0, 0, 1, 2, 2], [
0, 1, 2, 3, 4]],
names=[None, u('q')]))
data = """,a,a,b,c,c
q,r,s,t,u,v
1,2,3,4,5,6
7,8,9,10,11,12"""
result = self.read_csv(StringIO(data), header=[0, 1], index_col=0)
tm.assert_frame_equal(expected, result)
# mi on columns and index (malformed)
expected = DataFrame(np.array([[3, 4, 5, 6],
[9, 10, 11, 12]], dtype='int64'),
index=MultiIndex(levels=[[1, 7], [2, 8]],
labels=[[0, 1], [0, 1]]),
columns=MultiIndex(levels=[[u('a'), u('b'), u('c')], [u('s'), u('t'), u('u'), u('v')]],
labels=[[0, 1, 2, 2],
[0, 1, 2, 3]],
names=[None, u('q')]))
data = """,a,a,b,c,c
q,r,s,t,u,v
1,2,3,4,5,6
7,8,9,10,11,12"""
result = self.read_csv(StringIO(data), header=[0, 1], index_col=[0, 1])
tm.assert_frame_equal(expected, result)
def test_pass_names_with_index(self):
lines = self.data1.split('\n')
no_header = '\n'.join(lines[1:])
# regular index
names = ['index', 'A', 'B', 'C', 'D']
df = self.read_csv(StringIO(no_header), index_col=0, names=names)
expected = self.read_csv(StringIO(self.data1), index_col=0)
tm.assert_frame_equal(df, expected)
# multi index
data = """index1,index2,A,B,C,D
foo,one,2,3,4,5
foo,two,7,8,9,10
foo,three,12,13,14,15
bar,one,12,13,14,15
bar,two,12,13,14,15
"""
lines = data.split('\n')
no_header = '\n'.join(lines[1:])
names = ['index1', 'index2', 'A', 'B', 'C', 'D']
df = self.read_csv(StringIO(no_header), index_col=[0, 1],
names=names)
expected = self.read_csv(StringIO(data), index_col=[0, 1])
tm.assert_frame_equal(df, expected)
df = self.read_csv(StringIO(data), index_col=['index1', 'index2'])
tm.assert_frame_equal(df, expected)
def test_multi_index_no_level_names(self):
data = """index1,index2,A,B,C,D
foo,one,2,3,4,5
foo,two,7,8,9,10
foo,three,12,13,14,15
bar,one,12,13,14,15
bar,two,12,13,14,15
"""
data2 = """A,B,C,D
foo,one,2,3,4,5
foo,two,7,8,9,10
foo,three,12,13,14,15
bar,one,12,13,14,15
bar,two,12,13,14,15
"""
lines = data.split('\n')
no_header = '\n'.join(lines[1:])
names = ['A', 'B', 'C', 'D']
df = self.read_csv(StringIO(no_header), index_col=[0, 1],
header=None, names=names)
expected = self.read_csv(StringIO(data), index_col=[0, 1])
tm.assert_frame_equal(df, expected, check_names=False)
# 2 implicit first cols
df2 = self.read_csv(StringIO(data2))
tm.assert_frame_equal(df2, df)
# reverse order of index
df = self.read_csv(StringIO(no_header), index_col=[1, 0], names=names,
header=None)
expected = self.read_csv(StringIO(data), index_col=[1, 0])
tm.assert_frame_equal(df, expected, check_names=False)
def test_multi_index_parse_dates(self):
data = """index1,index2,A,B,C
20090101,one,a,1,2
20090101,two,b,3,4
20090101,three,c,4,5
20090102,one,a,1,2
20090102,two,b,3,4
20090102,three,c,4,5
20090103,one,a,1,2
20090103,two,b,3,4
20090103,three,c,4,5
"""
df = self.read_csv(StringIO(data), index_col=[0, 1], parse_dates=True)
self.assertIsInstance(df.index.levels[0][0],
(datetime, np.datetime64, Timestamp))
# specify columns out of order!
df2 = self.read_csv(StringIO(data), index_col=[1, 0], parse_dates=True)
self.assertIsInstance(df2.index.levels[1][0],
(datetime, np.datetime64, Timestamp))
def test_skip_footer(self):
# GH 6607
# Test currently only valid with python engine because
# skip_footer != 0. Temporarily copied to TestPythonParser.
# Test for ValueError with other engines:
with tm.assertRaisesRegexp(ValueError, 'skip_footer'):
data = """A,B,C
1,2,3
4,5,6
7,8,9
want to skip this
also also skip this
"""
result = self.read_csv(StringIO(data), skip_footer=2)
no_footer = '\n'.join(data.split('\n')[:-3])
expected = self.read_csv(StringIO(no_footer))
tm.assert_frame_equal(result, expected)
result = self.read_csv(StringIO(data), nrows=3)
tm.assert_frame_equal(result, expected)
# skipfooter alias
result = read_csv(StringIO(data), skipfooter=2)
no_footer = '\n'.join(data.split('\n')[:-3])
expected = read_csv(StringIO(no_footer))
tm.assert_frame_equal(result, expected)
def test_no_unnamed_index(self):
data = """ id c0 c1 c2
0 1 0 a b
1 2 0 c d
2 2 2 e f
"""
df = self.read_table(StringIO(data), sep=' ')
self.assertIsNone(df.index.name)
def test_converters(self):
data = """A,B,C,D
a,1,2,01/01/2009
b,3,4,01/02/2009
c,4,5,01/03/2009
"""
from pandas.compat import parse_date
result = self.read_csv(StringIO(data), converters={'D': parse_date})
result2 = self.read_csv(StringIO(data), converters={3: parse_date})
expected = self.read_csv(StringIO(data))
expected['D'] = expected['D'].map(parse_date)
tm.assertIsInstance(result['D'][0], (datetime, Timestamp))
tm.assert_frame_equal(result, expected)
tm.assert_frame_equal(result2, expected)
# produce integer
converter = lambda x: int(x.split('/')[2])
result = self.read_csv(StringIO(data), converters={'D': converter})
expected = self.read_csv(StringIO(data))
expected['D'] = expected['D'].map(converter)
tm.assert_frame_equal(result, expected)
def test_converters_no_implicit_conv(self):
# GH2184
data = """000102,1.2,A\n001245,2,B"""
f = lambda x: x.strip()
converter = {0: f}
df = self.read_csv(StringIO(data), header=None, converters=converter)
self.assertEqual(df[0].dtype, object)
def test_converters_euro_decimal_format(self):
data = """Id;Number1;Number2;Text1;Text2;Number3
1;1521,1541;187101,9543;ABC;poi;4,738797819
2;121,12;14897,76;DEF;uyt;0,377320872
3;878,158;108013,434;GHI;rez;2,735694704"""
f = lambda x: float(x.replace(",", "."))
converter = {'Number1': f, 'Number2': f, 'Number3': f}
df2 = self.read_csv(StringIO(data), sep=';', converters=converter)
self.assertEqual(df2['Number1'].dtype, float)
self.assertEqual(df2['Number2'].dtype, float)
self.assertEqual(df2['Number3'].dtype, float)
def test_converter_return_string_bug(self):
# GH #583
data = """Id;Number1;Number2;Text1;Text2;Number3
1;1521,1541;187101,9543;ABC;poi;4,738797819
2;121,12;14897,76;DEF;uyt;0,377320872
3;878,158;108013,434;GHI;rez;2,735694704"""
f = lambda x: float(x.replace(",", "."))
converter = {'Number1': f, 'Number2': f, 'Number3': f}
df2 = self.read_csv(StringIO(data), sep=';', converters=converter)
self.assertEqual(df2['Number1'].dtype, float)
def test_read_table_buglet_4x_multiindex(self):
# GH 6607
# Parsing multi-level index currently causes an error in the C parser.
# Temporarily copied to TestPythonParser.
# Here test that CParserError is raised:
with tm.assertRaises(pandas.parser.CParserError):
text = """ A B C D E
one two three four
a b 10.0032 5 -0.5109 -2.3358 -0.4645 0.05076 0.3640
a q 20 4 0.4473 1.4152 0.2834 1.00661 0.1744
x q 30 3 -0.6662 -0.5243 -0.3580 0.89145 2.5838"""
# it works!
df = self.read_table(StringIO(text), sep='\s+')
self.assertEqual(df.index.names, ('one', 'two', 'three', 'four'))
def test_line_comment(self):
data = """# empty
A,B,C
1,2.,4.#hello world
#ignore this line
5.,NaN,10.0
"""
expected = [[1., 2., 4.],
[5., np.nan, 10.]]
df = self.read_csv(StringIO(data), comment='#')
tm.assert_almost_equal(df.values, expected)
def test_comment_skiprows(self):
data = """# empty
random line
# second empty line
1,2,3
A,B,C
1,2.,4.
5.,NaN,10.0
"""
expected = [[1., 2., 4.],
[5., np.nan, 10.]]
# this should ignore the first four lines (including comments)
df = self.read_csv(StringIO(data), comment='#', skiprows=4)
tm.assert_almost_equal(df.values, expected)
def test_comment_header(self):
data = """# empty
# second empty line
1,2,3
A,B,C
1,2.,4.
5.,NaN,10.0
"""
expected = [[1., 2., 4.],
[5., np.nan, 10.]]
# header should begin at the second non-comment line
df = self.read_csv(StringIO(data), comment='#', header=1)
tm.assert_almost_equal(df.values, expected)
def test_comment_skiprows_header(self):
data = """# empty
# second empty line
# third empty line
X,Y,Z
1,2,3
A,B,C
1,2.,4.
5.,NaN,10.0
"""
expected = [[1., 2., 4.],
[5., np.nan, 10.]]
# skiprows should skip the first 4 lines (including comments), while
# header should start from the second non-commented line starting
# with line 5
df = self.read_csv(StringIO(data), comment='#', skiprows=4, header=1)
tm.assert_almost_equal(df.values, expected)
def test_read_csv_parse_simple_list(self):
text = """foo
bar baz
qux foo
foo
bar"""
df = read_csv(StringIO(text), header=None)
expected = DataFrame({0: ['foo', 'bar baz', 'qux foo',
'foo', 'bar']})
tm.assert_frame_equal(df, expected)
def test_parse_dates_custom_euroformat(self):
text = """foo,bar,baz
31/01/2010,1,2
01/02/2010,1,NA
02/02/2010,1,2
"""
parser = lambda d: parse_date(d, dayfirst=True)
df = self.read_csv(StringIO(text),
names=['time', 'Q', 'NTU'], header=0,
index_col=0, parse_dates=True,
date_parser=parser, na_values=['NA'])
exp_index = Index([datetime(2010, 1, 31), datetime(2010, 2, 1),
datetime(2010, 2, 2)], name='time')
expected = DataFrame({'Q': [1, 1, 1], 'NTU': [2, np.nan, 2]},
index=exp_index, columns=['Q', 'NTU'])
tm.assert_frame_equal(df, expected)
parser = lambda d: parse_date(d, day_first=True)
self.assertRaises(Exception, self.read_csv,
StringIO(text), skiprows=[0],
names=['time', 'Q', 'NTU'], index_col=0,
parse_dates=True, date_parser=parser,
na_values=['NA'])
def test_na_value_dict(self):
data = """A,B,C
foo,bar,NA
bar,foo,foo
foo,bar,NA
bar,foo,foo"""
df = self.read_csv(StringIO(data),
na_values={'A': ['foo'], 'B': ['bar']})
expected = DataFrame({'A': [np.nan, 'bar', np.nan, 'bar'],
'B': [np.nan, 'foo', np.nan, 'foo'],
'C': [np.nan, 'foo', np.nan, 'foo']})
tm.assert_frame_equal(df, expected)
data = """\
a,b,c,d
0,NA,1,5
"""
xp = DataFrame({'b': [np.nan], 'c': [1], 'd': [5]}, index=[0])
xp.index.name = 'a'
df = self.read_csv(StringIO(data), na_values={}, index_col=0)
tm.assert_frame_equal(df, xp)
xp = DataFrame({'b': [np.nan], 'd': [5]},
MultiIndex.from_tuples([(0, 1)]))
xp.index.names = ['a', 'c']
df = self.read_csv(StringIO(data), na_values={}, index_col=[0, 2])
tm.assert_frame_equal(df, xp)
xp = DataFrame({'b': [np.nan], 'd': [5]},
MultiIndex.from_tuples([(0, 1)]))
xp.index.names = ['a', 'c']
df = self.read_csv(StringIO(data), na_values={}, index_col=['a', 'c'])
tm.assert_frame_equal(df, xp)
@tm.network
def test_url(self):
# HTTP(S)
url = ('https://raw.github.com/pydata/pandas/master/'
'pandas/io/tests/data/salary.table')
url_table = self.read_table(url)
dirpath = tm.get_data_path()
localtable = os.path.join(dirpath, 'salary.table')
local_table = self.read_table(localtable)
tm.assert_frame_equal(url_table, local_table)
# TODO: ftp testing
@slow
def test_file(self):
# FILE
if sys.version_info[:2] < (2, 6):
raise nose.SkipTest("file:// not supported with Python < 2.6")
dirpath = tm.get_data_path()
localtable = os.path.join(dirpath, 'salary.table')
local_table = self.read_table(localtable)
try:
url_table = self.read_table('file://localhost/' + localtable)
except URLError:
# fails on some systems
raise nose.SkipTest("failing on %s" %
' '.join(platform.uname()).strip())
tm.assert_frame_equal(url_table, local_table)
def test_parse_tz_aware(self):
import pytz
# #1693
data = StringIO("Date,x\n2012-06-13T01:39:00Z,0.5")
# it works
result = read_csv(data, index_col=0, parse_dates=True)
stamp = result.index[0]
self.assertEqual(stamp.minute, 39)
try:
self.assertIs(result.index.tz, pytz.utc)
except AssertionError: # hello Yaroslav
arr = result.index.to_pydatetime()
result = tools.to_datetime(arr, utc=True)[0]
self.assertEqual(stamp.minute, result.minute)
self.assertEqual(stamp.hour, result.hour)
self.assertEqual(stamp.day, result.day)
def test_multiple_date_cols_index(self):
data = """\
ID,date,NominalTime,ActualTime,TDew,TAir,Windspeed,Precip,WindDir
KORD1,19990127, 19:00:00, 18:56:00, 0.8100, 2.8100, 7.2000, 0.0000, 280.0000
KORD2,19990127, 20:00:00, 19:56:00, 0.0100, 2.2100, 7.2000, 0.0000, 260.0000
KORD3,19990127, 21:00:00, 20:56:00, -0.5900, 2.2100, 5.7000, 0.0000, 280.0000
KORD4,19990127, 21:00:00, 21:18:00, -0.9900, 2.0100, 3.6000, 0.0000, 270.0000
KORD5,19990127, 22:00:00, 21:56:00, -0.5900, 1.7100, 5.1000, 0.0000, 290.0000
KORD6,19990127, 23:00:00, 22:56:00, -0.5900, 1.7100, 4.6000, 0.0000, 280.0000"""
xp = self.read_csv(StringIO(data), parse_dates={'nominal': [1, 2]})
df = self.read_csv(StringIO(data), parse_dates={'nominal': [1, 2]},
index_col='nominal')
tm.assert_frame_equal(xp.set_index('nominal'), df)
df2 = self.read_csv(StringIO(data), parse_dates={'nominal': [1, 2]},
index_col=0)
tm.assert_frame_equal(df2, df)
df3 = self.read_csv(StringIO(data), parse_dates=[[1, 2]], index_col=0)
tm.assert_frame_equal(df3, df, check_names=False)
def test_multiple_date_cols_chunked(self):
df = self.read_csv(StringIO(self.ts_data), parse_dates={
'nominal': [1, 2]}, index_col='nominal')
reader = self.read_csv(StringIO(self.ts_data), parse_dates={'nominal':
[1, 2]}, index_col='nominal', chunksize=2)
chunks = list(reader)
self.assertNotIn('nominalTime', df)
tm.assert_frame_equal(chunks[0], df[:2])
tm.assert_frame_equal(chunks[1], df[2:4])
tm.assert_frame_equal(chunks[2], df[4:])
def test_multiple_date_col_named_components(self):
xp = self.read_csv(StringIO(self.ts_data),
parse_dates={'nominal': [1, 2]},
index_col='nominal')
colspec = {'nominal': ['date', 'nominalTime']}
df = self.read_csv(StringIO(self.ts_data), parse_dates=colspec,
index_col='nominal')
tm.assert_frame_equal(df, xp)
def test_multiple_date_col_multiple_index(self):
df = self.read_csv(StringIO(self.ts_data),
parse_dates={'nominal': [1, 2]},
index_col=['nominal', 'ID'])
xp = self.read_csv(StringIO(self.ts_data),
parse_dates={'nominal': [1, 2]})
tm.assert_frame_equal(xp.set_index(['nominal', 'ID']), df)
def test_comment(self):
data = """A,B,C
1,2.,4.#hello world
5.,NaN,10.0
"""
expected = [[1., 2., 4.],
[5., np.nan, 10.]]
df = self.read_csv(StringIO(data), comment='#')
tm.assert_almost_equal(df.values, expected)
df = self.read_table(StringIO(data), sep=',', comment='#',
na_values=['NaN'])
tm.assert_almost_equal(df.values, expected)
def test_bool_na_values(self):
data = """A,B,C
True,False,True
NA,True,False
False,NA,True"""
result = self.read_csv(StringIO(data))
expected = DataFrame({'A': np.array([True, nan, False], dtype=object),
'B': np.array([False, True, nan], dtype=object),
'C': [True, False, True]})
tm.assert_frame_equal(result, expected)
def test_nonexistent_path(self):
# don't segfault pls #2428
path = '%s.csv' % tm.rands(10)
self.assertRaises(Exception, self.read_csv, path)
def test_missing_trailing_delimiters(self):
data = """A,B,C,D
1,2,3,4
1,3,3,
1,4,5"""
result = self.read_csv(StringIO(data))
self.assertTrue(result['D'].isnull()[1:].all())
def test_skipinitialspace(self):
s = ('"09-Apr-2012", "01:10:18.300", 2456026.548822908, 12849, '
'1.00361, 1.12551, 330.65659, 0355626618.16711, 73.48821, '
'314.11625, 1917.09447, 179.71425, 80.000, 240.000, -350, '
'70.06056, 344.98370, 1, 1, -0.689265, -0.692787, '
'0.212036, 14.7674, 41.605, -9999.0, -9999.0, '
'-9999.0, -9999.0, -9999.0, -9999.0, 000, 012, 128')
sfile = StringIO(s)
# it's 33 columns
result = self.read_csv(sfile, names=lrange(33), na_values=['-9999.0'],
header=None, skipinitialspace=True)
self.assertTrue(pd.isnull(result.ix[0, 29]))
def test_utf16_bom_skiprows(self):
# #2298
data = u("""skip this
skip this too
A\tB\tC
1\t2\t3
4\t5\t6""")
data2 = u("""skip this
skip this too
A,B,C
1,2,3
4,5,6""")
path = '__%s__.csv' % tm.rands(10)
with tm.ensure_clean(path) as path:
for sep, dat in [('\t', data), (',', data2)]:
for enc in ['utf-16', 'utf-16le', 'utf-16be']:
bytes = dat.encode(enc)
with open(path, 'wb') as f:
f.write(bytes)
s = BytesIO(dat.encode('utf-8'))
if compat.PY3:
# somewhat False since the code never sees bytes
from io import TextIOWrapper
s = TextIOWrapper(s, encoding='utf-8')
result = self.read_csv(path, encoding=enc, skiprows=2,
sep=sep)
expected = self.read_csv(s, encoding='utf-8', skiprows=2,
sep=sep)
tm.assert_frame_equal(result, expected)
def test_utf16_example(self):
path = tm.get_data_path('utf16_ex.txt')
# it works! and is the right length
result = self.read_table(path, encoding='utf-16')
self.assertEqual(len(result), 50)
if not compat.PY3:
buf = BytesIO(open(path, 'rb').read())
result = self.read_table(buf, encoding='utf-16')
self.assertEqual(len(result), 50)
def test_converters_corner_with_nas(self):
# skip aberration observed on Win64 Python 3.2.2
if hash(np.int64(-1)) != -2:
raise nose.SkipTest("skipping because of windows hash on Python"
" 3.2.2")
csv = """id,score,days
1,2,12
2,2-5,
3,,14+
4,6-12,2"""
def convert_days(x):
x = x.strip()
if not x:
return np.nan
is_plus = x.endswith('+')
if is_plus:
x = int(x[:-1]) + 1
else:
x = int(x)
return x
def convert_days_sentinel(x):
x = x.strip()
if not x:
return np.nan
is_plus = x.endswith('+')
if is_plus:
x = int(x[:-1]) + 1
else:
x = int(x)
return x
def convert_score(x):
x = x.strip()
if not x:
return np.nan
if x.find('-') > 0:
valmin, valmax = lmap(int, x.split('-'))
val = 0.5 * (valmin + valmax)
else:
val = float(x)
return val
fh = StringIO(csv)
result = self.read_csv(fh, converters={'score': convert_score,
'days': convert_days},
na_values=['', None])
self.assertTrue(pd.isnull(result['days'][1]))
fh = StringIO(csv)
result2 = self.read_csv(fh, converters={'score': convert_score,
'days': convert_days_sentinel},
na_values=['', None])
tm.assert_frame_equal(result, result2)
def test_unicode_encoding(self):
pth = tm.get_data_path('unicode_series.csv')
result = self.read_csv(pth, header=None, encoding='latin-1')
result = result.set_index(0)
got = result[1][1632]
expected = u('\xc1 k\xf6ldum klaka (Cold Fever) (1994)')
self.assertEqual(got, expected)
def test_trailing_delimiters(self):
# #2442. grumble grumble
data = """A,B,C
1,2,3,
4,5,6,
7,8,9,"""
result = self.read_csv(StringIO(data), index_col=False)
expected = DataFrame({'A': [1, 4, 7], 'B': [2, 5, 8],
'C': [3, 6, 9]})
tm.assert_frame_equal(result, expected)
def test_escapechar(self):
# http://stackoverflow.com/questions/13824840/feature-request-for-
# pandas-read-csv
data = '''SEARCH_TERM,ACTUAL_URL
"bra tv bord","http://www.ikea.com/se/sv/catalog/categories/departments/living_room/10475/?se%7cps%7cnonbranded%7cvardagsrum%7cgoogle%7ctv_bord"
"tv p\xc3\xa5 hjul","http://www.ikea.com/se/sv/catalog/categories/departments/living_room/10475/?se%7cps%7cnonbranded%7cvardagsrum%7cgoogle%7ctv_bord"
"SLAGBORD, \\"Bergslagen\\", IKEA:s 1700-tals serie","http://www.ikea.com/se/sv/catalog/categories/departments/living_room/10475/?se%7cps%7cnonbranded%7cvardagsrum%7cgoogle%7ctv_bord"'''
result = self.read_csv(StringIO(data), escapechar='\\',
quotechar='"', encoding='utf-8')
self.assertEqual(result['SEARCH_TERM'][2],
'SLAGBORD, "Bergslagen", IKEA:s 1700-tals serie')
self.assertTrue(np.array_equal(result.columns,
['SEARCH_TERM', 'ACTUAL_URL']))
def test_header_names_backward_compat(self):
# #2539
data = '1,2,3\n4,5,6'
result = self.read_csv(StringIO(data), names=['a', 'b', 'c'])
expected = self.read_csv(StringIO(data), names=['a', 'b', 'c'],
header=None)
tm.assert_frame_equal(result, expected)
data2 = 'foo,bar,baz\n' + data
result = self.read_csv(StringIO(data2), names=['a', 'b', 'c'],
header=0)
tm.assert_frame_equal(result, expected)
def test_int64_min_issues(self):
# #2599
data = 'A,B\n0,0\n0,'
result = self.read_csv(StringIO(data))
expected = DataFrame({'A': [0, 0], 'B': [0, np.nan]})
tm.assert_frame_equal(result, expected)
def test_parse_integers_above_fp_precision(self):
data = """Numbers
17007000002000191
17007000002000191
17007000002000191
17007000002000191
17007000002000192
17007000002000192
17007000002000192
17007000002000192
17007000002000192
17007000002000194"""
result = self.read_csv(StringIO(data))
expected = DataFrame({'Numbers': [17007000002000191,
17007000002000191,
17007000002000191,
17007000002000191,
17007000002000192,
17007000002000192,
17007000002000192,
17007000002000192,
17007000002000192,
17007000002000194]})
self.assertTrue(np.array_equal(result['Numbers'], expected['Numbers']))
def test_usecols_index_col_conflict(self):
# Issue 4201 Test that index_col as integer reflects usecols
data = """SecId,Time,Price,P2,P3
10000,2013-5-11,100,10,1
500,2013-5-12,101,11,1
"""
expected = DataFrame({'Price': [100, 101]}, index=[
datetime(2013, 5, 11), datetime(2013, 5, 12)])
expected.index.name = 'Time'
df = self.read_csv(StringIO(data), usecols=[
'Time', 'Price'], parse_dates=True, index_col=0)
tm.assert_frame_equal(expected, df)
df = self.read_csv(StringIO(data), usecols=[
'Time', 'Price'], parse_dates=True, index_col='Time')
        tm.assert_frame_equal(expected, df)
#!/bin/env python3
"""create_csv_of_kp_predicate_triples.py
Creates a CSV of all predicate triples of the form (node type, edge type, node type) for KG1, KG2, and BTE (ARAX's current knowledge providers).
Resulting columns are: subject_type, edge_type, object_type
Usage: python create_csv_of_kp_predicate_triples.py
"""
# adapted from <NAME> code in create_csv_of_kp_node_pairs.py
import requests
import sys
import os
import csv
import time
import pandas as pd
from neo4j import GraphDatabase
sys.path.append(os.path.dirname(os.path.abspath(__file__))+"/../../") # code directory
from RTXConfiguration import RTXConfiguration
def run_neo4j_query(cypher, kg_name, data_type):
rtx_config = RTXConfiguration()
if kg_name != "KG1":
rtx_config.live = kg_name
driver = GraphDatabase.driver(rtx_config.neo4j_bolt, auth=(rtx_config.neo4j_username, rtx_config.neo4j_password))
with driver.session() as session:
start = time.time()
print(f"Grabbing {data_type} from {kg_name} neo4j...")
results = session.run(cypher).data()
print(f"...done. Query took {round((time.time() - start) / 60, 2)} minutes.")
driver.close()
return results
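# Illustrative usage of the helper above (not part of the original script): any
# read-only Cypher query can be passed through it; the KG name and description
# strings are just examples taken from the calls further down in this file.
#
#   labels = run_neo4j_query("call db.labels()", "KG2", "node labels")
#   print(labels[:3])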
def get_kg1_predicate_triples():
cypher = 'match (n)-[r]->(m) with distinct labels(n) as n1s, type(r) as rel, '+\
'labels(m) as n2s unwind n1s as n1 unwind n2s as n2 with distinct n1 as '+\
'node1, rel as relationship, n2 as node2 where node1 <> "Base" and '+\
'node2 <> "Base" return node1, relationship, node2'
    # Changed this from using n.category so that it can handle nodes with multiple labels.
    # Unfortunately that makes the cypher a little more unwieldy and likely slows the query a bit.
results = run_neo4j_query(cypher, "KG1", "predicate triples")
triples_dict = {"subject":[], "predicate":[], "object":[]}
for result in results:
subject_type = result.get('node1')
object_type = result.get('node2')
predicate = result.get('relationship')
triples_dict['subject'].append(subject_type)
triples_dict['object'].append(object_type)
triples_dict['predicate'].append(predicate)
return pd.DataFrame(triples_dict)
def get_kg2_predicate_triples():
cypher = 'match (n)-[r]->(m) with distinct labels(n) as n1s, type(r) as rel, '+\
'labels(m) as n2s unwind n1s as n1 unwind n2s as n2 with distinct n1 as '+\
'node1, rel as relationship, n2 as node2 where node1 <> "Base" and '+\
'node2 <> "Base" return node1, relationship, node2'
    # Changed this from using n.category so that it can handle nodes with multiple labels.
    # Unfortunately this makes the cypher a little more unwieldy and likely slows the query a bit.
results = run_neo4j_query(cypher, "KG2", "predicate triples")
triples_dict = {"subject":[], "predicate":[], "object":[]}
for result in results:
subject_type = result.get('node1')
object_type = result.get('node2')
predicate = result.get('relationship')
triples_dict['subject'].append(subject_type)
triples_dict['object'].append(object_type)
triples_dict['predicate'].append(predicate)
return pd.DataFrame(triples_dict)
def get_kg2c_predicate_triples():
cypher = 'match (n)-[r]->(m) with distinct labels(n) as n1s, type(r) as rel, '+\
'labels(m) as n2s unwind n1s as n1 unwind n2s as n2 with distinct n1 as '+\
'node1, rel as relationship, n2 as node2 where node1 <> "Base" and '+\
'node2 <> "Base" return node1, relationship, node2'
    # Changed this from using n.category so that it can handle nodes with multiple labels.
    # Unfortunately this makes the cypher a little more unwieldy and likely slows the query a bit.
results = run_neo4j_query(cypher, "KG2c", "predicate triples")
triples_dict = {"subject":[], "predicate":[], "object":[]}
for result in results:
subject_type = result.get('node1')
object_type = result.get('node2')
predicate = result.get('relationship')
triples_dict['subject'].append(subject_type)
triples_dict['object'].append(object_type)
triples_dict['predicate'].append(predicate)
return pd.DataFrame(triples_dict)
def get_kg1_node_labels():
cypher = 'call db.labels()'
results = run_neo4j_query(cypher, "KG1", "node labels")
labels_dict = {"label":[]}
for result in results:
label = result.get('label')
labels_dict["label"].append(label)
return pd.DataFrame(labels_dict)
def get_kg2_node_labels():
cypher = 'call db.labels()'
results = run_neo4j_query(cypher, "KG2", "node labels")
labels_dict = {"label":[]}
for result in results:
label = result.get('label')
labels_dict["label"].append(label)
return pd.DataFrame(labels_dict)
def get_kg2c_node_labels():
cypher = 'call db.labels()'
results = run_neo4j_query(cypher, "KG2c", "node labels")
labels_dict = {"label":[]}
for result in results:
label = result.get('label')
labels_dict["label"].append(label)
return pd.DataFrame(labels_dict)
def get_kg1_relationship_types():
cypher = 'call db.relationshipTypes()'
results = run_neo4j_query(cypher, "KG1", "relationship types")
predicate_dict = {"predicate":[]}
for result in results:
predicate = result.get('relationshipType')
predicate_dict["predicate"].append(predicate)
    return pd.DataFrame(predicate_dict)
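# Hypothetical end-to-end sketch (the file name and the __main__ guard are
# assumptions, not taken from the original script): the helpers above would be
# combined roughly like this to produce the predicate-triples CSV described in
# the module docstring.
#
#   if __name__ == "__main__":
#       kg1_triples = get_kg1_predicate_triples()
#       kg1_triples.to_csv("KG1_allowed_predicate_triples.csv", index=False)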
from . import constants as c
import numpy as np
import json
class Struct(object):
""" Struct-like container object """
def __init__(self, **kwds): # keyword args define attribute names and values
self.__dict__.update(**kwds)
def __repr__(self):
#s = "\n"
s = "\nCCParser Struct keys:\n"
for key in self.__dict__.keys():
s += f"'{key}'\n"
return s
class ParseContainer(object):
""" Generic container object which keeps track of the parsed quantities.
It allows to parse the same quantity several times.
There should be one instance/parsed quantity. """
def __init__(self):
self.nversion = 0
self.data = []
self.lines = []
self.serializable = False
@classmethod
def from_obj(cls, line, parsed_obj):
"""Alternative constructor. Initialize directly with line and parsed
object.
Parameters
----------
line : int
line number
parsed_obj : any
parsed object
"""
pc = cls()
pc.add(line, parsed_obj)
return pc
def add(self, hook_line, new_obj):
#self.data[hook_line] = new_pvalue
self.data.append(new_obj)
self.lines.append(hook_line)
self.nversion += 1
def get_first(self):
idx = self.lines.index(min(self.lines))#not needed if assuming ordered parsing (line by line)
#return self.data[0]
return self.data[idx]
def get_last(self):
idx = self.lines.index(max(self.lines))#not needed if assuming ordered parsing (line by line)
# return self.data[-1]
return self.data[idx]
def get_data(self):
return self.data
def get_lines(self):
return self.lines
def make_serializable(self):
"""Turn fancy data types into sth that json.dump can recognize. """
try:
dt = type(self.data[0])
except IndexError:
raise ParserDataError(("ParseContainer not serializable (data list"
" empty)."))
# take care of numpy data types
if dt.__module__ == "numpy" or "numpy." in dt.__module__:
encoder = NumpyEncoder()
self.data = [encoder.default(obj) for obj in self.data]
# CCParser data types
elif dt == MolecularOrbitals or dt == Amplitudes:
self.data = [obj.encode() for obj in self.data]
# assume other datatypes are serializable
self.serializable = True
def to_tuple(self):
if self.serializable:
return tuple(zip(self.data, self.lines))
else:
self.make_serializable()
return tuple(zip(self.data, self.lines))
def to_list(self):
if self.serializable:
return list(zip(self.data, self.lines))
else:
self.make_serializable()
return list(zip(self.data, self.lines))
def __len__(self):
assert len(self.data) == len(self.lines)
return len(self.data)
def __getitem__(self, idx):
if isinstance(idx, slice):
return self.data[idx.start : idx.stop : idx.step]
else:
if idx >= len(self.data) or abs(idx) > len(self.data):
raise IndexError("ParseContainer: Index out of range")
return self.data[idx]
def __setitem__(self, idx, value):
""" Setter method which expects value tuple (line, parsed_obj) """
self.lines[idx] = value[0]
self.data[idx] = value[1]
    def __delitem__(self, idx):
        # list.remove() deletes by value, not by position; delete by index instead
        del self.data[idx]
        del self.lines[idx]
def __iter__(self):
return iter(self.data)
# def __next__(self):
# if self.n <= self.nversion:
# return self.data[self.n]
# else:
# raise StopIteration
def __contains__(self, line):
if type(line) == str:
line = int(line)
return True if line in self.lines else False
def __str__(self):
s = "\n"
s+= "Line" + 3*" " + "Parsed Value\n"
for i in range(self.nversion):
s+= str(self.lines[i]) + 3*" " + str(self.data[i]) + "\n"
return s
def __repr__(self):
s = "\n"
s+= "Line" + 3*" " + "Parsed Value\n"
for i in range(self.nversion):
s+= str(self.lines[i]) + 3*" " + str(self.data[i]) + "\n"
return s
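# Minimal usage sketch for ParseContainer (values are illustrative): one container
# per parsed quantity, add() called once per hook line, newest/oldest results
# retrieved via get_last()/get_first(), membership tested against line numbers.
#
#   pc = ParseContainer()
#   pc.add(12, -76.4013)   # (line number, parsed object)
#   pc.add(87, -76.4021)
#   pc.get_last()          # -> -76.4021
#   87 in pc               # -> True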
class ParserDataError(Exception):
"""Raise for ParserData related errors. """
class StructEncoder(json.JSONEncoder):
def default(self, struct):
if isinstance(struct, Struct):
results = {}
for label, pc in struct.__dict__.items():
results[label] = pc.to_list()
return results
else:
            return super().default(struct)
class NumpyEncoder(json.JSONEncoder):
""" Special json encoder for numpy types """
def default(self, obj):
if isinstance(obj, (np.int_, np.intc, np.intp, np.int8, np.int16,
np.int32, np.int64, np.uint8, np.uint16, np.uint32,
np.uint64)):
return int(obj)
elif isinstance(obj, (np.float_, np.float16, np.float32, np.float64)):
return float(obj)
elif isinstance(obj, (np.ndarray,)): #### This is the fix
return obj.tolist()
else:
            return super().default(obj)
class MolecularOrbitals(object):
# TODO: change name? "OrbitalEnergies"
# TODO: add symmetries
""" General molecular orbital class, which has more functionality than
simple arrays.
"""
N_ORB_PER_LINE = 10
def __init__(self, o, v):
self.occ = list(map(float, o))
self.virt = list(map(float, v))
self.n_occ = len(o)
self.n_virt = len(v)
self.n_mo = self.n_occ + self.n_virt
self.homo = max(self.occ ) if self.n_occ > 0 else 0
self.lumo = min(self.virt) if self.n_virt > 0 else 0
@classmethod
def from_dict(cls, d):
try:
return cls(d["occ"], d["virt"])
except (KeyError, TypeError) as e:
raise ParserDataError(("Dictionary not suitable to create "
"MolecularOrbitals object."))
#TODO: from_string method as implicit list conversion is not great
@classmethod
def from_tuples(cls, t):
# find first occurrence of virt
idx = next(t.index(i) for i in t if i[1] == "virt" or i[1] == "v")
# create lists using the index
o, dummy = zip(*t[:idx])
v, dummy = zip(*t[idx:])
return cls(o, v)
def __str__(self):
n1 = [i for i in range(1, self.n_occ+1)]
n2 = [i for i in range(self.n_occ +1, self.n_mo+1)]
s = "\n"
for i in range(0, len(self.occ), self.N_ORB_PER_LINE):
s += 4*" " + " ".join("{:>8}".format(j) for j in n1[i:i+self.N_ORB_PER_LINE])+"\n"
if i == 0:
s += " occ: " +' '.join("{:8.3f}".format(j) for j in self.occ[i:i+self.N_ORB_PER_LINE])+"\n"
else:
s += 6*" "+' '.join("{:8.3f}".format(j) for j in self.occ[i:i+self.N_ORB_PER_LINE])+"\n"
s += 7*" "+88*"-"+"\n"
for i in range(0, len(self.virt), self.N_ORB_PER_LINE):
s += 4*" " + " ".join("{:>8}".format(j) for j in n2[i:i+self.N_ORB_PER_LINE])+"\n"
if i == 0:
s += " virt:" +' '.join("{:8.3f}".format(j) for j in self.virt[i:i+self.N_ORB_PER_LINE])+"\n"
else:
s += 6*" "+' '.join("{:8.3f}".format(j) for j in self.virt[i:i+self.N_ORB_PER_LINE])+"\n"
return s
def RVS(self, gap):
""" Determine amount of virtual orbitals to freeze based on energy gap (in eV) """
if gap <= 0:
raise ValueError("Negative or Zero energy gap not allowed for restriction of virtual space.")
else:
thr = gap/c.Hartree2eV + self.homo
# print("THR: ",thr)
# print("N_VIRT: ", self.n_virt)
idx = min(range(len(self.virt)), key=lambda i: abs(self.virt[i]-thr))
freeze = self.n_virt - (idx +1)
part_of_v = float(freeze)/float(self.n_virt)
s = "Index: {0:3d}, Number of frozen virtuals: {1:3d}, ({2:.1%})".format(idx, freeze, part_of_v)
print(s)
def to_dict(self):
return {"occ" : self.occ, "virt" : self.virt}
def to_tuples(self):
return list(zip(self.occ, ["occ" for i in range(self.n_occ )])) \
+ list(zip(self.virt, ["virt" for i in range(self.n_virt)]))
def encode(self, fmt=tuple):
if fmt == tuple:
return self.to_tuples()
elif fmt == dict:
return self.to_dict()
else:
raise ValueError("Export format not recognized.")
class Amplitudes(object):
""" General container for amplitudes of one state for easier access to and export of amplitude data """
def __init__(self, occ, virt, v, factor=1.0):
self.occ = occ # list of lists, even if only single int in sublist
self.virt= virt
self.v = v
self.factor = factor
self.weights = list(map(lambda x: self.factor * x**2, self.v))
self.print_thr = 0.05
def __str__(self):
s = "Amplitudes: Weights > {0:.0%}\n".format(self.print_thr)
for i in range(len(self.occ)):
if self.weights[i] > self.print_thr:
if len(self.occ[i]) == 1:
s += "{0:>4} -> {1:>4} : {2:.1f}\n".format(self.occ[i][0],
self.virt[i][0], 100*self.weights[i])
elif len(self.occ[i]) == 2:
s += "{0:>4}, {1:>4} -> {2:>4}, {3:>4} : {4:.1f}\n".format(
self.occ[i][0], self.occ[i][1], self.virt[i][0],
self.virt[i][1], 100*self.weights[i])
return s
@classmethod
def from_list(cls, allinone, factor=1.0):
""" Alternative constructor which expects single list.
Format: [[occ_i, occ_j,..., virt_a, virt_b,..., ampl], ...] """
occ, virt, v = [], [], []
for transition in allinone:
assert(len(transition) % 2 != 0)
n_mo = int((len(transition)-1)/2)
occ.append(transition[0:n_mo])# slices yield list, even if only one element
virt.append(transition[n_mo:-1])
v.append(transition[-1])# index yields float
return cls(occ, virt, v, factor)
def to_dataframe(self, thresh=0.05):
""" Converts the amplitude data to handy pandas.DataFrame object """
try:
import pandas as pd
except ImportError:
raise ImportError("Module 'pandas' needed for 'Amplitudes.to_dataframe()' ")
# TODO: improve this clunky part
max_exc = max(list(map(len,self.occ)))
occ, virt = [], []
for i in range(len(self.occ)):
occ.append(self.occ[i] + [0]*(max_exc - len(self.occ[i])))
virt.append(self.virt[i] + [0]*(max_exc - len(self.virt[i])))
idx_o = list(map(lambda x: "occ_"+str(x), [n for n in range(1,max_exc+1)]))
idx_v = list(map(lambda x: "virt_"+str(x), [n for n in range(1,max_exc+1)]))
df = pd.concat([pd.DataFrame(occ, columns=idx_o),
                              pd.DataFrame(virt, columns=idx_v)
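# Illustrative sketch for Amplitudes.from_list (numbers made up): each inner list is
# occupied indices, then virtual indices, then the amplitude, so the first entry is a
# single excitation 1 -> 5 and the second a double excitation 1,2 -> 5,6.
#
#   ampl = Amplitudes.from_list([[1, 5, 0.9], [1, 2, 5, 6, 0.3]])
#   print(ampl)              # lists all transitions with weights above 5 %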
from datetime import datetime, timedelta
import unittest
from pandas.core.datetools import (
bday, BDay, BQuarterEnd, BMonthEnd, BYearEnd, MonthEnd,
DateOffset, Week, YearBegin, YearEnd, Hour, Minute, Second,
format, ole2datetime, to_datetime, normalize_date,
getOffset, getOffsetName, inferTimeRule, hasOffsetName)
from nose.tools import assert_raises
####
## Misc function tests
####
def test_format():
actual = format(datetime(2008, 1, 15))
assert actual == '20080115'
def test_ole2datetime():
actual = ole2datetime(60000)
assert actual == datetime(2064, 4, 8)
assert_raises(Exception, ole2datetime, 60)
def test_to_datetime1():
actual = to_datetime(datetime(2008, 1, 15))
assert actual == datetime(2008, 1, 15)
actual = to_datetime('20080115')
assert actual == datetime(2008, 1, 15)
# unparseable
s = 'Month 1, 1999'
assert to_datetime(s) == s
def test_normalize_date():
actual = normalize_date(datetime(2007, 10, 1, 1, 12, 5, 10))
assert actual == datetime(2007, 10, 1)
#####
### DateOffset Tests
#####
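# Quick illustration of the legacy offset arithmetic exercised below (results taken
# from the test cases in this file, pandas.core.datetools API):
#
#   datetime(2008, 1, 2) + DateOffset(months=2)   # -> datetime(2008, 3, 2)
#   datetime(2008, 1, 1) + BDay(2)                # -> datetime(2008, 1, 3)
#   datetime(2008, 1, 5) + BDay()                 # Saturday rolls to Monday 2008-01-07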
class TestDateOffset(object):
def setUp(self):
self.d = datetime(2008, 1, 2)
def test_repr(self):
repr(DateOffset())
repr(DateOffset(2))
repr(2 * DateOffset())
repr(2 * DateOffset(months=2))
def test_mul(self):
assert DateOffset(2) == 2 * DateOffset(1)
assert DateOffset(2) == DateOffset(1) * 2
def test_constructor(self):
assert((self.d + DateOffset(months=2)) == datetime(2008, 3, 2))
assert((self.d - DateOffset(months=2)) == datetime(2007, 11, 2))
assert((self.d + DateOffset(2)) == datetime(2008, 1, 4))
assert not DateOffset(2).isAnchored()
assert DateOffset(1).isAnchored()
d = datetime(2008, 1, 31)
assert((d + DateOffset(months=1)) == datetime(2008, 2, 29))
def test_copy(self):
assert(DateOffset(months=2).copy() == DateOffset(months=2))
class TestBusinessDay(unittest.TestCase):
def setUp(self):
self.d = datetime(2008, 1, 1)
self.offset = BDay()
self.offset2 = BDay(2)
def test_repr(self):
assert repr(self.offset) == '<1 BusinessDay>'
assert repr(self.offset2) == '<2 BusinessDays>'
expected = '<1 BusinessDay: offset=datetime.timedelta(1)>'
assert repr(self.offset + timedelta(1)) == expected
def test_with_offset(self):
offset = self.offset + timedelta(hours=2)
assert (self.d + offset) == datetime(2008, 1, 2, 2)
def testEQ(self):
self.assertEqual(self.offset2, self.offset2)
def test_mul(self):
pass
def test_hash(self):
self.assertEqual(hash(self.offset2), hash(self.offset2))
def testCall(self):
self.assertEqual(self.offset2(self.d), datetime(2008, 1, 3))
def testRAdd(self):
self.assertEqual(self.d + self.offset2, self.offset2 + self.d)
def testSub(self):
off = self.offset2
self.assertRaises(Exception, off.__sub__, self.d)
self.assertEqual(2 * off - off, off)
self.assertEqual(self.d - self.offset2, self.d + BDay(-2))
def testRSub(self):
self.assertEqual(self.d - self.offset2, (-self.offset2).apply(self.d))
def testMult1(self):
self.assertEqual(self.d + 10*self.offset, self.d + BDay(10))
def testMult2(self):
self.assertEqual(self.d + (-5*BDay(-10)),
self.d + BDay(50))
def testRollback1(self):
self.assertEqual(BDay(10).rollback(self.d), self.d)
def testRollback2(self):
self.assertEqual(BDay(10).rollback(datetime(2008, 1, 5)), datetime(2008, 1, 4))
def testRollforward1(self):
self.assertEqual(BDay(10).rollforward(self.d), self.d)
def testRollforward2(self):
self.assertEqual(BDay(10).rollforward(datetime(2008, 1, 5)), datetime(2008, 1, 7))
def test_onOffset(self):
tests = [(BDay(), datetime(2008, 1, 1), True),
(BDay(), datetime(2008, 1, 5), False)]
for offset, date, expected in tests:
assertOnOffset(offset, date, expected)
def test_apply(self):
tests = []
tests.append((bday,
{datetime(2008, 1, 1): datetime(2008, 1, 2),
datetime(2008, 1, 4): datetime(2008, 1, 7),
datetime(2008, 1, 5): datetime(2008, 1, 7),
datetime(2008, 1, 6): datetime(2008, 1, 7),
datetime(2008, 1, 7): datetime(2008, 1, 8)}))
tests.append((2*bday,
{datetime(2008, 1, 1): datetime(2008, 1, 3),
datetime(2008, 1, 4): datetime(2008, 1, 8),
datetime(2008, 1, 5): datetime(2008, 1, 8),
datetime(2008, 1, 6): datetime(2008, 1, 8),
datetime(2008, 1, 7): datetime(2008, 1, 9)}))
tests.append((-bday,
{datetime(2008, 1, 1): datetime(2007, 12, 31),
datetime(2008, 1, 4): datetime(2008, 1, 3),
datetime(2008, 1, 5): datetime(2008, 1, 4),
datetime(2008, 1, 6): datetime(2008, 1, 4),
datetime(2008, 1, 7): datetime(2008, 1, 4),
datetime(2008, 1, 8): datetime(2008, 1, 7)}))
tests.append((-2*bday,
{datetime(2008, 1, 1): datetime(2007, 12, 28),
datetime(2008, 1, 4): datetime(2008, 1, 2),
datetime(2008, 1, 5): datetime(2008, 1, 3),
datetime(2008, 1, 6): datetime(2008, 1, 3),
datetime(2008, 1, 7): datetime(2008, 1, 3),
datetime(2008, 1, 8): datetime(2008, 1, 4),
datetime(2008, 1, 9): datetime(2008, 1, 7)}))
tests.append((BDay(0),
{datetime(2008, 1, 1): datetime(2008, 1, 1),
datetime(2008, 1, 4): datetime(2008, 1, 4),
datetime(2008, 1, 5): datetime(2008, 1, 7),
datetime(2008, 1, 6): datetime(2008, 1, 7),
datetime(2008, 1, 7): datetime(2008, 1, 7)}))
for dateOffset, cases in tests:
for baseDate, expected in cases.iteritems():
assertEq(dateOffset, baseDate, expected)
def test_apply_corner(self):
self.assertRaises(Exception, BDay().apply, BMonthEnd())
def assertOnOffset(offset, date, expected):
actual = offset.onOffset(date)
assert actual == expected
class TestWeek(unittest.TestCase):
def test_corner(self):
self.assertRaises(Exception, Week, weekday=7)
self.assertRaises(Exception, Week, weekday=-1)
def test_isAnchored(self):
self.assert_(Week(weekday=0).isAnchored())
self.assert_(not Week().isAnchored())
self.assert_(not Week(2, weekday=2).isAnchored())
self.assert_(not Week(2).isAnchored())
def test_offset(self):
tests = []
tests.append((Week(), # not business week
{datetime(2008, 1, 1): datetime(2008, 1, 8),
datetime(2008, 1, 4): datetime(2008, 1, 11),
datetime(2008, 1, 5): datetime(2008, 1, 12),
datetime(2008, 1, 6): datetime(2008, 1, 13),
datetime(2008, 1, 7): datetime(2008, 1, 14)}))
tests.append((Week(weekday=0), # Mon
{datetime(2007, 12, 31): datetime(2008, 1, 7),
datetime(2008, 1, 4): datetime(2008, 1, 7),
datetime(2008, 1, 5): datetime(2008, 1, 7),
datetime(2008, 1, 6): datetime(2008, 1, 7),
datetime(2008, 1, 7): datetime(2008, 1, 14)}))
tests.append((Week(0, weekday=0), # n=0 -> roll forward. Mon
{datetime(2007, 12, 31): datetime(2007, 12, 31),
datetime(2008, 1, 4): datetime(2008, 1, 7),
datetime(2008, 1, 5): datetime(2008, 1, 7),
datetime(2008, 1, 6): datetime(2008, 1, 7),
datetime(2008, 1, 7): datetime(2008, 1, 7)}))
        tests.append((Week(-2, weekday=1),  # n=-2 -> roll back two weeks. Tue
{datetime(2010, 4, 6): datetime(2010, 3, 23),
datetime(2010, 4, 8): datetime(2010, 3, 30),
datetime(2010, 4, 5): datetime(2010, 3, 23)}))
for dateOffset, cases in tests:
for baseDate, expected in cases.iteritems():
assertEq(dateOffset, baseDate, expected)
def test_onOffset(self):
tests = [(Week(weekday=0), datetime(2008, 1, 1), False),
(Week(weekday=0), datetime(2008, 1, 2), False),
(Week(weekday=0), datetime(2008, 1, 3), False),
(Week(weekday=0), datetime(2008, 1, 4), False),
(Week(weekday=0), datetime(2008, 1, 5), False),
(Week(weekday=0), datetime(2008, 1, 6), False),
(Week(weekday=0), datetime(2008, 1, 7), True),
(Week(weekday=1), datetime(2008, 1, 1), True),
(Week(weekday=1), datetime(2008, 1, 2), False),
(Week(weekday=1), datetime(2008, 1, 3), False),
(Week(weekday=1), datetime(2008, 1, 4), False),
(Week(weekday=1), datetime(2008, 1, 5), False),
(Week(weekday=1), datetime(2008, 1, 6), False),
(Week(weekday=1), datetime(2008, 1, 7), False),
(Week(weekday=2), datetime(2008, 1, 1), False),
(Week(weekday=2), datetime(2008, 1, 2), True),
(Week(weekday=2), datetime(2008, 1, 3), False),
(Week(weekday=2), datetime(2008, 1, 4), False),
(Week(weekday=2), datetime(2008, 1, 5), False),
(Week(weekday=2), datetime(2008, 1, 6), False),
(Week(weekday=2), datetime(2008, 1, 7), False),
(Week(weekday=3), datetime(2008, 1, 1), False),
(Week(weekday=3), datetime(2008, 1, 2), False),
(Week(weekday=3), datetime(2008, 1, 3), True),
(Week(weekday=3), datetime(2008, 1, 4), False),
(Week(weekday=3), datetime(2008, 1, 5), False),
(Week(weekday=3), datetime(2008, 1, 6), False),
(Week(weekday=3), datetime(2008, 1, 7), False),
(Week(weekday=4), datetime(2008, 1, 1), False),
(Week(weekday=4), datetime(2008, 1, 2), False),
(Week(weekday=4), datetime(2008, 1, 3), False),
(Week(weekday=4), datetime(2008, 1, 4), True),
(Week(weekday=4), datetime(2008, 1, 5), False),
(Week(weekday=4), datetime(2008, 1, 6), False),
(Week(weekday=4), datetime(2008, 1, 7), False),
(Week(weekday=5), datetime(2008, 1, 1), False),
(Week(weekday=5), datetime(2008, 1, 2), False),
(Week(weekday=5), datetime(2008, 1, 3), False),
(Week(weekday=5), datetime(2008, 1, 4), False),
(Week(weekday=5), datetime(2008, 1, 5), True),
(Week(weekday=5), datetime(2008, 1, 6), False),
(Week(weekday=5), datetime(2008, 1, 7), False),
(Week(weekday=6), datetime(2008, 1, 1), False),
(Week(weekday=6), datetime(2008, 1, 2), False),
(Week(weekday=6), datetime(2008, 1, 3), False),
(Week(weekday=6), datetime(2008, 1, 4), False),
(Week(weekday=6), datetime(2008, 1, 5), False),
(Week(weekday=6), datetime(2008, 1, 6), True),
(Week(weekday=6), datetime(2008, 1, 7), False),
]
for offset, date, expected in tests:
assertOnOffset(offset, date, expected)
class TestBMonthEnd(unittest.TestCase):
def test_offset(self):
tests = []
tests.append((BMonthEnd(),
{datetime(2008, 1, 1): datetime(2008, 1, 31),
datetime(2008, 1, 31): datetime(2008, 2, 29),
datetime(2006, 12, 29): datetime(2007, 1, 31),
datetime(2006, 12, 31): datetime(2007, 1, 31),
datetime(2007, 1, 1): datetime(2007, 1, 31),
datetime(2006, 12, 1): datetime(2006, 12, 29)}))
tests.append((BMonthEnd(0),
{datetime(2008, 1, 1): datetime(2008, 1, 31),
datetime(2008, 1, 31): datetime(2008, 1, 31),
datetime(2006, 12, 29): datetime(2006, 12, 29),
datetime(2006, 12, 31): datetime(2007, 1, 31),
datetime(2007, 1, 1): datetime(2007, 1, 31)}))
tests.append((BMonthEnd(2),
{datetime(2008, 1, 1): datetime(2008, 2, 29),
datetime(2008, 1, 31): datetime(2008, 3, 31),
datetime(2006, 12, 29): datetime(2007, 2, 28),
datetime(2006, 12, 31): datetime(2007, 2, 28),
datetime(2007, 1, 1): datetime(2007, 2, 28),
datetime(2006, 11, 1): datetime(2006, 12, 29)}))
tests.append((BMonthEnd(-1),
{datetime(2007, 1, 1): datetime(2006, 12, 29),
datetime(2008, 6, 30): datetime(2008, 5, 30),
datetime(2008, 12, 31): datetime(2008, 11, 28),
datetime(2006, 12, 29): datetime(2006, 11, 30),
datetime(2006, 12, 30): datetime(2006, 12, 29),
datetime(2007, 1, 1): datetime(2006, 12, 29)}))
for dateOffset, cases in tests:
for baseDate, expected in cases.iteritems():
assertEq(dateOffset, baseDate, expected)
def test_onOffset(self):
tests = [(BMonthEnd(), datetime(2007, 12, 31), True),
(BMonthEnd(), datetime(2008, 1, 1), False)]
for offset, date, expected in tests:
assertOnOffset(offset, date, expected)
class TestMonthEnd(unittest.TestCase):
def test_offset(self):
tests = []
tests.append((MonthEnd(),
{datetime(2008, 1, 1): datetime(2008, 1, 31),
datetime(2008, 1, 31): datetime(2008, 2, 29),
datetime(2006, 12, 29): datetime(2006, 12, 31),
datetime(2006, 12, 31): datetime(2007, 1, 31),
datetime(2007, 1, 1): datetime(2007, 1, 31),
datetime(2006, 12, 1): datetime(2006, 12, 31)}))
tests.append((MonthEnd(0),
{datetime(2008, 1, 1): datetime(2008, 1, 31),
datetime(2008, 1, 31): datetime(2008, 1, 31),
datetime(2006, 12, 29): datetime(2006, 12, 31),
datetime(2006, 12, 31): datetime(2006, 12, 31),
datetime(2007, 1, 1): datetime(2007, 1, 31)}))
tests.append((MonthEnd(2),
{datetime(2008, 1, 1): datetime(2008, 2, 29),
datetime(2008, 1, 31): datetime(2008, 3, 31),
datetime(2006, 12, 29): datetime(2007, 1, 31),
datetime(2006, 12, 31): datetime(2007, 2, 28),
datetime(2007, 1, 1): datetime(2007, 2, 28),
datetime(2006, 11, 1): datetime(2006, 12, 31)}))
tests.append((MonthEnd(-1),
{datetime(2007, 1, 1): datetime(2006, 12, 31),
datetime(2008, 6, 30): datetime(2008, 5, 31),
datetime(2008, 12, 31): datetime(2008, 11, 30),
datetime(2006, 12, 29): datetime(2006, 11, 30),
datetime(2006, 12, 30): datetime(2006, 11, 30),
datetime(2007, 1, 1): datetime(2006, 12, 31)}))
for dateOffset, cases in tests:
for baseDate, expected in cases.iteritems():
assertEq(dateOffset, baseDate, expected)
def test_onOffset(self):
tests = [(MonthEnd(), datetime(2007, 12, 31), True),
(MonthEnd(), datetime(2008, 1, 1), False)]
for offset, date, expected in tests:
assertOnOffset(offset, date, expected)
class TestBQuarterEnd(unittest.TestCase):
def test_corner(self):
self.assertRaises(Exception, BQuarterEnd, startingMonth=4)
self.assertRaises(Exception, BQuarterEnd, startingMonth=-1)
def test_isAnchored(self):
self.assert_(BQuarterEnd(startingMonth=1).isAnchored())
self.assert_(BQuarterEnd().isAnchored())
self.assert_(not BQuarterEnd(2, startingMonth=1).isAnchored())
def test_offset(self):
tests = []
tests.append((BQuarterEnd(startingMonth=1),
{datetime(2008, 1, 1): datetime(2008, 1, 31),
datetime(2008, 1, 31): datetime(2008, 4, 30),
datetime(2008, 2, 15): datetime(2008, 4, 30),
datetime(2008, 2, 29): datetime(2008, 4, 30),
datetime(2008, 3, 15): datetime(2008, 4, 30),
datetime(2008, 3, 31): datetime(2008, 4, 30),
datetime(2008, 4, 15): datetime(2008, 4, 30),
datetime(2008, 4, 30): datetime(2008, 7, 31),}))
tests.append((BQuarterEnd(startingMonth=2),
{datetime(2008, 1, 1): datetime(2008, 2, 29),
datetime(2008, 1, 31): datetime(2008, 2, 29),
datetime(2008, 2, 15): datetime(2008, 2, 29),
datetime(2008, 2, 29): datetime(2008, 5, 30),
datetime(2008, 3, 15): datetime(2008, 5, 30),
datetime(2008, 3, 31): datetime(2008, 5, 30),
datetime(2008, 4, 15): datetime(2008, 5, 30),
datetime(2008, 4, 30): datetime(2008, 5, 30),}))
tests.append((BQuarterEnd(startingMonth=1, n=0),
{datetime(2008, 1, 1): datetime(2008, 1, 31),
datetime(2008, 1, 31): datetime(2008, 1, 31),
datetime(2008, 2, 15): datetime(2008, 4, 30),
datetime(2008, 2, 29): datetime(2008, 4, 30),
datetime(2008, 3, 15): datetime(2008, 4, 30),
datetime(2008, 3, 31): datetime(2008, 4, 30),
datetime(2008, 4, 15): datetime(2008, 4, 30),
datetime(2008, 4, 30): datetime(2008, 4, 30),}))
tests.append((BQuarterEnd(startingMonth=1, n=-1),
{datetime(2008, 1, 1): datetime(2007, 10, 31),
datetime(2008, 1, 31): datetime(2007, 10, 31),
datetime(2008, 2, 15): datetime(2008, 1, 31),
datetime(2008, 2, 29): datetime(2008, 1, 31),
datetime(2008, 3, 15): datetime(2008, 1, 31),
datetime(2008, 3, 31): datetime(2008, 1, 31),
datetime(2008, 4, 15): datetime(2008, 1, 31),
datetime(2008, 4, 30): datetime(2008, 1, 31),}))
tests.append((BQuarterEnd(startingMonth=1, n=2),
{datetime(2008, 1, 31): datetime(2008, 7, 31),
datetime(2008, 2, 15): datetime(2008, 7, 31),
datetime(2008, 2, 29): datetime(2008, 7, 31),
datetime(2008, 3, 15): datetime(2008, 7, 31),
datetime(2008, 3, 31): datetime(2008, 7, 31),
datetime(2008, 4, 15): datetime(2008, 7, 31),
datetime(2008, 4, 30): datetime(2008, 10, 31),}))
for dateOffset, cases in tests:
for baseDate, expected in cases.iteritems():
assertEq(dateOffset, baseDate, expected)
# corner
offset = BQuarterEnd(n=-1, startingMonth=1)
self.assertEqual(datetime(2010, 1, 31) + offset, datetime(2010, 1, 29))
def test_onOffset(self):
tests = [(BQuarterEnd(1, startingMonth=1), datetime(2008, 1, 31), True),
(BQuarterEnd(1, startingMonth=1), datetime(2007, 12, 31), False),
(BQuarterEnd(1, startingMonth=1), datetime(2008, 2, 29), False),
(BQuarterEnd(1, startingMonth=1), datetime(2007, 3, 30), False),
(BQuarterEnd(1, startingMonth=1), datetime(2007, 3, 31), False),
(BQuarterEnd(1, startingMonth=1), datetime(2008, 4, 30), True),
(BQuarterEnd(1, startingMonth=1), datetime(2008, 5, 30), False),
(BQuarterEnd(1, startingMonth=1), datetime(2007, 6, 29), False),
(BQuarterEnd(1, startingMonth=1), datetime(2007, 6, 30), False),
(BQuarterEnd(1, startingMonth=2), datetime(2008, 1, 31), False),
(BQuarterEnd(1, startingMonth=2), datetime(2007, 12, 31), False),
(BQuarterEnd(1, startingMonth=2), datetime(2008, 2, 29), True),
(BQuarterEnd(1, startingMonth=2), datetime(2007, 3, 30), False),
(BQuarterEnd(1, startingMonth=2), datetime(2007, 3, 31), False),
(BQuarterEnd(1, startingMonth=2), datetime(2008, 4, 30), False),
(BQuarterEnd(1, startingMonth=2), datetime(2008, 5, 30), True),
(BQuarterEnd(1, startingMonth=2), datetime(2007, 6, 29), False),
(BQuarterEnd(1, startingMonth=2), datetime(2007, 6, 30), False),
(BQuarterEnd(1, startingMonth=3), datetime(2008, 1, 31), False),
(BQuarterEnd(1, startingMonth=3), datetime(2007, 12, 31), True),
(BQuarterEnd(1, startingMonth=3), datetime(2008, 2, 29), False),
(BQuarterEnd(1, startingMonth=3), datetime(2007, 3, 30), True),
(BQuarterEnd(1, startingMonth=3), datetime(2007, 3, 31), False),
(BQuarterEnd(1, startingMonth=3), datetime(2008, 4, 30), False),
(BQuarterEnd(1, startingMonth=3), datetime(2008, 5, 30), False),
(BQuarterEnd(1, startingMonth=3), datetime(2007, 6, 29), True),
(BQuarterEnd(1, startingMonth=3), datetime(2007, 6, 30), False),
]
for offset, date, expected in tests:
assertOnOffset(offset, date, expected)
class TestYearBegin(unittest.TestCase):
def test_offset(self):
tests = []
tests.append((YearBegin(),
{datetime(2008, 1, 1): datetime(2009, 1, 1),
datetime(2008, 6, 30): datetime(2009, 1, 1),
datetime(2008, 12, 31): datetime(2009, 1, 1),
datetime(2005, 12, 30): datetime(2006, 1, 1),
datetime(2005, 12, 31): datetime(2006, 1, 1),}))
tests.append((YearBegin(0),
{datetime(2008, 1, 1): datetime(2008, 1, 1),
datetime(2008, 6, 30): datetime(2009, 1, 1),
datetime(2008, 12, 31): datetime(2009, 1, 1),
datetime(2005, 12, 30): datetime(2006, 1, 1),
datetime(2005, 12, 31): datetime(2006, 1, 1),}))
tests.append((YearBegin(-1),
{datetime(2007, 1, 1): datetime(2006, 1, 1),
datetime(2008, 6, 30): datetime(2008, 1, 1),
datetime(2008, 12, 31): datetime(2008, 1, 1),
datetime(2006, 12, 29): datetime(2006, 1, 1),
datetime(2006, 12, 30): datetime(2006, 1, 1),
datetime(2007, 1, 1): datetime(2006, 1, 1),}))
tests.append((YearBegin(-2),
{datetime(2007, 1, 1): datetime(2005, 1, 1),
datetime(2008, 6, 30): datetime(2007, 1, 1),
datetime(2008, 12, 31): datetime(2007, 1, 1),}))
for dateOffset, cases in tests:
for baseDate, expected in cases.iteritems():
assertEq(dateOffset, baseDate, expected)
def test_onOffset(self):
tests = [
(YearBegin(), datetime(2007, 1, 3), False),
(YearBegin(), datetime(2008, 1, 1), True),
(YearBegin(), datetime(2006, 12, 31), False),
(YearBegin(), datetime(2006, 1, 2), False),
]
for offset, date, expected in tests:
assertOnOffset(offset, date, expected)
class TestBYearEndLagged(unittest.TestCase):
def test_bad_month_fail(self):
self.assertRaises(Exception, BYearEnd, month=13)
self.assertRaises(Exception, BYearEnd, month=0)
def test_offset(self):
tests = []
tests.append((BYearEnd(month=6),
{datetime(2008, 1, 1): datetime(2008, 6, 30),
datetime(2007, 6, 30): datetime(2008, 6, 30)},
))
tests.append((BYearEnd(n=-1, month=6),
{datetime(2008, 1, 1): datetime(2007, 6, 29),
datetime(2007, 6, 30): datetime(2007, 6, 29)},
))
for dateOffset, cases in tests:
for baseDate, expected in cases.iteritems():
self.assertEqual(baseDate + dateOffset, expected)
def test_roll(self):
offset = BYearEnd(month=6)
date = datetime(2009, 11, 30)
self.assertEqual(offset.rollforward(date), datetime(2010, 6, 30))
self.assertEqual(offset.rollback(date), datetime(2009, 6, 30))
def test_onOffset(self):
tests = [
(BYearEnd(month=2), datetime(2007, 2, 28), True),
(BYearEnd(month=6), datetime(2007, 6, 30), False),
]
for offset, date, expected in tests:
assertOnOffset(offset, date, expected)
class TestBYearEnd(unittest.TestCase):
def test_offset(self):
tests = []
tests.append((BYearEnd(),
{datetime(2008, 1, 1): datetime(2008, 12, 31),
datetime(2008, 6, 30): datetime(2008, 12, 31),
datetime(2008, 12, 31): datetime(2009, 12, 31),
datetime(2005, 12, 30): datetime(2006, 12, 29),
datetime(2005, 12, 31): datetime(2006, 12, 29),}))
tests.append((BYearEnd(0),
{datetime(2008, 1, 1): datetime(2008, 12, 31),
datetime(2008, 6, 30): datetime(2008, 12, 31),
datetime(2008, 12, 31): datetime(2008, 12, 31),
datetime(2005, 12, 31): datetime(2006, 12, 29),}))
tests.append((BYearEnd(-1),
{datetime(2007, 1, 1): datetime(2006, 12, 29),
datetime(2008, 6, 30): datetime(2007, 12, 31),
datetime(2008, 12, 31): datetime(2007, 12, 31),
datetime(2006, 12, 29): datetime(2005, 12, 30),
datetime(2006, 12, 30): datetime(2006, 12, 29),
datetime(2007, 1, 1): datetime(2006, 12, 29),}))
tests.append((BYearEnd(-2),
{datetime(2007, 1, 1): datetime(2005, 12, 30),
datetime(2008, 6, 30): datetime(2006, 12, 29),
datetime(2008, 12, 31): datetime(2006, 12, 29),}))
for dateOffset, cases in tests:
for baseDate, expected in cases.iteritems():
assertEq(dateOffset, baseDate, expected)
def test_onOffset(self):
tests = [
(BYearEnd(), datetime(2007, 12, 31), True),
(BYearEnd(), datetime(2008, 1, 1), False),
(BYearEnd(), datetime(2006, 12, 31), False),
(BYearEnd(), datetime(2006, 12, 29), True),
]
for offset, date, expected in tests:
assertOnOffset(offset, date, expected)
class TestYearEnd(unittest.TestCase):
def test_offset(self):
tests = []
tests.append((YearEnd(),
{datetime(2008, 1, 1): datetime(2008, 12, 31),
datetime(2008, 6, 30): datetime(2008, 12, 31),
datetime(2008, 12, 31): datetime(2009, 12, 31),
datetime(2005, 12, 30): datetime(2005, 12, 31),
datetime(2005, 12, 31): datetime(2006, 12, 31),}))
tests.append((YearEnd(0),
{datetime(2008, 1, 1): datetime(2008, 12, 31),
datetime(2008, 6, 30): datetime(2008, 12, 31),
datetime(2008, 12, 31): datetime(2008, 12, 31),
datetime(2005, 12, 30): datetime(2005, 12, 31),}))
tests.append((YearEnd(-1),
{datetime(2007, 1, 1): datetime(2006, 12, 31),
datetime(2008, 6, 30): datetime(2007, 12, 31),
datetime(2008, 12, 31): datetime(2007, 12, 31),
datetime(2006, 12, 29): datetime(2005, 12, 31),
datetime(2006, 12, 30): datetime(2005, 12, 31),
datetime(2007, 1, 1): datetime(2006, 12, 31),}))
tests.append((YearEnd(-2),
{datetime(2007, 1, 1): datetime(2005, 12, 31),
datetime(2008, 6, 30): datetime(2006, 12, 31),
datetime(2008, 12, 31): datetime(2006, 12, 31),}))
for dateOffset, cases in tests:
for baseDate, expected in cases.iteritems():
assertEq(dateOffset, baseDate, expected)
def test_onOffset(self):
tests = [
(YearEnd(), datetime(2007, 12, 31), True),
(YearEnd(), datetime(2008, 1, 1), False),
(YearEnd(), datetime(2006, 12, 31), True),
(YearEnd(), datetime(2006, 12, 29), False),
]
for offset, date, expected in tests:
assertOnOffset(offset, date, expected)
def testOnOffset():
tests = [#(QuarterEnd(1, startingMonth=1), datetime(2008, 1, 31), True),
#(QuarterEnd(1, startingMonth=1), datetime(2007, 12, 31), False),
#(QuarterEnd(1, startingMonth=1), datetime(2008, 2, 29), False),
#(QuarterEnd(1, startingMonth=3), datetime(2007, 3, 30), False),
#(QuarterEnd(1, startingMonth=3), datetime(2007, 3, 31), False),
#(QuarterEnd(1, startingMonth=1), datetime(2008, 4, 30), True),
#(QuarterEnd(1, startingMonth=2), datetime(2008, 5, 30), False),
#(QuarterEnd(1, startingMonth=3), datetime(2008, 6, 29), False),
#(QuarterEnd(1, startingMonth=3), datetime(2008, 6, 30), False),
#(QuarterEnd(1, startingMonth=2), datetime(2008, 1, 31), False),
#(QuarterEnd(1, startingMonth=2), datetime(2007, 12, 31), False),
#(QuarterEnd(1, startingMonth=2), datetime(2008, 2, 29), True),
#(QuarterEnd(1, startingMonth=3), datetime(2007, 3, 30), False),
#(QuarterEnd(1, startingMonth=3), datetime(2007, 3, 31), False),
#(QuarterEnd(1, startingMonth=2), datetime(2008, 4, 30), False),
#(QuarterEnd(1, startingMonth=2), datetime(2008, 5, 30), True),
#(QuarterEnd(1, startingMonth=3), datetime(2008, 6, 29), False),
#(QuarterEnd(1, startingMonth=3), datetime(2008, 6, 30), False),
#(QuarterEnd(1, startingMonth=3), datetime(2008, 1, 31), False),
#(QuarterEnd(1, startingMonth=3), datetime(2007, 12, 31), False),
#(QuarterEnd(1, startingMonth=3), datetime(2008, 2, 29), False),
#(QuarterEnd(1, startingMonth=3), datetime(2007, 3, 30), False),
#(QuarterEnd(1, startingMonth=3), datetime(2007, 3, 31), True),
#(QuarterEnd(1, startingMonth=3), datetime(2008, 4, 30), False),
#(QuarterEnd(1, startingMonth=3), datetime(2008, 5, 30), False),
#(QuarterEnd(1, startingMonth=3), datetime(2008, 6, 29), False),
#(QuarterEnd(1, startingMonth=3), datetime(2008, 6, 30), True),
]
for offset, date, expected in tests:
assertOnOffset(offset, date, expected)
def assertEq(dateOffset, baseDate, expected):
actual = dateOffset + baseDate
assert actual == expected
def test_Hour():
assertEq(Hour(), datetime(2010, 1, 1), datetime(2010, 1, 1, 1))
assertEq(Hour(-1), datetime(2010, 1, 1, 1), datetime(2010, 1, 1))
assertEq(2 * Hour(), datetime(2010, 1, 1), datetime(2010, 1, 1, 2))
assertEq(-1 * Hour(), datetime(2010, 1, 1, 1), datetime(2010, 1, 1))
assert (Hour(3) + Hour(2)) == Hour(5)
assert (Hour(3) - Hour(2)) == Hour()
def test_Minute():
assertEq(Minute(), datetime(2010, 1, 1), datetime(2010, 1, 1, 0, 1))
assertEq(Minute(-1), datetime(2010, 1, 1, 0, 1), datetime(2010, 1, 1))
assertEq(2 * Minute(), datetime(2010, 1, 1), datetime(2010, 1, 1, 0, 2))
assertEq(-1 * Minute(), datetime(2010, 1, 1, 0, 1), datetime(2010, 1, 1))
assert (Minute(3) + Minute(2)) == Minute(5)
assert (Minute(3) - Minute(2)) == Minute()
def test_Second():
assertEq(Second(), datetime(2010, 1, 1), datetime(2010, 1, 1, 0, 0, 1))
assertEq(Second(-1), datetime(2010, 1, 1, 0, 0, 1), datetime(2010, 1, 1))
assertEq(2 * Second(), datetime(2010, 1, 1), datetime(2010, 1, 1, 0, 0, 2))
assertEq(-1 * Second(), datetime(2010, 1, 1, 0, 0, 1), datetime(2010, 1, 1))
assert (Second(3) + | Second(2) | pandas.core.datetools.Second |
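# Illustrative sketch of the offset arithmetic exercised above. The api column points
# at the legacy pandas.core.datetools path; in current pandas the same classes are
# importable from pandas.tseries.offsets (an assumption about where this is run).
import pandas as pd
from pandas.tseries.offsets import Hour, Minute, Second

ts = pd.Timestamp(2010, 1, 1)
assert ts + Second(3) == pd.Timestamp(2010, 1, 1, 0, 0, 3)
assert ts + 2 * Hour() == pd.Timestamp(2010, 1, 1, 2)
assert Minute(3) + Minute(2) == Minute(5)  # same-type offsets add their counts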
import os
import sys
import argparse
import pandas as pd
from scipy import stats
sys.path.append(os.path.abspath(".."))
from survey._app import CODE_DIR, app
from core.models.metrics import gain_mean, rejection_ratio, gain
from utils import get_con_and_dfs, get_all_con_and_dfs
import metrics
STATS_FUNCTIONS = {}
# overwritten by a command line flag. If true, percentage will be generated instead of frequency
USE_PERCENTAGE = None
USE_LABELS = None
SELECTION = None
AI_FEEDBACK_ACCURACY_SCALAS = {
"ai_much_worse": "AI much worse than PROPOSER",
"ai_worse": "AI worse",
"ai_sligthly_worse": "AI slighly worse",
"ai_equal_to_proposer": "AI equal to PROPOSER",
"ai_slighly_better": "AI slighly better",
"ai_better": "AI better",
"ai_much_better": "AI much better than the PROPOSER",
}
AI_FEEDBACK_SCALAS = {
1: "strongly_disagree",
2: "disagree",
3: "slightly_disagree",
4: "neutral",
5: "slightly_agree",
6: "agree",
7: "strongly_agree"
}
def get_parser():
parser = argparse.ArgumentParser(description='Generate statistics for a given treatment')
parser.add_argument('--use-percentage', help='Generate percentages instead of frequencies', action='store_true')
parser.add_argument('--use-latex', help='Print results as latex table', action='store_true')
parser.add_argument('--use-labels', help='Print results using description labels', action='store_true')
parser.add_argument('--output-dir', help='Output directory where csv files were exported')
parser.add_argument('--selection', help='Whether to restrict the stats to responder or proposers', choices=['prop', 'resp'])
parser.add_argument('treatments', help='Comma separated treatments')
return parser
ALL_CONS, ALL_DFS = get_all_con_and_dfs()
def mark_for_stats(label=None):
def _mark_for_stats(function, label=label):
if label is None:
label = function.__name__[4:]
STATS_FUNCTIONS[label] = function
return function
return _mark_for_stats
def get_count_participants(treatment, con, dfs=None, use_percentage=None, use_labels=None):
sql = f"""
select * from result__{treatment}_survey
where worker_id not in (
select worker_id from main__txx where worker_code == 'dropped'
)
"""
if SELECTION == "resp":
sql = f"""
select worker_id from result__{treatment}_resp
"""
elif SELECTION == "prop":
sql = f"""
select worker_id from result__{treatment}_prop
"""
else:
sql = f"""
select * from (
select worker_id from result__{treatment}_resp
union
select worker_id from result__{treatment}_prop
)
"""
df = | pd.read_sql(sql, con) | pandas.read_sql |
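# Self-contained sketch of the pd.read_sql call completed above, against an in-memory
# SQLite database; the table and column contents here are invented for illustration
# and are not the real survey schema.
import sqlite3
import pandas as pd

con = sqlite3.connect(":memory:")
con.execute("CREATE TABLE result__t00_prop (worker_id TEXT)")
con.executemany("INSERT INTO result__t00_prop VALUES (?)", [("w1",), ("w2",)])
con.commit()
df = pd.read_sql("select worker_id from result__t00_prop", con)
assert list(df.columns) == ["worker_id"] and len(df) == 2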
import numpy as np
import pandas as pd
from scripts.utils import remove_outliers, linear_regression
import seaborn as sns
import matplotlib.pyplot as plt
from sklearn.svm import SVC
from sklearn.preprocessing import normalize
#from sklearn.preprocessing import MinMaxScaler
from sklearn.metrics import mean_absolute_error
from sklearn.metrics import classification_report
from sklearn.model_selection import train_test_split
import torch
lan = "all"
if lan == "es":
language = "Spanish"
elif lan == "fr":
language = "French"
elif lan == "all":
language = "French & Spanish"
# Load time taken to translate and calculate sentence length
if lan == "all":
es = | pd.read_csv("data/un-timed-sentences/en-es.processed", sep='\t') | pandas.read_csv |
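# The en-es.processed file is not included here, so this is a small stand-in showing
# the same tab-separated pd.read_csv call; the column names are assumptions, not the
# real schema of the timed-sentences data.
import io
import pandas as pd

tsv = "source\ttime\nthe cat sat on the mat\t12.5\n"
es = pd.read_csv(io.StringIO(tsv), sep="\t")
assert list(es.columns) == ["source", "time"] and len(es) == 1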
#!/usr/bin/env python
import datetime
import pandas as pd
from pathlib import Path
new_tutorial_name = {
'16S Microbial Analysis with Mothur': '16S Microbial Analysis with mothur (extended)',
'16S Microbial Analysis with mothur': '16S Microbial Analysis with mothur (extended)',
'Quality Control ': 'Quality Control',
'RNA-Seq reads to counts': '1: RNA-Seq reads to counts',
'RNA-seq counts to genes': '2: RNA-seq counts to genes',
'RNA-seq genes to pathways': '3: RNA-seq genes to pathways',
'Introduction to Genome Assembly': 'An Introduction to Genome Assembly',
'EWAS data analysis of 450k data': 'Infinium Human Methylation BeadChip',
'Creating a new tutorial - Defining the technical infrastructure': 'Tools, Data, and Workflows for tutorials',
'Creating a new tutorial - Writing content in Markdown': 'Creating content in Markdown',
'Running the Galaxy Training material website locally': 'Running the GTN website locally',
'Visualizations: JavaScript plugins': 'JavaScript plugins',
'Compute and analyze Essential Biodiversity Variables with PAMPA toolsuite': 'Compute and analyze biodiversity metrics with PAMPA toolsuite',
'Ephemeris for Galaxy Tool Management': 'Galaxy Tool Management with Ephemeris',
'Collections: Rule Based Uploader': 'Rule Based Uploader',
'Collections: Using dataset collection': 'Using dataset collections',
'Data: Downloading and Deleting Data in Galaxy': 'Downloading and Deleting Data in Galaxy',
'Histories: Understanding Galaxy history system': 'Understanding Galaxy history system',
'Jupyter: Use Jupyter notebooks in Galaxy': 'Use Jupyter notebooks in Galaxy',
'Using dataset collection': 'Using dataset collections',
'Workflows: Extracting Workflows from Histories': 'Extracting Workflows from Histories',
'Workflows: Using Workflow Parameters': 'Using Workflow Parameters',
'Exome sequencing data analysis': 'Exome sequencing data analysis for diagnosing a genetic disease',
'Galaxy Tool Management': 'Galaxy Tool Management with Ephemeris',
'Virtual screening of the SARS-CoV-2 main protease with rDock and pose scoring': 'Virtual screening of the SARS-CoV-2 main protease with rxDock and pose scoring'
}
new_topic_for_tuto = {
'Formation of the Super-Structures on the Inactive X': 'Epigenetics',
'Identification of the binding sites of the Estrogen receptor': 'Epigenetics',
'Identification of the binding sites of the T-cell acute lymphocytic leukemia protein 1 (TAL1)': 'Epigenetics',
'RAD-Seq Reference-based data analysis': 'Ecology',
'RAD-Seq de-novo data analysis': 'Ecology',
'RAD-Seq to construct genetic maps': 'Ecology'
}
new_topics = {
'User Interface and Features': 'Using Galaxy and Managing your Data',
'Data Manipulation': 'Using Galaxy and Managing your Data',
'User Interface and Data Manipulation': 'Using Galaxy and Managing your Data',
'Assembly) is not working I can do up to multiQC and after unicycler not working': 'Assembly'
}
acceptable_topics = [
"Assembly",
"Climate",
"Computational chemistry",
"Contributing to the Galaxy Training Material",
"Development in Galaxy",
"Ecology",
"Epigenetics",
"Galaxy Server administration",
"Genome Annotation",
"Imaging",
"Introduction to Galaxy Analyses",
"Metabolomics",
"Metagenomics",
"Proteomics",
"Sequence analysis",
"Statistics and machine learning",
"Teaching and Hosting Galaxy training",
"Transcriptomics",
"Using Galaxy and Managing your Data",
"Variant Analysis",
"Visualisation"
]
def extract_tutorial_feedbacks(topic_df, topic_name):
'''Extract pro/con per tutorial for a topic and
write them in a file
:topic_df: dataframe object for the topic
:topic_name: name for the topic, name for the file
'''
grouped_by_tuto = topic_df.groupby(by="tutorial")
with open('../results/%s.md' % topic_name, 'w') as f:
for tuto, group in grouped_by_tuto:
# get groups
tuto_df = grouped_by_tuto.get_group(tuto)
pros = []
cons = []
# get pros/cons
for index, row in tuto_df.iterrows():
if row['pro'] != 'nan':
pros.append("%s (*%s*)" % (row['pro'], row['timestamp']))
if row['con'] != 'nan':
cons.append("%s (*%s*)" % (row['con'], row['timestamp']))
# write in report file
f.write("- **%s**\n" % tuto)
if len(pros) > 0:
f.write(" - Pro:\n - ")
f.write("\n - ".join(pros))
if len(cons) > 0:
f.write("\n - Con:\n - ")
f.write("\n - ".join(cons))
f.write("\n\n")
def fix_tutorial_info(df):
'''Change tutorial topic or title
:param df: dataframe
'''
df = df.copy()
# change topic for some tutorials
for tuto in new_topic_for_tuto:
df.loc[df.tutorial == tuto, 'topic'] = new_topic_for_tuto[tuto]
# rename topic for all tutorials in a topic
for topic in new_topics:
df.topic = (df
.topic
.replace(to_replace=topic, value=new_topics[topic]))
# rename some tutorials
for tuto in new_tutorial_name:
df.loc[df.tutorial == tuto, 'tutorial'] = new_tutorial_name[tuto]
return df
def extract_topic_tutorial_name(df):
'''Extract topic from tutorial name
:param df: dataframe
'''
df = df.copy()
new = df['tutorial_topic'].str[::-1].str.split('(', n = 1, expand = True)
df["tutorial"]= new[1].str[::-1].str[:-1]
df["topic"]= new[0].str[::-1].str[:-1]
return df
def prepare_feedback(url, out_file):
'''Get and prepare feedback CSV file
:param url: URL to Google sheet with feedback answers
:param out_file: Path to output file
'''
df = (pd.read_csv(url, sep='\t')
# rename column
.rename(columns = {'Timestamp': 'timestamp',
'How much did you like this tutorial?': 'note',
'What did you like?': 'pro',
'What could be improved?': 'con',
'Tutorial': 'tutorial_topic',
'Your feedback is always anonymous. Also make it confidential (only visible to admins)?': 'anonymous'})
# extract topic from tutorial name
.pipe(extract_topic_tutorial_name)
# remove rows with NaN on note, pro and con
.dropna(subset=['note', 'pro', 'con'], how='all')
# replace NaN in note by 0
.fillna(value={'note': 0})
# fill other NaN by empty string
.fillna('')
# format
.assign(
#note to integer
note=lambda x: x['note'].astype(int),
# format pro and con to string
pro=lambda x: x['pro'].astype(str),
con=lambda x: x['con'].astype(str),
# extract month and date
timestamp=lambda x: | pd.to_datetime(x['timestamp'], dayfirst=True) | pandas.to_datetime |
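# Quick check of the dayfirst=True behaviour used in the completed call above:
# strings written day/month/year parse with the day taken first.
import pandas as pd

parsed = pd.to_datetime(pd.Series(["03/02/2021 10:15", "25/12/2020 08:00"]), dayfirst=True)
assert parsed.dt.month.tolist() == [2, 12]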
import pandas as pd
import pytest
import dask.dataframe as dd
pytestmark = pytest.mark.skipif(
not dd._compat.PANDAS_GT_100, reason="BooleanArray added in 1.0.0"
)
def test_meta():
values = pd.array([True, False, None], dtype="boolean")
ds = dd.from_pandas(pd.Series(values), 2)
assert ds.dtype == | pd.BooleanDtype() | pandas.BooleanDtype |
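# The nullable boolean dtype asserted above, shown with plain pandas (no dask needed);
# the None entry becomes pd.NA rather than forcing an object dtype.
import pandas as pd

s = pd.Series(pd.array([True, False, None], dtype="boolean"))
assert s.dtype == pd.BooleanDtype()
assert s.isna().tolist() == [False, False, True]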
"""Autograding process internals for Otter-Grader"""
import os
import json
import pandas as pd
import pickle
import zipfile
from glob import glob
from .runners import create_runner
from .utils import OtterRuntimeError
from ...version import LOGO_WITH_VERSION
from ...utils import chdir
def main(autograder_dir, **kwargs):
"""
Run the autograding process.
Args:
autograder_dir (``str``): the absolute path of the directory in which autograding is occurring
(e.g. on Gradescope, this is ``/autograder``)
**kwargs: keyword arguments for updating configurations in the default configurations
``otter.run.run_autograder.constants.DEFAULT_OPTIONS``; these values override anything
present in ``otter_config.json``
"""
config_fp = os.path.join(autograder_dir, "source", "otter_config.json")
if os.path.isfile(config_fp):
with open(config_fp, encoding="utf-8") as f:
config = json.load(f)
else:
config = {}
config["autograder_dir"] = autograder_dir
runner = create_runner(config, **kwargs)
if runner.get_option("logo"):
# ASCII 8207 is an invisible non-whitespace character; this should prevent gradescope from
# incorrectly left-stripping the whitespace at the beginning of the logo
print(f"{chr(8207)}\n", LOGO_WITH_VERSION, "\n", sep="")
abs_ag_path = os.path.abspath(runner.get_option("autograder_dir"))
with chdir(abs_ag_path):
try:
if runner.get_option("zips"):
with chdir("./submission"):
zips = glob("*.zip")
if len(zips) > 1:
raise OtterRuntimeError("More than one zip file found in submission and 'zips' config is true")
with zipfile.ZipFile(zips[0]) as zf:
zf.extractall()
runner.prepare_files()
scores = runner.run()
with open("results/results.pkl", "wb+") as f:
pickle.dump(scores, f)
output = scores.to_gradescope_dict(runner.get_options())
except OtterRuntimeError as e:
output = {
"score": 0,
"stdout_visibility": "hidden",
"tests": [
{
"name": "Autograder Error",
"output": f"Otter encountered an error when grading this submission:\n\n{e}",
},
],
}
raise e
finally:
if "output" in vars():
with open("./results/results.json", "w+") as f:
json.dump(output, f, indent=4)
print("\n\n", end="")
df = | pd.DataFrame(output["tests"]) | pandas.DataFrame |
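# output["tests"] above is a list of dicts, so the completed call yields one row per
# test case; a reduced sketch with made-up entries.
import pandas as pd

tests = [{"name": "q1", "score": 1.0}, {"name": "q2", "score": 0.5}]
df = pd.DataFrame(tests)
assert df.shape == (2, 2) and list(df.columns) == ["name", "score"]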
"""This module implements two solution for artificial dataset generation. Both
of them use multiple datasets of reference and a given mixing coefficient profile
to generate artificially contaminated dataset by spiking the sources together.
"""
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.pipeline import Pipeline
from itertools import count, cycle
from pathlib import Path
from FlowCytometryTools import PolyGate, FCMeasurement
from bactoml.fcdataset import FCDataSet
from bactoml.df_pipeline import DFLambdaFunction
from bactoml.decision_tree_classifier import HistogramTransform
from bactoml.fcdataset import MissingPathError
class SpikingPoissonModel():
"""
Generate artificial dataset by spiking multiple datasets together.
The sum of independent Poisson variables with rate l1 and l2
follows a Poisson law with rate l1 + l2.
A random Poisson variable following a Poisson law with rate l
over an inteval T can be rescaled to an interval T' by adjusting
the rate to l * T' / T.
Here we assume that the distribution of event over the bins of a
same histogram are independent (*). We also assume that the datasets
used as reference are independent.
The number of events on a histogram bin for a mixture of water A and B
such that V = V_A + V_B = 0.9 mL, the standard volume of a measurement,
follows a Poisson law of rate V_A/V l_A + V_B/V l_B.
(*) we loose whole histogram pattern variations seen in some periodic sources.
"""
def __init__(self, datasets, histogram_pipeline, random_state=None):
"""
Parameters:
-----------
datasets : list of strings,
Path of the directories containing all
the FCS files used as reference.
histogram_pipeline : sklearn Pipeline.
Pipeline applying preprocessing and
histogram transform to a FCDataset.
The histogram transform step should
be called 'histogram' in the sklearn
Pipeline.
random_state : int or None,
Seed for the random number generator.
Can also be interpreted as the ID of the
artificial dataset generated.
"""
#initialize the random number generator.
if random_state:
self.random_state = random_state
self.random = np.random.RandomState(seed=self.random_state)
else:
self.random_state = None
self.random = np.random.RandomState()
#histogram dimensions:
try:
hist_step = histogram_pipeline.named_steps['histogram'].edges
self.dct_dimensions = {channel : len(edges)-1 for channel, edges in hist_step.items()}
except KeyError:
print('The preprocessing pipeline must implement an HistogramTransform step named "histogram".')
#estimate Poisson distribution parameter for all the histogram bins
"""The Poisson rate is given by the mean count per bin scaled to the same
volume for each FCS file (here 0.9 mL). The scaling step is already
implemented in the HistogramTransform.
"""
self.poisson_lambdas = []
for data in datasets:
lambdas = np.zeros(np.prod(list(self.dct_dimensions.values())))
for n, fcs in enumerate(FCDataSet(data)):
h = histogram_pipeline.fit_transform(fcs)
lambdas += (np.round(h['counts'].values) - lambdas) / (n + 1)
self.poisson_lambdas.append(lambdas)
def spike_single_concentration(self, mixing_coeff):
"""Given mixing coefficients, generate artificial histograms for a
mix of different sources.
Parameters:
-----------
mixing_coeff : 1D array containing the mixing coefficients.
Returns:
--------
(i, sample) : generator returning artificial samples and their
index i.
"""
        #mixing coefficients should sum to 1
mixing_coeff = np.array(mixing_coeff) / np.sum(mixing_coeff)
mix_lambdas = np.zeros(np.prod(list(self.dct_dimensions.values())))
for coeff, lambdas in zip(mixing_coeff, self.poisson_lambdas):
mix_lambdas += lambdas * coeff
#sample the Poisson distribution of the mixed samples
for i in count():
sample = np.array([self.random.poisson(lambdas) for lambdas in mix_lambdas]).reshape(list(self.dct_dimensions.values()))
yield (i, sample)
def spike_single_profile(self, profile):
"""Given a sequence of mixing coefficients, generate a sequence of
artificial histograms for mixes of sources with different composition.
Parameters:
-----------
profile : 2D array, (N_steps, N_sources).
For each step of the profile contains the mixing coefficient.
Returns:
--------
(i, sample) : generator returning artificial samples and their index i.
"""
for i, step in enumerate(profile):
mixing_coeff = np.array(step) / np.sum(step)
mix_lambdas = np.zeros(np.prod(list(self.dct_dimensions.values())))
for coeff, lambdas in zip(mixing_coeff, self.poisson_lambdas):
mix_lambdas += lambdas * coeff
sample = np.array([self.random.poisson(lambdas) for lambdas in mix_lambdas]).reshape(list(self.dct_dimensions.values()))
yield (i, sample)
def spike_periodic_profile(self, profile):
"""Given a squence of mixing coefficients, generate a periodic infinite
sequence of artificial historgrams for mixes of sources with different
compositions.
Parameters:
-----------
profile : 2D array, (N_steps, N_sources).
For each step of the profile contains the mixing coefficient.
Returns:
--------
(i, sample) : generator returning artificial samples and their index i.
"""
for i, step in enumerate(cycle(profile)):
mixing_coeff = np.array(step) / np.sum(step)
mix_lambdas = np.zeros(np.prod(list(self.dct_dimensions.values())))
for coeff, lambdas in zip(mixing_coeff, self.poisson_lambdas):
mix_lambdas += lambdas * coeff
sample = np.array([self.random.poisson(lambdas) for lambdas in mix_lambdas]).reshape(list(self.dct_dimensions.values()))
yield (i, sample)
class SpikingEventModel():
"""Generate artificial dataset by spiking multiple datasets together.
Select a random number of FCS files in the dataset and for each
select a subset of events. The number of events selected for each
    dataset is determined by the mixing coefficients.
"""
def __init__(self, directories, columns, random_state=None):
"""
Parameters:
-----------
directories : list of strings,
Path of the directories containing all
the FCS files used as reference.
columns : list of strings,
List of the FCS column names to use when
building the artificial datasets,
e.g. ['SSC', 'FL1', 'FL2']
random_state : int or None,
Seed for the random number generator.
Can also be interpreted as the ID of the
artificial dataset generated.
"""
#initialize the random number generator.
if random_state:
self.random_state = random_state
self.random = np.random.RandomState(seed=self.random_state)
else:
self.random_state = None
self.random = np.random.RandomState()
self.dir_paths = [] #1D list containing the path of the directories containing the FCS files
self.fcs_paths = [] #2D list containing the FCS file paths
for dir_path in directories:
if Path(dir_path).exists():
path = Path(dir_path).resolve()
self.dir_paths.append(path)
self.fcs_paths.append(list(path.glob('**/*.fcs')))
else:
raise MissingPathError(dir_path)
self.columns = columns
def spike_single_concentration(self, mixing_coeff):
"""Given mixing coefficients, generate artificial histograms for a
mix of different sources.
Parameters:
-----------
mixing_coeff : 1D array containing the mixing coefficients.
Returns:
--------
(i, sample) : generator returning artificial samples and their
index i.
"""
#mixing coefficients should sum to 1
mixing_coeff = np.array(mixing_coeff) / np.sum(mixing_coeff)
for i in count():
sample = pd.DataFrame()
for paths, coeff in zip(self.fcs_paths, mixing_coeff):
#number of FCS files to sample from
n_fcs = self.random.randint(0, len(paths))
#fraction of each FCS file event to subsample
fraction = self.random.rand(n_fcs)
fraction *= coeff / np.sum(fraction)
#FCS file indices
indices = self.random.randint(0, len(paths), n_fcs)
#build the artificial FCS file by sampling the reference datasets
for idx, f in zip(indices, fraction):
data = FCMeasurement(ID='', datafile=paths[idx])
sample = sample.append(data.get_data().sample(frac=f, random_state=self.random)[self.columns], ignore_index=True)
yield (i, sample)
def spike_single_profile(self, profile):
"""Given mixing coefficients, generate artificial histograms for a
mix of different sources.
Parameters:
-----------
profile : 2D array, (N_steps, N_sources).
For each step of the profile contains the mixing coefficient.
Returns:
--------
(i, sample) : generator returning artificial samples and their
index i.
"""
for i, step in enumerate(profile):
#mixing coefficients should sum to 1
mixing_coeff = np.array(step) / np.sum(step)
sample = pd.DataFrame()
for paths, coeff in zip(self.fcs_paths, mixing_coeff):
#number of FCS files to sample from
n_fcs = self.random.randint(0, len(paths))
#fraction of each FCS file event to subsample
fraction = self.random.rand(n_fcs)
fraction *= coeff / np.sum(fraction)
#FCS file indices
indices = self.random.randint(0, len(paths), n_fcs)
#build the artificial FCS file by sampling the reference datasets
for idx, f in zip(indices, fraction):
data = FCMeasurement(ID='', datafile=paths[idx])
sample = sample.append(data.get_data().sample(frac=f, random_state=self.random)[self.columns], ignore_index=True)
yield (i, sample)
def spike_periodic_profile(self, profile):
"""Given a squence of mixing coefficients, generate a periodic infinite
sequence of artificial historgrams for mixes of sources with different
compositions.
Parameters:
-----------
profile : 2D array, (N_steps, N_sources).
For each step of the profile contains the mixing coefficient.
Returns:
--------
(i, sample) : generator returning artificial samples and their index i.
"""
for i, step in enumerate(cycle(profile)):
#mixing coefficients should sum to 1
mixing_coeff = np.array(step) / np.sum(step)
sample = | pd.DataFrame() | pandas.DataFrame |
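# The SpikingEventModel methods above grow an empty frame with DataFrame.append,
# which was removed in pandas 2.0; an equivalent accumulate-then-concat pattern
# (with stand-in event columns borrowed from the ['SSC', 'FL1', 'FL2'] example)
# looks like this.
import pandas as pd

pieces = [pd.DataFrame({"SSC": [i], "FL1": [2.0 * i]}) for i in range(3)]
sample = pd.concat(pieces, ignore_index=True) if pieces else pd.DataFrame()
assert sample.index.tolist() == [0, 1, 2]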
import os
import gc
import re
import json
import random
import numpy as np
import pandas as pd
import scipy.io as sio
from tqdm import tqdm
import matplotlib.pyplot as plt
from daisy.utils.data import incorporate_in_ml100k
from scipy.sparse import csr_matrix
from collections import defaultdict
from IPython import embed
def convert_unique_idx(df, col):
column_dict = {x: i for i, x in enumerate(df[col].unique())}
df[col] = df[col].apply(column_dict.get)
assert df[col].min() == 0
assert df[col].max() == len(column_dict) - 1
return df
def cut_down_data_half(df):
cut_df = pd.DataFrame([])
for u in np.unique(df.user):
aux = df[df['user'] == u].copy()
        cut_df = cut_df.append(aux.sample(int(len(aux) / 2)))  # keep a random half of this user's interactions
return cut_df
def filter_users_and_items(df, num_users=None, freq_items=None, top_items=None, keys=['user', 'item']):
'''
Reduces the dataframe to a number of users = num_users and it filters the items by frequency
'''
if num_users is not None:
# df = df[df['user_id'].isin(np.unique(df.user_id)[:num_users])]
df = df[df[keys[0]].isin(np.unique(df[keys[0]])[:num_users])]
# Get top5k books
if top_items is not None:
top5k_books = df[keys[1]].value_counts()[:top_items].index
df = df[df[keys[1]].isin(top5k_books)]
if freq_items is not None:
frequent_items = df['item'].value_counts()[df['item'].value_counts() > freq_items].index
df = df[df[keys[1]].isin(frequent_items)]
return df
def run_statistics(df, src):
path = f'histograms/{src}'
bins = 30
os.makedirs(path, exist_ok=True)
f = open(os.path.join(path, "information.txt"), "w+")
f.write("Information:\n")
f.write("==========================\n")
f.write(f"Interactions: {len(df)}\n")
f.write(f"#users = {df['user'].nunique()}\n")
f.write(f"#items = {df['item'].nunique()}\n")
f.close()
for key in ['user', 'item']:
        # OPTION A: HISTOGRAM
a = pd.DataFrame(df.groupby([key])[key].count())
a.columns = ['value_counts']
a.reset_index(level=[0], inplace=True)
dims = (15, 5)
fig, ax = plt.subplots(figsize=dims)
a["value_counts"].hist(bins=200)
# fig.savefig('hist.jpg')
fig.savefig(os.path.join(path, f'{src}_histogram_{key}_bins={bins}.png'))
fig.clf()
        # OPTION B: BARPLOT
# a = pd.DataFrame(df_year.groupby(['user'])['user'].count())
# a.columns = ['value_counts']
# a.reset_index(level=[0], inplace=True)
# dims = (15, 5)
# fig, ax = plt.subplots(figsize=dims)
# sns.set_style("darkgrid")
# sns.barplot(ax=ax, x="user", y="value_counts", data=a, palette="Blues_d")
# ax.set(xlabel="User", ylabel="Value Counts")
# plt.xticks(rotation=45)
# plt.show()
# fig.savefig('data.jpg')
def load_rate(src='ml-100k', prepro='origin', binary=True, pos_threshold=None, level='ui', context=False,
gce_flag=False, cut_down_data=False, side_info=False, context_type='', context_as_userfeat=False,
flag_run_statistics=False, remove_top_users=0, remove_on='item'):
"""
Method of loading certain raw data
Parameters
----------
src : str, the name of dataset
prepro : str, way to pre-process raw data input, expect 'origin', f'{N}core', f'{N}filter', N is integer value
binary : boolean, whether to transform rating to binary label as CTR or not as Regression
pos_threshold : float, if not None, treat rating larger than this threshold as positive sample
level : str, which level to do with f'{N}core' or f'{N}filter' operation (it only works when prepro contains 'core' or 'filter')
Returns
-------
df : pd.DataFrame, rating information with columns: user, item, rating, (options: timestamp)
user_num : int, the number of users
item_num : int, the number of items
"""
df = pd.DataFrame()
# import mat73
# a = mat73.loadmat('data/gen-disease/genes_phenes.mat')
# which dataset will use
if src == 'ml-100k':
df = pd.read_csv(f'./data/{src}/u.data', sep='\t', header=None,
names=['user', 'item', 'rating', 'timestamp'], engine='python')
if cut_down_data:
df = cut_down_data_half(df) # from 100k to 49.760 interactions
elif src == 'drugs':
union = False
if union == True:
df = | pd.read_csv(f'./data/{src}/train_data_contextUNION_sideeffect.csv', engine='python', index_col=0) | pandas.read_csv |
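# The frequency filter inside filter_users_and_items above hinges on value_counts;
# a tiny illustration with invented interactions.
import pandas as pd

df = pd.DataFrame({"user": [0, 0, 1, 2, 2, 2], "item": [10, 11, 10, 10, 12, 10]})
counts = df["item"].value_counts()
frequent_items = counts[counts > 1].index
assert df[df["item"].isin(frequent_items)]["item"].unique().tolist() == [10]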
# generate stats for each processing step
import pandas as pd
from collections import Counter
from functools import reduce
import gzip
import re
import os, sys
def count_reads(in_fq, n=4):
# support either text or gzip file
try:
f = gzip.open(in_fq, "rt")
except:
f = open(in_fq, "rt")
# count reads
count = 0
# fastq line
fastq_n = 0
for line in f:
fastq_n += 1
if fastq_n == n:
count += 1
fastq_n = 0
# close file
f.close()
return count
def reads_length(in_fq, min_read_len, max_read_len, seq_n=2, n=4):
# support either text or gzip file
try:
f = gzip.open(in_fq, "rt")
except:
f = open(in_fq, "rt")
# read lengths
reads_len = []
# fastq line
fastq_n = 0
for line in f:
fastq_n += 1
if fastq_n == seq_n: # sequence line
read_len = len(line.rstrip())
if fastq_n == n:
reads_len.append(read_len)
fastq_n = 0
# read length count
reads_len_count = []
counter = dict(Counter(reads_len))
for i in range(min_read_len, max_read_len + 1):
if i in counter:
reads_len_count.append([i, counter[i]])
else:
reads_len_count.append([i, 0])
f.close()
return reads_len_count
def reads_length_null(min_read_len, max_read_len):
reads_len_count = []
for i in range(min_read_len, max_read_len + 1):
reads_len_count.append([i, None])
return reads_len_count
# simple parser for single-end star log file
def star_se_parser(star_log):
input_reads = -1
unique_reads = -1
multi_reads = -1
with open(star_log, "rt") as f:
for line in f:
if "Number of input reads" in line: # input reads
m = re.search("^Number of input reads \|(.*)$", line.strip())
if m:
input_reads = int(m.group(1).strip())
if "Uniquely mapped reads number" in line: # unique reads
m = re.search("^Uniquely mapped reads number \|(.*)$", line.strip())
if m:
unique_reads = int(m.group(1).strip())
if "Number of reads mapped to multiple loci" in line: # multi reads
m = re.search("^Number of reads mapped to multiple loci \|(.*)$", line.strip())
if m:
multi_reads = int(m.group(1).strip())
if any(count == -1 for count in [input_reads, unique_reads, multi_reads]):
return [None, None, None]
else:
return [input_reads, unique_reads, multi_reads]
if __name__ == "__main__":
# sample sheet
samples = | pd.read_table(snakemake.config["samples"]) | pandas.read_table |
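# pd.read_table is read_csv with a tab separator by default; a stand-in for the
# sample sheet read above (the real columns live in the snakemake config and are
# assumed here).
import io
import pandas as pd

sheet = "sample\tcondition\nA\tcontrol\nB\ttreated\n"
samples = pd.read_table(io.StringIO(sheet))
assert samples["sample"].tolist() == ["A", "B"]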
import os
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import pydub
import librosa
import multiprocessing
from joblib import Parallel, delayed
DATA_ROOT = '../data'
CPU_COUNT = multiprocessing.cpu_count()
def load_audio(path, duration):
# duration (ms)
audio = pydub.AudioSegment.silent(duration=duration)
try:
audio = audio.overlay(pydub.AudioSegment.from_file(path).set_frame_rate(22050).set_channels(1))[:duration]
except:
return None
# -32768 - 32767
raw = np.fromstring(audio._data, dtype='int16')
# -1 - +1
raw = (raw + 0.5) / (32767 + 0.5)
return raw
def load_urbansound():
"""Load raw audio and metadata from the UrbanSound8K dataset."""
if os.path.isfile(os.path.join(DATA_ROOT, 'urban_meta.pkl')) and os.path.isfile(os.path.join(DATA_ROOT, 'urban_audio.npy')):
rows_meta = pd.read_pickle(os.path.join(DATA_ROOT, 'urban_meta.pkl'))
rows_audio = np.load(os.path.join(DATA_ROOT, 'urban_audio.npy'))
return rows_meta, rows_audio
metadata = pd.read_csv(os.path.join(DATA_ROOT, 'UrbanSound8K', 'metadata', 'UrbanSound8K.csv'))
b = 0
batch_size = 1000
rows_meta = []
rows_audio = []
while len(metadata[b * batch_size:(b + 1) * batch_size]):
for key, row in metadata[b * batch_size:(b + 1) * batch_size].iterrows():
filename = row['slice_file_name']
fold = row['fold']
category = row['classID']
category_name = row['class']
rows_meta.append(pd.DataFrame({'filename':filename,
'fold':fold,
'category':category,
'category_name':category_name}, index=[0]))
audio_path = os.path.join(DATA_ROOT, 'UrbanSound8K', 'audio', 'fold%d' % fold, filename)
audio = load_audio(audio_path, 4000)
if audio is not None:
rows_audio.append(load_audio(audio_path, 4000))
b = b + 1
    # Are these two lines necessary? Wouldn't it be enough to do this once at the end?
rows_meta = [ | pd.concat(rows_meta, ignore_index=True) | pandas.concat |
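# The completed line above collapses the per-clip one-row frames into a single
# metadata table; the same pattern in miniature.
import pandas as pd

rows_meta = [
    pd.DataFrame({"filename": "a.wav", "fold": 1, "category": 0}, index=[0]),
    pd.DataFrame({"filename": "b.wav", "fold": 2, "category": 3}, index=[0]),
]
meta = pd.concat(rows_meta, ignore_index=True)
assert meta.index.tolist() == [0, 1] and meta["fold"].tolist() == [1, 2]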
# <NAME>, 16 different variants
# 0: None
# Ligands, 4 different variations
# 0: XPhos
# 1: t-BuXPhos
# 2: t-BuBrettPhos
# 3: AdBrettPhosV
# Bases, 3 different variations
# 0: P2Et
# 1: BTMG
# 2: MTBD
# Additives, 24 different variations
# 0: None
import numpy as np
import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import OneHotEncoder
#####################################################
############### REACTION VARIABLES ##################
#####################################################
additivePlate1 = np.array(
[
0,
2,
4,
6,
0,
2,
4,
6,
0,
2,
4,
6,
0,
2,
4,
6,
1,
3,
5,
7,
1,
3,
5,
7,
1,
3,
5,
7,
1,
3,
5,
7,
]
)
additivePlate2 = np.array(
[
8,
10,
12,
14,
8,
10,
12,
14,
8,
10,
12,
14,
8,
10,
12,
14,
9,
11,
13,
15,
9,
11,
13,
15,
9,
11,
13,
15,
9,
11,
13,
15,
]
)
additivePlate3 = np.array(
[
23,
17,
19,
21,
23,
17,
19,
21,
23,
17,
19,
21,
23,
17,
19,
21,
16,
18,
20,
22,
16,
18,
20,
22,
16,
18,
20,
22,
16,
18,
20,
22,
]
)
ligand = np.array(
[
0,
0,
0,
0,
1,
1,
1,
1,
2,
2,
2,
2,
3,
3,
3,
3,
0,
0,
0,
0,
1,
1,
1,
1,
2,
2,
2,
2,
3,
3,
3,
3,
]
)
base = np.array(
[
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
]
)
arylHalide = np.array(
[
1,
2,
3,
4,
5,
6,
7,
8,
9,
10,
11,
12,
13,
14,
15,
0,
1,
2,
3,
4,
5,
6,
7,
8,
9,
10,
11,
12,
13,
14,
15,
0,
1,
2,
3,
4,
5,
6,
7,
8,
9,
10,
11,
12,
13,
14,
15,
0,
]
)
###############################################
############### CREATE DATA ###################
###############################################
df_plate = | pd.read_csv("./data_table.csv") | pandas.read_csv |
import os
import pandas as pd
from unittest import TestCase, main
from metapool.metapool import parse_sample_sheet
from metapool.prep import (preparations_for_run, remove_qiita_id,
get_run_prefix, is_nonempty_gz_file,
get_machine_code, get_model_and_center,
sample_sheet_to_dataframe, parse_illumina_run_id,
_check_invalid_names, agp_transform)
class Tests(TestCase):
def setUp(self):
data_dir = os.path.join(os.path.dirname(__file__), 'data')
self.good_run = os.path.join(data_dir, 'runs',
'191103_D32611_0365_G00DHB5YXX')
self.good_run_new_version = os.path.join(
data_dir, 'runs', '191104_D32611_0365_G00DHB5YXZ')
self.OKish_run_new_version = os.path.join(
data_dir, 'runs', '191104_D32611_0365_OK15HB5YXZ')
self.ss = os.path.join(self.good_run, 'sample-sheet.csv')
def _check_run_191103_D32611_0365_G00DHB5YXX(self, obs):
"Convenience method to check the output of a whole run"
exp = {'191103_D32611_0365_G00DHB5YXX.Baz.1',
'191103_D32611_0365_G00DHB5YXX.Baz.3',
'191103_D32611_0365_G00DHB5YXX.FooBar_666.3'}
self.assertEqual(set(obs.keys()), exp)
columns = ['sample_name', 'experiment_design_description',
'library_construction_protocol', 'platform', 'run_center',
'run_date', 'run_prefix', 'sequencing_meth', 'center_name',
'center_project_name', 'instrument_model', 'runid',
'sample_plate', 'sample_well', 'i7_index_id', 'index',
'i5_index_id', 'index2', 'lane', 'sample_project',
'well_description']
data = [['importantsample1', 'EXPERIMENT_DESC',
'LIBRARY_PROTOCOL', 'Illumina', 'UCSDMI', '2019-11-03',
'sample1_S11_L003', 'sequencing by synthesis', 'CENTER_NAME',
'Baz', 'Illumina HiSeq 2500',
'191103_D32611_0365_G00DHB5YXX', 'FooBar_666_p1', 'A3',
'iTru7_107_09', 'GCCTTGTT', 'iTru5_01_A', 'AACACCAC', '3',
'Baz', 'FooBar_666_p1.sample1.A3'],
['importantsample44', 'EXPERIMENT_DESC',
'LIBRARY_PROTOCOL', 'Illumina', 'UCSDMI', '2019-11-03',
'sample44_S14_L003', 'sequencing by synthesis', 'CENTER_NAME',
'Baz', 'Illumina HiSeq 2500',
'191103_D32611_0365_G00DHB5YXX', 'Baz_p3', 'B99',
'iTru7_107_14', 'GTCCTAAG', 'iTru5_01_A', 'CATCTGCT', '3',
'Baz', 'Baz_p3.sample44.B99']]
exp = pd.DataFrame(data=data, columns=columns)
obs_df = obs['191103_D32611_0365_G00DHB5YXX.Baz.3']
# make sure the columns are in the same order before comparing
obs_df = obs_df[exp.columns].copy()
pd.testing.assert_frame_equal(obs_df, exp)
data = [['importantsample1', 'EXPERIMENT_DESC',
'LIBRARY_PROTOCOL', 'Illumina', 'UCSDMI', '2019-11-03',
'sample1_S11_L001', 'sequencing by synthesis', 'CENTER_NAME',
'Baz', 'Illumina HiSeq 2500',
'191103_D32611_0365_G00DHB5YXX', 'FooBar_666_p1', 'A1',
'iTru7_107_07', 'CCGACTAT', 'iTru5_01_A', 'ACCGACAA', '1',
'Baz', 'FooBar_666_p1.sample1.A1'],
['importantsample2', 'EXPERIMENT_DESC',
'LIBRARY_PROTOCOL', 'Illumina', 'UCSDMI', '2019-11-03',
'sample2_S10_L001', 'sequencing by synthesis', 'CENTER_NAME',
'Baz', 'Illumina HiSeq 2500',
'191103_D32611_0365_G00DHB5YXX', 'FooBar_666_p1', 'A2',
'iTru7_107_08', 'CCGACTAT', 'iTru5_01_A', 'CTTCGCAA', '1',
'Baz', 'FooBar_666_p1.sample2.A2']]
exp = | pd.DataFrame(columns=columns, data=data) | pandas.DataFrame |
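# The expected frames in this test are built column-first and compared with
# pd.testing.assert_frame_equal after reordering columns; the pattern reduced to
# two columns with values taken from the test data above.
import pandas as pd

columns = ["sample_name", "lane"]
data = [["importantsample1", "3"], ["importantsample44", "3"]]
exp = pd.DataFrame(data=data, columns=columns)
obs = pd.DataFrame({"lane": ["3", "3"], "sample_name": ["importantsample1", "importantsample44"]})
pd.testing.assert_frame_equal(obs[exp.columns], exp)  # raises AssertionError on any mismatch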
#----------------------------------------------------------------------------------------------
####################
# IMPORT LIBRARIES #
####################
import streamlit as st
import pandas as pd
import numpy as np
import plotly as dd
import plotly.express as px
import seaborn as sns
import matplotlib.pyplot as plt
import matplotlib.font_manager
import plotly.graph_objects as go
import functions as fc
import os
import altair as alt
import statsmodels.api as sm
from scipy import stats
from sklearn.metrics import (
    make_scorer, mean_squared_error, r2_score, mean_absolute_error, explained_variance_score,
    roc_auc_score, max_error, log_loss, average_precision_score, precision_recall_curve, auc,
    roc_curve, confusion_matrix, recall_score, precision_score, f1_score, accuracy_score,
    balanced_accuracy_score, cohen_kappa_score)
from sklearn.model_selection import train_test_split
import scipy
import sys
import platform
import base64
from io import BytesIO
from linearmodels import PanelOLS
from linearmodels import RandomEffects
from linearmodels import PooledOLS
#----------------------------------------------------------------------------------------------
def app():
# Clear cache
st.legacy_caching.clear_cache()
# Hide traceback in error messages (comment out for de-bugging)
sys.tracebacklimit = 0
# Show altair tooltip when full screen
st.markdown('<style>#vg-tooltip-element{z-index: 1000051}</style>',unsafe_allow_html=True)
# workaround for Firefox bug- hide the scrollbar while keeping the scrolling functionality
st.markdown("""
<style>
.ReactVirtualized__Grid::-webkit-scrollbar {
display: none;
}
.ReactVirtualized__Grid {
-ms-overflow-style: none; /* IE and Edge */
scrollbar-width: none; /* Firefox */
}
</style>
""", unsafe_allow_html=True)
#------------------------------------------------------------------------------------------
#++++++++++++++++++++++++++++++++++++++++++++
# RESET INPUT
#Session state
if 'key' not in st.session_state:
st.session_state['key'] = 0
reset_clicked = st.sidebar.button("Reset all your input")
if reset_clicked:
st.session_state['key'] = st.session_state['key'] + 1
st.sidebar.markdown("")
#++++++++++++++++++++++++++++++++++++++++++++
# DATA IMPORT
# File upload section
df_dec = st.sidebar.radio("Get data", ["Use example dataset", "Upload data"], key = st.session_state['key'])
uploaded_data=None
if df_dec == "Upload data":
#st.subheader("Upload your data")
#uploaded_data = st.sidebar.file_uploader("Make sure that dot (.) is a decimal separator!", type=["csv", "txt"])
separator_expander=st.sidebar.expander('Upload settings')
with separator_expander:
a4,a5=st.columns(2)
with a4:
dec_sep=a4.selectbox("Decimal sep.",['.',','], key = st.session_state['key'])
with a5:
col_sep=a5.selectbox("Column sep.",[';', ',' , '|', '\s+', '\t','other'], key = st.session_state['key'])
if col_sep=='other':
col_sep=st.text_input('Specify your column separator', key = st.session_state['key'])
a4,a5=st.columns(2)
with a4:
thousands_sep=a4.selectbox("Thousands x sep.",[None,'.', ' ','\s+', 'other'], key = st.session_state['key'])
if thousands_sep=='other':
thousands_sep=st.text_input('Specify your thousands separator', key = st.session_state['key'])
with a5:
encoding_val=a5.selectbox("Encoding",[None,'utf_8','utf_8_sig','utf_16_le','cp1140','cp1250','cp1251','cp1252','cp1253','cp1254','other'], key = st.session_state['key'])
if encoding_val=='other':
encoding_val=st.text_input('Specify your encoding', key = st.session_state['key'])
# Error handling for separator selection:
if dec_sep==col_sep:
st.sidebar.error("Decimal and column separators cannot be identical!")
elif dec_sep==thousands_sep:
st.sidebar.error("Decimal and thousands separators cannot be identical!")
elif col_sep==thousands_sep:
st.sidebar.error("Column and thousands separators cannot be identical!")
uploaded_data = st.sidebar.file_uploader("Default separators: decimal '.' | column ';'", type=["csv", "txt"])
if uploaded_data is not None:
df = pd.read_csv(uploaded_data, decimal=dec_sep, sep = col_sep,thousands=thousands_sep,encoding=encoding_val, engine='python')
df_name=os.path.splitext(uploaded_data.name)[0]
st.sidebar.success('Loading data... done!')
elif uploaded_data is None:
df = pd.read_csv("default data/Grunfeld.csv", sep = ";|,|\t",engine='python')
df_name="Grunfeld"
else:
df = pd.read_csv("default data/Grunfeld.csv", sep = ";|,|\t",engine='python')
df_name="Grunfeld"
st.sidebar.markdown("")
#Basic data info
n_rows = df.shape[0]
n_cols = df.shape[1]
#++++++++++++++++++++++++++++++++++++++++++++
# SETTINGS
settings_expander=st.sidebar.expander('Settings')
with settings_expander:
st.caption("**Precision**")
user_precision=int(st.number_input('Number of digits after the decimal point',min_value=0,max_value=10,step=1,value=4, key = st.session_state['key']))
st.caption("**Help**")
sett_hints = st.checkbox('Show learning hints', value=False, key = st.session_state['key'])
st.caption("**Appearance**")
sett_wide_mode = st.checkbox('Wide mode', value=False, key = st.session_state['key'])
sett_theme = st.selectbox('Theme', ["Light", "Dark"], key = st.session_state['key'])
#sett_info = st.checkbox('Show methods info', value=False)
#sett_prec = st.number_input('Set the number of diggits for the output', min_value=0, max_value=8, value=2)
st.sidebar.markdown("")
# Check if wide mode
if sett_wide_mode:
fc.wide_mode_func()
# Check theme
if sett_theme == "Dark":
fc.theme_func_dark()
if sett_theme == "Light":
fc.theme_func_light()
fc.theme_func_dl_button()
#------------------------------------------------------------------------------------------
#++++++++++++++++++++++++++++++++++++++++++++
# DATA PREPROCESSING & VISUALIZATION
st.header("**Panel data**")
st.markdown("Get your data ready for powerfull methods! Let STATY do data cleaning, variable transformations, visualizations and deliver you the stats you need. Specify your data processing preferences and start exploring your data stories right below... ")
# Check if enough data is available
if n_cols >= 2 and n_rows > 0:
st.empty()
else:
st.error("ERROR: Not enough data!")
return
# Specify entity and time
st.markdown("**Panel data specification**")
col1, col2 = st.columns(2)
with col1:
entity_na_warn = False
entity_options = df.columns
entity = st.selectbox("Select variable for entity", entity_options, key = st.session_state['key'])
with col2:
time_na_warn = False
time_options = df.columns
time_options = list(time_options[time_options.isin(df.drop(entity, axis = 1).columns)])
time = st.selectbox("Select variable for time", time_options, key = st.session_state['key'])
if np.where(df[entity].isnull())[0].size > 0:
entity_na_warn = "ERROR: The variable selected for entity has NAs!"
st.error(entity_na_warn)
if np.where(df[time].isnull())[0].size > 0:
time_na_warn = "ERROR: The variable selected for time has NAs!"
st.error(time_na_warn)
if df[time].dtypes != "float64" and df[time].dtypes != "float32" and df[time].dtypes != "int64" and df[time].dtypes != "int32":
time_na_warn = "ERROR: Time variable must be numeric!"
st.error(time_na_warn)
run_models = False
if time_na_warn == False and entity_na_warn == False:
data_empty_container = st.container()
with data_empty_container:
st.empty()
st.empty()
st.empty()
st.empty()
st.empty()
st.empty()
st.empty()
st.empty()
# Make sure time is numeric
df[time] = pd.to_numeric(df[time])
data_exploration_container2 = st.container()
with data_exploration_container2:
st.header("**Data screening and processing**")
#------------------------------------------------------------------------------------------
#++++++++++++++++++++++
# DATA SUMMARY
# Main panel for data summary (pre)
#----------------------------------
dev_expander_dsPre = st.expander("Explore raw panel data info and stats", expanded = False)
st.empty()
with dev_expander_dsPre:
# Default data description:
if uploaded_data == None:
if st.checkbox("Show data description", value = False, key = st.session_state['key']):
st.markdown("**Data source:**")
st.markdown("This is the original 11-firm data set from Grunfeld’s Ph.D. thesis (*Grunfeld, 1958, The Determinants of Corporate Investment, Department of Economics, University of Chicago*). For more details see online complements for the article [The Grunfeld Data at 50] (https://www.zeileis.org/grunfeld/).")
st.markdown("**Citation:**")
st.markdown("<NAME>, <NAME> (2010). “The Grunfeld Data at 50,” German Economic Review, 11(4), 404-417. [doi:10.1111/j.1468-0475.2010.00513.x] (https://onlinelibrary.wiley.com/doi/abs/10.1111/j.1468-0475.2010.00513.x)")
st.markdown("**Variables in the dataset:**")
col1,col2=st.columns(2)
col1.write("invest")
col2.write("Gross investment, defined as additions to plant and equipment plus maintenance and repairs in millions of dollars deflated by the implicit price deflator of producers’ durable equipment (base 1947)")
col1,col2=st.columns(2)
col1.write("value")
col2.write("Market value of the firm, defined as the price of common shares at December 31 (or, for WH, IBM and CH, the average price of December 31 and January 31 of the following year) times the number of common shares outstanding plus price of preferred shares at December 31 (or average price of December 31 and January 31 of the following year) times number of preferred shares plus total book value of debt at December 31 in millions of dollars deflated by the implicit GNP price deflator (base 1947)")
col1,col2=st.columns(2)
col1.write("capital")
col2.write("Stock of plant and equipment, defined as the accumulated sum of net additions to plant and equipment deflated by the implicit price deflator for producers’ durable equipment (base 1947) minus depreciation allowance deflated by depreciation expense deflator (10 years moving average of wholesale price index of metals and metal products, base1947)")
col1,col2=st.columns(2)
col1.write("firm")
col2.write("General Motors (GM), US Steel (US), General Electric (GE), Chrysler (CH), Atlantic Refining (AR), IBM, Union Oil (UO), Westinghouse (WH), Goodyear (GY), Diamond Match (DM), American Steel (AS)")
col1,col2=st.columns(2)
col1.write("year")
col2.write("Year ranging from 1935 to 1954")
st.markdown("")
# Show raw data & data info
df_summary = fc.data_summary(df)
if st.checkbox("Show raw data", value = False, key = st.session_state['key']):
st.write(df)
#st.info("Data shape: "+ str(n_rows) + " rows and " + str(n_cols) + " columns")
st.write("Data shape: ", n_rows, " rows and ", n_cols, " columns")
if df[df.duplicated()].shape[0] > 0 or df.iloc[list(pd.unique(np.where(df.isnull())[0]))].shape[0] > 0:
check_nasAnddupl=st.checkbox("Show duplicates and NAs info", value = False, key = st.session_state['key'])
if check_nasAnddupl:
if df[df.duplicated()].shape[0] > 0:
st.write("Number of duplicates: ", df[df.duplicated()].shape[0])
st.write("Duplicate row index: ", ', '.join(map(str,list(df.index[df.duplicated()]))))
if df.iloc[list(pd.unique(np.where(df.isnull())[0]))].shape[0] > 0:
st.write("Number of rows with NAs: ", df.iloc[list(pd.unique(np.where(df.isnull())[0]))].shape[0])
st.write("Rows with NAs: ", ', '.join(map(str,list(pd.unique(np.where(df.isnull())[0])))))
# Show variable info
if st.checkbox('Show variable info', value = False, key = st.session_state['key']):
st.write(df_summary["Variable types"])
# Show summary statistics (raw data)
if st.checkbox('Show summary statistics (raw data)', value = False, key = st.session_state['key']):
st.write(df_summary["ALL"].style.set_precision(user_precision))
# Download link for summary statistics
output = BytesIO()
excel_file = pd.ExcelWriter(output, engine="xlsxwriter")
df_summary["Variable types"].to_excel(excel_file, sheet_name="variable_info")
df_summary["ALL"].to_excel(excel_file, sheet_name="summary_statistics")
excel_file.save()
excel_file = output.getvalue()
b64 = base64.b64encode(excel_file)
dl_file_name = "Summary statistics__" + df_name + ".xlsx"
st.markdown(
f"""
<a href="data:file/excel_file;base64,{b64.decode()}" id="button_dl" download="{dl_file_name}">Download summary statistics</a>
""",
unsafe_allow_html=True)
st.write("")
if fc.get_mode(df).loc["n_unique"].any():
st.caption("** Mode is not unique.")
if sett_hints:
st.info(str(fc.learning_hints("de_summary_statistics")))
dev_expander_anovPre = st.expander("ANOVA for raw panel data", expanded = False)
with dev_expander_anovPre:
if df.shape[1] > 2:
# Target variable
target_var = st.selectbox('Select target variable ', df.drop([entity, time], axis = 1).columns, key = st.session_state['key'])
if df[target_var].dtypes == "int64" or df[target_var].dtypes == "float64":
class_var_options = df.columns
class_var_options = class_var_options[class_var_options.isin(df.drop(target_var, axis = 1).columns)]
clas_var = st.selectbox('Select classifier variable ', [entity, time], key = st.session_state['key'])
# Means and sd by entity
col1, col2 = st.columns(2)
with col1:
df_anova_woTime = df.drop([time], axis = 1)
df_grouped_ent = df_anova_woTime.groupby(entity)
st.write("Mean based on entity:")
st.write(df_grouped_ent.mean()[target_var])
st.write("")
with col2:
st.write("SD based on entity:")
st.write(df_grouped_ent.std()[target_var])
st.write("")
# Means and sd by time
col3, col4 = st.columns(2)
with col3:
df_anova_woEnt= df.drop([entity], axis = 1)
df_grouped_time = df_anova_woEnt.groupby(time)
counts_time = pd.DataFrame(df_grouped_time.count()[target_var])
counts_time.columns = ["count"]
st.write("Mean based on time:")
st.write(df_grouped_time.mean()[target_var])
st.write("")
with col4:
st.write("SD based on time:")
st.write(df_grouped_time.std()[target_var])
st.write("")
col9, col10 = st.columns(2)
with col9:
st.write("Boxplot grouped by entity:")
box_size1 = st.slider("Select box size", 1, 50, 5, key = st.session_state['key'])
# Grouped boxplot by entity
grouped_boxplot_data = pd.DataFrame()
grouped_boxplot_data[entity] = df[entity]
grouped_boxplot_data[time] = df[time]
grouped_boxplot_data["Index"] = df.index
grouped_boxplot_data[target_var] = df[target_var]
grouped_boxchart_ent = alt.Chart(grouped_boxplot_data, height = 300).mark_boxplot(size = box_size1, color = "#1f77b4", median = dict(color = "darkred")).encode(
x = alt.X(entity, scale = alt.Scale(zero = False)),
y = alt.Y(target_var, scale = alt.Scale(zero = False)),
tooltip = [target_var, entity, time, "Index"]
).configure_axis(
labelFontSize = 11,
titleFontSize = 12
)
st.altair_chart(grouped_boxchart_ent, use_container_width=True)
with col10:
st.write("Boxplot grouped by time:")
box_size2 = st.slider("Select box size ", 1, 50, 5, key = st.session_state['key'])
# Grouped boxplot by time
grouped_boxplot_data = pd.DataFrame()
grouped_boxplot_data[entity] = df[entity]
grouped_boxplot_data[time] = df[time]
grouped_boxplot_data["Index"] = df.index
grouped_boxplot_data[target_var] = df[target_var]
grouped_boxchart_time = alt.Chart(grouped_boxplot_data, height = 300).mark_boxplot(size = box_size2, color = "#1f77b4", median = dict(color = "darkred")).encode(
x = alt.X(time, scale = alt.Scale(domain = [min(df[time]), max(df[time])])),
y = alt.Y(target_var, scale = alt.Scale(zero = False)),
tooltip = [target_var, entity, time, "Index"]
).configure_axis(
labelFontSize = 11,
titleFontSize = 12
)
st.altair_chart(grouped_boxchart_time, use_container_width=True)
if sett_hints:
st.info(str(fc.learning_hints("de_anova_boxplot")))
st.write("")
# Count for entity and time
col5, col6 = st.columns(2)
with col5:
st.write("Number of observations per entity:")
counts_ent = pd.DataFrame(df_grouped_ent.count()[target_var])
counts_ent.columns = ["count"]
st.write(counts_ent.transpose())
with col6:
st.write("Number of observations per time:")
counts_time = pd.DataFrame(df_grouped_time.count()[target_var])
counts_time.columns = ["count"]
st.write(counts_time.transpose())
if sett_hints:
st.info(str(fc.learning_hints("de_anova_count")))
st.write("")
# ANOVA calculation
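# One-way ANOVA computed by hand below (k groups, N observations):
#   SS_between = sum_g n_g * (mean_g - grand_mean)^2,  df_between = k - 1
#   SS_within  = sum_g (n_g - 1) * var_g,              df_within  = N - k
#   F = (SS_between / df_between) / (SS_within / df_within)
# The p-value is the upper tail of the F(df_between, df_within) distribution.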
df_grouped = df[[target_var,clas_var]].groupby(clas_var)
overall_mean = (df_grouped.mean()*df_grouped.count()).sum()/df_grouped.count().sum()
dof_between = len(df_grouped.count())-1
dof_within = df_grouped.count().sum()-len(df_grouped.count())
dof_tot = dof_between + dof_within
SS_between = (((df_grouped.mean()-overall_mean)**2)*df_grouped.count()).sum()
SS_within = (df_grouped.var()*(df_grouped.count()-1)).sum()
SS_total = SS_between + SS_within
MS_between = SS_between/dof_between
MS_within = SS_within/dof_within
F_stat = MS_between/MS_within
p_value = scipy.stats.f.sf(F_stat, dof_between, dof_within)
anova_table=pd.DataFrame({
"DF": [dof_between, dof_within.values[0], dof_tot.values[0]],
"SS": [SS_between.values[0], SS_within.values[0], SS_total.values[0]],
"MS": [MS_between.values[0], MS_within.values[0], ""],
"F-statistic": [F_stat.values[0], "", ""],
"p-value": [p_value[0], "", ""]},
index = ["Between", "Within", "Total"],)
st.write("ANOVA:")
st.write(anova_table)
if sett_hints:
st.info(str(fc.learning_hints("de_anova_table")))
st.write("")
# ANOVA (OLS)
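# The classifier variable is turned into integer codes (pd.factorize) and an
# OLS with an intercept is fitted on these codes; its residuals are used for
# the normality diagnostics (QQ-plot and residuals histogram) below.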
codes = pd.factorize(df[clas_var])[0]
ano_ols = sm.OLS(df[target_var], sm.add_constant(codes))
ano_ols_output = ano_ols.fit()
residuals = ano_ols_output.resid
col7, col8 = st.columns(2)
with col7:
# QQ-plot
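# Standardized residuals are plotted against the theoretical quantiles of a
# standard normal distribution; points close to the dark red reference line
# support the normality assumption.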
st.write("Normal QQ-plot:")
st.write("")
st.write("")
st.write("")
st.write("")
st.write("")
st.write("")
qq_plot_data = pd.DataFrame()
qq_plot_data["StandResiduals"] = (residuals - residuals.mean())/residuals.std()
qq_plot_data["Index"] = df.index
qq_plot_data[entity] = df[entity]
qq_plot_data[time] = df[time]
qq_plot_data = qq_plot_data.sort_values(by = ["StandResiduals"])
qq_plot_data["Theoretical quantiles"] = stats.probplot(residuals, dist="norm")[0][0]
qq_plot = alt.Chart(qq_plot_data, height = 300).mark_circle(size=20).encode(
x = alt.X("Theoretical quantiles", title = "theoretical quantiles", scale = alt.Scale(domain = [min(qq_plot_data["Theoretical quantiles"]), max(qq_plot_data["Theoretical quantiles"])]), axis = alt.Axis(titleFontSize = 12, labelFontSize = 11)),
y = alt.Y("StandResiduals", title = "stand. residuals", scale = alt.Scale(zero = False), axis = alt.Axis(titleFontSize = 12, labelFontSize = 11)),
tooltip = ["StandResiduals", "Theoretical quantiles", entity, time, "Index"]
)
line = alt.Chart(
pd.DataFrame({"Theoretical quantiles": [min(qq_plot_data["Theoretical quantiles"]), max(qq_plot_data["Theoretical quantiles"])], "StandResiduals": [min(qq_plot_data["Theoretical quantiles"]), max(qq_plot_data["Theoretical quantiles"])]})).mark_line(size = 2, color = "darkred").encode(
alt.X("Theoretical quantiles"),
alt.Y("StandResiduals"),
)
st.altair_chart(qq_plot + line, use_container_width = True)
with col8:
# Residuals histogram
st.write("Residuals histogram:")
residuals_hist = pd.DataFrame(residuals)
residuals_hist.columns = ["residuals"]
binNo_res = st.slider("Select maximum number of bins ", 5, 100, 25, key = st.session_state['key'])
hist_plot_res = alt.Chart(residuals_hist, height = 300).mark_bar().encode(
x = alt.X("residuals", title = "residuals", bin = alt.BinParams(maxbins = binNo_res), axis = alt.Axis(titleFontSize = 12, labelFontSize = 11)),
y = alt.Y("count()", title = "count of records", scale = alt.Scale(zero = False), axis = alt.Axis(titleFontSize = 12, labelFontSize = 11)),
tooltip = ["count()", alt.Tooltip("residuals", bin = alt.BinParams(maxbins = binNo_res))]
)
st.altair_chart(hist_plot_res, use_container_width=True)
if sett_hints:
st.info(str(fc.learning_hints("de_anova_residuals")))
# Download link for ANOVA statistics
output = BytesIO()
excel_file = pd.ExcelWriter(output, engine="xlsxwriter")
df_grouped_ent.mean()[target_var].to_excel(excel_file, sheet_name="entity_mean")
df_grouped_ent.std()[target_var].to_excel(excel_file, sheet_name="entity_sd")
df_grouped_time.mean()[target_var].to_excel(excel_file, sheet_name="time_mean")
df_grouped_time.std()[target_var].to_excel(excel_file, sheet_name="time_sd")
counts_ent.transpose().to_excel(excel_file, sheet_name="entity_obs")
counts_time.transpose().to_excel(excel_file, sheet_name="time_obs")
anova_table.to_excel(excel_file, sheet_name="ANOVA table")
excel_file.save()
excel_file = output.getvalue()
b64 = base64.b64encode(excel_file)
dl_file_name = "ANOVA statistics__" + target_var + "__" + df_name + ".xlsx"
st.markdown(
f"""
<a href="data:file/excel_file;base64,{b64.decode()}" id="button_dl" download="{dl_file_name}">Download ANOVA statistics</a>
""",
unsafe_allow_html=True)
st.write("")
else:
st.error("ERROR: The target variable must be a numerical one!")
else: st.error("ERROR: No variables available for ANOVA!")
#++++++++++++++++++++++
# DATA PROCESSING
# Settings for data processing
#-------------------------------------
dev_expander_dm_sb = st.expander("Specify data processing preferences", expanded = False)
with dev_expander_dm_sb:
n_rows_wNAs = df.iloc[list(pd.unique(np.where(df.isnull())[0]))].shape[0]
n_rows_wNAs_pre_processing = "No"
if n_rows_wNAs > 0:
n_rows_wNAs_pre_processing = "Yes"
a1, a2, a3 = st.columns(3)
else: a1, a3 = st.columns(2)
sb_DM_dImp_num = None
sb_DM_dImp_other = None
sb_DM_delRows=None
sb_DM_keepRows=None
group_by_num = None
group_by_other = None
with a1:
#--------------------------------------------------------------------------------------
# DATA CLEANING
st.markdown("**Data cleaning**")
# Delete rows
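# Rows are selected via the data frame index; 'between' is exclusive, i.e.
# only rows with an index strictly between the lower and upper limit are deleted.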
delRows =st.selectbox('Delete rows with index ...', options=['-', 'greater', 'greater or equal', 'smaller', 'smaller or equal', 'equal', 'between'], key = st.session_state['key'])
if delRows!='-':
if delRows=='between':
row_1=st.number_input('Lower limit is', value=0, step=1, min_value= 0, max_value=len(df)-1, key = st.session_state['key'])
row_2=st.number_input('Upper limit is', value=2, step=1, min_value= 0, max_value=len(df)-1, key = st.session_state['key'])
if (row_1 + 1) < row_2 :
sb_DM_delRows=df.index[(df.index > row_1) & (df.index < row_2)]
elif (row_1 + 1) == row_2 :
st.warning("WARNING: No row is deleted!")
elif row_1 == row_2 :
st.warning("WARNING: No row is deleted!")
elif row_1 > row_2 :
st.error("ERROR: Lower limit must be smaller than upper limit!")
return
elif delRows=='equal':
sb_DM_delRows = st.multiselect("to...", df.index, key = st.session_state['key'])
else:
row_1=st.number_input('than...', step=1, value=1, min_value = 0, max_value=len(df)-1, key = st.session_state['key'])
if delRows=='greater':
sb_DM_delRows=df.index[df.index > row_1]
if row_1 == len(df)-1:
st.warning("WARNING: No row is deleted!")
elif delRows=='greater or equal':
sb_DM_delRows=df.index[df.index >= row_1]
if row_1 == 0:
st.error("ERROR: All rows are deleted!")
return
elif delRows=='smaller':
sb_DM_delRows=df.index[df.index < row_1]
if row_1 == 0:
st.warning("WARNING: No row is deleted!")
elif delRows=='smaller or equal':
sb_DM_delRows=df.index[df.index <= row_1]
if row_1 == len(df)-1:
st.error("ERROR: All rows are deleted!")
return
if sb_DM_delRows is not None:
df = df.loc[~df.index.isin(sb_DM_delRows)]
no_delRows=n_rows-df.shape[0]
# Keep rows
keepRows =st.selectbox('Keep rows with index ...', options=['-', 'greater', 'greater or equal', 'smaller', 'smaller or equal', 'equal', 'between'], key = st.session_state['key'])
if keepRows!='-':
if keepRows=='between':
row_1=st.number_input('Lower limit is', value=0, step=1, min_value= 0, max_value=len(df)-1, key = st.session_state['key'])
row_2=st.number_input('Upper limit is', value=2, step=1, min_value= 0, max_value=len(df)-1, key = st.session_state['key'])
if (row_1 + 1) < row_2 :
sb_DM_keepRows=df.index[(df.index > row_1) & (df.index < row_2)]
elif (row_1 + 1) == row_2 :
st.error("ERROR: No row is kept!")
return
elif row_1 == row_2 :
st.error("ERROR: No row is kept!")
return
elif row_1 > row_2 :
st.error("ERROR: Lower limit must be smaller than upper limit!")
return
elif keepRows=='equal':
sb_DM_keepRows = st.multiselect("to...", df.index, key = st.session_state['key'])
else:
row_1=st.number_input('than...', step=1, value=1, min_value = 0, max_value=len(df)-1, key = st.session_state['key'])
if keepRows=='greater':
sb_DM_keepRows=df.index[df.index > row_1]
if row_1 == len(df)-1:
st.error("ERROR: No row is kept!")
return
elif keepRows=='greater or equal':
sb_DM_keepRows=df.index[df.index >= row_1]
if row_1 == 0:
st.warning("WARNING: All rows are kept!")
elif keepRows=='smaller':
sb_DM_keepRows=df.index[df.index < row_1]
if row_1 == 0:
st.error("ERROR: No row is kept!")
return
elif keepRows=='smaller or equal':
sb_DM_keepRows=df.index[df.index <= row_1]
if sb_DM_keepRows is not None:
df = df.loc[df.index.isin(sb_DM_keepRows)]
no_keptRows=df.shape[0]
# Delete columns
sb_DM_delCols = st.multiselect("Select columns to delete", df.drop([entity, time], axis = 1).columns, key = st.session_state['key'])
df = df.loc[:,~df.columns.isin(sb_DM_delCols)]
# Keep columns
sb_DM_keepCols = st.multiselect("Select columns to keep", df.drop([entity, time], axis = 1).columns, key = st.session_state['key'])
if len(sb_DM_keepCols) > 0:
df = df.loc[:,df.columns.isin([entity, time] + sb_DM_keepCols)]
# Delete duplicates if any exist
if df[df.duplicated()].shape[0] > 0:
sb_DM_delDup = st.selectbox("Delete duplicate rows", ["No", "Yes"], key = st.session_state['key'])
if sb_DM_delDup == "Yes":
n_rows_dup = df[df.duplicated()].shape[0]
df = df.drop_duplicates()
elif df[df.duplicated()].shape[0] == 0:
sb_DM_delDup = "No"
# Delete rows with NA if any exist
n_rows_wNAs = df.iloc[list(pd.unique(np.where(df.isnull())[0]))].shape[0]
if n_rows_wNAs > 0:
sb_DM_delRows_wNA = st.selectbox("Delete rows with NAs", ["No", "Yes"], key = st.session_state['key'])
if sb_DM_delRows_wNA == "Yes":
df = df.dropna()
elif n_rows_wNAs == 0:
sb_DM_delRows_wNA = "No"
# Filter data
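# Numeric variables can be filtered by a threshold or a range ('between' is
# exclusive); for non-numeric variables the values to keep are selected directly.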
st.markdown("**Data filtering**")
filter_var = st.selectbox('Filter your data by a variable...', list('-')+ list(df.columns), key = st.session_state['key'])
if filter_var !='-':
if df[filter_var].dtypes=="int64" or df[filter_var].dtypes=="float64":
if df[filter_var].dtypes=="float64":
filter_format="%.8f"
else:
filter_format=None
user_filter=st.selectbox('Select values that are ...', options=['greater','greater or equal','smaller','smaller or equal', 'equal','between'], key = st.session_state['key'])
if user_filter=='between':
filter_1=st.number_input('Lower limit is', format=filter_format, value=df[filter_var].min(), min_value=df[filter_var].min(), max_value=df[filter_var].max(), key = st.session_state['key'])
filter_2=st.number_input('Upper limit is', format=filter_format, value=df[filter_var].max(), min_value=df[filter_var].min(), max_value=df[filter_var].max(), key = st.session_state['key'])
#reclassify values:
if filter_1 < filter_2 :
df = df[(df[filter_var] > filter_1) & (df[filter_var] < filter_2)]
if len(df) == 0:
st.error("ERROR: No data available for the selected limits!")
return
elif filter_1 >= filter_2 :
st.error("ERROR: Lower limit must be smaller than upper limit!")
return
elif user_filter=='equal':
filter_1=st.multiselect('to... ', options=df[filter_var].values, key = st.session_state['key'])
if len(filter_1)>0:
df = df.loc[df[filter_var].isin(filter_1)]
else:
filter_1=st.number_input('than... ',format=filter_format, value=df[filter_var].min(), min_value=df[filter_var].min(), max_value=df[filter_var].max(), key = st.session_state['key'])
#reclassify values:
if user_filter=='greater':
df = df[df[filter_var] > filter_1]
elif user_filter=='greater or equal':
df = df[df[filter_var] >= filter_1]
elif user_filter=='smaller':
df= df[df[filter_var]< filter_1]
elif user_filter=='smaller or equal':
df = df[df[filter_var] <= filter_1]
if len(df) == 0:
st.error("ERROR: No data available for the selected value!")
return
elif len(df) == n_rows:
st.warning("WARNING: Data are not filtered for this value!")
else:
filter_1=st.multiselect('Filter your data by a value...', (df[filter_var]).unique(), key = st.session_state['key'])
if len(filter_1)>0:
df = df.loc[df[filter_var].isin(filter_1)]
if n_rows_wNAs_pre_processing == "Yes":
with a2:
#--------------------------------------------------------------------------------------
# DATA IMPUTATION
# Select data imputation method (only if rows with NA not deleted)
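# Numeric NAs can be replaced by the mean, the median or a random value; other
# variable types by the mode or a random value. Optionally, the imputation is
# grouped by entity or time instead of using the whole panel.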
if sb_DM_delRows_wNA == "No" and n_rows_wNAs > 0:
st.markdown("**Data imputation**")
sb_DM_dImp_choice = st.selectbox("Replace entries with NA", ["No", "Yes"], key = st.session_state['key'])
if sb_DM_dImp_choice == "Yes":
# Numeric variables
sb_DM_dImp_num = st.selectbox("Imputation method for numeric variables", ["Mean", "Median", "Random value"], key = st.session_state['key'])
# Other variables
sb_DM_dImp_other = st.selectbox("Imputation method for other variables", ["Mode", "Random value"], key = st.session_state['key'])
group_by_num = st.selectbox("Group imputation by", ["None", "Entity", "Time"], key = st.session_state['key'])
group_by_other = group_by_num
df = fc.data_impute_panel(df, sb_DM_dImp_num, sb_DM_dImp_other, group_by_num, group_by_other, entity, time)
else:
st.markdown("**Data imputation**")
st.write("")
st.info("No NAs in data set!")
with a3:
#--------------------------------------------------------------------------------------
# DATA TRANSFORMATION
st.markdown("**Data transformation**")
# Select columns for different transformation types
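# Each transformation adds a new, prefixed column (e.g. log_x, sqrt_x,
# square_x, stand_x, norm_x, numCat_x) instead of overwriting the original variable.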
transform_options = df.drop([entity, time], axis = 1).select_dtypes([np.number]).columns
numCat_options = df.drop([entity, time], axis = 1).columns
sb_DM_dTrans_log = st.multiselect("Select columns to transform with log", transform_options, key = st.session_state['key'])
if sb_DM_dTrans_log is not None:
df = fc.var_transform_log(df, sb_DM_dTrans_log)
sb_DM_dTrans_sqrt = st.multiselect("Select columns to transform with sqrt", transform_options, key = st.session_state['key'])
if sb_DM_dTrans_sqrt is not None:
df = fc.var_transform_sqrt(df, sb_DM_dTrans_sqrt)
sb_DM_dTrans_square = st.multiselect("Select columns for squaring", transform_options, key = st.session_state['key'])
if sb_DM_dTrans_square is not None:
df = fc.var_transform_square(df, sb_DM_dTrans_square)
sb_DM_dTrans_cent = st.multiselect("Select columns for centering ", transform_options, key = st.session_state['key'])
if sb_DM_dTrans_cent is not None:
df = fc.var_transform_cent(df, sb_DM_dTrans_cent)
sb_DM_dTrans_stand = st.multiselect("Select columns for standardization", transform_options, key = st.session_state['key'])
if sb_DM_dTrans_stand is not None:
df = fc.var_transform_stand(df, sb_DM_dTrans_stand)
sb_DM_dTrans_norm = st.multiselect("Select columns for normalization", transform_options, key = st.session_state['key'])
if sb_DM_dTrans_norm is not None:
df = fc.var_transform_norm(df, sb_DM_dTrans_norm)
sb_DM_dTrans_numCat = st.multiselect("Select columns for numeric categorization ", numCat_options, key = st.session_state['key'])
if sb_DM_dTrans_numCat:
if not df[sb_DM_dTrans_numCat].columns[df[sb_DM_dTrans_numCat].isna().any()].tolist():
sb_DM_dTrans_numCat_sel = st.multiselect("Select variables for manual categorization ", sb_DM_dTrans_numCat, key = st.session_state['key'])
if sb_DM_dTrans_numCat_sel:
for var in sb_DM_dTrans_numCat_sel:
if df[var].unique().size > 5:
st.error("ERROR: Selected variable has too many categories (>5): " + str(var))
return
else:
manual_cats = pd.DataFrame(index = range(0, df[var].unique().size), columns=["Value", "Cat"])
text = "Category for "
# Save manually selected categories
for i in range(0, df[var].unique().size):
text1 = text + str(var) + ": " + str(sorted(df[var].unique())[i])
man_cat = st.number_input(text1, value = 0, min_value=0, key = st.session_state['key'])
manual_cats.loc[i]["Value"] = sorted(df[var].unique())[i]
manual_cats.loc[i]["Cat"] = man_cat
new_var_name = "numCat_" + var
new_var = pd.DataFrame(index = df.index, columns = [new_var_name])
for c in df[var].index:
if pd.isnull(df[var][c]) == True:
new_var.loc[c, new_var_name] = np.nan
elif pd.isnull(df[var][c]) == False:
new_var.loc[c, new_var_name] = int(manual_cats[manual_cats["Value"] == df[var][c]]["Cat"])
df[new_var_name] = new_var.astype('int64')
# Exclude columns with manual categorization from standard categorization
numCat_wo_manCat = [var for var in sb_DM_dTrans_numCat if var not in sb_DM_dTrans_numCat_sel]
df = fc.var_transform_numCat(df, numCat_wo_manCat)
else:
df = fc.var_transform_numCat(df, sb_DM_dTrans_numCat)
else:
col_with_na = df[sb_DM_dTrans_numCat].columns[df[sb_DM_dTrans_numCat].isna().any()].tolist()
st.error("ERROR: Please select columns without NAs: " + ', '.join(map(str,col_with_na)))
return
else:
sb_DM_dTrans_numCat = None
sb_DM_dTrans_mult = st.number_input("Number of variable multiplications ", value = 0, min_value=0, key = st.session_state['key'])
if sb_DM_dTrans_mult != 0:
multiplication_pairs = pd.DataFrame(index = range(0, sb_DM_dTrans_mult), columns=["Var1", "Var2"])
text = "Multiplication pair"
for i in range(0, sb_DM_dTrans_mult):
text1 = text + " " + str(i+1)
text2 = text + " " + str(i+1) + " "
mult_var1 = st.selectbox(text1, transform_options, key = st.session_state['key'])
mult_var2 = st.selectbox(text2, transform_options, key = st.session_state['key'])
multiplication_pairs.loc[i]["Var1"] = mult_var1
multiplication_pairs.loc[i]["Var2"] = mult_var2
fc.var_transform_mult(df, mult_var1, mult_var2)
sb_DM_dTrans_div = st.number_input("Number of variable divisions ", value = 0, min_value=0, key = st.session_state['key'])
if sb_DM_dTrans_div != 0:
division_pairs = pd.DataFrame(index = range(0, sb_DM_dTrans_div), columns=["Var1", "Var2"])
text = "Division pair"
for i in range(0, sb_DM_dTrans_div):
text1 = text + " " + str(i+1) + " (numerator)"
text2 = text + " " + str(i+1) + " (denominator)"
div_var1 = st.selectbox(text1, transform_options, key = st.session_state['key'])
div_var2 = st.selectbox(text2, transform_options, key = st.session_state['key'])
division_pairs.loc[i]["Var1"] = div_var1
division_pairs.loc[i]["Var2"] = div_var2
fc.var_transform_div(df, div_var1, div_var2)
data_transform=st.checkbox("Transform data in Excel?", value=False)
if data_transform==True:
st.info("Press the button to open your data in Excel. Don't forget to save your result as a csv or a txt file!")
# Download link
output = BytesIO()
excel_file = pd.ExcelWriter(output, engine="xlsxwriter")
df.to_excel(excel_file, sheet_name="data",index=False)
excel_file.save()
excel_file = output.getvalue()
b64 = base64.b64encode(excel_file)
dl_file_name = "Data_transformation__" + df_name + ".xlsx"
st.markdown(
f"""
<a href="data:file/excel_file;base64,{b64.decode()}" id="button_dl" download="{dl_file_name}">Transform your data in Excel</a>
""",
unsafe_allow_html=True)
st.write("")
#--------------------------------------------------------------------------------------
# PROCESSING SUMMARY
if st.checkbox('Show a summary of my data processing preferences', value = False, key = st.session_state['key']):
st.markdown("Summary of data changes:")
#--------------------------------------------------------------------------------------
# DATA CLEANING
# Rows
if sb_DM_delRows is not None and delRows!='-' :
if no_delRows > 1:
st.write("-", no_delRows, " rows were deleted!")
elif no_delRows == 1:
st.write("-",no_delRows, " row was deleted!")
elif no_delRows == 0:
st.write("- No row was deleted!")
else:
st.write("- No row was deleted!")
if sb_DM_keepRows is not None and keepRows!='-' :
if no_keptRows > 1:
st.write("-", no_keptRows, " rows are kept!")
elif no_keptRows == 1:
st.write("-",no_keptRows, " row is kept!")
elif no_keptRows == 0:
st.write("- All rows are kept!")
else:
st.write("- All rows are kept!")
# Columns
if len(sb_DM_delCols) > 1:
st.write("-", len(sb_DM_delCols), " columns were manually deleted:", ', '.join(sb_DM_delCols))
elif len(sb_DM_delCols) == 1:
st.write("-",len(sb_DM_delCols), " column was manually deleted:", str(sb_DM_delCols[0]))
elif len(sb_DM_delCols) == 0:
st.write("- No column was manually deleted!")
if len(sb_DM_keepCols) > 1:
st.write("-", len(sb_DM_keepCols), " columns are kept:", ', '.join(sb_DM_keepCols))
elif len(sb_DM_keepCols) == 1:
st.write("-",len(sb_DM_keepCols), " column is kept:", str(sb_DM_keepCols[0]))
elif len(sb_DM_keepCols) == 0:
st.write("- All columns are kept!")
# Duplicates
if sb_DM_delDup == "Yes":
if n_rows_dup > 1:
st.write("-", n_rows_dup, " duplicate rows were deleted!")
elif n_rows_dup == 1:
st.write("-", n_rows_dup, "duplicate row was deleted!")
else:
st.write("- No duplicate row was deleted!")
# NAs
if sb_DM_delRows_wNA == "Yes":
if n_rows_wNAs > 1:
st.write("-", n_rows_wNAs, "rows with NAs were deleted!")
elif n_rows_wNAs == 1:
st.write("-", n_rows_wNAs, "row with NAs was deleted!")
else:
st.write("- No row with NAs was deleted!")
# Filter
if filter_var != "-":
if df[filter_var].dtypes=="int64" or df[filter_var].dtypes=="float64":
if isinstance(filter_1, list):
if len(filter_1) == 0:
st.write("-", " Data was not filtered!")
elif len(filter_1) > 0:
st.write("-", " Data filtered by:", str(filter_var))
elif filter_1 is not None:
st.write("-", " Data filtered by:", str(filter_var))
else:
st.write("-", " Data was not filtered!")
elif len(filter_1)>0:
st.write("-", " Data filtered by:", str(filter_var))
elif len(filter_1) == 0:
st.write("-", " Data was not filtered!")
else:
st.write("-", " Data was not filtered!")
#--------------------------------------------------------------------------------------
# DATA IMPUTATION
if sb_DM_delRows_wNA == "No" and n_rows_wNAs > 0:
st.write("- Data imputation method for numeric variables:", sb_DM_dImp_num)
st.write("- Data imputation method for other variable types:", sb_DM_dImp_other)
st.write("- Imputation grouped by:", group_by_num)
#--------------------------------------------------------------------------------------
# DATA TRANSFORMATION
# log
if len(sb_DM_dTrans_log) > 1:
st.write("-", len(sb_DM_dTrans_log), " columns were log-transformed:", ', '.join(sb_DM_dTrans_log))
elif len(sb_DM_dTrans_log) == 1:
st.write("-",len(sb_DM_dTrans_log), " column was log-transformed:", sb_DM_dTrans_log[0])
elif len(sb_DM_dTrans_log) == 0:
st.write("- No column was log-transformed!")
# sqrt
if len(sb_DM_dTrans_sqrt) > 1:
st.write("-", len(sb_DM_dTrans_sqrt), " columns were sqrt-transformed:", ', '.join(sb_DM_dTrans_sqrt))
elif len(sb_DM_dTrans_sqrt) == 1:
st.write("-",len(sb_DM_dTrans_sqrt), " column was sqrt-transformed:", sb_DM_dTrans_sqrt[0])
elif len(sb_DM_dTrans_sqrt) == 0:
st.write("- No column was sqrt-transformed!")
# square
if len(sb_DM_dTrans_square) > 1:
st.write("-", len(sb_DM_dTrans_square), " columns were squared:", ', '.join(sb_DM_dTrans_square))
elif len(sb_DM_dTrans_square) == 1:
st.write("-",len(sb_DM_dTrans_square), " column was squared:", sb_DM_dTrans_square[0])
elif len(sb_DM_dTrans_square) == 0:
st.write("- No column was squared!")
# centering
if len(sb_DM_dTrans_cent) > 1:
st.write("-", len(sb_DM_dTrans_cent), " columns were centered:", ', '.join(sb_DM_dTrans_cent))
elif len(sb_DM_dTrans_cent) == 1:
st.write("-",len(sb_DM_dTrans_cent), " column was centered:", sb_DM_dTrans_cent[0])
elif len(sb_DM_dTrans_cent) == 0:
st.write("- No column was centered!")
# standardize
if len(sb_DM_dTrans_stand) > 1:
st.write("-", len(sb_DM_dTrans_stand), " columns were standardized:", ', '.join(sb_DM_dTrans_stand))
elif len(sb_DM_dTrans_stand) == 1:
st.write("-",len(sb_DM_dTrans_stand), " column was standardized:", sb_DM_dTrans_stand[0])
elif len(sb_DM_dTrans_stand) == 0:
st.write("- No column was standardized!")
# normalize
if len(sb_DM_dTrans_norm) > 1:
st.write("-", len(sb_DM_dTrans_norm), " columns were normalized:", ', '.join(sb_DM_dTrans_norm))
elif len(sb_DM_dTrans_norm) == 1:
st.write("-",len(sb_DM_dTrans_norm), " column was normalized:", sb_DM_dTrans_norm[0])
elif len(sb_DM_dTrans_norm) == 0:
st.write("- No column was normalized!")
# numeric category
if sb_DM_dTrans_numCat is not None:
if len(sb_DM_dTrans_numCat) > 1:
st.write("-", len(sb_DM_dTrans_numCat), " columns were transformed to numeric categories:", ', '.join(sb_DM_dTrans_numCat))
elif len(sb_DM_dTrans_numCat) == 1:
st.write("-",len(sb_DM_dTrans_numCat), " column was transformed to numeric categories:", sb_DM_dTrans_numCat[0])
elif sb_DM_dTrans_numCat is None:
st.write("- No column was transformed to numeric categories!")
# multiplication
if sb_DM_dTrans_mult != 0:
st.write("-", "Number of variable multiplications: ", sb_DM_dTrans_mult)
elif sb_DM_dTrans_mult == 0:
st.write("- No variables were multiplied!")
# division
if sb_DM_dTrans_div != 0:
st.write("-", "Number of variable divisions: ", sb_DM_dTrans_div)
elif sb_DM_dTrans_div == 0:
st.write("- No variables were divided!")
st.write("")
st.write("")
#------------------------------------------------------------------------------------------
#++++++++++++++++++++++
# UPDATED DATA SUMMARY
# Show only if changes were made
if any(v for v in [sb_DM_delCols, sb_DM_dImp_num, sb_DM_dImp_other, sb_DM_dTrans_log, sb_DM_dTrans_sqrt, sb_DM_dTrans_square, sb_DM_dTrans_cent, sb_DM_dTrans_stand, sb_DM_dTrans_norm, sb_DM_dTrans_numCat ] if v is not None) or sb_DM_delDup == "Yes" or sb_DM_delRows_wNA == "Yes" or sb_DM_dTrans_mult != 0 or sb_DM_dTrans_div != 0 or filter_var != "-" or delRows!='-' or keepRows!='-' or len(sb_DM_keepCols) > 0:
dev_expander_dsPost = st.expander("Explore cleaned and transformed panel data info and stats", expanded = False)
with dev_expander_dsPost:
if df.shape[1] > 2 and df.shape[0] > 0:
# Show cleaned and transformed data & data info
df_summary_post = fc.data_summary(df)
if st.checkbox("Show cleaned and transformed data", value = False):
n_rows_post = df.shape[0]
n_cols_post = df.shape[1]
st.dataframe(df)
st.write("Data shape: ", n_rows_post, "rows and ", n_cols_post, "columns")
# Download transformed data:
output = BytesIO()
excel_file = pd.ExcelWriter(output, engine="xlsxwriter")
df.to_excel(excel_file, sheet_name="Clean. and transf. data")
excel_file.save()
excel_file = output.getvalue()
b64 = base64.b64encode(excel_file)
dl_file_name = "CleanedTransfData__" + df_name + ".xlsx"
st.markdown(
f"""
<a href="data:file/excel_file;base64,{b64.decode()}" id="button_dl" download="{dl_file_name}">Download cleaned and transformed data</a>
""",
unsafe_allow_html=True)
st.write("")
if df[df.duplicated()].shape[0] > 0 or df.iloc[list(pd.unique(np.where(df.isnull())[0]))].shape[0] > 0:
check_nasAnddupl2 = st.checkbox("Show duplicates and NAs info (processed)", value = False)
if check_nasAnddupl2:
index_c = []
for c in df.columns:
for r in df.index:
if pd.isnull(df[c][r]):
index_c.append(r)
if df[df.duplicated()].shape[0] > 0:
st.write("Number of duplicates: ", df[df.duplicated()].shape[0])
st.write("Duplicate row index: ", ', '.join(map(str,list(df.index[df.duplicated()]))))
if df.iloc[list(pd.unique(np.where(df.isnull())[0]))].shape[0] > 0:
st.write("Number of rows with NAs: ", len(pd.unique(sorted(index_c))))
st.write("Rows with NAs: ", ', '.join(map(str,list(pd.unique(sorted(index_c))))))
# Show cleaned and transformed variable info
if st.checkbox("Show cleaned and transformed variable info", value = False):
st.write(df_summary_post["Variable types"])
# Show summary statistics (cleaned and transformed data)
if st.checkbox('Show summary statistics (cleaned and transformed data)', value = False):
st.write(df_summary_post["ALL"].style.set_precision(user_precision))
# Download link for cleaned data statistics
output = BytesIO()
excel_file = pd.ExcelWriter(output, engine="xlsxwriter")
df.to_excel(excel_file, sheet_name="cleaned_data")
df_summary_post["Variable types"].to_excel(excel_file, sheet_name="cleaned_variable_info")
df_summary_post["ALL"].to_excel(excel_file, sheet_name="cleaned_summary_statistics")
excel_file.save()
excel_file = output.getvalue()
b64 = base64.b64encode(excel_file)
dl_file_name = "Cleaned data summary statistics_panel_" + df_name + ".xlsx"
st.markdown(
f"""
<a href="data:file/excel_file;base64,{b64.decode()}" id="button_dl" download="{dl_file_name}">Download cleaned data summary statistics</a>
""",
unsafe_allow_html=True)
st.write("")
if fc.get_mode(df).loc["n_unique"].any():
st.caption("** Mode is not unique.")
if sett_hints:
st.info(str(fc.learning_hints("de_summary_statistics")))
else:
st.error("ERROR: No data available for preprocessing!")
return
dev_expander_anovPost = st.expander("ANOVA for cleaned and transformed panel data", expanded = False)
with dev_expander_anovPost:
if df.shape[1] > 2 and df.shape[0] > 0:
# Target variable
target_var2 = st.selectbox('Select target variable', df.drop([entity, time], axis = 1).columns)
if df[target_var2].dtypes == "int64" or df[target_var2].dtypes == "float64":
class_var_options = df.columns
class_var_options = class_var_options[class_var_options.isin(df.drop(target_var2, axis = 1).columns)]
clas_var2 = st.selectbox('Select classifier variable', [entity, time],)
# Means and sd by entity
col1, col2 = st.columns(2)
with col1:
df_anova_woTime = df.drop([time], axis = 1)
df_grouped_ent = df_anova_woTime.groupby(entity)
st.write("Mean based on entity:")
st.write(df_grouped_ent.mean()[target_var2])
st.write("")
with col2:
st.write("SD based on entity:")
st.write(df_grouped_ent.std()[target_var2])
st.write("")
# Means and sd by time
col3, col4 = st.columns(2)
with col3:
df_anova_woEnt= df.drop([entity], axis = 1)
df_grouped_time = df_anova_woEnt.groupby(time)
counts_time = pd.DataFrame(df_grouped_time.count()[target_var2])
counts_time.columns = ["count"]
st.write("Mean based on time:")
st.write(df_grouped_time.mean()[target_var2])
st.write("")
with col4:
st.write("SD based on time:")
st.write(df_grouped_time.std()[target_var2])
st.write("")
col9, col10 = st.columns(2)
with col9:
st.write("Boxplot grouped by entity:")
box_size1 = st.slider("Select box size (entity)", 1, 50, 5)
# Grouped boxplot by entity
grouped_boxplot_data = pd.DataFrame()
grouped_boxplot_data[entity] = df[entity]
grouped_boxplot_data[time] = df[time]
grouped_boxplot_data["Index"] = df.index
grouped_boxplot_data[target_var2] = df[target_var2]
grouped_boxchart_ent = alt.Chart(grouped_boxplot_data, height = 300).mark_boxplot(size = box_size1, color = "#1f77b4", median = dict(color = "darkred")).encode(
x = alt.X(entity, scale = alt.Scale(zero = False)),
y = alt.Y(target_var2, scale = alt.Scale(zero = False)),
tooltip = [target_var2, entity, time, "Index"]
).configure_axis(
labelFontSize = 11,
titleFontSize = 12
)
st.altair_chart(grouped_boxchart_ent, use_container_width=True)
with col10:
st.write("Boxplot grouped by time:")
box_size2 = st.slider("Select box size (time)", 1, 50, 5)
# Grouped boxplot by time
grouped_boxplot_data = pd.DataFrame()
grouped_boxplot_data[time] = df[time]
grouped_boxplot_data[entity] = df[entity]
grouped_boxplot_data["Index"] = df.index
grouped_boxplot_data[target_var2] = df[target_var2]
grouped_boxchart_time = alt.Chart(grouped_boxplot_data, height = 300).mark_boxplot(size = box_size2, color = "#1f77b4", median = dict(color = "darkred")).encode(
x = alt.X(time, scale = alt.Scale(domain = [min(df[time]), max(df[time])])),
y = alt.Y(target_var2, scale = alt.Scale(zero = False)),
tooltip = [target_var2, entity, time, "Index"]
).configure_axis(
labelFontSize = 11,
titleFontSize = 12
)
st.altair_chart(grouped_boxchart_time, use_container_width=True)
if sett_hints:
st.info(str(fc.learning_hints("de_anova_boxplot")))
st.write("")
# Count for entity and time
col5, col6 = st.columns(2)
with col5:
st.write("Number of observations per entity:")
counts_ent = pd.DataFrame(df_grouped_ent.count()[target_var2])
counts_ent.columns = ["count"]
st.write(counts_ent.transpose())
with col6:
st.write("Number of observations per time:")
counts_time = pd.DataFrame(df_grouped_time.count()[target_var2])
counts_time.columns = ["count"]
st.write(counts_time.transpose())
if sett_hints:
st.info(str(fc.learning_hints("de_anova_count")))
st.write("")
# ANOVA calculation
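# Same one-way ANOVA decomposition as for the raw data above, now computed on
# the cleaned and transformed panel.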
df_grouped = df[[target_var2,clas_var2]].groupby(clas_var2)
overall_mean = (df_grouped.mean()*df_grouped.count()).sum()/df_grouped.count().sum()
dof_between = len(df_grouped.count())-1
dof_within = df_grouped.count().sum()-len(df_grouped.count())
dof_tot = dof_between + dof_within
SS_between = (((df_grouped.mean()-overall_mean)**2)*df_grouped.count()).sum()
SS_within = (df_grouped.var()*(df_grouped.count()-1)).sum()
SS_total = SS_between + SS_within
MS_between = SS_between/dof_between
MS_within = SS_within/dof_within
F_stat = MS_between/MS_within
p_value = scipy.stats.f.sf(F_stat, dof_between, dof_within)
anova_table=pd.DataFrame({
"DF": [dof_between, dof_within.values[0], dof_tot.values[0]],
"SS": [SS_between.values[0], SS_within.values[0], SS_total.values[0]],
"MS": [MS_between.values[0], MS_within.values[0], ""],
"F-statistic": [F_stat.values[0], "", ""],
"p-value": [p_value[0], "", ""]},
index = ["Between", "Within", "Total"],)
st.write("ANOVA:")
st.write(anova_table)
if sett_hints:
st.info(str(fc.learning_hints("de_anova_table")))
st.write("")
# ANOVA (OLS)
codes = pd.factorize(df[clas_var2])[0]
ano_ols = sm.OLS(df[target_var2], sm.add_constant(codes))
ano_ols_output = ano_ols.fit()
residuals = ano_ols_output.resid
col7, col8 = st.columns(2)
with col7:
# QQ-plot
st.write("Normal QQ-plot:")
st.write("")
st.write("")
st.write("")
st.write("")
st.write("")
st.write("")
qq_plot_data = pd.DataFrame()
qq_plot_data["StandResiduals"] = (residuals - residuals.mean())/residuals.std()
qq_plot_data["Index"] = df.index
qq_plot_data[entity] = df[entity]
qq_plot_data[time] = df[time]
qq_plot_data = qq_plot_data.sort_values(by = ["StandResiduals"])
qq_plot_data["Theoretical quantiles"] = stats.probplot(residuals, dist="norm")[0][0]
qq_plot = alt.Chart(qq_plot_data, height = 300).mark_circle(size=20).encode(
x = alt.X("Theoretical quantiles", title = "theoretical quantiles", scale = alt.Scale(domain = [min(qq_plot_data["Theoretical quantiles"]), max(qq_plot_data["Theoretical quantiles"])]), axis = alt.Axis(titleFontSize = 12, labelFontSize = 11)),
y = alt.Y("StandResiduals", title = "stand. residuals", scale = alt.Scale(zero = False), axis = alt.Axis(titleFontSize = 12, labelFontSize = 11)),
tooltip = ["StandResiduals", "Theoretical quantiles", entity, time, "Index"]
)
line = alt.Chart(
pd.DataFrame({"Theoretical quantiles": [min(qq_plot_data["Theoretical quantiles"]), max(qq_plot_data["Theoretical quantiles"])], "StandResiduals": [min(qq_plot_data["Theoretical quantiles"]), max(qq_plot_data["Theoretical quantiles"])]})).mark_line(size = 2, color = "darkred").encode(
alt.X("Theoretical quantiles"),
alt.Y("StandResiduals"),
)
st.altair_chart(qq_plot + line, use_container_width = True)
with col8:
# Residuals histogram
st.write("Residuals histogram:")
residuals_hist = pd.DataFrame(residuals)
residuals_hist.columns = ["residuals"]
binNo_res2 = st.slider("Select maximum number of bins ", 5, 100, 25)
hist_plot = alt.Chart(residuals_hist, height = 300).mark_bar().encode(
x = alt.X("residuals", title = "residuals", bin = alt.BinParams(maxbins = binNo_res2), axis = alt.Axis(titleFontSize = 12, labelFontSize = 11)),
y = alt.Y("count()", title = "count of records", axis = alt.Axis(titleFontSize = 12, labelFontSize = 11)),
tooltip = ["count()", alt.Tooltip("residuals", bin = alt.BinParams(maxbins = binNo_res2))]
)
st.altair_chart(hist_plot, use_container_width=True)
if sett_hints:
st.info(str(fc.learning_hints("de_anova_residuals")))
# Download link for ANOVA statistics
output = BytesIO()
excel_file = pd.ExcelWriter(output, engine="xlsxwriter")
df_grouped_ent.mean()[target_var2].to_excel(excel_file, sheet_name="entity_mean")
df_grouped_ent.std()[target_var2].to_excel(excel_file, sheet_name="entity_sd")
df_grouped_time.mean()[target_var2].to_excel(excel_file, sheet_name="time_mean")
df_grouped_time.std()[target_var2].to_excel(excel_file, sheet_name="time_sd")
counts_ent.transpose().to_excel(excel_file, sheet_name="entity_obs")
counts_time.transpose().to_excel(excel_file, sheet_name="time_obs")
anova_table.to_excel(excel_file, sheet_name="ANOVA table")
excel_file.save()
excel_file = output.getvalue()
b64 = base64.b64encode(excel_file)
dl_file_name = "Cleaned ANOVA statistics__" + target_var2 + "__" + df_name + ".xlsx"
st.markdown(
f"""
<a href="data:file/excel_file;base64,{b64.decode()}" id="button_dl" download="{dl_file_name}">Download cleaned ANOVA statistics</a>
""",
unsafe_allow_html=True)
st.write("")
else:
st.error("ERROR: The target variable must be a numerical one!")
else:
st.error("ERROR: No data available for ANOVA!")
return
#------------------------------------------------------------------------------------------
#++++++++++++++++++++++
# DATA VISUALIZATION
data_visualization_container = st.container()
with data_visualization_container:
#st.write("")
st.write("")
st.write("")
st.header("**Data visualization**")
dev_expander_dv = st.expander("Explore visualization types", expanded = False)
with dev_expander_dv:
if df.shape[1] > 2 and df.shape[0] > 0:
st.write('**Variable selection**')
varl_sel_options = df.columns
varl_sel_options = varl_sel_options[varl_sel_options.isin(df.drop([entity, time], axis = 1).columns)]
var_sel = st.selectbox('Select variable for visualizations', varl_sel_options, key = st.session_state['key'])
if df[var_sel].dtypes == "float64" or df[var_sel].dtypes == "float32" or df[var_sel].dtypes == "int64" or df[var_sel].dtypes == "int32":
a4, a5 = st.columns(2)
with a4:
st.write('**Scatterplot with LOESS line**')
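# A locally weighted regression (LOESS) line is overlaid on the scatterplot to
# visualize the possibly non-linear relationship between the two variables.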
yy_options = df.columns
yy_options = yy_options[yy_options.isin(df.drop([entity, time], axis = 1).columns)]
yy = st.selectbox('Select variable for y-axis', yy_options, key = st.session_state['key'])
if df[yy].dtypes == "float64" or df[yy].dtypes == "float32" or df[yy].dtypes == "int64" or df[yy].dtypes == "int32":
fig_data = pd.DataFrame()
fig_data[yy] = df[yy]
fig_data[var_sel] = df[var_sel]
fig_data["Index"] = df.index
fig_data[entity] = df[entity]
fig_data[time] = df[time]
fig = alt.Chart(fig_data).mark_circle().encode(
x = alt.X(var_sel, scale = alt.Scale(domain = [min(fig_data[var_sel]), max(fig_data[var_sel])]), axis = alt.Axis(titleFontSize = 12, labelFontSize = 11)),
y = alt.Y(yy, scale = alt.Scale(zero = False), axis = alt.Axis(titleFontSize = 12, labelFontSize = 11)),
tooltip = [yy, var_sel, entity, time, "Index"]
)
st.altair_chart(fig + fig.transform_loess(var_sel, yy).mark_line(size = 2, color = "darkred"), use_container_width=True)
if sett_hints:
st.info(str(fc.learning_hints("dv_scatterplot")))
else: st.error("ERROR: Please select a numeric variable for the y-axis!")
with a5:
st.write('**Histogram**')
binNo = st.slider("Select maximum number of bins", 5, 100, 25, key = st.session_state['key'])
fig2 = alt.Chart(df).mark_bar().encode(
x = alt.X(var_sel, title = var_sel + " (binned)", bin = alt.BinParams(maxbins = binNo), axis = alt.Axis(titleFontSize = 12, labelFontSize = 11)),
y = alt.Y("count()", title = "count of records", axis = alt.Axis(titleFontSize = 12, labelFontSize = 11)),
tooltip = ["count()", alt.Tooltip(var_sel, bin = alt.BinParams(maxbins = binNo))]
)
st.altair_chart(fig2, use_container_width=True)
if sett_hints:
st.info(str(fc.learning_hints("dv_histogram")))
a6, a7 = st.columns(2)
with a6:
st.write('**Boxplot**')
# Boxplot
boxplot_data = pd.DataFrame()
boxplot_data[var_sel] = df[var_sel]
boxplot_data["Index"] = df.index
boxplot_data[entity] = df[entity]
boxplot_data[time] = df[time]
boxplot = alt.Chart(boxplot_data).mark_boxplot(size = 100, color = "#1f77b4", median = dict(color = "darkred")).encode(
y = alt.Y(var_sel, scale = alt.Scale(zero = False)),
tooltip = [var_sel, entity, time, "Index"]
).configure_axis(
labelFontSize = 11,
titleFontSize = 12
)
st.altair_chart(boxplot, use_container_width=True)
if sett_hints:
st.info(str(fc.learning_hints("dv_boxplot")))
with a7:
st.write("**QQ-plot**")
var_values = df[var_sel]
qqplot_data = pd.DataFrame()
qqplot_data[var_sel] = var_values
qqplot_data["Index"] = df.index
qqplot_data[entity] = df[entity]
qqplot_data[time] = df[time]
qqplot_data = qqplot_data.sort_values(by = [var_sel])
qqplot_data["Theoretical quantiles"] = stats.probplot(var_values, dist="norm")[0][0]
qqplot = alt.Chart(qqplot_data).mark_circle(size=20).encode(
x = alt.X("Theoretical quantiles", title = "theoretical quantiles", scale = alt.Scale(domain = [min(qqplot_data["Theoretical quantiles"]), max(qqplot_data["Theoretical quantiles"])]), axis = alt.Axis(titleFontSize = 12, labelFontSize = 11)),
y = alt.Y(var_sel, title = str(var_sel), scale = alt.Scale(zero = False), axis = alt.Axis(titleFontSize = 12, labelFontSize = 11)),
tooltip = [var_sel, "Theoretical quantiles", entity, time, "Index"]
)
st.altair_chart(qqplot + qqplot.transform_regression('Theoretical quantiles', var_sel).mark_line(size = 2, color = "darkred"), use_container_width = True)
if sett_hints:
st.info(str(fc.learning_hints("dv_qqplot")))
else: st.error("ERROR: Please select a numeric variable!")
else: st.error("ERROR: No data available for Data Visualization!")
# Check again after processing
if np.where(df[entity].isnull())[0].size > 0:
entity_na_warn = "WARNING: The variable selected for entity has NAs!"
else:entity_na_warn = False
if np.where(df[time].isnull())[0].size > 0:
time_na_warn = "WARNING: The variable selected for time has NAs!"
else:time_na_warn = False
#------------------------------------------------------------------------------------------
#++++++++++++++++++++++++++++++++++++++++++++
# PANEL DATA MODELLING
data_modelling_container = st.container()
with data_modelling_container:
#st.write("")
#st.write("")
#st.write("")
st.write("")
st.write("")
st.header("**Panel data modelling**")
st.markdown("Create predictive models of your panel data using panel data modelling! STATY takes care of the model estimation for you, so you can focus on interpreting and communicating the results!")
PDM_settings = st.expander("Specify model", expanded = False)
with PDM_settings:
if time_na_warn == False and entity_na_warn == False:
# Initial status for running models
model_full_results = None
do_modval = "No"
model_val_results = None
model_full_results = None
panel_model_fit = None
if df.shape[1] > 2 and df.shape[0] > 0:
#--------------------------------------------------------------------------------------
# GENERAL SETTINGS
st.markdown("**Variable selection**")
# Variable categories
df_summary_model = fc.data_summary(df)
var_cat = df_summary_model["Variable types"].loc["category"]
# Response variable
response_var_options = df.columns
response_var_options = response_var_options[response_var_options.isin(df.drop(entity, axis = 1).columns)]
if time != "NA":
response_var_options = response_var_options[response_var_options.isin(df.drop(time, axis = 1).columns)]
response_var = st.selectbox("Select response variable", response_var_options, key = st.session_state['key'])
# Check if response variable is numeric and has no NAs
response_var_message_num = False
response_var_message_na = False
response_var_message_cat = False
if var_cat.loc[response_var] == "string/binary" or var_cat.loc[response_var] == "bool/binary":
response_var_message_num = "ERROR: Please select a numeric response variable!"
elif var_cat.loc[response_var] == "string/categorical" or var_cat.loc[response_var] == "other" or var_cat.loc[response_var] == "string/single":
response_var_message_num = "ERROR: Please select a numeric response variable!"
elif var_cat.loc[response_var] == "categorical":
response_var_message_cat = "WARNING: Categorical variable is treated as continuous variable!"
if response_var_message_num != False:
st.error(response_var_message_num)
if response_var_message_na != False:
st.error(response_var_message_na)
if response_var_message_cat != False:
st.warning(response_var_message_cat)
# Continue if everything is clean for response variable
if response_var_message_num == False and response_var_message_na == False:
# Select explanatory variables
expl_var_options = response_var_options[response_var_options.isin(df.drop(response_var, axis = 1).columns)]
expl_var = st.multiselect("Select explanatory variables", expl_var_options, key = st.session_state['key'])
var_list = list([entity]) + list([time]) + list([response_var]) + list(expl_var)
# Check if explanatory variables are numeric
expl_var_message_num = False
expl_var_message_na = False
if any(a for a in df[expl_var].dtypes if a != "float64" and a != "float32" and a != "int64" and a != "int32"):
expl_var_not_num = df[expl_var].select_dtypes(exclude=["int64", "int32", "float64", "float32"]).columns
expl_var_message_num = "ERROR: Please exclude non-numeric variables: " + ', '.join(map(str,list(expl_var_not_num)))
# Check if NAs are present and delete them automatically (delete before run models button)
if np.where(df[var_list].isnull())[0].size > 0:
st.warning("WARNING: Your modelling data set includes NAs. Rows with NAs are automatically deleted!")
if expl_var_message_num != False:
st.error(expl_var_message_num)
elif expl_var_message_na != False:
st.error(expl_var_message_na)
# Continue if everything is clean for explanatory variables and at least one was selected
elif expl_var_message_num == False and expl_var_message_na == False and len(expl_var) > 0:
#--------------------------------------------------------------------------------------
# ALGORITHMS
st.markdown("**Specify modelling algorithm**")
# Algorithms selection
col1, col2 = st.columns(2)
algorithms = ["Entity Fixed Effects", "Time Fixed Effects", "Two-ways Fixed Effects", "Random Effects", "Pooled"]
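# Entity/time fixed effects absorb entity- or period-specific intercepts,
# two-ways fixed effects absorb both, random effects model the entity effect
# as a random intercept, and 'Pooled' ignores the panel structure (pooled OLS).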
with col1:
PDM_alg = st.selectbox("Select modelling technique", algorithms)
# Covariance type
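# Covariance estimator for the coefficient standard errors: homoskedastic
# (classical), heteroskedastic (robust), or clustered by entity, time or both.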
with col2:
PDM_cov_type = st.selectbox("Select covariance type", ["homoskedastic", "heteroskedastic", "clustered"])
PDM_cov_type2 = None
if PDM_cov_type == "clustered":
PDM_cov_type2 = st.selectbox("Select cluster type", ["entity", "time", "both"])
#--------------------------------------------------------------------------------------
# VALIDATION SETTINGS
st.markdown("**Validation settings**")
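# If enabled, the data are repeatedly split into a training part (share chosen
# below) and a test part, once per validation run.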
do_modval= st.selectbox("Use model validation", ["No", "Yes"])
if do_modval == "Yes":
col1, col2 = st.columns(2)
# Select training/ test ratio
with col1:
train_frac = st.slider("Select training data size", 0.5, 0.95, 0.8)
# Select number of validation runs
with col2:
val_runs = st.slider("Select number of validation runs", 5, 100, 10)
#--------------------------------------------------------------------------------------
# PREDICTION SETTINGS
st.markdown("**Model predictions**")
do_modprednew = st.selectbox("Use model prediction for new data", ["No", "Yes"])
if do_modprednew == "No":
df_new = pd.DataFrame()
if do_modprednew == "Yes":
# Upload new data
new_data_pred = st.file_uploader(" ", type=["csv", "txt"])
if new_data_pred is not None:
# Read data
if uploaded_data is not None:
df_new = pd.read_csv(new_data_pred, decimal=dec_sep, sep = col_sep,thousands=thousands_sep,encoding=encoding_val, engine='python')
else:
df_new = pd.read_csv(new_data_pred, sep = ";|,|\t",engine='python')
st.success('Loading data... done!')
# Transform columns if any were transformed
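# Every transformation applied to the modelling data (log, sqrt, square,
# standardization, normalization, numeric categorization, multiplication,
# division) is re-applied to the uploaded data so that it contains the same
# derived columns as the explanatory variables of the model.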
# Log-transformation
if sb_DM_dTrans_log is not None:
# List of log-transformed variables that are included as explanatory variables
tv_list = []
for tv in sb_DM_dTrans_log:
if "log_"+tv in expl_var:
tv_list.append(tv)
# Check if log-transformed explanatory variables are available for transformation in new data columns
tv_list_not_avail = []
if tv_list:
for tv in tv_list:
if tv not in df_new.columns:
tv_list_not_avail.append(tv)
if tv_list_not_avail:
st.error("ERROR: Some variables are not available for log-transformation in new data: "+ ', '.join(tv_list_not_avail))
return
else:
# Transform data if variables for transformation are all available in new data
df_new = fc.var_transform_log(df_new, tv_list)
# Sqrt-transformation
if sb_DM_dTrans_sqrt is not None:
# List of sqrt-transformed variables that are included as explanatory variables
tv_list = []
for tv in sb_DM_dTrans_sqrt:
if "sqrt_"+tv in expl_var:
tv_list.append(tv)
# Check if sqrt-transformed explanatory variables are available for transformation in new data columns
tv_list_not_avail = []
if tv_list:
for tv in tv_list:
if tv not in df_new.columns:
tv_list_not_avail.append(tv)
if tv_list_not_avail:
st.error("ERROR: Some variables are not available for sqrt-transformation in new data: "+ ', '.join(tv_list_not_avail))
return
else:
# Transform data if variables for transformation are all available in new data
df_new = fc.var_transform_sqrt(df_new, tv_list)
# Square-transformation
if sb_DM_dTrans_square is not None:
# List of square-transformed variables that are included as explanatory variables
tv_list = []
for tv in sb_DM_dTrans_square:
if "square_"+tv in expl_var:
tv_list.append(tv)
# Check if square-transformed explanatory variables are available for transformation in new data columns
tv_list_not_avail = []
if tv_list:
for tv in tv_list:
if tv not in df_new.columns:
tv_list_not_avail.append(tv)
if tv_list_not_avail:
st.error("ERROR: Some variables are not available for square-transformation in new data: "+ ', '.join(tv_list_not_avail))
return
else:
# Transform data if variables for transformation are all available in new data
df_new = fc.var_transform_square(df_new, tv_list)
# Standardization
if sb_DM_dTrans_stand is not None:
# List of standardized variables that are included as explanatory variables
tv_list = []
for tv in sb_DM_dTrans_stand:
if "stand_"+tv in expl_var:
tv_list.append(tv)
# Check if standardized explanatory variables are available for transformation in new data columns
tv_list_not_avail = []
if tv_list:
for tv in tv_list:
if tv not in df_new.columns:
tv_list_not_avail.append(tv)
if tv_list_not_avail:
st.error("ERROR: Some variables are not available for standardization in new data: "+ ', '.join(tv_list_not_avail))
return
else:
# Transform data if variables for transformation are all available in new data
# Use mean and standard deviation of original data for standardization
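# stand_x_new = (x_new - mean(x_original)) / sd(x_original)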
for tv in tv_list:
if df_new[tv].dtypes == "float64" or df_new[tv].dtypes == "int64" or df_new[tv].dtypes == "float32" or df_new[tv].dtypes == "int32":
if df[tv].std() != 0:
new_var_name = "stand_" + tv
new_var = (df_new[tv] - df[tv].mean())/df[tv].std()
df_new[new_var_name] = new_var
else:
st.error("ERROR: " + str(tv) + " is not numerical and cannot be standardized!")
return
# Normalization
if sb_DM_dTrans_norm is not None:
# List of normalized variables that are included as explanatory variables
tv_list = []
for tv in sb_DM_dTrans_norm:
if "norm_"+tv in expl_var:
tv_list.append(tv)
# Check if normalized explanatory variables are available for transformation in new data columns
tv_list_not_avail = []
if tv_list:
for tv in tv_list:
if tv not in df_new.columns:
tv_list_not_avail.append(tv)
if tv_list_not_avail:
st.error("ERROR: Some variables are not available for normalization in new data: "+ ', '.join(tv_list_not_avail))
return
else:
# Transform data if variables for transformation are all available in new data
# Use min and max of original data for normalization
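# norm_x_new = (x_new - min(x_original)) / (max(x_original) - min(x_original))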
for tv in tv_list:
if df_new[tv].dtypes == "float64" or df_new[tv].dtypes == "int64" or df_new[tv].dtypes == "float32" or df_new[tv].dtypes == "int32":
if (df[tv].max()-df[tv].min()) != 0:
new_var_name = "norm_" + tv
new_var = (df_new[tv] - df[tv].min())/(df[tv].max()-df[tv].min())
df_new[new_var_name] = new_var
else:
st.error("ERROR: " + str(tv) + " is not numerical and cannot be normalized!")
return
# Categorization
if sb_DM_dTrans_numCat is not None:
# List of categorized variables that are included as explanatory variables
tv_list = []
for tv in sb_DM_dTrans_numCat:
if "numCat_"+tv in expl_var:
tv_list.append(tv)
# Check if categorized explanatory variables are available for transformation in new data columns
tv_list_not_avail = []
if tv_list:
for tv in tv_list:
if tv not in df_new.columns:
tv_list_not_avail.append(tv)
if tv_list_not_avail:
st.error("ERROR: Some variables are not available for categorization in new data: "+ ', '.join(tv_list_not_avail))
return
else:
# Transform data if variables for transformation are all available in new data
# Use same categories as for original data
for tv in tv_list:
new_var_name = "numCat_" + tv
new_var = pd.DataFrame(index = df_new.index, columns = [new_var_name])
for r in df_new.index:
if df.loc[df[tv] == df_new[tv][r]].empty == False:
new_var.loc[r, new_var_name] = df["numCat_" + tv][df.loc[df[tv] == df_new[tv][r]].index[0]]
else:
st.error("ERROR: Category is missing for the value in row: "+ str(r) + ", variable: " + str(tv))
return
df_new[new_var_name] = new_var.astype('int64')
# Multiplication
if sb_DM_dTrans_mult != 0:
# List of multiplied variables that are included as explanatory variables
tv_list = []
for tv in range(0, sb_DM_dTrans_mult):
mult_name = "mult_" + str(multiplication_pairs.loc[tv]["Var1"]) + "_" + str(multiplication_pairs.loc[tv]["Var2"])
if mult_name in expl_var:
tv_list.append(str(multiplication_pairs.loc[tv]["Var1"]))
tv_list.append(str(multiplication_pairs.loc[tv]["Var2"]))
# Check if multiplied explanatory variables are available for transformation in new data columns
tv_list_not_avail = []
if tv_list:
for tv in tv_list:
if tv not in df_new.columns:
tv_list_not_avail.append(tv)
if tv_list_not_avail:
st.error("ERROR: Some variables are not available for multiplication in new data: "+ ', '.join(tv_list_not_avail))
return
else:
# Transform data if variables for transformation are all available in new data
for var in range(0, sb_DM_dTrans_mult):
df_new = fc.var_transform_mult(df_new, multiplication_pairs.loc[var]["Var1"], multiplication_pairs.loc[var]["Var2"])
# Division
if sb_DM_dTrans_div != 0:
# List of divided variables that are included as explanatory variables
tv_list = []
for tv in range(0, sb_DM_dTrans_div):
mult_name = "div_" + str(division_pairs.loc[tv]["Var1"]) + "_" + str(division_pairs.loc[tv]["Var2"])
if mult_name in expl_var:
tv_list.append(str(division_pairs.loc[tv]["Var1"]))
tv_list.append(str(division_pairs.loc[tv]["Var2"]))
# Check if divided explanatory variables are available for transformation in new data columns
tv_list_not_avail = []
if tv_list:
for tv in tv_list:
if tv not in df_new.columns:
tv_list_not_avail.append(tv)
if tv_list_not_avail:
st.error("ERROR: Some variables are not available for division in new data: "+ ', '.join(tv_list_not_avail))
return
else:
# Transform data if variables for transformation are all available in new data
for var in range(0, sb_DM_dTrans_div):
df_new = fc.var_transform_div(df_new, division_pairs.loc[var]["Var1"], division_pairs.loc[var]["Var2"])
# Check if explanatory variables are available as columns as well as entity and time
expl_list = []
for expl_incl in expl_var:
if expl_incl not in df_new.columns:
expl_list.append(expl_incl)
if expl_list:
st.error("ERROR: Some variables are missing in new data: "+ ', '.join(expl_list))
return
if any(a for a in df_new.columns if a == entity) and any(a for a in df_new.columns if a == time):
st.info("All variables are available for predictions!")
elif any(a for a in df_new.columns if a == entity) == False:
st.error("ERROR: Entity variable is missing!")
return
elif any(a for a in df_new.columns if a == time) == False:
st.error("ERROR: Time variable is missing!")
return
# Check if NAs are present
if df_new.iloc[list(pd.unique(np.where(df_new.isnull())[0]))].shape[0] == 0:
st.empty()
else:
df_new = df_new[list([entity]) + list([time]) + expl_var].dropna()
st.warning("WARNING: Your new data set includes NAs. Rows with NAs are automatically deleted!")
df_new = df_new[list([entity]) + list([time]) + expl_var]
# Modelling data set
df = df[var_list]
# Check if NAs are present and delete them automatically
if np.where(df[var_list].isnull())[0].size > 0:
df = df.dropna()
#--------------------------------------------------------------------------------------
# SETTINGS SUMMARY
st.write("")
# Show modelling data
if st.checkbox("Show modelling data"):
st.write(df)
st.write("Data shape: ", df.shape[0], " rows and ", df.shape[1], " columns")
# Download link for modelling data
output = BytesIO()
excel_file = pd.ExcelWriter(output, engine="xlsxwriter")
df.to_excel(excel_file, sheet_name="modelling_data")
excel_file.save()
excel_file = output.getvalue()
b64 = base64.b64encode(excel_file)
dl_file_name= "Modelling data__" + df_name + ".xlsx"
st.markdown(
f"""
<a href="data:file/excel_file;base64,{b64.decode()}" id="button_dl" download="{dl_file_name}">Download modelling data</a>
""",
unsafe_allow_html=True)
st.write("")
# Show prediction data
if do_modprednew == "Yes":
if new_data_pred is not None:
if st.checkbox("Show new data for predictions"):
st.write(df_new)
st.write("Data shape: ", df_new.shape[0], " rows and ", df_new.shape[1], " columns")
# Download link for forecast data
output = BytesIO()
excel_file = pd.ExcelWriter(output, engine="xlsxwriter")
df_new.to_excel(excel_file, sheet_name="new_data")
excel_file.save()
excel_file = output.getvalue()
b64 = base64.b64encode(excel_file)
dl_file_name= "New data for predictions__" + df_name + ".xlsx"
st.markdown(
f"""
<a href="data:file/excel_file;base64,{b64.decode()}" id="button_dl" download="{dl_file_name}">Download new data for predictions</a>
""",
unsafe_allow_html=True)
st.write("")
# Show modelling settings
if st.checkbox('Show a summary of modelling settings', value = False):
#--------------------------------------------------------------------------------------
# ALGORITHMS
st.write("Algorithms summary:")
st.write("- ",PDM_alg)
st.write("- Covariance type: ", PDM_cov_type)
if PDM_cov_type2 is not None:
st.write("- Cluster type: ", PDM_cov_type2)
st.write("")
#--------------------------------------------------------------------------------------
# SETTINGS
# General settings summary
st.write("General settings summary:")
# Modelling formula
if expl_var != False:
st.write("- Modelling formula:", response_var, "~", ' + '.join(expl_var))
st.write("- Entity:", entity)
st.write("- Time:", time)
if do_modval == "Yes":
# Train/ test ratio
if train_frac != False:
st.write("- Train/ test ratio:", str(round(train_frac*100)), "% / ", str(round(100-train_frac*100)), "%")
# Validation runs
if val_runs != False:
st.write("- Validation runs:", str(val_runs))
st.write("")
st.write("")
#--------------------------------------------------------------------------------------
# RUN MODELS
# Models are run on button click
st.write("")
run_models = st.button("Run model")
st.write("")
# Run everything on button click
if run_models:
# Check if new data available
if do_modprednew == "Yes":
if new_data_pred is None:
st.error("ERROR: Please upload new data for additional model predictions or select 'No'!")
return
# Define clustered cov matrix "entity", "time", "both"
cluster_entity = True
cluster_time = False
if PDM_cov_type == "clustered":
if PDM_cov_type2 == "entity":
cluster_entity = True
cluster_time = False
if PDM_cov_type2 == "time":
cluster_entity = False
cluster_time = True
if PDM_cov_type2 == "both":
cluster_entity = True
cluster_time = True
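# cluster_entity / cluster_time encode the covariance choice made in the UI and are
# passed to every fit() call below.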
# Prepare data
data = df.set_index([entity, time])
Y_data = data[response_var]
X_data1 = data[expl_var] # for efe, tfe, twfe
X_data2 = sm.add_constant(data[expl_var]) # for re, pool
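# linearmodels' panel estimators expect data indexed by an (entity, time) MultiIndex,
# which set_index above provides; a constant column is only added for the Random
# Effects and Pooled specifications.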
# Model validation
if do_modval == "Yes":
# Progress bar
st.info("Validation progress")
my_bar = st.progress(0.0)
progress1 = 0
# Model validation
# R²
model_eval_r2 = pd.DataFrame(index = range(val_runs), columns = [response_var])
# MSE
model_eval_mse = pd.DataFrame(index = range(val_runs), columns = ["Value"])
# RMSE
model_eval_rmse = pd.DataFrame(index = range(val_runs), columns = ["Value"])
# MAE
model_eval_mae = pd.DataFrame(index = range(val_runs), columns = ["Value"])
# MaxERR
model_eval_maxerr = pd.DataFrame(index = range(val_runs), columns = ["Value"])
# EVRS
model_eval_evrs = pd.DataFrame(index = range(val_runs), columns = ["Value"])
# SSR
model_eval_ssr = pd.DataFrame(index = range(val_runs), columns = ["Value"])
# Model validation summary
model_eval_mean = pd.DataFrame(index = ["% VE", "MSE", "RMSE", "MAE", "MaxErr", "EVRS", "SSR"], columns = ["Value"])
model_eval_sd = pd.DataFrame(index = ["% VE", "MSE", "RMSE", "MAE", "MaxErr", "EVRS", "SSR"], columns = ["Value"])
# Collect all residuals in test runs
residuals_allruns = {}
for val in range(val_runs):
# Split data into train/ test data
if PDM_alg != "Pooled" and PDM_alg != "Random Effects":
X_data = X_data1.copy()
if PDM_alg == "Pooled" or PDM_alg == "Random Effects":
X_data = X_data2.copy()
X_train, X_test, Y_train, Y_test = train_test_split(X_data, Y_data, train_size = train_frac, random_state = val)
# Train selected panel model
# efe
if PDM_alg == "Entity Fixed Effects":
panel_model_efe_val = PanelOLS(Y_train, X_train, entity_effects = True, time_effects = False)
panel_model_fit_efe_val = panel_model_efe_val.fit(cov_type = PDM_cov_type, cluster_entity = cluster_entity, cluster_time = cluster_time, debiased = True, auto_df = True)
# tfe
if PDM_alg == "Time Fixed Effects":
panel_model_tfe_val = PanelOLS(Y_train, X_train, entity_effects = False, time_effects = True)
panel_model_fit_tfe_val = panel_model_tfe_val.fit(cov_type = PDM_cov_type, cluster_entity = cluster_entity, cluster_time = cluster_time, debiased = True, auto_df = True)
# twfe
if PDM_alg == "Two-ways Fixed Effects":
panel_model_twfe_val = PanelOLS(Y_train, X_train, entity_effects = True, time_effects = True)
panel_model_fit_twfe_val = panel_model_twfe_val.fit(cov_type = PDM_cov_type, cluster_entity = cluster_entity, cluster_time = cluster_time, debiased = True, auto_df = True)
# re
if PDM_alg == "Random Effects":
panel_model_re_val = RandomEffects(Y_train, X_train)
panel_model_fit_re_val = panel_model_re_val.fit(cov_type = PDM_cov_type, cluster_entity = cluster_entity, cluster_time = cluster_time, debiased = True, auto_df = True)
# pool
if PDM_alg == "Pooled":
panel_model_pool_val = PooledOLS(Y_train, X_train)
panel_model_fit_pool_val = panel_model_pool_val.fit(cov_type = PDM_cov_type, cluster_entity = cluster_entity, cluster_time = cluster_time, debiased = True, auto_df = True)
# save selected model
if PDM_alg == "Entity Fixed Effects":
panel_model_fit_val = panel_model_fit_efe_val
if PDM_alg == "Time Fixed Effects":
panel_model_fit_val = panel_model_fit_tfe_val
if PDM_alg == "Two-ways Fixed Effects":
panel_model_fit_val = panel_model_fit_twfe_val
if PDM_alg == "Random Effects":
panel_model_fit_val = panel_model_fit_re_val
if PDM_alg == "Pooled":
panel_model_fit_val = panel_model_fit_pool_val
# Extract effects
if PDM_alg != "Pooled":
comb_effects = panel_model_fit_val.estimated_effects
ent_effects = pd.DataFrame(index = X_train.reset_index()[entity].drop_duplicates(), columns = ["Value"])
time_effects = pd.DataFrame(index = sorted(list(X_train.reset_index()[time].drop_duplicates())), columns = ["Value"])
# Use LSDV for estimating effects
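# LSDV (least squares dummy variables): refit by OLS with entity/time dummies so that
# the dummy coefficients recover the fixed effects absorbed by PanelOLS.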
if PDM_alg == "Entity Fixed Effects":
X_train_mlr = pd.concat([X_train.reset_index(drop = True), pd.get_dummies(X_train.reset_index()[entity])], axis = 1)
Y_train_mlr = Y_train.reset_index(drop = True)
model_mlr_val = sm.OLS(Y_train_mlr, X_train_mlr)
model_mlr_fit_val = model_mlr_val.fit()
for e in ent_effects.index:
ent_effects.loc[e]["Value"] = model_mlr_fit_val.params[e]
for t in time_effects.index:
time_effects.loc[t]["Value"] = 0
if PDM_alg == "Time Fixed Effects":
X_train_mlr = pd.concat([X_train.reset_index(drop = True), pd.get_dummies(X_train.reset_index()[time])], axis = 1)
Y_train_mlr = Y_train.reset_index(drop = True)
model_mlr_val = sm.OLS(Y_train_mlr, X_train_mlr)
model_mlr_fit_val = model_mlr_val.fit()
for e in ent_effects.index:
ent_effects.loc[e]["Value"] = 0
for t in time_effects.index:
time_effects.loc[t]["Value"] = model_mlr_fit_val.params[t]
if PDM_alg == "Two-ways Fixed Effects":
X_train_mlr = pd.concat([X_train.reset_index(drop = True), pd.get_dummies(X_train.reset_index()[entity]), pd.get_dummies(X_train.reset_index()[time])], axis = 1)
Y_train_mlr = Y_train.reset_index(drop = True)
model_mlr_val = sm.OLS(Y_train_mlr, X_train_mlr)
model_mlr_fit_val = model_mlr_val.fit()
for e in ent_effects.index:
ent_effects.loc[e]["Value"] = model_mlr_fit_val.params[e]
for t in time_effects.index:
time_effects.loc[t]["Value"] = model_mlr_fit_val.params[t]
if PDM_alg == "Random Effects":
for e in ent_effects.index:
ent_effects.loc[e]["Value"] = comb_effects.loc[e,].reset_index(drop = True).iloc[0][0]
# Prediction for Y_test (without including effects)
Y_test_pred = panel_model_fit_val.predict(X_test)
# Add effects for predictions
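# Predictions from the coefficients alone exclude the absorbed effects, so the estimated
# entity/time effect of each test observation is added back below.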
for p in range(Y_test_pred.size):
entity_ind = Y_test_pred.index[p][0]
time_ind = Y_test_pred.index[p][1]
# if effects are available, add effect
if PDM_alg == "Entity Fixed Effects":
if any(a for a in ent_effects.index if a == entity_ind):
effect = ent_effects.loc[entity_ind][0]
Y_test_pred["predictions"].loc[entity_ind, time_ind] = Y_test_pred["predictions"].loc[entity_ind, time_ind] + effect
if PDM_alg == "Time Fixed Effects":
if any(a for a in time_effects.index if a == time_ind):
effect = time_effects.loc[time_ind][0]
Y_test_pred["predictions"].loc[entity_ind, time_ind] = Y_test_pred["predictions"].loc[entity_ind, time_ind] + effect
if PDM_alg == "Two-ways Fixed Effects":
if any(a for a in time_effects.index if a == time_ind):
effect_time = time_effects.loc[time_ind][0]
else: effect_time = 0
if any(a for a in ent_effects.index if a == entity_ind):
effect_entity = ent_effects.loc[entity_ind][0]
else: effect_entity = 0
Y_test_pred["predictions"].loc[entity_ind, time_ind] = Y_test_pred["predictions"].loc[entity_ind, time_ind] + effect_entity + effect_time
if PDM_alg == "Random Effects":
if any(a for a in ent_effects.index if a == entity_ind):
effect = ent_effects.loc[entity_ind][0]
Y_test_pred["predictions"].loc[entity_ind, time_ind] = Y_test_pred["predictions"].loc[entity_ind, time_ind] + effect
# Adjust format
Y_test_pred = Y_test_pred.reset_index()["predictions"]
Y_test = Y_test.reset_index()[response_var]
# Save R² for test data
model_eval_r2.iloc[val][response_var] = r2_score(Y_test, Y_test_pred)
# Save MSE for test data
model_eval_mse.iloc[val]["Value"] = mean_squared_error(Y_test, Y_test_pred, squared = True)
# Save RMSE for test data
model_eval_rmse.iloc[val]["Value"] = mean_squared_error(Y_test, Y_test_pred, squared = False)
# Save MAE for test data
model_eval_mae.iloc[val]["Value"] = mean_absolute_error(Y_test, Y_test_pred)
# Save MaxERR for test data
model_eval_maxerr.iloc[val]["Value"] = max_error(Y_test, Y_test_pred)
# Save explained variance regression score for test data
model_eval_evrs.iloc[val]["Value"] = explained_variance_score(Y_test, Y_test_pred)
# Save sum of squared residuals for test data
model_eval_ssr.iloc[val]["Value"] = ((Y_test-Y_test_pred)**2).sum()
# Save residual values for test data
res = Y_test-Y_test_pred
residuals_allruns[val] = res
progress1 += 1
my_bar.progress(progress1/(val_runs))
# Calculate mean performance statistics
# Mean
model_eval_mean.loc["% VE"]["Value"] = model_eval_r2[response_var].mean()
model_eval_mean.loc["MSE"]["Value"] = model_eval_mse["Value"].mean()
model_eval_mean.loc["RMSE"]["Value"] = model_eval_rmse["Value"].mean()
model_eval_mean.loc["MAE"]["Value"] = model_eval_mae["Value"].mean()
model_eval_mean.loc["MaxErr"]["Value"] = model_eval_maxerr["Value"].mean()
model_eval_mean.loc["EVRS"]["Value"] = model_eval_evrs["Value"].mean()
model_eval_mean.loc["SSR"]["Value"] = model_eval_ssr["Value"].mean()
# Sd
model_eval_sd.loc["% VE"]["Value"] = model_eval_r2[response_var].std()
model_eval_sd.loc["MSE"]["Value"] = model_eval_mse["Value"].std()
model_eval_sd.loc["RMSE"]["Value"] = model_eval_rmse["Value"].std()
model_eval_sd.loc["MAE"]["Value"] = model_eval_mae["Value"].std()
model_eval_sd.loc["MaxErr"]["Value"] = model_eval_maxerr["Value"].std()
model_eval_sd.loc["EVRS"]["Value"] = model_eval_evrs["Value"].std()
model_eval_sd.loc["SSR"]["Value"] = model_eval_ssr["Value"].std()
# Residuals
residuals_collection = pd.DataFrame()
for x in residuals_allruns:
residuals_collection = residuals_collection.append(pd.DataFrame(residuals_allruns[x]), ignore_index = True)
residuals_collection.columns = [response_var]
# Collect validation results
model_val_results = {}
model_val_results["mean"] = model_eval_mean
model_val_results["sd"] = model_eval_sd
model_val_results["residuals"] = residuals_collection
model_val_results["variance explained"] = model_eval_r2
# Full model
# Progress bar
st.info("Full model progress")
my_bar_fm = st.progress(0.0)
progress2 = 0
# efe
panel_model_efe = PanelOLS(Y_data, X_data1, entity_effects = True, time_effects = False)
panel_model_fit_efe = panel_model_efe.fit(cov_type = PDM_cov_type, cluster_entity = cluster_entity, cluster_time = cluster_time, debiased = True, auto_df = True)
# tfe
panel_model_tfe = PanelOLS(Y_data, X_data1, entity_effects = False, time_effects = True)
panel_model_fit_tfe = panel_model_tfe.fit(cov_type = PDM_cov_type, cluster_entity = cluster_entity, cluster_time = cluster_time, debiased = True, auto_df = True)
# twfe
panel_model_twfe = PanelOLS(Y_data, X_data1, entity_effects = True, time_effects = True)
panel_model_fit_twfe = panel_model_twfe.fit(cov_type = PDM_cov_type, cluster_entity = cluster_entity, cluster_time = cluster_time, debiased = True, auto_df = True)
# re
panel_model_re = RandomEffects(Y_data, X_data2)
panel_model_fit_re = panel_model_re.fit(cov_type = PDM_cov_type, cluster_entity = cluster_entity, cluster_time = cluster_time, debiased = True, auto_df = True)
# pool
panel_model_pool = PooledOLS(Y_data, X_data2)
panel_model_fit_pool = panel_model_pool.fit(cov_type = PDM_cov_type, cluster_entity = cluster_entity, cluster_time = cluster_time, debiased = True, auto_df = True)
# save selected model
if PDM_alg == "Entity Fixed Effects":
panel_model_fit = panel_model_fit_efe
if PDM_alg == "Time Fixed Effects":
panel_model_fit = panel_model_fit_tfe
if PDM_alg == "Two-ways Fixed Effects":
panel_model_fit = panel_model_fit_twfe
if PDM_alg == "Random Effects":
panel_model_fit = panel_model_fit_re
if PDM_alg == "Pooled":
panel_model_fit = panel_model_fit_pool
# Entity information
ent_inf = pd.DataFrame(index = ["No. entities", "Avg observations", "Median observations", "Min observations", "Max observations"], columns = ["Value"])
ent_inf.loc["No. entities"] = panel_model_fit.entity_info["total"]
ent_inf.loc["Avg observations"] = panel_model_fit.entity_info["mean"]
ent_inf.loc["Median observations"] = panel_model_fit.entity_info["median"]
ent_inf.loc["Min observations"] = panel_model_fit.entity_info["min"]
ent_inf.loc["Max observations"] = panel_model_fit.entity_info["max"]
# Time information
time_inf = pd.DataFrame(index = ["No. time periods", "Avg observations", "Median observations", "Min observations", "Max observations"], columns = ["Value"])
time_inf.loc["No. time periods"] = panel_model_fit.time_info["total"]
time_inf.loc["Avg observations"] = panel_model_fit.time_info["mean"]
time_inf.loc["Median observations"] = panel_model_fit.time_info["median"]
time_inf.loc["Min observations"] = panel_model_fit.time_info["min"]
time_inf.loc["Max observations"] = panel_model_fit.time_info["max"]
# Regression information
reg_inf = pd.DataFrame(index = ["Dep. variable", "Estimator", "Method", "No. observations", "DF residuals", "DF model", "Covariance type"], columns = ["Value"])
reg_inf.loc["Dep. variable"] = response_var
reg_inf.loc["Estimator"] = panel_model_fit.name
if PDM_alg == "Entity Fixed Effects" or PDM_alg == "Time Fixed Effects" or "Two-ways Fixed":
reg_inf.loc["Method"] = "Within"
if PDM_alg == "Random Effects":
reg_inf.loc["Method"] = "Quasi-demeaned"
if PDM_alg == "Pooled":
reg_inf.loc["Method"] = "Least squares"
reg_inf.loc["No. observations"] = panel_model_fit.nobs
reg_inf.loc["DF residuals"] = panel_model_fit.df_resid
reg_inf.loc["DF model"] = panel_model_fit.df_model
reg_inf.loc["Covariance type"] = panel_model_fit._cov_type
# Regression statistics
fitted = df[response_var]-panel_model_fit.resids.values
obs = df[response_var]
reg_stats = pd.DataFrame(index = ["R²", "R² (between)", "R² (within)", "R² (overall)", "Log-likelihood", "SST", "SST (overall)"], columns = ["Value"])
reg_stats.loc["R²"] = panel_model_fit._r2
reg_stats.loc["R² (between)"] = panel_model_fit._c2b**2
reg_stats.loc["R² (within)"] = panel_model_fit._c2w**2
reg_stats.loc["R² (overall)"] = panel_model_fit._c2o**2
reg_stats.loc["Log-likelihood"] = panel_model_fit._loglik
reg_stats.loc["SST"] = panel_model_fit.total_ss
reg_stats.loc["SST (overall)"] = ((obs-obs.mean())**2).sum()
# Overall performance metrics (with effects)
reg_overall = pd.DataFrame(index = ["% VE", "MSE", "RMSE", "MAE", "MaxErr", "EVRS", "SSR"], columns = ["Value"])
reg_overall.loc["% VE"] = r2_score(obs, fitted)
reg_overall.loc["MSE"] = mean_squared_error(obs, fitted, squared = True)
reg_overall.loc["RMSE"] = mean_squared_error(obs, fitted, squared = False)
reg_overall.loc["MAE"] = mean_absolute_error(obs, fitted)
reg_overall.loc["MaxErr"] = max_error(obs, fitted)
reg_overall.loc["EVRS"] = explained_variance_score(obs, fitted)
reg_overall.loc["SSR"] = ((obs-fitted)**2).sum()
# ANOVA
if PDM_alg == "Pooled":
Y_data_mlr = df[response_var]
X_data_mlr = sm.add_constant(df[expl_var])
full_model_mlr = sm.OLS(Y_data_mlr, X_data_mlr)
full_model_fit = full_model_mlr.fit()
reg_anova = pd.DataFrame(index = ["Regression", "Residual", "Total"], columns = ["DF", "SS", "MS", "F-statistic"])
reg_anova.loc["Regression"]["DF"] = full_model_fit.df_model
reg_anova.loc["Regression"]["SS"] = full_model_fit.ess
reg_anova.loc["Regression"]["MS"] = full_model_fit.ess/full_model_fit.df_model
reg_anova.loc["Regression"]["F-statistic"] = full_model_fit.fvalue
reg_anova.loc["Residual"]["DF"] = full_model_fit.df_resid
reg_anova.loc["Residual"]["SS"] = full_model_fit.ssr
reg_anova.loc["Residual"]["MS"] = full_model_fit.ssr/full_model_fit.df_resid
reg_anova.loc["Residual"]["F-statistic"] = ""
reg_anova.loc["Total"]["DF"] = full_model_fit.df_resid + full_model_fit.df_model
reg_anova.loc["Total"]["SS"] = full_model_fit.ssr + full_model_fit.ess
reg_anova.loc["Total"]["MS"] = ""
reg_anova.loc["Total"]["F-statistic"] = ""
# Coefficients
if PDM_alg == "Entity Fixed Effects" or PDM_alg == "Time Fixed Effects" or "Two-ways Fixed Effects":
reg_coef = pd.DataFrame(index = expl_var, columns = ["coeff", "std err", "t-statistic", "p-value", "lower 95%", "upper 95%"])
for c in expl_var:
reg_coef.loc[c]["coeff"] = panel_model_fit.params[expl_var.index(c)]
reg_coef.loc[c]["std err"] = panel_model_fit.std_errors.loc[c]
reg_coef.loc[c]["t-statistic"] = panel_model_fit.tstats.loc[c]
reg_coef.loc[c]["p-value"] = panel_model_fit.pvalues.loc[c]
reg_coef.loc[c]["lower 95%"] = panel_model_fit.conf_int(level = 0.95).loc[c]["lower"]
reg_coef.loc[c]["upper 95%"] = panel_model_fit.conf_int(level = 0.95).loc[c]["upper"]
if PDM_alg == "Random Effects" or PDM_alg == "Pooled":
reg_coef = pd.DataFrame(index = ["const"]+ expl_var, columns = ["coeff", "std err", "t-statistic", "p-value", "lower 95%", "upper 95%"])
for c in ["const"] + expl_var:
reg_coef.loc[c]["coeff"] = panel_model_fit.params[(["const"]+ expl_var).index(c)]
reg_coef.loc[c]["std err"] = panel_model_fit.std_errors.loc[c]
reg_coef.loc[c]["t-statistic"] = panel_model_fit.tstats.loc[c]
reg_coef.loc[c]["p-value"] = panel_model_fit.pvalues.loc[c]
reg_coef.loc[c]["lower 95%"] = panel_model_fit.conf_int(level = 0.95).loc[c]["lower"]
reg_coef.loc[c]["upper 95%"] = panel_model_fit.conf_int(level = 0.95).loc[c]["upper"]
# Effects
reg_ent_effects = pd.DataFrame(index = df[entity].drop_duplicates(), columns = ["Value"])
reg_time_effects = pd.DataFrame(index = sorted(list(df[time].drop_duplicates())), columns = ["Value"])
reg_comb_effects = panel_model_fit.estimated_effects
reg_comb_effects.columns = ["Value"]
# Use LSDV for estimating effects
Y_data_mlr = df[response_var]
if PDM_alg == "Pooled" or PDM_alg == "Random Effects":
X_data_mlr = sm.add_constant(df[expl_var])
else: X_data_mlr = df[expl_var]
if PDM_alg == "Entity Fixed Effects":
X_data_mlr = pd.concat([X_data_mlr, pd.get_dummies(df[entity])], axis = 1)
model_mlr = sm.OLS(Y_data_mlr, X_data_mlr)
model_mlr_fit = model_mlr.fit()
for e in reg_ent_effects.index:
reg_ent_effects.loc[e]["Value"] = model_mlr_fit.params[e]
for t in reg_time_effects.index:
reg_time_effects.loc[t]["Value"] = 0
if PDM_alg == "Time Fixed Effects":
X_data_mlr = pd.concat([X_data_mlr, pd.get_dummies(df[time])], axis = 1)
model_mlr = sm.OLS(Y_data_mlr, X_data_mlr)
model_mlr_fit = model_mlr.fit()
for e in reg_ent_effects.index:
reg_ent_effects.loc[e]["Value"] = 0
for t in reg_time_effects.index:
reg_time_effects.loc[t]["Value"] = model_mlr_fit.params[t]
if PDM_alg == "Two-ways Fixed Effects":
X_data_mlr = pd.concat([X_data_mlr, pd.get_dummies(df[entity]), | pd.get_dummies(df[time]) | pandas.get_dummies |
# data
import pandas as pd
import numpy as np
from pandas_datareader import data as web
from datetime import datetime as dt
import functools
import config as cfg
def get_stockP_raw(l_of_stocks, start = dt(2020, 1, 1), end = dt.now()):
# data preparation
df = web.DataReader(l_of_stocks, 'yahoo', start, end)
df = df.loc[:, df.columns.get_level_values(0).isin({'Close'})].round(4)
df.columns =df.columns.droplevel()
return df
def get_stockP_return(df_stockP):
df_stock_return = df_stockP.pct_change().round(4)
df_merged = df_stockP.merge(df_stock_return,
left_index = True,
right_index = True,
how = 'left',
suffixes=('_price', '_return'))
return df_merged
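# Illustrative usage sketch (tickers and dates are placeholders, not part of this script):
# prices = get_stockP_raw(['AAPL', 'MSFT'], start=dt(2020, 1, 1))
# merged = get_stockP_return(prices) # price columns plus matching *_return columns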
def add_moving_features(df_raw):
""" feature engineering """
cols = df_raw.columns.tolist()
# make statistical features
l_new_features = []
for col in cols:
new_features = df_raw[col].rolling(5, min_periods=2).aggregate([np.min, np.max, np.mean]) #, np.std
new_features = new_features.add_suffix('_{colname}'.format(colname = col))
l_new_features.append(new_features)
# add statistical features
df_new_features = functools.reduce(lambda x, y: | pd.merge(x, y, left_index=True, right_index=True, how='left') | pandas.merge |
# -*- coding: utf-8 -*-
from datetime import timedelta
import operator
from string import ascii_lowercase
import warnings
import numpy as np
import pytest
from pandas.compat import lrange
import pandas.util._test_decorators as td
import pandas as pd
from pandas import (
Categorical, DataFrame, MultiIndex, Series, Timestamp, date_range, isna,
notna, to_datetime, to_timedelta)
import pandas.core.algorithms as algorithms
import pandas.core.nanops as nanops
import pandas.util.testing as tm
def assert_stat_op_calc(opname, alternative, frame, has_skipna=True,
check_dtype=True, check_dates=False,
check_less_precise=False, skipna_alternative=None):
"""
Check that operator opname works as advertised on frame
Parameters
----------
opname : string
Name of the operator to test on frame
alternative : function
Function that opname is tested against; i.e. "frame.opname()" should
equal "alternative(frame)".
frame : DataFrame
The object that the tests are executed on
has_skipna : bool, default True
Whether the method "opname" has the kwarg "skip_na"
check_dtype : bool, default True
Whether the dtypes of the result of "frame.opname()" and
"alternative(frame)" should be checked.
check_dates : bool, default false
Whether opname should be tested on a Datetime Series
check_less_precise : bool, default False
Whether results should only be compared approximately;
passed on to tm.assert_series_equal
skipna_alternative : function, default None
NaN-safe version of alternative
"""
f = getattr(frame, opname)
if check_dates:
df = DataFrame({'b': date_range('1/1/2001', periods=2)})
result = getattr(df, opname)()
assert isinstance(result, Series)
df['a'] = lrange(len(df))
result = getattr(df, opname)()
assert isinstance(result, Series)
assert len(result)
if has_skipna:
def wrapper(x):
return alternative(x.values)
skipna_wrapper = tm._make_skipna_wrapper(alternative,
skipna_alternative)
result0 = f(axis=0, skipna=False)
result1 = f(axis=1, skipna=False)
tm.assert_series_equal(result0, frame.apply(wrapper),
check_dtype=check_dtype,
check_less_precise=check_less_precise)
# HACK: win32
tm.assert_series_equal(result1, frame.apply(wrapper, axis=1),
check_dtype=False,
check_less_precise=check_less_precise)
else:
skipna_wrapper = alternative
result0 = f(axis=0)
result1 = f(axis=1)
tm.assert_series_equal(result0, frame.apply(skipna_wrapper),
check_dtype=check_dtype,
check_less_precise=check_less_precise)
if opname in ['sum', 'prod']:
expected = frame.apply(skipna_wrapper, axis=1)
tm.assert_series_equal(result1, expected, check_dtype=False,
check_less_precise=check_less_precise)
# check dtypes
if check_dtype:
lcd_dtype = frame.values.dtype
assert lcd_dtype == result0.dtype
assert lcd_dtype == result1.dtype
# bad axis
with pytest.raises(ValueError, match='No axis named 2'):
f(axis=2)
# all NA case
if has_skipna:
all_na = frame * np.NaN
r0 = getattr(all_na, opname)(axis=0)
r1 = getattr(all_na, opname)(axis=1)
if opname in ['sum', 'prod']:
unit = 1 if opname == 'prod' else 0 # result for empty sum/prod
expected = pd.Series(unit, index=r0.index, dtype=r0.dtype)
tm.assert_series_equal(r0, expected)
expected = pd.Series(unit, index=r1.index, dtype=r1.dtype)
tm.assert_series_equal(r1, expected)
def assert_stat_op_api(opname, float_frame, float_string_frame,
has_numeric_only=False):
"""
Check that API for operator opname works as advertised on frame
Parameters
----------
opname : string
Name of the operator to test on frame
float_frame : DataFrame
DataFrame with columns of type float
float_string_frame : DataFrame
DataFrame with both float and string columns
has_numeric_only : bool, default False
Whether the method "opname" has the kwarg "numeric_only"
"""
# make sure works on mixed-type frame
getattr(float_string_frame, opname)(axis=0)
getattr(float_string_frame, opname)(axis=1)
if has_numeric_only:
getattr(float_string_frame, opname)(axis=0, numeric_only=True)
getattr(float_string_frame, opname)(axis=1, numeric_only=True)
getattr(float_frame, opname)(axis=0, numeric_only=False)
getattr(float_frame, opname)(axis=1, numeric_only=False)
def assert_bool_op_calc(opname, alternative, frame, has_skipna=True):
"""
Check that bool operator opname works as advertised on frame
Parameters
----------
opname : string
Name of the operator to test on frame
alternative : function
Function that opname is tested against; i.e. "frame.opname()" should
equal "alternative(frame)".
frame : DataFrame
The object that the tests are executed on
has_skipna : bool, default True
Whether the method "opname" has the kwarg "skip_na"
"""
f = getattr(frame, opname)
if has_skipna:
def skipna_wrapper(x):
nona = x.dropna().values
return alternative(nona)
def wrapper(x):
return alternative(x.values)
result0 = f(axis=0, skipna=False)
result1 = f(axis=1, skipna=False)
tm.assert_series_equal(result0, frame.apply(wrapper))
tm.assert_series_equal(result1, frame.apply(wrapper, axis=1),
check_dtype=False) # HACK: win32
else:
skipna_wrapper = alternative
wrapper = alternative
result0 = f(axis=0)
result1 = f(axis=1)
tm.assert_series_equal(result0, frame.apply(skipna_wrapper))
tm.assert_series_equal(result1, frame.apply(skipna_wrapper, axis=1),
check_dtype=False)
# bad axis
with pytest.raises(ValueError, match='No axis named 2'):
f(axis=2)
# all NA case
if has_skipna:
all_na = frame * np.NaN
r0 = getattr(all_na, opname)(axis=0)
r1 = getattr(all_na, opname)(axis=1)
if opname == 'any':
assert not r0.any()
assert not r1.any()
else:
assert r0.all()
assert r1.all()
def assert_bool_op_api(opname, bool_frame_with_na, float_string_frame,
has_bool_only=False):
"""
Check that API for boolean operator opname works as advertised on frame
Parameters
----------
opname : string
Name of the operator to test on frame
bool_frame_with_na : DataFrame
DataFrame with columns of type bool and some NA values
float_string_frame : DataFrame
DataFrame with both float and string columns
has_bool_only : bool, default False
Whether the method "opname" has the kwarg "bool_only"
"""
# make sure op works on mixed-type frame
mixed = float_string_frame
mixed['_bool_'] = np.random.randn(len(mixed)) > 0.5
getattr(mixed, opname)(axis=0)
getattr(mixed, opname)(axis=1)
if has_bool_only:
getattr(mixed, opname)(axis=0, bool_only=True)
getattr(mixed, opname)(axis=1, bool_only=True)
getattr(bool_frame_with_na, opname)(axis=0, bool_only=False)
getattr(bool_frame_with_na, opname)(axis=1, bool_only=False)
class TestDataFrameAnalytics(object):
# ---------------------------------------------------------------------
# Correlation and covariance
@td.skip_if_no_scipy
def test_corr_pearson(self, float_frame):
float_frame['A'][:5] = np.nan
float_frame['B'][5:10] = np.nan
self._check_method(float_frame, 'pearson')
@td.skip_if_no_scipy
def test_corr_kendall(self, float_frame):
float_frame['A'][:5] = np.nan
float_frame['B'][5:10] = np.nan
self._check_method(float_frame, 'kendall')
@td.skip_if_no_scipy
def test_corr_spearman(self, float_frame):
float_frame['A'][:5] = np.nan
float_frame['B'][5:10] = np.nan
self._check_method(float_frame, 'spearman')
def _check_method(self, frame, method='pearson'):
correls = frame.corr(method=method)
expected = frame['A'].corr(frame['C'], method=method)
tm.assert_almost_equal(correls['A']['C'], expected)
@td.skip_if_no_scipy
def test_corr_non_numeric(self, float_frame, float_string_frame):
float_frame['A'][:5] = np.nan
float_frame['B'][5:10] = np.nan
# exclude non-numeric types
result = float_string_frame.corr()
expected = float_string_frame.loc[:, ['A', 'B', 'C', 'D']].corr()
tm.assert_frame_equal(result, expected)
@td.skip_if_no_scipy
@pytest.mark.parametrize('meth', ['pearson', 'kendall', 'spearman'])
def test_corr_nooverlap(self, meth):
# nothing in common
df = DataFrame({'A': [1, 1.5, 1, np.nan, np.nan, np.nan],
'B': [np.nan, np.nan, np.nan, 1, 1.5, 1],
'C': [np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan]})
rs = df.corr(meth)
assert isna(rs.loc['A', 'B'])
assert isna(rs.loc['B', 'A'])
assert rs.loc['A', 'A'] == 1
assert rs.loc['B', 'B'] == 1
assert isna(rs.loc['C', 'C'])
@td.skip_if_no_scipy
@pytest.mark.parametrize('meth', ['pearson', 'spearman'])
def test_corr_constant(self, meth):
# constant --> all NA
df = DataFrame({'A': [1, 1, 1, np.nan, np.nan, np.nan],
'B': [np.nan, np.nan, np.nan, 1, 1, 1]})
rs = df.corr(meth)
assert isna(rs.values).all()
def test_corr_int(self):
# dtypes other than float64 #1761
df3 = DataFrame({"a": [1, 2, 3, 4], "b": [1, 2, 3, 4]})
df3.cov()
df3.corr()
@td.skip_if_no_scipy
def test_corr_int_and_boolean(self):
# when dtypes of pandas series are different
# then ndarray will have dtype=object,
# so it need to be properly handled
df = DataFrame({"a": [True, False], "b": [1, 0]})
expected = DataFrame(np.ones((2, 2)), index=[
'a', 'b'], columns=['a', 'b'])
for meth in ['pearson', 'kendall', 'spearman']:
with warnings.catch_warnings(record=True):
warnings.simplefilter("ignore", RuntimeWarning)
result = df.corr(meth)
tm.assert_frame_equal(result, expected)
def test_corr_cov_independent_index_column(self):
# GH 14617
df = pd.DataFrame(np.random.randn(4 * 10).reshape(10, 4),
columns=list("abcd"))
for method in ['cov', 'corr']:
result = getattr(df, method)()
assert result.index is not result.columns
assert result.index.equals(result.columns)
def test_corr_invalid_method(self):
# GH 22298
df = pd.DataFrame(np.random.normal(size=(10, 2)))
msg = ("method must be either 'pearson', "
"'spearman', 'kendall', or a callable, ")
with pytest.raises(ValueError, match=msg):
df.corr(method="____")
def test_cov(self, float_frame, float_string_frame):
# min_periods no NAs (corner case)
expected = float_frame.cov()
result = float_frame.cov(min_periods=len(float_frame))
tm.assert_frame_equal(expected, result)
result = float_frame.cov(min_periods=len(float_frame) + 1)
assert isna(result.values).all()
# with NAs
frame = float_frame.copy()
frame['A'][:5] = np.nan
frame['B'][5:10] = np.nan
result = float_frame.cov(min_periods=len(float_frame) - 8)
expected = float_frame.cov()
expected.loc['A', 'B'] = np.nan
expected.loc['B', 'A'] = np.nan
# regular
float_frame['A'][:5] = np.nan
float_frame['B'][:10] = np.nan
cov = float_frame.cov()
tm.assert_almost_equal(cov['A']['C'],
float_frame['A'].cov(float_frame['C']))
# exclude non-numeric types
result = float_string_frame.cov()
expected = float_string_frame.loc[:, ['A', 'B', 'C', 'D']].cov()
tm.assert_frame_equal(result, expected)
# Single column frame
df = DataFrame(np.linspace(0.0, 1.0, 10))
result = df.cov()
expected = DataFrame(np.cov(df.values.T).reshape((1, 1)),
index=df.columns, columns=df.columns)
tm.assert_frame_equal(result, expected)
df.loc[0] = np.nan
result = df.cov()
expected = DataFrame(np.cov(df.values[1:].T).reshape((1, 1)),
index=df.columns, columns=df.columns)
tm.assert_frame_equal(result, expected)
def test_corrwith(self, datetime_frame):
a = datetime_frame
noise = Series(np.random.randn(len(a)), index=a.index)
b = datetime_frame.add(noise, axis=0)
# make sure order does not matter
b = b.reindex(columns=b.columns[::-1], index=b.index[::-1][10:])
del b['B']
colcorr = a.corrwith(b, axis=0)
tm.assert_almost_equal(colcorr['A'], a['A'].corr(b['A']))
rowcorr = a.corrwith(b, axis=1)
tm.assert_series_equal(rowcorr, a.T.corrwith(b.T, axis=0))
dropped = a.corrwith(b, axis=0, drop=True)
tm.assert_almost_equal(dropped['A'], a['A'].corr(b['A']))
assert 'B' not in dropped
dropped = a.corrwith(b, axis=1, drop=True)
assert a.index[-1] not in dropped.index
# non time-series data
index = ['a', 'b', 'c', 'd', 'e']
columns = ['one', 'two', 'three', 'four']
df1 = DataFrame(np.random.randn(5, 4), index=index, columns=columns)
df2 = DataFrame(np.random.randn(4, 4),
index=index[:4], columns=columns)
correls = df1.corrwith(df2, axis=1)
for row in index[:4]:
tm.assert_almost_equal(correls[row],
df1.loc[row].corr(df2.loc[row]))
def test_corrwith_with_objects(self):
df1 = tm.makeTimeDataFrame()
df2 = tm.makeTimeDataFrame()
cols = ['A', 'B', 'C', 'D']
df1['obj'] = 'foo'
df2['obj'] = 'bar'
result = df1.corrwith(df2)
expected = df1.loc[:, cols].corrwith(df2.loc[:, cols])
tm.assert_series_equal(result, expected)
result = df1.corrwith(df2, axis=1)
expected = df1.loc[:, cols].corrwith(df2.loc[:, cols], axis=1)
tm.assert_series_equal(result, expected)
def test_corrwith_series(self, datetime_frame):
result = datetime_frame.corrwith(datetime_frame['A'])
expected = datetime_frame.apply(datetime_frame['A'].corr)
tm.assert_series_equal(result, expected)
def test_corrwith_matches_corrcoef(self):
df1 = DataFrame(np.arange(10000), columns=['a'])
df2 = DataFrame(np.arange(10000) ** 2, columns=['a'])
c1 = df1.corrwith(df2)['a']
c2 = np.corrcoef(df1['a'], df2['a'])[0][1]
tm.assert_almost_equal(c1, c2)
assert c1 < 1
def test_corrwith_mixed_dtypes(self):
# GH 18570
df = pd.DataFrame({'a': [1, 4, 3, 2], 'b': [4, 6, 7, 3],
'c': ['a', 'b', 'c', 'd']})
s = pd.Series([0, 6, 7, 3])
result = df.corrwith(s)
corrs = [df['a'].corr(s), df['b'].corr(s)]
expected = pd.Series(data=corrs, index=['a', 'b'])
tm.assert_series_equal(result, expected)
def test_corrwith_index_intersection(self):
df1 = pd.DataFrame(np.random.random(size=(10, 2)),
columns=["a", "b"])
df2 = pd.DataFrame(np.random.random(size=(10, 3)),
columns=["a", "b", "c"])
result = df1.corrwith(df2, drop=True).index.sort_values()
expected = df1.columns.intersection(df2.columns).sort_values()
tm.assert_index_equal(result, expected)
def test_corrwith_index_union(self):
df1 = pd.DataFrame(np.random.random(size=(10, 2)),
columns=["a", "b"])
df2 = pd.DataFrame(np.random.random(size=(10, 3)),
columns=["a", "b", "c"])
result = df1.corrwith(df2, drop=False).index.sort_values()
expected = df1.columns.union(df2.columns).sort_values()
tm.assert_index_equal(result, expected)
def test_corrwith_dup_cols(self):
# GH 21925
df1 = pd.DataFrame(np.vstack([np.arange(10)] * 3).T)
df2 = df1.copy()
df2 = pd.concat((df2, df2[0]), axis=1)
result = df1.corrwith(df2)
expected = pd.Series(np.ones(4), index=[0, 0, 1, 2])
tm.assert_series_equal(result, expected)
@td.skip_if_no_scipy
def test_corrwith_spearman(self):
# GH 21925
df = pd.DataFrame(np.random.random(size=(100, 3)))
result = df.corrwith(df**2, method="spearman")
expected = Series(np.ones(len(result)))
tm.assert_series_equal(result, expected)
@td.skip_if_no_scipy
def test_corrwith_kendall(self):
# GH 21925
df = pd.DataFrame(np.random.random(size=(100, 3)))
result = df.corrwith(df**2, method="kendall")
expected = Series(np.ones(len(result)))
tm.assert_series_equal(result, expected)
# ---------------------------------------------------------------------
# Describe
def test_bool_describe_in_mixed_frame(self):
df = DataFrame({
'string_data': ['a', 'b', 'c', 'd', 'e'],
'bool_data': [True, True, False, False, False],
'int_data': [10, 20, 30, 40, 50],
})
# Integer data are included in .describe() output,
# Boolean and string data are not.
result = df.describe()
expected = DataFrame({'int_data': [5, 30, df.int_data.std(),
10, 20, 30, 40, 50]},
index=['count', 'mean', 'std', 'min', '25%',
'50%', '75%', 'max'])
tm.assert_frame_equal(result, expected)
# Top value is a boolean value that is False
result = df.describe(include=['bool'])
expected = DataFrame({'bool_data': [5, 2, False, 3]},
index=['count', 'unique', 'top', 'freq'])
tm.assert_frame_equal(result, expected)
def test_describe_bool_frame(self):
# GH 13891
df = pd.DataFrame({
'bool_data_1': [False, False, True, True],
'bool_data_2': [False, True, True, True]
})
result = df.describe()
expected = DataFrame({'bool_data_1': [4, 2, True, 2],
'bool_data_2': [4, 2, True, 3]},
index=['count', 'unique', 'top', 'freq'])
tm.assert_frame_equal(result, expected)
df = pd.DataFrame({
'bool_data': [False, False, True, True, False],
'int_data': [0, 1, 2, 3, 4]
})
result = df.describe()
expected = DataFrame({'int_data': [5, 2, df.int_data.std(), 0, 1,
2, 3, 4]},
index=['count', 'mean', 'std', 'min', '25%',
'50%', '75%', 'max'])
tm.assert_frame_equal(result, expected)
df = pd.DataFrame({
'bool_data': [False, False, True, True],
'str_data': ['a', 'b', 'c', 'a']
})
result = df.describe()
expected = DataFrame({'bool_data': [4, 2, True, 2],
'str_data': [4, 3, 'a', 2]},
index=['count', 'unique', 'top', 'freq'])
tm.assert_frame_equal(result, expected)
def test_describe_categorical(self):
df = DataFrame({'value': np.random.randint(0, 10000, 100)})
labels = ["{0} - {1}".format(i, i + 499) for i in range(0, 10000, 500)]
cat_labels = Categorical(labels, labels)
df = df.sort_values(by=['value'], ascending=True)
df['value_group'] = pd.cut(df.value, range(0, 10500, 500),
right=False, labels=cat_labels)
cat = df
# Categoricals should not show up together with numerical columns
result = cat.describe()
assert len(result.columns) == 1
# In a frame, describe() for the cat should be the same as for string
# arrays (count, unique, top, freq)
cat = Categorical(["a", "b", "b", "b"], categories=['a', 'b', 'c'],
ordered=True)
s = Series(cat)
result = s.describe()
expected = Series([4, 2, "b", 3],
index=['count', 'unique', 'top', 'freq'])
tm.assert_series_equal(result, expected)
cat = Series(Categorical(["a", "b", "c", "c"]))
df3 = DataFrame({"cat": cat, "s": ["a", "b", "c", "c"]})
result = df3.describe()
tm.assert_numpy_array_equal(result["cat"].values, result["s"].values)
def test_describe_categorical_columns(self):
# GH 11558
columns = pd.CategoricalIndex(['int1', 'int2', 'obj'],
ordered=True, name='XXX')
df = DataFrame({'int1': [10, 20, 30, 40, 50],
'int2': [10, 20, 30, 40, 50],
'obj': ['A', 0, None, 'X', 1]},
columns=columns)
result = df.describe()
exp_columns = pd.CategoricalIndex(['int1', 'int2'],
categories=['int1', 'int2', 'obj'],
ordered=True, name='XXX')
expected = DataFrame({'int1': [5, 30, df.int1.std(),
10, 20, 30, 40, 50],
'int2': [5, 30, df.int2.std(),
10, 20, 30, 40, 50]},
index=['count', 'mean', 'std', 'min', '25%',
'50%', '75%', 'max'],
columns=exp_columns)
tm.assert_frame_equal(result, expected)
tm.assert_categorical_equal(result.columns.values,
expected.columns.values)
def test_describe_datetime_columns(self):
columns = pd.DatetimeIndex(['2011-01-01', '2011-02-01', '2011-03-01'],
freq='MS', tz='US/Eastern', name='XXX')
df = DataFrame({0: [10, 20, 30, 40, 50],
1: [10, 20, 30, 40, 50],
2: ['A', 0, None, 'X', 1]})
df.columns = columns
result = df.describe()
exp_columns = pd.DatetimeIndex(['2011-01-01', '2011-02-01'],
freq='MS', tz='US/Eastern', name='XXX')
expected = DataFrame({0: [5, 30, df.iloc[:, 0].std(),
10, 20, 30, 40, 50],
1: [5, 30, df.iloc[:, 1].std(),
10, 20, 30, 40, 50]},
index=['count', 'mean', 'std', 'min', '25%',
'50%', '75%', 'max'])
expected.columns = exp_columns
tm.assert_frame_equal(result, expected)
assert result.columns.freq == 'MS'
assert result.columns.tz == expected.columns.tz
def test_describe_timedelta_values(self):
# GH 6145
t1 = pd.timedelta_range('1 days', freq='D', periods=5)
t2 = pd.timedelta_range('1 hours', freq='H', periods=5)
df = pd.DataFrame({'t1': t1, 't2': t2})
expected = DataFrame({'t1': [5, pd.Timedelta('3 days'),
df.iloc[:, 0].std(),
pd.Timedelta('1 days'),
pd.Timedelta('2 days'),
pd.Timedelta('3 days'),
pd.Timedelta('4 days'),
pd.Timedelta('5 days')],
't2': [5, pd.Timedelta('3 hours'),
df.iloc[:, 1].std(),
pd.Timedelta('1 hours'),
pd.Timedelta('2 hours'),
pd.Timedelta('3 hours'),
pd.Timedelta('4 hours'),
pd.Timedelta('5 hours')]},
index=['count', 'mean', 'std', 'min', '25%',
'50%', '75%', 'max'])
result = df.describe()
tm.assert_frame_equal(result, expected)
exp_repr = (" t1 t2\n"
"count 5 5\n"
"mean 3 days 00:00:00 0 days 03:00:00\n"
"std 1 days 13:56:50.394919 0 days 01:34:52.099788\n"
"min 1 days 00:00:00 0 days 01:00:00\n"
"25% 2 days 00:00:00 0 days 02:00:00\n"
"50% 3 days 00:00:00 0 days 03:00:00\n"
"75% 4 days 00:00:00 0 days 04:00:00\n"
"max 5 days 00:00:00 0 days 05:00:00")
assert repr(result) == exp_repr
def test_describe_tz_values(self, tz_naive_fixture):
# GH 21332
tz = tz_naive_fixture
s1 = Series(range(5))
start = Timestamp(2018, 1, 1)
end = Timestamp(2018, 1, 5)
s2 = Series(date_range(start, end, tz=tz))
df = pd.DataFrame({'s1': s1, 's2': s2})
expected = DataFrame({'s1': [5, np.nan, np.nan, np.nan, np.nan, np.nan,
2, 1.581139, 0, 1, 2, 3, 4],
's2': [5, 5, s2.value_counts().index[0], 1,
start.tz_localize(tz),
end.tz_localize(tz), np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan]},
index=['count', 'unique', 'top', 'freq', 'first',
'last', 'mean', 'std', 'min', '25%', '50%',
'75%', 'max']
)
result = df.describe(include='all')
tm.assert_frame_equal(result, expected)
# ---------------------------------------------------------------------
# Reductions
def test_stat_op_api(self, float_frame, float_string_frame):
assert_stat_op_api('count', float_frame, float_string_frame,
has_numeric_only=True)
assert_stat_op_api('sum', float_frame, float_string_frame,
has_numeric_only=True)
assert_stat_op_api('nunique', float_frame, float_string_frame)
assert_stat_op_api('mean', float_frame, float_string_frame)
assert_stat_op_api('product', float_frame, float_string_frame)
assert_stat_op_api('median', float_frame, float_string_frame)
assert_stat_op_api('min', float_frame, float_string_frame)
assert_stat_op_api('max', float_frame, float_string_frame)
assert_stat_op_api('mad', float_frame, float_string_frame)
assert_stat_op_api('var', float_frame, float_string_frame)
assert_stat_op_api('std', float_frame, float_string_frame)
assert_stat_op_api('sem', float_frame, float_string_frame)
assert_stat_op_api('median', float_frame, float_string_frame)
try:
from scipy.stats import skew, kurtosis # noqa:F401
assert_stat_op_api('skew', float_frame, float_string_frame)
assert_stat_op_api('kurt', float_frame, float_string_frame)
except ImportError:
pass
def test_stat_op_calc(self, float_frame_with_na, mixed_float_frame):
def count(s):
return notna(s).sum()
def nunique(s):
return len(algorithms.unique1d(s.dropna()))
def mad(x):
return np.abs(x - x.mean()).mean()
def var(x):
return np.var(x, ddof=1)
def std(x):
return np.std(x, ddof=1)
def sem(x):
return np.std(x, ddof=1) / np.sqrt(len(x))
def skewness(x):
from scipy.stats import skew # noqa:F811
if len(x) < 3:
return np.nan
return skew(x, bias=False)
def kurt(x):
from scipy.stats import kurtosis # noqa:F811
if len(x) < 4:
return np.nan
return kurtosis(x, bias=False)
assert_stat_op_calc('nunique', nunique, float_frame_with_na,
has_skipna=False, check_dtype=False,
check_dates=True)
# mixed types (with upcasting happening)
assert_stat_op_calc('sum', np.sum, mixed_float_frame.astype('float32'),
check_dtype=False, check_less_precise=True)
assert_stat_op_calc('sum', np.sum, float_frame_with_na,
skipna_alternative=np.nansum)
assert_stat_op_calc('mean', np.mean, float_frame_with_na,
check_dates=True)
assert_stat_op_calc('product', np.prod, float_frame_with_na)
assert_stat_op_calc('mad', mad, float_frame_with_na)
assert_stat_op_calc('var', var, float_frame_with_na)
assert_stat_op_calc('std', std, float_frame_with_na)
assert_stat_op_calc('sem', sem, float_frame_with_na)
assert_stat_op_calc('count', count, float_frame_with_na,
has_skipna=False, check_dtype=False,
check_dates=True)
try:
from scipy.stats import skew, kurtosis # noqa:F401
assert_stat_op_calc('skew', skewness, float_frame_with_na)
assert_stat_op_calc('kurt', kurt, float_frame_with_na)
except ImportError:
pass
# TODO: Ensure warning isn't emitted in the first place
@pytest.mark.filterwarnings("ignore:All-NaN:RuntimeWarning")
def test_median(self, float_frame_with_na, int_frame):
def wrapper(x):
if isna(x).any():
return np.nan
return np.median(x)
assert_stat_op_calc('median', wrapper, float_frame_with_na,
check_dates=True)
assert_stat_op_calc('median', wrapper, int_frame, check_dtype=False,
check_dates=True)
@pytest.mark.parametrize('method', ['sum', 'mean', 'prod', 'var',
'std', 'skew', 'min', 'max'])
def test_stat_operators_attempt_obj_array(self, method):
# GH#676
data = {
'a': [-0.00049987540199591344, -0.0016467257772919831,
0.00067695870775883013],
'b': [-0, -0, 0.0],
'c': [0.00031111847529610595, 0.0014902627951905339,
-0.00094099200035979691]
}
df1 = DataFrame(data, index=['foo', 'bar', 'baz'], dtype='O')
df2 = DataFrame({0: [np.nan, 2], 1: [np.nan, 3],
2: [np.nan, 4]}, dtype=object)
for df in [df1, df2]:
assert df.values.dtype == np.object_
result = getattr(df, method)(1)
expected = getattr(df.astype('f8'), method)(1)
if method in ['sum', 'prod']:
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize('op', ['mean', 'std', 'var',
'skew', 'kurt', 'sem'])
def test_mixed_ops(self, op):
# GH#16116
df = DataFrame({'int': [1, 2, 3, 4],
'float': [1., 2., 3., 4.],
'str': ['a', 'b', 'c', 'd']})
result = getattr(df, op)()
assert len(result) == 2
with pd.option_context('use_bottleneck', False):
result = getattr(df, op)()
assert len(result) == 2
def test_reduce_mixed_frame(self):
# GH 6806
df = DataFrame({
'bool_data': [True, True, False, False, False],
'int_data': [10, 20, 30, 40, 50],
'string_data': ['a', 'b', 'c', 'd', 'e'],
})
df.reindex(columns=['bool_data', 'int_data', 'string_data'])
test = df.sum(axis=0)
tm.assert_numpy_array_equal(test.values,
np.array([2, 150, 'abcde'], dtype=object))
tm.assert_series_equal(test, df.T.sum(axis=1))
def test_nunique(self):
df = DataFrame({'A': [1, 1, 1],
'B': [1, 2, 3],
'C': [1, np.nan, 3]})
tm.assert_series_equal(df.nunique(), Series({'A': 1, 'B': 3, 'C': 2}))
tm.assert_series_equal(df.nunique(dropna=False),
Series({'A': 1, 'B': 3, 'C': 3}))
tm.assert_series_equal(df.nunique(axis=1), Series({0: 1, 1: 2, 2: 2}))
tm.assert_series_equal(df.nunique(axis=1, dropna=False),
Series({0: 1, 1: 3, 2: 2}))
@pytest.mark.parametrize('tz', [None, 'UTC'])
def test_mean_mixed_datetime_numeric(self, tz):
# https://github.com/pandas-dev/pandas/issues/24752
df = pd.DataFrame({"A": [1, 1],
"B": [pd.Timestamp('2000', tz=tz)] * 2})
result = df.mean()
expected = pd.Series([1.0], index=['A'])
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize('tz', [None, 'UTC'])
def test_mean_excludeds_datetimes(self, tz):
# https://github.com/pandas-dev/pandas/issues/24752
# Our long-term desired behavior is unclear, but the behavior in
# 0.24.0rc1 was buggy.
df = pd.DataFrame({"A": [pd.Timestamp('2000', tz=tz)] * 2})
result = df.mean()
expected = pd.Series()
tm.assert_series_equal(result, expected)
def test_var_std(self, datetime_frame):
result = datetime_frame.std(ddof=4)
expected = datetime_frame.apply(lambda x: x.std(ddof=4))
tm.assert_almost_equal(result, expected)
result = datetime_frame.var(ddof=4)
expected = datetime_frame.apply(lambda x: x.var(ddof=4))
tm.assert_almost_equal(result, expected)
arr = np.repeat(np.random.random((1, 1000)), 1000, 0)
result = nanops.nanvar(arr, axis=0)
assert not (result < 0).any()
with pd.option_context('use_bottleneck', False):
result = nanops.nanvar(arr, axis=0)
assert not (result < 0).any()
@pytest.mark.parametrize(
"meth", ['sem', 'var', 'std'])
def test_numeric_only_flag(self, meth):
# GH 9201
df1 = DataFrame(np.random.randn(5, 3), columns=['foo', 'bar', 'baz'])
# set one entry to a number in str format
df1.loc[0, 'foo'] = '100'
df2 = DataFrame(np.random.randn(5, 3), columns=['foo', 'bar', 'baz'])
# set one entry to a non-number str
df2.loc[0, 'foo'] = 'a'
result = getattr(df1, meth)(axis=1, numeric_only=True)
expected = getattr(df1[['bar', 'baz']], meth)(axis=1)
tm.assert_series_equal(expected, result)
result = getattr(df2, meth)(axis=1, numeric_only=True)
expected = getattr(df2[['bar', 'baz']], meth)(axis=1)
tm.assert_series_equal(expected, result)
# df1 has all numbers, df2 has a letter inside
msg = r"unsupported operand type\(s\) for -: 'float' and 'str'"
with pytest.raises(TypeError, match=msg):
getattr(df1, meth)(axis=1, numeric_only=False)
msg = "could not convert string to float: 'a'"
with pytest.raises(TypeError, match=msg):
getattr(df2, meth)(axis=1, numeric_only=False)
def test_sem(self, datetime_frame):
result = datetime_frame.sem(ddof=4)
expected = datetime_frame.apply(
lambda x: x.std(ddof=4) / np.sqrt(len(x)))
tm.assert_almost_equal(result, expected)
arr = np.repeat(np.random.random((1, 1000)), 1000, 0)
result = nanops.nansem(arr, axis=0)
assert not (result < 0).any()
with pd.option_context('use_bottleneck', False):
result = nanops.nansem(arr, axis=0)
assert not (result < 0).any()
@td.skip_if_no_scipy
def test_kurt(self):
index = MultiIndex(levels=[['bar'], ['one', 'two', 'three'], [0, 1]],
codes=[[0, 0, 0, 0, 0, 0],
[0, 1, 2, 0, 1, 2],
[0, 1, 0, 1, 0, 1]])
df = DataFrame(np.random.randn(6, 3), index=index)
kurt = df.kurt()
kurt2 = df.kurt(level=0).xs('bar')
tm.assert_series_equal(kurt, kurt2, check_names=False)
assert kurt.name is None
assert kurt2.name == 'bar'
@pytest.mark.parametrize("dropna, expected", [
(True, {'A': [12],
'B': [10.0],
'C': [1.0],
'D': ['a'],
'E': Categorical(['a'], categories=['a']),
'F': to_datetime(['2000-1-2']),
'G': to_timedelta(['1 days'])}),
(False, {'A': [12],
'B': [10.0],
'C': [np.nan],
'D': np.array([np.nan], dtype=object),
'E': Categorical([np.nan], categories=['a']),
'F': [pd.NaT],
'G': to_timedelta([pd.NaT])}),
(True, {'H': [8, 9, np.nan, np.nan],
'I': [8, 9, np.nan, np.nan],
'J': [1, np.nan, np.nan, np.nan],
'K': Categorical(['a', np.nan, np.nan, np.nan],
categories=['a']),
'L': to_datetime(['2000-1-2', 'NaT', 'NaT', 'NaT']),
'M': to_timedelta(['1 days', 'nan', 'nan', 'nan']),
'N': [0, 1, 2, 3]}),
(False, {'H': [8, 9, np.nan, np.nan],
'I': [8, 9, np.nan, np.nan],
'J': [1, np.nan, np.nan, np.nan],
'K': Categorical([np.nan, 'a', np.nan, np.nan],
categories=['a']),
'L': to_datetime(['NaT', '2000-1-2', 'NaT', 'NaT']),
'M': to_timedelta(['nan', '1 days', 'nan', 'nan']),
'N': [0, 1, 2, 3]})
])
def test_mode_dropna(self, dropna, expected):
df = DataFrame({"A": [12, 12, 19, 11],
"B": [10, 10, np.nan, 3],
"C": [1, np.nan, np.nan, np.nan],
"D": [np.nan, np.nan, 'a', np.nan],
"E": Categorical([np.nan, np.nan, 'a', np.nan]),
"F": to_datetime(['NaT', '2000-1-2', 'NaT', 'NaT']),
"G": to_timedelta(['1 days', 'nan', 'nan', 'nan']),
"H": [8, 8, 9, 9],
"I": [9, 9, 8, 8],
"J": [1, 1, np.nan, np.nan],
"K": Categorical(['a', np.nan, 'a', np.nan]),
"L": to_datetime(['2000-1-2', '2000-1-2',
'NaT', 'NaT']),
"M": to_timedelta(['1 days', 'nan',
'1 days', 'nan']),
"N": np.arange(4, dtype='int64')})
result = df[sorted(list(expected.keys()))].mode(dropna=dropna)
expected = DataFrame(expected)
tm.assert_frame_equal(result, expected)
def test_mode_sortwarning(self):
# Check for the warning that is raised when the mode
# results cannot be sorted
df = | DataFrame({"A": [np.nan, np.nan, 'a', 'a']}) | pandas.DataFrame |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Read hwm data
diff -> 'Add this differential if going from NAVD88 to MSL assuming a positive upwards z coordinate system'
navd88 + diff = msl
# Delta or convert2msl is always for going from vertical datum to msl by an addition to that datum
# MSL = Vert_datam + convert2msl
"""
__author__ = "<NAME>"
__copyright__ = "Copyright 2017, UCAR/NOAA"
__license__ = "GPL"
__version__ = "1.0"
__email__ = "<EMAIL>"
#import netCDF4 as n4
#from collections import defaultdict
import os,sys
#sys.path.append('/home/moghimis/linux_working/00-working/04-test-adc_plot/')
#sys.path.append('/home/moghimis/linux_working/00-working/04-test-adc_plot/csdlpy')
from pynmd.plotting.vars_param import *
from pynmd.plotting import plot_routines as pr
from pynmd.plotting import plot_settings as ps
from pynmd.plotting import colormaps as cmaps
from pynmd.models.adcirc.post import adcirc_post as adcp
from pynmd.tools.compute_statistics import find_nearest1d,statatistics
import time
from scipy import stats
from geo_regions import get_region_extent
#import cPickle as pickle
import matplotlib.pyplot as plt
import matplotlib.tri as Tri
import numpy as np
import datetime
import string
import glob
#import time
import string
import pandas as pd
import netCDF4 as n4
import seaborn as sns
#sys.path.append('/scratch2/COASTAL/coastal/save/Saeed.Moghimi/opt/pycodes/csdlpy')
#import adcirc
#sns.set_style(style='dark')
sns.set_style(style='ticks')
try:
os.system('rm base_info.pyc' )
except:
pass
if 'base_info' in sys.modules:
del(sys.modules["base_info"])
import base_info
pandas_plots = True
include_bias = False
curr_time = time.strftime("%Y%m%d_h%H_m%M_s%S")
#====== subplot adjustments ===============
left1 = 0.1 # the left side of the subplots of the figure
right1 = 0.9 # the right side of the subplots of the figure
bottom1= 0.15 # the bottom of the subplots of the figure (ntr==16 bottom=0.05)
top1 = 0.9 # the top of the subplots of the figure
wspace1= 0.1 # the amount of width reserved for blank space between subplots
hspace1= 0.15 # the amount of height reserved for white space between subplots
##########################################################################
dpi = 600
ftype = '.png'
#ftype = '.pdf'
for x in base_info.cases[base_info.key]['dir'].split('/'):
if 'rt_' in x:
prefix = x
else:
prefix = ''.join(base_info.cases[base_info.key0]['dir'].split('/')[-3:])
prefix = 'hwm_' + prefix
out_dir = base_info.out_dir + prefix + curr_time+ '/'
# out dir and scr back up
scr_dir = out_dir + '/scr/'
os.system('mkdir -p ' + scr_dir)
args=sys.argv
scr_name = args[0]
os.system('cp -fr '+scr_name +' '+scr_dir)
os.system('cp -fr *.py '+scr_dir)
print (' > Output folder: \n > ',out_dir)
####################
def find_hwm_v01(xgrd,ygrd,maxe,xhwm,yhwm,elev_hwm,convert2msl=None,bias_cor=None ,flag='pos'):
from pynmd.tools.compute_statistics import find_nearest1d
"""
    In: xgrd, ygrd, maxe: model info
        xhwm, yhwm, elev_hwm: data info
        flag: how to treat the data/model comparison
        flag = all  : find nearest grid point
             = valid: find nearest grid point with non-nan value
             = pos  : find nearest grid point with positive value
             = neg  : find nearest grid point with negative value
    Return: model and data vectors
    # Delta or convert2msl is always for going from vertical datum to msl by an addition to that datum
    # MSL = Vert_datum + convert2msl
"""
if flag == 'valid':
        maxe = np.ma.masked_where(maxe == maxe.fill_value, maxe)
mask = maxe.mask
elif flag == 'pos':
mask = [maxe < 0.0]
elif flag == 'neg':
mask = [maxe > 0.0]
elif flag == 'all':
mask = np.isnan(xgrd)
#mask = [maxe < -900.0]
else:
        print ('Choose a valid flag > ')
        print ('flag = all  : find nearest grid point ')
        print ('     = valid: find nearest grid point with non-nan value')
        print ('     = pos  : find nearest grid point with positive value')
        print ('     = neg  : find nearest grid point with negative value')
sys.exit('ERROR')
mask = np.array(mask).squeeze()
xgrd = xgrd[~mask]
ygrd = ygrd[~mask]
maxe = maxe[~mask]
#
if convert2msl is not None:
convert2msl = convert2msl[~mask]
else:
convert2msl = np.zeros_like(xgrd)
#
if bias_cor is not None:
bias_cor = bias_cor[~mask]
else:
bias_cor = np.zeros_like(xgrd)
data = []
model = []
prox = []
xmodel = []
ymodel = []
for ip in range(len(xhwm)):
i,pr = find_nearest1d(xvec = xgrd,yvec = ygrd,xp = xhwm[ip],yp = yhwm[ip])
data.append (elev_hwm [ip] + convert2msl[i])
model.append(maxe[i]+bias_cor[i])
xmodel.append(xgrd[i].item())
ymodel.append(ygrd[i].item())
prox.append(pr)
data = np.array(data ).squeeze()
model = np.array(model).squeeze()
prox = np.array(prox ).squeeze()
xmodel = np.array(xmodel).squeeze()
ymodel = np.array(ymodel).squeeze()
#
#maskf = [model < 0.0]
#maskf = np.array(maskf).squeeze()
#return data[~maskf],model[~maskf],prox[~maskf],xhwm[~maskf],yhwm[~maskf]
return data,xhwm,yhwm,model,xmodel,ymodel,prox
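# Illustrative usage sketch (the array names below are placeholders for the model
# grid coordinates, modelled maximum elevation and the observed high-water-mark
# vectors; only wet/positive model nodes are kept, no datum shift or bias correction):
# data, xd, yd, model, xm, ym, prox = find_hwm_v01(
#     xgrd, ygrd, maxe, xhwm, yhwm, elev_hwm, flag='pos')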
def find_hwm(tri,maxe,xhwm,yhwm,elev_hwm,bias_cor=None,flag='all'):
from pynmd.tools.compute_statistics import find_nearest1d
"""
    In: xgrd, ygrd, maxe: model info
        xhwm, yhwm, elev_hwm: data info
        flag: how to treat the data/model comparison
        flag = all  : find nearest grid point
             = valid: find nearest grid point with non-nan value
             = pos  : find nearest grid point with positive value
             = neg  : find nearest grid point with negative value
    Return: model and data vectors
    # Delta or convert2msl is always for going from vertical datum to msl by an addition to that datum
    # MSL = Vert_datum + convert2msl
"""
# if flag == 'valid':
# mask = np.isnan(xgrd)
# elif flag == 'pos':
# mask = [maxe < 0.0]
# elif flag == 'neg':
# mask = [maxe > 0.0]
# elif flag == 'all':
# mask = maxe.mask
# #mask = [maxe < -900.0]
#
# else:
# print ('Choose a valid flag > '
# print ('flag = all : find nearset grid point '
# print (' = valid: find nearset grid point with non-nan value'
# print (' = pos: find nearset grid point with positive value'
# print (' = neg: find nearset grid point with negative valueChoose a valid flag > '
# sys.exit('ERROR')
#
# mask = np.array(mask).squeeze()
#maxe = elev_max
#xhwm = lon_hwm
#yhwm = lat_hwm
#elev_hwm = hwm
#
xgrd = tri.x
ygrd = tri.y
#
if bias_cor is None:
bias_cor = np.zeros_like(xgrd)
#
data = []
model = []
model_x = []
model_y = []
prox = []
prox_coef = []
#
for ip in range(len(xhwm)):
i, pr = find_nearest1d(xvec = xgrd [~maxe.mask],yvec = ygrd[~maxe.mask],xp = xhwm[ip],yp = yhwm[ip]) #valid mesh
ir,prr = find_nearest1d(xvec = xgrd ,yvec = ygrd ,xp = xhwm[ip],yp = yhwm[ip]) #all mesh
#if pr > 1.2 * prr:
# print pr, prr, pr/prr
data.append (elev_hwm [ip])
model.append (maxe[~maxe.mask][i]+bias_cor[~maxe.mask][i])
model_x.append(xgrd[~maxe.mask][i])
model_y.append(ygrd[~maxe.mask][i])
prox.append(pr)
prox_coef.append(pr/prr)
#
data = np.array(data ).squeeze()
model = np.array(model).squeeze()
prox = np.array(prox ).squeeze()
#
maskf = (np.array(prox_coef) > 10) | (data < 1)
maskf = np.array(maskf).squeeze()
return data[~maskf],model[~maskf],prox[~maskf], xhwm[~maskf], yhwm[~maskf]
def datetime64todatetime(dt):
tmp = []
for it in range(len(dt)):
tmp.append(pd.Timestamp(dt[it]).to_pydatetime())
return np.array(tmp)
def plot_track(ax,track,date=None,color = 'r'):
if date is not None:
dates = np.array(track['dates'])
#ind = np.array(np.where((dates==date))).squeeze().item()
ind = find_nearest_time(dates,date)
ax.plot(track['lon'][ind],track['lat'][ind],'ro',alpha=1,ms=8)
ax.plot(track['lon'],track['lat'],lw=3,color=color,ls='dashed',alpha=1)
#sys.path.append('/home/Saeed.Moghimi/opt/pycodes/csdlpy/')
#import adcirc
from atcf import readTrack
def read_track(fname=None):
if fname is None:
fname = '/scratch4/COASTAL/coastal/save/Saeed.Moghimi/models/NEMS/NEMS_inps/01_data/tracks/ike_bal092008.dat'
track = readTrack(fname)
keys = ['dates', 'lon', 'vmax', 'lat']
for key in keys:
tmp = pd.DataFrame(track[key],columns=[key])
#dfh = df
if 'trc' not in locals():
trc = tmp
else:
trc = | pd.concat([trc,tmp],axis=1) | pandas.concat |
#!/usr/bin/env python
# coding: utf-8
# In[9]:
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
# In[20]:
fig_dir = "~/Dropbox/Conferences_and_Meetings/PI meetings/PI meeting 2020/figures/"
# In[2]:
# data = pd.read_csv("~/Dropbox/Conferences_and_Meetings/PI meetings/PI meeting 2020/data/USGS_12510500_NO3.csv")
# In[32]:
data = pd.read_csv("./USGS-12505450_NO3.csv")
# In[33]:
data
# In[14]:
data['startDateTime'] = pd.to_datetime(data['startDateTime'])
# In[19]:
fig, ax = plt.subplots(1,1, figsize = (8, 6))
sns.lineplot(x = "startDateTime", y = "result_va", marker = "o",
data = data)
ax.set_ylabel('Nitrate (mg/L)', fontsize = 18)
ax.set_xlabel('')
ax.set_title('USGS-12510500 (Yakima River at Kiona, WA)', fontsize = 24)
ax.tick_params(axis='both', which='major', labelsize=16)
# In[30]:
# fig.savefig(fig_dir + "USGS-12510500.png", dpi=300)
fig.savefig("./USGS-12510500.png", dpi=300)
plt.close(fig)
# ## from WQP station
# In[34]:
data = pd.read_csv("./USGS-12505450_NO3.csv")
# In[35]:
data
# In[37]:
data.columns
# In[36]:
data['ActivityStartDateTime'] = | pd.to_datetime(data['ActivityStartDateTime']) | pandas.to_datetime |
from xgboost import XGBRegressor
import pandas as pd
import argparse
from sklearn import metrics
import scipy
import pickle
from pathlib import Path
import numpy as np
'''
python3 XGBoostRegressor.py --dataset_type raw_c > raw_c_results.txt
python3 XGBoostRegressor.py --dataset_type raw_d > raw_d_results.txt
python3 XGBoostRegressor.py --dataset_type raw > raw_results.txt
'''
# extract type of feature data from arguments
parser = argparse.ArgumentParser()
parser.add_argument('--dataset_type', type=str,
required=True, help='type of data')
args = parser.parse_args()
# read base data
X_train, y_train, X_test, y_test = None, None, None, None
Xc_train, Xc_test = None, None
Xd_train, Xd_test = None, None
# Get labels (same across all datasets)
test_diff_df = pd.read_csv("diff_expr/C12TestDiff.tsv",
delimiter='\t', header=None)
train_diff_df = pd.read_csv(
"diff_expr/C12TrainDiff.tsv", delimiter='\t', header=None)
valid_diff_df = pd.read_csv(
"diff_expr/C12ValidDiff.tsv", delimiter='\t', header=None)
y_test = test_diff_df
y_train = pd.concat([train_diff_df, valid_diff_df])
# Get features
if args.dataset_type == "raw_c" or args.dataset_type == "raw":
c1_test_df = pd.read_csv("embeddings/C1TestEmbeddings.csv", header=None)
c1_train_df = pd.read_csv("embeddings/C1TrainEmbeddings.csv", header=None)
c1_valid_df = pd.read_csv("embeddings/C1ValidEmbeddings.csv", header=None)
c2_test_df = pd.read_csv("embeddings/C2TestEmbeddings.csv", header=None)
c2_train_df = pd.read_csv("embeddings/C2TrainEmbeddings.csv", header=None)
c2_valid_df = pd.read_csv("embeddings/C2ValidEmbeddings.csv", header=None)
c12_train_df = pd.concat([c1_train_df, c2_train_df], axis=1)
c12_valid_df = | pd.concat([c1_valid_df, c2_valid_df], axis=1) | pandas.concat |
from __future__ import print_function
import numpy as np
import pandas as pd
import scipy.stats
from parallelm.mlops import StatCategory as st
from parallelm.mlops import mlops as pm
from parallelm.mlops.examples import utils
from parallelm.mlops.examples.utils import RunModes
from parallelm.mlops.stats.graph import MultiGraph
from parallelm.mlops.stats.multi_line_graph import MultiLineGraph
from parallelm.mlops.stats.table import Table
"""
This code calculates the A/B test statistics.
- Points for two distributions
- Vertical line values on x-axis
- Conversion rate control
- Conversion rate B
- Relative uplift in conversion rate
- p-value
- z-score
- Standard error A
- Standard error B
- Standard error of difference
"""
class statsCalculator:
def __init__(self):
self._distControl = []
self._distB = []
self._conversionControl = []
self._conversionB = []
self._uplift = []
self._pValue = []
self._zScore = []
self._errorControl = []
self._errorB = []
self._errorDifference = []
self._verticalLine = []
"""
This function calculates all the statistics for A/B testing after the experiment is complete
Input: samples_control: Number of samples in control group
    conv_samples_control: Number of samples converted (could be from any metric),
strictly less than samples_control
samples_B: Number of samples in group B
conv_samples_B: Number of samples converted (could be from any metric),
strictly less than samples_B
"""
def exptOutcome(self, samples_control, conv_samples_control, samples_B, conv_samples_B,
confidence):
self._conversionControl = conv_samples_control * 100.0 / samples_control
self._conversionB = conv_samples_B * 100.0 / samples_B
if self._conversionControl != 0.0:
self._uplift = (self._conversionB - self._conversionControl) * 100.0 /\
self._conversionControl
else:
self._uplift = self._conversionB * 100.0
self._errorControl = np.sqrt(self._conversionControl / 100.0 * (
1 - (self._conversionControl / 100.0)) / samples_control)
self._errorB = np.sqrt(
self._conversionB / 100.0 * (1 - (self._conversionB / 100.0)) / samples_B)
self._errorDifference = np.sqrt(self._errorControl ** 2 + self._errorB ** 2)
self._zScore = (self._conversionB - self._conversionControl) / (100.0 * self._errorDifference)
if np.sign(self._zScore) == -1:
self._pValue = 1 - scipy.stats.norm.sf(abs(self._zScore))
else:
self._pValue = scipy.stats.norm.sf(abs(self._zScore))
self._distControl = self.calDist(samples_control, conv_samples_control)
self._distB = self.calDist(samples_B, conv_samples_B)
sigma = np.sqrt(samples_control * self._conversionControl / 100.0 * (
1 - self._conversionControl / 100.0))
if (confidence == 90):
self._verticalLine = conv_samples_control + 1.645 * sigma
elif (confidence == 95):
self._verticalLine = conv_samples_control + 1.96 * sigma
elif (confidence == 99):
self._verticalLine = conv_samples_control + 2.575 * sigma
else:
raise ValueError("confidence value should either be 90, 95 or 99")
"""
    This function calculates the (x,y) values for a line plot of the binomial distribution
    Input: samples: Number of samples in the binomial experiment
    conv_samples: Number of samples converted (could be from any metric), strictly less than samples
    return: distribution_x_axis: X axis points where probability mass is calculated
    distribution_y_axis: Y axis points which represent the probability mass
"""
def calDist(self, samples, conv_samples):
probability = conv_samples / samples
sigma = np.sqrt(samples * probability * (1 - probability))
# Lets capture 3sigma variation with n=20 points to plot
n = 21
distribution_points = 8 * sigma / n
distribution_x_axis = (conv_samples - 4 * sigma + 8 * sigma / n) + distribution_points * \
np.arange(n)
distribution_x_axis = distribution_x_axis.astype(int)
distribution_y_axis = scipy.stats.binom.pmf(distribution_x_axis, samples, probability)
return distribution_x_axis, distribution_y_axis
"""
This function returns the confidence
"""
def calConfidence(self):
return (1 - self._pValue) * 100
"""
This function returns true if the confidence is more than the given value, false otherwise.
This is same as the top banner you see on the screen in: https://abtestguide.com/calc/
Input: value: Confidence value usually one of these three values: 90, 95, 99
Output: True or False
"""
def calSuccess(self, value):
if self.calConfidence() > value and self._uplift > 0:
return True
else:
return False
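# Illustrative usage sketch (numbers are hypothetical): a control group converting
# at 5% versus a B group converting at 6%, evaluated at 95% confidence.
# sc = statsCalculator()
# sc.exptOutcome(samples_control=10000, conv_samples_control=500,
#                samples_B=10000, conv_samples_B=600, confidence=95)
# success = sc.calSuccess(95)   # True only if uplift > 0 and confidence > 95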
def ab_test(options, start_time, end_time, mode):
sc = None
if mode == RunModes.PYSPARK:
from pyspark import SparkContext
sc = SparkContext(appName="pm-ab-testing")
pm.init(sc)
elif mode == RunModes.PYTHON:
pm.init()
else:
raise Exception("Invalid mode " + mode)
not_enough_data = False
# Following are a and b component names
a_prediction_component_name = options.nodeA
b_prediction_component_name = options.nodeB
conv_a_stat_name = options.conversionsA
conv_b_stat_name = options.conversionsB
samples_a_stat_name = options.samplesA
samples_b_stat_name = options.samplesB
a_agent = utils._get_agent_id(a_prediction_component_name, options.agentA)
b_agent = utils._get_agent_id(b_prediction_component_name, options.agentB)
if a_agent is None or b_agent is None:
print("Invalid agent provided {} or {}".format(options.agentA, options.agentB))
pm.system_alert("PyException",
"Invalid Agent {} or {}".format(options.agentA, options.agentB))
return
try:
a_samples = pm.get_stats(name=samples_a_stat_name, mlapp_node=a_prediction_component_name,
agent=a_agent, start_time=start_time,
end_time=end_time)
b_samples = pm.get_stats(name=samples_b_stat_name, mlapp_node=b_prediction_component_name,
agent=b_agent, start_time=start_time,
end_time=end_time)
a_samples_pdf = | pd.DataFrame(a_samples) | pandas.DataFrame |
from .microfaune_package.microfaune.detection import RNNDetector
from .microfaune_package.microfaune import audio
import pandas as pd
import scipy.signal as scipy_signal
import numpy as np
import math
import os
def build_isolation_parameters(
technique,
threshold_type,
threshold_const,
threshold_min=0,
window_size=1.0,
chunk_size=2.0):
"""
Wrapper function for all of the audio isolation techniques (Steinberg,
Simple, Stack, Chunk). Will call the respective function of
each technique based on isolation_parameters "technique" key.
Args:
technique (string)
- Chooses which of the four isolation techniques to deploy
- options: "steinberg", "chunk", "stack", "simple"
threshold_type (string)
- Chooses how to derive a threshold from local score arrays
- options: "mean", "median", "standard deviation", "pure"
threshold_const (float)
- Multiplier for "mean", "median", and "standard deviation". Acts
as threshold for "pure"
threshold_min (float)
- Serves as a minimum barrier of entry for a local score to be
considered a positive ID of a class.
- default: 0
window_size (float)
- determines how many seconds around a positive ID local score
to build an annotation.
chunk_size (float)
- determines the length of annotation when using "chunk"
isolation technique
Returns:
isolation_parameters (dict)
- Python dictionary that controls how to go about isolating
automated labels from audio.
"""
isolation_parameters = {
"technique": technique,
"treshold_type": threshold_type,
"threshold_const": threshold_const,
"threshold_min": threshold_min,
"window_size": window_size,
"chunk_size": chunk_size
}
if window_size != 1.0 and technique != "steinberg":
print('''Warning: window_size is dedicated to the steinberg isolation
technique. Won't affect current technique.''')
if chunk_size != 2.0 and technique != "chunk":
print('''Warning: chunk_size is dedicated to the chunk technique.
Won't affect current technique.''')
return isolation_parameters
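# Illustrative usage sketch (values are hypothetical): parameters for the "chunk"
# technique with a median-based threshold and 3-second annotation chunks.
# isolation_parameters = build_isolation_parameters(
#     technique="chunk", threshold_type="median", threshold_const=2.0,
#     threshold_min=0.1, chunk_size=3.0)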
def isolate(
local_scores,
SIGNAL,
SAMPLE_RATE,
audio_dir,
filename,
isolation_parameters,
manual_id="bird",
normalize_local_scores=False):
"""
Wrapper function for all of the audio isolation techniques (Steinberg,
Simple, Stack, Chunk). Will call the respective function of
each technique based on isolation_parameters "technique" key.
Args:
local_scores (list of floats)
- Local scores of the audio clip as determined by
Microfaune Recurrent Neural Network.
SIGNAL (list of ints)
- Samples that make up the audio signal.
SAMPLE_RATE (int)
- Sampling rate of the audio clip, usually 44100.
audio_dir (string)
- Directory of the audio clip.
filename (string)
- Name of the audio clip file.
isolation_parameters (dict)
- Python Dictionary that controls the various label creation
techniques.
Returns:
Dataframe of automated labels for the audio clip based on passed in
isolation technique.
"""
# normalize the local scores so that the max value is 1.
if normalize_local_scores:
local_scores_max = max(local_scores)
for ndx in range(len(local_scores)):
local_scores[ndx] = local_scores[ndx] / local_scores_max
# initializing the output dataframe that will contain labels across a
# single clip
isolation_df = | pd.DataFrame() | pandas.DataFrame |
from datetime import datetime, timedelta
import time
import pandas as pd
import sklearn.linear_model
from sklearn.ensemble import AdaBoostRegressor
from sklearn.ensemble import GradientBoostingRegressor
from sklearn.svm import SVR
from sklearn.ensemble import RandomForestRegressor
from pytz import timezone
import tweepy
import sys
import os
from pyowm.owm import OWM
from textblob import TextBlob
from nltk.sentiment.vader import SentimentIntensityAnalyzer as SIA
import pickle
import csv
# The DAG object; we'll need this to instantiate a DAG
from airflow import DAG
# Operators; we need this to operate!
from airflow.operators.bash import BashOperator
from airflow.operators.python import PythonOperator
TWITTER_ACCESS_TOKEN = '<KEY>'
TWITTER_ACCESS_TOKEN_SECRET = '<KEY>'
TWITTER_CONSUMER_KEY = 'OxzmAILFV6CYdWLuVBBFZTk4n'
TWITTER_CONSUMER_SECRET = '<KEY>'
APIKEY = '3a7810c48a0c9f7c3c38754e4415f7d7'
auth = tweepy.OAuthHandler(TWITTER_CONSUMER_KEY, TWITTER_CONSUMER_SECRET)
auth.set_access_token(TWITTER_ACCESS_TOKEN, TWITTER_ACCESS_TOKEN_SECRET)
api = tweepy.API(auth)
RUNTIME = 780 # Seconds to collect tweets
res = []
pathTweetStorage = 'airflow/project/tweet_storage.csv'
pathWeatherStorage = 'airflow/project/weather_storage.csv'
pathClassifier = 'airflow/project/classifier.csv'
pathLinearRegression = 'airflow/project/LinearRegression.sav'
pathRidgeRegression = 'airflow/project/RidgeRegression.sav'
pathGradientBoosting = 'airflow/project/GradientBoosting.sav'
pathSVR = 'airflow/project/SVR.sav'
pathRandomForest = 'airflow/project/RandomForest.sav'
pathAdaBoost = 'airflow/project/AdaBoost.sav'
pathColumns = 'airflow/project/columns.txt'
# These args will get passed on to each operator
# You can override them on a per-task basis during operator initialization
default_args = {
'owner': 'fred',
'depends_on_past': False,
'email': ['<EMAIL>'],
'email_on_failure': False,
'email_on_retry': False,
'retries': 1,
'retry_delay': timedelta(seconds=30),
# 'queue': 'bash_queue',
# 'pool': 'backfill',
# 'priority_weight': 10,
# 'end_date': datetime(2016, 1, 1),
# 'wait_for_downstream': False,
# 'dag': dag,
# 'sla': timedelta(hours=2),
# 'execution_timeout': timedelta(seconds=300),
# 'on_failure_callback': some_function,
# 'on_success_callback': some_other_function,
# 'on_retry_callback': another_function,
# 'sla_miss_callback': yet_another_function,
# 'trigger_rule': 'all_success'
}
class StdOutListener(tweepy.Stream):
def on_status(self, status):
global res
try:
now = datetime.now(timezone('EST'))
tweetTime = f'{now.year}-{now.month}-{now.day}-{now.hour}-{now.minute}'
box = [v for v in status.place.bounding_box.coordinates[0]]
if status.lang == 'en':
res.append({'Date': tweetTime, 'Text': status.text, 'Box': box})
return True
except BaseException as e:
print("Error on _data: %s" % str(e))
return False
def on_error(self, status):
print(status)
def countdown(t):
for i in range(t, -1, -1):
sys.stdout.write("\r")
sys.stdout.write("{:2d} seconds remaining...".format(i))
sys.stdout.flush()
time.sleep(1)
def atRemover(text):
words = text.split()
while words[0][0] == '@':
words.pop(0)
return ' '.join(words)
def analyzeSentiment(text):
sentimentAnalyzerVader = SIA().polarity_scores(text)['compound']
sentimentAnalyzerTextBlob = TextBlob(text).sentiment[0]
if sentimentAnalyzerTextBlob > 0 and sentimentAnalyzerVader > 0:
return 'Positive'
elif sentimentAnalyzerTextBlob < 0 and sentimentAnalyzerVader < 0:
return 'Negative'
return 'Cannot analyze'
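# Illustrative note: both VADER and TextBlob must agree on the sign, so a clearly
# upbeat text such as "I love this sunny day" would be expected to return
# 'Positive', while mixed or neutral text falls through to 'Cannot analyze'.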
def sumTuples(t1, t2):
a, b = t1
c, d = t2
return (a + c, b + d)
def collectTweets():
stream = StdOutListener(TWITTER_CONSUMER_KEY, TWITTER_CONSUMER_SECRET, TWITTER_ACCESS_TOKEN, TWITTER_ACCESS_TOKEN_SECRET)
stream.filter(locations=[-74, 40, -73, 41], threaded=True)
countdown(RUNTIME)
stream.disconnect()
with open(pathTweetStorage, 'w', encoding='utf-8') as storage:
writer = csv.DictWriter(storage, fieldnames=['Date','Text','Box'], lineterminator = '\n')
writer.writeheader()
for data in res:
writer.writerow(data)
def processTweets():
drops = []
tweets = pd.read_csv(pathTweetStorage)
for tweet in range(tweets.shape[0]):
text = tweets.iloc[tweet, 1]
tweets.iloc[tweet, 1] = atRemover(text)
if len(text) < 4:
drops.append(tweet)
tweets = tweets.drop(drops)
tweets.to_csv(pathTweetStorage, encoding='UTF-8', index=False)
def collectWeather():
weatherList = []
owm = OWM(APIKEY)
mgr = owm.weather_manager()
for i in range(13):
now = datetime.now(timezone('EST'))
weatherTime = f'{now.year}-{now.month}-{now.day}-{now.hour}-{now.minute}'
weather = mgr.weather_at_place('New York').weather
weather_status = weather.status
temperature = weather.temperature('celsius')['temp']
weatherList.append({'Time': weatherTime, 'Status': weather_status, 'Temperature': temperature})
time.sleep(60)
with open(pathWeatherStorage, 'w', encoding='utf-8') as storage:
writer = csv.DictWriter(storage, fieldnames=['Time', 'Status', 'Temperature'], lineterminator = '\n')
writer.writeheader()
for data in weatherList:
writer.writerow(data)
def combineAndClassify():
tweets = pd.read_csv(pathTweetStorage)
weather = pd.read_csv(pathWeatherStorage)
weatherDate = [date for date in weather.iloc[:, 0]]
combined = []
for tweetInd in range(tweets.shape[0]):
tweetDate = tweets.iloc[tweetInd, 0]
try:
weatherInd = weatherDate.index(tweetDate)
sentiment = analyzeSentiment(tweets.iloc[tweetInd, 1])
if sentiment != 'Cannot analyze':
combined.append({'Date': tweetDate, 'Text': tweets.iloc[tweetInd, 1], 'Box': tweets.iloc[tweetInd, 2],
'Status': weather.iloc[weatherInd, 1], 'Temperature': weather.iloc[weatherInd, 2],
'Sentiment': sentiment})
except ValueError:
pass
combinedDF = pd.DataFrame.from_dict(combined)
if os.path.exists(pathClassifier):
combinedDF.to_csv(pathClassifier, mode='a', index=False, header=False)
else:
combinedDF.to_csv(pathClassifier, mode='a', index=False, header=True)
def regression():
data = | pd.read_csv(pathClassifier) | pandas.read_csv |
'''
ver 0.1, namera@ , initial-release, Oct26'17
ver 0.2, namera@ , included execution id for traceability, Nov3'17
ver 0.3, shawo@ , Corrected typos in variable names, Apr23'18
Hedge Your Own Funds: Running Monte Carlo Simulations on EC2 Spot
=================================================================
This worker script launches the Monte-Carlo simulations as described in the session's Jupyter Notebook.
All input parameters have defaults, please see 'parser.add_argument' for details or simply append -h at the end of the execution line to
see input parameter details.
e.g. python worker.py -h
Output:
-------
This script writes simulated results into CSV files; the files are:
<exec_id>_<Stock_Name>_MonteCarloSimResult.csv - for example, AMZN_MonteCarloSimResult.csv, this file holds the last Monte-Carlo simulated value
for each iteration and the expected cash value given the trading strategy specified in the notebook. Initial investment is $100,000
portfolioRiskAssessment.csv - returns the risk value of a multi-stock portfolio. See --portfolio_stocks_list input parameter for more details.
Sample executions:
------------------
Run simulation with default parameters:
python worker.py
Specify a list of stocks for Portfolio Risk Assessment:
python worker.py --stocks-list IBM AMZN MSFT
Specify 1,000,000 simulations to execute:
python worker.py --iterations 1000000
'''
import pandas as pd
from pandas_datareader import data as pdr
import numpy as np
import datetime , time
from math import sqrt
from scipy.stats import norm
import fix_yahoo_finance as yf
import argparse
parser = argparse.ArgumentParser(description='Process some integers.')
parser.add_argument('--iterations', dest='iterations',default=2000, type=int,
help='Number of simulated iterations default=2000')
parser.add_argument('--stock', dest='stock',default="AMZN",
help='Stock Name')
parser.add_argument('--short_window_days', dest='short_window_days',default=10, type=int,
                    help='Short moving average in days default=10')
parser.add_argument('--long_window_days', dest='long_window_days',default=40, type=int,
                    help='Long moving average in days (default=40)')
parser.add_argument('--trading_days', dest='trading_days',default=252, type=int,
help='Number of trading days (default=252)')
parser.add_argument('-n', '--stocks-list', default=['AAPL','AMZN','MSFT','INTC'], nargs='+')
parser.add_argument('--id', dest='exec_id',default="None",
help='Unique execution id')
args = parser.parse_args()
STOCK=args.stock
short_window = args.short_window_days
long_window = args.long_window_days
trading_days = args.trading_days
sim_num = args.iterations
portfolio_stocks_list = args.stocks_list
file_prepend_str = args.exec_id
# create output files (CSV) unique string to preapend
if (file_prepend_str == 'None'):
t = time.localtime()
file_prepend_str = time.strftime('%b-%d-%Y_%H%M', t)
# Import stock information to dataframe. ADDED 04/2018 - Fix for yahoo finance
yf.pdr_override()
stock_df = pdr.get_data_yahoo(STOCK,start=datetime.datetime(2006, 10, 1), end=datetime.datetime(2017, 10, 1))
# Calculate the compound annual growth rate (CAGR) which
# will give us our mean return input (mu)
days = (stock_df.index[-1] - stock_df.index[0]).days
cagr = ((((stock_df['Adj Close'][-1]) / stock_df['Adj Close'][1])) ** (365.0/days)) - 1
mu = cagr
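# Illustrative check with hypothetical numbers: a price going from $50 to $150
# over ~2190 days (about 6 years) gives cagr = (150/50)**(365.0/2190) - 1 ≈ 0.20,
# i.e. roughly a 20% mean annual return fed into the simulation as mu.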
# create a series of percentage returns and calculate
# the annual volatility of returns. Generally, the higher the volatility,
# the riskier the investment in that stock, which results in investing in one over another.
stock_df['Returns'] = stock_df['Adj Close'].pct_change()
vol = stock_df['Returns'].std()*sqrt(252)
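# Illustrative check with hypothetical numbers: a daily return standard deviation
# of 0.019 annualises to vol = 0.019 * sqrt(252) ≈ 0.30, i.e. about 30% volatility.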
# Set the initial capital
initial_capital= float(100000.0)
# Set up empty list to hold our ending values for each simulated price series
sim_result = []
# Set up empty list to hold portfolio value for each simulated price serries, this is the value of position['total']
portfolio_total = []
# Define Variables
start_price = stock_df['Adj Close'][-1] #starting stock price (i.e. last available real stock price)
# Initialize the `signals` DataFrame
signals = pd.DataFrame()
# Initialize by setting the value for all rows in this column to 0.0.
signals['signal'] = 0.0
signals['short_mavg'] = 0.0
# Create a DataFrame `positions`
positions = pd.DataFrame(index=signals.index).fillna(0.0)
# Choose number of runs to simulate - I have chosen 1,000
for i in range(sim_num):
# create list of daily returns using random normal distribution
daily_returns=np.random.normal(mu/trading_days,vol/sqrt(trading_days),trading_days)+1
# Set starting price and create price series generated by above random daily returns
price_list = [start_price]
for x in daily_returns:
price_list.append(price_list[-1]*x)
# Convert list to Pandas DataFrame
price_list_df = pd.DataFrame(price_list)
# Append the ending value of each simulated run to the empty list we created at the beginning
sim_result.append(price_list[-1])
# Create short simple moving average over the short & long window
signals['short_mavg'] = price_list_df[0].rolling(short_window).mean()
signals['long_mavg'] = price_list_df[0].rolling(long_window).mean()
# Create a signal when the short moving average crosses the long moving average,
# but only for the period greater than the shortest moving average window.
signals['signal'][short_window:] = np.where(signals['short_mavg'][short_window:]
> signals['long_mavg'][short_window:], 1.0, 0.0)
# Generate trading orders
signals['positions'] = signals['signal'].diff()
# Buy 100 shares
positions[STOCK] = 100*signals['signal']
# Initialize the portfolio with value owned
portfolio = positions.multiply(price_list_df[0], axis=0)
# Store the difference in shares owned
pos_diff = positions.diff()
# Add `holdings` to portfolio
portfolio['holdings'] = (positions.multiply(price_list_df[0], axis=0)).sum(axis=1)
# Add `cash` to portfolio
portfolio['cash'] = initial_capital - (pos_diff.multiply(price_list_df[0], axis=0)).sum(axis=1).cumsum()
# Add `total` to portfolio
portfolio['total'] = portfolio['cash'] + portfolio['holdings']
# Append the ending value of each simulated run to the empty list we created at the beginning
portfolio_total.append(portfolio['total'].iloc[-1])
# Simulation Results
# print sim_result
df1 = pd.DataFrame(sim_result, columns=["MonteCarloResults"])
df1.to_csv(file_prepend_str + "_" + STOCK + "_sim_results.csv")
# Portfolio Total
# Print portfolio_total
df2 = pd.DataFrame(portfolio_total, columns=["portfolioTotal"])
df2.to_csv(file_prepend_str + "_" + STOCK + "_portfolio_total.csv")
# Create one data frame and write to file.
result = pd.concat([df1, df2], axis=1).reindex(df1.index)
result.to_csv(file_prepend_str + "_" + STOCK + "_MonteCarloSimResult.csv")
## portfolio Risk Assessment Section
#list of stocks in portfolio
#defaults are: STOCKS = ['AAPL','AMZN','MSFT','INTC']
portfolio_data = pdr.get_data_yahoo(portfolio_stocks_list,
start=datetime.datetime(2015, 1, 1),
end=datetime.datetime(2017, 1, 1))['Adj Close']
#convert daily stock prices into daily returns
returns = portfolio_data.pct_change()
#calculate mean daily return and covariance of daily returns
mean_daily_returns = returns.mean()
cov_matrix = returns.cov()
#set up array to hold results
results = np.zeros((3,sim_num))
# run the sumulator
for i in range(sim_num):
#select random weights for portfolio holdings
weights = np.random.random(len(portfolio_stocks_list))
#rebalance weights to sum to 1
weights /= np.sum(weights)
#calculate portfolio return and volatility
portfolio_return = np.sum(mean_daily_returns * weights) * 252
portfolio_std_dev = np.sqrt(np.dot(weights.T,np.dot(cov_matrix, weights))) * np.sqrt(252)
#store results in results array
results[0,i] = portfolio_return
results[1,i] = portfolio_std_dev
#store Sharpe Ratio (return / volatility) - risk free rate element excluded for simplicity
results[2,i] = results[0,i] / results[1,i]
#convert results array to Pandas DataFrame
results_frame = | pd.DataFrame(results.T,columns=['ret','stdev','sharpe']) | pandas.DataFrame |
"""
Dash callbacks
"""
# Third party imports
from boto3.dynamodb.conditions import Key
from dash.dependencies import Input, Output, State
from dash_table.Format import Format
import plotly.graph_objs as go
from flask import current_app
import pandas as pd
import numpy as np
from scipy import stats
from scipy.stats import t
# Local application imports
# from app.users import User
from app.extensions import dynamo
from app.sal.utils.colors import chart_colors
from app.sal.utils.styling import axes
from app.sal.layouts.filters import ALL_DEPT_DROPDOWN_OPTIONS
def adjust_for_sal_cutoff_column(columns, mode='remove'):
"""Adjusts list of column definitions based on the mode argument
Args:
columns (list[dict]): list of column definitions
mode (str 'remove'|'add'): what to do with the salary cutoff column
Returns:
list[dict]: Modified list of column definitions
"""
salary_cutoff_column = {'name': 'Salary Cutoff, $', 'id': 'salary_cutoff', 'type': 'numeric', 'format': Format().group(True)}
is_present = False
index = 0
for col in columns:
if col['id'] == 'salary_cutoff':
is_present = True
break
else:
index += 1
if mode == 'add':
if is_present:
return columns
else:
columns.append(salary_cutoff_column)
return columns
if mode == 'remove':
if is_present:
columns.pop(index)
return columns
else:
return columns
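# Illustrative usage sketch (column definitions are hypothetical):
# cols = [{'name': 'Name', 'id': 'name'}]
# cols = adjust_for_sal_cutoff_column(cols, mode='add')     # appends the salary cutoff column
# cols = adjust_for_sal_cutoff_column(cols, mode='remove')  # drops it again if present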
CUTOFF_VALUES = {
'quartile': 0.25,
'quintile': 0.20,
'third': 0.33,
}
def register_sal_callbacks(dashapp):
table = dynamo.tables[current_app.config['DB_SAL']]
# Compression toggle button
@dashapp.callback(Output('compression-options-container', 'style'),
[Input('compression-analysis-toggle', 'value')])
def toggle_compression_options_display(value):
"""Controls whether or not compression analysis options are displayed
Args:
value (bool): Value of the compression-analysis-toggle element (True if checked)
Returns:
dict: Dictionary describing the style of the compression-options-container element.
Contains a value for `display`
"""
if value:
return {'display': 'flex'}
else:
return {'display': 'none'}
# TABLE
@dashapp.callback([Output('sal-table', 'data'),
Output('sal-table', 'columns')],
[Input('timestamp-dropdown', 'value'),
Input('dept-dropdown', 'value'),
Input('compression-analysis-toggle', 'value'),
Input('rank-option', 'value'),
Input('salary-cutoff-option', 'value'),
Input('ten-stat-filter', 'value')],
State('sal-table', 'columns'))
def update_table(timestamp, dept, compression_analysis_enabled,
rank_option, salary_cutoff_option,
tenure_status_selection, table_columns):
"""Callback that controls table rendering
The line below is needed in order to get data for only one timestamp
Key('SK').between(f'DATA#SALARY#{timestamp}', f'DATA#SALARY#{timestamp}@'
Args:
timestamp (str): Selected timestamp
dept (str): Selected department
compression_analysis_enabled (bool): Value of the compression-analysis-toggle element (True if checked)
rank_option (str): Selected rank option
salary_cutoff_option (str): Selected salary cutoff option
tenure_status_selection (list[str]): Selected tenure statuses
table_columns (list[dict]): List of column definitions of Dash's DataTable
Returns:
list[dict]: Data to render
list[dict]: Definition of table's columns
"""
if dept == 'AS':
# If all A&S is selected, load every department (not efficient)
data = []
for item in ALL_DEPT_DROPDOWN_OPTIONS:
resp = table.query(
KeyConditionExpression=Key('PK').eq(f"DEPT#{item['value']}") & Key('SK').between(f'DATA#SALARY#{timestamp}', f'DATA#SALARY#{timestamp}@'),
)
data.extend(resp['Items'])
if compression_analysis_enabled:
# If compression analysis is enabled, then only display people who match the conditions
df = | pd.DataFrame(data) | pandas.DataFrame |
from datetime import datetime, timedelta
import operator
import pickle
import unittest
import numpy as np
from pandas.core.index import Index, Factor, MultiIndex, NULL_INDEX
from pandas.util.testing import assert_almost_equal
import pandas.util.testing as tm
import pandas._tseries as tseries
class TestIndex(unittest.TestCase):
def setUp(self):
self.strIndex = tm.makeStringIndex(100)
self.dateIndex = tm.makeDateIndex(100)
self.intIndex = tm.makeIntIndex(100)
self.empty = Index([])
self.tuples = Index(zip(['foo', 'bar', 'baz'], [1, 2, 3]))
def test_hash_error(self):
self.assertRaises(TypeError, hash, self.strIndex)
def test_deepcopy(self):
from copy import deepcopy
copy = deepcopy(self.strIndex)
self.assert_(copy is self.strIndex)
def test_duplicates(self):
idx = Index([0, 0, 0])
self.assertRaises(Exception, idx._verify_integrity)
def test_sort(self):
self.assertRaises(Exception, self.strIndex.sort)
def test_mutability(self):
self.assertRaises(Exception, self.strIndex.__setitem__, 5, 0)
self.assertRaises(Exception, self.strIndex.__setitem__, slice(1,5), 0)
def test_constructor(self):
# regular instance creation
tm.assert_contains_all(self.strIndex, self.strIndex)
tm.assert_contains_all(self.dateIndex, self.dateIndex)
# casting
arr = np.array(self.strIndex)
index = arr.view(Index)
tm.assert_contains_all(arr, index)
self.assert_(np.array_equal(self.strIndex, index))
# what to do here?
# arr = np.array(5.)
# self.assertRaises(Exception, arr.view, Index)
def test_constructor_corner(self):
# corner case
self.assertRaises(Exception, Index, 0)
def test_compat(self):
self.strIndex.tolist()
def test_equals(self):
# same
self.assert_(Index(['a', 'b', 'c']).equals(Index(['a', 'b', 'c'])))
# different length
self.assertFalse(Index(['a', 'b', 'c']).equals( | Index(['a', 'b']) | pandas.core.index.Index |
from backtester.executionSystem.base_execution_system import BaseExecutionSystem, InstrumentExection
from backtester.logger import *
import numpy as np
import pandas as pd
class SimpleExecutionSystem(BaseExecutionSystem):
def __init__(self, enter_threshold=0.7, exit_threshold=0.55, longLimit=10,
shortLimit=10, capitalUsageLimit=0, enterlotSize=1, exitlotSize = 1, limitType='L', price='close'):
self.enter_threshold = enter_threshold
self.exit_threshold = exit_threshold
self.longLimit = longLimit
self.shortLimit = shortLimit
self.capitalUsageLimit = capitalUsageLimit
self.enterlotSize = enterlotSize
self.exitlotSize = exitlotSize
self.limitType = limitType
self.priceFeature = price
def getPriceSeries(self, instrumentsManager):
instrumentLookbackData = instrumentsManager.getLookbackInstrumentFeatures()
try:
price = instrumentLookbackData.getFeatureDf(self.priceFeature).iloc[-1]
return price
except KeyError:
logError('You have specified Dollar Limit but Price Feature Key %s does not exist'%self.priceFeature)
def getLongLimit(self, instrumentIds, price):
if isinstance(self.longLimit, pd.DataFrame):
return self.convertLimit(self.longLimit, price)
if isinstance(self.longLimit, dict):
longLimitDf = pd.Series(self.longLimit)
return self.convertLimit(longLimitDf, price)
else:
return self.convertLimit(pd.Series(self.longLimit, index=instrumentIds), price)
def getShortLimit(self, instrumentIds, price):
if isinstance(self.shortLimit, pd.DataFrame):
return self.convertLimit(self.shortLimit, price)
if isinstance(self.shortLimit, dict):
shortLimitDf = pd.Series(self.shortLimit)
return self.convertLimit(shortLimitDf, price)
else:
return self.convertLimit(pd.Series(self.shortLimit, index=instrumentIds), price)
def getEnterLotSize(self, instrumentIds, price):
if isinstance(self.enterlotSize, pd.DataFrame):
            return self.convertLimit(self.enterlotSize, price)
if isinstance(self.enterlotSize, dict):
lotSizeDf = pd.Series(self.enterlotSize)
return self.convertLimit(lotSizeDf, price)
else:
return self.convertLimit(pd.Series(self.enterlotSize, index=instrumentIds), price)
def getExitLotSize(self, instrumentIds, price):
if isinstance(self.exitlotSize, pd.DataFrame):
            return self.convertLimit(self.exitlotSize, price)
if isinstance(self.exitlotSize, dict):
lotSizeDf = pd.Series(self.exitlotSize)
return self.convertLimit(lotSizeDf, price)
else:
return self.convertLimit(pd.Series(self.exitlotSize, index=instrumentIds), price)
def convertLimit(self, df, price):
if self.limitType == 'L':
return df
else:
try:
return np.floor(df / price)
except KeyError:
logError('You have specified Dollar Limit but Price Feature Key does not exist')
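    # Illustrative conversion with hypothetical numbers: with a dollar-based limit
    # (limitType other than 'L'), a $10,000 limit at a price of $250 becomes
    # floor(10000 / 250) = 40 shares.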
def getInstrumentExecutionsFromExecutions(self, time, executions):
instrumentExecutions = []
        for (instrumentId, position) in executions.items():
if position == 0:
continue
instExecution = InstrumentExection(time=time,
instrumentId=instrumentId,
volume=np.abs(position),
executionType=np.sign(position))
instrumentExecutions.append(instExecution)
return instrumentExecutions
def getExecutions(self, time, instrumentsManager, capital):
instrumentLookbackData = instrumentsManager.getLookbackInstrumentFeatures()
currentPredictions = instrumentLookbackData.getFeatureDf('prediction').iloc[-1]
executions = self.exitPosition(time, instrumentsManager, currentPredictions)
executions += self.enterPosition(time, instrumentsManager, currentPredictions, capital)
# executions is a series with stocknames as index and positions to execute as column (-10 means sell 10)
return self.getInstrumentExecutionsFromExecutions(time, executions)
def getExecutionsAtClose(self, time, instrumentsManager):
instrumentExecutions = []
instruments = instrumentsManager.getAllInstrumentsByInstrumentId().values()
for instrument in instruments:
position = instrument.getCurrentPosition()
if position == 0:
continue
instrumentExec = InstrumentExection(time=time,
instrumentId=instrument.getInstrumentId(),
volume=np.abs(position),
executionType=-np.sign(position))
instrumentExecutions.append(instrumentExec)
return instrumentExecutions
def exitPosition(self, time, instrumentsManager, currentPredictions, closeAllPositions=False):
instrumentLookbackData = instrumentsManager.getLookbackInstrumentFeatures()
positionData = instrumentLookbackData.getFeatureDf('position')
position = positionData.iloc[-1]
price = self.getPriceSeries(instrumentsManager)
executions = pd.Series([0] * len(positionData.columns), index=positionData.columns)
if closeAllPositions:
executions = -position
return executions
executions[self.exitCondition(currentPredictions, instrumentsManager)] = -np.sign(position)*\
np.minimum(self.getExitLotSize(positionData.columns, price) , np.abs(position))
executions[self.hackCondition(currentPredictions, instrumentsManager)] = -np.sign(position)*\
np.minimum(self.getExitLotSize(positionData.columns, price) , np.abs(position))
return executions
def enterPosition(self, time, instrumentsManager, currentPredictions, capital):
instrumentLookbackData = instrumentsManager.getLookbackInstrumentFeatures()
positionData = instrumentLookbackData.getFeatureDf('position')
position = positionData.iloc[-1]
price = self.getPriceSeries(instrumentsManager)
executions = pd.Series([0] * len(positionData.columns), index=positionData.columns)
executions[self.enterCondition(currentPredictions, instrumentsManager)] = \
self.getEnterLotSize(positionData.columns, price) * self.getBuySell(currentPredictions, instrumentsManager)
# No executions if at position limit
executions[self.atPositionLimit(capital, positionData, price)] = 0
return executions
def getBuySell(self, currentPredictions, instrumentsManager):
return np.sign(currentPredictions - 0.5)
def enterCondition(self, currentPredictions, instrumentsManager):
return (currentPredictions - 0.5).abs() > (self.enter_threshold - 0.5)
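    # Illustrative reading of the defaults: with enter_threshold=0.7 a prediction
    # above 0.7 enters long (sign(prediction - 0.5) > 0) and one below 0.3 enters
    # short; predictions between 0.3 and 0.7 generate no new position.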
def atPositionLimit(self, capital, positionData, price):
if capital <= self.capitalUsageLimit:
logWarn('Not Enough Capital')
return | pd.Series(True, index=positionData.columns) | pandas.Series |
# Copyright (C) 2020 University of Oxford
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import logging
import requests
import pandas as pd
import numpy as np
from utils.fetcher.base_government_response import BaseGovernmentResponseFetcher
from .utils import parser, to_int
from datetime import datetime, timedelta
__all__ = ('StringencyFetcher',)
logger = logging.getLogger(__name__)
class StringencyFetcher(BaseGovernmentResponseFetcher):
LOAD_PLUGIN = True
SOURCE = 'GOVTRACK'
def fetch(self):
# First intensive care hospitalisation on 2020-01-01
if self.sliding_window_days:
date_from = (datetime.now() - timedelta(days=self.sliding_window_days)).strftime('%Y-%m-%d')
else:
date_from = '2020-01-01'
date_to = datetime.today().strftime('%Y-%m-%d')
api_data = requests.get(
f'https://covidtrackerapi.bsg.ox.ac.uk/api/v2/stringency/date-range/{date_from}/{date_to}').json()
return parser(api_data, self.country_codes_translator)
def fetch_details(self, country_code, date_value):
api_details = requests.get(
f'https://covidtrackerapi.bsg.ox.ac.uk/api/v2/stringency/actions/{country_code}/{date_value}'
).json()
if "stringencyData" in api_details:
api_details.pop("stringencyData")
return api_details
def fetch_csv(self):
df = pd.read_csv(
f'https://raw.githubusercontent.com/OxCGRT/covid-policy-tracker/master/data/OxCGRT_latest.csv')
df = df.replace({np.nan: None})
df = df.merge(self.country_codes_translator.translation_pd, right_on='Alpha-3 code', left_on='CountryCode',
how='left')
return df
def run(self):
# RAW Govtrack data
raw_govtrack_data = self.fetch_csv()
for index, record in raw_govtrack_data.iterrows():
if | pd.isna(record['English short name lower case']) | pandas.isna |
# !/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Classes & functions related to postgreSQL databases
"""
from contextlib import contextmanager
from functools import partial
from operator import itemgetter
from typing import Optional, Union, Tuple, List, Any, Sequence
import numpy as np
import pandas as pd
import pandas.io.sql
import psycopg2
from psycopg2 import sql
from psycopg2.extras import execute_values
from psycopg2.extensions import register_adapter, AsIs
from ..utils.log_utl import monitor, create_logger
from ..utils.os_utl import check_types, check_options, NoneType
from ._core import BaseActorSQL, join_schema_to_table_name, parse_schema_table
register_adapter(np.int64, AsIs)
FLAVOR = 'PostGreSQL'
LOGGER = create_logger(__name__, on_conflict='ignore', fmt=f'[%(asctime)s | {FLAVOR} | %(levelname)s] %(message)s',
logger_level=20)
MONITOR_LOGGER = LOGGER.getChild('monitor')
MONITOR_LOGGER.setLevel(30)
# =========================================================================================================
# ================================ 1. OTHER
# To retrieve data types in PostgreSQL
"""SELECT typname, typelem FROM pg_type
WHERE typname LIKE '_int%';"""
# PostgreSQL type equivalent with pandas
TYPE_ALIASES = {
'object': 'text',
'string': 'text',
'int64': 'bigint',
'int32': 'integer',
'int16': 'integer',
'int8': 'smallint',
'float': 'float',
'float64': 'real',
'float32': 'real',
'float16': 'real',
'float8': 'real',
'bool': 'boolean',
'datetime64[ns]': 'timestamp',
}
# GLOBAL VARIABLES BELOW MAYBE CAN BE DELETED
TYPE_ALIASES_REVERSED = {
16: 'bool',
21: 'float32', # int16 but do not support NA so then pd.to_numeric
20: 'float64', # int64 but do not support NA so then pd.to_numeric
23: 'float32', # int32 but do not support NA so then pd.to_numeric
25: 'object',
700: 'float32',
701: 'float32',
1043: 'object',
1114: 'object', # datetime64 so then parse by pd.read_csv
1700: 'float64'
}
DATES_OID = [1114]
DATE_PARSER = pd.to_datetime
# =========================================================================================================
# ================================ 2. CLASSES
# noinspection PyUnusedLocal,PyTypeChecker
class PostgreSQLManager(BaseActorSQL):
"""This class provides methods to interact with a PostgreSQL Database
Args:
user: User identification in the database
password: <PASSWORD>
        host: Address to the database
        port: Port to connect through
        database: Optional. Name of the database to connect to
kwargs: Additional keyword arguments to pass to BaseActor class ("schema")
Examples:
>>> db = PostgreSQLManager('user', 'password', '<PASSWORD>', 'localhost', '5432', 'some_schema')
[2021-05-20 12:59:11 | PostGreSQL | INFO] Connection opened to test
[2021-05-20 12:59:12 | PostGreSQL | INFO] Active schema changed to some_schema.
"""
_flavor = FLAVOR
_unsafe_symbols = (';', '--') # define symbols to be checked on arguments that can't be parsed by sql components.
_cursor_manager_exceptions = (Exception, KeyboardInterrupt, psycopg2.Error)
def __init__(self, user: str, password: str, host: str, port: Union[str, int] = '5432',
database: Optional[str] = None, **kwargs) -> None:
self.host, self.port = host, port
super().__init__(database, user, password, logger=LOGGER, **kwargs)
self.refresh()
@parse_schema_table
def analyse(self, table_name: str, schema: Optional[str] = None) -> None:
"""Performs an analysis of a table. This computes statistics such as approximate count of rows
Args:
table_name: Table to be analysed
schema: Optional. Schema to look for the table in. If None provide will use the instance active_schema
Returns:
None
"""
stmt = "ANALYZE {schema}.{table_name}"
params = dict(schema=sql.Identifier(schema), table_name=sql.Identifier(table_name))
self.execute(sql.SQL(stmt).format(**params), log=f'Table {schema}.{table_name} analysed.')
def execute(self, stmt: str, params: Optional[Tuple[Any]] = None, *, return_cursor_metadata: bool = False,
log: Optional[str] = None) -> Optional[Union[bool, str, psycopg2.extensions.cursor]]:
return super().execute(stmt, params, return_cursor_metadata=return_cursor_metadata, log=log)
def get_sql(self, stmt: str, params: Optional[Tuple[Any]] = None, **kwargs) -> str:
"""Generates the sql statement with the params passed. This can be used to simulate the statements before
sending them.
Args:
stmt: Sql statement to apply params to
params: Params to insert in the sql statement
kwargs: For compatibility
Returns:
String: Sql statement with params applied to
"""
with self.cursor_manager() as cur:
return cur.mogrify(stmt, params).decode()
@check_types(pid=(int, list, tuple))
def kill_process(self, pid: Union[int, List[int], Tuple[int]], *, force: bool = False) -> bool:
"""Terminate a process in the database.
Args:
pid: Id of the process to terminate
force: whether to "cancel" the process softly or "terminate" forcefully.
Returns:
Boolean: True if successful
"""
if isinstance(pid, (list, tuple)):
for pid_ in pid:
self.kill_process(pid_, force=force)
elif isinstance(pid, int):
return self.execute(f"SELECT pg_{'terminate' if force else 'cancel'}_backend({pid});",
log=f'Process {pid} was terminated successfully.')
@check_options(state=('active', 'idle', None))
def get_transactions(self, state: Optional[str] = None) -> pd.DataFrame:
"""List transactions in the database.
Args:
state: Filter transactions listed by their "state". Allowed: ('active', 'idle', None).
Returns:
pd.DataFrame: Table with contents of pg_stat_activity according to the filter applied.
"""
stmt = "SELECT * FROM pg_stat_activity"
params = dict()
if state is not None:
stmt += " WHERE state = {state}"
params['state'] = sql.Literal(state)
return self.query(sql.SQL(stmt).format(**params))
def refresh(self) -> None:
"""Refresh the names of the existing tables.
Returns:
None
"""
stmt = "SELECT DISTINCT table_schema, table_name FROM information_schema.tables " \
"WHERE table_schema !~ 'pg_catalog|information_schema'"
schemas_and_tables = self.query(stmt)
self._tables = list(schemas_and_tables['table_schema'].str.cat(schemas_and_tables['table_name'], sep='.'))
@check_types(columns=(str, list, dict), not_null=(bool, list))
@parse_schema_table
def add_columns(self, table_name: str, columns: Union[str, list, dict], not_null: Union[bool, List[bool]] = False,
schema: Optional[str] = None) -> None:
"""Add columns to a table
Args:
table_name: Table to add columns to
columns: column_name or list of column_names or dict of {column_name: column_type} to be created
not_null: Whether the created columns can have Null values or not. If a single boolean passed it will be
applied to all columns. Alternatively it can be passed a list of booleans with specification per column.
schema: Optional. Schema to look for the table in. If None provide will use the instance active_schema
Returns:
None
"""
# Preparation of inputs and sanity checks
if isinstance(columns, str):
columns = [columns]
if isinstance(columns, list):
self.logger.warning('Adding columns without specifying their type.\nThe "text" format will be applied '
'which can lead to low performance.')
columns = dict(zip(columns, ['text']*len(columns)))
if isinstance(not_null, bool):
not_null = [not_null]*len(columns)
elif isinstance(not_null, list):
assert len(not_null) == len(columns), "If explicitly passing 'NOT NULL' entries for each column, then " \
"'not null' list needs to be the same size of columns."
# Preparation of statement and parameters
stmt = "ALTER TABLE {schema}.{table_name}"
params = dict(schema=sql.Identifier(schema), table_name=sql.Identifier(table_name))
for i, ((c, t), nn) in enumerate(zip(columns.items(), not_null)):
self._check_safety(t, 'types')
stmt += f" ADD COLUMN {{c{i}}} {TYPE_ALIASES.get(t.lower(), t)}"
params[f'c{i}'] = sql.Identifier(c)
if nn:
stmt += " NOT NULL"
stmt += ','
else:
stmt = stmt[:-1]
self.execute(sql.SQL(stmt).format(**params),
log=f'Columns {tuple(columns)} added successfully to {schema}.{table_name}.')
@monitor(mode='time', logger=MONITOR_LOGGER)
@check_types(columns=dict)
@parse_schema_table
def alter_columns(self, table_name: str, columns: dict, using: Optional[Union[str, List[str]]] = None,
schema: Optional[str] = None) -> None:
"""Alter column types in a table
Args:
table_name: Table to alter columns on
columns: dictionary of format {column_name: column_type}.
using: Optional. expression or list of expressions to use to convert. If list, needs to be the same size
as "columns". Expressions cannot contain "unsafe_symbols" such as ";" or "--" to avoid injection.
schema: Optional. Schema to look for the table in. If None provide will use the instance active_schema
Returns:
None
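        Examples:
            A minimal usage sketch (table and column names are hypothetical):
            >>> db = PostgreSQLManager(...)
            >>> db.alter_columns('test_table', {'col_a': 'integer'}, using='integer')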
"""
# Preparation of inputs and sanity checks
if isinstance(using, str) or using is None:
using = [using]*len(columns)
assert len(using) == len(columns), 'Size of "using" needs to be the same as "columns".'
# Preparation of statement and parameters
stmt = "ALTER TABLE {schema}.{table_name}"
params = dict(schema=sql.Identifier(schema), table_name=sql.Identifier(table_name))
for i, ((c, t), u) in enumerate(zip(columns.items(), using)):
self._check_safety(t, 'types')
stmt += f" ALTER COLUMN {{c{i}}} TYPE {t}"
params[f'c{i}'] = sql.Identifier(c)
if u is not None:
self._check_safety(u, 'using')
stmt += f" USING {{c{i}}}::{u}"
stmt += ','
else:
stmt = stmt[:-1]
self.execute(sql.SQL(stmt).format(**params),
log=f'Columns {columns} of table {schema}.{table_name} changed successfully.')
@check_types(columns=(str, list, tuple), cascade=(bool, list))
@parse_schema_table
def drop_columns(self, table_name: str, columns: Union[str, list, tuple], cascade: Union[bool, List[bool]] = False,
if_exists: bool = True, schema: Optional[str] = None) -> None:
"""Drops columns from table
Args:
table_name: Table to drop columns from
columns: column_name or list of column_names to drop
cascade: boolean or list of booleans to control whether dependent objects are dropped or an error is raised.
If a list is passed it needs to be of the same length of "columns". If a single boolean is passed it
will be applied to all columns
if_exists: Whether to raise an error or not if the column doesn't exist
schema: Optional. Schema to look for the table in. If None provide will use the instance active_schema
Returns:
None
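        Examples:
            A minimal usage sketch (table and column names are hypothetical):
            >>> db = PostgreSQLManager(...)
            >>> db.drop_columns('test_table', ['col_a', 'col_b'], cascade=True)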
"""
if isinstance(columns, str):
columns = [columns]
if isinstance(cascade, bool):
cascade = [cascade]*len(columns)
elif isinstance(cascade, list):
assert len(cascade) == len(columns), "If explicitly passing 'cascade' entries for each column, then " \
"'cascade' list needs to be the same size of columns."
stmt = "ALTER TABLE {schema}.{table_name}"
params = dict(schema=sql.Identifier(schema), table_name=sql.Identifier(table_name))
for i, (col, cas) in enumerate(zip(columns, cascade)):
stmt += " DROP COLUMN" + " IF EXISTS"*if_exists + f" {{c{i}}}" + " CASCADE"*cas + ','
params[f'c{i}'] = sql.Identifier(col)
else:
stmt = stmt[:-1]
self.execute(sql.SQL(stmt).format(**params),
log=f'Dropped columns {tuple(columns)} from "{schema}.{table_name}" successfully.')
@parse_schema_table
def rename_column(self, table_name: str, old_column_name: str, new_column_name: str,
schema: Optional[str] = None) -> None:
"""Rename a column in a table
Args:
table_name: Table that contains the column to be renamed
old_column_name: Original column name
new_column_name: New column name
schema: Optional. Schema to look for the table in. If None provide will use the instance active_schema
Returns:
None
"""
stmt = "ALTER TABLE {schema}.{table_name} RENAME {old_column_name} TO {new_column_name};"
params = dict(schema=sql.Identifier(schema), table_name=sql.Identifier(table_name),
old_column_name=sql.Identifier(old_column_name), new_column_name=sql.Identifier(new_column_name))
self.execute(sql.SQL(stmt).format(**params), log=f'Column "{old_column_name}" of table "{schema}.{table_name}" '
f'renamed to "{new_column_name}" successfully.')
@monitor(mode='time', logger=MONITOR_LOGGER)
@check_types(columns=(str, list), unique=bool)
@check_options(on_conflict=('raise', 'drop'))
@parse_schema_table
def create_index(self, table_name: str, columns: Union[str, list], index_name: Optional[str] = None,
unique: bool = False, on_conflict: str = 'raise', schema: Optional[str] = None) -> None:
"""Creates an index on a table on the given columns
Args:
table_name: table to create the index one
columns: which columns to use for the index
index_name: Optional. Name to use for the index. If None is passed the name will be generated automatically
unique: whether the index should have unique entries
on_conflict: Whether to raise an error or drop the index if an index with the same name exists in the schema
schema: Optional. Schema to look for the table in. If None is provided, the instance active_schema will be used
Returns:
None
Examples:
>>> db = PostgreSQLManager(...)
>>> db.create_index('test_table', columns=['a', 'b'], unique=True)
[2021-05-20 12:59:11 | PostGreSQL | INFO] Index idx_public_test_table_a_b successfully created.
"""
if isinstance(columns, str):
columns = [columns]
# Sorting to ensure consistency in the index whatever the order of given columns
# define index name
columns = sorted([c.replace(' ', '_') for c in columns])
index_name = index_name or f"{schema}_{table_name}_{'_'.join(columns)}_idx"
# check whether the index exists and if should be dropped or not
if not self.get_index(index_name, schema).empty and on_conflict == 'raise':
raise IndexError(f'Index with name {schema}.{index_name} already exists. Check if you need to create '
                 f'this index.\nIf you wish to drop it and recreate it, pass on_conflict="drop".')
# Drop if already exists with same name and creates
self.drop_index(f'{schema}.{index_name}')
stmt = "CREATE" + " UNIQUE"*unique + " INDEX {index_name} ON {schema}.{table_name} ({columns});"
params = dict(index_name=sql.Identifier(index_name), table_name=sql.Identifier(table_name),
schema=sql.Identifier(schema), columns=sql.SQL(",").join(map(sql.Identifier, columns)))
self.execute(sql.SQL(stmt).format(**params), log=f'Index {index_name} successfully created.')
def drop_index(self, index_name: str, *, cascade: bool = False, return_query: bool = False) -> Optional[str]:
"""Drops an existing index by its name.
Args:
index_name: index name to drop
cascade: Automatically drop objects that depend on the index.
return_query: whether to return the original query used to create the index being dropped.
Returns:
Optional[str]: Query used to create the index being dropped (when return_query is True and the index existed), otherwise None
Examples:
>>> db = PostgreSQLManager(...)
>>> db.drop_index('idx_public_test_index_col_a', return_query=True)
'CREATE INDEX idx_public_test_index_col_a ON public.test_index (col_a)'
"""
# parse idx_name (can be passed with schema)
schema = None
if '.' in index_name:
schema, index_name = index_name.split('.', 1)
# check if the idx_exists
idx = self.get_index(index_name, schema)
if idx.empty:
return
# if there is no schema passed get it from the table
if schema is None:
if idx.shape[0] > 1:
raise IndexError(f'There are multiple indexes with name {index_name}. Please pass explicitly the '
f'schema in the index_name as "<schema>.<index_name>".')
schema = idx.iloc[0]['schemaname']
stmt = "DROP INDEX IF EXISTS {schema}.{index_name}" + " CASCADE" * cascade
self.execute(sql.SQL(stmt).format(index_name=sql.SQL(index_name), schema=sql.Identifier(schema)),
log=f'Index {index_name} successfully dropped.')
if return_query:
return idx[idx['schemaname'] == schema]['indexdef'].iloc[0]
def get_index(self, idx_name: str, schema: Optional[str] = None) -> pd.DataFrame:
"""Retrieve information for a specific index
Args:
idx_name: Name of the index to retrieve info for
schema: Optional. Schema to look for the table in. If None is provided, the instance active_schema will be used
Returns:
pd.DataFrame: Table with information. Typically it should have a single row
"""
idxs = self.get_indexes(schema=schema)
return idxs[idxs.indexname == idx_name]
def get_indexes(self, table_name: Optional[str] = None, schema: Optional[str] = None) -> pd.DataFrame:
"""List all indexes. Might be filtered by schema/table
Args:
table_name: name of the table if filtering by table
schema: name of the schema if filtering by schema
Returns:
pd.DataFrame: table with the indexes information filtered according to the parameters passed
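Examples:
Illustrative sketch (the table/schema names are assumptions); the returned DataFrame mirrors the pg_indexes view:
>>> db = PostgreSQLManager(...)
>>> idx_df = db.get_indexes(table_name='test_table', schema='public')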
"""
if table_name is not None and '.' in table_name:
if schema is not None:
self.logger.warning('Schema passed in "table_name". The "schema" argument will be ignored.')
schema, table_name = table_name.split('.')
stmt = "SELECT * FROM pg_indexes WHERE schemaname != 'pg_catalog'"
params = dict()
if schema is not None:
stmt += " AND schemaname = {schema}"
params['schema'] = sql.Literal(schema)
if table_name is not None:
stmt += " AND tablename = {table_name}"
params['table_name'] = sql.Literal(table_name)
return self.query(sql.SQL(stmt).format(**params))
def get_indexes_columns(self, index_name: Optional[Union[str, list]] = None, table_name: Optional[str] = None,
schema: Optional[str] = None) -> pd.Series:
"""Get columns for indexes in a table, schema or specific index_name
Args:
index_name: Specific index_name string or list of index_names
table_name: Tables to search the indexes on
schema: Schema to search the indexes on
Returns:
pd.Series: index=index_name, values=lists of columns
"""
idxs = self.get_indexes(table_name, schema=schema)
if index_name is not None:
if isinstance(index_name, str):
index_name = [index_name]
idxs = idxs[idxs['indexname'].isin(index_name)]
idxs['cols'] = idxs['indexdef'].str.extract(r'.*\((.*)\)')
idxs['cols'] = idxs['cols'].str.split(', ').apply(sorted)
idxs.set_index('indexname', inplace=True)
return idxs['cols']
def create_schema(self, schema_name: str, if_not_exists: bool = True) -> None:
"""Creates a new schema in the database
Args:
schema_name: Name for the new schema
if_not_exists: If False will raise an error if a schema with the same name already exists.
Returns:
None
Examples:
>>> db = PostgreSQLManager(...)
>>> db.create_schema('test_schema')
[2021-05-20 12:59:11 | PostGreSQL | INFO] Schema test_schema successfully created in <name> database.
"""
stmt = "CREATE SCHEMA" + " IF NOT EXISTS"*if_not_exists + " {schema_name}"
self.execute(sql.SQL(stmt).format(schema_name=sql.Identifier(schema_name)),
log=f'Schema {schema_name} successfully created in {self.name} database.')
def drop_schema(self, schema_name: Union[str, list], cascade: bool = False, if_exists: bool = True) -> None:
"""Drops an entire schema. If "cascade" is True it will drop any table inside of that schema. Allows multiple
schemas to be dropped at the same time by passing a list of schema names. If the active_schema is dropped it will
reset the instance "active_schema" to "public".
Args:
schema_name: name of the schema to drop
cascade: Whether to drop everything inside of the schema or to raise an error if any table is still in the
schema.
if_exists: Whether to ignore or raise an error if the schema doesn't exist.
Returns:
None
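Examples:
Illustrative sketch (the schema names are assumptions):
>>> db = PostgreSQLManager(...)
>>> db.drop_schema(['staging', 'tmp'], cascade=True)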
"""
if isinstance(schema_name, (tuple, list, set)):
drop_schema = self.active_schema in schema_name
_schema_name = sql.SQL(",").join(map(sql.Identifier, schema_name))
else:
drop_schema = self.active_schema == schema_name
_schema_name = sql.Identifier(schema_name)
stmt = "DROP SCHEMA" + " IF EXISTS"*if_exists + " {schema_name}" + " CASCADE"*cascade
if self.execute(sql.SQL(stmt).format(schema_name=_schema_name),
log=f'Schema(s) {schema_name} dropped successfully.'):
if drop_schema:
self.set_active_schema(None)
def get_schemas(self) -> list:
"""Finds available schemas in the database
Returns:
list: list of schemas in database
"""
stmt = "SELECT schema_name FROM information_schema.schemata " \
"WHERE schema_name !~ 'pg_catalog|information_schema|pg_toast'"
return list(self.query(stmt)['schema_name'])
def set_active_schema(self, schema: Optional[str] = None) -> None:
"""Sets the active schema in the database. Any query will be done within the active schema without need
to specifically identify the schema on the query
Args:
schema: If None is passed it will set the active_schema to "public"
Returns:
None
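Examples:
Illustrative sketch (assumes the schema exists and the current user has access to it):
>>> db = PostgreSQLManager(...)
>>> db.set_active_schema('analytics')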
"""
schemas = self.get_schemas()
if (schema is not None) and (schema not in schemas):
self.logger.warning(f'Passed schema "{schema}" does not exist in database "{self.name}" or current user '
f'might not have access privileges to it.'
f'\nSchema was not changed. Current schema: {self.active_schema}')
else:
super().set_active_schema(schema)
self.refresh()
if schema in schemas:
stmt = 'ALTER USER {user} SET search_path TO {active_schema}'
params = dict(user=sql.Identifier(self.user), active_schema=sql.Identifier(self.active_schema))
self.execute(sql.SQL(stmt).format(**params))
@parse_schema_table
def drop_primary_key(self, table_name: str, schema: Optional[str] = None) -> None:
"""Drops current primary key
Args:
table_name: table name to drop primary key from
schema: Optional. Schema to look for the table in. If None is provided, the instance active_schema will be used
Returns:
None
"""
constraints = self.get_constraints(table_name, contype='p', schema=schema)['conname']
if not constraints.empty:
stmt = "ALTER TABLE {schema}.{table_name} DROP CONSTRAINT IF EXISTS {constraint}"
params = dict(schema=sql.Identifier(schema), table_name=sql.Identifier(table_name),
constraint=sql.SQL(constraints[0]))
self.execute(sql.SQL(stmt).format(**params), log=f'Primary key {constraints[0]} dropped.')
@parse_schema_table
def get_primary_key(self, table_name: str, schema: Optional[str] = None) -> pd.Series:
"""Retrieve all info about the primary key of a table
Args:
table_name: Table to get the primary key from
schema: Optional. Schema to look for the table in. If None is provided, the instance active_schema will be used
Returns:
pd.Series: primary key constraint information for the table, as returned by get_constraints with contype='p'
"""
return self.get_constraints(table_name, 'p', schema=schema)
@parse_schema_table
def get_primary_key_columns(self, table_name: str, idx: bool = False, schema: Optional[str] = None) -> list:
"""Find the columns of the primary_key constraint for a given table
Args:
table_name: Name to get primary key from
idx: Whether to return the columns as names or indexes. Default: False (returns names).
schema: Optional. Schema to look for the table in. If None is provided, the instance active_schema will be used
Returns:
List: list of columns that are the primary keys
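Examples:
Illustrative sketch (the returned list depends on the actual table definition):
>>> db = PostgreSQLManager(...)
>>> db.get_primary_key_columns('test_table')
['id']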
"""
key = self.get_primary_key(table_name, schema=schema)['conkey']
if key.empty:
return []
key = key.iloc[0]
if not idx:
cols = self.get_columns(table_name, schema)
cols.insert(0, '_')
key = itemgetter(*key)(cols)
key = [key] if not isinstance(key, tuple) else list(key)
return sorted(key)
@monitor(mode='time', logger=MONITOR_LOGGER, log_level='debug')
@check_options(on_conflict=('raise', 'drop'))
@parse_schema_table
def set_primary_key(self, table_name: str, id_column: Union[str, list, tuple], on_conflict='raise',
schema: Optional[str] = None) -> Optional[bool]:
"""Adds a primary key to the table.
Args:
table_name: table to add the primary key to
id_column: column(s) to be defined as primary key. Column must exist in table, otherwise it will raise
an error
on_conflict: What to do if already exists a primary key set for this table. Options: 'raise', 'drop'.
Default: 'raise'
schema: Optional. Schema to look for the table in. If None is provided, the instance active_schema will be used
Returns:
bool: True if the primary key was set successfully
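Examples:
Illustrative sketch (the table and column names are assumptions):
>>> db = PostgreSQLManager(...)
>>> db.set_primary_key('test_table', ['id', 'date'], on_conflict='drop')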
"""
# if more than one column is passed join them as a single string
if not isinstance(id_column, (list, tuple, set)):
id_column = [id_column]
stmt = "ALTER TABLE {schema}.{table_name} ADD PRIMARY KEY ({id_column})"
params = dict(schema=sql.Identifier(schema), table_name=sql.Identifier(table_name),
id_column=sql.SQL(',').join(map(sql.Identifier, id_column)))
if on_conflict == 'drop':
self.drop_primary_key(table_name, schema)
return self.execute(sql.SQL(stmt).format(**params), log=f'Primary key set on {tuple(id_column)} successfully.')
@monitor(logger=MONITOR_LOGGER)
@parse_schema_table
def append_to_table(self, table_name: str, values: Union[pd.DataFrame, Sequence],
columns: Optional[Union[list, dict]] = None, on_new_columns: str = 'raise',
schema: Optional[str] = None) -> None:
"""Uploads a table with the name passed to the DataBase. If the table doesn't exist yet, will create a new one
else it will append any missing columns to the dataframe passed (to ensure consistency) with null values and
add it. New columns that don't exist on the destination table will raise a error.
Args:
table_name: table name to upload to in DataBase
values: table to upload
columns: list, dict or None. Definition of new columns (or new table) requires a dictionary of format
{<col_name>: <col_type>} except if 'values' is a DataFrame.
on_new_columns: What to do if new columns are in the DataFrame passed. Options: ('raise', 'ignore', 'add').
"raise": raises an error
"ignore": drop the additional columns from the dataframe before uploading it. If values is not a
DataFrame it will raise an error instead.
"add": create columns in the database table with inferred type before uploading the data.
schema: Optional. Schema to look for the table in. If None is provided, the instance active_schema will be used
Returns:
None
Examples:
>>> df = pd.DataFrame({'A': [1, 2, 3], 'B': [4, 5, 6]})
>>> db = PostgreSQLManager(...)
>>> db.append_to_table('test_table', df)
"""
self.refresh()
full_tablename = f'{schema}.{table_name}'
if full_tablename not in self._tables:
if not isinstance(values, pd.DataFrame) and isinstance(columns, (list, NoneType)):
raise TypeError(f'Table {full_tablename} does not exist. To create the table with "append_to_table" '
f'method, "columns" must be a dictionary defining the types of each column.')
self.upload_table(table_name, values, columns, schema=schema)
else:
# if the table already exists, update its schema and add any missing columns before appending the new table
values, columns = self._update_table_schema(full_tablename, values, columns, on_new_columns)
if self._commit_table(table_name, values, columns, update_table_names=False, schema=schema):
self.logger.info(f'Data appended successfully to table {full_tablename}.')
@monitor(logger=MONITOR_LOGGER)
@parse_schema_table
def copy_table(self, table_name: str, new_table_name: str, columns: Optional[list] = None,
where: Optional[str] = None, structure_only: bool = False, schema: Optional[str] = None,
destination_schema: Optional[str] = None) -> None:
"""Copies one table to another. The copy can include all of the data, just a selection of the data or
structure only.
Args:
table_name: Name of the table to copy
new_table_name: Name of the new table
columns: Which columns to include on the copy
where: SQL expression to select specific rows to be copied. This expression cannot use any "unsafe_symbol"
such as ";" or "--".
structure_only: Whether to copy only the structure of the table (without any data)
schema: Optional. Schema to look for the table in. If None is provided, the instance active_schema will be used
destination_schema: Optional. Schema to copy the table to. If None, uses the instance active_schema
Returns:
None
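Examples:
Illustrative sketch (table names and the WHERE expression are assumptions):
>>> db = PostgreSQLManager(...)
>>> db.copy_table('test_table', 'test_table_backup', structure_only=True)
>>> db.copy_table('test_table', 'test_table_recent', where="id > 1000")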
"""
params = dict(schema=sql.Identifier(schema), table_name=sql.Identifier(table_name))
stmt = "CREATE TABLE {new_schema}.{new_table_name} AS"
# parse and add new schema and table name
destination_schema = destination_schema or self.active_schema
new_schema, new_table_name = join_schema_to_table_name(new_table_name, destination_schema).split('.', 1)
params['new_schema'], params['new_table_name'] = map(sql.Identifier, (new_schema, new_table_name))
if (columns is None) and (where is None):
stmt += " TABLE {schema}.{table_name}"
else:
stmt += " SELECT {columns} FROM {schema}.{table_name}"
params['columns'] = sql.SQL('*') if columns is None else sql.SQL(",").join(map(sql.Identifier, columns))
if where is not None:
self._check_safety(where, 'where')
stmt += f" WHERE {where}"
if structure_only:
stmt += " WITH NO DATA"
log = f'Table {schema}.{table_name} copied successfully to {new_schema}.{new_table_name}'
if self.execute(sql.SQL(stmt).format(**params), log=log):
self.refresh()
@check_types(table_name=str, types=(NoneType, dict))
@parse_schema_table
def create_empty_table(self, table_name: str, types: Optional[dict] = None,
from_df: Optional[pd.DataFrame] = None, if_not_exists: bool = False,
schema: Optional[str] = None) -> None:
"""Creates a new empty table in the selected (or active if None passed) schema.
Args:
table_name: Name for the table
types: dictionary of format {column_name: column_type}. If a DataFrame is passed in "from_df",
"types" is used to override any inferred type from pandas
from_df: A DataFrame to use to infer the structure of the table from
if_not_exists: If True, do not raise an error when a table with the same name already exists
schema: Optional. Schema to look for the table in. If None is provided, the instance active_schema will be used
Returns:
None
Examples:
>>> db = PostgreSQLManager(...)
>>> db.create_empty_table('test_table', {'a': 'int32', 'b': 'string'})
[2021-05-20 12:59:11 | PostGreSQL | INFO] Table test_table successfully created.
"""
# get known types from global dict
if types is not None:
types = {col: TYPE_ALIASES.get(str(t).lower(), str(t)) for col, t in types.items()}
# if a df was passed extract missing types from it
if from_df is not None:
types = _get_col_types_from_df_schema(pd.io.sql.get_schema(from_df, '_', dtype=types))
# coding: utf-8
# # Content
# __1. Exploratory Visualization__
# __2. Data Cleaning__
# __3. Feature Engineering__
# __4. Modeling & Evaluation__
# __5. Ensemble Methods__
# In[1]:
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import warnings
warnings.filterwarnings('ignore')
get_ipython().run_line_magic('matplotlib', 'inline')
plt.style.use('ggplot')
# In[2]:
from sklearn.base import BaseEstimator, TransformerMixin, RegressorMixin, clone
from sklearn.preprocessing import LabelEncoder
from sklearn.preprocessing import RobustScaler, StandardScaler
from sklearn.metrics import mean_squared_error
from sklearn.pipeline import Pipeline, make_pipeline
from scipy.stats import skew
from sklearn.decomposition import PCA, KernelPCA
from sklearn.preprocessing import Imputer
# In[3]:
from sklearn.model_selection import cross_val_score, GridSearchCV, KFold
from sklearn.linear_model import LinearRegression
from sklearn.linear_model import Ridge
from sklearn.linear_model import Lasso
from sklearn.ensemble import RandomForestRegressor, GradientBoostingRegressor, ExtraTreesRegressor
from sklearn.svm import SVR, LinearSVR
from sklearn.linear_model import ElasticNet, SGDRegressor, BayesianRidge
from sklearn.kernel_ridge import KernelRidge
from xgboost import XGBRegressor
# In[4]:
pd.set_option('max_colwidth',200)
pd.set_option('display.width',200)
pd.set_option('display.max_columns',500)
pd.set_option('display.max_rows',1000)
# In[7]:
train=pd.read_csv('E:/Workspace/HousePrices/train.csv')
test=pd.read_csv('E:/Workspace/HousePrices/test.csv')
# In[8]:
# train = pd.read_csv('../input/train.csv')
# test = pd.read_csv('../input/test.csv')
# # Exploratory Visualization
# + __It seems that the prices of recently built houses are higher. So later I'll use a label encoder for the three "Year" features.__
# In[9]:
plt.figure(figsize=(15,8))
sns.boxplot(train.YearBuilt, train.SalePrice)
# + __As is discussed in other kernels, the two points at the bottom right with extremely large GrLivArea are likely to be outliers. So we delete them.__
# In[10]:
plt.figure(figsize=(12,6))
plt.scatter(x=train.GrLivArea, y=train.SalePrice)
plt.xlabel("GrLivArea", fontsize=13)
plt.ylabel("SalePrice", fontsize=13)
plt.ylim(0,800000)
# In[11]:
train.drop(train[(train["GrLivArea"]>4000)&(train["SalePrice"]<300000)].index,inplace=True)
# In[12]:
full=pd.concat([train,test], ignore_index=True)
# In[13]:
full.drop(['Id'],axis=1, inplace=True)
full.shape
# # Data Cleaning
# ### Missing Data
# In[14]:
aa = full.isnull().sum()
aa[aa>0].sort_values(ascending=False)
# + __Let's first impute the missing values of LotFrontage based on the median of LotArea and Neighborhood. Since LotArea is a continuous feature, we use qcut to divide it into 10 parts.__
# In[15]:
full.groupby(['Neighborhood'])[['LotFrontage']].agg(['mean','median','count'])
# In[16]:
full["LotAreaCut"] = | pd.qcut(full.LotArea,10) | pandas.qcut |
import numpy as np
import pandas as pd
class Result():
# structure of the result
def __init__(self):
self.ranks = None
self.scores = None
self.features = None
self.ranked_features = None
def PasiLuukka(in_data, target, measure = 'luca', p = 1):
d = pd.DataFrame(in_data)
t = pd.DataFrame(target)
data = pd.concat([d,t],axis=1)
# Feature selection method using similarity measure and fuzzy entropy
# measures based on the article:
# <NAME>, (2011) Feature Selection Using Fuzzy Entropy Measures with
# Similarity Classifier, Expert Systems with Applications, 38, pp. 4600-4607
l = int(max(data.iloc[:,-1]))
m = data.shape[0]
t = data.shape[1]-1
dataold = data.copy()
idealvec_s = np.zeros((l,t))
for k in range(l):
idx = data.iloc[:,-1] == k+1
idealvec_s[k,:] = data[idx].iloc[:,:-1].mean(axis = 0)
# scaling data between [0,1]
data_v = data.iloc[:,:-1]
data_c = data.iloc[:,-1] # labels
mins_v = data_v.min(axis = 0)
Ones = np.ones((data_v.shape))
data_v = data_v + np.dot(Ones,np.diag(abs(mins_v)))
tmp =[]
for k in range(l):
tmp.append(abs(mins_v))
idealvec_s = idealvec_s+tmp
maxs_v = data_v.max(axis = 0)
data_v = np.dot(data_v,np.diag(maxs_v**(-1)))
tmp2 =[];
for k in range(l):
tmp2.append(abs(maxs_v))
idealvec_s = idealvec_s/tmp2
data_vv = pd.DataFrame(data_v) # Convert the array of feature to a dataframe
data = pd.concat([data_vv, data_c], axis=1, ignore_index=False)
import os
import audiofile
import audiofile as af
import numpy as np
import pandas as pd
import pytest
import audinterface
import audformat
def signal_duration(signal, sampling_rate):
return signal.shape[1] / sampling_rate
def signal_max(signal, sampling_rate):
return np.max(signal)
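# Segmenter shared by the tests below: for any input it returns a single segment
# covering the first half of the signal's duration.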
SEGMENT = audinterface.Segment(
process_func=lambda x, sr:
audinterface.utils.signal_index(
pd.to_timedelta(0),
pd.to_timedelta(x.shape[1] / sr, unit='s') / 2,
)
)
def signal_modification(signal, sampling_rate, subtract=False):
if subtract:
signal -= 0.1 * signal
else:
signal += 0.1 * signal
return signal
@pytest.mark.parametrize(
'process_func, segment, signal, sampling_rate, start, end, keep_nat, '
'channels, mixdown, expected_output',
[
(
signal_max,
None,
np.ones((1, 3)),
8000,
None,
None,
False,
None,
False,
1,
),
(
signal_max,
SEGMENT,
np.ones((1, 8000)),
8000,
None,
None,
False,
None,
False,
1,
),
(
signal_max,
None,
np.ones(3),
8000,
None,
None,
False,
0,
False,
1,
),
(
signal_max,
None,
np.array([[0., 0., 0.], [1., 1., 1.]]),
8000,
None,
None,
False,
0,
False,
0,
),
(
signal_max,
None,
np.array([[0., 0., 0.], [1., 1., 1.]]),
8000,
None,
None,
False,
0,
False,
0,
),
(
signal_max,
None,
np.array([[0., 0., 0.], [1., 1., 1.]]),
8000,
None,
None,
False,
1,
False,
1,
),
(
signal_max,
None,
np.array([[0., 0., 0.], [1., 1., 1.]]),
8000,
None,
None,
False,
None,
True,
0.5,
),
(
signal_max,
None,
np.array([[-1., -1., -1.], [0., 0., 0.], [1., 1., 1.]]),
8000,
None,
None,
False,
[1, 2],
True,
0.5,
),
# invalid channel selection
pytest.param(
signal_max,
None,
np.ones((1, 3)),
8000,
None,
None,
False,
1,
False,
1,
marks=pytest.mark.xfail(raises=ValueError),
),
(
signal_duration,
None,
np.zeros((1, 24000)),
8000,
None,
None,
False,
None,
False,
3.0,
),
(
signal_duration,
None,
np.zeros((1, 24000)),
8000,
pd.NaT,
pd.NaT,
False,
None,
False,
3.0,
),
(
signal_duration,
None,
np.zeros((1, 24000)),
8000,
pd.NaT,
pd.NaT,
True,
None,
False,
3.0,
),
(
signal_duration,
None,
np.zeros((1, 24000)),
8000,
pd.to_timedelta('1s'),
None,
False,
None,
False,
2.0,
),
(
signal_duration,
None,
np.zeros((1, 24000)),
8000,
pd.to_timedelta('1s'),
pd.NaT,
False,
None,
False,
2.0,
),
(
signal_duration,
None,
np.zeros((1, 24000)),
8000,
None,
pd.to_timedelta('2s'),
False,
None,
False,
2.0,
),
(
signal_duration,
None,
np.zeros((1, 24000)),
8000,
pd.NaT,
pd.to_timedelta('2s'),
False,
None,
False,
2.0,
),
(
signal_duration,
None,
np.zeros((1, 24000)),
8000,
pd.to_timedelta('1s'),
pd.to_timedelta('2s'),
False,
None,
False,
1.0,
),
(
signal_duration,
None,
np.zeros((1, 24000)),
8000,
'1s',
'2s',
False,
None,
False,
1.0,
),
(
signal_duration,
None,
np.zeros((1, 24000)),
8000,
'1000ms',
'2000ms',
False,
None,
False,
1.0,
),
(
signal_duration,
None,
np.zeros((1, 24000)),
8000,
1,
2,
False,
None,
False,
1.0,
),
(
signal_duration,
None,
np.zeros((1, 24000)),
8000,
1.0,
2.0,
False,
None,
False,
1.0,
),
],
)
def test_process_file(
tmpdir,
process_func,
segment,
signal,
sampling_rate,
start,
end,
keep_nat,
channels,
mixdown,
expected_output,
):
process = audinterface.Process(
process_func=process_func,
sampling_rate=sampling_rate,
resample=False,
channels=channels,
mixdown=mixdown,
segment=segment,
keep_nat=keep_nat,
verbose=False,
)
# create test file
root = str(tmpdir.mkdir('wav'))
file = 'file.wav'
path = os.path.join(root, file)
af.write(path, signal, sampling_rate)
# test absolute path
y = process.process_file(
path,
start=start,
end=end,
)
np.testing.assert_almost_equal(
y.values, expected_output, decimal=4,
)
# test relative path
y = process.process_file(
file,
start=start,
end=end,
root=root,
)
np.testing.assert_almost_equal(
y.values, expected_output, decimal=4,
)
@pytest.mark.parametrize(
'process_func, num_files, signal, sampling_rate, starts, ends, '
'expected_output',
[
(
signal_duration,
2,
np.zeros((1, 24000)),
8000,
None,
None,
[3.0] * 2,
),
(
signal_duration,
2,
np.zeros((1, 24000)),
8000,
'1s',
'2s',
[1.0] * 2,
),
(
signal_duration,
2,
np.zeros((1, 24000)),
8000,
1,
2,
[1.0] * 2,
),
(
signal_duration,
2,
np.zeros((1, 24000)),
8000,
[None, 1],
[None, 2],
[3.0, 1.0],
),
(
signal_duration,
2,
np.zeros((1, 24000)),
8000,
[None, '1s'],
[None, '2s'],
[3.0, 1.0],
),
(
signal_duration,
3,
np.zeros((1, 24000)),
8000,
[None, '1s'],
[None, '2s', None],
[3.0, 1.0],
),
(
signal_duration,
1,
np.zeros((1, 24000)),
8000,
[None],
[None, '2s'],
[3.0],
),
],
)
def test_process_files(
tmpdir,
process_func,
num_files,
signal,
sampling_rate,
starts,
ends,
expected_output,
):
process = audinterface.Process(
process_func=process_func,
sampling_rate=sampling_rate,
resample=False,
verbose=False,
)
# create files
files = []
paths = []
root = tmpdir
for idx in range(num_files):
file = f'file{idx}.wav'
path = os.path.join(root, file)
af.write(path, signal, sampling_rate)
files.append(file)
paths.append(path)
# test absolute paths
output = process.process_files(
paths,
starts=starts,
ends=ends,
)
np.testing.assert_almost_equal(
output.values,
expected_output,
decimal=4,
)
# test relative paths
output = process.process_files(
files,
starts=starts,
ends=ends,
root=root,
)
np.testing.assert_almost_equal(
output.values,
expected_output,
decimal=4,
)
@pytest.mark.parametrize(
'num_files, segment, num_workers, multiprocessing',
[
(3, None, 1, False, ),
(3, None, 2, False, ),
(3, None, None, False, ),
(3, SEGMENT, 1, False, ),
]
)
def test_process_folder(
tmpdir,
num_files,
segment,
num_workers,
multiprocessing,
):
process = audinterface.Process(
process_func=None,
sampling_rate=None,
resample=False,
segment=segment,
num_workers=num_workers,
multiprocessing=multiprocessing,
verbose=False,
)
sampling_rate = 8000
path = str(tmpdir.mkdir('wav'))
files = [
os.path.join(path, f'file{n}.wav') for n in range(num_files)
]
for file in files:
signal = np.random.uniform(-1.0, 1.0, (1, sampling_rate))
af.write(file, signal, sampling_rate)
y = process.process_folder(path)
pd.testing.assert_series_equal(
y,
process.process_files(files),
)
def test_process_func_args():
def process_func(s, sr, arg1, arg2):
assert arg1 == 'foo'
assert arg2 == 'bar'
audinterface.Process(
process_func=process_func,
process_func_args={
'arg1': 'foo',
'arg2': 'bar',
}
)
with pytest.warns(UserWarning):
audinterface.Process(
feature_names=('o1', 'o2', 'o3'),
process_func=process_func,
arg1='foo',
arg2='bar',
)
@pytest.mark.parametrize(
'num_workers, multiprocessing',
[
(1, False, ),
(2, False, ),
(None, False, ),
]
)
def test_process_index(tmpdir, num_workers, multiprocessing):
process = audinterface.Process(
process_func=None,
sampling_rate=None,
resample=False,
num_workers=num_workers,
multiprocessing=multiprocessing,
verbose=False,
)
sampling_rate = 8000
signal = np.random.uniform(-1.0, 1.0, (1, 3 * sampling_rate))
# create file
root = str(tmpdir.mkdir('wav'))
file = 'file.wav'
path = os.path.join(root, file)
af.write(path, signal, sampling_rate)
# empty index
index = audformat.segmented_index()
y = process.process_index(index)
assert y.empty
# segmented index with absolute paths
index = audformat.segmented_index(
[path] * 3,
pd.timedelta_range('0s', '2s', 3),
pd.timedelta_range('1s', '3s', 3),
)
y = process.process_index(index)
for (path, start, end), value in y.items():
signal, sampling_rate = audinterface.utils.read_audio(
path, start=start, end=end
)
np.testing.assert_equal(signal, value)
# filewise index with absolute paths
index = audformat.filewise_index(path)
y = process.process_index(index)
for (path, start, end), value in y.items():
signal, sampling_rate = audinterface.utils.read_audio(
path, start=start, end=end
)
np.testing.assert_equal(signal, value)
# segmented index with relative paths
index = audformat.segmented_index(
[file] * 3,
pd.timedelta_range('0s', '2s', 3),
pd.timedelta_range('1s', '3s', 3),
)
y = process.process_index(index, root=root)
for (file, start, end), value in y.items():
signal, sampling_rate = audinterface.utils.read_audio(
file, start=start, end=end, root=root
)
np.testing.assert_equal(signal, value)
# filewise index with relative paths
index = audformat.filewise_index(path)
y = process.process_index(index, root=root)
for (file, start, end), value in y.items():
signal, sampling_rate = audinterface.utils.read_audio(
file, start=start, end=end, root=root
)
np.testing.assert_equal(signal, value)
@pytest.mark.parametrize(
'process_func, process_func_args, segment, signal, '
'sampling_rate, file, start, end, keep_nat, expected_signal',
[
(
None,
{},
None,
np.array([1., 2., 3.]),
44100,
None,
None,
None,
False,
np.array([1., 2., 3.]),
),
(
None,
{},
None,
np.array([1., 2., 3.]),
44100,
'file',
None,
None,
False,
np.array([1., 2., 3.]),
),
(
None,
{},
SEGMENT,
np.array([1., 2., 3., 4.]),
44100,
None,
None,
None,
False,
np.array([1., 2.]),
),
(
None,
{},
SEGMENT,
np.array([1., 2., 3., 4.]),
44100,
'file',
None,
None,
False,
np.array([1., 2.]),
),
(
signal_max,
{},
None,
np.array([1., 2., 3.]),
44100,
None,
None,
None,
False,
3.0,
),
(
signal_duration,
{},
None,
np.array([1., 2., 3.]),
3,
None,
None,
None,
False,
1.0,
),
(
signal_duration,
{},
None,
np.array([1., 2., 3.]),
1,
None,
pd.to_timedelta('2s'),
None,
False,
1.0,
),
(
signal_duration,
{},
None,
np.array([1., 2., 3.]),
1,
None,
None,
pd.to_timedelta('1s'),
False,
1.0,
),
(
signal_duration,
{},
None,
np.array([1., 2., 3.]),
1,
None,
None,
pd.NaT,
False,
3.0,
),
(
signal_duration,
{},
None,
np.array([1., 2., 3.]),
1,
None,
None,
pd.NaT,
True,
3.0,
),
(
signal_duration,
{},
None,
np.array([1., 2., 3.]),
1,
None,
pd.to_timedelta('1s'),
pd.to_timedelta('2s'),
False,
1.0,
),
(
signal_duration,
{},
None,
np.array([1., 2., 3.]),
1,
None,
1,
2,
False,
1.0,
),
(
signal_duration,
{},
None,
np.array([1., 2., 3.]),
1,
None,
1.0,
2.0,
False,
1.0,
),
(
signal_duration,
{},
None,
np.array([1., 2., 3.]),
1,
None,
'1s',
'2s',
False,
1.0,
),
(
signal_duration,
{},
None,
np.array([1., 2., 3.]),
1,
'file',
pd.to_timedelta('1s'),
pd.to_timedelta('2s'),
False,
1.0,
),
(
signal_modification,
{},
None,
np.array([1., 1., 1.]),
44100,
None,
None,
None,
False,
np.array([[1.1, 1.1, 1.1]]),
),
(
signal_modification,
{'subtract': False},
None,
np.array([1., 1., 1.]),
44100,
None,
None,
None,
False,
np.array([[1.1, 1.1, 1.1]]),
),
(
signal_modification,
{'subtract': True},
None,
np.array([1., 1., 1.]),
44100,
None,
None,
None,
False,
np.array([[0.9, 0.9, 0.9]]),
),
],
)
def test_process_signal(
process_func,
process_func_args,
segment,
signal,
sampling_rate,
file,
start,
end,
keep_nat,
expected_signal,
):
process = audinterface.Process(
process_func=process_func,
sampling_rate=None,
resample=False,
segment=segment,
keep_nat=keep_nat,
verbose=False,
process_func_args=process_func_args,
)
x = process.process_signal(
signal,
sampling_rate,
file=file,
start=start,
end=end,
)
signal = np.atleast_2d(signal)
if start is None or pd.isna(start):
start = pd.to_timedelta(0)
elif isinstance(start, (int, float)):
start = pd.to_timedelta(start, 's')
elif isinstance(start, str):
start = pd.to_timedelta(start)
if end is None or (pd.isna(end) and not keep_nat):
end = pd.to_timedelta(
signal.shape[1] / sampling_rate,
unit='s',
)
elif isinstance(end, (int, float)):
end = pd.to_timedelta(end, 's')
elif isinstance(end, str):
end = pd.to_timedelta(end)
if segment is not None:
index = segment.process_signal(
signal,
sampling_rate,
start=start,
end=end,
)
start = index[0][0]
end = index[0][1]
if file is None:
y = pd.Series(
[expected_signal],
index=audinterface.utils.signal_index(start, end),
)
else:
y = pd.Series(
[expected_signal],
index=audformat.segmented_index(file, start, end),
)
pd.testing.assert_series_equal(x, y)
@pytest.mark.parametrize(
'num_workers, multiprocessing',
[
(1, False, ),
(2, False, ),
(None, False, ),
]
)
@pytest.mark.parametrize(
'process_func, signal, sampling_rate, index',
[
(
None,
np.random.random(5 * 44100),
44100,
audinterface.utils.signal_index(),
),
(
None,
np.random.random(5 * 44100),
44100,
audinterface.utils.signal_index(
pd.timedelta_range('0s', '3s', 3),
pd.timedelta_range('1s', '4s', 3)
),
),
(
signal_max,
np.random.random(5 * 44100),
44100,
audinterface.utils.signal_index(
pd.timedelta_range('0s', '3s', 3),
pd.timedelta_range('1s', '4s', 3),
),
),
(
signal_max,
np.random.random(5 * 44100),
44100,
audinterface.utils.signal_index(),
),
pytest.param(
signal_max,
np.random.random(5 * 44100),
44100,
pd.MultiIndex.from_arrays(
[
pd.timedelta_range('0s', '3s', 3),
],
),
marks=pytest.mark.xfail(raises=ValueError)
),
pytest.param(
signal_max,
np.random.random(5 * 44100),
44100,
pd.MultiIndex.from_arrays(
[
['wrong', 'data', 'type'],
pd.timedelta_range('1s', '4s', 3),
],
),
marks=pytest.mark.xfail(raises=ValueError)
),
pytest.param(
signal_max,
np.random.random(5 * 44100),
44100,
pd.MultiIndex.from_arrays(
[
pd.timedelta_range('0s', '3s', 3),
['wrong', 'data', 'type'],
],
),
marks=pytest.mark.xfail(raises=ValueError)
),
],
)
def test_process_signal_from_index(
num_workers,
multiprocessing,
process_func,
signal,
sampling_rate,
index,
):
process = audinterface.Process(
process_func=process_func,
sampling_rate=None,
resample=False,
num_workers=num_workers,
multiprocessing=multiprocessing,
verbose=False,
)
result = process.process_signal_from_index(signal, sampling_rate, index)
expected = []
for start, end in index:
expected.append(
process.process_signal(signal, sampling_rate, start=start, end=end)
)
if not expected:
pd.testing.assert_series_equal(
result,
pd.Series([], index, dtype=float),
)
else:
pd.testing.assert_series_equal(
result,
pd.concat(expected, names=['start', 'end']),
)
@pytest.mark.parametrize(
'segment',
[
audinterface.Segment(
process_func=lambda x, sr: audinterface.utils.signal_index()
),
audinterface.Segment(
process_func=lambda x, sr:
audinterface.utils.signal_index(
pd.to_timedelta(0),
pd.to_timedelta(x.shape[1] / sr, unit='s') / 2,
)
),
audinterface.Segment(
process_func=lambda x, sr:
audinterface.utils.signal_index(
pd.to_timedelta(x.shape[1] / sr, unit='s') / 2,
pd.to_timedelta(x.shape[1] / sr, unit='s'),
)
),
audinterface.Segment(
process_func=lambda x, sr:
audinterface.utils.signal_index(
[
pd.to_timedelta(0),
pd.to_timedelta(x.shape[1] / sr, unit='s') / 2,
],
[
pd.to_timedelta(x.shape[1] / sr, unit='s') / 2,
pd.to_timedelta(x.shape[1] / sr, unit='s'),
# The five steps of data analysis:
# ask a question, process the data, explore the data, draw conclusions, report the results
# Common methods for exploring data: comparative analysis, correlation analysis, factor analysis,
# cross analysis, regression analysis, group-by analysis, etc.
# Hands-on project: analysis of house sale price data
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
price = pd.read_csv('kc_house_data.csv')
print(type(price))
# Check the number of rows and columns of price
print('price has (rows, columns): {}'.format(price.shape))
'''
variables:
price : sale price of each house
date : date the house was sold
bedrooms : number of bedrooms
bathrooms : number of bathrooms; a fractional value indicates a toilet without a bath
sqft_living : interior living space of the home (square feet)
sqft_lot : land area (square feet)
floors : number of floors
waterfront : whether the home overlooks the waterfront (dummy variable)
view : quality-of-view score (0 to 4)
condition : condition score of the home (1 to 5)
grade : building construction and design score (1 to 13)
    1-3 construction and design below standard
    7 average construction and design
    11-13 high-quality construction and design
sqft_above : interior living space above ground level (square feet)
sqft_basement : interior living space below ground level (square feet)
yr_built : year the house was originally built
yr_renovated : year of the most recent renovation
zipcode : zip code
lat : latitude
long : longitude
sqft_living15 : interior living space of the nearest 15 neighbors
sqft_lot15 : land area of the nearest 15 neighbors
'''
# ####### Data cleaning
# # Handle duplicate values
# price_dup = price.duplicated()
# print(price[price_dup])
# price = price.drop_duplicates()
#
# # Handle missing values
# isNA_price = price.isnull()
# print(price[isNA_price.any(axis = 1)])
#
# plt.scatter(x= list(range(1,len(price['bedrooms'])+1)), y = price['bedrooms'])
# plt.title('Scatter plot for numbers of bedrooms')
# plt.xlabel('samples')
# plt.ylabel('bedrooms')
# plt.show()
# Remove outliers
outlier = [i for i in range(len(price['bedrooms'])) if price['bedrooms'][i] > 10]
print(outlier)
price.drop(outlier, inplace=True)
# outlier = [i for i in range(len(price['bedrooms'])) if price['bedrooms'][i] > 10]
outlier1 = [i for i in price['bedrooms'] if i > 10]
print(outlier1)
# Feature processing
## grade
price['grade'] = [1 if i <= 3 else 2 if i< 7 else 3 if i== 7 else 4 if i <11 else 5
for i in price['grade']]
print(price['grade'].head(10))
# # col_name = price.columns.tolist()
# # price.insert(loc = col_name.index('grade')+1, column='grade_category', value = grade_category)
#
## price
per_price = price['price']/price['sqft_living']
price.insert(loc = 1, column='per_price', value = per_price)
print(price.iloc[:2,:3])
print('#############')
print(price[price['grade']==1]['per_price'])
print('#############')
## age
print(price[['yr_built', 'date']][:3])
year = [eval(i[:4]) for i in price['date']]
price['age'] = year - price['yr_built']
print(price['age'][:3])
print(min(price['age']), max(price['age']))
bins = [min(price['age'])-1, 10, 20, 30,40, 50, 60, 70, 80, 90,100, max(price['age'])+1]
cut = pd.cut(price['age'], bins, right=False)
col_name = price.columns.tolist()
price.insert(loc = col_name.index('age')+1, column = 'age_category', value = cut)
print(price.iloc[:5, [col_name.index('age'), col_name.index('age')+1]])
# sale_month
price['sale_month'] = [eval(i[4:6]) if i[4]!= '0' else eval(i[5:6]) for i in price['date']]
print(price['sale_month'].head())
## yr_renovated
# 1 if the house has been renovated
price['yr_renovated'] = price['yr_renovated'].apply(lambda x: 1 if x>0 else 0)
## sqft_basement
price['if_basement'] = price['sqft_basement'].apply(lambda x: 1 if x>0 else 0)
print(price[['yr_renovated', 'if_basement']].head())
# print(price.head())
####### Analysis of the relationship between price per unit of living area and house scores
# View quality score
# data = pd.concat([price['per_price'],price['view'],price['condition'],price['grade']],axis=1)
# sns.boxplot(x = 'view', y = 'per_price', data = price, palette = 'hls')
# plt.show()
# sns.boxplot(x = 'condition', y = 'per_price', data = price , palette = 'hls')
# plt.show()
# sns.boxplot(x = 'grade', y = 'per_price', data = price, palette = 'hls')
# plt.show()
# print(price[price['grade']==1]['per_price'])
### Correlation analysis
# Relationship between house price, house area and configuration
# Heatmap of correlation coefficients between all variables
# corrmat = price.corr()
# f, ax = plt.subplots(figsize=(9, 8))
# sns.heatmap(corrmat, square= True, annot= True,center = None, fmt='.2f', robust = True,
# linewidths=0.05, annot_kws={'size':6})
# plt.xticks(rotation=90)
# plt.yticks(rotation=360)
# plt.show()
# Variables with relatively strong correlation
# Pair plot of multiple variables
# sns.set()
# col = ['price','bedrooms','bathrooms','sqft_living','grade','sqft_above','sqft_living15']
# sns.pairplot(price[col], size = 1.5)
# plt.show()
# # sqft_living (living area) vs. house price
# sns.jointplot(x='sqft_living', y = 'price', data = price, kind='reg', size = 5)
# plt.show()
# # sqft_above (above-ground living area) vs. house price
# sns.jointplot(x='sqft_above', y = 'price', data = price, kind='reg', size = 5)
# plt.show()
# # living area of the 15 nearest neighbors vs. house price
# sns.jointplot(x='sqft_living15', y = 'price', data = price, kind='reg', size = 5)
# plt.show()
#
# # number of bedrooms vs. house price
# data = pd.concat([price['price'],price['bedrooms']],axis=1)
# fig = sns.boxplot(x = 'bedrooms', y = 'price', data = data)
# fig.axis(ymin = 0, ymax = 4000000)
# plt.show()
#
# # number of bathrooms vs. house price
# data = pd.concat([price['price'],price['bathrooms']],axis=1)
# fig = sns.boxplot(x = 'bathrooms', y = 'price', data = data)
# fig.axis(ymin = 0, ymax = 4000000)
# plt.show()
# Price per unit of living area vs. sale year and house age
# sns.distplot(price['age'], bins, hist_kws=dict(edgecolor='k'))
# plt.ylabel('Percentage')
# plt.show()
# Sales count by month
month_count = [list(price['sale_month']).count(i) for i in range(1,13)]
dic = {'month': range(1,13),
'count': month_count}
df = pd.DataFrame(dic)
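# Illustrative follow-up (an assumption, not part of the original script): plot the number of sales per month.
df.plot(x='month', y='count', kind='bar', legend=False)
plt.xlabel('month')
plt.ylabel('number of sales')
plt.show()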
import vectorbt as vbt
import numpy as np
import pandas as pd
from numba import njit
from datetime import datetime
import pytest
from vectorbt.generic import nb as generic_nb
from vectorbt.generic.enums import range_dt
from tests.utils import record_arrays_close
seed = 42
day_dt = np.timedelta64(86400000000000)
mask = pd.DataFrame([
[True, False, False],
[False, True, False],
[False, False, True],
[True, False, False],
[False, True, False]
], index=pd.Index([
datetime(2020, 1, 1),
datetime(2020, 1, 2),
datetime(2020, 1, 3),
datetime(2020, 1, 4),
datetime(2020, 1, 5)
]), columns=['a', 'b', 'c'])
ts = pd.Series([1., 2., 3., 2., 1.], index=mask.index)
price = pd.DataFrame({
'open': [10, 11, 12, 11, 10],
'high': [11, 12, 13, 12, 11],
'low': [9, 10, 11, 10, 9],
'close': [11, 12, 11, 10, 9]
})
group_by = pd.Index(['g1', 'g1', 'g2'])
# ############# Global ############# #
def setup_module():
vbt.settings.numba['check_func_suffix'] = True
vbt.settings.caching.enabled = False
vbt.settings.caching.whitelist = []
vbt.settings.caching.blacklist = []
def teardown_module():
vbt.settings.reset()
# ############# accessors.py ############# #
class TestAccessors:
def test_indexing(self):
assert mask.vbt.signals['a'].total() == mask['a'].vbt.signals.total()
def test_freq(self):
assert mask.vbt.signals.wrapper.freq == day_dt
assert mask['a'].vbt.signals.wrapper.freq == day_dt
assert mask.vbt.signals(freq='2D').wrapper.freq == day_dt * 2
assert mask['a'].vbt.signals(freq='2D').wrapper.freq == day_dt * 2
assert pd.Series([False, True]).vbt.signals.wrapper.freq is None
assert pd.Series([False, True]).vbt.signals(freq='3D').wrapper.freq == day_dt * 3
assert pd.Series([False, True]).vbt.signals(freq=np.timedelta64(4, 'D')).wrapper.freq == day_dt * 4
@pytest.mark.parametrize(
"test_n",
[1, 2, 3, 4, 5],
)
def test_fshift(self, test_n):
pd.testing.assert_series_equal(mask['a'].vbt.signals.fshift(test_n), mask['a'].shift(test_n, fill_value=False))
np.testing.assert_array_equal(
mask['a'].vbt.signals.fshift(test_n).values,
generic_nb.fshift_1d_nb(mask['a'].values, test_n, fill_value=False)
)
pd.testing.assert_frame_equal(mask.vbt.signals.fshift(test_n), mask.shift(test_n, fill_value=False))
@pytest.mark.parametrize(
"test_n",
[1, 2, 3, 4, 5],
)
def test_bshift(self, test_n):
pd.testing.assert_series_equal(
mask['a'].vbt.signals.bshift(test_n),
mask['a'].shift(-test_n, fill_value=False))
np.testing.assert_array_equal(
mask['a'].vbt.signals.bshift(test_n).values,
generic_nb.bshift_1d_nb(mask['a'].values, test_n, fill_value=False)
)
pd.testing.assert_frame_equal(mask.vbt.signals.bshift(test_n), mask.shift(-test_n, fill_value=False))
def test_empty(self):
pd.testing.assert_series_equal(
pd.Series.vbt.signals.empty(5, index=np.arange(10, 15), name='a'),
pd.Series(np.full(5, False), index=np.arange(10, 15), name='a')
)
pd.testing.assert_frame_equal(
pd.DataFrame.vbt.signals.empty((5, 3), index=np.arange(10, 15), columns=['a', 'b', 'c']),
pd.DataFrame(np.full((5, 3), False), index=np.arange(10, 15), columns=['a', 'b', 'c'])
)
pd.testing.assert_series_equal(
pd.Series.vbt.signals.empty_like(mask['a']),
pd.Series(np.full(mask['a'].shape, False), index=mask['a'].index, name=mask['a'].name)
)
pd.testing.assert_frame_equal(
pd.DataFrame.vbt.signals.empty_like(mask),
pd.DataFrame(np.full(mask.shape, False), index=mask.index, columns=mask.columns)
)
def test_generate(self):
@njit
def choice_func_nb(from_i, to_i, col, n):
if col == 0:
return np.arange(from_i, to_i)
elif col == 1:
return np.full(1, from_i)
else:
return np.full(1, to_i - n)
pd.testing.assert_series_equal(
pd.Series.vbt.signals.generate(5, choice_func_nb, 1, index=mask['a'].index, name=mask['a'].name),
pd.Series(
np.array([True, True, True, True, True]),
index=mask['a'].index,
name=mask['a'].name
)
)
with pytest.raises(Exception):
_ = pd.Series.vbt.signals.generate((5, 2), choice_func_nb, 1)
pd.testing.assert_frame_equal(
pd.DataFrame.vbt.signals.generate(
(5, 3), choice_func_nb, 1, index=mask.index, columns=mask.columns),
pd.DataFrame(
np.array([
[True, True, False],
[True, False, False],
[True, False, False],
[True, False, False],
[True, False, True]
]),
index=mask.index,
columns=mask.columns
)
)
pd.testing.assert_frame_equal(
pd.DataFrame.vbt.signals.generate(
(5, 3), choice_func_nb, 1, pick_first=True, index=mask.index, columns=mask.columns),
pd.DataFrame(
np.array([
[True, True, False],
[False, False, False],
[False, False, False],
[False, False, False],
[False, False, True]
]),
index=mask.index,
columns=mask.columns
)
)
def test_generate_both(self):
@njit
def entry_func_nb(from_i, to_i, col, temp_int):
temp_int[0] = from_i
return temp_int[:1]
@njit
def exit_func_nb(from_i, to_i, col, temp_int):
temp_int[0] = from_i
return temp_int[:1]
temp_int = np.empty((mask.shape[0],), dtype=np.int_)
en, ex = pd.Series.vbt.signals.generate_both(
5, entry_func_nb, (temp_int,), exit_func_nb, (temp_int,),
index=mask['a'].index, name=mask['a'].name)
pd.testing.assert_series_equal(
en,
pd.Series(
np.array([True, False, True, False, True]),
index=mask['a'].index,
name=mask['a'].name
)
)
pd.testing.assert_series_equal(
ex,
pd.Series(
np.array([False, True, False, True, False]),
index=mask['a'].index,
name=mask['a'].name
)
)
en, ex = pd.DataFrame.vbt.signals.generate_both(
(5, 3), entry_func_nb, (temp_int,), exit_func_nb, (temp_int,),
index=mask.index, columns=mask.columns)
pd.testing.assert_frame_equal(
en,
pd.DataFrame(
np.array([
[True, True, True],
[False, False, False],
[True, True, True],
[False, False, False],
[True, True, True]
]),
index=mask.index,
columns=mask.columns
)
)
pd.testing.assert_frame_equal(
ex,
pd.DataFrame(
np.array([
[False, False, False],
[True, True, True],
[False, False, False],
[True, True, True],
[False, False, False]
]),
index=mask.index,
columns=mask.columns
)
)
en, ex = pd.Series.vbt.signals.generate_both(
(5,), entry_func_nb, (temp_int,), exit_func_nb, (temp_int,),
index=mask['a'].index, name=mask['a'].name, entry_wait=1, exit_wait=0)
pd.testing.assert_series_equal(
en,
pd.Series(
np.array([True, True, True, True, True]),
index=mask['a'].index,
name=mask['a'].name
)
)
pd.testing.assert_series_equal(
ex,
pd.Series(
np.array([True, True, True, True, True]),
index=mask['a'].index,
name=mask['a'].name
)
)
en, ex = pd.Series.vbt.signals.generate_both(
(5,), entry_func_nb, (temp_int,), exit_func_nb, (temp_int,),
index=mask['a'].index, name=mask['a'].name, entry_wait=0, exit_wait=1)
pd.testing.assert_series_equal(
en,
pd.Series(
np.array([True, True, True, True, True]),
index=mask['a'].index,
name=mask['a'].name
)
)
pd.testing.assert_series_equal(
ex,
pd.Series(
np.array([False, True, True, True, True]),
index=mask['a'].index,
name=mask['a'].name
)
)
@njit
def entry_func2_nb(from_i, to_i, col, temp_int):
temp_int[0] = from_i
if from_i + 1 < to_i:
temp_int[1] = from_i + 1
return temp_int[:2]
return temp_int[:1]
@njit
def exit_func2_nb(from_i, to_i, col, temp_int):
temp_int[0] = from_i
if from_i + 1 < to_i:
temp_int[1] = from_i + 1
return temp_int[:2]
return temp_int[:1]
en, ex = pd.DataFrame.vbt.signals.generate_both(
(5, 3), entry_func2_nb, (temp_int,), exit_func2_nb, (temp_int,),
entry_pick_first=False, exit_pick_first=False,
index=mask.index, columns=mask.columns)
pd.testing.assert_frame_equal(
en,
pd.DataFrame(
np.array([
[True, True, True],
[True, True, True],
[False, False, False],
[False, False, False],
[True, True, True]
]),
index=mask.index,
columns=mask.columns
)
)
pd.testing.assert_frame_equal(
ex,
pd.DataFrame(
np.array([
[False, False, False],
[False, False, False],
[True, True, True],
[True, True, True],
[False, False, False]
]),
index=mask.index,
columns=mask.columns
)
)
def test_generate_exits(self):
@njit
def choice_func_nb(from_i, to_i, col, temp_int):
temp_int[0] = from_i
return temp_int[:1]
temp_int = np.empty((mask.shape[0],), dtype=np.int_)
pd.testing.assert_series_equal(
mask['a'].vbt.signals.generate_exits(choice_func_nb, temp_int),
pd.Series(
np.array([False, True, False, False, True]),
index=mask['a'].index,
name=mask['a'].name
)
)
pd.testing.assert_frame_equal(
mask.vbt.signals.generate_exits(choice_func_nb, temp_int),
pd.DataFrame(
np.array([
[False, False, False],
[True, False, False],
[False, True, False],
[False, False, True],
[True, False, False]
]),
index=mask.index,
columns=mask.columns
)
)
pd.testing.assert_frame_equal(
mask.vbt.signals.generate_exits(choice_func_nb, temp_int, wait=0),
pd.DataFrame(
np.array([
[True, False, False],
[False, True, False],
[False, False, True],
[True, False, False],
[False, True, False]
]),
index=mask.index,
columns=mask.columns
)
)
@njit
def choice_func2_nb(from_i, to_i, col, temp_int):
for i in range(from_i, to_i):
temp_int[i - from_i] = i
return temp_int[:to_i - from_i]
pd.testing.assert_frame_equal(
mask.vbt.signals.generate_exits(choice_func2_nb, temp_int, until_next=False, pick_first=False),
pd.DataFrame(
np.array([
[False, False, False],
[True, False, False],
[True, True, False],
[True, True, True],
[True, True, True]
]),
index=mask.index,
columns=mask.columns
)
)
mask2 = pd.Series([True, True, True, True, True], index=mask.index)
pd.testing.assert_series_equal(
mask2.vbt.signals.generate_exits(choice_func_nb, temp_int, until_next=False, skip_until_exit=True),
pd.Series(
np.array([False, True, False, True, False]),
index=mask.index
)
)
def test_clean(self):
entries = pd.DataFrame([
[True, False, True],
[True, False, False],
[True, True, True],
[False, True, False],
[False, True, True]
], index=mask.index, columns=mask.columns)
exits = pd.Series([True, False, True, False, True], index=mask.index)
pd.testing.assert_frame_equal(
entries.vbt.signals.clean(),
pd.DataFrame(
np.array([
[True, False, True],
[False, False, False],
[False, True, True],
[False, False, False],
[False, False, True]
]),
index=mask.index,
columns=mask.columns
)
)
pd.testing.assert_frame_equal(
pd.DataFrame.vbt.signals.clean(entries),
pd.DataFrame(
np.array([
[True, False, True],
[False, False, False],
[False, True, True],
[False, False, False],
[False, False, True]
]),
index=mask.index,
columns=mask.columns
)
)
pd.testing.assert_frame_equal(
entries.vbt.signals.clean(exits)[0],
pd.DataFrame(
np.array([
[False, False, False],
[True, False, False],
[False, False, False],
[False, True, False],
[False, False, False]
]),
index=mask.index,
columns=mask.columns
)
)
pd.testing.assert_frame_equal(
entries.vbt.signals.clean(exits)[1],
pd.DataFrame(
np.array([
[False, False, False],
[False, False, False],
[False, False, False],
[False, False, False],
[True, False, False]
]),
index=mask.index,
columns=mask.columns
)
)
pd.testing.assert_frame_equal(
entries.vbt.signals.clean(exits, entry_first=False)[0],
pd.DataFrame(
np.array([
[False, False, False],
[True, False, False],
[False, False, False],
[False, True, False],
[False, False, False]
]),
index=mask.index,
columns=mask.columns
)
)
pd.testing.assert_frame_equal(
entries.vbt.signals.clean(exits, entry_first=False)[1],
pd.DataFrame(
np.array([
[False, True, False],
[False, False, False],
[False, False, False],
[False, False, False],
[True, False, False]
]),
index=mask.index,
columns=mask.columns
)
)
pd.testing.assert_frame_equal(
pd.DataFrame.vbt.signals.clean(entries, exits)[0],
pd.DataFrame(
np.array([
[False, False, False],
[True, False, False],
[False, False, False],
[False, True, False],
[False, False, False]
]),
index=mask.index,
columns=mask.columns
)
)
pd.testing.assert_frame_equal(
pd.DataFrame.vbt.signals.clean(entries, exits)[1],
pd.DataFrame(
np.array([
[False, False, False],
[False, False, False],
[False, False, False],
[False, False, False],
[True, False, False]
]),
index=mask.index,
columns=mask.columns
)
)
with pytest.raises(Exception):
_ = pd.Series.vbt.signals.clean(entries, entries, entries)
def test_generate_random(self):
pd.testing.assert_series_equal(
pd.Series.vbt.signals.generate_random(
5, n=3, seed=seed, index=mask['a'].index, name=mask['a'].name),
pd.Series(
np.array([False, True, True, False, True]),
index=mask['a'].index,
name=mask['a'].name
)
)
with pytest.raises(Exception):
_ = pd.Series.vbt.signals.generate_random((5, 2), n=3)
pd.testing.assert_frame_equal(
pd.DataFrame.vbt.signals.generate_random(
(5, 3), n=3, seed=seed, index=mask.index, columns=mask.columns),
pd.DataFrame(
np.array([
[False, False, True],
[True, True, True],
[True, True, False],
[False, True, True],
[True, False, False]
]),
index=mask.index,
columns=mask.columns
)
)
pd.testing.assert_frame_equal(
pd.DataFrame.vbt.signals.generate_random(
(5, 3), n=[0, 1, 2], seed=seed, index=mask.index, columns=mask.columns),
pd.DataFrame(
np.array([
[False, False, True],
[False, False, True],
[False, False, False],
[False, True, False],
[False, False, False]
]),
index=mask.index,
columns=mask.columns
)
)
pd.testing.assert_series_equal(
pd.Series.vbt.signals.generate_random(
5, prob=0.5, seed=seed, index=mask['a'].index, name=mask['a'].name),
pd.Series(
np.array([True, False, False, False, True]),
index=mask['a'].index,
name=mask['a'].name
)
)
with pytest.raises(Exception):
_ = pd.Series.vbt.signals.generate_random((5, 2), prob=3)
pd.testing.assert_frame_equal(
pd.DataFrame.vbt.signals.generate_random(
(5, 3), prob=0.5, seed=seed, index=mask.index, columns=mask.columns),
pd.DataFrame(
np.array([
[True, True, True],
[False, True, False],
[False, False, False],
[False, False, True],
[True, False, True]
]),
index=mask.index,
columns=mask.columns
)
)
pd.testing.assert_frame_equal(
pd.DataFrame.vbt.signals.generate_random(
(5, 3), prob=[0., 0.5, 1.], seed=seed, index=mask.index, columns=mask.columns),
pd.DataFrame(
np.array([
[False, True, True],
[False, True, True],
[False, False, True],
[False, False, True],
[False, False, True]
]),
index=mask.index,
columns=mask.columns
)
)
with pytest.raises(Exception):
pd.DataFrame.vbt.signals.generate_random((5, 3))
pd.testing.assert_frame_equal(
pd.DataFrame.vbt.signals.generate_random(
(5, 3), prob=[0., 0.5, 1.], pick_first=True, seed=seed, index=mask.index, columns=mask.columns),
pd.DataFrame(
np.array([
[False, True, True],
[False, False, False],
[False, False, False],
[False, False, False],
[False, False, False]
]),
index=mask.index,
columns=mask.columns
)
)
def test_generate_random_both(self):
# n
en, ex = pd.Series.vbt.signals.generate_random_both(
5, n=2, seed=seed, index=mask['a'].index, name=mask['a'].name)
pd.testing.assert_series_equal(
en,
pd.Series(
np.array([True, False, True, False, False]),
index=mask['a'].index,
name=mask['a'].name
)
)
pd.testing.assert_series_equal(
ex,
pd.Series(
np.array([False, True, False, False, True]),
index=mask['a'].index,
name=mask['a'].name
)
)
en, ex = pd.DataFrame.vbt.signals.generate_random_both(
(5, 3), n=2, seed=seed, index=mask.index, columns=mask.columns)
pd.testing.assert_frame_equal(
en,
pd.DataFrame(
np.array([
[True, True, True],
[False, False, False],
[True, True, False],
[False, False, True],
[False, False, False]
]),
index=mask.index,
columns=mask.columns
)
)
pd.testing.assert_frame_equal(
ex,
pd.DataFrame(
np.array([
[False, False, False],
[True, True, True],
[False, False, False],
[False, True, False],
[True, False, True]
]),
index=mask.index,
columns=mask.columns
)
)
en, ex = pd.DataFrame.vbt.signals.generate_random_both(
(5, 3), n=[0, 1, 2], seed=seed, index=mask.index, columns=mask.columns)
pd.testing.assert_frame_equal(
en,
pd.DataFrame(
np.array([
[False, False, True],
[False, True, False],
[False, False, False],
[False, False, True],
[False, False, False]
]),
index=mask.index,
columns=mask.columns
)
)
pd.testing.assert_frame_equal(
ex,
pd.DataFrame(
np.array([
[False, False, False],
[False, False, True],
[False, False, False],
[False, True, False],
[False, False, True]
]),
index=mask.index,
columns=mask.columns
)
)
en, ex = pd.DataFrame.vbt.signals.generate_random_both((2, 3), n=2, seed=seed, entry_wait=1, exit_wait=0)
pd.testing.assert_frame_equal(
en,
pd.DataFrame(
np.array([
[True, True, True],
[True, True, True],
])
)
)
pd.testing.assert_frame_equal(
ex,
pd.DataFrame(
np.array([
[True, True, True],
[True, True, True]
])
)
)
en, ex = pd.DataFrame.vbt.signals.generate_random_both((3, 3), n=2, seed=seed, entry_wait=0, exit_wait=1)
pd.testing.assert_frame_equal(
en,
pd.DataFrame(
np.array([
[True, True, True],
[True, True, True],
[False, False, False]
])
)
)
pd.testing.assert_frame_equal(
ex,
pd.DataFrame(
np.array([
[False, False, False],
[True, True, True],
[True, True, True],
])
)
)
en, ex = pd.DataFrame.vbt.signals.generate_random_both((7, 3), n=2, seed=seed, entry_wait=2, exit_wait=2)
pd.testing.assert_frame_equal(
en,
pd.DataFrame(
np.array([
[True, True, True],
[False, False, False],
[False, False, False],
[False, False, False],
[True, True, True],
[False, False, False],
[False, False, False]
])
)
)
pd.testing.assert_frame_equal(
ex,
pd.DataFrame(
np.array([
[False, False, False],
[False, False, False],
[True, True, True],
[False, False, False],
[False, False, False],
[False, False, False],
[True, True, True]
])
)
)
n = 10
a = np.full(n * 2, 0.)
for i in range(10000):
en, ex = pd.Series.vbt.signals.generate_random_both(1000, n, entry_wait=2, exit_wait=2)
_a = np.empty((n * 2,), dtype=np.int_)
_a[0::2] = np.flatnonzero(en)
_a[1::2] = np.flatnonzero(ex)
a += _a
greater = a > 10000000 / (2 * n + 1) * np.arange(0, 2 * n)
less = a < 10000000 / (2 * n + 1) * np.arange(2, 2 * n + 2)
assert np.all(greater & less)
# probs
en, ex = pd.Series.vbt.signals.generate_random_both(
5, entry_prob=0.5, exit_prob=1., seed=seed, index=mask['a'].index, name=mask['a'].name)
pd.testing.assert_series_equal(
en,
pd.Series(
np.array([True, False, False, False, True]),
index=mask['a'].index,
name=mask['a'].name
)
)
pd.testing.assert_series_equal(
ex,
pd.Series(
np.array([False, True, False, False, False]),
index=mask['a'].index,
name=mask['a'].name
)
)
en, ex = pd.DataFrame.vbt.signals.generate_random_both(
(5, 3), entry_prob=0.5, exit_prob=1., seed=seed, index=mask.index, columns=mask.columns)
pd.testing.assert_frame_equal(
en,
pd.DataFrame(
np.array([
[True, True, True],
[False, False, False],
[False, False, False],
[False, False, True],
[True, False, False]
]),
index=mask.index,
columns=mask.columns
)
)
pd.testing.assert_frame_equal(
ex,
pd.DataFrame(
np.array([
[False, False, False],
[True, True, True],
[False, False, False],
[False, False, False],
[False, False, True]
]),
index=mask.index,
columns=mask.columns
)
)
en, ex = pd.DataFrame.vbt.signals.generate_random_both(
(5, 3), entry_prob=[0., 0.5, 1.], exit_prob=[0., 0.5, 1.],
seed=seed, index=mask.index, columns=mask.columns)
pd.testing.assert_frame_equal(
en,
pd.DataFrame(
np.array([
[False, True, True],
[False, False, False],
[False, False, True],
[False, False, False],
[False, False, True]
]),
index=mask.index,
columns=mask.columns
)
)
pd.testing.assert_frame_equal(
ex,
pd.DataFrame(
np.array([
[False, False, False],
[False, True, True],
[False, False, False],
[False, False, True],
[False, False, False]
]),
index=mask.index,
columns=mask.columns
)
)
en, ex = pd.DataFrame.vbt.signals.generate_random_both(
(5, 3), entry_prob=1., exit_prob=1., exit_wait=0,
seed=seed, index=mask.index, columns=mask.columns)
pd.testing.assert_frame_equal(
en,
pd.DataFrame(
np.array([
[True, True, True],
[True, True, True],
[True, True, True],
[True, True, True],
[True, True, True]
]),
index=mask.index,
columns=mask.columns
)
)
pd.testing.assert_frame_equal(
ex,
pd.DataFrame(
np.array([
[True, True, True],
[True, True, True],
[True, True, True],
[True, True, True],
[True, True, True]
]),
index=mask.index,
columns=mask.columns
)
)
en, ex = pd.DataFrame.vbt.signals.generate_random_both(
(5, 3), entry_prob=1., exit_prob=1., entry_pick_first=False, exit_pick_first=True,
seed=seed, index=mask.index, columns=mask.columns)
pd.testing.assert_frame_equal(
en,
pd.DataFrame(
np.array([
[True, True, True],
[True, True, True],
[True, True, True],
[True, True, True],
[True, True, True]
]),
index=mask.index,
columns=mask.columns
)
)
pd.testing.assert_frame_equal(
ex,
pd.DataFrame(
np.array([
[False, False, False],
[False, False, False],
[False, False, False],
[False, False, False],
[False, False, False]
]),
index=mask.index,
columns=mask.columns
)
)
en, ex = pd.DataFrame.vbt.signals.generate_random_both(
(5, 3), entry_prob=1., exit_prob=1., entry_pick_first=True, exit_pick_first=False,
seed=seed, index=mask.index, columns=mask.columns)
pd.testing.assert_frame_equal(
en,
pd.DataFrame(
np.array([
[True, True, True],
[False, False, False],
[False, False, False],
[False, False, False],
[False, False, False]
]),
index=mask.index,
columns=mask.columns
)
)
pd.testing.assert_frame_equal(
ex,
pd.DataFrame(
np.array([
[False, False, False],
[True, True, True],
[True, True, True],
[True, True, True],
[True, True, True]
]),
index=mask.index,
columns=mask.columns
)
)
# none
with pytest.raises(Exception):
pd.DataFrame.vbt.signals.generate_random((5, 3))
def test_generate_random_exits(self):
pd.testing.assert_series_equal(
mask['a'].vbt.signals.generate_random_exits(seed=seed),
pd.Series(
np.array([False, False, True, False, True]),
index=mask['a'].index,
name=mask['a'].name
)
)
pd.testing.assert_frame_equal(
mask.vbt.signals.generate_random_exits(seed=seed),
pd.DataFrame(
np.array([
[False, False, False],
[False, False, False],
[True, True, False],
[False, False, False],
[True, False, True]
]),
index=mask.index,
columns=mask.columns
)
)
pd.testing.assert_frame_equal(
mask.vbt.signals.generate_random_exits(seed=seed, wait=0),
pd.DataFrame(
np.array([
[True, False, False],
[False, False, False],
[False, True, False],
[False, False, True],
[True, True, False]
]),
index=mask.index,
columns=mask.columns
)
)
pd.testing.assert_series_equal(
mask['a'].vbt.signals.generate_random_exits(prob=1., seed=seed),
pd.Series(
np.array([False, True, False, False, True]),
index=mask['a'].index,
name=mask['a'].name
)
)
pd.testing.assert_frame_equal(
mask.vbt.signals.generate_random_exits(prob=1., seed=seed),
pd.DataFrame(
np.array([
[False, False, False],
[True, False, False],
[False, True, False],
[False, False, True],
[True, False, False]
]),
index=mask.index,
columns=mask.columns
)
)
pd.testing.assert_frame_equal(
mask.vbt.signals.generate_random_exits(prob=[0., 0.5, 1.], seed=seed),
pd.DataFrame(
np.array([
[False, False, False],
[False, False, False],
[False, False, False],
[False, True, True],
[False, False, False]
]),
index=mask.index,
columns=mask.columns
)
)
pd.testing.assert_frame_equal(
mask.vbt.signals.generate_random_exits(prob=1., wait=0, seed=seed),
pd.DataFrame(
np.array([
[True, False, False],
[False, True, False],
[False, False, True],
[True, False, False],
[False, True, False]
]),
index=mask.index,
columns=mask.columns
)
)
pd.testing.assert_frame_equal(
mask.vbt.signals.generate_random_exits(prob=1., until_next=False, seed=seed),
pd.DataFrame(
np.array([
[False, False, False],
[True, False, False],
[False, True, False],
[False, False, True],
[True, False, False]
]),
index=mask.index,
columns=mask.columns
)
)
def test_generate_stop_exits(self):
e = | pd.Series([True, False, False, False, False, False]) | pandas.Series |
import os
import sys
import inspect
from copy import deepcopy
import numpy as np
import pandas as pd
from ucimlr.helpers import (download_file, download_unzip, one_hot_encode_df_, xy_split,
normalize_df_, split_normalize_sequence, split_df, get_split, split_df_on_column)
from ucimlr.dataset import Dataset
from ucimlr.constants import TRAIN
from ucimlr.constants import REGRESSION
def all_datasets():
"""
Returns a list of all RegressionDataset classes.
"""
return [cls for _, cls in inspect.getmembers(sys.modules[__name__])
if inspect.isclass(cls)
and issubclass(cls, RegressionDataset)
and cls != RegressionDataset]
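# Hedged usage sketch (added for illustration, not part of the original module): a
# helper that lists every loader discovered by all_datasets(). Call it only after the
# module has finished importing, so the subclasses defined below are visible.
def list_dataset_names():
    return sorted(cls.__name__ for cls in all_datasets())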
class RegressionDataset(Dataset):
type_ = REGRESSION # Is this necessary?
@property
def num_targets(self):
return self.y.shape[1]
class Abalone(RegressionDataset):
"""
Link to the dataset [description](http://archive.ics.uci.edu/ml/datasets/Abalone).
# Parameters
root (str): Local path for storing/reading dataset files.
split (str): One of {'train', 'validation', 'test'}
validation_size (float): How large fraction in (0, 1) of the training partition to use for validation.
"""
def __init__(self, root, split=TRAIN, validation_size=0.2):
dataset_path = os.path.join(root, self.name)
filename = 'data.csv'
file_path = os.path.join(dataset_path, filename)
url = 'http://archive.ics.uci.edu/ml/machine-learning-databases/abalone/abalone.data'
download_file(url, dataset_path, filename)
df = pd.read_csv(file_path, header=None)
y_columns = df.columns[-1:]
one_hot_encode_df_(df)
df_test, df_train, df_valid = split_df(df, [0.2, 0.8 - 0.8 * validation_size, 0.8 * validation_size])
normalize_df_(df_train, other_dfs=[df_valid, df_test])
df_res = get_split(df_train, df_valid, df_test, split)
self.x, self.y = xy_split(df_res, y_columns)
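# Hedged usage sketch (added for illustration): load the Abalone training split and
# report its shapes. The 'uci_data' root directory is an assumption, not part of the
# original code.
def _demo_abalone(root='uci_data'):
    ds = Abalone(root, split=TRAIN)
    print('Abalone train:', ds.x.shape, ds.y.shape, 'num targets:', ds.num_targets)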
class AirFoil(RegressionDataset):
"""
Link to the dataset [description](http://archive.ics.uci.edu/ml/datasets/Airfoil+Self-Noise).
# Parameters
root (str): Local path for storing/reading dataset files.
split (str): One of {'train', 'validation', 'test'}
validation_size (float): How large fraction in (0, 1) of the training partition to use for validation.
"""
def __init__(self, root, split=TRAIN, validation_size=0.2):
dataset_path = os.path.join(root, self.name)
filename = 'airfoil_self_noise.dat'
url = 'http://archive.ics.uci.edu/ml/machine-learning-databases/00291/airfoil_self_noise.dat'
download_file(url, dataset_path, filename)
file_path = os.path.join(dataset_path, filename)
df = pd.read_csv(file_path, sep='\t', names=["Frequency(Hz)", "Angle of attacks(Deg)", "Chord length(m)", "Free-stream velocity(m/s)", "Suction side displacement thickness(m)", "Scaled sound pressure level(Db)"])
y_columns = ['Scaled sound pressure level(Db)']
self.x, self.y = split_normalize_sequence(df, y_columns, validation_size, split, self.type_)
class AirQuality(RegressionDataset):
"""
# Parameters
root (str): Local path for storing/reading dataset files.
split (str): One of {'train', 'validation', 'test'}
validation_size (float): How large fraction in (0, 1) of the training partition to use for validation.
"""
def __init__(self, root, split=TRAIN, validation_size=0.2):
dataset_path = os.path.join(root, self.name)
filename = 'AirQualityUCI.csv'
url = 'http://archive.ics.uci.edu/ml/machine-learning-databases/00360/AirQualityUCI.zip'
download_unzip(url, dataset_path)
file_path = os.path.join(dataset_path, filename)
df = pd.read_csv(file_path, sep=';', parse_dates=[0, 1])
df.dropna(axis=0, how='all', inplace=True)
df.dropna(axis=1, how='all', inplace=True)
df.Date = (df.Date - df.Date.min()).astype('timedelta64[D]') # Days as int
df.Time = df.Time.apply(lambda x: int(x.split('.')[0])) # Hours as int
df['C6H6(GT)'] = df['C6H6(GT)'].apply(lambda x: float(x.replace(',', '.'))) # Target as float
# Some floats are given with ',' instead of '.'
df = df.applymap(lambda x: float(x.replace(',', '.')) if type(x) is str else x) # Target as float
df = df[df['C6H6(GT)'] != -200] # Drop all rows with missing target values
df.loc[df['CO(GT)'] == -200, 'CO(GT)'] = -10 # -200 means missing value, shifting this to be closer to
# the other values for this column
y_columns = ['C6H6(GT)']
self.x, self.y = split_normalize_sequence(df, y_columns, validation_size, split, self.type_)
class Appliances_energy_prediction(RegressionDataset):
"""
Link to the dataset [description](https://archive.ics.uci.edu/ml/datasets/Appliances+energy+prediction).
# Parameters
root (str): Local path for storing/reading dataset files.
split (str): One of {'train', 'validation', 'test'}
validation_size (float): How large fraction in (0, 1) of the training partition to use for validation.
"""
def __init__(self, root, split=TRAIN, validation_size=0.2):
dataset_path = os.path.join(root, self.name)
filename = 'energydata_complete.csv'
url = 'https://archive.ics.uci.edu/ml/machine-learning-databases/00374/energydata_complete.csv'
download_file(url, dataset_path, filename)
file_path = os.path.join(dataset_path, filename)
df = pd.read_csv(file_path, parse_dates=[0, 1])
df.date = (df.date - df.date.min()).astype('timedelta64[D]')
y_columns = ['Appliances']
self.x, self.y = split_normalize_sequence(df, y_columns, validation_size, split, self.type_)
self.problem_type = REGRESSION
class AutoMPG(RegressionDataset):
"""
Link to the dataset [description](https://archive.ics.uci.edu/ml/datasets/Automobile).
# Parameters
root (str): Local path for storing/reading dataset files.
split (str): One of {'train', 'validation', 'test'}
validation_size (float): How large fraction in (0, 1) of the training partition to use for validation.
"""
def __init__(self, root, split=TRAIN, validation_size=0.2):
dataset_path = os.path.join(root, self.name)
filename = 'auto-mpg.data'
url = 'http://archive.ics.uci.edu/ml/machine-learning-databases/auto-mpg/auto-mpg.data'
download_file(url, dataset_path, filename)
file_path = os.path.join(dataset_path, filename)
df = pd.read_csv(file_path, sep='\s+', names =["mpg", "cylinders", "displacements", "horsepower", "weight", "acceleration", "model year", "origin", "car name"])
y_columns = ['mpg']
self.x, self.y = split_normalize_sequence(df, y_columns, validation_size, split, self.type_)
self.problem_type = REGRESSION
class Automobile(RegressionDataset):
"""
Link to the dataset [description](https://archive.ics.uci.edu/ml/datasets/Automobile).
# Parameters
root (str): Local path for storing/reading dataset files.
split (str): One of {'train', 'validation', 'test'}
validation_size (float): How large fraction in (0, 1) of the training partition to use for validation.
"""
def __init__(self, root, split=TRAIN, validation_size=0.2):
dataset_path = os.path.join(root, self.name)
filename = 'imports-85.data'
url = 'http://archive.ics.uci.edu/ml/machine-learning-databases/autos/imports-85.data'
download_file(url, dataset_path, filename)
file_path = os.path.join(dataset_path, filename)
df = pd.read_csv(file_path, names=["symboling", "normalized-losses", "make", "fuel-type", "aspiration", "num-of-doors", "body-style", "drive-wheels", "engine-location", "wheel-base", "length", "width", "height", "curb-weight", "engine-type", "num-of-cylinders", "engine-size", "fuel-system", "bore", "stroke", "compression-ratio", "horsepower", "peak-rpm", "city-mpg", "highway-mpg", "price"])
y_columns = ['price']  # assumption: the original left this empty; 'price' is the conventional regression target for this dataset
self.x, self.y = split_normalize_sequence(df, y_columns, validation_size, split, self.type_)
class BeijingAirQuality(RegressionDataset):
"""
Link to the dataset [description](http://archive.ics.uci.edu/ml/datasets/Beijing+Multi-Site+Air-Quality+Data).
# Parameters
root (str): Local path for storing/reading dataset files.
split (str): One of {'train', 'validation', 'test'}
validation_size (float): How large fraction in (0, 1) of the training partition to use for validation.
"""
def __init__(self, root, split=TRAIN, validation_size=0.2):
dataset_path = os.path.join(root, self.name)
url = 'http://archive.ics.uci.edu/ml/machine-learning-databases/00501/PRSA2017_Data_20130301-20170228.zip'
download_unzip(url, dataset_path)
df = []
for fn in os.listdir(dataset_path):
if 'PRSA_Data' not in fn:
continue
file_path = os.path.join(dataset_path, fn)
df.append(pd.read_csv(file_path))
class BeijingPM(RegressionDataset):
"""
Link to the dataset [description](http://archive.ics.uci.edu/ml/datasets/Beijing+PM2.5+Data).
# Parameters
root (str): Local path for storing/reading dataset files.
split (str): One of {'train', 'validation', 'test'}
validation_size (float): How large fraction in (0, 1) of the training partition to use for validation.
"""
def __init__(self, root, split=TRAIN, validation_size=0.2):
dataset_path = os.path.join(root, self.name)
filename = 'PRSA_data_2010.1.1-2014.12.31.csv'
url = 'http://archive.ics.uci.edu/ml/machine-learning-databases/00381/PRSA_data_2010.1.1-2014.12.31.csv'
download_file(url, dataset_path, filename)
file_path = os.path.join(dataset_path, filename)
df = pd.read_csv(file_path)
y_columns=['pm2.5']
self.x, self.y = split_normalize_sequence(df, y_columns, validation_size, split, self.type_)
self.problem_type = REGRESSION
class BiasCorrection(RegressionDataset):
"""
Link to the dataset [description](http://archive.ics.uci.edu/ml/datasets/Bias+correction+of+numerical+prediction+model+temperature+forecast).
# Parameters
root (str): Local path for storing/reading dataset files.
split (str): One of {'train', 'validation', 'test'}
validation_size (float): How large fraction in (0, 1) of the training partition to use for validation.
"""
def __init__(self, root, split=TRAIN, validation_size=0.2):
dataset_path = os.path.join(root, self.name)
filename = 'Bias_correction_ucl.csv'
url = 'http://archive.ics.uci.edu/ml/machine-learning-databases/00514/Bias_correction_ucl.csv'
download_file(url, dataset_path, filename)
file_path = os.path.join(dataset_path, filename)
df = pd.read_csv(file_path, index_col = 'Date', parse_dates= True)
class BikeSharing(RegressionDataset):
"""
Link to the dataset [description](http://archive.ics.uci.edu/ml/datasets/Bike+Sharing+Dataset).
# Parameters
root (str): Local path for storing/reading dataset files.
split (str): One of {'train', 'validation', 'test'}
validation_size (float): How large fraction in (0, 1) of the training partition to use for validation.
"""
def __init__(self, root, split=TRAIN, validation_size=0.2):
dataset_path = os.path.join(root, self.name)
url = 'http://archive.ics.uci.edu/ml/machine-learning-databases/00275/Bike-Sharing-Dataset.zip'
download_unzip(url, dataset_path)
df = []
for fn in os.listdir(dataset_path):
file_path = os.path.join(dataset_path, fn)
df.append(pd.read_csv(file_path))
class CarbonNanotubes(RegressionDataset):
"""
Link to the dataset [description](http://archive.ics.uci.edu/ml/datasets/Carbon+Nanotubes).
# Parameters
root (str): Local path for storing/reading dataset files.
split (str): One of {'train', 'validation', 'test'}
validation_size (float): How large fraction in (0, 1) of the training partition to use for validation.
"""
def __init__(self, root, split=TRAIN, validation_size=0.2):
dataset_path = os.path.join(root, self.name)
filename = 'carbon_nanotubes.csv'
url = 'http://archive.ics.uci.edu/ml/machine-learning-databases/00448/carbon_nanotubes.csv'
download_file(url, dataset_path, filename)
file_path = os.path.join(dataset_path, filename)
df = pd.read_csv(file_path,sep=';')
class ChallengerShuttleORing(RegressionDataset):
"""
Link to the dataset [description](http://archive.ics.uci.edu/ml/datasets/Challenger+USA+Space+Shuttle+O-Ring).
# Parameters
root (str): Local path for storing/reading dataset files.
split (str): One of {'train', 'validation', 'test'}
validation_size (float): How large fraction in (0, 1) of the training partition to use for validation.
"""
def __init__(self, root, split=TRAIN, validation_size=0.2):
dataset_path = os.path.join(root, self.name)
filename = 'o-ring-erosion-only.data'
url = 'http://archive.ics.uci.edu/ml/machine-learning-databases/space-shuttle/o-ring-erosion-only.data'
download_file(url, dataset_path, filename)
file_path = os.path.join(dataset_path, filename)
df = pd.read_csv(file_path,sep='\s+')
class BlogFeedback(RegressionDataset):
"""
Link to the dataset [description](http://archive.ics.uci.edu/ml/datasets/BlogFeedback).
# Parameters
root (str): Local path for storing/reading dataset files.
split (str): One of {'train', 'validation', 'test'}
validation_size (float): How large fraction in (0, 1) of the training partition to use for validation.
"""
def __init__(self, root, split=TRAIN, validation_size=0.2):
file_name = 'blogData_train.csv'
dataset_path = os.path.join(root, self.name)
url = 'http://archive.ics.uci.edu/ml/machine-learning-databases/00304/BlogFeedback.zip'
download_unzip(url, dataset_path)
# Iterate all test csv and concatenate to one DataFrame
test_dfs = []
for fn in os.listdir(dataset_path):
if 'blogData_test' not in fn:
continue
file_path = os.path.join(dataset_path, fn)
test_dfs.append(pd.read_csv(file_path, header=None))
df_test = pd.concat(test_dfs)
file_path = os.path.join(dataset_path, file_name)
df_train_valid = pd.read_csv(file_path, header=None)
y_columns = [280]
df_train_valid[y_columns[0]] = np.log(df_train_valid[y_columns[0]] + 0.01)
df_test[y_columns[0]] = np.log(df_test[y_columns[0]] + 0.01)
page_columns = list(range(50))
for i, (_, df_group) in enumerate(df_train_valid.groupby(page_columns)):
df_train_valid.loc[df_group.index, 'page_id'] = i
df_train, df_valid = split_df_on_column(df_train_valid, [1 - validation_size, validation_size], 'page_id')
df_train.drop(columns='page_id', inplace=True)
df_valid.drop(columns='page_id', inplace=True)
normalize_df_(df_train, other_dfs=[df_valid, df_test])
df_res = get_split(df_train, df_valid, df_test, split)
self.x, self.y = xy_split(df_res, y_columns)
class CommunitiesCrime(RegressionDataset):
"""
Link to the dataset [description](http://archive.ics.uci.edu/ml/datasets/Communities+and+Crime).
# Parameters
root (str): Local path for storing/reading dataset files.
split (str): One of {'train', 'validation', 'test'}
validation_size (float): How large fraction in (0, 1) of the training partition to use for validation.
"""
def __init__(self, root, split=TRAIN, validation_size=0.2):
dataset_path = os.path.join(root, self.name)
filename = 'communities.data'
url = 'http://archive.ics.uci.edu/ml/machine-learning-databases/communities/communities.data'
download_file(url, dataset_path, filename)
file_path = os.path.join(dataset_path, filename)
df = pd.read_csv(file_path,header=None)
class ConcreteSlumpTest(RegressionDataset):
"""
Link to the dataset [description](http://archive.ics.uci.edu/ml/datasets/Concrete+Slump+Test).
# Parameters
root (str): Local path for storing/reading dataset files.
split (str): One of {'train', 'validation', 'test'}
validation_size (float): How large fraction in (0, 1) of the training partition to use for validation.
"""
def __init__(self, root, split=TRAIN, validation_size=0.2):
dataset_path = os.path.join(root, self.name)
filename = 'slump_test.data'
url = 'http://archive.ics.uci.edu/ml/machine-learning-databases/concrete/slump/slump_test.data'
download_file(url, dataset_path, filename)
file_path = os.path.join(dataset_path, filename)
df = pd.read_csv(file_path,sep='\s+')
class PropulsionPlants (RegressionDataset):
"""
Link to the dataset [description](http://archive.ics.uci.edu/ml/datasets/Condition+Based+Maintenance+of+Naval+Propulsion+Plants).
# Parameters
root (str): Local path for storing/reading dataset files.
split (str): One of {'train', 'validation', 'test'}
validation_size (float): How large fraction in (0, 1) of the training partition to use for validation.
"""
def __init__(self, root, split=TRAIN, validation_size=0.2):
dataset_path = os.path.join(root, self.name)
url = 'http://archive.ics.uci.edu/ml/machine-learning-databases/00316/UCI CBM Dataset.zip'
download_unzip(url, dataset_path)
filename = 'data.txt'
file_path = os.path.join(dataset_path, filename)
df = pd.read_csv(file_path, sep='\s+', header=None)  # data.txt is whitespace-separated with no header; the original arguments were copied from the bike-sharing loader
class ConcreteCompressiveStrength (RegressionDataset):
"""
Link to the dataset [description](http://archive.ics.uci.edu/ml/datasets/Concrete+Compressive+Strength).
# Parameters
root (str): Local path for storing/reading dataset files.
split (str): One of {'train', 'validation', 'test'}
validation_size (float): How large fraction in (0, 1) of the training partition to use for validation.
"""
def __init__(self, root, split=TRAIN, validation_size=0.2):
dataset_path = os.path.join(root, self.name)
filename = 'Concrete_Data.xls'
url = 'http://archive.ics.uci.edu/ml/machine-learning-databases/concrete/compressive/Concrete_Data.xls'
download_file(url, dataset_path, filename)
file_path = os.path.join(dataset_path, filename)
df = pd.read_excel(file_path)
class ComputerHardware (RegressionDataset):
"""
Link to the dataset [description](http://archive.ics.uci.edu/ml/datasets/Computer+Hardware).
# Parameters
root (str): Local path for storing/reading dataset files.
split (str): One of {'train', 'validation', 'test'}
validation_size (float): How large fraction in (0, 1) of the training partition to use for validation.
"""
def __init__(self, root, split=TRAIN, validation_size=0.2):
dataset_path = os.path.join(root, self.name)
filename = 'machine.data'
url = 'http://archive.ics.uci.edu/ml/machine-learning-databases/cpu-performance/machine.data'
download_file(url, dataset_path, filename)
file_path = os.path.join(dataset_path, filename)
df = pd.read_csv(file_path, names=["vendor name", "Model Name", "MYCT", "MMIN", "MMAX", "CACH", "CHMIN", "CHMAX", "PRP", "ERP"])
class CommunitiesCrimeUnnormalized (RegressionDataset):
"""
Link to the dataset [description](http://archive.ics.uci.edu/ml/datasets/Communities+and+Crime+Unnormalized).
# Parameters
root (str): Local path for storing/reading dataset files.
split (str): One of {'train', 'validation', 'test'}
validation_size (float): How large fraction in (0, 1) of the training partition to use for validation.
"""
def __init__(self, root, split=TRAIN, validation_size=0.2):
dataset_path = os.path.join(root, self.name)
filename = 'CommViolPredUnnormalizedData.txt'
url = 'http://archive.ics.uci.edu/ml/machine-learning-databases/00211/CommViolPredUnnormalizedData.txt'
download_file(url, dataset_path, filename)
file_path = os.path.join(dataset_path, filename)
df = pd.read_csv(file_path, keep_default_na=False, header=None)
class CTSlices(RegressionDataset):
"""
# Parameters
root (str): Local path for storing/reading dataset files.
split (str): One of {'train', 'validation', 'test'}
validation_size (float): How large fraction in (0, 1) of the training partition to use for validation.
"""
def __init__(self, root, split=TRAIN, validation_size=0.2):
dataset_path = os.path.join(root, self.name)
url = 'http://archive.ics.uci.edu/ml/machine-learning-databases/00206/slice_localization_data.zip'
download_unzip(url, dataset_path)
file_name = 'slice_localization_data.csv'
file_path = os.path.join(dataset_path, file_name)
df = | pd.read_csv(file_path) | pandas.read_csv |
"""
data
Created by: <NAME>
On: 19-11-19, 9:57
"""
from abc import ABC, abstractmethod
import pandas as pd
from drugex.core import util
class EnvironData(ABC):
def __init__(self, is_regression=False, subsample_size=None):
self.is_regression = is_regression
self.subsample_size = subsample_size
@abstractmethod
def update(self):
pass
@abstractmethod
def getX(self):
pass
@abstractmethod
def gety(self):
pass
@abstractmethod
def getGroundTruthData(self):
pass
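# Hedged illustration (added; not part of the original package): the smallest concrete
# EnvironData, backed by an in-memory DataFrame. The column arguments are assumptions
# made for the example only.
class InMemoryEnvironData(EnvironData):
    def __init__(self, frame, x_cols, y_col, is_regression=True):
        super().__init__(is_regression=is_regression)
        self._frame, self._x_cols, self._y_col = frame, x_cols, y_col
    def update(self):
        pass  # nothing to refresh for a static frame
    def getX(self):
        return self._frame[self._x_cols]
    def gety(self):
        return self._frame[self._y_col]
    def getGroundTruthData(self):
        return self._frame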
class ChEMBLCSV(EnvironData):
def __init__(self, input_file : str, activity_threshold=None, subsample_size=None, id_col='CMPD_CHEMBLID', is_regression=False):
super().__init__(subsample_size=subsample_size, is_regression=is_regression)
self.PAIR = [id_col, 'CANONICAL_SMILES', 'PCHEMBL_VALUE', 'ACTIVITY_COMMENT']
self.activity_threshold = activity_threshold
if self.activity_threshold is not None:
self.is_regression = False
else:
self.is_regression = True
self.input_file = input_file
self.df = | pd.DataFrame() | pandas.DataFrame |
import pandas as pd
import matplotlib.pyplot as plt
import tilemapbase
from math import log
import os.path as path
from os import listdir
from progress.bar import Bar
import imageio
# get dataset from source
print("Getting data set...")
raw_data = pd.read_csv("https://raw.githubusercontent.com/datadesk/california-coronavirus-data/master/latimes-place-totals.csv")
# convert date column to a time series
raw_data["date"] = | pd.to_datetime(raw_data["date"]) | pandas.to_datetime |
#!/usr/bin/env python3
"""
Created on Tue Jun 16 15:19:24 2020
@author: Barney
"""
import os
import geopandas as gpd
import pandas as pd
from numpy import linspace, zeros, diff, where, NaN
from shapely.geometry import Point, LineString
from shutil import copyfile
import sys
from glob import glob
def aggregate(root: str,
partition_root: str,
partition: str,
catchment: str = "cranbrook",
dt_sim = 5,
demo = False):
""" Aggregates partitions into compartments.
Parameters: catchment can be "cranbrook" or "norwich"
Returns a file
"""
E = 2.71828
DT_ARC = pd.Timedelta('1s') #i.e. assumes arc flows are given in average m3/s over timestep
DT_RAW = | pd.Timedelta('1m') | pandas.Timedelta |
import pandas as pd
from utils.helpers import run_check_by_row
from sql.query_templates import (QUERY_SUPPRESSED_NULLABLE_FIELD_NOT_NULL,
QUERY_SUPPRESSED_REQUIRED_FIELD_NOT_EMPTY,
QUERY_SUPPRESSED_NUMERIC_NOT_ZERO,
QUERY_VEHICLE_ACCIDENT_SUPPRESSION_ICD9, QUERY_VEHICLE_ACCIDENT_SUPPRESSION_ICD10,
QUERY_CANCER_CONCEPT_SUPPRESSION, QUERY_SUPPRESSED_FREE_TEXT_RESPONSE,
QUERY_GEOLOCATION_SUPPRESSION)
def check_field_suppression(check_df, project_id, post_dataset_id, pre_deid_dataset=None, mapping_dataset=None):
"""Run field suppression check
Parameters
----------
check_df: pd.DataFrame
Dataframe containing the checks that need to be done
project_id: str
Google Bigquery project_id
post_dataset_id: str
Bigquery dataset after de-id rules were run
pre_deid_dataset: str
Bigquery dataset before de-id rules were run
mapping_dataset: str
Bigquery dataset holding the mapping tables (not used by this check)
Returns
-------
pd.DataFrame
Concatenated results of the nullable, required-numeric and required-text field checks
"""
nullable_field = check_df[check_df['is_nullable'] == 'YES']
required_numeric_field = check_df[(check_df['is_nullable'] == 'NO') & (check_df['data_type'] == 'INT64')]
required_other_field = check_df[(check_df['is_nullable'] == 'NO') & (check_df['data_type'] != 'INT64')]
nullable_field_check = run_check_by_row(nullable_field, QUERY_SUPPRESSED_NULLABLE_FIELD_NOT_NULL,
project_id, post_dataset_id)
required_numeric_field_check = run_check_by_row(required_numeric_field, QUERY_SUPPRESSED_NUMERIC_NOT_ZERO,
project_id, post_dataset_id)
required_other_field_check = run_check_by_row(required_other_field, QUERY_SUPPRESSED_REQUIRED_FIELD_NOT_EMPTY,
project_id, post_dataset_id)
return | pd.concat([nullable_field_check, required_numeric_field_check, required_other_field_check], sort=True) | pandas.concat |
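# Hedged usage sketch (added for illustration; the project, dataset and CSV names below
# are assumptions, not part of the original module):
if __name__ == '__main__':
    checks = pd.read_csv('field_suppression_checks.csv')
    results = check_field_suppression(checks, 'my-gcp-project', 'post_deid_dataset')
    print(results.head())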
#Compare painted data with observed data - for three different sets of ages
#Works but could do with some tidying up of the code
import numpy as np
import h5py
import pandas as pd
import math
from astropy.io import fits
from astropy.table import Table, join
import matplotlib.pyplot as plt
from matplotlib.colors import PowerNorm
import matplotlib.colors as colors
import sys
sys.path.append('./scripts/')
from chemevo import *
#fl = chem_evo_data('./comparison.hdf5')
#fl = chem_evo_data('./output.hdf5')
fl = chem_evo_data('./KSsfr.hdf5')
hdf5_file = '/data/ktfm2/apogee_data/gaia_spectro.hdf5'
data_file_1 = '/data/ktfm2/apogee_data/apogee_astroNN_DR16.fits'
data_file_2 = '/data/jls/apokasc_astroNN.fits'
hdf = h5py.File(hdf5_file, "r")
dataset = hdf['data']
log_age_data = dataset["log10_age"]
ID_data = dataset["APOGEE_ID"]
SD_table = Table([ID_data, log_age_data], names=('apogee_id','log_age_data'))
hdu_list_1 = fits.open(data_file_1, memmap=True)
apogee_data = Table(hdu_list_1[1].data)
hdu_list_1.close()
hdu_list_2 = fits.open(data_file_2, memmap=True)
apokasc_data = Table(hdu_list_2[1].data)
hdu_list_2.close()
#print(apokasc_data.colnames)
def betw(x,l,u):
return (x>l)&(x<u)
def outs(x,l,u):
return (x<l)|(x>u)
#Join the APOGEE data table and table from Sanders & Das - will have less rows
#Comment out if not using S&D ages
#full_table = join(apogee_data, SD_table)
#apogee_data = full_table
#Use APOKASC data, comment out if not using
#apogee_data = apokasc_data
#===================================================================================================================
#Radial Migration filter
#fltr = (~pd.isna(apogee_data['rl']))&(~pd.isna(apogee_data['LogAge']))&(~pd.isna(apogee_data['FE_H']))&(~pd.isna(apogee_data['MG_H']))&(~pd.isna(apogee_data['LOGG']))&(~pd.isna(apogee_data['FE_H_ERR']))&(~pd.isna(apogee_data['MG_H_ERR']))&(apogee_data['LOGG']<3.5)&(betw(apogee_data['GALZ'],-5.0,5.0))&(apogee_data['FE_H_ERR']<0.2)&(betw(apogee_data['rl'],7.6,8.6))
#fltr = (~pd.isna(apogee_data['rl']))&(~pd.isna(apogee_data['log_age_data']))&(~pd.isna(apogee_data['FE_H']))&(~pd.isna(apogee_data['MG_H']))&(~pd.isna(apogee_data['LOGG']))&(~pd.isna(apogee_data['FE_H_ERR']))&(apogee_data['LOGG']<3.5)&(outs(apogee_data['GALZ'],-1.0,1.0))&(betw(apogee_data['GALZ'],-5.0,5.0))&(apogee_data['FE_H_ERR']<0.2)&(betw(apogee_data['rl'],7.6,8.6))
fltr = (~pd.isna(apogee_data['rl']))&(~pd.isna(apogee_data['age_lowess_correct']))&(apogee_data['age_lowess_correct']>0.0)&(~pd.isna(apogee_data['FE_H']))&(~pd.isna(apogee_data['MG_H']))&(~pd.isna(apogee_data['LOGG']))&(~pd.isna(apogee_data['FE_H_ERR']))&(apogee_data['LOGG']<3.5)&(outs(apogee_data['GALZ'],-1.0,1.0))&(betw(apogee_data['GALZ'],-5.0,5.0))&(apogee_data['FE_H_ERR']<0.2)&(betw(apogee_data['rl'],7.6,8.6))
lower_fltr = (~pd.isna(apogee_data['rl']))&(~pd.isna(apogee_data['age_lowess_correct']))&(apogee_data['age_lowess_correct']>0.0)&(~pd.isna(apogee_data['FE_H']))&(~pd.isna(apogee_data['MG_H']))&(~pd.isna(apogee_data['LOGG']))&(~pd.isna(apogee_data['FE_H_ERR']))&(apogee_data['LOGG']<3.5)&(outs(apogee_data['GALZ'],-1.0,1.0))&(betw(apogee_data['GALZ'],-5.0,5.0))&(apogee_data['FE_H_ERR']<0.2)&(betw(apogee_data['rl'],4.6,5.6))
upper_fltr = (~pd.isna(apogee_data['rl']))&(~pd.isna(apogee_data['age_lowess_correct']))&(apogee_data['age_lowess_correct']>0.0)&(~pd.isna(apogee_data['FE_H']))&(~pd.isna(apogee_data['MG_H']))&(~pd.isna(apogee_data['LOGG']))&(~ | pd.isna(apogee_data['FE_H_ERR']) | pandas.isna |
# Need to be able to process batch and single results
# Need to load in data with near_id
import pandas as pd
import os
import argparse
import yaml
from datetime import datetime
import sys
import pickle
CURR_FP = os.path.dirname(os.path.abspath(__file__))
sys.path.append(CURR_FP)
from model_utils import format_crash_data
from model_classes import Indata, Tuner, Tester
from train_model import process_features, get_features
import sklearn.linear_model as skl
BASE_DIR = os.path.dirname(
os.path.dirname(
os.path.dirname(
os.path.abspath(__file__))))
def predict(trained_model, predict_data, features, data_model_features, DATA_DIR):
"""
Returns
nothing, writes prediction segments to file
"""
# Ensure predict_data has the same columns and column ordering as required by trained_model
predict_data_reduced = predict_data[data_model_features]
preds = trained_model.predict_proba(predict_data_reduced)[::, 1]
predict_data['predictions'] = preds
predict_data.to_csv(os.path.join(DATA_DIR, 'predictions.csv'), index=False)
predict_data.to_json(os.path.join(DATA_DIR, 'predictions.json'), orient='index')
def get_accident_count_recent(predict_data, data):
data['DATE_TIME'] = pd.to_datetime(data['DATE_TIME'])
current_date = datetime.now()
past_7_days = current_date - pd.to_timedelta("7day")
past_30_days = current_date - pd.to_timedelta("30day")
past_365_days = current_date - pd.to_timedelta("365day")
past_1825_days = current_date - pd.to_timedelta("1825day")
past_3650_days = current_date - pd.to_timedelta("3650day")
recent_crash_7 = data.loc[data['DATE_TIME'] > past_7_days]
recent_crash_30 = data.loc[data['DATE_TIME'] > past_30_days]
recent_crash_365 = data.loc[data['DATE_TIME'] > past_365_days]
recent_crash_1825 = data.loc[data['DATE_TIME'] > past_1825_days]
recent_crash_3650 = data.loc[data['DATE_TIME'] > past_3650_days]
column_names = ['LAST_7_DAYS', 'LAST_30_DAYS', 'LAST_365_DAYS', 'LAST_1825_DAYS', 'LAST_3650_DAYS']
recent_crashes = [recent_crash_7, recent_crash_30, recent_crash_365, recent_crash_1825, recent_crash_3650]
for col_name in column_names:
predict_data[col_name] = ""
i = 0
print('About to append recent accident counts. This will take some time.')
for i in range(len(predict_data)):
current_segment_id = predict_data.loc[i].segment_id
for j in range(len(recent_crashes)):
# Find number of crashes at same segment that have occured in appropriate time period
recent_crash = recent_crashes[j]
num_crashes = len(recent_crash.loc[recent_crash['segment_id'] == current_segment_id])
# Assign this number to predict_data
col_name = column_names[j]
predict_data.at[i, col_name] = num_crashes
if i % 5000 == 0:
print("Got through {}% of results".format(100 * i / len(predict_data)))
return predict_data
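# Hedged alternative sketch (added for illustration, not part of the original script):
# the same recent-crash counts can be computed without the per-segment Python loop by
# counting crashes per segment within each window and mapping the counts back.
def get_accident_count_recent_fast(predict_data, data):
    data = data.copy()
    data['DATE_TIME'] = pd.to_datetime(data['DATE_TIME'])
    now = datetime.now()
    windows = {'LAST_7_DAYS': 7, 'LAST_30_DAYS': 30, 'LAST_365_DAYS': 365,
               'LAST_1825_DAYS': 1825, 'LAST_3650_DAYS': 3650}
    for col_name, days in windows.items():
        recent = data.loc[data['DATE_TIME'] > now - pd.Timedelta(days=days)]
        counts = recent.groupby('segment_id').size()
        predict_data[col_name] = predict_data['segment_id'].map(counts).fillna(0).astype(int)
    return predict_data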
def add_empty_features(predict_data, features):
# Read in the features from our modelling dataset
features_path = os.path.join(PROCESSED_DIR, 'features.pk')
with open(features_path, 'rb') as fp:
data_model_features = pickle.load(fp)
# Get the difference of features between our modelling dataset and predicting dataset
# Recast as a list to allow for looping over
feature_difference = list(set(data_model_features) - set(features))
# Add features in a loop as python doens't like adding all at one time
for feat in feature_difference:
predict_data[feat] = 0
return predict_data, feature_difference, data_model_features
if __name__ == '__main__':
print('Within train_model.py')
parser = argparse.ArgumentParser()
parser.add_argument('-c', '--config', type=str, help="yml file for model config, default is a base config with open street map data and crashes only")
parser.add_argument('-d', '--DATA_DIR', type=str, help="data directory")
parser.add_argument('-f', '--forceupdate', type=str, help="force update our data model or not", default=False)
args = parser.parse_args()
config = {}
if args.config:
config_file = args.config
with open(config_file) as f:
config = yaml.safe_load(f)
# Create required data paths
DATA_DIR = os.path.join(BASE_DIR, 'data', config['name'])
PROCESSED_DIR = os.path.join(BASE_DIR, 'data', config['name'], 'processed/')
crash_data_path = os.path.join(PROCESSED_DIR, 'crash.csv.gz')
road_data_path = os.path.join(PROCESSED_DIR, 'roads.csv.gz')
# Read in road data. We shall generate a prediction for each segment.
# predict_data = pd.read_csv(road_data_path)
# Use pk rather than csv to keep datatypes correct
with open(os.path.join(PROCESSED_DIR, 'roads.pk'), 'rb') as fp:
predict_data = pickle.load(fp)
# Reset the index so that it can be properly looped over in the attach accident count phase
# Drop because there should already be a correlate_id within the DF, was a duplicate
predict_data.reset_index(inplace=True, drop=True)
# Read in crash data. We shall use this to attach historic accident counts to road data.
data = | pd.read_csv(crash_data_path) | pandas.read_csv |
import numpy as np
from numpy import where
from flask import Flask, request, jsonify, render_template
import pandas as pd
from sklearn.ensemble import IsolationForest
from pyod.models.knn import KNN
import json
from flask import send_from_directory
from flask import current_app
app = Flask(__name__)
class Detect:
def __init__(self, file, non_num):
self.file = file
self.non_num = non_num
def IQR(self):
# anomaly=pd.DataFrame()
data = pd.DataFrame(self.file)
non_num=pd.DataFrame(self.non_num)
data.dropna(axis=0,inplace=True)
# data=data.select_dtypes(include=['float64','int64'])
Q1 = data.quantile(0.25)
Q3 = data.quantile(0.75)
IQR = Q3 - Q1
IQR_Out = data[((data < (Q1 - 1.5 * IQR)) |(data > (Q3 + 1.5 * IQR))).any(axis=1)]
IQR_Out = non_num.join(IQR_Out, how='inner')
IQR_Out.to_csv(r'IQR_Outlier.csv')
# IQR Method
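    # Hedged illustration (added; not part of the original app): the IQR rule applied to
    # a single numeric Series, mirroring the row-wise filter used in IQR() above.
    @staticmethod
    def iqr_mask(series):
        q1, q3 = series.quantile(0.25), series.quantile(0.75)
        iqr = q3 - q1
        return (series < q1 - 1.5 * iqr) | (series > q3 + 1.5 * iqr)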
def isolation(self):
anomaly=pd.DataFrame()
data_n= | pd.DataFrame(self.file) | pandas.DataFrame |
# pylint: disable-msg=W0612,E1101,W0141
import nose
from numpy.random import randn
import numpy as np
from pandas.core.index import Index, MultiIndex
from pandas import Panel, DataFrame, Series, notnull, isnull
from pandas.util.testing import (assert_almost_equal,
assert_series_equal,
assert_frame_equal,
assertRaisesRegexp)
import pandas.core.common as com
import pandas.util.testing as tm
from pandas.compat import (range, lrange, StringIO, lzip, u, cPickle,
product as cart_product, zip)
import pandas as pd
import pandas.index as _index
class TestMultiLevel(tm.TestCase):
_multiprocess_can_split_ = True
def setUp(self):
import warnings
warnings.filterwarnings(action='ignore', category=FutureWarning)
index = MultiIndex(levels=[['foo', 'bar', 'baz', 'qux'],
['one', 'two', 'three']],
labels=[[0, 0, 0, 1, 1, 2, 2, 3, 3, 3],
[0, 1, 2, 0, 1, 1, 2, 0, 1, 2]],
names=['first', 'second'])
self.frame = DataFrame(np.random.randn(10, 3), index=index,
columns=Index(['A', 'B', 'C'], name='exp'))
self.single_level = MultiIndex(levels=[['foo', 'bar', 'baz', 'qux']],
labels=[[0, 1, 2, 3]],
names=['first'])
# create test series object
arrays = [['bar', 'bar', 'baz', 'baz', 'qux', 'qux', 'foo', 'foo'],
['one', 'two', 'one', 'two', 'one', 'two', 'one', 'two']]
tuples = lzip(*arrays)
index = MultiIndex.from_tuples(tuples)
s = Series(randn(8), index=index)
s[3] = np.NaN
self.series = s
tm.N = 100
self.tdf = tm.makeTimeDataFrame()
self.ymd = self.tdf.groupby([lambda x: x.year, lambda x: x.month,
lambda x: x.day]).sum()
# use Int64Index, to make sure things work
self.ymd.index.set_levels([lev.astype('i8')
for lev in self.ymd.index.levels],
inplace=True)
self.ymd.index.set_names(['year', 'month', 'day'],
inplace=True)
def test_append(self):
a, b = self.frame[:5], self.frame[5:]
result = a.append(b)
tm.assert_frame_equal(result, self.frame)
result = a['A'].append(b['A'])
tm.assert_series_equal(result, self.frame['A'])
def test_dataframe_constructor(self):
multi = DataFrame(np.random.randn(4, 4),
index=[np.array(['a', 'a', 'b', 'b']),
np.array(['x', 'y', 'x', 'y'])])
tm.assert_isinstance(multi.index, MultiIndex)
self.assertNotIsInstance(multi.columns, MultiIndex)
multi = DataFrame(np.random.randn(4, 4),
columns=[['a', 'a', 'b', 'b'],
['x', 'y', 'x', 'y']])
tm.assert_isinstance(multi.columns, MultiIndex)
def test_series_constructor(self):
multi = Series(1., index=[np.array(['a', 'a', 'b', 'b']),
np.array(['x', 'y', 'x', 'y'])])
tm.assert_isinstance(multi.index, MultiIndex)
multi = Series(1., index=[['a', 'a', 'b', 'b'],
['x', 'y', 'x', 'y']])
tm.assert_isinstance(multi.index, MultiIndex)
multi = Series(lrange(4), index=[['a', 'a', 'b', 'b'],
['x', 'y', 'x', 'y']])
tm.assert_isinstance(multi.index, MultiIndex)
def test_reindex_level(self):
# axis=0
month_sums = self.ymd.sum(level='month')
result = month_sums.reindex(self.ymd.index, level=1)
expected = self.ymd.groupby(level='month').transform(np.sum)
assert_frame_equal(result, expected)
# Series
result = month_sums['A'].reindex(self.ymd.index, level=1)
expected = self.ymd['A'].groupby(level='month').transform(np.sum)
assert_series_equal(result, expected)
# axis=1
month_sums = self.ymd.T.sum(axis=1, level='month')
result = month_sums.reindex(columns=self.ymd.index, level=1)
expected = self.ymd.groupby(level='month').transform(np.sum).T
assert_frame_equal(result, expected)
def test_binops_level(self):
def _check_op(opname):
op = getattr(DataFrame, opname)
month_sums = self.ymd.sum(level='month')
result = op(self.ymd, month_sums, level='month')
broadcasted = self.ymd.groupby(level='month').transform(np.sum)
expected = op(self.ymd, broadcasted)
assert_frame_equal(result, expected)
# Series
op = getattr(Series, opname)
result = op(self.ymd['A'], month_sums['A'], level='month')
broadcasted = self.ymd['A'].groupby(
level='month').transform(np.sum)
expected = op(self.ymd['A'], broadcasted)
assert_series_equal(result, expected)
_check_op('sub')
_check_op('add')
_check_op('mul')
_check_op('div')
def test_pickle(self):
def _test_roundtrip(frame):
pickled = cPickle.dumps(frame)
unpickled = cPickle.loads(pickled)
assert_frame_equal(frame, unpickled)
_test_roundtrip(self.frame)
_test_roundtrip(self.frame.T)
_test_roundtrip(self.ymd)
_test_roundtrip(self.ymd.T)
def test_reindex(self):
reindexed = self.frame.ix[[('foo', 'one'), ('bar', 'one')]]
expected = self.frame.ix[[0, 3]]
assert_frame_equal(reindexed, expected)
def test_reindex_preserve_levels(self):
new_index = self.ymd.index[::10]
chunk = self.ymd.reindex(new_index)
self.assertIs(chunk.index, new_index)
chunk = self.ymd.ix[new_index]
self.assertIs(chunk.index, new_index)
ymdT = self.ymd.T
chunk = ymdT.reindex(columns=new_index)
self.assertIs(chunk.columns, new_index)
chunk = ymdT.ix[:, new_index]
self.assertIs(chunk.columns, new_index)
def test_sort_index_preserve_levels(self):
result = self.frame.sort_index()
self.assertEquals(result.index.names, self.frame.index.names)
def test_repr_to_string(self):
repr(self.frame)
repr(self.ymd)
repr(self.frame.T)
repr(self.ymd.T)
buf = StringIO()
self.frame.to_string(buf=buf)
self.ymd.to_string(buf=buf)
self.frame.T.to_string(buf=buf)
self.ymd.T.to_string(buf=buf)
def test_repr_name_coincide(self):
index = MultiIndex.from_tuples([('a', 0, 'foo'), ('b', 1, 'bar')],
names=['a', 'b', 'c'])
df = DataFrame({'value': [0, 1]}, index=index)
lines = repr(df).split('\n')
self.assert_(lines[2].startswith('a 0 foo'))
def test_getitem_simple(self):
df = self.frame.T
col = df['foo', 'one']
assert_almost_equal(col.values, df.values[:, 0])
self.assertRaises(KeyError, df.__getitem__, ('foo', 'four'))
self.assertRaises(KeyError, df.__getitem__, 'foobar')
def test_series_getitem(self):
s = self.ymd['A']
result = s[2000, 3]
result2 = s.ix[2000, 3]
expected = s.reindex(s.index[42:65])
expected.index = expected.index.droplevel(0).droplevel(0)
assert_series_equal(result, expected)
result = s[2000, 3, 10]
expected = s[49]
self.assertEquals(result, expected)
# fancy
result = s.ix[[(2000, 3, 10), (2000, 3, 13)]]
expected = s.reindex(s.index[49:51])
assert_series_equal(result, expected)
# key error
self.assertRaises(KeyError, s.__getitem__, (2000, 3, 4))
def test_series_getitem_corner(self):
s = self.ymd['A']
# don't segfault, GH #495
# out of bounds access
self.assertRaises(IndexError, s.__getitem__, len(self.ymd))
# generator
result = s[(x > 0 for x in s)]
expected = s[s > 0]
assert_series_equal(result, expected)
def test_series_setitem(self):
s = self.ymd['A']
s[2000, 3] = np.nan
self.assert_(isnull(s.values[42:65]).all())
self.assert_(notnull(s.values[:42]).all())
self.assert_(notnull(s.values[65:]).all())
s[2000, 3, 10] = np.nan
self.assert_(isnull(s[49]))
def test_series_slice_partial(self):
pass
def test_frame_getitem_setitem_boolean(self):
df = self.frame.T.copy()
values = df.values
result = df[df > 0]
expected = df.where(df > 0)
assert_frame_equal(result, expected)
df[df > 0] = 5
values[values > 0] = 5
assert_almost_equal(df.values, values)
df[df == 5] = 0
values[values == 5] = 0
assert_almost_equal(df.values, values)
# a df that needs alignment first
df[df[:-1] < 0] = 2
np.putmask(values[:-1], values[:-1] < 0, 2)
assert_almost_equal(df.values, values)
with assertRaisesRegexp(TypeError, 'boolean values only'):
df[df * 0] = 2
def test_frame_getitem_setitem_slice(self):
# getitem
result = self.frame.ix[:4]
expected = self.frame[:4]
assert_frame_equal(result, expected)
# setitem
cp = self.frame.copy()
cp.ix[:4] = 0
self.assert_((cp.values[:4] == 0).all())
self.assert_((cp.values[4:] != 0).all())
def test_frame_getitem_setitem_multislice(self):
levels = [['t1', 't2'], ['a', 'b', 'c']]
labels = [[0, 0, 0, 1, 1], [0, 1, 2, 0, 1]]
midx = MultiIndex(labels=labels, levels=levels, names=[None, 'id'])
df = DataFrame({'value': [1, 2, 3, 7, 8]}, index=midx)
result = df.ix[:, 'value']
assert_series_equal(df['value'], result)
result = df.ix[1:3, 'value']
assert_series_equal(df['value'][1:3], result)
result = df.ix[:, :]
assert_frame_equal(df, result)
result = df
df.ix[:, 'value'] = 10
result['value'] = 10
assert_frame_equal(df, result)
df.ix[:, :] = 10
assert_frame_equal(df, result)
def test_frame_getitem_multicolumn_empty_level(self):
f = DataFrame({'a': ['1', '2', '3'],
'b': ['2', '3', '4']})
f.columns = [['level1 item1', 'level1 item2'],
['', 'level2 item2'],
['level3 item1', 'level3 item2']]
result = f['level1 item1']
expected = DataFrame([['1'], ['2'], ['3']], index=f.index,
columns=['level3 item1'])
assert_frame_equal(result, expected)
def test_frame_setitem_multi_column(self):
df = DataFrame(randn(10, 4), columns=[['a', 'a', 'b', 'b'],
[0, 1, 0, 1]])
cp = df.copy()
cp['a'] = cp['b']
assert_frame_equal(cp['a'], cp['b'])
# set with ndarray
cp = df.copy()
cp['a'] = cp['b'].values
assert_frame_equal(cp['a'], cp['b'])
#----------------------------------------
# #1803
columns = MultiIndex.from_tuples([('A', '1'), ('A', '2'), ('B', '1')])
df = DataFrame(index=[1, 3, 5], columns=columns)
# Works, but adds a column instead of updating the two existing ones
df['A'] = 0.0 # Doesn't work
self.assertTrue((df['A'].values == 0).all())
# it broadcasts
df['B', '1'] = [1, 2, 3]
df['A'] = df['B', '1']
assert_series_equal(df['A', '1'], df['B', '1'])
assert_series_equal(df['A', '2'], df['B', '1'])
def test_getitem_tuple_plus_slice(self):
# GH #671
df = DataFrame({'a': lrange(10),
'b': lrange(10),
'c': np.random.randn(10),
'd': np.random.randn(10)})
idf = df.set_index(['a', 'b'])
result = idf.ix[(0, 0), :]
expected = idf.ix[0, 0]
expected2 = idf.xs((0, 0))
assert_series_equal(result, expected)
assert_series_equal(result, expected2)
def test_getitem_setitem_tuple_plus_columns(self):
# GH #1013
df = self.ymd[:5]
result = df.ix[(2000, 1, 6), ['A', 'B', 'C']]
expected = df.ix[2000, 1, 6][['A', 'B', 'C']]
assert_series_equal(result, expected)
def test_getitem_multilevel_index_tuple_unsorted(self):
index_columns = list("abc")
df = DataFrame([[0, 1, 0, "x"], [0, 0, 1, "y"]],
columns=index_columns + ["data"])
df = df.set_index(index_columns)
query_index = df.index[:1]
rs = df.ix[query_index, "data"]
xp = Series(['x'], index=MultiIndex.from_tuples([(0, 1, 0)]))
assert_series_equal(rs, xp)
def test_xs(self):
xs = self.frame.xs(('bar', 'two'))
xs2 = self.frame.ix[('bar', 'two')]
assert_series_equal(xs, xs2)
assert_almost_equal(xs.values, self.frame.values[4])
# GH 6574
# missing values in returned index should be preserved
acc = [
('a','abcde',1),
('b','bbcde',2),
('y','yzcde',25),
('z','xbcde',24),
('z',None,26),
('z','zbcde',25),
('z','ybcde',26),
]
df = DataFrame(acc, columns=['a1','a2','cnt']).set_index(['a1','a2'])
expected = DataFrame({ 'cnt' : [24,26,25,26] }, index=Index(['xbcde',np.nan,'zbcde','ybcde'],name='a2'))
result = df.xs('z',level='a1')
assert_frame_equal(result, expected)
def test_xs_partial(self):
result = self.frame.xs('foo')
result2 = self.frame.ix['foo']
expected = self.frame.T['foo'].T
assert_frame_equal(result, expected)
assert_frame_equal(result, result2)
result = self.ymd.xs((2000, 4))
expected = self.ymd.ix[2000, 4]
assert_frame_equal(result, expected)
# ex from #1796
index = MultiIndex(levels=[['foo', 'bar'], ['one', 'two'], [-1, 1]],
labels=[[0, 0, 0, 0, 1, 1, 1, 1],
[0, 0, 1, 1, 0, 0, 1, 1],
[0, 1, 0, 1, 0, 1, 0, 1]])
df = DataFrame(np.random.randn(8, 4), index=index,
columns=list('abcd'))
result = df.xs(['foo', 'one'])
expected = df.ix['foo', 'one']
assert_frame_equal(result, expected)
def test_xs_level(self):
result = self.frame.xs('two', level='second')
expected = self.frame[self.frame.index.get_level_values(1) == 'two']
expected.index = expected.index.droplevel(1)
assert_frame_equal(result, expected)
index = MultiIndex.from_tuples([('x', 'y', 'z'), ('a', 'b', 'c'),
('p', 'q', 'r')])
df = DataFrame(np.random.randn(3, 5), index=index)
result = df.xs('c', level=2)
expected = df[1:2]
expected.index = expected.index.droplevel(2)
assert_frame_equal(result, expected)
# this is a copy in 0.14
result = self.frame.xs('two', level='second')
# setting this will give a SettingWithCopyError
# as we are trying to write a view
def f(x):
x[:] = 10
self.assertRaises(com.SettingWithCopyError, f, result)
def test_xs_level_multiple(self):
from pandas import read_table
text = """ A B C D E
one two three four
a b 10.0032 5 -0.5109 -2.3358 -0.4645 0.05076 0.3640
a q 20 4 0.4473 1.4152 0.2834 1.00661 0.1744
x q 30 3 -0.6662 -0.5243 -0.3580 0.89145 2.5838"""
df = read_table(StringIO(text), sep='\s+', engine='python')
result = df.xs(('a', 4), level=['one', 'four'])
expected = df.xs('a').xs(4, level='four')
assert_frame_equal(result, expected)
# this is a copy in 0.14
result = df.xs(('a', 4), level=['one', 'four'])
# setting this will give a SettingWithCopyError
# as we are trying to write a view
def f(x):
x[:] = 10
self.assertRaises(com.SettingWithCopyError, f, result)
# GH2107
dates = lrange(20111201, 20111205)
ids = 'abcde'
idx = MultiIndex.from_tuples([x for x in cart_product(dates, ids)])
idx.names = ['date', 'secid']
df = DataFrame(np.random.randn(len(idx), 3), idx, ['X', 'Y', 'Z'])
rs = df.xs(20111201, level='date')
xp = df.ix[20111201, :]
assert_frame_equal(rs, xp)
def test_xs_level0(self):
from pandas import read_table
text = """ A B C D E
one two three four
a b 10.0032 5 -0.5109 -2.3358 -0.4645 0.05076 0.3640
a q 20 4 0.4473 1.4152 0.2834 1.00661 0.1744
x q 30 3 -0.6662 -0.5243 -0.3580 0.89145 2.5838"""
df = read_table(StringIO(text), sep='\s+', engine='python')
result = df.xs('a', level=0)
expected = df.xs('a')
self.assertEqual(len(result), 2)
assert_frame_equal(result, expected)
def test_xs_level_series(self):
s = self.frame['A']
result = s[:, 'two']
expected = self.frame.xs('two', level=1)['A']
assert_series_equal(result, expected)
s = self.ymd['A']
result = s[2000, 5]
expected = self.ymd.ix[2000, 5]['A']
assert_series_equal(result, expected)
# not implementing this for now
self.assertRaises(TypeError, s.__getitem__, (2000, slice(3, 4)))
# result = s[2000, 3:4]
# lv =s.index.get_level_values(1)
# expected = s[(lv == 3) | (lv == 4)]
# expected.index = expected.index.droplevel(0)
# assert_series_equal(result, expected)
# can do this though
def test_get_loc_single_level(self):
s = Series(np.random.randn(len(self.single_level)),
index=self.single_level)
for k in self.single_level.values:
s[k]
def test_getitem_toplevel(self):
df = self.frame.T
result = df['foo']
expected = df.reindex(columns=df.columns[:3])
expected.columns = expected.columns.droplevel(0)
assert_frame_equal(result, expected)
result = df['bar']
result2 = df.ix[:, 'bar']
expected = df.reindex(columns=df.columns[3:5])
expected.columns = expected.columns.droplevel(0)
assert_frame_equal(result, expected)
assert_frame_equal(result, result2)
def test_getitem_setitem_slice_integers(self):
index = MultiIndex(levels=[[0, 1, 2], [0, 2]],
labels=[[0, 0, 1, 1, 2, 2],
[0, 1, 0, 1, 0, 1]])
frame = DataFrame(np.random.randn(len(index), 4), index=index,
columns=['a', 'b', 'c', 'd'])
res = frame.ix[1:2]
exp = frame.reindex(frame.index[2:])
assert_frame_equal(res, exp)
frame.ix[1:2] = 7
self.assert_((frame.ix[1:2] == 7).values.all())
series = Series(np.random.randn(len(index)), index=index)
res = series.ix[1:2]
exp = series.reindex(series.index[2:])
assert_series_equal(res, exp)
series.ix[1:2] = 7
self.assert_((series.ix[1:2] == 7).values.all())
def test_getitem_int(self):
levels = [[0, 1], [0, 1, 2]]
labels = [[0, 0, 0, 1, 1, 1], [0, 1, 2, 0, 1, 2]]
index = MultiIndex(levels=levels, labels=labels)
frame = DataFrame(np.random.randn(6, 2), index=index)
result = frame.ix[1]
expected = frame[-3:]
expected.index = expected.index.droplevel(0)
assert_frame_equal(result, expected)
# raises exception
self.assertRaises(KeyError, frame.ix.__getitem__, 3)
# however this will work
result = self.frame.ix[2]
expected = self.frame.xs(self.frame.index[2])
assert_series_equal(result, expected)
def test_getitem_partial(self):
ymd = self.ymd.T
result = ymd[2000, 2]
expected = ymd.reindex(columns=ymd.columns[ymd.columns.labels[1] == 1])
expected.columns = expected.columns.droplevel(0).droplevel(0)
assert_frame_equal(result, expected)
def test_getitem_slice_not_sorted(self):
df = self.frame.sortlevel(1).T
# buglet with int typechecking
result = df.ix[:, :np.int32(3)]
expected = df.reindex(columns=df.columns[:3])
assert_frame_equal(result, expected)
def test_setitem_change_dtype(self):
dft = self.frame.T
s = dft['foo', 'two']
dft['foo', 'two'] = s > s.median()
assert_series_equal(dft['foo', 'two'], s > s.median())
# tm.assert_isinstance(dft._data.blocks[1].items, MultiIndex)
reindexed = dft.reindex(columns=[('foo', 'two')])
assert_series_equal(reindexed['foo', 'two'], s > s.median())
def test_frame_setitem_ix(self):
self.frame.ix[('bar', 'two'), 'B'] = 5
self.assertEquals(self.frame.ix[('bar', 'two'), 'B'], 5)
# with integer labels
df = self.frame.copy()
df.columns = lrange(3)
df.ix[('bar', 'two'), 1] = 7
self.assertEquals(df.ix[('bar', 'two'), 1], 7)
def test_fancy_slice_partial(self):
result = self.frame.ix['bar':'baz']
expected = self.frame[3:7]
assert_frame_equal(result, expected)
result = self.ymd.ix[(2000, 2):(2000, 4)]
lev = self.ymd.index.labels[1]
expected = self.ymd[(lev >= 1) & (lev <= 3)]
assert_frame_equal(result, expected)
def test_getitem_partial_column_select(self):
idx = MultiIndex(labels=[[0, 0, 0], [0, 1, 1], [1, 0, 1]],
levels=[['a', 'b'], ['x', 'y'], ['p', 'q']])
df = DataFrame(np.random.rand(3, 2), index=idx)
result = df.ix[('a', 'y'), :]
expected = df.ix[('a', 'y')]
assert_frame_equal(result, expected)
result = df.ix[('a', 'y'), [1, 0]]
expected = df.ix[('a', 'y')][[1, 0]]
assert_frame_equal(result, expected)
self.assertRaises(KeyError, df.ix.__getitem__,
(('a', 'foo'), slice(None, None)))
def test_sortlevel(self):
df = self.frame.copy()
df.index = np.arange(len(df))
assertRaisesRegexp(TypeError, 'hierarchical index', df.sortlevel, 0)
# axis=1
# series
a_sorted = self.frame['A'].sortlevel(0)
with assertRaisesRegexp(TypeError, 'hierarchical index'):
self.frame.reset_index()['A'].sortlevel()
# preserve names
self.assertEquals(a_sorted.index.names, self.frame.index.names)
# inplace
rs = self.frame.copy()
rs.sortlevel(0, inplace=True)
assert_frame_equal(rs, self.frame.sortlevel(0))
def test_sortlevel_large_cardinality(self):
# #2684 (int64)
index = MultiIndex.from_arrays([np.arange(4000)]*3)
df = DataFrame(np.random.randn(4000), index=index, dtype = np.int64)
# it works!
result = df.sortlevel(0)
self.assertTrue(result.index.lexsort_depth == 3)
# #2684 (int32)
index = MultiIndex.from_arrays([np.arange(4000)]*3)
df = DataFrame(np.random.randn(4000), index=index, dtype = np.int32)
# it works!
result = df.sortlevel(0)
self.assert_((result.dtypes.values == df.dtypes.values).all() == True)
self.assertTrue(result.index.lexsort_depth == 3)
def test_delevel_infer_dtype(self):
tuples = [tuple for tuple in cart_product(['foo', 'bar'],
[10, 20], [1.0, 1.1])]
index = MultiIndex.from_tuples(tuples,
names=['prm0', 'prm1', 'prm2'])
df = DataFrame(np.random.randn(8, 3), columns=['A', 'B', 'C'],
index=index)
deleveled = df.reset_index()
self.assert_(com.is_integer_dtype(deleveled['prm1']))
self.assert_(com.is_float_dtype(deleveled['prm2']))
def test_reset_index_with_drop(self):
deleveled = self.ymd.reset_index(drop=True)
self.assertEquals(len(deleveled.columns), len(self.ymd.columns))
deleveled = self.series.reset_index()
tm.assert_isinstance(deleveled, DataFrame)
self.assertEqual(len(deleveled.columns),
len(self.series.index.levels) + 1)
deleveled = self.series.reset_index(drop=True)
tm.assert_isinstance(deleveled, Series)
def test_sortlevel_by_name(self):
self.frame.index.names = ['first', 'second']
result = self.frame.sortlevel(level='second')
expected = self.frame.sortlevel(level=1)
assert_frame_equal(result, expected)
def test_sortlevel_mixed(self):
sorted_before = self.frame.sortlevel(1)
df = self.frame.copy()
df['foo'] = 'bar'
sorted_after = df.sortlevel(1)
assert_frame_equal(sorted_before, sorted_after.drop(['foo'], axis=1))
dft = self.frame.T
sorted_before = dft.sortlevel(1, axis=1)
dft['foo', 'three'] = 'bar'
sorted_after = dft.sortlevel(1, axis=1)
assert_frame_equal(sorted_before.drop([('foo', 'three')], axis=1),
sorted_after.drop([('foo', 'three')], axis=1))
def test_count_level(self):
def _check_counts(frame, axis=0):
index = frame._get_axis(axis)
for i in range(index.nlevels):
result = frame.count(axis=axis, level=i)
expected = frame.groupby(axis=axis, level=i).count(axis=axis)
expected = expected.reindex_like(result).astype('i8')
assert_frame_equal(result, expected)
self.frame.ix[1, [1, 2]] = np.nan
self.frame.ix[7, [0, 1]] = np.nan
self.ymd.ix[1, [1, 2]] = np.nan
self.ymd.ix[7, [0, 1]] = np.nan
_check_counts(self.frame)
_check_counts(self.ymd)
_check_counts(self.frame.T, axis=1)
_check_counts(self.ymd.T, axis=1)
# can't call with level on regular DataFrame
df = tm.makeTimeDataFrame()
assertRaisesRegexp(TypeError, 'hierarchical', df.count, level=0)
self.frame['D'] = 'foo'
result = self.frame.count(level=0, numeric_only=True)
assert_almost_equal(result.columns, ['A', 'B', 'C'])
def test_count_level_series(self):
index = MultiIndex(levels=[['foo', 'bar', 'baz'],
['one', 'two', 'three', 'four']],
labels=[[0, 0, 0, 2, 2],
[2, 0, 1, 1, 2]])
s = Series(np.random.randn(len(index)), index=index)
result = s.count(level=0)
expected = s.groupby(level=0).count()
assert_series_equal(result.astype('f8'),
expected.reindex(result.index).fillna(0))
result = s.count(level=1)
expected = s.groupby(level=1).count()
assert_series_equal(result.astype('f8'),
expected.reindex(result.index).fillna(0))
def test_count_level_corner(self):
s = self.frame['A'][:0]
result = s.count(level=0)
expected = Series(0, index=s.index.levels[0])
assert_series_equal(result, expected)
df = self.frame[:0]
result = df.count(level=0)
expected = DataFrame({}, index=s.index.levels[0],
columns=df.columns).fillna(0).astype(np.int64)
assert_frame_equal(result, expected)
def test_unstack(self):
# just check that it works for now
unstacked = self.ymd.unstack()
unstacked2 = unstacked.unstack()
# test that ints work
unstacked = self.ymd.astype(int).unstack()
# test that int32 work
unstacked = self.ymd.astype(np.int32).unstack()
def test_unstack_multiple_no_empty_columns(self):
index = MultiIndex.from_tuples([(0, 'foo', 0), (0, 'bar', 0),
(1, 'baz', 1), (1, 'qux', 1)])
s = Series(np.random.randn(4), index=index)
unstacked = s.unstack([1, 2])
expected = unstacked.dropna(axis=1, how='all')
assert_frame_equal(unstacked, expected)
def test_stack(self):
# regular roundtrip
unstacked = self.ymd.unstack()
restacked = unstacked.stack()
assert_frame_equal(restacked, self.ymd)
unlexsorted = self.ymd.sortlevel(2)
unstacked = unlexsorted.unstack(2)
restacked = unstacked.stack()
assert_frame_equal(restacked.sortlevel(0), self.ymd)
unlexsorted = unlexsorted[::-1]
unstacked = unlexsorted.unstack(1)
restacked = unstacked.stack().swaplevel(1, 2)
assert_frame_equal(restacked.sortlevel(0), self.ymd)
unlexsorted = unlexsorted.swaplevel(0, 1)
unstacked = unlexsorted.unstack(0).swaplevel(0, 1, axis=1)
restacked = unstacked.stack(0).swaplevel(1, 2)
assert_frame_equal(restacked.sortlevel(0), self.ymd)
# columns unsorted
unstacked = self.ymd.unstack()
unstacked = unstacked.sort(axis=1, ascending=False)
restacked = unstacked.stack()
assert_frame_equal(restacked, self.ymd)
# more than 2 levels in the columns
unstacked = self.ymd.unstack(1).unstack(1)
result = unstacked.stack(1)
expected = self.ymd.unstack()
assert_frame_equal(result, expected)
result = unstacked.stack(2)
expected = self.ymd.unstack(1)
assert_frame_equal(result, expected)
result = unstacked.stack(0)
expected = self.ymd.stack().unstack(1).unstack(1)
assert_frame_equal(result, expected)
# not all levels present in each echelon
unstacked = self.ymd.unstack(2).ix[:, ::3]
stacked = unstacked.stack().stack()
ymd_stacked = self.ymd.stack()
assert_series_equal(stacked, ymd_stacked.reindex(stacked.index))
# stack with negative number
result = self.ymd.unstack(0).stack(-2)
expected = self.ymd.unstack(0).stack(0)
def test_unstack_odd_failure(self):
data = """day,time,smoker,sum,len
Fri,Dinner,No,8.25,3.
Fri,Dinner,Yes,27.03,9
Fri,Lunch,No,3.0,1
Fri,Lunch,Yes,13.68,6
Sat,Dinner,No,139.63,45
Sat,Dinner,Yes,120.77,42
Sun,Dinner,No,180.57,57
Sun,Dinner,Yes,66.82,19
Thur,Dinner,No,3.0,1
Thur,Lunch,No,117.32,44
Thur,Lunch,Yes,51.51,17"""
df = pd.read_csv(StringIO(data)).set_index(['day', 'time', 'smoker'])
# it works, #2100
result = df.unstack(2)
recons = result.stack()
assert_frame_equal(recons, df)
def test_stack_mixed_dtype(self):
df = self.frame.T
df['foo', 'four'] = 'foo'
df = df.sortlevel(1, axis=1)
stacked = df.stack()
assert_series_equal(stacked['foo'], df['foo'].stack())
self.assertEqual(stacked['bar'].dtype, np.float_)
def test_unstack_bug(self):
df = DataFrame({'state': ['naive', 'naive', 'naive',
'activ', 'activ', 'activ'],
'exp': ['a', 'b', 'b', 'b', 'a', 'a'],
'barcode': [1, 2, 3, 4, 1, 3],
'v': ['hi', 'hi', 'bye', 'bye', 'bye', 'peace'],
'extra': np.arange(6.)})
result = df.groupby(['state', 'exp', 'barcode', 'v']).apply(len)
unstacked = result.unstack()
restacked = unstacked.stack()
assert_series_equal(restacked,
result.reindex(restacked.index).astype(float))
def test_stack_unstack_preserve_names(self):
unstacked = self.frame.unstack()
self.assertEquals(unstacked.index.name, 'first')
self.assertEquals(unstacked.columns.names, ['exp', 'second'])
restacked = unstacked.stack()
self.assertEquals(restacked.index.names, self.frame.index.names)
def test_unstack_level_name(self):
result = self.frame.unstack('second')
expected = self.frame.unstack(level=1)
assert_frame_equal(result, expected)
def test_stack_level_name(self):
unstacked = self.frame.unstack('second')
result = unstacked.stack('exp')
expected = self.frame.unstack().stack(0)
assert_frame_equal(result, expected)
result = self.frame.stack('exp')
expected = self.frame.stack()
assert_series_equal(result, expected)
def test_stack_unstack_multiple(self):
unstacked = self.ymd.unstack(['year', 'month'])
expected = self.ymd.unstack('year').unstack('month')
assert_frame_equal(unstacked, expected)
self.assertEquals(unstacked.columns.names,
expected.columns.names)
# series
s = self.ymd['A']
s_unstacked = s.unstack(['year', 'month'])
assert_frame_equal(s_unstacked, expected['A'])
restacked = unstacked.stack(['year', 'month'])
restacked = restacked.swaplevel(0, 1).swaplevel(1, 2)
restacked = restacked.sortlevel(0)
assert_frame_equal(restacked, self.ymd)
self.assertEquals(restacked.index.names, self.ymd.index.names)
# GH #451
unstacked = self.ymd.unstack([1, 2])
expected = self.ymd.unstack(1).unstack(1).dropna(axis=1, how='all')
assert_frame_equal(unstacked, expected)
unstacked = self.ymd.unstack([2, 1])
expected = self.ymd.unstack(2).unstack(1).dropna(axis=1, how='all')
assert_frame_equal(unstacked, expected.ix[:, unstacked.columns])
def test_unstack_period_series(self):
# GH 4342
idx1 = pd.PeriodIndex(['2013-01', '2013-01', '2013-02', '2013-02',
'2013-03', '2013-03'], freq='M', name='period')
idx2 = Index(['A', 'B'] * 3, name='str')
value = [1, 2, 3, 4, 5, 6]
idx = MultiIndex.from_arrays([idx1, idx2])
s = Series(value, index=idx)
result1 = s.unstack()
result2 = s.unstack(level=1)
result3 = s.unstack(level=0)
e_idx = pd.PeriodIndex(['2013-01', '2013-02', '2013-03'], freq='M', name='period')
expected = DataFrame({'A': [1, 3, 5], 'B': [2, 4, 6]}, index=e_idx,
columns=['A', 'B'])
expected.columns.name = 'str'
assert_frame_equal(result1, expected)
assert_frame_equal(result2, expected)
assert_frame_equal(result3, expected.T)
idx1 = pd.PeriodIndex(['2013-01', '2013-01', '2013-02', '2013-02',
'2013-03', '2013-03'], freq='M', name='period1')
idx2 = pd.PeriodIndex(['2013-12', '2013-11', '2013-10', '2013-09',
'2013-08', '2013-07'], freq='M', name='period2')
idx = pd.MultiIndex.from_arrays([idx1, idx2])
s = Series(value, index=idx)
result1 = s.unstack()
result2 = s.unstack(level=1)
result3 = s.unstack(level=0)
e_idx = pd.PeriodIndex(['2013-01', '2013-02', '2013-03'], freq='M', name='period1')
e_cols = pd.PeriodIndex(['2013-07', '2013-08', '2013-09', '2013-10',
'2013-11', '2013-12'], freq='M', name='period2')
expected = DataFrame([[np.nan, np.nan, np.nan, np.nan, 2, 1],
[np.nan, np.nan, 4, 3, np.nan, np.nan],
[6, 5, np.nan, np.nan, np.nan, np.nan]],
index=e_idx, columns=e_cols)
assert_frame_equal(result1, expected)
assert_frame_equal(result2, expected)
assert_frame_equal(result3, expected.T)
def test_unstack_period_frame(self):
# GH 4342
idx1 = pd.PeriodIndex(['2014-01', '2014-02', '2014-02', '2014-02', '2014-01', '2014-01'],
freq='M', name='period1')
idx2 = pd.PeriodIndex(['2013-12', '2013-12', '2014-02', '2013-10', '2013-10', '2014-02'],
freq='M', name='period2')
value = {'A': [1, 2, 3, 4, 5, 6], 'B': [6, 5, 4, 3, 2, 1]}
idx = pd.MultiIndex.from_arrays([idx1, idx2])
df = pd.DataFrame(value, index=idx)
result1 = df.unstack()
result2 = df.unstack(level=1)
result3 = df.unstack(level=0)
e_1 = pd.PeriodIndex(['2014-01', '2014-02'], freq='M', name='period1')
e_2 = pd.PeriodIndex(['2013-10', '2013-12', '2014-02', '2013-10',
'2013-12', '2014-02'], freq='M', name='period2')
e_cols = pd.MultiIndex.from_arrays(['A A A B B B'.split(), e_2])
expected = DataFrame([[5, 1, 6, 2, 6, 1], [4, 2, 3, 3, 5, 4]],
index=e_1, columns=e_cols)
assert_frame_equal(result1, expected)
assert_frame_equal(result2, expected)
e_1 = pd.PeriodIndex(['2014-01', '2014-02', '2014-01',
'2014-02'], freq='M', name='period1')
e_2 = pd.PeriodIndex(['2013-10', '2013-12', '2014-02'], freq='M', name='period2')
e_cols = pd.MultiIndex.from_arrays(['A A B B'.split(), e_1])
expected = DataFrame([[5, 4, 2, 3], [1, 2, 6, 5], [6, 3, 1, 4]],
index=e_2, columns=e_cols)
assert_frame_equal(result3, expected)
def test_stack_multiple_bug(self):
""" bug when some uniques are not present in the data #3170"""
id_col = ([1] * 3) + ([2] * 3)
name = (['a'] * 3) + (['b'] * 3)
date = pd.to_datetime(['2013-01-03', '2013-01-04', '2013-01-05'] * 2)
var1 = np.random.randint(0, 100, 6)
df = DataFrame(dict(ID=id_col, NAME=name, DATE=date, VAR1=var1))
multi = df.set_index(['DATE', 'ID'])
multi.columns.name = 'Params'
unst = multi.unstack('ID')
down = unst.resample('W-THU')
rs = down.stack('ID')
xp = unst.ix[:, ['VAR1']].resample('W-THU').stack('ID')
xp.columns.name = 'Params'
assert_frame_equal(rs, xp)
def test_stack_dropna(self):
# GH #3997
df = pd.DataFrame({'A': ['a1', 'a2'],
'B': ['b1', 'b2'],
'C': [1, 1]})
df = df.set_index(['A', 'B'])
stacked = df.unstack().stack(dropna=False)
self.assertTrue(len(stacked) > len(stacked.dropna()))
stacked = df.unstack().stack(dropna=True)
assert_frame_equal(stacked, stacked.dropna())
def test_unstack_multiple_hierarchical(self):
df = DataFrame(index=[[0, 0, 0, 0, 1, 1, 1, 1],
[0, 0, 1, 1, 0, 0, 1, 1],
[0, 1, 0, 1, 0, 1, 0, 1]],
columns=[[0, 0, 1, 1], [0, 1, 0, 1]])
df.index.names = ['a', 'b', 'c']
df.columns.names = ['d', 'e']
# it works!
df.unstack(['b', 'c'])
def test_groupby_transform(self):
s = self.frame['A']
grouper = s.index.get_level_values(0)
grouped = s.groupby(grouper)
applied = grouped.apply(lambda x: x * 2)
expected = grouped.transform(lambda x: x * 2)
assert_series_equal(applied.reindex(expected.index), expected)
def test_unstack_sparse_keyspace(self):
# memory problems with naive impl #2278
# Generate Long File & Test Pivot
NUM_ROWS = 1000
df = DataFrame({'A': np.random.randint(100, size=NUM_ROWS),
'B': np.random.randint(300, size=NUM_ROWS),
'C': np.random.randint(-7, 7, size=NUM_ROWS),
'D': np.random.randint(-19, 19, size=NUM_ROWS),
'E': np.random.randint(3000, size=NUM_ROWS),
'F': np.random.randn(NUM_ROWS)})
idf = df.set_index(['A', 'B', 'C', 'D', 'E'])
# it works! is sufficient
idf.unstack('E')
def test_unstack_unobserved_keys(self):
# related to #2278 refactoring
levels = [[0, 1], [0, 1, 2, 3]]
labels = [[0, 0, 1, 1], [0, 2, 0, 2]]
index = MultiIndex(levels, labels)
df = DataFrame(np.random.randn(4, 2), index=index)
result = df.unstack()
self.assertEquals(len(result.columns), 4)
recons = result.stack()
assert_frame_equal(recons, df)
def test_groupby_corner(self):
midx = MultiIndex(levels=[['foo'], ['bar'], ['baz']],
labels=[[0], [0], [0]], names=['one', 'two', 'three'])
df = DataFrame([np.random.rand(4)], columns=['a', 'b', 'c', 'd'],
index=midx)
# should work
df.groupby(level='three')
def test_groupby_level_no_obs(self):
# #1697
midx = MultiIndex.from_tuples([('f1', 's1'), ('f1', 's2'),
('f2', 's1'), ('f2', 's2'),
('f3', 's1'), ('f3', 's2')])
df = DataFrame(
[[1, 2, 3, 4, 5, 6], [7, 8, 9, 10, 11, 12]], columns=midx)
df1 = df.select(lambda u: u[0] in ['f2', 'f3'], axis=1)
grouped = df1.groupby(axis=1, level=0)
result = grouped.sum()
self.assert_((result.columns == ['f2', 'f3']).all())
def test_join(self):
a = self.frame.ix[:5, ['A']]
b = self.frame.ix[2:, ['B', 'C']]
joined = a.join(b, how='outer').reindex(self.frame.index)
expected = self.frame.copy()
expected.values[np.isnan(joined.values)] = np.nan
self.assert_(not np.isnan(joined.values).all())
assert_frame_equal(joined, expected, check_names=False) # TODO what should join do with names ?
def test_swaplevel(self):
swapped = self.frame['A'].swaplevel(0, 1)
swapped2 = self.frame['A'].swaplevel('first', 'second')
self.assert_(not swapped.index.equals(self.frame.index))
assert_series_equal(swapped, swapped2)
back = swapped.swaplevel(0, 1)
back2 = swapped.swaplevel('second', 'first')
self.assert_(back.index.equals(self.frame.index))
assert_series_equal(back, back2)
ft = self.frame.T
swapped = ft.swaplevel('first', 'second', axis=1)
exp = self.frame.swaplevel('first', 'second').T
assert_frame_equal(swapped, exp)
def test_swaplevel_panel(self):
panel = Panel({'ItemA': self.frame,
'ItemB': self.frame * 2})
result = panel.swaplevel(0, 1, axis='major')
expected = panel.copy()
expected.major_axis = expected.major_axis.swaplevel(0, 1)
tm.assert_panel_equal(result, expected)
def test_reorder_levels(self):
result = self.ymd.reorder_levels(['month', 'day', 'year'])
expected = self.ymd.swaplevel(0, 1).swaplevel(1, 2)
assert_frame_equal(result, expected)
result = self.ymd['A'].reorder_levels(['month', 'day', 'year'])
expected = self.ymd['A'].swaplevel(0, 1).swaplevel(1, 2)
assert_series_equal(result, expected)
result = self.ymd.T.reorder_levels(['month', 'day', 'year'], axis=1)
expected = self.ymd.T.swaplevel(0, 1, axis=1).swaplevel(1, 2, axis=1)
assert_frame_equal(result, expected)
with assertRaisesRegexp(TypeError, 'hierarchical axis'):
self.ymd.reorder_levels([1, 2], axis=1)
with assertRaisesRegexp(IndexError, 'Too many levels'):
self.ymd.index.reorder_levels([1, 2, 3])
def test_insert_index(self):
df = self.ymd[:5].T
df[2000, 1, 10] = df[2000, 1, 7]
tm.assert_isinstance(df.columns, MultiIndex)
self.assert_((df[2000, 1, 10] == df[2000, 1, 7]).all())
def test_alignment(self):
x = Series(data=[1, 2, 3],
index=MultiIndex.from_tuples([("A", 1), ("A", 2), ("B", 3)]))
y = Series(data=[4, 5, 6],
index=MultiIndex.from_tuples([("Z", 1), ("Z", 2), ("B", 3)]))
res = x - y
exp_index = x.index.union(y.index)
exp = x.reindex(exp_index) - y.reindex(exp_index)
assert_series_equal(res, exp)
# hit non-monotonic code path
res = x[::-1] - y[::-1]
exp_index = x.index.union(y.index)
exp = x.reindex(exp_index) - y.reindex(exp_index)
assert_series_equal(res, exp)
def test_is_lexsorted(self):
levels = [[0, 1], [0, 1, 2]]
index = MultiIndex(levels=levels,
labels=[[0, 0, 0, 1, 1, 1],
[0, 1, 2, 0, 1, 2]])
self.assert_(index.is_lexsorted())
index = MultiIndex(levels=levels,
labels=[[0, 0, 0, 1, 1, 1],
[0, 1, 2, 0, 2, 1]])
self.assert_(not index.is_lexsorted())
index = MultiIndex(levels=levels,
labels=[[0, 0, 1, 0, 1, 1],
[0, 1, 0, 2, 2, 1]])
self.assert_(not index.is_lexsorted())
self.assertEqual(index.lexsort_depth, 0)
def test_frame_getitem_view(self):
df = self.frame.T.copy()
# this works because we are modifying the underlying array
# really a no-no
df['foo'].values[:] = 0
self.assert_((df['foo'].values == 0).all())
# but not if it's mixed-type
df['foo', 'four'] = 'foo'
df = df.sortlevel(0, axis=1)
# this will work, but will raise/warn as it's chained assignment
def f():
df['foo']['one'] = 2
return df
self.assertRaises(com.SettingWithCopyError, f)
try:
df = f()
except:
pass
self.assert_((df['foo', 'one'] == 0).all())
def test_frame_getitem_not_sorted(self):
df = self.frame.T
df['foo', 'four'] = 'foo'
arrays = [np.array(x) for x in zip(*df.columns._tuple_index)]
result = df['foo']
result2 = df.ix[:, 'foo']
expected = df.reindex(columns=df.columns[arrays[0] == 'foo'])
expected.columns = expected.columns.droplevel(0)
assert_frame_equal(result, expected)
assert_frame_equal(result2, expected)
df = df.T
result = df.xs('foo')
result2 = df.ix['foo']
expected = df.reindex(df.index[arrays[0] == 'foo'])
expected.index = expected.index.droplevel(0)
assert_frame_equal(result, expected)
assert_frame_equal(result2, expected)
def test_series_getitem_not_sorted(self):
arrays = [['bar', 'bar', 'baz', 'baz', 'qux', 'qux', 'foo', 'foo'],
['one', 'two', 'one', 'two', 'one', 'two', 'one', 'two']]
tuples = lzip(*arrays)
index = MultiIndex.from_tuples(tuples)
s = Series(randn(8), index=index)
arrays = [np.array(x) for x in zip(*index._tuple_index)]
result = s['qux']
result2 = s.ix['qux']
expected = s[arrays[0] == 'qux']
expected.index = expected.index.droplevel(0)
assert_series_equal(result, expected)
assert_series_equal(result2, expected)
def test_count(self):
frame = self.frame.copy()
frame.index.names = ['a', 'b']
result = frame.count(level='b')
expect = self.frame.count(level=1)
assert_frame_equal(result, expect, check_names=False)
result = frame.count(level='a')
expect = self.frame.count(level=0)
assert_frame_equal(result, expect, check_names=False)
series = self.series.copy()
series.index.names = ['a', 'b']
result = series.count(level='b')
expect = self.series.count(level=1)
assert_series_equal(result, expect)
result = series.count(level='a')
expect = self.series.count(level=0)
assert_series_equal(result, expect)
self.assertRaises(KeyError, series.count, 'x')
self.assertRaises(KeyError, frame.count, level='x')
AGG_FUNCTIONS = ['sum', 'prod', 'min', 'max', 'median', 'mean', 'skew',
'mad', 'std', 'var']
def test_series_group_min_max(self):
for op, level, skipna in cart_product(self.AGG_FUNCTIONS,
lrange(2),
[False, True]):
grouped = self.series.groupby(level=level)
aggf = lambda x: getattr(x, op)(skipna=skipna)
# skipna=True
leftside = grouped.agg(aggf)
rightside = getattr(self.series, op)(level=level, skipna=skipna)
assert_series_equal(leftside, rightside)
def test_frame_group_ops(self):
self.frame.ix[1, [1, 2]] = np.nan
self.frame.ix[7, [0, 1]] = np.nan
for op, level, axis, skipna in cart_product(self.AGG_FUNCTIONS,
lrange(2), lrange(2),
[False, True]):
if axis == 0:
frame = self.frame
else:
frame = self.frame.T
grouped = frame.groupby(level=level, axis=axis)
pieces = []
def aggf(x):
pieces.append(x)
return getattr(x, op)(skipna=skipna, axis=axis)
leftside = grouped.agg(aggf)
rightside = getattr(frame, op)(level=level, axis=axis,
skipna=skipna)
# for good measure, groupby detail
level_index = frame._get_axis(axis).levels[level]
self.assert_(leftside._get_axis(axis).equals(level_index))
self.assert_(rightside._get_axis(axis).equals(level_index))
assert_frame_equal(leftside, rightside)
def test_stat_op_corner(self):
obj = Series([10.0], index=MultiIndex.from_tuples([(2, 3)]))
result = obj.sum(level=0)
expected = Series([10.0], index=[2])
assert_series_equal(result, expected)
def test_frame_any_all_group(self):
df = DataFrame(
{'data': [False, False, True, False, True, False, True]},
index=[
['one', 'one', 'two', 'one', 'two', 'two', 'two'],
[0, 1, 0, 2, 1, 2, 3]])
result = df.any(level=0)
ex = DataFrame({'data': [False, True]}, index=['one', 'two'])
assert_frame_equal(result, ex)
result = df.all(level=0)
ex = DataFrame({'data': [False, False]}, index=['one', 'two'])
assert_frame_equal(result, ex)
def test_std_var_pass_ddof(self):
index = MultiIndex.from_arrays([np.arange(5).repeat(10),
np.tile(np.arange(10), 5)])
df = DataFrame(np.random.randn(len(index), 5), index=index)
for meth in ['var', 'std']:
ddof = 4
alt = lambda x: getattr(x, meth)(ddof=ddof)
result = getattr(df[0], meth)(level=0, ddof=ddof)
expected = df[0].groupby(level=0).agg(alt)
assert_series_equal(result, expected)
result = getattr(df, meth)(level=0, ddof=ddof)
expected = df.groupby(level=0).agg(alt)
assert_frame_equal(result, expected)
def test_frame_series_agg_multiple_levels(self):
result = self.ymd.sum(level=['year', 'month'])
expected = self.ymd.groupby(level=['year', 'month']).sum()
assert_frame_equal(result, expected)
result = self.ymd['A'].sum(level=['year', 'month'])
expected = self.ymd['A'].groupby(level=['year', 'month']).sum()
assert_series_equal(result, expected)  # api: pandas.util.testing.assert_series_equal
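# Note (added, illustrative): the level-based reductions exercised above, e.g.
# self.ymd.sum(level=['year', 'month']), compute the same thing as an explicit
# groupby(level=...).sum(), which is the spelling newer pandas versions prefer.
# A tiny self-contained sketch of that equivalence (toy data, not part of the test suite):
#
#   import numpy as np
#   import pandas as pd
#
#   idx = pd.MultiIndex.from_product([[2000, 2001], [1, 2]], names=['year', 'month'])
#   toy = pd.Series(np.arange(4.0), index=idx)
#   toy.groupby(level='year').sum()   # year 2000 -> 1.0, year 2001 -> 5.0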
import sys
import gzip
import logging
import tempfile
import itertools
import numpy as np
import pandas as pd
from GetConfig import getConfig
from collections import Counter
config = getConfig()
class ReadFilterData(object):
'''
This class is mainly used to:
1. read VCF and BAM file
2. get individuals in sample list
3. filter female individuals by missing rate or K-means
4. filter sites to get the region for time estimation
5. genotype Y-STRs after reading BAMs
6. map ISOGG reference to BAMs, and get a list for subsequent analysis
'''
def __init__(self, input_file, samples):
self.logger = logging.getLogger()
self.samples = samples
self.input_file = input_file
# get overlaped male individuals from sample list
def _get_male_inds(self, inds):
males_not_in_data = [i for i in inds if i not in self.samples]
if males_not_in_data:
if males_not_in_data == self.samples:
self.logger.error('[Y-LineageTracker] [Error] No samples in input data, please check sample file')
used_males = inds
else:
self.logger.warning('[Y-LineageTracker] [Warning] %s not in input data' % (', '.join(males_not_in_data)))
used_males = [i for i in inds if i in self.samples]
else:
used_males = self.samples
return used_males
# get motif number of Y-STR
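# The implementation below (as written) works roughly as follows: fetch all reads overlapping
# the STR region plus one extra motif length, build a per-position consensus sequence from the
# most common base at each covered position, count motif occurrences in that consensus, and,
# while the count keeps exceeding the reference count (and the expected end motif, if any,
# matches), extend the window by one motif length and recurse.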
def _count_motif_num(self, bam, contig, start, end, base_motif_num, motif, motif_length, end_motif):
add_end = end+motif_length
pos_info = bam.fetch(contig, start-1, add_end)
add_ref_length = add_end-start+1
# get all possible sequence of specific region
seqs = []
for i in pos_info:
num = start-i.pos-1
num2 = add_end-i.pos
if num > 0:
if num2 < i.qlen:
seq = i.seq[num:num2]
else:
seq_tmp = i.seq[num: i.qlen]
seq = seq_tmp+(add_ref_length-len(seq_tmp))*'.'
else:
seq_tmp = i.seq[:num2]
seq = (add_ref_length-len(seq_tmp))*'.'+seq_tmp
seqs.append(seq)
# convert seq region to block
block = [''.join(s) for s in zip(*seqs)]
# get final sequence from block
final_seq = ''.join([Counter(i.strip('.')).most_common()[0][0] if i.count('.') != len(i) else '.' for i in block])
if len(final_seq) == 0 or '.' in final_seq:
sample_motif_count = '.'
else:
if len(motif) == 1:
sample_motif_count = final_seq.count(motif[0])
else:
sample_motif_count = sum([final_seq.count(m) for m in motif])
# recursion if motif+1
if sample_motif_count > base_motif_num:
if pd.isna(end_motif) or (not pd.isna(end_motif) and final_seq.endswith(end_motif)):
base_motif_num = sample_motif_count
end += motif_length
sample_motif_count = self._count_motif_num(bam, contig, start, end, base_motif_num, motif, motif_length, end_motif)
return sample_motif_count
def _get_specific_sites(self, build, sub_ref_info, id_info_dict):
data_info_array = []
for n, pos_num in enumerate(sub_ref_info['Build'+str(build)]):
line = []
int_pos = int(pos_num)
for bam, contig in id_info_dict.values():
pos_info = bam.fetch(contig, int_pos-1, int_pos)
pos_list = [i.seq[int_pos-i.pos-1].upper() for i in pos_info if int_pos-i.pos < i.qlen]
if len(set(pos_list)) == 1:
pos = pos_list[0]
elif len(set(pos_list)) > 1:
pos = Counter(pos_list).most_common()[0][0]
else:
pos = '.'
line.append(pos)
data_info_array.append([pos_num] + line)
data_info = pd.DataFrame(columns=['POS']+list(id_info_dict.keys()), data=np.array(data_info_array))
return data_info
def _match_sites(self, data_info, ref_info, build, matched_marker, header_cut, header_end):
matched_info = pd.merge(data_info, ref_info,
left_on='POS',
right_on='Build'+str(build),
how='inner',
sort=False)
matched_info_array = np.array(matched_info)
hap_function = (lambda x: matched_marker+matched_info_array[i, -10] if x == match_marker else x)
for i in range(len(matched_info)):
match_marker = matched_info_array[i, -6]
matched_info_array[i, header_cut: header_end] = list(map(hap_function, matched_info_array[i, header_cut: header_end]))
final_matched_info = pd.DataFrame(columns=matched_info.columns.tolist(), data=matched_info_array)
return final_matched_info
def _get_female_by_KMeans(self, missing_rate_data, samples):
from sklearn.cluster import KMeans
missing_rate_data.extend([0, 1])
missing_rate_data = np.array(missing_rate_data).reshape(-1, 1)
estimator = KMeans(n_clusters=2)
res = estimator.fit_predict(missing_rate_data)[: -2]
centroids = estimator.cluster_centers_
if centroids[0][0] > centroids[1][0]:
del_list = [samples[j] for i, j in zip(res, range(len(res))) if i == 0]
female_num = len(list(filter(lambda x: x == 0, res)))
else:
del_list = [samples[j] for i, j in zip(res, range(len(res))) if i == 1]
female_num = len(list(filter(lambda x: x == 1, res)))
return del_list, female_num
# read BAM and do matching analysis
def read_bam(self, ref_info, build, extract_type, format):
import os
import pysam
from collections import Counter
if format == 'bam':
open_method = 'rb'
else:
open_method = 'rc'
# get bam list
input_file_list = self.input_file.split(', ')
if len(input_file_list) == 1:
file = input_file_list[0]
try:
pysam.AlignmentFile(file, open_method)
bam_list = input_file_list
except ValueError:
bam_list = open(self.input_file).read().splitlines()
else:
bam_list = self.input_file.split(', ')
for i in bam_list:
file_exists = os.path.isfile(i)
if not file_exists:
raise FileNotFoundError('No such file or directory: %s' % i)
id_info_dict = {}
format_contigs = config.get('DataInfo', 'Contigs').split(',')
for f in bam_list:
file_name = os.path.basename(f)
bam = pysam.AlignmentFile(f, open_method)
bam_with_index = bam.has_index()
if not bam_with_index:
self.logger.error('[Y-LineageTracker] bam/cram file %s has no index' % f)
sys.exit()
ind = bam.header.get('RG')[0]['ID']
if ind in self.samples:
continue
sample_contigs = [i.contig for i in bam.get_index_statistics()]
overlaped_contigs = list(set(sample_contigs) & set(format_contigs))
if len(overlaped_contigs) == 1:
contig = overlaped_contigs[0]
else:
if len(overlaped_contigs) > 1:
self.logger.error('[Y-LineageTracker] More than one contig found for the Y chromosome (Y, chrY, 24, chr24), please check input data')
sys.exit()
if len(overlaped_contigs) == 0:
self.logger.error('[Y-LineageTracker] No contig for Y chromosome (Y, chrY, 24, chr24), please check input data')
sys.exit()
id = ind+'('+file_name+')'
id_info_dict[id] = [bam, contig]
# for NRY haplogroup classification
# match ISOGG sites to BAM files
if extract_type == 'snp':
# match sites of main trunk
matched_marker = config.get('HaplogroupMarker', 'MatchMarker') # used to mark matched haplogroups
header_cut = config.getint('DataInfo', 'BamHeaderCut')
header_end = -config.getint('DataInfo', 'ISOGGInfoCut')
common_trunk = config.get('HaplogroupTree', 'CommonTrunk').split(',')
special_haps = config.get('HaplogroupTree', 'SpecialTrunk').split(',')
# get allele of haplogroup on main trunks
main_trunk_ref_info = ref_info[ref_info['MainInfo']=='Main']
self.logger.info('[Y-LineageTracker] Extracting main trunks from BAMs/CRAMs...')
main_trunk_info = self._get_specific_sites(build, main_trunk_ref_info, id_info_dict)
# filter females
main_marker_num = main_trunk_info.index.size
f_samples = main_trunk_info.columns.tolist()[1:]
missing_rate_data = [main_trunk_info[col].tolist().count('.')/main_marker_num for col in f_samples]
del_list, female_num = self._get_female_by_KMeans(missing_rate_data, f_samples)
if del_list:
if female_num == len(f_samples):
self.logger.error('[Y-LineageTracker] Program stopped since no male sample left for subsequent analysis')
sys.exit()
else:
main_trunk_info.drop(del_list, inplace=True, axis=1)
for female in del_list:
del id_info_dict[female]
# match main haplogroups
main_matched_info = self._match_sites(main_trunk_info, main_trunk_ref_info, build, matched_marker, header_cut, header_end)
self.logger.info('[Y-LineageTracker] Extracting main trunks from BAMs/CRAMs finished')
# find which main haplogroup has the highest matching rate
main_sub_info = main_matched_info[main_matched_info['Haplogroup'].map(lambda x: x not in common_trunk)]
all_haps = main_sub_info['Haplogroup'].tolist()
main_sub_info = np.array(main_sub_info)
id_hap_dict = {}
for id_num, id in enumerate(id_info_dict.keys()):
col_info = main_sub_info[:, id_num+header_cut]
id_haps = sorted(list(map(lambda x: x[1:], list(filter(lambda x: x[0]==matched_marker, col_info)))))
ratio = 0
for hap in set(id_haps):
ratio2 = id_haps.count(hap) / all_haps.count(hap)
if ratio2 > ratio:
ratio = ratio2
main_hap = hap
if ratio < 0.25:
main_hap = None
id_hap_dict[id] = main_hap
# extract sub-haplogroups of each main trunk
self.logger.info('[Y-LineageTracker] Extracting sub-trunks from BAMs/CRAMs...')
sub_trunk_ref_info = ref_info[ref_info['MainInfo']!='Main']
info_list = []
NA_ids = []
for hap in sorted(set(id_hap_dict.values())):
hap_ids = list({k: v for k, v in id_hap_dict.items() if v==hap}.keys())
sub_id_info_dict = {k: v for k, v in id_info_dict.items() if k in hap_ids}
if main_hap:
hap_ref_info = sub_trunk_ref_info[sub_trunk_ref_info['Haplogroup'].map(lambda x: x.startswith(hap))]
hap_info = self._get_specific_sites(build, hap_ref_info, sub_id_info_dict)
info_list.append(hap_info)
else:
hap_ref_info = sub_trunk_ref_info[sub_trunk_ref_info['Haplogroup'].map(lambda x: x.startswith('A') and x in special_haps)]
hap_info = self._get_specific_sites(build, hap_ref_info, sub_id_info_dict)
sub_hap_info = pd.concat(info_list, axis=0, sort=False).fillna('.')
self.logger.info('[Y-LineageTracker] Extracting sub-trunks from BAMs/CRAMs finished')
sub_trunk_info = sub_hap_info[['POS']+list(id_info_dict.keys())]
sub_matched_info = self._match_sites(sub_trunk_info, sub_trunk_ref_info, build, matched_marker, header_cut, header_end)
data_info = np.array(pd.concat([main_matched_info, sub_matched_info]).drop_duplicates())
inds = list(id_info_dict.keys())
self.logger.info('[Y-LineageTracker] Extracting information from BAMs/CRAMs finished')
# for Y-STR genotyping
# genotype Y-STRs from BAMs
else:
from FilesIO import get_str_from_panel
str_info = get_str_from_panel(ref_info)
data_info = pd.DataFrame(columns=list(id_info_dict.keys()))
for num, STR_idx in enumerate(str_info.index):
# read basic information
start = str_info.at[STR_idx, 'Start'+str(build)]
end = str_info.at[STR_idx, 'End'+str(build)]
motif = str_info.at[STR_idx, 'RefMotif'].split(', ')
str_name = str_info.at[STR_idx, 'Name'+str(build)]
motif_length = str_info.at[STR_idx, 'MotifLength']
end_motif = str_info.at[STR_idx, 'End']
motif_count = []
# matching
for bam, contig in id_info_dict.values():
base_motif_num = str_info.at[STR_idx, 'Ref'+str(build)]
sample_motif_count = self._count_motif_num(bam, contig, start, end, base_motif_num, motif, motif_length, end_motif)
motif_count.append(sample_motif_count)
data_info.loc[str_name] = motif_count
percent = float(num + 1) * 100 / float(str_info.index.size)
sys.stdout.write('[Y-LineageTracker] Genotyping Y-STR…… %.2f' % percent)
sys.stdout.write('%\r')
sys.stdout.flush()
data_info = data_info.T
inds = list(id_info_dict.keys())
print()
self.logger.info('[Y-LineageTracker] Genotyping Y-STR finished')
return data_info, inds
# open input file of variant calling
def _read_variant(self, file_format):
# open VCF
if file_format == 'gzvcf':
opened_file = gzip.open(self.input_file, 'rt')
else:
opened_file = open(self.input_file, 'r')
# get row number of header
config = getConfig()
if 'vcf' in file_format:
header_cut = config.getint('DataInfo', 'VcfHeaderCut')
start_symbol = '#CHROM'
chr_symbol = start_symbol
for i in itertools.count(start=0, step=1):
vcf_line = opened_file.readline()
if vcf_line.startswith(start_symbol):
head_num = i
break
else:
header_cut = config.getint('DataInfo', 'InpHeaderCut')
start_symbol = 'dbSNP'
blank_num = 0
for i in itertools.count(start=0, step=1):
inp_line = opened_file.readline()
if inp_line == '\n':
blank_num += 1
if inp_line.startswith(start_symbol):
head_num = i - blank_num
start_symbol = inp_line.split('\t')[0]
chr_symbol = inp_line.split('\t')[1]
pos_symbol = inp_line.split('\t')[2]
break
opened_file.close()
# open input file in pandas
self.logger.info('[Y-LineageTracker] Reading input file...')
data = pd.read_csv(self.input_file,
header=head_num,
sep='\t',
encoding='utf-8',
dtype='object')
self.logger.info('[Y-LineageTracker] Input file Read')
# change header name of site vcf if input is INP
if file_format == 'inp':
data.rename(columns={pos_symbol: 'POS'}, inplace=True)
# chr column keeps only Y/24
chr_type = len(set(data[chr_symbol].tolist()))
if chr_type > 1:
print('[Y-LineageTracker] [Warning] Other chromosomes were detected in the input file; only the Y chromosome is retained for analysis')
data = data[data[chr_symbol].map(lambda x: 'Y' in x or '24' in x or 'chrY' in x or 'chr24' in x)]
return data, header_cut
# filter individuals
def _filter_variant(self, data, cutoff, file_format, header_cut):
# list of indivisuals and number
inds = data.columns.tolist()[header_cut:]
ind_num_before = len(inds)
self.logger.info('[Y-LineageTracker] There are %d individuals in input file' % (ind_num_before))
# filter according to sample list
if self.samples:
used_males = self._get_male_inds(inds)
header = data.columns.tolist()[:header_cut]
data = data[header+used_males]
female_num = ind_num_before-len(used_males)
# filter female individuals according to missing rate or K-means cluster if no sample information
else:
if cutoff is None:
female_num = 0
else:
del_list = []
site_num = data.index.size
missing_count = (lambda x: 'missing' if '.' in x or './.' in x else x)
try:
# count missing rate of each individual
if 'vcf' in file_format:
missing_rate_data = [data[i].map(missing_count).tolist().count('missing') / site_num for i in inds]
else:
missing_rate_data = [data[i].tolist().count('U') / site_num for i in inds]
# if a cutoff for the missing rate is set, filter according to the missing rate
if cutoff:
del_list = [j for i, j in zip(missing_rate_data, inds) if i > cutoff]
female_num = len([i for i in inds if i not in del_list])
# if no cutoff is set, use K-means to cluster missing rates and identify female samples
else:
del_list, female_num = self._get_female_by_KMeans(missing_rate_data, inds)
# if all individual filtered
if len(del_list) == ind_num_before:
self.logger.error('[Y-LineageTracker] Program stopped since no male sample left for subsequent analysis')
sys.exit()
# remove filtered individuals from data
data.drop(del_list, inplace=True, axis=1)
except TypeError:
self.logger.error('[Y-LineageTracker] [Error] VCF file format error, please check your file')
sys.exit()
inds_after = data.columns.tolist()[header_cut:]
left_num = len(inds_after)
if not cutoff is None:
self.logger.info('[Y-LineageTracker] %d female individuals filtered and %d individuals left' % (female_num, left_num))
return data, inds_after
# main function for reading and filtering variant calling
def read_filter_variant(self, cutoff, file_format):
data_read, header_cut = self._read_variant(file_format)
data_info, inds = self._filter_variant(data_read, cutoff, file_format, header_cut)
return data_info, inds
# filter sites and keep only SNPs for time estimation
def restrict_time_region(self, data):
# read data of region for time calculation
from FilesIO import CommonData
common_data = CommonData()
region_info = common_data.read_region_info()
time_data_info = pd.DataFrame(columns=data.columns.tolist())
# function used to filter indels
def remove_indels(x):
alt = x.split(',')
flag_num = sum([0 if len(i) == 1 else 1 for i in alt])
if flag_num == 0:
return True
else:
return False
# restrict sites in regions
for i in region_info.index:
start_pos = int(region_info.at[i, 'start'])
end_pos = int(region_info.at[i, 'end'])
time_region_data = data[pd.to_numeric(data['POS']).map(lambda x: x >= start_pos and x <= end_pos)]
time_data_info = pd.concat([time_data_info, time_region_data])  # api: pandas.concat
#!/usr/bin/env python3
"""
The bioscreen module analyzes and graphs growth curves from Bioscreen C data
See example folder on GitHub for a step-by-step example and more in-depth documentation.
## Basic example
# create new Experiment object
import bioscreen
expt = bioscreen.Experiment()
# configure the Experiment, i.e. define what each well is.
# there are 3 options. See documentation for each for more information
expt.set_config_from_file(config_file_path)
expt.set_config([groups], [samples])
expt.configuration = [{'group': 'group1', 'blank': [1,2,3,4], 'sample1': [5,6,7,8]}, {'group': 'group2', ...}, ...]
# summarize/analyze the experiment
# in this step, the readings for each set of wells is averaged at each time point
# the blank well readings, if available, are subtracted from the sample readings
expt.summarize()
# output the summary data if desired
expt.write_summary(summary_file)
# graph the data
expt.graph(figure_file)
## It is also possible to create graphs from previously-made summary files
expt = bioscreen.Experiment()
expt.load_summary(summary_file)
expt.graph(figure_file)
## Instead of graphing everything at once, selected data can be graphed
# graphing data just for "group1" and "group2"
expt.graph(figure_file, groups_to_graph=['group1', 'group2'])
# graphing just "sample1" in "group1"
expt.graph(figure_file, samples_to_graph=['group1__sample1'])
# or creating a graph for each group
expt.graph_groups('figure_base_name')
"""
import re
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
class Experiment:
def __init__(self):
"""
Experiment objects are used to analyze and graph bioscreen data.
The basic workflow is:
(1) use Experiment.set_config() or .set_config_from_file() to define the experiment configuration
(2) summarize data with Experiment.summarize()
(3) graph data with Experiment.graph()
See documentation for the bioscreen module and for each method for more information
"""
self.configuration = None
self.summary_data = None
self.data_path = None
self.summary_path = None
def status(self):
report = 'An Experiment object in the bioscreen module\n'
if self.configuration is not None:
report = report + '\nExperimental configuration: ' + str(self.configuration) + '\n'
else:
report = report + '\nExperimental configuration is not set.\n'
if self.summary_data is not None:
report = report + '\nData has been summarized.\n\tData File: {}\n\tSummary File: {}'.format(self.data_path, self.summary_path)
else:
report = report + '\nData has not been summarized yet.'
return report
def __str__(self):
return self.status()
def __repr__(self):
return self.status()
def load_summary(self, summary_path):
"""
Load summary data that was previously created by Experiment.write_summary()
Arguments:
(1) path to summary file.
"""
self.summary_data = pd.read_table(summary_path)  # api: pandas.read_table
# -*- coding: utf-8 -*-
"""Moving Data Processing.ipynb
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/drive/1LttfL6PKbT1DdG9dye8AyFT0yMes9X4X
"""
from google.colab import drive
drive.mount('/content/drive')
import pandas as pd
import seaborn as sns
import numpy as np
data = pd.read_csv("/content/drive/My Drive/covid project/Data/13-Dec.csv", index_col=0)  # api: pandas.read_csv
# Question: Please concatenate the two files below into a single text file.
# The content of the output file should look like below.
# http://www.pythonhow.com/data/sampledata.txt
# http://pythonhow.com/data/sampledata_x_2.txt
# Expected output:
# x,y
# 3,5
# 4,9
# 6,10
# 7,11
# 8,12
# 6,10
# 8,18
# 12,20
# 14,22
# 16,24
# Answer:
import pandas as pd
df1 = pd.read_csv('http://www.pythonhow.com/data/sampledata.txt')
df2 = pd.read_csv('http://pythonhow.com/data/sampledata_x_2.txt')
frames = [df1,df2]
df_result = pd.concat(frames)  # api: pandas.concat
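# The answer above builds the combined DataFrame but stops short of writing it out; to produce
# the single text file the question asks for, one more line is enough. The output filename
# below is an assumption, not given in the original question/answer:
df_result.to_csv('sampledata_combined.txt', index=False)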
import numpy as np
import pandas as pd
round_1_sp = pd.read_stata('data/NHATS_Round_1_SP_File.dta')
# Exclusions
# dementia
# (hc1disescn9) - 1 - YES, 2 - NO, -1 Inapplicable, -8 DK, -9 Missing
# filter hc1disescn9 == 2
# n post filter = 7146
round_1_cohort = round_1_sp[round_1_sp.hc1disescn9 == ' 2 NO']
# missing grip strength
# n post filter = 5969
# 2 measures, gr1grp1rdng and gr1grp2rdng
# remove those with na in both measurements
both_grip_na = [pd.isna(g1) and pd.isna(g2) for g1, g2 in zip(
round_1_cohort.gr1grp1rdng, round_1_cohort.gr1grp2rdng)]
round_1_cohort = round_1_cohort[[not x for x in both_grip_na]]
# missing height or weight data
# weight in pounds (hw1currweigh), height in feet (hw1howtallft), height in inches (hw1howtallin)
# n post filter = 5822
missing_height_or_weight = [any([pd.isna(weight), pd.isna(height_f), pd.isna(height_in)]) for weight, height_f, height_in in zip(
round_1_cohort.hw1currweigh, round_1_cohort.hw1howtallft, round_1_cohort.hw1howtallin)]
round_1_cohort = round_1_cohort[[not x for x in missing_height_or_weight]]
# Derived measures
# max grip_strength
# max of both grip readings (if applicable)
# appended as max_grip
round_1_cohort['max_grip'] = round_1_cohort.apply(
lambda x: np.nanmax([x.gr1grp1rdng, x.gr1grp2rdng]), axis=1)
# BMI
# defined as self-reported baseline weight in kg divided by height in meters-squared
# appended as weight_kg, height_m, and BMI respectively
round_1_cohort['weight_kg'] = round_1_cohort.hw1currweigh.astype(
'float') / 2.2046
round_1_cohort['height_m'] = (round_1_cohort.hw1howtallft.astype(
'int') * 12 + round_1_cohort.hw1howtallin.astype('int')) * 0.0254
round_1_cohort['bmi'] = round_1_cohort.weight_kg / round_1_cohort.height_m**2
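# Worked example of the conversions above (illustrative numbers only):
# 150 lb / 2.2046 ~= 68.0 kg; 5 ft 6 in = 66 in * 0.0254 ~= 1.68 m; BMI ~= 68.0 / 1.68**2 ~= 24.2 kg/m^2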
# High waist circumference
# waist measure in inches (wc1wstmsrinc)
# indicator for high waist circumference, >= 102 cm in males, >= 88 cm in females
# appended as high_wc
# 104 missing wc measure
def high_wc(x):
if pd.isna(x.r1dgender) or pd.isna(x.wc1wstmsrinc):
return np.nan
wc = x.wc1wstmsrinc * 2.54
if x.r1dgender == '1 MALE':
return True if wc >= 102 else False
elif x.r1dgender == '2 FEMALE':
return True if wc >= 88 else False
else:
raise Exception
round_1_cohort['high_wc'] = round_1_cohort.apply(high_wc, axis=1)
# Sarcopenia (defined by grip strength)
# grip strength < 35.5 kg in males, <20 kg in females
# appended as sarcopenia
# no na due to exclusion criteria
def sarcopenia(x):
if pd.isna(x.max_grip) or pd.isna(x.r1dgender):
return np.nan
if x.r1dgender == '1 MALE':
return True if x.max_grip < 35.5 else False
elif x.r1dgender == '2 FEMALE':
return True if x.max_grip < 20 else False
else:
raise Exception
round_1_cohort['sarcopenia'] = round_1_cohort.apply(sarcopenia, axis=1)
def sarcopenia_cutoff2(x):
if pd.isna(x.max_grip) or pd.isna(x.r1dgender):
return np.nan
if x.r1dgender == '1 MALE':
return True if x.max_grip < 26 else False
elif x.r1dgender == '2 FEMALE':
return True if x.max_grip < 16 else False
else:
raise Exception
round_1_cohort['sarcopenia_cutoff2'] = round_1_cohort.apply(
sarcopenia_cutoff2, axis=1)
# SDOC Sarcopenia (defined by grip strength/BMI ratio)
# grip strength/BMI < 1.05 in males, < 0.79 in females
# appended as sdoc_sarcopenia
# no na due to exclusion criteria
def sdoc_sarcopenia(x):
if any([pd.isna(m) for m in [x.max_grip, x.bmi, x.r1dgender]]):
return np.nan
ratio = x.max_grip / x.bmi
if x.r1dgender == '1 MALE':
return True if ratio < 1.05 else False
elif x.r1dgender == '2 FEMALE':
return True if ratio < 0.79 else False
else:
raise Exception
round_1_cohort['sdoc_sarcopenia'] = round_1_cohort.apply(
sdoc_sarcopenia, axis=1)
# Gender
# r1dgender
round_1_cohort['gender'] = round_1_cohort.r1dgender
# Race
# rl1dracehisp, recode values below in dictionary
# no na
# appended as race
def race(x):
d = {' 1 White, non-hispanic': 'White', ' 2 Black, non-hispanic': 'Black',
' 3 Other (Am Indian/Asian/Native Hawaiian/Pacific Islander/other specify), non-Hispanic': 'Other', ' 4 Hispanic': 'Hispanic', ' 5 more than one DKRF primary': 'Other', ' 6 DKRF': 'DKRF'}
return d.get(x.rl1dracehisp, np.nan)
round_1_cohort['race'] = round_1_cohort.apply(race, axis=1)
# Smoking status
# Current - sd1smokedreg == 1 (smoked regularly) & sd1smokesnow == 1 (smokes now)
# Former smoker - sd1smokedreg == 1 & sd1smokesnow == 2 or sd1smokesnow is na
# Never - sd1smokedreg == 2 & sd1smokesnow == 2
# appended as smoking_status
# 1 overall na
def smoking_status(x):
if pd.isna(x.sd1smokedreg) and pd.isna(x.sd1smokesnow): # only 1
return np.nan
elif pd.isna(x.sd1smokedreg) and pd.notna(x.sd1smokesnow): # never
raise Exception
elif pd.notna(x.sd1smokedreg) and pd.isna(x.sd1smokesnow): # 2818
if x.sd1smokedreg == ' 1 YES':
return 'Former, maybe current'
elif x.sd1smokedreg == ' 2 NO':
return 'Never'
else: # both exist
if x.sd1smokedreg == ' 1 YES' and x.sd1smokesnow == ' 1 YES':
return 'Current'
elif x.sd1smokedreg == ' 1 YES' and x.sd1smokesnow == ' 2 NO':
return 'Former'
else:
return 'Never'
round_1_cohort['smoking_status'] = round_1_cohort.apply(smoking_status, axis=1)
# Education
# el1higstschl
# Less than high school: 1 - no schooling,
# 2 - 1st to 8th grade,
# 3 - 9th to 12th grade, no diploma
# High school to some college: 4 - high school graduate (diploma or equivalent)
# 5 - vocational, technical, business or trade school certificate
# beyond high school
# 6 - some college but no degree
# College degree: 7 - associate's degree
# 8 - bachelor's degree
# Graduate degree: 9 - master's, professional, or doctoral
# appended as education
# 4 na
def education(x):
d = {' 1 NO SCHOOLING COMPLETED': 'Less than high school',
' 2 1ST-8TH GRADE': 'Less than high school',
' 3 9TH-12TH GRADE (NO DIPLOMA)': 'Less than high school',
' 4 HIGH SCHOOL GRADUATE (HIGH SCHOOL DIPLOMA OR EQUIVALENT)': 'High school to some college',
' 6 SOME COLLEGE BUT NO DEGREE': 'High school to some college',
" 8 BACHELOR'S DEGREE": 'College degree',
" 9 MASTER'S, PROFESSIONAL, OR DOCTORAL DEGREE": 'Graduate degree',
' 5 VOCATIONAL, TECHNICAL, BUSINESS, OR TRADE SCHOOL CERTIFICATE OR DIPLOMA (BEYOND HIGH SCHOOL LEVEL)': 'High school to some college',
" 7 ASSOCIATE'S DEGREE": 'College degree'
}
return d.get(x.el1higstschl, np.nan)
round_1_cohort['education'] = round_1_cohort.apply(education, axis=1)
# Physical activity proxy
# pa1evrgowalk
# appended as ever_walk
# no na
round_1_cohort['ever_walk'] = round_1_cohort.apply(
lambda x: True if x.pa1evrgowalk == ' 1 YES' else False, axis=1)
# Comorbidities
# heart_disease
# hc1disescn1 - had heart attack
# hc1disescn2 - has heart disease
# no na
def heart_disease(x):
if x.hc1disescn1 == ' 1 YES' or x.hc1disescn2 == ' 1 YES':
return True
elif x.hc1disescn1 == ' 2 NO' or x.hc1disescn2 == ' 2 NO':
return False
else:
return np.nan
round_1_cohort['heart_disease'] = round_1_cohort.apply(heart_disease, axis=1)
# hypertension
# hc1disescn3
# 7 na
round_1_cohort['hypertension'] = round_1_cohort.apply(
lambda x: True if x.hc1disescn3 == ' 1 YES' else False if x.hc1disescn3 == ' 2 NO' else np.nan, axis=1)
# arthritis
# hc1disescn4
# 12 na
round_1_cohort['arthritis'] = round_1_cohort.apply(
lambda x: True if x.hc1disescn4 == ' 1 YES' else False if x.hc1disescn4 == ' 2 NO' else np.nan, axis=1)
# diabetes
# hc1disescn6
# 2 na
round_1_cohort['diabetes'] = round_1_cohort.apply(
lambda x: True if x.hc1disescn6 == ' 1 YES' else False if x.hc1disescn6 == ' 2 NO' else np.nan, axis=1)
# lung_disease
# hc1disescn7
# 4 na
round_1_cohort['lung_disease'] = round_1_cohort.apply(
lambda x: True if x.hc1disescn7 == ' 1 YES' else False if x.hc1disescn7 == ' 2 NO' else np.nan, axis=1)
# stroke
# hc1disescn8
# 5 na
round_1_cohort['stroke'] = round_1_cohort.apply(
lambda x: True if x.hc1disescn8 == ' 1 YES' else False if x.hc1disescn8 == ' 2 NO' else np.nan, axis=1)
# cancer
# hc1disescn10
# 2 na
round_1_cohort['cancer'] = round_1_cohort.apply(
lambda x: True if x.hc1disescn10 == ' 1 YES' else False if x.hc1disescn10 == ' 2 NO' else np.nan, axis=1)
# Age category
# r1d2intvrage
# no na
# appended as age_category
def age_category(x):
d = {
'1 - 65-69': '65-69',
'2 - 70-74': '70-74',
'3 - 75-79': '75-79',
'4 - 80-84': '80-84',
'5 - 85-89': '85+',
'6 - 90 +': '85+'
}
return d.get(x.r1d2intvrage, np.nan)
round_1_cohort['age_category'] = round_1_cohort.apply(age_category, axis=1)
# Obesity (defined by BMI)
# BMI >= 30 kg/m^2
# appended as Obesity
# no na due to exclusion criteria
round_1_cohort['obesity'] = round_1_cohort.apply(
lambda x: True if x.bmi >= 30 else False, axis=1)
# Sarcopenic obesity definitions
# Grouping 1: sarcopenia, obesity, sarcopenic obesity, neither
# obesity derived from BMI (variable obesity)
# appended as grouping_1_so_status
def grouping_1_so_status(x):
if pd.isna(x.sarcopenia) or pd.isna(x.obesity): # shouldn't happen
return np.nan
if x.sarcopenia and not x.obesity:
return 'Sarcopenia'
elif not x.sarcopenia and x.obesity:
return 'Obesity'
elif x.sarcopenia and x.obesity:
return 'Sarcopenic Obesity'
elif not x.sarcopenia and not x.obesity:
return 'Neither'
round_1_cohort['grouping_1_so_status'] = round_1_cohort.apply(
grouping_1_so_status, axis=1)
def grouping_1_so_status_cutoff2(x):
if pd.isna(x.sarcopenia_cutoff2) or pd.isna(x.obesity): # shouldn't happen
return np.nan
if x.sarcopenia_cutoff2 and not x.obesity:
return 'Sarcopenia'
elif not x.sarcopenia_cutoff2 and x.obesity:
return 'Obesity'
elif x.sarcopenia_cutoff2 and x.obesity:
return 'Sarcopenic Obesity'
elif not x.sarcopenia_cutoff2 and not x.obesity:
return 'Neither'
round_1_cohort['grouping_1_so_status_cutoff2'] = round_1_cohort.apply(
grouping_1_so_status_cutoff2, axis=1)
# Grouping 2: sarcopenia, obesity, sarcopenic obesity, neither
# obesity derived from waist circumference (variable name high_wc)
# appended as grouping_2_so_status
# 104 na (due to missing wc)
def grouping_2_so_status(x):
if pd.isna(x.sarcopenia) or pd.isna(x.high_wc):  # api: pandas.isna
from typing import List, Sequence
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from matplotlib.axes import Axes
from matplotlib.figure import Figure
from sklearn.metrics import roc_auc_score, roc_curve
from dfds_ds_toolbox.analysis.plotting_utils import (
_get_equally_grouped_data,
_get_trend_changes,
_get_trend_correlation,
_univariate_plotter,
)
def plot_classification_proba_histogram(y_true: Sequence[int], y_pred: Sequence[float]) -> Figure:
"""Plot histogram of predictions for binary classifiers.
Args:
y_true: 1D array of binary target values, 0 or 1.
y_pred: 1D array of predicted target values, probability of class 1.
"""
fig, ax = plt.subplots()
bins = np.linspace(0, 1, 11)
df = pd.DataFrame()
df["Actual class"] = y_true
df["Probability of class 1"] = y_pred
df_actual_1 = df[df["Actual class"] == 1]
df_actual_0 = df[df["Actual class"] == 0]
ax.hist(
x=df_actual_0["Probability of class 1"], bins=bins, label="Actual class 0", histtype="step"
)
ax.hist(
x=df_actual_1["Probability of class 1"], bins=bins, label="Actual class 1", histtype="step"
)
ax.set_xlabel("Probability of class 1")
ax.set_ylabel("Counts")
ax.legend()
return fig
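# A minimal usage sketch (added for illustration; the inputs below are placeholders for the
# labels and predicted probabilities of any fitted binary classifier, not part of the library):
#
#   fig = plot_classification_proba_histogram(
#       y_true=[0, 0, 1, 1],
#       y_pred=[0.10, 0.40, 0.35, 0.80],
#   )
#   fig.savefig("proba_histogram.png")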
def plot_univariate_dependencies(
data: pd.DataFrame,
target_col: str,
features_list: List[str] = None,
bins: int = 10,
data_test: pd.DataFrame = None,
):
"""Creates univariate dependence plots for features in the dataset
Args:
data: dataframe containing features and target columns
target_col: target column name
features_list: by default creates plots for all features. If list passed, creates plots of only those features.
bins: number of bins to be created from continuous feature
data_test: test data which has to be compared with input data for correlation
Returns:
Draws univariate plots for all columns in data
"""
if features_list is None:
features_list = list(data.columns)
features_list.remove(target_col)
for cols in features_list:
if cols != target_col and data[cols].dtype == "O":
print(cols + " is categorical. Categorical features not supported yet.")
elif cols != target_col and data[cols].dtype != "O":
_univariate_plotter(
feature=cols, data=data, target_col=target_col, bins=bins, data_test=data_test
)
def get_trend_stats(
data: pd.DataFrame,
target_col: str,
features_list: List[str] = None,
bins: int = 10,
data_test: pd.DataFrame = None,
) -> pd.DataFrame:
"""Calculates trend changes and correlation between train/test for list of features.
Args:
data: dataframe containing features and target columns
target_col: target column name
features_list: by default computes stats for all features. If a list is passed, computes stats for only those features.
bins: number of bins to be created from continuous feature
data_test: test data which has to be compared with input data for correlation
Returns:
dataframe with trend changes and trend correlation (if test data passed)
"""
if features_list is None:
features_list = list(data.columns)
features_list.remove(target_col)
stats_all = []
has_test = type(data_test) == pd.core.frame.DataFrame
ignored = []
for feature in features_list:
if data[feature].dtype == "O" or feature == target_col:
ignored.append(feature)
else:
cuts, grouped = _get_equally_grouped_data(
input_data=data, feature=feature, target_col=target_col, bins=bins
)
trend_changes = _get_trend_changes(
grouped_data=grouped, feature=feature, target_col=target_col
)
if has_test:
grouped_test = _get_equally_grouped_data(
input_data=data_test.reset_index(drop=True), # type: ignore[union-attr]
feature=feature,
target_col=target_col,
bins=bins,
cuts=cuts,
)
trend_corr = _get_trend_correlation(grouped, grouped_test, feature, target_col)
trend_changes_test = _get_trend_changes(
grouped_data=grouped_test, feature=feature, target_col=target_col
)
stats = [feature, trend_changes, trend_changes_test, trend_corr]
else:
stats = [feature, trend_changes]
stats_all.append(stats)
stats_all_df = pd.DataFrame(stats_all)  # api: pandas.DataFrame
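# A minimal usage sketch for get_trend_stats (added for illustration; the synthetic frames
# below are assumptions, not part of the library):
#
#   import numpy as np
#   import pandas as pd
#
#   rng = np.random.default_rng(0)
#   train = pd.DataFrame({"feature_1": rng.normal(size=200),
#                         "target": rng.integers(0, 2, size=200)})
#   test = pd.DataFrame({"feature_1": rng.normal(size=100),
#                        "target": rng.integers(0, 2, size=100)})
#   stats = get_trend_stats(data=train, target_col="target", bins=5, data_test=test)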
import pandas as pd
import pytest
from dku_timeseries import ExtremaExtractor
from recipe_config_loading import get_extrema_extraction_params
@pytest.fixture
def columns():
class COLUMNS:
date = "Date"
category = "categorical"
data = "value1"
return COLUMNS
@pytest.fixture
def monthly_df():
co2 = [4, 9, 4, 2, 5, 1]
time_index = pd.date_range("1-1-2015", periods=6, freq="M")
df = pd.DataFrame.from_dict(
{"value1": co2, "value2": co2, "Date": time_index})
return df
@pytest.fixture
def recipe_config(columns):
config = {u'window_type': u'none', u'groupby_columns': [], u'closed_option': u'left', u'window_unit': u'months', u'window_width': 2,
u'causal_window': False, u'datetime_column': columns.date, u'advanced_activated': False, u'extrema_column': columns.data, u'extrema_type': u'max',
u'aggregation_types': [u'average'], u'gaussian_std': 1}
return config
class TestExtremaFrequencies:
def test_month(self, recipe_config, columns):
params = get_extrema_extraction_params(recipe_config)
extrema_extractor = ExtremaExtractor(params)
df = get_df_DST("M", columns)
output_df = extrema_extractor.compute(df, columns.date, columns.data)
assert output_df.shape == (1, 5)
assert output_df.loc[0, columns.date] == pd.Timestamp("2019-06-30 01:59:00+02:00")
def test_year(self, recipe_config, columns):
recipe_config["window_unit"] = "years"
params = get_extrema_extraction_params(recipe_config)
extrema_extractor = ExtremaExtractor(params)
df = get_df_DST("Y", columns)
output_df = extrema_extractor.compute(df, columns.date, columns.data)
assert output_df.shape == (1, 5)
assert output_df.loc[0, columns.date] == pd.Timestamp("2024-12-31 01:59:00+01:00")
def test_weeks(self, recipe_config, columns):
recipe_config["window_unit"] = "weeks"
params = get_extrema_extraction_params(recipe_config)
extrema_extractor = ExtremaExtractor(params)
df = get_df_DST("W", columns)
output_df = extrema_extractor.compute(df, columns.date, columns.data)
assert output_df.shape == (1, 5)
assert output_df.loc[0, columns.date] == pd.Timestamp("2019-03-10 01:59:00+01:00")
def test_days(self, recipe_config, columns):
recipe_config["window_unit"] = "days"
params = get_extrema_extraction_params(recipe_config)
extrema_extractor = ExtremaExtractor(params)
df = get_df_DST("D", columns)
output_df = extrema_extractor.compute(df, columns.date, columns.data)
assert output_df.shape == (1, 5)
assert output_df.loc[0, columns.date] == pd.Timestamp("2019-02-05 01:59:00+01:00")
def test_hours(self, recipe_config, columns):
recipe_config["window_unit"] = "hours"
params = get_extrema_extraction_params(recipe_config)
extrema_extractor = ExtremaExtractor(params)
df = get_df_DST("H", columns)
output_df = extrema_extractor.compute(df, columns.date, columns.data)
assert output_df.shape == (1, 5)
assert output_df.loc[0, columns.date] == pd.Timestamp("2019-01-31 06:59:00+0100")
def test_minutes(self, recipe_config, columns):
recipe_config["window_unit"] = "minutes"
params = get_extrema_extraction_params(recipe_config)
extrema_extractor = ExtremaExtractor(params)
df = get_df_DST("T", columns)
output_df = extrema_extractor.compute(df, columns.date, columns.data)
assert output_df.shape == (1, 5)
        assert output_df.loc[0, columns.date] == pd.Timestamp("2019-01-31 02:04:00+0100")
# -*- coding: utf-8 -*-
"""
Created on Wed Aug 12 18:24:12 2020
@author: omar.elfarouk
"""
import pandas
import numpy
import seaborn
import scipy
import matplotlib.pyplot as plt
data = pandas.read_csv('gapminder.csv', low_memory=False)
#setting variables you will be working with to numeric
data['internetuserate'] = pandas.to_numeric(data['internetuserate'], errors='coerce')
# -*- coding: UTF-8 -*-
import os
import numpy as np
import pandas as pd
from stock.globalvar import FINANCE_DIR, BASIC_DIR, LRB_CH2EN, XJLLB_CH2EN, ZCFZB_CH2EN
from stock.utils.symbol_util import symbol_to_exsymbol
def _set_quarter(df):
for i in range(len(df)):
dt = df.index[i]
if dt.month == 3:
df.loc[df.index[i], "quarter"] = "Q1"
elif dt.month == 6:
df.loc[df.index[i], "quarter"] = "Q2"
elif dt.month == 9:
df.loc[df.index[i], "quarter"] = "Q3"
elif dt.month == 12:
df.loc[df.index[i], "quarter"] = "Q4"
def _parse_cell(string, parser=None):
string = string.strip()
if string == "":
return np.nan
if string == "--":
return np.nan
if parser == None:
return string
return parser(string)
def get_lrb_data(exsymbol):
filename = "%s_lrb" % exsymbol
path = os.path.join(FINANCE_DIR["stock"], filename)
if not os.path.isfile(path):
msg = "%s has no lrb data" % exsymbol
raise Exception(msg)
content = None
with open(path, "r") as f:
content = f.read()
lines = content.splitlines()
data = {}
index = []
for line in lines:
if line.strip() == "":
continue
cells = line.split(",")
col = cells[0].strip()
if col == "报告日期":
index = map(lambda x: _parse_cell(x, str), cells[1:])
else:
en = LRB_CH2EN.get(col)
if en == "":
raise Exception("en for %s not defined" % cells[0])
array = data.setdefault(en, [])
parsed = map(lambda x: _parse_cell(x, float), cells[1:])
array.extend(parsed)
df = pd.DataFrame(data=data, index=index).fillna(0.0)
    df = df[pd.notnull(df.index)]
from datetime import datetime
from typing import List
import pandas as pd
import pytest
from hyperwave import (
HyperwaveWeekLenghtGrouping,
HyperwavePhaseGrouper,
HyperwaveGroupingPhasePercent,
HyperwaveGroupingPhaseAggregator,
HyperwaveGroupingToPhase4,
HyperwaveGrouperByMedianSlopeIncrease,
HyperwaveGrouping,
HyperwaveGrouperSmallWeek,
)
def get_path_row(
x1: int = 0,
x1_date: datetime = datetime(2000, 1, 1),
x1_normalize: float = 0.0,
x2: int = 0,
x2_date: datetime = datetime(2000, 1, 1),
x2_normalize: float = 0.0,
y1: float = 0.0,
y1_normalize: float = 0.0,
y2: float = 0.0,
y2_normalize: float = 0.0,
m: float = 0.0,
b: float = 0.0,
m_normalize: float = 0.0,
b_normalize: float = 0.0,
angle: float = 0.0,
angle_normalize: float = 0.0,
weeks: int = 0,
mean_error: float = 0.0,
nb_is_lower: int = 0,
ratio_error_cut: float = 0.0,
ratio_slope_y1_normalize: float = 0.0,
ratio_slope_y2_normalize: float = 0.0,
):
return {
"x1": x1,
"x1_date": x1_date,
"x1_normalize": x1_normalize,
"x2": x2,
"x2_date": x2_date,
"x2_normalize": x2_normalize,
"y1": y1,
"y1_normalize": y1_normalize,
"y2": y2,
"y2_normalize": y2_normalize,
"m": m,
"b": b,
"m_normalize": m_normalize,
"b_normalize": b_normalize,
"angle": angle,
"angle_normalize": angle_normalize,
"weeks": weeks,
"mean_error": mean_error,
"nb_is_lower": nb_is_lower,
"ratio_error_cut": ratio_error_cut,
"ratio_slope_y1_normalize": ratio_slope_y1_normalize,
"ratio_slope_y2_normalize": ratio_slope_y2_normalize,
}
@pytest.mark.parametrize(
"raw_path, expected_phases, increase_factor, test_conment",
[
([get_path_row()], [[0]], 2.0, "one row return the row if greater than zero"),
(
[get_path_row(), get_path_row()],
[[0, 1]],
2.0,
"Two path with m_normalize equal zero should return an array with both element",
),
(
[get_path_row(m_normalize=-0.5), get_path_row(m_normalize=-1.0)],
[],
2.0,
"Path with only negative elements should return empty array",
),
(
[get_path_row(m_normalize=-0.5), get_path_row(m_normalize=1.0)],
[[1]],
2.0,
"Path with only one positive m_normalize should retunr an array with one element",
),
(
[get_path_row(m_normalize=0.5), get_path_row(m_normalize=0.7)],
[[0, 1]],
2.0,
"Path with two positive m_normalize without increase factor should return an array with both elements id",
),
(
[get_path_row(m_normalize=0.5), get_path_row(m_normalize=1.1)],
[[0], [1]],
2.0,
"Path with two positive m_normalize with increase factor greated should return an array with two array",
),
(
[
get_path_row(m_normalize=0.5),
get_path_row(m_normalize=1.1),
get_path_row(m_normalize=1.5),
],
[[0], [1, 2]],
2.0,
"Path m_normalize [0.5, 1.1, 1.5] should return [[0],[1, 2]]",
),
(
[
get_path_row(m_normalize=0.5),
get_path_row(m_normalize=1.1),
get_path_row(m_normalize=1.5),
],
[[0], [1, 2]],
2.0,
"Path m_normalize [0.5, 1.1, 1.5] should return [[0],[1, 2]]",
),
(
[
get_path_row(m_normalize=0.5),
get_path_row(m_normalize=1.1),
get_path_row(m_normalize=1.5),
get_path_row(m_normalize=2.2),
],
[[0], [1, 2, 3]],
2.0,
"Path m_normalize [0.5, 1.1, 1.5, 2.2] should return [[0],[1, 2, 3]]",
),
(
[
get_path_row(m_normalize=0.5),
get_path_row(m_normalize=1.1),
get_path_row(m_normalize=1.5),
get_path_row(m_normalize=2.4),
],
[[0], [1, 2], [3]],
2.0,
"Path m_normalize [0.5, 1.1, 1.5, 2.4] should return [[0],[1, 2], [3]]",
),
(
[
get_path_row(m_normalize=0.5),
get_path_row(m_normalize=1.1),
get_path_row(m_normalize=1.5),
get_path_row(m_normalize=2.4),
get_path_row(m_normalize=10),
],
[[0], [1, 2], [3], [4]],
2.0,
"Path m_normalize [0.5, 1.1, 1.5, 2.4, 10] should return [[0],[1, 2], [3], [4]",
),
],
)
def test_that_grouping_return_expected_value(
    raw_path, expected_phases, increase_factor, test_comment
):
df_path = pd.DataFrame(raw_path)
hw_phase_grouper = HyperwavePhaseGrouper(increase_factor)
phases = hw_phase_grouper.group(df_path)
    assert expected_phases == phases, test_comment
@pytest.mark.parametrize(
"raw_path, input_group, expected_result, group_min_week, only_group_last_phase, test_comment",
[
(
[get_path_row(weeks=4)],
[[0]],
[[0]],
10,
True,
"one path with weeks lower than should return same input",
),
(
[get_path_row(weeks=4), get_path_row(weeks=4)],
[[1]],
[[1]],
10,
True,
"path with two input path but one group should return one group",
),
(
[get_path_row(weeks=10), get_path_row(weeks=4)],
[[0], [1]],
[[0, 1]],
10,
True,
"path with two input path and two groups should return one group",
),
(
[get_path_row(weeks=10), get_path_row(weeks=4), get_path_row(weeks=3)],
[[0], [1], [2]],
[[0, 1, 2]],
10,
True,
"initial group [[0], [1], [2]] with weeks [10, 4, 3] shoud return group [[0, 1, 2]]",
),
(
[
get_path_row(weeks=10),
get_path_row(weeks=4),
get_path_row(weeks=3),
get_path_row(weeks=4),
],
[[0], [1], [2, 3]],
[[0], [1, 2, 3]],
10,
True,
"initial group [[0], [1], [2, 3]] with weeks [10, 4, 3, 4] shoud return group [[0], [1, 2, 3]]",
),
(
[
get_path_row(weeks=10),
get_path_row(weeks=4),
get_path_row(weeks=7),
get_path_row(weeks=4),
],
[[0], [1], [2], [3]],
[[0, 1], [2, 3]],
10,
False,
"initial group [[0], [1], [2, 3]] with weeks [10, 4, 3, 4] shoud return group [[0], [1, 2, 3]]",
),
(
[
get_path_row(weeks=10),
get_path_row(weeks=4),
get_path_row(weeks=7),
get_path_row(weeks=4),
],
[[0], [1], [2], [3]],
[[0], [1], [2, 3]],
10,
True,
"initial group [[0], [1], [2, 3]] with weeks [10, 4, 3, 4] shoud return group [[0], [1, 2, 3]]",
),
],
)
def test_grouping_second_step_week_base_when_all_weeks_are_enough_long(
raw_path,
input_group,
expected_result,
group_min_week,
only_group_last_phase,
test_comment,
):
df_path = | pd.DataFrame(raw_path) | pandas.DataFrame |
import gc
import itertools
import multiprocessing
import time
from collections import Counter
import numpy as np
import pandas as pd
def create_customer_feature_set(train):
customer_feats = pd.DataFrame()
customer_feats['customer_id'] = train.customer_id
customer_feats['customer_max_ratio'] = train.customer_id / \
np.max(train.customer_id)
customer_feats['index_max_ratio'] = train.customer_id / \
(train.index + 1e-14)
customer_feats['customer_count'] = train.customer_id.map(
train.customer_id.value_counts())
customer_feats['cust_first'] = train.customer_id.apply(
lambda x: int(str(x)[:1]))
customer_feats['cust_2first'] = train.customer_id.apply(
lambda x: int(str(x)[:2]))
customer_feats['cust_3first'] = train.customer_id.apply(
lambda x: int(str(x)[:3]))
customer_feats['cust_4first'] = train.customer_id.apply(
lambda x: int(str(x)[:4]))
customer_feats['cust_6first'] = train.customer_id.apply(
lambda x: int(str(x)[:6]))
# customer_feats.cust_3first = pd.factorize(customer_feats.cust_3first)[0]
customer_feats.drop(['customer_id'], axis=1, inplace=True)
return customer_feats
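# Illustrative usage sketch added for clarity (not part of the original pipeline).
# The customer ids below are invented; the helper only requires a 'customer_id' column.
def _example_customer_feature_set():
    toy = pd.DataFrame({'customer_id': [12345678, 87654321, 12345678]})
    feats = create_customer_feature_set(toy)
    # returned columns include customer_max_ratio, customer_count and the id-prefix features
    return feats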
def create_groupings_feature_set(data, features, transform=True):
    df_features = pd.DataFrame()
from functools import partial
from unittest import TestCase, main as unittest_main
import numpy as np
import pandas as pd
from scipy.special import digamma
from scipy.stats import beta, norm
from gbstats.bayesian.dists import Beta, Norm
DECIMALS = 5
round_ = partial(np.round, decimals=DECIMALS)
def roundsum(x, decimals=DECIMALS):
return np.round(np.sum(x), decimals=decimals)
class TestBeta(TestCase):
def test_posterior(self):
prior = 1, 1
data = 1, 2
result = Beta.posterior(prior, data)
outcome = (2, 2)
for res, out in zip(result, outcome):
self.assertEqual(res, out)
prior = 1, 1
        data = pd.Series([1, 10])
"""
Module containing walk-forward functions
"""
#* Walk-Forward Modeling
from sklearn.linear_model import LinearRegression
from scipy import stats
from sklearn.metrics import make_scorer, mean_squared_error, r2_score
from sklearn.model_selection import GridSearchCV
from src.transform_cv import TimeSeriesSplitMod
from src.transform_cv import DisabledCV, ToConstantTransformer, ToNumpyTransformer
import pandas as pd
from sklearn.preprocessing import PolynomialFeatures
def calculate_r2_wf(y_true, y_pred, y_moving_mean):
"""
Calculate out-of-sample R^2 for the walk-forward procedure
"""
mse_urestricted = ((y_true - y_pred)**2).sum()
mse_restricted = ((y_true - y_moving_mean)**2).sum()
return 1 - mse_urestricted/mse_restricted
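# Hedged illustration (added, not in the original module): out-of-sample R^2 is positive only
# when the forecasts beat the moving-mean benchmark. All numbers below are invented.
def _example_r2_wf():
    y_true = pd.Series([0.010, -0.020, 0.030])
    y_pred = pd.Series([0.012, -0.015, 0.025])   # forecasts close to the realised values
    y_moving_mean = pd.Series([0.0, 0.0, 0.0])   # naive historical-mean benchmark
    return calculate_r2_wf(y_true, y_pred, y_moving_mean)  # > 0 means predictive gain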
def calculate_msfe_adjusted(y_true, y_pred, y_moving_mean):
"""
Calculate t-statistic for the test on significant imporvement in predictions
"""
f = (y_true - y_moving_mean)**2 - ((y_true - y_pred)**2 - (y_moving_mean - y_pred)**2)
t_stat,pval_two_sided = stats.ttest_1samp(f, 0, axis=0)
pval_one_sided = stats.t.sf(t_stat, f.count() - 1)
return t_stat, pval_one_sided
def r2_adj_score(y_true,y_pred,N,K):
"""
Calculate in-sample R^2 that is adjusted for the number of predictors (ols model only)
"""
r2 = r2_score(y_true,y_pred)
return 1-(1-r2)*(N-1)/(N-K-1)
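# Worked sketch (added): the penalty term pulls adjusted R^2 below plain R^2 as K grows.
# N=4 observations and K=2 predictors are arbitrary illustrative choices.
def _example_r2_adj():
    y_true = pd.Series([1.0, 2.0, 3.0, 4.0])
    y_pred = pd.Series([1.1, 1.9, 3.2, 3.8])
    return r2_adj_score(y_true, y_pred, N=4, K=2)   # smaller than r2_score(y_true, y_pred)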
def estimate_walk_forward(config, X, y, start_idx, rolling = False,
tr_win = None, val_win = None, verbose = True):
"""
    Function that estimates walk-forward models using an expanding or rolling window.
    The cross-validation procedure and the type of grid search are determined in the config file.
Please see "model_configs.py" for the model config structure.
Yields
---------
Outputs are pandas dataseries:
- models_estimated - best model estimated for given month using past info
- scores_estimated - scores of the best models
- predictions - predictions of the best models
"""
if verbose == True:
print(config['param_grid'])
max_idx = y.shape[0]
# Generate Interaction Terms
if 'interactions' in config:
if config['interactions'] == True:
X = pd.DataFrame(PolynomialFeatures(degree = 2, interaction_only=False,include_bias = False).fit_transform(X),index = X.index)
# Generate Lags
if 'addlags' in config:
LAGS = config['addlags']
if (type(LAGS) == int) & (LAGS > 0):
temp = X
for lag in range(1,LAGS+1,1):
temp = pd.concat([temp, X.shift(lag).add_suffix('_L{}'.format(lag))], axis = 1)
temp.iloc[0,(X.shape[1]):] = X.iloc[0,:].values
X = temp
# Define outputs
models_estimated = pd.Series(index=X.index[start_idx:])
scores_estimated = pd.Series(index=X.index[start_idx:])
    predictions = pd.Series(index=X.index[start_idx:])
# -*- coding: utf-8 -*-
"""
Created on Thu Jun 4 19:22:40 2020
@author: adria.bove
Library for making plots
"""
# importing libraries
import pickle
import pandas as pd
import matplotlib.pyplot as plt
import datetime as dt
from BBDD import BBDD
import numpy as np
from sklearn.metrics import mean_squared_error, mean_absolute_error
from math import sqrt
import scores_metrics_diagnosis_tools as scores
class plotter:
def lines(self, BBDDs, Columns, frequencies, y_label, title, legend, signs, days_from_today:int=-1, ndays:int=1, save:str='', input_df:list=[]):
colors=['hotpink','cornflowerblue','green','purple','slateblue','firebrick']
# defining color schemes
plt.style.use('seaborn-whitegrid')
if len(input_df)==0:
dfs=[BBDD(i).extract(days_from_today,ndays) for i in BBDDs]
for i in range(len(dfs)):
dfs[i].index=dfs[i]['dt']
else:
dfs=input_df
# data visualization
        # defining figure size
a=plt.figure(figsize=(15,10))
k=0
curves=[]
for i in range(len(dfs)):
for j in Columns:
try:
                    ys = pd.to_numeric(dfs[i][j])
import pandas as pd
import numpy as np
from tkinter import *
from tkintertable import TableCanvas
import matplotlib.pyplot as plt
k_factor = 10
def update_elo(w_elo, l_elo):
exp_win = expected_result(w_elo, l_elo)
change = k_factor * (1-exp_win)
w_elo = w_elo + change
l_elo = l_elo - change
return w_elo, l_elo
def expected_result(elo_a, elo_b):
expect = 1.0/(1+10**((elo_b - elo_a)/400))
return expect
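# Illustrative sketch (not from the original script): a single game moves both ratings by the
# same amount, so the rating pool is conserved. The 1500/1400 starting values are made up.
def _example_elo_update():
    exp_win = expected_result(1500, 1400)     # favourite expected to win ~64% of the time
    new_w, new_l = update_elo(1500, 1400)     # winner gains k_factor * (1 - exp_win)
    assert abs((new_w + new_l) - (1500 + 1400)) < 1e-9
    return exp_win, new_w, new_l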
df_lists = pd.DataFrame()
start_year = 2018
end_year = 2020
# scrapes data from hockeyreference.com
for year in range(start_year, end_year + 1):
k = 1
# 2005 was the lockout so there is no data to be scraped
if year == 2005:
print("2005 was the lockout")
else:
url = r'https://www.hockey-reference.com/leagues/NHL_' + str(year) + r'_games.html'
df_temp_reg = pd.DataFrame(pd.read_html(url)[0])
df_temp_reg['season'] = year
# use commented out code if playoff data is desired
try:
df_temp_post = pd.DataFrame(pd.read_html(url)[1])
df_temp_post['season'] = year
except IndexError as e:
k = 0
print('no playoffs available yet')
print(str(year) + " scraped")
df_lists = df_lists.append(df_temp_reg)
if k == 1:
            df_lists = df_lists.append(df_temp_post)
df_lists.rename(columns={'G': 'VisitingGoals',
'G.1': 'HomeGoals',
'Unnamed: 5': 'OTSO',
'season': 'Season'},
inplace=True)
df_lists.drop(['Att.', 'LOG', 'Notes'], axis=1, inplace=True)
df_lists.loc[:, 'Date'] = pd.to_datetime(df_lists['Date'])
replace_dict = {'Home': {'Atlanta Thrashers': 'Winnipeg Jets',
'Mighty Ducks of Anaheim': 'Anaheim Ducks',
'Phoenix Coyotes': 'Arizona Coyotes'},
                'Visitor': {'Atlanta Thrashers': 'Winnipeg Jets',
'Mighty Ducks of Anaheim': 'Anaheim Ducks',
'Phoenix Coyotes': 'Arizona Coyotes'}}
df_lists.replace(replace_dict, inplace=True)
ind = df_lists['OTSO'].isna()
df_lists.loc[ind, 'OTSO'] = 'REG'
# how come this doesnt work??? teams = df_lists['Home'].unique().sort()
teams = df_lists['Home'].unique()
teams.sort()
games = df_lists.reset_index()
games.drop('index', axis=1, inplace=True)
class TeamElos(dict):
def __init__(self, teams):
super().__init__(self)
for team in teams:
self[team] = 1500
self.history = HistoryList(teams)
def plot_history(self, *args):
fig = plt.figure()
ax = fig.add_subplot(111)
ax.set_ylim([1350,1700])
plt.xticks(rotation=45)
for team in args:
cax = plt.plot(
self.history[team]['Date'],
self.history[team]['Elo Rating'],
label = team)
plt.title(f'{args} Elo History')
plt.legend()
plt.show()
def update(self, game_tuple):
if game_tuple.VisitingGoals > game_tuple.HomeGoals:
winning_team = game_tuple.Visitor
losing_team = game_tuple.Home
else:
winning_team = game_tuple.Home
losing_team = game_tuple.Visitor
self[winning_team], self[losing_team] = update_elo(
self[winning_team],
self[losing_team])
self.history.update(
winning_team = winning_team,
losing_team = losing_team,
win_elo = self[winning_team],
lose_elo = self[losing_team],
date = game_tuple.Date
)
class HistoryList(dict):
def __init__(self, teams):
super().__init__(self)
for team in teams:
self[team] = pd.DataFrame(columns = ['Date', 'Elo Rating'])
def update(self, winning_team, losing_team, win_elo, lose_elo, date):
self[winning_team] = self[winning_team].append(
{'Date': date,'Elo Rating': win_elo},
ignore_index = True)
self[losing_team]= self[losing_team].append(
{'Date': date,'Elo Rating': lose_elo},
ignore_index = True)
elos = TeamElos(teams)
for row in games.itertuples():
if row.Date.date() < pd.Timestamp.today().date():
elos.update(row)
elos.plot_history('Toronto Maple Leafs', 'Montreal Canadiens')
elos = pd.DataFrame(
elos,
index=['Elo Rating']).T.sort_values(by='Elo Rating', ascending=False)
ind = games['Date'] > pd.Timestamp('today')
import numpy as np
import pandas as pd
def sample_SynthA(treat_prob=0.5, sample_no=1000):
"""
Generate synthetic data from Athey and Imbens
"""
noise1 = np.random.normal(0, 0.01, sample_no)
noise0 = np.random.normal(0, 0.01, sample_no)
w = np.random.uniform(0, 1, sample_no)
w = w <= treat_prob
y = np.zeros(sample_no)
model_treatment = lambda eta, x, kappa, noise: eta(x) + 1 / 2 * (kappa(x)) + noise
model_control = lambda eta, x, kappa, noise: eta(x) - 1 / 2 * (kappa(x)) + noise
x = np.random.normal(0, 1, [sample_no, 2])
eta = lambda x: 1 / 2 * x[:, 0] + x[:, 1]
kappa = lambda x: 1 / 2 * x[:, 0]
y1 = model_treatment(eta, x, kappa, noise1)
y0 = model_control(eta, x, kappa, noise0)
y1 = y1.squeeze()
y0 = y0.squeeze()
y[w == 1] = y1[w == 1]
y[w == 0] = y0[w == 0]
tau = y1 - y0
return x, w, y, y1, y0, tau
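# Usage sketch (added, not in the original module): draw a small synthetic sample and check
# the shapes; sample_no=100 is an arbitrary illustrative choice.
def _example_sample_syntha():
    x, w, y, y1, y0, tau = sample_SynthA(treat_prob=0.5, sample_no=100)
    assert x.shape == (100, 2) and tau.shape == (100,)
    return tau.mean()   # empirical average treatment effect of this draw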
def sample_SynthB(train_sample_no=300, test_sample_no=1000):
sample_no = train_sample_no + test_sample_no
X = np.round(np.random.normal(size=(sample_no, 1), loc=66.0, scale=4.1)) # age
X = np.block([X, np.round(
np.random.normal(size=(sample_no, 1), loc=6.2, scale=1.0) * 10.0) / 10.0]) # white blood cell count
X = np.block(
[X, np.round(np.random.normal(size=(sample_no, 1), loc=0.8, scale=0.1) * 10.0) / 10.0]) # Lymphocyte count
X = np.block([X, np.round(np.random.normal(size=(sample_no, 1), loc=183.0, scale=20.4))]) # Platelet count
X = np.block([X, np.round(np.random.normal(size=(sample_no, 1), loc=68.0, scale=6.6))]) # Serum creatinine
X = np.block(
[X, np.round(np.random.normal(size=(sample_no, 1), loc=31.0, scale=5.1))]) # Aspartete aminotransferase
X = np.block([X, np.round(np.random.normal(size=(sample_no, 1), loc=26.0, scale=5.1))]) # Alanine aminotransferase
X = np.block([X, np.round(np.random.normal(size=(sample_no, 1), loc=339.0, scale=51))]) # Lactate dehydrogenase
X = np.block([X, np.round(np.random.normal(size=(sample_no, 1), loc=76.0, scale=21))]) # Creatine kinase
X = np.block([X, np.floor(np.random.uniform(size=(sample_no, 1)) * 11) + 4]) # Time from study 4~14
TIME = X[:, 9]
    X_ = pd.DataFrame(X)
# House Prices EDA
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import scipy.stats as stats
import sklearn.linear_model as linear_model
import seaborn as sns
import xgboost as xgb
import scipy.stats as st
import patsy
from sklearn.model_selection import KFold
from sklearn.manifold import TSNE
from sklearn.cluster import KMeans
from sklearn.decomposition import PCA
from sklearn.preprocessing import StandardScaler
pd.options.display.max_rows = 1000
pd.options.display.max_columns = 20
train = pd.read_csv('train.csv')
test = pd.read_csv('test.csv')
quantitative = [f for f in train.columns if train.dtypes[f] != 'object']
quantitative.remove('SalePrice')
quantitative.remove('Id')
qualitative = [f for f in train.columns if train.dtypes[f] == 'object']
missing = train.isnull().sum()
missing = missing[missing > 0]
missing.sort_values(inplace=True)
missing.plot.bar()
plt.show()
y = train['SalePrice']
plt.figure(1)
plt.title('Johnson SU')
sns.distplot(y, kde=False, fit=st.johnsonsu)
plt.figure(2)
plt.title('Normal')
sns.distplot(y, kde=False, fit=st.norm)
plt.figure(3)
plt.title('Log Normal')
sns.distplot(y, kde=False, fit=st.lognorm)
plt.show()
def test_normality(x): return stats.shapiro(x.fillna(0))[1] < 0.01
normal = pd.DataFrame(train[quantitative])
normal = normal.apply(test_normality)
print(not normal.any())
f = pd.melt(train, value_vars=quantitative)
g = sns.FacetGrid(f, col="variable", col_wrap=2, sharex=False, sharey=False)
g = g.map(sns.distplot, "value")
plt.show()
for c in qualitative:
train[c] = train[c].astype('category')
if train[c].isnull().any():
train[c] = train[c].cat.add_categories(['MISSING'])
train[c] = train[c].fillna('MISSING')
def boxplot(x, y, **kwargs):
    sns.boxplot(x=x, y=y)
    x = plt.xticks(rotation=90)
f = pd.melt(train, id_vars=['SalePrice'], value_vars=qualitative)
g = sns.FacetGrid(
f, col="variable", col_wrap=2, sharex=False, sharey=False, size=5)
g = g.map(boxplot, "value", "SalePrice")
plt.show()
def anova(frame):
    anv = pd.DataFrame()
"""
Created on Mon Jul 30 20:39:24 2018
@author: <NAME>
"""
import pandas as pd
import time
import datetime as datetime
#from sklearn.model_selection import train_test_split
#from sklearn.ensemble import RandomForestRegressor
#import numpy as np
from db import get_db
begin = time.time()
###############################################################################
###############################################################################
# Ingest all data up to end of 2018 season, clean up, and get required fields
###############################################################################
###############################################################################
# rsgc = regular season games compact, rsgd = regular season games detailed
rsgc = pd.read_csv('/Users/Ryan/Google Drive/ncaa-basketball-data/2018-kaggle-update/RegularSeasonCompactResults.csv')
rsgd = pd.read_csv('/Users/Ryan/Google Drive/ncaa-basketball-data/2018-kaggle-update/RegularSeasonDetailedResults.csv')
seasons = pd.read_csv(filepath_or_buffer = '/Users/Ryan/Desktop/DataFiles/Seasons.csv')
teams = pd.read_csv(filepath_or_buffer = '/Users/Ryan/Desktop/DataFiles/Teams.csv')
# Merge in day 0 to rsgc & rsgd, add days to day 0 to get date of game, delete extra columns
rsgc = pd.merge(rsgc,seasons[['Season','DayZero']],on='Season')
rsgc['DayZero'] = pd.to_datetime(rsgc['DayZero'],format='%m/%d/%Y')
rsgc['DayNum'] = pd.to_timedelta(rsgc['DayNum'],unit='d')
rsgc['GameDate'] = rsgc['DayZero'] + rsgc['DayNum']
del rsgc['DayNum'], rsgc['DayZero']
rsgd = pd.merge(rsgd,seasons[['Season','DayZero']],on='Season')
rsgd['DayZero'] = pd.to_datetime(rsgd['DayZero'],format='%m/%d/%Y')
rsgd['DayNum'] = pd.to_timedelta(rsgd['DayNum'],unit='d')
rsgd['GameDate'] = rsgd['DayZero'] + rsgd['DayNum']
del rsgd['DayNum'], rsgd['DayZero']
# Merge together compact and detailed results when possible, delete old dataframes
rsg = pd.merge(left = rsgc, right = rsgd, how = 'left', on = ['GameDate','Season','WTeamID','LTeamID',
'WScore','LScore','WLoc','NumOT'])
del rsgc, rsgd
# Create detailedgame field in rsg to indicate if the game has details or not
rsg['DetailedGame'] = 0
rsg.loc[(rsg['WFGM'] > 0),'DetailedGame'] = 1
# Create high-level counts of games, detailed games, and a count missing details
rsg_summary1 = rsg[['Season','GameDate']].groupby(['Season']).agg('count').reset_index()
rsg_summary2 = rsg[['Season','DetailedGame']].groupby(['Season']).agg('sum').reset_index()
rsg_summary = pd.merge(rsg_summary1,rsg_summary2,how = 'inner', on = ['Season'])
rsg_summary = rsg_summary.rename(columns={'GameDate':'GameCount','DetailedGame':'DetailedGameCount'})
rsg_summary['MissingDetails'] = rsg_summary['GameCount'] - rsg_summary['DetailedGameCount']
del rsg_summary1, rsg_summary2, rsg['DetailedGame']
# Duplicate rsg into loser rsg
lrsg = rsg.copy()
# Rename columns for rsg
rsg = rsg.rename(columns = {'WTeamID':'TmID','WScore':'TmPF','LTeamID':'OppID','LScore':'OppPF','WLoc':'TmLoc'})
rsg = rsg.rename(columns = {'WFGM':'TmFGM','WFGA':'TmFGA','WFGM3':'TmFGM3','WFGA3':'TmFGA3','WFTM':'TmFTM','WFTA':'TmFTA'})
rsg = rsg.rename(columns = {'WOR':'TmOR','WDR':'TmDR','WAst':'TmAst'})
rsg = rsg.rename(columns = {'WTO':'TmTO','WStl':'TmStl','WBlk':'TmBlk','WPF':'TmFoul'})
rsg = rsg.rename(columns = {'LFGM':'OppFGM','LFGA':'OppFGA','LFGM3':'OppFGM3','LFGA3':'OppFGA3','LFTM':'OppFTM','LFTA':'OppFTA'})
rsg = rsg.rename(columns = {'LOR':'OppOR','LDR':'OppDR','LAst':'OppAst'})
rsg = rsg.rename(columns = {'LTO':'OppTO','LStl':'OppStl','LBlk':'OppBlk','LPF':'OppFoul'})
rsg['TmWin'] = 1
# Rename columns for lrsg
lrsg = lrsg.rename(columns = {'WTeamID':'OppID','WScore':'OppPF','LTeamID':'TmID','LScore':'TmPF'})
lrsg = lrsg.rename(columns = {'WFGM':'OppFGM','WFGA':'OppFGA','WFGM3':'OppFGM3','WFGA3':'OppFGA3','WFTM':'OppFTM','WFTA':'OppFTA'})
lrsg = lrsg.rename(columns = {'WOR':'OppOR','WDR':'OppDR','WAst':'OppAst'})
lrsg = lrsg.rename(columns = {'WTO':'OppTO','WStl':'OppStl','WBlk':'OppBlk','WPF':'OppFoul'})
lrsg = lrsg.rename(columns = {'LFGM':'TmFGM','LFGA':'TmFGA','LFGM3':'TmFGM3','LFGA3':'TmFGA3','LFTM':'TmFTM','LFTA':'TmFTA'})
lrsg = lrsg.rename(columns = {'LOR':'TmOR','LDR':'TmDR','LAst':'TmAst'})
lrsg = lrsg.rename(columns = {'LTO':'TmTO','LStl':'TmStl','LBlk':'TmBlk','LPF':'TmFoul'})
lrsg['TmWin'] = 0
# Adjust locations in loser rsg
lrsg.loc[(lrsg['WLoc'] == 'H'),'TmLoc'] = 'A'
lrsg.loc[(lrsg['WLoc'] == 'A'),'TmLoc'] = 'H'
lrsg.loc[(lrsg['WLoc'] == 'N'),'TmLoc'] = 'N'
del lrsg['WLoc']
# Append lrsg to rsg, delete lrsg,
rsg = rsg.append(lrsg)
del lrsg
# Bring in team names for both Tm and Opp
rsg = pd.merge(rsg,teams[['TeamID','TeamName']],left_on='TmID',right_on='TeamID')
del rsg['TeamID']
rsg = rsg.rename(columns = {'TeamName':'TmName'})
rsg = pd.merge(rsg,teams[['TeamID','TeamName']],left_on='OppID',right_on='TeamID')
del rsg['TeamID']
rsg = rsg.rename(columns = {'TeamName':'OppName'})
# Add countable field for number of games
rsg['TmGame'] = 1
# Add field for number of minutes
rsg['GameMins'] = 40 + rsg['NumOT']*5
# Add field for Total Rebounds
rsg['TmTR'] = rsg['TmOR'] + rsg['TmDR']
rsg['OppTR'] = rsg['OppOR'] + rsg['OppDR']
# Count number of FGA2/FGM2
rsg['TmFGM2'] = rsg['TmFGM'] - rsg['TmFGM3']
rsg['TmFGA2'] = rsg['TmFGA'] - rsg['TmFGA3']
rsg['OppFGM2'] = rsg['OppFGM'] - rsg['OppFGM3']
rsg['OppFGA2'] = rsg['OppFGA'] - rsg['OppFGA3']
# Calculate field goal percentages in each game
rsg['TmFGPct'] = rsg['TmFGM'] / rsg['TmFGA']
rsg['TmFG3Pct'] = rsg['TmFGM3'] / rsg['TmFGA3']
rsg['TmFG2Pct'] = rsg['TmFGM2'] / rsg['TmFGA2']
rsg['TmFTPct'] = rsg['TmFTM'] / rsg['TmFTA']
rsg['OppFGPct'] = rsg['OppFGM'] / rsg['OppFGA']
rsg['OppFG3Pct'] = rsg['OppFGM3'] / rsg['OppFGA3']
rsg['OppFG2Pct'] = rsg['OppFGM2'] / rsg['OppFGA2']
rsg['OppFTPct'] = rsg['OppFTM'] / rsg['OppFTA']
# Calculate game margin
rsg['TmMargin'] = rsg['TmPF'] - rsg['OppPF']
rsg['OppMargin'] = -rsg['TmMargin']
# Two prefixes: Tm (Team) and Opp (Opponent)
# Core metrics: PF (points for); Margin; FGM (field goals made); FGA (field goals attempted); FGPct (Field goal percent)
# cont...FGM3 (3pt field goals made); FGA3 (3pt field goals attempted); FG3Pct (3pt field goal percent)
# cont...FGM2 (2pt field goals made); FGA2 (2pt field goals attempted); FG2Pct (2pt field goal percent)
# cont...Ast (assists); OR (offensive rebounds); DR (defensive rebounds); TR (total rebounds)
# cont...FTA (free throws attempted); FTM (free throws made); FTPct (Free throw percent)
# cont...TO (turnovers); Stl (steals); Blk (blocks); Foul (foul)
metrics = ['PF','Margin','FGM','FGA',
'FGM3','FGA3','FGM2','FGA2','Ast','OR','DR','TR',
'FTA','FTM','TO','Stl','Blk','Foul']
# Getting in game per-40 for latter opponent adjusting
for x in {'Opp','Tm'}:
for column in metrics:
rsg[x + column + 'per40'] = rsg[x + column] / rsg['GameMins'] * 40
del column, x
# Create summable fields
summables = ['GameMins','TmWin','TmGame']
for x in {'Opp','Tm'}:
for column in metrics:
summables.append(x + column)
del column, x
# Create seasonteams dataframe, getting in season stats
seasonteams = rsg.groupby(['TmID','TmName','Season'])[summables].sum().reset_index()
# Per-40 adjust the season stats, for later compare to in-game stats
for x in {'Opp','Tm'}:
for column in metrics:
seasonteams[x + column + 'per40'] = seasonteams[x + column] / seasonteams['GameMins'] * 40
del column, x
# Calculate season-long percentages
seasonteams['TmFGPct'] = seasonteams['TmFGM'] / seasonteams['TmFGA']
seasonteams['TmFG3Pct'] = seasonteams['TmFGM3'] / seasonteams['TmFGA3']
seasonteams['TmFG2Pct'] = seasonteams['TmFGM2'] / seasonteams['TmFGA2']
seasonteams['TmFTPct'] = seasonteams['TmFTM'] / seasonteams['TmFTA']
seasonteams['OppFGPct'] = seasonteams['OppFGM'] / seasonteams['OppFGA']
seasonteams['OppFG3Pct'] = seasonteams['OppFGM3'] / seasonteams['OppFGA3']
seasonteams['OppFG2Pct'] = seasonteams['OppFGM2'] / seasonteams['OppFGA2']
seasonteams['OppFTPct'] = seasonteams['OppFTM'] / seasonteams['OppFTA']
# Double Check for columns showing up in both
#rsg_cols = pd.DataFrame(list(rsg)).reset_index()
#seasonteams_cols = pd.DataFrame(list(seasonteams)).reset_index()
#col_diffs = pd.merge(rsg_cols, seasonteams_cols, on=[0],how='outer')
# Benchmark time
poatime = time.time()-begin
if poatime < 60:
print('Pre-Opponent-Adjust Time: ' + str(round((poatime),2)) + ' sec')
else:
print('Pre-Opponent-Adjust Time: ' + str(round((poatime)/60,2)) + ' min')
###############################################################################
###############################################################################
# Define opponentadjust UDF (for ease of opponent-adjusting metrics)
###############################################################################
###############################################################################
def opponentadjust(OAmetric):
global rsg, seasonteams
# Figure out the prefix, core metric, for use later
if OAmetric[:2] == 'Tm':
prefix = OAmetric[:2]
otherprefix = 'Opp'
coremetric = OAmetric[2:]
if OAmetric[:3] == 'Opp':
prefix = OAmetric[:3]
otherprefix = 'Tm'
coremetric = OAmetric[3:]
# print (coremetric + prefix)
# From iteams put average PF into opponent side of irsg
# Example, Opp_AvgPF_Against, Opp_AvgPA_Against
# If I am OAing TmPFper40 (my offense proficiency), I want to get OppPFper40 for my opponent
# So, for a TmName, get their OppPFper40, aka their PAper40
tempseasonteams = seasonteams[['TmName','Season',otherprefix+coremetric]]
# Rename my opponent's metric to say it's *their* average <insert metric>
# Rename to OppAvg_OppScoreper40 (it's my opponent's average opponents (me) score per 40)
tempseasonteams = tempseasonteams.rename(columns = {otherprefix+coremetric:'OppAvg_'+otherprefix+coremetric})
# Merge in this info into irsg, for the opponent in irsg
    rsg = pd.merge(rsg,tempseasonteams,left_on=['OppName','Season'],right_on=['TmName','Season'],how='left',suffixes=('','_y'))
from copy import deepcopy
import os
import pandas as pd
import re
import string
data_columns = {"id": int, "text": str, "source": str, "user_id": str, "truncated": str, "in_reply_to_status_id": str,
"in_reply_to_user_id": str, "in_reply_to_screen_name": str, "retweeted_status_id": str, "geo": str,
"place": str, "contributors": str, "retweet_count": int, "reply_count": str, "favorite_count": str,
"favorited": str, "retweeted": str, "possibly_sensitive": str, "num_hashtags": int, "num_urls": str,
"num_mentions": str, "created_at": str, "timestamp": str, "crawled_at": str, "updated": str}
def remove_url(tweet):
"""
    Regex-based URL remover. Replaces each non-whitespace run starting with "http"
    (up to the next whitespace) with the token "URL".
    :param tweet: Tweet to be checked
    :return: Tweet with each URL replaced by the token "URL"
"""
return re.sub(r"http\S+", "URL", tweet)
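# Minimal illustration (added sketch, not original code): every http... token is replaced.
def _example_remove_url():
    return remove_url("check this out https://example.com/post now")  # -> "check this out URL now"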
def clean_and_write_tweets(path, category):
"""
Cleans and writes the tweets to a file
:param path: Path to file
:param category: Category of the tweet
:return: None
"""
table = str.maketrans({key: None for key in string.punctuation})
    test_csv = pd.read_csv(path, dtype=data_columns)
"""
This module contains all functions that help to clean raw data, generate engineered features,
general helper functions, plotting functions, etc.
"""
import warnings
import datetime as dt
import pandas as pd
from pandas import concat
import numpy as np
import matplotlib.pyplot as plt
warnings.filterwarnings('ignore')
######### DATA CLEANING + FEATURE ENGINEERING ###############
def read_raw_data(path, list_year):
"""
This function is used to read all *.csv files and concatinate them into one single dataframe
@param path: path directory contains raw data
@param list_year: list containing all years that we are interested in to build model
@return data_raw: a dataframe containing raw data of all years (concatinate all *.csv file)
"""
try:
assert path != " "
assert list_year != []
except AssertionError as exp:
exp.args += ('Path and list_year must not be empty', "check read_raw_data function")
raise
all_files = [path + str(year) + ".csv" for year in list_year]
current_dataframe = []
for filename in all_files:
temp = pd.read_csv(filename, index_col=None, header=0)
current_dataframe.append(temp)
data_raw = pd.concat(current_dataframe, axis=0, ignore_index=True)
return data_raw
def concat_name_county(name):
"""
This function is used to concat a string of words by putting underscore between words
example: "new york steuben" --> "new_york_steuben"
@param name: string of raw name
@return concat_name: concated words of string by underscore
"""
try:
assert name != ""
except AssertionError as exp:
exp.args += ('input must not be a empty string', "check concat_name_county function")
raise
name_vector = str(name).split(" ")
concat_name = ""
for i in name_vector:
if i in [" ", ""]:
continue
else:
concat_name = concat_name + "_" + str(i)
return concat_name[1:].strip()
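# Quick illustrative check (added): mirrors the docstring example and shows that extra
# whitespace is dropped before words are joined with underscores.
def _example_concat_name_county():
    assert concat_name_county("new york steuben") == "new_york_steuben"
    return concat_name_county("  los  angeles ")   # -> "los_angeles"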
def compute_lag_time_series_features(df_feature, lag_time=30):
"""
    This function is used to compute lag features, i.e., we look at the Air Quality Index of previous
    days (looking back `lag_time` days) and take the historical AQIs as input features for the model.
    @param df_feature: dataframe containing basic features such as date,
    AQI of one day, state, county name, etc.
@param lag_time: how many days we want to look back
@return lag_features: dataframe contains all lag features, which are historical AQI.
"""
assert df_feature.shape[0] >= 1
try:
temps = df_feature.sort_values(by=["date"])["AQI"]
dataframe = temps
col_name = ['AQI']
for lag_index in range(1, lag_time):
dataframe = concat([temps.shift(lag_index), dataframe], axis=1)
col_name.append('lag_' + str(lag_index))
dataframe.columns = col_name
if dataframe.shape[0] < lag_time:
lag_features = dataframe.iloc[-1:, :]
lag_features = lag_features.fillna(0)
else:
lag_features = dataframe.iloc[lag_time-1:, :]
except:
raise AttributeError("FEATURE DATAFRAME IS EMPTY !!!!!")
return lag_features
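# Hedged sketch (added): what the lag construction returns for a toy AQI series. The AQI
# values are invented purely to show the shifted-column layout with lag_time=3.
def _example_lag_features():
    toy = pd.DataFrame({"date": pd.date_range("2019-01-01", periods=5),
                        "AQI": [50, 55, 60, 58, 52]})
    return compute_lag_time_series_features(toy, lag_time=3)   # 3 rows, columns AQI/lag_1/lag_2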
def data_cleaning(data_raw):
"""
This function is used to take raw air pollution data each year
and clean it before feature engineering
@param data_raw: raw data read in from .csv file
@return data_raw: return cleaned dataframe
"""
try:
assert data_raw.shape != (0, 0)
assert "State Name" in data_raw.columns
assert "county Name" in data_raw.columns
assert "Date" in data_raw.columns
except AssertionError as exp:
exp.args += ('data_raw must not be empty or missing columns',
"check data_cleaning function")
raise
data_raw["State Name"] = data_raw["State Name"].apply(lambda x: x.lower().strip())
data_raw["State Name"] = data_raw["State Name"].apply(concat_name_county)
data_raw["county Name"] = data_raw["county Name"].apply(lambda x: x.lower().strip())
data_raw["county Name"] = data_raw["county Name"].apply(concat_name_county)
data_raw["state_county"] = data_raw["State Name"] + "_" + data_raw["county Name"]
data_raw["state_county"] = data_raw["state_county"].apply(lambda x: x.lower())
data_raw["date"] = data_raw["Date"].apply(lambda x: dt.datetime.strptime(x, '%Y-%m-%d').date())
data_raw = data_raw.rename(columns={"State Name": "State", "county Name": "County"})
return data_raw
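# Hypothetical usage (added sketch): a minimal raw frame with the three required columns.
def _example_data_cleaning():
    raw = pd.DataFrame({"State Name": ["New York "], "county Name": ["Steuben"],
                        "Date": ["2019-01-01"]})
    return data_cleaning(raw)   # adds 'state_county' and a parsed 'date' column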
def feature_engineering_for_aqi(data, lag_time=30, county_name="", save_path=""):
"""
This function is used to generate features for train dataset
@param data: raw data from 2016 to 2018
@param lag_time: how many days we want to look back to see the AQI pattern in history.
@param county_name: the county that is of our interest
@param save_path: where to save our features
    @return dict: contains all info about the output data such as column names, save path, etc.
"""
try:
df_state = data[data["state_county"] == county_name]
except:
raise AttributeError("DATAFRAME IS EMPTY !!!!!")
try:
df_state["date"] = df_state["Date"].apply(pd.to_datetime)
df_state["current_date"] = df_state["date"].dt.day
df_state["current_month"] = df_state["date"].apply(lambda x: x.month)
df_state['day_of_week'] = df_state['date'].dt.weekday_name
day_df = pd.get_dummies(df_state["day_of_week"], prefix="day")
df_temp = pd.concat([df_state, day_df], axis=1)
df_feature = df_temp[list(day_df.columns) +
["AQI", "current_date", "current_month", "date"]
]
df_feature = df_feature.sort_values(by=["date"])
df_lag_features = compute_lag_time_series_features(df_feature)
row = np.min([df_feature.shape[0]-1, lag_time-1])
df_data = (
pd.concat([df_lag_features.drop(["AQI"], axis=1),
df_feature.drop(["date"], axis=1).iloc[row:, :]], axis=1))
if save_path:
path = save_path + county_name + "_feature.csv"
print("---> Saving features to {}".format(path))
df_data.to_csv(path, index=False)
except:
raise AttributeError("DATAFRAME IS EMPTY !!!!!")
return {"successive code": 1,
"save_path": save_path,
"feature_names": df_data.columns,
"data": df_data}
def data_feature_engineering_for_test(data2019, county, predicted_date):
"""
This function is used to generate feature engineering for test data.
@param data2019: dataframe loaded from .csv file of 2019,
since we use data from 2019 as our test data
@param county: the county that we are interested in
    @param predicted_date: the date we are predicting for
@return data_feature_temp: return features that are ready to input to model.
"""
## prepare data for specific county and specific date
try:
data_state = data2019[data2019["state_county"] == county]
except:
raise AttributeError(
"DATAFRAME IS EMPTY!!! check data_feature_engineering_for_test function"
)
data_state["predicted_date"] = | pd.to_datetime(predicted_date) | pandas.to_datetime |
import databricks.koalas as ks
import pandas as pd
import pytest
from pandas.testing import assert_frame_equal
from gators.data_cleaning.drop_datatype_columns import DropDatatypeColumns
@pytest.fixture
def data():
    X = pd.DataFrame({"A": [1, 2], "B": [1.0, 2.0], "C": ["q", "w"]})
import torch
from torch import nn
from torch import optim
from torch.utils.data import TensorDataset,DataLoader
from sklearn.preprocessing import StandardScaler,MinMaxScaler
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
import random
from tqdm import tqdm
import warnings
warnings.simplefilter('ignore')
class WADDA(nn.Module):
def __init__(self,src_x,src_y,tgt_x,tgt_y,
normalize_idx_list = None,
limit_y_range = False,
train_stage_1_epochs = 3000,
train_stage_2_epochs = 10000,
):
super().__init__()
'''
        src_x : x of the simulated (source-domain) data, type: pd.DataFrame()
        src_y : y of the simulated (source-domain) data, type: pd.DataFrame()
        tgt_x : x of the real (target-domain) data, type: pd.DataFrame()
        tgt_y : y of the real (target-domain) data, type: pd.DataFrame()
        normalize_idx_list : tells the model which output columns must sum to 1
        limit_y_range : restrict the output range
'''
# config
self.device = 'cpu'
self.x_col = src_x.columns.tolist()
self.y_col = src_y.columns.tolist()
self.α = 0.00005
self.c = 0.01
self.m = 64
self.ncritic = 5
self.input_dim = src_x.shape[1]
self.output_dim = src_y.shape[1]
self.normalize_idx_list = normalize_idx_list
self.limit_y_range = limit_y_range
self.train_stage_1_epochs = train_stage_1_epochs
self.train_stage_2_epochs = train_stage_2_epochs
# scaled feature
self.scaler_x = StandardScaler().fit(src_x.loc[:])
src_x.loc[:] = self.scaler_x.transform(src_x.loc[:])
tgt_x.loc[:] = self.scaler_x.transform(tgt_x.loc[:])
# scaled output if you need
if self.limit_y_range == True:
self.scaler_y = MinMaxScaler().fit(src_y.loc[:])
src_y.loc[:] = self.scaler_y.transform(src_y.loc[:])
            tgt_y.loc[:] = self.scaler_y.transform(tgt_y.loc[:])
# pd.DataFrame -> torch.FloatTensor
src_x,src_y = torch.FloatTensor(src_x.values),torch.FloatTensor(src_y.values)
tgt_x,tgt_y = torch.FloatTensor(tgt_x.values),torch.FloatTensor(tgt_y.values)
# make two dataset
self.src_dataset = TensorDataset(src_x,src_y)
self.tgt_dataset = TensorDataset(tgt_x,tgt_y)
# src data encoder
self.SRC_F = nn.Sequential(
nn.Linear(self.input_dim,128),
nn.ReLU(),
nn.Linear(128,128),
nn.ReLU(),
)
# target data encoder
self.TGT_F = nn.Sequential(
nn.Linear(self.input_dim,128),
nn.ReLU(),
nn.Linear(128,128),
nn.ReLU(),
)
# regression head
self.regression = nn.Sequential(
nn.Linear(128,128),
nn.Dropout(0.25),# more robust
nn.ReLU(),
nn.Linear(128,self.output_dim),
)
if limit_y_range == True:
self.regression = nn.Sequential(self.regression,nn.Sigmoid())
# regression loss function
self.reg_loss = nn.SmoothL1Loss()
        # The last layer of the discriminator has no sigmoid (reference: Wasserstein GAN)
        self.discriminator = nn.Sequential(nn.Linear(128,1)) # linear classifier
        # optimizer for train_stage_1 (regression training)
self.S_optimizer = optim.Adam(self.SRC_F.parameters(),lr=1e-4)
self.R_optimizer = optim.Adam(self.regression.parameters(),lr=1e-4)
        # optimizer for train_stage_2 (GAN training): avoid momentum-based optimizers (momentum, Adam); RMSProp is recommended, SGD also works (reference: Wasserstein GAN)
self.T_optimizer = optim.RMSprop(self.TGT_F.parameters(),lr=self.α)
self.D_optimizer = optim.RMSprop(self.discriminator.parameters(),lr=self.α)
def forward(self,src_x,tgt_x):
        src_feat,tgt_feat = self.SRC_F(src_x),self.TGT_F(tgt_x) # feature extraction
        src_reg,tgt_reg = self.regression(src_feat),self.regression(tgt_feat) # regression predictions
        src_domain,tgt_domain = self.discriminator(src_feat),self.discriminator(tgt_feat) # discriminator separates the domains
return src_reg,src_domain,tgt_reg,tgt_domain
def train_S_R(self,src_x,src_y):
'''
input : src_x(FloatTensor),src_y(FloatTensor)
output : loss(Scalar)
        update_method : standard supervised learning
'''
self.SRC_F.train()
self.regression.train()
# forward
src_feat = self.SRC_F(src_x)
y_hat = self.regression(src_feat)
# compute loss
loss = self.reg_loss(y_hat,src_y).mean()
loss.backward()
# update weight
self.S_optimizer.step()
self.R_optimizer.step()
self.S_optimizer.zero_grad()
self.R_optimizer.zero_grad()
return loss.item()
def train_T_D(self,src_x,tgt_x,tgt_y):
'''
input: src_x(FloatTensor),tgt_x(FloatTensor),tgt_y(FloatTensor)
return :d_loss(Scalar),t_loss(Scalar),r_loss(Scalar)
'''
        # Do not take the log of the generator/discriminator losses (reference: Wasserstein GAN)
# train discriminator ncritic times
for i in range(self.ncritic):
src_feat = self.SRC_F(src_x).detach()
tgt_feat = self.TGT_F(tgt_x).detach()
d_loss = -torch.mean(self.discriminator(src_feat)) + torch.mean(self.discriminator(tgt_feat))
d_loss.backward()
self.D_optimizer.step()
self.D_optimizer.zero_grad()
            # After every discriminator update, clamp its parameters' absolute values to a fixed constant c
for p in self.discriminator.parameters():
p.data.clamp_(-self.c,self.c)
# train TGT_F
tgt_feat = self.TGT_F(tgt_x)
t_loss = -torch.mean(self.discriminator(tgt_feat))
t_loss.backward()
self.T_optimizer.step()
self.T_optimizer.zero_grad()
# train regression
tgt_reg = self.regression(tgt_feat.detach())
r_loss = self.reg_loss(tgt_reg,tgt_y).mean()
r_loss.backward()
self.R_optimizer.step()
self.R_optimizer.zero_grad()
return d_loss.item(),t_loss.item(),r_loss.item()
def train_stage_1(self,num_epoch=3000,log_interval=100):
history = []
for ep in tqdm(range(num_epoch)):
idx = random.sample([*range(len(self.src_dataset))],self.m)
src_x,src_y = self.src_dataset[idx]
loss = self.train_S_R(src_x,src_y)
history.append(loss)
if ep % log_interval == 0:
print("ep:{} loss:{}".format(ep,loss))
plt.plot(history,label='train_loss')
plt.legend()
plt.show()
def train_stage_2(self,num_epoch=10000,log_interval=100):
d_history = []
t_history = []
r_history = []
for ep in tqdm(range(num_epoch)):
tgt_idx = random.sample([*range(len(self.tgt_dataset))],self.m)
src_idx = random.sample([*range(len(self.src_dataset))],self.m)
tgt_x,tgt_y = self.tgt_dataset[tgt_idx]
src_x,src_y = self.src_dataset[src_idx]
d_loss,t_loss,r_loss = self.train_T_D(src_x,tgt_x,tgt_y)
d_history.append(d_loss)
t_history.append(t_loss)
r_history.append(r_loss)
if ep % log_interval == 0:
print("ep:{} d_loss:{} t_loss:{} r_loss:{}".format(ep,d_loss,t_loss,r_loss))
plt.plot(d_history,label='d_loss')
plt.plot(t_history,label='t_loss')
plt.plot(r_history,label='r_loss')
plt.legend()
plt.show()
def train(self,log_interval=100):
print('start train')
self.train_stage_1(self.train_stage_1_epochs,log_interval)
self.train_stage_2(self.train_stage_2_epochs,log_interval)
print('end train')
@staticmethod
def normalize(x):
'''
x : pandas.DataFrame()
return : normalize x
'''
x_idx,x_col = x.index,x.columns
x = x.values
x = x / x.sum(axis=1).reshape(-1,1)
return pd.DataFrame(x,index=x_idx,columns=x_col)
def predict(self,tgt_x):
'''
input: pd.DataFrame()
output: pd.DataFrame()
'''
self.TGT_F.eval()
self.regression.eval()
tgt_x = self.scaler_x.transform(tgt_x)
tgt_x = torch.FloatTensor(tgt_x)
tgt_feat = self.TGT_F(tgt_x)
tgt_reg = self.regression(tgt_feat).detach().cpu().numpy()
        tgt_reg = pd.DataFrame(tgt_reg,columns=self.y_col)
# ---
# jupyter:
# jupytext:
# formats: ipynb,py:light
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.4.2
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
from google.cloud import bigquery
# %reload_ext google.cloud.bigquery
client = bigquery.Client()
# %load_ext google.cloud.bigquery
# +
from notebooks import parameters
DATASET = parameters.LATEST_DATASET
LOOKUP_TABLES = parameters.LOOKUP_TABLES
print(f"Dataset to use: {DATASET}")
print(f"Lookup tables: {LOOKUP_TABLES}")
# +
import warnings
warnings.filterwarnings('ignore')
import pandas as pd
import matplotlib.pyplot as plt
import os
plt.style.use('ggplot')
pd.options.display.max_rows = 999
pd.options.display.max_columns = 999
pd.options.display.max_colwidth = 999
def cstr(s, color='black'):
return "<text style=color:{}>{}</text>".format(color, s)
# -
cwd = os.getcwd()
cwd = str(cwd)
print("Current working directory is: {cwd}".format(cwd=cwd))
# ### Get the list of HPO IDs
#
# ### NOTE: This assumes that all of the relevant HPOs have a person table.
hpo_id_query = f"""
SELECT REPLACE(table_id, '_person', '') AS src_hpo_id
FROM
`{DATASET}.__TABLES__`
WHERE table_id LIKE '%person'
AND table_id
NOT LIKE '%unioned_ehr_%'
AND table_id NOT LIKE '\\\_%'
"""
site_df = pd.io.gbq.read_gbq(hpo_id_query, dialect='standard')
get_full_names = f"""
select * from {LOOKUP_TABLES}.hpo_site_id_mappings
"""
full_names_df = pd.io.gbq.read_gbq(get_full_names, dialect='standard')
# +
full_names_df.columns = ['org_id', 'src_hpo_id', 'site_name', 'display_order']
columns_to_use = ['src_hpo_id', 'site_name']
full_names_df = full_names_df[columns_to_use]
full_names_df['src_hpo_id'] = full_names_df['src_hpo_id'].str.lower()
# +
cols_to_join = ['src_hpo_id']
site_df = pd.merge(site_df, full_names_df, on=['src_hpo_id'], how='left')
# -
# # There should not be duplicate rows.
# ## visit_occurrence table
# +
######################################
print('Getting the data from the database...')
######################################
foreign_key_df = pd.io.gbq.read_gbq('''
SELECT
src_hpo_id,
person_id, visit_concept_id, visit_start_date, visit_start_datetime, visit_end_date, visit_end_datetime,
visit_type_concept_id, provider_id, care_site_id, visit_source_value, visit_source_concept_id,
admitting_source_concept_id, admitting_source_value, discharge_to_concept_id,
discharge_to_source_value, preceding_visit_occurrence_id,
COUNT(*) as cnt
FROM
`{DATASET}.unioned_ehr_visit_occurrence` AS t1
INNER JOIN
(SELECT
DISTINCT *
FROM
`{DATASET}._mapping_visit_occurrence`) AS t2
ON
t1.visit_occurrence_id=t2.visit_occurrence_id
WHERE
t1.visit_concept_id!=0 AND t1.visit_concept_id IS NOT NULL AND
t1.person_id!=0 and t1.person_id IS NOT NULL
GROUP BY
1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17
HAVING
COUNT(*) > 1
ORDER BY
1,2,3,4,5,6,7,8,9
'''.format(DATASET=DATASET),
dialect='standard')
print(foreign_key_df.shape[0], 'records received.')
# -
foreign_key_df.head()
visit_occurrence = foreign_key_df.groupby(
['src_hpo_id']).size().reset_index().rename(columns={
0: 'visit_occurrence'
}).sort_values(["visit_occurrence"]).set_index("src_hpo_id")
visit_occurrence = visit_occurrence.reset_index()
visit_occurrence
# ## condition_occurrence table
#
# #### NOTE: have to cast as date for the datetime objects to avoid a runtime error - temporary fix for a larger issue
# +
condition_query = f"""
SELECT
src_hpo_id,
person_id, condition_concept_id, condition_start_date,
CAST(condition_start_datetime AS DATE) as condition_start_datetime, condition_end_date,
CAST(condition_end_datetime AS DATE) as condition_end_datetime, condition_type_concept_id, stop_reason, provider_id, visit_occurrence_id,
condition_source_value, condition_source_concept_id, condition_status_source_value, condition_status_concept_id,
COUNT(*) as cnt
FROM
`{DATASET}.unioned_ehr_condition_occurrence` AS t1
JOIN
`{DATASET}._mapping_condition_occurrence` AS t2
ON
t1.condition_occurrence_id = t2.condition_occurrence_id
WHERE
t1.condition_concept_id!=0 AND
t1.condition_concept_id IS NOT NULL AND
t1.person_id!=0 and t1.person_id IS NOT NULL
GROUP BY
1,2,3,4,5,6,7,8,9,10,11,12,13,14,15
HAVING
COUNT(*) > 1
ORDER BY
1,2,3,4,5,6,7,8,9,10,11,12,13,14
"""
foreign_key_df = pd.io.gbq.read_gbq(condition_query, dialect='standard')
"""
:mod:`orion.algo.pbt.pb2
========================
"""
import copy
import logging
import time
import numpy as np
import pandas
from orion.algo.pbt.pb2_utils import select_config
from orion.algo.pbt.pbt import PBT
from orion.core.utils.flatten import flatten
from orion.core.worker.trial import Trial
logger = logging.getLogger(__name__)
class PB2(PBT):
"""Population Based Bandits
Warning: PB2 is broken in current version v0.2.4. We are working on a fix to be released in
v0.2.5, ETA July 2022.
Population Based Bandits is a variant of Population Based Training using probabilistic model
to guide the search instead of relying on purely random perturbations.
PB2 implementation uses a time-varying Gaussian process to model the optimization curves
during training. This implementation is based on ray-tune implementation. Oríon's version
supports discrete and categorical dimensions, and offers better resiliency to broken
trials by using back-tracking.
See PBT documentation for more information on how to use PBT algorithms.
For more information on the algorithm,
see original paper at https://arxiv.org/abs/2002.02518.
Parker-Holder, Jack, <NAME>, and <NAME>.
"Provably efficient online hyperparameter optimization with population-based bandits."
Advances in Neural Information Processing Systems 33 (2020): 17200-17211.
Parameters
----------
space: `orion.algo.space.Space`
Optimisation space with priors for each dimension.
seed: None, int or sequence of int
Seed for the random number generator used to sample new trials.
Default: ``None``
population_size: int, optional
Size of the population. No trial will be continued until there are `population_size`
trials executed until lowest fidelity. If a trial is broken during execution at lowest
fidelity, the algorithm will sample a new trial, keeping the population of *non-broken*
trials at `population_size`. For efficiency it is better to have less workers running than
population_size. Default: 50.
generations: int, optional
Number of generations, from lowest fidelity to highest one. This will determine how
many branchings occur during the execution of PBT. Default: 10
exploit: dict or None, optional
Configuration for a ``pbt.exploit.BaseExploit`` object that determines
when if a trial should be exploited or not. If None, default configuration
is a ``PipelineExploit`` with ``BacktrackExploit`` and ``TruncateExploit``.
fork_timeout: int, optional
Maximum amount of time in seconds that an attempt to mutate a trial should take, otherwise
algorithm.suggest() will raise ``SuggestionTimeout``. Default: 60
"""
requires_type = "real"
requires_dist = "linear"
requires_shape = "flattened"
def __init__(
self,
space,
seed=None,
population_size=50,
generations=10,
exploit=None,
fork_timeout=60,
):
super().__init__(
space,
seed=seed,
population_size=population_size,
generations=generations,
exploit=exploit,
fork_timeout=fork_timeout,
)
@property
def configuration(self):
"""Return tunable elements of this algorithm in a dictionary form
appropriate for saving.
"""
config = copy.deepcopy(super().configuration)
config["pb2"].pop("explore", None)
return config
def _generate_offspring(self, trial):
"""Try to promote or fork a given trial."""
new_trial = trial
if not self.has_suggested(new_trial):
raise RuntimeError(
"Trying to fork a trial that was not registered yet. This should never happen"
)
attempts = 0
start = time.perf_counter()
while (
self.has_suggested(new_trial)
and time.perf_counter() - start <= self.fork_timeout
):
trial_to_explore = self.exploit_func(
self.rng,
trial,
self.lineages,
)
if trial_to_explore is None:
return None, None
elif trial_to_explore is trial:
new_params = {}
trial_to_branch = trial
logger.debug("Promoting trial %s, parameters stay the same.", trial)
else:
new_params = flatten(self._explore(self.space, trial_to_explore))
trial_to_branch = trial_to_explore
logger.debug(
"Forking trial %s with new parameters %s",
trial_to_branch,
new_params,
)
# Set next level of fidelity
new_params[self.fidelity_index] = self.fidelity_upgrades[
trial_to_branch.params[self.fidelity_index]
]
new_trial = trial_to_branch.branch(params=new_params)
new_trial = self.space.transform(self.space.reverse(new_trial))
logger.debug("Attempt %s - Creating new trial %s", attempts, new_trial)
attempts += 1
if (
self.has_suggested(new_trial)
and time.perf_counter() - start > self.fork_timeout
):
raise RuntimeError(
f"Could not generate unique new parameters for trial {trial.id} in "
f"less than {self.fork_timeout} seconds. Attempted {attempts} times."
)
return trial_to_branch, new_trial
def _explore(self, space, base: Trial):
"""Generate new hyperparameters for given trial.
Derived from PB2 explore implementation in Ray (2022/02/18):
https://github.com/ray-project/ray/blob/master/python/ray/tune/schedulers/pb2.py#L131
"""
data, current = self._get_data_and_current()
bounds = {dim.name: dim.interval() for dim in space.values()}
df = data.copy()
# Group by trial ID and hyperparams.
# Compute change in timesteps and reward.
diff_reward = (
df.groupby(["Trial"] + list(bounds.keys()))["Reward"]
.mean()
.diff()
.reset_index(drop=True)
)
df["y"] = diff_reward
df["R_before"] = df.Reward - df.y
df = df[~df.y.isna()].reset_index(drop=True)
# Only use the last 1k datapoints, so the GP is not too slow.
df = df.iloc[-1000:, :].reset_index(drop=True)
# We need this to know the T and Reward for the weights.
if not df[df["Trial"] == self.get_id(base)].empty:
            # Now specify the dataset for the GP.
y_raw = np.array(df.y.values)
# Meta data we keep -> episodes and reward.
t_r = df[["Budget", "R_before"]]
hparams = df[bounds.keys()]
x_raw = pandas.concat([t_r, hparams], axis=1).values
newpoint = (
df[df["Trial"] == self.get_id(base)]
.iloc[-1, :][["Budget", "R_before"]]
.values
)
new = select_config(
x_raw, y_raw, current, newpoint, bounds, num_f=len(t_r.columns)
)
new_config = base.params.copy()
for i, col in enumerate(hparams.columns):
if isinstance(base.params[col], int):
new_config[col] = int(new[i])
else:
new_config[col] = new[i]
else:
new_config = base.params
return new_config
def _get_data_and_current(self):
"""Generate data and current objects used in _explore function.
data is a pandas DataFrame combining data from all completed trials.
current is a numpy array with hyperparameters from uncompleted trials.
"""
data_trials = []
current_trials = []
for trial in self.registry:
if trial.status == "completed":
data_trials.append(trial)
else:
current_trials.append(trial)
data = self._trials_to_data(data_trials)
if current_trials:
current = np.asarray(
[
[trial.params[key] for key in self.space.keys()]
for trial in current_trials
]
)
else:
current = None
return data, current
def _trials_to_data(self, trials):
"""Generate data frame to use in _explore method."""
rows = []
cols = ["Trial", "Budget"] + list(self.space.keys()) + ["Reward"]
for trial in trials:
values = [trial.params[key] for key in self.space.keys()]
lst = (
[self.get_id(trial), trial.params[self.fidelity_index]]
+ values
+ [trial.objective.value]
)
rows.append(lst)
        data = pandas.DataFrame(rows, columns=cols)