prompt (string, length 19 to 1.03M) | completion (string, length 4 to 2.12k) | api (string, length 8 to 90) |
---|---|---|
# coding: utf-8
import os
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.metrics import roc_curve, auc
def cut_bins(x, bins=10, method='equal'):
"""
对x进行分bin,返回每个样本的bin值和每个bin的下界
Parameters
----------
x: numpy.ndarray or pandas.Series
变量
bins: int or list, default 10
分bin的个数
method: str, default 'equal pop', options ['equal', 'quantile', 'point']
分bin方式,'equal'是等样本量分bin,'quantile'为使用分位点分bin, 'point'为使用指定的分位点分bin
Returns
-------
bin_no: numpy.ndarray
每个样本的bin值
"""
if method not in ('equal', 'quantile', 'point'):
raise ValueError('method must be one of "equal", "quantile" or "point"')
if method == 'equal':
if type(bins) != int:
raise ValueError('when choosing the "equal" method, bins must be an int')
bin_no = pd.qcut(x, q=bins, labels=range(1, bins + 1), precision=10).astype(int)
elif method == 'quantile':
if type(bins) not in (list, np.ndarray):
raise ValueError('when choose "quantile" method, bins need list or np.ndarray type')
bin_no = pd.qcut(x, q=bins, labels=range(1, len(bins)), precision=10).astype(int)
elif method == 'point':
if type(bins) not in (list, np.ndarray):
raise ValueError('when choose "point" method, bins need list or np.ndarray type')
bin_no = np.digitize(x, bins=bins, right=False)
return bin_no
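# Illustrative usage sketch (editor addition, not in the original file): how cut_bins
# behaves on a synthetic score vector; data and names below are invented for demonstration.
def _demo_cut_bins():
    rng = np.random.RandomState(0)
    scores = pd.Series(rng.rand(1000))
    eq = cut_bins(scores, bins=10, method='equal')               # 10 equal-population bins, labels 1..10
    pt = cut_bins(scores, bins=[0.2, 0.5, 0.8], method='point')  # fixed cut points via np.digitize
    # 'quantile' expects a list of quantile points instead, e.g. [0.0, 0.25, 0.5, 0.75, 1.0]
    return eq, pt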
def show_model_performance(truth, predict, bins=10, title=None, save_path=None):
"""
计算模型的TPR、FPR,绘制ROC曲线、TPR-FPR曲线和Sloping曲线
Parameters
----------
truth: numpy.ndarray
样本的真实标签
predict: numpy.ndarray
样本的预测分数
bins: int, default 10
分bin个数
title: str, default None
图片名称,通常以数据集命名,eg. ins、oos、oot
save_path: str, default None
图片存储路径
Returns
-------
df_sloping: DataFrame
每个bin的target rate和模型分均值
auc: float
模型AUC值
ks: float
模型ks值
"""
n = truth.size # sample size
fpr, tpr, thresholds = roc_curve(truth, predict)
diff = tpr - fpr
auc_value = auc(fpr, tpr)
ks = diff.max()
maxidx = 1.0 * diff.argmax() / diff.size
cut_point = thresholds[diff.argmax()]
reject_porp = round(100.0 * (predict >= cut_point).sum() / predict.shape[0], 2)
df_tmp = pd.DataFrame({'truth': truth, 'predict': predict})
df_tmp['bin'] = cut_bins(df_tmp['predict'], bins=bins, method='equal')
group = df_tmp.groupby('bin')
df_sloping = group['truth'].count().to_frame().reset_index(drop=False)
df_sloping.columns = ['bin', 'sample_count']
df_sloping['target_rate'] = group['truth'].mean().values
df_sloping['avg_score'] = group['predict'].mean().values
plt.figure(figsize=(12, 3), dpi=200)
plt.subplot(1, 4, 1)
plt.plot(fpr, tpr, linewidth=0.8)
plt.plot((0, 1), (0, 1), color='k', linestyle='dashed', linewidth=0.5)
plt.plot(fpr[diff.argmax()], tpr[diff.argmax()], 'r.', markersize=5)
plt.axis(xmin=0.0, xmax=1.0)
plt.axis(ymin=0.0, ymax=1.0)
plt.xticks(fontsize=5)
plt.yticks(fontsize=5)
plt.xlabel('false positive rate', fontsize=6)
plt.ylabel('true positive rate', fontsize=6)
plt.title('AUC = {0}'.format(round(auc_value, 3)), fontsize=7)
plt.subplot(1, 4, 2)
plt.hist(predict, bins=30, density=True, facecolor='mediumaquamarine', alpha=0.9)
plt.axvline(x=np.mean(predict), color='powderblue', linestyle='dashed', linewidth=0.7)
plt.axvline(x=np.mean(truth), color='lightcoral', linestyle='dashed', linewidth=0.7)
plt.title('Tru = {0}, Pred = {1}'.format(round(truth.mean(), 3), round(predict.mean(), 3)), fontsize=7)
plt.xticks(fontsize=5)
plt.yticks(fontsize=5)
plt.xlabel('score', fontsize=6)
plt.ylabel('probability', fontsize=6)
plt.subplot(1, 4, 3)
plt.plot(np.linspace(0, 1, diff.size), tpr, linewidth=0.8, color='cornflowerblue', label='TPR')
plt.plot(np.linspace(0, 1, diff.size), fpr, linewidth=0.8, color='firebrick', label='FPR')
plt.plot(np.linspace(0, 1, diff.size), diff, linewidth=0.8, color='slategray', label='TPR - FPR')
plt.plot((maxidx, maxidx), (0.0, ks), linewidth=0.4, color='r')
plt.axis(xmin=0.0, xmax=1.0)
plt.axis(ymin=0.0, ymax=1.0)
plt.xticks(fontsize=5)
plt.yticks(fontsize=5)
plt.ylabel('tpr / fpr', fontsize=6)
plt.legend(loc=2, fontsize=6)
plt.title('KS = {0}, Thres = {1}, Reject {2}%'.format(round(ks, 3), round(cut_point, 4), reject_porp), fontsize=7)
plt.subplot(1, 4, 4)
plt.plot(df_sloping['bin'], df_sloping['avg_score'], 'b.-', linewidth=0.8, label='Prediction', markersize=3)
plt.plot(df_sloping['bin'], df_sloping['target_rate'], 'r.-', linewidth=0.8, label='Truth', markersize=3)
plt.axhline(predict.mean(), color='powderblue', linestyle='dashed', linewidth=0.7, label='Overall Avg score')
plt.axhline(truth.mean(), color='lightcoral', linestyle='dashed', linewidth=0.7, label='Overall Target rate')
plt.legend(loc=2, fontsize=6)
plt.xticks(df_sloping['bin'], fontsize=5)
plt.yticks(fontsize=5)
plt.xlabel('bin', fontsize=6)
plt.ylabel('target rate', fontsize=6)
plt.title('Sample = {0}, Bins = {1}'.format(n, df_sloping.shape[0]), fontsize=7)
if title is not None:
plt.suptitle(title, fontsize=10, x=0.02, y=1.04, horizontalalignment='left')
plt.tight_layout(pad=0.5, w_pad=0.5, h_pad=1.0)
if save_path is not None:
if save_path.endswith('.png') or save_path.endswith('.jpg'):
plt.savefig(save_path, bbox_inches='tight')
elif os.path.isdir(save_path):
plt.savefig(os.path.join(save_path, 'model_performance({0}).png'.format(title)), bbox_inches='tight')
else:
raise ValueError('No such file or directory: {0}'.format(save_path))
plt.show()
plt.close()
return df_sloping, auc_value, ks
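# Illustrative usage sketch (editor addition, not in the original file): synthetic labels and
# scores passed to show_model_performance; nothing is written to disk since save_path is None.
def _demo_show_model_performance():
    rng = np.random.RandomState(1)
    truth = rng.binomial(1, 0.2, size=2000)
    predict = np.clip(0.3 * truth + rng.normal(0.3, 0.15, size=2000), 0.0, 1.0)
    df_sloping, auc_value, ks = show_model_performance(truth, predict, bins=10, title='demo')
    return df_sloping, auc_value, ks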
def calc_listing_overdue_rate(df, score, target, bins=10, plot=False, title='', save_path=None):
"""
根据score等样本量分bin,计算每个bin中的累积标的逾期率
Parameters
----------
df: DataFrame
score: str
分数列名
target: str
target列名
bins: int or list, default 10
分bin的方式,如果是int则表示将模型分从小到大排列后均匀分成几个bin,如果是list则按指定切分点分bin
plot: bool, default False
是否绘图
title: str, default None
图片标题
save_path: str, default None
图片存储路径
Returns
-------
result: DataFrame
每个bin中的累积逾期率
"""
df_result = pd.DataFrame()
df_tmp = df[[score, target]].copy()
if type(bins) == int:
df_tmp['bin'] = cut_bins(df_tmp[score], bins=bins, method='equal')
elif type(bins) in (list, np.ndarray):
df_tmp['bin'] = cut_bins(df_tmp[score], bins=bins, method='quantile')
else:
raise ValueError('bins type can only be [int, list, np.ndarray]')
group = df_tmp.groupby('bin')
sample_cnt = group[target].count()
overdue_cnt = group[target].sum()
df_result['bin'] = np.arange(1, len(sample_cnt) + 1)
df_result['odue_rate'] = (overdue_cnt / sample_cnt).values
df_result['cum_odue_rate'] = (overdue_cnt.cumsum() / sample_cnt.cumsum()).values
if plot:
plt.figure(figsize=(4, 3), dpi=120)
plt.plot(df_result['bin'], df_result['odue_rate'], color='red', linewidth=0.6, marker='.', markersize=2,
label='overdue rate')
plt.plot(df_result['bin'], df_result['cum_odue_rate'], color='lightcoral', linewidth=0.6, marker='.',
markersize=2, label='cumulative overdue rate')
plt.xticks(df_result['bin'], fontsize=5)
plt.yticks(fontsize=5)
plt.xlabel('bin', fontsize=5)
plt.ylabel('target rate', fontsize=5)
plt.legend(loc=2, fontsize=5)
plt.title('Overdue rate{0}'.format(title), fontsize=7)
if save_path is not None:
if save_path.endswith('.png') or save_path.endswith('.jpg'):
plt.savefig(save_path, bbox_inches='tight')
elif os.path.isdir(save_path):
plt.savefig(os.path.join(save_path, 'target_rate_plot.png'), bbox_inches='tight')
else:
raise ValueError('No such file or directory: {0}'.format(save_path))
plt.show()
plt.close()
return df_result
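# Illustrative usage sketch (editor addition, not in the original file): a synthetic scored
# portfolio; the column names 'score' and 'target' are invented for this demo.
def _demo_calc_listing_overdue_rate():
    rng = np.random.RandomState(2)
    df_demo = pd.DataFrame({'score': rng.rand(1000)})
    df_demo['target'] = (rng.rand(1000) < df_demo['score']).astype(int)  # higher score -> higher overdue rate
    return calc_listing_overdue_rate(df_demo, score='score', target='target', bins=10, plot=False)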
def calc_amount_overdue_rate(df, score, principal, dueamount, bins=10, plot=False, title='', save_path=None):
"""
根据score等样本量分bin,计算每个bin中的金额逾期率
Parameters
----------
df: DataFrame
score: str
分数列名
principal: str
借款本金列名
dueamount: str
逾期本金列名
bins: int or list, default 10
分bin的方式,如果是int则表示将模型分从小到大排列后均匀分成几个bin,如果是list则按指定切分点分bin
plot: bool, default False
是否绘图
title: str, default None
图片标题
save_path: str, default None
图片存储路径
Returns
-------
result: DataFrame
每个bin中的累积逾期率
"""
df_result = | pd.DataFrame() | pandas.DataFrame |
'''
This file is part of PM4Py (More Info: https://pm4py.fit.fraunhofer.de).
PM4Py is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
PM4Py is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with PM4Py. If not, see <https://www.gnu.org/licenses/>.
'''
from enum import Enum
from typing import Optional, Dict, Any, Union, Tuple, List
import pandas as pd
from pm4py.statistics.traces.generic.common import case_duration as case_duration_commons
from pm4py.util import exec_utils, constants, pandas_utils
from pm4py.util import variants_util
from pm4py.util import xes_constants as xes
from pm4py.util.business_hours import soj_time_business_hours_diff
from pm4py.util.constants import CASE_CONCEPT_NAME
from pm4py.util.xes_constants import DEFAULT_TIMESTAMP_KEY
class Parameters(Enum):
ATTRIBUTE_KEY = constants.PARAMETER_CONSTANT_ATTRIBUTE_KEY
ACTIVITY_KEY = constants.PARAMETER_CONSTANT_ACTIVITY_KEY
TIMESTAMP_KEY = constants.PARAMETER_CONSTANT_TIMESTAMP_KEY
CASE_ID_KEY = constants.PARAMETER_CONSTANT_CASEID_KEY
MAX_VARIANTS_TO_RETURN = "max_variants_to_return"
VARIANTS_DF = "variants_df"
ENABLE_SORT = "enable_sort"
SORT_BY_COLUMN = "sort_by_column"
SORT_ASCENDING = "sort_ascending"
MAX_RET_CASES = "max_ret_cases"
BUSINESS_HOURS = "business_hours"
WORKTIMING = "worktiming"
WEEKENDS = "weekends"
def get_variant_statistics(df: pd.DataFrame, parameters: Optional[Dict[Union[str, Parameters], Any]] = None) -> Union[
List[Dict[str, int]], List[Dict[List[str], int]]]:
"""
Get variants from a Pandas dataframe
Parameters
-----------
df
Dataframe
parameters
Parameters of the algorithm, including:
Parameters.CASE_ID_KEY -> Column that contains the Case ID
Parameters.ACTIVITY_KEY -> Column that contains the activity
Parameters.MAX_VARIANTS_TO_RETURN -> Maximum number of variants to return
variants_df -> If provided, avoid recalculation of the variants dataframe
Returns
-----------
variants_list
List of variants inside the Pandas dataframe
"""
if parameters is None:
parameters = {}
case_id_glue = exec_utils.get_param_value(Parameters.CASE_ID_KEY, parameters, CASE_CONCEPT_NAME)
max_variants_to_return = exec_utils.get_param_value(Parameters.MAX_VARIANTS_TO_RETURN, parameters, None)
variants_df = exec_utils.get_param_value(Parameters.VARIANTS_DF, parameters, get_variants_df(df,
parameters=parameters))
variants_df = variants_df.reset_index()
variants_list = pandas_utils.to_dict_records(variants_df.groupby("variant").agg("count").reset_index())
variants_list = sorted(variants_list, key=lambda x: (x[case_id_glue], x["variant"]), reverse=True)
if max_variants_to_return:
variants_list = variants_list[:min(len(variants_list), max_variants_to_return)]
return variants_list
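# Illustrative usage sketch (editor addition, not part of PM4Py): a tiny hand-made event log
# using the default pm4py column names; the case ids and activities are invented.
def _demo_get_variant_statistics():
    demo_log = pd.DataFrame({
        'case:concept:name': ['c1', 'c1', 'c2', 'c2', 'c3'],
        'concept:name': ['register', 'approve', 'register', 'approve', 'register'],
    })
    # One record per variant, with the number of cases that follow it
    return get_variant_statistics(demo_log)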
def get_variants_df_and_list(df: pd.DataFrame, parameters: Optional[Dict[Union[str, Parameters], Any]] = None) -> Tuple[
pd.DataFrame, Union[List[Dict[str, int]], List[Dict[List[str], int]]]]:
"""
(Technical method) Provides variants_df and variants_list out of the box
Parameters
------------
df
Dataframe
parameters
Parameters of the algorithm, including:
Parameters.CASE_ID_KEY -> Column that contains the Case ID
Parameters.ACTIVITY_KEY -> Column that contains the activity
Returns
------------
variants_df
Variants dataframe
variants_list
List of variants sorted by their count
"""
if parameters is None:
parameters = {}
case_id_glue = exec_utils.get_param_value(Parameters.CASE_ID_KEY, parameters, CASE_CONCEPT_NAME)
variants_df = get_variants_df(df, parameters=parameters)
variants_stats = get_variant_statistics(df, parameters=parameters)
variants_list = []
for vd in variants_stats:
variant = vd["variant"]
count = vd[case_id_glue]
variants_list.append([variant, count])
variants_list = sorted(variants_list, key=lambda x: (x[1], x[0]), reverse=True)
return variants_df, variants_list
def get_cases_description(df: pd.DataFrame, parameters: Optional[Dict[Union[str, Parameters], Any]] = None) -> Dict[
str, Dict[str, Any]]:
"""
Get a description of traces present in the Pandas dataframe
Parameters
-----------
df
Pandas dataframe
parameters
Parameters of the algorithm, including:
Parameters.CASE_ID_KEY -> Column that identifies the case ID
Parameters.TIMESTAMP_KEY -> Column that identifies the timestamp
enable_sort -> Enable sorting of traces
Parameters.SORT_BY_COLUMN -> Sort traces inside the dataframe using the specified column.
Admitted values: startTime, endTime, caseDuration
Parameters.SORT_ASCENDING -> Set sort direction (boolean; it true then the sort direction is ascending,
otherwise descending)
Parameters.MAX_RET_CASES -> Set the maximum number of returned traces
Returns
-----------
ret
Dictionary of traces associated to their start timestamp, their end timestamp and their duration
"""
if parameters is None:
parameters = {}
case_id_glue = exec_utils.get_param_value(Parameters.CASE_ID_KEY, parameters, CASE_CONCEPT_NAME)
timestamp_key = exec_utils.get_param_value(Parameters.TIMESTAMP_KEY, parameters, DEFAULT_TIMESTAMP_KEY)
enable_sort = exec_utils.get_param_value(Parameters.ENABLE_SORT, parameters, True)
sort_by_column = exec_utils.get_param_value(Parameters.SORT_BY_COLUMN, parameters, "startTime")
sort_ascending = exec_utils.get_param_value(Parameters.SORT_ASCENDING, parameters, True)
max_ret_cases = exec_utils.get_param_value(Parameters.MAX_RET_CASES, parameters, None)
business_hours = exec_utils.get_param_value(Parameters.BUSINESS_HOURS, parameters, False)
worktiming = exec_utils.get_param_value(Parameters.WORKTIMING, parameters, [7, 17])
weekends = exec_utils.get_param_value(Parameters.WEEKENDS, parameters, [6, 7])
grouped_df = df[[case_id_glue, timestamp_key]].groupby(df[case_id_glue])
first_eve_df = grouped_df.first()
last_eve_df = grouped_df.last()
del grouped_df
last_eve_df.columns = [str(col) + '_2' for col in first_eve_df.columns]
stacked_df = pd.concat([first_eve_df, last_eve_df], axis=1)
del first_eve_df
del last_eve_df
del stacked_df[case_id_glue]
del stacked_df[case_id_glue + "_2"]
if business_hours:
stacked_df['caseDuration'] = stacked_df.apply(
lambda x: soj_time_business_hours_diff(x[timestamp_key], x[timestamp_key + "_2"], worktiming,
weekends), axis=1)
else:
stacked_df['caseDuration'] = stacked_df[timestamp_key + "_2"] - stacked_df[timestamp_key]
stacked_df['caseDuration'] = stacked_df['caseDuration'].astype('timedelta64[s]')
stacked_df[timestamp_key + "_2"] = stacked_df[timestamp_key + "_2"].astype('int64') // 10 ** 9
stacked_df[timestamp_key] = stacked_df[timestamp_key].astype('int64') // 10 ** 9
stacked_df = stacked_df.rename(columns={timestamp_key: 'startTime', timestamp_key + "_2": 'endTime'})
if enable_sort:
stacked_df = stacked_df.sort_values(sort_by_column, ascending=sort_ascending)
if max_ret_cases is not None:
stacked_df = stacked_df.head(n=min(max_ret_cases, len(stacked_df)))
ret = pandas_utils.to_dict_index(stacked_df)
return ret
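# Illustrative usage sketch (editor addition, not part of PM4Py): start/end/duration per case
# for a tiny invented log; the timestamp column uses the default 'time:timestamp' key.
def _demo_get_cases_description():
    demo_log = pd.DataFrame({
        'case:concept:name': ['c1', 'c1', 'c2', 'c2'],
        'time:timestamp': pd.to_datetime([
            '2021-01-01 08:00:00', '2021-01-01 09:30:00',
            '2021-01-02 10:00:00', '2021-01-02 10:45:00',
        ]),
    })
    # Returns a dict keyed by case id with 'startTime', 'endTime' and 'caseDuration'
    return get_cases_description(demo_log)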
def get_variants_df(df, parameters=None):
"""
Get variants dataframe from a Pandas dataframe
Parameters
-----------
df
Dataframe
parameters
Parameters of the algorithm, including:
Parameters.CASE_ID_KEY -> Column that contains the Case ID
Parameters.ACTIVITY_KEY -> Column that contains the activity
Returns
-----------
variants_df
Variants dataframe
"""
if parameters is None:
parameters = {}
case_id_glue = exec_utils.get_param_value(Parameters.CASE_ID_KEY, parameters, CASE_CONCEPT_NAME)
activity_key = exec_utils.get_param_value(Parameters.ACTIVITY_KEY, parameters, xes.DEFAULT_NAME_KEY)
if variants_util.VARIANT_SPECIFICATION == variants_util.VariantsSpecifications.STRING:
new_df = df.groupby(case_id_glue)[activity_key].agg(lambda col: constants.DEFAULT_VARIANT_SEP.join(pd.Series.to_list(col))).to_frame()
elif variants_util.VARIANT_SPECIFICATION == variants_util.VariantsSpecifications.LIST:
new_df = df.groupby(case_id_glue)[activity_key].agg(lambda col: tuple(pd.Series.to_list(col))).to_frame()
new_cols = list(new_df.columns)
new_df = new_df.rename(columns={new_cols[0]: "variant"})
return new_df
def get_variants_df_with_case_duration(df, parameters=None):
"""
Get variants dataframe from a Pandas dataframe, with case duration that is included
Parameters
-----------
df
Dataframe
parameters
Parameters of the algorithm, including:
Parameters.CASE_ID_KEY -> Column that contains the Case ID
Parameters.ACTIVITY_KEY -> Column that contains the activity
Parameters.TIMESTAMP_KEY -> Column that contains the timestamp
Returns
-----------
variants_df
Variants dataframe
"""
if parameters is None:
parameters = {}
case_id_glue = exec_utils.get_param_value(Parameters.CASE_ID_KEY, parameters, CASE_CONCEPT_NAME)
activity_key = exec_utils.get_param_value(Parameters.ACTIVITY_KEY, parameters, xes.DEFAULT_NAME_KEY)
timestamp_key = exec_utils.get_param_value(Parameters.TIMESTAMP_KEY, parameters, DEFAULT_TIMESTAMP_KEY)
business_hours = exec_utils.get_param_value(Parameters.BUSINESS_HOURS, parameters, False)
worktiming = exec_utils.get_param_value(Parameters.WORKTIMING, parameters, [7, 17])
weekends = exec_utils.get_param_value(Parameters.WEEKENDS, parameters, [6, 7])
grouped_df = df[[case_id_glue, timestamp_key, activity_key]].groupby(df[case_id_glue])
df1 = None
if variants_util.VARIANT_SPECIFICATION == variants_util.VariantsSpecifications.STRING:
df1 = grouped_df[activity_key].agg(lambda col: constants.DEFAULT_VARIANT_SEP.join(pd.Series.to_list(col))).to_frame()
elif variants_util.VARIANT_SPECIFICATION == variants_util.VariantsSpecifications.LIST:
df1 = grouped_df[activity_key].agg(lambda col: tuple(pd.Series.to_list(col))).to_frame()
new_cols = list(df1.columns)
df1 = df1.rename(columns={new_cols[0]: "variant"})
first_eve_df = grouped_df.first()
last_eve_df = grouped_df.last()
del grouped_df
last_eve_df.columns = [str(col) + '_2' for col in first_eve_df.columns]
stacked_df = | pd.concat([first_eve_df, last_eve_df], axis=1) | pandas.concat |
'''
tRNA Adaptation Index
'''
import collections
import os
import json
import logging
import pandas as pd
import numpy as np
import scipy.stats.mstats
from sqlalchemy import create_engine
from ..alphabet import CODON_REDUNDANCY
logger = logging.getLogger(__name__)
def main():
logging.basicConfig(level=logging.INFO, format="%(asctime)s (%(levelname)s) %(message)s")
db_path = os.path.join(os.getcwd(), 'data/db/seq.db')
engine = create_engine(f'sqlite+pysqlite:///{db_path}')
possible_codons = sorted(CODON_REDUNDANCY.keys())
test_set_query = """
select assembly_accession, species_taxid from assembly_source
"""
response_df = pd.read_sql(test_set_query, engine)
assembly_accession_ids = response_df['assembly_accession'].values
species_taxids = response_df['species_taxid'].values
logger.info('Computing adaptation index for all strains')
for i in range(len(assembly_accession_ids)):
if (i + 1) % 100 == 0:
logger.info(f'Assembly {i + 1:,} / {len(assembly_accession_ids):,}')
assembly_accession = assembly_accession_ids[i]
species_taxid = species_taxids[i]
trna_count = compute_trna_count(engine, assembly_accession)
weights = compute_trna_ai_weights(trna_count)
output_df = compute_genes_adaptation_index(
engine,
assembly_accession,
species_taxid,
weights,
)
output_df['adaptation_index'] /= output_df['adaptation_index'].max()
output_df.sort_values('adaptation_index', ascending=False).to_csv(
os.path.join(os.getcwd(), f'data/trn_adaptation_index/{assembly_accession}_tai.csv'),
index=False,
)
def compute_genes_adaptation_index(engine, assembly_accession, species_taxid, weights):
columns = [
'assembly_accession',
'species_taxid',
'row_id',
'protein_id',
'protein',
'gene',
'adaptation_index',
]
query = """
select rowid, * from sequences
where assembly_accession = ? and sequence_type = 'CDS'
"""
coding_sequences = pd.read_sql(query, engine, params=(assembly_accession,))
data = []
for tpl in coding_sequences.itertuples():
sequence = tpl.sequence
metadata = (
json.loads(tpl.metadata_json)
if tpl.metadata_json is not None
else {}
)
adaptation_index = compute_adaptation_index(sequence, weights)
data.append([
assembly_accession,
species_taxid,
tpl.rowid,
metadata.get('protein_id'),
metadata.get('protein'),
metadata.get('gene'),
adaptation_index,
])
return pd.DataFrame(data, columns=columns)
def compute_trna_count(engine, assembly_accession):
trna_codons = sorted(set(CODON_REDUNDANCY.keys()) - {'ATG', 'TGG', 'TGA', 'TAG', 'TAA'})
trna_df = load_trnas(engine, assembly_accession)
trna_count = collections.defaultdict(int)
for tpl in trna_df.itertuples():
if | pd.isnull(tpl.codon) | pandas.isnull |
import numpy as np
#import skimage.transform as sktransform
import random
import matplotlib.image as mpimg
import os
import pandas as pd
import matplotlib.pyplot as plt
import shutil
new_path = './track1/IMG/'
current_path = './out2rev/IMG/'
if not os.path.exists(new_path):
os.makedirs(new_path)
print('Folder created: ', new_path)
zero = 0
df = pd.read_csv('./out2rev/driving_log.csv', header=None)
counted_samples = np.unique(df.loc[:, 3], return_counts=True)
print('Index:', len(counted_samples[0]))
max_ind = np.argmax(counted_samples[1])
print('Zero:', zero, 'Max index', max_ind,
'Val max:', counted_samples[0][max_ind])
plt.figure(figsize=(20, 10))
index = np.arange(len(counted_samples[0]))
plt.bar(counted_samples[0], counted_samples[1], color='b', width=0.005)
plt.ylabel('Counts', fontsize=15)
plt.title('Training examples')
plt.savefig('counted_samples.png')
plt.clf()
updated = | pd.DataFrame() | pandas.DataFrame |
import pickle
import numpy as np
import pandas as pd
from sklearn.exceptions import ConvergenceWarning
from sklearn.mixture import BayesianGaussianMixture
from sklearn.preprocessing import OneHotEncoder
from sklearn.utils.testing import ignore_warnings
class DataTransformer(object):
"""Data Transformer.
Model continuous columns with a BayesianGMM and normalized to a scalar
[0, 1] and a vector.
Discrete columns are encoded using a scikit-learn OneHotEncoder.
Args:
n_cluster (int):
Number of modes.
epsilon (float):
Epsilon value.
"""
def __init__(self, n_clusters=10, epsilon=0.005):
self.n_clusters = n_clusters
self.epsilon = epsilon
@ignore_warnings(category=ConvergenceWarning)
def _fit_continuous(self, column, data):
gm = BayesianGaussianMixture(
self.n_clusters,
weight_concentration_prior_type='dirichlet_process',
weight_concentration_prior=0.001,
n_init=1
)
gm.fit(data)
components = gm.weights_ > self.epsilon
num_components = components.sum()
return {
'name': column,
'model': gm,
'components': components,
'output_info': [(1, 'tanh'), (num_components, 'softmax')],
'output_dimensions': 1 + num_components,
}
def _fit_discrete(self, column, data):
ohe = OneHotEncoder(sparse=False)
ohe.fit(data)
categories = len(ohe.categories_[0])
return {
'name': column,
'encoder': ohe,
'output_info': [(categories, 'softmax')],
'output_dimensions': categories
}
def fit(self, data, discrete_columns=tuple()):
self.output_info = []
self.output_dimensions = 0
if not isinstance(data, pd.DataFrame):
self.dataframe = False
data = pd.DataFrame(data)
else:
self.dataframe = True
self.dtypes = data.infer_objects().dtypes
self.meta = []
for column in data.columns:
column_data = data[[column]].values
if column in discrete_columns:
meta = self._fit_discrete(column, column_data)
else:
meta = self._fit_continuous(column, column_data)
self.output_info += meta['output_info']
self.output_dimensions += meta['output_dimensions']
self.meta.append(meta)
def _transform_continuous(self, column_meta, data):
components = column_meta['components']
model = column_meta['model']
means = model.means_.reshape((1, self.n_clusters))
stds = np.sqrt(model.covariances_).reshape((1, self.n_clusters))
features = (data - means) / (4 * stds)
probs = model.predict_proba(data)
n_opts = components.sum()
features = features[:, components]
probs = probs[:, components]
opt_sel = np.zeros(len(data), dtype='int')
for i in range(len(data)):
pp = probs[i] + 1e-6
pp = pp / pp.sum()
opt_sel[i] = np.random.choice(np.arange(n_opts), p=pp)
idx = np.arange((len(features)))
features = features[idx, opt_sel].reshape([-1, 1])
features = np.clip(features, -.99, .99)
probs_onehot = np.zeros_like(probs)
probs_onehot[np.arange(len(probs)), opt_sel] = 1
return [features, probs_onehot]
def _transform_discrete(self, column_meta, data):
encoder = column_meta['encoder']
return encoder.transform(data)
def transform(self, data):
if not isinstance(data, pd.DataFrame):
data = pd.DataFrame(data)
values = []
for meta in self.meta:
column_data = data[[meta['name']]].values
if 'model' in meta:
values += self._transform_continuous(meta, column_data)
else:
values.append(self._transform_discrete(meta, column_data))
return np.concatenate(values, axis=1).astype(float)
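# Illustrative usage sketch (editor addition, kept as comments since we are inside the class
# body; the dataframe and column names below are invented):
#   transformer = DataTransformer()
#   transformer.fit(df_train, discrete_columns=('gender',))
#   encoded = transformer.transform(df_train)          # continuous -> [scalar, one-hot mode], discrete -> one-hot
#   recovered = transformer.inverse_transform(encoded, sigmas=None)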
def _inverse_transform_continuous(self, meta, data, sigma):
model = meta['model']
components = meta['components']
u = data[:, 0]
v = data[:, 1:]
if sigma is not None:
u = np.random.normal(u, sigma)
u = np.clip(u, -1, 1)
v_t = np.ones((len(data), self.n_clusters)) * -100
v_t[:, components] = v
v = v_t
means = model.means_.reshape([-1])
stds = np.sqrt(model.covariances_).reshape([-1])
p_argmax = np.argmax(v, axis=1)
std_t = stds[p_argmax]
mean_t = means[p_argmax]
column = u * 4 * std_t + mean_t
return column
def _inverse_transform_discrete(self, meta, data):
encoder = meta['encoder']
return encoder.inverse_transform(data)
def inverse_transform(self, data, sigmas):
start = 0
output = []
column_names = []
for meta in self.meta:
dimensions = meta['output_dimensions']
columns_data = data[:, start:start + dimensions]
if 'model' in meta:
sigma = sigmas[start] if sigmas else None
inverted = self._inverse_transform_continuous(meta, columns_data, sigma)
else:
inverted = self._inverse_transform_discrete(meta, columns_data)
output.append(inverted)
column_names.append(meta['name'])
start += dimensions
output = np.column_stack(output)
output = | pd.DataFrame(output, columns=column_names) | pandas.DataFrame |
"""
Prepare training and testing datasets as CSV dictionaries
Created on 11/26/2018
@author: RH
"""
import os
import pandas as pd
import sklearn.utils as sku
import numpy as np
# get all full paths of images
def image_ids_in(root_dir, ignore=['.DS_Store','dict.csv', 'all.csv']):
ids = []
for id in os.listdir(root_dir):
if id in ignore:
print('Skipping ID:', id)
else:
ids.append(id)
return ids
# Get intersection of 2 lists
def intersection(lst1, lst2):
lst3 = [value for value in lst1 if value in lst2]
return lst3
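# Tiny illustrative check (editor addition): intersection preserves the order of lst1.
def _demo_intersection():
    return intersection(['a', 'b', 'c'], ['c', 'a'])  # -> ['a', 'c']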
def tile_ids_in(slide, root_dir, label):
ids = []
try:
for id in os.listdir(root_dir+'/pos'):
if '.png' in id:
ids.append([slide, root_dir+'/pos/'+id, label, 1])
else:
print('Skipping ID:', id)
except FileNotFoundError:
print('Ignore:', root_dir+'/pos')
try:
for id in os.listdir(root_dir+'/neg'):
if '.png' in id:
ids.append([slide, root_dir+'/neg/'+id, label, 0])
else:
print('Skipping ID:', id)
except FileNotFoundError:
print('Ignore:', root_dir+'/neg')
return ids
# Get all svs images with their labels as one file; level is the tile resolution level
def big_image_sum(path='../tiles/', ref_file='../dummy_His_MUT_joined.csv'):
if not os.path.isdir(path):
os.mkdir(path)
import Cutter
Cutter.cut()
allimg = image_ids_in(path)
ref = pd.read_csv(ref_file, header=0)
big_images = []
negimg = intersection(ref.loc[ref['label'] == 0]['name'].tolist(), allimg)
posimg = intersection(ref.loc[ref['label'] == 1]['name'].tolist(), allimg)
for i in negimg:
big_images.append([i, path + "{}".format(i), 0])
for i in posimg:
big_images.append([i, path + "{}".format(i), 1])
datapd = pd.DataFrame(big_images, columns=['slide', 'path', 'label'])
return datapd
# separate into training and testing sets; each class keeps the same split ratio at the big-image (slide) level
# test and train csv files contain tiles' paths.
def set_sep(alll, path, cut=0.3):
trlist = []
telist = []
valist = []
for i in range(2):
subset = alll.loc[alll['label'] == i]
unq = list(subset.slide.unique())
np.random.shuffle(unq)
validation = unq[:int(len(unq) * cut / 2)]
valist.append(subset[subset['slide'].isin(validation)])
test = unq[int(len(unq) * cut / 2):int(len(unq) * cut)]
telist.append(subset[subset['slide'].isin(test)])
train = unq[int(len(unq) * cut):]
trlist.append(subset[subset['slide'].isin(train)])
test = pd.concat(telist)
train = pd.concat(trlist)
validation = | pd.concat(valist) | pandas.concat |
from sympy import *
import pandas as pd
from random import random
def random_optimization(xl, xu, n, function):
x = Symbol('x')
f = parse_expr(function)
iteration = 0
data = | pd.DataFrame(columns=['iteration','xl','xu','x','f(x)','max_x','max_f(x)']) | pandas.DataFrame |
import os
from datetime import date
from dask.dataframe import DataFrame as DaskDataFrame
from numpy import nan, ndarray
from numpy.testing import assert_allclose, assert_array_equal
from pandas import DataFrame, Series, Timedelta, Timestamp
from pandas.testing import assert_frame_equal, assert_series_equal
from pymove import (
DaskMoveDataFrame,
MoveDataFrame,
PandasDiscreteMoveDataFrame,
PandasMoveDataFrame,
read_csv,
)
from pymove.core.grid import Grid
from pymove.utils.constants import (
DATE,
DATETIME,
DAY,
DIST_PREV_TO_NEXT,
DIST_TO_PREV,
HOUR,
HOUR_SIN,
LATITUDE,
LOCAL_LABEL,
LONGITUDE,
PERIOD,
SITUATION,
SPEED_PREV_TO_NEXT,
TID,
TIME_PREV_TO_NEXT,
TRAJ_ID,
TYPE_DASK,
TYPE_PANDAS,
UID,
WEEK_END,
)
list_data = [
[39.984094, 116.319236, '2008-10-23 05:53:05', 1],
[39.984198, 116.319322, '2008-10-23 05:53:06', 1],
[39.984224, 116.319402, '2008-10-23 05:53:11', 2],
[39.984224, 116.319402, '2008-10-23 05:53:11', 2],
]
str_data_default = """
lat,lon,datetime,id
39.984093,116.319236,2008-10-23 05:53:05,4
39.9842,116.319322,2008-10-23 05:53:06,1
39.984222,116.319402,2008-10-23 05:53:11,2
39.984222,116.319402,2008-10-23 05:53:11,2
"""
str_data_different = """
latitude,longitude,time,traj_id
39.984093,116.319236,2008-10-23 05:53:05,4
39.9842,116.319322,2008-10-23 05:53:06,1
39.984222,116.319402,2008-10-23 05:53:11,2
39.984222,116.319402,2008-10-23 05:53:11,2
"""
str_data_missing = """
39.984093,116.319236,2008-10-23 05:53:05,4
39.9842,116.319322,2008-10-23 05:53:06,1
39.984222,116.319402,2008-10-23 05:53:11,2
39.984222,116.319402,2008-10-23 05:53:11,2
"""
def _default_move_df():
return MoveDataFrame(
data=list_data,
latitude=LATITUDE,
longitude=LONGITUDE,
datetime=DATETIME,
traj_id=TRAJ_ID,
)
def _default_pandas_df():
return DataFrame(
data=[
[39.984094, 116.319236, Timestamp('2008-10-23 05:53:05'), 1],
[39.984198, 116.319322, Timestamp('2008-10-23 05:53:06'), 1],
[39.984224, 116.319402, Timestamp('2008-10-23 05:53:11'), 2],
[39.984224, 116.319402, Timestamp('2008-10-23 05:53:11'), 2],
],
columns=['lat', 'lon', 'datetime', 'id'],
index=[0, 1, 2, 3],
)
def test_move_data_frame_from_list():
move_df = _default_move_df()
assert MoveDataFrame.has_columns(move_df)
try:
MoveDataFrame.validate_move_data_frame(move_df)
except Exception:
assert False
assert isinstance(move_df, PandasMoveDataFrame)
def test_move_data_frame_from_file(tmpdir):
d = tmpdir.mkdir('core')
file_default_columns = d.join('test_read_default.csv')
file_default_columns.write(str_data_default)
filename_default = os.path.join(
file_default_columns.dirname, file_default_columns.basename
)
move_df = read_csv(filename_default)
assert MoveDataFrame.has_columns(move_df)
try:
MoveDataFrame.validate_move_data_frame(move_df)
except Exception:
assert False
assert isinstance(move_df, PandasMoveDataFrame)
file_different_columns = d.join('test_read_different.csv')
file_different_columns.write(str_data_different)
filename_diferent = os.path.join(
file_different_columns.dirname, file_different_columns.basename
)
move_df = read_csv(
filename_diferent,
latitude='latitude',
longitude='longitude',
datetime='time',
traj_id='traj_id',
)
assert MoveDataFrame.has_columns(move_df)
try:
MoveDataFrame.validate_move_data_frame(move_df)
except Exception:
assert False
assert isinstance(move_df, PandasMoveDataFrame)
file_missing_columns = d.join('test_read_missing.csv')
file_missing_columns.write(str_data_missing)
filename_missing = os.path.join(
file_missing_columns.dirname, file_missing_columns.basename
)
move_df = read_csv(
filename_missing, names=[LATITUDE, LONGITUDE, DATETIME, TRAJ_ID]
)
assert MoveDataFrame.has_columns(move_df)
try:
MoveDataFrame.validate_move_data_frame(move_df)
except Exception:
assert False
assert isinstance(move_df, PandasMoveDataFrame)
def test_move_data_frame_from_dict():
dict_data = {
LATITUDE: [39.984198, 39.984224, 39.984094],
LONGITUDE: [116.319402, 116.319322, 116.319402],
DATETIME: [
'2008-10-23 05:53:11',
'2008-10-23 05:53:06',
'2008-10-23 05:53:06',
],
}
move_df = MoveDataFrame(
data=dict_data,
latitude=LATITUDE,
longitude=LONGITUDE,
datetime=DATETIME,
traj_id=TRAJ_ID,
)
assert MoveDataFrame.has_columns(move_df)
try:
MoveDataFrame.validate_move_data_frame(move_df)
except Exception:
assert False
assert isinstance(move_df, PandasMoveDataFrame)
def test_move_data_frame_from_data_frame():
df = _default_pandas_df()
move_df = MoveDataFrame(
data=df, latitude=LATITUDE, longitude=LONGITUDE, datetime=DATETIME
)
assert MoveDataFrame.has_columns(move_df)
try:
MoveDataFrame.validate_move_data_frame(move_df)
except Exception:
assert False
assert isinstance(move_df, PandasMoveDataFrame)
def test_attribute_error_from_data_frame():
df = DataFrame(
data=[
[39.984094, 116.319236, Timestamp('2008-10-23 05:53:05'), 1],
[39.984198, 116.319322, Timestamp('2008-10-23 05:53:06'), 1],
[39.984224, 116.319402, Timestamp('2008-10-23 05:53:11'), 2],
[39.984224, 116.319402, Timestamp('2008-10-23 05:53:11'), 2],
],
columns=['laterr', 'lon', 'datetime', 'id'],
index=[0, 1, 2, 3],
)
try:
MoveDataFrame(
data=df, latitude=LATITUDE, longitude=LONGITUDE, datetime=DATETIME
)
raise AssertionError(
'AttributeError error not raised by MoveDataFrame'
)
except KeyError:
pass
df = DataFrame(
data=[
[39.984094, 116.319236, Timestamp('2008-10-23 05:53:05'), 1],
[39.984198, 116.319322, Timestamp('2008-10-23 05:53:06'), 1],
[39.984224, 116.319402, Timestamp('2008-10-23 05:53:11'), 2],
[39.984224, 116.319402, Timestamp('2008-10-23 05:53:11'), 2],
],
columns=['lat', 'lonerr', 'datetime', 'id'],
index=[0, 1, 2, 3],
)
try:
MoveDataFrame(
data=df, latitude=LATITUDE, longitude=LONGITUDE, datetime=DATETIME
)
raise AssertionError(
'AttributeError error not raised by MoveDataFrame'
)
except KeyError:
pass
df = DataFrame(
data=[
[39.984094, 116.319236, Timestamp('2008-10-23 05:53:05'), 1],
[39.984198, 116.319322, Timestamp('2008-10-23 05:53:06'), 1],
[39.984224, 116.319402, Timestamp('2008-10-23 05:53:11'), 2],
[39.984224, 116.319402, Timestamp('2008-10-23 05:53:11'), 2],
],
columns=['lat', 'lon', 'datetimerr', 'id'],
index=[0, 1, 2, 3],
)
try:
MoveDataFrame(
data=df, latitude=LATITUDE, longitude=LONGITUDE, datetime=DATETIME
)
raise AssertionError(
'AttributeError error not raised by MoveDataFrame'
)
except KeyError:
pass
def test_lat():
move_df = _default_move_df()
lat = move_df.lat
srs = Series(
data=[39.984094, 39.984198, 39.984224, 39.984224],
index=[0, 1, 2, 3],
dtype='float64',
name='lat',
)
assert_series_equal(lat, srs)
def test_lon():
move_df = _default_move_df()
lon = move_df.lon
srs = Series(
data=[116.319236, 116.319322, 116.319402, 116.319402],
index=[0, 1, 2, 3],
dtype='float64',
name='lon',
)
assert_series_equal(lon, srs)
def test_datetime():
move_df = _default_move_df()
datetime = move_df.datetime
srs = Series(
data=[
'2008-10-23 05:53:05',
'2008-10-23 05:53:06',
'2008-10-23 05:53:11',
'2008-10-23 05:53:11',
],
index=[0, 1, 2, 3],
dtype='datetime64[ns]',
name='datetime',
)
assert_series_equal(datetime, srs)
def test_loc():
move_df = _default_move_df()
assert move_df.loc[0, TRAJ_ID] == 1
loc_ = move_df.loc[move_df[LONGITUDE] > 116.319321]
expected = DataFrame(
data=[
[39.984198, 116.319322, Timestamp('2008-10-23 05:53:06'), 1],
[39.984224, 116.319402, Timestamp('2008-10-23 05:53:11'), 2],
[39.984224, 116.319402, Timestamp('2008-10-23 05:53:11'), 2],
],
columns=['lat', 'lon', 'datetime', 'id'],
index=[1, 2, 3],
)
assert_frame_equal(loc_, expected)
def test_iloc():
move_df = _default_move_df()
expected = Series(
data=[39.984094, 116.319236, Timestamp('2008-10-23 05:53:05'), 1],
index=['lat', 'lon', 'datetime', 'id'],
dtype='object',
name=0,
)
assert_series_equal(move_df.iloc[0], expected)
def test_at():
move_df = _default_move_df()
assert move_df.at[0, TRAJ_ID] == 1
def test_values():
move_df = _default_move_df()
expected = [
[39.984094, 116.319236, Timestamp('2008-10-23 05:53:05'), 1],
[39.984198, 116.319322, Timestamp('2008-10-23 05:53:06'), 1],
[39.984224, 116.319402, Timestamp('2008-10-23 05:53:11'), 2],
[39.984224, 116.319402, Timestamp('2008-10-23 05:53:11'), 2],
]
assert_array_equal(move_df.values, expected)
def test_columns():
move_df = _default_move_df()
assert_array_equal(
move_df.columns, [LATITUDE, LONGITUDE, DATETIME, TRAJ_ID]
)
def test_index():
move_df = _default_move_df()
assert_array_equal(move_df.index, [0, 1, 2, 3])
def test_dtypes():
move_df = _default_move_df()
expected = Series(
data=['float64', 'float64', '<M8[ns]', 'int64'],
index=['lat', 'lon', 'datetime', 'id'],
dtype='object',
name=None,
)
assert_series_equal(move_df.dtypes, expected)
def test_shape():
move_df = _default_move_df()
assert move_df.shape == (4, 4)
def test_len():
move_df = _default_move_df()
assert move_df.len() == 4
def test_unique():
move_df = _default_move_df()
assert_array_equal(move_df['id'].unique(), [1, 2])
def test_head():
move_df = _default_move_df()
expected = DataFrame(
data=[
[39.984094, 116.319236, Timestamp('2008-10-23 05:53:05'), 1],
[39.984198, 116.319322, Timestamp('2008-10-23 05:53:06'), 1],
],
columns=['lat', 'lon', 'datetime', 'id'],
index=[0, 1],
)
assert_frame_equal(move_df.head(2), expected)
def test_tail():
move_df = _default_move_df()
expected = DataFrame(
data=[
[39.984224, 116.319402, Timestamp('2008-10-23 05:53:11'), 2],
[39.984224, 116.319402, Timestamp('2008-10-23 05:53:11'), 2],
],
columns=['lat', 'lon', 'datetime', 'id'],
index=[2, 3],
)
assert_frame_equal(move_df.tail(2), expected)
def test_number_users():
move_df = MoveDataFrame(
data=list_data,
latitude=LATITUDE,
longitude=LONGITUDE,
datetime=DATETIME,
traj_id=TRAJ_ID,
)
assert move_df.get_users_number() == 1
move_df[UID] = [1, 1, 2, 3]
assert move_df.get_users_number() == 3
def test_to_numpy():
move_df = MoveDataFrame(
data=list_data,
latitude=LATITUDE,
longitude=LONGITUDE,
datetime=DATETIME,
traj_id=TRAJ_ID,
)
assert isinstance(move_df.to_numpy(), ndarray)
def test_to_dict():
move_df = MoveDataFrame(
data=list_data,
latitude=LATITUDE,
longitude=LONGITUDE,
datetime=DATETIME,
traj_id=TRAJ_ID,
)
assert isinstance(move_df.to_dict(), dict)
def test_to_grid():
move_df = MoveDataFrame(
data=list_data,
latitude=LATITUDE,
longitude=LONGITUDE,
datetime=DATETIME,
traj_id=TRAJ_ID,
)
g = move_df.to_grid(8)
assert isinstance(g, Grid)
def test_to_data_frame():
move_df = MoveDataFrame(
data=list_data,
latitude=LATITUDE,
longitude=LONGITUDE,
datetime=DATETIME,
traj_id=TRAJ_ID,
)
assert isinstance(move_df.to_data_frame(), DataFrame)
def test_to_discrete_move_df():
move_df = PandasDiscreteMoveDataFrame(
data={DATETIME: ['2020-01-01 01:08:29',
'2020-01-05 01:13:24',
'2020-01-06 02:21:53',
'2020-01-06 03:34:48',
'2020-01-08 05:55:41'],
LATITUDE: [3.754245,
3.150849,
3.754249,
3.165933,
3.920178],
LONGITUDE: [38.3456743,
38.6913486,
38.3456743,
38.2715962,
38.5161605],
TRAJ_ID: ['pwe-5089',
'xjt-1579',
'tre-1890',
'xjt-1579',
'pwe-5089'],
LOCAL_LABEL: [1, 4, 2, 16, 32]},
)
assert isinstance(
move_df.to_dicrete_move_df(), PandasDiscreteMoveDataFrame
)
def test_describe():
move_df = _default_move_df()
expected = DataFrame(
data=[
[4.0, 4.0, 4.0],
[39.984185, 116.31934049999998, 1.5],
[6.189237971348586e-05, 7.921910543639078e-05, 0.5773502691896257],
[39.984094, 116.319236, 1.0],
[39.984172, 116.3193005, 1.0],
[39.984211, 116.319362, 1.5],
[39.984224, 116.319402, 2.0],
[39.984224, 116.319402, 2.0],
],
columns=['lat', 'lon', 'id'],
index=['count', 'mean', 'std', 'min', '25%', '50%', '75%', 'max'],
)
assert_frame_equal(move_df.describe(), expected)
def test_memory_usage():
move_df = _default_move_df()
expected = Series(
data=[128, 32, 32, 32, 32],
index=['Index', 'lat', 'lon', 'datetime', 'id'],
dtype='int64',
name=None,
)
assert_series_equal(move_df.memory_usage(), expected)
def test_copy():
move_df = _default_move_df()
cp = move_df.copy()
assert_frame_equal(move_df, cp)
cp.at[0, TRAJ_ID] = 0
assert move_df.loc[0, TRAJ_ID] == 1
assert move_df.loc[0, TRAJ_ID] != cp.loc[0, TRAJ_ID]
def test_generate_tid_based_on_id_datetime():
move_df = _default_move_df()
new_move_df = move_df.generate_tid_based_on_id_datetime(inplace=False)
expected = DataFrame(
data=[
[
39.984094,
116.319236,
Timestamp('2008-10-23 05:53:05'),
1,
'12008102305',
],
[
39.984198,
116.319322,
Timestamp('2008-10-23 05:53:06'),
1,
'12008102305',
],
[
39.984224,
116.319402,
Timestamp('2008-10-23 05:53:11'),
2,
'22008102305',
],
[
39.984224,
116.319402,
Timestamp('2008-10-23 05:53:11'),
2,
'22008102305',
],
],
columns=['lat', 'lon', 'datetime', 'id', 'tid'],
index=[0, 1, 2, 3],
)
assert_frame_equal(new_move_df, expected)
assert isinstance(new_move_df, PandasMoveDataFrame)
assert TID not in move_df
move_df.generate_tid_based_on_id_datetime()
assert_frame_equal(move_df, expected)
def test_generate_date_features():
move_df = _default_move_df()
new_move_df = move_df.generate_date_features(inplace=False)
expected = DataFrame(
data=[
[
39.984094,
116.319236,
Timestamp('2008-10-23 05:53:05'),
1,
date(2008, 10, 23),
],
[
39.984198,
116.319322,
Timestamp('2008-10-23 05:53:06'),
1,
date(2008, 10, 23),
],
[
39.984224,
116.319402,
Timestamp('2008-10-23 05:53:11'),
2,
date(2008, 10, 23),
],
[
39.984224,
116.319402,
Timestamp('2008-10-23 05:53:11'),
2,
date(2008, 10, 23),
],
],
columns=['lat', 'lon', 'datetime', 'id', 'date'],
index=[0, 1, 2, 3],
)
assert_frame_equal(new_move_df, expected)
assert isinstance(new_move_df, PandasMoveDataFrame)
assert DATE not in move_df
move_df.generate_date_features()
assert_frame_equal(move_df, expected)
def test_generate_hour_features():
move_df = _default_move_df()
new_move_df = move_df.generate_hour_features(inplace=False)
expected = DataFrame(
data=[
[39.984094, 116.319236, Timestamp('2008-10-23 05:53:05'), 1, 5],
[39.984198, 116.319322, Timestamp('2008-10-23 05:53:06'), 1, 5],
[39.984224, 116.319402, Timestamp('2008-10-23 05:53:11'), 2, 5],
[39.984224, 116.319402, Timestamp('2008-10-23 05:53:11'), 2, 5],
],
columns=['lat', 'lon', 'datetime', 'id', 'hour'],
index=[0, 1, 2, 3],
)
assert_frame_equal(new_move_df, expected)
assert isinstance(new_move_df, PandasMoveDataFrame)
assert HOUR not in move_df
move_df.generate_hour_features()
assert_frame_equal(move_df, expected)
def test_generate_day_of_the_week_features():
move_df = _default_move_df()
new_move_df = move_df.generate_day_of_the_week_features(inplace=False)
expected = DataFrame(
data=[
[
39.984094,
116.319236,
Timestamp('2008-10-23 05:53:05'),
1,
'Thursday',
],
[
39.984198,
116.319322,
Timestamp('2008-10-23 05:53:06'),
1,
'Thursday',
],
[
39.984224,
116.319402,
Timestamp('2008-10-23 05:53:11'),
2,
'Thursday',
],
[
39.984224,
116.319402,
Timestamp('2008-10-23 05:53:11'),
2,
'Thursday',
],
],
columns=['lat', 'lon', 'datetime', 'id', 'day'],
index=[0, 1, 2, 3],
)
assert_frame_equal(new_move_df, expected)
assert isinstance(new_move_df, PandasMoveDataFrame)
assert DAY not in move_df
move_df.generate_day_of_the_week_features()
assert_frame_equal(move_df, expected)
def test_generate_weekend_features():
move_df = _default_move_df()
new_move_df = move_df.generate_weekend_features(inplace=False)
expected = DataFrame(
data=[
[39.984094, 116.319236, Timestamp('2008-10-23 05:53:05'), 1, 0],
[39.984198, 116.319322, Timestamp('2008-10-23 05:53:06'), 1, 0],
[39.984224, 116.319402, Timestamp('2008-10-23 05:53:11'), 2, 0],
[39.984224, 116.319402, Timestamp('2008-10-23 05:53:11'), 2, 0],
],
columns=['lat', 'lon', 'datetime', 'id', 'weekend'],
index=[0, 1, 2, 3],
)
assert_frame_equal(new_move_df, expected)
assert isinstance(new_move_df, PandasMoveDataFrame)
assert WEEK_END not in move_df
move_df.generate_weekend_features()
assert_frame_equal(move_df, expected)
def test_generate_time_of_day_features():
move_df = _default_move_df()
new_move_df = move_df.generate_time_of_day_features(inplace=False)
expected = DataFrame(
data=[
[
39.984094,
116.319236,
Timestamp('2008-10-23 05:53:05'),
1,
'Early morning',
],
[
39.984198,
116.319322,
Timestamp('2008-10-23 05:53:06'),
1,
'Early morning',
],
[
39.984224,
116.319402,
Timestamp('2008-10-23 05:53:11'),
2,
'Early morning',
],
[
39.984224,
116.319402,
Timestamp('2008-10-23 05:53:11'),
2,
'Early morning',
],
],
columns=['lat', 'lon', 'datetime', 'id', 'period'],
index=[0, 1, 2, 3],
)
assert_frame_equal(new_move_df, expected)
assert isinstance(new_move_df, PandasMoveDataFrame)
assert PERIOD not in move_df
move_df.generate_time_of_day_features()
assert_frame_equal(move_df, expected)
def test_generate_datetime_in_format_cyclical():
move_df = _default_move_df()
new_move_df = move_df.generate_datetime_in_format_cyclical(inplace=False)
expected = DataFrame(
data=[
[
39.984094,
116.319236,
Timestamp('2008-10-23 05:53:05'),
1,
0.9790840876823229,
0.20345601305263375,
],
[
39.984198,
116.319322,
Timestamp('2008-10-23 05:53:06'),
1,
0.9790840876823229,
0.20345601305263375,
],
[
39.984224,
116.319402,
Timestamp('2008-10-23 05:53:11'),
2,
0.9790840876823229,
0.20345601305263375,
],
[
39.984224,
116.319402,
Timestamp('2008-10-23 05:53:11'),
2,
0.9790840876823229,
0.20345601305263375,
],
],
columns=['lat', 'lon', 'datetime', 'id', 'hour_sin', 'hour_cos'],
index=[0, 1, 2, 3],
)
assert_frame_equal(new_move_df, expected)
assert isinstance(new_move_df, PandasMoveDataFrame)
assert HOUR_SIN not in move_df
move_df.generate_datetime_in_format_cyclical()
assert_frame_equal(move_df, expected)
def test_generate_dist_time_speed_features():
move_df = _default_move_df()
new_move_df = move_df.generate_dist_time_speed_features(inplace=False)
expected = DataFrame(
data=[
[
1,
39.984094,
116.319236,
Timestamp('2008-10-23 05:53:05'),
nan,
nan,
nan,
],
[
1,
39.984198,
116.319322,
Timestamp('2008-10-23 05:53:06'),
13.690153134343689,
1.0,
13.690153134343689,
],
[
2,
39.984224,
116.319402,
Timestamp('2008-10-23 05:53:11'),
nan,
nan,
nan,
],
[
2,
39.984224,
116.319402,
Timestamp('2008-10-23 05:53:11'),
0.0,
0.0,
nan,
],
],
columns=[
'id',
'lat',
'lon',
'datetime',
'dist_to_prev',
'time_to_prev',
'speed_to_prev',
],
index=[0, 1, 2, 3],
)
assert_frame_equal(new_move_df, expected)
assert isinstance(new_move_df, PandasMoveDataFrame)
assert DIST_TO_PREV not in move_df
move_df.generate_dist_time_speed_features()
assert_frame_equal(move_df, expected)
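# Editor note (illustrative sketch, not part of the original test suite): the expected
# dist_to_prev of ~13.69 above is consistent with the haversine distance between the first
# two points; the helper below recomputes it with plain numpy (the earth radius used here
# is an assumption of this sketch).
def _haversine_sketch():
    import numpy as np  # local import; the test module itself only imports nan/ndarray
    earth_radius_m = 6371000.0
    lat1, lon1 = np.radians([39.984094, 116.319236])
    lat2, lon2 = np.radians([39.984198, 116.319322])
    a = (np.sin((lat2 - lat1) / 2) ** 2
         + np.cos(lat1) * np.cos(lat2) * np.sin((lon2 - lon1) / 2) ** 2)
    return 2 * earth_radius_m * np.arcsin(np.sqrt(a))  # roughly 13.69 metres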
def test_generate_dist_features():
move_df = _default_move_df()
new_move_df = move_df.generate_dist_features(inplace=False)
expected = DataFrame(
data=[
[
1,
39.984094,
116.319236,
Timestamp('2008-10-23 05:53:05'),
nan,
13.690153134343689,
nan,
],
[
1,
39.984198,
116.319322,
Timestamp('2008-10-23 05:53:06'),
13.690153134343689,
nan,
nan,
],
[
2,
39.984224,
116.319402,
Timestamp('2008-10-23 05:53:11'),
nan,
0.0,
nan,
],
[
2,
39.984224,
116.319402,
Timestamp('2008-10-23 05:53:11'),
0.0,
nan,
nan,
],
],
columns=[
'id',
'lat',
'lon',
'datetime',
'dist_to_prev',
'dist_to_next',
'dist_prev_to_next',
],
index=[0, 1, 2, 3],
)
assert_frame_equal(new_move_df, expected)
assert isinstance(new_move_df, PandasMoveDataFrame)
assert DIST_PREV_TO_NEXT not in move_df
move_df.generate_dist_features()
assert_frame_equal(move_df, expected)
def test_generate_time_features():
move_df = _default_move_df()
new_move_df = move_df.generate_time_features(inplace=False)
expected = DataFrame(
data=[
[
1,
39.984094,
116.319236,
Timestamp('2008-10-23 05:53:05'),
nan,
1.0,
nan,
],
[
1,
39.984198,
116.319322,
Timestamp('2008-10-23 05:53:06'),
1.0,
nan,
nan,
],
[
2,
39.984224,
116.319402,
Timestamp('2008-10-23 05:53:11'),
nan,
0.0,
nan,
],
[
2,
39.984224,
116.319402,
Timestamp('2008-10-23 05:53:11'),
0.0,
nan,
nan,
],
],
columns=[
'id',
'lat',
'lon',
'datetime',
'time_to_prev',
'time_to_next',
'time_prev_to_next',
],
index=[0, 1, 2, 3],
)
assert_frame_equal(new_move_df, expected)
assert isinstance(new_move_df, PandasMoveDataFrame)
assert TIME_PREV_TO_NEXT not in move_df
move_df.generate_time_features()
assert_frame_equal(move_df, expected)
def test_generate_speed_features():
move_df = _default_move_df()
new_move_df = move_df.generate_speed_features(inplace=False)
expected = DataFrame(
data=[
[
1,
39.984094,
116.319236,
Timestamp('2008-10-23 05:53:05'),
nan,
13.690153134343689,
nan,
],
[
1,
39.984198,
116.319322,
Timestamp('2008-10-23 05:53:06'),
13.690153134343689,
nan,
nan,
],
[
2,
39.984224,
116.319402,
Timestamp('2008-10-23 05:53:11'),
nan,
nan,
nan,
],
[
2,
39.984224,
116.319402,
Timestamp('2008-10-23 05:53:11'),
nan,
nan,
nan,
],
],
columns=[
'id',
'lat',
'lon',
'datetime',
'speed_to_prev',
'speed_to_next',
'speed_prev_to_next',
],
index=[0, 1, 2, 3],
)
assert_frame_equal(new_move_df, expected)
assert isinstance(new_move_df, PandasMoveDataFrame)
assert SPEED_PREV_TO_NEXT not in move_df
move_df.generate_speed_features()
assert_frame_equal(move_df, expected)
def test_generate_move_and_stop_by_radius():
move_df = _default_move_df()
new_move_df = move_df.generate_move_and_stop_by_radius(inplace=False)
expected = DataFrame(
data=[
[
1,
39.984094,
116.319236,
Timestamp('2008-10-23 05:53:05'),
nan,
13.690153134343689,
nan,
'nan',
],
[
1,
39.984198,
116.319322,
Timestamp('2008-10-23 05:53:06'),
13.690153134343689,
nan,
nan,
'move',
],
[
2,
39.984224,
116.319402,
Timestamp('2008-10-23 05:53:11'),
nan,
0.0,
nan,
'nan',
],
[
2,
39.984224,
116.319402,
Timestamp('2008-10-23 05:53:11'),
0.0,
nan,
nan,
'stop',
],
],
columns=[
'id',
'lat',
'lon',
'datetime',
'dist_to_prev',
'dist_to_next',
'dist_prev_to_next',
'situation',
],
index=[0, 1, 2, 3],
)
assert_frame_equal(new_move_df, expected)
assert isinstance(new_move_df, PandasMoveDataFrame)
assert SITUATION not in move_df
move_df.generate_move_and_stop_by_radius()
assert_frame_equal(move_df, expected)
def test_time_interval():
move_df = _default_move_df()
assert move_df.time_interval() == Timedelta('0 days 00:00:06')
def test_get_bbox():
move_df = _default_move_df()
assert_allclose(
move_df.get_bbox(), (39.984093, 116.31924, 39.984222, 116.319405)
)
def test_min():
move_df = _default_move_df()
expected = Series(
data=[39.984094, 116.319236, Timestamp('2008-10-23 05:53:05'), 1],
index=['lat', 'lon', 'datetime', 'id'],
dtype='object',
name=None,
)
assert_series_equal(move_df.min(), expected)
def test_max():
move_df = _default_move_df()
expected = Series(
data=[39.984224, 116.319402, Timestamp('2008-10-23 05:53:11'), 2],
index=['lat', 'lon', 'datetime', 'id'],
dtype='object',
name=None,
)
assert_series_equal(move_df.max(), expected)
def test_count():
move_df = _default_move_df()
expected = Series(
data=[4, 4, 4, 4],
index=['lat', 'lon', 'datetime', 'id'],
dtype='int64',
name=None,
)
assert_series_equal(move_df.count(), expected)
def test_group_by():
move_df = _default_move_df()
expected = _default_pandas_df()
expected = expected.groupby('id').mean()
assert_frame_equal(move_df.groupby(TRAJ_ID).mean(), expected)
def test_select_dtypes():
move_df = _default_move_df()
expected = DataFrame(
data=[
[39.984094, 116.319236],
[39.984198, 116.319322],
[39.984224, 116.319402],
[39.984224, 116.319402],
],
columns=['lat', 'lon'],
index=[0, 1, 2, 3],
)
assert_frame_equal(move_df.select_dtypes(include='float64'), expected)
def test_astype():
move_df = _default_move_df()
expected = DataFrame(
data=[
[39, 116, 1224741185000000000, 1],
[39, 116, 1224741186000000000, 1],
[39, 116, 1224741191000000000, 2],
[39, 116, 1224741191000000000, 2],
],
columns=['lat', 'lon', 'datetime', 'id'],
index=[0, 1, 2, 3],
)
result = move_df.astype('int64')
assert_frame_equal(result, expected)
def test_sort_values():
move_df = _default_move_df()
expected = DataFrame(
data=[
[39.984224, 116.319402, Timestamp('2008-10-23 05:53:11'), 2],
[39.984224, 116.319402, Timestamp('2008-10-23 05:53:11'), 2],
[39.984094, 116.319236, Timestamp('2008-10-23 05:53:05'), 1],
[39.984198, 116.319322, Timestamp('2008-10-23 05:53:06'), 1],
],
columns=['lat', 'lon', 'datetime', 'id'],
index=[2, 3, 0, 1],
)
assert_frame_equal(
move_df.sort_values(by=TRAJ_ID, ascending=False), expected
)
def test_reset_index():
move_df = _default_move_df()
move_df = move_df.loc[1:]
assert_array_equal(move_df.index, [1, 2, 3])
move_df.reset_index(inplace=True)
assert_array_equal(move_df.index, [0, 1, 2])
def test_set_index():
move_df = _default_move_df()
expected = DataFrame(
data=[
[39.984094, 116.319236, Timestamp('2008-10-23 05:53:05')],
[39.984198, 116.319322, Timestamp('2008-10-23 05:53:06')],
[39.984224, 116.319402, Timestamp('2008-10-23 05:53:11')],
[39.984224, 116.319402, Timestamp('2008-10-23 05:53:11')],
],
columns=['lat', 'lon', 'datetime'],
index=[1, 1, 2, 2],
)
expected.index.name = 'id'
assert_frame_equal(move_df.set_index('id'), expected)
try:
move_df.set_index('datetime', inplace=True)
assert False
except AttributeError:
assert True
def test_drop():
move_df = _default_move_df()
move_df[UID] = [1, 1, 2, 3]
move_test = move_df.drop(columns=[UID])
assert UID not in move_test
assert UID in move_df
assert isinstance(move_test, PandasMoveDataFrame)
move_test = move_df.drop(index=[0, 1])
assert move_test.len() == 2
assert isinstance(move_test, PandasMoveDataFrame)
move_df.drop(columns=[UID], inplace=True)
assert UID not in move_df
assert isinstance(move_df, PandasMoveDataFrame)
try:
move_df.drop(columns=[LATITUDE], inplace=True)
raise AssertionError(
'AttributeError error not raised by MoveDataFrame'
)
except AttributeError:
pass
try:
move_df.drop(columns=[LONGITUDE], inplace=True)
raise AssertionError(
'AttributeError error not raised by MoveDataFrame'
)
except AttributeError:
pass
try:
move_df.drop(columns=[DATETIME], inplace=True)
raise AssertionError(
'AttributeError error not raised by MoveDataFrame'
)
except AttributeError:
pass
def test_duplicated():
move_df = _default_move_df()
expected = [False, True, False, True]
assert_array_equal(move_df.duplicated(TRAJ_ID), expected)
expected = [False, False, True, False]
assert_array_equal(
move_df.duplicated(subset=DATETIME, keep='last'), expected
)
def test_drop_duplicates():
move_df = _default_move_df()
move_test = move_df.drop_duplicates()
expected = DataFrame(
data=[
[39.984094, 116.319236, Timestamp('2008-10-23 05:53:05'), 1],
[39.984198, 116.319322, Timestamp('2008-10-23 05:53:06'), 1],
[39.984224, 116.319402, | Timestamp('2008-10-23 05:53:11') | pandas.Timestamp |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
#-- fix for tensorflow 2.0 version ---
# import tensorflow.compat.v1 as tf
# tf.disable_v2_behavior()
import numpy as np
import os
import matplotlib.pyplot as plt
import traceback
import warnings
from mpl_toolkits.mplot3d import Axes3D
from matplotlib import cm
import matplotlib as mpl
import pandas as pd
from tensorflow.python.client import device_lib
from tensorflow_probability.python.models.bayesgp.scripts import calibration
from tensorflow_probability.python.models.bayesgp.scripts import sensitivity
import copy
import math
class Calibration_model():
def __init__(self, sim_inputs_pars, sim_outputs, exp_inputs, exp_outputs, model_info = None, kernel_type = 'RBF', noise_level = 1e-3, labels = []):
# Inputs:
# sim_inputs_pars := N x D numpy array of simulation inputs and calibration parameter values.
# The first columns must correspond to the input variables and the remaining
# columns must correspond to the calibration parameters.
# sim_outputs := N-dimensional numpy vector of outputs
# exp_inputs := N x D numpy array of experimental input values. The variable columns
# must have the same order as the input variable columns of sim_inputs_pars
# exp_outputs := N-dimensional numpy vector of outputs
# model_info = dictionary containing
# 1) a dictionary containing the hyperparameter samples.
# 2) numpy array of samples for the calibration parameters
# 3) the value of the noise variance
# 4) the type of kernel used
# the dictionary must be generated from a previous run of the model with the same set of inputs and outputs arrays
# if model_info is not provided, the kernel_type and noise level can be provided or will be set as default
# kernel_type := string specifying the type of kernel to be used. Options are
# 'RBF', 'Matern12', 'Matern32', 'Matern52'
# noise_level := variance of the Gaussian noise for the normalized data
# labels:= list containing labels for the input variables and the calibration parameters. A default list is
# generated if this is not specified
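# Illustrative shapes (hypothetical sizes, not taken from the original docs):
#   sim_inputs_pars: (200, 3) -> 2 input variables followed by 1 calibration parameter
#   sim_outputs: (200,)   exp_inputs: (50, 2)   exp_outputs: (50,)
#   labels: ['x0', 'x1', 'p0']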
# Checking that the Gaussian noise variance is between 0 and 1
if model_info:
warnings.warn("Retrieving model info from previous run. The set of inputs and outputs arrays must be the same as the previous run.")
try:
self.hyperpar_samples = copy.deepcopy(model_info['hyp_samples'])
self.par_samples = model_info['par_samples'].copy()
self.kernel_type = model_info['kernel_type']
noise_level = model_info['noise_level']
except Exception as e:
traceback.print_exc()
print('Failed to retrieve model info.')
else:
self.hyperpar_samples = {}
self.kernel_type = kernel_type
noise_level = noise_level
if (noise_level > 1) or (noise_level < 0):
raise Exception('Invalid value for the noise_level: ' + str(noise_level) + '. It should be between 0 and 1.')
if len(sim_inputs_pars.shape) == 1:
raise Exception('Array sim_inputs_pars of simulator input and calibration parameter must have at least 2 columns')
if len(exp_inputs.shape) == 1:
self.n_inputs = 1
Xexp = exp_inputs[:,None]
else:
self.n_inputs = exp_inputs.shape[1]
Xexp = exp_inputs
self.n_pars = sim_inputs_pars.shape[1] - self.n_inputs # number of calibration parameters
if self.n_pars <= 0:
raise Exception('Computed number of calibration parameters is less than or equal to 0! Array sim_inputs_pars is supposed to have more columns than array exp_inputs.')
# normalizing the data
mean_sim = np.mean(sim_inputs_pars, axis = 0)
std_sim = np.std(sim_inputs_pars, axis = 0, keepdims = True)
sim_in_norm = (sim_inputs_pars - mean_sim)/std_sim
exp_in_norm = (Xexp - mean_sim[:self.n_inputs])/std_sim[:,:self.n_inputs]
self.scaling_input = [mean_sim, std_sim]
# Normalizing the outputs
mean_y = np.mean(sim_outputs)
std_y = np.std(sim_outputs)
sim_out_norm = (sim_outputs - mean_y)/std_y
exp_out_norm = (exp_outputs - mean_y)/std_y
self.scaling_output = [mean_y, std_y]
# The normalized calibration parameters will be given uniform distributions. We need to specify the bounds for these distributions.
lower_bounds = np.min(sim_in_norm[:,self.n_inputs:], axis = 0).tolist()
upper_bounds = np.max(sim_in_norm[:,self.n_inputs:], axis = 0).tolist()
# Initialize the model
self.model = calibration.Calibration(sim_in_norm, sim_out_norm, exp_in_norm, exp_out_norm, lower_bounds, upper_bounds, self.kernel_type, noise_level)
# Bounds needed for sensitivity analysis
mins_range = np.min(sim_inputs_pars, axis = 0, keepdims = True).T
maxs_range = np.max(sim_inputs_pars, axis = 0,keepdims = True).T
self.Range = np.concatenate([mins_range, maxs_range], axis = 1)
mins_range = np.min(sim_in_norm, axis = 0, keepdims = True).T
maxs_range = np.max(sim_in_norm, axis = 0,keepdims = True).T
self.Rangenorm = np.concatenate([mins_range, maxs_range], axis = 1)
if labels == []:
self.input_labels = ['x' + str(i) for i in range(self.n_inputs)]
self.par_labels = ['p' + str(i) for i in range(self.n_pars)]
self.labels = self.input_labels + self.par_labels
elif (len(labels) != self.n_inputs + self.n_pars) or not(all(isinstance(s, str) for s in labels)):
raise Exception('Invalid input for labels')
else:
self.labels = labels
self.input_labels = labels[:self.n_inputs]
self.par_labels = labels[self.n_inputs:]
return
def run_mcmc(self, mcmc_samples,num_leapfrog_steps = 3, estimate_noise = False, em_iters = 400, learning_rate = 0.05, warm_up = True, step_size = 0.01):
# Inputs:
# mcmc_samples := number of desired samples for the hyperparameters
# num_leapfrog_steps := number of leapfrog steps for the HMC sampler
# estimate_noise := Boolean that indicates if the model should estimate a noise variance or keep it fixed (estimation is done with EM MCMC)
# em_iters := number of EM-steps to perform if the noise variance is estimated
# learning_rate := learning rate for optimizer if the noise variance is estimated
# warm_up := Assuming the noise is kept fixed (i.e. estimate_noise == False), this Boolean indicates if an adaptive step size is computed during a "warm up" phase
# step_size := step size to use for the HMC sampler if warm_up == False
# Output: model_info = dictionary containing
# 1) dictionary with samples of hyperparameters as well as the loss function history (if the noise is estimated)
# 2) numpy array of calibration parameters
# 3) the value of the noise variance
# 4) the type of kernel used
#
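# Illustrative call (hypothetical sample counts, not prescribed by the original code):
#   model_info = model.run_mcmc(mcmc_samples=2000, num_leapfrog_steps=3,
#                               estimate_noise=False, warm_up=True)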
if estimate_noise == False:
print('Noise variance is fixed.')
if warm_up:
# Execute a warmup phase to compute an adaptive step size
burn_in = mcmc_samples//2
num_warmup_iters = burn_in
try:
print('Executing the warmup.')
step_size, next_state = self.model.warmup(num_warmup_iters = num_warmup_iters, num_leapfrog_steps = num_leapfrog_steps)
if step_size < 1e-4:
warnings.warn("Estimated step size is low. (less than 1e-4)")
print('Sampling in progress.')
self.par_samples, hyperpar_samples, acceptance_rate = self.model.mcmc(mcmc_samples = mcmc_samples, num_burnin_steps =burn_in,step_size = 0.9*step_size,
num_leapfrog_steps = num_leapfrog_steps, initial_state = next_state)
if acceptance_rate < 0.1:
warnings.warn("Acceptance rate was low (less than 0.1)")
except Exception as e:
traceback.print_exc()
print('Sampling failed. Increase the noise level or decrease the step size or the number of leap frog steps if necessary.')
else:
try:
burn_in = mcmc_samples
print('Sampling in progress.')
self.par_samples, hyperpar_samples, acceptance_rate = self.model.mcmc(mcmc_samples = mcmc_samples, num_burnin_steps =burn_in,step_size = 0.9*step_size,
num_leapfrog_steps = num_leapfrog_steps)
if acceptance_rate < 0.1:
warnings.warn("Acceptance rate was low (less than 0.1)")
except Exception as e:
traceback.print_exc()
print('Sampling failed. Increase the noise level or decrease the step size or the number of leap frog steps if necessary.')
else:
print('Estimating the noise variance using EMMCMC')
try:
num_warmup_iters = mcmc_samples//2
self.par_samples, hyperpar_samples, loss_history,_ = self.model.EM_with_MCMC(num_warmup_iters = num_warmup_iters, em_iters = em_iters,
mcmc_samples = mcmc_samples, num_leapfrog_steps = num_leapfrog_steps, learning_rate = learning_rate)
except Exception as e:
traceback.print_exc()
loc_samples, varsim_samples, betaspar_samples, betasx_samples, betad_samples, vard_samples = hyperpar_samples
self.hyperpar_samples['sim_kernel_variance'] = varsim_samples
self.hyperpar_samples['disc_kernel_variance'] = vard_samples
self.hyperpar_samples['sim_inputs_kernel_inverse_lengthscales'] = betasx_samples
self.hyperpar_samples['sim_pars_kernel_inverse_lengthscales'] = betaspar_samples
self.hyperpar_samples['disc_kernel_inverse_lengthscales'] = betad_samples
self.hyperpar_samples['sim_gp_constant_mean_function'] = loc_samples
model_info = {}
model_info['hyp_samples'] = copy.deepcopy(self.hyperpar_samples)
model_info['par_samples'] = self.par_samples.copy()
model_info['kernel_type'] = self.kernel_type
model_info['noise_level'] = self.model.noise
return model_info
def plot_chains(self, directory_path1 = None, directory_path2 = None):
# Function used to plot the chains from the mcmc sampling and scatter plot
# for the calibration parameters
# Inputs:
# directory_path1:= directory where to save the mcmc samples plot. It defaults to the current directory if not specified
# directory_path2:= directory where to save the scatter plot. It defaults to the current directory if not specified
if len(self.hyperpar_samples) == 0:
raise Exception('Hyperparameter samples must be generated or retrieved first.')
# plotting the mcmc chains
nplots = 2*self.n_inputs + 2*self.n_pars + 3
fig, axes = plt.subplots(nplots, 1, figsize=(20, 2.0*nplots),sharex=True)
# 1) Plotting and saving the chains for the inverse lengthscale
betasx_samples = self.hyperpar_samples['sim_inputs_kernel_inverse_lengthscales']
mcmc_samples = len(betasx_samples)
t = np.arange(mcmc_samples)
k = -1
for i in range(self.n_inputs):
k = k+1
axes[k].plot(t,betasx_samples[:,i])
title = self.input_labels[i] + 'sim_inverse_lengthscale_samples'
axes[k].set_title(title)
betaspar_samples = self.hyperpar_samples['sim_pars_kernel_inverse_lengthscales']
for i in range(self.n_pars):
k = k+1
axes[k].plot(t,betaspar_samples[:,i])
title = self.par_labels[i] + 'sim_inverse_lengthscale_samples'
axes[k].set_title(title)
betad_samples = self.hyperpar_samples['disc_kernel_inverse_lengthscales']
for i in range(self.n_inputs):
k = k+1
axes[k].plot(t,betad_samples[:,i])
title = self.input_labels[i] + 'disc_inverse_lengthscale_samples'
axes[k].set_title(title)
# 2) Plotting and saving the chains for the variances
varsim_samples = self.hyperpar_samples['sim_kernel_variance']
k = k+1
axes[k].plot(t,varsim_samples)
title = 'sim_kernel_variance_samples'
axes[k].set_title(title)
vard_samples = self.hyperpar_samples['disc_kernel_variance']
k = k+1
axes[k].plot(t,vard_samples)
title = 'disc_kern_variance_samples'
axes[k].set_title(title)
# 3) Plotting and saving the chain for the simulator mean function
loc_samples = self.hyperpar_samples['sim_gp_constant_mean_function']
k = k+1
axes[k].plot(t,loc_samples)
title = 'sim_constant_mean_function_samples'
axes[k].set_title(title)
# 4) Plotting and saving the chains for the calibration parameters
# We first need to convert to the proper scale
mean_sim, std_sim = self.scaling_input
par_samples_right_scale = self.par_samples*std_sim[0,self.n_inputs:] + mean_sim[self.n_inputs:]
for i in range(self.n_pars):
k = k+1
axes[k].plot(t,par_samples_right_scale[:,i])
title = self.par_labels[i] + '_samples'
axes[k].set_title(title)
if directory_path1 == None:
directory_path1 = os.getcwd()
if not(os.path.isdir(directory_path1)):
raise Exception('Invalid directory path ', directory_path1)
figpath ='mcmc_chains.png'
figpath = os.path.join(directory_path1, figpath)
plt.savefig(figpath)
plt.close()
# 5) scatter plot for the calibration parameters
if directory_path2 == None:
directory_path2 = os.getcwd()
if not(os.path.isdir(directory_path2)):
raise Exception('Invalid directory path ', directory_path2)
df = pd.DataFrame(par_samples_right_scale, columns = self.par_labels)
figpath = 'par_scatter.png'
figpath = os.path.join(directory_path2, figpath)
plt.figure(figsize = (12,12))
pd.plotting.scatter_matrix(df)
import streamlit as st
import streamlit.components.v1 as components
from streamlit_folium import folium_static
# import folium
import pandas as pd
import matplotlib.pyplot as plt
import datetime
import immo
import ssl
# to avoid SSLCertVerificationError
ssl._create_default_https_context = ssl._create_unverified_context
# search from data.gouv.fr geo-dvf
@st.cache
def load_data(code_commune):
# json_data = immo.dvf_commune(postcode)
url = "https://files.data.gouv.fr/geo-dvf/latest/csv/2021/communes/"+str(code_commune)[:2]+"/"+str(code_commune)+".csv"
df = pd.read_csv(url)
df.date_mutation = pd.to_datetime(df.date_mutation).dt.date
df = df.dropna(subset=['valeur_fonciere','surface_reelle_bati'])
df['prixm2'] = df.valeur_fonciere/df.surface_reelle_bati
df = df[df.prixm2<10000]
df.prixm2 = df.prixm2.astype(int)
df['marker_color'] = pd.cut(df['prixm2'], bins=4, labels=['blue', 'green', 'yellow', 'red'])
''' ATP Matches Data Pipeline '''
import datetime as dt, pandas as pd
def run_pipeline(start_year = 1968, end_year = dt.datetime.now().year + 1):
# Extract match data
tour_files = []
for i in range(start_year, end_year):
url_base = r'https://raw.githubusercontent.com/JeffSackmann/tennis_atp/master/atp_matches_'
url_end = '{}.csv'.format(i)
tour_files.append(url_base + url_end)
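# e.g. i=1968 yields .../tennis_atp/master/atp_matches_1968.csv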
df_tour = pd.read_csv(tour_files[0])
# -*- coding: utf-8 -*-
# pylint: disable=W0612,E1101
from datetime import datetime
import operator
import nose
from functools import wraps
import numpy as np
import pandas as pd
from pandas import Series, DataFrame, Index, isnull, notnull, pivot, MultiIndex
from pandas.core.datetools import bday
from pandas.core.nanops import nanall, nanany
from pandas.core.panel import Panel
from pandas.core.series import remove_na
import pandas.core.common as com
from pandas import compat
from pandas.compat import range, lrange, StringIO, OrderedDict, signature
from pandas import SparsePanel
from pandas.util.testing import (assert_panel_equal, assert_frame_equal,
assert_series_equal, assert_almost_equal,
assert_produces_warning, ensure_clean,
assertRaisesRegexp, makeCustomDataframe as
mkdf, makeMixedDataFrame)
import pandas.core.panel as panelm
import pandas.util.testing as tm
def ignore_sparse_panel_future_warning(func):
"""
decorator to ignore FutureWarning if we have a SparsePanel
can be removed when SparsePanel is fully removed
"""
@wraps(func)
def wrapper(self, *args, **kwargs):
if isinstance(self.panel, SparsePanel):
with assert_produces_warning(FutureWarning,
check_stacklevel=False):
return func(self, *args, **kwargs)
else:
return func(self, *args, **kwargs)
return wrapper
class PanelTests(object):
panel = None
def test_pickle(self):
unpickled = self.round_trip_pickle(self.panel)
assert_frame_equal(unpickled['ItemA'], self.panel['ItemA'])
def test_rank(self):
self.assertRaises(NotImplementedError, lambda: self.panel.rank())
def test_cumsum(self):
cumsum = self.panel.cumsum()
assert_frame_equal(cumsum['ItemA'], self.panel['ItemA'].cumsum())
def not_hashable(self):
c_empty = Panel()
c = Panel(Panel([[[1]]]))
self.assertRaises(TypeError, hash, c_empty)
self.assertRaises(TypeError, hash, c)
class SafeForLongAndSparse(object):
_multiprocess_can_split_ = True
def test_repr(self):
repr(self.panel)
@ignore_sparse_panel_future_warning
def test_copy_names(self):
for attr in ('major_axis', 'minor_axis'):
getattr(self.panel, attr).name = None
cp = self.panel.copy()
getattr(cp, attr).name = 'foo'
self.assertIsNone(getattr(self.panel, attr).name)
def test_iter(self):
tm.equalContents(list(self.panel), self.panel.items)
def test_count(self):
f = lambda s: notnull(s).sum()
self._check_stat_op('count', f, obj=self.panel, has_skipna=False)
def test_sum(self):
self._check_stat_op('sum', np.sum)
def test_mean(self):
self._check_stat_op('mean', np.mean)
def test_prod(self):
self._check_stat_op('prod', np.prod)
def test_median(self):
def wrapper(x):
if isnull(x).any():
return np.nan
return np.median(x)
self._check_stat_op('median', wrapper)
def test_min(self):
self._check_stat_op('min', np.min)
def test_max(self):
self._check_stat_op('max', np.max)
def test_skew(self):
try:
from scipy.stats import skew
except ImportError:
raise nose.SkipTest("no scipy.stats.skew")
def this_skew(x):
if len(x) < 3:
return np.nan
return skew(x, bias=False)
self._check_stat_op('skew', this_skew)
# def test_mad(self):
# f = lambda x: np.abs(x - x.mean()).mean()
# self._check_stat_op('mad', f)
def test_var(self):
def alt(x):
if len(x) < 2:
return np.nan
return np.var(x, ddof=1)
self._check_stat_op('var', alt)
def test_std(self):
def alt(x):
if len(x) < 2:
return np.nan
return np.std(x, ddof=1)
self._check_stat_op('std', alt)
def test_sem(self):
def alt(x):
if len(x) < 2:
return np.nan
return np.std(x, ddof=1) / np.sqrt(len(x))
self._check_stat_op('sem', alt)
# def test_skew(self):
# from scipy.stats import skew
# def alt(x):
# if len(x) < 3:
# return np.nan
# return skew(x, bias=False)
# self._check_stat_op('skew', alt)
def _check_stat_op(self, name, alternative, obj=None, has_skipna=True):
if obj is None:
obj = self.panel
# # set some NAs
# obj.ix[5:10] = np.nan
# obj.ix[15:20, -2:] = np.nan
f = getattr(obj, name)
if has_skipna:
def skipna_wrapper(x):
nona = remove_na(x)
if len(nona) == 0:
return np.nan
return alternative(nona)
def wrapper(x):
return alternative(np.asarray(x))
for i in range(obj.ndim):
result = f(axis=i, skipna=False)
assert_frame_equal(result, obj.apply(wrapper, axis=i))
else:
skipna_wrapper = alternative
wrapper = alternative
for i in range(obj.ndim):
result = f(axis=i)
if not tm._incompat_bottleneck_version(name):
assert_frame_equal(result, obj.apply(skipna_wrapper, axis=i))
self.assertRaises(Exception, f, axis=obj.ndim)
# Unimplemented numeric_only parameter.
if 'numeric_only' in signature(f).args:
self.assertRaisesRegexp(NotImplementedError, name, f,
numeric_only=True)
class SafeForSparse(object):
_multiprocess_can_split_ = True
@classmethod
def assert_panel_equal(cls, x, y):
assert_panel_equal(x, y)
def test_get_axis(self):
assert (self.panel._get_axis(0) is self.panel.items)
assert (self.panel._get_axis(1) is self.panel.major_axis)
assert (self.panel._get_axis(2) is self.panel.minor_axis)
def test_set_axis(self):
new_items = Index(np.arange(len(self.panel.items)))
new_major = Index(np.arange(len(self.panel.major_axis)))
new_minor = Index(np.arange(len(self.panel.minor_axis)))
# ensure propagate to potentially prior-cached items too
item = self.panel['ItemA']
self.panel.items = new_items
if hasattr(self.panel, '_item_cache'):
self.assertNotIn('ItemA', self.panel._item_cache)
self.assertIs(self.panel.items, new_items)
# TODO: unused?
item = self.panel[0] # noqa
self.panel.major_axis = new_major
self.assertIs(self.panel[0].index, new_major)
self.assertIs(self.panel.major_axis, new_major)
# TODO: unused?
item = self.panel[0] # noqa
self.panel.minor_axis = new_minor
self.assertIs(self.panel[0].columns, new_minor)
self.assertIs(self.panel.minor_axis, new_minor)
def test_get_axis_number(self):
self.assertEqual(self.panel._get_axis_number('items'), 0)
self.assertEqual(self.panel._get_axis_number('major'), 1)
self.assertEqual(self.panel._get_axis_number('minor'), 2)
def test_get_axis_name(self):
self.assertEqual(self.panel._get_axis_name(0), 'items')
self.assertEqual(self.panel._get_axis_name(1), 'major_axis')
self.assertEqual(self.panel._get_axis_name(2), 'minor_axis')
def test_get_plane_axes(self):
# what to do here?
index, columns = self.panel._get_plane_axes('items')
index, columns = self.panel._get_plane_axes('major_axis')
index, columns = self.panel._get_plane_axes('minor_axis')
index, columns = self.panel._get_plane_axes(0)
@ignore_sparse_panel_future_warning
def test_truncate(self):
dates = self.panel.major_axis
start, end = dates[1], dates[5]
trunced = self.panel.truncate(start, end, axis='major')
expected = self.panel['ItemA'].truncate(start, end)
assert_frame_equal(trunced['ItemA'], expected)
trunced = self.panel.truncate(before=start, axis='major')
expected = self.panel['ItemA'].truncate(before=start)
assert_frame_equal(trunced['ItemA'], expected)
trunced = self.panel.truncate(after=end, axis='major')
expected = self.panel['ItemA'].truncate(after=end)
assert_frame_equal(trunced['ItemA'], expected)
# XXX test other axes
def test_arith(self):
self._test_op(self.panel, operator.add)
self._test_op(self.panel, operator.sub)
self._test_op(self.panel, operator.mul)
self._test_op(self.panel, operator.truediv)
self._test_op(self.panel, operator.floordiv)
self._test_op(self.panel, operator.pow)
self._test_op(self.panel, lambda x, y: y + x)
self._test_op(self.panel, lambda x, y: y - x)
self._test_op(self.panel, lambda x, y: y * x)
self._test_op(self.panel, lambda x, y: y / x)
self._test_op(self.panel, lambda x, y: y ** x)
self._test_op(self.panel, lambda x, y: x + y) # panel + 1
self._test_op(self.panel, lambda x, y: x - y) # panel - 1
self._test_op(self.panel, lambda x, y: x * y) # panel * 1
self._test_op(self.panel, lambda x, y: x / y) # panel / 1
self._test_op(self.panel, lambda x, y: x ** y) # panel ** 1
self.assertRaises(Exception, self.panel.__add__, self.panel['ItemA'])
@staticmethod
def _test_op(panel, op):
result = op(panel, 1)
assert_frame_equal(result['ItemA'], op(panel['ItemA'], 1))
def test_keys(self):
tm.equalContents(list(self.panel.keys()), self.panel.items)
def test_iteritems(self):
# Test panel.iteritems(), aka panel.iteritems()
# just test that it works
for k, v in self.panel.iteritems():
pass
self.assertEqual(len(list(self.panel.iteritems())),
len(self.panel.items))
@ignore_sparse_panel_future_warning
def test_combineFrame(self):
def check_op(op, name):
# items
df = self.panel['ItemA']
func = getattr(self.panel, name)
result = func(df, axis='items')
assert_frame_equal(result['ItemB'], op(self.panel['ItemB'], df))
# major
xs = self.panel.major_xs(self.panel.major_axis[0])
result = func(xs, axis='major')
idx = self.panel.major_axis[1]
assert_frame_equal(result.major_xs(idx),
op(self.panel.major_xs(idx), xs))
# minor
xs = self.panel.minor_xs(self.panel.minor_axis[0])
result = func(xs, axis='minor')
idx = self.panel.minor_axis[1]
assert_frame_equal(result.minor_xs(idx),
op(self.panel.minor_xs(idx), xs))
ops = ['add', 'sub', 'mul', 'truediv', 'floordiv']
if not compat.PY3:
ops.append('div')
# pow, mod not supported for SparsePanel as flex ops (for now)
if not isinstance(self.panel, SparsePanel):
ops.extend(['pow', 'mod'])
else:
idx = self.panel.minor_axis[1]
with assertRaisesRegexp(ValueError, "Simple arithmetic.*scalar"):
self.panel.pow(self.panel.minor_xs(idx), axis='minor')
with assertRaisesRegexp(ValueError, "Simple arithmetic.*scalar"):
self.panel.mod(self.panel.minor_xs(idx), axis='minor')
for op in ops:
try:
check_op(getattr(operator, op), op)
except:
com.pprint_thing("Failing operation: %r" % op)
raise
if compat.PY3:
try:
check_op(operator.truediv, 'div')
except:
com.pprint_thing("Failing operation: %r" % 'div')
raise
@ignore_sparse_panel_future_warning
def test_combinePanel(self):
result = self.panel.add(self.panel)
self.assert_panel_equal(result, self.panel * 2)
@ignore_sparse_panel_future_warning
def test_neg(self):
self.assert_panel_equal(-self.panel, self.panel * -1)
# issue 7692
def test_raise_when_not_implemented(self):
p = Panel(np.arange(3 * 4 * 5).reshape(3, 4, 5),
items=['ItemA', 'ItemB', 'ItemC'],
major_axis=pd.date_range('20130101', periods=4),
minor_axis=list('ABCDE'))
d = p.sum(axis=1).ix[0]
ops = ['add', 'sub', 'mul', 'truediv', 'floordiv', 'div', 'mod', 'pow']
for op in ops:
with self.assertRaises(NotImplementedError):
getattr(p, op)(d, axis=0)
@ignore_sparse_panel_future_warning
def test_select(self):
p = self.panel
# select items
result = p.select(lambda x: x in ('ItemA', 'ItemC'), axis='items')
expected = p.reindex(items=['ItemA', 'ItemC'])
self.assert_panel_equal(result, expected)
# select major_axis
result = p.select(lambda x: x >= datetime(2000, 1, 15), axis='major')
new_major = p.major_axis[p.major_axis >= datetime(2000, 1, 15)]
expected = p.reindex(major=new_major)
self.assert_panel_equal(result, expected)
# select minor_axis
result = p.select(lambda x: x in ('D', 'A'), axis=2)
expected = p.reindex(minor=['A', 'D'])
self.assert_panel_equal(result, expected)
# corner case, empty thing
result = p.select(lambda x: x in ('foo', ), axis='items')
self.assert_panel_equal(result, p.reindex(items=[]))
def test_get_value(self):
for item in self.panel.items:
for mjr in self.panel.major_axis[::2]:
for mnr in self.panel.minor_axis:
result = self.panel.get_value(item, mjr, mnr)
expected = self.panel[item][mnr][mjr]
assert_almost_equal(result, expected)
@ignore_sparse_panel_future_warning
def test_abs(self):
result = self.panel.abs()
result2 = abs(self.panel)
expected = np.abs(self.panel)
self.assert_panel_equal(result, expected)
self.assert_panel_equal(result2, expected)
df = self.panel['ItemA']
result = df.abs()
result2 = abs(df)
expected = np.abs(df)
assert_frame_equal(result, expected)
assert_frame_equal(result2, expected)
s = df['A']
result = s.abs()
result2 = abs(s)
expected = np.abs(s)
assert_series_equal(result, expected)
assert_series_equal(result2, expected)
self.assertEqual(result.name, 'A')
self.assertEqual(result2.name, 'A')
class CheckIndexing(object):
_multiprocess_can_split_ = True
def test_getitem(self):
self.assertRaises(Exception, self.panel.__getitem__, 'ItemQ')
def test_delitem_and_pop(self):
expected = self.panel['ItemA']
result = self.panel.pop('ItemA')
assert_frame_equal(expected, result)
self.assertNotIn('ItemA', self.panel.items)
del self.panel['ItemB']
self.assertNotIn('ItemB', self.panel.items)
self.assertRaises(Exception, self.panel.__delitem__, 'ItemB')
values = np.empty((3, 3, 3))
values[0] = 0
values[1] = 1
values[2] = 2
panel = Panel(values, lrange(3), lrange(3), lrange(3))
# did we delete the right row?
panelc = panel.copy()
del panelc[0]
assert_frame_equal(panelc[1], panel[1])
assert_frame_equal(panelc[2], panel[2])
panelc = panel.copy()
del panelc[1]
assert_frame_equal(panelc[0], panel[0])
assert_frame_equal(panelc[2], panel[2])
panelc = panel.copy()
del panelc[2]
assert_frame_equal(panelc[1], panel[1])
assert_frame_equal(panelc[0], panel[0])
def test_setitem(self):
# LongPanel with one item
lp = self.panel.filter(['ItemA', 'ItemB']).to_frame()
with tm.assertRaises(ValueError):
self.panel['ItemE'] = lp
# DataFrame
df = self.panel['ItemA'][2:].filter(items=['A', 'B'])
self.panel['ItemF'] = df
self.panel['ItemE'] = df
df2 = self.panel['ItemF']
assert_frame_equal(df, df2.reindex(index=df.index, columns=df.columns))
# scalar
self.panel['ItemG'] = 1
self.panel['ItemE'] = True
self.assertEqual(self.panel['ItemG'].values.dtype, np.int64)
self.assertEqual(self.panel['ItemE'].values.dtype, np.bool_)
# object dtype
self.panel['ItemQ'] = 'foo'
self.assertEqual(self.panel['ItemQ'].values.dtype, np.object_)
# boolean dtype
self.panel['ItemP'] = self.panel['ItemA'] > 0
self.assertEqual(self.panel['ItemP'].values.dtype, np.bool_)
self.assertRaises(TypeError, self.panel.__setitem__, 'foo',
self.panel.ix[['ItemP']])
# bad shape
p = Panel(np.random.randn(4, 3, 2))
with tm.assertRaisesRegexp(ValueError,
"shape of value must be \(3, 2\), "
"shape of given object was \(4, 2\)"):
p[0] = np.random.randn(4, 2)
def test_setitem_ndarray(self):
from pandas import date_range, datetools
timeidx = date_range(start=datetime(2009, 1, 1),
end=datetime(2009, 12, 31),
freq=datetools.MonthEnd())
lons_coarse = np.linspace(-177.5, 177.5, 72)
lats_coarse = np.linspace(-87.5, 87.5, 36)
P = Panel(items=timeidx, major_axis=lons_coarse,
minor_axis=lats_coarse)
data = np.random.randn(72 * 36).reshape((72, 36))
key = datetime(2009, 2, 28)
P[key] = data
assert_almost_equal(P[key].values, data)
def test_set_minor_major(self):
# GH 11014
df1 = DataFrame(['a', 'a', 'a', np.nan, 'a', np.nan])
df2 = DataFrame([1.0, np.nan, 1.0, np.nan, 1.0, 1.0])
panel = Panel({'Item1': df1, 'Item2': df2})
newminor = notnull(panel.iloc[:, :, 0])
panel.loc[:, :, 'NewMinor'] = newminor
assert_frame_equal(panel.loc[:, :, 'NewMinor'],
newminor.astype(object))
newmajor = notnull(panel.iloc[:, 0, :])
panel.loc[:, 'NewMajor', :] = newmajor
assert_frame_equal(panel.loc[:, 'NewMajor', :],
newmajor.astype(object))
def test_major_xs(self):
ref = self.panel['ItemA']
idx = self.panel.major_axis[5]
xs = self.panel.major_xs(idx)
result = xs['ItemA']
assert_series_equal(result, ref.xs(idx), check_names=False)
self.assertEqual(result.name, 'ItemA')
# not contained
idx = self.panel.major_axis[0] - bday
self.assertRaises(Exception, self.panel.major_xs, idx)
def test_major_xs_mixed(self):
self.panel['ItemD'] = 'foo'
xs = self.panel.major_xs(self.panel.major_axis[0])
self.assertEqual(xs['ItemA'].dtype, np.float64)
self.assertEqual(xs['ItemD'].dtype, np.object_)
def test_minor_xs(self):
ref = self.panel['ItemA']
idx = self.panel.minor_axis[1]
xs = self.panel.minor_xs(idx)
assert_series_equal(xs['ItemA'], ref[idx], check_names=False)
# not contained
self.assertRaises(Exception, self.panel.minor_xs, 'E')
def test_minor_xs_mixed(self):
self.panel['ItemD'] = 'foo'
xs = self.panel.minor_xs('D')
self.assertEqual(xs['ItemA'].dtype, np.float64)
self.assertEqual(xs['ItemD'].dtype, np.object_)
def test_xs(self):
itemA = self.panel.xs('ItemA', axis=0)
expected = self.panel['ItemA']
assert_frame_equal(itemA, expected)
# get a view by default
itemA_view = self.panel.xs('ItemA', axis=0)
itemA_view.values[:] = np.nan
self.assertTrue(np.isnan(self.panel['ItemA'].values).all())
# mixed-type yields a copy
self.panel['strings'] = 'foo'
result = self.panel.xs('D', axis=2)
self.assertIsNotNone(result.is_copy)
def test_getitem_fancy_labels(self):
p = self.panel
items = p.items[[1, 0]]
dates = p.major_axis[::2]
cols = ['D', 'C', 'F']
# all 3 specified
assert_panel_equal(p.ix[items, dates, cols],
p.reindex(items=items, major=dates, minor=cols))
# 2 specified
assert_panel_equal(p.ix[:, dates, cols],
p.reindex(major=dates, minor=cols))
assert_panel_equal(p.ix[items, :, cols],
p.reindex(items=items, minor=cols))
assert_panel_equal(p.ix[items, dates, :],
p.reindex(items=items, major=dates))
# only 1
assert_panel_equal(p.ix[items, :, :], p.reindex(items=items))
assert_panel_equal(p.ix[:, dates, :], p.reindex(major=dates))
assert_panel_equal(p.ix[:, :, cols], p.reindex(minor=cols))
def test_getitem_fancy_slice(self):
pass
def test_getitem_fancy_ints(self):
p = self.panel
# #1603
result = p.ix[:, -1, :]
expected = p.ix[:, p.major_axis[-1], :]
assert_frame_equal(result, expected)
def test_getitem_fancy_xs(self):
p = self.panel
item = 'ItemB'
date = p.major_axis[5]
col = 'C'
# get DataFrame
# item
assert_frame_equal(p.ix[item], p[item])
assert_frame_equal(p.ix[item, :], p[item])
assert_frame_equal(p.ix[item, :, :], p[item])
# major axis, axis=1
assert_frame_equal(p.ix[:, date], p.major_xs(date))
assert_frame_equal(p.ix[:, date, :], p.major_xs(date))
# minor axis, axis=2
assert_frame_equal(p.ix[:, :, 'C'], p.minor_xs('C'))
# get Series
assert_series_equal(p.ix[item, date], p[item].ix[date])
assert_series_equal(p.ix[item, date, :], p[item].ix[date])
assert_series_equal(p.ix[item, :, col], p[item][col])
assert_series_equal(p.ix[:, date, col], p.major_xs(date).ix[col])
def test_getitem_fancy_xs_check_view(self):
item = 'ItemB'
date = self.panel.major_axis[5]
# make sure it's always a view
NS = slice(None, None)
# DataFrames
comp = assert_frame_equal
self._check_view(item, comp)
self._check_view((item, NS), comp)
self._check_view((item, NS, NS), comp)
self._check_view((NS, date), comp)
self._check_view((NS, date, NS), comp)
self._check_view((NS, NS, 'C'), comp)
# Series
comp = assert_series_equal
self._check_view((item, date), comp)
self._check_view((item, date, NS), comp)
self._check_view((item, NS, 'C'), comp)
self._check_view((NS, date, 'C'), comp)
def test_ix_setitem_slice_dataframe(self):
a = Panel(items=[1, 2, 3], major_axis=[11, 22, 33],
minor_axis=[111, 222, 333])
b = DataFrame(np.random.randn(2, 3), index=[111, 333],
columns=[1, 2, 3])
a.ix[:, 22, [111, 333]] = b
assert_frame_equal(a.ix[:, 22, [111, 333]], b)
def test_ix_align(self):
from pandas import Series
b = Series(np.random.randn(10), name=0)
b.sort()
df_orig = Panel(np.random.randn(3, 10, 2))
df = df_orig.copy()
df.ix[0, :, 0] = b
assert_series_equal(df.ix[0, :, 0].reindex(b.index), b)
df = df_orig.swapaxes(0, 1)
df.ix[:, 0, 0] = b
assert_series_equal(df.ix[:, 0, 0].reindex(b.index), b)
df = df_orig.swapaxes(1, 2)
df.ix[0, 0, :] = b
assert_series_equal(df.ix[0, 0, :].reindex(b.index), b)
def test_ix_frame_align(self):
p_orig = tm.makePanel()
df = p_orig.ix[0].copy()
assert_frame_equal(p_orig['ItemA'], df)
p = p_orig.copy()
p.ix[0, :, :] = df
assert_panel_equal(p, p_orig)
p = p_orig.copy()
p.ix[0] = df
assert_panel_equal(p, p_orig)
p = p_orig.copy()
p.iloc[0, :, :] = df
assert_panel_equal(p, p_orig)
p = p_orig.copy()
p.iloc[0] = df
assert_panel_equal(p, p_orig)
p = p_orig.copy()
p.loc['ItemA'] = df
assert_panel_equal(p, p_orig)
p = p_orig.copy()
p.loc['ItemA', :, :] = df
assert_panel_equal(p, p_orig)
p = p_orig.copy()
p['ItemA'] = df
assert_panel_equal(p, p_orig)
p = p_orig.copy()
p.ix[0, [0, 1, 3, 5], -2:] = df
out = p.ix[0, [0, 1, 3, 5], -2:]
assert_frame_equal(out, df.iloc[[0, 1, 3, 5], [2, 3]])
# GH3830, panel assignment by values/frame
for dtype in ['float64', 'int64']:
panel = Panel(np.arange(40).reshape((2, 4, 5)),
items=['a1', 'a2'], dtype=dtype)
df1 = panel.iloc[0]
df2 = panel.iloc[1]
tm.assert_frame_equal(panel.loc['a1'], df1)
tm.assert_frame_equal(panel.loc['a2'], df2)
# Assignment by Value Passes for 'a2'
panel.loc['a2'] = df1.values
tm.assert_frame_equal(panel.loc['a1'], df1)
tm.assert_frame_equal(panel.loc['a2'], df1)
# Assignment by DataFrame Ok w/o loc 'a2'
panel['a2'] = df2
tm.assert_frame_equal(panel.loc['a1'], df1)
tm.assert_frame_equal(panel.loc['a2'], df2)
# Assignment by DataFrame Fails for 'a2'
panel.loc['a2'] = df2
tm.assert_frame_equal(panel.loc['a1'], df1)
tm.assert_frame_equal(panel.loc['a2'], df2)
def _check_view(self, indexer, comp):
cp = self.panel.copy()
obj = cp.ix[indexer]
obj.values[:] = 0
self.assertTrue((obj.values == 0).all())
comp(cp.ix[indexer].reindex_like(obj), obj)
def test_logical_with_nas(self):
d = Panel({'ItemA': {'a': [np.nan, False]},
'ItemB': {'a': [True, True]}})
result = d['ItemA'] | d['ItemB']
expected = DataFrame({'a': [np.nan, True]})
assert_frame_equal(result, expected)
# this is autodowncasted here
result = d['ItemA'].fillna(False) | d['ItemB']
expected = DataFrame({'a': [True, True]})
assert_frame_equal(result, expected)
def test_neg(self):
# what to do?
assert_panel_equal(-self.panel, -1 * self.panel)
def test_invert(self):
assert_panel_equal(-(self.panel < 0), ~(self.panel < 0))
def test_comparisons(self):
p1 = tm.makePanel()
p2 = tm.makePanel()
tp = p1.reindex(items=p1.items + ['foo'])
df = p1[p1.items[0]]
def test_comp(func):
# versus same index
result = func(p1, p2)
self.assert_numpy_array_equal(result.values,
func(p1.values, p2.values))
# versus non-indexed same objs
self.assertRaises(Exception, func, p1, tp)
# versus different objs
self.assertRaises(Exception, func, p1, df)
# versus scalar
result3 = func(self.panel, 0)
self.assert_numpy_array_equal(result3.values,
func(self.panel.values, 0))
test_comp(operator.eq)
test_comp(operator.ne)
test_comp(operator.lt)
test_comp(operator.gt)
test_comp(operator.ge)
test_comp(operator.le)
def test_get_value(self):
for item in self.panel.items:
for mjr in self.panel.major_axis[::2]:
for mnr in self.panel.minor_axis:
result = self.panel.get_value(item, mjr, mnr)
expected = self.panel[item][mnr][mjr]
assert_almost_equal(result, expected)
with tm.assertRaisesRegexp(TypeError,
"There must be an argument for each axis"):
self.panel.get_value('a')
def test_set_value(self):
for item in self.panel.items:
for mjr in self.panel.major_axis[::2]:
for mnr in self.panel.minor_axis:
self.panel.set_value(item, mjr, mnr, 1.)
assert_almost_equal(self.panel[item][mnr][mjr], 1.)
# resize
res = self.panel.set_value('ItemE', 'foo', 'bar', 1.5)
tm.assertIsInstance(res, Panel)
self.assertIsNot(res, self.panel)
self.assertEqual(res.get_value('ItemE', 'foo', 'bar'), 1.5)
res3 = self.panel.set_value('ItemE', 'foobar', 'baz', 5)
self.assertTrue(com.is_float_dtype(res3['ItemE'].values))
with tm.assertRaisesRegexp(TypeError,
"There must be an argument for each axis"
" plus the value provided"):
self.panel.set_value('a')
_panel = tm.makePanel()
tm.add_nans(_panel)
class TestPanel(tm.TestCase, PanelTests, CheckIndexing, SafeForLongAndSparse,
SafeForSparse):
_multiprocess_can_split_ = True
@classmethod
def assert_panel_equal(cls, x, y):
assert_panel_equal(x, y)
def setUp(self):
self.panel = _panel.copy()
self.panel.major_axis.name = None
self.panel.minor_axis.name = None
self.panel.items.name = None
def test_panel_warnings(self):
with tm.assert_produces_warning(FutureWarning):
shifted1 = self.panel.shift(lags=1)
with tm.assert_produces_warning(False):
shifted2 = self.panel.shift(periods=1)
tm.assert_panel_equal(shifted1, shifted2)
with tm.assert_produces_warning(False):
shifted3 = self.panel.shift()
tm.assert_panel_equal(shifted1, shifted3)
def test_constructor(self):
# with BlockManager
wp = Panel(self.panel._data)
self.assertIs(wp._data, self.panel._data)
wp = Panel(self.panel._data, copy=True)
self.assertIsNot(wp._data, self.panel._data)
assert_panel_equal(wp, self.panel)
# strings handled properly
wp = Panel([[['foo', 'foo', 'foo', ], ['foo', 'foo', 'foo']]])
self.assertEqual(wp.values.dtype, np.object_)
vals = self.panel.values
# no copy
wp = Panel(vals)
self.assertIs(wp.values, vals)
# copy
wp = Panel(vals, copy=True)
self.assertIsNot(wp.values, vals)
# GH #8285, test when scalar data is used to construct a Panel
# if dtype is not passed, it should be inferred
value_and_dtype = [(1, 'int64'), (3.14, 'float64'),
('foo', np.object_)]
for (val, dtype) in value_and_dtype:
wp = Panel(val, items=range(2), major_axis=range(3),
minor_axis=range(4))
vals = np.empty((2, 3, 4), dtype=dtype)
vals.fill(val)
assert_panel_equal(wp, Panel(vals, dtype=dtype))
# test the case when dtype is passed
wp = Panel(1, items=range(2), major_axis=range(3), minor_axis=range(4),
dtype='float32')
vals = np.empty((2, 3, 4), dtype='float32')
vals.fill(1)
assert_panel_equal(wp, Panel(vals, dtype='float32'))
def test_constructor_cast(self):
zero_filled = self.panel.fillna(0)
casted = Panel(zero_filled._data, dtype=int)
casted2 = Panel(zero_filled.values, dtype=int)
exp_values = zero_filled.values.astype(int)
assert_almost_equal(casted.values, exp_values)
assert_almost_equal(casted2.values, exp_values)
import pandas as pd
from schoonmaken.common import Person, Task
def save_data(people, save_path):
pass
def retrieve_data(data_path) -> list[tuple]:
'''
Layout of data:
[
(person_name: str, [TaskData, ...]),
...
]
'''
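# e.g. [("Alice", [task_a, task_b]), ("Bob", [task_c])] -- names and task objects are hypothetical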
td = []
return td
def retrieve_person_data(person_name: str, data_path: str):
td = []
return
def csv(task_table: list[list]):
dataframe = pd.DataFrame(task_table)
import pkg_resources
import os
import sys
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from windea_tool import weibull
from windea_tool import plotting
turbines_dict = {'Enercon E-70 (2300 kW)':'Enercon_E-70_2300kW.xlsx',
'Enercon E-115 (3000 kW)':'Enercon_E-115_3000kW.xlsx'}
class Windea:
def __init__(self, name, start=0, stop=30, step=1):
self.turbines_container = {}
self.locations_container = {}
self.analysis_container = {}
self.name = name
self.df_main = {}
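# Illustrative usage sketch (location name and Weibull values are hypothetical):
#   wea = Windea('my_analysis')
#   wea.add_turbine('Enercon E-70 (2300 kW)')
#   wea.add_location('site_a', A=7.5, k=2.0, type='weibull')
#   wea.analyse()
#   wea.plot()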
def add_turbine(self, name, rho = 1.225, verfügbarkeit = 1, path=""):
new_turbine = Turbine(name=name, rho=rho, verfügbarkeit=verfügbarkeit, path=path)
self.turbines_container[name] = new_turbine
def add_location(self, name, delta_v = 1, A=None, k=None, v_m=None, h = None,
start = 0, stop = 160, step = 10, type_windprofil = None, v_r = None, h_r = None,
z_0 = None, a = None,
type=None, path=""):
new_location = Location(name, delta_v = delta_v, A=A, k=k, v_m=v_m, type=type, path=path)
self.locations_container[name] = new_location
# override Location with wind data in reference height with data in hub height
if type_windprofil is not None:
self.add_windprofil(location=name, start=start, stop=stop, step=step,
type=type_windprofil, v_r=v_r, h_r=h_r, z_0=z_0, a=a)
df_windprofil = self.locations_container[name].df_windprofil
df_wp_detailed = self.locations_container[name].df_wp_detailed
v_h = self.get_v_in_h(name, h=h)
new_location = Location(name, delta_v = delta_v, A=A, k=k, v_m=v_h, type=type, path=path)
new_location.df_windprofil = df_windprofil
new_location.df_wp_detailed = df_wp_detailed
new_location.windprofil_type = type_windprofil
self.locations_container[name] = new_location
def add_windprofil(self, location, start, stop, step, type, v_r, h_r, z_0 = None, a = None):
self.locations_container[location].windprofil(start=start, stop=stop, step=step,
type=type, v_r=v_r, h_r=h_r, z_0=z_0, a=a)
def get_v_in_h(self, location, h):
return self.locations_container[location].df_windprofil.query("h==" + str(h))["v"].values[0]
def analyse(self, flautenanalyse = True):
if len(self.locations_container) > 1 and len(self.turbines_container) > 1:
sys.exit("Mehrere Turbinen und mehrere Standorte. Bitte Auswahl treffen")
if len(self.locations_container) == 1:
for turb, turb_obj in self.turbines_container.items():
# get location_name and location_object of the single location in loc_dict of Analysis object
loc_name = list(self.locations_container.keys())[0]
loc_obj = list(self.locations_container.values())[0]
# document location_name in turbine object and turbine_name in location object
turb_obj.locations.append(loc_name)
loc_obj.turbines.append(turb_obj.name)
# initialize df_main for this turbine
df_main = pd.DataFrame()
df_main['v'] = turb_obj.df_turbine['v']
df_main['h'] = loc_obj.df_histogramm['h']
df_main['P'] = turb_obj.df_turbine['P']
# perform calculations of energy yield
df_main['E'] = df_main['h'] * df_main['P'] * 8760 / 1000 ** 2 * turb_obj.verfügbarkeit # GWh
df_main['E_h'] = df_main['E'] / df_main['E'].sum()
df_main['E_sum'] = ''
df_main.loc[0, 'E_sum'] = df_main['E'].sum()
print("Prognose gesamter Energieertrag pro Jahr in GWh: ", df_main['E'].sum())
# add rho to data output
df_main["rho"] = turb_obj.df_turbine["rho"]
# assign df_main to object
turb_obj.df_main = df_main
# Flautenanalyse
if flautenanalyse:
self.flautenanalyse(turb_obj)
# add flautenanalyse result to data output
turb_obj.df_main["Anzahl der Stunden mit Flaute"] = ''
turb_obj.df_main.loc[0, "Anzahl der Stunden mit Flaute"] = turb_obj.flaute
self.postprocessing()
elif len(self.locations_container) > 1:
print("Mehrere Locations")
for loc, loc_obj in self.locations_container.items():
# get turbine_name and turbine_object of the single turbine in turbine_dict of Analysis object
turb_name = list(self.turbines_container.keys())[0]
turb_obj = list(self.turbines_container.values())[0]
# document location_name in turbine object and turbine_name in location object
loc_obj.turbines.append(turb_name)
turb_obj.locations.append(loc)
# initialize df_main for this location
df_main = pd.DataFrame()
df_main['v'] = turb_obj.df_turbine['v']
df_main['h'] = loc_obj.df_histogramm['h']
df_main['P'] = turb_obj.df_turbine['P']
# perform calculations of energy yield
df_main['E'] = df_main['h'] * df_main['P'] * 8760 / 1000 ** 2 * turb_obj.verfügbarkeit # GWh
df_main['E_h'] = df_main['E'] / df_main['E'].sum()
df_main['E_sum'] = ''
df_main.loc[0, 'E_sum'] = df_main['E'].sum()
print("Prognose gesamter Energieertrag pro Jahr in GWh: ", df_main['E'].sum())
# assign df_main to object
loc_obj.df_main = df_main
# Flautenanalyse
if flautenanalyse:
self.flautenanalyse(loc_obj)
# add flautenanalyse result to data output
loc_obj.df_main["Anzahl der Stunden mit Flaute"] = ''
loc_obj.df_main.loc[0, "Anzahl der Stunden mit Flaute"] = loc_obj.flaute
self.postprocessing()
else:
print("Fehler in analyse()")
def flautenanalyse(self, obj):
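# "Flaute" = calm/lull: sums the histogram weight of all wind-speed bins below the first
# bin with non-zero power output and converts it to hours per year (x 8760).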
first_non_zero = obj.df_main["P"].ne(0).idxmax()
h_sum_flaute = obj.df_main["h"].iloc[0:first_non_zero].sum()
print("Anzahl der Stunden mit Flaute "+obj.name+": ", round(h_sum_flaute * 8760,2))
obj.flaute = h_sum_flaute * 8760
#last_non_zero = obj.df_main["P"][obj.df_main["P"] != 0].index[-1]
#h_sum_sturm = obj.df_main["h"].iloc[last_non_zero:-1].sum()
#print("Anzahl der Stunden mit Sturmabschaltung: ", round(h_sum_sturm * 8760, 2))
#h_sum_betrieb = obj.df_main["h"].iloc[first_non_zero:last_non_zero].sum()
#print("Betriebsstunden: ", round(h_sum_betrieb * 8760, 2))
#print(obj.df_main["h"].sum())
def plot(self, selected_plots=["windhistogramm", "turbine", "main", "ertrag", "ertrag_h", "windprofil", "flautenanalyse"],
show=True):
fig_list = []
if "flautenanalyse" in selected_plots:
if list(self.locations_container.values())[0].flaute is not None:
fig_list.append(plotting.plot_flaute(self.locations_container))
elif list(self.turbines_container.values())[0].flaute is not None:
fig_list.append(plotting.plot_flaute(self.turbines_container))
if "windhistogramm" in selected_plots:
fig_list.append(plotting.plot_weibull(self.locations_container))
fig_list.append(plotting.plot_messung(self.locations_container))
if "turbine" in selected_plots:
fig_list.append(plotting.plot_turbine(self.turbines_container))
if "main" in selected_plots:
if len(self.locations_container) == 1:
for turb in self.turbines_container.values():
fig_list.append(plotting.plot_main(turb))
if len(self.locations_container) > 1:
for loc in self.locations_container.values():
fig_list.append(plotting.plot_main(loc))
if "ertrag" in selected_plots:
if len(self.locations_container) == 1:
fig_list.append(plotting.plot_ertrag(self.turbines_container))
if len(self.locations_container) > 1:
fig_list.append(plotting.plot_ertrag(self.locations_container))
if "ertrag_h" in selected_plots:
if len(self.locations_container) == 1:
fig_list.append(plotting.plot_ertrag_h(self.turbines_container))
if len(self.locations_container) > 1:
fig_list.append(plotting.plot_ertrag_h(self.locations_container))
if "windprofil" in selected_plots:
for loc, loc_obj in self.locations_container.items():
if loc_obj.df_windprofil is not None:
fig_list.append(plotting.plot_windprofil(loc_obj))
fig_list = filter(None, fig_list)
self.fig_list = fig_list
if show:
plt.show()
def postprocessing(self):
self.pp_data=pd.DataFrame()
if len(self.locations_container) == 1:
df_list = []
loc_obj = list(self.locations_container.values())[0]
q = pd.DataFrame({loc_obj.name: []})
df_list.append(q)
df_list.append(loc_obj.df_histogramm)
for turb in loc_obj.turbines:
a = pd.DataFrame({turb:[]})
df_list.append(a)
df_list.append(self.turbines_container[turb].df_main)
self.pp_data = pd.concat(df_list, axis=1)
if len(self.locations_container) > 1:
df_list = []
turb_obj = list(self.turbines_container.values())[0]
q = pd.DataFrame({turb_obj.name: []})
df_list.append(q)
df_list.append(turb_obj.df_turbine)
for loc in turb_obj.locations:
a = pd.DataFrame({loc: []})
df_list.append(a)
df_list.append(self.locations_container[loc].df_main)
self.pp_data = pd.concat(df_list, axis=1)
def save_plots(self, path=""):
plots_dir = os.path.join(path, "plots")
if not os.path.exists(plots_dir):
os.makedirs(plots_dir)
for fig in self.fig_list:
fig.savefig(os.path.join(plots_dir, fig.texts[0].get_text() + ".png"))
def save_data(self, path=""):
Excelwriter = pd.ExcelWriter(os.path.join(path,self.name)+".xlsx", engine="xlsxwriter")
self.pp_data.to_excel(Excelwriter, sheet_name=self.name, index=False)
Excelwriter.save()
def save(self, path=""):
print('save all')
windea_dir = os.path.join(path, self.name)
if not os.path.exists(windea_dir):
os.makedirs(windea_dir)
self.save_plots(path=windea_dir)
self.save_data(path=windea_dir)
class Turbine:
def __init__(self, name, rho, verfügbarkeit, path=""):
self.name = name
self.locations = []
self.df_turbine = load_turbine(name, path)
self.df_turbine["P"] *= rho
self.df_turbine["rho"] *= rho
self.rho = rho
self.verfügbarkeit = verfügbarkeit
self.flaute = None
class Location:
def __init__(self, name, delta_v = 1, A=None, k=None, v_m=None, type=None, path=""):
self.name = name
self.turbines = []
self.A = A
self.k = k
self.v_m = v_m
self.delta_v = delta_v
self.type = type
self.df_windprofil = None
self.df_messung = None
self.df_weibull = None
self.flaute = None
# weibull distribution or measured histogram ("messung")
if type == "weibull":
self.df_weibull, self.df_weibull_detailed, self.A, self.v_m = weibull.weibull_windhistogramm(A=A, k=k, v_m=v_m, step = delta_v)
self.df_histogramm = self.df_weibull
if type == "messung":
self.df_messung = pd.read_excel(path, engine = 'openpyxl')
self.df_messung["Frequency"] /= 100
self.df_histogramm = pd.DataFrame()
self.df_histogramm['v'] = self.df_messung["to"]
freq = list(self.df_messung["Frequency"])
freq.append(0.0)
freq_mean = []
for i in range(0, len(freq)):
freq_mean.append((freq[i] + freq[i+1]) / 2)
if i == (len(freq) - 2):
break
self.df_histogramm["h"] = freq_mean
first_row = pd.DataFrame({"v":[0], "h":[0]})
self.df_histogramm = pd.concat([first_row, self.df_histogramm], ignore_index=True)
def windprofil(self, start, stop, step, type, v_r, h_r, z_0 = None, a = None):
df_windprofil = pd.DataFrame()
h = np.arange(start, stop + step, step)
df_windprofil['h'] = h
df_wp_detailed = pd.DataFrame()
import numpy as np
import pandas as pd
from os.path import join as joinPaths
from os.path import isdir
from os.path import isfile
from os import listdir as ls
from IPython.display import display, Markdown, Latex
import matplotlib.pyplot as plt
import matplotlib.lines as mlines
from matplotlib.pyplot import cm
from multiprocessing import Pool
from glob import glob
from os import path
import scipy
from scipy import integrate
from scipy.signal import butter, lfilter
# Definition of constants
# matplotlib
PLOTWIDTH = 16
PLOTHEIGHT = 9
DEBUG = False
# deprecated file format format for Data coming from Boxes with old firmware -> depends on number of columns
columns = [
"time",
"latitude",
"longitude",
"elevation",
"rot_x",
"rot_y",
"rot_z",
"acc_x",
"acc_y",
"acc_z",
"mag_x",
"mag_y",
"mag_z",
"roll",
"pitch",
"yaw",
]
columns2 = [
"time",
"runtime",
"gpstime",
"latitude",
"longitude",
"elevation",
"rot_x",
"rot_y",
"rot_z",
"acc_x",
"acc_y",
"acc_z",
"mag_x",
"mag_y",
"mag_z",
"roll",
"pitch",
"yaw",
]
### Data aggregation and cleaning
def readLogFile(
logFilePath,
columns=columns,
skipheader=3,
verbose=False,
lowMemory=True,
errorOnBadLine=False,
engine="python",
):
"""
readLogFile(logFilePath, columns=columns, skipheader=3, ...):
opens the given path and tries to read in the data, converting it to a dataframe.
returns a dataframe containing the data from a given csv file
"""
if verbose: print("processing file: {}".format(logFilePath))
if not isfile(logFilePath):
print("no such file: {} -> skipping".format(logFile))
return None
try:
tempDataFrame = pd.read_csv(
logFilePath,
skiprows=skipheader,
names=columns,
low_memory=lowMemory,
error_bad_lines=errorOnBadLine,
skipfooter=1,
engine=engine,
)
if verbose: print(tempDataFrame.info())
except:
print("could not process file: {}, skipping".format(logFilePath))
return None
return tempDataFrame
def cleanDataFrame(
df,
roundTimeStamp=False,
toDateTime=True,
dateTimeIndex = True,
replaceNan=True,
verbose=False,
correctTimeByGPS=True,
timeZone="Europe/Berlin",
dropDuplicateIndices=True,
):
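# Takes the raw log DataFrame and, depending on the flags above, forward-fills NaNs,
# rounds the timestamp column and converts it to timezone-aware datetimes.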
if df.empty:
print("empty dataframe, skipping!")
return pd.DataFrame()
# convert relevant columns to strings
if replaceNan:
if verbose: print("cleaning NaNs")
df.fillna(method="ffill", inplace=True)
if roundTimeStamp:
if verbose: print("rounding time")
df["time"].round(roundTimeStamp)
if toDateTime:
if verbose: print("converting timestamps")
df["time"] = | pd.to_datetime(df["time"], unit="s", utc=True) | pandas.to_datetime |
import numpy as np
import pandas as pd
import pytest
from pandas.util import hash_pandas_object
import dask.dataframe as dd
from dask.dataframe import _compat
from dask.dataframe._compat import tm
from dask.dataframe.utils import assert_eq
@pytest.mark.parametrize(
"obj",
[
pd.Series([1, 2, 3]),
pd.Series([1.0, 1.5, 3.2]),
pd.Series([1.0, 1.5, 3.2], index=[1.5, 1.1, 3.3]),
pd.Series(["a", "b", "c"]),
pd.Series([True, False, True]),
pd.Index([1, 2, 3]),
pd.Index([True, False, True]),
pd.DataFrame({"x": ["a", "b", "c"], "y": [1, 2, 3]}),
_compat.makeMissingDataframe(),
_compat.makeMixedDataFrame(),
_compat.makeTimeDataFrame(),
_compat.makeTimeSeries(),
_compat.makeTimedeltaIndex(),
],
)
def test_hash_pandas_object(obj):
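# hashing the same pandas object twice must give identical, deterministic results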
a = hash_pandas_object(obj)
b = hash_pandas_object(obj)
if isinstance(a, np.ndarray):
np.testing.assert_equal(a, b)
else:
assert_eq(a, b)
def test_categorical_consistency():
# Check that categoricals hash consistent with their values, not codes
# This should work for categoricals of any dtype
for s1 in [
pd.Series(["a", "b", "c", "d"]),
pd.Series([1000, 2000, 3000, 4000]),
"""Tests for the sdv.constraints.tabular module."""
import numpy as np
import pandas as pd
import pytest
from sdv.constraints.errors import MissingConstraintColumnError
from sdv.constraints.tabular import (
ColumnFormula, CustomConstraint, GreaterThan, UniqueCombinations)
def dummy_transform():
pass
def dummy_reverse_transform():
pass
def dummy_is_valid():
pass
class TestCustomConstraint():
def test___init__(self):
"""Test the ``CustomConstraint.__init__`` method.
The ``transform``, ``reverse_transform`` and ``is_valid`` methods
should be replaced by the given ones, importing them if necessary.
Setup:
- Create dummy functions (created above this class).
Input:
- dummy transform and revert_transform + is_valid FQN
Output:
- Instance with all the methods replaced by the dummy versions.
"""
is_valid_fqn = __name__ + '.dummy_is_valid'
# Run
instance = CustomConstraint(
transform=dummy_transform,
reverse_transform=dummy_reverse_transform,
is_valid=is_valid_fqn
)
# Assert
assert instance.transform == dummy_transform
assert instance.reverse_transform == dummy_reverse_transform
assert instance.is_valid == dummy_is_valid
class TestUniqueCombinations():
def test___init__(self):
"""Test the ``UniqueCombinations.__init__`` method.
It is expected to create a new Constraint instance and receiving the names of
the columns that need to produce unique combinations.
Side effects:
- instance._columns == columns
"""
# Setup
columns = ['b', 'c']
# Run
instance = UniqueCombinations(columns=columns)
# Assert
assert instance._columns == columns
def test__valid_separator_valid(self):
"""Test ``_valid_separator`` for a valid separator.
If the separator and data are valid, result is ``True``.
Input:
- Table data (pandas.DataFrame)
Output:
- True (bool).
"""
# Setup
columns = ['b', 'c']
instance = UniqueCombinations(columns=columns)
instance._separator = '#'
# Run
table_data = pd.DataFrame({
'a': ['a', 'b', 'c'],
'b': ['d', 'e', 'f'],
'c': ['g', 'h', 'i']
})
is_valid = instance._valid_separator(table_data, instance._separator, columns)
# Assert
assert is_valid
def test__valid_separator_non_valid_separator_contained(self):
"""Test ``_valid_separator`` passing a column that contains the separator.
If any of the columns contains the separator string, result is ``False``.
Input:
- Table data (pandas.DataFrame) with a column that contains the separator string ('#')
Output:
- False (bool).
"""
# Setup
columns = ['b', 'c']
instance = UniqueCombinations(columns=columns)
instance._separator = '#'
# Run
table_data = pd.DataFrame({
'a': ['a', 'b', 'c'],
'b': ['d', '#', 'f'],
'c': ['g', 'h', 'i']
})
is_valid = instance._valid_separator(table_data, instance._separator, columns)
# Assert
assert not is_valid
def test__valid_separator_non_valid_name_joined_exists(self):
"""Test ``_valid_separator`` passing a column whose name is obtained after joining
the column names using the separator.
If the column name obtained after joining the column names using the separator
already exists, result is ``False``.
Input:
- Table data (pandas.DataFrame) with a column name that will be obtained by joining
the column names and the separator.
Output:
- False (bool).
"""
# Setup
columns = ['b', 'c']
instance = UniqueCombinations(columns=columns)
instance._separator = '#'
# Run
table_data = pd.DataFrame({
'b#c': ['a', 'b', 'c'],
'b': ['d', 'e', 'f'],
'c': ['g', 'h', 'i']
})
is_valid = instance._valid_separator(table_data, instance._separator, columns)
# Assert
assert not is_valid
def test_fit(self):
"""Test the ``UniqueCombinations.fit`` method.
The ``UniqueCombinations.fit`` method is expected to:
- Call ``UniqueCombinations._valid_separator``.
- Find a valid separator for the data and generate the joint column name.
Input:
- Table data (pandas.DataFrame)
"""
# Setup
columns = ['b', 'c']
instance = UniqueCombinations(columns=columns)
# Run
table_data = pd.DataFrame({
'a': ['a', 'b', 'c'],
'b': ['d', 'e', 'f'],
'c': ['g', 'h', 'i']
})
instance.fit(table_data)
# Asserts
expected_combinations = pd.DataFrame({
'b': ['d', 'e', 'f'],
'c': ['g', 'h', 'i']
})
assert instance._separator == '#'
assert instance._joint_column == 'b#c'
pd.testing.assert_frame_equal(instance._combinations, expected_combinations)
def test_is_valid_true(self):
"""Test the ``UniqueCombinations.is_valid`` method.
If the input data satisfies the constraint, result is a series of ``True`` values.
Input:
- Table data (pandas.DataFrame), satisfying the constraint.
Output:
- Series of ``True`` values (pandas.Series)
Side effects:
- Since the ``is_valid`` method needs ``self._combinations``, method ``fit``
must be called as well.
"""
# Setup
table_data = pd.DataFrame({
'a': ['a', 'b', 'c'],
'b': ['d', 'e', 'f'],
'c': ['g', 'h', 'i']
})
columns = ['b', 'c']
instance = UniqueCombinations(columns=columns)
instance.fit(table_data)
# Run
out = instance.is_valid(table_data)
expected_out = pd.Series([True, True, True], name='b#c')
pd.testing.assert_series_equal(expected_out, out)
def test_is_valid_false(self):
"""Test the ``UniqueCombinations.is_valid`` method.
If the input data doesn't satisfy the constraint, result is a series of ``False`` values.
Input:
- Table data (pandas.DataFrame), which does not satisfy the constraint.
Output:
- Series of ``False`` values (pandas.Series)
Side effects:
- Since the ``is_valid`` method needs ``self._combinations``, method ``fit``
must be called as well.
"""
# Setup
table_data = pd.DataFrame({
'a': ['a', 'b', 'c'],
'b': ['d', 'e', 'f'],
'c': ['g', 'h', 'i']
})
columns = ['b', 'c']
instance = UniqueCombinations(columns=columns)
instance.fit(table_data)
# Run
incorrect_table = pd.DataFrame({
'a': ['a', 'b', 'c'],
'b': ['D', 'E', 'F'],
'c': ['g', 'h', 'i']
})
out = instance.is_valid(incorrect_table)
# Assert
expected_out = pd.Series([False, False, False], name='b#c')
pd.testing.assert_series_equal(expected_out, out)
def test_transform(self):
"""Test the ``UniqueCombinations.transform`` method.
It is expected to return a Table data with the columns concatenated by the separator.
Input:
- Table data (pandas.DataFrame)
Output:
- Table data transformed, with the columns concatenated (pandas.DataFrame)
Side effects:
- Since the ``transform`` method needs ``self._joint_column``, method ``fit``
must be called as well.
"""
# Setup
table_data = pd.DataFrame({
'a': ['a', 'b', 'c'],
'b': ['d', 'e', 'f'],
'c': ['g', 'h', 'i']
})
columns = ['b', 'c']
instance = UniqueCombinations(columns=columns)
instance.fit(table_data)
# Run
out = instance.transform(table_data)
# Assert
expected_out = pd.DataFrame({
'a': ['a', 'b', 'c'],
'b#c': ['d#g', 'e#h', 'f#i']
})
pd.testing.assert_frame_equal(expected_out, out)
def test_transform_not_all_columns_provided(self):
"""Test the ``UniqueCombinations.transform`` method.
If some of the columns needed for the transform are missing, and
``fit_columns_model`` is False, it will raise a ``MissingConstraintColumnError``.
Input:
- Table data (pandas.DataFrame)
Output:
- Raises ``MissingConstraintColumnError``.
"""
# Setup
table_data = pd.DataFrame({
'a': ['a', 'b', 'c'],
'b': ['d', 'e', 'f'],
'c': ['g', 'h', 'i']
})
columns = ['b', 'c']
instance = UniqueCombinations(columns=columns, fit_columns_model=False)
instance.fit(table_data)
# Run/Assert
with pytest.raises(MissingConstraintColumnError):
instance.transform(pd.DataFrame({'a': ['a', 'b', 'c']}))
    def test_reverse_transform(self):
"""Test the ``UniqueCombinations.reverse_transform`` method.
It is expected to return the original data separating the concatenated columns.
Input:
- Table data transformed (pandas.DataFrame)
Output:
- Original table data, with the concatenated columns separated (pandas.DataFrame)
Side effects:
        - Since the ``reverse_transform`` method needs ``self._joint_column``, method ``fit``
must be called as well.
"""
        # Setup
        table_data = pd.DataFrame({
            'a': ['a', 'b', 'c'],
            'b': ['d', 'e', 'f'],
            'c': ['g', 'h', 'i']
        })
        columns = ['b', 'c']
        instance = UniqueCombinations(columns=columns)
        instance.fit(table_data)
        # Run
        transformed_data = pd.DataFrame({
            'a': ['a', 'b', 'c'],
            'b#c': ['d#g', 'e#h', 'f#i']
        })
        out = instance.reverse_transform(transformed_data)
# Assert
expected_out = pd.DataFrame({
'a': ['a', 'b', 'c'],
'b': ['d', 'e', 'f'],
'c': ['g', 'h', 'i']
})
pd.testing.assert_frame_equal(expected_out, out)
class TestGreaterThan():
def test___init___strict_false(self):
"""Test the ``GreaterThan.__init__`` method.
The passed arguments should be stored as attributes.
Input:
- low = 'a'
- high = 'b'
Side effects:
- instance._low == 'a'
- instance._high == 'b'
- instance._strict == False
"""
# Run
instance = GreaterThan(low='a', high='b')
# Asserts
assert instance._low == 'a'
assert instance._high == 'b'
assert instance._strict is False
def test___init___strict_true(self):
"""Test the ``GreaterThan.__init__`` method.
The passed arguments should be stored as attributes.
Input:
- low = 'a'
- high = 'b'
- strict = True
Side effects:
- instance._low == 'a'
- instance._high == 'b'
        - instance._strict == True
"""
# Run
instance = GreaterThan(low='a', high='b', strict=True)
# Asserts
assert instance._low == 'a'
assert instance._high == 'b'
assert instance._strict is True
def test_fit_int(self):
"""Test the ``GreaterThan.fit`` method.
The ``GreaterThan.fit`` method should only learn and store the
``dtype`` of the ``high`` column as the ``_dtype`` attribute.
Input:
- Table that contains two constrained columns with the high one
being made of integers.
Side Effect:
- The _dtype attribute gets `int` as the value even if the low
column has a different dtype.
"""
# Setup
instance = GreaterThan(low='a', high='b')
# Run
table_data = pd.DataFrame({
'a': [1., 2., 3.],
'b': [4, 5, 6],
'c': [7, 8, 9]
})
instance.fit(table_data)
# Asserts
assert instance._dtype.kind == 'i'
def test_fit_float(self):
"""Test the ``GreaterThan.fit`` method.
The ``GreaterThan.fit`` method should only learn and store the
``dtype`` of the ``high`` column as the ``_dtype`` attribute.
Input:
- Table that contains two constrained columns with the high one
being made of float values.
Side Effect:
- The _dtype attribute gets `float` as the value even if the low
column has a different dtype.
"""
# Setup
instance = GreaterThan(low='a', high='b')
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4., 5., 6.],
'c': [7, 8, 9]
})
instance.fit(table_data)
# Asserts
assert instance._dtype.kind == 'f'
def test_fit_datetime(self):
"""Test the ``GreaterThan.fit`` method.
The ``GreaterThan.fit`` method should only learn and store the
``dtype`` of the ``high`` column as the ``_dtype`` attribute.
Input:
- Table that contains two constrained columns of datetimes.
Side Effect:
- The _dtype attribute gets `datetime` as the value.
"""
# Setup
instance = GreaterThan(low='a', high='b')
# Run
table_data = pd.DataFrame({
'a': pd.to_datetime(['2020-01-01']),
'b': pd.to_datetime(['2020-01-02'])
})
instance.fit(table_data)
# Asserts
assert instance._dtype.kind == 'M'
def test_is_valid_strict_false(self):
"""Test the ``GreaterThan.is_valid`` method with strict False.
If strict is False, equal values should count as valid
Input:
- Table with a strictly valid row, a strictly invalid row and
a row that has the same value for both high and low.
Output:
- False should be returned for the strictly invalid row and True
for the other two.
"""
# Setup
        instance = GreaterThan(low='a', high='b', strict=False)
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 2, 2],
'c': [7, 8, 9]
})
out = instance.is_valid(table_data)
# Assert
        expected_out = pd.Series([True, True, False])
pd.testing.assert_series_equal(expected_out, out)
def test_is_valid_strict_true(self):
"""Test the ``GreaterThan.is_valid`` method with strict True.
If strict is True, equal values should count as invalid.
Input:
- Table with a strictly valid row, a strictly invalid row and
a row that has the same value for both high and low.
Output:
- True should be returned for the strictly valid row and False
for the other two.
"""
# Setup
        instance = GreaterThan(low='a', high='b', strict=True)
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 2, 2],
'c': [7, 8, 9]
})
out = instance.is_valid(table_data)
# Assert
        expected_out = pd.Series([True, False, False])
pd.testing.assert_series_equal(expected_out, out)
def test_transform_int_drop_none(self):
"""Test the ``GreaterThan.transform`` method passing a high column of type int.
The ``GreaterThan.transform`` method is expected to compute the distance
between the high and low columns and create a diff column with the
logarithm of the distance + 1.
Setup:
- ``_drop`` is set to ``None``, so all original columns will be in output.
Input:
        - Table with two constrained columns at a constant distance of
exactly 3 and one additional dummy column.
Output:
- Same table with a diff column of the logarithms of the distances + 1,
which is np.log(4).
"""
# Setup
instance = GreaterThan(low='a', high='b', strict=True)
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6],
'c': [7, 8, 9],
})
out = instance.transform(table_data)
# Assert
expected_out = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6],
'c': [7, 8, 9],
'#a#b': [np.log(4)] * 3,
})
pd.testing.assert_frame_equal(out, expected_out)
def test_transform_int_drop_high(self):
"""Test the ``GreaterThan.transform`` method passing a high column of type int.
The ``GreaterThan.transform`` method is expected to compute the distance
between the high and low columns and create a diff column with the
logarithm of the distance + 1. It should also drop the high column.
Setup:
- ``_drop`` is set to ``high``.
Input:
        - Table with two constrained columns at a constant distance of
exactly 3 and one additional dummy column.
Output:
- Same table with a diff column of the logarithms of the distances + 1,
which is np.log(4) and the high column dropped.
"""
# Setup
instance = GreaterThan(low='a', high='b', strict=True, drop='high')
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6],
'c': [7, 8, 9],
})
out = instance.transform(table_data)
# Assert
expected_out = pd.DataFrame({
'a': [1, 2, 3],
'c': [7, 8, 9],
'#a#b': [np.log(4)] * 3,
})
pd.testing.assert_frame_equal(out, expected_out)
def test_transform_int_drop_low(self):
"""Test the ``GreaterThan.transform`` method passing a high column of type int.
The ``GreaterThan.transform`` method is expected to compute the distance
between the high and low columns and create a diff column with the
logarithm of the distance + 1. It should also drop the low column.
Setup:
- ``_drop`` is set to ``low``.
Input:
        - Table with two constrained columns at a constant distance of
exactly 3 and one additional dummy column.
Output:
- Same table with a diff column of the logarithms of the distances + 1,
which is np.log(4) and the low column dropped.
"""
# Setup
instance = GreaterThan(low='a', high='b', strict=True, drop='low')
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6],
'c': [7, 8, 9],
})
out = instance.transform(table_data)
# Assert
expected_out = pd.DataFrame({
'b': [4, 5, 6],
'c': [7, 8, 9],
'#a#b': [np.log(4)] * 3,
})
pd.testing.assert_frame_equal(out, expected_out)
def test_transform_float_drop_none(self):
"""Test the ``GreaterThan.transform`` method passing a high column of type float.
The ``GreaterThan.transform`` method is expected to compute the distance
between the high and low columns and create a diff column with the
logarithm of the distance + 1.
Setup:
- ``_drop`` is set to ``None``, so all original columns will be in output.
Input:
- Table with two constrained columns at a constant distance of
exactly 3 and one additional dummy column.
Output:
- Same table with a diff column of the logarithms of the distances + 1,
which is np.log(4).
"""
# Setup
instance = GreaterThan(low='a', high='b', strict=True)
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4., 5., 6.],
'c': [7, 8, 9],
})
out = instance.transform(table_data)
# Assert
expected_out = pd.DataFrame({
'a': [1, 2, 3],
'b': [4., 5., 6.],
'c': [7, 8, 9],
'#a#b': [np.log(4)] * 3,
})
pd.testing.assert_frame_equal(out, expected_out)
def test_transform_datetime_drop_none(self):
"""Test the ``GreaterThan.transform`` method passing a high column of type datetime.
If the columns are of type datetime, ``transform`` is expected
to convert the timedelta distance into numeric before applying
the +1 and logarithm.
Setup:
- ``_drop`` is set to ``None``, so all original columns will be in output.
Input:
- Table with values at a distance of exactly 1 second.
Output:
- Same table with a diff column of the logarithms
          of the distance in nanoseconds + 1, which is np.log(1_000_000_001).
"""
# Setup
instance = GreaterThan(low='a', high='b', strict=True)
# Run
table_data = pd.DataFrame({
'a': pd.to_datetime(['2020-01-01T00:00:00', '2020-01-02T00:00:00']),
'b': pd.to_datetime(['2020-01-01T00:00:01', '2020-01-02T00:00:01']),
'c': [1, 2],
})
out = instance.transform(table_data)
# Assert
expected_out = pd.DataFrame({
'a': pd.to_datetime(['2020-01-01T00:00:00', '2020-01-02T00:00:00']),
'b': pd.to_datetime(['2020-01-01T00:00:01', '2020-01-02T00:00:01']),
'c': [1, 2],
'#a#b': [np.log(1_000_000_001), np.log(1_000_000_001)],
})
pd.testing.assert_frame_equal(out, expected_out)
def test_transform_not_all_columns_provided(self):
"""Test the ``GreaterThan.transform`` method.
If some of the columns needed for the transform are missing, it will raise
a ``MissingConstraintColumnError``.
Input:
- Table data (pandas.DataFrame)
Output:
- Raises ``MissingConstraintColumnError``.
"""
# Setup
instance = GreaterThan(low='a', high='b', strict=True, fit_columns_model=False)
# Run/Assert
with pytest.raises(MissingConstraintColumnError):
instance.transform(pd.DataFrame({'a': ['a', 'b', 'c']}))
def test_reverse_transform_int_drop_high(self):
"""Test the ``GreaterThan.reverse_transform`` method for dtype int.
The ``GreaterThan.reverse_transform`` method is expected to:
- apply an exponential to the input
- subtract 1
- add the low column
- convert the output to integers
- add back the dropped column
Setup:
- ``_drop`` is set to ``high``.
Input:
- Table with a diff column that contains the constant np.log(4).
Output:
- Same table with the high column replaced by the low one + 3, as int
and the diff column dropped.
"""
# Setup
instance = GreaterThan(low='a', high='b', strict=True, drop='high')
instance._dtype = pd.Series([1]).dtype # exact dtype (32 or 64) depends on OS
# Run
transformed = pd.DataFrame({
'a': [1, 2, 3],
'c': [7, 8, 9],
'#a#b': [np.log(4)] * 3,
})
out = instance.reverse_transform(transformed)
# Assert
expected_out = pd.DataFrame({
'a': [1, 2, 3],
'c': [7, 8, 9],
'b': [4, 5, 6],
})
pd.testing.assert_frame_equal(out, expected_out)
def test_reverse_transform_float_drop_high(self):
"""Test the ``GreaterThan.reverse_transform`` method for dtype float.
The ``GreaterThan.reverse_transform`` method is expected to:
- apply an exponential to the input
- subtract 1
- add the low column
- convert the output to float values
- add back the dropped column
Setup:
- ``_drop`` is set to ``high``.
Input:
- Table with a diff column that contains the constant np.log(4).
Output:
- Same table with the high column replaced by the low one + 3, as float values
and the diff column dropped.
"""
# Setup
instance = GreaterThan(low='a', high='b', strict=True, drop='high')
instance._dtype = np.dtype('float')
# Run
transformed = pd.DataFrame({
'a': [1.1, 2.2, 3.3],
'c': [7, 8, 9],
'#a#b': [np.log(4)] * 3,
})
out = instance.reverse_transform(transformed)
# Assert
expected_out = pd.DataFrame({
'a': [1.1, 2.2, 3.3],
'c': [7, 8, 9],
'b': [4.1, 5.2, 6.3],
})
pd.testing.assert_frame_equal(out, expected_out)
def test_reverse_transform_datetime_drop_high(self):
"""Test the ``GreaterThan.reverse_transform`` method for dtype datetime.
The ``GreaterThan.reverse_transform`` method is expected to:
- apply an exponential to the input
- subtract 1
- convert the distance to a timedelta
- add the low column
- convert the output to datetimes
Setup:
- ``_drop`` is set to ``high``.
Input:
- Table with a diff column that contains the constant np.log(1_000_000_001).
Output:
- Same table with the high column replaced by the low one + one second
and the diff column dropped.
"""
# Setup
instance = GreaterThan(low='a', high='b', strict=True, drop='high')
instance._dtype = np.dtype('<M8[ns]')
# Run
transformed = pd.DataFrame({
'a': pd.to_datetime(['2020-01-01T00:00:00', '2020-01-02T00:00:00']),
'c': [1, 2],
'#a#b': [np.log(1_000_000_001), np.log(1_000_000_001)],
})
out = instance.reverse_transform(transformed)
# Assert
expected_out = pd.DataFrame({
'a': pd.to_datetime(['2020-01-01T00:00:00', '2020-01-02T00:00:00']),
'c': [1, 2],
            'b': pd.to_datetime(['2020-01-01T00:00:01', '2020-01-02T00:00:01']),
        })
        pd.testing.assert_frame_equal(out, expected_out)
import matplotlib.dates as mdates
from tqdm import tqdm as tqdm
import datetime
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from CometTS.CometTS import interpolate_gaps
import argparse
import os
sns.set(color_codes=True)
# Functions for a seasonal auto-regressive integrated moving average analysis
# using CometTS on a time series of satellite imagery.
def run_plot_TS(
ARIMA_GDF,
        figsize=(12, 6),
y_val_alpha=1,
scatter_alpha=1,
error_alpha=0.2,
y_label="Brightness",
x_label="Date",
title_label="ARIMA_Trend",
figname='',
custom_x_axis=True,
show_grid=True,
show_legend=True,
min_count=0.5,
ymin=0,
ymax=5000):
print("Plotting...")
C = 0
# ARIMA GDF is the output GDF from TS_Trend function
for item in ARIMA_GDF['ID'].unique():
C += 1
plt.style.use('fivethirtyeight')
fig, ax = plt.subplots(figsize=figsize)
gdf3 = ARIMA_GDF[(ARIMA_GDF.ID == item)]
title = title_label
# gdf3 = gdf3.sort_values(['date'])
# gdf3 = TS_Trend(gdf3, CMA_Val=5, CutoffDate="2017/08/31")
gdf3 = gdf3.sort_values(['date'])
x = gdf3['date']
xdate = x.astype('O')
xdate = mdates.date2num(xdate)
y = gdf3['mean']
anomaly = gdf3['Anomaly']
if 'SeasonalForecast' in gdf3.columns:
if C == 1:
gdf_holder = gdf3
else:
gdf_holder = gdf_holder.append(gdf3)
T = gdf3['SeasonalForecast']
if 'observations' in gdf3.columns:
count = gdf3['observations']
err_plus = gdf3['SeasonalError_Pos']
err_minus = gdf3['SeasonalError_Neg']
# Set the min_count value as a value from 0 to 1 (0 to 100%)
# ax.set_ylim([ymin,ymax])
# This will filter out observations where over n% of a polygon is
# masked out
if min_count > 0:
z = gdf3['count']
zmax = gdf3['count'].max()
z = z / zmax
xdate = xdate[z >= min_count]
y = y[z >= min_count]
if 'observations' in gdf3.columns:
count = count[z >= min_count]
err_plus = err_plus[z >= min_count]
err_minus = err_minus[z >= min_count]
if len(xdate) == len(gdf3['Trend']):
# plot regression line, if desired
# idx = np.isfinite(xdate) & np.isfinite(y)
# p2 = np.poly1d(np.polyfit(xdate[idx], y[idx], 1))
ax.plot(
xdate,
gdf3['Trend'],
'-',
label="Linear Forecast",
color='#00C5B0',
alpha=y_val_alpha)
# plot running mean regression line
# RM=(y.rolling(window=6,center=True, min_periods=2).median())
# ax.plot(xdate, RM, '-',label="Moving Median", alpha=y_val_alpha)
# Seasonal Forecast
T = interpolate_gaps(T, limit=3)
ax.plot(
xdate,
T,
'-',
label="Seasonal Forecast",
alpha=1,
color='#FF8700')
# scatter points-median top, mean bottom
ax.scatter(
xdate,
y,
label="Mean",
s=50,
color='black',
alpha=scatter_alpha)
ax.scatter(
xdate,
anomaly,
label="Anomalies",
s=50,
color='red',
alpha=scatter_alpha)
#ax.scatter(xdate, y2, label="Mean", s=50, color='red',alpha=scatter_alpha,marker='x')
# if desired, plot error band
plt.fill_between(
xdate,
err_plus,
err_minus,
alpha=error_alpha,
color='black',
label="Forecast MAE")
ax.set_ylabel(y_label)
ax.set_xlabel(x_label)
if 'observations' in gdf3.columns:
ax.scatter(
xdate,
count,
label="# of obs",
s=50,
color='#330DD0',
alpha=y_val_alpha,
marker='d')
ax.yaxis.set_tick_params(labelsize=12)
if custom_x_axis:
years = mdates.YearLocator() # every year
months = mdates.MonthLocator() # every month
yearsFmt = mdates.DateFormatter('%Y')
ax.xaxis.set_major_locator(years)
ax.xaxis.set_major_formatter(yearsFmt)
ax.xaxis.set_minor_locator(months)
plt.rc('xtick', labelsize=12)
# ax.set_xticks(xdate)
#ax.set_xticklabels(x, rotation=50, fontsize=10)
#ax.tick_params(axis='x', which='major', pad=xticklabel_pad)
# ax.xaxis.set_major_formatter(dateformat)
#ax.set_xlim(datetime.date(settings.plot['x_min'], 1, 1),datetime.date(settings.plot['x_max'], 12, 31))
if show_grid:
ax.grid(
b=True,
which='minor',
color='black',
alpha=0.75,
linestyle=':')
if show_legend:
handles, labels = ax.get_legend_handles_labels()
ax.legend(handles, labels, ncol=1, loc='center right',
bbox_to_anchor=[1.1, 0.5],
columnspacing=1.0, labelspacing=0.0,
handletextpad=0.0, handlelength=1.5,
fancybox=True, shadow=True,
fontsize='x-small')
ax.set_title(title)
plt.tight_layout()
plt.show()
# save with figname
if len(figname) > 0:
plt.savefig(figname, dpi=500)
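# Illustrative usage sketch (not part of the original module): `arima_gdf` is assumed to
# be the GeoDataFrame produced by TS_Trend/calc_TS_Trends, i.e. it already carries the
# 'ID', 'date', 'mean', 'Anomaly', 'Trend' and 'SeasonalForecast' columns.
#
#   run_plot_TS(arima_gdf, y_label="Brightness", title_label="ARIMA_Trend",
#               figname="ARIMA_plot.png", min_count=0.5)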
def TS_Trend(gdf3, CMA_Val=3, CutoffDate="2017/08/31",Uncertainty=2):
x = gdf3['date']
# Split data into a before and after event, i.e. a Hurricane.
# If no event simply set as last date in dataset, or a middle date.
CutoffDate = datetime.datetime.strptime(CutoffDate, '%Y/%m/%d').date()
# Do some date conversion
xcutoff = mdates.date2num(CutoffDate)
xdate = x.astype('O')
xdate = mdates.datestr2num(xdate)
gdf3['xdate'] = xdate
# Presently geared toward monthly trends only, could split this into days.
gdf3['Month'] = pd.DatetimeIndex(gdf3['date']).month
# Create a new GDF for before the event
gdf4 = gdf3.loc[gdf3['xdate'] <= xcutoff]
gdf3 = gdf3.sort_values(['date'])
gdf4 = gdf4.sort_values(['date'])
# print(gdf4)
# print(gdf3)
# Start ARIMA
y = gdf4['mean']
gdf4['CMA'] = y.rolling(window=CMA_Val, center=True).mean()
gdf4['Div'] = gdf4['mean'] / gdf4['CMA']
# print(gdf4['Div'])
f = gdf4.groupby(['Month'])['Div'].mean()
f = pd.DataFrame({'Month': f.index, 'SeasonalTrend': f.values})
gdf4 = gdf4.merge(f, on='Month')
gdf3 = gdf3.merge(f, on='Month')
# Seasonal ARIMA
gdf4['Deseasonalized'] = gdf4['mean'] / gdf4['SeasonalTrend']
y = gdf4['Deseasonalized']
z = gdf4['xdate']
gdf3 = gdf3.sort_values(['date'])
idx = np.isfinite(z) & np.isfinite(y)
    if any(idx):
        # Apply ARIMA to the original GDF
p2 = np.poly1d(np.polyfit(z[idx], y[idx], 1))
gdf3['Trend'] = p2(gdf3['xdate'])
gdf3['SeasonalForecast'] = gdf3['SeasonalTrend'] * gdf3['Trend']
gdf4['Trend'] = p2(gdf4['xdate'])
gdf4['SeasonalForecast'] = gdf4['SeasonalTrend'] * gdf4['Trend']
Error = Uncertainty * np.nanmean(abs(gdf4['SeasonalForecast'] - gdf4['mean']))
gdf3['SeasonalError_Pos'] = gdf3['SeasonalForecast'] + Error
gdf3['SeasonalError_Neg'] = gdf3['SeasonalForecast'] - Error
Neg_Anom = gdf3['mean'] < gdf3['SeasonalError_Neg']
Pos_Anom = gdf3['mean'] > gdf3['SeasonalError_Pos']
gdf5 = gdf3.where(Neg_Anom | Pos_Anom)
gdf3['Anomaly'] = gdf5['mean']
# print(gdf3['Anomaly'])
# print(gdf3['Anomaly'])
return gdf3
else:
return gdf3
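# Illustrative usage sketch (assumed inputs): `gdf` is one ID's time series with 'date'
# and 'mean' columns, as produced by CometTS; the cutoff date splits pre/post event data.
#
#   gdf_trend = TS_Trend(gdf, CMA_Val=5, CutoffDate="2017/08/31", Uncertainty=2)
#   # adds 'Trend', 'SeasonalForecast', 'SeasonalError_Pos/Neg' and 'Anomaly' columns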
def calc_TS_Trends(CometTSOutputCSV="/San_Juan_FullStats.csv", outname="/FullStats_TS_Trend.csv", CMA_Val=3, CutoffDate= "2017/12/31",Uncertainty=2):
print("Calculating...")
C = 0
    main_gdf = pd.read_csv(CometTSOutputCSV)
# -*- coding: utf-8 -*-
"""
/***************************************************************************
Summarizes raster statistics in parallel from soilgrids to estonian soil polygons
-------------------
copyright : (C) 2018-2020 by <NAME>
email : alexander.kmoch at ut.ee
***************************************************************************/
/***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the MIT License *
* *
***************************************************************************/
"""
from dask.distributed import Client
import geopandas as gpd
import pandas as pd
import fiona
from rasterstats import zonal_stats
import logging
import datetime
log_level = logging.INFO
# create logger
logger = logging.getLogger(__name__)
logger.setLevel(log_level)
fh = logging.FileHandler('script_output.log')
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
fh.setFormatter(formatter)
# add the handlers to the logger
logger.addHandler(fh)
start = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
if __name__ == "__main__":
# client = Client()
client = Client(processes=True,
n_workers=2,
threads_per_worker=1,
memory_limit='12GB')
print(client.scheduler_info()['services'])
logger.info("client ready at ... {} ... at {}".format(client.scheduler_info()['services'], datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")))
soilgrids = {
'sand': 'soil250_grid_sand_sd',
'silt': 'soil250_grid_silt_sd',
'clay': 'soil250_grid_clay_sd',
'rock': 'soil250_grid_coarsefrag_sd',
'bd': 'soil250_grid_bulkdens_sd',
'soc': 'soil250_grid_soc_sd',
'awc': 'soil250_grid_awc_sd',
'k_sat': 'soil250_grid_k_sat_sd'
}
raster_file_collection = []
template_raster_conf = {
'variable_name': 'sand',
'layer_num': 1,
'actual_file_ref': '/run/user/1817077476/alex_tmp_geo/soilgrids_download/soil250_grid_sand_sd6_3301.tif'
# 'actual_file_ref': '../soilgrids_download/soil250_grid_sand_sd6_3301.tif'
}
base_path = '/home/DOMENIS.UT.EE/kmoch/soil_paper_materials/WORK_TMP'
for layer_num in range(1,8):
for layer_type in soilgrids.keys():
file_name = f"{base_path}/soilgrids_download/{soilgrids[layer_type]}{layer_num}_3301.tif"
# file_name = f"../soilgrids_download/{soilgrids[layer_type]}{layer_num}_3301.tif"
try:
with open(file_name, 'r') as fh:
# Load configuration file values
print(file_name)
template_raster_conf = {
'variable_name': layer_type,
'layer_num': layer_num,
'actual_file_ref': file_name
}
raster_file_collection.append(template_raster_conf)
except FileNotFoundError:
# Keep preset values
print("NOT " + f"{soilgrids[layer_type]}{layer_num}")
logger.warn("NOT " + f"{soilgrids[layer_type]}{layer_num}")
# '../data_deposit/EstSoil-EH_id_tmp.shp'
geom_template = f'{base_path}/EstSoil-EH_id_tmp.shp'
print("reading in template ESTSOIL geoms")
logger.info("reading in template ESTSOIL geoms ... {} ... at {}".format(geom_template, datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")))
geom_template_df = gpd.read_file(geom_template, encoding='utf-8')
refdf_scattered = client.scatter(geom_template_df, broadcast=True)
# for raster_conf_dict in raster_file_collection:
def inner_raster_summary(raster_conf_dict, scattered_df):
# raster_summary(raster_conf_dict)
# print(f"Starting with {raster_conf_dict['actual_file_ref']}")
print("Starting with {} at {}".format(raster_conf_dict['actual_file_ref'], datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")))
logger.info("Starting with {} at {}".format(raster_conf_dict['actual_file_ref'], datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")))
variable_name = raster_conf_dict['variable_name']
layer_num = raster_conf_dict['layer_num']
actual_file_ref = raster_conf_dict['actual_file_ref']
tif_src = actual_file_ref
# shp_tmp_out = f"../data_deposit/EstSoil-EH_{variable_name}{layer_num}_zonal_stats.shp"
csv_tmp_out = f"../data_deposit/EstSoil-EH_{variable_name}{layer_num}_zonal_stats.csv"
parquet_tmp_out = f"../data_deposit/EstSoil-EH_{variable_name}{layer_num}_zonal_stats.parquet.gzip"
with fiona.open(geom_template) as vector_src:
# src_crs = vector_src.crs
# display(src_crs)
# src_schema = vector_src.schema
# display(src_schema)
# src_schema['properties']["mean"] = "float"
# src_schema['properties']["std"] = "float"
print("zonal stats")
outputs = zonal_stats(vector_src,
tif_src,
stats="mean std",
# geojson_out=True,
all_touched=True)
# with fiona.open(shp_tmp_out, "w", driver="ESRI Shapefile", schema=src_schema, crs=src_crs) as collection:
# collection.writerecords(outputs)
# print(len(collection))
# collection.flush()
print("output df preps")
        geo_stats_df = pd.DataFrame(outputs)
import pandas as pd
lista_valores = [1,2,3]
lista_indices = ['a', 'b', 'c']
serie = pd.Series(lista_valores, index=lista_indices)
print(serie)
lista_notas = [[6,7,8],[8,9,5],[6,9,7]]
lista_indices2 = ['Matematicas', 'historia', 'fisica']
lista_nombres = ['Antonio', 'Maria', 'Pedro']
dataframe = pd.DataFrame(lista_notas, index=lista_indices2, columns=lista_nombres)
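# Illustrative additions (not part of the original snippet): print the DataFrame and
# select a single grade by row/column label with .loc.
print(dataframe)
print(dataframe.loc['historia', 'Maria'])  # -> 9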
import numpy as np
import pandas as pd
from scipy import signal as ssig
from scipy import stats as spst
import os
import re
import string
from salishsea_tools import geo_tools
import netCDF4 as nc
class Cast:
def __init__(self,fpath):
mSta,mLat,mLon,df=readcnv(fpath)
self.sta=mSta
self.lat=mLat
self.lon=mLon
self.df=df
self.source=fpath
class zCast:
def __init__(self,updf,downdf):
self.uCast=updf
self.dCast=downdf
class rawCast:
def __init__(self):
self.uCast=dict()
self.dCast=dict()
class dataPair:
def __init__(self,zval,varval):
self.z=zval
self.val=varval
def fmtVarName(strx):
""" transform string into one that meets python naming conventions"""
vName=re.sub('[^a-zA-Z0-9_\-\s/]','',strx.strip())
vName=re.sub('[\s/]','_',vName)
vName=re.sub('-','_',vName)
if re.match('[0-9]',vName):
vName='_'+vName
return vName
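# Illustrative example (assumed input string): fmtVarName('t090C: Temperature [ITS-90, deg C]')
# drops the disallowed characters and maps spaces/slashes/dashes to underscores,
# returning 't090C_Temperature_ITS_90_deg_C'.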
def rolling_window(a, window):
# source: http://www.rigtorp.se/2011/01/01/rolling-statistics-numpy.html
# use example: np.mean(rolling_window(x, 3), -1)
shape = a.shape[:-1] + (a.shape[-1] - window + 1, window)
strides = a.strides + (a.strides[-1],)
return np.lib.stride_tricks.as_strided(a, shape=shape, strides=strides)
def rolling_window_padded(a,window):
# extend rolling window to be same lenth as input array by duplicating first and last values
# even values not symmetric
test=rolling_window(a,window)
while window>1:
if window%2==0:
test=np.concatenate(([test[0,:]],test),axis=0)
else:
test=np.concatenate((test,[test[-1,:]]),axis=0)
window+=-1
return test
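# Illustrative usage (x is an assumed 1-D numpy array): a centred 5-point running mean
# with the same length as x, as used for the turbidity QC further below:
#   smoothed = np.mean(rolling_window_padded(x, 5), -1)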
def amp(var,dim=0):
return np.nanmax(var,dim)-np.nanmin(var,dim)
#def remSurfTurb(val,z,dz):
# edges=np.arange(0,dz,2)
# binned=np.digitize(z,edges)
# for jj in range(1,len(edges)):
# ll=(binned==jj)&(~np.isnan(val))
# if np.sum(ll)>0:
# if amp(val[ll])>.5*np.nanmax(val):
# val[ll]=np.nan
# return val
def readcnv(fpath):
alphnumlist=list(string.ascii_letters)+list(string.digits)
# define regexes for reading headers:
reSta=re.compile('(?<=\*\*\sStation:)\s?([0-9])+\s?') # assumes numeric station identifiers
reLat=re.compile('(?<=\*\*\sLatitude\s=)\s?([\-0-9\.]+)\s([\-\.0-9]+)\s?([NS])')
reLon=re.compile('(?<=\*\*\sLongitude\s=)\s?([\-0-9\.]+)\s([\-\.0-9]+)\s?([EW])')
# start_time = May 08 2002 09:39:10
reST=re.compile('(?<=\#\sstart_time\s=).*')
#reTZ=re.compile('(?<=\*\*\s...\s\(Time\)\s=).*')
#reCr=re.compile('(?<=\*\*\sCruise:).*')
reNam=re.compile('(?<=\#\sname\s)([0-9]+)\s=\s(.*)\:\s?(.*)\s?')
# define regex for finding searching:
spStart=re.compile('^\s*[0-9]') # starts with space characters followed by digit
headers=list()
#lineno=0
mSta=None
mLat=None
mLon=None
with open(fpath, 'rt', encoding="ISO-8859-1") as f:
for fline in f:
if fline.startswith('**'):
if reSta.search(fline):
mSta=reSta.search(fline).groups()
if reLat.search(fline):
mLat=reLat.search(fline).groups()
if reLon.search(fline):
mLon=reLon.search(fline).groups()
elif reNam.search(fline):
headers.append(fmtVarName(reNam.search(fline).groups(1)[1]))
elif fline.startswith('*END*'):
break
#lineno+=1
#still in with file open
df=pd.read_csv(f,delim_whitespace=True,names=headers)
# file closed
return mSta,mLat,mLon,df
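# Illustrative usage (the .cnv path is an assumption):
#   sta, lat, lon, df = readcnv('/ocean/shared/.../fraser2017001.cnv')
#   # sta/lat/lon are the regex groups parsed from the header; df holds the data columns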
def bindepth(inP,inV,edges,targets=[],prebin=False):
# calculate depth-associated variables
# 1st calculate bin averages of depth and variable
# then use np interp to estimate at-grid-point values
# edges must be monotonically increasing
if prebin==True:
newP,newV=bindepth(inP,inV,np.arange(edges[0],edges[-1],.05),prebin=False)
inP=newP
inV=newV
inP=inP[~np.isnan(inV)]
inV=inV[~np.isnan(inV)]
binned=np.digitize(inP,edges)
Pa=np.empty(len(edges)-1)
Va=np.empty(len(edges)-1)
if len(targets) == 0:
Pi=.5*(edges[:-1]+edges[1:])
else:
Pi=targets[:(len(edges)-1)]
Vi=np.empty(len(edges)-1)
for jj in range(1,len(edges)):
ll=(binned==jj) #&(~np.isnan(inV))
if np.sum(ll)>0:
Pa[jj-1]=np.mean(inP[ll])
Va[jj-1]=np.mean(inV[ll])
else:
Pa[jj-1]=np.nan
Va[jj-1]=np.nan
# linearly extrapolate some values, but not beyond range of original data
pnew=Pa[0]-(Pa[1]-Pa[0])
vnew=Va[0]-(Va[1]-Va[0])
Pa=np.concatenate(([pnew],Pa))
Va=np.concatenate(([vnew],Va))
Vi=np.interp(Pi,Pa[~np.isnan(Va)],Va[~np.isnan(Va)],right=np.nan,left=np.nan)
Vi[Pi>np.max(inP)]=np.nan
Vi[Pi<np.min(inP)]=np.nan
return Pi, Vi
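# Illustrative usage (prof_p/prof_v are assumed raw pressure/variable arrays from a cast):
#   edges = np.arange(0.5, 50.5, 1.0)              # 1 m bins
#   zbin, vbin = bindepth(prof_p, prof_v, edges)   # bin-averaged profile at bin centres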
def cXfromX(X):
X=np.array(X)
X[np.isnan(X)]=-5
Y=np.nan*X
iii=(X>0)&(X<100)
Y[iii]=-np.log(X[iii]/100.0)/.25
return Y
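# Illustrative example: assuming the 0.25 here is a 25 cm optical path, 50% transmission
# maps to a beam attenuation of -ln(0.5)/0.25 ~= 2.77 m^-1:
#   cXfromX([50.0])  # -> array([2.77...])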
def turbReg(m,Cx,fl):
return np.maximum(0.0,m[0]*Cx-m[1]*fl-m[2])
def loadDataFRP_init(exp='all'):
if exp not in {'exp1', 'exp2', 'exp3', 'all'}:
print('option exp='+exp+' is not defined.')
raise
with open('/ocean/shared/SalishSeaCastData/FRPlume/stationsDigitizedFinal.csv','r') as fa:
df0_a=pd.read_csv(fa,header=0,na_values='None')
with open('/ocean/shared/SalishSeaCastData/FRPlume/util/stationsDigitized_ancillary.csv','r') as fb:
df0_b=pd.read_csv(fb,header=0,na_values='None')
df0=pd.merge(df0_a,df0_b,how='left',on=['Station','Date'])
# calculate correction factor for sb19 turbidity (divide sb19 turbidity by tcor)
x=df0.loc[(df0.ALS_Turb_NTU>0)&(df0.sb19Turb_uncorrected>0)]['sb19Turb_uncorrected'].values
x=x[:,np.newaxis]
tcor=1.0/np.linalg.lstsq(x,df0.loc[(df0.ALS_Turb_NTU>0)&(df0.sb19Turb_uncorrected>0)]['ALS_Turb_NTU'],rcond=None)[0]
# rewritten in terms of fitting true turb to observed turb for consistency with paper
if exp=='exp1':
df0=df0.drop(df0.index[df0.Date != 20170410])
elif exp=='exp2':
df0=df0.drop(df0.index[df0.Date != 20170531])
elif exp=='exp3':
df0=df0.drop(df0.index[df0.Date != 20171101])
basedir1='/ocean/shared/SalishSeaCastData/FRPlume/ctd/20170410/'
basedir2='/ocean/shared/SalishSeaCastData/FRPlume/ctd/20170531/'
basedir3='/ocean/shared/SalishSeaCastData/FRPlume/ctd/20171101/'
dir19='19-4561/4_derive'
dir25='25-0363/4_derive'
dir19T10='19-4561/4b_deriveTEOS10'
dir25T10='25-0363/4a_deriveTEOS10'
f19=dict()
f25=dict()
if (exp=='exp1' or exp=='all'):
f19[1]='fraser2017101.cnv'
f19[2]='fraser2017102.cnv'
f19[3]='fraser2017103.cnv'
f19[4]='fraser2017104.cnv'
f19[5]='fraser2017105.cnv'
f19[6]='fraser2017106.cnv'
f19[7]='fraser2017107.cnv'
f19[8]='fraser2017108.cnv'
f19[9]='fraser2017109.cnv'
f25[1]='fraser2017001.cnv'
f25[2]='fraser2017002.cnv'
f25[3]='fraser2017003.cnv'
f25[4]='fraser2017004.cnv'
f25[5]='fraser2017005.cnv'
f25[6]='fraser2017006.cnv'
f25[7]='fraser2017007.cnv'
f25[8]='fraser2017008.cnv'
f25[9]='fraser2017009.cnv'
if (exp=='exp2' or exp=='all'):
f19[10]='fraser2017110.cnv'
f19[11]='fraser2017111.cnv'
f19[12]='fraser2017112.cnv'
f19[13]='fraser2017113.cnv'
f19[14.1]='fraser2017114.cnv'
f19[14.2]='fraser2017114.cnv'
f19[15]='fraser2017115.cnv'
f19[16]='fraser2017116.cnv'
f19[17]='fraser2017117.cnv'
f19[18]='fraser2017118.cnv'
f25[10]='fraser2017010.cnv'
f25[11]='fraser2017011.cnv'
f25[12]='fraser2017012.cnv'
f25[13]='fraser2017013.cnv'
f25[14.1]='fraser2017014.cnv'
f25[14.2]='fraser2017014.cnv'
f25[15]='fraser2017015.cnv'
f25[16]='fraser2017016.cnv'
f25[17]='fraser2017017.cnv'
f25[18]='fraser2017018.cnv'
if (exp=='exp3' or exp=='all'):
f19[19]='fraser2017119.cnv'
f19[20]='fraser2017120.cnv'
f19[21]='fraser2017121.cnv'
f19[22]='fraser2017122.cnv'
f19[23]='fraser2017123.cnv'
f19[24]='fraser2017124.cnv'
f25[19]='fraser2017019.cnv'
f25[20]='fraser2017020.cnv'
f25[21]='fraser2017021.cnv'
f25[22]='fraser2017022.cnv'
f25[23]='fraser2017023.cnv'
f25[24]='fraser2017024.cnv'
fpath19=dict()
fpath25=dict()
clist=np.sort([ii for ii in f19.keys()])
for ii in clist:
if ii<10:
fpath19[ii]=os.path.join(basedir1,dir19T10,f19[ii])
fpath25[ii]=os.path.join(basedir1,dir25T10,f25[ii])
elif ii<19:
fpath19[ii]=os.path.join(basedir2,dir19T10,f19[ii])
fpath25[ii]=os.path.join(basedir2,dir25T10,f25[ii])
else:
fpath19[ii]=os.path.join(basedir3,dir19T10,f19[ii])
fpath25[ii]=os.path.join(basedir3,dir25T10,f25[ii])
cast19=dict()
cast25=dict()
for ii in clist:
cast19[ii]=Cast(fpath19[ii])
cast25[ii]=Cast(fpath25[ii])
return df0, clist, tcor, cast19, cast25
def loadDataFRP(exp='all',sel='narrow',dp=1.0):
if exp not in {'exp1', 'exp2', 'all'}:
print('option exp='+exp+' is not defined.')
raise
if sel not in {'narrow', 'wide'}:
print('option sel='+sel+' is not defined.')
raise
df0, clist, tcor, cast19, cast25 = loadDataFRP_init(exp=exp)
zCasts=dict()
for nn in clist:
ip=np.argmax(cast25[nn].df['prSM'].values)
ilag=df0.loc[df0.Station==nn,'ishift_sub19'].values[0]
pS_pr=df0.loc[df0.Station==nn,'pS_pr'].values[0]
pE_pr=df0.loc[df0.Station==nn,'pE_pr'].values[0]
pS_tur=df0.loc[df0.Station==nn,'pStart25'].values[0]
pE_tur=df0.loc[df0.Station==nn,'pEnd25'].values[0]
if sel=='narrow':
pS=pS_tur
pE=pE_tur
prebin=False
elif sel=='wide':
pS=pS_pr
pE=pE_pr
prebin=True
pmax=cast25[nn].df.loc[ip,'prSM']
edges=np.arange(dp/2,pmax+dp,dp)
#edges=np.arange(0,pmax+dp,dp)
parDZ=.78
xmisDZ=.36
turbDZ=.67
pshiftdict={'gsw_ctA0':0.0,'gsw_srA0':0.0,'xmiss':xmisDZ,'seaTurbMtr':turbDZ,'par':parDZ,
'wetStar':0.0,'sbeox0ML_L':0.0}
dCast=pd.DataFrame()
uCast=pd.DataFrame()
for var in ('gsw_ctA0','gsw_srA0','xmiss','par','wetStar','sbeox0ML_L'):
if not nn==14.2:
#downcast
inP=cast25[nn].df.loc[pS:ip]['prSM'].values-pshiftdict[var] # down p
inV=cast25[nn].df.loc[pS:ip][var].values # down var
if sel=='wide':
inV[inP<.1]=np.nan
p, out=bindepth(inP,inV,edges,prebin=prebin)
if var=='gsw_ctA0':
dCast=pd.DataFrame(p,columns=['prSM'])
dCast[var]=out
else:# special case where there is no downcast
if var=='gsw_ctA0':
dCast=pd.DataFrame(np.nan*np.ones(10),columns=['prSM'])
dCast[var]=np.nan*np.ones(10)
if not nn==14.1:
#upcast
inP=cast25[nn].df.loc[ip:pE]['prSM'].values-pshiftdict[var] # down p
inV=cast25[nn].df.loc[ip:pE][var].values # down var
if sel=='wide':
inV[inP<.1]=np.nan
p, out=bindepth(inP,inV,edges,prebin=prebin)
if var=='gsw_ctA0':
uCast=pd.DataFrame(p,columns=['prSM'])
uCast[var]=out
else:# special case where there is no upcast
if var=='gsw_ctA0':
uCast=pd.DataFrame(np.nan*np.ones(10),columns=['prSM'])
uCast[var]=np.nan*np.ones(10)
if not nn==14.2:
#turbidity downcast
inP=cast25[nn].df.loc[pS:ip]['prSM'].values-turbDZ # down p
inV0=cast19[nn].df.loc[(pS+ilag):(ip+ilag)]['seaTurbMtr'].values # down var
if sel=='wide':
# additional QC for broader data selection
ii1=amp(rolling_window_padded(inV0,5),-1)>.5*np.nanmax(inV0)
# get rid of near-zero turbidity values; seem to be dropped signal
ii2=np.nanmin(rolling_window_padded(inV0,5),-1)<.3
inV0[np.logical_or(ii1,ii2)]=np.nan
inV=ssig.medfilt(inV0,3) # down var
if sel=='wide': # exclude above surface data
with np.errstate(invalid='ignore'):
inV[inP<.1]=np.nan
p, tur=bindepth(inP,inV,edges,prebin=prebin)
dCast['turb']=tur*1.0/tcor
else: # special case where there is no downcast
dCast['turb']=np.nan*np.ones(10)
if not nn==14.1:
#turbidity upcast
inP=cast25[nn].df.loc[ip:pE]['prSM'].values-turbDZ # up p
inV0=cast19[nn].df.loc[(ip+ilag):(pE+ilag)]['seaTurbMtr'].values # up var
if sel=='wide':
# additional QC for broader data selection
ii1=amp(rolling_window_padded(inV0,5),-1)>.5*np.nanmax(inV0)
# get rid of near-zero turbidity values; seem to be dropped signal
ii2=np.nanmin(rolling_window_padded(inV0,5),-1)<.3
inV0[np.logical_or(ii1,ii2)]=np.nan
inV=ssig.medfilt(inV0,3) # down var
if sel=='wide': # exclude above surface data
with np.errstate(invalid='ignore'):
inV[inP<.1]=np.nan
p, tur=bindepth(inP,inV,edges,prebin=prebin)
uCast['turb']=tur*1.0/tcor
else: # special case where there is no upcasts
uCast['turb']=np.nan*np.ones(10)
zCasts[nn]=zCast(uCast,dCast)
# fix first 2 casts for which sb25 pump did not turn on. use sb19
if (exp=='exp1' or exp=='all'):
for nn in range(1,3):
uCast=zCasts[nn].uCast
dCast=zCasts[nn].dCast
ip=np.argmax(cast25[nn].df['prSM'].values)
ilag=df0.loc[df0.Station==nn,'ishift_sub19'].values[0]
pS_pr=df0.loc[df0.Station==nn,'pS_pr'].values[0]
pE_pr=df0.loc[df0.Station==nn,'pE_pr'].values[0]
pS_tur=df0.loc[df0.Station==nn,'pStart25'].values[0]
pE_tur=df0.loc[df0.Station==nn,'pEnd25'].values[0]
if sel=='narrow':
pS=pS_tur
pE=pE_tur
elif sel=='wide':
pS=pS_pr
pE=pE_pr
pmax=cast25[nn].df.loc[ip,'prSM']
edges=np.arange(dp/2,pmax+dp,dp)
#edges=np.arange(0,pmax+dp,dp)
##temperature
#downcast
inP=cast25[nn].df.loc[pS:ip]['prSM'].values # down p
inV=cast19[nn].df.loc[(pS+ilag):(ip+ilag)]['gsw_ctA0'].values # down var
if sel=='wide':
inV[inP<.1]=np.nan
p, out=bindepth(inP,inV,edges,prebin=prebin)
dCast['gsw_ctA0']=out
#upcast
inP=cast25[nn].df.loc[ip:pE]['prSM'].values # up p
inV=cast19[nn].df.loc[(ip+ilag):(pE+ilag)]['gsw_ctA0'].values # up var
if sel=='wide':
inV[inP<.1]=np.nan
p, out=bindepth(inP,inV,edges,prebin=prebin)
uCast['gsw_ctA0']=out
##sal
#downcast
inP=cast25[nn].df.loc[pS:ip]['prSM'].values # down p
inV=cast19[nn].df.loc[(pS+ilag):(ip+ilag)]['gsw_srA0'].values # down var
if sel=='wide':
inV[inP<.1]=np.nan
p, out=bindepth(inP,inV,edges,prebin=prebin)
dCast['gsw_srA0']=out
#upcast
inP=cast25[nn].df.loc[ip:pE]['prSM'].values # up p
inV=cast19[nn].df.loc[(ip+ilag):(pE+ilag)]['gsw_srA0'].values # up var
if sel=='wide':
inV[inP<.1]=np.nan
p, out=bindepth(inP,inV,edges,prebin=prebin)
uCast['gsw_srA0']=out
##xmiss: xmis25=1.14099414691*xmis19+-1.6910134322
#downcast
inP=cast25[nn].df.loc[pS:ip]['prSM'].values-xmisDZ # down p
inV=1.14099414691*cast19[nn].df.loc[(pS+ilag):(ip+ilag)]['CStarTr0'].values-1.6910134322 # down var
if sel=='wide':
inV[inP<.1]=np.nan
p, out=bindepth(inP,inV,edges,prebin=prebin)
dCast['xmiss']=out
#upcast
inP=cast25[nn].df.loc[ip:pE]['prSM'].values-xmisDZ # up p
inV=1.14099414691*cast19[nn].df.loc[(ip+ilag):(pE+ilag)]['CStarTr0'].values-1.6910134322 # up var
if sel=='wide':
inV[inP<.1]=np.nan
p, out=bindepth(inP,inV,edges,prebin=prebin)
uCast['xmiss']=out
uCast['wetStar']=np.nan
dCast['wetStar']=np.nan
uCast['sbeox0ML_L']=np.nan
dCast['sbeox0ML_L']=np.nan
zCasts[nn]=zCast(uCast,dCast)
return df0, zCasts
def loadDataFRP_raw(exp='all',sel='narrow',meshPath='/ocean/eolson/MEOPAR/NEMO-forcing/grid/mesh_mask201702.nc'):
import gsw # use to convert p to z
if exp not in {'exp1', 'exp2', 'exp3', 'all'}:
print('option exp='+exp+' is not defined.')
raise
if sel not in {'narrow', 'wide'}:
print('option sel='+sel+' is not defined.')
raise
df0, clist, tcor, cast19, cast25 = loadDataFRP_init(exp=exp)
zCasts=dict()
for nn in clist:
zCasts[nn]=rawCast()
ip=np.argmax(cast25[nn].df['prSM'].values)
ilag=df0.loc[df0.Station==nn,'ishift_sub19'].values[0]
pS_pr=df0.loc[df0.Station==nn,'pS_pr'].values[0]
pE_pr=df0.loc[df0.Station==nn,'pE_pr'].values[0]
pS_tur=df0.loc[df0.Station==nn,'pStart25'].values[0]
pE_tur=df0.loc[df0.Station==nn,'pEnd25'].values[0]
if sel=='narrow':
pS=pS_tur
pE=pE_tur
elif sel=='wide':
pS=pS_pr
pE=pE_pr
parDZ=.78
xmisDZ=.36
turbDZ=.67
zshiftdict={'gsw_ctA0':0.0,'gsw_srA0':0.0,'xmiss':xmisDZ,'seaTurbMtr':turbDZ,'par':parDZ,
'wetStar':0.0,'sbeox0ML_L':0.0}
for var in ('gsw_ctA0','gsw_srA0','xmiss','par','wetStar','sbeox0ML_L'):
if not nn==14.2:
#downcast
inP=-1*gsw.z_from_p(cast25[nn].df.loc[pS:ip]['prSM'].values,
df0.loc[df0.Station==nn]['LatDecDeg'])-zshiftdict[var] # down z
inV=cast25[nn].df.loc[pS:ip][var].values # down var
if sel=='wide':
inV[inP<.1]=np.nan
zCasts[nn].dCast[var]=dataPair(inP,inV)
else:# special case where there is no downcast
zCasts[nn].dCast[var]=dataPair(np.nan,np.nan)
if not nn==14.1:
#upcast
inP=-1*gsw.z_from_p(cast25[nn].df.loc[ip:pE]['prSM'].values,
df0.loc[df0.Station==nn]['LatDecDeg'])-zshiftdict[var] # down z
inV=cast25[nn].df.loc[ip:pE][var].values # down var
if sel=='wide':
inV[inP<.1]=np.nan
zCasts[nn].uCast[var]=dataPair(inP,inV)
else:# special case where there is no upcast
zCasts[nn].uCast[var]=dataPair(np.nan,np.nan)
if not nn==14.2:
#turbidity downcast
inP=-1*gsw.z_from_p(cast25[nn].df.loc[pS:ip]['prSM'].values,
df0.loc[df0.Station==nn]['LatDecDeg'])-turbDZ # down z
inV0=cast19[nn].df.loc[(pS+ilag):(ip+ilag)]['seaTurbMtr'].values # down var
if sel=='wide':
# additional QC for broader data selection
ii1=amp(rolling_window_padded(inV0,5),-1)>.5*np.nanmax(inV0)
# get rid of near-zero turbidity values; seem to be dropped signal
ii2=np.nanmin(rolling_window_padded(inV0,5),-1)<.3
inV0[np.logical_or(ii1,ii2)]=np.nan
inV=ssig.medfilt(inV0,3) # down var
if sel=='wide': # exclude above surface data
with np.errstate(invalid='ignore'):
inV[inP<.1]=np.nan
zCasts[nn].dCast['turb_uncor']=dataPair(inP,inV)
zCasts[nn].dCast['turb']=dataPair(inP,inV*1.0/tcor)
else: # special case where there is no downcast
zCasts[nn].dCast['turb_uncor']=dataPair(np.nan,np.nan)
zCasts[nn].dCast['turb']=dataPair(np.nan,np.nan)
if not nn==14.1:
#turbidity upcast
inP=-1*gsw.z_from_p(cast25[nn].df.loc[ip:pE]['prSM'].values,
df0.loc[df0.Station==nn]['LatDecDeg'])-turbDZ # up z
inV0=cast19[nn].df.loc[(ip+ilag):(pE+ilag)]['seaTurbMtr'].values # up var
if sel=='wide':
# additional QC for broader data selection
ii1=amp(rolling_window_padded(inV0,5),-1)>.5*np.nanmax(inV0)
# get rid of near-zero turbidity values; seem to be dropped signal
ii2=np.nanmin(rolling_window_padded(inV0,5),-1)<.3
inV0[np.logical_or(ii1,ii2)]=np.nan
inV=ssig.medfilt(inV0,3) # down var
if sel=='wide': # exclude above surface data
with np.errstate(invalid='ignore'):
inV[inP<.1]=np.nan
zCasts[nn].uCast['turb_uncor']=dataPair(inP,inV)
zCasts[nn].uCast['turb']=dataPair(inP,inV*1.0/tcor)
else: # special case where there is no upcasts
zCasts[nn].uCast['turb_uncor']=dataPair(np.nan,np.nan)
zCasts[nn].uCast['turb']=dataPair(np.nan,np.nan)
# fix first 2 casts for which sb25 pump did not turn on. use sb19
if (exp=='exp1' or exp=='all'):
for nn in range(1,3):
ip=np.argmax(cast25[nn].df['prSM'].values)
ilag=df0.loc[df0.Station==nn,'ishift_sub19'].values[0]
pS_pr=df0.loc[df0.Station==nn,'pS_pr'].values[0]
pE_pr=df0.loc[df0.Station==nn,'pE_pr'].values[0]
pS_tur=df0.loc[df0.Station==nn,'pStart25'].values[0]
pE_tur=df0.loc[df0.Station==nn,'pEnd25'].values[0]
if sel=='narrow':
pS=pS_tur
pE=pE_tur
elif sel=='wide':
pS=pS_pr
pE=pE_pr
##temperature
#downcast
inP=-1*gsw.z_from_p(cast25[nn].df.loc[pS:ip]['prSM'].values,
df0.loc[df0.Station==nn]['LatDecDeg']) # down z
inV=cast19[nn].df.loc[(pS+ilag):(ip+ilag)]['gsw_ctA0'].values # down var
if sel=='wide':
inV[inP<.1]=np.nan
zCasts[nn].dCast['gsw_ctA0']=dataPair(inP,inV)
#upcast
inP=-1*gsw.z_from_p(cast25[nn].df.loc[ip:pE]['prSM'].values,
df0.loc[df0.Station==nn]['LatDecDeg']) # up z
inV=cast19[nn].df.loc[(ip+ilag):(pE+ilag)]['gsw_ctA0'].values # up var
if sel=='wide':
inV[inP<.1]=np.nan
zCasts[nn].uCast['gsw_ctA0']=dataPair(inP,inV)
##sal
#downcast
inP=-1*gsw.z_from_p(cast25[nn].df.loc[pS:ip]['prSM'].values,
df0.loc[df0.Station==nn]['LatDecDeg']) # down z
inV=cast19[nn].df.loc[(pS+ilag):(ip+ilag)]['gsw_srA0'].values # down var
if sel=='wide':
inV[inP<.1]=np.nan
zCasts[nn].dCast['gsw_srA0']=dataPair(inP,inV)
#upcast
inP=-1*gsw.z_from_p(cast25[nn].df.loc[ip:pE]['prSM'].values,
df0.loc[df0.Station==nn]['LatDecDeg']) # up z
inV=cast19[nn].df.loc[(ip+ilag):(pE+ilag)]['gsw_srA0'].values # up var
if sel=='wide':
inV[inP<.1]=np.nan
zCasts[nn].uCast['gsw_srA0']=dataPair(inP,inV)
##xmiss: xmis25=1.14099414691*xmis19+-1.6910134322
#downcast
inP=-1*gsw.z_from_p(cast25[nn].df.loc[pS:ip]['prSM'].values,
df0.loc[df0.Station==nn]['LatDecDeg'])-xmisDZ # down z
inV=1.14099414691*cast19[nn].df.loc[(pS+ilag):(ip+ilag)]['CStarTr0'].values-1.6910134322 # down var
if sel=='wide':
inV[inP<.1]=np.nan
zCasts[nn].dCast['xmiss']=dataPair(inP,inV)
#upcast
inP=-1*gsw.z_from_p(cast25[nn].df.loc[ip:pE]['prSM'].values,
df0.loc[df0.Station==nn]['LatDecDeg'])-xmisDZ # up p
inV=1.14099414691*cast19[nn].df.loc[(ip+ilag):(pE+ilag)]['CStarTr0'].values-1.6910134322 # up var
if sel=='wide':
                inV[inP<.1]=np.nan
            zCasts[nn].uCast['xmiss']=dataPair(inP,inV)
zCasts[nn].dCast['wetStar']=dataPair(np.nan,np.nan)
zCasts[nn].uCast['wetStar']=dataPair(np.nan,np.nan)
zCasts[nn].dCast['sbeox0ML_L']=dataPair(np.nan,np.nan)
zCasts[nn].uCast['sbeox0ML_L']=dataPair(np.nan,np.nan)
return df0, zCasts
def loadDataFRP_SSGrid(exp='all',sel='narrow',meshPath='/ocean/eolson/MEOPAR/NEMO-forcing/grid/mesh_mask201702.nc'):
import gsw # only in this function; use to convert p to z
if exp not in {'exp1', 'exp2', 'all'}:
print('option exp='+exp+' is not defined.')
raise
if sel not in {'narrow', 'wide'}:
print('option sel='+sel+' is not defined.')
raise
df0, clist, tcor, cast19, cast25 = loadDataFRP_init(exp=exp)
# load mesh
mesh=nc.Dataset(meshPath,'r')
tmask=mesh.variables['tmask'][0,:,:,:]
gdept=mesh.variables['gdept_0'][0,:,:,:]
gdepw=mesh.variables['gdepw_0'][0,:,:,:]
nav_lat=mesh.variables['nav_lat'][:,:]
nav_lon=mesh.variables['nav_lon'][:,:]
mesh.close()
zCasts=dict()
for nn in clist:
ip=np.argmax(cast25[nn].df['prSM'].values)
ilag=df0.loc[df0.Station==nn,'ishift_sub19'].values[0]
pS_pr=df0.loc[df0.Station==nn,'pS_pr'].values[0]
pE_pr=df0.loc[df0.Station==nn,'pE_pr'].values[0]
pS_tur=df0.loc[df0.Station==nn,'pStart25'].values[0]
pE_tur=df0.loc[df0.Station==nn,'pEnd25'].values[0]
if sel=='narrow':
pS=pS_tur
pE=pE_tur
prebin=False
elif sel=='wide':
pS=pS_pr
pE=pE_pr
prebin=True
jj, ii=geo_tools.find_closest_model_point(df0.loc[df0.Station==nn]['LonDecDeg'].values[0],
df0.loc[df0.Station==nn]['LatDecDeg'].values[0], nav_lon, nav_lat)
zmax=-1*gsw.z_from_p(cast25[nn].df.loc[ip,'prSM'],
df0.loc[df0.Station==nn]['LatDecDeg'])
edges=gdepw[:,jj,ii]
targets=gdept[:,jj,ii]
edges=edges[edges<zmax]
targets=targets[:(len(edges)-1)]
parDZ=.78
xmisDZ=.36
turbDZ=.67
zshiftdict={'gsw_ctA0':0.0,'gsw_srA0':0.0,'xmiss':xmisDZ,'seaTurbMtr':turbDZ,'par':parDZ,
'wetStar':0.0,'sbeox0ML_L':0.0}
dCast=pd.DataFrame()
uCast=pd.DataFrame()
for var in ('gsw_ctA0','gsw_srA0','xmiss','par','wetStar','sbeox0ML_L'):
if not nn==14.2:
#downcast
inP=-1*gsw.z_from_p(cast25[nn].df.loc[pS:ip]['prSM'].values,
df0.loc[df0.Station==nn]['LatDecDeg'])-zshiftdict[var] # down z
inV=cast25[nn].df.loc[pS:ip][var].values # down var
if sel=='wide':
inV[inP<.1]=np.nan
p, out=bindepth(inP,inV,edges=edges,targets=targets,prebin=prebin)
if var=='gsw_ctA0':
dCast=pd.DataFrame(p,columns=['depth_m'])
dCast['indk']=np.arange(0,len(p))
dCast[var]=out
else:# special case where there is no downcast
if var=='gsw_ctA0':
dCast=pd.DataFrame(np.nan*np.ones(10),columns=['depth_m'])
dCast['indk']=np.nan*np.ones(10)
dCast[var]=np.nan*np.ones(10)
if not nn==14.1:
#upcast
inP=-1*gsw.z_from_p(cast25[nn].df.loc[ip:pE]['prSM'].values,
df0.loc[df0.Station==nn]['LatDecDeg'])-zshiftdict[var] # down z
inV=cast25[nn].df.loc[ip:pE][var].values # down var
if sel=='wide':
inV[inP<.1]=np.nan
p, out=bindepth(inP,inV,edges=edges,targets=targets,prebin=prebin)
if var=='gsw_ctA0':
                    uCast=pd.DataFrame(p,columns=['depth_m'])
import datetime as dt
import streamlit as st
import pandas as pd
import numpy as np
import plotly.graph_objects as go
import numerapi
import plotly.express as px
from utils import *
# setup backend
napi = numerapi.SignalsAPI()
leaderboard_df = pd.DataFrame(napi.get_leaderboard(limit = 10_000))
MODELS_TO_CHECK = leaderboard_df['username'].sort_values().to_list()
DEFAULT_MODELS = [
'kenfus',
'kenfus_t_500',
'kenfus_t_600',
'kenfus_t_700'
]
ROUNDS_TO_SHOW = 20
# setup website
st.set_page_config(page_title = 'Numerai Dashboard')
st.title('Numerai Dashboard')
st.write(
'''
The Numerai Tournament is where you build machine learning models on abstract financial data to predict the stock market. Your models can be staked with the NMR cryptocurrency to earn rewards based on performance.
'''
)
st.subheader('Motivation')
st.write(
'''
To decide which model is best, you need to compare them on different criteria. On the [official leaderboard](https://signals.numer.ai/tournament), this is difficult to do.
'''
)
st.header('Scoring')
st.write(
'''
You are primarily scored on the correlation `corr` between your predictions and the targets.
You are also scored on meta model contribution `mmc`. The higher the better.
'''
)
st.subheader('MMC')
st.write(
'''
Each user is incentivized to maximize their individual correlation score. But Numerai wants to maximize the meta model's correlation score, where the meta model is the stake weighted ensemble of all submissions.
Meta model contribution `mmc` is designed to bridge this gap. Whereas correlation rewards individual performance, `mmc` rewards contribution to the meta model's correlation or group performance.
'''
)
with st.sidebar:
st.header('Settings')
st.write('# Graphs')
hover_mode = st.checkbox('Detailed hover mode')
show_only_resolved_rounds = st.checkbox('Show only resolved rounds', value = False)
selected_models = st.multiselect(
'Select models for reputation analysis:',
MODELS_TO_CHECK,
DEFAULT_MODELS
)
st.write('# Returns')
cum_corr = st.checkbox('Cumulative returns')
mmc_multi = st.selectbox(
'Select multiplier for MMC',
[0.5, 1, 2, 3],
3
)
rep_dfs = []
for model in selected_models:
df_model_rank_rep = pd.DataFrame(napi.daily_model_performances(model))
df_model_rank_rep['model'] = model
#df_model_rank_rep.sort_values('')
rep_dfs.append(df_model_rank_rep)
rep_dfs = pd.concat(rep_dfs)
from itertools import product as it_product
from typing import List, Dict
import numpy as np
import os
import pandas as pd
from scipy.stats import spearmanr, wilcoxon
from provided_code.constants_class import ModelParameters
from provided_code.data_loader import DataLoader
from provided_code.dose_evaluation_class import EvaluateDose
from provided_code.general_functions import get_paths, get_predictions_to_optimize
def consolidate_data_for_analysis(cs: ModelParameters, force_new_consolidate: bool = False) \
-> [pd.DataFrame, pd.DataFrame, pd.DataFrame, pd.DataFrame, pd.DataFrame, pd.DataFrame, pd.DataFrame]:
"""
Consolidated data of all reference plans, dose predictions, and KBP plans. This may take about an hour to run, but
only needs to be run once for a given set of experiments.
Args:
cs: A constants object.
force_new_consolidate: Flag that will force consolidating data, which will overwrite previous data that was
consolidated in previous iterations.
Returns:
df_dose_error: Summary of dose error
df_dvh_metrics: Summary of DVH metric performance (can be converted to DVH error later)
df_clinical_criteria: Summary of clinical criteria performance
df_ref_dvh_metrics: Summary of reference dose DVH metrics
df_ref_clinical_criteria: Summary of reference dose clinical criteria performance
df_objective_data: The data from the objective functions (e.g., weights, objective function values)
df_solve_time: The time it took to solve models
"""
# Run consolidate_data_for_analysis when new predictions or plans
consolidate_data_paths = {'dose': f'{cs.results_data_dir}/dose_error_df.csv',
'dvh': f'{cs.results_data_dir}/dvh_metric_df.csv',
'clinical_criteria': f'{cs.results_data_dir}/clinical_criteria_df.csv',
'ref_dvh': f'{cs.results_data_dir}/reference_metrics.csv',
'ref_clinical_criteria': f'{cs.results_data_dir}/reference_criteria.csv',
'weights': f'{cs.results_data_dir}/weights_df.csv',
'solve_time': f'{cs.results_data_dir}/solve_time_df.csv'
}
# Check if consolidated data already exists
no_consolidated_date = False
for p in consolidate_data_paths.values():
if not os.path.isfile(p):
print(p)
no_consolidated_date = True
os.makedirs(cs.results_data_dir, exist_ok=True) # Make dir for results
# Consolidate data if it doesn't exist yet or force flag is True
if no_consolidated_date or force_new_consolidate:
# Prepare strings for data that will be evaluated
predictions_to_optimize, prediction_names = get_predictions_to_optimize(cs)
patient_names = os.listdir(cs.reference_data_dir)
hold_out_plan_paths = get_paths(cs.reference_data_dir, ext='') # list of paths used for held out testing
# Evaluate dose metrics
patient_data_loader = DataLoader(hold_out_plan_paths, mode_name='evaluation') # Set data loader
dose_evaluator_sample = EvaluateDose(patient_data_loader)
# Make reference dose DVH metrics and clinical criteria
dose_evaluator_sample.make_metrics()
dose_evaluator_sample.melt_dvh_metrics('Reference', 'reference_dose_metric_df').to_csv(
consolidate_data_paths['ref_dvh'])
dose_evaluator_sample.melt_dvh_metrics('Reference', 'reference_criteria_df').to_csv(
consolidate_data_paths['ref_clinical_criteria'])
# Initialize DataFrames for all scores and errors
optimizer_names = os.listdir(cs.plans_dir) # Get names of all optimizers
dose_error_index_dict, dvh_metric_index_dict = make_error_and_metric_indices(patient_names,
dose_evaluator_sample,
optimizer_names)
df_dose_error_indices = pd.MultiIndex.from_product(**dose_error_index_dict)
df_dvh_error_indices = pd.MultiIndex.from_arrays(**dvh_metric_index_dict)
# Make DataFrames
df_dose_error = pd.DataFrame(columns=prediction_names, index=df_dose_error_indices)
df_solve_time = pd.DataFrame(columns=prediction_names, index=df_dose_error_indices)
df_dvh_metrics = pd.DataFrame(columns=prediction_names, index=df_dvh_error_indices)
df_clinical_criteria = pd.DataFrame(columns=prediction_names, index=df_dvh_error_indices)
weights_list = []
weight_columns = []
# Iterate through each prediction in the list of prediction_names
for prediction in prediction_names:
# Make a dataloader that loads predicted dose distributions
prediction_paths = get_paths(f'{cs.prediction_dir}/{prediction}', ext='csv')
prediction_dose_loader = DataLoader(prediction_paths, mode_name='predicted_dose') # Set prediction loader
# Evaluate predictions and plans with respect to ground truth
dose_evaluator = EvaluateDose(patient_data_loader, prediction_dose_loader)
populate_error_dfs(dose_evaluator, df_dose_error, df_dvh_metrics, df_clinical_criteria, prediction,
'Prediction')
# Make dataloader for plan dose distributions
for opt_name in optimizer_names:
print(opt_name)
# Get the paths of all optimized plans for prediction
cs.get_optimization_directories(prediction, opt_name)
weights_list, weight_columns = populate_weights_df(cs, weights_list)
populate_solve_time_df(cs, df_solve_time)
# Make data loader to load plan doses
plan_paths = get_paths(cs.plan_dose_from_pred_dir, ext='csv') # List of all plan dose paths
plan_dose_loader = DataLoader(plan_paths, mode_name='predicted_dose') # Set plan dose loader
plan_evaluator = EvaluateDose(patient_data_loader, plan_dose_loader) # Make evaluation object
# Ignore prediction name if no data exists, o/w populate DataFrames
if not patient_data_loader.file_paths_list:
print('No patient information was given to calculate metrics')
else:
# Evaluate prediction errors
populate_error_dfs(plan_evaluator, df_dose_error, df_dvh_metrics, df_clinical_criteria, prediction,
opt_name)
# Clean up weights
weights_df = pd.DataFrame(weights_list, columns=weight_columns)
weights_df.set_index(['Objective', 'Structure', 'Patients', 'Dose_type', 'Prediction'], inplace=True)
weights_df = weights_df.unstack('Prediction')
# Save dose and DVH error DataFrames
df_dose_error.to_csv(consolidate_data_paths['dose'])
df_dvh_metrics.to_csv(consolidate_data_paths['dvh'])
df_clinical_criteria.to_csv(consolidate_data_paths['clinical_criteria'])
weights_df.to_csv(consolidate_data_paths['weights'])
df_solve_time.to_csv(consolidate_data_paths['solve_time'])
# Loads the DataFrames that contain consolidated data
df_dose_error = pd.read_csv(consolidate_data_paths['dose'], index_col=[0, 1])
df_dvh_metrics = pd.read_csv(consolidate_data_paths['dvh'], index_col=[0, 1, 2, 3])
df_clinical_criteria = pd.read_csv(consolidate_data_paths['clinical_criteria'], index_col=[0, 1, 2, 3])
df_ref_dvh_metrics = pd.read_csv(consolidate_data_paths['ref_dvh'], index_col=[0, 1, 2, 3], squeeze=True)
df_ref_dvh_metrics.index.set_names(df_dvh_metrics.index.names, inplace=True)
df_ref_clinical_criteria = pd.read_csv(consolidate_data_paths['ref_clinical_criteria'], index_col=[0, 1, 2, 3],
squeeze=True)
df_ref_clinical_criteria.index.set_names(df_clinical_criteria.index.names, inplace=True)
    df_objective_data = pd.read_csv(consolidate_data_paths['weights'], index_col=[0, 1, 2, 3], header=[0, 1])
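    # --- Hedged illustration (added; not part of the original pipeline). The
    # weights file written above holds a DataFrame whose columns were unstacked
    # into a two-level MultiIndex, which is why it is read back with
    # header=[0, 1] and a multi-level index_col. A minimal round-trip sketch
    # with invented names:
    #   idx = pd.MultiIndex.from_product([['obj_a', 'obj_b'], ['PTV', 'OAR']],
    #                                    names=['Objective', 'Structure'])
    #   cols = pd.MultiIndex.from_product([['Weight'], ['pred_1', 'pred_2']],
    #                                     names=[None, 'Prediction'])
    #   weights = pd.DataFrame(np.arange(8.0).reshape(4, 2), index=idx, columns=cols)
    #   restored = pd.read_csv(io.StringIO(weights.to_csv()),
    #                          index_col=[0, 1], header=[0, 1])
    #   assert restored.shape == weights.shape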
import os
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sbn
import click
# define functions
def getUnique(df):
"""Calcualtes percentage of unique reads"""
return (
df.loc[df[0] == "total_nodups", 1].values[0]
/ df.loc[df[0] == "total_mapped", 1].values[0]
)
def getCisTrans(df):
"""Calcualtes percentage of cis and trans chromosomal reads"""
cis = (
df.loc[df[0] == "cis", 1].values[0]
/ df.loc[df[0] == "total_nodups", 1].values[0]
)
trans = (
df.loc[df[0] == "trans", 1].values[0]
/ df.loc[df[0] == "total_nodups", 1].values[0]
)
return {"cis": cis, "trans": trans}
def getCisDist(df):
"""Calcualtes percentage of reads at certain distance"""
result = {}
for dist in [
"cis_1kb+",
"cis_2kb+",
"cis_4kb+",
"cis_10kb+",
"cis_20kb+",
"cis_40kb+",
]:
result[dist] = (
df.loc[df[0] == dist, 1].values[0]
/ df.loc[df[0] == "pair_types/UU", 1].values[0]
)
return result
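# --- Hedged usage sketch (added for illustration; not part of the original script) ---
# The three helpers above expect a two-column, header-less pairtools-style stats
# table: column 0 holds the metric name, column 1 its count. The metric names
# used below mirror the lookups inside the functions; a real stats file is
# assumed to provide them.
def _example_stats_usage():
    demo = pd.DataFrame(
        [
            ["total_mapped", 1000],
            ["total_nodups", 800],
            ["cis", 500],
            ["trans", 300],
            ["pair_types/UU", 800],
            ["cis_1kb+", 400],
            ["cis_2kb+", 350],
            ["cis_4kb+", 300],
            ["cis_10kb+", 250],
            ["cis_20kb+", 200],
            ["cis_40kb+", 150],
        ]
    )
    print(getUnique(demo))    # 0.8
    print(getCisTrans(demo))  # {'cis': 0.625, 'trans': 0.375}
    print(getCisDist(demo))   # distance bins as fractions of pair_types/UU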
@click.command()
@click.option("--inputdir", help="Directory with the stats files.")
@click.option("--resultsdir", help="Directory for the Results")
def qc1(inputdir, resultsdir):
# set wd
os.chdir(inputdir)
# load in stats
stats = {
i.split("_")[0]: pd.read_csv(i, sep="\t", header=None)
for i in os.listdir()
if "stats" in i
}
# get unique reads
unique = {barcode: getUnique(frame) for barcode, frame in stats.items()}
uniqueF = pd.DataFrame(unique, index=[0])
    uniqueMolt = pd.melt(uniqueF)
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Jan 13 11:22:34 2022
@author: mariaolaru
"""
import os
import pandas as pd
import numpy as np
import statsmodels.api as sm
import scipy.stats as stat
import xarray as xr
from matplotlib import pyplot as plt
import math
from sklearn.preprocessing import MinMaxScaler, StandardScaler, MaxAbsScaler, RobustScaler
from sklearn.decomposition import PCA
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LinearRegression
from sklearn.metrics import mean_absolute_error, mean_squared_error, r2_score
import torch
from torch.utils.data import TensorDataset, DataLoader
import torch.optim as optim
from datetime import date
import datetime
def get_files(data_dir):
files = [f for f in os.listdir(data_dir) if os.path.isfile(os.path.join(data_dir, f))]
if '.DS_Store' in files:
i = files.index('.DS_Store')
del files[i]
return files
def make_dir(fp, dirname):
path = os.path.join(fp, dirname)
if not os.path.isdir(path):
os.mkdir(path)
return path
def preproc_data(df, label, file=None):
if label == 'pkg':
df_out = df[df['Off_Wrist'] == 0]
col_keep = ['Date_Time', 'BK', 'DK', 'Tremor_Score']
df_out = df_out.loc[:, col_keep]
df_out = df_out.rename(columns={'Date_Time': 'pkg_dt',
'BK': 'pkg_bk',
'DK': 'pkg_dk',
'Tremor_Score': 'pkg_tremor'})
df_out['pkg_bk'] = df_out['pkg_bk']*-1
elif label == 'apple':
df_out = df.rename(columns={'time': 'timestamp'})
if 'tremor' in file:
df_out = df_out.rename(columns={'probability': 'apple_tremor'})
if 'dyskinesia' in file:
df_out = df_out.rename(columns={'probability': 'apple_dk'})
return df_out
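# --- Hedged usage sketch (added for illustration; the values are made up, but
# the column names match the ones preproc_data() expects for PKG exports) ---
def _example_preproc_pkg():
    demo = pd.DataFrame({
        'Date_Time': ['2022-01-01 12:00:00', '2022-01-01 12:02:00'],
        'BK': [20.0, 30.0],
        'DK': [0.1, 0.2],
        'Tremor_Score': [0.0, 1.0],
        'Off_Wrist': [0, 1],  # the second row is dropped as off-wrist
    })
    out = preproc_data(demo, 'pkg')
    # out keeps 1 row with columns pkg_dt, pkg_bk (sign-flipped), pkg_dk, pkg_tremor
    return out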
def merge_targets(dfp, df_out):
if df_out.empty:
return dfp
df_merged = df_out.merge(dfp, how='outer')
return df_merged
def print_loop(i, num, message, file):
print('(' + str(i+1) + '/' + str(num) + ') ' + message + ': ', file)
def preproc_files(data_dir):
files = get_files(data_dir)
nfiles = len(files)
df_pkg = pd.DataFrame([])
df_apple = pd.DataFrame([])
for i in range(nfiles):
file = files[i]
print_loop(i, nfiles, 'preprocessing file', file)
file_fp = os.path.join(data_dir, file)
df = pd.read_csv(file_fp)
if 'BK' in df.columns: #pkg data
dfp = preproc_data(df, 'pkg')
df_pkg = merge_targets(dfp, df_pkg)
pkg_dir = make_dir(data_dir, 'orig_pkg')
os.replace(file_fp, os.path.join(pkg_dir, file))
if 'time' in df.columns: #apple data
dfp = preproc_data(df, 'apple', file)
df_apple = merge_targets(dfp, df_apple)
apple_dir = make_dir(data_dir, 'orig_apple')
os.replace(file_fp, os.path.join(apple_dir, file))
if not df_pkg.empty:
out_pkg_file = 'pkg_2min_scores.csv'
df_pkg.to_csv(os.path.join(data_dir, out_pkg_file), index=False)
if not df_apple.empty:
out_apple_file = 'apple_1min_scores.csv'
df_apple.to_csv(os.path.join(data_dir, out_apple_file), index=False)
def pivot_df(df):
#assumes values of pivot table are in column #1 and columns are column #0 & #2
dfp = df.pivot_table(index = df.index,
values = [df.columns[1]],
columns = [df.columns[0], df.columns[2]])
dfp.columns = ['_'.join(map(str, col)) for col in dfp.columns]
return dfp
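# --- Hedged illustration (added; column names are invented) of the wide table
# pivot_df() produces: the 2nd column supplies the values, while the 1st and
# 3rd columns are flattened into one underscore-joined column name, keyed on
# the original (timestamp) index. ---
def _example_pivot_df():
    long_df = pd.DataFrame({
        'channel': ['ch1', 'ch1', 'ch2', 'ch2'],
        'spectra': [1.0, 2.0, 3.0, 4.0],
        'f_0': [5, 10, 5, 10],
    }, index=pd.to_datetime(['2022-01-01'] * 4))
    wide = pivot_df(long_df)
    # wide.columns -> ['spectra_ch1_5', 'spectra_ch1_10', 'spectra_ch2_5', 'spectra_ch2_10']
    return wide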
def average_2min_scores(df_psd):
    # find indices of timestamps on odd minutes
s = pd.Series(df_psd.index.minute % 2 == 1)
odd_i = s[s].index.values
odd_prev_i = odd_i-1
diff = (df_psd.index[odd_i] - df_psd.index[odd_prev_i]).astype('timedelta64[m]')
    s = pd.Series(diff == 1)
from datetime import datetime
import numpy as np
import pytest
from pandas.core.dtypes.cast import find_common_type, is_dtype_equal
import pandas as pd
from pandas import DataFrame, Index, MultiIndex, Series
import pandas._testing as tm
class TestDataFrameCombineFirst:
def test_combine_first_mixed(self):
a = Series(["a", "b"], index=range(2))
b = Series(range(2), index=range(2))
f = DataFrame({"A": a, "B": b})
a = Series(["a", "b"], index=range(5, 7))
b = Series(range(2), index=range(5, 7))
g = DataFrame({"A": a, "B": b})
exp = DataFrame({"A": list("abab"), "B": [0, 1, 0, 1]}, index=[0, 1, 5, 6])
combined = f.combine_first(g)
tm.assert_frame_equal(combined, exp)
def test_combine_first(self, float_frame):
# disjoint
head, tail = float_frame[:5], float_frame[5:]
combined = head.combine_first(tail)
reordered_frame = float_frame.reindex(combined.index)
tm.assert_frame_equal(combined, reordered_frame)
assert tm.equalContents(combined.columns, float_frame.columns)
tm.assert_series_equal(combined["A"], reordered_frame["A"])
# same index
fcopy = float_frame.copy()
fcopy["A"] = 1
del fcopy["C"]
fcopy2 = float_frame.copy()
fcopy2["B"] = 0
del fcopy2["D"]
combined = fcopy.combine_first(fcopy2)
assert (combined["A"] == 1).all()
tm.assert_series_equal(combined["B"], fcopy["B"])
tm.assert_series_equal(combined["C"], fcopy2["C"])
tm.assert_series_equal(combined["D"], fcopy["D"])
# overlap
head, tail = reordered_frame[:10].copy(), reordered_frame
head["A"] = 1
combined = head.combine_first(tail)
assert (combined["A"][:10] == 1).all()
# reverse overlap
tail["A"][:10] = 0
combined = tail.combine_first(head)
assert (combined["A"][:10] == 0).all()
# no overlap
f = float_frame[:10]
g = float_frame[10:]
combined = f.combine_first(g)
tm.assert_series_equal(combined["A"].reindex(f.index), f["A"])
tm.assert_series_equal(combined["A"].reindex(g.index), g["A"])
# corner cases
comb = float_frame.combine_first(DataFrame())
tm.assert_frame_equal(comb, float_frame)
comb = DataFrame().combine_first(float_frame)
tm.assert_frame_equal(comb, float_frame)
comb = float_frame.combine_first(DataFrame(index=["faz", "boo"]))
assert "faz" in comb.index
# #2525
df = DataFrame({"a": [1]}, index=[datetime(2012, 1, 1)])
df2 = DataFrame(columns=["b"])
result = df.combine_first(df2)
assert "b" in result
def test_combine_first_mixed_bug(self):
idx = Index(["a", "b", "c", "e"])
ser1 = Series([5.0, -9.0, 4.0, 100.0], index=idx)
ser2 = Series(["a", "b", "c", "e"], index=idx)
ser3 = Series([12, 4, 5, 97], index=idx)
frame1 = DataFrame({"col0": ser1, "col2": ser2, "col3": ser3})
idx = Index(["a", "b", "c", "f"])
ser1 = Series([5.0, -9.0, 4.0, 100.0], index=idx)
ser2 = Series(["a", "b", "c", "f"], index=idx)
ser3 = Series([12, 4, 5, 97], index=idx)
frame2 = DataFrame({"col1": ser1, "col2": ser2, "col5": ser3})
combined = frame1.combine_first(frame2)
assert len(combined.columns) == 5
def test_combine_first_same_as_in_update(self):
# gh 3016 (same as in update)
df = DataFrame(
[[1.0, 2.0, False, True], [4.0, 5.0, True, False]],
columns=["A", "B", "bool1", "bool2"],
)
other = DataFrame([[45, 45]], index=[0], columns=["A", "B"])
result = df.combine_first(other)
tm.assert_frame_equal(result, df)
df.loc[0, "A"] = np.nan
result = df.combine_first(other)
df.loc[0, "A"] = 45
tm.assert_frame_equal(result, df)
def test_combine_first_doc_example(self):
# doc example
df1 = DataFrame(
{"A": [1.0, np.nan, 3.0, 5.0, np.nan], "B": [np.nan, 2.0, 3.0, np.nan, 6.0]}
)
df2 = DataFrame(
{
"A": [5.0, 2.0, 4.0, np.nan, 3.0, 7.0],
"B": [np.nan, np.nan, 3.0, 4.0, 6.0, 8.0],
}
)
result = df1.combine_first(df2)
expected = DataFrame({"A": [1, 2, 3, 5, 3, 7.0], "B": [np.nan, 2, 3, 4, 6, 8]})
tm.assert_frame_equal(result, expected)
def test_combine_first_return_obj_type_with_bools(self):
# GH3552
df1 = DataFrame(
[[np.nan, 3.0, True], [-4.6, np.nan, True], [np.nan, 7.0, False]]
)
df2 = DataFrame([[-42.6, np.nan, True], [-5.0, 1.6, False]], index=[1, 2])
expected = Series([True, True, False], name=2, dtype=bool)
result_12 = df1.combine_first(df2)[2]
tm.assert_series_equal(result_12, expected)
result_21 = df2.combine_first(df1)[2]
tm.assert_series_equal(result_21, expected)
@pytest.mark.parametrize(
"data1, data2, data_expected",
(
(
[datetime(2000, 1, 1), datetime(2000, 1, 2), datetime(2000, 1, 3)],
[pd.NaT, pd.NaT, pd.NaT],
[datetime(2000, 1, 1), datetime(2000, 1, 2), datetime(2000, 1, 3)],
),
(
[pd.NaT, pd.NaT, pd.NaT],
[datetime(2000, 1, 1), datetime(2000, 1, 2), datetime(2000, 1, 3)],
[datetime(2000, 1, 1), datetime(2000, 1, 2), datetime(2000, 1, 3)],
),
(
[datetime(2000, 1, 2), pd.NaT, pd.NaT],
[datetime(2000, 1, 1), datetime(2000, 1, 2), datetime(2000, 1, 3)],
[datetime(2000, 1, 2), datetime(2000, 1, 2), datetime(2000, 1, 3)],
),
(
[datetime(2000, 1, 1), datetime(2000, 1, 2), datetime(2000, 1, 3)],
[datetime(2000, 1, 2), pd.NaT, pd.NaT],
[datetime(2000, 1, 1), datetime(2000, 1, 2), datetime(2000, 1, 3)],
),
),
)
def test_combine_first_convert_datatime_correctly(
self, data1, data2, data_expected
):
# GH 3593
df1, df2 = DataFrame({"a": data1}), DataFrame({"a": data2})
result = df1.combine_first(df2)
expected = DataFrame({"a": data_expected})
tm.assert_frame_equal(result, expected)
def test_combine_first_align_nan(self):
# GH 7509 (not fixed)
        dfa = DataFrame([[pd.Timestamp("2011-01-01")
from io import StringIO
import pandas as pd
import numpy as np
import pytest
import bioframe
import bioframe.core.checks as checks
# import pyranges as pr
# def bioframe_to_pyranges(df):
# pydf = df.copy()
# pydf.rename(
# {"chrom": "Chromosome", "start": "Start", "end": "End"},
# axis="columns",
# inplace=True,
# )
# return pr.PyRanges(pydf)
# def pyranges_to_bioframe(pydf):
# df = pydf.df
# df.rename(
# {"Chromosome": "chrom", "Start": "start", "End": "end", "Count": "n_intervals"},
# axis="columns",
# inplace=True,
# )
# return df
# def pyranges_overlap_to_bioframe(pydf):
# ## convert the df output by pyranges join into a bioframe-compatible format
# df = pydf.df.copy()
# df.rename(
# {
# "Chromosome": "chrom_1",
# "Start": "start_1",
# "End": "end_1",
# "Start_b": "start_2",
# "End_b": "end_2",
# },
# axis="columns",
# inplace=True,
# )
# df["chrom_1"] = df["chrom_1"].values.astype("object") # to remove categories
# df["chrom_2"] = df["chrom_1"].values
# return df
chroms = ["chr12", "chrX"]
def mock_bioframe(num_entries=100):
pos = np.random.randint(1, 1e7, size=(num_entries, 2))
df = pd.DataFrame()
df["chrom"] = np.random.choice(chroms, num_entries)
df["start"] = np.min(pos, axis=1)
df["end"] = np.max(pos, axis=1)
df.sort_values(["chrom", "start"], inplace=True)
return df
############# tests #####################
def test_select():
df1 = pd.DataFrame(
[["chrX", 3, 8], ["chr1", 4, 5], ["chrX", 1, 5]],
columns=["chrom", "start", "end"],
)
region1 = "chr1:4-10"
df_result = pd.DataFrame([["chr1", 4, 5]], columns=["chrom", "start", "end"])
pd.testing.assert_frame_equal(
df_result, bioframe.select(df1, region1).reset_index(drop=True)
)
region1 = "chrX"
df_result = pd.DataFrame(
[["chrX", 3, 8], ["chrX", 1, 5]], columns=["chrom", "start", "end"]
)
pd.testing.assert_frame_equal(
df_result, bioframe.select(df1, region1).reset_index(drop=True)
)
region1 = "chrX:4-6"
df_result = pd.DataFrame(
[["chrX", 3, 8], ["chrX", 1, 5]], columns=["chrom", "start", "end"]
)
pd.testing.assert_frame_equal(
df_result, bioframe.select(df1, region1).reset_index(drop=True)
)
### select with non-standard column names
region1 = "chrX:4-6"
new_names = ["chr", "chrstart", "chrend"]
df1 = pd.DataFrame(
[["chrX", 3, 8], ["chr1", 4, 5], ["chrX", 1, 5]],
columns=new_names,
)
df_result = pd.DataFrame(
[["chrX", 3, 8], ["chrX", 1, 5]],
columns=new_names,
)
pd.testing.assert_frame_equal(
df_result, bioframe.select(df1, region1, cols=new_names).reset_index(drop=True)
)
region1 = "chrX"
pd.testing.assert_frame_equal(
df_result, bioframe.select(df1, region1, cols=new_names).reset_index(drop=True)
)
### select from a DataFrame with NaNs
colnames = ["chrom", "start", "end", "view_region"]
df = pd.DataFrame(
[
["chr1", -6, 12, "chr1p"],
[pd.NA, pd.NA, pd.NA, "chr1q"],
["chrX", 1, 8, "chrX_0"],
],
columns=colnames,
).astype({"start": pd.Int64Dtype(), "end": pd.Int64Dtype()})
df_result = pd.DataFrame(
[["chr1", -6, 12, "chr1p"]],
columns=colnames,
).astype({"start": pd.Int64Dtype(), "end": pd.Int64Dtype()})
region1 = "chr1:0-1"
pd.testing.assert_frame_equal(
df_result, bioframe.select(df, region1).reset_index(drop=True)
)
def test_trim():
### trim with view_df
view_df = pd.DataFrame(
[
["chr1", 0, 12, "chr1p"],
["chr1", 13, 26, "chr1q"],
["chrX", 1, 8, "chrX_0"],
],
columns=["chrom", "start", "end", "name"],
)
df = pd.DataFrame(
[
["chr1", -6, 12, "chr1p"],
["chr1", 0, 12, "chr1p"],
["chr1", 32, 36, "chr1q"],
["chrX", 1, 8, "chrX_0"],
],
columns=["chrom", "start", "end", "view_region"],
)
df_trimmed = pd.DataFrame(
[
["chr1", 0, 12, "chr1p"],
["chr1", 0, 12, "chr1p"],
["chr1", 26, 26, "chr1q"],
["chrX", 1, 8, "chrX_0"],
],
columns=["chrom", "start", "end", "view_region"],
)
with pytest.raises(ValueError):
bioframe.trim(df, view_df=view_df)
# df_view_col already exists, so need to specify it:
pd.testing.assert_frame_equal(
df_trimmed, bioframe.trim(df, view_df=view_df, df_view_col="view_region")
)
### trim with view_df interpreted from dictionary for chromsizes
chromsizes = {"chr1": 20, "chrX_0": 5}
df = pd.DataFrame(
[
["chr1", 0, 12],
["chr1", 13, 26],
["chrX_0", 1, 8],
],
columns=["chrom", "startFunky", "end"],
)
df_trimmed = pd.DataFrame(
[
["chr1", 0, 12],
["chr1", 13, 20],
["chrX_0", 1, 5],
],
columns=["chrom", "startFunky", "end"],
).astype({"startFunky": pd.Int64Dtype(), "end": pd.Int64Dtype()})
pd.testing.assert_frame_equal(
df_trimmed,
bioframe.trim(
df,
view_df=chromsizes,
cols=["chrom", "startFunky", "end"],
return_view_columns=False,
),
)
### trim with default limits=None and negative values
df = pd.DataFrame(
[
["chr1", -4, 12],
["chr1", 13, 26],
["chrX", -5, -1],
],
columns=["chrom", "start", "end"],
)
df_trimmed = pd.DataFrame(
[
["chr1", 0, 12],
["chr1", 13, 26],
["chrX", 0, 0],
],
columns=["chrom", "start", "end"],
)
pd.testing.assert_frame_equal(df_trimmed, bioframe.trim(df))
### trim when there are NaN intervals
df = pd.DataFrame(
[
["chr1", -4, 12, "chr1p"],
[pd.NA, pd.NA, pd.NA, "chr1q"],
["chrX", -5, -1, "chrX_0"],
],
columns=["chrom", "start", "end", "region"],
).astype({"start": pd.Int64Dtype(), "end": pd.Int64Dtype()})
df_trimmed = pd.DataFrame(
[
["chr1", 0, 12, "chr1p"],
[pd.NA, pd.NA, pd.NA, "chr1q"],
["chrX", 0, 0, "chrX_0"],
],
columns=["chrom", "start", "end", "region"],
).astype({"start": pd.Int64Dtype(), "end": pd.Int64Dtype()})
pd.testing.assert_frame_equal(df_trimmed, bioframe.trim(df))
### trim with view_df and NA intervals
view_df = pd.DataFrame(
[
["chr1", 0, 12, "chr1p"],
["chr1", 13, 26, "chr1q"],
["chrX", 1, 12, "chrX_0"],
],
columns=["chrom", "start", "end", "name"],
)
df = pd.DataFrame(
[
["chr1", -6, 12],
["chr1", 0, 12],
[pd.NA, pd.NA, pd.NA],
["chrX", 1, 20],
],
columns=["chrom", "start", "end"],
).astype({"start": pd.Int64Dtype(), "end": pd.Int64Dtype()})
df_trimmed = pd.DataFrame(
[
["chr1", 0, 12, "chr1p"],
["chr1", 0, 12, "chr1p"],
[pd.NA, pd.NA, pd.NA, pd.NA],
["chrX", 1, 12, "chrX_0"],
],
columns=["chrom", "start", "end", "view_region"],
).astype({"start": pd.Int64Dtype(), "end": pd.Int64Dtype()})
# infer df_view_col with assign_view and ignore NAs
pd.testing.assert_frame_equal(
df_trimmed,
bioframe.trim(df, view_df=view_df, df_view_col=None, return_view_columns=True)[
["chrom", "start", "end", "view_region"]
],
)
def test_expand():
d = """chrom start end
0 chr1 1 5
1 chr1 50 55
2 chr2 100 200"""
fake_bioframe = pd.read_csv(StringIO(d), sep=r"\s+")
expand_bp = 10
fake_expanded = bioframe.expand(fake_bioframe, expand_bp)
d = """chrom start end
0 chr1 -9 15
1 chr1 40 65
2 chr2 90 210"""
df = pd.read_csv(StringIO(d), sep=r"\s+")
pd.testing.assert_frame_equal(df, fake_expanded)
# expand with negative pad
expand_bp = -10
fake_expanded = bioframe.expand(fake_bioframe, expand_bp)
d = """chrom start end
0 chr1 3 3
1 chr1 52 52
2 chr2 110 190"""
df = pd.read_csv(StringIO(d), sep=r"\s+")
pd.testing.assert_frame_equal(df, fake_expanded)
expand_bp = -10
fake_expanded = bioframe.expand(fake_bioframe, expand_bp, side="left")
d = """chrom start end
0 chr1 3 5
1 chr1 52 55
2 chr2 110 200"""
df = pd.read_csv(StringIO(d), sep=r"\s+")
pd.testing.assert_frame_equal(df, fake_expanded)
# expand with multiplicative pad
mult = 0
fake_expanded = bioframe.expand(fake_bioframe, pad=None, scale=mult)
d = """chrom start end
0 chr1 3 3
1 chr1 52 52
2 chr2 150 150"""
df = pd.read_csv(StringIO(d), sep=r"\s+")
pd.testing.assert_frame_equal(df, fake_expanded)
mult = 2.0
fake_expanded = bioframe.expand(fake_bioframe, pad=None, scale=mult)
d = """chrom start end
0 chr1 -1 7
1 chr1 48 58
2 chr2 50 250"""
df = pd.read_csv(StringIO(d), sep=r"\s+")
pd.testing.assert_frame_equal(df, fake_expanded)
# expand with NA and non-integer multiplicative pad
d = """chrom start end
0 chr1 1 5
1 NA NA NA
2 chr2 100 200"""
fake_bioframe = pd.read_csv(StringIO(d), sep=r"\s+").astype(
{"start": pd.Int64Dtype(), "end": pd.Int64Dtype()}
)
mult = 1.10
fake_expanded = bioframe.expand(fake_bioframe, pad=None, scale=mult)
d = """chrom start end
0 chr1 1 5
1 NA NA NA
2 chr2 95 205"""
df = pd.read_csv(StringIO(d), sep=r"\s+").astype(
{"start": pd.Int64Dtype(), "end": pd.Int64Dtype()}
)
pd.testing.assert_frame_equal(df, fake_expanded)
def test_overlap():
### test consistency of overlap(how='inner') with pyranges.join ###
### note does not test overlap_start or overlap_end columns of bioframe.overlap
df1 = mock_bioframe()
df2 = mock_bioframe()
assert df1.equals(df2) == False
# p1 = bioframe_to_pyranges(df1)
# p2 = bioframe_to_pyranges(df2)
# pp = pyranges_overlap_to_bioframe(p1.join(p2, how=None))[
# ["chrom_1", "start_1", "end_1", "chrom_2", "start_2", "end_2"]
# ]
# bb = bioframe.overlap(df1, df2, how="inner")[
# ["chrom_1", "start_1", "end_1", "chrom_2", "start_2", "end_2"]
# ]
# pp = pp.sort_values(
# ["chrom_1", "start_1", "end_1", "chrom_2", "start_2", "end_2"],
# ignore_index=True,
# )
# bb = bb.sort_values(
# ["chrom_1", "start_1", "end_1", "chrom_2", "start_2", "end_2"],
# ignore_index=True,
# )
# pd.testing.assert_frame_equal(bb, pp, check_dtype=False, check_exact=False)
# print("overlap elements agree")
### test overlap on= [] ###
df1 = pd.DataFrame(
[
["chr1", 8, 12, "+", "cat"],
["chr1", 8, 12, "-", "cat"],
["chrX", 1, 8, "+", "cat"],
],
columns=["chrom1", "start", "end", "strand", "animal"],
)
df2 = pd.DataFrame(
[["chr1", 6, 10, "+", "dog"], ["chrX", 7, 10, "-", "dog"]],
columns=["chrom2", "start2", "end2", "strand", "animal"],
)
b = bioframe.overlap(
df1,
df2,
on=["animal"],
how="left",
cols1=("chrom1", "start", "end"),
cols2=("chrom2", "start2", "end2"),
return_index=True,
return_input=False,
)
assert np.sum(pd.isna(b["index_"].values)) == 3
b = bioframe.overlap(
df1,
df2,
on=["strand"],
how="left",
cols1=("chrom1", "start", "end"),
cols2=("chrom2", "start2", "end2"),
return_index=True,
return_input=False,
)
assert np.sum(pd.isna(b["index_"].values)) == 2
b = bioframe.overlap(
df1,
df2,
on=None,
how="left",
cols1=("chrom1", "start", "end"),
cols2=("chrom2", "start2", "end2"),
return_index=True,
return_input=False,
)
assert np.sum(pd.isna(b["index_"].values)) == 0
### test overlap 'left', 'outer', and 'right'
b = bioframe.overlap(
df1,
df2,
on=None,
how="outer",
cols1=("chrom1", "start", "end"),
cols2=("chrom2", "start2", "end2"),
)
assert len(b) == 3
b = bioframe.overlap(
df1,
df2,
on=["animal"],
how="outer",
cols1=("chrom1", "start", "end"),
cols2=("chrom2", "start2", "end2"),
)
assert len(b) == 5
b = bioframe.overlap(
df1,
df2,
on=["animal"],
how="inner",
cols1=("chrom1", "start", "end"),
cols2=("chrom2", "start2", "end2"),
)
assert len(b) == 0
b = bioframe.overlap(
df1,
df2,
on=["animal"],
how="right",
cols1=("chrom1", "start", "end"),
cols2=("chrom2", "start2", "end2"),
)
assert len(b) == 2
b = bioframe.overlap(
df1,
df2,
on=["animal"],
how="left",
cols1=("chrom1", "start", "end"),
cols2=("chrom2", "start2", "end2"),
)
assert len(b) == 3
### test keep_order and NA handling
df1 = pd.DataFrame(
[
["chr1", 8, 12, "+"],
[pd.NA, pd.NA, pd.NA, "-"],
["chrX", 1, 8, "+"],
],
columns=["chrom", "start", "end", "strand"],
).astype({"start": pd.Int64Dtype(), "end": pd.Int64Dtype()})
df2 = pd.DataFrame(
[["chr1", 6, 10, "+"], [pd.NA, pd.NA, pd.NA, "-"], ["chrX", 7, 10, "-"]],
columns=["chrom2", "start2", "end2", "strand"],
).astype({"start2": pd.Int64Dtype(), "end2": pd.Int64Dtype()})
assert df1.equals(
bioframe.overlap(
df1, df2, how="left", keep_order=True, cols2=["chrom2", "start2", "end2"]
)[["chrom", "start", "end", "strand"]]
)
assert ~df1.equals(
bioframe.overlap(
df1, df2, how="left", keep_order=False, cols2=["chrom2", "start2", "end2"]
)[["chrom", "start", "end", "strand"]]
)
df1 = pd.DataFrame(
[
["chr1", 8, 12, "+", pd.NA],
[pd.NA, pd.NA, pd.NA, "-", pd.NA],
["chrX", 1, 8, pd.NA, pd.NA],
],
columns=["chrom", "start", "end", "strand", "animal"],
).astype({"start": pd.Int64Dtype(), "end": pd.Int64Dtype()})
df2 = pd.DataFrame(
[["chr1", 6, 10, pd.NA, "tiger"]],
columns=["chrom2", "start2", "end2", "strand", "animal"],
).astype({"start2": pd.Int64Dtype(), "end2": pd.Int64Dtype()})
assert (
bioframe.overlap(
df1,
df2,
how="outer",
cols2=["chrom2", "start2", "end2"],
return_index=True,
keep_order=False,
).shape
== (3, 12)
)
### result of overlap should still have bedframe-like properties
overlap_df = bioframe.overlap(
df1,
df2,
how="outer",
cols2=["chrom2", "start2", "end2"],
return_index=True,
suffixes=("", ""),
)
assert checks.is_bedframe(
overlap_df[df1.columns],
)
assert checks.is_bedframe(
overlap_df[df2.columns], cols=["chrom2", "start2", "end2"]
)
overlap_df = bioframe.overlap(
df1,
df2,
how="innter",
cols2=["chrom2", "start2", "end2"],
return_index=True,
suffixes=("", ""),
)
assert checks.is_bedframe(
overlap_df[df1.columns],
)
assert checks.is_bedframe(
overlap_df[df2.columns], cols=["chrom2", "start2", "end2"]
)
# test keep_order incompatible if how!= 'left'
with pytest.raises(ValueError):
bioframe.overlap(
df1,
df2,
how="outer",
on=["animal"],
cols2=["chrom2", "start2", "end2"],
keep_order=True,
)
def test_cluster():
df1 = pd.DataFrame(
[
["chr1", 1, 5],
["chr1", 3, 8],
["chr1", 8, 10],
["chr1", 12, 14],
],
columns=["chrom", "start", "end"],
)
df_annotated = bioframe.cluster(df1)
assert (
df_annotated["cluster"].values == np.array([0, 0, 0, 1])
).all() # the last interval does not overlap the first three
df_annotated = bioframe.cluster(df1, min_dist=2)
assert (
df_annotated["cluster"].values == np.array([0, 0, 0, 0])
).all() # all intervals part of the same cluster
df_annotated = bioframe.cluster(df1, min_dist=None)
assert (
df_annotated["cluster"].values == np.array([0, 0, 1, 2])
).all() # adjacent intervals not clustered
df1.iloc[0, 0] = "chrX"
df_annotated = bioframe.cluster(df1)
assert (
df_annotated["cluster"].values == np.array([2, 0, 0, 1])
).all() # do not cluster intervals across chromosomes
# test consistency with pyranges (which automatically sorts df upon creation and uses 1-based indexing for clusters)
# assert (
# (bioframe_to_pyranges(df1).cluster(count=True).df["Cluster"].values - 1)
# == bioframe.cluster(df1.sort_values(["chrom", "start"]))["cluster"].values
# ).all()
# test on=[] argument
df1 = pd.DataFrame(
[
["chr1", 3, 8, "+", "cat", 5.5],
["chr1", 3, 8, "-", "dog", 6.5],
["chr1", 6, 10, "-", "cat", 6.5],
["chrX", 6, 10, "-", "cat", 6.5],
],
columns=["chrom", "start", "end", "strand", "animal", "location"],
)
assert (
bioframe.cluster(df1, on=["animal"])["cluster"].values == np.array([0, 1, 0, 2])
).all()
assert (
bioframe.cluster(df1, on=["strand"])["cluster"].values == np.array([0, 1, 1, 2])
).all()
assert (
bioframe.cluster(df1, on=["location", "animal"])["cluster"].values
== np.array([0, 2, 1, 3])
).all()
### test cluster with NAs
df1 = pd.DataFrame(
[
["chrX", 1, 8, pd.NA, pd.NA],
[pd.NA, pd.NA, pd.NA, "-", pd.NA],
["chr1", 8, 12, "+", pd.NA],
["chr1", 1, 8, np.nan, pd.NA],
[pd.NA, np.nan, pd.NA, "-", pd.NA],
],
columns=["chrom", "start", "end", "strand", "animal"],
).astype({"start": pd.Int64Dtype(), "end": pd.Int64Dtype()})
assert bioframe.cluster(df1)["cluster"].max() == 3
assert bioframe.cluster(df1, on=["strand"])["cluster"].max() == 4
pd.testing.assert_frame_equal(df1, bioframe.cluster(df1)[df1.columns])
assert checks.is_bedframe(
bioframe.cluster(df1, on=["strand"]),
cols=["chrom", "cluster_start", "cluster_end"],
)
assert checks.is_bedframe(
bioframe.cluster(df1), cols=["chrom", "cluster_start", "cluster_end"]
)
assert checks.is_bedframe(bioframe.cluster(df1))
def test_merge():
df1 = pd.DataFrame(
[
["chr1", 1, 5],
["chr1", 3, 8],
["chr1", 8, 10],
["chr1", 12, 14],
],
columns=["chrom", "start", "end"],
)
# the last interval does not overlap the first three with default min_dist=0
assert (bioframe.merge(df1)["n_intervals"].values == np.array([3, 1])).all()
# adjacent intervals are not clustered with min_dist=none
assert (
bioframe.merge(df1, min_dist=None)["n_intervals"].values == np.array([2, 1, 1])
).all()
# all intervals part of one cluster
assert (
bioframe.merge(df1, min_dist=2)["n_intervals"].values == np.array([4])
).all()
df1.iloc[0, 0] = "chrX"
assert (
bioframe.merge(df1, min_dist=None)["n_intervals"].values
== np.array([1, 1, 1, 1])
).all()
assert (
bioframe.merge(df1, min_dist=0)["n_intervals"].values == np.array([2, 1, 1])
).all()
# total number of intervals should equal length of original dataframe
mock_df = mock_bioframe()
assert np.sum(bioframe.merge(mock_df, min_dist=0)["n_intervals"].values) == len(
mock_df
)
# # test consistency with pyranges
# pd.testing.assert_frame_equal(
# pyranges_to_bioframe(bioframe_to_pyranges(df1).merge(count=True)),
# bioframe.merge(df1),
# check_dtype=False,
# check_exact=False,
# )
# test on=['chrom',...] argument
df1 = pd.DataFrame(
[
["chr1", 3, 8, "+", "cat", 5.5],
["chr1", 3, 8, "-", "dog", 6.5],
["chr1", 6, 10, "-", "cat", 6.5],
["chrX", 6, 10, "-", "cat", 6.5],
],
columns=["chrom", "start", "end", "strand", "animal", "location"],
)
assert len(bioframe.merge(df1, on=None)) == 2
assert len(bioframe.merge(df1, on=["strand"])) == 3
assert len(bioframe.merge(df1, on=["strand", "location"])) == 3
assert len(bioframe.merge(df1, on=["strand", "location", "animal"])) == 4
d = """ chrom start end animal n_intervals
0 chr1 3 10 cat 2
1 chr1 3 8 dog 1
2 chrX 6 10 cat 1"""
df = pd.read_csv(StringIO(d), sep=r"\s+")
pd.testing.assert_frame_equal(
df,
bioframe.merge(df1, on=["animal"]),
check_dtype=False,
)
# merge with repeated indices
df = pd.DataFrame(
{"chrom": ["chr1", "chr2"], "start": [100, 400], "end": [110, 410]}
)
df.index = [0, 0]
pd.testing.assert_frame_equal(
df.reset_index(drop=True), bioframe.merge(df)[["chrom", "start", "end"]]
)
# test merge with NAs
df1 = pd.DataFrame(
[
["chrX", 1, 8, pd.NA, pd.NA],
[pd.NA, pd.NA, pd.NA, "-", pd.NA],
["chr1", 8, 12, "+", pd.NA],
["chr1", 1, 8, np.nan, pd.NA],
[pd.NA, np.nan, pd.NA, "-", pd.NA],
],
columns=["chrom", "start", "end", "strand", "animal"],
).astype({"start": pd.Int64Dtype(), "end": | pd.Int64Dtype() | pandas.Int64Dtype |
# Copyright 2021 <NAME>. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
""" Representation Probing """
from typing import Optional, Tuple, Union
from absl import app
from absl import logging
from datasets import DatasetDict
from datasets import load_from_disk
from einops import rearrange
from einops import repeat
import jax
import jax.numpy as jnp
import jax.random as jr
import numpy as np
import pandas as pd
from pandas import DataFrame
import toolz.curried as T
from tqdm import trange
import tree
from probing._src.configurable import configurable
from probing._src.constants import COLORS
from probing.representations import data
from probing.representations import models
@configurable
def repr_probing( # pytype: disable=annotation-type-mismatch
repr_ds: Optional[str] = None,
preds_path: Optional[str] = None,
results_path: Optional[str] = None,
seed: int = 12345,
nb_seeds: int = 5,
nb_points: int = 10,
batch_size: int = 64,
n_training_steps: int = 4000,
max_parallel: int = -1,
log_freq: int = 0,
max_batch_size: int = 1024,
ds_fits_in_vram: bool = True,
learning_rate: float = 1e-4,
hidden_sizes: Tuple[int] = (512, 512),
validation_split: str = 'validation',
) -> Tuple[DataFrame, DataFrame]:
"""Run representation probing.
Depending on the representation size, we may need to do jobs in smaller
batches.
Args:
seed: Random seed
nb_seeds: Number of random seeds per point
nb_points: Number of point to run along the curve
batch_size: Batch size for each model.
n_training_steps: Number of training steps.
max_parallel: Maximum number of models that can be trained in parallel.
log_freq: Logging frequency
max_batch_size: Maximum batch size to use during evaluation.
learning_rate: Learning rate
hidden_sizes: Size of each hidden layer.
    repr_ds: Directory containing a HF dataset with representations.
    preds_path: Path to store predictions.
    results_path: Path to store results.
ds_fits_in_vram: predicate indicating if the dataset fits in VRAM. This
should only be set as a last resort, max_parallel is much faster.
    validation_split: Split to use for calculating validation metrics. This
      should be `validation` or `test`.
"""
if not isinstance(repr_ds, DatasetDict):
repr_ds = load_from_disk(repr_ds)
if validation_split == 'train':
raise ValueError(
'validation split cannot be train, choose one of "validation" or "test".'
)
if validation_split == "test":
logging.warning('received validation_split="test".')
jobs = data.generate_jobs(
repr_ds['train'],
nb_seeds=nb_seeds,
nb_points=nb_points,
seed=seed,
)
# configure chex compile assertions
chex_expect_num_compile = 1
if len(jobs) % max_parallel != 0:
logging.warning(
'the # of jobs (%d) should be divisible by max_parallel (%d), otherwise'
'jax will have to recompile every step for the last set of models.',
len(jobs), max_parallel)
chex_expect_num_compile = 2
val_ds = repr_ds[validation_split]
# Create RNGs
# Initialise the model's parameters and the optimiser's state.
# each initialization uses a different rng.
n_models = len(jobs)
rng = jr.PRNGKey(seed)
rngs = jr.split(rng, n_models)
rngs, init_rngs, data_rngs = zip(*[jr.split(rng, 3) for rng in rngs])
  train_ds = repr_ds['train']
  # val_ds was already selected above via `validation_split`; do not override it here.
# build models.
# Depending on the representation size, we may need to do jobs in smaller
# batches, however, we will maintain the same functions throughout.
# only the parameter sets need get reset.
input_shape = np.shape(train_ds[0]['hidden_states'])
n_classes = len(train_ds[0]['label'])
init_fn, update_fn, metrics_fn = models.build_models(
input_shape,
hidden_sizes,
batch_size=batch_size,
n_classes=n_classes,
learning_rate=learning_rate)
# create train iter
train_iter = data.jax_multi_iterator(
train_ds,
batch_size,
ds_fits_in_vram=ds_fits_in_vram,
max_traces=chex_expect_num_compile,
)
# add vmaps
update_fn = jax.vmap(update_fn)
# validation function uses the same data for all models.
valid_fn = jax.vmap(metrics_fn, in_axes=(0, None))
evaluate = models.evaluate(valid_fn, val_ds, max_batch_size)
# Create inner loop --->
inner_loop = _repr_curve_inner(train_iter, init_fn, update_fn, evaluate,
log_freq, n_training_steps)
# zip up the rngs into jobs and partition s.t. < max_parallel
inner_jobs = list(zip(jobs, rngs, init_rngs, data_rngs))
if max_parallel > 0:
inner_jobs = T.partition_all(max_parallel, inner_jobs)
else:
inner_jobs = [inner_jobs]
records, preds = zip(*T.map(inner_loop, inner_jobs))
df = _format_predictions(val_ds, preds, jobs)
# store results
results = _generate_results(records)
df_result = pd.DataFrame.from_records(results)
# maybe save to files
if preds_path:
df.to_csv(preds_path, index=False)
if results_path:
df_result.to_csv(results_path, index=False)
return df, df_result
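# --- Hedged usage sketch (added for illustration). repr_probing() is normally
# driven through the @configurable decorator, but assuming the wrapper passes
# keyword arguments through (as gin-style configurables typically do), it can
# also be called directly. The paths below are placeholders; the representation
# directory must contain a datasets.DatasetDict saved with save_to_disk whose
# examples carry 'hidden_states' and one-hot 'label' fields. ---
def _example_repr_probing_call():
  df_preds, df_results = repr_probing(
      repr_ds='data/representations',
      preds_path='out/preds.csv',
      results_path='out/results.csv',
      nb_seeds=2,
      nb_points=4,
      batch_size=32,
      n_training_steps=1000,
      max_parallel=8,
  )
  return df_preds, df_results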
@T.curry
def _repr_curve_inner(
train_iter,
init_fn,
update_fn,
evaluate,
log_freq,
n_training_steps,
jobset,
):
# unpack the jobset
jobs, rngs, init_rngs, data_rngs = zip(*jobset)
# initialize
params, opt_state = init_fn(init_rngs)
train_iter = train_iter(jobs)
rngs = jnp.asarray(rngs)
# log_freq = 400
# batch: <num_models, batch_size, *input_shape>
train_pbar = trange(int(n_training_steps), desc='Training')
for step in train_pbar:
batch = next(train_iter)
params, opt_state, up_metrics, rngs = update_fn(params, rngs, opt_state,
batch)
if log_freq > 0 and (step + 1) % log_freq == 0:
val_metrics = evaluate(params)
train_pbar.set_postfix(
avg_loss=jnp.mean(up_metrics['loss']),
# best average jsd.
val_jsd=jnp.min(jnp.mean(val_metrics['jensenshannon_div'], axis=-1)),
)
# validation
val_metrics = evaluate(params)
# avg across examples
per_job_val_metrics = tree.map_structure(lambda x: jnp.mean(x, axis=1),
val_metrics)
results = _metrics_to_results(per_job_val_metrics, jobs, n_training_steps)
results = list(results)
return results, val_metrics
def _generate_results(records):
"""" Generates and saves results. """
results = [r for resset in records for r in resset]
return results
def _metrics_to_results(metrics, jobs, t):
def make_result(item):
i, job = item
res = tree.map_structure(lambda x: x[i], metrics)
res.update({
'seed': job['seed'],
'objects': job['num_objects'],
'samples': job['samples'],
't': t,
})
return res
return T.map(make_result, enumerate(jobs))
def _format_predictions(ds, results, jobs) -> pd.DataFrame:
""" Save predictions to a CSV
cols:
example_idx,object_idx,preds,seed,samples
Args:
    ds: Dataset preds were made on
    preds <n_jobs,n_examples,n_classes>: Predictions for all jobs.
    jobs <n_jobs,2>: List of tuples containing <seed,point>
"""
# chex.assert_equal_shape([ds.labels, preds[0]])
# we may have done the work in jobsets, stack preds
# preds = jnp.concatenate(preds)
results = tree.map_structure(lambda *x: jnp.concatenate(x), *results)
# shapes = tree.map_structure(lambda x: x.shape, results)
results['class_id'] = repeat(np.asarray(ds['class_id']),
'num_ex -> n num_ex',
n=len(jobs))
results['template_idx'] = repeat(np.asarray(ds['template_idx']),
'num_ex -> n num_ex',
n=len(jobs))
# add job metadata
for k in ('samples', 'num_objects', 'seed'):
results[k] = repeat(np.asarray([x[k] for x in jobs]),
'n -> n num_ex',
num_ex=len(ds))
# add colors
results['objects'] = results.pop('num_objects')
preds = results.pop('preds')
for i, color in enumerate(COLORS):
results[color] = preds[:, :, i]
# should now be all <n_jobs,n_examples>
# ic(tree.map_structure(lambda x: x.shape, results))
# flatten all
results = tree.map_structure(
lambda x: rearrange(x, 'n_jobs nex -> (n_jobs nex)'), results)
# -> csv
  df = pd.DataFrame(results)
# -*- coding: utf-8 -*-
import string
from collections import OrderedDict
from datetime import date, datetime
import numpy as np
import pandas as pd
import pandas.util.testing as pdt
import pytest
from kartothek.core.common_metadata import make_meta, store_schema_metadata
from kartothek.core.index import ExplicitSecondaryIndex
from kartothek.core.naming import DEFAULT_METADATA_VERSION
from kartothek.io_components.metapartition import (
MetaPartition,
_unique_label,
parse_input_to_metapartition,
partition_labels_from_mps,
)
from kartothek.serialization import DataFrameSerializer, ParquetSerializer
def test_store_single_dataframe_as_partition(
store, metadata_storage_format, metadata_version
):
df = pd.DataFrame(
{"P": np.arange(0, 10), "L": np.arange(0, 10), "TARGET": np.arange(10, 20)}
)
mp = MetaPartition(
label="test_label", data={"core": df}, metadata_version=metadata_version
)
meta_partition = mp.store_dataframes(
store=store,
df_serializer=ParquetSerializer(),
dataset_uuid="dataset_uuid",
store_metadata=True,
metadata_storage_format=metadata_storage_format,
)
assert len(meta_partition.data) == 0
expected_key = "dataset_uuid/core/test_label.parquet"
assert meta_partition.files == {"core": expected_key}
assert meta_partition.label == "test_label"
files_in_store = list(store.keys())
expected_num_files = 1
assert len(files_in_store) == expected_num_files
stored_df = DataFrameSerializer.restore_dataframe(store=store, key=expected_key)
pdt.assert_frame_equal(df, stored_df)
files_in_store.remove(expected_key)
assert len(files_in_store) == expected_num_files - 1
def test_store_single_dataframe_as_partition_no_metadata(store, metadata_version):
df = pd.DataFrame(
{"P": np.arange(0, 10), "L": np.arange(0, 10), "TARGET": np.arange(10, 20)}
)
mp = MetaPartition(
label="test_label", data={"core": df}, metadata_version=metadata_version
)
partition = mp.store_dataframes(
store=store,
df_serializer=ParquetSerializer(),
dataset_uuid="dataset_uuid",
store_metadata=False,
)
assert len(partition.data) == 0
expected_file = "dataset_uuid/core/test_label.parquet"
assert partition.files == {"core": expected_file}
assert partition.label == "test_label"
# One meta one actual file
files_in_store = list(store.keys())
assert len(files_in_store) == 1
stored_df = DataFrameSerializer.restore_dataframe(store=store, key=expected_file)
pdt.assert_frame_equal(df, stored_df)
def test_load_dataframe_logical_conjunction(
store, meta_partitions_files_only, metadata_version, metadata_storage_format
):
df = pd.DataFrame(
{"P": np.arange(0, 10), "L": np.arange(0, 10), "TARGET": np.arange(10, 20)}
)
mp = MetaPartition(
label="cluster_1",
data={"core": df},
metadata_version=metadata_version,
logical_conjunction=[("P", ">", 4)],
)
meta_partition = mp.store_dataframes(
store=store,
df_serializer=None,
dataset_uuid="dataset_uuid",
store_metadata=True,
metadata_storage_format=metadata_storage_format,
)
predicates = None
loaded_mp = meta_partition.load_dataframes(store=store, predicates=predicates)
data = {
"core": pd.DataFrame(
{"P": [5, 6, 7, 8, 9], "L": [5, 6, 7, 8, 9], "TARGET": [15, 16, 17, 18, 19]}
).set_index(np.arange(5, 10))
}
pdt.assert_frame_equal(loaded_mp.data["core"], data["core"])
predicates = [[("L", ">", 6), ("TARGET", "<", 18)]]
loaded_mp = meta_partition.load_dataframes(store=store, predicates=predicates)
data = {
"core": pd.DataFrame({"P": [7], "L": [7], "TARGET": [17]}).set_index(
np.array([7])
)
}
pdt.assert_frame_equal(loaded_mp.data["core"], data["core"])
predicates = [[("L", ">", 2), ("TARGET", "<", 17)], [("TARGET", "==", 19)]]
loaded_mp = meta_partition.load_dataframes(store=store, predicates=predicates)
data = {
"core": pd.DataFrame(
{"P": [5, 6, 9], "L": [5, 6, 9], "TARGET": [15, 16, 19]}
).set_index(np.array([5, 6, 9]))
}
pdt.assert_frame_equal(loaded_mp.data["core"], data["core"])
def test_store_multiple_dataframes_as_partition(
store, metadata_storage_format, metadata_version
):
df = pd.DataFrame(
{"P": np.arange(0, 10), "L": np.arange(0, 10), "TARGET": np.arange(10, 20)}
)
df_2 = pd.DataFrame({"P": np.arange(0, 10), "info": string.ascii_lowercase[:10]})
mp = MetaPartition(
label="cluster_1",
data={"core": df, "helper": df_2},
metadata_version=metadata_version,
)
meta_partition = mp.store_dataframes(
store=store,
df_serializer=None,
dataset_uuid="dataset_uuid",
store_metadata=True,
metadata_storage_format=metadata_storage_format,
)
expected_file = "dataset_uuid/core/cluster_1.parquet"
expected_file_helper = "dataset_uuid/helper/cluster_1.parquet"
assert meta_partition.files == {
"core": expected_file,
"helper": expected_file_helper,
}
assert meta_partition.label == "cluster_1"
files_in_store = list(store.keys())
assert len(files_in_store) == 2
stored_df = DataFrameSerializer.restore_dataframe(store=store, key=expected_file)
pdt.assert_frame_equal(df, stored_df)
files_in_store.remove(expected_file)
stored_df = DataFrameSerializer.restore_dataframe(
store=store, key=expected_file_helper
)
pdt.assert_frame_equal(df_2, stored_df)
files_in_store.remove(expected_file_helper)
@pytest.mark.parametrize("predicate_pushdown_to_io", [True, False])
def test_load_dataframes(
meta_partitions_files_only, store_session, predicate_pushdown_to_io
):
expected_df = pd.DataFrame(
OrderedDict(
[
("P", [1]),
("L", [1]),
("TARGET", [1]),
("DATE", pd.to_datetime([date(2010, 1, 1)])),
]
)
)
expected_df_2 = pd.DataFrame(OrderedDict([("P", [1]), ("info", ["a"])]))
mp = meta_partitions_files_only[0]
assert len(mp.files) > 0
assert len(mp.data) == 0
mp = meta_partitions_files_only[0].load_dataframes(
store=store_session, predicate_pushdown_to_io=predicate_pushdown_to_io
)
assert len(mp.data) == 2
data = mp.data
pdt.assert_frame_equal(data["core"], expected_df, check_dtype=False)
pdt.assert_frame_equal(data["helper"], expected_df_2, check_dtype=False)
empty_mp = MetaPartition("empty_mp", metadata_version=mp.metadata_version)
empty_mp.load_dataframes(
store_session, predicate_pushdown_to_io=predicate_pushdown_to_io
)
assert empty_mp.data == {}
def test_remove_dataframes(meta_partitions_files_only, store_session):
mp = meta_partitions_files_only[0].load_dataframes(store=store_session)
assert len(mp.data) == 2
mp = mp.remove_dataframes()
assert mp.data == {}
def test_load_dataframes_selective(meta_partitions_files_only, store_session):
expected_df = pd.DataFrame(
OrderedDict(
[
("P", [1]),
("L", [1]),
("TARGET", [1]),
("DATE", pd.to_datetime([date(2010, 1, 1)])),
]
)
)
mp = meta_partitions_files_only[0]
assert len(mp.files) > 0
assert len(mp.data) == 0
mp = meta_partitions_files_only[0].load_dataframes(
store=store_session, tables=["core"]
)
assert len(mp.data) == 1
data = mp.data
pdt.assert_frame_equal(data["core"], expected_df, check_dtype=False)
def test_load_dataframes_columns_projection(
meta_partitions_evaluation_files_only, store_session
):
expected_df = pd.DataFrame(OrderedDict([("P", [1]), ("L", [1]), ("HORIZON", [1])]))
mp = meta_partitions_evaluation_files_only[0]
assert len(mp.files) > 0
assert len(mp.data) == 0
mp = meta_partitions_evaluation_files_only[0].load_dataframes(
store=store_session, tables=["PRED"], columns={"PRED": ["P", "L", "HORIZON"]}
)
assert len(mp.data) == 1
data = mp.data
pdt.assert_frame_equal(data["PRED"], expected_df, check_dtype=False)
def test_load_dataframes_columns_raises_missing(
meta_partitions_evaluation_files_only, store_session
):
mp = meta_partitions_evaluation_files_only[0]
assert len(mp.files) > 0
assert len(mp.data) == 0
with pytest.raises(ValueError) as e:
meta_partitions_evaluation_files_only[0].load_dataframes(
store=store_session,
tables=["PRED"],
columns={"PRED": ["P", "L", "HORIZON", "foo", "bar"]},
)
assert str(e.value) == "Columns cannot be found in stored dataframe: bar, foo"
def test_load_dataframes_columns_table_missing(
meta_partitions_evaluation_files_only, store_session
):
# test behavior of load_dataframes for columns argument given
# specifying table that doesn't exist
mp = meta_partitions_evaluation_files_only[0]
assert len(mp.files) > 0
assert len(mp.data) == 0
with pytest.raises(
ValueError,
match=r"You are trying to read columns from invalid table\(s\). .*PRED_typo.*",
):
mp.load_dataframes(
store=store_session,
columns={"PRED_typo": ["P", "L", "HORIZON", "foo", "bar"]},
)
# ensure typo in tables argument doesn't raise, as specified in docstring
dfs = mp.load_dataframes(store=store_session, tables=["PRED_typo"])
assert len(dfs) > 0
def test_from_dict():
df = pd.DataFrame({"a": [1]})
dct = {"data": {"core": df}, "label": "test_label"}
meta_partition = MetaPartition.from_dict(dct)
pdt.assert_frame_equal(meta_partition.data["core"], df)
assert meta_partition.metadata_version == DEFAULT_METADATA_VERSION
def test_eq():
df = pd.DataFrame({"a": [1]})
df_same = pd.DataFrame({"a": [1]})
    df_other = pd.DataFrame({"a": [2]})
"""
author: <NAME> & <NAME>
Implementation of the climate data-utils for our training framework (i.e. on
synthetic SDE data).
This is mainly a copy of the data_utils.py file from the official implementation
of GRU-ODE-Bayes: https://github.com/edebrouwer/gru_ode_bayes.
"""
import torch
import pandas as pd
import numpy as np
import math
from torch.utils.data import Dataset, DataLoader
from scipy import special
class ODE_DatasetNumpy(Dataset):
"""Dataset class for ODE type of data. Fed from numpy arrays.
Args:
times array of times
ids ids (ints) of patients (samples)
values value matrix, each line is one observation
masks observation mask (1.0 means observed, 0.0 missing)
"""
def __init__(self, times, ids, values, masks):
assert times.shape[0] == ids.shape[0]
assert times.shape[0] == values.shape[0]
assert values.shape == masks.shape
times = times.astype(np.float32)
values = values.astype(np.float32)
masks = masks.astype(np.float32)
df_values = pd.DataFrame(values, columns=[f"Value_{i}" for i in range(values.shape[1])])
df_masks = pd.DataFrame(masks, columns=[f"Mask_{i}" for i in range(masks.shape[1])])
self.df = pd.concat([
pd.Series(times, name="Time"),
pd.Series(ids, name="ID"),
df_values,
df_masks,
], axis=1)
self.df.sort_values("Time", inplace=True)
self.length = self.df["ID"].nunique()
self.df.set_index("ID", inplace=True)
def __len__(self):
return self.length
def __getitem__(self, idx):
subset = self.df.loc[idx]
covs = self.df.loc[idx,"Time"] # To change !! TODO: this does not return cov from data
## returning also idx to allow empty samples
return {"idx": idx, "y": 0, "path": subset, "cov": covs }
class ODE_Dataset(Dataset):
"""
Dataset class for ODE type of data. With 2 values.
    Can be fed with either a csv file containing the dataframe or directly with a pandas dataframe.
One can further provide samples idx that will be used (for training / validation split purposes.)
"""
def __init__(self, csv_file=None, cov_file=None, label_file=None, panda_df=None, cov_df=None, label_df=None, root_dir="./", t_mult=1.0, idx=None, jitter_time=0, validation = False, val_options = None):
"""
Args:
csv_file CSV file to load the dataset from
panda_df alternatively use pandas df instead of CSV file
root_dir directory of the CSV file
t_mult multiplier for time values (1.0 default)
jitter_time jitter size (0 means no jitter), to add randomly to Time.
Jitter is added before multiplying with t_mult
validation boolean. True if this dataset is for validation purposes
            val_options dictionary with validation dataset options.
T_val : Time after which observations are considered as test samples
max_val_samples : maximum number of test observations per trajectory.
"""
self.validation = validation
if panda_df is not None:
assert (csv_file is None), "Only one feeding option should be provided, not both"
self.df = panda_df
self.cov_df = cov_df
self.label_df = label_df
else:
assert (csv_file is not None) , "At least one feeding option required !"
self.df = pd.read_csv(root_dir + "/" + csv_file)
assert self.df.columns[0]=="ID"
if label_file is None:
self.label_df = None
else:
                self.label_df = pd.read_csv(root_dir + "/" + label_file)
#!/usr/bin/env python3
"""Module to download cryptocurrency ohlc data"""
import time
import datetime as dt
import logging
import json
import requests
import pandas as pd
def from_datetime_to_unix(date):
'''in: datetime, out: unix_timestamp'''
return int(time.mktime(date.timetuple()))
def from_unix_to_date(date):
'''in: unix_timestamp, out: datetime'''
value = dt.datetime.fromtimestamp(date)
return value.date()
def str_to_datetime(time_str):
'''in: str, out: datetime'''
return dt.datetime.strptime(time_str, '%Y-%m-%d %H:%M:%S')
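# --- Hedged round-trip sketch for the helpers above (added for illustration;
# from_datetime_to_unix() uses time.mktime, so results depend on the local
# timezone). ---
def _example_time_helpers():
    d = dt.datetime(2021, 6, 1, 12, 30, 0)
    ts = from_datetime_to_unix(d)  # local-zone epoch seconds
    assert from_unix_to_date(ts) == d.date()
    assert str_to_datetime('2021-06-01 12:30:00') == d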
def columns_to_upper_case(df_ohlc):
'''in : df, out : df, Makes all columns of df start with capital letter'''
columns = list(df_ohlc.columns)
for column in columns:
if column[0].isupper():
pass
else:
tmp_column_name = column[0].upper() + column[1:]
df_ohlc.rename(index=str, columns={column: tmp_column_name}, inplace=True)
return df_ohlc
def correct_ohlc_df(df_ohlc, frequency=None, cols_to_drop=None):
"""Function to modify df to the required format, checking for wrong entries
and filling nans
Args:
df_ohlc (DataFrame):
Close, Open, Low, High and Volume columns are necessary
frequency (str):
Resample frequency 'D', 'W', 'M', if None - do not resample data
cols_to_drop (list):
names of unnecessary columns in df
Returns:
df (DataFrame):
Repaired df
"""
if cols_to_drop is None:
cols_to_drop = []
    # Ensure the index is a DatetimeIndex; comparing against the private
    # 'pandas._libs.tslib.Timestamp' class path by string is brittle across
    # pandas versions.
    if not isinstance(df_ohlc.index, pd.DatetimeIndex):
        df_ohlc.index = pd.to_datetime(df_ohlc.index)
# resampling data if needed
if frequency is not None:
df_ohlc = df_ohlc.resample(frequency).agg({
'Close': 'last',
'High': 'max',
'Low': 'min',
'Open': 'first',
'Volume': 'sum',
})
df_before_correction = df_ohlc
# make ohlc right
count_of_ohlc_mistakes = 0
for index, row in df_ohlc.iterrows():
if row['Low'] > min(row['Close'], row['Open'], row['High']):
df_ohlc.loc[index, 'Low'] = min(row['Close'], row['Open'], row['High']) * 0.999
count_of_ohlc_mistakes += 1
if row['High'] < max(row['Close'], row['Open'], row['Low']):
df_ohlc.loc[index, 'High'] = max(row['Close'], row['Open'], row['Low']) * 1.001
count_of_ohlc_mistakes += 1
if row['Volume'] < 0:
df_ohlc.loc[index, 'Volume'] = abs(row['Volume'])
count_of_ohlc_mistakes += 1
# delete duplicates
logging.debug('Duplicates found: %s', len(df_ohlc[df_ohlc.index.duplicated()]))
df_ohlc = df_ohlc[~df_ohlc.index.duplicated()]
df_ohlc.fillna(method='ffill', inplace=True)
logging.debug('Missed candles added: %s', len(df_ohlc) - len(df_before_correction))
return df_ohlc
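# --- Hedged usage sketch (added for illustration; the candles are synthetic).
# correct_ohlc_df() repairs inconsistent high/low values, takes the absolute
# volume, optionally resamples, drops duplicate timestamps and forward-fills
# gaps. ---
def _example_correct_ohlc():
    raw = pd.DataFrame(
        {
            'Open': [100.0, 101.0],
            'High': [99.0, 103.0],    # first High is below Open -> repaired
            'Low': [98.0, 100.0],
            'Close': [101.0, 102.0],
            'Volume': [-5.0, 10.0],   # negative volume -> abs()
        },
        index=pd.to_datetime(['2021-01-01', '2021-01-02']),
    )
    return correct_ohlc_df(raw.copy(), frequency='D')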
def get_ohlc_cryptocompare_once(first_ticker, second_ticker, end_date=dt.datetime.now(),
aggregate=1, interval_key='day'):
""" Retrieve limited bulk of ohlc cryptocurrency data from Cryptocompare.
Args:
first_ticker (str):
Crypto symbol(BTC).
second_ticker (str):
Crypto symbol(USD).
aggregate (int):
How many points should be made into one
interval_key (str):
Time interval of data points
end_date (datetime):
Last moment in ohlc data
Returns:
df_ohlc (pandas.DataFrame):
DF containing the opening price, high price, low price,
closing price, and volume.
Note:
        Data is limited (at most 2000 data points are returned per request)
"""
limit = 2000
df_ohlc = pd.DataFrame()
interval_dict = {'minute': 'histominute', 'hour': 'histohour', 'day': 'histoday'}
freq_dict = {'minute': '1M', 'hour': '1H', 'day': '1D'}
end_date_unix = from_datetime_to_unix(end_date)
url = 'https://min-api.cryptocompare.com/data/{}'.format(interval_dict[interval_key]) +\
'?fsym={}'.format(first_ticker) +\
'&tsym={}'.format(second_ticker) +\
'&limit={}'.format(limit) +\
'&aggregate={}'.format(aggregate) +\
'&toTs={}'.format(str(end_date_unix))
response = requests.get(url)
resp_dict = json.loads(response.text)
# parsing response dict to pieces
if resp_dict["Response"] == "Success":
data = resp_dict['Data']
df_ohlc = pd.DataFrame(data)
df_ohlc = columns_to_upper_case(df_ohlc)
df_ohlc['Date'] = [dt.datetime.fromtimestamp(d) for d in df_ohlc.Time]
df_ohlc['Volume'] = [v for v in df_ohlc.Volumeto]
df_ohlc.set_index('Date', inplace=True)
df_ohlc.index.name = 'Date'
df_ohlc = correct_ohlc_df(df_ohlc, freq_dict[interval_key])
elif resp_dict["Response"] == "Error":
logging.error("There was an error in response from cryptocompare: %s", resp_dict)
else:
logging.error("Unknown response from cryptocompare: %s", resp_dict)
return df_ohlc
def get_ohlc_cryptocompare(first_ticker, second_ticker, start_date,
end_date=dt.datetime.now(), **kwargs):
""" Retrieves ohlc cryptocurrency data from Cryptocompare.
Args:
first_ticker (str):
Crypto symbol(BTC).
second_ticker (str):
Crypto symbol(USD).
start_date (datetime):
First moment in ohlc data
end_date (datetime):
Optional.Last moment in ohlc data
aggregate (int):
Optional.How many points should be made into one
interval_key (str):
Optional.Time interval of data points
Returns:
df_total (pandas.DataFrame):
DF containing the opening price, high price, low price,
closing price, and volume.
Note:
This this loop for get_ohlc_cryptocompare_once
"""
freq_dict = {'minute': '1M', 'hour': '1H', 'day': '1D'}
df_total = get_ohlc_cryptocompare_once(first_ticker, second_ticker,
end_date=end_date, **kwargs)
new_start_date = df_total.index.min()
while new_start_date > start_date:
df_tmp = get_ohlc_cryptocompare_once(first_ticker, second_ticker,
end_date=new_start_date, **kwargs)
new_start_date = df_tmp.index.min()
frames = [df_tmp, df_total]
        df_total = pd.concat(frames)
# -*- coding: utf-8 -*-
"""
Created on Fri Sep 3 09:59:23 2021
@author: <NAME>
Install Packages:
- pip install pandas
Draws a Gantt chart per trial
Each event is on a different y-tick
"""
# eNPHR 1.0 SS_6
#
from ganttChartDrawer import draw_group
import pandas as pd
import numpy as np
# Read and organize cleversys data
def read_cleversys(filename):
    # Read Excel event-export files
    df = pd.read_excel('cleversys_events/Trial event export ' + filename + '.xlsx', skiprows=[0,1,2,3,4,5])
"""
Name: shread.py
Author: <NAME>, Reclamation Technical Service Center
Description: Utilities for downloading and processing snow products
ADD CLASSES / FUNCTIONS DEFINED
ADD WHA
"""
import ftplib
import os
import tarfile
import gzip
from osgeo import gdal
import csv
import logging
import glob
from osgeo import osr
import zipfile
from osgeo import ogr
import fileinput
import datetime as dt
import configparser
import sys
import argparse
import urllib.request
import requests
from requests.auth import HTTPDigestAuth
import time
import geojson
import json
import rasterstats
from rasterstats import zonal_stats
import numpy as np
import pandas as pd
import geopandas as gpd
from lxml import etree
import fiona
import rasterio
from rasterio.warp import calculate_default_transform, reproject, Resampling
from pyproj import Transformer
import base64
import itertools
import ssl
import pytz
import xarray as xr
import rioxarray
from tzlocal import get_localzone
from getpass import getpass
try:
from urllib.parse import urlparse
from urllib.request import urlopen, Request, build_opener, HTTPCookieProcessor
from urllib.error import HTTPError, URLError
except ImportError:
from urlparse import urlparse
from urllib2 import urlopen, Request, HTTPError, URLError, build_opener, HTTPCookieProcessor
def main(config_path, start_date, end_date, time_int, prod_str):
"""SHREAD main function
Parameters
---------
config_path : string
relative file path to config file
start_date : string
first day of data in %Y%m%d format
end_date : string
last day of data in %Y%m%d format
time_int : string
Accepted: any Python 'freq' arguments
- D: day
- W: week
- M: month
- Y, YS: year
- SM, SMS
- twoweekstart
prod_str : string
comma separated list of products
Accepted:
- snodas
- srpt
- modscag
- modis # not yet supported
Returns
-------
None
Notes
-----
-i, --ini, config_path : INI file path
-s, --start : start date in %Y%m%d format
-e, --end : end date in %Y%m%d format
-t, --time : time interval
-p, --prod : product list
"""
# read config file
cfg = config_params()
cfg.read_config(config_path)
cfg.proc_config()
# develop date list
start_date = dt.datetime.strptime(start_date, '%Y%m%d')
end_date = dt.datetime.strptime(end_date, '%Y%m%d')
date_list = pd.date_range(start_date, end_date, freq=time_int).tolist()
# create list of products
prod_list = prod_str.split(',')
# download data
# snodas
if 'snodas' in prod_list:
for date_dn in date_list:
error_flag = False
try:
download_snodas(cfg, date_dn)
except:
logger.info("download_snodas: error downloading srpt for '{}'".format(date_dn))
error_flag = True
if error_flag is False:
try:
org_snodas(cfg, date_dn)
except:
logger.info("org_snodas: error processing snodas for '{}'".format(date_dn))
# srpt
if 'srpt' in prod_list:
for date_dn in date_list:
error_flag = False
try:
download_srpt(cfg, date_dn)
except:
logger.info("download_srpt: error downloading srpt for '{}'".format(date_dn))
error_flag = True
if error_flag is False:
try:
org_srpt(cfg, date_dn)
except:
logger.info("org_srpt: error processing srpt for '{}'".format(date_dn))
# modscag
if 'modscag' in prod_list:
for date_dn in date_list:
error_flag = False
try:
download_modscag(cfg, date_dn)
except:
logger.info("download_modscag: error downloading modscag for '{}'".format(date_dn))
error_flag = True
if error_flag is False:
try:
org_modscag(cfg, date_dn)
except:
logger.info("org_modscag: error processing modscag for '{}'".format(date_dn))
# moddrfs
if 'moddrfs' in prod_list:
for date_dn in date_list:
error_flag = False
try:
download_moddrfs(cfg, date_dn)
except:
logger.info("download_moddrfs: error downloading moddrfs for '{}'".format(date_dn))
error_flag = True
if error_flag is False:
try:
org_moddrfs(cfg, date_dn)
except:
logger.info("org_moddrfs: error processing moddrfs for '{}'".format(date_dn))
# modis
if 'modis' in prod_list:
for date_dn in date_list:
error_flag = False
try:
download_modis(cfg, date_dn)
except:
logger.info("download_modis: error downloading modis for '{}'".format(date_dn))
error_flag = True
# if error_flag is False:
# try:
# org_modis(cfg, date_dn)
# except:
# logger.info("org_modis: error processing modis for '{}'".format(date_dn))
# swann
if 'swann' in prod_list:
try:
batch_swann(cfg, date_list, time_int)
except:
logger.info("batch_swann: error downloading swann")
def parse_args():
parser = argparse.ArgumentParser(
description=' SHREAD',
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument(
'-i', '--ini', metavar='PATH',
type=lambda x: is_valid_file(parser, x), help='Input file')
parser.add_argument(
'-s', '--start', metavar='start_date', help='start date')
parser.add_argument(
'-e', '--end', metavar='end_date', help='end date')
parser.add_argument(
'-t', '--time', metavar='time_interval', help='time interval')
parser.add_argument(
'-p', '--prod', metavar='product_list', help='product list')
args = parser.parse_args()
return args
def is_valid_file(parser, arg):
if not os.path.isfile(arg):
parser.error('The file {} does not exist!'.format(arg))
else:
return arg
logger = logging.getLogger(__name__)
class config_params:
"""config params container
Attributes
----------
"""
def __init__(self):
""" """
def __str__(self):
""" """
return '<config_params>'
def read_config(self, config_path):
"""Read and parse config file
Parameters
---------
config_path : string
relative file path to config file
Returns
-------
None
Notes
-----
"""
config = configparser.RawConfigParser()
error_flag = False
error_wd_sec_flag = False
error_earthdata_sec_flag = False
error_snodas_sec_flag = False
error_modis_sec_flag = False
error_nohrsc_sec_flag = False
error_jpl_sec_flag = False
error_swann_sec_flag = False
try:
config.read_file(open(config_path))
logger.info("read_config: reading config file '{}'".format(config_path))
except:
logger.error("read_config: config file could not be read, " +
"is not an input file, or does not exist")
error_flag = True
# check that all sections are present
wd_sec = "wd"
earthdata_sec = "earthdata"
snodas_sec = "snodas"
modis_sec = "modis"
nohrsc_sec = "nohrsc"
jpl_sec = "jpl"
swann_sec = "swann"
# ADD SECTIONS AS NEW SNOW PRODUCTS ARE ADDED
cfg_secs = config.sections()
# verify existence of common required sections
if wd_sec not in cfg_secs:
logger.error(
"read_config: config file missing [{}] section".format(wd_sec))
error_flag = True
error_wd_sec_flag = True
if earthdata_sec not in cfg_secs:
logger.error(
"read_config: config file missing [{}] section".format(earthdata_sec))
error_flag = True
error_earthdata_sec_flag = True
if snodas_sec not in cfg_secs:
logger.error(
"read_config: config file missing [{}] section".format(snodas_sec))
error_flag = True
error_snodas_sec_flag = True
if modis_sec not in cfg_secs:
logger.error(
"read_config: config file missing [{}] section".format(modis_sec))
error_flag = True
error_modis_sec_flag = True
if nohrsc_sec not in cfg_secs:
logger.error(
"read_config: config file missing [{}] section".format(nohrsc_sec))
error_flag = True
error_nohrsc_sec_flag = True
if jpl_sec not in cfg_secs:
logger.error(
"read_config: config file missing [{}] section".format(jpl_sec))
error_flag = True
error_jpl_sec_flag = True
# read file
# wd section
if error_wd_sec_flag == False:
logger.info("[wd]")
#- dir_work
try:
self.dir_work = config.get(wd_sec, "dir_work")
logger.info("read config: reading 'dir_work' {}".format(self.dir_work))
except:
logger.error("read_config: '{}' missing from [{}] section".format("dir_work", wd_sec))
error_flag = True
#- dir_db
try:
self.dir_db = config.get(wd_sec, "dir_db")
logger.info("read config: reading 'dir_db' {}".format(self.dir_db))
except:
logger.error("read_config: '{}' missing from [{}] section".format("dir_db", wd_sec))
error_flag = True
#- dir_arch
try:
self.dir_arch = config.get(wd_sec, "dir_arch")
logger.info("read config: reading 'dir_arch' {}".format(self.dir_arch))
except:
logger.error("read_config: '{}' missing from [{}] section".format("dir_arch", wd_sec))
error_flag = True
#- arch_flag
try:
self.arch_flag = config.get(wd_sec, "arch_flag")
logger.info("read config: reading 'arch_flag' {}".format(self.arch_flag))
except:
logger.error("read_config: '{}' missing from [{}] section".format("arch_flag", wd_sec))
error_flag = True
#- proj
try:
self.proj = config.get(wd_sec, "proj")
logger.info("read config: reading 'proj' {}".format(self.proj))
except:
logger.error("read_config: '{}' missing from [{}] section".format("proj", wd_sec))
error_flag = True
#- null_value
try:
self.null_value = config.get(wd_sec, "null_value")
logger.info("read config: reading 'null_value' {}".format(self.null_value))
except:
logger.error("read_config: '{}' missing from [{}] section".format("null_value", wd_sec))
error_flag = True
self.null_value = int(self.null_value)
#- unit_sys
try:
self.unit_sys = config.get(wd_sec, "unit_sys")
logger.info("read config: reading 'unit_sys' {}".format(self.unit_sys))
except:
logger.error("read_config: '{}' missing from [{}] section".format("unit_sys", wd_sec))
error_flag = True
#- gdal_path
try:
self.gdal_path = config.get(wd_sec, "gdal_path")
logger.info("read config: reading 'gdal_path' {}".format(self.gdal_path))
except:
logger.error("read_config: '{}' missing from [{}] section".format("gdal_path", wd_sec))
error_flag = True
#- basin_poly_path
try:
self.basin_poly_path = config.get(wd_sec, "basin_poly_path")
logger.info("read config: reading 'basin_poly_path' {}".format(self.basin_poly_path))
except:
logger.error("read_config: '{}' missing from [{}] section".format("basin_poly_path", wd_sec))
error_flag = True
#- basin_points_path
try:
self.basin_points_path = config.get(wd_sec, "basin_points_path")
logger.info("read config: reading 'basin_points_path' {}".format(self.basin_points_path))
except:
logger.error("read_config: '{}' missing from [{}] section".format("basin_points_path", wd_sec))
error_flag = True
#- output_type
try:
self.output_type = config.get(wd_sec, "output_type")
logger.info("read config: reading 'output_type' {}".format(self.output_type))
except:
logger.error("read_config: '{}' missing from [{}] section".format("output_type", wd_sec))
error_flag = True
#- output_format
try:
self.output_format = config.get(wd_sec, "output_format")
logger.info("read config: reading 'output_format' {}".format(self.output_format))
except:
logger.error("read_config: '{}' missing from [{}] section".format("output_format", wd_sec))
error_flag = True
# earthdata section
logger.info("[earthdata]")
if error_earthdata_sec_flag == False:
#- username_earthdata
try:
self.username_earthdata = config.get(earthdata_sec, "username_earthdata")
logger.info("read config: reading 'username_earthdata' {}".format(self.username_earthdata))
except:
logger.error("read_config: '{}' missing from [{}] section".format("username_earthdata", earthdata_sec))
error_flag = True
#- password_earthdata
try:
self.password_earthdata = config.get(earthdata_sec, "password_earthdata")
logger.info("read config: reading 'password_earthdata'")
except:
logger.error("read_config: '{}' missing from [{}] section".format("password_earthdata", earthdata_sec))
error_flag = True
# snodas section
logger.info("[snodas]")
if error_snodas_sec_flag == False:
#- host_snodas
try:
self.host_snodas = config.get(snodas_sec, "host_snodas")
logger.info("read config: reading 'host_snodas' {}".format(self.host_snodas))
except:
logger.error("read_config: '{}' missing from [{}] section".format("host_snodas", snodas_sec))
error_flag = True
#- username_snodas
try:
self.username_snodas = config.get(snodas_sec, "username_snodas")
logger.info("read config: reading username_snodas {}".format(self.username_snodas))
except:
logger.error("read_config: '{}' missing from [{}] section".format("username_snodas", snodas_sec))
error_flag = True
#- password_snodas
try:
self.password_snodas = config.get(snodas_sec, "password_snodas")
logger.info("read config: reading password_snodas {}".format(self.password_snodas))
except:
logger.error("read_config: '{}' missing from [{}] section".format("password_snodas", snodas_sec))
error_flag = True
#- dir_ftp_snodas
try:
self.dir_ftp_snodas = config.get(snodas_sec, "dir_ftp_snodas")
logger.info("read config: reading host_snodas {}".format(self.dir_ftp_snodas))
except:
logger.error("read_config: '{}' missing from [{}] section".format("dir_ftp_snodas", snodas_sec))
error_flag = True
#- null_value_snodas
try:
self.null_value_snodas = config.get(snodas_sec, "null_value_snodas")
logger.info("read config: reading host_snodas {}".format(self.null_value_snodas))
except:
logger.error("read_config: '{}' missing from [{}] section".format("null_value_snodas", snodas_sec))
error_flag = True
# modis section
# logger.info("[modis]")
# if error_modis_sec_flag == False:
# #- host_modis
# try:
# self.host_modis = config.get(modis_sec, "host_modis")
# logger.info("read config: reading 'host_modis' {}".format(self.modis_sec))
# except:
# logger.error("read_config: '{}' missing from [{}] section".format("host_modis", modis_sec))
# error_flag = True
#
# #- dir_http_aqua
# try:
# self.dir_http_aqua = config.get(modis_sec, "dir_http_aqua")
# logger.info("read config: reading 'dir_http_aqua {}'".format(self.dir_http_aqua))
# except:
# logger.error("read_config: '{}' missing from [{}] section".format("dir_http_aqua", modis_sec))
# error_flag = True
#
# #- dir_http_terra
# try:
# self.dir_http_terra = config.get(modis_sec, "dir_http_terra")
# logger.info("read config: reading 'dir_http_terra {}'".format(self.dir_http_terra))
# except:
# logger.error("read_config: '{}' missing from [{}] section".format("dir_http_terra", modis_sec))
# error_flag = True
# nohrsc section
logger.info("[nohrsc]")
if error_nohrsc_sec_flag == False:
#- host_nohrsc
try:
self.host_nohrsc = config.get(nohrsc_sec, "host_nohrsc")
logger.info("read config: reading 'host_nohrsc' {}".format(self.host_nohrsc))
except:
logger.error("read_config: '{}' missing from [{}] section".format("host_nohrsc", nohrsc_sec))
error_flag = True
#- dir_http_srpt
try:
self.dir_http_srpt = config.get(nohrsc_sec, "dir_http_srpt")
logger.info("read config: reading 'dir_http_srpt' {}".format(self.dir_http_srpt))
except:
logger.error("read_config: '{}' missing from [{}] section".format("dir_http_srpt", nohrsc_sec))
error_flag = True
#- dir_http_nsa
try:
self.dir_http_nsa = config.get(nohrsc_sec, "dir_http_nsa")
logger.info("read config: reading 'dir_http_nsa' {}".format(self.dir_http_nsa))
except:
logger.error("read_config: '{}' missing from [{}] section".format("dir_http_nsa", nohrsc_sec))
error_flag = True
#- srpt_flag
try:
self.srpt_flag = config.get(nohrsc_sec, "srpt_flag")
logger.info("read config: reading 'srpt_flag' {}".format(self.srpt_flag))
except:
logger.error("read_config: '{}' missing from [{}] section".format("srpt_flag", nohrsc_sec))
error_flag = True
# jpl section
logger.info("[jpl]")
if error_jpl_sec_flag == False:
#- host_jpl
try:
self.host_jpl = config.get(jpl_sec, "host_jpl")
logger.info("read config: reading 'host_jpl' {}".format(self.host_jpl))
except:
logger.error("read_config: '{}' missing from [{}] section".format("host_jpl", jpl_sec))
error_flag = True
#- username_jpl
try:
self.username_jpl = config.get(jpl_sec, "username_jpl")
logger.info("read config: reading 'username_jpl' {}".format(self.username_jpl))
except:
logger.error("read_config: '{}' missing from [{}] section".format("username_jpl", jpl_sec))
error_flag = True
#- password_jpl
try:
self.password_jpl = config.get(jpl_sec, "password_jpl")
logger.info("read config: reading 'password_jpl'")
except:
logger.error("read_config: '{}' missing from [{}] section".format("password_jpl", jpl_sec))
error_flag = True
#- dir_http_modscag
try:
self.dir_http_modscag = config.get(jpl_sec, "dir_http_modscag")
logger.info("read config: reading 'dir_http_modscag' {}".format(self.dir_http_modscag))
except:
logger.error("read_config: '{}' missing from [{}] section".format("dir_http_modscag", jpl_sec))
error_flag = True
#- dir_http_moddrfs
try:
self.dir_http_moddrfs = config.get(jpl_sec, "dir_http_moddrfs")
logger.info("read config: reading 'dir_http_moddrfs' {}".format(self.dir_http_moddrfs))
except:
logger.error("read_config: '{}' missing from [{}] section".format("dir_http_moddrfs", jpl_sec))
error_flag = True
#- ssl_verify
try:
self.ssl_verify = config.get(jpl_sec, "ssl_verify")
self.ssl_verify = str2bool(self.ssl_verify)
logger.info("read config: reading 'ssl_verify' {}".format(self.ssl_verify))
except:
logger.error("read_config: '{}' missing from [{}] section".format("ssl_verify", jpl_sec))
error_flag = True
# swann section
logger.info("[swann]")
if error_swann_sec_flag == False:
#- host_ua
try:
self.host_ua = config.get(swann_sec, "host_ua")
logger.info("read config: reading 'host_ua' {}".format(self.host_ua))
except:
logger.error("read_config: '{}' missing from [{}] section".format("host_ua", swann_sec))
error_flag = True
#- dir_ftp_swann_arc
try:
self.dir_ftp_swann_arc = config.get(swann_sec, "dir_ftp_swann_arc")
logger.info("read config: reading 'dir_ftp_swann_arc' {}".format(self.dir_ftp_swann_arc))
except:
logger.error("read_config: '{}' missing from [{}] section".format("dir_ftp_swann_arc", swann_sec))
error_flag = True
#- dir_ftp_swann_rt
try:
self.dir_ftp_swann_rt = config.get(swann_sec, "dir_ftp_swann_rt")
logger.info("read config: reading 'dir_ftp_swann_rt' {}".format(self.dir_ftp_swann_rt))
except:
logger.error("read_config: '{}' missing from [{}] section".format("dir_ftp_swann_rt", swann_sec))
error_flag = True
if error_flag == True:
sys.exit()
def proc_config(self):
"""Read and parse config file
Parameters
---------
config_path : string
relative file path to config file
Returns
-------
None
Notes
-----
"""
# add gdal path
sys.path.append(self.gdal_path)
# add error checking
# open basin_poly
self.basin_poly = gpd.read_file(self.basin_poly_path)
# open basin points
if 'points' in self.output_type:
self.basin_points = gpd.read_file(self.basin_points_path)
# find bounding box for basin_poly
self.basin_poly_bbox_raw = self.basin_poly.total_bounds.tolist()
# if basin poly is not in EPSG:4326'convert bounding box coordinates
if self.proj != 'EPSG:4326':
transformer = Transformer.from_crs(self.proj, 'EPSG:4326')
xmin,ymin = transformer.transform(self.basin_poly_bbox_raw[0], self.basin_poly_bbox_raw[1])
xmax,ymax = transformer.transform(self.basin_poly_bbox_raw[2], self.basin_poly_bbox_raw[3])
self.basin_poly_bbox = [xmin,ymin,xmax,ymax]
elif self.proj == 'EPSG:4326':
self.basin_poly_bbox = self.basin_poly_bbox_raw
# find modis sinusoidal grid tiles overlapping basin_poly
self.singrd_tile_list = find_tiles(self.basin_poly_bbox)
def download_snodas(cfg, date_dn, overwrite_flag = False):
"""Download snodas zip
Parameters
---------
cfg ():
config_params Class object
date_dn: datetime
date
overwrite_flag: boolean
True : overwrite existing files
Returns
-------
None
"""
site_url = cfg.host_snodas + cfg.dir_ftp_snodas
dir_work_snodas = cfg.dir_work + 'snodas/'
zip_name = "SNODAS_" + ("{}.tar".format(date_dn.strftime('%Y%m%d')))
zip_url = site_url + date_dn.strftime('%Y') + "/" + date_dn.strftime('%m') + "_" \
+ date_dn.strftime('%b') + '/' + zip_name
zip_path = dir_work_snodas + zip_name
if not os.path.isdir(cfg.dir_work):
os.makedirs(cfg.dir_work)
if not os.path.isdir(dir_work_snodas):
os.makedirs(dir_work_snodas)
if os.path.isfile(zip_path) and overwrite_flag:
os.remove(zip_path)
if not os.path.isfile(zip_path):
logger.info("download_snodas: downloading {}".format(date_dn.strftime('%Y-%m-%d')))
logger.info("download_snodas: downloading from {}".format(zip_url))
logger.info("download_snodas: downloading to {}".format(zip_path))
try:
urllib.request.urlretrieve(zip_url, zip_path)
except IOError as e:
logger.error("download_snodas: error downloading {}".format(date_dn.strftime('%Y-%m-%d')))
logging.error(e)
def org_snodas(cfg, date_dn):
dir_work_snodas = cfg.dir_work + 'snodas/'
dir_arch_snodas = cfg.dir_arch + 'snodas/'
date_str = str(date_dn.strftime('%Y%m%d'))
chr_rm = [":"]
proj_str = ''.join(i for i in cfg.proj if not i in chr_rm)
crs_raw = 'EPSG:4326'
dtype_out = 'float64' # convert snodas raster from int16 to float64 to perform
# unit conversion.
basin_str = os.path.splitext(os.path.basename(cfg.basin_poly_path))[0]
# clean up working directory
for file in glob.glob("{0}/*.tif".format(dir_work_snodas)):
file_path = dir_work_snodas + file
try:
os.remove(file_path)
logger.info("org_snodas: removing {}".format(file_path))
except:
logger.error("org_snodas: error removing {}".format(file_path))
# untar files
zip_name = "SNODAS_" + ("{}.tar".format(date_dn.strftime('%Y%m%d')))
zip_path = dir_work_snodas + zip_name
zip_arch = dir_arch_snodas + zip_name
try:
tar_con = tarfile.open(zip_path)
tar_con.extractall(path=dir_work_snodas)
tar_con.close()
logger.info("org_snodas: untaring {0}".format(zip_path))
except:
logger.error("download_snodas: error untaring {0}".format(zip_path))
if str2bool(cfg.arch_flag):  # arch_flag is read from the config file as a string
os.rename(zip_path, zip_arch)
logger.info("org_snodas: archiving {0} to {1}".format(zip_path, zip_arch))
else:
os.remove(zip_path)
logger.info("org_snodas: removing {0}".format(zip_path))
# ungz files
for file_gz in os.listdir(dir_work_snodas):
if file_gz.endswith('.gz'):
file_path = dir_work_snodas + file_gz
file_out = os.path.splitext(file_path)[0]
# currently only keeping swe (1034) and snow depth (1036)
if '1034' in str(file_gz) or '1036' in str(file_gz):
try:
gz_con = gzip.GzipFile(file_path, 'rb')
gz_in = gz_con.read()
gz_con.close()
gz_out = open(file_out, 'wb')
gz_out.write(gz_in)
gz_out.close()
logger.info("org_snodas: unzipping {}".format(file_path))
os.remove(file_path)
logger.info("org_snodas: removing {}".format(file_path))
except:
logger.error("org_snodas: error unzipping {}".format(file_path))
else:
os.remove(file_path)
logger.info("org_snodas: removing {}".format(file_path))
# convert dat to bil
for file_dat in os.listdir(dir_work_snodas):
if file_dat.endswith('.dat'):
try:
file_path = dir_work_snodas + file_dat
file_out = file_path.replace('.dat', '.bil')
os.rename(file_path, file_out)
logger.info("org_snodas: converting {} to {}".format(file_path, file_out))
except:
logger.error("org_snodas: error converting {} to {}".format(file_path, file_out))
# create header file - ADD NSIDC LINK
for file_bil in os.listdir(dir_work_snodas):
if file_bil.endswith('.bil'):
try:
file_path = dir_work_snodas + file_bil
file_out = file_path.replace('.bil', '.hdr')
file_con = open(file_out, 'w')
file_con.write('units dd\n')
file_con.write('nbands 1\n')
file_con.write('nrows 3351\n')
file_con.write('ncols 6935\n')
file_con.write('nbits 16\n')
file_con.write('pixeltype signedint\n')
file_con.write('byteorder M\n')
file_con.write('layout bil\n')
file_con.write('ulxmap -124.729583333333\n')
file_con.write('ulymap 52.8704166666666\n')
file_con.write('xdim 0.00833333333333333\n')
file_con.write('ydim 0.00833333333333333\n')
file_con.close()
logger.info("org_snodas: creating header file {}".format(file_out))
except:
logger.error("org_snodas: error creating header file {}".format(file_out))
# convert bil to geotif
for file_bil in os.listdir(dir_work_snodas):
if file_bil.endswith('.bil'):
try:
file_path = dir_work_snodas + file_bil
file_out = file_path.replace('.bil', '.tif')
gdal.Translate(file_out, file_path, format = 'GTiff')
logger.info("org_snodas: converting {} to {}".format(file_path, file_out))
except:
logger.error("org_snodas: error converting {} to {}".format(file_path, file_out))
# remove unneeded files
for file in os.listdir(dir_work_snodas):
if not file.endswith('.tif'):
file_path = dir_work_snodas + file
try:
os.remove(file_path)
logger.info("org_snodas: removing {}".format(file_path))
except:
logger.error("org_snodas: error removing {}".format(file_path))
# reproject geotif
tif_list = glob.glob("{0}/*{1}{2}*.tif".format(dir_work_snodas, date_str, "05"))
for tif in tif_list:
tif_out = os.path.splitext(tif)[0] + "_" + proj_str + ".tif"
try:
gdal_raster_reproject(tif, tif_out, cfg.proj, crs_raw)
# rasterio_raster_reproject(tif, tif_out, cfg.proj)
logger.info("org_snodas: reprojecting {} to {}".format(tif, tif_out))
except:
logger.error("org_snodas: error reprojecting {} to {}".format(tif, tif_out))
if not tif_list:
logger.error("org_snodas: error finding tifs to reproject")
# clip to basin polygon
tif_list = glob.glob("{0}/*{1}{2}*{3}.tif".format(dir_work_snodas, date_str, "05", proj_str))
for tif in tif_list:
tif_out = os.path.splitext(tif)[0] + "_" + basin_str + ".tif"
try:
gdal_raster_clip(cfg.basin_poly_path, tif, tif_out, cfg.proj, cfg.proj, -9999)
logger.info("org_snodas: clipping {} to {}".format(tif, tif_out))
except:
logger.error("org_snodas: error clipping {} to {}".format(tif, tif_out))
if not tif_list:
logger.error("org_snodas: error finding tifs to clip")
# convert units
if cfg.unit_sys == 'english':
calc_exp = '(+ 1 (* .0393701 (read 1)))' # inches
if cfg.unit_sys == 'metric':
calc_exp = '(read 1)' # keep units in mm
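# Worked example of the rio_calc expressions above as written (raw SNODAS SWE/depth values are mm):
#   metric:  (read 1)                     -> a raw value of 100 stays 100 (mm)
#   english: (+ 1 (* .0393701 (read 1)))  -> a raw value of 100 becomes 1 + 3.937 = 4.937
# (note the english expression adds a +1 offset on top of the mm-to-inch factor, as written above)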
# SWE
tif_list = glob.glob("{0}/*{1}*{2}{3}*{4}*{5}.tif".format(dir_work_snodas, '1034', date_str, "05", proj_str, basin_str))
for tif in tif_list:
tif_int = os.path.splitext(tif)[0] + "_" + dtype_out + ".tif"
tif_out = cfg.dir_db + "snodas_swe_" + date_str + "_" + basin_str + "_" + cfg.unit_sys + ".tif"
try:
rio_dtype_conversion(tif, tif_int, dtype_out)
rio_calc(tif_int, tif_out, calc_exp)
logger.info("org_snodas: calc {} {} to {}".format(calc_exp, tif, tif_out))
except:
logger.error("org_snodas: error calc {} to {}".format(tif, tif_out))
if not tif_list:
logger.error("org_snodas: error finding tifs to calc")
# Snow Depth
tif_list = glob.glob("{0}/*{1}*{2}{3}*{4}*{5}.tif".format(dir_work_snodas, '1036', date_str, "05", proj_str, basin_str))
for tif in tif_list:
tif_int = os.path.splitext(tif)[0] + "_" + dtype_out + ".tif"
tif_out = cfg.dir_db + "snodas_snowdepth_" + date_str + "_" + basin_str + "_" + cfg.unit_sys + ".tif"
try:
rio_dtype_conversion(tif, tif_int, dtype_out)
rio_calc(tif_int, tif_out, calc_exp)
logger.info("org_snodas: calc {} {} to {}".format(calc_exp, tif, tif_out))
except:
logger.error("org_snodas: error calc {} to {}".format(tif, tif_out))
if not tif_list:
logger.error("org_snodas: error finding tifs to calc")
# swe : 1034 [m *1000]
# snow depth : 1036 [m *1000]
# snow melt runoff at base of snowpack : 1044 [m *100,000]
# sublimation from snowpack : 1050 [m *100,000]
# sublimation of blowing snow: 1039 [m *100,000]
# solid precipitation: 1025(v code = IL01) [kg m-2 *10]
# liquid precipitation: 1025(v code = IL00) [kg m-2 *10]
# snowpack average temperature: 1038 [K *1]
# calculate zonal statistics and export data
tif_list = glob.glob("{0}/{1}*{2}*{3}*{4}.tif".format(cfg.dir_db, 'snodas', date_str, basin_str, cfg.unit_sys))
for tif in tif_list:
file_meta = os.path.basename(tif).replace('.', '_').split('_')
if 'poly' in cfg.output_type:
try:
tif_stats = zonal_stats(cfg.basin_poly_path, tif, stats=['min', 'max', 'median', 'mean'], all_touched=True)
tif_stats_df = pd.DataFrame(tif_stats)
logger.info("org_snodas: computing zonal statistics")
except:
logger.error("org_snodas: error computing poly zonal statistics")
try:
frames = [cfg.basin_poly, tif_stats_df]
basin_poly_stats = pd.concat(frames, axis=1)
logger.info("org_snodas: merging poly zonal statistics")
except:
logger.error("org_snodas: error merging zonal statistics")
if 'geojson' in cfg.output_format:
try:
geojson_out = os.path.splitext(tif)[0] + "_poly.geojson"
basin_poly_stats.to_file(geojson_out, driver='GeoJSON')
logger.info("org_snodas: writing {0}".format(geojson_out))
except:
logger.error("org_snodas: error writing {0}".format(geojson_out))
if 'csv' in cfg.output_format:
try:
csv_out = os.path.splitext(tif)[0] + "_poly.csv"
basin_poly_stats_df = pd.DataFrame(basin_poly_stats.drop(columns = 'geometry'))
basin_poly_stats_df.insert(0, 'Source', file_meta[0])
basin_poly_stats_df.insert(0, 'Type', file_meta[1])
basin_poly_stats_df.insert(0, 'Date', dt.datetime.strptime(file_meta[2], '%Y%m%d').strftime('%Y-%m-%d %H:%M'))
basin_poly_stats_df.to_csv(csv_out, index=False)
logger.info("org_snodas: writing {0}".format(csv_out))
except:
logger.error("org_snodas: error writing {0}".format(csv_out))
if 'points' in cfg.output_type:
try:
tif_stats = zonal_stats(cfg.basin_points_path, tif, stats=['min', 'max', 'median', 'mean'], all_touched=True)
tif_stats_df = pd.DataFrame(tif_stats)
logger.info("org_snodas: computing points zonal statistics")
except:
logger.error("org_snodas: error computing points zonal statistics")
try:
frames = [cfg.basin_points, tif_stats_df]
basin_points_stats = pd.concat(frames, axis=1)
logger.info("org_snodas: merging zonal statistics")
except:
logger.error("org_snodas: error merging zonal statistics")
if 'geojson' in cfg.output_format:
try:
geojson_out = os.path.splitext(tif)[0] + "_points.geojson"
basin_points_stats.to_file(geojson_out, driver='GeoJSON')
logger.info("org_snodas: writing {0}".format(geojson_out))
except:
logger.error("org_snodas: error writing {0}".format(geojson_out))
if 'csv' in cfg.output_format:
try:
csv_out = os.path.splitext(tif)[0] + "_points.csv"
basin_points_stats_df = pd.DataFrame(basin_points_stats.drop(columns = 'geometry'))
basin_points_stats_df.insert(0, 'Source', file_meta[0])
basin_points_stats_df.insert(0, 'Type', file_meta[1])
basin_points_stats_df.insert(0, 'Date', dt.datetime.strptime(file_meta[2], '%Y%m%d').strftime('%Y-%m-%d %H:%M'))
basin_points_stats_df.to_csv(csv_out, index=False)
logger.info("org_snodas: writing {0}".format(csv_out, index=False))
except:
logger.error("org_snodas: error writing {0}".format(csv_out))
# clean up working directory
for file in os.listdir(dir_work_snodas):
file_path = dir_work_snodas + file
try:
os.remove(file_path)
logger.info("org_snodas: removing {}".format(file_path))
except:
logger.error("org_snodas: error removing {}".format(file_path))
def download_srpt(cfg, date_dn, overwrite_flag = False):
"""Download snow reports from nohrsc
Parameters
---------
cfg ():
config_params Class object
date_dn: datetime
date
overwrite_flag: boolean
True : overwrite existing files
Returns
-------
None
"""
site_url = cfg.host_nohrsc + cfg.dir_http_srpt + date_dn.strftime('%Y%m%d')
dir_work_d = cfg.dir_work + 'srpt/'
if not os.path.isdir(dir_work_d):
os.makedirs(dir_work_d)
# snow reports (stations)
kmz_srpt_name = "snow_reporters_" + date_dn.strftime('%Y%m%d') + ".kmz"
kmz_srpt_url = site_url + "/" + kmz_srpt_name
kmz_srpt_path = dir_work_d + kmz_srpt_name
if os.path.isfile(kmz_srpt_path) and overwrite_flag:
os.remove(kmz_srpt_path)
if os.path.isfile(kmz_srpt_path) and overwrite_flag == False:
logger.info("download_srpt: skipping {} {}, {} exists".format('snow reports', date_dn.strftime('%Y-%m-%d'), kmz_srpt_path))
if not os.path.isfile(kmz_srpt_path):
logger.info("download_srpt: downloading {} {}".format('snow reports', date_dn.strftime('%Y-%m-%d')))
logger.info("download_srpt: downloading from {}".format(kmz_srpt_url))
logger.info("download_srpt: downloading to {}".format(kmz_srpt_path))
try:
urllib.request.urlretrieve(kmz_srpt_url, kmz_srpt_path)
except IOError as e:
logger.error("download_srpt: error downloading {} {}".format('snow reports', date_dn.strftime('%Y-%m-%d')))
logging.error(e)
def org_srpt(cfg, date_dn):
"""Downloads daily snow reporters KMZ from NOHRSC
formats to geoJSON and csv and clips to poly extents
Parameters
---------
cfg ():
config_params Class object
date_dn: datetime
date
Returns
-------
None
Notes
-----
Writes out geoJSON and csv to file
"""
# TODO - ADD ERROR CHECKING
# OPTION TO SPECIFY AN ALTERNATE BOUNDARY?
# https://stackoverflow.com/questions/55586376/how-to-obtain-element-values-from-a-kml-by-using-lmxl
basin_str = os.path.splitext(os.path.basename(cfg.basin_poly_path))[0]
dir_work_srpt = cfg.dir_work + 'srpt/'
# snow reports (stations)
kmz_srpt_name = "snow_reporters_" + date_dn.strftime('%Y%m%d') + ".kmz"
kmz_srpt_path = dir_work_srpt + kmz_srpt_name
with zipfile.ZipFile(kmz_srpt_path,"r") as zip_ref:
zip_ref.extractall(dir_work_srpt)
kml_path = kmz_srpt_path.replace('.kmz', '.kml')
gpd.io.file.fiona.drvsupport.supported_drivers['KML'] = 'rw'
srpt_gpd = gpd.GeoDataFrame()
# iterate over layers
for layer in fiona.listlayers(kml_path):
s = gpd.read_file(kml_path, driver='KML', layer=layer)
srpt_gpd = srpt_gpd.append(s, ignore_index=True)
ns = {"kml": "http://earth.google.com/kml/2.0"}
# parse kmlfile
tree = etree.parse(kml_path)
# read name - character
name_arr = []
for simple_data in tree.xpath("/kml:kml/kml:Document/kml:Folder/kml:Placemark/kml:name", namespaces = ns):
name_arr.append(simple_data.text)
name_pd = pd.Series(name_arr, name = 'Name')
# read beginDate - Datetime
beginDate_arr = []
for simple_data in tree.xpath("/kml:kml/kml:Document/kml:Folder/kml:Placemark/kml:ExtendedData/kml:Data[@name='beginDate']/kml:value", namespaces = ns):
beginDate_arr.append(simple_data.text)
beginDate_pd = pd.Series(beginDate_arr, name = 'beginDate')
beginDate_pd = pd.to_datetime(beginDate_pd, format='%Y-%m-%d', errors='coerce')
# read endDate - Datetime
endDate_arr = []
for simple_data in tree.xpath("/kml:kml/kml:Document/kml:Folder/kml:Placemark/kml:ExtendedData/kml:Data[@name='endDate']/kml:value", namespaces = ns):
endDate_arr.append(simple_data.text)
endDate_pd = pd.Series(endDate_arr, name = 'endDate')
endDate_pd = pd.to_datetime(endDate_pd, format='%Y-%m-%d', errors='coerce')
# read type - character
type_arr = []
for simple_data in tree.xpath("/kml:kml/kml:Document/kml:Folder/kml:Placemark/kml:ExtendedData/kml:Data[@name='type']/kml:value", namespaces = ns):
type_arr.append(simple_data.text)
type_pd = pd.Series(type_arr, name = 'type')
# read elevationMeters - numeric
elevationMeters_arr = []
for simple_data in tree.xpath("/kml:kml/kml:Document/kml:Folder/kml:Placemark/kml:ExtendedData/kml:Data[@name='elevationMeters']/kml:value", namespaces = ns):
elevationMeters_arr.append(simple_data.text)
elevationMeters_pd = pd.Series(elevationMeters_arr, name = 'elevationMeters')
elevationMeters_pd = pd.to_numeric(elevationMeters_pd, errors='coerce')
# read latestSWEdateUTC - Datetime
latestSWEdateUTC_arr = []
for simple_data in tree.xpath("/kml:kml/kml:Document/kml:Folder/kml:Placemark/kml:ExtendedData/kml:Data[@name='latestSWEdateUTC']/kml:value", namespaces = ns):
latestSWEdateUTC_arr.append(simple_data.text)
latestSWEdateUTC_pd = pd.Series(latestSWEdateUTC_arr, name = 'latestSWEdateUTC')
latestSWEdateUTC_pd = pd.to_datetime(latestSWEdateUTC_pd, format='%Y-%m-%d', errors='coerce')
# read latestSWEcm - numeric
latestSWEcm_arr = []
for simple_data in tree.xpath("/kml:kml/kml:Document/kml:Folder/kml:Placemark/kml:ExtendedData/kml:Data[@name='latestSWEcm']/kml:value", namespaces = ns):
latestSWEcm_arr.append(simple_data.text)
latestSWEcm_pd = pd.Series(latestSWEcm_arr, name = 'latestSWEcm')
latestSWEcm_pd = pd.to_numeric(latestSWEcm_pd, errors='coerce')
# read latestDepthDateUTC - Datetime
latestDepthDateUTC_arr = []
for simple_data in tree.xpath("/kml:kml/kml:Document/kml:Folder/kml:Placemark/kml:ExtendedData/kml:Data[@name='latestDepthDateUTC']/kml:value", namespaces = ns):
latestDepthDateUTC_arr.append(simple_data.text)
latestDepthDateUTC_pd = pd.Series(latestDepthDateUTC_arr, name = 'latestDepthDateUTC')
latestDepthDateUTC_pd = pd.to_datetime(latestDepthDateUTC_pd, format='%Y-%m-%d', errors='coerce')
# read latestDepthCm - numeric
latestDepthCm_arr = []
for simple_data in tree.xpath("/kml:kml/kml:Document/kml:Folder/kml:Placemark/kml:ExtendedData/kml:Data[@name='latestDepthCm']/kml:value", namespaces = ns):
latestDepthCm_arr.append(simple_data.text)
latestDepthCm_pd = pd.Series(latestDepthCm_arr, name = 'latestDepthCm')
latestDepthCm_pd = pd.to_numeric(latestDepthCm_pd, errors='coerce')
srpt_pd = pd.concat([name_pd, beginDate_pd, endDate_pd, type_pd, elevationMeters_pd, latestSWEdateUTC_pd,
latestSWEcm_pd, latestDepthDateUTC_pd, latestDepthCm_pd], axis = 1)
srpt_gpd = srpt_gpd.set_index('Name').join(srpt_pd.set_index('Name'))
# unit conversion
if cfg.unit_sys == 'english':
srpt_gpd.loc[:, 'elevationFeet'] = srpt_gpd.loc[:, 'elevationMeters'].values * 3.28084
srpt_gpd.loc[:, 'latestSWEin'] = srpt_gpd.loc[:, 'latestSWEcm'].values * 0.393701
srpt_gpd.loc[:, 'latestDepthin'] = srpt_gpd.loc[:, 'latestDepthCm'].values * 0.393701
srpt_gpd = srpt_gpd.drop(columns=['elevationMeters', 'latestSWEcm', 'latestDepthCm'])
# clip to basin
srpt_gpd_clip = gpd.clip(srpt_gpd.to_crs(cfg.proj), cfg.basin_poly, keep_geom_type = False)
# write out data
geojson_out = cfg.dir_db + 'snowreporters_obs_' + date_dn.strftime('%Y%m%d') + '_' + basin_str + '.geojson'
srpt_gpd_clip.to_file(geojson_out, driver = 'GeoJSON')
csv_out = cfg.dir_db + 'snowreporters_obs_' + date_dn.strftime('%Y%m%d') + '_' + basin_str + '.csv'
srpt_gpd_clip_df = pd.DataFrame(srpt_gpd_clip.drop(columns = 'geometry'))
srpt_gpd_clip_df.insert(1, 'Source', 'NOHRSCSnowReporters')
srpt_gpd_clip_df.to_csv(csv_out, index=False)
# clean up working directory
for file in os.listdir(dir_work_srpt):
file_path = dir_work_srpt + file
try:
os.remove(file_path)
logger.info("org_srpt: removing {}".format(file_path))
except:
logger.error("org_srpt: error removing {}".format(file_path))
def download_nsa(cfg, date_dn, overwrite_flag = False):
"""Download national snow analysis from nohrsc
Parameters
---------
cfg ():
config_params Class object
date_dn: datetime
date
overwrite_flag: boolean
True : overwrite existing files
Returns
-------
None
Notes
-----
Currently just downloads the 24hr product. 6hr, 48hr, 72hr, and 60day
products are also available.
24hr product is also available twice a day, 00 and 12.
"""
site_url = cfg.host_nohrsc + cfg.dir_http_nsa + date_dn.strftime('%Y%m')
dir_work_d = cfg.dir_work + 'nsa/'
if not os.path.isdir(dir_work_d):
os.makedirs(dir_work_d)
# snow covered area (sca)
tif_24hr_name = "sfav2_CONUS_24h_" + date_dn.strftime('%Y%m%d') + "00.tif"
tif_24hr_url = site_url + "/" + tif_24hr_name
tif_24hr_path = dir_work_d + tif_24hr_name
if os.path.isfile(tif_24hr_path) and overwrite_flag:
os.remove(tif_24hr_path)
if os.path.isfile(tif_24hr_path) and overwrite_flag == False:
logger.info("download_nsa: skipping {} {}, {} exists".format('24hr', date_dn.strftime('%Y-%m-%d'), tif_24hr_path))
if not os.path.isfile(tif_24hr_path):
logger.info("download_nsa: downloading {} {}".format('24hr', date_dn.strftime('%Y-%m-%d')))
logger.info("download_nsa: downloading from {}".format(tif_24hr_url))
logger.info("download_nsa: downloading to {}".format(tif_24hr_path))
try:
urllib.request.urlretrieve(tif_24hr_url, tif_24hr_path)
except IOError as e:
logger.error("download_nsa: error downloading {} {}".format('24hr', date_dn.strftime('%Y-%m-%d')))
logging.error(e)
def download_modscag(cfg, date_dn, overwrite_flag = False):
"""Download modscag from JPL
Parameters
---------
cfg ():
config_params Class object
date_dn: datetime
date
overwrite_flag: boolean
True : overwrite existing files
Returns
-------
None
Notes
-----
Currently uses JPL data archive. May be moved in future.
geotif specs - # work on this
235 - cloud cover, guess
250 - nodata, guess
"""
site_url = cfg.host_jpl + cfg.dir_http_modscag + date_dn.strftime('%Y') + "/" + date_dn.strftime('%j')
r = requests.get(site_url, auth=HTTPDigestAuth(cfg.username_jpl, cfg.password_jpl), verify=cfg.ssl_verify)
if r.status_code == 200:
dir_work_d = cfg.dir_work + 'modscag/'
if not os.path.isdir(dir_work_d):
os.makedirs(dir_work_d)
# loop through modis sinusoidal tiles
for tile in cfg.singrd_tile_list:
# snow fraction (fsca)
tif_fsca_name = "MOD09GA.A" + date_dn.strftime('%Y') + date_dn.strftime('%j') + "." + tile + ".006.NRT.snow_fraction.tif"
tif_fsca_url = site_url + "/" + tif_fsca_name
tif_fsca_path = dir_work_d + tif_fsca_name
if os.path.isfile(tif_fsca_path) and overwrite_flag:
os.remove(tif_fsca_path)
if os.path.isfile(tif_fsca_path) and overwrite_flag == False:
logger.info("download_modscag: skipping {} {}, {} exists".format(date_dn.strftime('%Y-%m-%d'), tile, tif_fsca_path))
if not os.path.isfile(tif_fsca_path):
logger.info("download_modscag: downloading {} {} {}".format('snow_fraction', date_dn.strftime('%Y-%m-%d'), tile))
logger.info("download_modscag: downloading from {}".format(tif_fsca_url))
logger.info("download_modscag: downloading to {}".format(tif_fsca_path))
try:
r = requests.get(tif_fsca_url, auth = HTTPDigestAuth(cfg.username_jpl, cfg.password_jpl), verify=cfg.ssl_verify)
if r.status_code == 200:
with open(tif_fsca_path, 'wb') as rfile:
rfile.write(r.content)
else:
logger.error("download_modscag: error downloading {} {} {}".format('snow_fraction', date_dn.strftime('%Y-%m-%d'), tile))
except IOError as e:
logger.error("download_modscag: error downloading {} {} {}".format(date_dn.strftime('snow_fraction', '%Y-%m-%d'), tile))
logging.error(e)
# vegetation fraction (vfrac)
tif_vfrac_name = "MOD09GA.A" + date_dn.strftime('%Y') + date_dn.strftime('%j') + "." + tile + ".006.NRT.vegetation_fraction.tif"
tif_vfrac_url = site_url + "/" + tif_vfrac_name
tif_vfrac_path = dir_work_d + tif_vfrac_name
if os.path.isfile(tif_vfrac_path) and overwrite_flag:
os.remove(tif_vfrac_path)
if os.path.isfile(tif_vfrac_path) and overwrite_flag == False:
logger.info("download_modscag: skipping {} {}, {} exists".format(date_dn.strftime('%Y-%m-%d'), tile, tif_vfrac_path))
if not os.path.isfile(tif_vfrac_path):
logger.info("download_modscag: downloading {} {} {}".format('vegetation_fraction', date_dn.strftime('%Y-%m-%d'), tile))
logger.info("download_modscag: downloading from {}".format(tif_vfrac_url))
logger.info("download_modscag: downloading to {}".format(tif_vfrac_path))
try:
r = requests.get(tif_vfrac_url, auth = HTTPDigestAuth(cfg.username_jpl, cfg.password_jpl), verify=cfg.ssl_verify)
if r.status_code == 200:
with open(tif_vfrac_path, 'wb') as rfile:
rfile.write(r.content)
else:
logger.error("download_modscag: error downloading {} {} {}".format('vegetation_fraction', date_dn.strftime('%Y-%m-%d'), tile))
except IOError as e:
logger.error("download_modscag: error downloading {} {} {}".format(date_dn.strftime('vegetation_fraction', '%Y-%m-%d'), tile))
logging.error(e)
else:
logger.error("download_modscag: error connecting {}".format(site_url))
def org_modscag(cfg, date_dn):
""" Organize downloaded modscag data
Parameters
---------
cfg ():
config_params Class object
date_dn: datetime
date
Returns
-------
None
Notes
-----
"""
# Proj4js.defs["SR-ORG:6974"] = "+proj=sinu +lon_0=0 +x_0=0 +y_0=0 +ellps=WGS84 +datum=WGS84 +units=m +no_defs";
dir_work_modscag = cfg.dir_work + 'modscag/'
dir_arch_modscag = cfg.dir_arch + 'modscag/'
date_str = str(date_dn.strftime('%Y%m%d'))
chr_rm = [":"]
proj_str = ''.join(i for i in cfg.proj if not i in chr_rm)
basin_str = os.path.splitext(os.path.basename(cfg.basin_poly_path))[0]
# merge and reproject snow fraction (fsca) files
tif_list_fsca = glob.glob("{0}/*{1}*{2}.tif".format(dir_work_modscag, date_dn.strftime('%Y%j'), "snow_fraction"))
tif_out_fsca = dir_work_modscag + 'MOD09GA_' + date_str + '_{0}_fsca.tif'
try:
rasterio_raster_merge(tif_list_fsca, tif_out_fsca.format("ext"))
logger.info("org_modscag: merging {} {} tiles".format(date_dn.strftime('%Y-%m-%d'), 'snow_fraction'))
except:
logger.error("org_modscag: error merging {} {} tiles".format(date_dn.strftime('%Y-%m-%d'), 'snow_fraction'))
try:
rasterio_raster_reproject(tif_out_fsca.format("ext"), tif_out_fsca.format(proj_str), cfg.proj, nodata=250)
logger.info("org_modscag: reprojecting {} to {}".format('snow_fraction', date_dn.strftime('%Y-%m-%d')))
except:
logger.error("org_modscag: error reprojecting {} to {}".format(tif_out_fsca.format("ext"), tif_out_fsca.format(proj_str), cfg.proj))
# merge and reproject vegetation fraction files (vfrac)
tif_list_vfrac = glob.glob("{0}/*{1}*{2}.tif".format(dir_work_modscag, date_dn.strftime('%Y%j'), "vegetation_fraction"))
tif_out_vfrac = dir_work_modscag + 'MOD09GA_' + date_str + '_{0}_vfrac.tif'
try:
rasterio_raster_merge(tif_list_vfrac, tif_out_vfrac.format("ext"))
logger.info("org_modscag: merging {} {} tiles".format(date_dn.strftime('%Y-%m-%d'), 'vegetation_fraction'))
except:
logger.error("org_modscag: error merging {} {} tiles".format(date_dn.strftime('%Y-%m-%d'), 'vegetation_fraction'))
try:
rasterio_raster_reproject(tif_out_vfrac.format("ext"), tif_out_vfrac.format(proj_str), cfg.proj, nodata=250)
logger.info("org_modscag: reprojecting {} to {}".format('vegetation_fraction', date_dn.strftime('%Y-%m-%d')))
except:
logger.error("org_modscag: error reprojecting {} to {}".format(tif_out_vfrac.format("ext"), tif_out_vfrac.format(proj_str), cfg.proj))
# clip to basin polygon (fsca)
tif_list = glob.glob("{0}/*{1}*{2}*{3}.tif".format(dir_work_modscag, date_str, proj_str, "fsca"))
for tif in tif_list:
tif_out = dir_work_modscag + "modscag_fsca_" + date_str + "_" + basin_str + ".tif"
try:
gdal_raster_clip(cfg.basin_poly_path, tif, tif_out, cfg.proj, cfg.proj, 250)
logger.info("org_modscag: clipping {} to {}".format(tif, tif_out))
except:
logger.error("org_modscag: error clipping {} to {}".format(tif, tif_out))
if not tif_list:
logger.error("org_modscag: error finding tifs to clip")
# clip to basin polygon (vfrac)
tif_list = glob.glob("{0}/*{1}*{2}*{3}.tif".format(dir_work_modscag, date_str, proj_str, "vfrac"))
for tif in tif_list:
tif_out = dir_work_modscag + "modscag_vfrac_" + date_str + "_" + basin_str + ".tif"
try:
gdal_raster_clip(cfg.basin_poly_path, tif, tif_out, cfg.proj, cfg.proj, 250)
logger.info("org_modscag: clipping {} to {}".format(tif, tif_out))
except:
logger.error("org_modscag: error clipping {} to {}".format(tif, tif_out))
if not tif_list:
logger.error("org_modscag: error finding tifs to clip")
# set filenames
file_fsca = "modscag_fsca_" + date_str + "_" + basin_str + ".tif"
file_vfrac = "modscag_vfrac_" + date_str + "_" + basin_str + ".tif"
file_fscavegcor = "modscag_fscavegcor_" + date_str + "_" + basin_str + ".tif"
# open connection to rasters
rast_fsca = rasterio.open(dir_work_modscag + file_fsca)
rast_vfrac = rasterio.open(dir_work_modscag + file_vfrac)
# read in raster data to np array
fsca = rast_fsca.read(1)
# set pixels > 100 to nodata value (250)
fsca_masked = np.where(fsca>100, 250, fsca)
# read in raster data to np array
vfrac = rast_vfrac.read(1)
# set pixels > 100 to nodata value (250)
vfrac_masked = np.where(vfrac>100, 250, vfrac)
# write out masked files (fsca)
with rasterio.Env():
# Write an array as a raster band to a new 8-bit file. For
# the new file's profile, we start with the profile of the source
profile = rast_fsca.profile
profile.update(
dtype=rasterio.uint8,
count=1)
with rasterio.open(cfg.dir_db + file_fsca, 'w', **profile) as dst:
dst.write(fsca_masked,indexes=1)
# write out masked files (vfrac)
with rasterio.Env():
# Write an array as a raster band to a new 8-bit file. For
# the new file's profile, we start with the profile of the source
profile = rast_vfrac.profile
profile.update(
dtype=rasterio.uint8,
count=1)
with rasterio.open(cfg.dir_db + file_vfrac, 'w', **profile) as dst:
dst.write(vfrac_masked,indexes=1)
# fsca with vegetation correction
vfrac_calc = np.where(vfrac_masked==100, 99, vfrac_masked)
fsca_vegcor = fsca / (100 - vfrac_calc) * 100
fsca_vegcor_masked = np.where(fsca>100, 250, fsca_vegcor)
# write fsca with vegetation correction
with rasterio.Env():
# Write an array as a raster band to a new float64 file. For
# the new file's profile, we start with the profile of the source
profile = rast_vfrac.profile
profile.update(
dtype=rasterio.float64,
count=1)
with rasterio.open(cfg.dir_db + file_fscavegcor, 'w', **profile) as dst:
dst.write(fsca_vegcor_masked,indexes=1)
# close datasets
rast_fsca.close()
rast_vfrac.close()
# calculate zonal statistics and export data
tif_list = glob.glob("{0}/{1}*{2}*{3}*.tif".format(cfg.dir_db, 'modscag', date_str, basin_str))
for tif in tif_list:
file_meta = os.path.basename(tif).replace('.', '_').split('_')
if 'poly' in cfg.output_type:
try:
tif_stats = zonal_stats(cfg.basin_poly_path, tif, stats=['median', 'mean'], all_touched=True)
tif_stats_df = pd.DataFrame(tif_stats)
logger.info("org_modscag: computing zonal statistics")
except:
logger.error("org_modscag: error computing poly zonal statistics")
try:
frames = [cfg.basin_poly, tif_stats_df]
basin_poly_stats = pd.concat(frames, axis=1)
logger.info("org_modscag: merging poly zonal statistics")
except:
logger.error("org_modscag: error merging zonal statistics")
if 'geojson' in cfg.output_format:
try:
geojson_out = os.path.splitext(tif)[0] + "_poly.geojson"
basin_poly_stats.to_file(geojson_out, driver='GeoJSON')
logger.info("org_modscag: writing {0}".format(geojson_out))
except:
logger.error("org_modscag: error writing {0}".format(geojson_out))
if 'csv' in cfg.output_format:
try:
csv_out = os.path.splitext(tif)[0] + "_poly.csv"
basin_poly_stats_df = pd.DataFrame(basin_poly_stats.drop(columns = 'geometry'))
basin_poly_stats_df.insert(0, 'Source', file_meta[0])
basin_poly_stats_df.insert(0, 'Type', file_meta[1])
basin_poly_stats_df.insert(0, 'Date', dt.datetime.strptime(file_meta[2], '%Y%m%d').strftime('%Y-%m-%d %H:%M'))
basin_poly_stats_df.to_csv(csv_out, index=False)
logger.info("org_modscag: writing {0}".format(csv_out))
except:
logger.error("org_modscag: error writing {0}".format(csv_out))
if 'points' in cfg.output_type:
try:
tif_stats = zonal_stats(cfg.basin_points_path, tif, stats=['median', 'mean'], all_touched=True)
tif_stats_df = pd.DataFrame(tif_stats)
logger.info("org_modscag: computing points zonal statistics")
except:
logger.error("org_modscag: error computing points zonal statistics")
try:
frames = [cfg.basin_points, tif_stats_df]
basin_points_stats = pd.concat(frames, axis=1)
logger.info("org_modscag: merging zonal statistics")
except:
logger.error("org_modscag: error merging zonal statistics")
if 'geojson' in cfg.output_format:
try:
geojson_out = os.path.splitext(tif)[0] + "_points.geojson"
basin_points_stats.to_file(geojson_out, driver='GeoJSON')
logger.info("org_snodas: writing {0}".format(geojson_out))
except:
logger.error("org_snodas: error writing {0}".format(geojson_out))
if 'csv' in cfg.output_format:
try:
csv_out = os.path.splitext(tif)[0] + "_points.csv"
basin_points_stats_df = pd.DataFrame(basin_points_stats.drop(columns = 'geometry'))
basin_points_stats_df.insert(0, 'Source', file_meta[0])
basin_points_stats_df.insert(0, 'Type', file_meta[1])
basin_points_stats_df.insert(0, 'Date', dt.datetime.strptime(file_meta[2], '%Y%m%d').strftime('%Y-%m-%d %H:%M'))
basin_points_stats_df.to_csv(csv_out, index=False)
logger.info("org_modscag: writing {0}".format(csv_out))
except:
logger.error("org_modscag: error writing {0}".format(csv_out))
# clean up working directory
for file in os.listdir(dir_work_modscag):
file_path = dir_work_modscag + file
try:
os.remove(file_path)
logger.info("org_modscag: removing {}".format(file_path))
except:
logger.error("org_modscag: error removing {}".format(file_path))
def download_moddrfs(cfg, date_dn, overwrite_flag = False):
"""Download moddrfs from JPL
Parameters
---------
cfg ():
config_params Class object
date_dn: datetime
date
overwrite_flag: boolean
True : overwrite existing files
Returns
-------
None
Notes
-----
Currently uses JPL data archive. May be moved in future.
geotif specs - # work on this
2350 - cloud cover, guess
2500 - nodata, guess
*.deltavis product also available - unsure what this is
"""
site_url = cfg.host_jpl + cfg.dir_http_moddrfs + date_dn.strftime('%Y') + "/" + date_dn.strftime('%j')
r = requests.get(site_url, auth=HTTPDigestAuth(cfg.username_jpl, cfg.password_jpl), verify=cfg.ssl_verify)
if r.status_code == 200:
dir_work_d = cfg.dir_work + 'moddrfs/'
if not os.path.isdir(dir_work_d):
os.makedirs(dir_work_d)
# loop through modis sinusoidal tiles
for tile in cfg.singrd_tile_list:
# forcing (forc)
tif_forc_name = "MOD09GA.A" + date_dn.strftime('%Y') + date_dn.strftime('%j') + "." + tile + ".006.NRT.forcing.tif"
tif_forc_url = site_url + "/" + tif_forc_name
tif_forc_path = dir_work_d + tif_forc_name
if os.path.isfile(tif_forc_path) and overwrite_flag:
os.remove(tif_forc_path)
if os.path.isfile(tif_forc_path) and overwrite_flag == False:
logger.info("download_moddrfs: skipping {} {}, {} exists".format(date_dn.strftime('%Y-%m-%d'), tile, tif_forc_path))
if not os.path.isfile(tif_forc_path):
logger.info("download_moddrfs: downloading {} {} {}".format('forcing', date_dn.strftime('%Y-%m-%d'), tile))
logger.info("download_moddrfs: downloading from {}".format(tif_forc_url))
logger.info("download_moddrfs: downloading to {}".format(tif_forc_path))
try:
r = requests.get(tif_forc_url, auth = HTTPDigestAuth(cfg.username_jpl, cfg.password_jpl), verify=cfg.ssl_verify)
if r.status_code == 200:
with open(tif_forc_path, 'wb') as rfile:
rfile.write(r.content)
else:
logger.error("download_moddrfs: error downloading {} {} {}".format('forcing', date_dn.strftime('%Y-%m-%d'), tile))
except IOError as e:
logger.error("download_moddrfs: error downloading {} {} {}".format(date_dn.strftime('forcing', '%Y-%m-%d'), tile))
logging.error(e)
# grain size (grnsz)
tif_grnsz_name = "MOD09GA.A" + date_dn.strftime('%Y') + date_dn.strftime('%j') + "." + tile + ".006.NRT.drfs.grnsz.tif"
tif_grnsz_url = site_url + "/" + tif_grnsz_name
tif_grnsz_path = dir_work_d + tif_grnsz_name
if os.path.isfile(tif_grnsz_path) and overwrite_flag:
os.remove(tif_grnsz_path)
if os.path.isfile(tif_grnsz_path) and overwrite_flag == False:
logger.info("download_moddrfs: skipping {} {}, {} exists".format(date_dn.strftime('%Y-%m-%d'), tile, tif_grnsz_path))
if not os.path.isfile(tif_grnsz_path):
logger.info("download_moddrfs: downloading {} {} {}".format('drfs.grnsz', date_dn.strftime('%Y-%m-%d'), tile))
logger.info("download_moddrfs: downloading from {}".format(tif_grnsz_url))
logger.info("download_moddrfs: downloading to {}".format(tif_grnsz_path))
try:
r = requests.get(tif_grnsz_url, auth = HTTPDigestAuth(cfg.username_jpl, cfg.password_jpl), verify=cfg.ssl_verify)
if r.status_code == 200:
with open(tif_grnsz_path, 'wb') as rfile:
rfile.write(r.content)
else:
logger.error("download_moddrfs: error downloading {} {} {}".format('drfs.grnsz', date_dn.strftime('%Y-%m-%d'), tile))
except IOError as e:
logger.error("download_moddrfs: error downloading {} {} {}".format(date_dn.strftime('drfs.grnsz', '%Y-%m-%d'), tile))
logging.error(e)
else:
logger.error("download_moddrfs: error connecting {}".format(site_url))
def org_moddrfs(cfg, date_dn):
""" Organize downloaded moddrfs data
Parameters
---------
cfg ():
config_params Class object
date_dn: datetime
date
Returns
-------
None
Notes
-----
"""
# Proj4js.defs["SR-ORG:6974"] = "+proj=sinu +lon_0=0 +x_0=0 +y_0=0 +ellps=WGS84 +datum=WGS84 +units=m +no_defs";
dir_work_moddrfs = cfg.dir_work + 'moddrfs/'
dir_arch_moddrfs = cfg.dir_arch + 'moddrfs/'
date_str = str(date_dn.strftime('%Y%m%d'))
chr_rm = [":"]
proj_str = ''.join(i for i in cfg.proj if not i in chr_rm)
basin_str = os.path.splitext(os.path.basename(cfg.basin_poly_path))[0]
# merge and reproject radiative forcing (forc) files
tif_list_forc = glob.glob("{0}/*{1}*{2}.tif".format(dir_work_moddrfs, date_dn.strftime('%Y%j'), "forcing"))
tif_out_forc = dir_work_moddrfs + 'MOD09GA_' + date_str + '_{0}_forc.tif'
try:
rasterio_raster_merge(tif_list_forc, tif_out_forc.format("ext"))
logger.info("org_moddrfs: merging {} {} tiles".format(date_dn.strftime('%Y-%m-%d'), 'forcing'))
except:
logger.error("org_moddrfs: error merging {} {} tiles".format(date_dn.strftime('%Y-%m-%d'), 'forcing'))
try:
rasterio_raster_reproject(tif_out_forc.format("ext"), tif_out_forc.format(proj_str), cfg.proj, nodata=2500)
logger.info("org_moddrfs: reprojecting {} to {}".format('forcing', date_dn.strftime('%Y-%m-%d')))
except:
logger.error("org_moddrfs: error reprojecting {} to {}".format(tif_out_forc.format("ext"), tif_out_forc.format(proj_str), cfg.proj))
# merge and reproject grain size files (grnsz)
tif_list_grnsz = glob.glob("{0}/*{1}*{2}.tif".format(dir_work_moddrfs, date_dn.strftime('%Y%j'), "drfs.grnsz"))
tif_out_grnsz = dir_work_moddrfs + 'MOD09GA_' + date_str + '_{0}_grnsz.tif'
try:
rasterio_raster_merge(tif_list_grnsz, tif_out_grnsz.format("ext"))
logger.info("org_moddrfs: merging {} {} tiles".format(date_dn.strftime('%Y-%m-%d'), 'drfs.grnsz'))
except:
logger.error("org_moddrfs: error merging {} {} tiles".format(date_dn.strftime('%Y-%m-%d'), 'drfs.grnsz'))
try:
rasterio_raster_reproject(tif_out_grnsz.format("ext"), tif_out_grnsz.format(proj_str), cfg.proj, nodata=2500)
logger.info("org_moddrfs: reprojecting {} to {}".format('drfs.grnsz', date_dn.strftime('%Y-%m-%d')))
except:
logger.error("org_moddrfs: error reprojecting {} to {}".format(tif_out_grnsz.format("ext"), tif_out_grnsz.format(proj_str), cfg.proj))
# clip to basin polygon (forc)
tif_list = glob.glob("{0}/*{1}*{2}*{3}.tif".format(dir_work_moddrfs, date_str, proj_str, "forc"))
for tif in tif_list:
tif_out = dir_work_moddrfs + "moddrfs_forc_" + date_str + "_" + basin_str + ".tif"
try:
gdal_raster_clip(cfg.basin_poly_path, tif, tif_out, cfg.proj, cfg.proj, 2500)
logger.info("org_moddrfs: clipping {} to {}".format(tif, tif_out))
except:
logger.error("org_moddrfs: error clipping {} to {}".format(tif, tif_out))
if not tif_list:
logger.error("org_moddrfs: error finding tifs to clip")
# clip to basin polygon (grnsz)
tif_list = glob.glob("{0}/*{1}*{2}*{3}.tif".format(dir_work_moddrfs, date_str, proj_str, "grnsz"))
for tif in tif_list:
tif_out = dir_work_moddrfs + "moddrfs_grnsz_" + date_str + "_" + basin_str + ".tif"
try:
gdal_raster_clip(cfg.basin_poly_path, tif, tif_out, cfg.proj, cfg.proj, 2500)
logger.info("org_moddrfs: clipping {} to {}".format(tif, tif_out))
except:
logger.error("org_moddrfs: error clipping {} to {}".format(tif, tif_out))
if not tif_list:
logger.error("org_moddrfs: error finding tifs to clip")
# set filenames
file_forc = "moddrfs_forc_" + date_str + "_" + basin_str + ".tif"
file_grnsz = "moddrfs_grnsz_" + date_str + "_" + basin_str + ".tif"
# open connection to rasters
rast_forc = rasterio.open(dir_work_moddrfs + file_forc)
rast_grnsz = rasterio.open(dir_work_moddrfs + file_grnsz)
# read in raster data to np array
forc = rast_forc.read(1)
# set pixels > 1000 to nodata value (2500)
forc_masked = np.where(forc>1000, 2500, forc)
# read in raster data to np array
grnsz = rast_grnsz.read(1)
# set pixels > 1000 to nodata value (2500)
grnsz_masked = np.where(grnsz>1000, 2500, grnsz)
# write out masked files (forc)
with rasterio.Env():
# Write an array as a raster band to a new 16-bit file. For
# the new file's profile, we start with the profile of the source
profile = rast_forc.profile
profile.update(
dtype=rasterio.uint16,
count=1)
with rasterio.open(cfg.dir_db + file_forc, 'w', **profile) as dst:
dst.write(forc_masked,indexes=1)
# write out masked files (grnsz)
with rasterio.Env():
# Write an array as a raster band to a new 16-bit file. For
# the new file's profile, we start with the profile of the source
profile = rast_grnsz.profile
profile.update(
dtype=rasterio.uint16,
count=1)
with rasterio.open(cfg.dir_db + file_grnsz, 'w', **profile) as dst:
dst.write(grnsz_masked,indexes=1)
# close datasets
rast_forc.close()
rast_grnsz.close()
# calculate zonal statistics and export data
tif_list = glob.glob("{0}/{1}*{2}*{3}*.tif".format(cfg.dir_db, 'moddrfs', date_str, basin_str))
for tif in tif_list:
file_meta = os.path.basename(tif).replace('.', '_').split('_')
if 'poly' in cfg.output_type:
try:
tif_stats = zonal_stats(cfg.basin_poly_path, tif, stats=['median', 'mean'], all_touched=True)
tif_stats_df = pd.DataFrame(tif_stats)
logger.info("org_moddrfs: computing zonal statistics")
except:
logger.error("org_moddrfs: error computing poly zonal statistics")
try:
frames = [cfg.basin_poly, tif_stats_df]
basin_poly_stats = pd.concat(frames, axis=1)
logger.info("org_moddrfs: merging poly zonal statistics")
except:
logger.error("org_moddrfs: error merging zonal statistics")
if 'geojson' in cfg.output_format:
try:
geojson_out = os.path.splitext(tif)[0] + "_poly.geojson"
basin_poly_stats.to_file(geojson_out, driver='GeoJSON')
logger.info("org_moddrfs: writing {0}".format(geojson_out))
except:
logger.error("org_moddrfs: error writing {0}".format(geojson_out))
if 'csv' in cfg.output_format:
try:
csv_out = os.path.splitext(tif)[0] + "_poly.csv"
basin_poly_stats_df = pd.DataFrame(basin_poly_stats.drop(columns = 'geometry'))
basin_poly_stats_df.insert(0, 'Source', file_meta[0])
basin_poly_stats_df.insert(0, 'Type', file_meta[1])
basin_poly_stats_df.insert(0, 'Date', dt.datetime.strptime(file_meta[2], '%Y%m%d').strftime('%Y-%m-%d %H:%M'))
basin_poly_stats_df.to_csv(csv_out, index=False)
logger.info("org_moddrfs: writing {0}".format(csv_out))
except:
logger.error("org_moddrfs: error writing {0}".format(csv_out))
if 'points' in cfg.output_type:
try:
tif_stats = zonal_stats(cfg.basin_points_path, tif, stats=['median', 'mean'], all_touched=True)
tif_stats_df = pd.DataFrame(tif_stats)
logger.info("org_moddrfs: computing points zonal statistics")
except:
logger.error("org_moddrfs: error computing points zonal statistics")
try:
frames = [cfg.basin_points, tif_stats_df]
basin_points_stats = pd.concat(frames, axis=1)
logger.info("org_moddrfs: merging zonal statistics")
except:
logger.error("org_moddrfs: error merging zonal statistics")
if 'geojson' in cfg.output_format:
try:
geojson_out = os.path.splitext(tif)[0] + "_points.geojson"
basin_points_stats.to_file(geojson_out, driver='GeoJSON')
logger.info("org_snodas: writing {0}".format(geojson_out))
except:
logger.error("org_snodas: error writing {0}".format(geojson_out))
if 'csv' in cfg.output_format:
try:
csv_out = os.path.splitext(tif)[0] + "_points.csv"
basin_points_stats_df = pd.DataFrame(basin_points_stats.drop(columns = 'geometry'))
basin_points_stats_df.insert(0, 'Source', file_meta[0])
basin_points_stats_df.insert(0, 'Type', file_meta[1])
basin_points_stats_df.insert(0, 'Date', dt.datetime.strptime(file_meta[2], '%Y%m%d').strftime('%Y-%m-%d %H:%M'))
basin_points_stats_df.to_csv(csv_out, index=False)
logger.info("org_moddrfs: writing {0}".format(csv_out))
except:
logger.error("org_moddrfs: error writing {0}".format(csv_out))
# clean up working directory
for file in os.listdir(dir_work_moddrfs):
file_path = dir_work_moddrfs + file
try:
os.remove(file_path)
logger.info("org_moddrfs: removing {}".format(file_path))
except:
logger.error("org_moddrfs: error removing {}".format(file_path))
def download_modis(cfg, date_dn):
""" Download modis snow data
Parameters
---------
cfg ():
config_params Class object
date_dn: datetime
        date of interest, in the local time zone
Returns
-------
None
Notes
-----
"""
# create working directory
dir_work_d = cfg.dir_work + 'modis/'
if not os.path.isdir(dir_work_d):
os.makedirs(dir_work_d)
# format date into UTC and API format
# assumes local time zone for date_dn
date_dn_ltz = date_dn.replace(tzinfo=get_localzone())
date_dn_utc_start = date_dn_ltz.astimezone(pytz.utc)
date_dn_start = date_dn_utc_start.isoformat()[0:16] + ':00Z'
date_dn_utc_end = date_dn_utc_start + dt.timedelta(hours=24)
date_dn_end = date_dn_utc_end.isoformat()[0:16] + ':00Z'
# format bounding box
basin_poly_bbox_tmp = [str(element) for element in cfg.basin_poly_bbox]
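    # reorder the bounding box into the 'lower-left lon, lower-left lat,
    # upper-right lon, upper-right lat' order expected by the CMR bounding_box
    # parameter (this assumes cfg.basin_poly_bbox is stored in lat/lon order)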
lonlatod = [1, 0, 3, 2]
basin_poly_bbox_tmp2 = [basin_poly_bbox_tmp[i] for i in lonlatod]
basin_poly_bbox_fmt = ",".join(basin_poly_bbox_tmp2)
short_name_aqua = 'MYD10A2'
short_name_terra = 'MOD10A2'
version = '6'
polygon = ''
filename_filter = ''
# search for aqua data
try:
url_list_aqua = cmr_search(short_name_aqua, version, date_dn_start,
date_dn_end, bounding_box=basin_poly_bbox_fmt,
polygon=polygon, filename_filter=filename_filter)
except:
url_list_aqua = []
# search for terra data
try:
url_list_terra = cmr_search(short_name_terra, version, date_dn_start,
date_dn_end, bounding_box=basin_poly_bbox_fmt,
polygon=polygon, filename_filter=filename_filter)
except:
url_list_terra = []
# combine data results together
url_list = url_list_aqua + url_list_terra
    logger.info("download_modis: found {} file(s) to download".format(len(url_list)))
if len(url_list) > 0:
credentials = '{0}:{1}'.format(cfg.username_earthdata, cfg.password_earthdata)
credentials = base64.b64encode(credentials.encode('ascii')).decode('ascii')
for index, url in enumerate(url_list, start=1):
file_name = url.split('/')[-1]
file_path = dir_work_d + file_name
try:
req = Request(url)
if credentials:
req.add_header('Authorization', 'Basic {0}'.format(credentials))
opener = build_opener(HTTPCookieProcessor())
data = opener.open(req).read()
open(file_path, 'wb').write(data)
except HTTPError as e:
logger.info(('HTTP error {0}, {1}'.format(e.code, e.reason)))
except URLError as e:
logger.info(('URL error: {0}'.format(e.reason)))
except IOError:
raise
else:
logger.info("download_modis: no data found")
def batch_swann(cfg, date_list, time_int):
"""downloads and processes swann data
Parameters
---------
date_list: list of datetime dates
dates to retrieve data
Returns
-------
None
Notes
-----
calls 'download_swann_arc', 'download_swann_rt', 'org_swann'
hard-coded parameters found from this reference -
https://nsidc.org/data/nsidc-0719/versions/1
"""
# NSIDC short name for dataset
short_name = 'NSIDC-0719'
    # look at data available through the NSIDC archive
params = {
'short_name': short_name
}
cmr_collections_url = 'https://cmr.earthdata.nasa.gov/search/collections.json'
response = requests.get(cmr_collections_url, params=params)
results = json.loads(response.content)
# start date of archive data
start_date_ds = dt.datetime.strptime([el['time_start'] for el in results['feed']['entry']][0][0:10], '%Y-%m-%d')
# end date of archive data
end_date_ds = dt.datetime.strptime([el['time_end'] for el in results['feed']['entry']][0][0:10], '%Y-%m-%d')
# build list of data available
date_list_ds = pd.date_range(start_date_ds, end_date_ds, freq=time_int).tolist()
    # find which requested dates are available in the archive
date_list_arc = lint(date_list_ds, date_list)
# get unique years as data are stored as water year netCDF
year_list_arc = set([dt.datetime.strftime(i, "%Y") for i in date_list_arc])
# build pandas dataframe of dates, year, month, and wyear for proc netCDF
arc_dt = pd.DataFrame({'date':date_list_arc})
arc_dt['year'] = arc_dt['date'].dt.strftime('%Y')
arc_dt['month'] = arc_dt['date'].dt.strftime('%m')
arc_dt['wyear'] = arc_dt.apply(lambda x: wyear_pd(x), axis=1)
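    # e.g. assuming wyear_pd follows the usual Oct-Sep water-year convention,
    # 2021-03-15 falls in water year 2021 while 2021-10-15 falls in water year 2022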
# download archive netCDF data
for year_dn in year_list_arc:
error_flag = False
try:
download_swann_arc(cfg, year_dn)
except:
logger.info("download_swann_arc: error downloading swann for '{}'".format(year_dn))
error_flag = True
if error_flag is False:
# build list of dates for netCDF file
date_list_nc = pd.to_datetime(arc_dt[arc_dt.wyear == int(year_dn)].date.to_numpy())
for date_dn in date_list_nc:
try:
org_swann(cfg, date_dn, ftype = 'arc')
except:
logger.info("org_swann: error processing swann for '{}'".format(date_dn))
    # find which requested dates must come from real-time files
date_list_rt = ldif(date_list_arc, date_list)
# download real-time netCDF data
for date_dn in date_list_rt:
error_flag = False
try:
download_swann_rt(cfg, date_dn)
except:
logger.info("download_swann_rt: error downloading swann` for '{}'".format(date_dn))
error_flag = True
if error_flag is False:
try:
org_swann(cfg, date_dn, ftype = 'rt')
except:
logger.info("org_swann: error processing swann for '{}'".format(date_dn))
def download_swann_arc(cfg, year_dn, overwrite_flag = False):
""" Download SWANN snow data from NSIDC archive
Parameters
---------
cfg ():
config_params Class object
    year_dn: str
        four-digit year used to build the water-year filename (e.g. '2021')
overwrite_flag: boolean
True : overwrite existing files
Returns
-------
None
Notes
-----
called from 'batch_swann'
"""
site_url = cfg.host_snodas + cfg.dir_ftp_swann_arc
nc_name = "4km_SWE_Depth_WY" + ("{}_v01.nc".format(year_dn))
nc_url = site_url + nc_name
dir_work_swann = cfg.dir_work + 'swann/'
nc_path = dir_work_swann + nc_name
if not os.path.isdir(cfg.dir_work):
os.makedirs(cfg.dir_work)
if not os.path.isdir(dir_work_swann):
os.makedirs(dir_work_swann)
if os.path.isfile(nc_path) and overwrite_flag:
os.remove(nc_path)
if not os.path.isfile(nc_path):
logger.info("download_swann_arc: downloading {}".format(year_dn))
logger.info("download_swann_arc: downloading from {}".format(nc_url))
logger.info("download_swann_arc: downloading to {}".format(nc_path))
try:
urllib.request.urlretrieve(nc_url, nc_path)
except IOError as e:
logger.error("download_swann_arc: error downloading {}".format(year_dn))
            logger.error(e)
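# Example (illustrative): for year_dn = '2021' the file requested is
# '4km_SWE_Depth_WY2021_v01.nc', pulled from cfg.host_snodas + cfg.dir_ftp_swann_arc
# and saved under <cfg.dir_work>/swann/.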
def org_swann(cfg, date_dn, ftype):
""" Organize SWANN snow data
Parameters
---------
cfg ():
config_params Class object
date_dn: datetime
'%Y-%m-%d' format
ftype: string
arc: archive data file
rt: real-time data file
Returns
-------
None
Notes
-----
called from 'batch_swann'
"""
year_dn = wyear_dt(date_dn)
dir_work_swann = cfg.dir_work + 'swann/'
if ftype == 'arc':
nc_name = "4km_SWE_Depth_WY" + ("{}_v01.nc".format(year_dn))
elif ftype == 'rt':
nc_name = "4km_SWE_Depth_" + ("{}_v01.nc".format(date_dn.strftime('%Y%m%d')))
else:
logger.error("org_swann: invalid type {}".format(type))
nc_path = dir_work_swann + nc_name
date_str = str(date_dn.strftime('%Y%m%d'))
chr_rm = [":"]
    proj_str = ''.join(i for i in cfg.proj if i not in chr_rm)
dtype_out = 'float64'
basin_str = os.path.splitext(os.path.basename(cfg.basin_poly_path))[0]
crs_raw = 'EPSG:4326'
# open dataset with xarray
with xr.open_dataset(nc_path) as file_nc:
# set coordinate system to 'WGS84 (EPSG:4326)'
swann_xr = file_nc.rio.write_crs(4326, inplace=True)
file_nc.close()
# extract swe data for 'date_dn' and save as geotif
swe_swann_xr = swann_xr["SWE"].sel(
time=np.datetime64(date_dn))
swe_swann_xr_date_dn = swe_swann_xr.rio.set_spatial_dims(x_dim='lon', y_dim='lat', inplace=True)
swe_file_path = dir_work_swann + 'swann_swe_' + date_str + '.tif'
swe_swann_xr_date_dn.rio.to_raster(swe_file_path)
# extract snow depth data for 'date_dn' and save as geotif
sd_swann_xr = swann_xr["DEPTH"].sel(
time=np.datetime64(date_dn))
    sd_swann_xr_date_dn = sd_swann_xr.rio.set_spatial_dims(x_dim='lon', y_dim='lat', inplace=True)
sd_file_path = dir_work_swann + 'swann_sd_' + date_str + '.tif'
sd_swann_xr_date_dn.rio.to_raster(sd_file_path)
# close xr dataset
swann_xr.close()
# reproject geotif
tif_list = glob.glob("{0}/*{1}*.tif".format(dir_work_swann, date_str))
for tif in tif_list:
tif_out = os.path.splitext(tif)[0] + "_" + proj_str + ".tif"
try:
gdal_raster_reproject(tif, tif_out, cfg.proj, crs_raw)
# rasterio_raster_reproject(tif, tif_out, cfg.proj)
logger.info("org_swann: reprojecting {} to {}".format(tif, tif_out))
except:
logger.error("org_swann: error reprojecting {} to {}".format(tif, tif_out))
if not tif_list:
logger.error("org_swann: error finding tifs to reproject")
# clip to basin polygon
tif_list = glob.glob("{0}/*{1}*{2}.tif".format(dir_work_swann, date_str, proj_str))
for tif in tif_list:
tif_out = os.path.splitext(tif)[0] + "_" + basin_str + ".tif"
try:
gdal_raster_clip(cfg.basin_poly_path, tif, tif_out, cfg.proj, cfg.proj, -9999)
logger.info("org_swann: clipping {} to {}".format(tif, tif_out))
except:
logger.error("org_swann: error clipping {} to {}".format(tif, tif_out))
if not tif_list:
logger.error("org_swann: error finding tifs to clip")
# convert units
if cfg.unit_sys == 'english':
calc_exp = '(+ 1 (* .0393701 (read 1)))' # inches
if cfg.unit_sys == 'metric':
calc_exp = '(read 1)' # keep units in mm
# SWE
tif_list = glob.glob("{0}/*{1}*{2}*{3}*{4}.tif".format(dir_work_swann, 'swann_swe', date_str, proj_str, basin_str))
for tif in tif_list:
tif_int = os.path.splitext(tif)[0] + "_" + dtype_out + ".tif"
tif_out = cfg.dir_db + "swann_swe_" + date_str + "_" + basin_str + "_" + cfg.unit_sys + ".tif"
try:
rio_dtype_conversion(tif, tif_int, dtype_out)
rio_calc(tif_int, tif_out, calc_exp)
logger.info("org_swann: calc {} {} to {}".format(calc_exp, tif, tif_out))
except:
logger.error("org_swann: error calc {} to {}".format(tif, tif_out))
if not tif_list:
logger.error("org_swann: error finding tifs to calc")
# Snow Depth
tif_list = glob.glob("{0}/*{1}*{2}*{3}*{4}.tif".format(dir_work_swann, 'swann_sd', date_str, proj_str, basin_str))
for tif in tif_list:
tif_int = os.path.splitext(tif)[0] + "_" + dtype_out + ".tif"
tif_out = cfg.dir_db + "swann_snowdepth_" + date_str + "_" + basin_str + "_" + cfg.unit_sys + ".tif"
try:
rio_dtype_conversion(tif, tif_int, dtype_out)
rio_calc(tif_int, tif_out, calc_exp)
logger.info("org_swann: calc {} {} to {}".format(calc_exp, tif, tif_out))
except:
logger.error("org_swann: error calc {} to {}".format(tif, tif_out))
if not tif_list:
logger.error("org_swann: error finding tifs to calc")
# calculate zonal statistics and export data
tif_list = glob.glob("{0}/{1}*{2}*{3}*{4}.tif".format(cfg.dir_db, 'swann', date_str, basin_str, cfg.unit_sys))
for tif in tif_list:
file_meta = os.path.basename(tif).replace('.', '_').split('_')
if 'poly' in cfg.output_type:
try:
tif_stats = zonal_stats(cfg.basin_poly_path, tif, stats=['min', 'max', 'median', 'mean'], all_touched=True)
tif_stats_df = pd.DataFrame(tif_stats)
logger.info("org_swann: computing zonal statistics")
except:
logger.error("org_swann: error computing poly zonal statistics")
try:
frames = [cfg.basin_poly, tif_stats_df]
                basin_poly_stats = pd.concat(frames, axis=1)
import json
import csv
import numpy as np
from stockstats import StockDataFrame
import pandas as pd
import mplfinance as mpf
import seaborn as sn
import matplotlib.pyplot as plt
def load_secrets():
"""
Load data from secret.json as JSON
:return: Dict.
"""
try:
with open('secret.json', 'r') as fp:
data = json.load(fp)
return data
except Exception as e:
raise e
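# Illustrative usage (assumes a secret.json next to this script containing an
# "api_key" entry, which this module does not guarantee):
#     secrets = load_secrets()
#     api_key = secrets.get("api_key")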
def to_csv(filename, input_list: list):
"""
:param input_list: List of dict
:param filename: filename.csv
:return:
"""
rows = []
keys, values = [], []
for data in input_list:
keys, values = [], []
for key, value in data.items():
keys.append(key)
values.append(value)
rows.append(values)
with open(filename, "w") as outfile:
csvwriter = csv.writer(outfile)
csvwriter.writerow(keys)
for row in rows:
csvwriter.writerow(row)
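# Illustrative usage: write a list of uniform dicts to disk, e.g.
#     to_csv("prices.csv", [{"date": "2021-01-04", "close": 100.0},
#                           {"date": "2021-01-05", "close": 101.5}])
# Note that the header row comes from the keys of the last dict processed.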
class TechnicalAnalysisV2:
"""
Class to perform technical analysis on input stock data.
The input data should have columns date, open, high, low, close, volume etc.
"""
def __init__(self, data=None, name: str = None):
self.data = pd.DataFrame(data)
self.name = name
@staticmethod
def __get_trend(data, stride=1):
"""
Get trend from a given data.
:param data: Price data
:param stride: Neighbour distance to consider for determining trend
:return: Trend list
"""
if stride < 1:
stride = 1
trend = []
stride_list = [i for i in range(stride)]
stride_list.extend([(i - (len(data) - 1)) * -1 for i in range(stride)])
for index, value in enumerate(data):
if index in stride_list:
trend.append('-')
continue
prev_value = data[index - stride]
next_value = data[index + stride]
if prev_value <= value < next_value or prev_value < value <= next_value:
trend.append('A')
elif prev_value >= value > next_value or prev_value > value >= next_value:
trend.append('D')
elif prev_value < value > next_value:
trend.append('SH')
elif prev_value > value < next_value:
trend.append('SL')
else:
trend.append('-')
return trend
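    # Worked example (illustrative): with stride=1 the first and last points are
    # labelled '-', so __get_trend([1, 2, 3, 2, 1]) returns
    # ['-', 'A', 'SH', 'D', '-'] (ascending, swing high, descending).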
def get_swing_data(self, stride, type='close', data=None, ramp=False, swing=True):
"""
Get actions and swing data for given data
:param data: Price data
:param stride: Neighbour distance to consider for determining trend
:param type: Open, high, low or close.
:param ramp: Consider ascend and descend separately
:param swing: If True, considers swing high and low and movement as separate, else Swing low and ascending in
one and swing high and descending in another
:return: Dict {actions, swing high, swing low}
"""
if data:
            data = pd.DataFrame(data)
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Contains the Evaluator class.
Part of symenergy. Copyright 2018 authors listed in AUTHORS.
"""
import os
import sys
import gc
import py_compile
import sympy as sp
import numpy as np
from importlib import reload
from multiprocessing import current_process
import pandas as pd
import itertools
import random
from hashlib import md5
from functools import partial
import time
from sympy.utilities.lambdify import lambdastr
import symenergy
from symenergy.auxiliary.parallelization import parallelize_df
from symenergy.auxiliary.parallelization import log_time_progress
from symenergy import multiproc_params
from symenergy.auxiliary.parallelization import get_default_nworkers
from symenergy.auxiliary.parallelization import MP_COUNTER, MP_EMA
from symenergy.auxiliary import parallelization
from symenergy.auxiliary.decorators import hexdigest
from symenergy.auxiliary.io import EvaluatorCache
from symenergy.core.model import Model
from symenergy import _get_logger
logger = _get_logger(__name__)
pd.options.mode.chained_assignment = None
THRESHOLD_UNEXPECTED_ZEROS = 1e-9
def log_info_mainprocess(logstr):
if current_process().name == 'MainProcess':
logger.info(logstr)
def _eval(func, df_x):
'''
Vectorized evaluation
Parameters
----------
func : pandas.Series
df_x : pandas.DataFrame
'''
new_index = df_x.set_index(df_x.columns.tolist()).index
data = func.iloc[0](*df_x.values.T)
if not isinstance(data, np.ndarray): # constant value --> expand
data = np.ones(df_x.iloc[:, 0].values.shape) * data
res = pd.DataFrame(data, index=new_index)
MP_COUNTER.increment()
return res
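# Illustrative example for _eval (hypothetical inputs): if func holds a single
# lambdified expression such as lambda a, b: a + b and df_x has columns
# ['a', 'b'], the result is a one-column DataFrame indexed by (a, b) containing
# a + b evaluated for every row of df_x.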
class Expander():
'''
Evaluates the functions in the `lambd_func` column of `df` with all
values of the x_vals dataframe.
'''
def __init__(self, x_vals):
self.df_x_vals = x_vals
def _expand(self, df):
logger.warning('_call_eval: Generating dataframe with length %d' % (
len(df) * len(self.df_x_vals)))
if not multiproc_params['nworkers'] or multiproc_params['nworkers'] == 1:
df_result = self._call_eval(df)
else:
self.nparallel = len(df)
df_result = parallelize_df(df=df[['func', 'idx', 'lambd_func']],
func=self._wrapper_call_eval)
return df_result.rename(columns={0: 'lambd'}).reset_index()
def _call_eval(self, df):
df_result = (df.groupby(['func', 'idx'])
.lambd_func
.apply(_eval, df_x=self.df_x_vals))
return df_result
def _restore_columns(self, df_result, df):
ind = ['func', 'idx']
cols = ['is_positive']
return df_result.join(df.set_index(ind)[cols], on=ind)
def _wrapper_call_eval(self, df):
name, ntot = 'Vectorized evaluation', self.nparallel
return log_time_progress(self._call_eval)(self, df, name, ntot)
def run(self, df):
df_result = self._expand(df)
return self._restore_columns(df_result, df)
# %% ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
class EvalAnalysis():
'''
Identifies optimal and infeasible solutions.
'''
def __init__(self, x_vals, map_col_func, dict_cap, dict_constrs,
tolerance, drop_non_optimum):
self.x_vals = x_vals
self.map_col_func = map_col_func
self.tolerance = tolerance
self.drop_non_optimum = drop_non_optimum
self.dict_cap = dict_cap
self.dict_constrs = dict_constrs
self.x_name = list(map(lambda x: x.name, self.x_vals))
def run(self, df):
if not multiproc_params['nworkers'] or multiproc_params['nworkers'] == 1:
df_exp = self._evaluate_by_x_new(df)
else:
group_params = self._get_optimum_group_params()
df_split = [df for _, df in (df.groupby(group_params))]
self.nparallel = len(df_split)
df_exp = parallelize_df(df=df_split,
func=self._wrapper_call_evaluate_by_x)
return df_exp
def _get_optimum_group_params(self):
'''
Identify groupby columns to get closest to nchunks.
evaluate_by_x must be applied to full sets of constraint
combinations, since constraint combinations are to be compared.
'''
nchunks = get_default_nworkers() * multiproc_params['chunks_per_worker']
param_combs = \
itertools.chain.from_iterable(itertools.combinations(self.x_vals, i)
for i in range(1, len(self.x_vals) + 1))
len_param_combs = {params: np.prod(list(len(self.x_vals[par])
for par in params))
for params in param_combs}
dev_param_combs = {key: abs((len_ - nchunks) / nchunks)
for key, len_ in len_param_combs.items()}
group_params = min(dev_param_combs, key=lambda x: dev_param_combs[x])
group_params = list(map(lambda x: x.name, group_params))
return group_params
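    # Illustrative example (hypothetical sizes): with parameter value counts
    # {a: 10, b: 5} and nchunks = 8, the candidate groupings have lengths
    # {('a',): 10, ('b',): 5, ('a', 'b'): 50}; ('a',) deviates least from
    # nchunks (0.25), so the groupby columns returned are ['a'].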
def _get_map_sanitize(self, df):
'''
Identify zero values with non-binding zero constraints.
'''
map_ = pd.Series([False] * len(df), index=df.index)
for col, func in self.map_col_func:
map_new = ((df.func == func)
& df.idx.isin(self.dict_constrs[col])
& (df['lambd'].abs() <= THRESHOLD_UNEXPECTED_ZEROS))
map_ |= map_new
return map_
def _evaluate_by_x_new(self, df):
MP_COUNTER.increment()
log_info_mainprocess('Sanitizing unexpected zeros.')
df['map_sanitize'] = self._get_map_sanitize(df)
df.loc[df.map_sanitize, 'lambd'] = np.nan
log_info_mainprocess('Getting mask valid solutions.')
mask_valid = self._get_mask_valid_solutions(df)
df = df.join(mask_valid, on=mask_valid.index.names)
df.loc[:, 'lambd'] = df.lambd.astype(float)
log_info_mainprocess('Identify cost optimum.')
df.loc[:, 'is_optimum'] = self.init_cost_optimum(df)
if self.drop_non_optimum:
df = df.loc[df.is_optimum]
return df
# def _call_evaluate_by_x(self, df):
# return self._evaluate_by_x_new(df)
def _wrapper_call_evaluate_by_x(self, df):
name, ntot = 'Evaluate', self.nparallel
return log_time_progress(self._evaluate_by_x_new)(self, df, name, ntot)
def _get_mask_valid_solutions(self, df, return_full=False):
'''
Obtain a mask identifying valid solutions for each parameter value set
and constraint combination.
Indexed by x_name and constraint combination idx, *not* by function.
Parameters
----------
df : pandas.DataFrame
return_full : bool
if True, returns the full mask for debugging, i.e. indexed by
functions prior to consolidation
'''
df = df.copy() # this is important, otherwise we change the x_vals
mask_valid = pd.Series(True, index=df.index)
mask_valid &= self._get_mask_valid_positive(df)
mask_valid &= self._get_mask_valid_capacity(df.copy())
df.loc[:, 'mask_valid'] = mask_valid
if return_full: # for debugging
return df
# consolidate mask by constraint combination and x values
index = self.x_name + ['idx']
mask_valid = df.pivot_table(index=index, values='mask_valid',
aggfunc=min)
return mask_valid
def _get_mask_valid_positive(self, df):
''' Called by _get_mask_valid_solutions '''
msk_pos = df.is_positive == 1
        mask_positive = pd.Series(True, index=df.index)
from __future__ import division
from datetime import datetime
import sys
if sys.version_info < (3, 3):
import mock
else:
from unittest import mock
import pandas as pd
import numpy as np
import random
from nose.tools import assert_almost_equal as aae
import bt
import bt.algos as algos
def test_algo_name():
class TestAlgo(algos.Algo):
pass
actual = TestAlgo()
assert actual.name == 'TestAlgo'
class DummyAlgo(algos.Algo):
def __init__(self, return_value=True):
self.return_value = return_value
self.called = False
def __call__(self, target):
self.called = True
return self.return_value
def test_algo_stack():
algo1 = DummyAlgo(return_value=True)
algo2 = DummyAlgo(return_value=False)
algo3 = DummyAlgo(return_value=True)
target = mock.MagicMock()
stack = bt.AlgoStack(algo1, algo2, algo3)
actual = stack(target)
assert not actual
assert algo1.called
assert algo2.called
assert not algo3.called
def test_print_temp_data():
target = mock.MagicMock()
target.temp={}
target.temp['selected'] = ['c1','c2']
target.temp['weights'] = [0.5,0.5]
algo = algos.PrintTempData()
assert algo( target )
algo = algos.PrintTempData( 'Selected: {selected}')
assert algo( target )
def test_print_info():
target = bt.Strategy('s', [])
target.temp={}
algo = algos.PrintInfo()
assert algo( target )
algo = algos.PrintInfo( '{now}: {name}')
assert algo( target )
def test_run_once():
algo = algos.RunOnce()
assert algo(None)
assert not algo(None)
assert not algo(None)
def test_run_period():
target = mock.MagicMock()
dts = pd.date_range('2010-01-01', periods=35)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
algo = algos.RunPeriod()
# adds the initial day
backtest = bt.Backtest(
bt.Strategy('', [algo]),
data
)
target.data = backtest.data
dts = target.data.index
target.now = None
assert not algo(target)
# run on first date
target.now = dts[0]
assert not algo(target)
# run on first supplied date
target.now = dts[1]
assert algo(target)
# run on last date
target.now = dts[len(dts) - 1]
assert not algo(target)
algo = algos.RunPeriod(
run_on_first_date=False,
run_on_end_of_period=True,
run_on_last_date=True
)
# adds the initial day
backtest = bt.Backtest(
bt.Strategy('', [algo]),
data
)
target.data = backtest.data
dts = target.data.index
# run on first date
target.now = dts[0]
assert not algo(target)
# first supplied date
target.now = dts[1]
assert not algo(target)
# run on last date
target.now = dts[len(dts) - 1]
assert algo(target)
# date not in index
target.now = datetime(2009, 2, 15)
assert not algo(target)
def test_run_daily():
target = mock.MagicMock()
dts = pd.date_range('2010-01-01', periods=35)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
algo = algos.RunDaily()
# adds the initial day
backtest = bt.Backtest(
bt.Strategy('',[algo]),
data
)
target.data = backtest.data
target.now = dts[1]
assert algo(target)
def test_run_weekly():
dts = pd.date_range('2010-01-01', periods=367)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
target = mock.MagicMock()
target.data = data
algo = algos.RunWeekly()
# adds the initial day
backtest = bt.Backtest(
bt.Strategy('', [algo]),
data
)
target.data = backtest.data
# end of week
target.now = dts[2]
assert not algo(target)
# new week
target.now = dts[3]
assert algo(target)
algo = algos.RunWeekly(
run_on_first_date=False,
run_on_end_of_period=True,
run_on_last_date=True
)
# adds the initial day
backtest = bt.Backtest(
bt.Strategy('', [algo]),
data
)
target.data = backtest.data
# end of week
target.now = dts[2]
assert algo(target)
# new week
target.now = dts[3]
assert not algo(target)
dts = pd.DatetimeIndex([datetime(2016, 1, 3), datetime(2017, 1, 8),datetime(2018, 1, 7)])
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
# adds the initial day
backtest = bt.Backtest(
bt.Strategy('', [algo]),
data
)
target.data = backtest.data
# check next year
target.now = dts[1]
assert algo(target)
def test_run_monthly():
dts = pd.date_range('2010-01-01', periods=367)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
target = mock.MagicMock()
target.data = data
algo = algos.RunMonthly()
# adds the initial day
backtest = bt.Backtest(
bt.Strategy('', [algo]),
data
)
target.data = backtest.data
# end of month
target.now = dts[30]
assert not algo(target)
# new month
target.now = dts[31]
assert algo(target)
algo = algos.RunMonthly(
run_on_first_date=False,
run_on_end_of_period=True,
run_on_last_date=True
)
# adds the initial day
backtest = bt.Backtest(
bt.Strategy('', [algo]),
data
)
target.data = backtest.data
# end of month
target.now = dts[30]
assert algo(target)
# new month
target.now = dts[31]
assert not algo(target)
dts = pd.DatetimeIndex([datetime(2016, 1, 3), datetime(2017, 1, 8), datetime(2018, 1, 7)])
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
# adds the initial day
backtest = bt.Backtest(
bt.Strategy('', [algo]),
data
)
target.data = backtest.data
# check next year
target.now = dts[1]
assert algo(target)
def test_run_quarterly():
dts = pd.date_range('2010-01-01', periods=367)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
target = mock.MagicMock()
target.data = data
algo = algos.RunQuarterly()
# adds the initial day
backtest = bt.Backtest(
bt.Strategy('', [algo]),
data
)
target.data = backtest.data
# end of quarter
target.now = dts[89]
assert not algo(target)
# new quarter
target.now = dts[90]
assert algo(target)
algo = algos.RunQuarterly(
run_on_first_date=False,
run_on_end_of_period=True,
run_on_last_date=True
)
# adds the initial day
backtest = bt.Backtest(
bt.Strategy('', [algo]),
data
)
target.data = backtest.data
# end of quarter
target.now = dts[89]
assert algo(target)
# new quarter
target.now = dts[90]
assert not algo(target)
dts = pd.DatetimeIndex([datetime(2016, 1, 3), datetime(2017, 1, 8), datetime(2018, 1, 7)])
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
# adds the initial day
backtest = bt.Backtest(
bt.Strategy('', [algo]),
data
)
target.data = backtest.data
# check next year
target.now = dts[1]
assert algo(target)
def test_run_yearly():
dts = pd.date_range('2010-01-01', periods=367)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
target = mock.MagicMock()
target.data = data
algo = algos.RunYearly()
# adds the initial day
backtest = bt.Backtest(
bt.Strategy('', [algo]),
data
)
target.data = backtest.data
# end of year
target.now = dts[364]
assert not algo(target)
# new year
target.now = dts[365]
assert algo(target)
algo = algos.RunYearly(
run_on_first_date=False,
run_on_end_of_period=True,
run_on_last_date=True
)
# adds the initial day
backtest = bt.Backtest(
bt.Strategy('', [algo]),
data
)
target.data = backtest.data
# end of year
target.now = dts[364]
assert algo(target)
# new year
target.now = dts[365]
assert not algo(target)
def test_run_on_date():
target = mock.MagicMock()
target.now = pd.to_datetime('2010-01-01')
algo = algos.RunOnDate('2010-01-01', '2010-01-02')
assert algo(target)
target.now = pd.to_datetime('2010-01-02')
assert algo(target)
target.now = pd.to_datetime('2010-01-03')
assert not algo(target)
def test_run_if_out_of_bounds():
algo = algos.RunIfOutOfBounds(0.5)
dts = pd.date_range('2010-01-01', periods=3)
s = bt.Strategy('s')
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
s.setup(data)
s.temp['selected'] = ['c1', 'c2']
s.temp['weights'] = {'c1': .5, 'c2':.5}
s.update(dts[0])
s.children['c1'] = bt.core.SecurityBase('c1')
s.children['c2'] = bt.core.SecurityBase('c2')
s.children['c1']._weight = 0.5
s.children['c2']._weight = 0.5
assert not algo(s)
s.children['c1']._weight = 0.25
s.children['c2']._weight = 0.75
assert not algo(s)
s.children['c1']._weight = 0.24
s.children['c2']._weight = 0.76
assert algo(s)
s.children['c1']._weight = 0.75
s.children['c2']._weight = 0.25
assert not algo(s)
s.children['c1']._weight = 0.76
s.children['c2']._weight = 0.24
assert algo(s)
def test_run_after_date():
target = mock.MagicMock()
target.now = pd.to_datetime('2010-01-01')
algo = algos.RunAfterDate('2010-01-02')
assert not algo(target)
target.now = pd.to_datetime('2010-01-02')
assert not algo(target)
target.now = pd.to_datetime('2010-01-03')
assert algo(target)
def test_run_after_days():
target = mock.MagicMock()
target.now = pd.to_datetime('2010-01-01')
algo = algos.RunAfterDays(3)
assert not algo(target)
assert not algo(target)
assert not algo(target)
assert algo(target)
def test_set_notional():
algo = algos.SetNotional('notional')
s = bt.FixedIncomeStrategy('s')
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100.)
notional = pd.Series(index=dts[:2], data=[1e6, 5e6])
s.setup( data, notional = notional )
s.update(dts[0])
assert algo(s)
assert s.temp['notional_value'] == 1e6
s.update(dts[1])
assert algo(s)
assert s.temp['notional_value'] == 5e6
s.update(dts[2])
assert not algo(s)
def test_rebalance():
algo = algos.Rebalance()
s = bt.Strategy('s')
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
s.setup(data)
s.adjust(1000)
s.update(dts[0])
s.temp['weights'] = {'c1': 1}
assert algo(s)
assert s.value == 1000
assert s.capital == 0
c1 = s['c1']
assert c1.value == 1000
assert c1.position == 10
assert c1.weight == 1.
s.temp['weights'] = {'c2': 1}
assert algo(s)
assert s.value == 1000
assert s.capital == 0
c2 = s['c2']
assert c1.value == 0
assert c1.position == 0
assert c1.weight == 0
assert c2.value == 1000
assert c2.position == 10
assert c2.weight == 1.
def test_rebalance_with_commissions():
algo = algos.Rebalance()
s = bt.Strategy('s')
s.set_commissions(lambda q, p: 1)
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
s.setup(data)
s.adjust(1000)
s.update(dts[0])
s.temp['weights'] = {'c1': 1}
assert algo(s)
assert s.value == 999
assert s.capital == 99
c1 = s['c1']
assert c1.value == 900
assert c1.position == 9
assert c1.weight == 900 / 999.
s.temp['weights'] = {'c2': 1}
assert algo(s)
assert s.value == 997
assert s.capital == 97
c2 = s['c2']
assert c1.value == 0
assert c1.position == 0
assert c1.weight == 0
assert c2.value == 900
assert c2.position == 9
assert c2.weight == 900. / 997
def test_rebalance_with_cash():
algo = algos.Rebalance()
s = bt.Strategy('s')
s.set_commissions(lambda q, p: 1)
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
s.setup(data)
s.adjust(1000)
s.update(dts[0])
s.temp['weights'] = {'c1': 1}
# set cash amount
s.temp['cash'] = 0.5
assert algo(s)
assert s.value == 999
assert s.capital == 599
c1 = s['c1']
assert c1.value == 400
assert c1.position == 4
assert c1.weight == 400.0 / 999
s.temp['weights'] = {'c2': 1}
# change cash amount
s.temp['cash'] = 0.25
assert algo(s)
assert s.value == 997
assert s.capital == 297
c2 = s['c2']
assert c1.value == 0
assert c1.position == 0
assert c1.weight == 0
assert c2.value == 700
assert c2.position == 7
assert c2.weight == 700.0 / 997
def test_rebalance_updatecount():
algo = algos.Rebalance()
s = bt.Strategy('s')
s.use_integer_positions(False)
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2', 'c3', 'c4','c5'], data=100)
s.setup(data)
s.adjust(1000)
s.update(dts[0])
s.temp['weights'] = {'c1': 0.25, 'c2':0.25, 'c3':0.25, 'c4':0.25}
update = bt.core.SecurityBase.update
bt.core.SecurityBase._update_call_count = 0
def side_effect(self, *args, **kwargs):
bt.core.SecurityBase._update_call_count += 1
return update(self, *args, **kwargs)
with mock.patch.object(bt.core.SecurityBase, 'update', side_effect) as mock_update:
assert algo(s)
assert s.value == 1000
assert s.capital == 0
# Update is called once when each weighted security is created (4)
# and once for each security after all allocations are made (4)
assert bt.core.SecurityBase._update_call_count == 8
s.update(dts[1])
s.temp['weights'] = {'c1': 0.5, 'c2':0.5}
update = bt.core.SecurityBase.update
bt.core.SecurityBase._update_call_count = 0
def side_effect(self, *args, **kwargs):
bt.core.SecurityBase._update_call_count += 1
return update(self, *args, **kwargs)
with mock.patch.object(bt.core.SecurityBase, 'update', side_effect) as mock_update:
assert algo(s)
# Update is called once for each weighted security before allocation (4)
# and once for each security after all allocations are made (4)
assert bt.core.SecurityBase._update_call_count == 8
s.update(dts[2])
s.temp['weights'] = {'c1': 0.25, 'c2':0.25, 'c3':0.25, 'c4':0.25}
update = bt.core.SecurityBase.update
bt.core.SecurityBase._update_call_count = 0
def side_effect(self, *args, **kwargs):
bt.core.SecurityBase._update_call_count += 1
return update(self, *args, **kwargs)
with mock.patch.object(bt.core.SecurityBase, 'update', side_effect) as mock_update:
assert algo(s)
# Update is called once for each weighted security before allocation (2)
# and once for each security after all allocations are made (4)
assert bt.core.SecurityBase._update_call_count == 6
def test_rebalance_fixedincome():
algo = algos.Rebalance()
c1 = bt.Security('c1')
c2 = bt.CouponPayingSecurity('c2')
s = bt.FixedIncomeStrategy('s', children = [c1, c2])
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
coupons = pd.DataFrame(index=dts, columns=['c2'], data=0)
s.setup(data, coupons=coupons)
s.update(dts[0])
s.temp['notional_value'] = 1000
s.temp['weights'] = {'c1': 1}
assert algo(s)
assert s.value == 0.
assert s.notional_value == 1000
assert s.capital == -1000
c1 = s['c1']
assert c1.value == 1000
assert c1.notional_value == 1000
assert c1.position == 10
assert c1.weight == 1.
s.temp['weights'] = {'c2': 1}
assert algo(s)
assert s.value == 0.
assert s.notional_value == 1000
assert s.capital == -1000*100
c2 = s['c2']
assert c1.value == 0
assert c1.notional_value == 0
assert c1.position == 0
assert c1.weight == 0
assert c2.value == 1000*100
assert c2.notional_value == 1000
assert c2.position == 1000
assert c2.weight == 1.
def test_select_all():
algo = algos.SelectAll()
s = bt.Strategy('s')
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100.)
data['c1'][dts[1]] = np.nan
data['c2'][dts[1]] = 95
data['c1'][dts[2]] = -5
s.setup(data)
s.update(dts[0])
assert algo(s)
selected = s.temp['selected']
assert len(selected) == 2
assert 'c1' in selected
assert 'c2' in selected
# make sure don't keep nan
s.update(dts[1])
assert algo(s)
selected = s.temp['selected']
assert len(selected) == 1
assert 'c2' in selected
# if specify include_no_data then 2
algo2 = algos.SelectAll(include_no_data=True)
assert algo2(s)
selected = s.temp['selected']
assert len(selected) == 2
assert 'c1' in selected
assert 'c2' in selected
# behavior on negative prices
s.update(dts[2])
assert algo(s)
selected = s.temp['selected']
assert len(selected) == 1
assert 'c2' in selected
algo3 = algos.SelectAll(include_negative=True)
assert algo3(s)
selected = s.temp['selected']
assert len(selected) == 2
assert 'c1' in selected
assert 'c2' in selected
def test_select_randomly_n_none():
algo = algos.SelectRandomly(n=None) # Behaves like SelectAll
s = bt.Strategy('s')
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100.)
data['c1'][dts[1]] = np.nan
data['c2'][dts[1]] = 95
data['c1'][dts[2]] = -5
s.setup(data)
s.update(dts[0])
assert algo(s)
selected = s.temp.pop('selected')
assert len(selected) == 2
assert 'c1' in selected
assert 'c2' in selected
# make sure don't keep nan
s.update(dts[1])
assert algo(s)
selected = s.temp.pop('selected')
assert len(selected) == 1
assert 'c2' in selected
# if specify include_no_data then 2
algo2 = algos.SelectRandomly(n=None, include_no_data=True)
assert algo2(s)
selected = s.temp.pop('selected')
assert len(selected) == 2
assert 'c1' in selected
assert 'c2' in selected
# behavior on negative prices
s.update(dts[2])
assert algo(s)
selected = s.temp.pop('selected')
assert len(selected) == 1
assert 'c2' in selected
algo3 = algos.SelectRandomly(n=None, include_negative=True)
assert algo3(s)
selected = s.temp.pop('selected')
assert len(selected) == 2
assert 'c1' in selected
assert 'c2' in selected
def test_select_randomly():
s = bt.Strategy('s')
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2', 'c3'], data=100.)
data['c1'][dts[0]] = np.nan
data['c2'][dts[0]] = 95
data['c3'][dts[0]] = -5
s.setup(data)
s.update(dts[0])
algo = algos.SelectRandomly(n=1)
assert algo(s)
assert s.temp.pop('selected') == ['c2']
random.seed(1000)
algo = algos.SelectRandomly(n=1, include_negative=True)
assert algo(s)
assert s.temp.pop('selected') == ['c3']
random.seed(1009)
algo = algos.SelectRandomly(n=1, include_no_data=True)
assert algo(s)
assert s.temp.pop('selected') == ['c1']
random.seed(1009)
# If selected already set, it will further filter it
s.temp['selected'] = ['c2']
algo = algos.SelectRandomly(n=1, include_no_data=True)
assert algo(s)
assert s.temp.pop('selected') == ['c2']
def test_select_these():
s = bt.Strategy('s')
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100.)
data['c1'][dts[1]] = np.nan
data['c2'][dts[1]] = 95
data['c1'][dts[2]] = -5
s.setup(data)
s.update(dts[0])
algo = algos.SelectThese( ['c1', 'c2'])
assert algo(s)
selected = s.temp['selected']
assert len(selected) == 2
assert 'c1' in selected
assert 'c2' in selected
algo = algos.SelectThese( ['c1'])
assert algo(s)
selected = s.temp['selected']
assert len(selected) == 1
assert 'c1' in selected
# make sure don't keep nan
s.update(dts[1])
algo = algos.SelectThese( ['c1', 'c2'])
assert algo(s)
selected = s.temp['selected']
assert len(selected) == 1
assert 'c2' in selected
# if specify include_no_data then 2
algo2 = algos.SelectThese( ['c1', 'c2'], include_no_data=True)
assert algo2(s)
selected = s.temp['selected']
assert len(selected) == 2
assert 'c1' in selected
assert 'c2' in selected
# behavior on negative prices
s.update(dts[2])
assert algo(s)
selected = s.temp['selected']
assert len(selected) == 1
assert 'c2' in selected
algo3 = algos.SelectThese(['c1', 'c2'], include_negative=True)
assert algo3(s)
selected = s.temp['selected']
assert len(selected) == 2
assert 'c1' in selected
assert 'c2' in selected
def test_select_where_all():
s = bt.Strategy('s')
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100.)
data['c1'][dts[1]] = np.nan
data['c2'][dts[1]] = 95
data['c1'][dts[2]] = -5
where = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=True)
s.setup(data, where = where)
s.update(dts[0])
algo = algos.SelectWhere('where')
assert algo(s)
selected = s.temp['selected']
assert len(selected) == 2
assert 'c1' in selected
assert 'c2' in selected
# make sure don't keep nan
s.update(dts[1])
algo = algos.SelectThese( ['c1', 'c2'])
assert algo(s)
selected = s.temp['selected']
assert len(selected) == 1
assert 'c2' in selected
# if specify include_no_data then 2
algo2 = algos.SelectWhere('where', include_no_data=True)
assert algo2(s)
selected = s.temp['selected']
assert len(selected) == 2
assert 'c1' in selected
assert 'c2' in selected
# behavior on negative prices
s.update(dts[2])
assert algo(s)
selected = s.temp['selected']
assert len(selected) == 1
assert 'c2' in selected
algo3 = algos.SelectWhere('where', include_negative=True)
assert algo3(s)
selected = s.temp['selected']
assert len(selected) == 2
assert 'c1' in selected
assert 'c2' in selected
def test_select_where():
s = bt.Strategy('s')
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100.)
where = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=True)
where.loc[ dts[1] ] = False
where['c1'].loc[ dts[2] ] = False
algo = algos.SelectWhere('where')
s.setup(data, where=where)
s.update(dts[0])
assert algo(s)
selected = s.temp['selected']
assert len(selected) == 2
assert 'c1' in selected
assert 'c2' in selected
s.update(dts[1])
assert algo(s)
assert s.temp['selected'] == []
s.update(dts[2])
assert algo(s)
assert s.temp['selected'] == ['c2']
def test_select_where_legacy():
s = bt.Strategy('s')
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100.)
where = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=True)
where.loc[ dts[1] ] = False
where['c1'].loc[ dts[2] ] = False
algo = algos.SelectWhere(where)
s.setup(data)
s.update(dts[0])
assert algo(s)
selected = s.temp['selected']
assert len(selected) == 2
assert 'c1' in selected
assert 'c2' in selected
s.update(dts[1])
assert algo(s)
assert s.temp['selected'] == []
s.update(dts[2])
assert algo(s)
assert s.temp['selected'] == ['c2']
def test_select_regex():
s = bt.Strategy('s')
algo = algos.SelectRegex( 'c1' )
s.temp['selected'] = ['a1', 'c1', 'c2', 'c11', 'cc1']
assert algo( s )
assert s.temp['selected'] == ['c1', 'c11', 'cc1']
algo = algos.SelectRegex( '^c1$' )
assert algo( s )
assert s.temp['selected'] == ['c1']
def test_resolve_on_the_run():
s = bt.Strategy('s')
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2', 'b1'], data=100.)
data['c1'][dts[1]] = np.nan
data['c2'][dts[1]] = 95
data['c2'][dts[2]] = -5
on_the_run = pd.DataFrame(index=dts, columns=['c'], data='c1')
on_the_run.loc[dts[2], 'c'] = 'c2'
s.setup(data, on_the_run = on_the_run)
s.update(dts[0])
s.temp['selected'] = ['c', 'b1']
algo = algos.ResolveOnTheRun( 'on_the_run' )
assert algo(s)
selected = s.temp['selected']
assert len(selected) == 2
assert 'c1' in selected
assert 'b1' in selected
# make sure don't keep nan
s.update(dts[1])
s.temp['selected'] = ['c', 'b1']
assert algo(s)
selected = s.temp['selected']
assert len(selected) == 1
assert 'b1' in selected
# if specify include_no_data then 2
algo2 = algos.ResolveOnTheRun('on_the_run', include_no_data=True)
s.temp['selected'] = ['c', 'b1']
assert algo2(s)
selected = s.temp['selected']
assert len(selected) == 2
assert 'c1' in selected
assert 'b1' in selected
# behavior on negative prices
s.update(dts[2])
s.temp['selected'] = ['c', 'b1']
assert algo(s)
selected = s.temp['selected']
assert len(selected) == 1
assert 'b1' in selected
algo3 = algos.ResolveOnTheRun('on_the_run', include_negative=True)
s.temp['selected'] = ['c', 'b1']
assert algo3(s)
selected = s.temp['selected']
assert len(selected) == 2
assert 'c2' in selected
assert 'b1' in selected
def test_select_types():
c1 = bt.Security('c1')
c2 = bt.CouponPayingSecurity('c2')
c3 = bt.HedgeSecurity('c3')
c4 = bt.CouponPayingHedgeSecurity('c4')
c5 = bt.FixedIncomeSecurity('c5')
s = bt.Strategy('p', children = [c1, c2, c3, c4, c5])
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2', 'c3', 'c4', 'c5'], data=100.)
coupons = pd.DataFrame(index=dts, columns=['c2', 'c4'], data=0.)
s.setup(data, coupons = coupons)
i = 0
s.update(dts[i])
algo = algos.SelectTypes(include_types=(bt.Security, bt.HedgeSecurity), exclude_types=())
assert algo(s)
assert set(s.temp.pop('selected')) == set(['c1', 'c3'])
algo = algos.SelectTypes(include_types=(bt.core.SecurityBase,), exclude_types=(bt.CouponPayingSecurity,))
assert algo(s)
assert set(s.temp.pop('selected')) == set(['c1', 'c3', 'c5'])
s.temp['selected'] = ['c1', 'c2', 'c3']
algo = algos.SelectTypes(include_types=(bt.core.SecurityBase,))
assert algo(s)
assert set(s.temp.pop('selected')) == set(['c1', 'c2', 'c3'])
def test_weight_equally():
algo = algos.WeighEqually()
s = bt.Strategy('s')
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
s.setup(data)
s.update(dts[0])
s.temp['selected'] = ['c1', 'c2']
assert algo(s)
weights = s.temp['weights']
assert len(weights) == 2
assert 'c1' in weights
assert weights['c1'] == 0.5
assert 'c2' in weights
assert weights['c2'] == 0.5
def test_weight_specified():
algo = algos.WeighSpecified(c1=0.6, c2=0.4)
s = bt.Strategy('s')
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
data['c1'][dts[1]] = 105
data['c2'][dts[1]] = 95
s.setup(data)
s.update(dts[0])
assert algo(s)
weights = s.temp['weights']
assert len(weights) == 2
assert 'c1' in weights
assert weights['c1'] == 0.6
assert 'c2' in weights
assert weights['c2'] == 0.4
def test_scale_weights():
s = bt.Strategy('s')
algo = algos.ScaleWeights( -0.5 )
s.temp['weights'] = {'c1': 0.5, 'c2': -0.4, 'c3':0 }
assert algo( s )
assert s.temp['weights'] == {'c1':-0.25, 'c2':0.2, 'c3':0}
def test_select_has_data():
algo = algos.SelectHasData(min_count=3, lookback=pd.DateOffset(days=3))
s = bt.Strategy('s')
dts = pd.date_range('2010-01-01', periods=10)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100.)
data['c1'].loc[dts[0]] = np.nan
data['c1'].loc[dts[1]] = np.nan
s.setup(data)
s.update(dts[2])
assert algo(s)
selected = s.temp['selected']
assert len(selected) == 1
assert 'c2' in selected
def test_select_has_data_preselected():
algo = algos.SelectHasData(min_count=3, lookback=pd.DateOffset(days=3))
s = bt.Strategy('s')
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100.)
data['c1'].loc[dts[0]] = np.nan
data['c1'].loc[dts[1]] = np.nan
s.setup(data)
s.update(dts[2])
s.temp['selected'] = ['c1']
assert algo(s)
selected = s.temp['selected']
assert len(selected) == 0
@mock.patch('ffn.calc_erc_weights')
def test_weigh_erc(mock_erc):
algo = algos.WeighERC(lookback=pd.DateOffset(days=5))
mock_erc.return_value = pd.Series({'c1': 0.3, 'c2': 0.7})
s = bt.Strategy('s')
dts = pd.date_range('2010-01-01', periods=5)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100.)
s.setup(data)
s.update(dts[4])
s.temp['selected'] = ['c1', 'c2']
assert algo(s)
assert mock_erc.called
rets = mock_erc.call_args[0][0]
assert len(rets) == 4
assert 'c1' in rets
assert 'c2' in rets
weights = s.temp['weights']
assert len(weights) == 2
assert weights['c1'] == 0.3
assert weights['c2'] == 0.7
def test_weigh_target():
algo = algos.WeighTarget('target')
s = bt.Strategy('s')
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100.)
target = pd.DataFrame(index=dts[:2], columns=['c1', 'c2'], data=0.5)
target['c1'].loc[dts[1]] = 1.0
target['c2'].loc[dts[1]] = 0.0
s.setup( data, target = target )
s.update(dts[0])
assert algo(s)
weights = s.temp['weights']
assert len(weights) == 2
assert weights['c1'] == 0.5
assert weights['c2'] == 0.5
s.update(dts[1])
assert algo(s)
weights = s.temp['weights']
assert len(weights) == 2
assert weights['c1'] == 1.0
assert weights['c2'] == 0.0
s.update(dts[2])
assert not algo(s)
def test_weigh_inv_vol():
algo = algos.WeighInvVol(lookback=pd.DateOffset(days=5))
s = bt.Strategy('s')
dts = pd.date_range('2010-01-01', periods=5)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100.)
# high vol c1
data['c1'].loc[dts[1]] = 105
data['c1'].loc[dts[2]] = 95
data['c1'].loc[dts[3]] = 105
data['c1'].loc[dts[4]] = 95
# low vol c2
data['c2'].loc[dts[1]] = 100.1
data['c2'].loc[dts[2]] = 99.9
data['c2'].loc[dts[3]] = 100.1
data['c2'].loc[dts[4]] = 99.9
s.setup(data)
s.update(dts[4])
s.temp['selected'] = ['c1', 'c2']
assert algo(s)
weights = s.temp['weights']
assert len(weights) == 2
assert weights['c2'] > weights['c1']
aae(weights['c1'], 0.020, 3)
aae(weights['c2'], 0.980, 3)
@mock.patch('ffn.calc_mean_var_weights')
def test_weigh_mean_var(mock_mv):
algo = algos.WeighMeanVar(lookback=pd.DateOffset(days=5))
mock_mv.return_value = pd.Series({'c1': 0.3, 'c2': 0.7})
s = bt.Strategy('s')
dts = pd.date_range('2010-01-01', periods=5)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100.)
s.setup(data)
s.update(dts[4])
s.temp['selected'] = ['c1', 'c2']
assert algo(s)
assert mock_mv.called
rets = mock_mv.call_args[0][0]
assert len(rets) == 4
assert 'c1' in rets
assert 'c2' in rets
weights = s.temp['weights']
assert len(weights) == 2
assert weights['c1'] == 0.3
assert weights['c2'] == 0.7
def test_weigh_randomly():
s = bt.Strategy('s')
s.temp['selected'] = ['c1', 'c2', 'c3']
algo = algos.WeighRandomly()
assert algo(s)
weights = s.temp['weights']
assert len( weights ) == 3
assert sum( weights.values() ) == 1.
algo = algos.WeighRandomly( (0.3,0.5), 0.95)
assert algo(s)
weights = s.temp['weights']
assert len( weights ) == 3
aae( sum( weights.values() ), 0.95 )
for c in s.temp['selected']:
assert weights[c] <= 0.5
assert weights[c] >= 0.3
def test_set_stat():
s = bt.Strategy('s')
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100.)
data['c1'].loc[dts[1]] = 105
data['c2'].loc[dts[1]] = 95
stat = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=4.)
stat['c1'].loc[dts[1]] = 5.
stat['c2'].loc[dts[1]] = 6.
algo = algos.SetStat( 'test_stat' )
s.setup(data, test_stat = stat)
s.update(dts[0])
print()
print(s.get_data('test_stat'))
assert algo(s)
stat = s.temp['stat']
assert stat['c1'] == 4.
assert stat['c2'] == 4.
s.update(dts[1])
assert algo(s)
stat = s.temp['stat']
assert stat['c1'] == 5.
assert stat['c2'] == 6.
def test_set_stat_legacy():
s = bt.Strategy('s')
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100.)
data['c1'].loc[dts[1]] = 105
data['c2'].loc[dts[1]] = 95
stat = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=4.)
stat['c1'].loc[dts[1]] = 5.
stat['c2'].loc[dts[1]] = 6.
algo = algos.SetStat( stat )
s.setup(data)
s.update(dts[0])
assert algo(s)
stat = s.temp['stat']
assert stat['c1'] == 4.
assert stat['c2'] == 4.
s.update(dts[1])
assert algo(s)
stat = s.temp['stat']
assert stat['c1'] == 5.
assert stat['c2'] == 6.
def test_stat_total_return():
algo = algos.StatTotalReturn(lookback=pd.DateOffset(days=3))
s = bt.Strategy('s')
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100.)
data['c1'].loc[dts[2]] = 105
data['c2'].loc[dts[2]] = 95
s.setup(data)
s.update(dts[2])
s.temp['selected'] = ['c1', 'c2']
assert algo(s)
stat = s.temp['stat']
assert len(stat) == 2
assert stat['c1'] == 105.0 / 100 - 1
assert stat['c2'] == 95.0 / 100 - 1
def test_select_n():
algo = algos.SelectN(n=1, sort_descending=True)
s = bt.Strategy('s')
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100.)
data['c1'].loc[dts[2]] = 105
data['c2'].loc[dts[2]] = 95
s.setup(data)
s.update(dts[2])
s.temp['stat'] = data.calc_total_return()
assert algo(s)
selected = s.temp['selected']
assert len(selected) == 1
assert 'c1' in selected
algo = algos.SelectN(n=1, sort_descending=False)
assert algo(s)
selected = s.temp['selected']
assert len(selected) == 1
assert 'c2' in selected
    # returns the 2 available securities when n=3 since all_or_none is False
algo = algos.SelectN(n=3, sort_descending=False)
assert algo(s)
selected = s.temp['selected']
assert len(selected) == 2
assert 'c1' in selected
assert 'c2' in selected
    # returns nothing when fewer than n=3 are available and all_or_none is True
algo = algos.SelectN(n=3, sort_descending=False, all_or_none=True)
assert algo(s)
selected = s.temp['selected']
assert len(selected) == 0
def test_select_n_perc():
algo = algos.SelectN(n=0.5, sort_descending=True)
s = bt.Strategy('s')
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100.)
data['c1'].loc[dts[2]] = 105
data['c2'].loc[dts[2]] = 95
s.setup(data)
s.update(dts[2])
s.temp['stat'] = data.calc_total_return()
assert algo(s)
selected = s.temp['selected']
assert len(selected) == 1
assert 'c1' in selected
def test_select_momentum():
algo = algos.SelectMomentum(n=1, lookback=pd.DateOffset(days=3))
s = bt.Strategy('s')
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100.)
data['c1'].loc[dts[2]] = 105
data['c2'].loc[dts[2]] = 95
s.setup(data)
s.update(dts[2])
s.temp['selected'] = ['c1', 'c2']
assert algo(s)
actual = s.temp['selected']
assert len(actual) == 1
assert 'c1' in actual
def test_limit_weights():
s = bt.Strategy('s')
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100.)
s.setup(data)
s.temp['weights'] = {'c1': 0.6, 'c2':0.2, 'c3':0.2}
algo = algos.LimitWeights(0.5)
assert algo(s)
w = s.temp['weights']
assert w['c1'] == 0.5
assert w['c2'] == 0.25
assert w['c3'] == 0.25
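    # with three assets a 0.3 cap is below the 1/3 equal weight that would be
    # needed to sum to 1, so (per the assertion below) no weights are returned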
algo = algos.LimitWeights(0.3)
assert algo(s)
w = s.temp['weights']
assert w == {}
s.temp['weights'] = {'c1': 0.4, 'c2':0.3, 'c3':0.3}
algo = algos.LimitWeights(0.5)
assert algo(s)
w = s.temp['weights']
assert w['c1'] == 0.4
assert w['c2'] == 0.3
assert w['c3'] == 0.3
def test_limit_deltas():
s = bt.Strategy('s')
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100.)
s.setup(data)
s.temp['weights'] = {'c1': 1}
algo = algos.LimitDeltas(0.1)
assert algo(s)
w = s.temp['weights']
assert w['c1'] == 0.1
s.temp['weights'] = {'c1': 0.05}
algo = algos.LimitDeltas(0.1)
assert algo(s)
w = s.temp['weights']
assert w['c1'] == 0.05
s.temp['weights'] = {'c1': 0.5, 'c2': 0.5}
algo = algos.LimitDeltas(0.1)
assert algo(s)
w = s.temp['weights']
assert len(w) == 2
assert w['c1'] == 0.1
assert w['c2'] == 0.1
s.temp['weights'] = {'c1': 0.5, 'c2': -0.5}
algo = algos.LimitDeltas(0.1)
assert algo(s)
w = s.temp['weights']
assert len(w) == 2
assert w['c1'] == 0.1
assert w['c2'] == -0.1
s.temp['weights'] = {'c1': 0.5, 'c2': -0.5}
algo = algos.LimitDeltas({'c1': 0.1})
assert algo(s)
w = s.temp['weights']
assert len(w) == 2
assert w['c1'] == 0.1
assert w['c2'] == -0.5
s.temp['weights'] = {'c1': 0.5, 'c2': -0.5}
algo = algos.LimitDeltas({'c1': 0.1, 'c2': 0.3})
assert algo(s)
w = s.temp['weights']
assert len(w) == 2
assert w['c1'] == 0.1
assert w['c2'] == -0.3
    # set existing weights
s.children['c1'] = bt.core.SecurityBase('c1')
s.children['c1']._weight = 0.3
s.children['c2'] = bt.core.SecurityBase('c2')
s.children['c2']._weight = -0.7
s.temp['weights'] = {'c1': 0.5, 'c2': -0.5}
algo = algos.LimitDeltas(0.1)
assert algo(s)
w = s.temp['weights']
assert len(w) == 2
assert w['c1'] == 0.4
assert w['c2'] == -0.6
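# A minimal sketch of the delta-limiting behaviour exercised above, assuming a helper
# of this shape; the name and structure are illustrative, not bt's actual implementation.
def _limit_deltas_sketch(current_weights, target_weights, limit):
    out = {}
    for name, tgt in target_weights.items():
        # per-security cap when a dict is given, otherwise a global cap (missing keys are uncapped)
        cap = limit.get(name, float('inf')) if isinstance(limit, dict) else limit
        cur = current_weights.get(name, 0.0)
        delta = max(-cap, min(cap, tgt - cur))
        out[name] = cur + delta
    return out
# e.g. _limit_deltas_sketch({'c1': 0.3, 'c2': -0.7}, {'c1': 0.5, 'c2': -0.5}, 0.1) -> {'c1': 0.4, 'c2': -0.6}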
def test_rebalance_over_time():
target = mock.MagicMock()
rb = mock.MagicMock()
algo = algos.RebalanceOverTime(n=2)
# patch in rb function
algo._rb = rb
target.temp = {}
target.temp['weights'] = {'a': 1, 'b': 0}
a = mock.MagicMock()
a.weight = 0.
b = mock.MagicMock()
b.weight = 1.
target.children = {'a': a, 'b': b}
assert algo(target)
w = target.temp['weights']
assert len(w) == 2
assert w['a'] == 0.5
assert w['b'] == 0.5
assert rb.called
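    # the patched rebalance should have been called with the intermediate (half-way) weights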
called_tgt = rb.call_args[0][0]
called_tgt_w = called_tgt.temp['weights']
assert len(called_tgt_w) == 2
assert called_tgt_w['a'] == 0.5
assert called_tgt_w['b'] == 0.5
# update weights for next call
a.weight = 0.5
b.weight = 0.5
# clear out temp - same as would Strategy
target.temp = {}
assert algo(target)
w = target.temp['weights']
assert len(w) == 2
assert w['a'] == 1.
assert w['b'] == 0.
assert rb.call_count == 2
# update weights for next call
# should do nothing now
a.weight = 1
b.weight = 0
# clear out temp - same as would Strategy
target.temp = {}
assert algo(target)
# no diff in call_count since last time
assert rb.call_count == 2
def test_require():
target = mock.MagicMock()
target.temp = {}
algo = algos.Require(lambda x: len(x) > 0, 'selected')
assert not algo(target)
target.temp['selected'] = []
assert not algo(target)
target.temp['selected'] = ['a', 'b']
assert algo(target)
def test_run_every_n_periods():
target = mock.MagicMock()
target.temp = {}
algo = algos.RunEveryNPeriods(n=3, offset=0)
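    # with n=3 and offset=0 the algo fires on the 1st distinct date it sees, then every 3rd one after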
target.now = pd.to_datetime('2010-01-01')
assert algo(target)
# run again w/ no date change should not trigger
assert not algo(target)
target.now = pd.to_datetime('2010-01-02')
assert not algo(target)
target.now = pd.to_datetime('2010-01-03')
assert not algo(target)
target.now = pd.to_datetime('2010-01-04')
assert algo(target)
target.now = pd.to_datetime('2010-01-05')
assert not algo(target)
def test_run_every_n_periods_offset():
target = mock.MagicMock()
target.temp = {}
algo = algos.RunEveryNPeriods(n=3, offset=1)
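    # with offset=1 the first trigger is delayed to the 2nd distinct date, then every 3rd one after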
target.now = pd.to_datetime('2010-01-01')
assert not algo(target)
# run again w/ no date change should not trigger
assert not algo(target)
target.now = pd.to_datetime('2010-01-02')
assert algo(target)
target.now = pd.to_datetime('2010-01-03')
assert not algo(target)
target.now = pd.to_datetime('2010-01-04')
assert not algo(target)
target.now = pd.to_datetime('2010-01-05')
assert algo(target)
def test_not():
target = mock.MagicMock()
target.temp = {}
    # notAlgo runs on every date except 2018-01-02
runOnDateAlgo = algos.RunOnDate(pd.to_datetime('2018-01-02'))
notAlgo = algos.Not(runOnDateAlgo)
target.now = pd.to_datetime('2018-01-01')
assert notAlgo(target)
target.now = pd.to_datetime('2018-01-02')
assert not notAlgo(target)
def test_or():
target = mock.MagicMock()
target.temp = {}
    # underlying algos run on 2018-01-02, 2018-01-03 and 2018-01-04 (two algos on the last date)
runOnDateAlgo = algos.RunOnDate(pd.to_datetime('2018-01-02'))
runOnDateAlgo2 = algos.RunOnDate(pd.to_datetime('2018-01-03'))
runOnDateAlgo3 = algos.RunOnDate(pd.to_datetime('2018-01-04'))
runOnDateAlgo4 = algos.RunOnDate(pd.to_datetime('2018-01-04'))
orAlgo = algos.Or([runOnDateAlgo, runOnDateAlgo2, runOnDateAlgo3, runOnDateAlgo4])
    # verify it returns False when none of the algos is true
target.now = pd.to_datetime('2018-01-01')
assert not orAlgo(target)
# verify it returns true when the first is true
target.now = pd.to_datetime('2018-01-02')
assert orAlgo(target)
# verify it returns true when the second is true
target.now = pd.to_datetime('2018-01-03')
assert orAlgo(target)
# verify it returns true when both algos return true
target.now = pd.to_datetime('2018-01-04')
assert orAlgo(target)
def test_TargetVol():
s = bt.Strategy('s')
dts = pd.date_range('2010-01-01', periods=7)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100.)
# high vol c1
data.loc[dts[0],'c1'] = 95
data.loc[dts[1],'c1'] = 105
data.loc[dts[2],'c1'] = 95
data.loc[dts[3],'c1'] = 105
data.loc[dts[4],'c1'] = 95
data.loc[dts[5],'c1'] = 105
data.loc[dts[6],'c1'] = 95
# low vol c2
data.loc[dts[0], 'c2'] = 99
data.loc[dts[1], 'c2'] = 101
data.loc[dts[2], 'c2'] = 99
data.loc[dts[3], 'c2'] = 101
data.loc[dts[4], 'c2'] = 99
data.loc[dts[5], 'c2'] = 101
data.loc[dts[6], 'c2'] = 99
targetVolAlgo = algos.TargetVol(
0.1,
lookback=pd.DateOffset(days=5),
lag=pd.DateOffset(days=1),
covar_method='standard',
annualization_factor=1
)
s.setup(data)
s.update(dts[6])
s.temp['weights'] = {'c1':0.5, 'c2':0.5}
assert targetVolAlgo(s)
weights = s.temp['weights']
assert len(weights) == 2
assert np.isclose(weights['c2'],weights['c1'])
unannualized_c2_weight = weights['c1']
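    # an annualized run (vol target and annualization factor scaled consistently) should reproduce the same weights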
targetVolAlgo = algos.TargetVol(
0.1*np.sqrt(252),
lookback=pd.DateOffset(days=5),
lag=pd.DateOffset(days=1),
covar_method='standard',
annualization_factor=252
)
s.setup(data)
s.update(dts[6])
s.temp['weights'] = {'c1': 0.5, 'c2': 0.5}
assert targetVolAlgo(s)
weights = s.temp['weights']
assert len(weights) == 2
assert np.isclose(weights['c2'], weights['c1'])
assert np.isclose(unannualized_c2_weight, weights['c2'])
def test_PTE_Rebalance():
s = bt.Strategy('s')
dts = pd.date_range('2010-01-01', periods=30*4)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100.)
# high vol c1
# low vol c2
for i,dt in enumerate(dts[:-2]):
if i % 2 == 0:
data.loc[dt,'c1'] = 95
data.loc[dt,'c2'] = 101
else:
data.loc[dt, 'c1'] = 105
data.loc[dt, 'c2'] = 99
dt = dts[-2]
data.loc[dt,'c1'] = 115
data.loc[dt,'c2'] = 97
s.setup(data)
s.update(dts[-2])
s.adjust(1000000)
s.rebalance(0.4,'c1')
s.rebalance(0.6,'c2')
wdf = pd.DataFrame(
np.zeros(data.shape),
columns=data.columns,
index=data.index
)
wdf['c1'] = 0.5
wdf['c2'] = 0.5
PTE_rebalance_Algo = bt.algos.PTE_Rebalance(
0.01,
wdf,
lookback=pd.DateOffset(months=3),
lag=pd.DateOffset(days=1),
covar_method='standard',
annualization_factor=252
)
assert PTE_rebalance_Algo(s)
s.rebalance(0.5, 'c1')
s.rebalance(0.5, 'c2')
assert not PTE_rebalance_Algo(s)
def test_close_positions_after_date():
c1 = bt.Security('c1')
c2 = bt.Security('c2')
c3 = bt.Security('c3')
s = bt.Strategy('s', children = [c1, c2, c3])
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2', 'c3'], data=100)
c1 = s['c1']
c2 = s['c2']
c3 = s['c3']
cutoffs= pd.DataFrame( { 'date' : [ dts[1], dts[2] ] }, index = ['c1','c2'] )
algo = algos.ClosePositionsAfterDates( 'cutoffs' )
s.setup(data, cutoffs=cutoffs)
s.update(dts[0])
s.transact( 100, 'c1')
s.transact( 100, 'c2')
s.transact( 100, 'c3')
algo(s)
assert c1.position == 100
assert c2.position == 100
assert c3.position == 100
# Don't run anything on dts[1], even though that's when c1 closes
s.update( dts[2])
algo(s)
assert c1.position == 0
assert c2.position == 0
assert c3.position == 100
assert s.perm['closed'] == set(['c1', 'c2'])
def test_roll_positions_after_date():
c1 = bt.Security('c1')
c2 = bt.Security('c2')
c3 = bt.Security('c3')
s = bt.Strategy('s', children = [c1, c2, c3])
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2', 'c3'], data=100)
c1 = s['c1']
c2 = s['c2']
c3 = s['c3']
roll = pd.DataFrame( { 'date' : [ dts[1], dts[2] ], 'target' : [ 'c3', 'c1' ], 'factor' : [ 0.5, 2.0 ] }, index = ['c1','c2'] )
algo = algos.RollPositionsAfterDates( 'roll' )
s.setup(data, roll=roll)
s.update(dts[0])
s.transact( 100, 'c1')
s.transact( 100, 'c2')
s.transact( 100, 'c3')
algo(s)
assert c1.position == 100
assert c2.position == 100
assert c3.position == 100
    # Don't run anything on dts[1], even though that's when c1 rolls
s.update( dts[2])
algo(s)
assert c1.position == 200 # From c2
assert c2.position == 0
assert c3.position == 100 + 50
assert s.perm['rolled'] == set(['c1', 'c2'])
def test_replay_transactions():
c1 = bt.Security('c1')
c2 = bt.Security('c2')
s = bt.Strategy('s', children = [c1, c2])
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2', 'c3'], data=100)
c1 = s['c1']
c2 = s['c2']
transactions = pd.DataFrame( [ ( pd.Timestamp( '2009-12-01 00'), 'c1', 100, 99.5),
( pd.Timestamp( '2010-01-01 10'), 'c1', -100, 101),
( pd.Timestamp( '2010-01-02 00'), 'c2', 50, 103)
],
columns = ['Date', 'Security', 'quantity', 'price'])
transactions = transactions.set_index( ['Date','Security'])
algo = algos.ReplayTransactions( 'transactions' )
s.setup(data, bidoffer={}, transactions=transactions) # Pass bidoffer so it will track bidoffer paid
s.adjust(1000)
s.update(dts[0])
algo(s)
assert c1.position == 100
assert c2.position == 0
assert c1.bidoffer_paid == -50
s.update(dts[1])
algo(s)
assert c1.position == 0
assert c2.position == 50
assert c1.bidoffer_paid == -100
assert c2.bidoffer_paid == 150
def test_replay_transactions_consistency():
c1 = bt.Security('c1')
c2 = bt.Security('c2')
s = bt.Strategy('s', children = [c1, c2])
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2', 'c3'], data=100)
transactions = pd.DataFrame( [ ( pd.Timestamp( '2010-01-01 00'), 'c1', -100., 101.),
( pd.Timestamp( '2010-01-02 00'), 'c2', 50., 103.)
],
columns = ['Date', 'Security', 'quantity', 'price'])
transactions = transactions.set_index( ['Date','Security'])
algo = algos.ReplayTransactions( 'transactions' )
strategy = bt.Strategy('strategy', algos = [ algo ], children = [c1, c2])
backtest = bt.backtest.Backtest(strategy, data, name='Test',
additional_data={'bidoffer':{}, 'transactions':transactions})
out = bt.run(backtest)
t1 = transactions.sort_index(axis=1)
t2 = out.get_transactions().sort_index(axis=1)
assert t1.equals( t2 )
def test_simulate_rfq_transactions():
c1 = bt.Security('c1')
c2 = bt.Security('c2')
s = bt.Strategy('s', children = [c1, c2])
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2', 'c3'], data=100)
c1 = s['c1']
c2 = s['c2']
rfqs = pd.DataFrame( [ ( 'A', pd.Timestamp( '2009-12-01 00'), 'c1', 100),
( 'B', pd.Timestamp( '2010-01-01 10'), 'c1', -100),
( 'C', pd.Timestamp( '2010-01-01 12'), 'c1', 75),
( 'D', pd.Timestamp( '2010-01-02 00'), 'c2', 50)
],
columns = ['id', 'Date', 'Security', 'quantity'])
rfqs = rfqs.set_index(['Date','Security'])
def model( rfqs, target ):
# Dummy model - in practice this model would rely on positions and values in target
transactions = rfqs[ ['quantity']]
prices = {'A' : 99.5, 'B' : 101, 'D':103}
transactions[ 'price' ] = rfqs.id.apply( lambda x : prices.get(x) )
return transactions.dropna()
algo = algos.SimulateRFQTransactions( 'rfqs', model )
s.setup(data, bidoffer={}, rfqs=rfqs) # Pass bidoffer so it will track bidoffer paid
s.adjust(1000)
s.update(dts[0])
algo(s)
assert c1.position == 100
assert c2.position == 0
assert c1.bidoffer_paid == -50
s.update(dts[1])
algo(s)
assert c1.position == 0
assert c2.position == 50
assert c1.bidoffer_paid == -100
assert c2.bidoffer_paid == 150
def test_update_risk():
c1 = bt.Security('c1')
c2 = bt.Security('c2')
s = bt.Strategy('s', children = [c1, c2])
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
data['c1'].loc[dts[1]] = 105
data['c2'].loc[dts[1]] = 95
c1 = s['c1']
c2 = s['c2']
algo = algos.UpdateRisk('Test', history=False)
s.setup(data, unit_risk={'Test':data})
s.adjust(1000)
s.update(dts[0])
assert algo( s )
assert s.risk['Test'] == 0
assert c1.risk['Test'] == 0
assert c2.risk['Test'] == 0
s.transact( 1, 'c1')
s.transact( 5, 'c2')
assert algo( s )
assert s.risk['Test'] == 600
assert c1.risk['Test'] == 100
assert c2.risk['Test'] == 500
s.update(dts[1])
assert algo( s )
assert s.risk['Test'] == 105 + 5*95
assert c1.risk['Test'] == 105
assert c2.risk['Test'] == 5*95
assert not hasattr( s, 'risks' )
assert not hasattr( c1, 'risks' )
assert not hasattr( c2, 'risks' )
def test_update_risk_history_1():
c1 = bt.Security('c1')
c2 = bt.Security('c2')
s = bt.Strategy('s', children = [c1, c2])
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
data['c1'].loc[dts[1]] = 105
data['c2'].loc[dts[1]] = 95
c1 = s['c1']
c2 = s['c2']
algo = algos.UpdateRisk('Test', history=1)
s.setup(data, unit_risk={'Test':data})
s.adjust(1000)
s.update(dts[0])
assert algo( s )
assert s.risks['Test'][0] == 0
s.transact( 1, 'c1')
s.transact( 5, 'c2')
assert algo( s )
assert s.risks['Test'][0] == 600
s.update(dts[1])
assert algo( s )
assert s.risks['Test'][0] == 600
assert s.risks['Test'][1] == 105 + 5*95
assert not hasattr( c1, 'risks' )
assert not hasattr( c2, 'risks' )
def test_update_risk_history_2():
c1 = bt.Security('c1')
c2 = bt.Security('c2')
s = bt.Strategy('s', children = [c1, c2])
dts = pd.date_range('2010-01-01', periods=3)
    data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
import numpy as np
import pandas as pd
import pytest
from greykite.common.features.timeseries_lags import build_agg_lag_df
from greykite.common.features.timeseries_lags import build_autoreg_df
from greykite.common.features.timeseries_lags import build_autoreg_df_multi
from greykite.common.features.timeseries_lags import build_lag_df
from greykite.common.features.timeseries_lags import min_max_lag_order
def test_build_lag_df():
"""Testing build_lag_df."""
df = pd.DataFrame({"x": range(10), "y": range(100, 110)})
lag_info = build_lag_df(df=df, value_col="x", max_order=3, orders=None)
lag_df = lag_info["lag_df"]
assert list(lag_df.columns) == ["x_lag1", "x_lag2", "x_lag3"], \
"The expected column names were not found in lags dataframe (lag_df)"
assert lag_df["x_lag1"].values[2].round(0) == 1.0, (
"lag value is not correct")
assert lag_df["x_lag2"].values[7].round(0) == 5.0, (
"lag value is not correct")
# example with orders provided
lag_info = build_lag_df(
value_col="x",
df=df,
max_order=None,
orders=[1, 2, 5])
lag_df = lag_info["lag_df"]
assert list(lag_df.columns) == ["x_lag1", "x_lag2", "x_lag5"], \
"The expected column names were not found in lags dataframe (lag_df)"
assert lag_df["x_lag1"].values[2].round(0) == 1.0, (
"lag value is not correct")
assert lag_df["x_lag2"].values[7].round(0) == 5.0, (
"lag value is not correct")
assert lag_df["x_lag5"].values[8].round(0) == 3.0, (
"lag value is not correct")
def test_build_lag_df_exception():
df = pd.DataFrame({"x": range(10), "y": range(100, 110)})
with pytest.raises(
ValueError,
match="at least one of 'max_order' or 'orders' must be provided"):
build_lag_df(
value_col="x",
df=df,
max_order=None,
orders=None)
def test_build_lag_df_col_names_only():
"""Testing for the case where no df is passed and only col_names are generated."""
lag_info = build_lag_df(
value_col="x",
df=None,
max_order=3,
orders=None)
col_names = lag_info["col_names"]
assert col_names == ["x_lag1", "x_lag2", "x_lag3"], (
"The expected column names were not found in lags dataframe (lag_df)")
assert lag_info["lag_df"] is None, "returned lag_df must be None"
def test_min_max_lag_order():
"""Testing max_lag_order for various cases."""
# case with no lags
agg_lag_dict = {
"orders_list": [],
"interval_list": []}
lag_dict = {
"orders": None,
"max_order": None
}
min_max_order = min_max_lag_order(
lag_dict=lag_dict,
agg_lag_dict=agg_lag_dict)
min_order = min_max_order["min_order"]
max_order = min_max_order["max_order"]
assert max_order == 0, (
"max_order is not calculated correctly")
assert min_order == np.inf, (
"min_order is not calculated correctly")
# case with lag_dict only including lags
agg_lag_dict = {
"orders_list": [],
"interval_list": []}
lag_dict = {
"orders": [2, 3, 13],
"max_order": None
}
min_max_order = min_max_lag_order(
lag_dict=lag_dict,
agg_lag_dict=agg_lag_dict)
min_order = min_max_order["min_order"]
max_order = min_max_order["max_order"]
assert max_order == 13, (
"max_order is not calculated correctly")
assert min_order == 2, (
"max_order is not calculated correctly")
# `max_order` below is expected to be ignored
# since `orders` is provided
lag_dict = {
"orders": [2, 3, 13],
"max_order": 20
}
agg_lag_dict = None
min_max_order = min_max_lag_order(
lag_dict=lag_dict,
agg_lag_dict=agg_lag_dict)
min_order = min_max_order["min_order"]
max_order = min_max_order["max_order"]
assert max_order == 13, (
"max_order is not calculated correctly")
assert min_order == 2, (
"max_order is not calculated correctly")
    # case with agg_lag_dict including lags only
agg_lag_dict = {
"orders_list": [[1, 2, 3, 16]],
"interval_list": [[1, 2]]}
lag_dict = {
"orders": None,
"max_order": None
}
min_max_order = min_max_lag_order(
lag_dict=lag_dict,
agg_lag_dict=agg_lag_dict)
min_order = min_max_order["min_order"]
max_order = min_max_order["max_order"]
assert max_order == 16, (
"max_order is not calculated correctly")
assert min_order == 1, (
"max_order is not calculated correctly")
# case with both agg_lag_dict and lag_dict prescribing lags
agg_lag_dict = {
"orders_list": [[1, 2, 3]],
"interval_list": [(2, 3), (2, 5)]}
lag_dict = {
"orders": [2, 3, 8],
"max_order": None
}
min_max_order = min_max_lag_order(
lag_dict=lag_dict,
agg_lag_dict=agg_lag_dict)
min_order = min_max_order["min_order"]
max_order = min_max_order["max_order"]
assert max_order == 8, (
"max_order is not calculated correctly")
assert min_order == 1, (
"min_order is not calculated correctly")
# case with max_order appearing in lag_dict["max_order"]
agg_lag_dict = {
"orders_list": [[2, 3]],
"interval_list": [(3, 6), (3, 10)]}
lag_dict = {
"orders": None,
"max_order": 18
}
min_max_order = min_max_lag_order(
lag_dict=lag_dict,
agg_lag_dict=agg_lag_dict)
min_order = min_max_order["min_order"]
max_order = min_max_order["max_order"]
assert max_order == 18, (
"max_order is not calculated correctly")
assert min_order == 1, (
"min_order is not calculated correctly")
def test_build_agg_lag_df():
"""Testing build_agg_lag_df."""
df = pd.DataFrame({
"x": [1, 5, 6, 7, 8, -1, -10, -19, -20, 10],
"y": range(10)})
agg_lag_info = build_agg_lag_df(
value_col="x",
df=df,
orders_list=[[1, 2, 5], [1, 3, 8], [2, 3, 4]],
interval_list=[(1, 5), (1, 8)],
agg_func=np.mean,
agg_name="avglag")
agg_lag_df = agg_lag_info["agg_lag_df"]
assert list(agg_lag_df.columns) == [
"x_avglag_1_2_5",
"x_avglag_1_3_8",
"x_avglag_2_3_4",
"x_avglag_1_to_5",
"x_avglag_1_to_8"], \
"aggregated lag df does not have the correct names"
assert agg_lag_df["x_avglag_1_2_5"].values[2].round(0) == 3.0, (
"aggregated lags are not correct")
assert agg_lag_df["x_avglag_1_to_8"].values[7].round(1) == 2.3, (
"aggregated lags are not correct")
# check for Exception being raised for repeated orders
with pytest.raises(
Exception,
match="a list of orders in orders_list contains a duplicate element"):
build_agg_lag_df(
df=df,
value_col="x",
orders_list=[[1, 2, 2], [1, 3, 8], [2, 3, 4]],
interval_list=[(1, 5), (1, 8)],
agg_func=np.mean,
agg_name="avglag")
    # check for Exception being raised when an interval is not of length 2
with pytest.raises(
Exception,
match="interval must be a tuple of length 2"):
build_agg_lag_df(
df=df,
value_col="x",
orders_list=[[1, 2, 3], [1, 3, 8], [2, 3, 4]],
interval_list=[(1, 5), (1, 8, 9)],
agg_func=np.mean,
agg_name="avglag")
    # check for Exception being raised when interval[0] > interval[1]
    # (the requirement is interval[0] <= interval[1] for each interval in interval_list)
with pytest.raises(
Exception,
match=r"we must have interval\[0\] <= interval\[1\], for each interval in interval_list"):
build_agg_lag_df(
df=df,
value_col="x",
orders_list=[[1, 2, 3], [1, 3, 8], [2, 3, 4]],
interval_list=[(1, 5), (8, 1)],
agg_func=np.mean,
agg_name="avglag")
def test_build_agg_lag_df_col_names_only():
"""Testing build_agg_lag_df for the case where input df is not passed and
only col_names are generated"""
agg_lag_info = build_agg_lag_df(
value_col="x",
df=None,
orders_list=[[1, 2, 5], [1, 3, 8], [2, 3, 4]],
interval_list=[(1, 5), (1, 8)],
agg_func=np.mean,
agg_name="avglag")
col_names = agg_lag_info["col_names"]
assert col_names == [
"x_avglag_1_2_5",
"x_avglag_1_3_8",
"x_avglag_2_3_4",
"x_avglag_1_to_5",
"x_avglag_1_to_8"], \
"aggregated lag df does not have the correct names"
assert agg_lag_info["agg_lag_df"] is None, (
"returned agg_lag_df must be None")
def test_build_agg_lag_df_exception():
df = pd.DataFrame({"x": range(10), "y": range(100, 110)})
with pytest.raises(
ValueError,
match="at least one of 'orders_list' or 'interval_list' must be provided"):
build_agg_lag_df(
value_col="x",
df=df,
orders_list=None,
interval_list=None)
def test_build_autoreg_df():
"""Testing build_autoreg_df generic use case with no data filling."""
df = pd.DataFrame({
"x": [1, 5, 6, 7, 8, -1, -10, -19, -20, 10],
"y": range(10)})
autoreg_info = build_autoreg_df(
value_col="x",
lag_dict={"orders": [1, 2, 5]},
agg_lag_dict={
"orders_list": [[1, 2, 5], [1, 3, 8], [2, 3, 4]],
"interval_list": [(1, 5), (1, 8)]},
series_na_fill_func=None) # no filling of NAs
build_lags_func = autoreg_info["build_lags_func"]
lag_col_names = autoreg_info["lag_col_names"]
agg_lag_col_names = autoreg_info["agg_lag_col_names"]
max_order = autoreg_info["max_order"]
min_order = autoreg_info["min_order"]
lag_df_info = build_lags_func(df)
lag_df = lag_df_info["lag_df"]
agg_lag_df = lag_df_info["agg_lag_df"]
assert max_order == 8, (
"returned max_order should be 8 for the given input")
assert min_order == 1, (
"returned min_order should be 8 for the given input")
assert list(lag_df.columns) == lag_col_names
assert lag_col_names == ["x_lag1", "x_lag2", "x_lag5"], \
"The expected column names were not found in lags dataframe (lag_df)"
    assert pd.isnull(lag_df)
import streamlit as st
import numpy as np
import pandas as pd
from matplotlib.image import imread
import matplotlib.pyplot as plt
import plotly.graph_objects as go
import seaborn as sns
import requests
import joblib
import shap
# import streamlit.components.v1 as components
shap.initjs()
st.set_option('deprecation.showPyplotGlobalUse', False)
########################################################
# Session for the API
########################################################
def fetch(session, url):
"""Create session for the API
Args:
session : session
url (link): complete url to connect to
Returns:
result (json): result of the request to the url
"""
try:
result = session.get(url)
return result.json()
except Exception:
return {}
session = requests.Session()
########################################################
# Functions to call the EndPoints
########################################################
def client():
    # Getting the list of available client ids
response = fetch(session, f"http://projetoc-scoring.herokuapp.com/api/clients")
if response:
return response["clientsId"]
else:
return "Error"
def client_details(id):
#Getting Client details
response = fetch(session,f"http://projetoc-scoring.herokuapp.com/api/clients/{id}")
if response:
return response
else:
return "Error"
def client_prediction(id):
#Getting Client prediction
response = fetch(session, f"http://projetoc-scoring.herokuapp.com/api/clients/{id}/prediction")
if response:
return response
else:
return "Error"
########################################################
# Function to load data stored on github
########################################################
@st.experimental_memo(suppress_st_warning=True)
def load_data():
"""Load data necessary for the page 2 of the dashboard.
- df_train
- df_test
- df_test_cat_features
    - df_test_num_features
Returns:
df, df_test, df_test_cat_features, df_test_num_features : DataFrame loaded
"""
df = pd.read_csv("./dashboard_data/df_train.csv")
df_test = pd.read_csv("./dashboard_data/df_test.csv")
df_test_cat_features = pd.read_csv("./dashboard_data/df_test_cat_features.csv")
df_test_num_features = pd.read_csv("./dashboard_data/df_test_num_features.csv")
return df, df_test, df_test_cat_features, df_test_num_features
@st.experimental_memo(suppress_st_warning=True)
def transform_df(df):
"""Changes the type of several features to int64 in order to be used for plotting.
Taking the absolute value of credit_downpayment to be plotted.
Replacing outliers by NaN in DAYS_EMPLOYED feature.
Args:
df (DataFrame): dataframe to be transformed
"""
#changing type of Data comparison features
df["CNT_CHILDREN"] = df["CNT_CHILDREN"].astype("int64")
df["AGE_INT"] = df["AGE_INT"].astype("int64")
#changing sign of features
df["credit_downpayment"] = abs(df["credit_downpayment"])
df['DAYS_EMPLOYED'].replace({365243: np.nan}, inplace = True)
df["DAYS_EMPLOYED"] = abs(df["DAYS_EMPLOYED"])
return df
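# Hypothetical usage sketch (assumption, not the original page flow): the cached
# loader and the transformer above are meant to be chained once per session, e.g.
#   df, df_test, df_test_cat_features, df_test_num_features = load_data()
#   df = transform_df(df)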
########################################################
# Functions to automate the graphs
########################################################
def chart_kde(title,row,df,col,client):
"""Building KDE Charts with vertical line for client position"""
with row:
st.subheader(title)
fig,ax = plt.subplots()
sns.kdeplot(df.loc[df["TARGET"]==0,col],color="green", label = "Target == 0")
sns.kdeplot(df.loc[df["TARGET"]==1,col],color="red", label = "Target == 1")
plt.axvline(x=df.iloc[client, df.columns.get_loc(col)],ymax=0.95,color="black")
plt.legend()
st.pyplot(fig)
def chart_bar(title,row,df,col,client):
"""Building bar Charts with vertical line for client position"""
with row:
st.subheader(title)
fig,ax = plt.subplots()
data=df[["TARGET",col]]
if data[col].dtypes!="object":
data[col]=data[col].astype("str")
data1=round(data[col].loc[data["TARGET"]==1].value_counts()/data[col].loc[data["TARGET"]==1].value_counts().sum()*100,2)
data0=round(data[col].loc[data["TARGET"]==0].value_counts()/data[col].loc[data["TARGET"]==0].value_counts().sum()*100,2)
data=pd.concat([pd.DataFrame({"Pourcentage":data0,"TARGET":0}),pd.DataFrame({"Pourcentage":data1,"TARGET":1})]).reset_index().rename(columns={"index":col})
sns.barplot(data=data,x="Pourcentage", y=col, hue="TARGET", palette=["green","red"], order=sorted(data[col].unique()));
data[col]=data[col].astype("int64")
plt.axhline(y=sorted(data[col].unique()).index(df.loc[client,col]),xmax=0.95,color="black",linewidth=4)
st.pyplot(fig)
else:
data1=round(data[col].loc[data["TARGET"]==1].value_counts()/data[col].loc[data["TARGET"]==1].value_counts().sum()*100,2)
data0=round(data[col].loc[data["TARGET"]==0].value_counts()/data[col].loc[data["TARGET"]==0].value_counts().sum()*100,2)
data=pd.concat([pd.DataFrame({"Pourcentage":data0,"TARGET":0}),pd.DataFrame({"Pourcentage":data1,"TARGET":1})]).reset_index().rename(columns={"index":col})
sns.barplot(data=data,x="Pourcentage", y=col, hue="TARGET", palette=["green","red"], order=sorted(data[col].unique()));
plt.axhline(y=sorted(data[col].unique()).index(df.loc[client,col]),xmax=0.95,color="black",linewidth=4)
st.pyplot(fig)
def display_charts(df,client):
"""Plotting graphs for selected clientID """
row1_1,row1_2,row1_3 = st.columns(3)
st.write('')
row2_10,row2_2,row2_3 = st.columns(3)
st.write('')
row3_1, row3_2, row3_3 = st.columns(3)
chart_bar("Niveau d'études",row1_1, df,'NAME_EDUCATION_TYPE',client)
chart_kde("Ratio Revenu/Annuité",row1_2, df,'annuity_income_ratio',client)
chart_kde("Revenus totaux",row1_3, df,'AMT_INCOME_TOTAL',client)
chart_kde("Apport",row2_10, df,'credit_downpayment',client)
chart_kde("Durée d'activité pro.",row2_2, df,'DAYS_EMPLOYED',client)
chart_bar("Sexe",row2_3,df,'CODE_GENDER',client)
chart_bar("Propriétaire d'un véhicule",row3_1,df,'FLAG_OWN_CAR',client)
chart_bar("Répartition du statut professionel",row3_2,df,'NAME_INCOME_TYPE',client)
chart_bar("Répartition du type de logement",row3_3,df,'NAME_HOUSING_TYPE',client)
def color(pred):
"""Choosing color depending on the prediction"""
if pred=='Approved':
col='Green'
else :
col='Red'
return col
# def st_shap(plot, height=None):
# """Fonction permettant l'affichage de graphique shap values"""
# shap_html = f"<head>{shap.getjs()}</head><body>{plot.html()}</body>"
# components.html(shap_html, height=height)
@st.experimental_memo(suppress_st_warning=True)
def shap_preproc(df, df_test, df_test_cat_features, df_test_num_features):
"""Pre-processing of the data to be used to calculate SHAP Values.
Args:
df (Dataframe): df_train
df_test (Dataframe): df_test
df_test_cat_features (Dataframe): df_test_cat_features
df_test_num_features (Dataframe): df_test_num_features
Returns:
ohe_dataframe, ohe_dataframe_test, features_list_after_prepr_test : post-encoding training, testing dataframes + list of features
"""
ohe = joblib.load("./bin/ohe.joblib")
categorical_imputer = joblib.load("./bin/categorical_imputer.joblib")
simple_imputer = joblib.load("./bin/simple_imputer.joblib")
scaler = joblib.load("./bin/scaler.joblib")
#---------------------------------------------------------------------
#data pre-processing (training set)
list_cat_features = df_test_cat_features.columns.to_list()
list_num_features = df_test_num_features.columns.to_list()
#SimpleImputing (most frequent) and ohe of categorical features
cat_array = categorical_imputer.transform(df[list_cat_features])
cat_array = ohe.transform(cat_array).todense()
#SimpleImputing (median) and StandardScaling of numerical features
num_array = simple_imputer.transform(df[list_num_features])
num_array = scaler.transform(num_array)
#concatenate
X_train = np.concatenate([cat_array, num_array], axis=1)
X_train = np.asarray(X_train)
#building dataframe with post-preprocessed data (training set)
cat_features_list_after_ohe = ohe.get_feature_names_out(list_cat_features).tolist()
features_list_after_prepr = cat_features_list_after_ohe + list_num_features
    ohe_dataframe = pd.DataFrame(X_train, columns=features_list_after_prepr)
# Notebook to transform OSeMOSYS output to same format as EGEDA
# Import relevant packages
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import os
from openpyxl import Workbook
import xlsxwriter
import pandas.io.formats.excel
import glob
import re
# Path for OSeMOSYS output
path_output = './data/3_OSeMOSYS_output'
# Path for OSeMOSYS to EGEDA mapping
path_mapping = './data/2_Mapping_and_other'
# Where to save finalised dataframe
path_final = './data/4_Joined'
# OSeMOSYS results files
OSeMOSYS_filenames = glob.glob(path_output + "/*.xlsx")
# Reference filenames and net zero filenames
reference_filenames = list(filter(lambda k: 'reference' in k, OSeMOSYS_filenames))
netzero_filenames = list(filter(lambda y: 'net-zero' in y, OSeMOSYS_filenames))
# New 2018 data variable names
Mapping_sheets = list(pd.read_excel(path_mapping + '/OSeMOSYS_mapping_2021.xlsx', sheet_name = None).keys())[1:]
Mapping_file = pd.DataFrame()
for sheet in Mapping_sheets:
interim_map = pd.read_excel(path_mapping + '/OSeMOSYS_mapping_2021.xlsx', sheet_name = sheet, skiprows = 1)
Mapping_file = Mapping_file.append(interim_map).reset_index(drop = True)
# Moving everything from OSeMOSYS to EGEDA for TFC and TPES
Mapping_TFC_TPES = Mapping_file[Mapping_file['Balance'].isin(['TFC', 'TPES'])]
# And for transformation
Map_trans = Mapping_file[Mapping_file['Balance'] == 'TRANS'].reset_index(drop = True)
# A mapping just for i) power, ii) ref, own, sup and iii) hydrogen
Map_power = Map_trans[Map_trans['Sector'] == 'POW'].reset_index(drop = True)
Map_refownsup = Map_trans[Map_trans['Sector'].isin(['REF', 'SUP', 'OWN', 'HYD'])].reset_index(drop = True)
Map_hydrogen = Map_trans[Map_trans['Sector'] == 'HYD'].reset_index(drop = True)
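# Note: 'HYD' rows are included in Map_refownsup as well, so Map_hydrogen is a subset of it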
# Define unique workbook and sheet combinations for TFC and TPES
Unique_TFC_TPES = Mapping_TFC_TPES.groupby(['Workbook', 'Sheet_energy']).size().reset_index().loc[:, ['Workbook', 'Sheet_energy']]
# Define unique workbook and sheet combinations for Transformation
Unique_trans = Map_trans.groupby(['Workbook', 'Sheet_energy']).size().reset_index().loc[:, ['Workbook', 'Sheet_energy']]
################################### TFC and TPES #############################################################
# Determine list of files to read based on the workbooks identified in the mapping file for REFERENCE scenario
ref_file_df = pd.DataFrame()
for i in range(len(Unique_TFC_TPES['Workbook'].unique())):
_file = pd.DataFrame({'File': [entry for entry in reference_filenames if Unique_TFC_TPES['Workbook'].unique()[i] in entry],
'Workbook': Unique_TFC_TPES['Workbook'].unique()[i]})
ref_file_df = ref_file_df.append(_file)
ref_file_df = ref_file_df.merge(Unique_TFC_TPES, how = 'outer', on = 'Workbook')
# Determine list of files to read based on the workbooks identified in the mapping file for NET-ZERO scenario
netz_file_df = pd.DataFrame()
for i in range(len(Unique_TFC_TPES['Workbook'].unique())):
_file = pd.DataFrame({'File': [entry for entry in netzero_filenames if Unique_TFC_TPES['Workbook'].unique()[i] in entry],
'Workbook': Unique_TFC_TPES['Workbook'].unique()[i]})
netz_file_df = netz_file_df.append(_file)
netz_file_df = netz_file_df.merge(Unique_TFC_TPES, how = 'outer', on = 'Workbook')
# Create empty dataframe to store REFERENCE aggregated results
ref_aggregate_df1 = pd.DataFrame(columns = ['TECHNOLOGY', 'FUEL', 'REGION', 2050])
# Now read in the OSeMOSYS output files so that they're all in one data frame (ref_aggregate_df1)
if ref_file_df['File'].isna().any() == False:
for i in range(ref_file_df.shape[0]):
_df = pd.read_excel(ref_file_df.iloc[i, 0], sheet_name = ref_file_df.iloc[i, 2])
_df['Workbook'] = ref_file_df.iloc[i, 1]
_df['Sheet_energy'] = ref_file_df.iloc[i, 2]
ref_aggregate_df1 = ref_aggregate_df1.append(_df)
interim_df1 = ref_aggregate_df1[ref_aggregate_df1['TIMESLICE'] != 'ONE']
interim_df2 = ref_aggregate_df1[ref_aggregate_df1['TIMESLICE'] == 'ONE']
interim_df1 = interim_df1.groupby(['TECHNOLOGY', 'FUEL', 'REGION', 'Workbook', 'Sheet_energy']).sum().reset_index()
ref_aggregate_df1 = interim_df2.append(interim_df1).reset_index(drop = True)
# Bunkers/stock drawdowns and builds. Stock build needs to be changed to negative values
interim_stock1 = ref_aggregate_df1[ref_aggregate_df1['TECHNOLOGY']\
.isin(['SUP_6_1_crude_oil_stock_build',
'SUP_8_1_natural_gas_stock_build',
'SUP_2_coal_products_stock_build'])].copy()\
.set_index(['TECHNOLOGY', 'FUEL', 'REGION', 'TIMESLICE', 'Workbook', 'Sheet_energy'])
interim_stock2 = ref_aggregate_df1[~ref_aggregate_df1['TECHNOLOGY']\
.isin(['SUP_6_1_crude_oil_stock_build',
'SUP_8_1_natural_gas_stock_build',
'SUP_2_coal_products_stock_build'])].copy()
interim_stock1 = interim_stock1 * -1
interim_stock1 = interim_stock1.reset_index()
# Stitch back together
ref_aggregate_df1 = interim_stock2.append(interim_stock1).reset_index(drop = True)
# Create empty dataframe to store NET ZERO aggregated results
netz_aggregate_df1 = pd.DataFrame(columns = ['TECHNOLOGY', 'FUEL', 'REGION', 2050])
# Now read in the OSeMOSYS output files so that they're all in one data frame (netz_aggregate_df1)
if netz_file_df['File'].isna().any() == False:
for i in range(netz_file_df.shape[0]):
_df = pd.read_excel(netz_file_df.iloc[i, 0], sheet_name = netz_file_df.iloc[i, 2])
_df['Workbook'] = netz_file_df.iloc[i, 1]
_df['Sheet_energy'] = netz_file_df.iloc[i, 2]
netz_aggregate_df1 = netz_aggregate_df1.append(_df)
interim_df1 = netz_aggregate_df1[netz_aggregate_df1['TIMESLICE'] != 'ONE']
interim_df2 = netz_aggregate_df1[netz_aggregate_df1['TIMESLICE'] == 'ONE']
interim_df1 = interim_df1.groupby(['TECHNOLOGY', 'FUEL', 'REGION', 'Workbook', 'Sheet_energy']).sum().reset_index()
netz_aggregate_df1 = interim_df2.append(interim_df1).reset_index(drop = True)
# Bunkers/stock drawdowns and builds. Stock build needs to be changed to negative values
interim_stock1 = netz_aggregate_df1[netz_aggregate_df1['TECHNOLOGY']\
.isin(['SUP_6_1_crude_oil_stock_build',
'SUP_8_1_natural_gas_stock_build',
'SUP_2_coal_products_stock_build'])].copy()\
.set_index(['TECHNOLOGY', 'FUEL', 'REGION', 'TIMESLICE', 'Workbook', 'Sheet_energy'])
interim_stock2 = netz_aggregate_df1[~netz_aggregate_df1['TECHNOLOGY']\
.isin(['SUP_6_1_crude_oil_stock_build',
'SUP_8_1_natural_gas_stock_build',
'SUP_2_coal_products_stock_build'])].copy()
interim_stock1 = interim_stock1 * -1
interim_stock1 = interim_stock1.reset_index()
# Stitch back together
netz_aggregate_df1 = interim_stock2.append(interim_stock1).reset_index(drop = True)
# Now aggregate all the results for APEC
# REFERENCE
APEC_ref = ref_aggregate_df1.groupby(['TECHNOLOGY', 'FUEL']).sum().reset_index()
APEC_ref['REGION'] = 'APEC'
ref_aggregate_df1 = ref_aggregate_df1.append(APEC_ref).reset_index(drop = True)
# NET ZERO
APEC_netz = netz_aggregate_df1.groupby(['TECHNOLOGY', 'FUEL']).sum().reset_index()
APEC_netz['REGION'] = 'APEC'
netz_aggregate_df1 = netz_aggregate_df1.append(APEC_netz).reset_index(drop = True)
# Now aggregate results for 22_SEA
# Southeast Asia: 02, 07, 10, 15, 17, 19, 21
# REFERENCE
SEA_ref = ref_aggregate_df1[ref_aggregate_df1['REGION']\
.isin(['02_BD', '07_INA', '10_MAS', '15_RP', '17_SIN', '19_THA', '21_VN'])]\
.groupby(['TECHNOLOGY', 'FUEL']).sum().reset_index()
SEA_ref['REGION'] = '22_SEA'
ref_aggregate_df1 = ref_aggregate_df1.append(SEA_ref).reset_index(drop = True)
# NET ZERO
SEA_netz = netz_aggregate_df1[netz_aggregate_df1['REGION']\
.isin(['02_BD', '07_INA', '10_MAS', '15_RP', '17_SIN', '19_THA', '21_VN'])]\
.groupby(['TECHNOLOGY', 'FUEL']).sum().reset_index()
SEA_netz['REGION'] = '22_SEA'
netz_aggregate_df1 = netz_aggregate_df1.append(SEA_netz).reset_index(drop = True)
# Aggregate results for 23_NEA
# Northeast Asia: 06, 08, 09, 18
# REFERENCE
NEA_ref = ref_aggregate_df1[ref_aggregate_df1['REGION']\
.isin(['06_HKC', '08_JPN', '09_ROK', '18_CT'])]\
.groupby(['TECHNOLOGY', 'FUEL']).sum().reset_index()
NEA_ref['REGION'] = '23_NEA'
ref_aggregate_df1 = ref_aggregate_df1.append(NEA_ref).reset_index(drop = True)
# NET ZERO
NEA_netz = netz_aggregate_df1[netz_aggregate_df1['REGION']\
.isin(['06_HKC', '08_JPN', '09_ROK', '18_CT'])]\
.groupby(['TECHNOLOGY', 'FUEL']).sum().reset_index()
NEA_netz['REGION'] = '23_NEA'
netz_aggregate_df1 = netz_aggregate_df1.append(NEA_netz).reset_index(drop = True)
# Aggregate results for 23b_ONEA
# ONEA: 06, 09, 18
# REFERENCE
ONEA_ref = ref_aggregate_df1[ref_aggregate_df1['REGION']\
.isin(['06_HKC', '09_ROK', '18_CT'])]\
.groupby(['TECHNOLOGY', 'FUEL']).sum().reset_index()
ONEA_ref['REGION'] = '23b_ONEA'
ref_aggregate_df1 = ref_aggregate_df1.append(ONEA_ref).reset_index(drop = True)
# NET ZERO
ONEA_netz = netz_aggregate_df1[netz_aggregate_df1['REGION']\
.isin(['06_HKC', '09_ROK', '18_CT'])]\
.groupby(['TECHNOLOGY', 'FUEL']).sum().reset_index()
ONEA_netz['REGION'] = '23b_ONEA'
netz_aggregate_df1 = netz_aggregate_df1.append(ONEA_netz).reset_index(drop = True)
# Aggregate results for 24_OAM
# OAM: 03, 04, 11, 14
# REFERENCE
OAM_ref = ref_aggregate_df1[ref_aggregate_df1['REGION']\
.isin(['03_CDA', '04_CHL', '11_MEX', '14_PE'])]\
.groupby(['TECHNOLOGY', 'FUEL']).sum().reset_index()
OAM_ref['REGION'] = '24_OAM'
ref_aggregate_df1 = ref_aggregate_df1.append(OAM_ref).reset_index(drop = True)
# NET ZERO
OAM_netz = netz_aggregate_df1[netz_aggregate_df1['REGION']\
.isin(['03_CDA', '04_CHL', '11_MEX', '14_PE'])]\
.groupby(['TECHNOLOGY', 'FUEL']).sum().reset_index()
OAM_netz['REGION'] = '24_OAM'
netz_aggregate_df1 = netz_aggregate_df1.append(OAM_netz).reset_index(drop = True)
# Aggregate results for 24b_OOAM
# OOAM: 04, 11, 14
# REFERENCE
OOAM_ref = ref_aggregate_df1[ref_aggregate_df1['REGION']\
.isin(['04_CHL', '11_MEX', '14_PE'])]\
.groupby(['TECHNOLOGY', 'FUEL']).sum().reset_index()
OOAM_ref['REGION'] = '24b_OOAM'
ref_aggregate_df1 = ref_aggregate_df1.append(OOAM_ref).reset_index(drop = True)
# NET ZERO
OOAM_netz = netz_aggregate_df1[netz_aggregate_df1['REGION']\
.isin(['04_CHL', '11_MEX', '14_PE'])]\
.groupby(['TECHNOLOGY', 'FUEL']).sum().reset_index()
OOAM_netz['REGION'] = '24b_OOAM'
netz_aggregate_df1 = netz_aggregate_df1.append(OOAM_netz).reset_index(drop = True)
# Aggregate results for 25_OCE
# Oceania: 01, 12, 13
# REFERENCE
OCE_ref = ref_aggregate_df1[ref_aggregate_df1['REGION']\
.isin(['01_AUS', '12_NZ', '13_PNG'])]\
.groupby(['TECHNOLOGY', 'FUEL']).sum().reset_index()
OCE_ref['REGION'] = '25_OCE'
ref_aggregate_df1 = ref_aggregate_df1.append(OCE_ref).reset_index(drop = True)
# NET ZERO
OCE_netz = netz_aggregate_df1[netz_aggregate_df1['REGION']\
.isin(['01_AUS', '12_NZ', '13_PNG'])]\
.groupby(['TECHNOLOGY', 'FUEL']).sum().reset_index()
OCE_netz['REGION'] = '25_OCE'
netz_aggregate_df1 = netz_aggregate_df1.append(OCE_netz).reset_index(drop = True)
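# At this point both ref_aggregate_df1 and netz_aggregate_df1 contain every individual
# economy plus the appended regional aggregates (APEC, 22_SEA, 23_NEA, 23b_ONEA,
# 24_OAM, 24b_OOAM, 25_OCE) as extra REGION values.
# Hypothetical refactoring sketch (not used below): the repeated
# "filter members -> groupby/sum -> relabel REGION -> append" pattern above could be
# written once as a helper; the name is an illustrative assumption.
def _append_region_aggregate(frame, member_regions, label):
    agg = frame[frame['REGION'].isin(member_regions)]\
        .groupby(['TECHNOLOGY', 'FUEL']).sum().reset_index()
    agg['REGION'] = label
    return frame.append(agg).reset_index(drop = True)
# e.g. ref_aggregate_df1 = _append_region_aggregate(ref_aggregate_df1, ['01_AUS', '12_NZ', '13_PNG'], '25_OCE')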
# Get maximum REFERENCE year column to build data frame below
ref_year_columns = []
for item in list(ref_aggregate_df1.columns):
try:
ref_year_columns.append(int(item))
except ValueError:
pass
max_year_ref = max(ref_year_columns)
OSeMOSYS_years_ref = list(range(2017, max_year_ref + 1))
# Get maximum NET ZERO year column to build data frame below
netz_year_columns = []
for item in list(netz_aggregate_df1.columns):
try:
netz_year_columns.append(int(item))
except ValueError:
pass
max_year_netz = max(netz_year_columns)
OSeMOSYS_years_netz = list(range(2017, max_year_netz + 1))
#################################################################################################
### ADJUNCT: last-minute grab of LNG/pipeline imports and exports, which come only from OSeMOSYS
# This script is a bit messy as there are two chunks that build ref_aggregate_df1;
# the grab is done here because this first ref_aggregate_df1 is the more comprehensive one,
# i.e. it has region aggregates such as OOAM, OCE and APEC in addition to individual economies
ref_lngpipe_1 = ref_aggregate_df1[ref_aggregate_df1['TECHNOLOGY'].isin(['SUP_8_1_natural_gas_import',\
'SUP_8_2_lng_import', 'SUP_8_1_natural_gas_export', 'SUP_8_2_lng_export'])].copy()\
.loc[:, ['REGION', 'TECHNOLOGY'] + OSeMOSYS_years_ref].reset_index(drop = True)
ref_lngpipe_1.to_csv(path_final + '/lngpipe_reference.csv', index = False)
netz_lngpipe_1 = netz_aggregate_df1[netz_aggregate_df1['TECHNOLOGY'].isin(['SUP_8_1_natural_gas_import',\
'SUP_8_2_lng_import', 'SUP_8_1_natural_gas_export', 'SUP_8_2_lng_export'])].copy()\
.loc[:, ['REGION', 'TECHNOLOGY'] + OSeMOSYS_years_netz].reset_index(drop = True)
netz_lngpipe_1.to_csv(path_final + '/lngpipe_netzero.csv', index = False)
###################################################################################################
########################## fuel_code aggregations ##########################
# First level
coal_fuels = ['1_1_coking_coal', '1_5_lignite', '1_x_coal_thermal']
oil_fuels = ['6_1_crude_oil', '6_x_ngls']
petrol_fuels = ['7_1_motor_gasoline', '7_2_aviation_gasoline', '7_3_naphtha', '7_x_jet_fuel', '7_6_kerosene', '7_7_gas_diesel_oil',
'7_8_fuel_oil', '7_9_lpg', '7_10_refinery_gas_not_liquefied', '7_11_ethane', '7_x_other_petroleum_products']
gas_fuels = ['8_1_natural_gas', '8_2_lng', '8_3_gas_works_gas']
biomass_fuels = ['15_1_fuelwood_and_woodwaste', '15_2_bagasse', '15_3_charcoal', '15_4_black_liquor', '15_5_other_biomass']
other_fuels = ['16_1_biogas', '16_2_industrial_waste', '16_3_municipal_solid_waste_renewable', '16_4_municipal_solid_waste_nonrenewable', '16_5_biogasoline', '16_6_biodiesel',
'16_7_bio_jet_kerosene', '16_8_other_liquid_biofuels', '16_9_other_sources', '16_x_hydrogen']
# Total
total_fuels = ['1_coal', '2_coal_products', '5_oil_shale_and_oil_sands', '6_crude_oil_and_ngl', '7_petroleum_products', '8_gas', '9_nuclear', '10_hydro', '11_geothermal',
'12_solar', '13_tide_wave_ocean', '14_wind', '15_solid_biomass', '16_others', '17_electricity', '18_heat']
# total_renewables to be completed
##############################################################################
# item_code_new aggregations
# Lowest level
industry_agg = ['14_1_iron_and_steel', '14_2_chemical_incl_petrochemical', '14_3_non_ferrous_metals', '14_4_nonmetallic_mineral_products', '14_5_transportation_equipment',
'14_6_machinery', '14_7_mining_and_quarrying', '14_8_food_beverages_and_tobacco', '14_9_pulp_paper_and_printing', '14_10_wood_and_wood_products',
'14_11_construction', '14_12_textiles_and_leather', '14_13_nonspecified_industry']
transport_agg = ['15_1_domestic_air_transport', '15_2_road', '15_3_rail', '15_4_domestic_navigation', '15_5_pipeline_transport', '15_6_nonspecified_transport']
others_agg = ['16_1_commercial_and_public_services', '16_2_residential', '16_3_agriculture', '16_4_fishing', '16_5_nonspecified_others']
# Then first level
tpes_agg = ['1_indigenous_production', '2_imports', '3_exports', '4_international_marine_bunkers', '5_international_aviation_bunkers', '6_stock_change']
tfc_agg = ['14_industry_sector', '15_transport_sector', '16_other_sector', '17_nonenergy_use']
tfec_agg = ['14_industry_sector', '15_transport_sector', '16_other_sector']
# For dataframe finalising
key_variables = ['economy', 'fuel_code', 'item_code_new']
#######################################################################################################################
# REFERENCE
# Now aggregate data based on the mapping
# That is group by REGION, TECHNOLOGY and FUEL
# First create empty dataframe
ref_aggregate_df2 = pd.DataFrame()
# Then loop through based on different regions/economies and stitch back together
for region in ref_aggregate_df1['REGION'].unique():
interim_df1 = ref_aggregate_df1[ref_aggregate_df1['REGION'] == region]
interim_df1 = interim_df1.merge(Mapping_TFC_TPES, how = 'left', on = ['TECHNOLOGY', 'FUEL'])
interim_df1 = interim_df1.groupby(['item_code_new', 'fuel_code']).sum().reset_index()
# Change export data to negative values
exports_bunkers = interim_df1[interim_df1['item_code_new'].isin(['3_exports', '4_international_marine_bunkers', '5_international_aviation_bunkers'])]\
.set_index(['item_code_new', 'fuel_code'])
everything_else = interim_df1[~interim_df1['item_code_new'].isin(['3_exports', '4_international_marine_bunkers', '5_international_aviation_bunkers'])]
exports_bunkers = exports_bunkers * -1
exports_bunkers = exports_bunkers.reset_index()
interim_df2 = everything_else.append(exports_bunkers)
########################### Aggregate fuel_code for new variables ###################################
# First level fuels
coal = interim_df2[interim_df2['fuel_code'].isin(coal_fuels)].groupby(['item_code_new'])\
.sum().assign(fuel_code = '1_coal').reset_index()
oil = interim_df2[interim_df2['fuel_code'].isin(oil_fuels)].groupby(['item_code_new'])\
.sum().assign(fuel_code = '6_crude_oil_and_ngl').reset_index()
petrol = interim_df2[interim_df2['fuel_code'].isin(petrol_fuels)].groupby(['item_code_new'])\
.sum().assign(fuel_code = '7_petroleum_products').reset_index()
gas = interim_df2[interim_df2['fuel_code'].isin(gas_fuels)].groupby(['item_code_new'])\
.sum().assign(fuel_code = '8_gas').reset_index()
biomass = interim_df2[interim_df2['fuel_code'].isin(biomass_fuels)].groupby(['item_code_new'])\
.sum().assign(fuel_code = '15_solid_biomass').reset_index()
others = interim_df2[interim_df2['fuel_code'].isin(other_fuels)].groupby(['item_code_new'])\
.sum().assign(fuel_code = '16_others').reset_index()
interim_df3 = interim_df2.append([coal, oil, petrol, gas, biomass, others]).reset_index(drop = True)
# And total fuels
total_f = interim_df3[interim_df3['fuel_code'].isin(total_fuels)].groupby(['item_code_new'])\
.sum().assign(fuel_code = '19_total').reset_index()
interim_df4 = interim_df3.append(total_f).reset_index(drop = True)
################################ And now item_code_new ######################################
# Start with lowest level
industry = interim_df4[interim_df4['item_code_new'].isin(industry_agg)].groupby(['fuel_code'])\
.sum().assign(item_code_new = '14_industry_sector').reset_index()
transport = interim_df4[interim_df4['item_code_new'].isin(transport_agg)].groupby(['fuel_code'])\
.sum().assign(item_code_new = '15_transport_sector').reset_index()
bld_ag_other = interim_df4[interim_df4['item_code_new'].isin(others_agg)].groupby(['fuel_code'])\
.sum().assign(item_code_new = '16_other_sector').reset_index()
interim_df5 = interim_df4.append([industry, transport, bld_ag_other]).reset_index(drop = True)
# Now higher level agg
    # Might need to check this depending on whether exports are negative
tpes = interim_df5[interim_df5['item_code_new'].isin(tpes_agg)].groupby(['fuel_code'])\
.sum().assign(item_code_new = '7_total_primary_energy_supply').reset_index()
tfc = interim_df5[interim_df5['item_code_new'].isin(tfc_agg)].groupby(['fuel_code'])\
.sum().assign(item_code_new = '12_total_final_consumption').reset_index()
tfec = interim_df5[interim_df5['item_code_new'].isin(tfec_agg)].groupby(['fuel_code'])\
.sum().assign(item_code_new = '13_total_final_energy_consumption').reset_index()
interim_df6 = interim_df5.append([tpes, tfc, tfec]).reset_index(drop = True)
# Now add in economy reference
interim_df6['economy'] = region
# Now append economy dataframe to communal data frame
ref_aggregate_df2 = ref_aggregate_df2.append(interim_df6)
# aggregate_df2 = aggregate_df2[['economy', 'fuel_code', 'item_code_new'] + OSeMOSYS_years]
if ref_aggregate_df2.empty:
ref_aggregate_df2
else:
ref_aggregate_df2 = ref_aggregate_df2.loc[:, key_variables + OSeMOSYS_years_ref]
#######################################################################################################################
# NET ZERO
# Now aggregate data based on the mapping
# That is group by REGION, TECHNOLOGY and FUEL
# First create empty dataframe
netz_aggregate_df2 = pd.DataFrame()
# Then loop through based on different regions/economies and stitch back together
for region in netz_aggregate_df1['REGION'].unique():
interim_df1 = netz_aggregate_df1[netz_aggregate_df1['REGION'] == region]
interim_df1 = interim_df1.merge(Mapping_TFC_TPES, how = 'left', on = ['TECHNOLOGY', 'FUEL'])
interim_df1 = interim_df1.groupby(['item_code_new', 'fuel_code']).sum().reset_index()
# Change export data to negative values
exports_bunkers = interim_df1[interim_df1['item_code_new'].isin(['3_exports', '4_international_marine_bunkers', '5_international_aviation_bunkers'])]\
.set_index(['item_code_new', 'fuel_code'])
everything_else = interim_df1[~interim_df1['item_code_new'].isin(['3_exports', '4_international_marine_bunkers', '5_international_aviation_bunkers'])]
exports_bunkers = exports_bunkers * -1
exports_bunkers = exports_bunkers.reset_index()
interim_df2 = everything_else.append(exports_bunkers)
########################### Aggregate fuel_code for new variables ###################################
# First level fuels
coal = interim_df2[interim_df2['fuel_code'].isin(coal_fuels)].groupby(['item_code_new'])\
.sum().assign(fuel_code = '1_coal').reset_index()
oil = interim_df2[interim_df2['fuel_code'].isin(oil_fuels)].groupby(['item_code_new'])\
.sum().assign(fuel_code = '6_crude_oil_and_ngl').reset_index()
petrol = interim_df2[interim_df2['fuel_code'].isin(petrol_fuels)].groupby(['item_code_new'])\
.sum().assign(fuel_code = '7_petroleum_products').reset_index()
gas = interim_df2[interim_df2['fuel_code'].isin(gas_fuels)].groupby(['item_code_new'])\
.sum().assign(fuel_code = '8_gas').reset_index()
biomass = interim_df2[interim_df2['fuel_code'].isin(biomass_fuels)].groupby(['item_code_new'])\
.sum().assign(fuel_code = '15_solid_biomass').reset_index()
others = interim_df2[interim_df2['fuel_code'].isin(other_fuels)].groupby(['item_code_new'])\
.sum().assign(fuel_code = '16_others').reset_index()
interim_df3 = interim_df2.append([coal, oil, petrol, gas, biomass, others]).reset_index(drop = True)
# And total fuels
total_f = interim_df3[interim_df3['fuel_code'].isin(total_fuels)].groupby(['item_code_new'])\
.sum().assign(fuel_code = '19_total').reset_index()
interim_df4 = interim_df3.append(total_f).reset_index(drop = True)
################################ And now item_code_new ######################################
# Start with lowest level
industry = interim_df4[interim_df4['item_code_new'].isin(industry_agg)].groupby(['fuel_code'])\
.sum().assign(item_code_new = '14_industry_sector').reset_index()
transport = interim_df4[interim_df4['item_code_new'].isin(transport_agg)].groupby(['fuel_code'])\
.sum().assign(item_code_new = '15_transport_sector').reset_index()
bld_ag_other = interim_df4[interim_df4['item_code_new'].isin(others_agg)].groupby(['fuel_code'])\
.sum().assign(item_code_new = '16_other_sector').reset_index()
interim_df5 = interim_df4.append([industry, transport, bld_ag_other]).reset_index(drop = True)
# Now higher level agg
    # Might need to check this depending on whether exports are negative
tpes = interim_df5[interim_df5['item_code_new'].isin(tpes_agg)].groupby(['fuel_code'])\
.sum().assign(item_code_new = '7_total_primary_energy_supply').reset_index()
tfc = interim_df5[interim_df5['item_code_new'].isin(tfc_agg)].groupby(['fuel_code'])\
.sum().assign(item_code_new = '12_total_final_consumption').reset_index()
tfec = interim_df5[interim_df5['item_code_new'].isin(tfec_agg)].groupby(['fuel_code'])\
.sum().assign(item_code_new = '13_total_final_energy_consumption').reset_index()
interim_df6 = interim_df5.append([tpes, tfc, tfec]).reset_index(drop = True)
# Now add in economy reference
interim_df6['economy'] = region
# Now append economy dataframe to communal data frame
netz_aggregate_df2 = netz_aggregate_df2.append(interim_df6)
# aggregate_df2 = aggregate_df2[['economy', 'fuel_code', 'item_code_new'] + OSeMOSYS_years]
if netz_aggregate_df2.empty == True:
netz_aggregate_df2
else:
netz_aggregate_df2 = netz_aggregate_df2.loc[:, key_variables + OSeMOSYS_years_netz]
# Now load the EGEDA_years data frame
EGEDA_years = pd.read_csv('./data/1_EGEDA/EGEDA_2018_years.csv')
# REFERENCE
if ref_aggregate_df2.empty == True:
ref_aggregate_df2_tojoin = ref_aggregate_df2.copy()
else:
ref_aggregate_df2_tojoin = ref_aggregate_df2.copy().loc[:, key_variables + OSeMOSYS_years_ref]
# NET ZERO
if netz_aggregate_df2.empty == True:
netz_aggregate_df2_tojoin = netz_aggregate_df2.copy()
else:
netz_aggregate_df2_tojoin = netz_aggregate_df2.copy().loc[:, key_variables + OSeMOSYS_years_netz]
# Join EGEDA historical to OSeMOSYS results (line below removes 2017 and 2018 from historical)
# REFERENCE
if ref_aggregate_df2_tojoin.empty == True:
Joined_ref_df = EGEDA_years.copy().reindex(columns = EGEDA_years.columns.tolist() + list(range(2019, 2051)))
else:
Joined_ref_df = EGEDA_years.copy().iloc[:, :-2].merge(ref_aggregate_df2_tojoin, on = ['economy', 'fuel_code', 'item_code_new'], how = 'left')
Joined_ref_df.to_csv(path_final + '/OSeMOSYS_to_EGEDA_2018_reference.csv', index = False)
# NET ZERO
if netz_aggregate_df2_tojoin.empty == True:
Joined_netz_df = EGEDA_years.copy().reindex(columns = EGEDA_years.columns.tolist() + list(range(2019, 2051)))
else:
Joined_netz_df = EGEDA_years.copy().iloc[:, :-2].merge(netz_aggregate_df2_tojoin, on = ['economy', 'fuel_code', 'item_code_new'], how = 'left')
Joined_netz_df.to_csv(path_final + '/OSeMOSYS_to_EGEDA_2018_netzero.csv', index = False)
###############################################################################################################################
# Moving beyond TFC and TPES: now the Transformation sheets
# Determine list of files to read based on the workbooks identified in the mapping file
# REFERENCE
ref_file_trans = pd.DataFrame()
for i in range(len(Unique_trans['Workbook'].unique())):
_file = pd.DataFrame({'File': [entry for entry in reference_filenames if Unique_trans['Workbook'].unique()[i] in entry],
'Workbook': Unique_trans['Workbook'].unique()[i]})
ref_file_trans = ref_file_trans.append(_file)
ref_file_trans = ref_file_trans.merge(Unique_trans, how = 'outer', on = 'Workbook')
# NET ZERO
netz_file_trans = pd.DataFrame()
for i in range(len(Unique_trans['Workbook'].unique())):
_file = pd.DataFrame({'File': [entry for entry in netzero_filenames if Unique_trans['Workbook'].unique()[i] in entry],
'Workbook': Unique_trans['Workbook'].unique()[i]})
netz_file_trans = netz_file_trans.append(_file)
netz_file_trans = netz_file_trans.merge(Unique_trans, how = 'outer', on = 'Workbook')
# Create empty dataframe to store aggregated results
# REFERENCE
ref_aggtrans_df1 = pd.DataFrame()
# Now read in the OSeMOSYS output files so that they're all in one data frame (aggregate_df1)
for i in range(ref_file_trans.shape[0]):
_df = pd.read_excel(ref_file_trans.iloc[i, 0], sheet_name = ref_file_trans.iloc[i, 2])
_df['Workbook'] = ref_file_trans.iloc[i, 1]
_df['Sheet_energy'] = ref_file_trans.iloc[i, 2]
ref_aggtrans_df1 = ref_aggtrans_df1.append(_df)
# Bunkers/stock drawdowns and builds. Stock build needs to be changed to negative values
interim_stock1 = ref_aggtrans_df1[ref_aggtrans_df1['TECHNOLOGY']\
.isin(['SUP_6_1_crude_oil_stock_build',
'SUP_8_1_natural_gas_stock_build',
'SUP_2_coal_products_stock_build'])].copy()\
.set_index(['TECHNOLOGY', 'FUEL', 'REGION', 'TIMESLICE', 'Workbook', 'Sheet_energy'])
interim_stock2 = ref_aggtrans_df1[~ref_aggtrans_df1['TECHNOLOGY']\
.isin(['SUP_6_1_crude_oil_stock_build',
'SUP_8_1_natural_gas_stock_build',
'SUP_2_coal_products_stock_build'])].copy()
interim_stock1 = interim_stock1 * -1
interim_stock1 = interim_stock1.reset_index()
# Stitch back together
ref_aggtrans_df1 = interim_stock2.append(interim_stock1).reset_index(drop = True)
ref_osemo_only_1 = ref_aggtrans_df1[ref_aggtrans_df1['Sheet_energy'] == 'UseByTechnology'].copy()\
.groupby(['TECHNOLOGY', 'FUEL', 'REGION']).sum().reset_index()
ref_aggtrans_df1 = ref_aggtrans_df1.groupby(['TECHNOLOGY', 'FUEL', 'REGION']).sum().reset_index()
# NET ZERO
netz_aggtrans_df1 = pd.DataFrame()
# Now read in the OSeMOSYS output files so that they're all in one data frame (netz_aggtrans_df1)
for i in range(netz_file_trans.shape[0]):
_df = pd.read_excel(netz_file_trans.iloc[i, 0], sheet_name = netz_file_trans.iloc[i, 2])
_df['Workbook'] = netz_file_trans.iloc[i, 1]
_df['Sheet_energy'] = netz_file_trans.iloc[i, 2]
netz_aggtrans_df1 = netz_aggtrans_df1.append(_df)
# Bunkers drawdowns and builds: stock build entries need to be changed to negative
interim_stock1 = netz_aggtrans_df1[netz_aggtrans_df1['TECHNOLOGY']\
.isin(['SUP_6_1_crude_oil_stock_build',
'SUP_8_1_natural_gas_stock_build',
'SUP_2_coal_products_stock_build'])].copy()\
.set_index(['TECHNOLOGY', 'FUEL', 'REGION', 'TIMESLICE', 'Workbook', 'Sheet_energy'])
interim_stock2 = netz_aggtrans_df1[~netz_aggtrans_df1['TECHNOLOGY']\
.isin(['SUP_6_1_crude_oil_stock_build',
'SUP_8_1_natural_gas_stock_build',
'SUP_2_coal_products_stock_build'])].copy()
interim_stock1 = interim_stock1 * -1
interim_stock1 = interim_stock1.reset_index()
# Stitch back together
netz_aggtrans_df1 = interim_stock2.append(interim_stock1).reset_index(drop = True)
netz_osemo_only_1 = netz_aggtrans_df1[netz_aggtrans_df1['Sheet_energy'] == 'UseByTechnology'].copy()\
.groupby(['TECHNOLOGY', 'FUEL', 'REGION']).sum().reset_index()
netz_aggtrans_df1 = netz_aggtrans_df1.groupby(['TECHNOLOGY', 'FUEL', 'REGION']).sum().reset_index()
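# Note: DataFrame.append, used throughout this script, was deprecated in pandas 1.4 and
# removed in pandas 2.0. A hedged sketch of the equivalent pd.concat pattern is shown
# below for reference; it is not wired into the script, and adopting it would mean
# replacing each .append() call site individually.
def append_frames(base_df, new_df):
    """Roughly equivalent to base_df.append(new_df).reset_index(drop=True) on pandas < 2.0."""
    return pd.concat([base_df, new_df], ignore_index=True)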
##################################################################
# Now aggregate all the results for APEC
# # REFERENCE
# APEC_ref = ref_aggtrans_df1.groupby(['TECHNOLOGY', 'FUEL']).sum().reset_index()
# APEC_ref['REGION'] = 'APEC'
# ref_aggtrans_df1 = ref_aggtrans_df1.append(APEC_ref).reset_index(drop = True)
# # NET ZERO
# APEC_netz = netz_aggtrans_df1.groupby(['TECHNOLOGY', 'FUEL']).sum().reset_index()
# APEC_netz['REGION'] = 'APEC'
# netz_aggtrans_df1 = netz_aggtrans_df1.append(APEC_netz).reset_index(drop = True)
# # Now aggregate results for 22_SEA
# # Southeast Asia: 02, 07, 10, 15, 17, 19, 21
# # REFERENCE
# SEA_ref = ref_aggtrans_df1[ref_aggtrans_df1['REGION']\
# .isin(['02_BD', '07_INA', '10_MAS', '15_RP', '17_SIN', '19_THA', '21_VN'])]\
# .groupby(['TECHNOLOGY', 'FUEL']).sum().reset_index()
# SEA_ref['REGION'] = '22_SEA'
# ref_aggtrans_df1 = ref_aggtrans_df1.append(SEA_ref).reset_index(drop = True)
# # NET ZERO
# SEA_netz = netz_aggtrans_df1[netz_aggtrans_df1['REGION']\
# .isin(['02_BD', '07_INA', '10_MAS', '15_RP', '17_SIN', '19_THA', '21_VN'])]\
# .groupby(['TECHNOLOGY', 'FUEL']).sum().reset_index()
# SEA_netz['REGION'] = '22_SEA'
# netz_aggtrans_df1 = netz_aggtrans_df1.append(SEA_netz).reset_index(drop = True)
# # Aggregate results for 23_NEA
# # Northeast Asia: 06, 08, 09, 18
# # REFERENCE
# NEA_ref = ref_aggtrans_df1[ref_aggtrans_df1['REGION']\
# .isin(['06_HKC', '08_JPN', '09_ROK', '18_CT'])]\
# .groupby(['TECHNOLOGY', 'FUEL']).sum().reset_index()
# NEA_ref['REGION'] = '23_NEA'
# ref_aggtrans_df1 = ref_aggtrans_df1.append(NEA_ref).reset_index(drop = True)
# # NET ZERO
# NEA_netz = netz_aggtrans_df1[netz_aggtrans_df1['REGION']\
# .isin(['06_HKC', '08_JPN', '09_ROK', '18_CT'])]\
# .groupby(['TECHNOLOGY', 'FUEL']).sum().reset_index()
# NEA_netz['REGION'] = '23_NEA'
# netz_aggtrans_df1 = netz_aggtrans_df1.append(NEA_netz).reset_index(drop = True)
# # Aggregate results for 23b_ONEA
# # ONEA: 06, 09, 18
# # REFERENCE
# ONEA_ref = ref_aggtrans_df1[ref_aggtrans_df1['REGION']\
# .isin(['06_HKC', '09_ROK', '18_CT'])]\
# .groupby(['TECHNOLOGY', 'FUEL']).sum().reset_index()
# ONEA_ref['REGION'] = '23b_ONEA'
# ref_aggtrans_df1 = ref_aggtrans_df1.append(ONEA_ref).reset_index(drop = True)
# # NET ZERO
# ONEA_netz = netz_aggtrans_df1[netz_aggtrans_df1['REGION']\
# .isin(['06_HKC', '09_ROK', '18_CT'])]\
# .groupby(['TECHNOLOGY', 'FUEL']).sum().reset_index()
# ONEA_netz['REGION'] = '23b_ONEA'
# netz_aggtrans_df1 = netz_aggtrans_df1.append(ONEA_netz).reset_index(drop = True)
# # Aggregate results for 24_OAM
# # OAM: 03, 04, 11, 14
# # REFERENCE
# OAM_ref = ref_aggtrans_df1[ref_aggtrans_df1['REGION']\
# .isin(['03_CDA', '04_CHL', '11_MEX', '14_PE'])]\
# .groupby(['TECHNOLOGY', 'FUEL']).sum().reset_index()
# OAM_ref['REGION'] = '24_OAM'
# ref_aggtrans_df1 = ref_aggtrans_df1.append(OAM_ref).reset_index(drop = True)
# # NET ZERO
# OAM_netz = netz_aggtrans_df1[netz_aggtrans_df1['REGION']\
# .isin(['03_CDA', '04_CHL', '11_MEX', '14_PE'])]\
# .groupby(['TECHNOLOGY', 'FUEL']).sum().reset_index()
# OAM_netz['REGION'] = '24_OAM'
# netz_aggtrans_df1 = netz_aggtrans_df1.append(OAM_netz).reset_index(drop = True)
# # Aggregate results for 24b_OOAM
# # OOAM: 04, 11, 14
# # REFERENCE
# OOAM_ref = ref_aggtrans_df1[ref_aggtrans_df1['REGION']\
# .isin(['04_CHL', '11_MEX', '14_PE'])]\
# .groupby(['TECHNOLOGY', 'FUEL']).sum().reset_index()
# OOAM_ref['REGION'] = '24b_OOAM'
# ref_aggtrans_df1 = ref_aggtrans_df1.append(OOAM_ref).reset_index(drop = True)
# # NET ZERO
# OOAM_netz = netz_aggtrans_df1[netz_aggtrans_df1['REGION']\
# .isin(['04_CHL', '11_MEX', '14_PE'])]\
# .groupby(['TECHNOLOGY', 'FUEL']).sum().reset_index()
# OOAM_netz['REGION'] = '24b_OOAM'
# netz_aggtrans_df1 = netz_aggtrans_df1.append(OOAM_netz).reset_index(drop = True)
# # Aggregate results for 25_OCE
# # Oceania: 01, 12, 13
# # REFERENCE
# OCE_ref = ref_aggtrans_df1[ref_aggtrans_df1['REGION']\
# .isin(['01_AUS', '12_NZ', '13_PNG'])]\
# .groupby(['TECHNOLOGY', 'FUEL']).sum().reset_index()
# OCE_ref['REGION'] = '25_OCE'
# ref_aggtrans_df1 = ref_aggtrans_df1.append(OCE_ref).reset_index(drop = True)
# # NET ZERO
# OCE_netz = netz_aggtrans_df1[netz_aggtrans_df1['REGION']\
# .isin(['01_AUS', '12_NZ', '13_PNG'])]\
# .groupby(['TECHNOLOGY', 'FUEL']).sum().reset_index()
# OCE_netz['REGION'] = '25_OCE'
# netz_aggtrans_df1 = netz_aggtrans_df1.append(OCE_netz).reset_index(drop = True)
# Get maximum year column to build data frame below
# REFERENCE
ref_year_columns = []
for item in list(ref_aggtrans_df1.columns):
try:
ref_year_columns.append(int(item))
except ValueError:
pass
max_year_ref = min(2050, max(ref_year_columns))
OSeMOSYS_years_ref = list(range(2017, max_year_ref + 1))
# NET ZERO
netz_year_columns = []
for item in list(netz_aggtrans_df1.columns):
try:
netz_year_columns.append(int(item))
except ValueError:
pass
max_year_netz = min(2050, max(netz_year_columns))
OSeMOSYS_years_netz = list(range(2017, max_year_netz + 1))
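# The two try/except loops above extract the integer year columns for each scenario.
# A compact sketch of the same logic as a helper is given below; it is not called here
# and simply mirrors the start-year and 2050 cap used above.
def get_osemosys_years(df, start_year=2017, cap_year=2050):
    """Return the modelled year range, capped at cap_year, from a results frame."""
    year_cols = []
    for col in df.columns:
        try:
            year_cols.append(int(col))
        except (ValueError, TypeError):
            pass
    return list(range(start_year, min(cap_year, max(year_cols)) + 1))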
############################################################################
# Read in capacity data
# REFERENCE
ref_capacity_df1 = pd.DataFrame()
# Populate the above blank dataframe with capacity data from the results workbook
for i in range(len(reference_filenames)):
_df = pd.read_excel(reference_filenames[i], sheet_name = 'TotalCapacityAnnual')
ref_capacity_df1 = ref_capacity_df1.append(_df)
# Now just extract the power capacity
ref_pow_capacity_df1 = ref_capacity_df1[ref_capacity_df1['TECHNOLOGY'].str.startswith('POW')].reset_index(drop = True)
# REFERENCE
APEC_ref_cap = ref_pow_capacity_df1.groupby(['TECHNOLOGY']).sum().reset_index()
APEC_ref_cap['REGION'] = 'APEC'
ref_pow_capacity_df1 = ref_pow_capacity_df1.append(APEC_ref_cap).reset_index(drop = True)
# SEA
SEA_ref = ref_pow_capacity_df1[ref_pow_capacity_df1['REGION']\
.isin(['02_BD', '07_INA', '10_MAS', '15_RP', '17_SIN', '19_THA', '21_VN'])]\
.groupby(['TECHNOLOGY']).sum().reset_index()
SEA_ref['REGION'] = '22_SEA'
ref_pow_capacity_df1 = ref_pow_capacity_df1.append(SEA_ref).reset_index(drop = True)
# Aggregate results for 23_NEA
# Northeast Asia: 06, 08, 09, 18
# NEA
NEA_ref = ref_pow_capacity_df1[ref_pow_capacity_df1['REGION']\
.isin(['06_HKC', '08_JPN', '09_ROK', '18_CT'])]\
.groupby(['TECHNOLOGY']).sum().reset_index()
NEA_ref['REGION'] = '23_NEA'
ref_pow_capacity_df1 = ref_pow_capacity_df1.append(NEA_ref).reset_index(drop = True)
# Aggregate results for 23b_ONEA
# ONEA: 06, 09, 18
# ONEA
ONEA_ref = ref_pow_capacity_df1[ref_pow_capacity_df1['REGION']\
.isin(['06_HKC', '09_ROK', '18_CT'])]\
.groupby(['TECHNOLOGY']).sum().reset_index()
ONEA_ref['REGION'] = '23b_ONEA'
ref_pow_capacity_df1 = ref_pow_capacity_df1.append(ONEA_ref).reset_index(drop = True)
# OAM
OAM_ref = ref_pow_capacity_df1[ref_pow_capacity_df1['REGION']\
.isin(['03_CDA', '04_CHL', '11_MEX', '14_PE'])]\
.groupby(['TECHNOLOGY']).sum().reset_index()
OAM_ref['REGION'] = '24_OAM'
ref_pow_capacity_df1 = ref_pow_capacity_df1.append(OAM_ref).reset_index(drop = True)
# Aggregate results for 24b_OOAM
# OOAM: 04, 11, 14
# OOAM
OOAM_ref = ref_pow_capacity_df1[ref_pow_capacity_df1['REGION']\
.isin(['04_CHL', '11_MEX', '14_PE'])]\
.groupby(['TECHNOLOGY']).sum().reset_index()
OOAM_ref['REGION'] = '24b_OOAM'
ref_pow_capacity_df1 = ref_pow_capacity_df1.append(OOAM_ref).reset_index(drop = True)
# Aggregate results for 25_OCE
# Oceania: 01, 12, 13
# OCE
OCE_ref = ref_pow_capacity_df1[ref_pow_capacity_df1['REGION']\
.isin(['01_AUS', '12_NZ', '13_PNG'])]\
.groupby(['TECHNOLOGY']).sum().reset_index()
OCE_ref['REGION'] = '25_OCE'
ref_pow_capacity_df1 = ref_pow_capacity_df1.append(OCE_ref).reset_index(drop = True)
# NET ZERO
netz_capacity_df1 = pd.DataFrame()
# Populate the above blank dataframe with capacity data from the results workbook
for i in range(len(netzero_filenames)):
_df = pd.read_excel(netzero_filenames[i], sheet_name = 'TotalCapacityAnnual')
netz_capacity_df1 = netz_capacity_df1.append(_df)
# Now just extract the power capacity
netz_pow_capacity_df1 = netz_capacity_df1[netz_capacity_df1['TECHNOLOGY'].str.startswith('POW')].reset_index(drop = True)
# NET ZERO
APEC_netz_cap = netz_pow_capacity_df1.groupby(['TECHNOLOGY']).sum().reset_index()
APEC_netz_cap['REGION'] = 'APEC'
netz_pow_capacity_df1 = netz_pow_capacity_df1.append(APEC_netz_cap).reset_index(drop = True)
# SEA
SEA_ref = netz_pow_capacity_df1[netz_pow_capacity_df1['REGION']\
.isin(['02_BD', '07_INA', '10_MAS', '15_RP', '17_SIN', '19_THA', '21_VN'])]\
.groupby(['TECHNOLOGY']).sum().reset_index()
SEA_ref['REGION'] = '22_SEA'
netz_pow_capacity_df1 = netz_pow_capacity_df1.append(SEA_ref).reset_index(drop = True)
# Aggregate results for 23_NEA
# Northeast Asia: 06, 08, 09, 18
# NEA
NEA_ref = netz_pow_capacity_df1[netz_pow_capacity_df1['REGION']\
.isin(['06_HKC', '08_JPN', '09_ROK', '18_CT'])]\
.groupby(['TECHNOLOGY']).sum().reset_index()
NEA_ref['REGION'] = '23_NEA'
netz_pow_capacity_df1 = netz_pow_capacity_df1.append(NEA_ref).reset_index(drop = True)
# Aggregate results for 23b_ONEA
# ONEA: 06, 09, 18
# ONEA
ONEA_ref = netz_pow_capacity_df1[netz_pow_capacity_df1['REGION']\
.isin(['06_HKC', '09_ROK', '18_CT'])]\
.groupby(['TECHNOLOGY']).sum().reset_index()
ONEA_ref['REGION'] = '23b_ONEA'
netz_pow_capacity_df1 = netz_pow_capacity_df1.append(ONEA_ref).reset_index(drop = True)
# OAM
OAM_ref = netz_pow_capacity_df1[netz_pow_capacity_df1['REGION']\
.isin(['03_CDA', '04_CHL', '11_MEX', '14_PE'])]\
.groupby(['TECHNOLOGY']).sum().reset_index()
OAM_ref['REGION'] = '24_OAM'
netz_pow_capacity_df1 = netz_pow_capacity_df1.append(OAM_ref).reset_index(drop = True)
# Aggregate results for 24b_OOAM
# OOAM: 04, 11, 14
# OOAM
OOAM_ref = netz_pow_capacity_df1[netz_pow_capacity_df1['REGION']\
.isin(['04_CHL', '11_MEX', '14_PE'])]\
.groupby(['TECHNOLOGY']).sum().reset_index()
OOAM_ref['REGION'] = '24b_OOAM'
netz_pow_capacity_df1 = netz_pow_capacity_df1.append(OOAM_ref).reset_index(drop = True)
# Aggregate results for 25_OCE
# Oceania: 01, 12, 13
# OCE
OCE_ref = netz_pow_capacity_df1[netz_pow_capacity_df1['REGION']\
.isin(['01_AUS', '12_NZ', '13_PNG'])]\
.groupby(['TECHNOLOGY']).sum().reset_index()
OCE_ref['REGION'] = '25_OCE'
netz_pow_capacity_df1 = netz_pow_capacity_df1.append(OCE_ref).reset_index(drop = True)
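# The capacity aggregations above (and the economy-level aggregations further below, which
# group on 'economy' instead of 'REGION') repeat the same group-sum-relabel pattern for each
# regional grouping. A hedged, data-driven sketch is given below; it is not called by this
# script, and the membership lists simply restate the ones used above.
REGION_GROUPS = {
    'APEC': None,  # None means "all economies"
    '22_SEA': ['02_BD', '07_INA', '10_MAS', '15_RP', '17_SIN', '19_THA', '21_VN'],
    '23_NEA': ['06_HKC', '08_JPN', '09_ROK', '18_CT'],
    '23b_ONEA': ['06_HKC', '09_ROK', '18_CT'],
    '24_OAM': ['03_CDA', '04_CHL', '11_MEX', '14_PE'],
    '24b_OOAM': ['04_CHL', '11_MEX', '14_PE'],
    '25_OCE': ['01_AUS', '12_NZ', '13_PNG'],
}
def add_regional_aggregates(df, group_cols, region_col='REGION', groups=REGION_GROUPS):
    """Append one aggregated block per regional grouping and return the combined frame."""
    pieces = [df]
    for label, members in groups.items():
        subset = df if members is None else df[df[region_col].isin(members)]
        agg = subset.groupby(group_cols).sum().reset_index()
        agg[region_col] = label
        pieces.append(agg)
    return pd.concat(pieces, ignore_index=True)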
#################################################################################################
# Now create the dataframes to save and use in the later bossanova script
################################ POWER SECTOR ###############################
# Aggregate data based on the Map_power mapping
# That is group by REGION, TECHNOLOGY and FUEL
# First create empty dataframe
# REFERENCE
ref_power_df1 = pd.DataFrame()
# Then loop through based on different regions/economies and stitch back together
for region in ref_aggtrans_df1['REGION'].unique():
interim_df1 = ref_aggtrans_df1[ref_aggtrans_df1['REGION'] == region]
interim_df1 = interim_df1.merge(Map_power, how = 'right', on = ['TECHNOLOGY', 'FUEL'])
interim_df1 = interim_df1.groupby(['TECHNOLOGY', 'FUEL', 'Sheet_energy', 'Sector']).sum().reset_index()
# Now add in economy reference
interim_df1['economy'] = region
# Now append economy dataframe to communal data frame
ref_power_df1 = ref_power_df1.append(interim_df1)
ref_power_df1 = ref_power_df1[['economy', 'TECHNOLOGY', 'FUEL', 'Sheet_energy', 'Sector'] + OSeMOSYS_years_ref]
# REFERENCE
APEC_ref = ref_power_df1.groupby(['TECHNOLOGY', 'FUEL', 'Sheet_energy', 'Sector']).sum().reset_index()
APEC_ref['economy'] = 'APEC'
ref_power_df1 = ref_power_df1.append(APEC_ref).reset_index(drop = True)
# SEA
SEA_ref = ref_power_df1[ref_power_df1['economy']\
.isin(['02_BD', '07_INA', '10_MAS', '15_RP', '17_SIN', '19_THA', '21_VN'])]\
.groupby(['TECHNOLOGY', 'FUEL', 'Sheet_energy', 'Sector']).sum().reset_index()
SEA_ref['economy'] = '22_SEA'
ref_power_df1 = ref_power_df1.append(SEA_ref).reset_index(drop = True)
# Aggregate results for 23_NEA
# Northeast Asia: 06, 08, 09, 18
# NEA
NEA_ref = ref_power_df1[ref_power_df1['economy']\
.isin(['06_HKC', '08_JPN', '09_ROK', '18_CT'])]\
.groupby(['TECHNOLOGY', 'FUEL', 'Sheet_energy', 'Sector']).sum().reset_index()
NEA_ref['economy'] = '23_NEA'
ref_power_df1 = ref_power_df1.append(NEA_ref).reset_index(drop = True)
# Aggregate results for 23b_ONEA
# ONEA: 06, 09, 18
# ONEA
ONEA_ref = ref_power_df1[ref_power_df1['economy']\
.isin(['06_HKC', '09_ROK', '18_CT'])]\
.groupby(['TECHNOLOGY', 'FUEL', 'Sheet_energy', 'Sector']).sum().reset_index()
ONEA_ref['economy'] = '23b_ONEA'
ref_power_df1 = ref_power_df1.append(ONEA_ref).reset_index(drop = True)
# OAM
OAM_ref = ref_power_df1[ref_power_df1['economy']\
.isin(['03_CDA', '04_CHL', '11_MEX', '14_PE'])]\
.groupby(['TECHNOLOGY', 'FUEL', 'Sheet_energy', 'Sector']).sum().reset_index()
OAM_ref['economy'] = '24_OAM'
ref_power_df1 = ref_power_df1.append(OAM_ref).reset_index(drop = True)
# Aggregate results for 24b_OOAM
# OOAM: 04, 11, 14
# OOAM
OOAM_ref = ref_power_df1[ref_power_df1['economy']\
.isin(['04_CHL', '11_MEX', '14_PE'])]\
.groupby(['TECHNOLOGY', 'FUEL', 'Sheet_energy', 'Sector']).sum().reset_index()
OOAM_ref['economy'] = '24b_OOAM'
ref_power_df1 = ref_power_df1.append(OOAM_ref).reset_index(drop = True)
# Aggregate results for 25_OCE
# Oceania: 01, 12, 13
# OCE
OCE_ref = ref_power_df1[ref_power_df1['economy']\
.isin(['01_AUS', '12_NZ', '13_PNG'])]\
.groupby(['TECHNOLOGY', 'FUEL', 'Sheet_energy', 'Sector']).sum().reset_index()
OCE_ref['economy'] = '25_OCE'
ref_power_df1 = ref_power_df1.append(OCE_ref).reset_index(drop = True)
# NET ZERO
netz_power_df1 = pd.DataFrame()
# Then loop through based on different regions/economies and stitch back together
for region in netz_aggtrans_df1['REGION'].unique():
interim_df1 = netz_aggtrans_df1[netz_aggtrans_df1['REGION'] == region]
interim_df1 = interim_df1.merge(Map_power, how = 'right', on = ['TECHNOLOGY', 'FUEL'])
interim_df1 = interim_df1.groupby(['TECHNOLOGY', 'FUEL', 'Sheet_energy', 'Sector']).sum().reset_index()
# Now add in economy reference
interim_df1['economy'] = region
# Now append economy dataframe to communal data frame
netz_power_df1 = netz_power_df1.append(interim_df1)
netz_power_df1 = netz_power_df1[['economy', 'TECHNOLOGY', 'FUEL', 'Sheet_energy', 'Sector'] + OSeMOSYS_years_netz]
# NET ZERO
APEC_netz = netz_power_df1.groupby(['TECHNOLOGY', 'FUEL', 'Sheet_energy', 'Sector']).sum().reset_index()
APEC_netz['economy'] = 'APEC'
netz_power_df1 = netz_power_df1.append(APEC_netz).reset_index(drop = True)
# SEA
SEA_netz = netz_power_df1[netz_power_df1['economy']\
.isin(['02_BD', '07_INA', '10_MAS', '15_RP', '17_SIN', '19_THA', '21_VN'])]\
.groupby(['TECHNOLOGY', 'FUEL', 'Sheet_energy', 'Sector']).sum().reset_index()
SEA_netz['economy'] = '22_SEA'
netz_power_df1 = netz_power_df1.append(SEA_netz).reset_index(drop = True)
# NEA
NEA_netz = netz_power_df1[netz_power_df1['economy']\
.isin(['06_HKC', '08_JPN', '09_ROK', '18_CT'])]\
.groupby(['TECHNOLOGY', 'FUEL', 'Sheet_energy', 'Sector']).sum().reset_index()
NEA_netz['economy'] = '23_NEA'
netz_power_df1 = netz_power_df1.append(NEA_netz).reset_index(drop = True)
# ONEA
ONEA_netz = netz_power_df1[netz_power_df1['economy']\
.isin(['06_HKC', '09_ROK', '18_CT'])]\
.groupby(['TECHNOLOGY', 'FUEL', 'Sheet_energy', 'Sector']).sum().reset_index()
ONEA_netz['economy'] = '23b_ONEA'
netz_power_df1 = netz_power_df1.append(ONEA_netz).reset_index(drop = True)
# OAM
OAM_netz = netz_power_df1[netz_power_df1['economy']\
.isin(['03_CDA', '04_CHL', '11_MEX', '14_PE'])]\
.groupby(['TECHNOLOGY', 'FUEL', 'Sheet_energy', 'Sector']).sum().reset_index()
OAM_netz['economy'] = '24_OAM'
netz_power_df1 = netz_power_df1.append(OAM_netz).reset_index(drop = True)
# OOAM
OOAM_netz = netz_power_df1[netz_power_df1['economy']\
.isin(['04_CHL', '11_MEX', '14_PE'])]\
.groupby(['TECHNOLOGY', 'FUEL', 'Sheet_energy', 'Sector']).sum().reset_index()
OOAM_netz['economy'] = '24b_OOAM'
netz_power_df1 = netz_power_df1.append(OOAM_netz).reset_index(drop = True)
# OCE
OCE_netz = netz_power_df1[netz_power_df1['economy']\
.isin(['01_AUS', '12_NZ', '13_PNG'])]\
.groupby(['TECHNOLOGY', 'FUEL', 'Sheet_energy', 'Sector']).sum().reset_index()
OCE_netz['economy'] = '25_OCE'
netz_power_df1 = netz_power_df1.append(OCE_netz).reset_index(drop = True)
################################ REFINERY, OWN USE and SUPPLY TRANSFORMATION SECTOR ###############################
# Aggregate data based on REGION, TECHNOLOGY and FUEL
# First create empty dataframe
# REFERENCE
ref_refownsup_df1 = pd.DataFrame()
# Then loop through based on different regions/economies and stitch back together
for region in ref_aggtrans_df1['REGION'].unique():
interim_df1 = ref_aggtrans_df1[ref_aggtrans_df1['REGION'] == region]
interim_df1 = interim_df1.merge(Map_refownsup, how = 'right', on = ['TECHNOLOGY', 'FUEL'])
interim_df1 = interim_df1.groupby(['TECHNOLOGY', 'FUEL', 'Sheet_energy', 'Sector']).sum().reset_index()
# Now add in economy reference
interim_df1['economy'] = region
# Now append economy dataframe to communal data frame
ref_refownsup_df1 = ref_refownsup_df1.append(interim_df1)
ref_refownsup_df1 = ref_refownsup_df1[['economy', 'TECHNOLOGY', 'FUEL', 'Sheet_energy', 'Sector'] + OSeMOSYS_years_ref]
# REFERENCE
APEC_ref = ref_refownsup_df1.groupby(['TECHNOLOGY', 'FUEL', 'Sheet_energy', 'Sector']).sum().reset_index()
APEC_ref['economy'] = 'APEC'
ref_refownsup_df1 = ref_refownsup_df1.append(APEC_ref).reset_index(drop = True)
# SEA
SEA_ref = ref_refownsup_df1[ref_refownsup_df1['economy']\
.isin(['02_BD', '07_INA', '10_MAS', '15_RP', '17_SIN', '19_THA', '21_VN'])]\
.groupby(['TECHNOLOGY', 'FUEL', 'Sheet_energy', 'Sector']).sum().reset_index()
SEA_ref['economy'] = '22_SEA'
ref_refownsup_df1 = ref_refownsup_df1.append(SEA_ref).reset_index(drop = True)
# Aggregate results for 23_NEA
# Northeast Asia: 06, 08, 09, 18
# NEA
NEA_ref = ref_refownsup_df1[ref_refownsup_df1['economy']\
.isin(['06_HKC', '08_JPN', '09_ROK', '18_CT'])]\
.groupby(['TECHNOLOGY', 'FUEL', 'Sheet_energy', 'Sector']).sum().reset_index()
NEA_ref['economy'] = '23_NEA'
ref_refownsup_df1 = ref_refownsup_df1.append(NEA_ref).reset_index(drop = True)
# Aggregate results for 23b_ONEA
# ONEA: 06, 09, 18
# ONEA
ONEA_ref = ref_refownsup_df1[ref_refownsup_df1['economy']\
.isin(['06_HKC', '09_ROK', '18_CT'])]\
.groupby(['TECHNOLOGY', 'FUEL', 'Sheet_energy', 'Sector']).sum().reset_index()
ONEA_ref['economy'] = '23b_ONEA'
ref_refownsup_df1 = ref_refownsup_df1.append(ONEA_ref).reset_index(drop = True)
# OAM
OAM_ref = ref_refownsup_df1[ref_refownsup_df1['economy']\
.isin(['03_CDA', '04_CHL', '11_MEX', '14_PE'])]\
.groupby(['TECHNOLOGY', 'FUEL', 'Sheet_energy', 'Sector']).sum().reset_index()
OAM_ref['economy'] = '24_OAM'
ref_refownsup_df1 = ref_refownsup_df1.append(OAM_ref).reset_index(drop = True)
# Aggregate results for 24b_OOAM
# OOAM: 04, 11, 14
# OOAM
OOAM_ref = ref_refownsup_df1[ref_refownsup_df1['economy']\
.isin(['04_CHL', '11_MEX', '14_PE'])]\
.groupby(['TECHNOLOGY', 'FUEL', 'Sheet_energy', 'Sector']).sum().reset_index()
OOAM_ref['economy'] = '24b_OOAM'
ref_refownsup_df1 = ref_refownsup_df1.append(OOAM_ref).reset_index(drop = True)
# Aggregate results for 25_OCE
# Oceania: 01, 12, 13
# OCE
OCE_ref = ref_refownsup_df1[ref_refownsup_df1['economy']\
.isin(['01_AUS', '12_NZ', '13_PNG'])]\
.groupby(['TECHNOLOGY', 'FUEL', 'Sheet_energy', 'Sector']).sum().reset_index()
OCE_ref['economy'] = '25_OCE'
ref_refownsup_df1 = ref_refownsup_df1.append(OCE_ref).reset_index(drop = True)
# NET-ZERO
netz_refownsup_df1 = pd.DataFrame()
# Then loop through based on different regions/economies and stitch back together
for region in netz_aggtrans_df1['REGION'].unique():
interim_df1 = netz_aggtrans_df1[netz_aggtrans_df1['REGION'] == region]
interim_df1 = interim_df1.merge(Map_refownsup, how = 'right', on = ['TECHNOLOGY', 'FUEL'])
interim_df1 = interim_df1.groupby(['TECHNOLOGY', 'FUEL', 'Sheet_energy', 'Sector']).sum().reset_index()
# Now add in economy reference
interim_df1['economy'] = region
# Now append economy dataframe to communal data frame
netz_refownsup_df1 = netz_refownsup_df1.append(interim_df1)
netz_refownsup_df1 = netz_refownsup_df1[['economy', 'TECHNOLOGY', 'FUEL', 'Sheet_energy', 'Sector'] + OSeMOSYS_years_netz]
# NET ZERO
APEC_netz = netz_refownsup_df1.groupby(['TECHNOLOGY', 'FUEL', 'Sheet_energy', 'Sector']).sum().reset_index()
APEC_netz['economy'] = 'APEC'
netz_refownsup_df1 = netz_refownsup_df1.append(APEC_netz).reset_index(drop = True)
# SEA
SEA_netz = netz_refownsup_df1[netz_refownsup_df1['economy']\
.isin(['02_BD', '07_INA', '10_MAS', '15_RP', '17_SIN', '19_THA', '21_VN'])]\
.groupby(['TECHNOLOGY', 'FUEL', 'Sheet_energy', 'Sector']).sum().reset_index()
SEA_netz['economy'] = '22_SEA'
netz_refownsup_df1 = netz_refownsup_df1.append(SEA_netz).reset_index(drop = True)
# NEA
NEA_netz = netz_refownsup_df1[netz_refownsup_df1['economy']\
.isin(['06_HKC', '08_JPN', '09_ROK', '18_CT'])]\
.groupby(['TECHNOLOGY', 'FUEL', 'Sheet_energy', 'Sector']).sum().reset_index()
NEA_netz['economy'] = '23_NEA'
netz_refownsup_df1 = netz_refownsup_df1.append(NEA_netz).reset_index(drop = True)
# ONEA
ONEA_netz = netz_refownsup_df1[netz_refownsup_df1['economy']\
.isin(['06_HKC', '09_ROK', '18_CT'])]\
.groupby(['TECHNOLOGY', 'FUEL', 'Sheet_energy', 'Sector']).sum().reset_index()
ONEA_netz['economy'] = '23b_ONEA'
netz_refownsup_df1 = netz_refownsup_df1.append(ONEA_netz).reset_index(drop = True)
# OAM
OAM_netz = netz_refownsup_df1[netz_refownsup_df1['economy']\
.isin(['03_CDA', '04_CHL', '11_MEX', '14_PE'])]\
.groupby(['TECHNOLOGY', 'FUEL', 'Sheet_energy', 'Sector']).sum().reset_index()
OAM_netz['economy'] = '24_OAM'
netz_refownsup_df1 = netz_refownsup_df1.append(OAM_netz).reset_index(drop = True)
# OOAM
OOAM_netz = netz_refownsup_df1[netz_refownsup_df1['economy']\
.isin(['04_CHL', '11_MEX', '14_PE'])]\
.groupby(['TECHNOLOGY', 'FUEL', 'Sheet_energy', 'Sector']).sum().reset_index()
OOAM_netz['economy'] = '24b_OOAM'
netz_refownsup_df1 = netz_refownsup_df1.append(OOAM_netz).reset_index(drop = True)
# OCE
OCE_netz = netz_refownsup_df1[netz_refownsup_df1['economy']\
.isin(['01_AUS', '12_NZ', '13_PNG'])]\
.groupby(['TECHNOLOGY', 'FUEL', 'Sheet_energy', 'Sector']).sum().reset_index()
OCE_netz['economy'] = '25_OCE'
netz_refownsup_df1 = netz_refownsup_df1.append(OCE_netz).reset_index(drop = True)
# Refinery, own-use, supply and power
ref_trans_df1 = ref_power_df1.append(ref_refownsup_df1)
netz_trans_df1 = netz_power_df1.append(netz_refownsup_df1)
#################################################################
# Hydrogen sector
# Save the required dataframes for transformation charts in bossanova script
# Reference
ref_power_df1.to_csv(path_final + '/OSeMOSYS_power_reference.csv', index = False)
ref_refownsup_df1.to_csv(path_final + '/OSeMOSYS_refownsup_reference.csv', index = False)
ref_pow_capacity_df1.to_csv(path_final + '/OSeMOSYS_powcapacity_reference.csv', index = False)
ref_trans_df1.to_csv(path_final + '/OSeMOSYS_transformation_reference.csv', index = False)
# Net-zero
netz_power_df1.to_csv(path_final + '/OSeMOSYS_power_netzero.csv', index = False)
netz_refownsup_df1.to_csv(path_final + '/OSeMOSYS_refownsup_netzero.csv', index = False)
netz_pow_capacity_df1.to_csv(path_final + '/OSeMOSYS_powcapacity_netzero.csv', index = False)
netz_trans_df1.to_csv(path_final + '/OSeMOSYS_transformation_netzero.csv', index = False)
# Dataframes for demand sectors
# Save OSeMOSYS results dataframes
ref_osemo_only_1.to_csv(path_final + '/OSeMOSYS_only_reference.csv', index = False)
netz_osemo_only_1.to_csv(path_final + '/OSeMOSYS_only_netzero.csv', index = False)
# # Macro dataframes (opens in Bossanova)
# macro_GDP = pd.read_excel(path_mapping + '/Key Inputs.xlsx', sheet_name = 'GDP')
# macro_GDP.columns = macro_GDP.columns.astype(str)
# macro_GDP['Series'] = 'GDP 2018 USD PPP'
# macro_GDP = macro_GDP[['Economy', 'Series'] + list(macro_GDP.loc[:, '2000':'2050'])]
# macro_GDP = macro_GDP[macro_GDP['Economy'].isin(list(macro_GDP['Economy'].unique()))]
# macro_GDP.to_csv(path_final + '/macro_GDP.csv', index = False)
# macro_GDP_growth = pd.read_excel('./data/2_Mapping_and_other/Key Inputs.xlsx', sheet_name = 'GDP_growth')
# macro_GDP_growth.columns = macro_GDP_growth.columns.astype(str)
# macro_GDP_growth['Series'] = 'GDP growth'
# macro_GDP_growth = macro_GDP_growth[['Economy', 'Series'] + list(macro_GDP_growth.loc[:, '2000':'2050'])]
# macro_pop = pd.read_excel('./data/2_Mapping_and_other/Key Inputs.xlsx', sheet_name = 'Population')
# macro_pop.columns = macro_pop.columns.astype(str)
# macro_pop['Series'] = 'Population'
# macro_pop = macro_pop[['Economy', 'Series'] + list(macro_pop.loc[:, '2000':'2050'])]
# macro_GDPpc = pd.read_excel('./data/2_Mapping_and_other/Key Inputs.xlsx', sheet_name = 'GDP per capita')
# macro_GDPpc.columns = macro_GDPpc.columns.astype(str)
# macro_GDPpc['Series'] = 'GDP per capita'
# macro_GDPpc = macro_GDPpc[['Economy', 'Series'] + list(macro_GDPpc.loc[:, '2000':'2050'])]
################################################################################################
# EMISSIONS
EGEDA_emissions = pd.read_csv('./data/1_EGEDA/EGEDA_FC_CO2_Emissions_years_2018.csv')
agg_fuel = ['1_coal', '1_x_coal_thermal', '2_coal_products', '6_crude_oil_and_ngl', '6_x_ngls',
'7_petroleum_products', '7_x_jet_fuel', '7_x_other_petroleum_products', '8_gas', '16_others', '19_total']
EGEDA_emissions = EGEDA_emissions[~EGEDA_emissions['fuel_code'].isin(agg_fuel)].reset_index(drop = True)
########################## fuel_code aggregations ##########################
# lowest level
thermal_coal = ['1_2_other_bituminous_coal', '1_3_subbituminous_coal', '1_4_anthracite', '3_peat', '4_peat_products']
ngl = ['6_2_natural_gas_liquids', '6_3_refinery_feedstocks', '6_4_additives_oxygenates', '6_5_other_hydrocarbons']
other_petrol = ['7_12_white_spirit_sbp', '7_13_lubricants', '7_14_bitumen', '7_15_paraffin_waxes', '7_16_petroleum_coke', '7_17_other_products']
jetfuel = ['7_4_gasoline_type_jet_fuel', '7_5_kerosene_type_jet_fuel']
# First level and Total vector(s) defined at beginning of script
coal_prod_fuels = ['2_1_coke_oven_coke', '2_2_coke_oven_gas', '2_3_blast_furnace_gas', '2_4_other_recovered_gases', '2_5_patent_fuel', '2_6_coal_tar', '2_7_bkb_pb']
power_agg = ['9_1_main_activity_producer', '9_2_autoproducers']
# Change from negative to positive
neg_to_pos = ['9_x_power',
'9_1_main_activity_producer', '9_1_1_electricity_plants', '9_1_2_chp_plants', '9_1_3_heat_plants', '9_2_autoproducers',
'9_2_1_electricity_plants', '9_2_2_chp_plants', '9_2_3_heat_plants', '9_3_gas_processing_plants', '9_3_1_gas_works_plants',
'9_3_2_liquefaction_plants', '9_3_3_regasification_plants', '9_3_4_natural_gas_blending_plants', '9_3_5_gastoliquids_plants',
'9_4_oil_refineries', '9_5_coal_transformation', '9_5_1_coke_ovens', '9_5_2_blast_furnaces', '9_5_3_patent_fuel_plants',
'9_5_4_bkb_pb_plants', '9_5_5_liquefaction_coal_to_oil', '9_6_petrochemical_industry', '9_7_biofuels_processing',
'9_8_charcoal_processing', '9_9_nonspecified_transformation', '10_losses_and_own_use']
# Aggregations for Emissions dataframe
EGEDA_aggregate = pd.DataFrame()
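# The neg_to_pos list above flags transformation and own-use rows whose EGEDA emissions are
# reported with a negative sign. A hedged sketch of how that sign flip might be applied is
# shown below; the 'item_code_new' column name and the numeric-column selection are
# assumptions about the emissions frame, and the helper is not executed here.
def flip_sign_for_items(df, item_codes, item_col='item_code_new'):
    """Return a copy with numeric year columns negated for the listed item codes."""
    out = df.copy()
    year_cols = out.select_dtypes('number').columns
    out.loc[out[item_col].isin(item_codes), year_cols] *= -1
    return out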
import os
from tkinter import *
import pandas as pd
# UI set up
root = Tk()
root.title("Zoom Automator")
scroll = Scrollbar(root)
canvas = Canvas(root, width = 350, height = 350)
canvas.grid(columnspan = 7, rowspan = 7)
left_margin = Label(root, text = " ", padx = 7)
left_margin.grid(row = 0, column = 0)
bottom_margin = Label(root, text = " ", pady = 7)
bottom_margin.grid(row = 7, column = 0)
right_margin = Label(root, text = " ", padx = 7)
right_margin.grid(row = 0, column = 7)
top_margin = Label(root, text = " ", padx = 7)
top_margin.grid(row = 0, column = 3)
r = IntVar()
link_id_label = Label(root, text = "Link or ID", font = ("TkDefaultFont", 10))
passw_label = Label(root, text = "Password", font = ("TkDefaultFont", 10))
join_time = Label(root, text = "Start Time", font = ("TkDefaultFont", 10))
leave_time = Label(root, text = "Leave Time", font = ("TkDefaultFont", 10))
nick_name = Label(root, text = "Nickname", font = ("TkDefaultFont", 10))
check_btn = Checkbutton(root, variable=r, onvalue = 1, offvalue = 0)
link_id_label.grid(row = 1, column = 1)
passw_label.grid(row = 2, column = 1)
join_time.grid(row = 3, column = 1)
leave_time.grid(row = 4, column = 1)
nick_name.grid(row = 5, column = 1)
check_btn.grid(row = 6, column = 1)
entry1 = Entry(root, width = 20)
entry1.grid(row = 1, column = 2)
entry2 = Entry(root, width = 20)
entry2.grid(row = 2, column = 2)
entry3 = Entry(root, width = 20)
entry3.grid(row = 3, column = 2)
entry4 = Entry(root, width = 20)
entry4.grid(row = 4, column = 2)
entry5 = Entry(root, width = 20)
entry5.grid(row = 5, column = 2)
check_text = Label(root, text = "Leave zoom when 80%\nof participants have left")
check_text.grid(row = 6, column = 2)
divider = Label(root, text = " ", padx = 20)
divider.grid(row = 1, column = 3)
text_box = Text(root, height = 15, width = 30, state = DISABLED, pady = 0)
text_box.grid(row = 2, column = 4, rowspan = 4, columnspan = 2, sticky = N+S+E+W, pady = 20)
text_box.configure(yscrollcommand=scroll.set)
# Interaction setup for Add and Clear button
listZoom = []
def clickClear():
# Clears textbox and exits
listZoom.clear()
text_box.config(state = NORMAL)
text_box.delete("1.0", "end")
text_box.config(state = DISABLED)
# Display tutorial
#... to be implemented
# Clears CSV
df = pd.read_csv("C:\\Users\\giang\\zoom_auto2\\references\\record.csv")
colNames = df.columns
    df_new = pd.DataFrame(data=[], columns=colNames)
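# A hedged sketch of persisting the cleared record back to disk; the CSV path mirrors the
# hard-coded one above, and the helper is illustrative only (it is not bound to any button).
def write_cleared_record(df_empty, path="C:\\Users\\giang\\zoom_auto2\\references\\record.csv"):
    df_empty.to_csv(path, index=False)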
#!/usr/bin/env python
# -*-coding:utf-8 -*-
'''
@File : Stress_detection_script.py
@Time : 2022/03/17 09:45:59
@Author : <NAME>
@Contact : <EMAIL>
'''
import os
import logging
import plotly.express as px
import numpy as np
import pandas as pd
import zipfile
import fnmatch
import flirt.reader.empatica
import matplotlib.pyplot as plt
from tqdm import tqdm
from datetime import datetime, timedelta
import cvxopt as cv
from neurokit2 import eda_phasic
from matplotlib.font_manager import FontProperties
import matplotlib.dates as mdates
# rootPath = r"./"
# pattern = '*.zip'
rootPath = input("Enter Folder Path : ")
pattern = input("Enter File Name : ")
for root, dirs, files in os.walk(rootPath):
for filename in fnmatch.filter(files, pattern):
print(os.path.join(root, filename))
zipfile.ZipFile(os.path.join(root, filename)).extractall(
os.path.join(root, os.path.splitext(filename)[0]))
dir = os.path.splitext(pattern)[0]
# os.listdir(dir)
class process:
def moving_avarage_smoothing(X, k, description_str):
S = np.zeros(X.shape[0])
for t in tqdm(range(X.shape[0]), desc=description_str):
if t < k:
S[t] = np.mean(X[:t+1])
else:
S[t] = np.sum(X[t-k:t])/k
return S
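    # A hedged, vectorised sketch that mirrors the loop above: a trailing k-sample mean
    # (excluding the current sample) with an expanding mean for the first k samples.
    # It is illustrative only and is not called by the script.
    def moving_average_rolling(X, k):
        s = pd.Series(X)
        smoothed = s.rolling(window=k).mean().shift(1)  # mean of the k samples before t
        return smoothed.fillna(s.expanding().mean()).to_numpy()  # early samples: expanding mean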
def deviation_above_mean(unit, mean_unit, std_unit):
'''
Function takes 3 arguments
unit : number of Standard deviations above the mean
mean_unit : mean value of each signal
std_unit : standard deviation of each signal
'''
if unit == 0:
return (mean_unit)
else:
return (mean_unit + (unit*std_unit))
def Starting_timeStamp(column, time_frames, deviation_metric):
'''
        Function takes the signal, its timestamps and a threshold, and returns the starting times at which the signal crosses above the threshold value
'''
starting_time_index = []
for i in range(len(column)-1): #iterating till the end of the array
if column[i] < deviation_metric and column[i+1] > deviation_metric: # checking if the n+1 element is greater than nth element to conclude if the signal is increasing
starting_time_index.append(time_frames[i]) #appending the timestamp's index to the declared empty array
return starting_time_index
def Ending_timeStamp(column, time_frames, deviation_metric):
'''
        Function takes the signal, its timestamps and a threshold, and returns the ending times at which the signal drops back below the threshold value
'''
time_index = []
for i in range(len(column)-1):
if column[i] > deviation_metric and column[i+1] < deviation_metric: # checking if the n+1 element is lesser than nth element to conclude if the signal is decreasing
time_index.append(time_frames[i])
if column[len(column) - 1] > deviation_metric: # checking for hanging ends, where the signal stops abruptly
time_index.insert(
len(time_index), time_frames[len(time_frames) - 1]) # inserting the timestamp's index to the last index of the array
else:
pass
return time_index
def Extract_HRV_Information():
        global hrv_features # declared global so it can be accessed by the combined plot function
        global hrv_events_df # declared global so it can be accessed by the combined plot function
ibi = pd.read_csv(rootPath+'/'+dir+'\IBI.csv')
mean_ibi = ibi[' IBI'].mean()
average_heart_rate = 60/mean_ibi
print('mean ibi is :', mean_ibi)
print('mean heart rate :', average_heart_rate.round())
ibis = flirt.reader.empatica.read_ibi_file_into_df(
rootPath+'/'+dir + '\IBI.csv')
hrv_features = flirt.get_hrv_features(
ibis['ibi'], 128, 1, ["td", "fd"], 0.2)
hrv_features = hrv_features.dropna(how='any', axis=0)
hrv_features.reset_index(inplace=True)
hrv_features['datetime'] = hrv_features['datetime'].dt.tz_convert('US/Eastern')
hrv_features['datetime'] = pd.to_datetime(hrv_features['datetime'])
hrv_features['datetime'] = hrv_features['datetime'].apply(lambda x: datetime.replace(x, tzinfo=None))
# smoothing the curve
print('\n', '******************** Smoothing The Curve ********************', '\n')
MAG_K500 = process.moving_avarage_smoothing(
hrv_features['hrv_rmssd'], 500, "Processing HRV Data")
hrv_features['MAG_K500'] = MAG_K500
# hrv_features.to_csv("./Metadata/"+ dir+"_HRV.csv")
# hrv_features.to_csv(os.path.join('./Metadata'+dir+'_HRV.csv'))
mean_rmssd = hrv_features['hrv_rmssd'].mean()
std_rmssd = hrv_features['hrv_rmssd'].std()
        # getting the starting and ending times of the signal
starting_timestamp = process.Starting_timeStamp(hrv_features['MAG_K500'], hrv_features['datetime'],
process.deviation_above_mean(1, mean_rmssd, std_rmssd))
ending_timestamp = process.Ending_timeStamp(hrv_features['MAG_K500'], hrv_features['datetime'],
process.deviation_above_mean(1, mean_rmssd, std_rmssd))
        # the if-branch below handles the case where no events crossed the threshold
if len(starting_timestamp) < 1:
fig, ax1 = plt.subplots(figsize=(30, 10))
ax1.plot(hrv_features['datetime'],
hrv_features['MAG_K500'], color='red')
# fig.savefig('./Plots/HRV_figure.png')
else:
            # if the first ending precedes the first start, the signal was already above the threshold when recording began, so drop that unmatched ending timestamp to keep the two lists aligned
if starting_timestamp > ending_timestamp:
ending_timestamp.pop(0)
else:
pass
difference = [] # empty array to see how long the event lasts in seconds
time_delta_minutes = []
desired_time_index = []
zip_object = zip(ending_timestamp, starting_timestamp)
for list1_i, list2_i in zip_object:
# append each difference to list
difference.append(list1_i-list2_i) #subtracting ending timestamp - starting timestamp to get difference in seconds
for i in difference:
                time_delta_minutes.append(i.total_seconds()/60) # converting the difference in seconds to minutes
time_delta_minutes
for i in range(len(time_delta_minutes)):
                if time_delta_minutes[i] > 5.00: # keep only episodes longer than 5 minutes
desired_time_index.append(i)
starting_timestamp_df = pd.DataFrame(starting_timestamp)
ending_timestamp_df = pd.DataFrame(ending_timestamp)
frames = (starting_timestamp_df, ending_timestamp_df)
hrv_events_df = pd.concat(frames, axis=1)
hrv_events_df.columns = ['Starting Timestamp', 'Ending Timestamp']
hrv_events_df['Starting Timestamp'] = hrv_events_df['Starting Timestamp'].dt.strftime("%Y-%m-%d %H:%M:%S") #converting it to Y:M:D H:M:S to ignore nanoseconds in timestamp dataframe
hrv_events_df['Ending Timestamp'] = hrv_events_df['Ending Timestamp'].dt.strftime("%Y-%m-%d %H:%M:%S")
            hrv_events_df = hrv_events_df.loc[desired_time_index, :] # keep only the events whose duration exceeds the threshold
fig, ax = plt.subplots(figsize=(20, 6))
ax.plot(hrv_features['datetime'],
hrv_features['MAG_K500'], color='red')
for d in hrv_events_df.index:
ax.axvspan(hrv_events_df['Starting Timestamp'][d], hrv_events_df['Ending Timestamp']
[d], facecolor="g", edgecolor="none", alpha=0.5)
ax.relim()
ax.autoscale_view()
# fig.savefig('./Plots/HRV_figure.png')
return hrv_features, hrv_events_df
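    # Extract_HRV_Information above and Extract_ACC_Infromation below both pair the
    # starting/ending timestamps, convert the gaps to minutes, and keep episodes longer
    # than a cut-off. A hedged sketch of that shared logic as a single helper is given
    # below; it is illustrative only and is not called by the script.
    def episodes_longer_than(starting_timestamp, ending_timestamp, min_minutes):
        """Return a DataFrame of start/end pairs whose duration exceeds min_minutes."""
        n = min(len(starting_timestamp), len(ending_timestamp))
        pairs = pd.DataFrame({'Starting Timestamp': starting_timestamp[:n],
                              'Ending Timestamp': ending_timestamp[:n]})
        duration = (pairs['Ending Timestamp'] - pairs['Starting Timestamp']).dt.total_seconds() / 60
        return pairs[duration > min_minutes].reset_index(drop=True)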
def Extract_ACC_Infromation():
global acc_df
global acc_events_df
acc_df = pd.read_csv(rootPath+'/'+dir + '/ACC.csv')
acc_df = flirt.reader.empatica.read_acc_file_into_df(
rootPath+'/'+dir + '/ACC.csv')
acc_df['Magnitude'] = np.sqrt(
acc_df['acc_x']**2 + acc_df['acc_y']**2 + acc_df['acc_z']**2)
print("Magnitude Mean : ", acc_df['Magnitude'].mean())
acc_df.reset_index(inplace=True)
acc_df['datetime'] = acc_df['datetime'].dt.tz_convert('US/Eastern')
acc_df['datetime'] = pd.to_datetime(acc_df['datetime'])
acc_df['datetime'] = acc_df['datetime'].apply(lambda x: datetime.replace(x, tzinfo=None))
print('\n', '******************** Smoothing The ACC Curve ********************', '\n')
MAG_K500 = process.moving_avarage_smoothing(
acc_df['Magnitude'], 15000, "Processing ACC Data")
acc_df['MAG_K500'] = MAG_K500
# acc_df.to_csv("./Metadata/"+ dir+"_ACC.csv")
mean_acc_magnitude = acc_df['Magnitude'].mean()
std_acc_magnitude = acc_df['Magnitude'].std()
print("Average Magnitude of the Acc Data : ", mean_acc_magnitude)
starting_timestamp = process.Starting_timeStamp(acc_df['MAG_K500'], acc_df['datetime'],
process.deviation_above_mean(0.20, mean_acc_magnitude, std_acc_magnitude))
ending_timestamp = process.Ending_timeStamp(acc_df['MAG_K500'], acc_df['datetime'],
process.deviation_above_mean(0.20, mean_acc_magnitude, std_acc_magnitude))
if len(starting_timestamp) < 1:
fig, ax2 = plt.subplots(figsize=(30, 10))
ax2.plot(acc_df['datetime'], acc_df['MAG_K500'], color='red')
fig.savefig('./Plots/ACC_figure.png')
else:
if starting_timestamp > ending_timestamp:
ending_timestamp.pop(0)
difference = [] # initialization of result list
time_delta_minutes = []
desired_time_index = []
zip_object = zip(ending_timestamp, starting_timestamp)
for list1_i, list2_i in zip_object:
# append each difference to list
difference.append(list1_i-list2_i)
for i in difference:
time_delta_minutes.append(i.total_seconds()/60)
for i in range(len(time_delta_minutes)):
if time_delta_minutes[i] > 2.00:
desired_time_index.append(i)
starting_timestamp_df = pd.DataFrame(starting_timestamp)
            ending_timestamp_df = pd.DataFrame(ending_timestamp)
# Copyright (c) 2013-2015 Siphon Contributors.
# Distributed under the terms of the BSD 3-Clause License.
# SPDX-License-Identifier: BSD-3-Clause
"""Read data from the National Data Buoy Center."""
from io import StringIO
import warnings
import numpy as np
import pandas as pd
import requests
from ..http_util import HTTPEndPoint
warnings.filterwarnings('ignore', "Pandas doesn\'t allow columns to be created", UserWarning)
class NDBC(HTTPEndPoint):
"""Download and parse data from the National Data Buoy Center."""
def __init__(self):
"""Set up endpoint."""
super(NDBC, self).__init__('https://www.ndbc.noaa.gov/')
@classmethod
def realtime_observations(cls, buoy, data_type='txt'):
"""Retrieve the realtime buoy data from NDBC.
Parameters
----------
buoy : str
Name of buoy
data_type : str
Type of data requested, must be one of
'txt' standard meteorological data
'drift' meteorological data from drifting buoys and limited moored buoy data
mainly from international partners
'cwind' continuous winds data (10 minute average)
'spec' spectral wave summaries
'ocean' oceanographic data
'srad' solar radiation data
'dart' water column height
'supl' supplemental measurements data
'rain' hourly rain data
Returns
-------
Raw data string
"""
endpoint = cls()
parsers = {'txt': endpoint._parse_met,
'drift': endpoint._parse_drift,
'cwind': endpoint._parse_cwind,
'spec': endpoint._parse_spec,
'ocean': endpoint._parse_ocean,
'srad': endpoint._parse_srad,
'dart': endpoint._parse_dart,
'supl': endpoint._parse_supl,
'rain': endpoint._parse_rain}
if data_type not in parsers:
raise KeyError('Data type must be txt, drift, cwind, spec, ocean, srad, dart,'
'supl, or rain for parsed realtime data.')
raw_data = endpoint.raw_buoy_data(buoy, data_type=data_type)
return parsers[data_type](raw_data)
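    # Example usage (kept as comments: it is not executed here, needs network access,
    # and assumes buoy '41002' is currently reporting):
    #     df = NDBC.realtime_observations('41002')             # standard meteorological data
    #     wind = NDBC.realtime_observations('41002', 'cwind')  # 10-minute continuous winds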
@staticmethod
def _parse_met(content):
"""Parse standard meteorological data from NDBC buoys.
Parameters
----------
content : str
Data to parse
Returns
-------
:class:`pandas.DataFrame` containing the data
"""
col_names = ['year', 'month', 'day', 'hour', 'minute',
'wind_direction', 'wind_speed', 'wind_gust',
'wave_height', 'dominant_wave_period', 'average_wave_period',
'dominant_wave_direction', 'pressure',
'air_temperature', 'water_temperature', 'dewpoint',
'visibility', '3hr_pressure_tendency', 'water_level_above_mean']
col_units = {'wind_direction': 'degrees',
'wind_speed': 'meters/second',
'wind_gust': 'meters/second',
'wave_height': 'meters',
'dominant_wave_period': 'seconds',
'average_wave_period': 'seconds',
'dominant_wave_direction': 'degrees',
'pressure': 'hPa',
'air_temperature': 'degC',
'water_temperature': 'degC',
'dewpoint': 'degC',
'visibility': 'nautical_mile',
'3hr_pressure_tendency': 'hPa',
'water_level_above_mean': 'feet',
'time': None}
df = pd.read_csv(StringIO(content), comment='#', na_values='MM',
names=col_names, sep=r'\s+')
df['time'] = pd.to_datetime(df[['year', 'month', 'day', 'hour', 'minute']], utc=True)
df = df.drop(columns=['year', 'month', 'day', 'hour', 'minute'])
df.units = col_units
return df
@staticmethod
def _parse_drift(content):
"""Parse meteorological data from drifting buoys and limited moored buoy data.
Parameters
----------
content : str
Data to parse
Returns
-------
:class:`pandas.DataFrame` containing the data
"""
col_names = ['year', 'month', 'day', 'hour_minute',
'latitude', 'longitude',
'wind_direction', 'wind_speed', 'wind_gust',
'pressure', '3hr_pressure_tendency',
'air_temperature', 'water_temperature']
col_units = {'latitude': 'degrees',
'longitude': 'degrees',
'wind_direction': 'degrees',
'wind_speed': 'meters/second',
'wind_gust': 'meters/second',
'pressure': 'hPa',
'air_temperature': 'degC',
'water_temperature': 'degC',
'3hr_pressure_tendency': 'hPa',
'time': None}
df = pd.read_csv(StringIO(content), comment='#', na_values='MM',
names=col_names, sep=r'\s+')
df['hour'] = np.floor(df['hour_minute'] / 100)
df['minute'] = df['hour_minute'] - df['hour'] * 100
df['time'] = pd.to_datetime(df[['year', 'month', 'day', 'hour', 'minute']], utc=True)
df = df.drop(columns=['year', 'month', 'day', 'hour_minute', 'hour', 'minute'])
df.units = col_units
return df
@staticmethod
def _parse_cwind(content):
"""Parse continuous wind data (10 minute average).
Parameters
----------
content : str
Data to parse
Returns
-------
:class:`pandas.DataFrame` containing the data
"""
col_names = ['year', 'month', 'day', 'hour', 'minute',
'wind_direction', 'wind_speed', 'gust_direction',
'wind_gust', 'gust_time']
col_units = {'wind_direction': 'degrees',
'wind_speed': 'meters/second',
'gust_direction': 'degrees',
'wind_gust': 'meters/second',
'gust_time': None,
'time': None}
df = pd.read_csv(StringIO(content), comment='#', na_values='MM',
names=col_names, sep=r'\s+')
df['gust_direction'] = df['gust_direction'].replace(999, np.nan)
df['wind_gust'] = df['wind_gust'].replace(99.0, np.nan)
        df['time'] = pd.to_datetime(df[['year', 'month', 'day', 'hour', 'minute']], utc=True)
# -*- coding: utf-8 -*-
"""
Load averaged results from csv, containing scores of all ckpts.
For each exp group, return the best ckpt (ranked by valid performance).
"""
import shutil
import configargparse
import os
import pandas as pd
__author__ = "<NAME>"
__email__ = "<EMAIL>"
dev_test_pairs = [
('kp20k_valid2k', 'kp20k'),
# ('kp20k_valid2k', 'duc'),
('openkp_valid2k', 'openkp'),
('kptimes_valid2k', 'kptimes'),
# ('kptimes_valid2k', 'jptimes'),
# ('kptimes_valid2k', 'duc'),
('stackex_valid2k', 'stackex'),
# ('kp20k_valid2k', 'inspec'),
# ('kp20k_valid2k', 'krapivin'),
# ('kp20k_valid2k', 'nus'),
# ('kp20k_valid2k', 'semeval'),
]
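# main() below repeatedly sorts a sub-frame and takes its top row. A hedged sketch of that
# selection as a helper is given here for reference; it is not called by main(), and the
# metric/step column names simply mirror the ones used below.
def pick_best_row(df, by_metric='all_exact_f_score@k', use_last_ckpt=False):
    """Return the top-ranked row (by metric, or by latest step) as a one-row DataFrame."""
    if df is None or len(df) == 0:
        return None
    sort_col = 'step' if use_last_ckpt else by_metric
    ranked = df.sort_values(by=sort_col, ascending=False)
    return ranked.iloc[0].to_frame().transpose()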
def main():
parser = configargparse.ArgumentParser()
parser.add_argument('-report_dir', type=str, required=True, help='Directory to all report csv files.')
parser.add_argument('-pred_name', type=str, required=False, help='Filter by pred_name, since there exists results by multiple decoding settings from the same ckpt.')
parser.add_argument('-export_dir', type=str, required=False, default=None, help='If set, the best pred/eval files will be copied to this place.')
parser.add_argument('-report_selfbest', action='store_true', help='')
parser.add_argument('-report_lastckpt', action='store_true', help='')
opt = parser.parse_args()
kp_df = None
for f in os.listdir(opt.report_dir):
if not f.endswith('.csv'): continue
print(f)
df = pd.read_csv(os.path.join(opt.report_dir, f))
kp_df = df if kp_df is None else pd.concat([kp_df, df], sort=True)
# rearrange cols since paths take a lot of space
cols = df.columns.tolist()
path_cols = [c for c in cols if c.endswith('_path')]
not_path_cols = [c for c in cols if not c.endswith('_path')]
kp_df = kp_df[not_path_cols + path_cols]
if opt.pred_name is None:
if len(kp_df.pred_name.unique()) > 1:
print('Found multiple decoding settings, please set opt.pred_name to avoid mixed results')
print(kp_df.pred_name.unique().tolist())
raise Exception()
else:
kp_df = kp_df.loc[kp_df.pred_name == opt.pred_name]
# kp_df = kp_df.loc[kp_df.exp_name.str.contains("PT_step200k")]
print(len(kp_df))
# print(kp_df.columns)
for exp_name in kp_df.exp_name.unique():
print(exp_name, len(kp_df.loc[kp_df.exp_name == exp_name]))
exp_names = kp_df.exp_name.unique()
anchor_metric_name = 'all_exact_f_score@k'
# anchor_metric_name = 'present_exact_f_score@k'
for dev_test_pair in dev_test_pairs:
# for transfer results
dev_name = dev_test_pair[0] + '_test'
test_name = dev_test_pair[1] + '_test'
# for empirical results
# dev_name = dev_test_pair[0]
# test_name = dev_test_pair[1]
devbest_dev_rows, devbest_test_rows = None, None
selfbest_dev_rows, selfbest_test_rows = None, None
for exp_name in exp_names:
exp_df = kp_df.loc[kp_df.exp_name == exp_name]
dev_df = exp_df.loc[exp_df.test_dataset == dev_name]
test_df = exp_df.loc[exp_df.test_dataset == test_name]
if opt.report_lastckpt:
dev_df = dev_df.sort_values(by='step', ascending=False)
test_df = test_df.sort_values(by='step', ascending=False)
else:
dev_df = dev_df.sort_values(by=anchor_metric_name, ascending=False)
test_df = test_df.sort_values(by=anchor_metric_name, ascending=False)
if len(dev_df) == 0: continue
dev_row = dev_df.iloc[0].to_frame().transpose()
selfbest_dev_row = dev_row
            selfbest_dev_rows = dev_row if selfbest_dev_rows is None else pd.concat([selfbest_dev_rows, dev_row])
#!/usr/bin/env python
"""
Fuse csv files into one single file.
"""
## file_fuser.py
### fuse individual files into one giant file
import pandas as pd
import os
import argparse
from abc import ABCMeta, abstractmethod
class CsvFuserAbs(object, metaclass=ABCMeta):
## This object initialized from command line arguments when used in a Python script
def __init__(self):
## parse named arguments from command line
self.parser = argparse.ArgumentParser()
        self.parser.add_argument('--inputStart', '-i', help="input file name starting pattern (only files that match will be fused)", type=str)
self.parser.add_argument('--input_index', '-idx', help="enter True if input has an index / Omit this argument if it doesn't", type=bool, default=False)
self.parser.add_argument('--output', '-o', help="Name of final output file", type= str)
self.parser.add_argument('--output_index', '-odx', help="enter true to output an index / Omit this argument to leave off the index", type=bool, default=False)
self.parser.add_argument('--dir', '-d', help="input files directory", type= str, default=".")
self.args=self.parser.parse_args()
self.pyVer = "3.6.1"
self.tmpDF = pd.DataFrame() # holds entire input file when
self.tmpDF2 = pd.DataFrame() # DF to get overwritten by each file fragment
def fuse_files(self):
fileStart = self.args.inputStart # start of filenames to fuse here (assumes all files have a pattern starting with this)
outfilename = self.args.output # output file to merge files into
path = self.args.dir # directory files are found in
print("Input files will be located at: ", path)
print("args.input_index is set to:", str(type(self.args.input_index)), self.args.input_index)
print("args.output_index is set to:", str(type(self.args.output_index)), self.args.output_index)
filesInDir = os.listdir(path)
# print(type(filesInDir)) # is list
filesInDir.sort()
outDF = pd.DataFrame()
        tmpDF = pd.DataFrame()
# Adapted from code written by <NAME>
import pandas as pd
from tqdm import tqdm
import numpy as np
import datetime
'''
Sources are currently human, mouse, and rat, in order of descending priority.
'''
sources = [
'ftp://ftp.ncbi.nih.gov/gene/DATA/GENE_INFO/Mammalia/Rattus_norvegicus.gene_info.gz',
'ftp://ftp.ncbi.nih.gov/gene/DATA/GENE_INFO/Mammalia/Mus_musculus.gene_info.gz',
'ftp://ftp.ncbi.nih.gov/gene/DATA/GENE_INFO/Mammalia/Homo_sapiens.gene_info.gz'
]
def get_dictionary(path, mapfrom):
'''
Returns two dictionaries, the first a mapping from symbols to approved gene
symbols (synonyms), the second a mapping from approved symbols to Entrez
Gene IDs from an NCBI gene info source designated by path.
'''
column = {
'synonyms': 'Synonyms',
'ensembl': 'dbXrefs'
}[mapfrom]
ncbi = pd.read_csv(path, sep='\t', usecols=[
'Symbol', 'GeneID', column])
def split_list(v): return v.split('|') if type(v) == str else []
ncbi[column] = ncbi[column].apply(split_list)
# Map existing entities to NCBI Genes
symbol_lookup = {}
geneid_lookup = {}
for i in ncbi.index:
approved_sym = ncbi.loc[i, 'Symbol']
v = ncbi.loc[i, 'GeneID']
geneid_lookup[approved_sym] = v if v != '-' else np.nan
if mapfrom == 'synonyms':
for sym in [approved_sym] + ncbi.loc[i, column]:
if not (sym == '-'):
symbol_lookup[sym] = approved_sym
elif mapfrom == 'ensembl':
for sym in ncbi.loc[i, column]:
if sym.startswith('Ensembl:'):
symbol_lookup[sym[len('Ensembl:'):]] = approved_sym
return symbol_lookup, geneid_lookup
def get_lookups(mapfrom='synonyms'):
'''
Returns two dictionaries, the first a mapping from symbols to approved gene
symbols (synonyms), the second a mapping from approved symbols to Entrez
Gene IDs.
'''
symbol_lookup = {}
geneid_lookup = {}
for source in tqdm(sources, desc='Gathering sources'):
sym, gene = get_dictionary(source, mapfrom)
symbol_lookup.update(sym)
geneid_lookup.update(gene)
return symbol_lookup, geneid_lookup
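# A hedged end-to-end sketch tying the functions above and below together; it is not executed
# on import, and running it downloads the three NCBI gene_info files listed in `sources`.
def build_and_save_lookups(mapfrom='synonyms'):
    """Gather the symbol and GeneID lookups from NCBI and write the dated CSV files."""
    symbol_lookup, geneid_lookup = get_lookups(mapfrom)
    save_lookup(symbol_lookup, geneid_lookup)
    return symbol_lookup, geneid_lookup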
def save_lookup(symbol_lookup, geneid_lookup):
'''
Save the lookups as pandas DataFrames. They are saved as:
symbol_lookup_<year>_<month>.csv
geneid_lookup_<year>_<month>.csv
'''
date = str(datetime.date.today())[0:7].replace('-', '_')
symbol = pd.DataFrame.from_dict(
symbol_lookup, orient='index', columns=['Approved Symbol'])
geneid = pd.DataFrame.from_dict(
geneid_lookup, orient='index', columns=['Entrez Gene ID'])
symbol.to_csv('symbol_lookup_{}.csv'.format(date), sep='\t')
geneid.to_csv('geneid_lookup_{}.csv'.format(date), sep='\t')
def load_lookup(symbol_path, geneid_path):
'''
Loads the lookups from custom paths. The files should be tab-separated
files. Returns the symbol and geneid lookups as dictionaries.
'''
symbol = pd.read_csv(symbol_path, sep='\t', na_filter=False)
    geneid = pd.read_csv(geneid_path, sep='\t', na_filter=False)
from flask import Flask, request, jsonify
import json
from sklearn.decomposition import PCA
from sklearn.preprocessing import StandardScaler
from sklearn.cluster import KMeans
# import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
# from scipy.spatial import distance
from sklearn.manifold import MDS
from sklearn.model_selection import train_test_split
from sklearn.metrics.pairwise import pairwise_distances
import time
import itertools
app = Flask(__name__)
dataset, datasetRandomSampled, datasetStratified = None, None, None
clusterColors = ["gold", "blue", "green", "yellow", "slateblue", "grey", "orange", "pink", "brown"]
clusterLabels = None
def performRandomSampling(dataset):
count_row = dataset.shape[0]
numberOfSamplesNeeded = int(0.25 * count_row)
chosen_idx = np.random.choice(count_row, replace=False, size=numberOfSamplesNeeded)
dataset = dataset.iloc[chosen_idx]
return dataset
def performOneHotEncoding(dataset, feature):
dataset = pd.get_dummies(dataset,prefix=[feature])
return dataset
def dropTargetColoumnFromDataset(dataset, feature):
dataset = dataset.drop(feature, 1)
return dataset
def performStandardisation(dataset):
dataset = pd.DataFrame(StandardScaler().fit_transform(dataset),columns = dataset.columns)
return dataset
def calculateCumulativeSum(percentages):
aggregator = 0
cumulativeSum = []
for x in percentages:
aggregator += x
cumulativeSum.append(aggregator)
return cumulativeSum
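# calculateCumulativeSum above is presumably fed the per-component explained-variance
# percentages from PCA. A hedged sketch of producing those percentages is given below; it is
# illustrative only and is not wired into any Flask route.
def explainedVariancePercentages(dataset, n_components=None):
    """Return per-component explained variance (in percent) for a standardised dataset."""
    pca = PCA(n_components=n_components)
    pca.fit(performStandardisation(dataset))
    return list(pca.explained_variance_ratio_ * 100)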
def makeDatasetTransferrable(dataset):
output = {}
output["info"] = []
for _, row in dataset.iterrows():
data = {}
data['education'] = row['education']
data['currentSmoker'] = int(row['currentSmoker'])
data['cigsPerDay'] = row['cigsPerDay']
data['BPMeds'] = row['BPMeds']
data['totChol'] = row['totChol']
data['sysBP'] = row['sysBP']
data['diaBP'] = row['diaBP']
data['BMI'] = row['BMI']
data['heartRate'] = row['heartRate']
data['glucose'] = row['glucose']
data['gender'] = int(row['gender'])
data['age'] = int(row['age'])
data['prevalentStroke'] = int(row['prevalentStroke'])
data['prevalentHyp'] = int(row['prevalentHyp'])
data['diabetes'] = int(row['diabetes'])
data['TenYearCHD'] = int(row['TenYearCHD'])
output["info"].append(data)
return output
def load_dataset(path):
    dataset = pd.read_csv(path)
# ---
# jupyter:
# jupytext:
# formats: ipynb,py
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.9.1
# kernelspec:
# display_name: Python [conda env:core_acc] *
# language: python
# name: conda-env-core_acc-py
# ---
# # Composition of compendia
#
# This notebook makes figures that illustrate the composition of the compendia. In particular this notebook takes metadata files (generated by <NAME> from Dartmouth) and creates plots to show the different types of media used in experiments and what types of genetic malnipulations were used as well.
# %load_ext autoreload
# %autoreload 2
# %matplotlib inline
import os
import pandas as pd
import plotnine as pn
import seaborn as sns
import matplotlib.pyplot as plt
# Import metadata files
pao1_metadata_filename = "PAO1TableVF1.csv"
pa14_metadata_filename = "PA14TableVF1.csv"
pao1_metadata = pd.read_csv(pao1_metadata_filename, header=0, index_col=0)
pa14_metadata = pd.read_csv(pa14_metadata_filename, header=0, index_col=0)
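# The metadata columns that encode growth medium and genetic manipulation are not fixed
# here, so the sketch below is illustrative only: it assumes a categorical column such as
# "media" exists in each metadata table and simply counts samples per category for plotting.
def plot_composition(metadata, column="media", title=""):
    counts = metadata[column].value_counts().rename_axis(column).reset_index(name="count")
    return (
        pn.ggplot(counts, pn.aes(x=column, y="count"))
        + pn.geom_col()
        + pn.coord_flip()
        + pn.labs(title=title)
    )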
import warnings
import numpy as np
import pandas as pd
warnings.filterwarnings("ignore")
Data = pd.read_excel(io="Training_Data.xlsx", sheet_name="E0_Mod")
D = pd.DataFrame(Data[['HS','AS','HST','AST','HF','AF','HC','AC','HY','AY','HR','AR']].T)
D2 = pd.DataFrame(Data[['HS','AS','HST','AST','HF','AF','HC','AC','HY','AY','HR','AR']].T)
#%% Level (1)
for x in range (11):
for i in range (255):
if D.iloc[x][i] >= (max(D.iloc[x][:])-min(D.iloc[x][:]))/2 :
D.iloc[x][i] = 1
else:
D.iloc[x][i] = 0
DT = D.T
#%% Computing Entropy(S) and Gains
columns = list(DT)
Sammary = pd.DataFrame()
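#%% A hedged sketch of the entropy and information-gain calculations named above.
# The helpers below are generic and are not called by this script; the choice of target
# column (e.g. a match-result label) is left to the analysis that follows.
def entropy(labels):
    """Shannon entropy of a label series."""
    probs = pd.Series(labels).value_counts(normalize=True)
    return float(-(probs * np.log2(probs)).sum())
def information_gain(feature, labels):
    """Entropy(S) minus the weighted entropy of the subsets induced by the feature."""
    feature = pd.Series(feature).reset_index(drop=True)
    labels = pd.Series(labels).reset_index(drop=True)
    weighted = sum((feature == v).mean() * entropy(labels[feature == v])
                   for v in feature.unique())
    return entropy(labels) - weighted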
"""
Tasks
-------
Search and transform jsonable structures, specifically to make it 'easy' to make tabular/csv output for other consumers.
Example
~~~~~~~~~~~~~
*give me a list of all the fields called 'id' in this stupid, gnarly
thing*
>>> Q('id',gnarly_data)
['id1','id2','id3']
Observations:
---------------------
1) 'simple data structures' exist and are common. They are tedious
to search.
2) The DOM is another nested / treeish structure, and jQuery selector is
a good tool for that.
3a) R, Numpy, Excel and other analysis tools want 'tabular' data. These
analyses are valuable and worth doing.
3b) Dot/Graphviz, NetworkX, and some other analyses *like* treeish/dicty
things, and those analyses are also worth doing!
3c) Some analyses are best done using 'one-off' and custom code in C, Python,
or another 'real' programming language.
4) Arbitrary transforms are tedious and error prone. SQL is one solution,
XSLT is another,
5) the XPATH/XML/XSLT family is.... not universally loved :) They are
very complete, and the completeness can make simple cases... gross.
6) For really complicated data structures, we can write one-off code. Getting
80% of the way is mostly okay. There will always have to be programmers
in the loop.
7) Re-inventing SQL is probably a failure mode. So is reinventing XPATH, XSLT
and the like. Be wary of mission creep! Re-use when possible (e.g., can
we put the thing into a DOM using
8) If the interface is good, people can improve performance later.
Simplifying
---------------
1) Assuming 'jsonable' structures
2) keys are strings or stringlike. Python allows any hashable to be a key.
For now, we pretend that doesn't happen.
3) assumes most dicts are 'well behaved'. DAG, no cycles!
4) assume that if people want really specialized transforms, they can do it
themselves.
"""
from __future__ import print_function
from collections import namedtuple
import csv
import itertools
from itertools import product
from operator import attrgetter as aget, itemgetter as iget
import operator
import sys
from pandas.compat import map, u, callable, Counter
import pandas.compat as compat
## note 'url' appears multiple places and not all extensions have same struct
ex1 = {
'name': 'Gregg',
'extensions': [
{'id':'hello',
'url':'url1'},
{'id':'gbye',
'url':'url2',
'more': dict(url='url3')},
]
}
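## Illustrative sketch only (not the implementation in this module): a minimal
## recursive search that collects every value stored under a given key in a
## nested, jsonable structure, in the spirit of the Q('id', gnarly_data)
## example from the module docstring. With ex1 above, find_key('url', ex1)
## would return ['url1', 'url2', 'url3'].
def find_key(key, obj):
    found = []
    if isinstance(obj, dict):
        for k, v in obj.items():
            if k == key:
                found.append(v)
            found.extend(find_key(key, v))
    elif isinstance(obj, (list, tuple)):
        for item in obj:
            found.extend(find_key(key, item))
    return found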
## much longer example
ex2 = {u('metadata'): {u('accessibilities'): [{u('name'): u('accessibility.tabfocus'),
u('value'): 7},
{u('name'): u('accessibility.mouse_focuses_formcontrol'), u('value'): False},
{u('name'): u('accessibility.browsewithcaret'), u('value'): False},
{u('name'): u('accessibility.win32.force_disabled'), u('value'): False},
{u('name'): u('accessibility.typeaheadfind.startlinksonly'), u('value'): False},
{u('name'): u('accessibility.usebrailledisplay'), u('value'): u('')},
{u('name'): u('accessibility.typeaheadfind.timeout'), u('value'): 5000},
{u('name'): u('accessibility.typeaheadfind.enabletimeout'), u('value'): True},
{u('name'): u('accessibility.tabfocus_applies_to_xul'), u('value'): False},
{u('name'): u('accessibility.typeaheadfind.flashBar'), u('value'): 1},
{u('name'): u('accessibility.typeaheadfind.autostart'), u('value'): True},
{u('name'): u('accessibility.blockautorefresh'), u('value'): False},
{u('name'): u('accessibility.browsewithcaret_shortcut.enabled'),
u('value'): True},
{u('name'): u('accessibility.typeaheadfind.enablesound'), u('value'): True},
{u('name'): u('accessibility.typeaheadfind.prefillwithselection'),
u('value'): True},
{u('name'): u('accessibility.typeaheadfind.soundURL'), u('value'): u('beep')},
{ | u('name') | pandas.compat.u |
import itertools
import os
import random
import tempfile
from unittest import mock
import pandas as pd
import pytest
import pickle
import numpy as np
import string
import multiprocessing as mp
from copy import copy
import dask
import dask.dataframe as dd
from dask.dataframe._compat import tm, assert_categorical_equal
from dask import delayed
from dask.base import compute_as_if_collection
from dask.optimization import cull
from dask.dataframe.shuffle import (
shuffle,
partitioning_index,
rearrange_by_column,
rearrange_by_divisions,
maybe_buffered_partd,
remove_nans,
)
from dask.dataframe.utils import assert_eq, make_meta
from dask.dataframe._compat import PANDAS_GT_120
dsk = {
("x", 0): pd.DataFrame({"a": [1, 2, 3], "b": [1, 4, 7]}, index=[0, 1, 3]),
("x", 1): pd.DataFrame({"a": [4, 5, 6], "b": [2, 5, 8]}, index=[5, 6, 8]),
("x", 2): pd.DataFrame({"a": [7, 8, 9], "b": [3, 6, 9]}, index=[9, 9, 9]),
}
meta = make_meta({"a": "i8", "b": "i8"}, index=pd.Index([], "i8"))
d = dd.DataFrame(dsk, "x", meta, [0, 4, 9, 9])
full = d.compute()
CHECK_FREQ = {}
if dd._compat.PANDAS_GT_110:
CHECK_FREQ["check_freq"] = False
shuffle_func = shuffle # conflicts with keyword argument
@pytest.mark.parametrize("shuffle", ["disk", "tasks"])
def test_shuffle(shuffle):
s = shuffle_func(d, d.b, shuffle=shuffle)
assert isinstance(s, dd.DataFrame)
assert s.npartitions == d.npartitions
x = dask.get(s.dask, (s._name, 0))
y = dask.get(s.dask, (s._name, 1))
assert not (set(x.b) & set(y.b)) # disjoint
assert set(s.dask).issuperset(d.dask)
assert shuffle_func(d, d.b)._name == shuffle_func(d, d.b)._name
def test_default_partitions():
assert shuffle(d, d.b).npartitions == d.npartitions
def test_shuffle_npartitions_task():
df = pd.DataFrame({"x": np.random.random(100)})
ddf = dd.from_pandas(df, npartitions=10)
s = shuffle(ddf, ddf.x, shuffle="tasks", npartitions=17, max_branch=4)
sc = s.compute(scheduler="sync")
assert s.npartitions == 17
assert set(s.dask).issuperset(set(ddf.dask))
assert len(sc) == len(df)
assert list(s.columns) == list(df.columns)
assert set(map(tuple, sc.values.tolist())) == set(map(tuple, df.values.tolist()))
@pytest.mark.parametrize("method", ["disk", "tasks"])
def test_index_with_non_series(method):
from dask.dataframe.tests.test_multi import list_eq
list_eq(shuffle(d, d.b, shuffle=method), shuffle(d, "b", shuffle=method))
@pytest.mark.parametrize("method", ["disk", "tasks"])
def test_index_with_dataframe(method):
res1 = shuffle(d, d[["b"]], shuffle=method).compute()
res2 = shuffle(d, ["b"], shuffle=method).compute()
res3 = shuffle(d, "b", shuffle=method).compute()
assert sorted(res1.values.tolist()) == sorted(res2.values.tolist())
assert sorted(res1.values.tolist()) == sorted(res3.values.tolist())
@pytest.mark.parametrize("method", ["disk", "tasks"])
def test_shuffle_from_one_partition_to_one_other(method):
df = pd.DataFrame({"x": [1, 2, 3]})
a = dd.from_pandas(df, 1)
for i in [1, 2]:
b = shuffle(a, "x", npartitions=i, shuffle=method)
assert len(a.compute(scheduler="sync")) == len(b.compute(scheduler="sync"))
@pytest.mark.parametrize("method", ["disk", "tasks"])
def test_shuffle_empty_partitions(method):
df = pd.DataFrame({"x": [1, 2, 3] * 10})
ddf = dd.from_pandas(df, npartitions=3)
s = shuffle(ddf, ddf.x, npartitions=6, shuffle=method)
parts = compute_as_if_collection(dd.DataFrame, s.dask, s.__dask_keys__())
for p in parts:
assert s.columns == p.columns
df2 = pd.DataFrame(
{
"i32": np.array([1, 2, 3] * 3, dtype="int32"),
"f32": np.array([None, 2.5, 3.5] * 3, dtype="float32"),
"cat": pd.Series(["a", "b", "c"] * 3).astype("category"),
"obj": pd.Series(["d", "e", "f"] * 3),
"bool": np.array([True, False, True] * 3),
"dt": pd.Series(pd.date_range("20130101", periods=9)),
"dt_tz": pd.Series(pd.date_range("20130101", periods=9, tz="US/Eastern")),
"td": pd.Series(pd.timedelta_range("2000", periods=9)),
}
)
def test_partitioning_index():
res = partitioning_index(df2.i32, 3)
assert ((res < 3) & (res >= 0)).all()
assert len(np.unique(res)) > 1
assert (partitioning_index(df2.i32, 3) == partitioning_index(df2.i32, 3)).all()
res = partitioning_index(df2[["i32"]], 3)
assert ((res < 3) & (res >= 0)).all()
assert len(np.unique(res)) > 1
res = partitioning_index(df2[["cat", "bool", "f32"]], 2)
assert ((0 <= res) & (res < 2)).all()
res = partitioning_index(df2.index, 4)
assert ((res < 4) & (res >= 0)).all()
assert len(np.unique(res)) > 1
def test_partitioning_index_categorical_on_values():
df = pd.DataFrame({"a": list(string.ascii_letters), "b": [1, 2, 3, 4] * 13})
df.a = df.a.astype("category")
df2 = df.copy()
df2.a = df2.a.cat.set_categories(list(reversed(df2.a.cat.categories)))
res = partitioning_index(df.a, 5)
res2 = partitioning_index(df2.a, 5)
assert (res == res2).all()
res = partitioning_index(df, 5)
res2 = partitioning_index(df2, 5)
assert (res == res2).all()
@pytest.mark.parametrize(
"npartitions", [1, 4, 7, pytest.param(23, marks=pytest.mark.slow)]
)
def test_set_index_tasks(npartitions):
df = pd.DataFrame(
{"x": np.random.random(100), "y": np.random.random(100) // 0.2},
index=np.random.random(100),
)
ddf = dd.from_pandas(df, npartitions=npartitions)
assert_eq(df.set_index("x"), ddf.set_index("x", shuffle="tasks"))
assert_eq(df.set_index("y"), ddf.set_index("y", shuffle="tasks"))
assert_eq(df.set_index(df.x), ddf.set_index(ddf.x, shuffle="tasks"))
assert_eq(df.set_index(df.x + df.y), ddf.set_index(ddf.x + ddf.y, shuffle="tasks"))
assert_eq(df.set_index(df.x + 1), ddf.set_index(ddf.x + 1, shuffle="tasks"))
assert_eq(df.set_index(df.index), ddf.set_index(ddf.index, shuffle="tasks"))
@pytest.mark.parametrize("shuffle", ["disk", "tasks"])
def test_set_index_self_index(shuffle):
df = pd.DataFrame(
{"x": np.random.random(100), "y": np.random.random(100) // 0.2},
index=np.random.random(100),
)
a = dd.from_pandas(df, npartitions=4)
b = a.set_index(a.index, shuffle=shuffle)
assert a is b
assert_eq(b, df.set_index(df.index))
@pytest.mark.parametrize("shuffle", ["tasks"])
def test_set_index_names(shuffle):
df = pd.DataFrame(
{"x": np.random.random(100), "y": np.random.random(100) // 0.2},
index=np.random.random(100),
)
ddf = dd.from_pandas(df, npartitions=4)
assert set(ddf.set_index("x", shuffle=shuffle).dask) == set(
ddf.set_index("x", shuffle=shuffle).dask
)
assert set(ddf.set_index("x", shuffle=shuffle).dask) != set(
ddf.set_index("y", shuffle=shuffle).dask
)
assert set(ddf.set_index("x", max_branch=4, shuffle=shuffle).dask) != set(
ddf.set_index("x", max_branch=3, shuffle=shuffle).dask
)
assert set(ddf.set_index("x", drop=True, shuffle=shuffle).dask) != set(
ddf.set_index("x", drop=False, shuffle=shuffle).dask
)
@pytest.mark.parametrize("shuffle", ["disk", "tasks"])
def test_set_index_tasks_2(shuffle):
df = dd.demo.make_timeseries(
"2000",
"2004",
{"value": float, "name": str, "id": int},
freq="2H",
partition_freq="1M",
seed=1,
)
df2 = df.set_index("name", shuffle=shuffle)
df2.value.sum().compute(scheduler="sync")
@pytest.mark.parametrize("shuffle", ["disk", "tasks"])
def test_set_index_tasks_3(shuffle):
df = pd.DataFrame(np.random.random((10, 2)), columns=["x", "y"])
ddf = dd.from_pandas(df, npartitions=5)
ddf2 = ddf.set_index(
"x", shuffle=shuffle, max_branch=2, npartitions=ddf.npartitions
)
df2 = df.set_index("x")
assert_eq(df2, ddf2)
assert ddf2.npartitions == ddf.npartitions
@pytest.mark.parametrize("shuffle", ["tasks", "disk"])
def test_shuffle_sort(shuffle):
df = pd.DataFrame({"x": [1, 2, 3, 2, 1], "y": [9, 8, 7, 1, 5]})
ddf = dd.from_pandas(df, npartitions=3)
df2 = df.set_index("x").sort_index()
ddf2 = ddf.set_index("x", shuffle=shuffle)
assert_eq(ddf2.loc[2:3], df2.loc[2:3])
@pytest.mark.parametrize("shuffle", ["tasks", "disk"])
@pytest.mark.parametrize("scheduler", ["threads", "processes"])
def test_rearrange(shuffle, scheduler):
df = pd.DataFrame({"x": np.random.random(10)})
ddf = dd.from_pandas(df, npartitions=4)
ddf2 = ddf.assign(_partitions=ddf.x % 4)
result = rearrange_by_column(ddf2, "_partitions", max_branch=32, shuffle=shuffle)
assert result.npartitions == ddf.npartitions
assert set(ddf.dask).issubset(result.dask)
# Every value in exactly one partition
a = result.compute(scheduler=scheduler)
get = dask.base.get_scheduler(scheduler=scheduler)
parts = get(result.dask, result.__dask_keys__())
for i in a._partitions.drop_duplicates():
assert sum(i in set(part._partitions) for part in parts) == 1
def test_rearrange_cleanup():
df = pd.DataFrame({"x": np.random.random(10)})
ddf = dd.from_pandas(df, npartitions=4)
ddf2 = ddf.assign(_partitions=ddf.x % 4)
tmpdir = tempfile.mkdtemp()
with dask.config.set(temporary_directory=str(tmpdir)):
result = rearrange_by_column(ddf2, "_partitions", max_branch=32, shuffle="disk")
result.compute(scheduler="processes")
assert len(os.listdir(tmpdir)) == 0
def mock_shuffle_group_3(df, col, npartitions, p):
raise ValueError("Mock exception!")
def test_rearrange_disk_cleanup_with_exception():
# ensure temporary files are cleaned up when there's an internal exception.
with mock.patch("dask.dataframe.shuffle.shuffle_group_3", new=mock_shuffle_group_3):
df = pd.DataFrame({"x": np.random.random(10)})
ddf = dd.from_pandas(df, npartitions=4)
ddf2 = ddf.assign(_partitions=ddf.x % 4)
tmpdir = tempfile.mkdtemp()
with dask.config.set(temporary_directory=str(tmpdir)):
with pytest.raises(ValueError, match="Mock exception!"):
result = rearrange_by_column(
ddf2, "_partitions", max_branch=32, shuffle="disk"
)
result.compute(scheduler="processes")
assert len(os.listdir(tmpdir)) == 0
def test_rearrange_by_column_with_narrow_divisions():
from dask.dataframe.tests.test_multi import list_eq
A = pd.DataFrame({"x": [1, 2, 3, 4, 5, 6], "y": [1, 1, 2, 2, 3, 4]})
a = dd.repartition(A, [0, 4, 5])
df = rearrange_by_divisions(a, "x", (0, 2, 5))
list_eq(df, a)
def test_maybe_buffered_partd():
import partd
f = maybe_buffered_partd()
p1 = f()
assert isinstance(p1.partd, partd.Buffer)
f2 = pickle.loads(pickle.dumps(f))
assert not f2.buffer
p2 = f2()
assert isinstance(p2.partd, partd.File)
def test_set_index_with_explicit_divisions():
df = pd.DataFrame({"x": [4, 1, 2, 5]}, index=[10, 20, 30, 40])
ddf = dd.from_pandas(df, npartitions=2)
def throw(*args, **kwargs):
raise Exception()
with dask.config.set(get=throw):
ddf2 = ddf.set_index("x", divisions=[1, 3, 5])
assert ddf2.divisions == (1, 3, 5)
df2 = df.set_index("x")
assert_eq(ddf2, df2)
# Divisions must be sorted
with pytest.raises(ValueError):
ddf.set_index("x", divisions=[3, 1, 5])
def test_set_index_divisions_2():
df = pd.DataFrame({"x": [1, 2, 3, 4, 5, 6], "y": list("abdabd")})
ddf = dd.from_pandas(df, 2)
result = ddf.set_index("y", divisions=["a", "c", "d"])
assert result.divisions == ("a", "c", "d")
assert list(result.compute(scheduler="sync").index[-2:]) == ["d", "d"]
def test_set_index_divisions_compute():
d2 = d.set_index("b", divisions=[0, 2, 9], compute=False)
d3 = d.set_index("b", divisions=[0, 2, 9], compute=True)
assert_eq(d2, d3)
assert_eq(d2, full.set_index("b"))
assert_eq(d3, full.set_index("b"))
assert len(d2.dask) > len(d3.dask)
d4 = d.set_index(d.b, divisions=[0, 2, 9], compute=False)
d5 = d.set_index(d.b, divisions=[0, 2, 9], compute=True)
exp = full.copy()
exp.index = exp.b
assert_eq(d4, d5)
assert_eq(d4, exp)
assert_eq(d5, exp)
assert len(d4.dask) > len(d5.dask)
def test_set_index_divisions_sorted():
p1 = pd.DataFrame({"x": [10, 11, 12], "y": ["a", "a", "a"]})
p2 = pd.DataFrame({"x": [13, 14, 15], "y": ["b", "b", "c"]})
p3 = pd.DataFrame({"x": [16, 17, 18], "y": ["d", "e", "e"]})
ddf = dd.DataFrame(
{("x", 0): p1, ("x", 1): p2, ("x", 2): p3}, "x", p1, [None, None, None, None]
)
df = ddf.compute()
def throw(*args, **kwargs):
raise Exception("Shouldn't have computed")
with dask.config.set(get=throw):
res = ddf.set_index("x", divisions=[10, 13, 16, 18], sorted=True)
assert_eq(res, df.set_index("x"))
with dask.config.set(get=throw):
res = ddf.set_index("y", divisions=["a", "b", "d", "e"], sorted=True)
assert_eq(res, df.set_index("y"))
# with sorted=True, divisions must be same length as df.divisions
with pytest.raises(ValueError):
ddf.set_index("y", divisions=["a", "b", "c", "d", "e"], sorted=True)
# Divisions must be sorted
with pytest.raises(ValueError):
ddf.set_index("y", divisions=["a", "b", "d", "c"], sorted=True)
@pytest.mark.slow
def test_set_index_consistent_divisions():
# See https://github.com/dask/dask/issues/3867
df = pd.DataFrame(
{"x": np.random.random(100), "y": np.random.random(100) // 0.2},
index=np.random.random(100),
)
ddf = dd.from_pandas(df, npartitions=4)
ddf = ddf.clear_divisions()
ctx = mp.get_context("spawn")
pool = ctx.Pool(processes=8)
with pool:
results = [pool.apply_async(_set_index, (ddf, "x")) for _ in range(100)]
divisions_set = set(result.get() for result in results)
assert len(divisions_set) == 1
def _set_index(df, *args, **kwargs):
return df.set_index(*args, **kwargs).divisions
@pytest.mark.parametrize("shuffle", ["disk", "tasks"])
def test_set_index_reduces_partitions_small(shuffle):
df = pd.DataFrame({"x": np.random.random(100)})
ddf = dd.from_pandas(df, npartitions=50)
ddf2 = ddf.set_index("x", shuffle=shuffle, npartitions="auto")
assert ddf2.npartitions < 10
def make_part(n):
return pd.DataFrame({"x": np.random.random(n), "y": np.random.random(n)})
@pytest.mark.parametrize("shuffle", ["disk", "tasks"])
def test_set_index_reduces_partitions_large(shuffle):
nbytes = 1e6
nparts = 50
n = int(nbytes / (nparts * 8))
ddf = dd.DataFrame(
{("x", i): (make_part, n) for i in range(nparts)},
"x",
make_part(1),
[None] * (nparts + 1),
)
ddf2 = ddf.set_index(
"x", shuffle=shuffle, npartitions="auto", partition_size=nbytes
)
assert 1 < ddf2.npartitions < 20
@pytest.mark.parametrize("shuffle", ["disk", "tasks"])
def test_set_index_doesnt_increase_partitions(shuffle):
nparts = 2
nbytes = 1e6
n = int(nbytes / (nparts * 8))
ddf = dd.DataFrame(
{("x", i): (make_part, n) for i in range(nparts)},
"x",
make_part(1),
[None] * (nparts + 1),
)
ddf2 = ddf.set_index(
"x", shuffle=shuffle, npartitions="auto", partition_size=nbytes
)
assert ddf2.npartitions <= ddf.npartitions
@pytest.mark.parametrize("shuffle", ["disk", "tasks"])
def test_set_index_detects_sorted_data(shuffle):
df = pd.DataFrame({"x": range(100), "y": range(100)})
ddf = dd.from_pandas(df, npartitions=10, name="x", sort=False)
ddf2 = ddf.set_index("x", shuffle=shuffle)
assert len(ddf2.dask) < ddf.npartitions * 4
def test_set_index_sorts():
# https://github.com/dask/dask/issues/2288
vals = np.array(
[
1348550149000000000,
1348550149000000000,
1348558142000000000,
1348558142000000000,
1348585928000000000,
1348585928000000000,
1348600739000000000,
1348601706000000000,
1348600739000000000,
1348601706000000000,
1348614789000000000,
1348614789000000000,
1348621037000000000,
1348621038000000000,
1348621040000000000,
1348621037000000000,
1348621038000000000,
1348621040000000000,
1348637628000000000,
1348638159000000000,
1348638160000000000,
1348638159000000000,
1348638160000000000,
1348637628000000000,
1348646354000000000,
1348646354000000000,
1348659107000000000,
1348657111000000000,
1348659107000000000,
1348657111000000000,
1348672876000000000,
1348672876000000000,
1348682787000000000,
1348681985000000000,
1348682787000000000,
1348681985000000000,
1348728167000000000,
1348728167000000000,
1348730745000000000,
1348730745000000000,
1348750198000000000,
1348750198000000000,
1348750198000000000,
1348753539000000000,
1348753539000000000,
1348753539000000000,
1348754449000000000,
1348754449000000000,
1348761333000000000,
1348761554000000000,
1348761610000000000,
1348761333000000000,
1348761554000000000,
1348761610000000000,
1348782624000000000,
1348782624000000000,
1348782624000000000,
1348782624000000000,
]
)
vals = pd.to_datetime(vals, unit="ns")
breaks = [10, 36, 58]
dfs = []
for i in range(len(breaks)):
lo = sum(breaks[:i])
hi = sum(breaks[i : i + 1])
dfs.append(pd.DataFrame({"timestamp": vals[lo:hi]}, index=range(lo, hi)))
ddf = dd.concat(dfs).clear_divisions()
assert ddf.set_index("timestamp").index.compute().is_monotonic is True
def test_set_index():
dsk = {
("x", 0): pd.DataFrame({"a": [1, 2, 3], "b": [4, 2, 6]}, index=[0, 1, 3]),
("x", 1): pd.DataFrame({"a": [4, 5, 6], "b": [3, 5, 8]}, index=[5, 6, 8]),
("x", 2): pd.DataFrame({"a": [7, 8, 9], "b": [9, 1, 8]}, index=[9, 9, 9]),
}
d = dd.DataFrame(dsk, "x", meta, [0, 4, 9, 9])
full = d.compute()
d2 = d.set_index("b", npartitions=3)
assert d2.npartitions == 3
assert d2.index.name == "b"
assert_eq(d2, full.set_index("b"))
d3 = d.set_index(d.b, npartitions=3)
assert d3.npartitions == 3
assert d3.index.name == "b"
assert_eq(d3, full.set_index(full.b))
d4 = d.set_index("b")
assert d4.index.name == "b"
assert_eq(d4, full.set_index("b"))
d5 = d.set_index(["b"])
assert d5.index.name == "b"
assert_eq(d5, full.set_index(["b"]))
@pytest.mark.parametrize("engine", ["pandas", "cudf"])
def test_set_index_interpolate(engine):
if engine == "cudf":
# NOTE: engine == "cudf" requires cudf/dask_cudf,
# will be skipped by non-GPU CI.
cudf = pytest.importorskip("cudf")
dask_cudf = pytest.importorskip("dask_cudf")
df = pd.DataFrame({"x": [4, 1, 1, 3, 3], "y": [1.0, 1, 1, 1, 2]})
if engine == "cudf":
gdf = cudf.from_pandas(df)
d = dask_cudf.from_cudf(gdf, npartitions=3)
else:
d = dd.from_pandas(df, 2)
d1 = d.set_index("x", npartitions=3)
assert d1.npartitions == 3
assert set(d1.divisions) == set([1, 2, 4])
d2 = d.set_index("y", npartitions=3)
assert d2.divisions[0] == 1.0
assert 1.0 < d2.divisions[1] < d2.divisions[2] < 2.0
assert d2.divisions[3] == 2.0
@pytest.mark.parametrize("engine", ["pandas", "cudf"])
def test_set_index_interpolate_int(engine):
if engine == "cudf":
# NOTE: engine == "cudf" requires cudf/dask_cudf,
# will be skipped by non-GPU CI.
cudf = pytest.importorskip("cudf")
dask_cudf = pytest.importorskip("dask_cudf")
L = sorted(list(range(0, 200, 10)) * 2)
df = pd.DataFrame({"x": 2 * L})
if engine == "cudf":
gdf = cudf.from_pandas(df)
d = dask_cudf.from_cudf(gdf, npartitions=2)
else:
d = dd.from_pandas(df, 2)
d1 = d.set_index("x", npartitions=10)
assert all(np.issubdtype(type(x), np.integer) for x in d1.divisions)
@pytest.mark.parametrize("engine", ["pandas", "cudf"])
def test_set_index_interpolate_large_uint(engine):
if engine == "cudf":
# NOTE: engine == "cudf" requires cudf/dask_cudf,
# will be skipped by non-GPU CI.
cudf = pytest.importorskip("cudf")
dask_cudf = pytest.importorskip("dask_cudf")
"""This test is for #7304"""
df = pd.DataFrame(
{"x": np.array([612509347682975743, 616762138058293247], dtype=np.uint64)}
)
if engine == "cudf":
gdf = cudf.from_pandas(df)
d = dask_cudf.from_cudf(gdf, npartitions=2)
else:
d = dd.from_pandas(df, 1)
d1 = d.set_index("x", npartitions=1)
assert d1.npartitions == 1
assert set(d1.divisions) == set([612509347682975743, 616762138058293247])
def test_set_index_timezone():
s_naive = pd.Series(pd.date_range("20130101", periods=3))
s_aware = pd.Series(pd.date_range("20130101", periods=3, tz="US/Eastern"))
df = | pd.DataFrame({"tz": s_aware, "notz": s_naive}) | pandas.DataFrame |
#!/usr/bin/env python3
##############################################################
## <NAME> & <NAME> ##
## Copyright (C) 2020-2021 ##
##############################################################
'''
Created on 30 oct. 2020
@author: alba
Modified in March 2021
@author: <NAME>
'''
## useful imports
import os
import sys
import pandas as pd
import numpy as np
from Bio import SeqIO, Seq
from Bio.SeqRecord import SeqRecord
from Bio.SeqFeature import FeatureLocation, ExactPosition
from Bio.SeqIO.FastaIO import SimpleFastaParser
from HCGB.functions.aesthetics_functions import debug_message
from BacDup.scripts.functions import columns_annot_table
################################################################################
def gbf_parser_caller(annot_file, output_path, debug):
## set output paths
prot_file = os.path.abspath( os.path.join(output_path, 'proteins.fa'))
csv_file = os.path.abspath( os.path.join(output_path, 'annot_df.csv'))
csv_length = os.path.abspath( os.path.join(output_path, 'length_df.csv'))
list_out_files = [prot_file, csv_file, csv_length]
try:
with open(prot_file, "w") as output_handle:
SeqIO.write(
gbf_parser(annot_file, list_out_files, debug=debug),
output_handle, "fasta")
## output files
return (list_out_files)
except:
return (False)
################################################################################
def gbf_parser(gbf_file, list_out_files, debug=False):
## create dataframe.
## get common column names
columns = columns_annot_table()
annot_df = pd.DataFrame(data=None, columns=columns)
genome_length = | pd.DataFrame(data=None, columns=["length"]) | pandas.DataFrame |
import os
import ctypes
import math
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import matplotlib.cm as cm
from matplotlib.path import Path
import matplotlib.patches as patches
from matplotlib.collections import PolyCollection
import matplotlib.colors as colors
from pywfm import DLL_PATH, DLL
from pywfm.misc import IWFMMiscellaneous
class IWFMModel(IWFMMiscellaneous):
"""
IWFM Model Class for interacting with the IWFM API.
Parameters
----------
preprocessor_file_name : str
file path and name of the model preprocessor input file.
simulation_file_name : str
file path and name of the model simulation input file.
has_routed_streams : {1 or 0}, default 1
If 1: model has routed streams.
If 0: does not have routed streams.
is_for_inquiry : {1 or 0}, default 1
Options for instantiating model.
* 1: model is instantiated for inquiry.
* 0: model is instantiated for simulations.
instantiate : bool, default True
flag to instantiate the model object
delete_inquiry_data_file : bool, default True
flag to delete inquiry data file, if it exists
log_file : str, default 'message.log'
name of the file used for logging simulation messages
Returns
-------
IWFMModel Object
instance of the IWFMModel class providing access to the IWFM Model
Object Fortran procedures.
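Example
-------
A usage sketch assembled from the per-method examples below; the file
paths are placeholders and must point to an existing IWFM model
application.

>>> from pywfm import IWFMModel
>>> pp_file = '../Preprocessor/PreProcessor_MAIN.IN'
>>> sim_file = 'Simulation_MAIN.IN'
>>> model = IWFMModel(pp_file, sim_file)
>>> model.get_n_nodes()
441
>>> model.kill()
>>> model.close_log_file()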
"""
def __init__(
self,
preprocessor_file_name,
simulation_file_name,
has_routed_streams=1,
is_for_inquiry=1,
instantiate=True,
delete_inquiry_data_file=True,
log_file="message.log",
):
if not isinstance(preprocessor_file_name, str):
raise TypeError("preprocessor_file_name must be a str")
if not os.path.exists(preprocessor_file_name) or not os.path.isfile(
preprocessor_file_name
):
raise FileNotFoundError("{} was not found".format(preprocessor_file_name))
self.preprocessor_file_name = preprocessor_file_name
if not isinstance(simulation_file_name, str):
raise TypeError("simulation_file_name must be a str")
if not os.path.exists(simulation_file_name) or not os.path.isfile(
simulation_file_name
):
raise FileNotFoundError("{} was not found".format(simulation_file_name))
self.simulation_file_name = simulation_file_name
if not isinstance(has_routed_streams, int):
raise TypeError("has_routed_streams must be an int")
if has_routed_streams not in [0, 1]:
raise ValueError("has_routed_streams must be 0 or 1")
self.has_routed_streams = has_routed_streams
if not isinstance(is_for_inquiry, int):
raise TypeError("is_for_inquiry must be an int")
if is_for_inquiry not in [0, 1]:
raise ValueError("is_for_inquiry must be 0 or 1")
self.is_for_inquiry = is_for_inquiry
self.dll = ctypes.windll.LoadLibrary(os.path.join(DLL_PATH, DLL))
if delete_inquiry_data_file:
self.delete_inquiry_data_file()
if not isinstance(log_file, str):
raise TypeError("log_file must be a str or None")
self.set_log_file(log_file)
if instantiate:
self.new()
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, traceback):
self.kill()
self.close_log_file()
def new(self):
"""
Instantiate the IWFM Model Object.
This method opens all related files and allocates memory for
the IWFM Model Object.
Note
----
When an instance of the IWFMModel class is created for the
first time, the entire model object is available for returning
data, and a binary file is generated for quicker loading. If this
binary file exists when subsequent instances of the IWFMModel
object are created, not all functions will be available.
"""
# check to see if IWFM procedure is available in user version of IWFM DLL
if not hasattr(self.dll, "IW_Model_New"):
raise AttributeError(
'IWFM API does not have "{}" procedure. Check for an updated version'.format(
"IW_Model_New"
)
)
# check that model object isn't already instantiated
if self.is_model_instantiated():
return
# convert preprocessor file name to ctypes
preprocessor_file_name = ctypes.create_string_buffer(
self.preprocessor_file_name.encode("utf-8")
)
length_preprocessor_file_name = ctypes.c_int(
ctypes.sizeof(preprocessor_file_name)
)
# convert simulation file name to ctypes
simulation_file_name = ctypes.create_string_buffer(
self.simulation_file_name.encode("utf-8")
)
length_simulation_file_name = ctypes.c_int(ctypes.sizeof(simulation_file_name))
# convert has_routed_streams to ctypes
has_routed_streams = ctypes.c_int(self.has_routed_streams)
# convert is_for_inquiry to ctypes
is_for_inquiry = ctypes.c_int(self.is_for_inquiry)
# set instance variable status to 0
status = ctypes.c_int(0)
self.dll.IW_Model_New(
ctypes.byref(length_preprocessor_file_name),
preprocessor_file_name,
ctypes.byref(length_simulation_file_name),
simulation_file_name,
ctypes.byref(has_routed_streams),
ctypes.byref(is_for_inquiry),
ctypes.byref(status),
)
def kill(self):
"""
Terminate the IWFM Model Object.
This method closes files associated with model and clears
memory.
"""
# check to see if IWFM procedure is available in user version of IWFM DLL
if not hasattr(self.dll, "IW_Model_Kill"):
raise AttributeError(
'IWFM API does not have "{}" procedure. '
"Check for an updated version".format("IW_Model_Kill")
)
# reset instance variable status to 0
status = ctypes.c_int(0)
self.dll.IW_Model_Kill(ctypes.byref(status))
def get_current_date_and_time(self):
"""
Return the current simulation date and time.
Returns
-------
str
current date and time in IWFM date format i.e. MM/DD/YYYY_hh:mm
See Also
--------
IWFMModel.get_n_time_steps : Return the number of timesteps in an IWFM simulation
IWFMModel.get_time_specs : Return the IWFM simulation dates and time step
IWFMModel.get_n_intervals : Return the number of time intervals between a provided start date and end date
IWFMModel.get_output_interval : Return a list of the possible time intervals a selected time-series data can be retrieved at.
IWFMModel.is_date_greater : Return True if first_date is greater than comparison_date
IWFMModel.increment_time : increments the date provided by the specified time interval
Examples
--------
>>> from pywfm import IWFMModel
>>> pp_file = '../Preprocessor/PreProcessor_MAIN.IN'
>>> sim_file = 'Simulation_MAIN.IN'
>>> model = IWFMModel(pp_file, sim_file)
>>> model.get_current_date_and_time()
'09/30/1990_24:00'
>>> model.kill()
>>> model.close_log_file()
>>> from pywfm import IWFMModel
>>> pp_file = '../Preprocessor/PreProcessor_MAIN.IN'
>>> sim_file = 'Simulation_MAIN.IN'
>>> model = IWFMModel(pp_file, sim_file, is_for_inquiry=0)
>>> model.advance_time()
>>> model.get_current_date_and_time()
'10/01/1990_24:00'
>>> model.kill()
>>> model.close_log_file()
Note
----
1. The intent of this method is to retrieve information about the
current time step when using the IWFM DLL to run a simulation,
i.e. when the IWFMModel object is instantiated with is_for_inquiry=0.
2. If this method is called when the IWFMModel object is
instantiated with is_for_inquiry=1, it only returns the
simulation begin date and time.
"""
# check to see if IWFM procedure is available in user version of IWFM DLL
if not hasattr(self.dll, "IW_Model_GetCurrentDateAndTime"):
raise AttributeError(
'IWFM API does not have "{}" procedure. '
"Check for an updated version".format("IW_Model_GetCurrentDateAndTime")
)
# set length of IWFM Date and Time string
length_date_string = ctypes.c_int(16)
# initialize output variables
current_date_string = ctypes.create_string_buffer(length_date_string.value)
# set instance variable status to 0
status = ctypes.c_int(0)
self.dll.IW_Model_GetCurrentDateAndTime(
ctypes.byref(length_date_string), current_date_string, ctypes.byref(status)
)
return current_date_string.value.decode("utf-8")
def get_n_time_steps(self):
"""
Return the number of timesteps in an IWFM simulation
Returns
-------
int
the number of timesteps in the simulation
See Also
--------
IWFMModel.get_current_date_and_time : Return the current simulation date and time
IWFMModel.get_time_specs : Return the IWFM simulation dates and time step
IWFMModel.get_n_intervals : Return the number of time intervals between a provided start date and end date
IWFMModel.get_output_interval : Return a list of the possible time intervals a selected time-series data can be retrieved at.
IWFMModel.is_date_greater : Return True if first_date is greater than comparison_date
IWFMModel.increment_time : increments the date provided by the specified time interval
Example
-------
>>> from pywfm import IWFMModel
>>> pp_file = '../Preprocessor/PreProcessor_MAIN.IN'
>>> sim_file = 'Simulation_MAIN.IN'
>>> model = IWFMModel(pp_file, sim_file)
>>> model.get_n_time_steps()
3653
>>> model.kill()
>>> model.close_log_file()
"""
# check to see if IWFM procedure is available in user version of IWFM DLL
if not hasattr(self.dll, "IW_Model_GetNTimeSteps"):
raise AttributeError(
'IWFM API does not have "{}" procedure. '
"Check for an updated version".format("IW_Model_GetNTimeSteps")
)
# reset instance variable status to 0
status = ctypes.c_int(0)
# initialize n_nodes variable
n_time_steps = ctypes.c_int(0)
self.dll.IW_Model_GetNTimeSteps(
ctypes.byref(n_time_steps), ctypes.byref(status)
)
return n_time_steps.value
def get_time_specs(self):
"""
Return the IWFM simulation dates and time step
Returns
-------
tuple (length=2)
index 0 - (list) simulation dates; index 1 - (str) simulation time step
See Also
--------
IWFMModel.get_current_date_and_time : Return the current simulation date and time
IWFMModel.get_n_time_steps : Return the number of timesteps in an IWFM simulation
IWFMModel.get_n_intervals : Return the number of time intervals between a provided start date and end date
IWFMModel.get_output_interval : Return a list of the possible time intervals a selected time-series data can be retrieved at.
IWFMModel.is_date_greater : Return True if first_date is greater than comparison_date
IWFMModel.increment_time : increments the date provided by the specified time interval
Example
-------
>>> from pywfm import IWFMModel
>>> pp_file = '../Preprocessor/PreProcessor_MAIN.IN'
>>> sim_file = 'Simulation_MAIN.IN'
>>> model = IWFMModel(pp_file, sim_file)
>>> time_stamps, time_interval = model.get_time_specs()
>>> time_stamps
['10/01/1990_24:00',
'10/02/1990_24:00',
'10/03/1990_24:00',
'10/04/1990_24:00',
'10/05/1990_24:00',
'10/06/1990_24:00',
'10/07/1990_24:00',
...
'09/29/2000_24:00'
'09/30/2000_24:00'
'10/01/2000_24:00']
>>> time_interval
'1DAY'
>>> model.kill()
>>> model.close_log_file()
"""
# check to see if IWFM procedure is available in user version of IWFM DLL
if not hasattr(self.dll, "IW_Model_GetTimeSpecs"):
raise AttributeError(
'IWFM API does not have "{}" procedure. '
"Check for an updated version".format("IW_Model_GetTimeSpecs")
)
# set instance variable status to 0
status = ctypes.c_int(0)
# set input variables
n_data = ctypes.c_int(self.get_n_time_steps())
length_dates = ctypes.c_int(n_data.value * 16)
length_ts_interval = ctypes.c_int(8)
# initialize output variables
simulation_time_step = ctypes.create_string_buffer(length_ts_interval.value)
raw_dates_string = ctypes.create_string_buffer(length_dates.value)
delimiter_position_array = (ctypes.c_int * n_data.value)()
self.dll.IW_Model_GetTimeSpecs(
raw_dates_string,
ctypes.byref(length_dates),
simulation_time_step,
ctypes.byref(length_ts_interval),
ctypes.byref(n_data),
delimiter_position_array,
ctypes.byref(status),
)
dates_list = self._string_to_list_by_array(
raw_dates_string, delimiter_position_array, n_data
)
sim_time_step = simulation_time_step.value.decode("utf-8")
return dates_list, sim_time_step
def get_output_interval(self):
"""
Return a list of the possible time intervals a selected
time-series data can be retrieved at.
Returns
-------
list of strings
list of available output intervals for given data type
See Also
--------
IWFMModel.get_current_date_and_time : Return the current simulation date and time
IWFMModel.get_n_time_steps : Return the number of timesteps in an IWFM simulation
IWFMModel.get_time_specs : Return the IWFM simulation dates and time step
IWFMModel.get_n_intervals : Return the number of time intervals between a provided start date and end date
IWFMModel.is_date_greater : Return True if first_date is greater than comparison_date
IWFMModel.increment_time : increments the date provided by the specified time interval
Example
-------
>>> from pywfm import IWFMModel
>>> pp_file = '../Preprocessor/PreProcessor_MAIN.IN'
>>> sim_file = 'Simulation_MAIN.IN'
>>> model = IWFMModel(pp_file, sim_file)
>>> model.get_output_interval()
['1DAY', '1WEEK', '1MON', '1YEAR']
>>> model.kill()
>>> model.close_log_file()
"""
# check to see if IWFM procedure is available in user version of IWFM DLL
if not hasattr(self.dll, "IW_Model_GetOutputIntervals"):
raise AttributeError(
'IWFM API does not have "{}" procedure. '
"Check for an updated version".format("IW_Model_GetOutputIntervals")
)
# set instance variable status to 0
status = ctypes.c_int(0)
# set length of output intervals character array to 160 or larger
length_output_intervals = ctypes.c_int(160)
# set maximum number of time intervals to 20 or larger
max_num_time_intervals = ctypes.c_int(20)
# initialize output variables
output_intervals = ctypes.create_string_buffer(length_output_intervals.value)
actual_num_time_intervals = ctypes.c_int(0)
delimiter_position_array = (ctypes.c_int * max_num_time_intervals.value)()
self.dll.IW_Model_GetOutputIntervals(
output_intervals,
ctypes.byref(length_output_intervals),
delimiter_position_array,
ctypes.byref(max_num_time_intervals),
ctypes.byref(actual_num_time_intervals),
ctypes.byref(status),
)
return self._string_to_list_by_array(
output_intervals, delimiter_position_array, actual_num_time_intervals
)
def get_n_nodes(self):
"""
Return the number of nodes in an IWFM model
Returns
-------
int
number of nodes specified in the IWFM model
See Also
--------
IWFMModel.get_node_coordinates : Return the x,y coordinates of the nodes in an IWFM model
IWFMModel.get_node_ids : Return an array of node ids in an IWFM model
IWFMModel.get_n_elements : Return the number of elements in an IWFM model
IWFMModel.get_n_subregions : Return the number of subregions in an IWFM model
IWFMModel.get_n_stream_nodes : Return the number of stream nodes in an IWFM model
IWFMModel.get_n_stream_inflows : Return the number of stream boundary inflows specified by the user as timeseries input data
IWFMModel.get_n_layers : Return the number of layers in an IWFM model
Example
-------
>>> from pywfm import IWFMModel
>>> pp_file = '../Preprocessor/PreProcessor_MAIN.IN'
>>> sim_file = 'Simulation_MAIN.IN'
>>> model = IWFMModel(pp_file, sim_file)
>>> model.get_n_nodes()
441
>>> model.kill()
>>> model.close_log_file()
"""
# check to see if IWFM procedure is available in user version of IWFM DLL
if not hasattr(self.dll, "IW_Model_GetNNodes"):
raise AttributeError(
'IWFM API does not have "{}" procedure. Check for an updated version'.format(
"IW_Model_GetNNodes"
)
)
# set instance variable status to 0
status = ctypes.c_int(0)
# initialize n_nodes variable
n_nodes = ctypes.c_int(0)
self.dll.IW_Model_GetNNodes(ctypes.byref(n_nodes), ctypes.byref(status))
if not hasattr(self, "n_nodes"):
self.n_nodes = n_nodes
return self.n_nodes.value
def get_node_coordinates(self):
"""
Return the x,y coordinates of the nodes in an IWFM model
Returns
-------
tuple
np.ndarray of groundwater node x-coordinates
np.ndarray of groundwater node y-coordinates
See Also
--------
IWFMModel.get_n_nodes : Return the number of nodes in an IWFM model
IWFMModel.get_node_ids : Return an array of node ids in an IWFM model
Example
-------
>>> from pywfm import IWFMModel
>>> pp_file = '../Preprocessor/PreProcessor_MAIN.IN'
>>> sim_file = 'Simulation_MAIN.IN'
>>> model = IWFMModel(pp_file, sim_file)
>>> x, y = model.get_node_coordinates()
>>> x
array([1804440. , 1811001.6, 1817563.2, 1824124.8, 1830686.4, 1837248. , ..., 1902864. , 1909425.6, 1915987.2, 1922548.8, 1929110.4, 1935672. ])
>>> y
array([14435520. , 14435520. , 14435520. , 14435520. , 14435520. , ..., 14566752. , 14566752. , 14566752. , 14566752. , 14566752. ])
>>> model.kill()
>>> model.close_log_file()
"""
# check to see if IWFM procedure is available in user version of IWFM DLL
if not hasattr(self.dll, "IW_Model_GetNodeXY"):
raise AttributeError(
'IWFM API does not have "{}" procedure. Check for an updated version'.format(
"IW_Model_GetNodeXY"
)
)
# set instance variable status to 0
status = ctypes.c_int(0)
# get number of nodes
num_nodes = ctypes.c_int(self.get_n_nodes())
# initialize output variables
x_coordinates = (ctypes.c_double * num_nodes.value)()
y_coordinates = (ctypes.c_double * num_nodes.value)()
self.dll.IW_Model_GetNodeXY(
ctypes.byref(num_nodes), x_coordinates, y_coordinates, ctypes.byref(status)
)
return np.array(x_coordinates), np.array(y_coordinates)
def get_node_ids(self):
"""
Return an array of node ids in an IWFM model
Returns
-------
np.ndarray
array of groundwater node ids
See Also
--------
IWFMModel.get_n_nodes : Return the number of nodes in an IWFM model
IWFMModel.get_node_coordinates : Return the x,y coordinates of the nodes in an IWFM model
Example
-------
>>> from pywfm import IWFMModel
>>> pp_file = '../Preprocessor/PreProcessor_MAIN.IN'
>>> sim_file = 'Simulation_MAIN.IN'
>>> model = IWFMModel(pp_file, sim_file)
>>> model.get_node_ids()
array([ 1, 2, 3, 4, 5, ..., 437, 438, 439, 440, 441])
>>> model.kill()
>>> model.close_log_file()
"""
# check to see if IWFM procedure is available in user version of IWFM DLL
if not hasattr(self.dll, "IW_Model_GetNodeIDs"):
raise AttributeError(
'IWFM API does not have "{}" procedure. Check for an updated version'.format(
"IW_Model_GetNodeIDs"
)
)
# set instance variable status to 0
status = ctypes.c_int(0)
# get number of nodes
num_nodes = ctypes.c_int(self.get_n_nodes())
# initialize output variables
node_ids = (ctypes.c_int * num_nodes.value)()
self.dll.IW_Model_GetNodeIDs(
ctypes.byref(num_nodes), node_ids, ctypes.byref(status)
)
return np.array(node_ids)
def get_n_elements(self):
"""
Return the number of elements in an IWFM model
Returns
-------
int
number of elements in the IWFM model application
See Also
--------
IWFMModel.get_element_ids : Return an array of element IDs in an IWFM model
IWFMModel.get_element_config : Return an array of node IDs for an IWFM element
IWFMModel.get_n_nodes : Return the number of nodes in an IWFM model
IWFMModel.get_n_subregions : Return the number of subregions in an IWFM model
IWFMModel.get_n_stream_nodes : Return the number of stream nodes in an IWFM model
IWFMModel.get_n_stream_inflows : Return the number of stream boundary inflows specified by the user as timeseries input data
IWFMModel.get_n_layers : Return the number of layers in an IWFM model
Example
-------
>>> from pywfm import IWFMModel
>>> pp_file = '../Preprocessor/PreProcessor_MAIN.IN'
>>> sim_file = 'Simulation_MAIN.IN'
>>> model = IWFMModel(pp_file, sim_file)
>>> model.get_n_elements()
400
>>> model.kill()
>>> model.close_log_file()
"""
# check to see if IWFM procedure is available in user version of IWFM DLL
if not hasattr(self.dll, "IW_Model_GetNElements"):
raise AttributeError(
'IWFM API does not have "{}" procedure. Check for an updated version'.format(
"IW_Model_GetNElements"
)
)
# set instance variable status to 0
status = ctypes.c_int(0)
# initialize n_nodes variable
n_elements = ctypes.c_int(0)
self.dll.IW_Model_GetNElements(ctypes.byref(n_elements), ctypes.byref(status))
if not hasattr(self, "n_elements"):
self.n_elements = n_elements
return self.n_elements.value
def get_element_ids(self):
"""
Return an array of element ids in an IWFM model
Returns
-------
np.ndarray
array of element ids in an IWFM model application
See Also
--------
IWFMModel.get_n_elements : Return the number of elements in the IWFM model
IWFMModel.get_element_config : Return an array of node IDs for an IWFM element
Example
-------
>>> from pywfm import IWFMModel
>>> pp_file = '../Preprocessor/PreProcessor_MAIN.IN'
>>> sim_file = 'Simulation_MAIN.IN'
>>> model = IWFMModel(pp_file, sim_file)
>>> model.get_element_ids()
array([ 1, 2, 3, ..., 398, 399, 400])
>>> model.kill()
>>> model.close_log_file()
"""
# check to see if IWFM procedure is available in user version of IWFM DLL
if not hasattr(self.dll, "IW_Model_GetElementIDs"):
raise AttributeError(
'IWFM API does not have "{}" procedure. Check for an updated version'.format(
"IW_Model_GetElementIDs"
)
)
# set instance variable status to 0
status = ctypes.c_int(0)
# get number of elements
num_elements = ctypes.c_int(self.get_n_elements())
# initialize output variables
element_ids = (ctypes.c_int * num_elements.value)()
self.dll.IW_Model_GetElementIDs(
ctypes.byref(num_elements), element_ids, ctypes.byref(status)
)
return np.array(element_ids)
def get_element_config(self, element_id):
"""
Return an array of node ids for an IWFM element.
The node ids are provided in a counter-clockwise direction
Parameters
----------
element_id : int
single element ID for IWFM model. Must be one of the values returned by
get_element_ids method
Returns
-------
np.ndarray
array of node IDs for element
Note
----
In IWFM, elements can be composed of either 3 or 4 nodes. If
the element has 3 nodes, the fourth is returned as a 0. Node IDs
must also be in counter-clockwise order.
See Also
--------
IWFMModel.get_n_elements : Return the number of elements in an IWFM model
IWFMModel.get_element_ids : Return an array of element IDs in an IWFM model
Example
-------
>>> from pywfm import IWFMModel
>>> pp_file = '../Preprocessor/PreProcessor_MAIN.IN'
>>> sim_file = 'Simulation_MAIN.IN'
>>> model = IWFMModel(pp_file, sim_file)
>>> model.get_element_config(1)
array([ 1, 2, 23, 22])
>>> model.kill()
>>> model.close_log_file()
"""
# check to see if IWFM procedure is available in user version of IWFM DLL
if not hasattr(self.dll, "IW_Model_GetElementConfigData"):
raise AttributeError(
'IWFM API does not have "{}" procedure. Check for an updated version'.format(
"IW_Model_GetElementConfigData"
)
)
# check that element_id is an integer
if not isinstance(element_id, (int, np.int32)):
raise TypeError("element_id must be an integer")
# check that element_id is a valid element_id
element_ids = self.get_element_ids()
if not np.any(element_ids == element_id):
raise ValueError("element_id is not a valid element ID")
# convert element_id to element index
element_index = np.where(element_ids == element_id)[0][0] + 1
# set instance variable status to 0
status = ctypes.c_int(0)
# set input variables
element_index = ctypes.c_int(element_index)
max_nodes_per_element = ctypes.c_int(4)
# initialize output variables
nodes_in_element = (ctypes.c_int * max_nodes_per_element.value)()
self.dll.IW_Model_GetElementConfigData(
ctypes.byref(element_index),
ctypes.byref(max_nodes_per_element),
nodes_in_element,
ctypes.byref(status),
)
# convert node indices to node IDs
node_indices = np.array(nodes_in_element)
node_ids = self.get_node_ids()
return node_ids[node_indices - 1]
def get_n_subregions(self):
"""
Return the number of subregions in an IWFM model
Returns
-------
int
number of subregions in the IWFM model
See Also
--------
IWFMModel.get_subregion_ids : Return an array of IDs for subregions in an IWFM model
IWFMModel.get_subregion_name : Return the name corresponding to the subregion_id in an IWFM model
IWFMModel.get_subregions_by_element : Return an array identifying the IWFM Model elements contained within each subregion.
Example
-------
>>> from pywfm import IWFMModel
>>> pp_file = '../Preprocessor/PreProcessor_MAIN.IN'
>>> sim_file = 'Simulation_MAIN.IN'
>>> model = IWFMModel(pp_file, sim_file)
>>> model.get_n_subregions()
2
>>> model.kill()
>>> model.close_log_file()
"""
# check to see if IWFM procedure is available in user version of IWFM DLL
if not hasattr(self.dll, "IW_Model_GetNSubregions"):
raise AttributeError(
'IWFM API does not have "{}" procedure. Check for an updated version'.format(
"IW_Model_GetNSubregions"
)
)
# set instance variable status to 0
status = ctypes.c_int(0)
# initialize n_subregions variable
n_subregions = ctypes.c_int(0)
self.dll.IW_Model_GetNSubregions(
ctypes.byref(n_subregions), ctypes.byref(status)
)
if not hasattr(self, "n_subregions"):
self.n_subregions = n_subregions
return self.n_subregions.value
def get_subregion_ids(self):
"""
Return an array of IDs for subregions identified in an IWFM model
Returns
-------
np.ndarray
array containing integer IDs for the subregions specified in the IWFM model
Note
----
The resulting integer array will have a length equal to the value returned by the get_n_subregions method
See Also
--------
IWFMModel.get_n_subregions : Return the number of subregions in an IWFM model
IWFMModel.get_subregion_name : Return the name corresponding to the subregion_id in an IWFM model
IWFMModel.get_subregions_by_element : Return an array identifying the IWFM Model elements contained within each subregion.
Example
-------
>>> from pywfm import IWFMModel
>>> pp_file = '../Preprocessor/PreProcessor_MAIN.IN'
>>> sim_file = 'Simulation_MAIN.IN'
>>> model = IWFMModel(pp_file, sim_file)
>>> model.get_subregion_ids()
array([1, 2])
>>> model.kill()
>>> model.close_log_file()
"""
# check to see if IWFM procedure is available in user version of IWFM DLL
if not hasattr(self.dll, "IW_Model_GetSubregionIDs"):
raise AttributeError(
'IWFM API does not have "{}" procedure. Check for an updated version'.format(
"IW_Model_GetSubregionIDs"
)
)
# set instance variable status to 0
status = ctypes.c_int(0)
# get number of model subregions
n_subregions = ctypes.c_int(self.get_n_subregions())
# initialize output variables
subregion_ids = (ctypes.c_int * n_subregions.value)()
self.dll.IW_Model_GetSubregionIDs(
ctypes.byref(n_subregions), subregion_ids, ctypes.byref(status)
)
return np.array(subregion_ids)
def get_subregion_name(self, subregion_id):
"""
Return the name corresponding to the subregion_id in an IWFM model
Parameters
----------
subregion_id : int
subregion identification number used to return name
Returns
-------
str
name of the subregion
See Also
--------
IWFMModel.get_n_subregions : Return the number of subregions in an IWFM model
IWFMModel.get_subregion_ids : Return an array of IDs for subregions identified in an IWFM model
IWFMModel.get_subregions_by_element : Return an array identifying the IWFM Model elements contained within each subregion.
Example
-------
>>> from pywfm import IWFMModel
>>> pp_file = '../Preprocessor/PreProcessor_MAIN.IN'
>>> sim_file = 'Simulation_MAIN.IN'
>>> model = IWFMModel(pp_file, sim_file)
>>> model.get_subregion_name(1)
'Region1 (SR1)'
>>> model.kill()
>>> model.close_log_file()
"""
# check to see if IWFM procedure is available in user version of IWFM DLL
if not hasattr(self.dll, "IW_Model_GetSubregionName"):
raise AttributeError(
'IWFM API does not have "{}" procedure. Check for an updated version'.format(
"IW_Model_GetSubregionName"
)
)
# check that subregion_id is an integer
if not isinstance(subregion_id, int):
raise TypeError("subregion_id must be an integer")
# check that subregion_id is valid
subregion_ids = self.get_subregion_ids()
if subregion_id not in subregion_ids:
subregions = " ".join([str(val) for val in subregion_ids])
raise ValueError(
"subregion_id provided is not a valid "
"subregion id. value provided {}. Must be "
"one of: {}".format(subregion_id, subregions)
)
# convert subregion_id to subregion index adding 1 to handle fortran indexing
subregion_index = np.where(subregion_ids == subregion_id)[0][0] + 1
# set instance variable status to 0
status = ctypes.c_int(0)
# convert subregion_index to ctypes
subregion_index = ctypes.c_int(subregion_index)
# initialize name length as 50 characters
length_name = ctypes.c_int(50)
# initialize output variables
subregion_name = ctypes.create_string_buffer(length_name.value)
self.dll.IW_Model_GetSubregionName(
ctypes.byref(subregion_index),
ctypes.byref(length_name),
subregion_name,
ctypes.byref(status),
)
return subregion_name.value.decode("utf-8")
def get_subregions_by_element(self):
"""
Return an array identifying the IWFM Model elements contained within each subregion.
Returns
-------
np.ndarray
array identifying the subregion where each model element is assigned
Note
----
The resulting integer array will have a length equal to the value returned by get_n_elements method
See Also
--------
IWFMModel.get_n_subregions : Return the number of subregions in an IWFM model
IWFMModel.get_subregion_ids : Return an array of IDs for subregions in an IWFM model
IWFMModel.get_subregion_name : Return the name corresponding to the subregion_id in an IWFM model
IWFMModel.get_n_elements : Return the number of elements in an IWFM model
Example
-------
>>> from pywfm import IWFMModel
>>> pp_file = '../Preprocessor/PreProcessor_MAIN.IN'
>>> sim_file = 'Simulation_MAIN.IN'
>>> model = IWFMModel(pp_file, sim_file)
>>> model.get_subregions_by_element()
array([1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
2, 2, 2, 2])
>>> model.kill()
>>> model.close_log_file()
"""
if not hasattr(self.dll, "IW_Model_GetElemSubregions"):
raise AttributeError(
'IWFM API does not have "{}" procedure. Check for an updated version'.format(
"IW_Model_GetElemSubregions"
)
)
# set instance variable status to 0
status = ctypes.c_int(0)
# get number of elements in model
n_elements = ctypes.c_int(self.get_n_elements())
# initialize output variables
element_subregions = (ctypes.c_int * n_elements.value)()
self.dll.IW_Model_GetElemSubregions(
ctypes.byref(n_elements), element_subregions, ctypes.byref(status)
)
# convert subregion indices to subregion IDs
subregion_index_by_element = np.array(element_subregions)
subregion_ids = self.get_subregion_ids()
return subregion_ids[subregion_index_by_element - 1]
def get_n_stream_nodes(self):
"""
Return the number of stream nodes in an IWFM model
Returns
-------
int
number of stream nodes in the IWFM model
See Also
--------
IWFMModel.get_stream_node_ids : Return an array of stream node IDs in an IWFM model
IWFMModel.get_n_stream_nodes_upstream_of_stream_node : Return the number of stream nodes immediately upstream of the provided stream node id
IWFMModel.get_stream_nodes_upstream_of_stream_node : Return an array of the stream node ids immediately upstream of the provided stream node id
IWFMModel.get_n_nodes : Return the number of nodes in an IWFM model
IWFMModel.get_n_subregions : Return the number of subregions in an IWFM model
IWFMModel.get_n_stream_inflows : Return the number of stream boundary inflows specified by the user as timeseries input data
Example
-------
>>> from pywfm import IWFMModel
>>> pp_file = '../Preprocessor/PreProcessor_MAIN.IN'
>>> sim_file = 'Simulation_MAIN.IN'
>>> model = IWFMModel(pp_file, sim_file)
>>> model.get_n_stream_nodes()
23
>>> model.kill()
>>> model.close_log_file()
"""
# check to see if IWFM procedure is available in user version of IWFM DLL
if not hasattr(self.dll, "IW_Model_GetNStrmNodes"):
raise AttributeError(
'IWFM API does not have "{}" procedure. Check for an updated version'.format(
"IW_Model_GetNStrmNodes"
)
)
# set instance variable status to 0
status = ctypes.c_int(0)
# initialize n_stream_nodes variable
n_stream_nodes = ctypes.c_int(0)
self.dll.IW_Model_GetNStrmNodes(
ctypes.byref(n_stream_nodes), ctypes.byref(status)
)
if not hasattr(self, "n_stream_nodes"):
self.n_stream_nodes = n_stream_nodes
return self.n_stream_nodes.value
def get_stream_node_ids(self):
"""
Return an array of stream node IDs in the IWFM model
Returns
-------
np.ndarray
array of stream node IDs from the IWFM model
Note
----
The resulting integer array will have a length equal to the value returned by the get_n_stream_nodes method
See Also
--------
IWFMModel.get_n_stream_nodes : Return the number of stream nodes in an IWFM model
IWFMModel.get_n_stream_nodes_upstream_of_stream_node : Return the number of stream nodes immediately upstream of the provided stream node id
IWFMModel.get_stream_nodes_upstream_of_stream_node : Return an array of the stream node ids immediately upstream of the provided stream node id
Example
-------
>>> from pywfm import IWFMModel
>>> pp_file = '../Preprocessor/PreProcessor_MAIN.IN'
>>> sim_file = 'Simulation_MAIN.IN'
>>> model = IWFMModel(pp_file, sim_file)
>>> model.get_stream_node_ids()
array([ 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17,
18, 19, 20, 21, 22, 23])
>>> model.kill()
>>> model.close_log_file()
"""
if not hasattr(self.dll, "IW_Model_GetStrmNodeIDs"):
raise AttributeError(
'IWFM API does not have "{}" procedure. Check for an updated version'.format(
"IW_Model_GetStrmNodeIDs"
)
)
# set instance variable status to 0
status = ctypes.c_int(0)
# get number of stream nodes
n_stream_nodes = ctypes.c_int(self.get_n_stream_nodes())
# initialize output variables
stream_node_ids = (ctypes.c_int * n_stream_nodes.value)()
self.dll.IW_Model_GetStrmNodeIDs(
ctypes.byref(n_stream_nodes), stream_node_ids, ctypes.byref(status)
)
return np.array(stream_node_ids, dtype=np.int32)
def get_n_stream_nodes_upstream_of_stream_node(self, stream_node_id):
"""
Return the number of stream nodes immediately upstream of
the provided stream node id
Parameters
----------
stream_node_id : int, np.int32
stream node id used to determine number of stream nodes upstream
Returns
-------
int
number of stream nodes immediately upstream of given stream node
Note
----
Most stream nodes will only have 1 stream node immediately upstream.
The upstream-most stream node has no upstream stream nodes and will return 0.
Stream nodes at a confluence of two stream reaches will return a value of 2
See Also
--------
IWFMModel.get_n_stream_nodes : Return the number of stream nodes in an IWFM model
IWFMModel.get_stream_node_ids : Return an array of stream node IDs in an IWFM model
IWFMModel.get_stream_nodes_upstream_of_stream_node : Return an array of the stream node ids immediately upstream of the provided stream node id
Example
-------
>>> from pywfm import IWFMModel
>>> pp_file = '../Preprocessor/PreProcessor_MAIN.IN'
>>> sim_file = 'Simulation_MAIN.IN'
>>> model = IWFMModel(pp_file, sim_file)
>>> model.get_n_stream_nodes_upstream_of_stream_node(11)
0
>>> model.kill()
>>> model.close_log_file()
"""
if not hasattr(self.dll, "IW_Model_GetStrmNUpstrmNodes"):
raise AttributeError(
'IWFM API does not have "{}" procedure. '
"Check for an updated version".format("IW_Model_GetStrmNUpstrmNodes")
)
# check that stream_node_id is an integer
if not isinstance(stream_node_id, (int, np.int32)):
raise TypeError("stream_node_id must be an integer")
# check that stream_node_id is a valid stream_node_id
stream_node_ids = self.get_stream_node_ids()
if not np.any(stream_node_ids == stream_node_id):
raise ValueError(
"stream_node_id '{}' is not a valid Stream Node ID".format(
stream_node_id
)
)
# convert stream_node_id to stream node index
# add 1 to convert index from python index to fortran index
stream_node_index = np.where(stream_node_ids == stream_node_id)[0][0] + 1
# set input variables
stream_node_index = ctypes.c_int(stream_node_index)
# set instance variable status to 0
status = ctypes.c_int(0)
# initialize output variables
n_upstream_stream_nodes = ctypes.c_int(0)
self.dll.IW_Model_GetStrmNUpstrmNodes(
ctypes.byref(stream_node_index),
ctypes.byref(n_upstream_stream_nodes),
ctypes.byref(status),
)
return n_upstream_stream_nodes.value
def get_stream_nodes_upstream_of_stream_node(self, stream_node_id):
"""
Return an array of the stream node ids immediately upstream
of the provided stream node id
Parameters
----------
stream_node_id : int
stream node id used to determine upstream stream nodes
Returns
-------
np.ndarray
integer array of stream node ids upstream of the provided stream node id
Note
----
Stream node IDs returned are only for the stream nodes immediately upstream of the specified
stream node ID. If the specified stream node is the upstream-most node, None is returned.
See Also
--------
IWFMModel.get_n_stream_nodes_upstream_of_stream_node : Return the number of stream nodes immediately upstream of the provided stream node id
IWFMModel.get_n_stream_nodes : Return the number of stream nodes in an IWFM model
IWFMModel.get_stream_node_ids : Return an array of stream node IDs in an IWFM model
Examples
--------
>>> from pywfm import IWFMModel
>>> pp_file = '../Preprocessor/PreProcessor_MAIN.IN'
>>> sim_file = 'Simulation_MAIN.IN'
>>> model = IWFMModel(pp_file, sim_file)
>>> print(model.get_stream_nodes_upstream_of_stream_node(11))
None
>>> model.kill()
>>> model.close_log_file()
>>> from pywfm import IWFMModel
>>> pp_file = '../Preprocessor/PreProcessor_MAIN.IN'
>>> sim_file = 'Simulation_MAIN.IN'
>>> model = IWFMModel(pp_file, sim_file)
>>> model.get_stream_nodes_upstream_of_stream_node(2)
array([1])
>>> model.kill()
>>> model.close_log_file()
"""
if not hasattr(self.dll, "IW_Model_GetStrmUpstrmNodes"):
raise AttributeError(
'IWFM API does not have "{}" procedure. '
"Check for an updated version".format("IW_Model_GetStrmUpstrmNodes")
)
# check that stream_node_id is an integer
if not isinstance(stream_node_id, int):
raise TypeError("stream_node_id must be an integer")
# check that stream_node_id is a valid stream_node_id
stream_node_ids = self.get_stream_node_ids()
if not np.any(stream_node_ids == stream_node_id):
raise ValueError("stream_node_id is not a valid Stream Node ID")
# convert stream_node_id to stream node index
# add 1 to convert between python indexing and fortran indexing
stream_node_index = np.where(stream_node_ids == stream_node_id)[0][0] + 1
# set input variables
n_upstream_stream_nodes = ctypes.c_int(
self.get_n_stream_nodes_upstream_of_stream_node(stream_node_id)
)
# return None if no upstream stream nodes
if n_upstream_stream_nodes.value == 0:
return
stream_node_index = ctypes.c_int(stream_node_index)
# set instance variable status to 0
status = ctypes.c_int(0)
# initialize output variables
upstream_nodes = (ctypes.c_int * n_upstream_stream_nodes.value)()
self.dll.IW_Model_GetStrmUpstrmNodes(
ctypes.byref(stream_node_index),
ctypes.byref(n_upstream_stream_nodes),
upstream_nodes,
ctypes.byref(status),
)
# convert stream node indices to stream node ids
upstream_node_indices = np.array(upstream_nodes)
return stream_node_ids[upstream_node_indices - 1]
def get_stream_bottom_elevations(self):
"""
Return the stream channel bottom elevation at each stream node
Returns
-------
np.ndarray
array of floats with the stream channel elevation for each stream node
See Also
--------
IWFMModel.get_n_stream_nodes : Return the number of stream nodes in an IWFM model
IWFMModel.get_stream_node_ids : Return an array of stream node IDs in an IWFM model
IWFMModel.get_n_rating_table_points : Return the number of data points in the stream flow rating table for a stream node
IWFMModel.get_stream_rating_table : Return the stream rating table for a specified stream node
Example
-------
>>> from pywfm import IWFMModel
>>> pp_file = '../Preprocessor/PreProcessor_MAIN.IN'
>>> sim_file = 'Simulation_MAIN.IN'
>>> model = IWFMModel(pp_file, sim_file)
>>> model.get_stream_bottom_elevations()
array([300., 298., 296., 294., 292., 290., 288., 286., 284., 282., 282.,
280., 278., 276., 274., 272., 272., 270., 268., 266., 264., 262.,
260.])
>>> model.kill()
>>> model.close_log_file()
"""
if not hasattr(self.dll, "IW_Model_GetStrmBottomElevs"):
raise AttributeError(
'IWFM API does not have "{}" procedure. '
"Check for an updated version".format("IW_Model_GetStrmBottomElevs")
)
# set input variables
n_stream_nodes = ctypes.c_int(self.get_n_stream_nodes())
# reset_instance variable status to 0
status = ctypes.c_int(0)
# initialize output variables
stream_bottom_elevations = (ctypes.c_double * n_stream_nodes.value)()
self.dll.IW_Model_GetStrmBottomElevs(
ctypes.byref(n_stream_nodes), stream_bottom_elevations, ctypes.byref(status)
)
return np.array(stream_bottom_elevations)
def get_n_rating_table_points(self, stream_node_id):
"""
Return the number of data points in the stream flow rating
table for a stream node
Parameters
----------
stream_node_id : int
stream node id used to determine number of data points in
the rating table
Returns
-------
int
number of data points in the stream flow rating table
See Also
--------
IWFMModel.get_stream_rating_table : Return the stream rating table for a specified stream node
IWFMModel.get_n_stream_nodes : Return the number of stream nodes in an IWFM model
IWFMModel.get_stream_node_ids : Return an array of stream node IDs in an IWFM model
IWFMModel.get_stream_bottom_elevations : Return the stream channel bottom elevation at each stream node
Example
-------
>>> from pywfm import IWFMModel
>>> pp_file = '../Preprocessor/PreProcessor_MAIN.IN'
>>> sim_file = 'Simulation_MAIN.IN'
>>> model = IWFMModel(pp_file, sim_file)
>>> model.get_n_rating_table_points(1)
5
>>> model.kill()
>>> model.close_log_file()
"""
if not hasattr(self.dll, "IW_Model_GetNStrmRatingTablePoints"):
raise AttributeError(
'IWFM API does not have "{}" procedure. '
"Check for an updated version".format(
"IW_Model_GetNStrmRatingTablePoints"
)
)
# check that stream_node_id is an integer
if not isinstance(stream_node_id, int):
raise TypeError("stream_node_id must be an integer")
# check that stream_node_id is a valid stream_node_id
stream_node_ids = self.get_stream_node_ids()
if not np.any(stream_node_ids == stream_node_id):
raise ValueError("stream_node_id is not a valid Stream Node ID")
# convert stream_node_id to stream node index
# add 1 to convert python index to fortran index
stream_node_index = np.where(stream_node_ids == stream_node_id)[0][0] + 1
# set input variables convert to ctypes, if not already
stream_node_index = ctypes.c_int(stream_node_index)
# reset instance variable status to 0
status = ctypes.c_int(0)
# initialize output variables
n_rating_table_points = ctypes.c_int(0)
self.dll.IW_Model_GetNStrmRatingTablePoints(
ctypes.byref(stream_node_index),
ctypes.byref(n_rating_table_points),
ctypes.byref(status),
)
return n_rating_table_points.value
def get_stream_rating_table(self, stream_node_id):
"""
Return the stream rating table for a specified stream node
Parameters
----------
stream_node_id : int
stream node id used to return the rating table
Returns
-------
tuple (length=2)
np.ndarrays representing stage and flow, respectively
See Also
--------
IWFMModel.get_n_rating_table_points : Return the number of data points in the stream flow rating table for a stream node
IWFMModel.get_n_stream_nodes : Return the number of stream nodes in an IWFM model
IWFMModel.get_stream_node_ids : Return an array of stream node IDs in an IWFM model
IWFMModel.get_stream_bottom_elevations : Return the stream channel bottom elevation at each stream node
Example
-------
>>> from pywfm import IWFMModel
>>> pp_file = '../Preprocessor/PreProcessor_MAIN.IN'
>>> sim_file = 'Simulation_MAIN.IN'
>>> model = IWFMModel(pp_file, sim_file)
>>> stage, flow = model.get_stream_rating_table(1)
>>> stage
array([ 0., 2., 5., 15., 25.])
>>> flow
array([0.00000000e+00, 6.34988160e+07, 2.85058656e+08, 1.64450304e+09,
3.59151408e+09])
>>> model.kill()
>>> model.close_log_file()
"""
if not hasattr(self.dll, "IW_Model_GetStrmRatingTable"):
raise AttributeError(
'IWFM API does not have "{}" procedure. '
"Check for an updated version".format("IW_Model_GetStrmRatingTable")
)
# check that stream_node_id is an integer
if not isinstance(stream_node_id, int):
raise TypeError("stream_node_id must be an integer")
# check that stream_node_id is a valid stream_node_id
stream_node_ids = self.get_stream_node_ids()
if not np.any(stream_node_ids == stream_node_id):
raise ValueError("stream_node_id is not a valid Stream Node ID")
# convert stream_node_id to stream node index
# add 1 to convert between python index and fortran index
stream_node_index = np.where(stream_node_ids == stream_node_id)[0][0] + 1
# set input variables
stream_node_index = ctypes.c_int(stream_node_index)
n_rating_table_points = ctypes.c_int(
self.get_n_rating_table_points(stream_node_id)
)
# set instance variable status to 0
status = ctypes.c_int(0)
# initialize output variables
stage = (ctypes.c_double * n_rating_table_points.value)()
flow = (ctypes.c_double * n_rating_table_points.value)()
self.dll.IW_Model_GetStrmRatingTable(
ctypes.byref(stream_node_index),
ctypes.byref(n_rating_table_points),
stage,
flow,
ctypes.byref(status),
)
return np.array(stage), np.array(flow)
def get_n_stream_inflows(self):
"""
Return the number of stream boundary inflows specified by the
user as timeseries input data
Returns
-------
int
number of stream boundary inflows
See Also
--------
IWFMModel.get_stream_inflow_nodes : Return the stream nodes indices that receive boundary inflows specified by the user as timeseries input data
IWFMModel.get_stream_inflow_ids : Return the identification numbers for the stream boundary inflows specified by the user as timeseries input data
Example
-------
>>> from pywfm import IWFMModel
>>> pp_file = '../Preprocessor/PreProcessor_MAIN.IN'
>>> sim_file = 'Simulation_MAIN.IN'
>>> model = IWFMModel(pp_file, sim_file)
>>> model.get_n_stream_inflows()
1
>>> model.kill()
>>> model.close_log_file()
"""
if not hasattr(self.dll, "IW_Model_GetStrmNInflows"):
raise AttributeError(
'IWFM API does not have "{}" procedure. '
"Check for an updated version".format("IW_Model_GetStrmNInflows")
)
# set instance variable status to 0
status = ctypes.c_int(0)
# initialize output variables
n_stream_inflows = ctypes.c_int(0)
self.dll.IW_Model_GetStrmNInflows(
ctypes.byref(n_stream_inflows), ctypes.byref(status)
)
return n_stream_inflows.value
def get_stream_inflow_nodes(self):
"""
Return the stream node indices that receive boundary
inflows specified by the user as timeseries input data
Returns
-------
np.ndarray
integer array of stream node IDs where inflows occur
See Also
--------
IWFMModel.get_n_stream_inflows : Return the number of stream boundary inflows specified by the user as timeseries input data
IWFMModel.get_stream_inflow_ids : Return the identification numbers for the stream boundary inflows specified by the user as timeseries input data
Example
-------
>>> from pywfm import IWFMModel
>>> pp_file = '../Preprocessor/PreProcessor_MAIN.IN'
>>> sim_file = 'Simulation_MAIN.IN'
>>> model = IWFMModel(pp_file, sim_file)
>>> model.get_stream_inflow_nodes()
array([1])
>>> model.kill()
>>> model.close_log_file()
"""
if not hasattr(self.dll, "IW_Model_GetStrmInflowNodes"):
raise AttributeError(
'IWFM API does not have "{}" procedure. '
"Check for an updated version".format("IW_Model_GetStrmInflowNodes")
)
# get number of stream inflow nodes
n_stream_inflows = ctypes.c_int(self.get_n_stream_inflows())
# set instance variable status to 0
status = ctypes.c_int(0)
# initialize output variables
stream_inflow_nodes = (ctypes.c_int * n_stream_inflows.value)()
self.dll.IW_Model_GetStrmInflowNodes(
ctypes.byref(n_stream_inflows), stream_inflow_nodes, ctypes.byref(status)
)
# convert stream node indices to stream node IDs
stream_node_ids = self.get_stream_node_ids()
stream_inflow_node_indices = np.array(stream_inflow_nodes)
return stream_node_ids[stream_inflow_node_indices - 1]
def get_stream_inflow_ids(self):
"""
Return the identification numbers for the stream boundary
inflows specified by the user as timeseries input data
Returns
-------
np.ndarray
integer array of stream inflow IDs
See Also
--------
IWFMModel.get_n_stream_inflows : Return the number of stream boundary inflows specified by the user as timeseries input data
IWFMModel.get_stream_inflow_nodes : Return the stream node indices that receive boundary inflows specified by the user as timeseries input data
Example
-------
>>> from pywfm import IWFMModel
>>> pp_file = '../Preprocessor/PreProcessor_MAIN.IN'
>>> sim_file = 'Simulation_MAIN.IN'
>>> model = IWFMModel(pp_file, sim_file)
>>> model.get_stream_inflow_ids()
array([1])
>>> model.kill()
>>> model.close_log_file()
"""
if not hasattr(self.dll, "IW_Model_GetStrmInflowIDs"):
raise AttributeError(
'IWFM API does not have "{}" procedure. '
"Check for an updated version".format("IW_Model_GetStrmInflowIDs")
)
# get number of stream inflow nodes
n_stream_inflows = ctypes.c_int(self.get_n_stream_inflows())
# set instance variable status to 0
status = ctypes.c_int(0)
# initialize output variables
stream_inflow_ids = (ctypes.c_int * n_stream_inflows.value)()
self.dll.IW_Model_GetStrmInflowIDs(
ctypes.byref(n_stream_inflows), stream_inflow_ids, ctypes.byref(status)
)
return np.array(stream_inflow_ids)
def get_stream_inflows_at_some_locations(
self, stream_inflow_locations="all", inflow_conversion_factor=1.0
):
"""
Return stream boundary inflows at a specified set of inflow
locations listed by their indices for the current simulation timestep
Parameters
----------
stream_inflow_locations : int, list, tuple, np.ndarray, or str='all', default='all'
one or more stream inflow ids used to return flows
inflow_conversion_factor : float, default=1.0
conversion factor for stream boundary inflows from the
simulation units of volume to a desired unit of volume
Returns
-------
np.ndarray
array of inflows for the inflow locations at the current
simulation time step
Note
----
This method is designed for use when is_for_inquiry=0 to return
stream inflows at the current timestep during a simulation.
See Also
--------
IWFMModel.get_stream_flow_at_location : Return stream flow at a stream node for the current time step in a simulation
IWFMModel.get_stream_flows : Return stream flows at every stream node for the current timestep
IWFMModel.get_stream_stages : Return stream stages at every stream node for the current timestep
Example
-------
>>> from pywfm import IWFMModel
>>> pp_file = '../Preprocessor/PreProcessor_MAIN.IN'
>>> sim_file = 'Simulation_MAIN.IN'
>>> model = IWFMModel(pp_file, sim_file, is_for_inquiry=0)
>>> while not model.is_end_of_simulation():
... # advance the simulation time one time step forward
... model.advance_time()
...
... # read all time series data from input files
... model.read_timeseries_data()
...
... # Simulate the hydrologic process for the timestep
... model.simulate_for_one_timestep()
...
... # print stream inflows
... print(model.get_stream_inflows_at_some_locations(1)[0])
...
... # print the results to the user-specified output files
... model.print_results()
...
... # advance the state of the hydrologic system in time
... model.advance_state()
.
.
.
86400000.
* TIME STEP 2 AT 10/02/1990_24:00
86400000.
* TIME STEP 3 AT 10/03/1990_24:00
86400000.
* TIME STEP 4 AT 10/04/1990_24:00
86400000.
.
.
.
* TIME STEP 3652 AT 09/29/2000_24:00
86400000.
* TIME STEP 3653 AT 09/30/2000_24:00
86400000.
>>> model.kill()
>>> model.close_log_file()
"""
if not hasattr(self.dll, "IW_Model_GetStrmInflows_AtSomeInflows"):
raise AttributeError(
'IWFM API does not have "{}" procedure. '
"Check for an updated version".format(
"IW_Model_GetStrmInflows_AtSomeInflows"
)
)
# get possible stream inflow locations
stream_inflow_ids = self.get_stream_inflow_ids()
if isinstance(stream_inflow_locations, str):
if stream_inflow_locations.lower() == "all":
stream_inflow_locations = stream_inflow_ids
else:
raise ValueError('if stream_inflow_locations is a string, must be "all"')
# if int convert to np.ndarray
if isinstance(stream_inflow_locations, int):
stream_inflow_locations = np.array([stream_inflow_locations])
# if list or tuple convert to np.ndarray
if isinstance(stream_inflow_locations, (list, tuple)):
stream_inflow_locations = np.array(stream_inflow_locations)
# if stream_inflow_locations were provided as an int, list, tuple, 'all',
# np.ndarray they should now all be np.ndarray, so check if np.ndarray
if not isinstance(stream_inflow_locations, np.ndarray):
raise TypeError(
"stream_inflow_locations must be an int, list, or np.ndarray"
)
# check if all of the provided stream_inflow_locations are valid
if not np.all(np.isin(stream_inflow_locations, stream_inflow_ids)):
raise ValueError("One or more stream inflow locations are invalid")
# convert stream_inflow_locations to stream inflow indices
# add 1 to convert between python indices and fortran indices
stream_inflow_indices = (
np.array(
[
np.where(stream_inflow_ids == item)[0][0]
for item in stream_inflow_locations
]
)
+ 1
)
# initialize input variables
n_stream_inflow_locations = ctypes.c_int(len(stream_inflow_locations))
stream_inflow_indices = (ctypes.c_int * n_stream_inflow_locations.value)(
*stream_inflow_indices
)
inflow_conversion_factor = ctypes.c_double(inflow_conversion_factor)
# set instance variable status to 0
status = ctypes.c_int(0)
# initialize output variables
inflows = (ctypes.c_double * n_stream_inflow_locations.value)()
self.dll.IW_Model_GetStrmInflows_AtSomeInflows(
ctypes.byref(n_stream_inflow_locations),
stream_inflow_indices,
ctypes.byref(inflow_conversion_factor),
inflows,
ctypes.byref(status),
)
return np.array(inflows)
def get_stream_flow_at_location(self, stream_node_id, flow_conversion_factor=1.0):
"""
Return stream flow at a stream node for the current time
step in a simulation
Parameters
----------
stream_node_id : int
stream node ID where flow is retrieved
flow_conversion_factor : float, default=1.0
conversion factor for stream flows from the
simulation units of volume to a desired unit of volume
Returns
-------
float
stream flow at specified stream node
Note
----
This method is designed for use when is_for_inquiry=0 to return
a stream flow at the current timestep during a simulation.
See Also
--------
IWFMModel.get_stream_inflows_at_some_locations : Return stream boundary inflows at a specified set of inflow locations listed by their indices for the current simulation timestep
IWFMModel.get_stream_flows : Return stream flows at every stream node for the current timestep
IWFMModel.get_stream_stages : Return stream stages at every stream node for the current timestep
Example
-------
>>> from pywfm import IWFMModel
>>> pp_file = '../Preprocessor/PreProcessor_MAIN.IN'
>>> sim_file = 'Simulation_MAIN.IN'
>>> model = IWFMModel(pp_file, sim_file, is_for_inquiry=0)
>>> while not model.is_end_of_simulation():
... # advance the simulation time one time step forward
... model.advance_time()
...
... # read all time series data from input files
... model.read_timeseries_data()
...
... # Simulate the hydrologic process for the timestep
... model.simulate_for_one_timestep()
...
... # print stream flow at stream node ID = 1
... print(model.get_stream_flow_at_location(1))
...
... # print the results to the user-specified output files
... model.print_results()
...
... # advance the state of the hydrologic system in time
... model.advance_state()
.
.
.
75741791.53232515
* TIME STEP 2 AT 10/02/1990_24:00
75741791.53232515
* TIME STEP 3 AT 10/03/1990_24:00
75741791.53232515
* TIME STEP 4 AT 10/04/1990_24:00
75741791.53232515
.
.
.
* TIME STEP 3652 AT 09/29/2000_24:00
85301157.65510693
* TIME STEP 3653 AT 09/30/2000_24:00
85301292.67626143
>>> model.kill()
>>> model.close_log_file()
"""
if not hasattr(self.dll, "IW_Model_GetStrmFlow"):
raise AttributeError(
'IWFM API does not have "{}" procedure. '
"Check for an updated version".format("IW_Model_GetStrmFlow")
)
# check that stream_node_id is a valid stream_node_id
stream_node_ids = self.get_stream_node_ids()
if not np.any(stream_node_ids == stream_node_id):
raise ValueError("stream_node_id is not a valid Stream Node ID")
# convert stream_node_id to stream node index
# add 1 to convert between python index and fortran index
stream_node_index = np.where(stream_node_ids == stream_node_id)[0][0] + 1
# convert input variables to ctypes
stream_node_index = ctypes.c_int(stream_node_index)
flow_conversion_factor = ctypes.c_double(flow_conversion_factor)
# initialize output variables
stream_flow = ctypes.c_double(0)
# set instance variable status to 0
status = ctypes.c_int(0)
self.dll.IW_Model_GetStrmFlow(
ctypes.byref(stream_node_index),
ctypes.byref(flow_conversion_factor),
ctypes.byref(stream_flow),
ctypes.byref(status),
)
return stream_flow.value
def get_stream_flows(self, flow_conversion_factor=1.0):
"""
Return stream flows at every stream node for the current timestep
Parameters
----------
flow_conversion_factor : float, default=1.0
conversion factor for stream flows from the
simulation units of volume to a desired unit of volume
Returns
-------
np.ndarray
flows for all stream nodes for the current simulation timestep
Note
----
This method is designed for use when is_for_inquiry=0 to return
stream flows at the current timestep during a simulation.
See Also
--------
IWFMModel.get_stream_inflows_at_some_locations : Return stream boundary inflows at a specified set of inflow locations listed by their indices for the current simulation timestep
IWFMModel.get_stream_flow_at_location : Return stream flow at a stream node for the current time step in a simulation
IWFMModel.get_stream_stages : Return stream stages at every stream node for the current timestep
Example
-------
>>> from pywfm import IWFMModel
>>> pp_file = '../Preprocessor/PreProcessor_MAIN.IN'
>>> sim_file = 'Simulation_MAIN.IN'
>>> model = IWFMModel(pp_file, sim_file, is_for_inquiry=0)
>>> while not model.is_end_of_simulation():
... # advance the simulation time one time step forward
... model.advance_time()
...
... # read all time series data from input files
... model.read_timeseries_data()
...
... # Simulate the hydrologic process for the timestep
... model.simulate_for_one_timestep()
...
... # get stream flows
... stream_flows = model.get_stream_flows()
... stream_node_ids = model.get_stream_node_ids()
...
... # print the results to the user-specified output files
... model.print_results()
...
... # advance the state of the hydrologic system in time
... model.advance_state()
>>> for i, flow in enumerate(stream_flows):
... print(stream_node_ids[i], flow)
* TIME STEP 3653 AT 09/30/2000_24:00
1 85301292.67626143
2 83142941.70620254
3 81028792.9071748
4 78985517.65754062
5 77081104.67763746
6 75724877.72101441
7 74440170.86435351
8 73367874.87547392
9 71735544.16731748
10 70995694.52663273
11 53285997.91790043
12 44.84964866936207
13 0.0
14 0.0
15 0.0
16 0.0
17 0.0
18 2553191.7510338724
19 1948997.4229038805
20 1487781.3046951443
21 2345774.2345003784
22 1599258.8286072314
23 2495579.2758224607
>>> model.kill()
>>> model.close_log_file()
"""
if not hasattr(self.dll, "IW_Model_GetStrmFlows"):
raise AttributeError(
'IWFM API does not have "{}" procedure. '
"Check for an updated version".format("IW_Model_GetStrmFlows")
)
# get number of stream nodes in the model
n_stream_nodes = ctypes.c_int(self.get_n_stream_nodes())
# convert unit conversion factor to ctypes
flow_conversion_factor = ctypes.c_double(flow_conversion_factor)
# initialize output variables
stream_flows = (ctypes.c_double * n_stream_nodes.value)()
# set instance variable status to 0
status = ctypes.c_int(0)
self.dll.IW_Model_GetStrmFlows(
ctypes.byref(n_stream_nodes),
ctypes.byref(flow_conversion_factor),
stream_flows,
ctypes.byref(status),
)
return np.array(stream_flows)
def get_stream_stages(self, stage_conversion_factor=1.0):
"""
Return stream stages at every stream node for the current timestep
Parameters
----------
stage_conversion_factor : float, default=1.0
conversion factor for stream stages from the
simulation units of length to a desired unit of length
Returns
-------
np.ndarray
stages for all stream nodes for the current simulation timestep
Note
----
This method is designed for use when is_for_inquiry=0 to return
stream stages at the current timestep during a simulation.
See Also
--------
IWFMModel.get_stream_inflows_at_some_locations : Return stream boundary inflows at a specified set of inflow locations listed by their indices for the current simulation timestep
IWFMModel.get_stream_flow_at_location : Return stream flow at a stream node for the current time step in a simulation
IWFMModel.get_stream_flows : Return stream flows at every stream node for the current timestep
Example
-------
>>> from pywfm import IWFMModel
>>> pp_file = '../Preprocessor/PreProcessor_MAIN.IN'
>>> sim_file = 'Simulation_MAIN.IN'
>>> model = IWFMModel(pp_file, sim_file, is_for_inquiry=0)
>>> while not model.is_end_of_simulation():
... # advance the simulation time one time step forward
... model.advance_time()
...
... # read all time series data from input files
... model.read_timeseries_data()
...
... # Simulate the hydrologic process for the timestep
... model.simulate_for_one_timestep()
...
... # get stream stages
... stream_stages = model.get_stream_stages()
... stream_node_ids = model.get_stream_node_ids()
...
... # print the results to the user-specified output files
... model.print_results()
...
... # advance the state of the hydrologic system in time
... model.advance_state()
>>> for i, stage in enumerate(stream_stages):
... print(stream_node_ids[i], stage)
* TIME STEP 3653 AT 09/30/2000_24:00
1 2.2952133835661925
2 2.265988534377925
3 2.23736219849917
4 2.209695515995236
5 2.183909078616921
6 2.1655452773528054
7 2.148149883990982
8 2.133630610251487
9 2.1115282647882054
10 2.1015104342912423
11 1.6783304406148432
12 1.4126136989034421e-06
13 0.0
14 0.0
15 0.0
16 0.0
17 0.0
18 0.08041698765009642
19 0.061386890202925315
20 0.046860127429624754
21 0.07388403067233185
22 0.050371296013054234
23 0.07860238766727434
>>> model.kill()
>>> model.close_log_file()
"""
if not hasattr(self.dll, "IW_Model_GetStrmStages"):
raise AttributeError(
'IWFM API does not have "{}" procedure. '
"Check for an updated version".format("IW_Model_GetStrmStages")
)
# get number of stream nodes in the model
n_stream_nodes = ctypes.c_int(self.get_n_stream_nodes())
# convert unit conversion factor to ctypes
stage_conversion_factor = ctypes.c_double(stage_conversion_factor)
# initialize output variables
stream_stages = (ctypes.c_double * n_stream_nodes.value)()
# set instance variable status to 0
status = ctypes.c_int(0)
self.dll.IW_Model_GetStrmStages(
ctypes.byref(n_stream_nodes),
ctypes.byref(stage_conversion_factor),
stream_stages,
ctypes.byref(status),
)
return np.array(stream_stages)
def get_stream_tributary_inflows(self, inflow_conversion_factor=1.0):
"""
Return small watershed inflows at every stream node for the current timestep
Parameters
----------
inflow_conversion_factor : float, default=1.0
conversion factor for small watershed flows from the
simulation units of volume to a desired unit of volume
Returns
-------
np.ndarray
inflows from small watersheds for all stream nodes for the
current simulation timestep
Note
----
This method is designed for use when is_for_inquiry=0 to return
small watershed inflows at the current timestep during a simulation.
Stream nodes without a small watershed draining to them will have a value of 0.
See Also
--------
IWFMModel.get_stream_rainfall_runoff : Return rainfall runoff at every stream node for the current timestep
IWFMModel.get_stream_return_flows : Return agricultural and urban return flows at every stream node for the current timestep
IWFMModel.get_stream_tile_drain_flows : Return tile drain flows into every stream node for the current timestep
IWFMModel.get_stream_riparian_evapotranspiration : Return riparian evapotranspiration from every stream node for the current timestep
IWFMModel.get_stream_gain_from_groundwater : Return gain from groundwater for every stream node for the current timestep
IWFMModel.get_stream_gain_from_lakes : Return gain from lakes for every stream node for the current timestep
IWFMModel.get_net_bypass_inflows : Return net bypass inflows for every stream node for the current timestep
IWFMModel.get_actual_stream_diversions_at_some_locations : Return actual diversion amounts for a list of diversions during a model simulation
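Example
-------
The following is a usage sketch only; it assumes the example preprocessor and
simulation files used in the other examples and a model run with is_for_inquiry=0.
No output values are shown because they depend on the model.
>>> from pywfm import IWFMModel
>>> pp_file = '../Preprocessor/PreProcessor_MAIN.IN'
>>> sim_file = 'Simulation_MAIN.IN'
>>> model = IWFMModel(pp_file, sim_file, is_for_inquiry=0)
>>> while not model.is_end_of_simulation():
... # advance the simulation time one time step forward
... model.advance_time()
... # read all time series data from input files
... model.read_timeseries_data()
... # Simulate the hydrologic process for the timestep
... model.simulate_for_one_timestep()
... # get small watershed inflows at every stream node
... small_watershed_inflows = model.get_stream_tributary_inflows()
... # print the results to the user-specified output files
... model.print_results()
... # advance the state of the hydrologic system in time
... model.advance_state()
>>> model.kill()
>>> model.close_log_file()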
"""
if not hasattr(self.dll, "IW_Model_GetStrmTributaryInflows"):
raise AttributeError(
'IWFM API does not have "{}" procedure. '
"Check for an updated version".format(
"IW_Model_GetStrmTributaryInflows"
)
)
# get number of stream nodes in the model
n_stream_nodes = ctypes.c_int(self.get_n_stream_nodes())
# convert unit conversion factor to ctypes
inflow_conversion_factor = ctypes.c_double(inflow_conversion_factor)
# initialize output variables
small_watershed_inflows = (ctypes.c_double * n_stream_nodes.value)()
# set instance variable status to 0
status = ctypes.c_int(0)
self.dll.IW_Model_GetStrmTributaryInflows(
ctypes.byref(n_stream_nodes),
ctypes.byref(inflow_conversion_factor),
small_watershed_inflows,
ctypes.byref(status),
)
return np.array(small_watershed_inflows)
def get_stream_rainfall_runoff(self, runoff_conversion_factor=1.0):
"""
Return rainfall runoff at every stream node for the current timestep
Parameters
----------
runoff_conversion_factor : float, default=1.0
conversion factor for inflows due to rainfall-runoff from
the simulation units of volume to a desired unit of volume
Returns
-------
np.ndarray
inflows from rainfall-runoff for all stream nodes for the
current simulation timestep
Note
----
This method is designed for use when is_for_inquiry=0 to return
inflows from rainfall-runoff at the current timestep during a simulation.
Stream nodes without rainfall-runoff draining to them will have a value of 0.
See Also
--------
IWFMModel.get_stream_tributary_inflows : Return small watershed inflows at every stream node for the current timestep
IWFMModel.get_stream_return_flows : Return agricultural and urban return flows at every stream node for the current timestep
IWFMModel.get_stream_tile_drain_flows : Return tile drain flows into every stream node for the current timestep
IWFMModel.get_stream_riparian_evapotranspiration : Return riparian evapotranspiration from every stream node for the current timestep
IWFMModel.get_stream_gain_from_groundwater : Return gain from groundwater for every stream node for the current timestep
IWFMModel.get_stream_gain_from_lakes : Return gain from lakes for every stream node for the current timestep
IWFMModel.get_net_bypass_inflows : Return net bypass inflows for every stream node for the current timestep
IWFMModel.get_actual_stream_diversions_at_some_locations : Return actual diversion amounts for a list of diversions during a model simulation
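Example
-------
Usage sketch only (illustrative, no output shown); assumes the same example
input files as the other examples and a model run with is_for_inquiry=0.
>>> from pywfm import IWFMModel
>>> pp_file = '../Preprocessor/PreProcessor_MAIN.IN'
>>> sim_file = 'Simulation_MAIN.IN'
>>> model = IWFMModel(pp_file, sim_file, is_for_inquiry=0)
>>> while not model.is_end_of_simulation():
... model.advance_time()
... model.read_timeseries_data()
... model.simulate_for_one_timestep()
... # rainfall-runoff inflow at every stream node for this timestep
... rainfall_runoff = model.get_stream_rainfall_runoff()
... model.print_results()
... model.advance_state()
>>> model.kill()
>>> model.close_log_file()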
"""
if not hasattr(self.dll, "IW_Model_GetStrmRainfallRunoff"):
raise AttributeError(
'IWFM API does not have "{}" procedure. '
"Check for an updated version".format("IW_Model_GetStrmRainfallRunoff")
)
# get number of stream nodes in the model
n_stream_nodes = ctypes.c_int(self.get_n_stream_nodes())
# convert unit conversion factor to ctypes
runoff_conversion_factor = ctypes.c_double(runoff_conversion_factor)
# initialize output variables
rainfall_runoff_inflows = (ctypes.c_double * n_stream_nodes.value)()
# set instance variable status to 0
status = ctypes.c_int(0)
self.dll.IW_Model_GetStrmRainfallRunoff(
ctypes.byref(n_stream_nodes),
ctypes.byref(runoff_conversion_factor),
rainfall_runoff_inflows,
ctypes.byref(status),
)
return np.array(rainfall_runoff_inflows)
def get_stream_return_flows(self, return_flow_conversion_factor=1.0):
"""
Return agricultural and urban return flows at every stream
node for the current timestep
Parameters
----------
return_flow_conversion_factor : float, default=1.0
conversion factor for return flows from
the simulation units of volume to a desired unit of volume
Returns
-------
np.ndarray
return flows for all stream nodes for the
current simulation timestep
Note
----
This method is designed for use when is_for_inquiry=0 to return
return flows at the current timestep during a simulation.
stream nodes without return flows will be 0
See Also
--------
IWFMModel.get_stream_tributary_inflows : Return small watershed inflows at every stream node for the current timestep
IWFMModel.get_stream_rainfall_runoff : Return rainfall runoff at every stream node for the current timestep
IWFMModel.get_stream_tile_drain_flows : Return tile drain flows into every stream node for the current timestep
IWFMModel.get_stream_riparian_evapotranspiration : Return riparian evapotranspiration from every stream node for the current timestep
IWFMModel.get_stream_gain_from_groundwater : Return gain from groundwater for every stream node for the current timestep
IWFMModel.get_stream_gain_from_lakes : Return gain from lakes for every stream node for the current timestep
IWFMModel.get_net_bypass_inflows : Return net bypass inflows for every stream node for the current timestep
IWFMModel.get_actual_stream_diversions_at_some_locations : Return actual diversion amounts for a list of diversions during a model simulation
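Example
-------
Illustrative usage sketch (no output shown); assumes the example input files
used elsewhere in this documentation and a model run with is_for_inquiry=0.
>>> from pywfm import IWFMModel
>>> pp_file = '../Preprocessor/PreProcessor_MAIN.IN'
>>> sim_file = 'Simulation_MAIN.IN'
>>> model = IWFMModel(pp_file, sim_file, is_for_inquiry=0)
>>> while not model.is_end_of_simulation():
... model.advance_time()
... model.read_timeseries_data()
... model.simulate_for_one_timestep()
... # agricultural and urban return flows at every stream node
... return_flows = model.get_stream_return_flows()
... model.print_results()
... model.advance_state()
>>> model.kill()
>>> model.close_log_file()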
"""
if not hasattr(self.dll, "IW_Model_GetStrmReturnFlows"):
raise AttributeError(
'IWFM API does not have "{}" procedure. '
"Check for an updated version".format("IW_Model_GetStrmReturnFlows")
)
# get number of stream nodes in the model
n_stream_nodes = ctypes.c_int(self.get_n_stream_nodes())
# convert unit conversion factor to ctypes
return_flow_conversion_factor = ctypes.c_double(return_flow_conversion_factor)
# initialize output variables
return_flows = (ctypes.c_double * n_stream_nodes.value)()
# set instance variable status to 0
status = ctypes.c_int(0)
self.dll.IW_Model_GetStrmReturnFlows(
ctypes.byref(n_stream_nodes),
ctypes.byref(return_flow_conversion_factor),
return_flows,
ctypes.byref(status),
)
return np.array(return_flows)
def get_stream_tile_drain_flows(self, tile_drain_conversion_factor=1.0):
"""
Return tile drain flows into every stream node for the current timestep
Parameters
----------
tile_drain_conversion_factor : float, default=1.0
conversion factor for tile drain flows from
the simulation units of volume to a desired unit of volume
Returns
-------
np.ndarray
tile drain flows for all stream nodes for the current
simulation timestep
Note
----
This method is designed for use when is_for_inquiry=0 to return
tile drain flows at the current timestep during a simulation.
stream nodes without tile drain flows will be 0
See Also
--------
IWFMModel.get_stream_tributary_inflows : Return small watershed inflows at every stream node for the current timestep
IWFMModel.get_stream_rainfall_runoff : Return rainfall runoff at every stream node for the current timestep
IWFMModel.get_stream_return_flows : Return agricultural and urban return flows at every stream node for the current timestep
IWFMModel.get_stream_riparian_evapotranspiration : Return riparian evapotranspiration from every stream node for the current timestep
IWFMModel.get_stream_gain_from_groundwater : Return gain from groundwater for every stream node for the current timestep
IWFMModel.get_stream_gain_from_lakes : Return gain from lakes for every stream node for the current timestep
IWFMModel.get_net_bypass_inflows : Return net bypass inflows for every stream node for the current timestep
IWFMModel.get_actual_stream_diversions_at_some_locations : Return actual diversion amounts for a list of diversions during a model simulation
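Example
-------
Usage sketch only (no output shown, since values are model-dependent);
assumes the example input files used above and a model run with is_for_inquiry=0.
>>> from pywfm import IWFMModel
>>> pp_file = '../Preprocessor/PreProcessor_MAIN.IN'
>>> sim_file = 'Simulation_MAIN.IN'
>>> model = IWFMModel(pp_file, sim_file, is_for_inquiry=0)
>>> while not model.is_end_of_simulation():
... model.advance_time()
... model.read_timeseries_data()
... model.simulate_for_one_timestep()
... # tile drain flows into every stream node for this timestep
... tile_drain_flows = model.get_stream_tile_drain_flows()
... model.print_results()
... model.advance_state()
>>> model.kill()
>>> model.close_log_file()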
"""
if not hasattr(self.dll, "IW_Model_GetStrmTileDrains"):
raise AttributeError(
'IWFM API does not have "{}" procedure. '
"Check for an updated version".format("IW_Model_GetStrmTileDrains")
)
# get number of stream nodes in the model
n_stream_nodes = ctypes.c_int(self.get_n_stream_nodes())
# convert unit conversion factor to ctypes
tile_drain_conversion_factor = ctypes.c_double(tile_drain_conversion_factor)
# initialize output variables
tile_drain_flows = (ctypes.c_double * n_stream_nodes.value)()
# set instance variable status to 0
status = ctypes.c_int(0)
self.dll.IW_Model_GetStrmTileDrains(
ctypes.byref(n_stream_nodes),
ctypes.byref(tile_drain_conversion_factor),
tile_drain_flows,
ctypes.byref(status),
)
return np.array(tile_drain_flows)
def get_stream_riparian_evapotranspiration(
self, evapotranspiration_conversion_factor=1.0
):
"""
Return riparian evapotranspiration from every stream node for the current timestep
Parameters
----------
evapotranspiration_conversion_factor : float, default=1.0
conversion factor for riparian evapotranspiration from
the simulation units of volume to a desired unit of volume
Returns
-------
np.ndarray
riparian evapotranspiration for all stream nodes for the current
simulation timestep
Note
----
This method is designed for use when is_for_inquiry=0 to return
riparian evapotranspiration at the current timestep during a
simulation.
stream nodes without riparian evapotranspiration will be 0
See Also
--------
IWFMModel.get_stream_tributary_inflows : Return small watershed inflows at every stream node for the current timestep
IWFMModel.get_stream_rainfall_runoff : Return rainfall runoff at every stream node for the current timestep
IWFMModel.get_stream_return_flows : Return agricultural and urban return flows at every stream node for the current timestep
IWFMModel.get_stream_tile_drain_flows : Return tile drain flows into every stream node for the current timestep
IWFMModel.get_stream_gain_from_groundwater : Return gain from groundwater for every stream node for the current timestep
IWFMModel.get_stream_gain_from_lakes : Return gain from lakes for every stream node for the current timestep
IWFMModel.get_net_bypass_inflows : Return net bypass inflows for every stream node for the current timestep
IWFMModel.get_actual_stream_diversions_at_some_locations : Return actual diversion amounts for a list of diversions during a model simulation
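Example
-------
Usage sketch only (illustrative; no output shown); assumes the example input
files used in the other examples and a model run with is_for_inquiry=0.
>>> from pywfm import IWFMModel
>>> pp_file = '../Preprocessor/PreProcessor_MAIN.IN'
>>> sim_file = 'Simulation_MAIN.IN'
>>> model = IWFMModel(pp_file, sim_file, is_for_inquiry=0)
>>> while not model.is_end_of_simulation():
... model.advance_time()
... model.read_timeseries_data()
... model.simulate_for_one_timestep()
... # riparian evapotranspiration from every stream node
... riparian_et = model.get_stream_riparian_evapotranspiration()
... model.print_results()
... model.advance_state()
>>> model.kill()
>>> model.close_log_file()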
"""
if not hasattr(self.dll, "IW_Model_GetStrmRiparianETs"):
raise AttributeError(
'IWFM API does not have "{}" procedure. '
"Check for an updated version".format("IW_Model_GetStrmRiparianETs")
)
# get number of stream nodes in the model
n_stream_nodes = ctypes.c_int(self.get_n_stream_nodes())
# convert unit conversion factor to ctypes
evapotranspiration_conversion_factor = ctypes.c_double(
evapotranspiration_conversion_factor
)
# initialize output variables
riparian_evapotranspiration = (ctypes.c_double * n_stream_nodes.value)()
# set instance variable status to 0
status = ctypes.c_int(0)
self.dll.IW_Model_GetStrmRiparianETs(
ctypes.byref(n_stream_nodes),
ctypes.byref(evapotranspiration_conversion_factor),
riparian_evapotranspiration,
ctypes.byref(status),
)
return np.array(riparian_evapotranspiration)
def get_stream_gain_from_groundwater(self, stream_gain_conversion_factor=1.0):
"""
Return gain from groundwater for every stream node for the current timestep
Parameters
----------
stream_gain_conversion_factor : float, default=1.0
conversion factor for gain from groundwater from
the simulation units of volume to a desired unit of volume
Returns
-------
np.ndarray
gain from groundwater for all stream nodes for the current
simulation timestep
Note
----
This method is designed for use when is_for_inquiry=0 to return
gain from groundwater at the current timestep during a
simulation.
Stream nodes that gain flow from groundwater will have positive (+) values;
stream nodes that lose flow to groundwater will have negative (-) values.
See Also
--------
IWFMModel.get_stream_tributary_inflows : Return small watershed inflows at every stream node for the current timestep
IWFMModel.get_stream_rainfall_runoff : Return rainfall runoff at every stream node for the current timestep
IWFMModel.get_stream_return_flows : Return agricultural and urban return flows at every stream node for the current timestep
IWFMModel.get_stream_tile_drain_flows : Return tile drain flows into every stream node for the current timestep
IWFMModel.get_stream_riparian_evapotranspiration : Return riparian evapotranspiration from every stream node for the current timestep
IWFMModel.get_stream_gain_from_lakes : Return gain from lakes for every stream node for the current timestep
IWFMModel.get_net_bypass_inflows : Return net bypass inflows for every stream node for the current timestep
IWFMModel.get_actual_stream_diversions_at_some_locations : Return actual diversion amounts for a list of diversions during a model simulation
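Example
-------
Usage sketch only (no output shown); assumes the example input files used
elsewhere and a model run with is_for_inquiry=0.
>>> from pywfm import IWFMModel
>>> pp_file = '../Preprocessor/PreProcessor_MAIN.IN'
>>> sim_file = 'Simulation_MAIN.IN'
>>> model = IWFMModel(pp_file, sim_file, is_for_inquiry=0)
>>> while not model.is_end_of_simulation():
... model.advance_time()
... model.read_timeseries_data()
... model.simulate_for_one_timestep()
... # gain from groundwater for every stream node (+ gaining, - losing)
... gain_from_gw = model.get_stream_gain_from_groundwater()
... model.print_results()
... model.advance_state()
>>> model.kill()
>>> model.close_log_file()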
"""
if not hasattr(self.dll, "IW_Model_GetStrmGainFromGW"):
raise AttributeError(
'IWFM API does not have "{}" procedure. '
"Check for an updated version".format("IW_Model_GetStrmGainFromGW")
)
# get number of stream nodes in the model
n_stream_nodes = ctypes.c_int(self.get_n_stream_nodes())
# convert unit conversion factor to ctypes
stream_gain_conversion_factor = ctypes.c_double(stream_gain_conversion_factor)
# initialize output variables
gain_from_groundwater = (ctypes.c_double * n_stream_nodes.value)()
# set instance variable status to 0
status = ctypes.c_int(0)
self.dll.IW_Model_GetStrmGainFromGW(
ctypes.byref(n_stream_nodes),
ctypes.byref(stream_gain_conversion_factor),
gain_from_groundwater,
ctypes.byref(status),
)
return np.array(gain_from_groundwater)
def get_stream_gain_from_lakes(self, lake_inflow_conversion_factor=1.0):
"""
Return gain from lakes for every stream node for the current timestep
Parameters
----------
lake_inflow_conversion_factor : float, default=1.0
conversion factor for gain from lakes from
the simulation units of volume to a desired unit of volume
Returns
-------
np.ndarray
gain from lakes for all stream nodes for the current
simulation timestep
Note
----
This method is designed for use when is_for_inquiry=0 to return
gain from lakes at the current timestep during a
simulation.
stream nodes without gain from lakes will be 0
See Also
--------
IWFMModel.get_stream_tributary_inflows : Return small watershed inflows at every stream node for the current timestep
IWFMModel.get_stream_rainfall_runoff : Return rainfall runoff at every stream node for the current timestep
IWFMModel.get_stream_return_flows : Return agricultural and urban return flows at every stream node for the current timestep
IWFMModel.get_stream_tile_drain_flows : Return tile drain flows into every stream node for the current timestep
IWFMModel.get_stream_riparian_evapotranspiration : Return riparian evapotranspiration from every stream node for the current timestep
IWFMModel.get_stream_gain_from_groundwater : Return gain from groundwater for every stream node for the current timestep
IWFMModel.get_net_bypass_inflows : Return net bypass inflows for every stream node for the current timestep
IWFMModel.get_actual_stream_diversions_at_some_locations : Return actual diversion amounts for a list of diversions during a model simulation
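Example
-------
Usage sketch only (illustrative; no output shown); assumes the example input
files used in the other examples and a model run with is_for_inquiry=0.
>>> from pywfm import IWFMModel
>>> pp_file = '../Preprocessor/PreProcessor_MAIN.IN'
>>> sim_file = 'Simulation_MAIN.IN'
>>> model = IWFMModel(pp_file, sim_file, is_for_inquiry=0)
>>> while not model.is_end_of_simulation():
... model.advance_time()
... model.read_timeseries_data()
... model.simulate_for_one_timestep()
... # gain from lakes for every stream node for this timestep
... gain_from_lakes = model.get_stream_gain_from_lakes()
... model.print_results()
... model.advance_state()
>>> model.kill()
>>> model.close_log_file()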
"""
if not hasattr(self.dll, "IW_Model_GetStrmGainFromLakes"):
raise AttributeError(
'IWFM API does not have "{}" procedure. '
"Check for an updated version".format("IW_Model_GetStrmGainFromLakes")
)
# get number of stream nodes in the model
n_stream_nodes = ctypes.c_int(self.get_n_stream_nodes())
# convert unit conversion factor to ctypes
lake_inflow_conversion_factor = ctypes.c_double(lake_inflow_conversion_factor)
# initialize output variables
gain_from_lakes = (ctypes.c_double * n_stream_nodes.value)()
# set instance variable status to 0
status = ctypes.c_int(0)
self.dll.IW_Model_GetStrmGainFromLakes(
ctypes.byref(n_stream_nodes),
ctypes.byref(lake_inflow_conversion_factor),
gain_from_lakes,
ctypes.byref(status),
)
return np.array(gain_from_lakes)
def get_net_bypass_inflows(self, bypass_inflow_conversion_factor=1.0):
"""
Return net bypass inflows for every stream node for the current timestep
Parameters
----------
bypass_inflow_conversion_factor : float, default=1.0
conversion factor for net bypass inflow from
the simulation units of volume to a desired unit of volume
Returns
-------
np.ndarray
net bypass inflow for all stream nodes for the current
simulation timestep
Note
----
This method is designed for use when is_for_inquiry=0 to return
net bypass inflow to streams at the current timestep during a
simulation.
stream nodes without net bypass inflow will be 0
See Also
--------
IWFMModel.get_stream_tributary_inflows : Return small watershed inflows at every stream node for the current timestep
IWFMModel.get_stream_rainfall_runoff : Return rainfall runoff at every stream node for the current timestep
IWFMModel.get_stream_return_flows : Return agricultural and urban return flows at every stream node for the current timestep
IWFMModel.get_stream_tile_drain_flows : Return tile drain flows into every stream node for the current timestep
IWFMModel.get_stream_riparian_evapotranspiration : Return riparian evapotranspiration from every stream node for the current timestep
IWFMModel.get_stream_gain_from_groundwater : Return gain from groundwater for every stream node for the current timestep
IWFMModel.get_stream_gain_from_lakes : Return gain from lakes for every stream node for the current timestep
IWFMModel.get_actual_stream_diversions_at_some_locations : Return actual diversion amounts for a list of diversions during a model simulation
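Example
-------
Usage sketch only (no output shown, since values are model-dependent);
assumes the example input files used above and a model run with is_for_inquiry=0.
>>> from pywfm import IWFMModel
>>> pp_file = '../Preprocessor/PreProcessor_MAIN.IN'
>>> sim_file = 'Simulation_MAIN.IN'
>>> model = IWFMModel(pp_file, sim_file, is_for_inquiry=0)
>>> while not model.is_end_of_simulation():
... model.advance_time()
... model.read_timeseries_data()
... model.simulate_for_one_timestep()
... # net bypass inflows for every stream node for this timestep
... net_bypass_inflows = model.get_net_bypass_inflows()
... model.print_results()
... model.advance_state()
>>> model.kill()
>>> model.close_log_file()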
"""
if not hasattr(self.dll, "IW_Model_GetStrmGainFromLakes"):
raise AttributeError(
'IWFM API does not have "{}" procedure. '
"Check for an updated version".format("IW_Model_GetStrmGainFromLakes")
)
# get number of stream nodes in the model
n_stream_nodes = ctypes.c_int(self.get_n_stream_nodes())
# convert unit conversion factor to ctypes
bypass_inflow_conversion_factor = ctypes.c_double(
bypass_inflow_conversion_factor
)
# initialize output variables
net_bypass_inflow = (ctypes.c_double * n_stream_nodes.value)()
# set instance variable status to 0
status = ctypes.c_int(0)
self.dll.IW_Model_GetStrmNetBypassInflows(
ctypes.byref(n_stream_nodes),
ctypes.byref(bypass_inflow_conversion_factor),
net_bypass_inflow,
ctypes.byref(status),
)
return np.array(net_bypass_inflow)
def get_actual_stream_diversions_at_some_locations(
self, diversion_locations="all", diversion_conversion_factor=1.0
):
"""
Return actual diversion amounts for a list of diversions during a model simulation
Parameters
----------
diversion_locations : int, list, tuple, np.ndarray, or str='all', default='all'
one or more diversion ids where actual diversions are
returned.
diversion_conversion_factor : float, default=1.0
conversion factor for actual diversions from the simulation
unit of volume to a desired unit of volume
Returns
-------
np.ndarray
actual diversions for the diversion ids provided
Note
----
This method is designed for use when is_for_inquiry=0 to return
actual diversion amounts for selected diversion locations at
the current timestep during a simulation.
Actual diversion amounts can be less than the required diversion
amount if the stream goes dry at the stream node where the diversion
occurs
See Also
--------
IWFMModel.get_stream_tributary_inflows : Return small watershed inflows at every stream node for the current timestep
IWFMModel.get_stream_rainfall_runoff : Return rainfall runoff at every stream node for the current timestep
IWFMModel.get_stream_return_flows : Return agricultural and urban return flows at every stream node for the current timestep
IWFMModel.get_stream_tile_drain_flows : Return tile drain flows into every stream node for the current timestep
IWFMModel.get_stream_riparian_evapotranspiration : Return riparian evapotranspiration from every stream node for the current timestep
IWFMModel.get_stream_gain_from_groundwater : Return gain from groundwater for every stream node for the current timestep
IWFMModel.get_stream_gain_from_lakes : Return gain from lakes for every stream node for the current timestep
IWFMModel.get_net_bypass_inflows : Return net bypass inflows for every stream node for the current timestep
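Example
-------
Usage sketch only (illustrative; no output shown); assumes the example input
files used in the other examples and a model run with is_for_inquiry=0.
>>> from pywfm import IWFMModel
>>> pp_file = '../Preprocessor/PreProcessor_MAIN.IN'
>>> sim_file = 'Simulation_MAIN.IN'
>>> model = IWFMModel(pp_file, sim_file, is_for_inquiry=0)
>>> while not model.is_end_of_simulation():
... model.advance_time()
... model.read_timeseries_data()
... model.simulate_for_one_timestep()
... # actual diversion amounts for all diversions this timestep
... actual_diversions = model.get_actual_stream_diversions_at_some_locations('all')
... model.print_results()
... model.advance_state()
>>> model.kill()
>>> model.close_log_file()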
"""
if not hasattr(self.dll, "IW_Model_GetStrmActualDiversions_AtSomeDiversions"):
raise AttributeError(
'IWFM API does not have "{}" procedure. '
"Check for an updated version".format(
"IW_Model_GetStrmActualDiversions_AtSomeDiversions"
)
)
# check that diversion locations are provided in correct format
# get possible stream inflow locations
diversion_ids = self.get_diversion_ids()
if isinstance(diversion_locations, str):
if diversion_locations.lower() == "all":
diversion_locations = diversion_ids
else:
raise ValueError('if diversion_locations is a string, must be "all"')
# if int convert to np.ndarray
if isinstance(diversion_locations, int):
diversion_locations = np.array([diversion_locations])
# if list or tuple convert to np.ndarray
if isinstance(diversion_locations, (list, tuple)):
diversion_locations = np.array(diversion_locations)
# if diversion_locations were provided as an int, list, or tuple,
# they should now all be np.ndarray, so check if np.ndarray
if not isinstance(diversion_locations, np.ndarray):
raise TypeError("diversion_locations must be an int, list, or np.ndarray")
# check if all of the provided diversion_locations are valid
if not np.all(np.isin(diversion_locations, diversion_ids)):
raise ValueError("One or more diversion locations are invalid")
# convert diversion_locations to diversion indices
# add 1 to convert between python indices and fortran indices
diversion_indices = (
np.array(
[np.where(diversion_ids == item)[0][0] for item in diversion_locations]
)
+ 1
)
# initialize input variables
n_diversions = ctypes.c_int(len(diversion_indices))
diversion_indices = (ctypes.c_int * n_diversions.value)(*diversion_indices)
diversion_conversion_factor = ctypes.c_double(diversion_conversion_factor)
# set instance variable status to 0
status = ctypes.c_int(0)
# initialize output variables
actual_diversion_amounts = (ctypes.c_double * n_diversions.value)()
self.dll.IW_Model_GetStrmActualDiversions_AtSomeDiversions(
ctypes.byref(n_diversions),
diversion_indices,
ctypes.byref(diversion_conversion_factor),
actual_diversion_amounts,
ctypes.byref(status),
)
return np.array(actual_diversion_amounts)
def get_stream_diversion_locations(self, diversion_locations="all"):
"""
Return the stream node IDs corresponding to diversion locations
Parameters
----------
diversion_locations : int, list, tuple, np.ndarray, str='all', default='all'
one or more diversion IDs used to return the corresponding stream node ID
Returns
-------
np.ndarray
array of stream node IDs corresponding to where diversions are exported
See Also
--------
IWFMModel.get_n_diversions : Return the number of surface water diversions in an IWFM model
IWFMModel.get_diversion_ids : Return the surface water diversion identification numbers specified in an IWFM model
Example
-------
>>> from pywfm import IWFMModel
>>> pp_file = '../Preprocessor/PreProcessor_MAIN.IN'
>>> sim_file = 'Simulation_MAIN.IN'
>>> model = IWFMModel(pp_file, sim_file)
>>> model.get_stream_diversion_locations()
array([ 9, 12, 12, 22, 23])
>>> model.kill()
>>> model.close_log_file()
"""
if not hasattr(self.dll, "IW_Model_GetStrmDiversionsExportNodes"):
raise AttributeError(
'IWFM API does not have "{}" procedure. Check for an updated version'.format(
"IW_Model_GetStrmDiversionsExportNodes"
)
)
# check that diversion locations are provided in correct format
# get possible stream inflow locations
diversion_ids = self.get_diversion_ids()
if isinstance(diversion_locations, str):
if diversion_locations.lower() == "all":
diversion_locations = diversion_ids
else:
raise ValueError('if diversion_locations is a string, must be "all"')
# if int convert to np.ndarray
if isinstance(diversion_locations, int):
diversion_locations = np.array([diversion_locations])
# if list or tuple convert to np.ndarray
if isinstance(diversion_locations, (list, tuple)):
diversion_locations = np.array(diversion_locations)
# if diversion_locations were provided as an int, list, or tuple,
# they should now all be np.ndarray, so check if np.ndarray
if not isinstance(diversion_locations, np.ndarray):
raise TypeError("diversion_locations must be an int, list, or np.ndarray")
# check if all of the provided diversion_locations are valid
if not np.all(np.isin(diversion_locations, diversion_ids)):
raise ValueError("One or more diversion locations are invalid")
# convert diversion_locations to diversion indices
# add 1 to convert between python indices and fortran indices
diversion_indices = (
np.array(
[np.where(diversion_ids == item)[0][0] for item in diversion_locations]
)
+ 1
)
# set input variables
n_diversions = ctypes.c_int(len(diversion_indices))
diversion_list = (ctypes.c_int * n_diversions.value)(*diversion_indices)
# set instance variable status to 0
status = ctypes.c_int(0)
# initialize output variables
diversion_stream_nodes = (ctypes.c_int * n_diversions.value)()
self.dll.IW_Model_GetStrmDiversionsExportNodes(
ctypes.byref(n_diversions),
diversion_list,
diversion_stream_nodes,
ctypes.byref(status),
)
# convert stream node indices to stream node ids
stream_node_ids = self.get_stream_node_ids()
stream_diversion_indices = np.array(diversion_stream_nodes)
return stream_node_ids[stream_diversion_indices - 1]
def get_n_stream_reaches(self):
"""
Return the number of stream reaches in an IWFM model
Returns
-------
int
number of stream reaches in the IWFM model
See Also
--------
IWFMModel.get_stream_reach_ids : Return an array of stream reach IDs in an IWFM model
IWFMModel.get_n_nodes_in_stream_reach : Return the number of stream nodes in a stream reach
IWFMModel.get_stream_reach_groundwater_nodes : Return the groundwater node IDs corresponding to stream nodes in a specified reach
IWFMModel.get_stream_reach_stream_nodes : Return the stream node IDs corresponding to stream nodes in a specified reach
IWFMModel.get_stream_reaches_for_stream_nodes : Return the stream reach indices that correspond to a list of stream nodes
IWFMModel.get_upstream_nodes_in_stream_reaches : Return the IDs for the upstream stream node in each stream reach
IWFMModel.get_n_reaches_upstream_of_reach : Return the number of stream reaches immediately upstream of the specified reach
IWFMModel.get_reaches_upstream_of_reach : Return the IDs of the reaches that are immediately upstream of the specified reach
IWFMModel.get_downstream_node_in_stream_reaches : Return the IDs for the downstream stream node in each stream reach
IWFMModel.get_reach_outflow_destination : Return the destination index that each stream reach flows into
IWFMModel.get_reach_outflow_destination_types : Return the outflow destination types that each stream reach flows into.
Example
-------
>>> from pywfm import IWFMModel
>>> pp_file = '../Preprocessor/PreProcessor_MAIN.IN'
>>> sim_file = 'Simulation_MAIN.IN'
>>> model = IWFMModel(pp_file, sim_file)
>>> model.get_n_stream_reaches()
3
>>> model.kill()
>>> model.close_log_file()
"""
# check to see if IWFM procedure is available in user version of IWFM DLL
if not hasattr(self.dll, "IW_Model_GetNReaches"):
raise AttributeError(
'IWFM API does not have "{}" procedure. Check for an updated version'.format(
"IW_ModelGetNReaches"
)
)
# set instance variable status to 0
status = ctypes.c_int(0)
# initialize n_stream_reaches variable
n_stream_reaches = ctypes.c_int(0)
self.dll.IW_Model_GetNReaches(
ctypes.byref(n_stream_reaches), ctypes.byref(status)
)
return n_stream_reaches.value
def get_stream_reach_ids(self):
"""
Return an array of stream reach IDs in an IWFM model
Returns
-------
stream_reach_ids : np.ndarray of ints
integer array containing stream reach ids
See Also
--------
IWFMModel.get_n_stream_reaches : Return the number of stream reaches in an IWFM model
IWFMModel.get_n_nodes_in_stream_reach : Return the number of stream nodes in a stream reach
IWFMModel.get_stream_reach_groundwater_nodes : Return the groundwater node IDs corresponding to stream nodes in a specified reach
IWFMModel.get_stream_reach_stream_nodes : Return the stream node IDs corresponding to stream nodes in a specified reach
IWFMModel.get_stream_reaches_for_stream_nodes : Return the stream reach IDs that correspond to a list of stream nodes
IWFMModel.get_upstream_nodes_in_stream_reaches : Return the IDs for the upstream stream node in each stream reach
IWFMModel.get_n_reaches_upstream_of_reach : Return the number of stream reaches immediately upstream of the specified reach
IWFMModel.get_reaches_upstream_of_reach : Return the IDs of the reaches that are immediately upstream of the specified reach
IWFMModel.get_downstream_node_in_stream_reaches : Return the IDs for the downstream stream node in each stream reach
IWFMModel.get_reach_outflow_destination : Return the destination index that each stream reach flows into
IWFMModel.get_reach_outflow_destination_types : Return the outflow destination types that each stream reach flows into.
Example
-------
>>> from pywfm import IWFMModel
>>> pp_file = '../Preprocessor/PreProcessor_MAIN.IN'
>>> sim_file = 'Simulation_MAIN.IN'
>>> model = IWFMModel(pp_file, sim_file)
>>> model.get_stream_reach_ids()
array([2, 1, 3])
>>> model.kill()
>>> model.close_log_file()
"""
if not hasattr(self.dll, "IW_Model_GetReachIDs"):
raise AttributeError(
'IWFM API does not have "{}" procedure. '
"Check for an updated version".format("IW_Model_GetReachIDs")
)
# set input variables
n_stream_reaches = ctypes.c_int(self.get_n_stream_reaches())
# set instance variable status to 0
status = ctypes.c_int(0)
# initialize output variables
stream_reach_ids = (ctypes.c_int * n_stream_reaches.value)()
self.dll.IW_Model_GetReachIDs(
ctypes.byref(n_stream_reaches), stream_reach_ids, ctypes.byref(status)
)
return np.array(stream_reach_ids)
def get_n_nodes_in_stream_reach(self, reach_id):
"""
Return the number of stream nodes in a stream reach
Parameters
----------
reach_id : int
ID for stream reach used to retrieve the number of stream nodes contained in it
Returns
-------
int
number of stream nodes specified in the stream reach
See Also
--------
IWFMModel.get_n_stream_reaches : Return the number of stream reaches in an IWFM model
IWFMModel.get_stream_reach_ids : Return an array of stream reach IDs in an IWFM model
IWFMModel.get_stream_reach_groundwater_nodes : Return the groundwater node IDs corresponding to stream nodes in a specified reach
IWFMModel.get_stream_reach_stream_nodes : Return the stream node IDs corresponding to stream nodes in a specified reach
IWFMModel.get_stream_reaches_for_stream_nodes : Return the stream reach IDs that correspond to a list of stream nodes
IWFMModel.get_upstream_nodes_in_stream_reaches : Return the IDs for the upstream stream node in each stream reach
IWFMModel.get_n_reaches_upstream_of_reach : Return the number of stream reaches immediately upstream of the specified reach
IWFMModel.get_reaches_upstream_of_reach : Return the IDs of the reaches that are immediately upstream of the specified reach
IWFMModel.get_downstream_node_in_stream_reaches : Return the IDs for the downstream stream node in each stream reach
IWFMModel.get_reach_outflow_destination : Return the destination index that each stream reach flows into
IWFMModel.get_reach_outflow_destination_types : Return the outflow destination types that each stream reach flows into.
Example
-------
>>> from pywfm import IWFMModel
>>> pp_file = '../Preprocessor/PreProcessor_MAIN.IN'
>>> sim_file = 'Simulation_MAIN.IN'
>>> model = IWFMModel(pp_file, sim_file)
>>> model.get_n_nodes_in_stream_reach(1)
10
>>> model.kill()
>>> model.close_log_file()
"""
if not hasattr(self.dll, "IW_Model_GetReachNNodes"):
raise AttributeError(
'IWFM API does not have "{}" procedure. '
"Check for an updated version".format("IW_Model_GetReachNNodes")
)
# make sure reach_id is an integer
if not isinstance(reach_id, int):
raise TypeError("reach_id must be an integer")
# get all possible stream reach ids
reach_ids = self.get_stream_reach_ids()
# check that provided reach_id is valid
if not np.any(reach_ids == reach_id):
raise ValueError("reach_id provided is not valid")
# convert reach_id to reach index
# add 1 to index to convert between python index and fortran index
reach_index = np.where(reach_ids == reach_id)[0][0] + 1
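# for illustration: with reach_ids == array([2, 1, 3]) (as in the
# get_stream_reach_ids example), reach_id=3 sits at position 2 and
# therefore maps to reach_index=3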
# convert reach index to ctypes
reach_index = ctypes.c_int(reach_index)
# initialize output variables
n_nodes_in_reach = ctypes.c_int(0)
# set instance variable status to 0
status = ctypes.c_int(0)
self.dll.IW_Model_GetReachNNodes(
ctypes.byref(reach_index),
ctypes.byref(n_nodes_in_reach),
ctypes.byref(status),
)
return n_nodes_in_reach.value
def get_stream_reach_groundwater_nodes(self, reach_id):
"""
Return the groundwater node IDs corresponding to stream
nodes in a specified reach
Parameters
----------
reach_id : int
stream reach ID used to obtain the corresponding groundwater nodes
Returns
-------
np.ndarray
integer array of groundwater node IDs corresponding to the stream reach
Note
----
In the case where wide streams are simulated, more than one groundwater node
can be identified for a corresponding stream node. As of this version, only
the first groundwater node specified for each stream node will be returned.
See Also
--------
IWFMModel.get_n_stream_reaches : Return the number of stream reaches in an IWFM model
IWFMModel.get_stream_reach_ids : Return an array of stream reach IDs in an IWFM model
IWFMModel.get_n_nodes_in_stream_reach : Return the number of stream nodes in a stream reach
IWFMModel.get_stream_reach_stream_nodes : Return the stream node IDs corresponding to stream nodes in a specified reach
IWFMModel.get_stream_reaches_for_stream_nodes : Return the stream reach IDs that correspond to a list of stream nodes
IWFMModel.get_upstream_nodes_in_stream_reaches : Return the IDs for the upstream stream node in each stream reach
IWFMModel.get_n_reaches_upstream_of_reach : Return the number of stream reaches immediately upstream of the specified reach
IWFMModel.get_reaches_upstream_of_reach : Return the IDs of the reaches that are immediately upstream of the specified reach
IWFMModel.get_downstream_node_in_stream_reaches : Return the IDs for the downstream stream node in each stream reach
IWFMModel.get_reach_outflow_destination : Return the destination index that each stream reach flows into
IWFMModel.get_reach_outflow_destination_types : Return the outflow destination types that each stream reach flows into.
Example
-------
>>> from pywfm import IWFMModel
>>> pp_file = '../Preprocessor/PreProcessor_MAIN.IN'
>>> sim_file = 'Simulation_MAIN.IN'
>>> model = IWFMModel(pp_file, sim_file)
>>> model.get_stream_reach_groundwater_nodes(1)
array([433, 412, 391, 370, 349, 328, 307, 286, 265, 264])
>>> model.kill()
>>> model.close_log_file()
"""
if not hasattr(self.dll, "IW_Model_GetReachGWNodes"):
raise AttributeError(
'IWFM API does not have "{}" procedure. '
"Check for an updated version".format("IW_Model_GetReachGWNodes")
)
# make sure reach_id is an integer
if not isinstance(reach_id, int):
raise TypeError("reach_id must be an integer")
# get all possible stream reach ids
reach_ids = self.get_stream_reach_ids()
# check that provided reach_id is valid
if not np.any(reach_ids == reach_id):
raise ValueError("reach_id provided is not valid")
# convert reach_id to reach index
# add 1 to index to convert between python index and fortran index
reach_index = np.where(reach_ids == reach_id)[0][0] + 1
# convert reach index to ctypes
reach_index = ctypes.c_int(reach_index)
# get number of nodes in stream reach
n_nodes_in_reach = ctypes.c_int(self.get_n_nodes_in_stream_reach(reach_id))
# initialize output variables
reach_groundwater_nodes = (ctypes.c_int * n_nodes_in_reach.value)()
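# note: when wide streams are simulated, each stream node may have more
# than one associated groundwater node; only the first groundwater node
# per stream node is returned (see the Note in the docstring above)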
# set instance variable status to 0
status = ctypes.c_int(0)
self.dll.IW_Model_GetReachGWNodes(
ctypes.byref(reach_index),
ctypes.byref(n_nodes_in_reach),
reach_groundwater_nodes,
ctypes.byref(status),
)
# convert groundwater node indices to groundwater node IDs
groundwater_node_ids = self.get_node_ids()
reach_groundwater_node_indices = np.array(reach_groundwater_nodes)
return groundwater_node_ids[reach_groundwater_node_indices - 1]
def get_stream_reach_stream_nodes(self, reach_id):
"""
Return the stream node IDs corresponding to stream
nodes in a specified reach
Parameters
----------
reach_id : int
stream reach ID to obtain the corresponding stream nodes
Returns
-------
np.ndarray
integer array of stream node IDs corresponding to the stream reach
See Also
--------
IWFMModel.get_n_stream_reaches : Return the number of stream reaches in an IWFM model
IWFMModel.get_stream_reach_ids : Return an array of stream reach IDs in an IWFM model
IWFMModel.get_n_nodes_in_stream_reach : Return the number of stream nodes in a stream reach
IWFMModel.get_stream_reach_groundwater_nodes : Return the groundwater node IDs corresponding to stream nodes in a specified reach
IWFMModel.get_stream_reaches_for_stream_nodes : Return the stream reach IDs that correspond to a list of stream nodes
IWFMModel.get_upstream_nodes_in_stream_reaches : Return the IDs for the upstream stream node in each stream reach
IWFMModel.get_n_reaches_upstream_of_reach : Return the number of stream reaches immediately upstream of the specified reach
IWFMModel.get_reaches_upstream_of_reach : Return the IDs of the reaches that are immediately upstream of the specified reach
IWFMModel.get_downstream_node_in_stream_reaches : Return the IDs for the downstream stream node in each stream reach
IWFMModel.get_reach_outflow_destination : Return the destination index that each stream reach flows into
IWFMModel.get_reach_outflow_destination_types : Return the outflow destination types that each stream reach flows into.
Example
-------
>>> from pywfm import IWFMModel
>>> pp_file = '../Preprocessor/PreProcessor_MAIN.IN'
>>> sim_file = 'Simulation_MAIN.IN'
>>> model = IWFMModel(pp_file, sim_file)
>>> model.get_stream_reach_stream_nodes(1)
array([ 1, 2, 3, 4, 5, 6, 7, 8, 9, 10])
>>> model.kill()
>>> model.close_log_file()
"""
if not hasattr(self.dll, "IW_Model_GetReachStrmNodes"):
raise AttributeError(
'IWFM API does not have "{}" procedure. '
"Check for an updated version".format("IW_Model_GetReachStrmNodes")
)
# make sure reach_id is an integer
if not isinstance(reach_id, int):
raise TypeError("reach_id must be an integer")
# get all possible stream reach ids
reach_ids = self.get_stream_reach_ids()
# check that provided reach_id is valid
if not np.any(reach_ids == reach_id):
raise ValueError("reach_id provided is not valid")
# convert reach_id to reach index
# add 1 to index to convert between python index and fortran index
reach_index = np.where(reach_ids == reach_id)[0][0] + 1
# convert reach index to ctypes
reach_index = ctypes.c_int(reach_index)
# get number of nodes in stream reach
n_nodes_in_reach = ctypes.c_int(self.get_n_nodes_in_stream_reach(reach_id))
# initialize output variables
reach_stream_nodes = (ctypes.c_int * n_nodes_in_reach.value)()
# set instance variable status to 0
status = ctypes.c_int(0)
self.dll.IW_Model_GetReachStrmNodes(
ctypes.byref(reach_index),
ctypes.byref(n_nodes_in_reach),
reach_stream_nodes,
ctypes.byref(status),
)
# convert stream node indices to IDs
stream_node_ids = self.get_stream_node_ids()
stream_node_indices = np.array(reach_stream_nodes)
return stream_node_ids[stream_node_indices - 1]
def get_stream_reaches_for_stream_nodes(self, stream_nodes="all"):
"""
Return the stream reach IDs that correspond to one or more stream node IDs
Parameters
---------
stream_node_ids : int, list, tuple, np.ndarray, str='all', default='all'
one or more stream node IDs where the stream reach IDs will be returned
Returns
-------
np.ndarray
array of stream reach IDs corresponding to stream node IDs provided
See Also
--------
IWFMModel.get_n_stream_reaches : Return the number of stream reaches in an IWFM model
IWFMModel.get_stream_reach_ids : Return an array of stream reach IDs in an IWFM model
IWFMModel.get_n_nodes_in_stream_reach : Return the number of stream nodes in a stream reach
IWFMModel.get_stream_reach_groundwater_nodes : Return the groundwater node IDs corresponding to stream nodes in a specified reach
IWFMModel.get_stream_reach_stream_nodes : Return the stream node IDs corresponding to stream nodes in a specified reach
IWFMModel.get_upstream_nodes_in_stream_reaches : Return the IDs for the upstream stream node in each stream reach
IWFMModel.get_n_reaches_upstream_of_reach : Return the number of stream reaches immediately upstream of the specified reach
IWFMModel.get_reaches_upstream_of_reach : Return the IDs of the reaches that are immediately upstream of the specified reach
IWFMModel.get_downstream_node_in_stream_reaches : Return the IDs for the downstream stream node in each stream reach
IWFMModel.get_reach_outflow_destination : Return the destination index that each stream reach flows into
IWFMModel.get_reach_outflow_destination_types : Return the outflow destination types that each stream reach flows into.
Example
-------
>>> from pywfm import IWFMModel
>>> pp_file = '../Preprocessor/PreProcessor_MAIN.IN'
>>> sim_file = 'Simulation_MAIN.IN'
>>> model = IWFMModel(pp_file, sim_file)
>>> model.get_stream_reaches_for_stream_nodes()
array([1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3])
>>> model.kill()
>>> model.close_log_file()
"""
if not hasattr(self.dll, "IW_Model_GetReaches_ForStrmNodes"):
raise AttributeError(
'IWFM API does not have "{}" procedure. '
"Check for an updated version".format(
"IW_Model_GetReaches_ForStrmNodes"
)
)
# get all possible stream node IDs
stream_node_ids = self.get_stream_node_ids()
if isinstance(stream_nodes, str):
if stream_nodes.lower() == "all":
stream_nodes = stream_node_ids
else:
raise ValueError('if stream_nodes is a string, must be "all"')
# if int convert to np.ndarray
if isinstance(stream_nodes, int):
stream_nodes = np.array([stream_nodes])
# if list or tuple convert to np.ndarray
if isinstance(stream_nodes, (list, tuple)):
stream_nodes = np.array(stream_nodes)
# if provided as an int, list, or tuple, stream_nodes should now be
# an np.ndarray, so check that it is
if not isinstance(stream_nodes, np.ndarray):
raise TypeError(
'stream_nodes must be an int, list, tuple, np.ndarray, or "all"'
)
# check that all of the provided stream nodes are valid
if not np.all(np.isin(stream_nodes, stream_node_ids)):
raise ValueError("One or more stream nodes provided are invalid")
# convert stream node IDs to stream node indices
# add 1 to convert between python indices and fortran indices
stream_node_indices = (
np.array([np.where(stream_node_ids == item)[0][0] for item in stream_nodes])
+ 1
)
# get number of stream node indices provided
n_stream_nodes = ctypes.c_int(len(stream_node_indices))
# convert stream node indices to ctypes
stream_node_indices = (ctypes.c_int * n_stream_nodes.value)(
*stream_node_indices
)
# initialize output variables
stream_reaches = (ctypes.c_int * n_stream_nodes.value)()
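# the output array receives one stream reach index per stream node provided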
# set instance variable status to 0
status = ctypes.c_int(0)
self.dll.IW_Model_GetReaches_ForStrmNodes(
ctypes.byref(n_stream_nodes),
stream_node_indices,
stream_reaches,
ctypes.byref(status),
)
# convert stream reach indices to stream reach IDs
stream_reach_ids = self.get_stream_reach_ids()
stream_reach_indices = np.array(stream_reaches)
return stream_reach_ids[stream_reach_indices - 1]
def get_upstream_nodes_in_stream_reaches(self):
"""
Return the IDs for the upstream stream node in each
stream reach
Returns
-------
np.ndarray
array of stream node IDs corresponding to the most
upstream stream node in each stream reach
See Also
--------
IWFMModel.get_n_stream_reaches : Return the number of stream reaches in an IWFM model
IWFMModel.get_stream_reach_ids : Return an array of stream reach IDs in an IWFM model
IWFMModel.get_n_nodes_in_stream_reach : Return the number of stream nodes in a stream reach
IWFMModel.get_stream_reach_groundwater_nodes : Return the groundwater node IDs corresponding to stream nodes in a specified reach
IWFMModel.get_stream_reach_stream_nodes : Return the stream node IDs corresponding to stream nodes in a specified reach
IWFMModel.get_stream_reaches_for_stream_nodes : Return the stream reach IDs that correspond to a list of stream nodes
IWFMModel.get_n_reaches_upstream_of_reach : Return the number of stream reaches immediately upstream of the specified reach
IWFMModel.get_reaches_upstream_of_reach : Return the IDs of the reaches that are immediately upstream of the specified reach
IWFMModel.get_downstream_node_in_stream_reaches : Return the IDs for the downstream stream node in each stream reach
IWFMModel.get_reach_outflow_destination : Return the destination index that each stream reach flows into
IWFMModel.get_reach_outflow_destination_types : Return the outflow destination types that each stream reach flows into.
Example
-------
>>> from pywfm import IWFMModel
>>> pp_file = '../Preprocessor/PreProcessor_MAIN.IN'
>>> sim_file = 'Simulation_MAIN.IN'
>>> model = IWFMModel(pp_file, sim_file)
>>> model.get_upstream_nodes_in_stream_reaches()
array([11, 1, 17])
>>> model.kill()
>>> model.close_log_file()
"""
if not hasattr(self.dll, "IW_Model_GetReachUpstrmNodes"):
raise AttributeError(
'IWFM API does not have "{}" procedure. '
"Check for an updated version".format("IW_Model_GetReachUpstrmNodes")
)
# get number of reaches specified in the model
n_reaches = ctypes.c_int(self.get_n_stream_reaches())
# initialize output variables
upstream_stream_nodes = (ctypes.c_int * n_reaches.value)()
# set instance variable status to 0
status = ctypes.c_int(0)
self.dll.IW_Model_GetReachUpstrmNodes(
ctypes.byref(n_reaches), upstream_stream_nodes, ctypes.byref(status)
)
# convert upstream stream node indices to stream node IDs
stream_node_ids = self.get_stream_node_ids()
upstream_stream_node_indices = np.array(upstream_stream_nodes)
return stream_node_ids[upstream_stream_node_indices - 1]
def get_n_reaches_upstream_of_reach(self, reach_id):
"""
Return the number of stream reaches immediately upstream
of the specified reach
Parameters
----------
reach_id : int
stream reach ID used to obtain the number of immediately upstream stream reaches
Returns
-------
int
number of stream reaches immediately upstream of specified
stream reach.
Note
----
Returns 0 if there are no stream reaches upstream of the specified reach,
the number of tributaries if the reach is immediately downstream of a
confluence, and 1 otherwise.
See Also
--------
IWFMModel.get_n_stream_reaches : Return the number of stream reaches in an IWFM model
IWFMModel.get_stream_reach_ids : Return an array of stream reach IDs in an IWFM model
IWFMModel.get_n_nodes_in_stream_reach : Return the number of stream nodes in a stream reach
IWFMModel.get_stream_reach_groundwater_nodes : Return the groundwater node IDs corresponding to stream nodes in a specified reach
IWFMModel.get_stream_reach_stream_nodes : Return the stream node IDs corresponding to stream nodes in a specified reach
IWFMModel.get_stream_reaches_for_stream_nodes : Return the stream reach IDs that correspond to a list of stream nodes
IWFMModel.get_upstream_nodes_in_stream_reaches : Return the IDs for the upstream stream node in each stream reach
IWFMModel.get_reaches_upstream_of_reach : Return the IDs of the reaches that are immediately upstream of the specified reach
IWFMModel.get_downstream_node_in_stream_reaches : Return the IDs for the downstream stream node in each stream reach
IWFMModel.get_reach_outflow_destination : Return the destination index that each stream reach flows into
IWFMModel.get_reach_outflow_destination_types : Return the outflow destination types that each stream reach flows into.
Examples
--------
>>> from pywfm import IWFMModel
>>> pp_file = '../Preprocessor/PreProcessor_MAIN.IN'
>>> sim_file = 'Simulation_MAIN.IN'
>>> model = IWFMModel(pp_file, sim_file)
>>> model.get_n_reaches_upstream_of_reach(1)
0
>>> model.kill()
>>> model.close_log_file()
>>> from pywfm import IWFMModel
>>> pp_file = '../Preprocessor/PreProcessor_MAIN.IN'
>>> sim_file = 'Simulation_MAIN.IN'
>>> model = IWFMModel(pp_file, sim_file)
>>> model.get_n_reaches_upstream_of_reach(3)
1
>>> model.kill()
>>> model.close_log_file()
"""
if not hasattr(self.dll, "IW_Model_GetReachNUpstrmReaches"):
raise AttributeError(
'IWFM API does not have "{}" procedure. '
"Check for an updated version".format("IW_Model_GetReachNUpstrmReaches")
)
# make sure reach_id is an integer
if not isinstance(reach_id, int):
raise TypeError("reach_id must be an integer")
# get all possible stream reach ids
reach_ids = self.get_stream_reach_ids()
# check that provided reach_id is valid
if not np.any(reach_ids == reach_id):
raise ValueError("reach_id provided is not valid")
# convert reach_id to reach index
# add 1 to index to convert between python index and fortran index
reach_index = np.where(reach_ids == reach_id)[0][0] + 1
# convert reach_index to ctypes
reach_index = ctypes.c_int(reach_index)
# initialize output variables
n_upstream_reaches = ctypes.c_int(0)
# set instance variable status to 0
status = ctypes.c_int(0)
self.dll.IW_Model_GetReachNUpstrmReaches(
ctypes.byref(reach_index),
ctypes.byref(n_upstream_reaches),
ctypes.byref(status),
)
return n_upstream_reaches.value
def get_reaches_upstream_of_reach(self, reach_id):
"""
Return the IDs of the reaches that are immediately
upstream of the specified reach
Parameters
----------
reach_id : int
stream reach ID used to obtain the IDs of the immediately upstream stream reaches
Returns
-------
np.ndarray
array of reach IDs immediately upstream of the specified reach
See Also
--------
IWFMModel.get_n_stream_reaches : Return the number of stream reaches in an IWFM model
IWFMModel.get_stream_reach_ids : Return an array of stream reach IDs in an IWFM model
IWFMModel.get_n_nodes_in_stream_reach : Return the number of stream nodes in a stream reach
IWFMModel.get_stream_reach_groundwater_nodes : Return the groundwater node IDs corresponding to stream nodes in a specified reach
IWFMModel.get_stream_reach_stream_nodes : Return the stream node IDs corresponding to stream nodes in a specified reach
IWFMModel.get_stream_reaches_for_stream_nodes : Return the stream reach IDs that correspond to a list of stream nodes
IWFMModel.get_upstream_nodes_in_stream_reaches : Return the IDs for the upstream stream node in each stream reach
IWFMModel.get_n_reaches_upstream_of_reach : Return the number of stream reaches immediately upstream of the specified reach
IWFMModel.get_downstream_node_in_stream_reaches : Return the IDs for the downstream stream node in each stream reach
IWFMModel.get_reach_outflow_destination : Return the destination index that each stream reach flows into
IWFMModel.get_reach_outflow_destination_types : Return the outflow destination types that each stream reach flows into.
Examples
--------
>>> from pywfm import IWFMModel
>>> pp_file = '../Preprocessor/PreProcessor_MAIN.IN'
>>> sim_file = 'Simulation_MAIN.IN'
>>> model = IWFMModel(pp_file, sim_file)
>>> print(model.get_reaches_upstream_of_reach(1))
None
>>> model.kill()
>>> model.close_log_file()
>>> from pywfm import IWFMModel
>>> pp_file = '../Preprocessor/PreProcessor_MAIN.IN'
>>> sim_file = 'Simulation_MAIN.IN'
>>> model = IWFMModel(pp_file, sim_file)
>>> model.get_reaches_upstream_of_reach(3)
array([2])
>>> model.kill()
>>> model.close_log_file()
"""
if not hasattr(self.dll, "IW_Model_GetReachUpstrmReaches"):
raise AttributeError(
'IWFM API does not have "{}" procedure. '
"Check for an updated version".format("IW_Model_GetReachUpstrmReaches")
)
# make sure reach_id is an integer
if not isinstance(reach_id, int):
raise TypeError("reach_id must be an integer")
# get all possible stream reach ids
reach_ids = self.get_stream_reach_ids()
# check that provided reach_id is valid
if not np.any(reach_ids == reach_id):
raise ValueError("reach_id provided is not valid")
# convert reach_id to reach index
# add 1 to index to convert between python index and fortran index
reach_index = np.where(reach_ids == reach_id)[0][0] + 1
# get the number of reaches upstream of the specified reach
n_upstream_reaches = ctypes.c_int(
self.get_n_reaches_upstream_of_reach(reach_id)
)
# if there are no upstream reaches, then return
if n_upstream_reaches.value == 0:
return
# convert reach index to ctypes
reach_index = ctypes.c_int(reach_index)
# initialize output variables
upstream_reaches = (ctypes.c_int * n_upstream_reaches.value)()
# set instance variable status to 0
status = ctypes.c_int(0)
self.dll.IW_Model_GetReachUpstrmReaches(
ctypes.byref(reach_index),
ctypes.byref(n_upstream_reaches),
upstream_reaches,
ctypes.byref(status),
)
# convert reach indices to reach IDs
stream_reach_ids = self.get_stream_reach_ids()
upstream_reach_indices = np.array(upstream_reaches)
return stream_reach_ids[upstream_reach_indices - 1]
def get_downstream_node_in_stream_reaches(self):
"""
Return the IDs for the downstream stream node in each
stream reach
Returns
-------
np.ndarray
array of stream node IDs for the downstream stream node
in each stream reach
See Also
--------
IWFMModel.get_n_stream_reaches : Return the number of stream reaches in an IWFM model
IWFMModel.get_stream_reach_ids : Return an array of stream reach IDs in an IWFM model
IWFMModel.get_n_nodes_in_stream_reach : Return the number of stream nodes in a stream reach
IWFMModel.get_stream_reach_groundwater_nodes : Return the groundwater node IDs corresponding to stream nodes in a specified reach
IWFMModel.get_stream_reach_stream_nodes : Return the stream node IDs corresponding to stream nodes in a specified reach
IWFMModel.get_stream_reaches_for_stream_nodes : Return the stream reach IDs that correspond to a list of stream nodes
IWFMModel.get_upstream_nodes_in_stream_reaches : Return the IDs for the upstream stream node in each stream reach
IWFMModel.get_n_reaches_upstream_of_reach : Return the number of stream reaches immediately upstream of the specified reach
IWFMModel.get_reaches_upstream_of_reach : Return the IDs of the reaches that are immediately upstream of the specified reach
IWFMModel.get_reach_outflow_destination : Return the destination index that each stream reach flows into
IWFMModel.get_reach_outflow_destination_types : Return the outflow destination types that each stream reach flows into.
Example
-------
>>> from pywfm import IWFMModel
>>> pp_file = '../Preprocessor/PreProcessor_MAIN.IN'
>>> sim_file = 'Simulation_MAIN.IN'
>>> model = IWFMModel(pp_file, sim_file)
>>> model.get_downstream_node_in_stream_reaches()
array([16, 10, 23])
>>> model.kill()
>>> model.close_log_file()
"""
if not hasattr(self.dll, "IW_Model_GetReachDownstrmNodes"):
raise AttributeError(
'IWFM API does not have "{}" procedure. '
"Check for an updated version".format("IW_Model_GetReachDownstrmNodes")
)
# get number of reaches specified in the model
n_reaches = ctypes.c_int(self.get_n_stream_reaches())
# initialize output variables
downstream_stream_nodes = (ctypes.c_int * n_reaches.value)()
# set instance variable status to 0
status = ctypes.c_int(0)
self.dll.IW_Model_GetReachDownstrmNodes(
ctypes.byref(n_reaches), downstream_stream_nodes, ctypes.byref(status)
)
# convert stream node indices to stream node IDs
stream_node_ids = self.get_stream_node_ids()
downstream_stream_node_indices = np.array(downstream_stream_nodes)
return stream_node_ids[downstream_stream_node_indices - 1]
def get_reach_outflow_destination(self):
"""
Return the destination index that each stream reach flows
into.
Returns
-------
np.ndarray
array of destination indices corresponding to the destination
of flows exiting each stream reach
Note
----
To find out the type of destination (i.e. lake, another
stream node or outside the model domain) that the reaches
flow into, it is necessary to call:
IWFMModel.get_reach_outflow_destination_types
See Also
--------
IWFMModel.get_n_stream_reaches : Return the number of stream reaches in an IWFM model
IWFMModel.get_stream_reach_ids : Return an array of stream reach IDs in an IWFM model
IWFMModel.get_n_nodes_in_stream_reach : Return the number of stream nodes in a stream reach
IWFMModel.get_stream_reach_groundwater_nodes : Return the groundwater node IDs corresponding to stream nodes in a specified reach
IWFMModel.get_stream_reach_stream_nodes : Return the stream node IDs corresponding to stream nodes in a specified reach
IWFMModel.get_stream_reaches_for_stream_nodes : Return the stream reach IDs that correspond to a list of stream nodes
IWFMModel.get_upstream_nodes_in_stream_reaches : Return the IDs for the upstream stream node in each stream reach
IWFMModel.get_n_reaches_upstream_of_reach : Return the number of stream reaches immediately upstream of the specified reach
IWFMModel.get_reaches_upstream_of_reach : Return the IDs of the reaches that are immediately upstream of the specified reach
IWFMModel.get_downstream_node_in_stream_reaches : Return the IDs for the downstream stream node in each stream reach
IWFMModel.get_reach_outflow_destination_types : Return the outflow destination types that each stream reach flows into.
Example
-------
>>> from pywfm import IWFMModel
>>> pp_file = '../Preprocessor/PreProcessor_MAIN.IN'
>>> sim_file = 'Simulation_MAIN.IN'
>>> model = IWFMModel(pp_file, sim_file)
>>> model.get_reach_outflow_destination()
array([17, 1, 0])
>>> model.kill()
>>> model.close_log_file()
"""
if not hasattr(self.dll, "IW_Model_GetReachOutflowDest"):
raise AttributeError(
'IWFM API does not have "{}" procedure. '
"Check for an updated version".format("IW_Model_GetReachOutflowDest")
)
# get number of reaches
n_reaches = ctypes.c_int(self.get_n_stream_reaches())
# initialize output variables
reach_outflow_destinations = (ctypes.c_int * n_reaches.value)()
# set instance variable status to 0
status = ctypes.c_int(0)
self.dll.IW_Model_GetReachOutflowDest(
ctypes.byref(n_reaches), reach_outflow_destinations, ctypes.byref(status)
)
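# note: these are raw destination indices (e.g. a lake index, a stream
# node index, or 0 for outside the model domain); interpret them together
# with get_reach_outflow_destination_types (see the Note in the docstring)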
return np.array(reach_outflow_destinations)
def get_reach_outflow_destination_types(self):
"""
Return the outflow destination types that each stream reach
flows into.
Returns
-------
np.ndarray
array of destination types for each reach in the IWFM model
Note
----
A return value of 0 corresponds to flow leaving the model domain
A return value of 1 corresponds to flow to a stream node in another reach
A return value of 3 corresponds to flow to a lake
See Also
--------
IWFMModel.get_n_stream_reaches : Return the number of stream reaches in an IWFM model
IWFMModel.get_stream_reach_ids : Return an array of stream reach IDs in an IWFM model
IWFMModel.get_n_nodes_in_stream_reach : Return the number of stream nodes in a stream reach
IWFMModel.get_stream_reach_groundwater_nodes : Return the groundwater node IDs corresponding to stream nodes in a specified reach
IWFMModel.get_stream_reach_stream_nodes : Return the stream node IDs corresponding to stream nodes in a specified reach
IWFMModel.get_stream_reaches_for_stream_nodes : Return the stream reach IDs that correspond to a list of stream nodes
IWFMModel.get_upstream_nodes_in_stream_reaches : Return the IDs for the upstream stream node in each stream reach
IWFMModel.get_n_reaches_upstream_of_reach : Return the number of stream reaches immediately upstream of the specified reach
IWFMModel.get_reaches_upstream_of_reach : Return the IDs of the reaches that are immediately upstream of the specified reach
IWFMModel.get_downstream_node_in_stream_reaches : Return the IDs for the downstream stream node in each stream reach
IWFMModel.get_reach_outflow_destination : Return the destination index that each stream reach flows into
Example
-------
>>> from pywfm import IWFMModel
>>> pp_file = '../Preprocessor/PreProcessor_MAIN.IN'
>>> sim_file = 'Simulation_MAIN.IN'
>>> model = IWFMModel(pp_file, sim_file)
>>> model.get_reach_outflow_destination_types()
array([1, 3, 0])
>>> model.kill()
>>> model.close_log_file()
"""
if not hasattr(self.dll, "IW_Model_GetReachOutflowDestTypes"):
raise AttributeError(
'IWFM API does not have "{}" procedure. '
"Check for an updated version".format(
"IW_Model_GetReachOutflowDestTypes"
)
)
# get number of reaches
n_reaches = ctypes.c_int(self.get_n_stream_reaches())
# initialize output variables
reach_outflow_destination_types = (ctypes.c_int * n_reaches.value)()
# set instance variable status to 0
status = ctypes.c_int(0)
self.dll.IW_Model_GetReachOutflowDestTypes(
ctypes.byref(n_reaches),
reach_outflow_destination_types,
ctypes.byref(status),
)
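# destination type codes (see Note above): 0 = flow leaves the model
# domain, 1 = flow to a stream node in another reach, 3 = flow to a lake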
return np.array(reach_outflow_destination_types)
def get_n_diversions(self):
"""
Return the number of surface water diversions in an IWFM model
Returns
-------
int
number of surface water diversions in the IWFM Model
See Also
--------
IWFMModel.get_diversion_ids : Return the surface water diversion identification numbers specified in an IWFM model
IWFMModel.get_stream_diversion_locations : Return the stream node IDs corresponding to diversion locations
Example
-------
>>> from pywfm import IWFMModel
>>> pp_file = '../Preprocessor/PreProcessor_MAIN.IN'
>>> sim_file = 'Simulation_MAIN.IN'
>>> model = IWFMModel(pp_file, sim_file)
>>> model.get_n_diversions()
5
>>> model.kill()
>>> model.close_log_file()
"""
# check to see if IWFM procedure is available in user version of IWFM DLL
if not hasattr(self.dll, "IW_Model_GetNDiversions"):
raise AttributeError(
'IWFM API does not have "{}" procedure. '
"Check for an updated version".format("IW_Model_GetNDiversions")
)
# set instance variable status to 0
status = ctypes.c_int(0)
# initialize n_diversions variable
n_diversions = ctypes.c_int(0)
self.dll.IW_Model_GetNDiversions(
ctypes.byref(n_diversions), ctypes.byref(status)
)
return n_diversions.value
def get_diversion_ids(self):
"""
Return the surface water diversion identification numbers
specified in an IWFM model
Returns
-------
np.ndarray
array of diversion IDs
See Also
--------
IWFMModel.get_n_diversions : Return the number of surface water diversions in an IWFM model
IWFMModel.get_stream_diversion_locations : Return the stream node IDs corresponding to diversion locations
Example
-------
>>> from pywfm import IWFMModel
>>> pp_file = '../Preprocessor/PreProcessor_MAIN.IN'
>>> sim_file = 'Simulation_MAIN.IN'
>>> model = IWFMModel(pp_file, sim_file)
>>> model.get_diversion_ids()
array([1, 2, 3, 4, 5])
>>> model.kill()
>>> model.close_log_file()
"""
if not hasattr(self.dll, "IW_Model_GetDiversionIDs"):
raise AttributeError(
'IWFM API does not have "{}" procedure. '
"Check for an updated version".format("IW_Model_GetDiversionIDs")
)
# set input variables
n_diversions = ctypes.c_int(self.get_n_diversions())
# set instance variable status to 0
status = ctypes.c_int(0)
# initialize output variables
diversion_ids = (ctypes.c_int * n_diversions.value)()
self.dll.IW_Model_GetDiversionIDs(
ctypes.byref(n_diversions), diversion_ids, ctypes.byref(status)
)
return np.array(diversion_ids)
def get_n_lakes(self):
"""
Return the number of lakes in an IWFM model
Returns
-------
int
number of lakes in the IWFM model
See Also
--------
IWFMModel.get_lake_ids : Return an array of lake IDs in an IWFM model
IWFMModel.get_n_elements_in_lake : Return the number of finite element grid cells that make up a lake
IWFMModel.get_elements_in_lake : Return the element ids with the specified lake ID
Example
-------
>>> from pywfm import IWFMModel
>>> pp_file = '../Preprocessor/PreProcessor_MAIN.IN'
>>> sim_file = 'Simulation_MAIN.IN'
>>> model = IWFMModel(pp_file, sim_file)
>>> model.get_n_lakes()
1
>>> model.kill()
>>> model.close_log_file()
"""
# check to see if IWFM procedure is available in user version of IWFM DLL
if not hasattr(self.dll, "IW_Model_GetNLakes"):
raise AttributeError(
'IWFM API does not have "{}" procedure. '
"Check for an updated version".format("IW_Model_GetNLakes")
)
# initialize n_lakes variable
n_lakes = ctypes.c_int(0)
# set instance variable status to 0
status = ctypes.c_int(0)
self.dll.IW_Model_GetNLakes(ctypes.byref(n_lakes), ctypes.byref(status))
return n_lakes.value
def get_lake_ids(self):
"""
Return an array of lake IDs in an IWFM model
Returns
-------
np.ndarray
array of lake identification numbers for each lake in the model
See Also
--------
IWFMModel.get_n_lakes : Return the number of lakes in an IWFM model
IWFMModel.get_n_elements_in_lake : Return the number of finite element grid cells that make up a lake
IWFMModel.get_elements_in_lake : Return the element ids with the specified lake ID
Example
-------
>>> from pywfm import IWFMModel
>>> pp_file = '../Preprocessor/PreProcessor_MAIN.IN'
>>> sim_file = 'Simulation_MAIN.IN'
>>> model = IWFMModel(pp_file, sim_file)
>>> model.get_lake_ids()
array([1])
>>> model.kill()
>>> model.close_log_file()
"""
# check to see if IWFM procedure is available in user version of IWFM DLL
if not hasattr(self.dll, "IW_Model_GetLakeIDs"):
raise AttributeError(
'IWFM API does not have "{}" procedure. '
"Check for an updated version".format("IW_Model_GetLakeIDs")
)
# get number of lakes specified in the model
n_lakes = ctypes.c_int(self.get_n_lakes())
# stop here if no lakes are specified
if n_lakes.value == 0:
return
# initialize output variables
lake_ids = (ctypes.c_int * n_lakes.value)()
# set instance variable status to 0
status = ctypes.c_int(0)
self.dll.IW_Model_GetLakeIDs(
ctypes.byref(n_lakes), lake_ids, ctypes.byref(status)
)
return np.array(lake_ids)
def get_n_elements_in_lake(self, lake_id):
"""
Return the number of finite element grid cells that make
up a lake
Parameters
----------
lake_id : int
lake identification number used to obtain number of elements
contained in the lake
Returns
-------
int
number of elements representing the lake with the provided
id number
See Also
--------
IWFMModel.get_n_lakes : Return the number of lakes in an IWFM model
IWFMModel.get_lake_ids : Return an array of the lake IDs in an IWFM model
IWFMModel.get_elements_in_lake : Return the element ids with the specified lake ID
Example
-------
>>> from pywfm import IWFMModel
>>> pp_file = '../Preprocessor/PreProcessor_MAIN.IN'
>>> sim_file = 'Simulation_MAIN.IN'
>>> model = IWFMModel(pp_file, sim_file)
>>> model.get_n_elements_in_lake(1)
10
>>> model.kill()
>>> model.close_log_file()
"""
# check to see if IWFM procedure is available in user version of IWFM DLL
if not hasattr(self.dll, "IW_Model_GetNElementsInLake"):
raise AttributeError(
'IWFM API does not have "{}" procedure. '
"Check for an updated version".format("IW_Model_GetNElementsInLake")
)
# check if any lakes exist
n_lakes = self.get_n_lakes()
# if no lakes exist, return 0
if n_lakes == 0:
return 0
# check that lake_id is an integer
if not isinstance(lake_id, int):
raise TypeError("lake_id must be an integer")
# get all lake ids
lake_ids = self.get_lake_ids()
# check to see if the lake_id provided is a valid lake ID
if not np.any(lake_ids == lake_id):
raise ValueError("lake_id specified is not valid")
# convert lake_id to lake index
# add 1 to index to convert from python index to fortran index
lake_index = np.where(lake_ids == lake_id)[0][0] + 1
# convert lake id to ctypes
lake_index = ctypes.c_int(lake_index)
# initialize output variables
n_elements_in_lake = ctypes.c_int(0)
# set instance variable status to 0
status = ctypes.c_int(0)
self.dll.IW_Model_GetNElementsInLake(
ctypes.byref(lake_index),
ctypes.byref(n_elements_in_lake),
ctypes.byref(status),
)
return n_elements_in_lake.value
def get_elements_in_lake(self, lake_id):
"""
Return the element ids with the specified lake ID
Parameters
----------
lake_id : int
lake ID used to obtain element IDs that correspond to the lake
Returns
-------
np.ndarray
array of element IDs representing the lake with the provided
ID number
See Also
--------
IWFMModel.get_n_lakes : Return the number of lakes in an IWFM model
IWFMModel.get_lake_ids : Return an array of the lake IDs in an IWFM model
IWFMModel.get_n_elements_in_lake : Return the number of finite element grid cells that make up a lake
Example
-------
>>> from pywfm import IWFMModel
>>> pp_file = '../Preprocessor/PreProcessor_MAIN.IN'
>>> sim_file = 'Simulation_MAIN.IN'
>>> model = IWFMModel(pp_file, sim_file)
>>> model.get_elements_in_lake(1)
array([169, 170, 171, 188, 189, 190, 207, 208, 209, 210])
>>> model.kill()
>>> model.close_log_file()
"""
# check to see if IWFM procedure is available in user version of IWFM DLL
if not hasattr(self.dll, "IW_Model_GetElementsInLake"):
raise AttributeError(
'IWFM API does not have "{}" procedure. '
"Check for an updated version".format("IW_Model_GetElementsInLake")
)
# get number of lakes
n_lakes = self.get_n_lakes()
# if no lakes exist in the model return
if n_lakes == 0:
return
# check that lake_id is an integer
if not isinstance(lake_id, int):
raise TypeError("lake_id must be an integer")
# get all lake ids
lake_ids = self.get_lake_ids()
# check to see if the lake_id provided is a valid lake ID
if not np.any(lake_ids == lake_id):
raise ValueError("lake_id specified is not valid")
# convert lake_id to lake index
# add 1 to index to convert from python index to fortran index
lake_index = np.where(lake_ids == lake_id)[0][0] + 1
# convert lake id to ctypes
lake_index = ctypes.c_int(lake_index)
# get number of elements in lake
n_elements_in_lake = ctypes.c_int(self.get_n_elements_in_lake(lake_id))
# initialize output variables
elements_in_lake = (ctypes.c_int * n_elements_in_lake.value)()
# set instance variable status to 0
status = ctypes.c_int(0)
self.dll.IW_Model_GetElementsInLake(
ctypes.byref(lake_index),
ctypes.byref(n_elements_in_lake),
elements_in_lake,
ctypes.byref(status),
)
# convert element indices to element IDs
element_ids = self.get_element_ids()
lake_element_indices = np.array(elements_in_lake)
return element_ids[lake_element_indices - 1]
def get_n_tile_drains(self):
"""
Return the number of tile drain nodes in an IWFM model
Returns
-------
int
number of tile drains simulated in the IWFM model
application
See Also
--------
IWFMModel.get_tile_drain_ids : Return the user-specified IDs for tile drains simulated in an IWFM model
IWFMModel.get_tile_drain_nodes : Return the node ids where tile drains are specified
Example
-------
>>> from pywfm import IWFMModel
>>> pp_file = '../Preprocessor/PreProcessor_MAIN.IN'
>>> sim_file = 'Simulation_MAIN.IN'
>>> model = IWFMModel(pp_file, sim_file)
>>> model.get_n_tile_drains()
21
>>> model.kill()
>>> model.close_log_file()
"""
# check to see if IWFM procedure is available in user version of IWFM DLL
if not hasattr(self.dll, "IW_Model_GetNTileDrainNodes"):
raise AttributeError(
'IWFM API does not have "{}" procedure. '
"Check for an updated version".format("IW_Model_GetNTileDrainNodes")
)
# initialize output variables
n_tile_drains = ctypes.c_int(0)
# set instance variable status to 0
status = ctypes.c_int(0)
self.dll.IW_Model_GetNTileDrainNodes(
ctypes.byref(n_tile_drains), ctypes.byref(status)
)
return n_tile_drains.value
def get_tile_drain_ids(self):
"""
Return the user-specified IDs for tile drains simulated in an IWFM model
Returns
-------
np.ndarray
array of tile drain IDs specified in an IWFM model
See Also
--------
IWFMModel.get_n_tile_drains : Return the number of tile drain nodes in an IWFM model
IWFMModel.get_tile_drain_nodes : Return the node ids where tile drains are specified
Example
-------
>>> from pywfm import IWFMModel
>>> pp_file = '../Preprocessor/PreProcessor_MAIN.IN'
>>> sim_file = 'Simulation_MAIN.IN'
>>> model = IWFMModel(pp_file, sim_file)
>>> model.get_tile_drain_ids()
array([ 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17,
18, 19, 20, 21])
>>> model.kill()
>>> model.close_log_file()
"""
# check to see if IWFM procedure is available in user version of IWFM DLL
if not hasattr(self.dll, "IW_Model_GetTileDrainIDs"):
raise AttributeError(
'IWFM API does not have "{}" procedure. '
"Check for an updated version".format("IW_Model_GetTileDrainIDs")
)
# get number of tile drains specified in the model
n_tile_drains = ctypes.c_int(self.get_n_tile_drains())
# stop here if no tile drains are specified
if n_tile_drains.value == 0:
return
# initialize output variables
tile_drain_ids = (ctypes.c_int * n_tile_drains.value)()
# set instance variable status to 0
status = ctypes.c_int(0)
self.dll.IW_Model_GetTileDrainIDs(
ctypes.byref(n_tile_drains), tile_drain_ids, ctypes.byref(status)
)
return np.array(tile_drain_ids)
def get_tile_drain_nodes(self):
"""
Return the node ids where tile drains are specified
Returns
-------
np.ndarray
array of node IDs where tile drains are specified
See Also
--------
IWFMModel.get_n_tile_drains : Return the number of tile drain nodes in an IWFM model
IWFMModel.get_tile_drain_ids : Return the user-specified IDs for tile drains simulated in an IWFM model
Example
-------
>>> from pywfm import IWFMModel
>>> pp_file = '../Preprocessor/PreProcessor_MAIN.IN'
>>> sim_file = 'Simulation_MAIN.IN'
>>> model = IWFMModel(pp_file, sim_file)
>>> model.get_tile_drain_nodes()
array([ 6, 27, 48, 69, 90, 111, 132, 153, 174, 195, 216, 237, 258,
279, 300, 321, 342, 363, 384, 405, 426])
>>> model.kill()
>>> model.close_log_file()
"""
# check to see if IWFM procedure is available in user version of IWFM DLL
if not hasattr(self.dll, "IW_Model_GetTileDrainNodes"):
raise AttributeError(
'IWFM API does not have "{}" procedure. '
"Check for an updated version".format("IW_Model_GetTileDrainNodes")
)
# get number of tile_drains
n_tile_drains = ctypes.c_int(self.get_n_tile_drains())
# if no tile drains exist in the model return None
if n_tile_drains.value == 0:
return
# initialize output variables
tile_drain_nodes = (ctypes.c_int * n_tile_drains.value)()
# set instance variable status to 0
status = ctypes.c_int(0)
self.dll.IW_Model_GetTileDrainNodes(
ctypes.byref(n_tile_drains), tile_drain_nodes, ctypes.byref(status)
)
# convert tile drain node indices to node IDs
node_ids = self.get_node_ids()
tile_drain_node_indices = np.array(tile_drain_nodes)
return node_ids[tile_drain_node_indices - 1]
def get_n_layers(self):
"""
Return the number of layers in an IWFM model
Returns
-------
int
number of layers specified in an IWFM model
See Also
--------
IWFMModel.get_n_nodes : Return the number of nodes in an IWFM model
IWFMModel.get_n_elements : Return the number of elements in an IWFM model
IWFMModel.get_n_subregions : Return the number of subregions in an IWFM model
IWFMModel.get_n_stream_nodes : Return the number of stream nodes in an IWFM model
IWFMModel.get_n_stream_inflows : Return the number of stream boundary inflows specified by the user as timeseries input data
Example
-------
>>> from pywfm import IWFMModel
>>> pp_file = '../Preprocessor/PreProcessor_MAIN.IN'
>>> sim_file = 'Simulation_MAIN.IN'
>>> model = IWFMModel(pp_file, sim_file)
>>> model.get_n_layers()
2
>>> model.kill()
>>> model.close_log_file()
"""
# check to see if IWFM procedure is available in user version of IWFM DLL
if not hasattr(self.dll, "IW_Model_GetNLayers"):
raise AttributeError(
'IWFM API does not have "{}" procedure. '
"Check for an updated version".format("IW_Model_GetNLayers")
)
# initialize n_layers variable
n_layers = ctypes.c_int(0)
# set instance variable status to 0
status = ctypes.c_int(0)
self.dll.IW_Model_GetNLayers(ctypes.byref(n_layers), ctypes.byref(status))
return n_layers.value
def get_ground_surface_elevation(self):
"""
Return the ground surface elevation for each node specified
in the IWFM model
Returns
-------
np.ndarray
array of ground surface elevations at every finite element
node in an IWFM model
See Also
--------
IWFMModel.get_aquifer_top_elevation : Return the aquifer top elevations for each finite element node and each layer
IWFMModel.get_aquifer_bottom_elevation : Return the aquifer bottom elevations for each finite element node and each layer
IWFMModel.get_stratigraphy_atXYcoordinate : Return the stratigraphy at given X,Y coordinates
Example
-------
>>> from pywfm import IWFMModel
>>> pp_file = '../Preprocessor/PreProcessor_MAIN.IN'
>>> sim_file = 'Simulation_MAIN.IN'
>>> model = IWFMModel(pp_file, sim_file)
>>> model.get_ground_surface_elevation()
array([500., 500., 500., 500., 500., 500., 500., 500., 500., 500., 500.,
500., 500., 500., 500., 500., 500., 500., 500., 500., 500., 500.,
500., 500., 500., 500., 500., 500., 500., 500., 500., 500., 500.,
500., 500., 500., 500., 500., 500., 500., 500., 500., 500., 500.,
500., 500., 500., 500., 500., 500., 500., 500., 500., 500., 500.,
500., 500., 500., 500., 500., 500., 500., 500., 500., 500., 500.,
500., 500., 500., 500., 500., 500., 500., 500., 500., 500., 500.,
500., 500., 500., 500., 500., 500., 500., 500., 500., 500., 500.,
500., 500., 500., 500., 500., 500., 500., 500., 500., 500., 500.,
500., 500., 500., 500., 500., 500., 500., 500., 500., 500., 500.,
500., 500., 500., 500., 500., 500., 500., 500., 500., 500., 500.,
500., 500., 500., 500., 500., 500., 500., 500., 500., 500., 500.,
500., 500., 500., 500., 500., 500., 500., 500., 500., 500., 500.,
500., 500., 500., 500., 500., 500., 500., 500., 500., 500., 500.,
500., 500., 500., 500., 500., 500., 500., 500., 500., 500., 500.,
500., 500., 500., 500., 500., 500., 500., 500., 500., 500., 500.,
270., 270., 270., 270., 500., 500., 500., 500., 500., 500., 500.,
500., 500., 500., 500., 500., 500., 500., 500., 500., 270., 270.,
250., 270., 270., 500., 500., 500., 500., 500., 500., 500., 500.,
500., 500., 500., 500., 500., 500., 500., 270., 270., 250., 250.,
270., 500., 500., 500., 500., 500., 500., 500., 500., 500., 500.,
500., 500., 500., 500., 500., 500., 270., 270., 270., 270., 270.,
500., 500., 500., 500., 500., 500., 500., 500., 500., 500., 500.,
500., 500., 500., 500., 500., 500., 500., 500., 500., 500., 500.,
500., 500., 500., 500., 500., 500., 500., 500., 500., 500., 500.,
500., 500., 500., 500., 500., 500., 500., 500., 500., 500., 500.,
500., 500., 500., 500., 500., 500., 500., 500., 500., 500., 500.,
500., 500., 500., 500., 500., 500., 500., 500., 500., 500., 500.,
500., 500., 500., 500., 500., 500., 500., 500., 500., 500., 500.,
500., 500., 500., 500., 500., 500., 500., 500., 500., 500., 500.,
500., 500., 500., 500., 500., 500., 500., 500., 500., 500., 500.,
500., 500., 500., 500., 500., 500., 500., 500., 500., 500., 500.,
500., 500., 500., 500., 500., 500., 500., 500., 500., 500., 500.,
500., 500., 500., 500., 500., 500., 500., 500., 500., 500., 500.,
500., 500., 500., 500., 500., 500., 500., 500., 500., 500., 500.,
500., 500., 500., 500., 500., 500., 500., 500., 500., 500., 500.,
500., 500., 500., 500., 500., 500., 500., 500., 500., 500., 500.,
500., 500., 500., 500., 500., 500., 500., 500., 500., 500., 500.,
500., 500., 500., 500., 500., 500., 500., 500., 500., 500., 500.,
500., 500., 500., 500., 500., 500., 500., 500., 500., 500., 500.,
500.])
>>> model.kill()
>>> model.close_log_file()
"""
# check to see if IWFM procedure is available in user version of IWFM DLL
if not hasattr(self.dll, "IW_Model_GetGSElev"):
raise AttributeError(
'IWFM API does not have "{}" procedure. '
"Check for an updated version".format("IW_Model_GetGSElev")
)
# get number of model nodes
n_nodes = ctypes.c_int(self.get_n_nodes())
# initialize output variables
gselev = (ctypes.c_double * n_nodes.value)()
# set instance variable status to 0
status = ctypes.c_int(0)
self.dll.IW_Model_GetGSElev(ctypes.byref(n_nodes), gselev, ctypes.byref(status))
return np.array(gselev)
def get_aquifer_top_elevation(self):
"""
Return the aquifer top elevations for each finite element
node and each layer
Returns
-------
np.ndarray
array of aquifer top elevations for each model layer
Note
----
Resulting array has a shape of (n_layers, n_nodes)
See Also
--------
IWFMModel.get_ground_surface_elevation : Return the ground surface elevation for each node specified in the IWFM model
IWFMModel.get_aquifer_bottom_elevation : Return the aquifer bottom elevations for each finite element node and each layer
IWFMModel.get_stratigraphy_atXYcoordinate : Return the stratigraphy at given X,Y coordinates
Example
-------
>>> from pywfm import IWFMModel
>>> pp_file = '../Preprocessor/PreProcessor_MAIN.IN'
>>> sim_file = 'Simulation_MAIN.IN'
>>> model = IWFMModel(pp_file, sim_file)
>>> model.get_aquifer_top_elevation()
array([[500., 500., 500., 500., 500., 500., 500., 500., 500., 500., 500.,
500., 500., 500., 500., 500., 500., 500., 500., 500., 500., 500.,
500., 500., 500., 500., 500., 500., 500., 500., 500., 500., 500.,
500., 500., 500., 500., 500., 500., 500., 500., 500., 500., 500.,
500., 500., 500., 500., 500., 500., 500., 500., 500., 500., 500.,
500., 500., 500., 500., 500., 500., 500., 500., 500., 500., 500.,
500., 500., 500., 500., 500., 500., 500., 500., 500., 500., 500.,
500., 500., 500., 500., 500., 500., 500., 500., 500., 500., 500.,
500., 500., 500., 500., 500., 500., 500., 500., 500., 500., 500.,
500., 500., 500., 500., 500., 500., 500., 500., 500., 500., 500.,
500., 500., 500., 500., 500., 500., 500., 500., 500., 500., 500.,
500., 500., 500., 500., 500., 500., 500., 500., 500., 500., 500.,
500., 500., 500., 500., 500., 500., 500., 500., 500., 500., 500.,
500., 500., 500., 500., 500., 500., 500., 500., 500., 500., 500.,
500., 500., 500., 500., 500., 500., 500., 500., 500., 500., 500.,
500., 500., 500., 500., 500., 500., 500., 500., 500., 500., 500.,
270., 270., 270., 270., 500., 500., 500., 500., 500., 500., 500.,
500., 500., 500., 500., 500., 500., 500., 500., 500., 270., 270.,
250., 270., 270., 500., 500., 500., 500., 500., 500., 500., 500.,
500., 500., 500., 500., 500., 500., 500., 270., 270., 250., 250.,
270., 500., 500., 500., 500., 500., 500., 500., 500., 500., 500.,
500., 500., 500., 500., 500., 500., 270., 270., 270., 270., 270.,
500., 500., 500., 500., 500., 500., 500., 500., 500., 500., 500.,
500., 500., 500., 500., 500., 500., 500., 500., 500., 500., 500.,
500., 500., 500., 500., 500., 500., 500., 500., 500., 500., 500.,
500., 500., 500., 500., 500., 500., 500., 500., 500., 500., 500.,
500., 500., 500., 500., 500., 500., 500., 500., 500., 500., 500.,
500., 500., 500., 500., 500., 500., 500., 500., 500., 500., 500.,
500., 500., 500., 500., 500., 500., 500., 500., 500., 500., 500.,
500., 500., 500., 500., 500., 500., 500., 500., 500., 500., 500.,
500., 500., 500., 500., 500., 500., 500., 500., 500., 500., 500.,
500., 500., 500., 500., 500., 500., 500., 500., 500., 500., 500.,
500., 500., 500., 500., 500., 500., 500., 500., 500., 500., 500.,
500., 500., 500., 500., 500., 500., 500., 500., 500., 500., 500.,
500., 500., 500., 500., 500., 500., 500., 500., 500., 500., 500.,
500., 500., 500., 500., 500., 500., 500., 500., 500., 500., 500.,
500., 500., 500., 500., 500., 500., 500., 500., 500., 500., 500.,
500., 500., 500., 500., 500., 500., 500., 500., 500., 500., 500.,
500., 500., 500., 500., 500., 500., 500., 500., 500., 500., 500.,
500., 500., 500., 500., 500., 500., 500., 500., 500., 500., 500.,
500.],
[-10., -10., -10., -10., -10., -10., -10., -10., -10., -10., -10.,
-10., -10., -10., -10., -10., -10., -10., -10., -10., -10., -10.,
-10., -10., -10., -10., -10., -10., -10., -10., -10., -10., -10.,
-10., -10., -10., -10., -10., -10., -10., -10., -10., -10., -10.,
-10., -10., -10., -10., -10., -10., -10., -10., -10., -10., -10.,
-10., -10., -10., -10., -10., -10., -10., -10., -10., -10., -10.,
-10., -10., -10., -10., -10., -10., -10., -10., -10., -10., -10.,
-10., -10., -10., -10., -10., -10., -10., -10., -10., -10., -10.,
-10., -10., -10., -10., -10., -10., -10., -10., -10., -10., -10.,
-10., -10., -10., -10., -10., -10., -10., -10., -10., -10., -10.,
-10., -10., -10., -10., -10., -10., -10., -10., -10., -10., -10.,
-10., -10., -10., -10., -10., -10., -10., -10., -10., -10., -10.,
-10., -10., -10., -10., -10., -10., -10., -10., -10., -10., -10.,
-10., -10., -10., -10., -10., -10., -10., -10., -10., -10., -10.,
-10., -10., -10., -10., -10., -10., -10., -10., -10., -10., -10.,
-10., -10., -10., -10., -10., -10., -10., -10., -10., -10., -10.,
-10., -10., -10., -10., -10., -10., -10., -10., -10., -10., -10.,
-10., -10., -10., -10., -10., -10., -10., -10., -10., -10., -10.,
-10., -10., -10., -10., -10., -10., -10., -10., -10., -10., -10.,
-10., -10., -10., -10., -10., -10., -10., -10., -10., -10., -10.,
-10., -10., -10., -10., -10., -10., -10., -10., -10., -10., -10.,
0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.,
0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.,
0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.,
0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.,
0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.,
0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.,
0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.,
0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.,
0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.,
0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.,
0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.,
0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.,
0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.,
0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.,
0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.,
0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.,
0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.,
0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.,
0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.,
0.]])
>>> model.kill()
>>> model.close_log_file()
"""
# check to see if IWFM procedure is available in user version of IWFM DLL
if not hasattr(self.dll, "IW_Model_GetAquiferTopElev"):
raise AttributeError(
'IWFM API does not have "{}" procedure. '
"Check for an updated version".format("IW_Model_GetAquiferTopElev")
)
# get number of model nodes
n_nodes = ctypes.c_int(self.get_n_nodes())
# get number of model layers
n_layers = ctypes.c_int(self.get_n_layers())
# initialize output variables
aquifer_top_elevations = ((ctypes.c_double * n_nodes.value) * n_layers.value)()
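# note: this allocates a 2D (n_layers x n_nodes) block of C doubles that the
# DLL fills in place; np.array() below copies it into an ndarray of shape
# (n_layers, n_nodes)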
# set instance variable status to 0
status = ctypes.c_int(0)
self.dll.IW_Model_GetAquiferTopElev(
ctypes.byref(n_nodes),
ctypes.byref(n_layers),
aquifer_top_elevations,
ctypes.byref(status),
)
return np.array(aquifer_top_elevations)
def get_aquifer_bottom_elevation(self):
"""
Return the aquifer bottom elevations for each finite element
node and each layer
Returns
-------
np.ndarray
array of aquifer bottom elevations
Note
----
Resulting array has a shape of (n_layers, n_nodes)
See Also
--------
IWFMModel.get_ground_surface_elevation : Return the ground surface elevation for each node specified in the IWFM model
IWFMModel.get_aquifer_top_elevation : Return the aquifer top elevations for each finite element node and each layer
IWFMModel.get_stratigraphy_atXYcoordinate : Return the stratigraphy at given X,Y coordinates
Example
-------
>>> from pywfm import IWFMModel
>>> pp_file = '../Preprocessor/PreProcessor_MAIN.IN'
>>> sim_file = 'Simulation_MAIN.IN'
>>> model = IWFMModel(pp_file, sim_file)
>>> model.get_aquifer_bottom_elevation()
array([[ 0., 0., 0., 0., 0., 0., 0., 0., 0.,
0., 0., 0., 0., 0., 0., 0., 0., 0.,
0., 0., 0., 0., 0., 0., 0., 0., 0.,
0., 0., 0., 0., 0., 0., 0., 0., 0.,
0., 0., 0., 0., 0., 0., 0., 0., 0.,
0., 0., 0., 0., 0., 0., 0., 0., 0.,
0., 0., 0., 0., 0., 0., 0., 0., 0.,
0., 0., 0., 0., 0., 0., 0., 0., 0.,
0., 0., 0., 0., 0., 0., 0., 0., 0.,
0., 0., 0., 0., 0., 0., 0., 0., 0.,
0., 0., 0., 0., 0., 0., 0., 0., 0.,
0., 0., 0., 0., 0., 0., 0., 0., 0.,
0., 0., 0., 0., 0., 0., 0., 0., 0.,
0., 0., 0., 0., 0., 0., 0., 0., 0.,
0., 0., 0., 0., 0., 0., 0., 0., 0.,
0., 0., 0., 0., 0., 0., 0., 0., 0.,
0., 0., 0., 0., 0., 0., 0., 0., 0.,
0., 0., 0., 0., 0., 0., 0., 0., 0.,
0., 0., 0., 0., 0., 0., 0., 0., 0.,
0., 0., 0., 0., 0., 0., 0., 0., 0.,
0., 0., 0., 0., 0., 0., 0., 0., 0.,
0., 0., 0., 0., 0., 0., 0., 0., 0.,
0., 0., 0., 0., 0., 0., 0., 0., 0.,
0., 0., 0., 0., 0., 0., 0., 0., 0.,
0., 0., 0., 0., 0., 0., 0., 0., 0.,
0., 0., 0., 0., 0., 0., 0., 0., 0.,
0., 0., 0., 0., 0., 0., 0., 0., 0.,
0., 0., 0., 0., 0., 0., 0., 0., 0.,
0., 0., 0., 0., 0., 0., 0., 0., 0.,
0., 0., 0., 0., 0., 0., 0., 0., 0.,
0., 0., 0., 0., 0., 0., 0., 0., 0.,
0., 0., 0., 0., 0., 0., 0., 0., 0.,
0., 0., 0., 0., 0., 0., 0., 0., 0.,
0., 0., 0., 0., 0., 0., 0., 0., 0.,
0., 0., 0., 0., 0., 0., 0., 0., 0.,
0., 0., 0., 0., 0., 0., 0., 0., 0.,
0., 0., 0., 0., 0., 0., 0., 0., 0.,
0., 0., 0., 0., 0., 0., 0., 0., 0.,
0., 0., 0., 0., 0., 0., 0., 0., 0.,
0., 0., 0., 0., 0., 0., 0., 0., 0.,
0., 0., 0., 0., 0., 0., 0., 0., 0.,
0., 0., 0., 0., 0., 0., 0., 0., 0.,
0., 0., 0., 0., 0., 0., 0., 0., 0.,
0., 0., 0., 0., 0., 0., 0., 0., 0.,
0., 0., 0., 0., 0., 0., 0., 0., 0.,
0., 0., 0., 0., 0., 0., 0., 0., 0.,
0., 0., 0., 0., 0., 0., 0., 0., 0.,
0., 0., 0., 0., 0., 0., 0., 0., 0.,
0., 0., 0., 0., 0., 0., 0., 0., 0.],
[-110., -110., -110., -110., -110., -110., -110., -110., -110.,
-110., -110., -110., -110., -110., -110., -110., -110., -110.,
-110., -110., -110., -110., -110., -110., -110., -110., -110.,
-110., -110., -110., -110., -110., -110., -110., -110., -110.,
-110., -110., -110., -110., -110., -110., -110., -110., -110.,
-110., -110., -110., -110., -110., -110., -110., -110., -110.,
-110., -110., -110., -110., -110., -110., -110., -110., -110.,
-110., -110., -110., -110., -110., -110., -110., -110., -110.,
-110., -110., -110., -110., -110., -110., -110., -110., -110.,
-110., -110., -110., -110., -110., -110., -110., -110., -110.,
-110., -110., -110., -110., -110., -110., -110., -110., -110.,
-110., -110., -110., -110., -110., -110., -110., -110., -110.,
-110., -110., -110., -110., -110., -110., -110., -110., -110.,
-110., -110., -110., -110., -110., -110., -110., -110., -110.,
-110., -110., -110., -110., -110., -110., -110., -110., -110.,
-110., -110., -110., -110., -110., -110., -110., -110., -110.,
-110., -110., -110., -110., -110., -110., -110., -110., -110.,
-110., -110., -110., -110., -110., -110., -110., -110., -110.,
-110., -110., -110., -110., -110., -110., -110., -110., -110.,
-110., -110., -110., -110., -110., -110., -110., -110., -110.,
-110., -110., -110., -110., -110., -110., -110., -110., -110.,
-110., -110., -110., -110., -110., -110., -110., -110., -110.,
-110., -110., -110., -110., -110., -110., -110., -110., -110.,
-110., -110., -110., -110., -110., -110., -110., -110., -110.,
-110., -110., -110., -110., -110., -110., -110., -110., -110.,
-110., -110., -110., -110., -110., -110., -100., -100., -100.,
-100., -100., -100., -100., -100., -100., -100., -100., -100.,
-100., -100., -100., -100., -100., -100., -100., -100., -100.,
-100., -100., -100., -100., -100., -100., -100., -100., -100.,
-100., -100., -100., -100., -100., -100., -100., -100., -100.,
-100., -100., -100., -100., -100., -100., -100., -100., -100.,
-100., -100., -100., -100., -100., -100., -100., -100., -100.,
-100., -100., -100., -100., -100., -100., -100., -100., -100.,
-100., -100., -100., -100., -100., -100., -100., -100., -100.,
-100., -100., -100., -100., -100., -100., -100., -100., -100.,
-100., -100., -100., -100., -100., -100., -100., -100., -100.,
-100., -100., -100., -100., -100., -100., -100., -100., -100.,
-100., -100., -100., -100., -100., -100., -100., -100., -100.,
-100., -100., -100., -100., -100., -100., -100., -100., -100.,
-100., -100., -100., -100., -100., -100., -100., -100., -100.,
-100., -100., -100., -100., -100., -100., -100., -100., -100.,
-100., -100., -100., -100., -100., -100., -100., -100., -100.,
-100., -100., -100., -100., -100., -100., -100., -100., -100.,
-100., -100., -100., -100., -100., -100., -100., -100., -100.,
-100., -100., -100., -100., -100., -100., -100., -100., -100.,
-100., -100., -100., -100., -100., -100., -100., -100., -100.,
-100., -100., -100., -100., -100., -100., -100., -100., -100.,
-100., -100., -100., -100., -100., -100., -100., -100., -100.,
-100., -100., -100., -100., -100., -100., -100., -100., -100.]])
>>> model.kill()
>>> model.close_log_file()
"""
# check to see if IWFM procedure is available in user version of IWFM DLL
if not hasattr(self.dll, "IW_Model_GetAquiferBottomElev"):
raise AttributeError(
'IWFM API does not have "{}" procedure. '
"Check for an updated version".format("IW_Model_GetAquiferBottomElev")
)
# get number of model nodes
n_nodes = ctypes.c_int(self.get_n_nodes())
# get number of model layers
n_layers = ctypes.c_int(self.get_n_layers())
# initialize output variables
aquifer_bottom_elevations = (
(ctypes.c_double * n_nodes.value) * n_layers.value
)()
# set instance variable status to 0
status = ctypes.c_int(0)
self.dll.IW_Model_GetAquiferBottomElev(
ctypes.byref(n_nodes),
ctypes.byref(n_layers),
aquifer_bottom_elevations,
ctypes.byref(status),
)
return np.array(aquifer_bottom_elevations)
def get_stratigraphy_atXYcoordinate(self, x, y, fact=1.0, output_options=1):
"""
Return the stratigraphy at given X,Y coordinates
Parameters
----------
x : int, float
x-coordinate for spatial location
y : int, float
y-coordinate for spatial location
fact : int, float, default=1.0
conversion factor for x,y coordinates to model length units
output_options : int, string, default=1
selects how output is returned by the function {1 or 'combined', 2 or 'gse', 3 or 'tops', 4 or 'bottoms'}
Returns
-------
np.ndarray : if output_options == 1 or 'combined',
array contains ground surface elevation and the bottoms of all layers
float : if output_options == 2 or 'gse',
ground surface elevation at x,y coordinates
np.ndarray : if output_options == 3 or 'tops',
array containing the top elevations of each model layer
np.ndarray : if output_options == 4 or 'bottoms',
array containing the bottom elevations of each model layer
tuple : length 3, if output_options is some other integer or string not defined above,
ground surface elevation at x,y coordinates,
numpy array of top elevation of each layer,
numpy array of bottom elevation of each layer
Note
----
All return values will be zero if the coordinates provided do not fall within a model element
If model units are in feet and x,y coordinates are provided in meters, then FACT=3.2808
See Also
--------
IWFMModel.get_ground_surface_elevation : Return the ground surface elevation for each node specified in the IWFM model
IWFMModel.get_aquifer_top_elevation : Return the aquifer top elevations for each finite element node and each layer
IWFMModel.get_aquifer_bottom_elevation : Return the aquifer bottom elevations for each finite element node and each layer
Examples
--------
>>> from pywfm import IWFMModel
>>> pp_file = '../Preprocessor/PreProcessor_MAIN.IN'
>>> sim_file = 'Simulation_MAIN.IN'
>>> model = IWFMModel(pp_file, sim_file)
>>> model.get_stratigraphy_atXYcoordinate(590000.0, 4440000.0, 3.2808, 5)
(500.0, array([500., 0.]), array([ 0., -100.]))
>>> model.kill()
>>> model.close_log_file()
>>> from pywfm import IWFMModel
>>> pp_file = '../Preprocessor/PreProcessor_MAIN.IN'
>>> sim_file = 'Simulation_MAIN.IN'
>>> model = IWFMModel(pp_file, sim_file)
>>> model.get_stratigraphy_atXYcoordinate(590000.0, 4440000.0, 3.2808, 1)
array([ 500., 0., -100.])
>>> model.kill()
>>> model.close_log_file()
>>> from pywfm import IWFMModel
>>> pp_file = '../Preprocessor/PreProcessor_MAIN.IN'
>>> sim_file = 'Simulation_MAIN.IN'
>>> model = IWFMModel(pp_file, sim_file)
>>> model.get_stratigraphy_atXYcoordinate(590000.0, 4440000.0, 3.2808, 'gse')
500.0
>>> model.kill()
>>> model.close_log_file()
"""
# check to see if IWFM procedure is available in user version of IWFM DLL
if not hasattr(self.dll, "IW_Model_GetStratigraphy_AtXYCoordinate"):
raise AttributeError(
'IWFM API does not have "{}" procedure. '
"Check for an updated version".format(
"IW_Model_GetStratigraphy_AtXYCoordinate"
)
)
if not isinstance(x, (int, float)):
raise TypeError("X-coordinate must be an int or float")
if not isinstance(y, (int, float)):
raise TypeError("Y-coordinate must be an int or float")
if not isinstance(fact, (int, float)):
raise TypeError("conversion factor must be an int or float")
if not isinstance(output_options, (int, str)):
raise TypeError("output_options must be an integer or string")
# get number of model layers
n_layers = ctypes.c_int(self.get_n_layers())
# convert input variables to ctypes
x = ctypes.c_double(x * fact)
y = ctypes.c_double(y * fact)
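# x and y are scaled by fact so they are in model length units
# (e.g. fact=3.2808 to convert meters to feet, per the docstring note)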
# initialize output variables
gselev = ctypes.c_double(0.0)
top_elevs = (ctypes.c_double * n_layers.value)()
bottom_elevs = (ctypes.c_double * n_layers.value)()
# set instance variable status to 0
status = ctypes.c_int(0)
self.dll.IW_Model_GetStratigraphy_AtXYCoordinate(
ctypes.byref(n_layers),
ctypes.byref(x),
ctypes.byref(y),
ctypes.byref(gselev),
top_elevs,
bottom_elevs,
ctypes.byref(status),
)
# user output options
if output_options == 1 or output_options == "combined":
output = np.concatenate((gselev.value, np.array(bottom_elevs)), axis=None)
elif output_options == 2 or output_options == "gse":
output = gselev.value
elif output_options == 3 or output_options == "tops":
output = np.array(top_elevs)
elif output_options == 4 or output_options == "bottoms":
output = np.array(bottom_elevs)
else:
output = (gselev.value, np.array(top_elevs), np.array(bottom_elevs))
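# note: any unrecognized output option returns the full
# (gse, tops, bottoms) tuple rather than raising an error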
return output
def get_aquifer_horizontal_k(self):
"""
Return the aquifer horizontal hydraulic conductivity for
each finite element node and each layer
Returns
-------
np.ndarray
array of aquifer horizontal hydraulic conductivity
See Also
--------
IWFMModel.get_aquifer_vertical_k : Return the aquifer vertical hydraulic conductivity for each finite element node and each layer
IWFMModel.get_aquitard_vertical_k : Return the aquitard vertical hydraulic conductivity for each finite element node and each layer
IWFMModel.get_aquifer_specific_yield : Return the aquifer specific yield for each finite element node and each layer
IWFMModel.get_aquifer_specific_storage : Return the aquifer specific storage for each finite element node and each layer
IWFMModel.get_aquifer_parameters : Return all aquifer parameters at each model node and layer
Example
-------
>>> from pywfm import IWFMModel
>>> pp_file = '../Preprocessor/PreProcessor_MAIN.IN'
>>> sim_file = 'Simulation_MAIN.IN'
>>> model = IWFMModel(pp_file, sim_file)
>>> model.get_aquifer_horizontal_k()
array([[50., 50., 50., 50., 50., 50., 50., 50., 50., 50., 50., 50., 50.,
50., 50., 50., 50., 50., 50., 50., 50., 50., 50., 50., 50., 50.,
50., 50., 50., 50., 50., 50., 50., 50., 50., 50., 50., 50., 50.,
50., 50., 50., 50., 50., 50., 50., 50., 50., 50., 50., 50., 50.,
50., 50., 50., 50., 50., 50., 50., 50., 50., 50., 50., 50., 50.,
50., 50., 50., 50., 50., 50., 50., 50., 50., 50., 50., 50., 50.,
50., 50., 50., 50., 50., 50., 50., 50., 50., 50., 50., 50., 50.,
50., 50., 50., 50., 50., 50., 50., 50., 50., 50., 50., 50., 50.,
50., 50., 50., 50., 50., 50., 50., 50., 50., 50., 50., 50., 50.,
50., 50., 50., 50., 50., 50., 50., 50., 50., 50., 50., 50., 50.,
50., 50., 50., 50., 50., 50., 50., 50., 50., 50., 50., 50., 50.,
50., 50., 50., 50., 50., 50., 50., 50., 50., 50., 50., 50., 50.,
50., 50., 50., 50., 50., 50., 50., 50., 50., 50., 50., 50., 50.,
50., 50., 50., 50., 50., 50., 50., 50., 50., 50., 50., 50., 50.,
50., 50., 50., 50., 50., 50., 50., 50., 50., 50., 50., 50., 50.,
50., 50., 50., 50., 50., 50., 50., 50., 50., 50., 50., 50., 50.,
50., 50., 50., 50., 50., 50., 50., 50., 50., 50., 50., 50., 50.,
50., 50., 50., 50., 50., 50., 50., 50., 50., 50., 50., 50., 50.,
50., 50., 50., 50., 50., 50., 50., 50., 50., 50., 50., 50., 50.,
50., 50., 50., 50., 50., 50., 50., 50., 50., 50., 50., 50., 50.,
50., 50., 50., 50., 50., 50., 50., 50., 50., 50., 50., 50., 50.,
50., 50., 50., 50., 50., 50., 50., 50., 50., 50., 50., 50., 50.,
50., 50., 50., 50., 50., 50., 50., 50., 50., 50., 50., 50., 50.,
50., 50., 50., 50., 50., 50., 50., 50., 50., 50., 50., 50., 50.,
50., 50., 50., 50., 50., 50., 50., 50., 50., 50., 50., 50., 50.,
50., 50., 50., 50., 50., 50., 50., 50., 50., 50., 50., 50., 50.,
50., 50., 50., 50., 50., 50., 50., 50., 50., 50., 50., 50., 50.,
50., 50., 50., 50., 50., 50., 50., 50., 50., 50., 50., 50., 50.,
50., 50., 50., 50., 50., 50., 50., 50., 50., 50., 50., 50., 50.,
50., 50., 50., 50., 50., 50., 50., 50., 50., 50., 50., 50., 50.,
50., 50., 50., 50., 50., 50., 50., 50., 50., 50., 50., 50., 50.,
50., 50., 50., 50., 50., 50., 50., 50., 50., 50., 50., 50., 50.,
50., 50., 50., 50., 50., 50., 50., 50., 50., 50., 50., 50., 50.,
50., 50., 50., 50., 50., 50., 50., 50., 50., 50., 50., 50.],
[50., 50., 50., 50., 50., 50., 50., 50., 50., 50., 50., 50., 50.,
50., 50., 50., 50., 50., 50., 50., 50., 50., 50., 50., 50., 50.,
50., 50., 50., 50., 50., 50., 50., 50., 50., 50., 50., 50., 50.,
50., 50., 50., 50., 50., 50., 50., 50., 50., 50., 50., 50., 50.,
50., 50., 50., 50., 50., 50., 50., 50., 50., 50., 50., 50., 50.,
50., 50., 50., 50., 50., 50., 50., 50., 50., 50., 50., 50., 50.,
50., 50., 50., 50., 50., 50., 50., 50., 50., 50., 50., 50., 50.,
50., 50., 50., 50., 50., 50., 50., 50., 50., 50., 50., 50., 50.,
50., 50., 50., 50., 50., 50., 50., 50., 50., 50., 50., 50., 50.,
50., 50., 50., 50., 50., 50., 50., 50., 50., 50., 50., 50., 50.,
50., 50., 50., 50., 50., 50., 50., 50., 50., 50., 50., 50., 50.,
50., 50., 50., 50., 50., 50., 50., 50., 50., 50., 50., 50., 50.,
50., 50., 50., 50., 50., 50., 50., 50., 50., 50., 50., 50., 50.,
50., 50., 50., 50., 50., 50., 50., 50., 50., 50., 50., 50., 50.,
50., 50., 50., 50., 50., 50., 50., 50., 50., 50., 50., 50., 50.,
50., 50., 50., 50., 50., 50., 50., 50., 50., 50., 50., 50., 50.,
50., 50., 50., 50., 50., 50., 50., 50., 50., 50., 50., 50., 50.,
50., 50., 50., 50., 50., 50., 50., 50., 50., 50., 50., 50., 50.,
50., 50., 50., 50., 50., 50., 50., 50., 50., 50., 50., 50., 50.,
50., 50., 50., 50., 50., 50., 50., 50., 50., 50., 50., 50., 50.,
50., 50., 50., 50., 50., 50., 50., 50., 50., 50., 50., 50., 50.,
50., 50., 50., 50., 50., 50., 50., 50., 50., 50., 50., 50., 50.,
50., 50., 50., 50., 50., 50., 50., 50., 50., 50., 50., 50., 50.,
50., 50., 50., 50., 50., 50., 50., 50., 50., 50., 50., 50., 50.,
50., 50., 50., 50., 50., 50., 50., 50., 50., 50., 50., 50., 50.,
50., 50., 50., 50., 50., 50., 50., 50., 50., 50., 50., 50., 50.,
50., 50., 50., 50., 50., 50., 50., 50., 50., 50., 50., 50., 50.,
50., 50., 50., 50., 50., 50., 50., 50., 50., 50., 50., 50., 50.,
50., 50., 50., 50., 50., 50., 50., 50., 50., 50., 50., 50., 50.,
50., 50., 50., 50., 50., 50., 50., 50., 50., 50., 50., 50., 50.,
50., 50., 50., 50., 50., 50., 50., 50., 50., 50., 50., 50., 50.,
50., 50., 50., 50., 50., 50., 50., 50., 50., 50., 50., 50., 50.,
50., 50., 50., 50., 50., 50., 50., 50., 50., 50., 50., 50., 50.,
50., 50., 50., 50., 50., 50., 50., 50., 50., 50., 50., 50.]])
>>> model.kill()
>>> model.close_log_file()
"""
# check to see if IWFM procedure is available in user version of IWFM DLL
if not hasattr(self.dll, "IW_Model_GetAquiferHorizontalK"):
raise AttributeError(
'IWFM API does not have "{}" procedure. '
"Check for an updated version".format("IW_Model_GetAquiferHorizontalK")
)
# get number of model nodes
n_nodes = ctypes.c_int(self.get_n_nodes())
# get number of model layers
n_layers = ctypes.c_int(self.get_n_layers())
# initialize output variables
aquifer_horizontal_k = ((ctypes.c_double * n_nodes.value) * n_layers.value)()
# set instance variable status to 0
status = ctypes.c_int(0)
self.dll.IW_Model_GetAquiferHorizontalK(
ctypes.byref(n_nodes),
ctypes.byref(n_layers),
aquifer_horizontal_k,
ctypes.byref(status),
)
return np.array(aquifer_horizontal_k)
def get_aquifer_vertical_k(self):
"""
Return the aquifer vertical hydraulic conductivity for each finite element
node and each layer
Returns
-------
np.ndarray
array of aquifer vertical hydraulic conductivity
See Also
--------
IWFMModel.get_aquifer_horizontal_k : Return the aquifer horizontal hydraulic conductivity for each finite element node and each layer
IWFMModel.get_aquitard_vertical_k : Return the aquitard vertical hydraulic conductivity for each finite element node and each layer
IWFMModel.get_aquifer_specific_yield : Return the aquifer specific yield for each finite element node and each layer
IWFMModel.get_aquifer_specific_storage : Return the aquifer specific storage for each finite element node and each layer
IWFMModel.get_aquifer_parameters : Return all aquifer parameters at each model node and layer
Example
-------
>>> from pywfm import IWFMModel
>>> pp_file = '../Preprocessor/PreProcessor_MAIN.IN'
>>> sim_file = 'Simulation_MAIN.IN'
>>> model = IWFMModel(pp_file, sim_file)
>>> model.get_aquifer_vertical_k()
array([[1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1.,
1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1.,
1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1.,
1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1.,
1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1.,
1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1.,
1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1.,
1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1.,
1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1.,
1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1.,
1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1.,
1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1.,
1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1.,
1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1.,
1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1.,
1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1.,
1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1.,
1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1.,
1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1.,
1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1.,
1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1.,
1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1.,
1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1.,
1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1.,
1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1.,
1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1.,
1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1.,
1., 1., 1., 1., 1., 1., 1., 1., 1.],
[1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1.,
1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1.,
1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1.,
1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1.,
1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1.,
1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1.,
1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1.,
1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1.,
1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1.,
1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1.,
1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1.,
1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1.,
1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1.,
1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1.,
1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1.,
1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1.,
1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1.,
1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1.,
1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1.,
1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1.,
1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1.,
1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1.,
1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1.,
1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1.,
1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1.,
1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1.,
1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1.,
1., 1., 1., 1., 1., 1., 1., 1., 1.]])
>>> model.kill()
>>> model.close_log_file()
"""
# check to see if IWFM procedure is available in user version of IWFM DLL
if not hasattr(self.dll, "IW_Model_GetAquiferVerticalK"):
raise AttributeError(
'IWFM API does not have "{}" procedure. '
"Check for an updated version".format("IW_Model_GetAquiferVerticalK")
)
# get number of model nodes
n_nodes = ctypes.c_int(self.get_n_nodes())
# get number of model layers
n_layers = ctypes.c_int(self.get_n_layers())
# initialize output variables
aquifer_vertical_k = ((ctypes.c_double * n_nodes.value) * n_layers.value)()
# set instance variable status to 0
status = ctypes.c_int(0)
self.dll.IW_Model_GetAquiferVerticalK(
ctypes.byref(n_nodes),
ctypes.byref(n_layers),
aquifer_vertical_k,
ctypes.byref(status),
)
return np.array(aquifer_vertical_k)
def get_aquitard_vertical_k(self):
"""
Return the aquitard vertical hydraulic conductivity for
each finite element node and each layer
Returns
-------
np.ndarray
array of aquitard vertical hydraulic conductivity
See Also
--------
IWFMModel.get_aquifer_horizontal_k : Return the aquifer horizontal hydraulic conductivity for each finite element node and each layer
IWFMModel.get_aquifer_vertical_k : Return the aquifer vertical hydraulic conductivity for each finite element node and each layer
IWFMModel.get_aquifer_specific_yield : Return the aquifer specific yield for each finite element node and each layer
IWFMModel.get_aquifer_specific_storage : Return the aquifer specific storage for each finite element node and each layer
IWFMModel.get_aquifer_parameters : Return all aquifer parameters at each model node and layer
Example
-------
>>> from pywfm import IWFMModel
>>> pp_file = '../Preprocessor/PreProcessor_MAIN.IN'
>>> sim_file = 'Simulation_MAIN.IN'
>>> model = IWFMModel(pp_file, sim_file)
>>> model.get_aquitard_vertical_k()
array([[0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2,
0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2,
0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2,
0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2,
0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2,
0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2,
0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2,
0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2,
0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2,
0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2,
0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2,
0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2,
0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2,
0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2,
0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2,
0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2,
0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2,
0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2,
0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2,
0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2,
0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2,
0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2,
0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2,
0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2,
0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2,
0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2,
0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2,
0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2,
0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2,
0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2,
0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2,
0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2,
0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2,
0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2],
[0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2,
0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2,
0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2,
0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2,
0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2,
0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2,
0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2,
0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2,
0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2,
0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2,
0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2,
0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2,
0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2,
0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2,
0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2,
0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2,
0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2,
0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2,
0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2,
0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2,
0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2,
0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2,
0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2,
0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2,
0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2,
0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2,
0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2,
0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2,
0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2,
0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2,
0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2,
0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2,
0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2,
0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2]])
>>> model.kill()
>>> model.close_log_file()
"""
# check to see if IWFM procedure is available in user version of IWFM DLL
if not hasattr(self.dll, "IW_Model_GetAquitardVerticalK"):
raise AttributeError(
'IWFM API does not have "{}" procedure. '
"Check for an updated version".format("IW_Model_GetAquitardVerticalK")
)
# get number of model nodes
n_nodes = ctypes.c_int(self.get_n_nodes())
# get number of model layers
n_layers = ctypes.c_int(self.get_n_layers())
# initialize output variables
aquitard_vertical_k = ((ctypes.c_double * n_nodes.value) * n_layers.value)()
# set instance variable status to 0
status = ctypes.c_int(0)
self.dll.IW_Model_GetAquitardVerticalK(
ctypes.byref(n_nodes),
ctypes.byref(n_layers),
aquitard_vertical_k,
ctypes.byref(status),
)
return np.array(aquitard_vertical_k)
def get_aquifer_specific_yield(self):
"""
Return the aquifer specific yield for each finite element
node and each layer
Returns
-------
np.ndarray
array of aquifer specific yield
See Also
--------
IWFMModel.get_aquifer_horizontal_k : Return the aquifer horizontal hydraulic conductivity for each finite element node and each layer
IWFMModel.get_aquifer_vertical_k : Return the aquifer vertical hydraulic conductivity for each finite element node and each layer
IWFMModel.get_aquitard_vertical_k : Return the aquitard vertical hydraulic conductivity for each finite element node and each layer
IWFMModel.get_aquifer_specific_storage : Return the aquifer specific storage for each finite element node and each layer
IWFMModel.get_aquifer_parameters : Return all aquifer parameters at each model node and layer
Example
-------
>>> from pywfm import IWFMModel
>>> pp_file = '../Preprocessor/PreProcessor_MAIN.IN'
>>> sim_file = 'Simulation_MAIN.IN'
>>> model = IWFMModel(pp_file, sim_file)
>>> model.get_aquifer_specific_yield()
array([[0.25, 0.25, 0.25, 0.25, 0.25, 0.25, 0.25, 0.25, 0.25, 0.25, 0.25,
0.25, 0.25, 0.25, 0.25, 0.25, 0.25, 0.25, 0.25, 0.25, 0.25, 0.25,
0.25, 0.25, 0.25, 0.25, 0.25, 0.25, 0.25, 0.25, 0.25, 0.25, 0.25,
0.25, 0.25, 0.25, 0.25, 0.25, 0.25, 0.25, 0.25, 0.25, 0.25, 0.25,
0.25, 0.25, 0.25, 0.25, 0.25, 0.25, 0.25, 0.25, 0.25, 0.25, 0.25,
0.25, 0.25, 0.25, 0.25, 0.25, 0.25, 0.25, 0.25, 0.25, 0.25, 0.25,
0.25, 0.25, 0.25, 0.25, 0.25, 0.25, 0.25, 0.25, 0.25, 0.25, 0.25,
0.25, 0.25, 0.25, 0.25, 0.25, 0.25, 0.25, 0.25, 0.25, 0.25, 0.25,
0.25, 0.25, 0.25, 0.25, 0.25, 0.25, 0.25, 0.25, 0.25, 0.25, 0.25,
0.25, 0.25, 0.25, 0.25, 0.25, 0.25, 0.25, 0.25, 0.25, 0.25, 0.25,
0.25, 0.25, 0.25, 0.25, 0.25, 0.25, 0.25, 0.25, 0.25, 0.25, 0.25,
0.25, 0.25, 0.25, 0.25, 0.25, 0.25, 0.25, 0.25, 0.25, 0.25, 0.25,
0.25, 0.25, 0.25, 0.25, 0.25, 0.25, 0.25, 0.25, 0.25, 0.25, 0.25,
0.25, 0.25, 0.25, 0.25, 0.25, 0.25, 0.25, 0.25, 0.25, 0.25, 0.25,
0.25, 0.25, 0.25, 0.25, 0.25, 0.25, 0.25, 0.25, 0.25, 0.25, 0.25,
0.25, 0.25, 0.25, 0.25, 0.25, 0.25, 0.25, 0.25, 0.25, 0.25, 0.25,
0.25, 0.25, 0.25, 0.25, 0.25, 0.25, 0.25, 0.25, 0.25, 0.25, 0.25,
0.25, 0.25, 0.25, 0.25, 0.25, 0.25, 0.25, 0.25, 0.25, 0.25, 0.25,
0.25, 0.25, 0.25, 0.25, 0.25, 0.25, 0.25, 0.25, 0.25, 0.25, 0.25,
0.25, 0.25, 0.25, 0.25, 0.25, 0.25, 0.25, 0.25, 0.25, 0.25, 0.25,
0.25, 0.25, 0.25, 0.25, 0.25, 0.25, 0.25, 0.25, 0.25, 0.25, 0.25,
0.25, 0.25, 0.25, 0.25, 0.25, 0.25, 0.25, 0.25, 0.25, 0.25, 0.25,
0.25, 0.25, 0.25, 0.25, 0.25, 0.25, 0.25, 0.25, 0.25, 0.25, 0.25,
0.25, 0.25, 0.25, 0.25, 0.25, 0.25, 0.25, 0.25, 0.25, 0.25, 0.25,
0.25, 0.25, 0.25, 0.25, 0.25, 0.25, 0.25, 0.25, 0.25, 0.25, 0.25,
0.25, 0.25, 0.25, 0.25, 0.25, 0.25, 0.25, 0.25, 0.25, 0.25, 0.25,
0.25, 0.25, 0.25, 0.25, 0.25, 0.25, 0.25, 0.25, 0.25, 0.25, 0.25,
0.25, 0.25, 0.25, 0.25, 0.25, 0.25, 0.25, 0.25, 0.25, 0.25, 0.25,
0.25, 0.25, 0.25, 0.25, 0.25, 0.25, 0.25, 0.25, 0.25, 0.25, 0.25,
0.25, 0.25, 0.25, 0.25, 0.25, 0.25, 0.25, 0.25, 0.25, 0.25, 0.25,
0.25, 0.25, 0.25, 0.25, 0.25, 0.25, 0.25, 0.25, 0.25, 0.25, 0.25,
0.25, 0.25, 0.25, 0.25, 0.25, 0.25, 0.25, 0.25, 0.25, 0.25, 0.25,
0.25, 0.25, 0.25, 0.25, 0.25, 0.25, 0.25, 0.25, 0.25, 0.25, 0.25,
0.25, 0.25, 0.25, 0.25, 0.25, 0.25, 0.25, 0.25, 0.25, 0.25, 0.25,
0.25, 0.25, 0.25, 0.25, 0.25, 0.25, 0.25, 0.25, 0.25, 0.25, 0.25,
0.25, 0.25, 0.25, 0.25, 0.25, 0.25, 0.25, 0.25, 0.25, 0.25, 0.25,
0.25, 0.25, 0.25, 0.25, 0.25, 0.25, 0.25, 0.25, 0.25, 0.25, 0.25,
0.25, 0.25, 0.25, 0.25, 0.25, 0.25, 0.25, 0.25, 0.25, 0.25, 0.25,
0.25, 0.25, 0.25, 0.25, 0.25, 0.25, 0.25, 0.25, 0.25, 0.25, 0.25,
0.25, 0.25, 0.25, 0.25, 0.25, 0.25, 0.25, 0.25, 0.25, 0.25, 0.25,
0.25],
[0.25, 0.25, 0.25, 0.25, 0.25, 0.25, 0.25, 0.25, 0.25, 0.25, 0.25,
0.25, 0.25, 0.25, 0.25, 0.25, 0.25, 0.25, 0.25, 0.25, 0.25, 0.25,
0.25, 0.25, 0.25, 0.25, 0.25, 0.25, 0.25, 0.25, 0.25, 0.25, 0.25,
0.25, 0.25, 0.25, 0.25, 0.25, 0.25, 0.25, 0.25, 0.25, 0.25, 0.25,
0.25, 0.25, 0.25, 0.25, 0.25, 0.25, 0.25, 0.25, 0.25, 0.25, 0.25,
0.25, 0.25, 0.25, 0.25, 0.25, 0.25, 0.25, 0.25, 0.25, 0.25, 0.25,
0.25, 0.25, 0.25, 0.25, 0.25, 0.25, 0.25, 0.25, 0.25, 0.25, 0.25,
0.25, 0.25, 0.25, 0.25, 0.25, 0.25, 0.25, 0.25, 0.25, 0.25, 0.25,
0.25, 0.25, 0.25, 0.25, 0.25, 0.25, 0.25, 0.25, 0.25, 0.25, 0.25,
0.25, 0.25, 0.25, 0.25, 0.25, 0.25, 0.25, 0.25, 0.25, 0.25, 0.25,
0.25, 0.25, 0.25, 0.25, 0.25, 0.25, 0.25, 0.25, 0.25, 0.25, 0.25,
0.25, 0.25, 0.25, 0.25, 0.25, 0.25, 0.25, 0.25, 0.25, 0.25, 0.25,
0.25, 0.25, 0.25, 0.25, 0.25, 0.25, 0.25, 0.25, 0.25, 0.25, 0.25,
0.25, 0.25, 0.25, 0.25, 0.25, 0.25, 0.25, 0.25, 0.25, 0.25, 0.25,
0.25, 0.25, 0.25, 0.25, 0.25, 0.25, 0.25, 0.25, 0.25, 0.25, 0.25,
0.25, 0.25, 0.25, 0.25, 0.25, 0.25, 0.25, 0.25, 0.25, 0.25, 0.25,
0.25, 0.25, 0.25, 0.25, 0.25, 0.25, 0.25, 0.25, 0.25, 0.25, 0.25,
0.25, 0.25, 0.25, 0.25, 0.25, 0.25, 0.25, 0.25, 0.25, 0.25, 0.25,
0.25, 0.25, 0.25, 0.25, 0.25, 0.25, 0.25, 0.25, 0.25, 0.25, 0.25,
0.25, 0.25, 0.25, 0.25, 0.25, 0.25, 0.25, 0.25, 0.25, 0.25, 0.25,
0.25, 0.25, 0.25, 0.25, 0.25, 0.25, 0.25, 0.25, 0.25, 0.25, 0.25,
0.25, 0.25, 0.25, 0.25, 0.25, 0.25, 0.25, 0.25, 0.25, 0.25, 0.25,
0.25, 0.25, 0.25, 0.25, 0.25, 0.25, 0.25, 0.25, 0.25, 0.25, 0.25,
0.25, 0.25, 0.25, 0.25, 0.25, 0.25, 0.25, 0.25, 0.25, 0.25, 0.25,
0.25, 0.25, 0.25, 0.25, 0.25, 0.25, 0.25, 0.25, 0.25, 0.25, 0.25,
0.25, 0.25, 0.25, 0.25, 0.25, 0.25, 0.25, 0.25, 0.25, 0.25, 0.25,
0.25, 0.25, 0.25, 0.25, 0.25, 0.25, 0.25, 0.25, 0.25, 0.25, 0.25,
0.25, 0.25, 0.25, 0.25, 0.25, 0.25, 0.25, 0.25, 0.25, 0.25, 0.25,
0.25, 0.25, 0.25, 0.25, 0.25, 0.25, 0.25, 0.25, 0.25, 0.25, 0.25,
0.25, 0.25, 0.25, 0.25, 0.25, 0.25, 0.25, 0.25, 0.25, 0.25, 0.25,
0.25, 0.25, 0.25, 0.25, 0.25, 0.25, 0.25, 0.25, 0.25, 0.25, 0.25,
0.25, 0.25, 0.25, 0.25, 0.25, 0.25, 0.25, 0.25, 0.25, 0.25, 0.25,
0.25, 0.25, 0.25, 0.25, 0.25, 0.25, 0.25, 0.25, 0.25, 0.25, 0.25,
0.25, 0.25, 0.25, 0.25, 0.25, 0.25, 0.25, 0.25, 0.25, 0.25, 0.25,
0.25, 0.25, 0.25, 0.25, 0.25, 0.25, 0.25, 0.25, 0.25, 0.25, 0.25,
0.25, 0.25, 0.25, 0.25, 0.25, 0.25, 0.25, 0.25, 0.25, 0.25, 0.25,
0.25, 0.25, 0.25, 0.25, 0.25, 0.25, 0.25, 0.25, 0.25, 0.25, 0.25,
0.25, 0.25, 0.25, 0.25, 0.25, 0.25, 0.25, 0.25, 0.25, 0.25, 0.25,
0.25, 0.25, 0.25, 0.25, 0.25, 0.25, 0.25, 0.25, 0.25, 0.25, 0.25,
0.25, 0.25, 0.25, 0.25, 0.25, 0.25, 0.25, 0.25, 0.25, 0.25, 0.25,
0.25]])
>>> model.kill()
>>> model.close_log_file()
"""
# check to see if IWFM procedure is available in user version of IWFM DLL
if not hasattr(self.dll, "IW_Model_GetAquiferSy"):
raise AttributeError(
'IWFM API does not have "{}" procedure. '
"Check for an updated version".format("IW_Model_GetAquiferSy")
)
# get number of model nodes
n_nodes = ctypes.c_int(self.get_n_nodes())
# get number of model layers
n_layers = ctypes.c_int(self.get_n_layers())
# initialize output variables
aquifer_specific_yield = ((ctypes.c_double * n_nodes.value) * n_layers.value)()
# set instance variable status to 0
status = ctypes.c_int(0)
self.dll.IW_Model_GetAquiferSy(
ctypes.byref(n_nodes),
ctypes.byref(n_layers),
aquifer_specific_yield,
ctypes.byref(status),
)
return np.array(aquifer_specific_yield)
def get_aquifer_specific_storage(self):
"""
Return the aquifer specific storage for each finite element
node and each layer
Returns
-------
np.ndarray
array of aquifer specific storage
See Also
--------
IWFMModel.get_aquifer_horizontal_k : Return the aquifer horizontal hydraulic conductivity for each finite element node and each layer
IWFMModel.get_aquifer_vertical_k : Return the aquifer vertical hydraulic conductivity for each finite element node and each layer
IWFMModel.get_aquitard_vertical_k : Return the aquitard vertical hydraulic conductivity for each finite element node and each layer
IWFMModel.get_aquifer_specific_yield : Return the aquifer specific yield for each finite element node and each layer
IWFMModel.get_aquifer_parameters : Return all aquifer parameters at each model node and layer
Example
-------
>>> from pywfm import IWFMModel
>>> pp_file = '../Preprocessor/PreProcessor_MAIN.IN'
>>> sim_file = 'Simulation_MAIN.IN'
>>> model = IWFMModel(pp_file, sim_file)
>>> model.get_aquifer_specific_storage()
array([[2.5e-01, 2.5e-01, 2.5e-01, 2.5e-01, 2.5e-01, 2.5e-01, 2.5e-01,
2.5e-01, 2.5e-01, 2.5e-01, 2.5e-01, 2.5e-01, 2.5e-01, 2.5e-01,
2.5e-01, 2.5e-01, 2.5e-01, 2.5e-01, 2.5e-01, 2.5e-01, 2.5e-01,
2.5e-01, 2.5e-01, 2.5e-01, 2.5e-01, 2.5e-01, 2.5e-01, 2.5e-01,
2.5e-01, 2.5e-01, 2.5e-01, 2.5e-01, 2.5e-01, 2.5e-01, 2.5e-01,
2.5e-01, 2.5e-01, 2.5e-01, 2.5e-01, 2.5e-01, 2.5e-01, 2.5e-01,
2.5e-01, 2.5e-01, 2.5e-01, 2.5e-01, 2.5e-01, 2.5e-01, 2.5e-01,
2.5e-01, 2.5e-01, 2.5e-01, 2.5e-01, 2.5e-01, 2.5e-01, 2.5e-01,
2.5e-01, 2.5e-01, 2.5e-01, 2.5e-01, 2.5e-01, 2.5e-01, 2.5e-01,
2.5e-01, 2.5e-01, 2.5e-01, 2.5e-01, 2.5e-01, 2.5e-01, 2.5e-01,
2.5e-01, 2.5e-01, 2.5e-01, 2.5e-01, 2.5e-01, 2.5e-01, 2.5e-01,
2.5e-01, 2.5e-01, 2.5e-01, 2.5e-01, 2.5e-01, 2.5e-01, 2.5e-01,
2.5e-01, 2.5e-01, 2.5e-01, 2.5e-01, 2.5e-01, 2.5e-01, 2.5e-01,
2.5e-01, 2.5e-01, 2.5e-01, 2.5e-01, 2.5e-01, 2.5e-01, 2.5e-01,
2.5e-01, 2.5e-01, 2.5e-01, 2.5e-01, 2.5e-01, 2.5e-01, 2.5e-01,
2.5e-01, 2.5e-01, 2.5e-01, 2.5e-01, 2.5e-01, 2.5e-01, 2.5e-01,
2.5e-01, 2.5e-01, 2.5e-01, 2.5e-01, 2.5e-01, 2.5e-01, 2.5e-01,
2.5e-01, 2.5e-01, 2.5e-01, 2.5e-01, 2.5e-01, 2.5e-01, 2.5e-01,
2.5e-01, 2.5e-01, 2.5e-01, 2.5e-01, 2.5e-01, 2.5e-01, 2.5e-01,
2.5e-01, 2.5e-01, 2.5e-01, 2.5e-01, 2.5e-01, 2.5e-01, 2.5e-01,
2.5e-01, 2.5e-01, 2.5e-01, 2.5e-01, 2.5e-01, 2.5e-01, 2.5e-01,
2.5e-01, 2.5e-01, 2.5e-01, 2.5e-01, 2.5e-01, 2.5e-01, 2.5e-01,
2.5e-01, 2.5e-01, 2.5e-01, 2.5e-01, 2.5e-01, 2.5e-01, 2.5e-01,
2.5e-01, 2.5e-01, 2.5e-01, 2.5e-01, 2.5e-01, 2.5e-01, 2.5e-01,
2.5e-01, 2.5e-01, 2.5e-01, 2.5e-01, 2.5e-01, 2.5e-01, 2.5e-01,
2.5e-01, 2.5e-01, 2.5e-01, 2.5e-01, 2.5e-01, 2.5e-01, 2.5e-01,
2.5e-01, 2.5e-01, 2.5e-01, 2.5e-01, 2.5e-01, 2.5e-01, 2.5e-01,
2.5e-01, 2.5e-01, 2.5e-01, 2.5e-01, 2.5e-01, 2.5e-01, 2.5e-01,
2.5e-01, 2.5e-01, 2.5e-01, 2.5e-01, 2.5e-01, 2.5e-01, 2.5e-01,
2.5e-01, 2.5e-01, 2.5e-01, 2.5e-01, 2.5e-01, 2.5e-01, 2.5e-01,
2.5e-01, 2.5e-01, 2.5e-01, 2.5e-01, 2.5e-01, 2.5e-01, 2.5e-01,
2.5e-01, 2.5e-01, 2.5e-01, 2.5e-01, 2.5e-01, 2.5e-01, 2.5e-01,
2.5e-01, 2.5e-01, 2.5e-01, 2.5e-01, 2.5e-01, 2.5e-01, 2.5e-01,
2.5e-01, 2.5e-01, 2.5e-01, 2.5e-01, 2.5e-01, 2.5e-01, 2.5e-01,
2.5e-01, 2.5e-01, 2.5e-01, 2.5e-01, 2.5e-01, 2.5e-01, 2.5e-01,
2.5e-01, 2.5e-01, 2.5e-01, 2.5e-01, 2.5e-01, 2.5e-01, 2.5e-01,
2.5e-01, 2.5e-01, 2.5e-01, 2.5e-01, 2.5e-01, 2.5e-01, 2.5e-01,
2.5e-01, 2.5e-01, 2.5e-01, 2.5e-01, 2.5e-01, 2.5e-01, 2.5e-01,
2.5e-01, 2.5e-01, 2.5e-01, 2.5e-01, 2.5e-01, 2.5e-01, 2.5e-01,
2.5e-01, 2.5e-01, 2.5e-01, 2.5e-01, 2.5e-01, 2.5e-01, 2.5e-01,
2.5e-01, 2.5e-01, 2.5e-01, 2.5e-01, 2.5e-01, 2.5e-01, 2.5e-01,
2.5e-01, 2.5e-01, 2.5e-01, 2.5e-01, 2.5e-01, 2.5e-01, 2.5e-01,
2.5e-01, 2.5e-01, 2.5e-01, 2.5e-01, 2.5e-01, 2.5e-01, 2.5e-01,
2.5e-01, 2.5e-01, 2.5e-01, 2.5e-01, 2.5e-01, 2.5e-01, 2.5e-01,
2.5e-01, 2.5e-01, 2.5e-01, 2.5e-01, 2.5e-01, 2.5e-01, 2.5e-01,
2.5e-01, 2.5e-01, 2.5e-01, 2.5e-01, 2.5e-01, 2.5e-01, 2.5e-01,
2.5e-01, 2.5e-01, 2.5e-01, 2.5e-01, 2.5e-01, 2.5e-01, 2.5e-01,
2.5e-01, 2.5e-01, 2.5e-01, 2.5e-01, 2.5e-01, 2.5e-01, 2.5e-01,
2.5e-01, 2.5e-01, 2.5e-01, 2.5e-01, 2.5e-01, 2.5e-01, 2.5e-01,
2.5e-01, 2.5e-01, 2.5e-01, 2.5e-01, 2.5e-01, 2.5e-01, 2.5e-01,
2.5e-01, 2.5e-01, 2.5e-01, 2.5e-01, 2.5e-01, 2.5e-01, 2.5e-01,
2.5e-01, 2.5e-01, 2.5e-01, 2.5e-01, 2.5e-01, 2.5e-01, 2.5e-01,
2.5e-01, 2.5e-01, 2.5e-01, 2.5e-01, 2.5e-01, 2.5e-01, 2.5e-01,
2.5e-01, 2.5e-01, 2.5e-01, 2.5e-01, 2.5e-01, 2.5e-01, 2.5e-01,
2.5e-01, 2.5e-01, 2.5e-01, 2.5e-01, 2.5e-01, 2.5e-01, 2.5e-01,
2.5e-01, 2.5e-01, 2.5e-01, 2.5e-01, 2.5e-01, 2.5e-01, 2.5e-01,
2.5e-01, 2.5e-01, 2.5e-01, 2.5e-01, 2.5e-01, 2.5e-01, 2.5e-01,
2.5e-01, 2.5e-01, 2.5e-01, 2.5e-01, 2.5e-01, 2.5e-01, 2.5e-01,
2.5e-01, 2.5e-01, 2.5e-01, 2.5e-01, 2.5e-01, 2.5e-01, 2.5e-01,
2.5e-01, 2.5e-01, 2.5e-01, 2.5e-01, 2.5e-01, 2.5e-01, 2.5e-01,
2.5e-01, 2.5e-01, 2.5e-01, 2.5e-01, 2.5e-01, 2.5e-01, 2.5e-01,
2.5e-01, 2.5e-01, 2.5e-01, 2.5e-01, 2.5e-01, 2.5e-01, 2.5e-01,
2.5e-01, 2.5e-01, 2.5e-01, 2.5e-01, 2.5e-01, 2.5e-01, 2.5e-01],
[9.0e-05, 9.0e-05, 9.0e-05, 9.0e-05, 9.0e-05, 9.0e-05, 9.0e-05,
9.0e-05, 9.0e-05, 9.0e-05, 9.0e-05, 9.0e-05, 9.0e-05, 9.0e-05,
9.0e-05, 9.0e-05, 9.0e-05, 9.0e-05, 9.0e-05, 9.0e-05, 9.0e-05,
9.0e-05, 9.0e-05, 9.0e-05, 9.0e-05, 9.0e-05, 9.0e-05, 9.0e-05,
9.0e-05, 9.0e-05, 9.0e-05, 9.0e-05, 9.0e-05, 9.0e-05, 9.0e-05,
9.0e-05, 9.0e-05, 9.0e-05, 9.0e-05, 9.0e-05, 9.0e-05, 9.0e-05,
9.0e-05, 9.0e-05, 9.0e-05, 9.0e-05, 9.0e-05, 9.0e-05, 9.0e-05,
9.0e-05, 9.0e-05, 9.0e-05, 9.0e-05, 9.0e-05, 9.0e-05, 9.0e-05,
9.0e-05, 9.0e-05, 9.0e-05, 9.0e-05, 9.0e-05, 9.0e-05, 9.0e-05,
9.0e-05, 9.0e-05, 9.0e-05, 9.0e-05, 9.0e-05, 9.0e-05, 9.0e-05,
9.0e-05, 9.0e-05, 9.0e-05, 9.0e-05, 9.0e-05, 9.0e-05, 9.0e-05,
9.0e-05, 9.0e-05, 9.0e-05, 9.0e-05, 9.0e-05, 9.0e-05, 9.0e-05,
9.0e-05, 9.0e-05, 9.0e-05, 9.0e-05, 9.0e-05, 9.0e-05, 9.0e-05,
9.0e-05, 9.0e-05, 9.0e-05, 9.0e-05, 9.0e-05, 9.0e-05, 9.0e-05,
9.0e-05, 9.0e-05, 9.0e-05, 9.0e-05, 9.0e-05, 9.0e-05, 9.0e-05,
9.0e-05, 9.0e-05, 9.0e-05, 9.0e-05, 9.0e-05, 9.0e-05, 9.0e-05,
9.0e-05, 9.0e-05, 9.0e-05, 9.0e-05, 9.0e-05, 9.0e-05, 9.0e-05,
9.0e-05, 9.0e-05, 9.0e-05, 9.0e-05, 9.0e-05, 9.0e-05, 9.0e-05,
9.0e-05, 9.0e-05, 9.0e-05, 9.0e-05, 9.0e-05, 9.0e-05, 9.0e-05,
9.0e-05, 9.0e-05, 9.0e-05, 9.0e-05, 9.0e-05, 9.0e-05, 9.0e-05,
9.0e-05, 9.0e-05, 9.0e-05, 9.0e-05, 9.0e-05, 9.0e-05, 9.0e-05,
9.0e-05, 9.0e-05, 9.0e-05, 9.0e-05, 9.0e-05, 9.0e-05, 9.0e-05,
9.0e-05, 9.0e-05, 9.0e-05, 9.0e-05, 9.0e-05, 9.0e-05, 9.0e-05,
9.0e-05, 9.0e-05, 9.0e-05, 9.0e-05, 9.0e-05, 9.0e-05, 9.0e-05,
9.0e-05, 9.0e-05, 9.0e-05, 9.0e-05, 9.0e-05, 9.0e-05, 9.0e-05,
9.0e-05, 9.0e-05, 9.0e-05, 9.0e-05, 9.0e-05, 9.0e-05, 9.0e-05,
9.0e-05, 9.0e-05, 9.0e-05, 9.0e-05, 9.0e-05, 9.0e-05, 9.0e-05,
9.0e-05, 9.0e-05, 9.0e-05, 9.0e-05, 9.0e-05, 9.0e-05, 9.0e-05,
9.0e-05, 9.0e-05, 9.0e-05, 9.0e-05, 9.0e-05, 9.0e-05, 9.0e-05,
9.0e-05, 9.0e-05, 9.0e-05, 9.0e-05, 9.0e-05, 9.0e-05, 9.0e-05,
9.0e-05, 9.0e-05, 9.0e-05, 9.0e-05, 9.0e-05, 9.0e-05, 9.0e-05,
9.0e-05, 9.0e-05, 9.0e-05, 9.0e-05, 9.0e-05, 9.0e-05, 9.0e-05,
9.0e-05, 9.0e-05, 9.0e-05, 9.0e-05, 9.0e-05, 9.0e-05, 9.0e-05,
9.0e-05, 9.0e-05, 9.0e-05, 9.0e-05, 9.0e-05, 9.0e-05, 9.0e-05,
9.0e-05, 9.0e-05, 9.0e-05, 9.0e-05, 9.0e-05, 9.0e-05, 9.0e-05,
9.0e-05, 9.0e-05, 9.0e-05, 9.0e-05, 9.0e-05, 9.0e-05, 9.0e-05,
9.0e-05, 9.0e-05, 9.0e-05, 9.0e-05, 9.0e-05, 9.0e-05, 9.0e-05,
9.0e-05, 9.0e-05, 9.0e-05, 9.0e-05, 9.0e-05, 9.0e-05, 9.0e-05,
9.0e-05, 9.0e-05, 9.0e-05, 9.0e-05, 9.0e-05, 9.0e-05, 9.0e-05,
9.0e-05, 9.0e-05, 9.0e-05, 9.0e-05, 9.0e-05, 9.0e-05, 9.0e-05,
9.0e-05, 9.0e-05, 9.0e-05, 9.0e-05, 9.0e-05, 9.0e-05, 9.0e-05,
9.0e-05, 9.0e-05, 9.0e-05, 9.0e-05, 9.0e-05, 9.0e-05, 9.0e-05,
9.0e-05, 9.0e-05, 9.0e-05, 9.0e-05, 9.0e-05, 9.0e-05, 9.0e-05,
9.0e-05, 9.0e-05, 9.0e-05, 9.0e-05, 9.0e-05, 9.0e-05, 9.0e-05,
9.0e-05, 9.0e-05, 9.0e-05, 9.0e-05, 9.0e-05, 9.0e-05, 9.0e-05,
9.0e-05, 9.0e-05, 9.0e-05, 9.0e-05, 9.0e-05, 9.0e-05, 9.0e-05,
9.0e-05, 9.0e-05, 9.0e-05, 9.0e-05, 9.0e-05, 9.0e-05, 9.0e-05,
9.0e-05, 9.0e-05, 9.0e-05, 9.0e-05, 9.0e-05, 9.0e-05, 9.0e-05,
9.0e-05, 9.0e-05, 9.0e-05, 9.0e-05, 9.0e-05, 9.0e-05, 9.0e-05,
9.0e-05, 9.0e-05, 9.0e-05, 9.0e-05, 9.0e-05, 9.0e-05, 9.0e-05,
9.0e-05, 9.0e-05, 9.0e-05, 9.0e-05, 9.0e-05, 9.0e-05, 9.0e-05,
9.0e-05, 9.0e-05, 9.0e-05, 9.0e-05, 9.0e-05, 9.0e-05, 9.0e-05,
9.0e-05, 9.0e-05, 9.0e-05, 9.0e-05, 9.0e-05, 9.0e-05, 9.0e-05,
9.0e-05, 9.0e-05, 9.0e-05, 9.0e-05, 9.0e-05, 9.0e-05, 9.0e-05,
9.0e-05, 9.0e-05, 9.0e-05, 9.0e-05, 9.0e-05, 9.0e-05, 9.0e-05,
9.0e-05, 9.0e-05, 9.0e-05, 9.0e-05, 9.0e-05, 9.0e-05, 9.0e-05,
9.0e-05, 9.0e-05, 9.0e-05, 9.0e-05, 9.0e-05, 9.0e-05, 9.0e-05,
9.0e-05, 9.0e-05, 9.0e-05, 9.0e-05, 9.0e-05, 9.0e-05, 9.0e-05,
9.0e-05, 9.0e-05, 9.0e-05, 9.0e-05, 9.0e-05, 9.0e-05, 9.0e-05,
9.0e-05, 9.0e-05, 9.0e-05, 9.0e-05, 9.0e-05, 9.0e-05, 9.0e-05,
9.0e-05, 9.0e-05, 9.0e-05, 9.0e-05, 9.0e-05, 9.0e-05, 9.0e-05,
9.0e-05, 9.0e-05, 9.0e-05, 9.0e-05, 9.0e-05, 9.0e-05, 9.0e-05,
9.0e-05, 9.0e-05, 9.0e-05, 9.0e-05, 9.0e-05, 9.0e-05, 9.0e-05]])
>>> model.kill()
>>> model.close_log_file()
"""
# check to see if IWFM procedure is available in user version of IWFM DLL
if not hasattr(self.dll, "IW_Model_GetAquiferSs"):
raise AttributeError(
'IWFM API does not have "{}" procedure. '
"Check for an updated version".format("IW_Model_GetAquiferSs")
)
# get number of model nodes
n_nodes = ctypes.c_int(self.get_n_nodes())
# get number of model layers
n_layers = ctypes.c_int(self.get_n_layers())
# initialize output variables
aquifer_specific_storage = (
(ctypes.c_double * n_nodes.value) * n_layers.value
)()
# set instance variable status to 0
status = ctypes.c_int(0)
self.dll.IW_Model_GetAquiferSs(
ctypes.byref(n_nodes),
ctypes.byref(n_layers),
aquifer_specific_storage,
ctypes.byref(status),
)
return np.array(aquifer_specific_storage)
def get_aquifer_parameters(self):
"""
Return all aquifer parameters at each model node and layer
Returns
-------
tuple of np.ndarray
aquifer horizontal hydraulic conductivity for each node and layer,
aquifer vertical hydraulic conductivity for each node and layer,
aquitard vertical hydraulic conductivity for each node and layer,
aquifer specific yield for each node and layer,
aquifer specific storage for each node and layer
See Also
--------
IWFMModel.get_aquifer_horizontal_k : Return the aquifer horizontal hydraulic conductivity for each finite element node and each layer
IWFMModel.get_aquifer_vertical_k : Return the aquifer vertical hydraulic conductivity for each finite element node and each layer
IWFMModel.get_aquitard_vertical_k : Return the aquitard vertical hydraulic conductivity for each finite element node and each layer
IWFMModel.get_aquifer_specific_yield : Return the aquifer specific yield for each finite element node and each layer
IWFMModel.get_aquifer_specific_storage : Return the aquifer specific storage for each finite element node and each layer
Example
-------
>>> from pywfm import IWFMModel
>>> pp_file = '../Preprocessor/PreProcessor_MAIN.IN'
>>> sim_file = 'Simulation_MAIN.IN'
>>> model = IWFMModel(pp_file, sim_file)
>>> hk, vk, avk, sy, ss = model.get_aquifer_parameters()
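>>> # all five arrays are allocated identically, so they share the shape (n_layers, n_nodes)
>>> hk.shape == vk.shape == avk.shape == sy.shape == ss.shape
True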
>>> model.kill()
>>> model.close_log_file()
"""
# check to see if IWFM procedure is available in user version of IWFM DLL
if not hasattr(self.dll, "IW_Model_GetAquiferParameters"):
raise AttributeError(
'IWFM API does not have "{}" procedure. '
"Check for an updated version".format("IW_Model_GetAquiferParameters")
)
# get number of model nodes
n_nodes = ctypes.c_int(self.get_n_nodes())
# get number of model layers
n_layers = ctypes.c_int(self.get_n_layers())
# initialize output variables
aquifer_horizontal_k = ((ctypes.c_double * n_nodes.value) * n_layers.value)()
aquifer_vertical_k = ((ctypes.c_double * n_nodes.value) * n_layers.value)()
aquitard_vertical_k = ((ctypes.c_double * n_nodes.value) * n_layers.value)()
aquifer_specific_yield = ((ctypes.c_double * n_nodes.value) * n_layers.value)()
aquifer_specific_storage = (
(ctypes.c_double * n_nodes.value) * n_layers.value
)()
# set instance variable status to 0
status = ctypes.c_int(0)
self.dll.IW_Model_GetAquiferParameters(
ctypes.byref(n_nodes),
ctypes.byref(n_layers),
aquifer_horizontal_k,
aquifer_vertical_k,
aquitard_vertical_k,
aquifer_specific_yield,
aquifer_specific_storage,
ctypes.byref(status),
)
return (
np.array(aquifer_horizontal_k),
np.array(aquifer_vertical_k),
np.array(aquitard_vertical_k),
np.array(aquifer_specific_yield),
np.array(aquifer_specific_storage),
)
def get_n_ag_crops(self):
"""
Return the number of agricultural crops simulated in an
IWFM model
Returns
-------
int
number of agricultural crops (both non-ponded and ponded)
Example
-------
>>> from pywfm import IWFMModel
>>> pp_file = '../Preprocessor/PreProcessor_MAIN.IN'
>>> sim_file = 'Simulation_MAIN.IN'
>>> model = IWFMModel(pp_file, sim_file)
>>> model.get_n_ag_crops()
7
>>> model.kill()
>>> model.close_log_file()
"""
# check to see if IWFM procedure is available in user version of IWFM DLL
if not hasattr(self.dll, "IW_Model_GetNAgCrops"):
raise AttributeError(
'IWFM API does not have "{}" procedure. '
"Check for an updated version".format("IW_Model_GetNAgCrops")
)
# initialize output variables
n_ag_crops = ctypes.c_int(0)
# set instance variable status to 0
status = ctypes.c_int(0)
self.dll.IW_Model_GetNAgCrops(ctypes.byref(n_ag_crops), ctypes.byref(status))
return n_ag_crops.value
def get_n_wells(self):
"""
Return the number of wells simulated in an
IWFM model
Returns
-------
int
number of wells simulated in the IWFM model
Note
----
This method is intended to be used when performing a model simulation, i.e. when the model is instantiated with is_for_inquiry=0
See Also
--------
IWFMModel.get_well_ids : Return the well IDs specified in an IWFM model
IWFMModel.get_n_element_pumps : Return the number of element pumping wells in an IWFM model
IWFMModel.get_element_pump_ids : Return the element pump IDs specified in an IWFM model
Example
-------
>>> from pywfm import IWFMModel
>>> pp_file = '../Preprocessor/PreProcessor_MAIN.IN'
>>> sim_file = 'Simulation_MAIN.IN'
>>> model = IWFMModel(pp_file, sim_file)
>>> model.get_n_wells()
0
>>> model.kill()
>>> model.close_log_file()
"""
# check to see if IWFM procedure is available in user version of IWFM DLL
if not hasattr(self.dll, "IW_Model_GetNWells"):
raise AttributeError(
'IWFM API does not have "{}" procedure. '
"Check for an updated version".format("IW_Model_GetNWells")
)
# initialize output variables
n_wells = ctypes.c_int(0)
# set instance variable status to 0
status = ctypes.c_int(0)
self.dll.IW_Model_GetNWells(ctypes.byref(n_wells), ctypes.byref(status))
return n_wells.value
def get_well_ids(self):
"""
Return the pumping well IDs specified in an IWFM model
Returns
-------
np.ndarray
array of well IDs
Note
----
This method is intended to be used when performing a model simulation, i.e. when the model is instantiated with is_for_inquiry=0
See Also
--------
IWFMModel.get_n_wells : Return the number of pumping wells in an IWFM model
IWFMModel.get_n_element_pumps : Return the number of element pumping wells in an IWFM model
IWFMModel.get_element_pump_ids : Return the element pump IDs specified in an IWFM model
Example
-------
>>> from pywfm import IWFMModel
>>> pp_file = '../Preprocessor/PreProcessor_MAIN.IN'
>>> sim_file = 'Simulation_MAIN.IN'
>>> model = IWFMModel(pp_file, sim_file, is_for_inquiry=0)
>>> model.get_well_ids()
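>>> # output omitted here; for the sample model get_n_wells() returns 0,
>>> # so an empty array of well IDs is expected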
>>> model.kill()
>>> model.close_log_file()
"""
if not hasattr(self.dll, "IW_Model_GetWellIDs"):
raise AttributeError(
'IWFM API does not have "{}" procedure. '
"Check for an updated version".format("IW_Model_GetWellIDs")
)
# set input variables
n_wells = ctypes.c_int(self.get_n_wells())
# set instance variable status to 0
status = ctypes.c_int(0)
# initialize output variables
well_ids = (ctypes.c_int * n_wells.value)()
self.dll.IW_Model_GetWellIDs(
ctypes.byref(n_wells), well_ids, ctypes.byref(status)
)
return np.array(well_ids)
def get_n_element_pumps(self):
"""
Return the number of element pumps simulated in an
IWFM model
Returns
-------
int
number of element pumps simulated in the IWFM model
Note
----
This method is intended to be used when performing a model simulation, i.e. when the model is instantiated with is_for_inquiry=0
See Also
--------
IWFMModel.get_n_wells : Return the number of wells simulated in an IWFM model
IWFMModel.get_well_ids : Return the pumping well IDs specified in an IWFM model
IWFMModel.get_element_pump_ids : Return the element pump IDs specified in an IWFM model
Example
-------
>>> from pywfm import IWFMModel
>>> pp_file = '../Preprocessor/PreProcessor_MAIN.IN'
>>> sim_file = 'Simulation_MAIN.IN'
>>> model = IWFMModel(pp_file, sim_file, is_for_inquiry=0)
>>> model.get_n_element_pumps()
5
>>> model.kill()
>>> model.close_log_file()
"""
# check to see if IWFM procedure is available in user version of IWFM DLL
if not hasattr(self.dll, "IW_Model_GetNElemPumps"):
raise AttributeError(
'IWFM API does not have "{}" procedure. '
"Check for an updated version".format("IW_Model_GetNElemPumps")
)
# initialize output variables
n_elem_pumps = ctypes.c_int(0)
# set instance variable status to 0
status = ctypes.c_int(0)
self.dll.IW_Model_GetNElemPumps(
ctypes.byref(n_elem_pumps), ctypes.byref(status)
)
return n_elem_pumps.value
def get_element_pump_ids(self):
"""
Return the element pump IDs specified in an IWFM model
Returns
-------
np.ndarray
array of element pump IDs
Note
----
This method is intended to be used when performing a model simulation, i.e. when the model is instantiated with is_for_inquiry=0
See Also
--------
IWFMModel.get_well_ids : Return the well IDs specified in an IWFM model
IWFMModel.get_n_wells : Return the number of pumping wells in an IWFM model
IWFMModel.get_n_element_pumps : Return the number of element pumping wells in an IWFM model
Example
-------
>>> from pywfm import IWFMModel
>>> pp_file = '../Preprocessor/PreProcessor_MAIN.IN'
>>> sim_file = 'Simulation_MAIN.IN'
>>> model = IWFMModel(pp_file, sim_file, is_for_inquiry=0)
>>> model.get_element_pump_ids()
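>>> # output omitted here; for the sample model get_n_element_pumps() returns 5,
>>> # so an array of five element pump IDs is expected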
>>> model.kill()
>>> model.close_log_file()
"""
if not hasattr(self.dll, "IW_Model_GetElemPumpIDs"):
raise AttributeError(
'IWFM API does not have "{}" procedure. '
"Check for an updated version".format("IW_Model_GetElemPumpIDs")
)
# set input variables
n_element_pumps = ctypes.c_int(self.get_n_element_pumps())
# set instance variable status to 0
status = ctypes.c_int(0)
# initialize output variables
element_pump_ids = (ctypes.c_int * n_element_pumps.value)()
self.dll.IW_Model_GetElemPumpIDs(
ctypes.byref(n_element_pumps), element_pump_ids, ctypes.byref(status)
)
return np.array(element_pump_ids)
def _get_supply_purpose(self, supply_type_id, supply_indices):
"""
private method returning the flags for the initial assignment of water supplies
(diversions, well pumping, element pumping) designating if they serve
agricultural, urban, or both
Parameters
----------
supply_type_id : int
supply type identification number used by IWFM for surface water
diversions, well pumping, or element pumping
supply_indices : np.ndarray
indices of supplies for which flags are being retrieved. This is
one or more indices for the supply type chosen
e.g. if the supply_type_id corresponds to diversions, the
supply_indices would be one or more diversion indices.
Returns
-------
np.ndarray
array of flags for each supply index provided
Note
----
flag equal to 10 for agricultural water demand
flag equal to 01 for urban water demands
flag equal to 11 for both ag and urban
automatic supply adjustment in IWFM allows the supply purpose
to change dynamically, so this only returns the user-specified
initial value.
It is assumed that type checking and validation is performed in
the calling method
"""
# check to see if IWFM procedure is available in user version of IWFM DLL
if not hasattr(self.dll, "IW_Model_GetSupplyPurpose"):
raise AttributeError(
'IWFM API does not have "{}" procedure. '
"Check for an updated version".format("IW_Model_GetSupplyPurpose")
)
# convert supply_type_id to ctypes
supply_type_id = ctypes.c_int(supply_type_id)
# get number of supply indices
n_supply_indices = ctypes.c_int(len(supply_indices))
# convert supply_indices to ctypes
supply_indices = (ctypes.c_int * n_supply_indices.value)(*supply_indices)
# initialize output variables
supply_purpose_flags = (ctypes.c_int * n_supply_indices.value)()
# set instance variable status to 0
status = ctypes.c_int(0)
self.dll.IW_Model_GetSupplyPurpose(
ctypes.byref(supply_type_id),
ctypes.byref(n_supply_indices),
supply_indices,
supply_purpose_flags,
ctypes.byref(status),
)
return np.array(supply_purpose_flags)
def get_diversion_purpose(self, diversions="all"):
"""
Return the flags for the initial purpose of the diversions as ag, urban, or both
Parameters
----------
diversions : int, list, tuple, np.ndarray, or str='all', default='all'
One or more diversion identification numbers used to return
the supply purpose.
Returns
-------
np.ndarray
array of flags for each supply index provided
Note
----
This method is intended to be used during a model simulation (is_for_inquiry=0)
after the timeseries data are read
If it is used when is_for_inquiry=1, it will return the urban flag for each diversion
regardless of whether it is urban, ag, or both
flag equal to 1 for urban water demands
flag equal to 10 for agricultural water demand
flag equal to 11 for both ag and urban
automatic supply adjustment in IWFM allows the supply purpose
to change dynamically, so this only returns the user-specified
initial value.
See Also
--------
IWFMModel.get_well_pumping_purpose : Return the flags for the initial purpose of the well pumping as ag, urban, or both
IWFMModel.get_element_pump_purpose : Return the flags for the initial purpose of the element pumping as ag, urban, or both
Examples
--------
>>> from pywfm import IWFMModel
>>> pp_file = '../Preprocessor/PreProcessor_MAIN.IN'
>>> sim_file = 'Simulation_MAIN.IN'
>>> model = IWFMModel(dll, pp_file, sim_file, is_for_inquiry=0)
>>> while not model.is_end_of_simulation():
... # advance the simulation time one time step forward
... model.advance_time()
...
... # read all time series data from input files
... model.read_timeseries_data()
...
... # get diversion supply purpose
... print(model.get_diversion_purpose())
...
... # Simulate the hydrologic process for the timestep
... model.simulate_for_one_timestep()
...
... # print the results to the user-specified output files
... model.print_results()
...
... # advance the state of the hydrologic system in time
... model.advance_state()
.
.
.
[ 1 1 10 10 10]
* TIME STEP 2 AT 10/02/1990_24:00
[ 1 1 10 10 10]
* TIME STEP 3 AT 10/03/1990_24:00
[ 1 1 10 10 10]
.
.
.
* TIME STEP 3652 AT 09/29/2000_24:00
[ 1 1 10 10 10]
* TIME STEP 3653 AT 09/30/2000_24:00
[ 1 1 10 10 10]
>>> model.kill()
>>> model.close_log_file()
>>> from pywfm import IWFMModel
>>> pp_file = '../Preprocessor/PreProcessor_MAIN.IN'
>>> sim_file = 'Simulation_MAIN.IN'
>>> model = IWFMModel(pp_file, sim_file)
>>> model.get_diversion_purpose()
array([1, 1, 1, 1, 1])
>>> model.kill()
>>> model.close_log_file()
"""
supply_type_id = self.get_supply_type_id_diversion()
# get all diversion IDs
diversion_ids = self.get_diversion_ids()
if isinstance(diversions, str):
if diversions.lower() == "all":
diversions = diversion_ids
else:
raise ValueError('if diversions is a string, must be "all"')
# if int convert to np.ndarray
if isinstance(diversions, int):
diversions = np.array([diversions])
# if list or tuple convert to np.ndarray
if isinstance(diversions, (list, tuple)):
diversions = np.array(diversions)
# if diversions were provided as an int, list, or
# np.ndarray they should now all be np.ndarray, so check if np.ndarray
if not isinstance(diversions, np.ndarray):
raise TypeError(
'diversions must be an int, list, tuple, np.ndarray, or "all"'
)
# check if all of the provided diversion IDs are valid
if not np.all(np.isin(diversions, diversion_ids)):
raise ValueError("One or more diversion IDs provided are invalid")
# convert diversion IDs to diversion indices
# add 1 to convert between python indices and fortran indices
diversion_indices = (
np.array([np.where(diversion_ids == item)[0][0] for item in diversions]) + 1
)
return self._get_supply_purpose(supply_type_id, diversion_indices)
def get_well_pumping_purpose(self, wells="all"):
"""
Return the flags for the initial purpose of the well pumping as ag, urban, or both
Parameters
----------
wells : int, list, tuple, np.ndarray, or str='all', default='all'
One or more well identification numbers used to return
the supply purpose.
Returns
-------
np.ndarray
array of flags for each supply index provided
Note
----
This method is intended to be used during a model simulation (is_for_inquiry=0)
after the timeseries data are read
If it is used when is_for_inquiry=1, it will return the urban flag for each well
regardless of whether it is urban, ag, or both
flag equal to 1 for urban water demands
flag equal to 10 for agricultural water demand
flag equal to 11 for both ag and urban
automatic supply adjustment in IWFM allows the supply purpose
to change dynamically, so this only returns the user-specified
initial value.
See Also
--------
IWFMModel.get_diversion_purpose : Return the flags for the initial purpose of the diversions as ag, urban, or both
IWFMModel.get_element_pump_purpose : Return the flags for the initial purpose of the element pumping as ag, urban, or both
Examples
--------
>>> from pywfm import IWFMModel
>>> pp_file = '../Preprocessor/PreProcessor_MAIN.IN'
>>> sim_file = 'Simulation_MAIN.IN'
>>> model = IWFMModel(dll, pp_file, sim_file, is_for_inquiry=0)
>>> while not model.is_end_of_simulation():
... # advance the simulation time one time step forward
... model.advance_time()
...
... # read all time series data from input files
... model.read_timeseries_data()
...
... # get well pumping supply purpose
... print(model.get_well_pumping_purpose())
...
... # Simulate the hydrologic process for the timestep
... model.simulate_for_one_timestep()
...
... # print the results to the user-specified output files
... model.print_results()
...
... # advance the state of the hydrologic system in time
... model.advance_state()
.
.
.
[ 1 1 10 10 10]
* TIME STEP 2 AT 10/02/1990_24:00
[ 1 1 10 10 10]
* TIME STEP 3 AT 10/03/1990_24:00
[ 1 1 10 10 10]
.
.
.
* TIME STEP 3652 AT 09/29/2000_24:00
[ 1 1 10 10 10]
* TIME STEP 3653 AT 09/30/2000_24:00
[ 1 1 10 10 10]
>>> model.kill()
>>> model.close_log_file()
>>> from pywfm import IWFMModel
>>> pp_file = '../Preprocessor/PreProcessor_MAIN.IN'
>>> sim_file = 'Simulation_MAIN.IN'
>>> model = IWFMModel(pp_file, sim_file)
>>> model.get_well_pumping_purpose()
array([1, 1, 1, 1, 1])
>>> model.kill()
>>> model.close_log_file()
"""
supply_type_id = self.get_supply_type_id_well()
# get all well IDs
well_ids = self.get_well_ids()
if isinstance(wells, str):
if wells.lower() == "all":
wells = well_ids
else:
raise ValueError('if wells is a string, must be "all"')
# if int convert to np.ndarray
if isinstance(wells, int):
wells = np.array([wells])
# if list or tuple convert to np.ndarray
if isinstance(wells, (list, tuple)):
wells = np.array(wells)
# if wells were provided as an int, list, or
# np.ndarray they should now all be np.ndarray, so check if np.ndarray
if not isinstance(wells, np.ndarray):
raise TypeError('wells must be an int, list, tuple, np.ndarray, or "all"')
# check if all of the provided well IDs are valid
if not np.all(np.isin(wells, well_ids)):
raise ValueError("One or more well IDs provided are invalid")
# convert well IDs to well indices
# add 1 to convert between python indices and fortran indices
well_indices = (
np.array([np.where(well_ids == item)[0][0] for item in wells]) + 1
)
return self._get_supply_purpose(supply_type_id, well_indices)
def get_element_pump_purpose(self, element_pumps="all"):
"""
Return the flags for the initial purpose of the element pumping as ag, urban, or both
Parameters
----------
element_pumps : int, list, tuple, np.ndarray, or str='all', default='all'
One or more element pump identification numbers used to return
the supply purpose.
Returns
-------
np.ndarray
array of flags for each supply index provided
Note
----
This method is intended to be used during a model simulation (is_for_inquiry=0)
after the timeseries data are read
If it is used when is_for_inquiry=1, it will return the urban flag for each element pump
regardless of whether it is urban, ag, or both
flag equal to 1 for urban water demands
flag equal to 10 for agricultural water demand
flag equal to 11 for both ag and urban
automatic supply adjustment in IWFM allows the supply purpose
to change dynamically, so this only returns the user-specified
initial value.
See Also
--------
IWFMModel.get_diversion_purpose : Return the flags for the initial purpose of the diversions as ag, urban, or both
IWFMModel.get_well_pumping_purpose : Return the flags for the initial purpose of the well pumping as ag, urban, or both
Examples
--------
>>> from pywfm import IWFMModel
>>> pp_file = '../Preprocessor/PreProcessor_MAIN.IN'
>>> sim_file = 'Simulation_MAIN.IN'
>>> model = IWFMModel(dll, pp_file, sim_file, is_for_inquiry=0)
>>> while not model.is_end_of_simulation():
... # advance the simulation time one time step forward
... model.advance_time()
...
... # read all time series data from input files
... model.read_timeseries_data()
...
... # get element pumping supply purpose
... print(model.get_element_pump_purpose())
...
... # Simulate the hydrologic process for the timestep
... model.simulate_for_one_timestep()
...
... # print the results to the user-specified output files
... model.print_results()
...
... # advance the state of the hydrologic system in time
... model.advance_state()
.
.
.
>>> model.kill()
>>> model.close_log_file()
>>> from pywfm import IWFMModel
>>> pp_file = '../Preprocessor/PreProcessor_MAIN.IN'
>>> sim_file = 'Simulation_MAIN.IN'
>>> model = IWFMModel(pp_file, sim_file)
>>> model.get_element_pump_purpose()
>>> model.kill()
>>> model.close_log_file()
"""
supply_type_id = self.get_supply_type_id_elempump()
# get all element pump IDs
element_pump_ids = self.get_element_pump_ids()
if isinstance(element_pumps, str):
if element_pumps.lower() == "all":
element_pumps = element_pump_ids
else:
raise ValueError('if element_pumps is a string, must be "all"')
# if int convert to np.ndarray
if isinstance(element_pumps, int):
element_pumps = np.array([element_pumps])
# if list or tuple convert to np.ndarray
if isinstance(element_pumps, (list, tuple)):
element_pumps = np.array(element_pumps)
# if element_pumps were provided as an int, list, or
# np.ndarray they should now all be np.ndarray, so check if np.ndarray
if not isinstance(element_pumps, np.ndarray):
raise TypeError(
'element_pumps must be an int, list, tuple, np.ndarray, or "all"'
)
# check if all of the provided element pump IDs are valid
if not np.all(np.isin(element_pumps, element_pump_ids)):
raise ValueError("One or more element pump IDs provided are invalid")
# convert element pump IDs to element pump indices
# add 1 to convert between python indices and fortran indices
element_pump_indices = (
np.array(
[np.where(element_pump_ids == item)[0][0] for item in element_pumps]
)
+ 1
)
return self._get_supply_purpose(supply_type_id, element_pump_indices)
def _get_supply_requirement_ag(
self, location_type_id, locations_list, conversion_factor
):
"""
Return the agricultural water supply requirement at a
specified set of locations
Parameters
----------
location_type_id : int
location type identification number used by IWFM for elements
or subregions.
locations_list : list or np.ndarray
indices of locations where ag supply requirements are returned
conversion_factor : float
factor to convert ag supply requirement from model units to
desired output units
Returns
-------
np.ndarray
array of ag supply requirement for locations specified
"""
# check to see if IWFM procedure is available in user version of IWFM DLL
if not hasattr(self.dll, "IW_Model_GetSupplyRequirement_Ag"):
raise AttributeError(
'IWFM API does not have "{}" procedure. '
"Check for an updated version".format(
"IW_Model_GetSupplyRequirement_Ag"
)
)
# convert location_type_id to ctypes
location_type_id = ctypes.c_int(location_type_id)
# get number of locations
n_locations = ctypes.c_int(len(locations_list))
# convert locations_list to ctypes
locations_list = (ctypes.c_int * n_locations.value)(*locations_list)
# convert conversion_factor to ctypes
conversion_factor = ctypes.c_double(conversion_factor)
# initialize output variables
ag_supply_requirement = (ctypes.c_double * n_locations.value)()
# set instance variable status to 0
status = ctypes.c_int(0)
self.dll.IW_Model_GetSupplyRequirement_Ag(
ctypes.byref(location_type_id),
ctypes.byref(n_locations),
locations_list,
ctypes.byref(conversion_factor),
ag_supply_requirement,
ctypes.byref(status),
)
return np.array(ag_supply_requirement)
def get_supply_requirement_ag_elements(self, elements="all", conversion_factor=1.0):
"""
Return the agricultural supply requirement for one or more model elements
Parameters
----------
elements : int, list, tuple, np.ndarray, or str='all', default='all'
one or more element identification numbers used to return
the ag supply requirement
conversion_factor : float, default=1.0
factor to convert ag supply requirement from model units to
desired output units
Returns
-------
np.ndarray
array of ag supply requirement for elements specified
Note
----
This method is intended to be used during a model simulation (is_for_inquiry=0)
See Also
--------
IWFMModel.get_supply_requirement_ag_subregions : Return the agricultural supply requirement for one or more model subregions
IWFMModel.get_supply_requirement_urban_elements : Return the urban supply requirement for one or more model elements
IWFMModel.get_supply_requirement_urban_subregions : Return the urban supply requirement for one or more model subregions
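Example
-------
A minimal usage sketch inside a simulation loop, mirroring the other
examples in this class; file paths are placeholders and the returned
values depend on the model, so output is not shown.
>>> from pywfm import IWFMModel
>>> pp_file = '../Preprocessor/PreProcessor_MAIN.IN'
>>> sim_file = 'Simulation_MAIN.IN'
>>> model = IWFMModel(dll, pp_file, sim_file, is_for_inquiry=0)
>>> while not model.is_end_of_simulation():
... model.advance_time()
... model.read_timeseries_data()
... # get the agricultural supply requirement for all model elements
... print(model.get_supply_requirement_ag_elements())
... model.simulate_for_one_timestep()
... model.print_results()
... model.advance_state()
>>> model.kill()
>>> model.close_log_file()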
"""
location_type_id = self.get_location_type_id_element()
# get all element IDs
element_ids = self.get_element_ids()
if isinstance(elements, str):
if elements.lower() == "all":
elements = element_ids
else:
raise ValueError('if elements is a string, must be "all"')
# if int convert to np.ndarray
if isinstance(elements, int):
elements = np.array([elements])
# if list or tuple convert to np.ndarray
if isinstance(elements, (list, tuple)):
elements = np.array(elements)
# if elements were provided as an int, list, or
# np.ndarray they should now all be np.ndarray, so check if np.ndarray
if not isinstance(elements, np.ndarray):
raise TypeError('elements must be an int, list, tuple, np.ndarray, or "all"')
# check if all of the provided element IDs are valid
if not np.all(np.isin(elements, element_ids)):
raise ValueError("One or more element IDs provided are invalid")
# convert element IDs to element indices
# add 1 to convert between python indices and fortran indices
element_indices = (
np.array([np.where(element_ids == item)[0][0] for item in elements]) + 1
)
return self._get_supply_requirement_ag(
location_type_id, element_indices, conversion_factor
)
def get_supply_requirement_ag_subregions(
self, subregions="all", conversion_factor=1.0
):
"""
Return the agricultural supply requirement for one or more model subregions
Parameters
----------
subregions : int, list, tuple, np.ndarray, or str='all', default='all'
one or more subregion identification numbers used to return
the ag supply requirement
conversion_factor : float, default=1.0
factor to convert ag supply requirement from model units to
desired output units
Returns
-------
np.ndarray
array of ag supply requirement for subregions specified
Note
----
This method is intended to be used during a model simulation (is_for_inquiry=0)
See Also
--------
IWFMModel.get_supply_requirement_ag_elements : Return the agricultural supply requirement for one or more model elements
IWFMModel.get_supply_requirement_urban_elements : Return the urban supply requirement for one or more model elements
IWFMModel.get_supply_requirement_urban_subregions : Return the urban supply requirement for one or more model subregions
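Example
-------
A minimal usage sketch inside a simulation loop, mirroring the other
examples in this class; file paths are placeholders and the returned
values depend on the model, so output is not shown.
>>> from pywfm import IWFMModel
>>> pp_file = '../Preprocessor/PreProcessor_MAIN.IN'
>>> sim_file = 'Simulation_MAIN.IN'
>>> model = IWFMModel(dll, pp_file, sim_file, is_for_inquiry=0)
>>> while not model.is_end_of_simulation():
... model.advance_time()
... model.read_timeseries_data()
... # get the agricultural supply requirement for all model subregions
... print(model.get_supply_requirement_ag_subregions())
... model.simulate_for_one_timestep()
... model.print_results()
... model.advance_state()
>>> model.kill()
>>> model.close_log_file()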
"""
location_type_id = self.get_location_type_id_subregion()
# get all subregion IDs
subregion_ids = self.get_subregion_ids()
if isinstance(subregions, str):
if subregions.lower() == "all":
subregions = subregion_ids
else:
raise ValueError('if subregions is a string, must be "all"')
# if int convert to np.ndarray
if isinstance(subregions, int):
subregions = np.array([subregions])
# if list or tuple convert to np.ndarray
if isinstance(subregions, (list, tuple)):
subregions = np.array(subregions)
# if subregions were provided as an int, list, or
# np.ndarray they should now all be np.ndarray, so check if np.ndarray
if not isinstance(subregions, np.ndarray):
raise TypeError(
'subregions must be an int, list, tuple, np.ndarray, or "all"'
)
# check if all of the provided subregion IDs are valid
if not np.all(np.isin(subregions, subregion_ids)):
raise ValueError("One or more subegion IDs provided are invalid")
# convert subregion IDs to subregion indices
# add 1 to convert between python indices and fortran indices
subregion_indices = (
np.array([np.where(subregion_ids == item)[0][0] for item in subregions]) + 1
)
return self._get_supply_requirement_ag(
location_type_id, subregion_indices, conversion_factor
)
def _get_supply_requirement_urban(
self, location_type_id, locations_list, conversion_factor
):
"""
Return the urban water supply requirement at a
specified set of locations
Parameters
----------
location_type_id : int
location type identification number used by IWFM for elements
or subregions.
locations_list : list or np.ndarray
indices of locations where urban supply requirements are returned
conversion_factor : float
factor to convert urban supply requirement from model units to
desired output units
Returns
-------
np.ndarray
array of urban supply requirement for locations specified
"""
# check to see if IWFM procedure is available in user version of IWFM DLL
if not hasattr(self.dll, "IW_Model_GetSupplyRequirement_Urb"):
raise AttributeError(
'IWFM API does not have "{}" procedure. '
"Check for an updated version".format(
"IW_Model_GetSupplyRequirement_Urb"
)
)
# convert location_type_id to ctypes
location_type_id = ctypes.c_int(location_type_id)
# get number of locations
n_locations = ctypes.c_int(len(locations_list))
# convert locations_list to ctypes
locations_list = (ctypes.c_int * n_locations.value)(*locations_list)
# convert conversion_factor to ctypes
conversion_factor = ctypes.c_double(conversion_factor)
# initialize output variables
urban_supply_requirement = (ctypes.c_double * n_locations.value)()
# set instance variable status to 0
status = ctypes.c_int(0)
self.dll.IW_Model_GetSupplyRequirement_Urb(
ctypes.byref(location_type_id),
ctypes.byref(n_locations),
locations_list,
ctypes.byref(conversion_factor),
urban_supply_requirement,
ctypes.byref(status),
)
return np.array(urban_supply_requirement)
def get_supply_requirement_urban_elements(
self, elements="all", conversion_factor=1.0
):
"""
Return the urban supply requirement for one or more model elements
Parameters
----------
elements : int, list, tuple, np.ndarray, or str='all', default='all'
one or more element identification numbers used to return
the urban supply requirement
conversion_factor : float, default=1.0
factor to convert urban supply requirement from model units to
desired output units
Returns
-------
np.ndarray
array of urban supply requirement for elements specified
Note
----
This method is intended to be used during a model simulation (is_for_inquiry=0)
See Also
--------
IWFMModel.get_supply_requirement_ag_elements : Return the agricultural supply requirement for one or more model elements
IWFMModel.get_supply_requirement_ag_subregions : Return the agricultural supply requirement for one or more model subregions
IWFMModel.get_supply_requirement_urban_subregions : Return the urban supply requirement for one or more model subregions
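Example
-------
A minimal usage sketch inside a simulation loop, mirroring the other
examples in this class; file paths are placeholders and the returned
values depend on the model, so output is not shown.
>>> from pywfm import IWFMModel
>>> pp_file = '../Preprocessor/PreProcessor_MAIN.IN'
>>> sim_file = 'Simulation_MAIN.IN'
>>> model = IWFMModel(dll, pp_file, sim_file, is_for_inquiry=0)
>>> while not model.is_end_of_simulation():
... model.advance_time()
... model.read_timeseries_data()
... # get the urban supply requirement for all model elements
... print(model.get_supply_requirement_urban_elements())
... model.simulate_for_one_timestep()
... model.print_results()
... model.advance_state()
>>> model.kill()
>>> model.close_log_file()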
"""
location_type_id = self.get_location_type_id_element()
# get all element IDs
element_ids = self.get_element_ids()
if isinstance(elements, str):
if elements.lower() == "all":
elements = element_ids
else:
raise ValueError('if elements is a string, must be "all"')
# if int convert to np.ndarray
if isinstance(elements, int):
elements = np.array([elements])
# if list or tuple convert to np.ndarray
if isinstance(elements, (list, tuple)):
elements = np.array(elements)
# if elements were provided as an int, list, or
# np.ndarray they should now all be np.ndarray, so check if np.ndarray
if not isinstance(elements, np.ndarray):
raise TypeError('elements must be an int, list, tuple, np.ndarray, or "all"')
# check if all of the provided element IDs are valid
if not np.all(np.isin(elements, element_ids)):
raise ValueError("One or more element IDs provided are invalid")
# convert element IDs to element indices
# add 1 to convert between python indices and fortran indices
element_indices = (
np.array([np.where(element_ids == item)[0][0] for item in elements]) + 1
)
return self._get_supply_requirement_urban(
location_type_id, element_indices, conversion_factor
)
def get_supply_requirement_urban_subregions(
self, subregions="all", conversion_factor=1.0
):
"""
Return the urban supply requirement for one or more model subregions
Parameters
----------
subregions : int, list, tuple, np.ndarray, or str='all', default='all'
one or more subregion identification numbers used to return
the urban supply requirement
conversion_factor : float, default=1.0
factor to convert urban supply requirement from model units to
desired output units
Returns
-------
np.ndarray
array of urban supply requirement for subregions specified
Note
----
This method is intended to be used during a model simulation (is_for_inquiry=0)
See Also
--------
IWFMModel.get_supply_requirement_ag_elements : Return the agricultural supply requirement for one or more model elements
IWFMModel.get_supply_requirement_ag_subregions : Return the agricultural supply requirement for one or more model subregions
IWFMModel.get_supply_requirement_urban_elements : Return the urban supply requirement for one or more model elements
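Example
-------
A minimal usage sketch inside a simulation loop, mirroring the other
examples in this class; file paths are placeholders and the returned
values depend on the model, so output is not shown.
>>> from pywfm import IWFMModel
>>> pp_file = '../Preprocessor/PreProcessor_MAIN.IN'
>>> sim_file = 'Simulation_MAIN.IN'
>>> model = IWFMModel(dll, pp_file, sim_file, is_for_inquiry=0)
>>> while not model.is_end_of_simulation():
... model.advance_time()
... model.read_timeseries_data()
... # get the urban supply requirement for all model subregions
... print(model.get_supply_requirement_urban_subregions())
... model.simulate_for_one_timestep()
... model.print_results()
... model.advance_state()
>>> model.kill()
>>> model.close_log_file()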
"""
location_type_id = self.get_location_type_id_subregion()
# get all subregion IDs
subregion_ids = self.get_subregion_ids()
if isinstance(subregions, str):
if subregions.lower() == "all":
subregions = subregion_ids
else:
raise ValueError('if subregions is a string, must be "all"')
# if int convert to np.ndarray
if isinstance(subregions, int):
subregions = np.array([subregions])
# if list or tuple convert to np.ndarray
if isinstance(subregions, (list, tuple)):
subregions = np.array(subregions)
# if subregions were provided as an int, list, or
# np.ndarray they should now all be np.ndarray, so check if np.ndarray
if not isinstance(subregions, np.ndarray):
raise TypeError(
'subregions must be an int, list, tuple, np.ndarray, or "all"'
)
# check if all of the provided subregion IDs are valid
if not np.all(np.isin(subregions, subregion_ids)):
raise ValueError("One or more subegion IDs provided are invalid")
# convert subregion IDs to subregion indices
# add 1 to convert between python indices and fortran indices
subregion_indices = (
np.array([np.where(subregion_ids == item)[0][0] for item in subregions]) + 1
)
return self._get_supply_requirement_urban(
location_type_id, subregion_indices, conversion_factor
)
def _get_supply_shortage_at_origin_ag(
self, supply_type_id, supply_location_list, supply_conversion_factor
):
"""
private method returning the supply shortage for agriculture at the destination of those
supplies plus any conveyance losses
Parameters
----------
supply_type_id : int
supply identification number used by IWFM for diversions,
well pumping, or element pumping
supply_location_list : list or np.ndarray
indices of supplies where ag supply shortages are returned
supply_conversion_factor : float
factor to convert agricultural supply shortage from model
units to the desired output units
Returns
-------
np.ndarray
array of agricultural supply shortages for each supply location
"""
# check to see if IWFM procedure is available in user version of IWFM DLL
if not hasattr(self.dll, "IW_Model_GetSupplyShortAtOrigin_Ag"):
raise AttributeError(
'IWFM API does not have "{}" procedure. '
"Check for an updated version".format(
"IW_Model_GetSupplyShortAtOrigin_Ag"
)
)
# convert location_type_id to ctypes
supply_type_id = ctypes.c_int(supply_type_id)
# get number of locations
n_locations = ctypes.c_int(len(supply_location_list))
# convert locations_list to ctypes
supply_location_list = (ctypes.c_int * n_locations.value)(*supply_location_list)
# convert conversion_factor to ctypes
supply_conversion_factor = ctypes.c_double(supply_conversion_factor)
# initialize output variables
ag_supply_shortage = (ctypes.c_double * n_locations.value)()
# set instance variable status to 0
status = ctypes.c_int(0)
self.dll.IW_Model_GetSupplyShortAtOrigin_Ag(
ctypes.byref(supply_type_id),
ctypes.byref(n_locations),
supply_location_list,
ctypes.byref(supply_conversion_factor),
ag_supply_shortage,
ctypes.byref(status),
)
return np.array(ag_supply_shortage)
def get_ag_diversion_supply_shortage_at_origin(
self, diversions="all", conversion_factor=1.0
):
"""
Return the supply shortage for agricultural diversions at the destination of those
supplies plus any conveyance losses
Parameters
----------
diversions : int, list, tuple, np.ndarray, or str='all', default='all'
indices of diversions where ag supply shortages are returned
conversion_factor : float, default=1.0
factor to convert agricultural supply shortage from model
units to the desired output units
Returns
-------
np.ndarray
array of agricultural supply shortages for each diversion location
Note
----
This method is intended to be used during a model simulation (is_for_inquiry=0)
See Also
--------
IWFMModel.get_ag_well_supply_shortage_at_origin : Return the supply shortage for agricultural wells at the destination of those supplies plus any conveyance losses
IWFMModel.get_ag_elempump_supply_shortage_at_origin : Return the supply shortage for agricultural element pumping at the destination of those supplies plus any conveyance losses
IWFMModel.get_urban_diversion_supply_shortage_at_origin : Return the supply shortage for urban diversions at the destination of those supplies plus any conveyance losses
IWFMModel.get_urban_well_supply_shortage_at_origin : Return the supply shortage for urban wells at the destination of those supplies plus any conveyance losses
IWFMModel.get_urban_elempump_supply_shortage_at_origin : Return the supply shortage for urban element pumping at the destination of those supplies plus any conveyance losses
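Example
-------
A minimal usage sketch inside a simulation loop, mirroring the other
examples in this class; file paths are placeholders and the returned
values depend on the model, so output is not shown.
>>> from pywfm import IWFMModel
>>> pp_file = '../Preprocessor/PreProcessor_MAIN.IN'
>>> sim_file = 'Simulation_MAIN.IN'
>>> model = IWFMModel(dll, pp_file, sim_file, is_for_inquiry=0)
>>> while not model.is_end_of_simulation():
... model.advance_time()
... model.read_timeseries_data()
... # get the agricultural supply shortage for all diversions
... print(model.get_ag_diversion_supply_shortage_at_origin())
... model.simulate_for_one_timestep()
... model.print_results()
... model.advance_state()
>>> model.kill()
>>> model.close_log_file()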
"""
supply_type_id = self.get_supply_type_id_diversion()
# get all diversion IDs
diversion_ids = self.get_diversion_ids()
if isinstance(diversions, str):
if diversions.lower() == "all":
diversions = diversion_ids
else:
raise ValueError('if diversions is a string, must be "all"')
# if int convert to np.ndarray
if isinstance(diversions, int):
diversions = np.array([diversions])
# if list or tuple convert to np.ndarray
if isinstance(diversions, (list, tuple)):
diversions = np.array(diversions)
# if diversions were provided as an int, list, or
# np.ndarray they should now all be np.ndarray, so check if np.ndarray
if not isinstance(diversions, np.ndarray):
raise TypeError(
'diversions must be an int, list, tuple, np.ndarray, or "all"'
)
# check if all of the provided diversion IDs are valid
if not np.all(np.isin(diversions, diversion_ids)):
raise ValueError("One or more diversion IDs provided are invalid")
# convert diversion IDs to diversion indices
# add 1 to convert between python indices and fortran indices
diversion_indices = (
np.array([np.where(diversion_ids == item)[0][0] for item in diversions]) + 1
)
return self._get_supply_shortage_at_origin_ag(
supply_type_id, diversion_indices, conversion_factor
)
def get_ag_well_supply_shortage_at_origin(self, wells="all", conversion_factor=1.0):
"""
Return the supply shortage for agricultural wells at the destination of those
supplies plus any conveyance losses
Parameters
----------
wells : int, list, tuple, np.ndarray, or str='all', default='all'
indices of wells where ag supply shortages are returned
conversion_factor : float, default=1.0
factor to convert agricultural supply shortage from model
units to the desired output units
Returns
-------
np.ndarray
array of agricultural supply shortages for each well location
Note
----
This method is intended to be used during a model simulation (is_for_inquiry=0)
See Also
--------
IWFMModel.get_ag_diversion_supply_shortage_at_origin : Return the supply shortage for agricultural diversions at the destination of those supplies plus any conveyance losses
IWFMModel.get_ag_elempump_supply_shortage_at_origin : Return the supply shortage for agricultural element pumping at the destination of those supplies plus any conveyance losses
IWFMModel.get_urban_diversion_supply_shortage_at_origin : Return the supply shortage for urban diversions at the destination of those supplies plus any conveyance losses
IWFMModel.get_urban_well_supply_shortage_at_origin : Return the supply shortage for urban wells at the destination of those supplies plus any conveyance losses
IWFMModel.get_urban_elempump_supply_shortage_at_origin : Return the supply shortage for urban element pumping at the destination of those supplies plus any conveyance losses
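Example
-------
A minimal usage sketch inside a simulation loop, mirroring the other
examples in this class; file paths are placeholders and the returned
values depend on the model, so output is not shown.
>>> from pywfm import IWFMModel
>>> pp_file = '../Preprocessor/PreProcessor_MAIN.IN'
>>> sim_file = 'Simulation_MAIN.IN'
>>> model = IWFMModel(dll, pp_file, sim_file, is_for_inquiry=0)
>>> while not model.is_end_of_simulation():
... model.advance_time()
... model.read_timeseries_data()
... # get the agricultural supply shortage for all wells
... print(model.get_ag_well_supply_shortage_at_origin())
... model.simulate_for_one_timestep()
... model.print_results()
... model.advance_state()
>>> model.kill()
>>> model.close_log_file()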
"""
supply_type_id = self.get_supply_type_id_well()
# get all well IDs
well_ids = self.get_well_ids()
if isinstance(wells, str):
if wells.lower() == "all":
wells = well_ids
else:
raise ValueError('if wells is a string, must be "all"')
# if int convert to np.ndarray
if isinstance(wells, int):
wells = np.array([wells])
# if list or tuple convert to np.ndarray
if isinstance(wells, (list, tuple)):
wells = np.array(wells)
# if wells were provided as an int, list, or
# np.ndarray they should now all be np.ndarray, so check if np.ndarray
if not isinstance(wells, np.ndarray):
raise TypeError('wells must be an int, list, tuple, np.ndarray, or "all"')
# check if all of the provided well IDs are valid
if not np.all(np.isin(wells, well_ids)):
raise ValueError("One or more well IDs provided are invalid")
# convert well IDs to well indices
# add 1 to convert between python indices and fortran indices
well_indices = (
np.array([np.where(well_ids == item)[0][0] for item in wells]) + 1
)
return self._get_supply_shortage_at_origin_ag(
supply_type_id, well_indices, conversion_factor
)
def get_ag_elempump_supply_shortage_at_origin(
self, element_pumps="all", conversion_factor=1.0
):
"""
Return the supply shortage for agricultural element pumping
at the destination of those supplies plus any conveyance losses
Parameters
----------
element_pumps : int, list, tuple, np.ndarray, or str='all', default='all'
indices of element pumping locations where ag supply shortages are returned
conversion_factor : float, default=1.0
factor to convert agricultural supply shortage from model
units to the desired output units
Returns
-------
np.ndarray
array of agricultural supply shortages for each element pumping location
Note
----
This method is intended to be used during a model simulation (is_for_inquiry=0)
See Also
--------
IWFMModel.get_ag_diversion_supply_shortage_at_origin : Return the supply shortage for agricultural diversions at the destination of those supplies plus any conveyance losses
IWFMModel.get_ag_well_supply_shortage_at_origin : Return the supply shortage for agricultural wells at the destination of those supplies plus any conveyance losses
IWFMModel.get_urban_diversion_supply_shortage_at_origin : Return the supply shortage for urban diversions at the destination of those supplies plus any conveyance losses
IWFMModel.get_urban_well_supply_shortage_at_origin : Return the supply shortage for urban wells at the destination of those supplies plus any conveyance losses
IWFMModel.get_urban_elempump_supply_shortage_at_origin : Return the supply shortage for urban element pumping at the destination of those supplies plus any conveyance losses
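Example
-------
A minimal usage sketch inside a simulation loop, mirroring the other
examples in this class; file paths are placeholders and the returned
values depend on the model, so output is not shown.
>>> from pywfm import IWFMModel
>>> pp_file = '../Preprocessor/PreProcessor_MAIN.IN'
>>> sim_file = 'Simulation_MAIN.IN'
>>> model = IWFMModel(dll, pp_file, sim_file, is_for_inquiry=0)
>>> while not model.is_end_of_simulation():
... model.advance_time()
... model.read_timeseries_data()
... # get the agricultural supply shortage for all element pumping locations
... print(model.get_ag_elempump_supply_shortage_at_origin())
... model.simulate_for_one_timestep()
... model.print_results()
... model.advance_state()
>>> model.kill()
>>> model.close_log_file()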
"""
supply_type_id = self.get_supply_type_id_elempump()
# get all element pump IDs
element_pump_ids = self.get_element_pump_ids()
if isinstance(element_pumps, str):
if element_pumps.lower() == "all":
element_pumps = element_pump_ids
else:
raise ValueError('if element_pumps is a string, must be "all"')
# if int convert to np.ndarray
if isinstance(element_pumps, int):
element_pumps = np.array([element_pumps])
# if list or tuple convert to np.ndarray
if isinstance(element_pumps, (list, tuple)):
element_pumps = np.array(element_pumps)
# if element_pumps were provided as an int, list, or
# np.ndarray they should now all be np.ndarray, so check if np.ndarray
if not isinstance(element_pumps, np.ndarray):
raise TypeError(
'element_pumps must be an int, list, tuple, np.ndarray, or "all"'
)
# check if all of the provided element pump IDs are valid
if not np.all(np.isin(element_pumps, element_pump_ids)):
raise ValueError("One or more element pump IDs provided are invalid")
# convert element pump IDs to element pump indices
# add 1 to convert between python indices and fortran indices
element_pump_indices = (
np.array(
[np.where(element_pump_ids == item)[0][0] for item in element_pumps]
)
+ 1
)
return self._get_supply_shortage_at_origin_ag(
supply_type_id, element_pump_indices, conversion_factor
)
def _get_supply_shortage_at_origin_urban(
self, supply_type_id, supply_location_list, supply_conversion_factor
):
"""
private method returning the supply shortage for urban water use at the
destination of those supplies plus any conveyance losses
Parameters
----------
supply_type_id : int
supply identification number used by IWFM for diversions,
well pumping, or element pumping
supply_location_list : list or np.ndarray
indices of supplies where urban supply shortages are returned
supply_conversion_factor : float
factor to convert urban supply shortage from model
units to the desired output units
Returns
-------
np.ndarray
array of urban supply shortages for each supply location
"""
# check to see if IWFM procedure is available in user version of IWFM DLL
if not hasattr(self.dll, "IW_Model_GetSupplyShortAtOrigin_Urb"):
raise AttributeError(
'IWFM API does not have "{}" procedure. '
"Check for an updated version".format(
"IW_Model_GetSupplyShortAtOrigin_Urb"
)
)
# convert location_type_id to ctypes
supply_type_id = ctypes.c_int(supply_type_id)
# get number of locations
n_locations = ctypes.c_int(len(supply_location_list))
# convert locations_list to ctypes
supply_location_list = (ctypes.c_int * n_locations.value)(*supply_location_list)
# convert conversion_factor to ctypes
supply_conversion_factor = ctypes.c_double(supply_conversion_factor)
# initialize output variables
urban_supply_shortage = (ctypes.c_double * n_locations.value)()
# set instance variable status to 0
status = ctypes.c_int(0)
self.dll.IW_Model_GetSupplyShortAtOrigin_Urb(
ctypes.byref(supply_type_id),
ctypes.byref(n_locations),
supply_location_list,
ctypes.byref(supply_conversion_factor),
urban_supply_shortage,
ctypes.byref(status),
)
return np.array(urban_supply_shortage)
def get_urban_diversion_supply_shortage_at_origin(
self, diversions="all", conversion_factor=1.0
):
"""
Return the supply shortage for urban diversions at the destination of those
supplies plus any conveyance losses
Parameters
----------
diversions : int, list, tuple, np.ndarray, or str='all', default='all'
indices of diversions where urban supply shortages are returned
conversion_factor : float, default=1.0
factor to convert urban supply shortage from model
units to the desired output units
Returns
-------
np.ndarray
array of urban supply shortages for each diversion location
Note
----
This method is intended to be used during a model simulation (is_for_inquiry=0)
See Also
--------
IWFMModel.get_ag_diversion_supply_shortage_at_origin : Return the supply shortage for agricultural diversions at the destination of those supplies plus any conveyance losses
IWFMModel.get_ag_well_supply_shortage_at_origin : Return the supply shortage for agricultural wells at the destination of those supplies plus any conveyance losses
IWFMModel.get_ag_elempump_supply_shortage_at_origin : Return the supply shortage for agricultural element pumping at the destination of those supplies plus any conveyance losses
IWFMModel.get_urban_well_supply_shortage_at_origin : Return the supply shortage for urban wells at the destination of those supplies plus any conveyance losses
IWFMModel.get_urban_elempump_supply_shortage_at_origin : Return the supply shortage for urban element pumping at the destination of those supplies plus any conveyance losses
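Example
-------
A minimal usage sketch inside a simulation loop, mirroring the other
examples in this class; file paths are placeholders and the returned
values depend on the model, so output is not shown.
>>> from pywfm import IWFMModel
>>> pp_file = '../Preprocessor/PreProcessor_MAIN.IN'
>>> sim_file = 'Simulation_MAIN.IN'
>>> model = IWFMModel(dll, pp_file, sim_file, is_for_inquiry=0)
>>> while not model.is_end_of_simulation():
... model.advance_time()
... model.read_timeseries_data()
... # get the urban supply shortage for all diversions
... print(model.get_urban_diversion_supply_shortage_at_origin())
... model.simulate_for_one_timestep()
... model.print_results()
... model.advance_state()
>>> model.kill()
>>> model.close_log_file()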
"""
supply_type_id = self.get_supply_type_id_diversion()
# get all diversion IDs
diversion_ids = self.get_diversion_ids()
if isinstance(diversions, str):
if diversions.lower() == "all":
diversions = diversion_ids
else:
raise ValueError('if diversions is a string, must be "all"')
# if int convert to np.ndarray
if isinstance(diversions, int):
diversions = np.array([diversions])
# if list or tuple convert to np.ndarray
if isinstance(diversions, (list, tuple)):
diversions = np.array(diversions)
# if diversions were provided as an int, list, or
# np.ndarray they should now all be np.ndarray, so check if np.ndarray
if not isinstance(diversions, np.ndarray):
raise TypeError(
'diversions must be an int, list, tuple, np.ndarray, or "all"'
)
# check if all of the provided diversion IDs are valid
if not np.all(np.isin(diversions, diversion_ids)):
raise ValueError("One or more diversion IDs provided are invalid")
# convert diversion IDs to diversion indices
# add 1 to convert between python indices and fortran indices
diversion_indices = (
np.array([np.where(diversion_ids == item)[0][0] for item in diversions]) + 1
)
return self._get_supply_shortage_at_origin_urban(
supply_type_id, diversion_indices, conversion_factor
)
def get_urban_well_supply_shortage_at_origin(
self, wells="all", conversion_factor=1.0
):
"""
Return the supply shortage for urban wells at the destination of those
supplies plus any conveyance losses
Parameters
----------
wells : int, list, tuple, np.ndarray, or str='all', default='all'
indices of wells where urban supply shortages are returned
conversion_factor : float, default=1.0
factor to convert urban supply shortage from model
units to the desired output units
Returns
-------
np.ndarray
array of urban supply shortages for each well location
Note
----
This method is intended to be used during a model simulation (is_for_inquiry=0)
See Also
--------
IWFMModel.get_ag_diversion_supply_shortage_at_origin : Return the supply shortage for agricultural diversions at the destination of those supplies plus any conveyance losses
IWFMModel.get_ag_well_supply_shortage_at_origin : Return the supply shortage for agricultural wells at the destination of those supplies plus any conveyance losses
IWFMModel.get_ag_elempump_supply_shortage_at_origin : Return the supply shortage for agricultural element pumping at the destination of those supplies plus any conveyance losses
IWFMModel.get_urban_diversion_supply_shortage_at_origin : Return the supply shortage for urban diversions at the destination of those supplies plus any conveyance losses
IWFMModel.get_urban_elempump_supply_shortage_at_origin : Return the supply shortage for urban element pumping at the destination of those supplies plus any conveyance losses
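Example
-------
A minimal usage sketch inside a simulation loop, mirroring the other
examples in this class; file paths are placeholders and the returned
values depend on the model, so output is not shown.
>>> from pywfm import IWFMModel
>>> pp_file = '../Preprocessor/PreProcessor_MAIN.IN'
>>> sim_file = 'Simulation_MAIN.IN'
>>> model = IWFMModel(dll, pp_file, sim_file, is_for_inquiry=0)
>>> while not model.is_end_of_simulation():
... model.advance_time()
... model.read_timeseries_data()
... # get the urban supply shortage for all wells
... print(model.get_urban_well_supply_shortage_at_origin())
... model.simulate_for_one_timestep()
... model.print_results()
... model.advance_state()
>>> model.kill()
>>> model.close_log_file()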
"""
supply_type_id = self.get_supply_type_id_well()
# get all well IDs
well_ids = self.get_well_ids()
if isinstance(wells, str):
if wells.lower() == "all":
wells = well_ids
else:
raise ValueError('if wells is a string, must be "all"')
# if int convert to np.ndarray
if isinstance(wells, int):
wells = np.array([wells])
# if list or tuple convert to np.ndarray
if isinstance(wells, (list, tuple)):
wells = np.array(wells)
# if wells were provided as an int, list, or
# np.ndarray they should now all be np.ndarray, so check if np.ndarray
if not isinstance(wells, np.ndarray):
raise TypeError('wells must be an int, list, tuple, np.ndarray, or "all"')
# check if all of the provided well IDs are valid
if not np.all(np.isin(wells, well_ids)):
raise ValueError("One or more well IDs provided are invalid")
# convert well IDs to well indices
# add 1 to convert between python indices and fortran indices
well_indices = (
np.array([np.where(well_ids == item)[0][0] for item in wells]) + 1
)
return self._get_supply_shortage_at_origin_urban(
supply_type_id, well_indices, conversion_factor
)
def get_urban_elempump_supply_shortage_at_origin(
self, element_pumps="all", conversion_factor=1.0
):
"""
Return the supply shortage for urban element pumping
at the destination of those supplies plus any conveyance losses
Parameters
----------
element_pumps : int, list, tuple, np.ndarray, or str='all', default='all'
indices of element pumping locations where urban supply shortages are returned
conversion_factor : float, default=1.0
factor to convert urban supply shortage from model
units to the desired output units
Returns
-------
np.ndarray
array of urban supply shortages for each element pumping location
Note
----
This method is intended to be used during a model simulation (is_for_inquiry=0)
See Also
--------
IWFMModel.get_ag_diversion_supply_shortage_at_origin : Return the supply shortage for agricultural diversions at the destination of those supplies plus any conveyance losses
IWFMModel.get_ag_well_supply_shortage_at_origin : Return the supply shortage for agricultural wells at the destination of those supplies plus any conveyance losses
IWFMModel.get_ag_elempump_supply_shortage_at_origin : Return the supply shortage for agricultural element pumping at the destination of those supplies plus any conveyance losses
IWFMModel.get_urban_diversion_supply_shortage_at_origin : Return the supply shortage for urban diversions at the destination of those supplies plus any conveyance losses
IWFMModel.get_urban_well_supply_shortage_at_origin : Return the supply shortage for urban wells at the destination of those supplies plus any conveyance losses
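Example
-------
A minimal usage sketch inside a simulation loop, mirroring the other
examples in this class; file paths are placeholders and the returned
values depend on the model, so output is not shown.
>>> from pywfm import IWFMModel
>>> pp_file = '../Preprocessor/PreProcessor_MAIN.IN'
>>> sim_file = 'Simulation_MAIN.IN'
>>> model = IWFMModel(dll, pp_file, sim_file, is_for_inquiry=0)
>>> while not model.is_end_of_simulation():
... model.advance_time()
... model.read_timeseries_data()
... # get the urban supply shortage for all element pumping locations
... print(model.get_urban_elempump_supply_shortage_at_origin())
... model.simulate_for_one_timestep()
... model.print_results()
... model.advance_state()
>>> model.kill()
>>> model.close_log_file()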
"""
supply_type_id = self.get_supply_type_id_elempump()
# get all element pump IDs
element_pump_ids = self.get_element_pump_ids()
if isinstance(element_pumps, str):
if element_pumps.lower() == "all":
element_pumps = element_pump_ids
else:
raise ValueError('if element_pumps is a string, must be "all"')
# if int convert to np.ndarray
if isinstance(element_pumps, int):
element_pumps = np.array([element_pumps])
# if list or tuple convert to np.ndarray
if isinstance(element_pumps, (list, tuple)):
element_pumps = np.array(element_pumps)
# if element_pumps were provided as an int, list, or
# np.ndarray they should now all be np.ndarray, so check if np.ndarray
if not isinstance(element_pumps, np.ndarray):
raise TypeError(
'element_pumps must be an int, list, tuple, np.ndarray, or "all"'
)
# check if all of the provided element pump IDs are valid
if not np.all(np.isin(element_pumps, element_pump_ids)):
raise ValueError("One or more element pump IDs provided are invalid")
# convert element pump IDs to element pump indices
# add 1 to convert between python indices and fortran indices
element_pump_indices = (
np.array(
[np.where(element_pump_ids == item)[0][0] for item in element_pumps]
)
+ 1
)
return self._get_supply_shortage_at_origin_urban(
supply_type_id, element_pump_indices, conversion_factor
)
def _get_names(self, location_type_id):
"""
Return the available names for a given location_type
Parameters
----------
location_type_id : int
location type identifier used by IWFM to represent model
features
Returns
-------
list of strings
list containing names for the provided location_type_id. Returns an
empty list if no names are available for the given location type.
"""
# check to see if IWFM procedure is available in user version of IWFM DLL
if not hasattr(self.dll, "IW_Model_GetNames"):
raise AttributeError(
'IWFM API does not have "{}" procedure. '
"Check for an updated version".format("IW_Model_GetNames")
)
# convert location type id to ctypes
location_type_id = ctypes.c_int(location_type_id)
# get number of locations for specified location type
if location_type_id.value == 8:
# num_names = ctypes.c_int(self.get_n_nodes())
raise NotImplementedError("IWFM does not allow names for groundwater nodes")
elif location_type_id.value == 2:
# num_names = ctypes.c_int(self.get_n_elements())
raise NotImplementedError("IWFM does not allow names for elements")
elif location_type_id.value == 4:
num_names = ctypes.c_int(self.get_n_subregions())
elif location_type_id.value == 7:
# need to determine if API call exists for this
raise NotImplementedError(
"The IWFM Model Object does not include zone definitions"
)
elif location_type_id.value == 3:
# num_names = ctypes.c_int(self.get_n_lakes())
raise NotImplementedError("IWFM does not allow names for lakes")
elif location_type_id.value == 1:
# num_names = ctypes.c_int(self.get_n_stream_nodes())
raise NotImplementedError("IWFM does not allow names for stream nodes")
elif location_type_id.value == 11:
num_names = ctypes.c_int(self.get_n_stream_reaches())
elif location_type_id.value == 13:
# num_names = ctypes.c_int(self.get_n_tile_drains())
raise NotImplementedError("IWFM does not allow names for tile drains")
elif location_type_id.value == 14:
# self.get_n_small_watersheds()
raise NotImplementedError("IWFM does not allow names for small watersheds")
elif location_type_id.value in [9, 10, 12]:
num_names = ctypes.c_int(self._get_n_hydrographs(location_type_id.value))
else:
raise ValueError(
"Unrecognized location_type_id: {}".format(location_type_id.value)
)
# initialize output variables
delimiter_position_array = (ctypes.c_int * num_names.value)()
names_string_length = ctypes.c_int(30 * num_names.value)
raw_names_string = ctypes.create_string_buffer(names_string_length.value)
# set instance variable status to 0
status = ctypes.c_int(0)
self.dll.IW_Model_GetNames(
ctypes.byref(location_type_id),
ctypes.byref(num_names),
delimiter_position_array,
ctypes.byref(names_string_length),
raw_names_string,
ctypes.byref(status),
)
return self._string_to_list_by_array(
raw_names_string, delimiter_position_array, num_names
)
def get_subregion_names(self):
"""
Return the subregion names specified
in an IWFM model
Returns
-------
list
list of names for each subregion in the model
See Also
--------
IWFMModel.get_subregion_name : Return the name corresponding to the subregion_id in an IWFM model
IWFMModel.get_n_subregions : Return the number of subregions in an IWFM model
IWFMModel.get_subregion_ids : Return an array of IDs for subregions in an IWFM model
Example
-------
>>> from pywfm import IWFMModel
>>> pp_file = '../Preprocessor/PreProcessor_MAIN.IN'
>>> sim_file = 'Simulation_MAIN.IN'
>>> model = IWFMModel(pp_file, sim_file)
>>> model.get_subregion_names()
['Region1 (SR1)', 'Region2 (SR2)']
>>> model.kill()
>>> model.close_log_file()
"""
location_type_id = self.get_location_type_id_subregion()
return self._get_names(location_type_id)
def get_stream_reach_names(self):
"""
Return the stream reach names specified in an IWFM model
Returns
-------
list
list of names for each stream reach in the model
See Also
--------
IWFMModel.get_subregion_names : Return the subregion names specified in an IWFM model
IWFMModel.get_groundwater_hydrograph_names : Return the groundwater hydrograph location names specified in an IWFM model
IWFMModel.get_stream_hydrograph_names : Return the stream flow hydrograph location names specified in an IWFM model
IWFMModel.get_subsidence_hydrograph_names : Return the subsidence hydrograph location names specified in an IWFM model
Example
-------
>>> from pywfm import IWFMModel
>>> pp_file = '../Preprocessor/PreProcessor_MAIN.IN'
>>> sim_file = 'Simulation_MAIN.IN'
>>> model = IWFMModel(pp_file, sim_file)
>>> model.get_stream_reach_names()
['Reach2', 'Reach1', 'Reach3']
>>> model.kill()
>>> model.close_log_file()
"""
location_type_id = self.get_location_type_id_streamreach()
return self._get_names(location_type_id)
def get_groundwater_hydrograph_names(self):
"""
Return the groundwater hydrograph location names
specified in an IWFM model
Returns
-------
list
list of names for each groundwater hydrograph location
See Also
--------
IWFMModel.get_subregion_names : Return the subregion names specified in an IWFM model
IWFMModel.get_stream_reach_names : Return the stream reach names specified in an IWFM model
IWFMModel.get_stream_hydrograph_names : Return the stream flow hydrograph location names specified in an IWFM model
IWFMModel.get_subsidence_hydrograph_names : Return the subsidence hydrograph location names specified in an IWFM model
Example
-------
>>> from pywfm import IWFMModel
>>> pp_file = '../Preprocessor/PreProcessor_MAIN.IN'
>>> sim_file = 'Simulation_MAIN.IN'
>>> model = IWFMModel(pp_file, sim_file)
>>> model.get_groundwater_hydrograph_names()
['GWHyd1',
'GWHyd2',
'GWHyd3',
'GWHyd4',
'GWHyd5',
'GWHyd6',
'GWHyd7',
'GWHyd8',
'GWHyd9',
'GWHyd10',
'GWHyd11',
'GWHyd12',
'GWHyd13',
'GWHyd14',
'GWHyd15',
'GWHyd16',
'GWHyd17',
'GWHyd18',
'GWHyd19',
'GWHyd20',
'GWHyd21',
'GWHyd22',
'GWHyd23',
'GWHyd24',
'GWHyd25',
'GWHyd26',
'GWHyd27',
'GWHyd28',
'GWHyd29',
'GWHyd30',
'GWHyd31',
'GWHyd32',
'GWHyd33',
'GWHyd34',
'GWHyd35',
'GWHyd36',
'GWHyd37',
'GWHyd38',
'GWHyd39',
'GWHyd40',
'GWHyd41',
'GWHyd42']
>>> model.kill()
>>> model.close_log_file()
"""
location_type_id = self.get_location_type_id_gwheadobs()
return self._get_names(location_type_id)
def get_stream_hydrograph_names(self):
"""
Return the stream flow hydrograph location names specified
in an IWFM model
Returns
-------
list
list of names for each stream hydrograph location
See Also
--------
IWFMModel.get_subregion_names : Return the subregion names specified in an IWFM model
IWFMModel.get_stream_reach_names : Return the stream reach names specified in an IWFM model
IWFMModel.get_groundwater_hydrograph_names : Return the groundwater hydrograph location names specified in an IWFM model
IWFMModel.get_subsidence_hydrograph_names : Return the subsidence hydrograph location names specified in an IWFM model
Example
-------
>>> from pywfm import IWFMModel
>>> pp_file = '../Preprocessor/PreProcessor_MAIN.IN'
>>> sim_file = 'Simulation_MAIN.IN'
>>> model = IWFMModel(pp_file, sim_file)
>>> model.get_stream_hydrograph_names()
['StrmHyd_1',
'StrmHyd_2',
'StrmHyd_3',
'StrmHyd_4',
'StrmHyd_5',
'StrmHyd_6',
'StrmHyd_7',
'StrmHyd_8',
'StrmHyd_9',
'StrmHyd_10',
'StrmHyd_11',
'StrmHyd_12',
'StrmHyd_13',
'StrmHyd_14',
'StrmHyd_15',
'StrmHyd_16',
'StrmHyd_17',
'StrmHyd_18',
'StrmHyd_19',
'StrmHyd_20',
'StrmHyd_21',
'StrmHyd_22',
'StrmHyd_23']
>>> model.kill()
>>> model.close_log_file()
"""
location_type_id = self.get_location_type_id_streamhydobs()
return self._get_names(location_type_id)
def get_subsidence_hydrograph_names(self):
"""
Return the subsidence hydrograph location names specified
in an IWFM model
Returns
-------
list
list of names for each subsidence hydrograph locations
See Also
--------
IWFMModel.get_subregion_names : Return the subregion names specified in an IWFM model
IWFMModel.get_stream_reach_names : Return the stream reach names specified in an IWFM model
IWFMModel.get_stream_hydrograph_names : Return the stream flow hydrograph location names specified in an IWFM model
IWFMModel.get_groundwater_hydrograph_names : Return the groundwater hydrograph location names specified in an IWFM model
Example
-------
>>> from pywfm import IWFMModel
>>> pp_file = '../Preprocessor/PreProcessor_MAIN.IN'
>>> sim_file = 'Simulation_MAIN.IN'
>>> model = IWFMModel(pp_file, sim_file)
>>> model.get_subsidence_hydrograph_names()
['SubsHyd1', 'SubsHyd2', 'SubsHyd3', 'SubsHyd4', 'SubsHyd5']
>>> model.kill()
>>> model.close_log_file()
"""
location_type_id = self.get_location_type_id_subsidenceobs()
return self._get_names(location_type_id)
def get_n_hydrograph_types(self):
"""
Return the number of different hydrograph types being
printed by the IWFM model
Returns
-------
int
number of hydrograph types produced by the model
See Also
--------
IWFMModel.get_hydrograph_type_list : Return a list of different hydrograph types being printed by the IWFM model
IWFMModel.get_n_groundwater_hydrographs : Return the number of groundwater hydrographs specified in an IWFM model
IWFMModel.get_n_subsidence_hydrographs : Return the number of subsidence hydrographs specified in an IWFM model
IWFMModel.get_n_stream_hydrographs : Return the number of stream flow hydrographs specified in an IWFM model
IWFMModel.get_n_tile_drain_hydrographs : Return the number of tile drain hydrographs specified in an IWFM model
IWFMModel.get_groundwater_hydrograph_ids : Return the IDs for the groundwater hydrographs specified in an IWFM model
IWFMModel.get_subsidence_hydrograph_ids : Return the IDs for the subsidence hydrographs specified in an IWFM model
IWFMModel.get_stream_hydrograph_ids : Return the IDs for the stream hydrographs specified in an IWFM model
IWFMModel.get_tile_drain_hydrograph_ids : Return the IDs for the tile drain hydrographs specified in an IWFM model
IWFMModel.get_groundwater_hydrograph_coordinates : Return the x,y-coordinates for the groundwater hydrographs specified in an IWFM model
IWFMModel.get_subsidence_hydrograph_coordinates : Return the x,y-coordinates for the subsidence hydrograph locations specified in an IWFM model
IWFMModel.get_stream_hydrograph_coordinates : Return the x,y-coordinates for the stream flow observation locations specified in an IWFM model
IWFMModel.get_tile_drain_hydrograph_coordinates : Return the x,y-coordinates for the tile drain observations specified in an IWFM model
IWFMModel.get_groundwater_hydrograph : Return the simulated groundwater hydrograph for the provided groundwater hydrograph id
IWFMModel.get_groundwater_hydrograph_at_node_and_layer : Return a simulated groundwater hydrograph for a node and layer
IWFMModel.get_subsidence_hydrograph : Return the simulated subsidence hydrograph for the provided subsidence hydrograph id
IWFMModel.get_stream_hydrograph : Return the simulated stream hydrograph for the provided stream hydrograph id
IWFMModel.get_tile_drain_hydrograph : Return the simulated tile drain hydrograph for the provided tile drain hydrograph id
Example
-------
>>> from pywfm import IWFMModel
>>> pp_file = '../Preprocessor/PreProcessor_MAIN.IN'
>>> sim_file = 'Simulation_MAIN.IN'
>>> model = IWFMModel(pp_file, sim_file)
>>> model.get_n_hydrograph_types()
5
>>> model.kill()
>>> model.close_log_file()
"""
# check to see if IWFM procedure is available in user version of IWFM DLL
if not hasattr(self.dll, "IW_Model_GetNHydrographTypes"):
raise AttributeError(
'IWFM API does not have "{}" procedure. '
"Check for an updated version".format("IW_Model_GetNHydrographTypes")
)
# initialize output variables
n_hydrograph_types = ctypes.c_int(0)
# set instance variable status to 0
status = ctypes.c_int(0)
self.dll.IW_Model_GetNHydrographTypes(
ctypes.byref(n_hydrograph_types), ctypes.byref(status)
)
return n_hydrograph_types.value
def get_hydrograph_type_list(self):
"""
Return a list of different hydrograph types being printed
by the IWFM model
Returns
-------
dict
keys are different hydrograph types printed by the IWFM model
values are corresponding hydrograph type ids
See Also
--------
IWFMModel.get_n_hydrograph_types : Return the number of different hydrograph types being printed by the IWFM model
IWFMModel.get_n_groundwater_hydrographs : Return the number of groundwater hydrographs specified in an IWFM model
IWFMModel.get_n_subsidence_hydrographs : Return the number of subsidence hydrographs specified in an IWFM model
IWFMModel.get_n_stream_hydrographs : Return the number of stream flow hydrographs specified in an IWFM model
IWFMModel.get_n_tile_drain_hydrographs : Return the number of tile drain hydrographs specified in an IWFM model
IWFMModel.get_groundwater_hydrograph_ids : Return the IDs for the groundwater hydrographs specified in an IWFM model
IWFMModel.get_subsidence_hydrograph_ids : Return the IDs for the subsidence hydrographs specified in an IWFM model
IWFMModel.get_stream_hydrograph_ids : Return the IDs for the stream hydrographs specified in an IWFM model
IWFMModel.get_tile_drain_hydrograph_ids : Return the IDs for the tile drain hydrographs specified in an IWFM model
IWFMModel.get_groundwater_hydrograph_coordinates : Return the x,y-coordinates for the groundwater hydrographs specified in an IWFM model
IWFMModel.get_subsidence_hydrograph_coordinates : Return the x,y-coordinates for the subsidence hydrograph locations specified in an IWFM model
IWFMModel.get_stream_hydrograph_coordinates : Return the x,y-coordinates for the stream flow observation locations specified in an IWFM model
IWFMModel.get_tile_drain_hydrograph_coordinates : Return the x,y-coordinates for the tile drain observations specified in an IWFM model
IWFMModel.get_groundwater_hydrograph : Return the simulated groundwater hydrograph for the provided groundwater hydrograph id
IWFMModel.get_groundwater_hydrograph_at_node_and_layer : Return a simulated groundwater hydrograph for a node and layer
IWFMModel.get_subsidence_hydrograph : Return the simulated subsidence hydrograph for the provided subsidence hydrograph id
IWFMModel.get_stream_hydrograph : Return the simulated stream hydrograph for the provided stream hydrograph id
IWFMModel.get_tile_drain_hydrograph : Return the simulated tile drain hydrograph for the provided tile drain hydrograph id
Example
-------
>>> from pywfm import IWFMModel
>>> pp_file = '../Preprocessor/PreProcessor_MAIN.IN'
>>> sim_file = 'Simulation_MAIN.IN'
>>> model = IWFMModel(pp_file, sim_file)
>>> model.get_hydrograph_type_list()
{'Groundwater hydrograph': 9,
'Groundwater hydrograph at node and layer': 8,
'Subsidence hydrograph': 10,
'Tile drain hydrograph': 13,
'Stream hydrograph (flow)': 12}
>>> model.kill()
>>> model.close_log_file()
"""
# check to see if IWFM procedure is available in user version of IWFM DLL
if not hasattr(self.dll, "IW_Model_GetHydrographTypeList"):
raise AttributeError(
'IWFM API does not have "{}" procedure. Check for an updated version'.format(
"IW_Model_GetHydrographTypeList"
)
)
# get number of hydrograph types
n_hydrograph_types = ctypes.c_int(self.get_n_hydrograph_types())
# set length of hydrograph type list string
length_hydrograph_type_list = ctypes.c_int(3000)
# initialize output variables
raw_hydrograph_type_string = ctypes.create_string_buffer(
length_hydrograph_type_list.value
)
delimiter_position_array = (ctypes.c_int * n_hydrograph_types.value)()
hydrograph_location_type_list = (ctypes.c_int * n_hydrograph_types.value)()
# set instance variable status to 0
status = ctypes.c_int(0)
self.dll.IW_Model_GetHydrographTypeList(
ctypes.byref(n_hydrograph_types),
delimiter_position_array,
ctypes.byref(length_hydrograph_type_list),
raw_hydrograph_type_string,
hydrograph_location_type_list,
ctypes.byref(status),
)
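# split the raw delimited string into hydrograph type names and pair each
# name with its corresponding location type id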
hydrograph_type_list = self._string_to_list_by_array(
raw_hydrograph_type_string, delimiter_position_array, n_hydrograph_types
)
return dict(zip(hydrograph_type_list, np.array(hydrograph_location_type_list)))
def _get_n_hydrographs(self, location_type_id):
"""
private method returning the number of hydrographs for a given IWFM feature type
Parameters
----------
location_type_id : int
integer id used internally to IWFM for location types
Returns
-------
int
number of hydrographs for the provided feature type
Notes
-----
this method only works with location_type_ids
- 9 (groundwater hydrographs)
- 10 (subsidence hydrographs)
- 12 (stream hydrographs)
- 13 (tile drains)
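Example
-------
Illustrative sketch; assumes an instantiated IWFMModel named model. Public
wrappers such as get_n_groundwater_hydrographs resolve the location type id
before calling this private method, and the value shown follows the example
model used in the public method docstrings.
>>> location_type_id = model.get_location_type_id_gwheadobs()
>>> model._get_n_hydrographs(location_type_id)
42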
"""
# check to see if IWFM procedure is available in user version of IWFM DLL
if not hasattr(self.dll, "IW_Model_GetNHydrographs"):
raise AttributeError(
'IWFM API does not have "{}" procedure. Check for an updated version'.format(
"IW_Model_GetNHydrographs"
)
)
# set instance variable status to 0
status = ctypes.c_int(0)
# initialize output variables
n_hydrographs = ctypes.c_int(0)
# convert location_type_id to ctypes
location_type_id = ctypes.c_int(location_type_id)
self.dll.IW_Model_GetNHydrographs(
ctypes.byref(location_type_id),
ctypes.byref(n_hydrographs),
ctypes.byref(status),
)
return n_hydrographs.value
def get_n_groundwater_hydrographs(self):
"""
Return the number of groundwater hydrographs specified in
an IWFM model
Returns
-------
int
number of groundwater hydrographs
See Also
--------
IWFMModel.get_n_hydrograph_types : Return the number of different hydrograph types being printed by the IWFM model
IWFMModel.get_hydrograph_type_list : Return a list of different hydrograph types being printed by the IWFM model
IWFMModel.get_n_subsidence_hydrographs : Return the number of subsidence hydrographs specified in an IWFM model
IWFMModel.get_n_stream_hydrographs : Return the number of stream flow hydrographs specified in an IWFM model
IWFMModel.get_n_tile_drain_hydrographs : Return the number of tile drain hydrographs specified in an IWFM model
IWFMModel.get_groundwater_hydrograph_ids : Return the IDs for the groundwater hydrographs specified in an IWFM model
IWFMModel.get_subsidence_hydrograph_ids : Return the IDs for the subsidence hydrographs specified in an IWFM model
IWFMModel.get_stream_hydrograph_ids : Return the IDs for the stream hydrographs specified in an IWFM model
IWFMModel.get_tile_drain_hydrograph_ids : Return the IDs for the tile drain hydrographs specified in an IWFM model
IWFMModel.get_groundwater_hydrograph_coordinates : Return the x,y-coordinates for the groundwater hydrographs specified in an IWFM model
IWFMModel.get_subsidence_hydrograph_coordinates : Return the x,y-coordinates for the subsidence hydrograph locations specified in an IWFM model
IWFMModel.get_stream_hydrograph_coordinates : Return the x,y-coordinates for the stream flow observation locations specified in an IWFM model
IWFMModel.get_tile_drain_hydrograph_coordinates : Return the x,y-coordinates for the tile drain observations specified in an IWFM model
IWFMModel.get_groundwater_hydrograph : Return the simulated groundwater hydrograph for the provided groundwater hydrograph id
IWFMModel.get_groundwater_hydrograph_at_node_and_layer : Return a simulated groundwater hydrograph for a node and layer
IWFMModel.get_subsidence_hydrograph : Return the simulated subsidence hydrograph for the provided subsidence hydrograph id
IWFMModel.get_stream_hydrograph : Return the simulated stream hydrograph for the provided stream hydrograph id
IWFMModel.get_tile_drain_hydrograph : Return the simulated tile drain hydrograph for the provided tile drain hydrograph id
Example
-------
>>> from pywfm import IWFMModel
>>> pp_file = '../Preprocessor/PreProcessor_MAIN.IN'
>>> sim_file = 'Simulation_MAIN.IN'
>>> model = IWFMModel(pp_file, sim_file)
>>> model.get_n_groundwater_hydrographs()
42
>>> model.kill()
>>> model.close_log_file()
"""
location_type_id = self.get_location_type_id_gwheadobs()
return self._get_n_hydrographs(location_type_id)
def get_n_subsidence_hydrographs(self):
"""
Return the number of subsidence hydrographs specified in
an IWFM model
Returns
-------
int
number of subsidence hydrographs
See Also
--------
IWFMModel.get_n_hydrograph_types : Return the number of different hydrograph types being printed by the IWFM model
IWFMModel.get_hydrograph_type_list : Return a list of different hydrograph types being printed by the IWFM model
IWFMModel.get_n_groundwater_hydrographs : Return the number of groundwater hydrographs specified in an IWFM model
IWFMModel.get_n_stream_hydrographs : Return the number of stream flow hydrographs specified in an IWFM model
IWFMModel.get_n_tile_drain_hydrographs : Return the number of tile drain hydrographs specified in an IWFM model
IWFMModel.get_groundwater_hydrograph_ids : Return the IDs for the groundwater hydrographs specified in an IWFM model
IWFMModel.get_subsidence_hydrograph_ids : Return the IDs for the subsidence hydrographs specified in an IWFM model
IWFMModel.get_stream_hydrograph_ids : Return the IDs for the stream hydrographs specified in an IWFM model
IWFMModel.get_tile_drain_hydrograph_ids : Return the IDs for the tile drain hydrographs specified in an IWFM model
IWFMModel.get_groundwater_hydrograph_coordinates : Return the x,y-coordinates for the groundwater hydrographs specified in an IWFM model
IWFMModel.get_subsidence_hydrograph_coordinates : Return the x,y-coordinates for the subsidence hydrograph locations specified in an IWFM model
IWFMModel.get_stream_hydrograph_coordinates : Return the x,y-coordinates for the stream flow observation locations specified in an IWFM model
IWFMModel.get_tile_drain_hydrograph_coordinates : Return the x,y-coordinates for the tile drain observations specified in an IWFM model
IWFMModel.get_groundwater_hydrograph : Return the simulated groundwater hydrograph for the provided groundwater hydrograph id
IWFMModel.get_groundwater_hydrograph_at_node_and_layer : Return a simulated groundwater hydrograph for a node and layer
IWFMModel.get_subsidence_hydrograph : Return the simulated subsidence hydrograph for the provided subsidence hydrograph id
IWFMModel.get_stream_hydrograph : Return the simulated stream hydrograph for the provided stream hydrograph id
IWFMModel.get_tile_drain_hydrograph : Return the simulated tile drain hydrograph for the provided tile drain hydrograph id
Example
-------
>>> from pywfm import IWFMModel
>>> pp_file = '../Preprocessor/PreProcessor_MAIN.IN'
>>> sim_file = 'Simulation_MAIN.IN'
>>> model = IWFMModel(pp_file, sim_file)
>>> model.get_n_subsidence_hydrographs()
5
>>> model.kill()
>>> model.close_log_file()
"""
location_type_id = self.get_location_type_id_subsidenceobs()
return self._get_n_hydrographs(location_type_id)
def get_n_stream_hydrographs(self):
"""
Return the number of stream flow hydrographs specified in
an IWFM model
Returns
-------
int
number of stream hydrographs
See Also
--------
IWFMModel.get_n_hydrograph_types : Return the number of different hydrograph types being printed by the IWFM model
IWFMModel.get_hydrograph_type_list : Return a list of different hydrograph types being printed by the IWFM model
IWFMModel.get_n_groundwater_hydrographs : Return the number of groundwater hydrographs specified in an IWFM model
IWFMModel.get_n_subsidence_hydrographs : Return the number of subsidence hydrographs specified in an IWFM model
IWFMModel.get_n_tile_drain_hydrographs : Return the number of tile drain hydrographs specified in an IWFM model
IWFMModel.get_groundwater_hydrograph_ids : Return the IDs for the groundwater hydrographs specified in an IWFM model
IWFMModel.get_subsidence_hydrograph_ids : Return the IDs for the subsidence hydrographs specified in an IWFM model
IWFMModel.get_stream_hydrograph_ids : Return the IDs for the stream hydrographs specified in an IWFM model
IWFMModel.get_tile_drain_hydrograph_ids : Return the IDs for the tile drain hydrographs specified in an IWFM model
IWFMModel.get_groundwater_hydrograph_coordinates : Return the x,y-coordinates for the groundwater hydrographs specified in an IWFM model
IWFMModel.get_subsidence_hydrograph_coordinates : Return the x,y-coordinates for the subsidence hydrograph locations specified in an IWFM model
IWFMModel.get_stream_hydrograph_coordinates : Return the x,y-coordinates for the stream flow observation locations specified in an IWFM model
IWFMModel.get_tile_drain_hydrograph_coordinates : Return the x,y-coordinates for the tile drain observations specified in an IWFM model
IWFMModel.get_groundwater_hydrograph : Return the simulated groundwater hydrograph for the provided groundwater hydrograph id
IWFMModel.get_groundwater_hydrograph_at_node_and_layer : Return a simulated groundwater hydrograph for a node and layer
IWFMModel.get_subsidence_hydrograph : Return the simulated subsidence hydrograph for the provided subsidence hydrograph id
IWFMModel.get_stream_hydrograph : Return the simulated stream hydrograph for the provided stream hydrograph id
IWFMModel.get_tile_drain_hydrograph : Return the simulated tile drain hydrograph for the provided tile drain hydrograph id
Example
-------
>>> from pywfm import IWFMModel
>>> pp_file = '../Preprocessor/PreProcessor_MAIN.IN'
>>> sim_file = 'Simulation_MAIN.IN'
>>> model = IWFMModel(pp_file, sim_file)
>>> model.get_n_stream_hydrographs()
23
>>> model.kill()
>>> model.close_log_file()
"""
location_type_id = self.get_location_type_id_streamhydobs()
return self._get_n_hydrographs(location_type_id)
def get_n_tile_drain_hydrographs(self):
"""
Return the number of tile drain hydrographs specified in
an IWFM model
Returns
-------
int
number of tile drain hydrographs
See Also
--------
IWFMModel.get_n_hydrograph_types : Return the number of different hydrograph types being printed by the IWFM model
IWFMModel.get_hydrograph_type_list : Return a list of different hydrograph types being printed by the IWFM model
IWFMModel.get_n_groundwater_hydrographs : Return the number of groundwater hydrographs specified in an IWFM model
IWFMModel.get_n_subsidence_hydrographs : Return the number of subsidence hydrographs specified in an IWFM model
IWFMModel.get_n_stream_hydrographs : Return the number of stream flow hydrographs specified in an IWFM model
IWFMModel.get_groundwater_hydrograph_ids : Return the IDs for the groundwater hydrographs specified in an IWFM model
IWFMModel.get_subsidence_hydrograph_ids : Return the IDs for the subsidence hydrographs specified in an IWFM model
IWFMModel.get_stream_hydrograph_ids : Return the IDs for the stream hydrographs specified in an IWFM model
IWFMModel.get_tile_drain_hydrograph_ids : Return the IDs for the tile drain hydrographs specified in an IWFM model
IWFMModel.get_groundwater_hydrograph_coordinates : Return the x,y-coordinates for the groundwater hydrographs specified in an IWFM model
IWFMModel.get_subsidence_hydrograph_coordinates : Return the x,y-coordinates for the subsidence hydrograph locations specified in an IWFM model
IWFMModel.get_stream_hydrograph_coordinates : Return the x,y-coordinates for the stream flow observation locations specified in an IWFM model
IWFMModel.get_tile_drain_hydrograph_coordinates : Return the x,y-coordinates for the tile drain observations specified in an IWFM model
IWFMModel.get_groundwater_hydrograph : Return the simulated groundwater hydrograph for the provided groundwater hydrograph id
IWFMModel.get_groundwater_hydrograph_at_node_and_layer : Return a simulated groundwater hydrograph for a node and layer
IWFMModel.get_subsidence_hydrograph : Return the simulated subsidence hydrograph for the provided subsidence hydrograph id
IWFMModel.get_stream_hydrograph : Return the simulated stream hydrograph for the provided stream hydrograph id
IWFMModel.get_tile_drain_hydrograph : Return the simulated tile drain hydrograph for the provided tile drain hydrograph id
Example
-------
>>> from pywfm import IWFMModel
>>> pp_file = '../Preprocessor/PreProcessor_MAIN.IN'
>>> sim_file = 'Simulation_MAIN.IN'
>>> model = IWFMModel(pp_file, sim_file)
>>> model.get_n_tile_drain_hydrographs()
6
>>> model.kill()
>>> model.close_log_file()
"""
location_type_id = self.get_location_type_id_tiledrainobs()
return self._get_n_hydrographs(location_type_id)
def _get_hydrograph_ids(self, location_type_id):
"""
private method returning the ids of the hydrographs for a
provided location type
Parameters
----------
location_type_id : int
integer id used internally to IWFM for location types
Returns
-------
np.ndarray
integer array containing ids for hydrographs of the given location type
Notes
-----
this method only works with location_type_ids
- 9 (groundwater hydrographs)
- 10 (subsidence hydrographs)
- 12 (stream hydrographs)
- 13 (tile drains)
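Example
-------
Illustrative sketch; assumes an instantiated IWFMModel named model. Public
wrappers such as get_tile_drain_hydrograph_ids resolve the location type id
before calling this private method, and the values shown follow the example
model used in the public method docstrings.
>>> location_type_id = model.get_location_type_id_tiledrainobs()
>>> model._get_hydrograph_ids(location_type_id)
array([ 1, 4, 7, 10, 13, 16])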
"""
# check to see if IWFM procedure is available in user version of IWFM DLL
if not hasattr(self.dll, "IW_Model_GetHydrographIDs"):
raise AttributeError(
'IWFM API does not have "{}" procedure. Check for an updated version'.format(
"IW_Model_GetHydrographIDs"
)
)
# set instance variable status to 0
status = ctypes.c_int(0)
# convert location_type_id to ctypes
location_type_id = ctypes.c_int(location_type_id)
# get number of hydrographs
num_hydrographs = ctypes.c_int(self._get_n_hydrographs(location_type_id.value))
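# only retrieve hydrograph IDs when hydrographs of this type exist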
if num_hydrographs.value != 0:
# initialize output variables
hydrograph_ids = (ctypes.c_int * num_hydrographs.value)()
self.dll.IW_Model_GetHydrographIDs(
ctypes.byref(location_type_id),
ctypes.byref(num_hydrographs),
hydrograph_ids,
ctypes.byref(status),
)
return np.array(hydrograph_ids)
def get_groundwater_hydrograph_ids(self):
"""
Return the ids for the groundwater hydrographs specified
in an IWFM model
Returns
-------
np.ndarray
integer array of ids for groundwater hydrographs
See Also
--------
IWFMModel.get_n_hydrograph_types : Return the number of different hydrograph types being printed by the IWFM model
IWFMModel.get_hydrograph_type_list : Return a list of different hydrograph types being printed by the IWFM model
IWFMModel.get_n_groundwater_hydrographs : Return the number of groundwater hydrographs specified in an IWFM model
IWFMModel.get_n_subsidence_hydrographs : Return the number of subsidence hydrographs specified in an IWFM model
IWFMModel.get_n_stream_hydrographs : Return the number of stream flow hydrographs specified in an IWFM model
IWFMModel.get_n_tile_drain_hydrographs : Return the number of tile drain hydrographs specified in an IWFM model
IWFMModel.get_subsidence_hydrograph_ids : Return the IDs for the subsidence hydrographs specified in an IWFM model
IWFMModel.get_stream_hydrograph_ids : Return the IDs for the stream hydrographs specified in an IWFM model
IWFMModel.get_tile_drain_hydrograph_ids : Return the IDs for the tile drain hydrographs specified in an IWFM model
IWFMModel.get_groundwater_hydrograph_coordinates : Return the x,y-coordinates for the groundwater hydrographs specified in an IWFM model
IWFMModel.get_subsidence_hydrograph_coordinates : Return the x,y-coordinates for the subsidence hydrograph locations specified in an IWFM model
IWFMModel.get_stream_hydrograph_coordinates : Return the x,y-coordinates for the stream flow observation locations specified in an IWFM model
IWFMModel.get_tile_drain_hydrograph_coordinates : Return the x,y-coordinates for the tile drain observations specified in an IWFM model
IWFMModel.get_groundwater_hydrograph : Return the simulated groundwater hydrograph for the provided groundwater hydrograph id
IWFMModel.get_groundwater_hydrograph_at_node_and_layer : Return a simulated groundwater hydrograph for a node and layer
IWFMModel.get_subsidence_hydrograph : Return the simulated subsidence hydrograph for the provided subsidence hydrograph id
IWFMModel.get_stream_hydrograph : Return the simulated stream hydrograph for the provided stream hydrograph id
IWFMModel.get_tile_drain_hydrograph : Return the simulated tile drain hydrograph for the provided tile drain hydrograph id
Example
-------
>>> from pywfm import IWFMModel
>>> pp_file = '../Preprocessor/PreProcessor_MAIN.IN'
>>> sim_file = 'Simulation_MAIN.IN'
>>> model = IWFMModel(pp_file, sim_file)
>>> model.get_groundwater_hydrograph_ids()
array([ 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17,
18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34,
35, 36, 37, 38, 39, 40, 41, 42])
>>> model.kill()
>>> model.close_log_file()
"""
# get the location type id for groundwater head observations
location_type_id = self.get_location_type_id_gwheadobs()
return self._get_hydrograph_ids(location_type_id)
def get_subsidence_hydrograph_ids(self):
"""
Return the ids for the subsidence hydrographs specified
in an IWFM model
Returns
-------
np.ndarray
integer array of ids for subsidence hydrographs
See Also
--------
IWFMModel.get_n_hydrograph_types : Return the number of different hydrograph types being printed by the IWFM model
IWFMModel.get_hydrograph_type_list : Return a list of different hydrograph types being printed by the IWFM model
IWFMModel.get_n_groundwater_hydrographs : Return the number of groundwater hydrographs specified in an IWFM model
IWFMModel.get_n_subsidence_hydrographs : Return the number of subsidence hydrographs specified in an IWFM model
IWFMModel.get_n_stream_hydrographs : Return the number of stream flow hydrographs specified in an IWFM model
IWFMModel.get_n_tile_drain_hydrographs : Return the number of tile drain hydrographs specified in an IWFM model
IWFMModel.get_groundwater_hydrograph_ids : Return the IDs for the groundwater hydrographs specified in an IWFM model
IWFMModel.get_stream_hydrograph_ids : Return the IDs for the stream hydrographs specified in an IWFM model
IWFMModel.get_tile_drain_hydrograph_ids : Return the IDs for the tile drain hydrographs specified in an IWFM model
IWFMModel.get_groundwater_hydrograph_coordinates : Return the x,y-coordinates for the groundwater hydrographs specified in an IWFM model
IWFMModel.get_subsidence_hydrograph_coordinates : Return the x,y-coordinates for the subsidence hydrograph locations specified in an IWFM model
IWFMModel.get_stream_hydrograph_coordinates : Return the x,y-coordinates for the stream flow observation locations specified in an IWFM model
IWFMModel.get_tile_drain_hydrograph_coordinates : Return the x,y-coordinates for the tile drain observations specified in an IWFM model
IWFMModel.get_groundwater_hydrograph : Return the simulated groundwater hydrograph for the provided groundwater hydrograph id
IWFMModel.get_groundwater_hydrograph_at_node_and_layer : Return a simulated groundwater hydrograph for a node and layer
IWFMModel.get_subsidence_hydrograph : Return the simulated subsidence hydrograph for the provided subsidence hydrograph id
IWFMModel.get_stream_hydrograph : Return the simulated stream hydrograph for the provided stream hydrograph id
IWFMModel.get_tile_drain_hydrograph : Return the simulated tile drain hydrograph for the provided tile drain hydrograph id
Example
-------
>>> from pywfm import IWFMModel
>>> pp_file = '../Preprocessor/PreProcessor_MAIN.IN'
>>> sim_file = 'Simulation_MAIN.IN'
>>> model = IWFMModel(pp_file, sim_file)
>>> model.get_subsidence_hydrograph_ids()
array([1, 2, 3, 4, 5])
>>> model.kill()
>>> model.close_log_file()
"""
# get the location type id for subsidence observations
location_type_id = self.get_location_type_id_subsidenceobs()
return self._get_hydrograph_ids(location_type_id)
def get_stream_hydrograph_ids(self):
"""
Return the ids for the stream hydrographs specified
in an IWFM model
Returns
-------
np.ndarray
integer array of ids for stream hydrographs
See Also
--------
IWFMModel.get_n_hydrograph_types : Return the number of different hydrograph types being printed by the IWFM model
IWFMModel.get_hydrograph_type_list : Return a list of different hydrograph types being printed by the IWFM model
IWFMModel.get_n_groundwater_hydrographs : Return the number of groundwater hydrographs specified in an IWFM model
IWFMModel.get_n_subsidence_hydrographs : Return the number of subsidence hydrographs specified in an IWFM model
IWFMModel.get_n_stream_hydrographs : Return the number of stream flow hydrographs specified in an IWFM model
IWFMModel.get_n_tile_drain_hydrographs : Return the number of tile drain hydrographs specified in an IWFM model
IWFMModel.get_groundwater_hydrograph_ids : Return the IDs for the groundwater hydrographs specified in an IWFM model
IWFMModel.get_subsidence_hydrograph_ids : Return the IDs for the subsidence hydrographs specified in an IWFM model
IWFMModel.get_tile_drain_hydrograph_ids : Return the IDs for the tile drain hydrographs specified in an IWFM model
IWFMModel.get_groundwater_hydrograph_coordinates : Return the x,y-coordinates for the groundwater hydrographs specified in an IWFM model
IWFMModel.get_subsidence_hydrograph_coordinates : Return the x,y-coordinates for the subsidence hydrograph locations specified in an IWFM model
IWFMModel.get_stream_hydrograph_coordinates : Return the x,y-coordinates for the stream flow observation locations specified in an IWFM model
IWFMModel.get_tile_drain_hydrograph_coordinates : Return the x,y-coordinates for the tile drain observations specified in an IWFM model
IWFMModel.get_groundwater_hydrograph : Return the simulated groundwater hydrograph for the provided groundwater hydrograph id
IWFMModel.get_groundwater_hydrograph_at_node_and_layer : Return a simulated groundwater hydrograph for a node and layer
IWFMModel.get_subsidence_hydrograph : Return the simulated subsidence hydrograph for the provided subsidence hydrograph id
IWFMModel.get_stream_hydrograph : Return the simulated stream hydrograph for the provided stream hydrograph id
IWFMModel.get_tile_drain_hydrograph : Return the simulated tile drain hydrograph for the provided tile drain hydrograph id
Example
-------
>>> from pywfm import IWFMModel
>>> pp_file = '../Preprocessor/PreProcessor_MAIN.IN'
>>> sim_file = 'Simulation_MAIN.IN'
>>> model = IWFMModel(pp_file, sim_file)
>>> model.get_stream_hydrograph_ids()
array([ 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17,
18, 19, 20, 21, 22, 23])
>>> model.kill()
>>> model.close_log_file()
"""
# get the location type id for stream flow observations
location_type_id = self.get_location_type_id_streamhydobs()
return self._get_hydrograph_ids(location_type_id)
def get_tile_drain_hydrograph_ids(self):
"""
Return the ids for the tile drain hydrographs specified
in an IWFM model
Returns
-------
np.ndarray
integer array of ids for tile drain hydrographs
See Also
--------
IWFMModel.get_n_hydrograph_types : Return the number of different hydrograph types being printed by the IWFM model
IWFMModel.get_hydrograph_type_list : Return a list of different hydrograph types being printed by the IWFM model
IWFMModel.get_n_groundwater_hydrographs : Return the number of groundwater hydrographs specified in an IWFM model
IWFMModel.get_n_subsidence_hydrographs : Return the number of subsidence hydrographs specified in an IWFM model
IWFMModel.get_n_stream_hydrographs : Return the number of stream flow hydrographs specified in an IWFM model
IWFMModel.get_n_tile_drain_hydrographs : Return the number of tile drain hydrographs specified in an IWFM model
IWFMModel.get_groundwater_hydrograph_ids : Return the IDs for the groundwater hydrographs specified in an IWFM model
IWFMModel.get_subsidence_hydrograph_ids : Return the IDs for the subsidence hydrographs specified in an IWFM model
IWFMModel.get_stream_hydrograph_ids : Return the IDs for the stream hydrographs specified in an IWFM model
IWFMModel.get_groundwater_hydrograph_coordinates : Return the x,y-coordinates for the groundwater hydrographs specified in an IWFM model
IWFMModel.get_subsidence_hydrograph_coordinates : Return the x,y-coordinates for the subsidence hydrograph locations specified in an IWFM model
IWFMModel.get_stream_hydrograph_coordinates : Return the x,y-coordinates for the stream flow observation locations specified in an IWFM model
IWFMModel.get_tile_drain_hydrograph_coordinates : Return the x,y-coordinates for the tile drain observations specified in an IWFM model
IWFMModel.get_groundwater_hydrograph : Return the simulated groundwater hydrograph for the provided groundwater hydrograph id
IWFMModel.get_groundwater_hydrograph_at_node_and_layer : Return a simulated groundwater hydrograph for a node and layer
IWFMModel.get_subsidence_hydrograph : Return the simulated subsidence hydrograph for the provided subsidence hydrograph id
IWFMModel.get_stream_hydrograph : Return the simulated stream hydrograph for the provided stream hydrograph id
IWFMModel.get_tile_drain_hydrograph : Return the simulated tile drain hydrograph for the provided tile drain hydrograph id
Example
-------
>>> from pywfm import IWFMModel
>>> pp_file = '../Preprocessor/PreProcessor_MAIN.IN'
>>> sim_file = 'Simulation_MAIN.IN'
>>> model = IWFMModel(pp_file, sim_file)
>>> model.get_tile_drain_hydrograph_ids()
array([ 1, 4, 7, 10, 13, 16])
>>> model.kill()
>>> model.close_log_file()
"""
# get the location type id for tile drain observations
location_type_id = self.get_location_type_id_tiledrainobs()
return self._get_hydrograph_ids(location_type_id)
def _get_hydrograph_coordinates(self, location_type_id):
"""
private method returning the hydrograph coordinates for a provided feature type
Parameters
----------
location_type_id : int
integer id used internally to IWFM for location types
Returns
-------
tuple : length 2
index 0: np.array of x-coordinates of hydrographs
index 1: np.array of y-coordinates of hydrographs
Notes
-----
this method only works with location_type_ids
- 9 (groundwater hydrographs)
- 10 (subsidence hydrographs)
- 12 (stream hydrographs)
- 13 (tile drains)
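Example
-------
Illustrative sketch; assumes an instantiated IWFMModel named model. Public
wrappers such as get_tile_drain_hydrograph_coordinates resolve the location
type id before calling this private method.
>>> location_type_id = model.get_location_type_id_tiledrainobs()
>>> x, y = model._get_hydrograph_coordinates(location_type_id)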
"""
# check to see if IWFM procedure is available in user version of IWFM DLL
if not hasattr(self.dll, "IW_Model_GetHydrographCoordinates"):
raise AttributeError(
'IWFM API does not have "{}" procedure. Check for an updated version'.format(
"IW_Model_GetHydrographCoordinates"
)
)
# set instance variable status to 0
status = ctypes.c_int(0)
# convert location_type_id to ctypes
location_type_id = ctypes.c_int(location_type_id)
# get number of hydrographs
num_hydrographs = ctypes.c_int(self._get_n_hydrographs(location_type_id.value))
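# only retrieve hydrograph coordinates when hydrographs of this type exist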
if num_hydrographs.value != 0:
# initialize output variables
x = (ctypes.c_double * num_hydrographs.value)()
y = (ctypes.c_double * num_hydrographs.value)()
self.dll.IW_Model_GetHydrographCoordinates(
ctypes.byref(location_type_id),
ctypes.byref(num_hydrographs),
x,
y,
ctypes.byref(status),
)
return np.array(x), np.array(y)
def get_groundwater_hydrograph_coordinates(self):
"""
Return the x,y-coordinates for the groundwater hydrographs
specified in an IWFM model
Returns
-------
tuple
np.ndarray of x-coordinates
np.ndarray of y-coordinates
See Also
--------
IWFMModel.get_n_hydrograph_types : Return the number of different hydrograph types being printed by the IWFM model
IWFMModel.get_hydrograph_type_list : Return a list of different hydrograph types being printed by the IWFM model
IWFMModel.get_n_groundwater_hydrographs : Return the number of groundwater hydrographs specified in an IWFM model
IWFMModel.get_n_subsidence_hydrographs : Return the number of subsidence hydrographs specified in an IWFM model
IWFMModel.get_n_stream_hydrographs : Return the number of stream flow hydrographs specified in an IWFM model
IWFMModel.get_n_tile_drain_hydrographs : Return the number of tile drain hydrographs specified in an IWFM model
IWFMModel.get_groundwater_hydrograph_ids : Return the IDs for the groundwater hydrographs specified in an IWFM model
IWFMModel.get_subsidence_hydrograph_ids : Return the IDs for the subsidence hydrographs specified in an IWFM model
IWFMModel.get_stream_hydrograph_ids : Return the IDs for the stream hydrographs specified in an IWFM model
IWFMModel.get_tile_drain_hydrograph_ids : Return the IDs for the tile drain hydrographs specified in an IWFM model
IWFMModel.get_subsidence_hydrograph_coordinates : Return the x,y-coordinates for the subsidence hydrograph locations specified in an IWFM model
IWFMModel.get_stream_hydrograph_coordinates : Return the x,y-coordinates for the stream flow observation locations specified in an IWFM model
IWFMModel.get_tile_drain_hydrograph_coordinates : Return the x,y-coordinates for the tile drain observations specified in an IWFM model
IWFMModel.get_groundwater_hydrograph : Return the simulated groundwater hydrograph for the provided groundwater hydrograph id
IWFMModel.get_groundwater_hydrograph_at_node_and_layer : Return a simulated groundwater hydrograph for a node and layer
IWFMModel.get_subsidence_hydrograph : Return the simulated subsidence hydrograph for the provided subsidence hydrograph id
IWFMModel.get_stream_hydrograph : Return the simulated stream hydrograph for the provided stream hydrograph id
IWFMModel.get_tile_drain_hydrograph : Return the simulated tile drain hydrograph for the provided tile drain hydrograph id
Example
-------
>>> from pywfm import IWFMModel
>>> pp_file = '../Preprocessor/PreProcessor_MAIN.IN'
>>> sim_file = 'Simulation_MAIN.IN'
>>> model = IWFMModel(pp_file, sim_file)
>>> x, y = model.get_groundwater_hydrograph_coordinates()
>>> x
array([1883179.2, 1883179.2, 1883179.2, 1883179.2, 1883179.2, 1883179.2,
1883179.2, 1883179.2, 1883179.2, 1883179.2, 1883179.2, 1883179.2,
1883179.2, 1883179.2, 1883179.2, 1883179.2, 1883179.2, 1883179.2,
1883179.2, 1883179.2, 1883179.2, 1883179.2, 1883179.2, 1883179.2,
1883179.2, 1883179.2, 1883179.2, 1883179.2, 1883179.2, 1883179.2,
1883179.2, 1883179.2, 1883179.2, 1883179.2, 1883179.2, 1883179.2,
1883179.2, 1883179.2, 1883179.2, 1883179.2, 1883179.2, 1883179.2])
>>> y
array([14566752. , 14560190.4, 14553628.8, 14547067.2, 14540505.6,
14533944. , 14527382.4, 14520820.8, 14514259.2, 14507697.6,
14501136. , 14494574.4, 14488012.8, 14481451.2, 14474889.6,
14468328. , 14461766.4, 14455204.8, 14448643.2, 14442081.6,
14435520. , 14566752. , 14560190.4, 14553628.8, 14547067.2,
14540505.6, 14533944. , 14527382.4, 14520820.8, 14514259.2,
14507697.6, 14501136. , 14494574.4, 14488012.8, 14481451.2,
14474889.6, 14468328. , 14461766.4, 14455204.8, 14448643.2,
14442081.6, 14435520. ])
>>> model.kill()
>>> model.close_log_file()
"""
location_type_id = self.get_location_type_id_gwheadobs()
return self._get_hydrograph_coordinates(location_type_id)
def get_subsidence_hydrograph_coordinates(self):
"""
Return the x,y-coordinates for the subsidence hydrograph
locations specified in an IWFM model
Returns
-------
tuple
np.ndarray of x-coordinates
np.ndarray of y-coordinates
See Also
--------
IWFMModel.get_n_hydrograph_types : Return the number of different hydrograph types being printed by the IWFM model
IWFMModel.get_hydrograph_type_list : Return a list of different hydrograph types being printed by the IWFM model
IWFMModel.get_n_groundwater_hydrographs : Return the number of groundwater hydrographs specified in an IWFM model
IWFMModel.get_n_subsidence_hydrographs : Return the number of subsidence hydrographs specified in an IWFM model
IWFMModel.get_n_stream_hydrographs : Return the number of stream flow hydrographs specified in an IWFM model
IWFMModel.get_n_tile_drain_hydrographs : Return the number of tile drain hydrographs specified in an IWFM model
IWFMModel.get_groundwater_hydrograph_ids : Return the IDs for the groundwater hydrographs specified in an IWFM model
IWFMModel.get_subsidence_hydrograph_ids : Return the IDs for the subsidence hydrographs specified in an IWFM model
IWFMModel.get_stream_hydrograph_ids : Return the IDs for the stream hydrographs specified in an IWFM model
IWFMModel.get_tile_drain_hydrograph_ids : Return the IDs for the tile drain hydrographs specified in an IWFM model
IWFMModel.get_groundwater_hydrograph_coordinates : Return the x,y-coordinates for the groundwater hydrographs specified in an IWFM model
IWFMModel.get_stream_hydrograph_coordinates : Return the x,y-coordinates for the stream flow observation locations specified in an IWFM model
IWFMModel.get_tile_drain_hydrograph_coordinates : Return the x,y-coordinates for the tile drain observations specified in an IWFM model
IWFMModel.get_groundwater_hydrograph : Return the simulated groundwater hydrograph for the provided groundwater hydrograph id
IWFMModel.get_groundwater_hydrograph_at_node_and_layer : Return a simulated groundwater hydrograph for a node and layer
IWFMModel.get_subsidence_hydrograph : Return the simulated subsidence hydrograph for the provided subsidence hydrograph id
IWFMModel.get_stream_hydrograph : Return the simulated stream hydrograph for the provided stream hydrograph id
IWFMModel.get_tile_drain_hydrograph : Return the simulated tile drain hydrograph for the provided tile drain hydrograph id
Example
-------
>>> from pywfm import IWFMModel
>>> pp_file = '../Preprocessor/PreProcessor_MAIN.IN'
>>> sim_file = 'Simulation_MAIN.IN'
>>> model = IWFMModel(pp_file, sim_file)
>>> x, y = model.get_subsidence_hydrograph_coordinates()
>>> x
array([1935672. , 1804440. , 1811001.6, 1817563.2, 1863494.4])
>>> y
array([14481451.2, 14488012.8, 14488012.8, 14488012.8, 14488012.8])
>>> model.kill()
>>> model.close_log_file()
"""
location_type_id = self.get_location_type_id_subsidenceobs()
return self._get_hydrograph_coordinates(location_type_id)
def get_stream_hydrograph_coordinates(self):
"""
Return the x,y-coordinates for the stream flow observation
locations specified in an IWFM model
Returns
-------
tuple
np.ndarray of x-coordinates
np.ndarray of y-coordinates
See Also
--------
IWFMModel.get_n_hydrograph_types : Return the number of different hydrograph types being printed by the IWFM model
IWFMModel.get_hydrograph_type_list : Return a list of different hydrograph types being printed by the IWFM model
IWFMModel.get_n_groundwater_hydrographs : Return the number of groundwater hydrographs specified in an IWFM model
IWFMModel.get_n_subsidence_hydrographs : Return the number of subsidence hydrographs specified in an IWFM model
IWFMModel.get_n_stream_hydrographs : Return the number of stream flow hydrographs specified in an IWFM model
IWFMModel.get_n_tile_drain_hydrographs : Return the number of tile drain hydrographs specified in an IWFM model
IWFMModel.get_groundwater_hydrograph_ids : Return the IDs for the groundwater hydrographs specified in an IWFM model
IWFMModel.get_subsidence_hydrograph_ids : Return the IDs for the subsidence hydrographs specified in an IWFM model
IWFMModel.get_stream_hydrograph_ids : Return the IDs for the stream hydrographs specified in an IWFM model
IWFMModel.get_tile_drain_hydrograph_ids : Return the IDs for the tile drain hydrographs specified in an IWFM model
IWFMModel.get_groundwater_hydrograph_coordinates : Return the x,y-coordinates for the groundwater hydrographs specified in an IWFM model
IWFMModel.get_subsidence_hydrograph_coordinates : Return the x,y-coordinates for the subsidence hydrograph locations specified in an IWFM model
IWFMModel.get_tile_drain_hydrograph_coordinates : Return the x,y-coordinates for the tile drain observations specified in an IWFM model
IWFMModel.get_groundwater_hydrograph : Return the simulated groundwater hydrograph for the provided groundwater hydrograph id
IWFMModel.get_groundwater_hydrograph_at_node_and_layer : Return a simulated groundwater hydrograph for a node and layer
IWFMModel.get_subsidence_hydrograph : Return the simulated subsidence hydrograph for the provided subsidence hydrograph id
IWFMModel.get_stream_hydrograph : Return the simulated stream hydrograph for the provided stream hydrograph id
IWFMModel.get_tile_drain_hydrograph : Return the simulated tile drain hydrograph for the provided tile drain hydrograph id
Example
-------
>>> from pywfm import IWFMModel
>>> pp_file = '../Preprocessor/PreProcessor_MAIN.IN'
>>> sim_file = 'Simulation_MAIN.IN'
>>> model = IWFMModel(pp_file, sim_file)
>>> x, y = model.get_stream_hydrograph_coordinates()
>>> x
array([1883179.2, 1883179.2, 1883179.2, 1883179.2, 1883179.2, 1883179.2,
1883179.2, 1883179.2, 1883179.2, 1876617.6, 1876617.6, 1883179.2,
1883179.2, 1883179.2, 1883179.2, 1883179.2, 1883179.2, 1883179.2,
1883179.2, 1883179.2, 1883179.2, 1883179.2, 1883179.2])
>>> y
array([14566752. , 14560190.4, 14553628.8, 14547067.2, 14540505.6,
14533944. , 14527382.4, 14520820.8, 14514259.2, 14514259.2,
14501136. , 14501136. , 14494574.4, 14488012.8, 14481451.2,
14474889.6, 14474889.6, 14468328. , 14461766.4, 14455204.8,
14448643.2, 14442081.6, 14435520. ])
>>> model.kill()
>>> model.close_log_file()
"""
location_type_id = self.get_location_type_id_streamhydobs()
return self._get_hydrograph_coordinates(location_type_id)
def get_tile_drain_hydrograph_coordinates(self):
"""
Return the x,y-coordinates for the tile drain observations
specified in an IWFM model
Returns
-------
tuple
np.ndarray of x-coordinates
np.ndarray of y-coordinates
See Also
--------
IWFMModel.get_n_hydrograph_types : Return the number of different hydrograph types being printed by the IWFM model
IWFMModel.get_hydrograph_type_list : Return a list of different hydrograph types being printed by the IWFM model
IWFMModel.get_n_groundwater_hydrographs : Return the number of groundwater hydrographs specified in an IWFM model
IWFMModel.get_n_subsidence_hydrographs : Return the number of subsidence hydrographs specified in an IWFM model
IWFMModel.get_n_stream_hydrographs : Return the number of stream flow hydrographs specified in an IWFM model
IWFMModel.get_n_tile_drain_hydrographs : Return the number of tile drain hydrographs specified in an IWFM model
IWFMModel.get_groundwater_hydrograph_ids : Return the IDs for the groundwater hydrographs specified in an IWFM model
IWFMModel.get_subsidence_hydrograph_ids : Return the IDs for the subsidence hydrographs specified in an IWFM model
IWFMModel.get_stream_hydrograph_ids : Return the IDs for the stream hydrographs specified in an IWFM model
IWFMModel.get_tile_drain_hydrograph_ids : Return the IDs for the tile drain hydrographs specified in an IWFM model
IWFMModel.get_groundwater_hydrograph_coordinates : Return the x,y-coordinates for the groundwater hydrographs specified in an IWFM model
IWFMModel.get_subsidence_hydrograph_coordinates : Return the x,y-coordinates for the subsidence hydrograph locations specified in an IWFM model
IWFMModel.get_stream_hydrograph_coordinates : Return the x,y-coordinates for the stream flow observation locations specified in an IWFM model
IWFMModel.get_groundwater_hydrograph : Return the simulated groundwater hydrograph for the provided groundwater hydrograph id
IWFMModel.get_groundwater_hydrograph_at_node_and_layer : Return a simulated groundwater hydrograph for a node and layer
IWFMModel.get_subsidence_hydrograph : Return the simulated subsidence hydrograph for the provided subsidence hydrograph id
IWFMModel.get_stream_hydrograph : Return the simulated stream hydrograph for the provided stream hydrograph id
IWFMModel.get_tile_drain_hydrograph : Return the simulated tile drain hydrograph for the provided tile drain hydrograph id
Example
-------
>>> from pywfm import IWFMModel
>>> pp_file = '../Preprocessor/PreProcessor_MAIN.IN'
>>> sim_file = 'Simulation_MAIN.IN'
>>> model = IWFMModel(pp_file, sim_file)
>>> x, y = model.get_tile_drain_hydrograph_coordinates()
>>> x
array([1837248., 1837248., 1837248., 1837248., 1837248., 1837248.])
>>> y
array([14435520. , 14455204.8, 14474889.6, 14494574.4, 14514259.2,
14533944. ])
>>> model.kill()
>>> model.close_log_file()
"""
location_type_id = self.get_location_type_id_tiledrainobs()
return self._get_hydrograph_coordinates(location_type_id)
def _get_hydrograph(
self,
hydrograph_type,
hydrograph_index,
layer_number,
begin_date,
end_date,
length_conversion_factor,
volume_conversion_factor,
):
"""
private method returning a simulated hydrograph for a selected hydrograph type and hydrograph index
Parameters
----------
hydrograph_type : int
one of the available hydrograph types for the model retrieved using
get_hydrograph_type_list method
hydrograph_index : int
index for hydrograph being retrieved
layer_number : int
layer number for returning hydrograph. only used for groundwater hydrograph
at node and layer
begin_date : str
IWFM-style date for the beginning date of the simulated groundwater heads
end_date : str
IWFM-style date for the end date of the simulated groundwater heads
length_conversion_factor : float, int
hydrographs with units of length are multiplied by this
value to convert simulation units to desired output units
volume_conversion_factor : float, int
hydrographs with units of volume are multiplied by this
value to convert simulation units to desired output units
Returns
-------
np.arrays
1-D array of dates
1-D array of hydrograph values
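Example
-------
Illustrative sketch; assumes an instantiated IWFMModel named model. Public
wrappers such as get_groundwater_hydrograph resolve the hydrograph type and
convert the user-facing hydrograph ID to the 1-based index expected here
before calling this private method.
>>> hydrograph_type = model.get_location_type_id_gwheadobs()
>>> dates, values = model._get_hydrograph(
...     hydrograph_type, 1, 1, None, None, 1.0, 1.0
... )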
"""
# check to see if IWFM procedure is available in user version of IWFM DLL
if not hasattr(self.dll, "IW_Model_GetHydrograph"):
raise AttributeError(
'IWFM API does not have "{}" procedure. '
"Check for an updated version".format("IW_Model_GetHydrograph")
)
# check that layer_number is an integer
if not isinstance(layer_number, int):
raise TypeError(
"layer_number must be an integer, "
"value {} provided is of type {}".format(
layer_number, type(layer_number)
)
)
# check layer number is valid
n_layers = self.get_n_layers()
if layer_number not in range(1, n_layers + 1):
raise ValueError(
"Layer Number provided {} is not valid. "
"Model only has {} layers".format(layer_number, n_layers)
)
# handle start and end dates
# get time specs
dates_list, output_interval = self.get_time_specs()
if begin_date is None:
begin_date = dates_list[0]
else:
self._validate_iwfm_date(begin_date)
if begin_date not in dates_list:
raise ValueError(
"begin_date was not recognized as a model time step. use IWFMModel.get_time_specs() method to check."
)
if end_date is None:
end_date = dates_list[-1]
else:
self._validate_iwfm_date(end_date)
if end_date not in dates_list:
raise ValueError(
"end_date was not found in the Simulation file. use IWFMModel.get_time_specs() method to check."
)
if self.is_date_greater(begin_date, end_date):
raise ValueError("end_date must occur after begin_date")
# check that length conversion factor is a number
if not isinstance(length_conversion_factor, (int, float)):
raise TypeError(
"length_conversion_factor must be a number. "
"value {} provides is of type {}".format(
length_conversion_factor, type(length_conversion_factor)
)
)
# check that volume conversion factor is a number
if not isinstance(volume_conversion_factor, (int, float)):
raise TypeError(
"volume_conversion_factor must be a number. "
"value {} provides is of type {}".format(
volume_conversion_factor, type(volume_conversion_factor)
)
)
# convert hydrograph type to ctypes
hydrograph_type = ctypes.c_int(hydrograph_type)
# convert hydrograph_id to ctypes
hydrograph_index = ctypes.c_int(hydrograph_index)
# convert layer number to ctypes
layer_number = ctypes.c_int(layer_number)
# get number of time intervals
num_time_intervals = ctypes.c_int(
self.get_n_intervals(begin_date, end_date, output_interval)
)
# convert output interval to ctypes
output_interval = ctypes.create_string_buffer(output_interval.encode("utf-8"))
# get length of time interval
length_time_interval = ctypes.c_int(ctypes.sizeof(output_interval))
# convert dates to ctypes
begin_date = ctypes.create_string_buffer(begin_date.encode("utf-8"))
end_date = ctypes.create_string_buffer(end_date.encode("utf-8"))
# get length of begin_date and end_date strings
length_date_string = ctypes.c_int(ctypes.sizeof(begin_date))
# convert length_conversion_factor to ctypes
length_conversion_factor = ctypes.c_double(length_conversion_factor)
# convert volume_conversion_factor to ctypes
volume_conversion_factor = ctypes.c_double(volume_conversion_factor)
# initialize output variables
output_dates = (ctypes.c_double * num_time_intervals.value)()
output_hydrograph = (ctypes.c_double * num_time_intervals.value)()
data_unit_type_id = ctypes.c_int(0)
num_time_steps = ctypes.c_int(0)
# set instance variable status to 0
status = ctypes.c_int(0)
self.dll.IW_Model_GetHydrograph(
ctypes.byref(hydrograph_type),
ctypes.byref(hydrograph_index),
ctypes.byref(layer_number),
ctypes.byref(length_date_string),
begin_date,
end_date,
ctypes.byref(length_time_interval),
output_interval,
ctypes.byref(length_conversion_factor),
ctypes.byref(volume_conversion_factor),
ctypes.byref(num_time_intervals),
output_dates,
output_hydrograph,
ctypes.byref(data_unit_type_id),
ctypes.byref(num_time_steps),
ctypes.byref(status),
)
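# the DLL reports dates as day offsets from 1899-12-30; convert them to
# numpy datetime64[D] values and return them with the hydrograph values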
return np.array("1899-12-30", dtype="datetime64") + np.array(
output_dates, dtype="timedelta64[D]"
), np.array(output_hydrograph)
def get_groundwater_hydrograph(
self,
groundwater_hydrograph_id,
begin_date=None,
end_date=None,
length_conversion_factor=1.0,
volume_conversion_factor=1.0,
):
"""
Return the simulated groundwater hydrograph for the
provided groundwater hydrograph ID
Parameters
----------
groundwater_hydrograph_id : int
ID for hydrograph being retrieved
begin_date : str or None, default=None
IWFM-style date for the beginning date of the simulated groundwater heads
end_date : str or None, default=None
IWFM-style date for the end date of the simulated groundwater heads
length_conversion_factor : float, int, default=1.0
hydrographs with units of length are multiplied by this
value to convert simulation units to desired output units
volume_conversion_factor : float, int, default=1.0
hydrographs with units of volume are multiplied by this
value to convert simulation units to desired output units
e.g. use 2.29568E-8 for ft^3 --> TAF
Returns
-------
np.arrays
1-D array of dates
1-D array of hydrograph values
See Also
--------
IWFMModel.get_n_hydrograph_types : Return the number of different hydrograph types being printed by the IWFM model
IWFMModel.get_hydrograph_type_list : Return a list of different hydrograph types being printed by the IWFM model
IWFMModel.get_n_groundwater_hydrographs : Return the number of groundwater hydrographs specified in an IWFM model
IWFMModel.get_n_subsidence_hydrographs : Return the number of subsidence hydrographs specified in an IWFM model
IWFMModel.get_n_stream_hydrographs : Return the number of stream flow hydrographs specified in an IWFM model
IWFMModel.get_n_tile_drain_hydrographs : Return the number of tile drain hydrographs specified in an IWFM model
IWFMModel.get_groundwater_hydrograph_ids : Return the IDs for the groundwater hydrographs specified in an IWFM model
IWFMModel.get_subsidence_hydrograph_ids : Return the IDs for the subsidence hydrographs specified in an IWFM model
IWFMModel.get_stream_hydrograph_ids : Return the IDs for the stream hydrographs specified in an IWFM model
IWFMModel.get_tile_drain_hydrograph_ids : Return the IDs for the tile drain hydrographs specified in an IWFM model
IWFMModel.get_groundwater_hydrograph_coordinates : Return the x,y-coordinates for the groundwater hydrographs specified in an IWFM model
IWFMModel.get_subsidence_hydrograph_coordinates : Return the x,y-coordinates for the subsidence hydrograph locations specified in an IWFM model
IWFMModel.get_stream_hydrograph_coordinates : Return the x,y-coordinates for the stream flow observation locations specified in an IWFM model
IWFMModel.get_tile_drain_hydrograph_coordinates : Return the x,y-coordinates for the tile drain observations specified in an IWFM model
IWFMModel.get_groundwater_hydrograph_at_node_and_layer : Return a simulated groundwater hydrograph for a node and layer
IWFMModel.get_subsidence_hydrograph : Return the simulated subsidence hydrograph for the provided subsidence hydrograph id
IWFMModel.get_stream_hydrograph : Return the simulated stream hydrograph for the provided stream hydrograph id
IWFMModel.get_tile_drain_hydrograph : Return the simulated tile drain hydrograph for the provided tile drain hydrograph id
Example
-------
>>> from pywfm import IWFMModel
>>> pp_file = '../Preprocessor/PreProcessor_MAIN.IN'
>>> sim_file = 'Simulation_MAIN.IN'
>>> model = IWFMModel(pp_file, sim_file)
>>> dates, values = model.get_groundwater_hydrograph(1)
>>> dates
array(['1990-10-01', '1990-10-02', '1990-10-03', ..., '2000-09-28',
'2000-09-29', '2000-09-30'], dtype='datetime64[D]')
>>> values
array([ 1.9855, 3.9691, 5.9509, ..., 302.0719, 302.0719, 302.072 ])
>>> model.kill()
>>> model.close_log_file()
"""
hydrograph_type = self.get_location_type_id_gwheadobs()
# check that groundwater_hydrograph_id is an integer
if not isinstance(groundwater_hydrograph_id, int):
raise TypeError("groundwater_hydrograph_id must be an int")
# get possible groundwater hydrograph IDs
groundwater_hydrograph_ids = self.get_groundwater_hydrograph_ids()
# check to see if the groundwater_hydrograph_id provided is a valid groundwater hydrograph ID
if not np.any(groundwater_hydrograph_ids == groundwater_hydrograph_id):
raise ValueError("groundwater_hydrograph_id specified is not valid")
# convert groundwater_hydrograph_id to groundwater hydrograph index
# add 1 to index to convert from python index to fortran index
groundwater_hydrograph_index = (
np.where(groundwater_hydrograph_ids == groundwater_hydrograph_id)[0][0] + 1
)
# layer_number only applies to groundwater hydrographs at node and layer
# so it is hardcoded to layer 1 for the _get_hydrograph call
layer_number = 1
return self._get_hydrograph(
hydrograph_type,
groundwater_hydrograph_index,
layer_number,
begin_date,
end_date,
length_conversion_factor,
volume_conversion_factor,
)
def get_groundwater_hydrograph_at_node_and_layer(
self,
node_id,
layer_number,
begin_date=None,
end_date=None,
length_conversion_factor=1.0,
volume_conversion_factor=1.0,
):
"""
Return a simulated groundwater hydrograph for a node and layer
Parameters
----------
node_id : int
ID of the node for which the hydrograph is retrieved
layer_number : int
layer number for returning hydrograph. only used for groundwater hydrograph
at node and layer
begin_date : str or None, default=None
IWFM-style date for the beginning date of the simulated groundwater heads
end_date : str or None, default=None
IWFM-style date for the end date of the simulated groundwater heads
length_conversion_factor : float or int, default=1.0
hydrographs with units of length are multiplied by this
value to convert simulation units to desired output units
volume_conversion_factor : float or int, default=1.0
hydrographs with units of volume are multiplied by this
value to convert simulation units to desired output units
e.g. use 2.29568E-8 for ft^3 --> TAF
Returns
-------
np.arrays
1-D array of dates
1-D array of hydrograph values
See Also
--------
IWFMModel.get_n_hydrograph_types : Return the number of different hydrograph types being printed by the IWFM model
IWFMModel.get_hydrograph_type_list : Return a list of different hydrograph types being printed by the IWFM model
IWFMModel.get_n_groundwater_hydrographs : Return the number of groundwater hydrographs specified in an IWFM model
IWFMModel.get_n_subsidence_hydrographs : Return the number of subsidence hydrographs specified in an IWFM model
IWFMModel.get_n_stream_hydrographs : Return the number of stream flow hydrographs specified in an IWFM model
IWFMModel.get_n_tile_drain_hydrographs : Return the number of tile drain hydrographs specified in an IWFM model
IWFMModel.get_groundwater_hydrograph_ids : Return the IDs for the groundwater hydrographs specified in an IWFM model
IWFMModel.get_subsidence_hydrograph_ids : Return the IDs for the subsidence hydrographs specified in an IWFM model
IWFMModel.get_stream_hydrograph_ids : Return the IDs for the stream hydrographs specified in an IWFM model
IWFMModel.get_tile_drain_hydrograph_ids : Return the IDs for the tile drain hydrographs specified in an IWFM model
IWFMModel.get_groundwater_hydrograph_coordinates : Return the x,y-coordinates for the groundwater hydrographs specified in an IWFM model
IWFMModel.get_subsidence_hydrograph_coordinates : Return the x,y-coordinates for the subsidence hydrograph locations specified in an IWFM model
IWFMModel.get_stream_hydrograph_coordinates : Return the x,y-coordinates for the stream flow observation locations specified in an IWFM model
IWFMModel.get_tile_drain_hydrograph_coordinates : Return the x,y-coordinates for the tile drain observations specified in an IWFM model
IWFMModel.get_groundwater_hydrograph : Return the simulated groundwater hydrograph for the provided groundwater hydrograph id
IWFMModel.get_subsidence_hydrograph : Return the simulated subsidence hydrograph for the provided subsidence hydrograph id
IWFMModel.get_stream_hydrograph : Return the simulated stream hydrograph for the provided stream hydrograph id
IWFMModel.get_tile_drain_hydrograph : Return the simulated tile drain hydrograph for the provided tile drain hydrograph id
Example
-------
>>> from pywfm import IWFMModel
>>> pp_file = '../Preprocessor/PreProcessor_MAIN.IN'
>>> sim_file = 'Simulation_MAIN.IN'
>>> model = IWFMModel(pp_file, sim_file)
>>> dates, values = model.get_groundwater_hydrograph_at_node_and_layer(25, 1)
>>> dates
array(['1990-10-01', '1990-10-02', '1990-10-03', ..., '2000-09-28',
'2000-09-29', '2000-09-30'], dtype='datetime64[D]')
>>> values
array([ 0. , 0. , 0. , ..., 180.9377, 181.0441, 181.1501])
>>> model.kill()
>>> model.close_log_file()
"""
hydrograph_type = self.get_location_type_id_node()
# check that node_id is an integer
if not isinstance(node_id, int):
raise TypeError("node_id must be an int")
# get possible node IDs
node_ids = self.get_node_ids()
# check to see if the node_id provided is a valid node ID
if not np.any(node_ids == node_id):
raise ValueError("groundwater_hydrograph_id specified is not valid")
# convert node_id to node index
# add 1 to index to convert from python index to fortran index
node_index = np.where(node_ids == node_id)[0][0] + 1
return self._get_hydrograph(
hydrograph_type,
node_index,
layer_number,
begin_date,
end_date,
length_conversion_factor,
volume_conversion_factor,
)
def get_subsidence_hydrograph(
self,
subsidence_hydrograph_id,
begin_date=None,
end_date=None,
length_conversion_factor=1.0,
volume_conversion_factor=1.0,
):
"""
Return the simulated subsidence hydrograph for the
provided subsidence hydrograph ID
Parameters
----------
subsidence_hydrograph_id : int
ID for subsidence hydrograph location being retrieved
begin_date : str or None, default=None
IWFM-style date for the beginning date of the simulated subsidence
end_date : str or None, default=None
IWFM-style date for the end date of the simulated subsidence
length_conversion_factor : float, int, default=1.0
hydrographs with units of length are multiplied by this
value to convert simulation units to desired output units
volume_conversion_factor : float, int, default=1.0
hydrographs with units of volume are multiplied by this
value to convert simulation units to desired output units
e.g. use 2.29568E-8 for ft^3 --> TAF
Returns
-------
np.arrays
1-D array of dates
1-D array of hydrograph values
See Also
--------
IWFMModel.get_n_hydrograph_types : Return the number of different hydrograph types being printed by the IWFM model
IWFMModel.get_hydrograph_type_list : Return a list of different hydrograph types being printed by the IWFM model
IWFMModel.get_n_groundwater_hydrographs : Return the number of groundwater hydrographs specified in an IWFM model
IWFMModel.get_n_subsidence_hydrographs : Return the number of subsidence hydrographs specified in an IWFM model
IWFMModel.get_n_stream_hydrographs : Return the number of stream flow hydrographs specified in an IWFM model
IWFMModel.get_n_tile_drain_hydrographs : Return the number of tile drain hydrographs specified in an IWFM model
IWFMModel.get_groundwater_hydrograph_ids : Return the IDs for the groundwater hydrographs specified in an IWFM model
IWFMModel.get_subsidence_hydrograph_ids : Return the IDs for the subsidence hydrographs specified in an IWFM model
IWFMModel.get_stream_hydrograph_ids : Return the IDs for the stream hydrographs specified in an IWFM model
IWFMModel.get_tile_drain_hydrograph_ids : Return the IDs for the tile drain hydrographs specified in an IWFM model
IWFMModel.get_groundwater_hydrograph_coordinates : Return the x,y-coordinates for the groundwater hydrographs specified in an IWFM model
IWFMModel.get_subsidence_hydrograph_coordinates : Return the x,y-coordinates for the subsidence hydrograph locations specified in an IWFM model
IWFMModel.get_stream_hydrograph_coordinates : Return the x,y-coordinates for the stream flow observation locations specified in an IWFM model
IWFMModel.get_tile_drain_hydrograph_coordinates : Return the x,y-coordinates for the tile drain observations specified in an IWFM model
IWFMModel.get_groundwater_hydrograph : Return the simulated groundwater hydrograph for the provided groundwater hydrograph id
IWFMModel.get_groundwater_hydrograph_at_node_and_layer : Return a simulated groundwater hydrograph for a node and layer
IWFMModel.get_stream_hydrograph : Return the simulated stream hydrograph for the provided stream hydrograph id
IWFMModel.get_tile_drain_hydrograph : Return the simulated tile drain hydrograph for the provided tile drain hydrograph id
Example
-------
>>> from pywfm import IWFMModel
>>> pp_file = '../Preprocessor/PreProcessor_MAIN.IN'
>>> sim_file = 'Simulation_MAIN.IN'
>>> model = IWFMModel(pp_file, sim_file)
>>> dates, values = model.get_subsidence_hydrograph(1)
>>> dates
array(['1990-10-01', '1990-10-02', '1990-10-03', ..., '2000-09-28',
'2000-09-29', '2000-09-30'], dtype='datetime64[D]')
>>> values
array([-0.0152, -0.0153, -0.0153, ..., -0.0189, -0.0189, -0.0189])
>>> model.kill()
>>> model.close_log_file()
"""
hydrograph_type = self.get_location_type_id_subsidenceobs()
# check that subsidence_hydrograph_id is an integer
if not isinstance(subsidence_hydrograph_id, int):
raise TypeError("subsidence_hydrograph_id must be an int")
# get possible subsidence hydrograph IDs
subsidence_hydrograph_ids = self.get_subsidence_hydrograph_ids()
# check to see if the subsidence_hydrograph_id provided is a valid subsidence hydrograph ID
if not np.any(subsidence_hydrograph_ids == subsidence_hydrograph_id):
raise ValueError("subsidence_hydrograph_id specified is not valid")
# convert subsidence_hydrograph_id to subsidence hydrograph index
# add 1 to index to convert from python index to fortran index
subsidence_hydrograph_index = (
np.where(subsidence_hydrograph_ids == subsidence_hydrograph_id)[0][0] + 1
)
# layer_number only applies to groundwater hydrographs at node and layer
# so hardcoded to layer 1 for _get_hydrograph method
layer_number = 1
return self._get_hydrograph(
hydrograph_type,
subsidence_hydrograph_index,
layer_number,
begin_date,
end_date,
length_conversion_factor,
volume_conversion_factor,
)
def get_stream_hydrograph(
self,
stream_hydrograph_id,
begin_date=None,
end_date=None,
length_conversion_factor=1.0,
volume_conversion_factor=1.0,
):
"""
Return the simulated stream hydrograph for the
provided stream hydrograph id
Parameters
----------
stream_hydrograph_id : int
ID for stream hydrograph location being retrieved
begin_date : str or None, default=None
IWFM-style date for the beginning date of the simulated stream flows
end_date : str or None, default=None
IWFM-style date for the end date of the simulated stream flows
length_conversion_factor : float, int, default=1.0
hydrographs with units of length are multiplied by this
value to convert simulation units to desired output units
volume_conversion_factor : float, int, default=1.0
hydrographs with units of volume are multiplied by this
value to convert simulation units to desired output units
e.g. use 2.29568E-8 for ft^3 --> TAF
Returns
-------
np.arrays
1-D array of dates
1-D array of hydrograph values
See Also
--------
IWFMModel.get_n_hydrograph_types : Return the number of different hydrograph types being printed by the IWFM model
IWFMModel.get_hydrograph_type_list : Return a list of different hydrograph types being printed by the IWFM model
IWFMModel.get_n_groundwater_hydrographs : Return the number of groundwater hydrographs specified in an IWFM model
IWFMModel.get_n_subsidence_hydrographs : Return the number of subsidence hydrographs specified in an IWFM model
IWFMModel.get_n_stream_hydrographs : Return the number of stream flow hydrographs specified in an IWFM model
IWFMModel.get_n_tile_drain_hydrographs : Return the number of tile drain hydrographs specified in an IWFM model
IWFMModel.get_groundwater_hydrograph_ids : Return the IDs for the groundwater hydrographs specified in an IWFM model
IWFMModel.get_subsidence_hydrograph_ids : Return the IDs for the subsidence hydrographs specified in an IWFM model
IWFMModel.get_stream_hydrograph_ids : Return the IDs for the stream hydrographs specified in an IWFM model
IWFMModel.get_tile_drain_hydrograph_ids : Return the IDs for the tile drain hydrographs specified in an IWFM model
IWFMModel.get_groundwater_hydrograph_coordinates : Return the x,y-coordinates for the groundwater hydrographs specified in an IWFM model
IWFMModel.get_subsidence_hydrograph_coordinates : Return the x,y-coordinates for the subsidence hydrograph locations specified in an IWFM model
IWFMModel.get_stream_hydrograph_coordinates : Return the x,y-coordinates for the stream flow observation locations specified in an IWFM model
IWFMModel.get_tile_drain_hydrograph_coordinates : Return the x,y-coordinates for the tile drain observations specified in an IWFM model
IWFMModel.get_groundwater_hydrograph : Return the simulated groundwater hydrograph for the provided groundwater hydrograph ID
IWFMModel.get_groundwater_hydrograph_at_node_and_layer : Return a simulated groundwater hydrograph for a node and layer
IWFMModel.get_subsidence_hydrograph : Return the simulated subsidence hydrograph for the provided subsidence hydrograph ID
IWFMModel.get_tile_drain_hydrograph : Return the simulated tile drain hydrograph for the provided tile drain hydrograph ID
Example
-------
>>> from pywfm import IWFMModel
>>> pp_file = '../Preprocessor/PreProcessor_MAIN.IN'
>>> sim_file = 'Simulation_MAIN.IN'
>>> model = IWFMModel(pp_file, sim_file)
>>> dates, values = model.get_stream_hydrograph(1)
>>> dates
array(['1990-10-01', '1990-10-02', '1990-10-03', ..., '2000-09-28',
'2000-09-29', '2000-09-30'], dtype='datetime64[D]')
>>> values
array([75741603.86810122, 75741603.86810122, 75741603.86810122, ...,
85301215.31559001, 85301215.31559001, 85301215.31559001])
>>> model.kill()
>>> model.close_log_file()
"""
hydrograph_type = self.get_location_type_id_streamhydobs()
# check that stream_hydrograph_id is an integer
if not isinstance(stream_hydrograph_id, int):
raise TypeError("stream_hydrograph_id must be an int")
# get possible stream hydrograph IDs
stream_hydrograph_ids = self.get_stream_hydrograph_ids()
# check to see if the stream_hydrograph_id provided is a valid stream hydrograph ID
if not np.any(stream_hydrograph_ids == stream_hydrograph_id):
raise ValueError("stream_hydrograph_id specified is not valid")
# convert stream_hydrograph_id to stream hydrograph index
# add 1 to index to convert from python index to fortran index
stream_hydrograph_index = (
np.where(stream_hydrograph_ids == stream_hydrograph_id)[0][0] + 1
)
# layer_number only applies to groundwater hydrographs at node and layer
# so hardcoded to layer 1 for _get_hydrograph method
layer_number = 1
return self._get_hydrograph(
hydrograph_type,
stream_hydrograph_index,
layer_number,
begin_date,
end_date,
length_conversion_factor,
volume_conversion_factor,
)
def get_gwheads_foralayer(
self, layer_number, begin_date=None, end_date=None, length_conversion_factor=1.0
):
"""
Return the simulated groundwater heads for a single user-specified model layer for
every model node over a user-specified time interval.
Parameters
----------
layer_number : int
layer number for a layer in the model
begin_date : str, default=None
IWFM-style date for the beginning date of the simulated groundwater heads
end_date : str, default=None
IWFM-style date for the end date of the simulated groundwater heads
length_conversion_factor : float, int, default=1.0
simulated heads are multiplied by this value to convert simulation units to desired output units
Returns
-------
np.arrays
1-D array of dates
2-D array of heads for all nodes for each date
Note
----
the interval between the begin_date and the end_date is determined from the model time interval
using get_time_specs()
See Also
--------
IWFMModel.get_gwheads_all : Return the groundwater heads at all nodes in every aquifer layer for the current simulation time step
Example
-------
>>> model = IWFMModel(dll, preprocessor_file, simulation_file)
>>> dates, heads = model.get_gwheads_foralayer(1, '09/30/1980_24:00', '09/30/2000_24:00')
>>> dates
['09/30/1980',
'10/31/1980',
'11/30/1980',
'12/31/1980',
.
.
.
'09/30/2000']
>>> heads
[[458.57, 460.32, 457.86, ..., 686.42],
[459.86, 462.38, 459.11, ..., 689.05],
.
.
.
[435.75, 439.23, 440.99, ..., 650.78]]
"""
# check to see if IWFM procedure is available in user version of IWFM DLL
if not hasattr(self.dll, "IW_Model_GetGWHeads_ForALayer"):
raise AttributeError(
'IWFM API does not have "{}" procedure. '
"Check for an updated version".format("IW_Model_GetGWHeads_ForALayer")
)
# check that layer_number is an integer
if not isinstance(layer_number, int):
raise TypeError(
"layer_number must be an integer, "
"value {} provided is of type {}".format(
layer_number, type(layer_number)
)
)
# check if layer_number provided is a valid layer
n_layers = self.get_n_layers()
layers = np.arange(1, n_layers + 1)
if not np.any(layers == layer_number):
raise ValueError("layer_number entered is invalid")
# convert specified layer number to ctypes
layer_number = ctypes.c_int(layer_number)
# handle start and end dates
# get time specs
dates_list, output_interval = self.get_time_specs()
if begin_date is None:
begin_date = dates_list[0]
else:
self._validate_iwfm_date(begin_date)
if begin_date not in dates_list:
raise ValueError(
"begin_date was not recognized as a model time step. use IWFMModel.get_time_specs() method to check."
)
if end_date is None:
end_date = dates_list[-1]
else:
self._validate_iwfm_date(end_date)
if end_date not in dates_list:
raise ValueError(
"end_date was not found in the Budget file. use IWFMModel.get_time_specs() method to check."
)
if self.is_date_greater(begin_date, end_date):
raise ValueError("end_date must occur after begin_date")
# check that length conversion factor is a number
if not isinstance(length_conversion_factor, (int, float)):
raise TypeError(
"length_conversion_factor must be a number. value {} provides is of type {}".format(
length_conversion_factor, type(length_conversion_factor)
)
)
# get number of time intervals between dates
num_time_intervals = ctypes.c_int(
self.get_n_intervals(begin_date, end_date, output_interval)
)
# convert dates to ctypes
begin_date = ctypes.create_string_buffer(begin_date.encode("utf-8"))
end_date = ctypes.create_string_buffer(end_date.encode("utf-8"))
# get length of begin_date and end_date strings
length_date_string = ctypes.c_int(ctypes.sizeof(begin_date))
# convert length_conversion_factor to ctypes
length_conversion_factor = ctypes.c_double(length_conversion_factor)
# get number of model nodes
num_nodes = ctypes.c_int(self.get_n_nodes())
# initialize output variables
output_dates = (ctypes.c_double * num_time_intervals.value)()
output_gwheads = (
(ctypes.c_double * num_nodes.value) * num_time_intervals.value
)()
# set instance variable status to 0
status = ctypes.c_int(0)
# call DLL procedure
self.dll.IW_Model_GetGWHeads_ForALayer(
ctypes.byref(layer_number),
begin_date,
end_date,
ctypes.byref(length_date_string),
ctypes.byref(length_conversion_factor),
ctypes.byref(num_nodes),
ctypes.byref(num_time_intervals),
output_dates,
output_gwheads,
ctypes.byref(status),
)
return np.array("1899-12-30", dtype="datetime64") + np.array(
output_dates, dtype="timedelta64[D]"
), np.array(output_gwheads)
def get_gwheads_all(self, end_of_timestep=True, head_conversion_factor=1.0):
"""
Return the groundwater heads at all nodes in every aquifer
layer for the current simulation time step
Parameters
----------
end_of_timestep : bool, default=True
flag to specify if the groundwater heads are returned for
the beginning of the timestep or end of the time step
head_conversion_factor : float, default=1.0
factor to convert groundwater heads from simulation unit
of length to a desired unit of length
Returns
-------
np.ndarray
2-D array of heads (n_layers x n_nodes)
Note
----
This method is designed for use when is_for_inquiry=0 to return
the simulated groundwater heads after one time step is simulated
i.e. after calling the simulate_for_one_timestep method
See Also
--------
IWFMModel.get_gwheads_foralayer : Return the simulated groundwater heads for a single user-specified model layer for every model node over a user-specified time interval
IWFMModel.get_subsidence_all : Return the simulated subsidence at all nodes in every aquifer layer for the current simulation time step
Example
-------
>>> from pywfm import IWFMModel
>>> pp_file = '../Preprocessor/PreProcessor_MAIN.IN'
>>> sim_file = 'Simulation_MAIN.IN'
>>> model = IWFMModel(dll, pp_file, sim_file, is_for_inquiry=0)
>>> while not model.is_end_of_simulation():
... # advance the simulation time one time step forward
... model.advance_time()
...
... # read all time series data from input files
... model.read_timeseries_data()
...
... # Simulate the hydrologic process for the timestep
... model.simulate_for_one_timestep()
...
... # get groundwater heads
... heads = model.get_gwheads_all()
...
... # print the results to the user-specified output files
... model.print_results()
...
... # advance the state of the hydrologic system in time
... model.advance_state()
>>> print(heads)
[[290. 270.91608535 227.00867495 167.74555991 105.91475847
59.04810571 39.79054585 50.65187809 91.00726903 153.29920797
226.15115974 271.19315214 260.2607369 303.35660843 311.31169633
305.90957475 321.35463253 355.7188358 384.48837442 386.49402002
...
18.12576488 8.93821192 17.6193171 49.84626859 106.55261355
173.83027192 241.06147185 302.07195334 242.38004499 182.36353339
135.25658569 113.92664973 148.55304883 213.27613546 283.62446262
350. ]
[289.86590389 270.87158528 227.06977264 167.88058171 106.13198455
59.32285765 40.07646505 50.94091062 91.26247611 153.4724245
226.06276796 270.90750721 260.56535206 303.12285555 311.18101766
305.9841988 321.44444412 355.67939387 384.33129245 386.27373876
...
173.79083667 240.64972466 298.0367555 242.18629887 182.68916297
136.05482407 115.70455947 149.58589408 213.48004259 283.3592372
345.65879897]]
>>> model.kill()
>>> model.close_log_file()
"""
# check to see if IWFM procedure is available in user version of IWFM DLL
if not hasattr(self.dll, "IW_Model_GetGWHeads_All"):
raise AttributeError(
'IWFM API does not have "{}" procedure. '
"Check for an updated version".format("IW_Model_GetGWHeads_All")
)
if end_of_timestep:
previous = ctypes.c_int(0)
else:
previous = ctypes.c_int(1)
# convert head_conversion_factor to ctypes equivalent
head_conversion_factor = ctypes.c_double(head_conversion_factor)
# get number of model nodes
n_nodes = ctypes.c_int(self.get_n_nodes())
# get number of model layers
n_layers = ctypes.c_int(self.get_n_layers())
# initialize output variables
heads = ((ctypes.c_double * n_nodes.value) * n_layers.value)()
# set instance variable status to 0
status = ctypes.c_int(0)
self.dll.IW_Model_GetGWHeads_All(
ctypes.byref(n_nodes),
ctypes.byref(n_layers),
ctypes.byref(previous),
ctypes.byref(head_conversion_factor),
heads,
ctypes.byref(status),
)
return np.array(heads)
def get_subsidence_all(self, subsidence_conversion_factor=1.0):
"""
Return the simulated subsidence at all nodes in every aquifer
layer for the current simulation time step
Parameters
----------
subsidence_conversion_factor : float, default=1.0
factor to convert subsidence from simulation unit
of length to a desired unit of length
Returns
-------
np.ndarray
2-D array of subsidence at each node and layer (n_layers x n_nodes)
Note
----
This method is designed for use when is_for_inquiry=0 to return
the simulated subsidence after one time step is simulated
i.e. after calling the simulate_for_one_timestep method
See Also
--------
IWFMModel.get_gwheads_all : Return the groundwater heads at all nodes in every aquifer layer for the current simulation time step
Example
-------
>>> from pywfm import IWFMModel
>>> pp_file = '../Preprocessor/PreProcessor_MAIN.IN'
>>> sim_file = 'Simulation_MAIN.IN'
>>> model = IWFMModel(dll, pp_file, sim_file, is_for_inquiry=0)
>>> while not model.is_end_of_simulation():
... # advance the simulation time one time step forward
... model.advance_time()
...
... # read all time series data from input files
... model.read_timeseries_data()
...
... # Simulate the hydrologic process for the timestep
... model.simulate_for_one_timestep()
...
... # get subsidence
... subsidence = model.get_subsidence_all()
...
... # print the results to the user-specified output files
... model.print_results()
...
... # advance the state of the hydrologic system in time
... model.advance_state()
>>> print(subsidence)
[[-0.00000000e+00 -1.12421873e-06 -1.73373541e-06 -1.63445271e-06
-1.04725462e-06 -4.92948676e-07 -2.86274019e-07 -4.11426842e-07
-9.21177410e-07 -1.62634163e-06 -1.59144202e-06 -1.22135411e-07
3.85916107e-09 -1.56677111e-06 -5.15424348e-06 -8.17841866e-06
...
-1.36860631e-07 -3.07195572e-07 -3.52772869e-07 -2.18096043e-07
-8.84415247e-10 -3.02272008e-07 -5.16997563e-07 -5.97240436e-07
-6.66264783e-07 -7.44911097e-07 -6.84703993e-07 -4.14116606e-07
-0.00000000e+00]
[-1.77884442e-08 -2.07113403e-06 -3.81570268e-06 -4.87282031e-06
-4.94854603e-06 -4.18511495e-06 -3.61317621e-06 -4.07439096e-06
-5.06630654e-06 -5.30119974e-06 -3.51566730e-06 -2.43953427e-07
...
-3.77063898e-08 -6.31092635e-07 -1.42168088e-06 -2.20884863e-06
-2.89134140e-06 -2.49219032e-06 -1.60718472e-06 -7.37134674e-07
-4.80396324e-08]]
>>> model.kill()
>>> model.close_log_file()
"""
# check to see if IWFM procedure is available in user version of IWFM DLL
if not hasattr(self.dll, "IW_Model_GetSubsidence_All"):
raise AttributeError(
'IWFM API does not have "{}" procedure. '
"Check for an updated version".format("IW_Model_GetSubsidence_All")
)
# convert subsidence_conversion_factor to ctypes equivalent
subsidence_conversion_factor = ctypes.c_double(subsidence_conversion_factor)
# get number of model nodes
n_nodes = ctypes.c_int(self.get_n_nodes())
# get number of model layers
n_layers = ctypes.c_int(self.get_n_layers())
# initialize output variables
subsidence = ((ctypes.c_double * n_nodes.value) * n_layers.value)()
# set instance variable status to 0
status = ctypes.c_int(0)
self.dll.IW_Model_GetSubsidence_All(
ctypes.byref(n_nodes),
ctypes.byref(n_layers),
ctypes.byref(subsidence_conversion_factor),
subsidence,
ctypes.byref(status),
)
return np.array(subsidence)
def get_subregion_ag_pumping_average_depth_to_water(self):
"""
Return subregional depth-to-groundwater values that are
weighted-averaged with respect to agricultural pumping rates
during a model run
Returns
-------
np.ndarray
array of weighted-average depth to groundwater
Note
----
This method is intended to be used when is_for_inquiry=0 while performing a simulation
i.e. after calling IWFMModel.simulate_for_one_timestep
See Also
--------
IWFMModel.get_zone_ag_pumping_average_depth_to_water : Return zonal depth-to-groundwater values that are weighted-averaged with respect to agricultural pumping rates during a model run
Example
-------
>>> from pywfm import IWFMModel
>>> pp_file = '../Preprocessor/PreProcessor_MAIN.IN'
>>> sim_file = 'Simulation_MAIN.IN'
>>> model = IWFMModel(dll, pp_file, sim_file, is_for_inquiry=0)
>>> while not model.is_end_of_simulation():
... # advance the simulation time one time step forward
... model.advance_time()
...
... # read all time series data from input files
... model.read_timeseries_data()
...
... # Simulate the hydrologic process for the timestep
... model.simulate_for_one_timestep()
...
... # get subregion average depth to water
... avg_dtw = model.get_subregion_ag_pumping_average_depth_to_water()
... print(avg_dtw)
...
... # print the results to the user-specified output files
... model.print_results()
...
... # advance the state of the hydrologic system in time
... model.advance_state()
.
.
.
* TIME STEP 2 AT 10/02/1990_24:00
[-999. -999.]
* TIME STEP 3 AT 10/03/1990_24:00
[-999. -999.]
* TIME STEP 4 AT 10/04/1990_24:00
[-999. -999.]
...
* TIME STEP 3651 AT 09/28/2000_24:00
[ 266.03824182 -999. ]
* TIME STEP 3652 AT 09/29/2000_24:00
[ 266.19381051 -999. ]
* TIME STEP 3653 AT 09/30/2000_24:00
[ 266.34883635 -999. ]
>>> model.kill()
>>> model.close_log_file()
"""
# check to see if IWFM procedure is available in user version of IWFM DLL
if not hasattr(self.dll, "IW_Model_GetSubregionAgPumpingAverageDepthToGW"):
raise AttributeError(
'IWFM API does not have "{}" procedure. '
"Check for an updated version".format(
"IW_Model_GetSubregionAgPumpingAverageDepthToGW"
)
)
# get number of subregions in model
n_subregions = ctypes.c_int(self.get_n_subregions())
# initialize output variables
average_depth_to_groundwater = (ctypes.c_double * n_subregions.value)()
# set instance variable status to 0
status = ctypes.c_int(0)
self.dll.IW_Model_GetSubregionAgPumpingAverageDepthToGW(
ctypes.byref(n_subregions),
average_depth_to_groundwater,
ctypes.byref(status),
)
return np.array(average_depth_to_groundwater)
def get_zone_ag_pumping_average_depth_to_water(self, elements_list, zones_list):
"""
Return zonal depth-to-groundwater values that are
weighted-averaged with respect to agricultural pumping rates
during a model run
Parameters
----------
elements_list : list or np.ndarray
list of all elements corresponding to all zones where average depth to water is calculated
zones_list : list or np.ndarray
list of zone ids corresponding to each element in the elements list
Returns
-------
np.ndarray
average depth to water for each zone specified
Note
----
This method is intended to be used when is_for_inquiry=0 while performing a simulation
i.e. after calling IWFMModel.simulate_for_one_timestep
See Also
--------
IWFMModel.get_subregion_ag_pumping_average_depth_to_water : Return subregional depth-to-groundwater values that are weighted-averaged with respect to agricultural pumping rates during a model run
Example
-------
>>> from pywfm import IWFMModel
>>> pp_file = '../Preprocessor/PreProcessor_MAIN.IN'
>>> sim_file = 'Simulation_MAIN.IN'
>>> model = IWFMModel(dll, pp_file, sim_file, is_for_inquiry=0)
>>> while not model.is_end_of_simulation():
... # advance the simulation time one time step forward
... model.advance_time()
...
... # read all time series data from input files
... model.read_timeseries_data()
...
... # Simulate the hydrologic process for the timestep
... model.simulate_for_one_timestep()
...
... # get subregion average depth to water
... elements = model.get_element_ids()
... subregions = model.get_subregions_by_element()
... avg_dtw = model.get_zone_ag_pumping_average_depth_to_water(elements, subregions)
... print(avg_dtw)
...
... # print the results to the user-specified output files
... model.print_results()
...
... # advance the state of the hydrologic system in time
... model.advance_state()
.
.
.
* TIME STEP 2 AT 10/02/1990_24:00
[-999. 0.]
* TIME STEP 3 AT 10/03/1990_24:00
[-999. 0.]
* TIME STEP 4 AT 10/04/1990_24:00
[-999. 0.]
...
* TIME STEP 3651 AT 09/28/2000_24:00
[ 266.03824182 0. ]
* TIME STEP 3652 AT 09/29/2000_24:00
[ 266.19381051 0. ]
* TIME STEP 3653 AT 09/30/2000_24:00
[ 266.34883635 0. ]
>>> model.kill()
>>> model.close_log_file()
"""
# check to see if IWFM procedure is available in user version of IWFM DLL
if not hasattr(self.dll, "IW_Model_GetZoneAgPumpingAverageDepthToGW"):
raise AttributeError(
'IWFM API does not have "{}" procedure. '
"Check for an updated version".format(
"IW_Model_GetZoneAgPumpingAverageDepthToGW"
)
)
# if list convert to np.ndarray
if isinstance(elements_list, list):
elements_list = np.array(elements_list)
if isinstance(zones_list, list):
zones_list = np.array(zones_list)
if (elements_list.shape != zones_list.shape) | (len(elements_list.shape) != 1):
raise ValueError(
"elements_list and zone_list should be 1D" " arrays of the same length"
)
# get length of elements list and element zones list
len_elements_list = ctypes.c_int(len(elements_list))
# get list of zones and number
zones = np.unique(zones_list)
n_zones = ctypes.c_int(len(zones))
# convert elements_list to ctypes
elements_list = (ctypes.c_int * len_elements_list.value)(*elements_list)
# convert zones_list to ctypes
zones_list = (ctypes.c_int * len_elements_list.value)(*zones_list)
# initialize output variables
average_depth_to_groundwater = (ctypes.c_double * n_zones.value)()
# set instance variable status to 0
status = ctypes.c_int(0)
self.dll.IW_Model_GetZoneAgPumpingAverageDepthToGW(
ctypes.byref(len_elements_list),
elements_list,
zones_list,
ctypes.byref(n_zones),
average_depth_to_groundwater,
ctypes.byref(status),
)
return np.array(average_depth_to_groundwater)
def _get_n_locations(self, location_type_id):
"""
private method returning the number of locations for a specified location
type
Parameters
----------
location_type_id : int
identification number used by IWFM for a particular location type
e.g elements, nodes, subregions, etc.
Returns
-------
int
number of locations for the location type specified
Note
----
This is a generic version to get the number of locations. Many
location types already have a dedicated procedure for doing this
"""
# check to see if IWFM procedure is available in user version of IWFM DLL
if not hasattr(self.dll, "IW_Model_GetNLocations"):
raise AttributeError(
'IWFM API does not have "{}" procedure. '
"Check for an updated version".format("IW_Model_GetNLocations")
)
# convert location type id to ctypes
location_type_id = ctypes.c_int(location_type_id)
# initialize output variables
n_locations = ctypes.c_int(0)
# set instance variable status to 0
status = ctypes.c_int(0)
self.dll.IW_Model_GetNLocations(
ctypes.byref(location_type_id),
ctypes.byref(n_locations),
ctypes.byref(status),
)
return n_locations.value
def get_n_small_watersheds(self):
"""
Return the number of small watersheds specified in an IWFM
model
Returns
-------
int
number of small watersheds
See Also
--------
IWFMModel.get_n_nodes : Return the number of nodes in an IWFM model
IWFMModel.get_n_elements : Return the number of elements in an IWFM model
IWFMModel.get_n_subregions : Return the number of subregions in an IWFM model
IWFMModel.get_n_stream_nodes : Return the number of stream nodes in an IWFM model
IWFMModel.get_n_stream_reaches : Return the number of stream reaches in an IWFM model
IWFMModel.get_n_lakes : Return the number of lakes in an IWFM model
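Example
-------
Illustrative sketch only; the file paths are placeholders patterned on
the other examples in this class.
>>> from pywfm import IWFMModel
>>> pp_file = '../Preprocessor/PreProcessor_MAIN.IN'
>>> sim_file = 'Simulation_MAIN.IN'
>>> model = IWFMModel(pp_file, sim_file)
>>> n_small_watersheds = model.get_n_small_watersheds()
>>> model.kill()
>>> model.close_log_file()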
"""
location_type_id = self.get_location_type_id_smallwatershed()
return self._get_n_locations(location_type_id)
def _get_location_ids(self, location_type_id):
"""
private method returning the location identification numbers used by the
model for a specified location type
Parameters
----------
location_type_id : int
identification number used by IWFM for a particular location type
e.g elements, nodes, subregions, etc.
Returns
-------
np.ndarray
array of identification numbers
Note
----
This is a generic version to get the number of locations. Many
location types already have a dedicated procedure for doing this
"""
# check to see if IWFM procedure is available in user version of IWFM DLL
if not hasattr(self.dll, "IW_Model_GetLocationIDs"):
raise AttributeError(
'IWFM API does not have "{}" procedure. '
"Check for an updated version".format("IW_Model_GetLocationIDs")
)
# get number of locations of the given location type
n_locations = ctypes.c_int(self._get_n_locations(location_type_id))
# convert location_type_id to ctypes
location_type_id = ctypes.c_int(location_type_id)
# initialize output variables
location_ids = (ctypes.c_int * n_locations.value)()
# set instance variable status to 0
status = ctypes.c_int(0)
self.dll.IW_Model_GetLocationIDs(
ctypes.byref(location_type_id),
ctypes.byref(n_locations),
location_ids,
ctypes.byref(status),
)
return np.array(location_ids)
def get_small_watershed_ids(self):
"""
Return the small watershed identification numbers specified
in the IWFM model
Returns
-------
np.ndarray
integer array of small watershed identification numbers
See Also
--------
IWFMModel.get_node_ids : Return an array of node ids in an IWFM model
IWFMModel.get_element_ids : Return an array of element ids in an IWFM model
IWFMModel.get_subregion_ids : Return an array of IDs for subregions in an IWFM model
IWFMModel.get_stream_node_ids : Return an array of stream node IDs in an IWFM model
IWFMModel.get_stream_reach_ids : Return an array of stream reach IDs in an IWFM model
IWFMModel.get_lake_ids : Return an array of the lake IDs in an IWFM model
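Example
-------
Illustrative sketch only; assumes model is an IWFMModel instance opened
for inquiry as in the other examples in this class.
>>> small_watershed_ids = model.get_small_watershed_ids()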
"""
location_type_id = self.get_location_type_id_smallwatershed()
return self._get_location_ids(location_type_id)
def set_preprocessor_path(self, preprocessor_path):
"""
sets the path to the directory where the preprocessor main
input file is located
Parameters
----------
preprocessor_path : str
file path to where preprocessor main input file is stored
Returns
-------
None
internally sets the path of the preprocessor main input file
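Example
-------
Illustrative sketch only; the directory shown is a placeholder and model
is assumed to be an IWFMModel instance as in the other examples.
>>> model.set_preprocessor_path('../Preprocessor')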
"""
# check to see if IWFM procedure is available in user version of IWFM DLL
if not hasattr(self.dll, "IW_Model_SetPreProcessorPath"):
raise AttributeError(
'IWFM API does not have "{}" procedure. '
"Check for an updated version".format("IW_Model_SetPreProcessorPath")
)
# get length of preprocessor_path string
len_pp_path = ctypes.c_int(len(preprocessor_path))
# convert preprocessor path to ctypes character array
preprocessor_path = ctypes.create_string_buffer(
preprocessor_path.encode("utf-8")
)
# set instance variable status to 0
status = ctypes.c_int(0)
self.dll.IW_Model_SetPreProcessorPath(
ctypes.byref(len_pp_path), preprocessor_path, ctypes.byref(status)
)
def set_simulation_path(self, simulation_path):
"""
sets the path to the directory where the simulation main
input file is located
Parameters
----------
simulation_path : str
file path to where simulation main input file is stored
Returns
-------
None
internally sets the path of the simulation main input file
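Example
-------
Illustrative sketch only; the directory shown is a placeholder and model
is assumed to be an IWFMModel instance as in the other examples.
>>> model.set_simulation_path('../Simulation')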
"""
# check to see if IWFM procedure is available in user version of IWFM DLL
if not hasattr(self.dll, "IW_Model_SetSimulationPath"):
raise AttributeError(
'IWFM API does not have "{}" procedure. '
"Check for an updated version".format("IW_Model_SetSimulationPath")
)
# get length of simulation_path string
len_sim_path = ctypes.c_int(len(simulation_path))
# convert simulation path to ctypes character array
simulation_path = ctypes.create_string_buffer(simulation_path.encode("utf-8"))
# set instance variable status to 0
status = ctypes.c_int(0)
self.dll.IW_Model_SetSimulationPath(
ctypes.byref(len_sim_path), simulation_path, ctypes.byref(status)
)
def set_supply_adjustment_max_iterations(self, max_iterations):
"""
sets the maximum number of iterations that will be used in
automatic supply adjustment
Parameters
----------
max_iterations : int
maximum number of iterations for automatic supply adjustment
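Example
-------
Illustrative sketch only; the iteration count is arbitrary and model is
assumed to be an IWFMModel instance created with is_for_inquiry=0.
>>> model.set_supply_adjustment_max_iterations(20)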
"""
# check to see if IWFM procedure is available in user version of IWFM DLL
if not hasattr(self.dll, "IW_Model_SetSupplyAdjustmentMaxIters"):
raise AttributeError(
'IWFM API does not have "{}" procedure. '
"Check for an updated version".format(
"IW_Model_SetSupplyAdjustmentMaxIters"
)
)
# convert max_iterations to ctypes
max_iterations = ctypes.c_int(max_iterations)
# set instance variable status to 0
status = ctypes.c_int(0)
self.dll.IW_Model_SetSupplyAdjustmentMaxIters(
ctypes.byref(max_iterations), ctypes.byref(status)
)
def set_supply_adjustment_tolerance(self, tolerance):
"""
sets the tolerance, given as a fraction of the water demand
that will be used in automatic supply adjustment
Parameters
----------
tolerance : float
fraction of water demand used as the convergence criteria
for iterative supply adjustment
Note
----
When the automatic supply adjustment feature of IWFM is turned
on, IWFM iteratively tries to adjust water supplies (diversions,
pumping or both based on user defined specifications) to meet
the water demand. When the difference between water supply and
demand is less than the tolerance, IWFM assumes equivalency
between demand and supply, and terminates supply adjustment
iterations.
0.01 represents 1% of the demand
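Example
-------
Illustrative sketch only; sets the convergence criterion to 1% of demand
and assumes model is an IWFMModel instance created with is_for_inquiry=0.
>>> model.set_supply_adjustment_tolerance(0.01)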
"""
# check to see if IWFM procedure is available in user version of IWFM DLL
if not hasattr(self.dll, "IW_Model_SetSupplyAdjustmentTolerance"):
raise AttributeError(
'IWFM API does not have "{}" procedure. '
"Check for an updated version".format(
"IW_Model_SetSupplyAdjustmentTolerance"
)
)
# convert tolerance to ctypes
tolerance = ctypes.c_double(tolerance)
# set instance variable status to 0
status = ctypes.c_int(0)
self.dll.IW_Model_SetSupplyAdjustmentTolerance(
ctypes.byref(tolerance), ctypes.byref(status)
)
def delete_inquiry_data_file(self):
"""
deletes the binary file, IW_ModelData_ForInquiry.bin,
generated by the IWFM DLL when the Model Object is instantiated
Note
----
When this binary file exists, the entire Model Object is not created
when the IWFMModel object is created, so not all functionality is available
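Example
-------
Illustrative sketch only; assumes model is an IWFMModel instance as in
the other examples in this class.
>>> model.delete_inquiry_data_file()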
"""
# check to see if IWFM procedure is available in user version of IWFM DLL
if not hasattr(self.dll, "IW_Model_DeleteInquiryDataFile"):
raise AttributeError(
'IWFM API does not have "{}" procedure. '
"Check for an updated version".format("IW_Model_DeleteInquiryDataFile")
)
# convert simulation file name to ctypes
simulation_file_name = ctypes.create_string_buffer(
self.simulation_file_name.encode("utf-8")
)
length_simulation_file_name = ctypes.c_int(ctypes.sizeof(simulation_file_name))
# set instance variable status to 0
status = ctypes.c_int(0)
self.dll.IW_Model_DeleteInquiryDataFile(
ctypes.byref(length_simulation_file_name),
simulation_file_name,
ctypes.byref(status),
)
def simulate_for_one_timestep(self):
"""
simulates a single timestep of the model application
Note
----
This method is intended to be used when is_for_inquiry=0
"""
# check to see if IWFM procedure is available in user version of IWFM DLL
if not hasattr(self.dll, "IW_Model_SimulateForOneTimeStep"):
raise AttributeError(
'IWFM API does not have "{}" procedure. '
"Check for an updated version".format("IW_Model_SimulateForOneTimeStep")
)
# set instance variable status to 0
status = ctypes.c_int(0)
self.dll.IW_Model_SimulateForOneTimeStep(ctypes.byref(status))
def simulate_for_an_interval(self, time_interval):
"""
simulates the model application for a specified time interval
Parameters
----------
time_interval : str
valid IWFM time interval greater than or equal to simulation
time step
Note
----
This method is intended to be used when is_for_inquiry=0 during
a model simulation
specified time interval must be greater than or equal to the simulation time step
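Example
-------
Illustrative sketch only; '1MON' stands in for a valid IWFM time interval
and model is assumed to be an IWFMModel instance created with
is_for_inquiry=0.
>>> model.simulate_for_an_interval('1MON')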
"""
# check to see if IWFM procedure is available in user version of IWFM DLL
if not hasattr(self.dll, "IW_Model_SimulateForAnInterval"):
raise AttributeError(
'IWFM API does not have "{}" procedure. '
"Check for an updated version".format("IW_Model_SimulateForAnInterval")
)
# get simulation time_interval
simulation_time_interval = self.get_time_specs()[-1]
# determine if time_interval is greater than or equal to
# simulation_time_interval
if not self._is_time_interval_greater_or_equal(
time_interval, simulation_time_interval
):
raise ValueError(
"time interval must be greater than or "
"equal to simulation time interval"
)
# convert time_interval to ctypes
time_interval = ctypes.create_string_buffer(time_interval.encode("utf-8"))
# get length of time interval
len_time_interval = ctypes.c_int(ctypes.sizeof(time_interval))
# set instance variable status to 0
status = ctypes.c_int(0)
self.dll.IW_Model_SimulateForAnInterval(
ctypes.byref(len_time_interval), time_interval, ctypes.byref(status)
)
def simulate_all(self):
"""
performs all of the computations for the entire simulation
period
Note
----
This method is intended to be used when is_for_inquiry=0 during
a model simulation
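Example
-------
Illustrative sketch only; assumes model is an IWFMModel instance created
with is_for_inquiry=0 as in the other examples in this class.
>>> model.simulate_all()
>>> model.kill()
>>> model.close_log_file()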
"""
# check to see if IWFM procedure is available in user version of IWFM DLL
if not hasattr(self.dll, "IW_Model_SimulateAll"):
raise AttributeError(
'IWFM API does not have "{}" procedure. '
"Check for an updated version".format("IW_Model_SimulateAll")
)
# set instance variable status to 0
status = ctypes.c_int(0)
self.dll.IW_Model_SimulateAll(ctypes.byref(status))
def advance_time(self):
"""
advances the simulation time step by one simulation time step
Note
----
This method is intended to be used when is_for_inquiry=0 during
a model simulation
"""
# check to see if IWFM procedure is available in user version of IWFM DLL
if not hasattr(self.dll, "IW_Model_AdvanceTime"):
raise AttributeError(
'IWFM API does not have "{}" procedure. '
"Check for an updated version".format("IW_Model_AdvanceTime")
)
# set instance variable status to 0
status = ctypes.c_int(0)
self.dll.IW_Model_AdvanceTime(ctypes.byref(status))
def read_timeseries_data(self):
"""
reads in all of the time series data for the current
simulation time step
Note
----
This method is intended to be used when is_for_inquiry=0 during
a model simulation
See Also
--------
IWFMModel.read_timeseries_data_overwrite : reads time series data for the current simulation time step and allows overwriting certain time series data
"""
# check to see if IWFM procedure is available in user version of IWFM DLL
if not hasattr(self.dll, "IW_Model_ReadTSData"):
raise AttributeError(
'IWFM API does not have "{}" procedure. '
"Check for an updated version".format("IW_Model_ReadTSData")
)
# set instance variable status to 0
status = ctypes.c_int(0)
self.dll.IW_Model_ReadTSData(ctypes.byref(status))
def read_timeseries_data_overwrite(
self,
land_use_areas,
diversion_ids,
diversions,
stream_inflow_ids,
stream_inflows,
):
"""
reads time series data for the current simulation time step and allows overwriting certain time series data
Parameters
----------
land_use_areas : list or np.ndarray
subregional land use areas to be overwritten for the current
time step; the order is non-ponded first, then ponded, then urban,
then native and riparian
diversion_ids : list or np.ndarray
diversion identification numbers to be overwritten
diversions : list or np.ndarray
diversion amounts to overwrite for each diversion
identification number provided. must be same length
as diversion_ids
stream_inflow_ids : list or np.ndarray
stream inflow indices where boundary inflows will be
overwritten
stream_inflows : list or np.ndarray
stream inflow amounts to be overwritten for each stream
flow index provided. Must be the same length as
stream_inflow_ids
Returns
-------
None
Note
----
This method is intended to be used when is_for_inquiry=0 during
a model simulation
See Also
--------
IWFMModel.read_timeseries_data : reads in all of the time series data for the current simulation time step
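Example
-------
Illustrative sketch of how the arguments line up; the diversion ID and
rate are hypothetical, and the call is assumed to replace
read_timeseries_data inside the simulation loop of a model created with
is_for_inquiry=0.
>>> model.read_timeseries_data_overwrite(
...     land_use_areas=None,
...     diversion_ids=[1],
...     diversions=[50.0],
...     stream_inflow_ids=None,
...     stream_inflows=None,
... )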
"""
# check to see if IWFM procedure is available in user version of IWFM DLL
if not hasattr(self.dll, "IW_Model_ReadTSData_Overwrite"):
raise AttributeError(
'IWFM API does not have "{}" procedure. '
"Check for an updated version".format("IW_Model_ReadTSData_Overwrite")
)
if land_use_areas is None:
n_landuses = ctypes.c_int(0)
n_subregions = ctypes.c_int(0)
else:
# get number of land uses
n_landuses = ctypes.c_int(self.get_n_ag_crops() + 3)
# get number of subregions
n_subregions = ctypes.c_int(self.get_n_subregions())
# check that land_use_areas is n_subregions by n_landuses
if isinstance(land_use_areas, list):
land_use_areas = np.array(land_use_areas)
if land_use_areas.shape != (n_subregions.value, n_landuses.value):
raise ValueError(
"land_use areas must be provided for "
"each land use and subregion in the model"
)
# convert land_use_areas to ctypes
land_use_array = ((ctypes.c_double * n_subregions.value) * n_landuses.value)()
for i, row in enumerate(land_use_areas):
land_use_array[i][:] = row
# check that diversion_ids are valid
# if either diversion_ids or diversions are None treat both as None.
if diversion_ids is None or diversions is None:
n_diversions = ctypes.c_int(0)
else:
# check that diversion_ids are provided as correct data type
if not isinstance(diversion_ids, (np.ndarray, list)):
raise TypeError(
"diversion_ids must be provided as a list or np.ndarray"
)
# check that diversions are provided as the correct data type
if not isinstance(diversions, (np.ndarray, list)):
raise TypeError("diversions must be provided as a list or np.ndarray")
# get diversion_ids specified in the model input files
model_diversion_ids = self.get_diversion_ids()
# if provided as a list, convert to a np.ndarray
if isinstance(diversion_ids, list):
diversion_ids = np.array(diversion_ids)
if isinstance(diversions, list):
diversions = np.array(diversions)
# check that all diversion_ids provided are valid model diversion ids
if not np.all(np.isin(diversion_ids, model_diversion_ids)):
raise ValueError(
"diversion_ids contains diversion "
"identification number not found in the model"
)
# check diversion and diversion_ids are the same length
if (diversion_ids.shape != diversions.shape) or (
len(diversion_ids.shape) != 1
):
raise ValueError(
"diversion_ids and diversions must be 1D arrays of the same length"
)
# get the number of diversions
n_diversions = ctypes.c_int(len(diversion_ids))
# convert diversion_ids and diversion to ctypes
diversion_ids = (ctypes.c_int * n_diversions.value)(*diversion_ids)
diversions = (ctypes.c_double * n_diversions.value)(*diversions)
# check that stream_inflow_ids are valid
# if either stream_inflow_ids or stream_inflows are None treat both as None.
if stream_inflow_ids is None or stream_inflows is None:
n_stream_inflows = ctypes.c_int(0)
else:
# check that stream_inflow_ids are provided as the correct data type
if not isinstance(stream_inflow_ids, (np.ndarray, list)):
raise TypeError(
"stream_inflow_ids must be provided as a list or np.ndarray"
)
# check that stream_inflows are provided as the correct data type
if not isinstance(stream_inflows, (np.ndarray, list)):
raise TypeError(
"stream_inflows must be provided as a list or np.ndarray"
)
model_stream_inflow_ids = self.get_stream_inflow_ids()
# if provided as a list, convert to a np.ndarray
if isinstance(stream_inflow_ids, list):
stream_inflow_ids = np.array(stream_inflow_ids)
if isinstance(stream_inflows, list):
stream_inflows = np.array(stream_inflows)
# check that all stream_inflow_ids provided are valid model stream inflow ids
if not np.all(np.isin(stream_inflow_ids, model_stream_inflow_ids)):
raise ValueError(
"stream_inflow_ids contains stream inflow "
"identification numbers not found in the model"
)
# check stream_inflows and stream_inflow_ids are the same length
if (stream_inflow_ids.shape != stream_inflows.shape) or (
len(stream_inflow_ids.shape) != 1
):
raise ValueError(
"stream_inflow_ids and stream_inflows "
"must be 1D arrays of the same length"
)
# get the number of stream inflows
n_stream_inflows = ctypes.c_int(len(stream_inflow_ids))
# convert stream_inflow_ids and stream_inflows to ctypes
stream_inflow_ids = (ctypes.c_int * n_stream_inflows.value)(*stream_inflow_ids)
stream_inflows = (ctypes.c_double * n_stream_inflows.value)(*stream_inflows)
# set instance variable status to 0
status = ctypes.c_int(0)
self.dll.IW_Model_ReadTSData_Overwrite(
ctypes.byref(n_landuses),
ctypes.byref(n_subregions),
land_use_array,
ctypes.byref(n_diversions),
diversion_ids,
diversions,
ctypes.byref(n_stream_inflows),
stream_inflow_ids,
stream_inflows,
ctypes.byref(status),
)
def print_results(self):
"""
prints out all the simulation results at the end of a
simulation
"""
# check to see if IWFM procedure is available in user version of IWFM DLL
if not hasattr(self.dll, "IW_Model_PrintResults"):
raise AttributeError(
'IWFM API does not have "{}" procedure. '
"Check for an updated version".format("IW_Model_PrintResults")
)
# set instance variable status to 0
status = ctypes.c_int(0)
self.dll.IW_Model_PrintResults(ctypes.byref(status))
def advance_state(self):
"""
advances the state of the hydrologic system in time (e.g.
groundwater heads at current timestep are switched to
groundwater heads at previous timestep) during a model run
"""
# check to see if IWFM procedure is available in user version of IWFM DLL
if not hasattr(self.dll, "IW_Model_AdvanceState"):
raise AttributeError(
'IWFM API does not have "{}" procedure. '
"Check for an updated version".format("IW_Model_AdvanceState")
)
# set instance variable status to 0
status = ctypes.c_int(0)
self.dll.IW_Model_AdvanceState(ctypes.byref(status))
def is_stream_upstream_node(self, stream_node_1, stream_node_2):
"""
checks if a specified stream node is located upstream from
another specified stream node within the stream network of the
IWFM model
Parameters
----------
stream_node_1 : int
stream node being checked if it is upstream of stream_node_2
stream_node_2 : int
stream node used to determine if stream_node_1 is upstream
Returns
-------
bool
True if stream_node_1 is upstream of stream_node_2
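Example
-------
Illustrative sketch only; the stream node numbers are hypothetical and
model is assumed to be an IWFMModel instance as in the other examples.
>>> is_upstream = model.is_stream_upstream_node(1, 10)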
"""
# check to see if IWFM procedure is available in user version of IWFM DLL
if not hasattr(self.dll, "IW_Model_IsStrmUpstreamNode"):
raise AttributeError(
'IWFM API does not have "{}" procedure. '
"Check for an updated version".format("IW_Model_IsStrmUpstreamNode")
)
# convert stream_node_1 and stream_node_2 to ctypes
stream_node_1 = ctypes.c_int(stream_node_1)
stream_node_2 = ctypes.c_int(stream_node_2)
# initialize output variables
is_upstream = ctypes.c_int(0)
# set instance variable status to 0
status = ctypes.c_int(0)
self.dll.IW_Model_IsStrmUpstreamNode(
ctypes.byref(stream_node_1),
ctypes.byref(stream_node_2),
ctypes.byref(is_upstream),
ctypes.byref(status),
)
if is_upstream.value == 1:
return True
else:
return False
def is_end_of_simulation(self):
"""
check if the end of simulation period has been reached during a model run
Returns
-------
bool
True if end of simulation period otherwise False
"""
# check to see if IWFM procedure is available in user version of IWFM DLL
if not hasattr(self.dll, "IW_Model_IsEndOfSimulation"):
raise AttributeError(
'IWFM API does not have "{}" procedure. '
"Check for an updated version".format("IW_Model_IsEndOfSimulation")
)
# initialize output variables
is_end_of_simulation = ctypes.c_int(0)
# set instance variable status to 0
status = ctypes.c_int(0)
self.dll.IW_Model_IsEndOfSimulation(
ctypes.byref(is_end_of_simulation), ctypes.byref(status)
)
if is_end_of_simulation.value == 1:
return True
else:
return False
def is_model_instantiated(self):
"""
check if a Model object is instantiated
Returns
-------
bool
True if model object is instantiated otherwise False
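Example
-------
Illustrative sketch only; assumes model is an IWFMModel instance as in
the other examples in this class.
>>> instantiated = model.is_model_instantiated()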
"""
# check to see if IWFM procedure is available in user version of IWFM DLL
if not hasattr(self.dll, "IW_Model_IsModelInstantiated"):
raise AttributeError(
'IWFM API does not have "{}" procedure. '
"Check for an updated version".format("IW_Model_IsModelInstantiated")
)
# initialize output variables
is_instantiated = ctypes.c_int(0)
# set instance variable status to 0
status = ctypes.c_int(0)
self.dll.IW_Model_IsModelInstantiated(
ctypes.byref(is_instantiated), ctypes.byref(status)
)
if is_instantiated.value == 1:
return True
else:
return False
def turn_supply_adjustment_on_off(
self, diversion_adjustment_flag, pumping_adjustment_flag
):
"""
turns the automatic supply adjustment of diversions and
pumping to meet agricultural and/or urban water demands on or
off during a model run
Parameters
----------
diversion_adjustment_flag : int, 0 or 1 only
1 - turn diversion supply adjustment on
0 - turn diversion supply adjustment off
pumping_adjustment_flag : int, 0 or 1 only
1 - turn pumping supply adjustment on
0 - turn pumping supply adjustment off
Returns
-------
None
updates global supply adjustment flags for diversions and
pumping
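Example
-------
Illustrative sketch only; turns diversion adjustment on and pumping
adjustment off, assuming model is an IWFMModel instance created with
is_for_inquiry=0.
>>> model.turn_supply_adjustment_on_off(1, 0)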
"""
# check to see if IWFM procedure is available in user version of IWFM DLL
if not hasattr(self.dll, "IW_Model_TurnSupplyAdjustOnOff"):
raise AttributeError(
'IWFM API does not have "{}" procedure. '
"Check for an updated version".format("IW_Model_TurnSupplyAdjustOnOff")
)
if diversion_adjustment_flag not in [0, 1]:
raise ValueError(
"diversion_adjustment_flag must be 0 or 1 "
"to turn diversion adjustment on use 1 "
"to turn diversion adjustment off use 0."
)
if pumping_adjustment_flag not in [0, 1]:
raise ValueError(
"diversion_adjustment_flag must be 0 or 1 "
"to turn diversion adjustment on use 1 "
"to turn diversion adjustment off use 0."
)
# convert adjustment flags to ctypes
diversion_adjustment_flag = ctypes.c_int(diversion_adjustment_flag)
pumping_adjustment_flag = ctypes.c_int(pumping_adjustment_flag)
# set instance variable status to 0
status = ctypes.c_int(0)
self.dll.IW_Model_TurnSupplyAdjustOnOff(
ctypes.byref(diversion_adjustment_flag),
ctypes.byref(pumping_adjustment_flag),
ctypes.byref(status),
)
def restore_pumping_to_read_values(self):
"""
restores the pumping rates to the values read from the
Pumping Rate input file during a model run.
Returns
-------
None
internally restores pumping to values read from the input file
Note
----
This procedure is useful when it is necessary to re-simulate
the hydrologic system
(e.g. when IWFM is linked to a reservoir operations model in
an iterative fashion) at a given timestep with pumping
adjustment is on and the pumping values need to be restored
to their original values
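Example
-------
Illustrative sketch only; assumes model is an IWFMModel instance created
with is_for_inquiry=0 and that the current timestep is being re-simulated.
>>> model.restore_pumping_to_read_values()
>>> model.simulate_for_one_timestep()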
"""
# check to see if IWFM procedure is available in user version of IWFM DLL
if not hasattr(self.dll, "IW_Model_RestorePumpingToReadValues"):
raise AttributeError(
'IWFM API does not have "{}" procedure. '
"Check for an updated version".format(
"IW_Model_RestorePumpingToReadValues"
)
)
# set instance variable status to 0
status = ctypes.c_int(0)
self.dll.IW_Model_RestorePumpingToReadValues(ctypes.byref(status))
### methods that wrap two or more DLL calls
def get_groundwater_hydrograph_info(self):
"""
Return model information for the groundwater hydrographs,
including hydrograph ID, x- and y- coordinates, name, and
stratigraphy.
Returns
-------
pd.DataFrame
columns: ID, Name, X, Y, GSE, BTM_Lay1, BTM_Lay2, ..., BTM_Layn
See Also
--------
IWFMModel.get_groundwater_hydrograph_ids : Return the IDs for the groundwater hydrographs specified in an IWFM model
IWFMModel.get_groundwater_hydrograph_coordinates : Return the x,y-coordinates for the groundwater hydrographs specified in an IWFM model
IWFMModel.get_groundwater_hydrograph_names : Return the groundwater hydrograph location names specified in an IWFM model
IWFMModel.get_stratigraphy_atXYcoordinate : Return the stratigraphy at given X,Y coordinates
Example
-------
>>> from pywfm import IWFMModel
>>> pp_file = '../Preprocessor/PreProcessor_MAIN.IN'
>>> sim_file = 'Simulation_MAIN.IN'
>>> model = IWFMModel(pp_file, sim_file)
>>> model.get_groundwater_hydrograph_info()
ID Name X Y GSE BTM_Lay1 BTM_Lay2
0 1 GWHyd1 1883179.2 14566752.0 500.0 0.0 -100.0
1 2 GWHyd2 1883179.2 14560190.4 500.0 0.0 -100.0
2 3 GWHyd3 1883179.2 14553628.8 500.0 0.0 -100.0
3 4 GWHyd4 1883179.2 14547067.2 500.0 0.0 -100.0
4 5 GWHyd5 1883179.2 14540505.6 500.0 0.0 -100.0
5 6 GWHyd6 1883179.2 14533944.0 500.0 0.0 -100.0
6 7 GWHyd7 1883179.2 14527382.4 500.0 0.0 -100.0
7 8 GWHyd8 1883179.2 14520820.8 500.0 0.0 -100.0
8 9 GWHyd9 1883179.2 14514259.2 500.0 0.0 -100.0
9 10 GWHyd10 1883179.2 14507697.6 500.0 0.0 -100.0
10 11 GWHyd11 1883179.2 14501136.0 500.0 0.0 -110.0
11 12 GWHyd12 1883179.2 14494574.4 500.0 0.0 -110.0
12 13 GWHyd13 1883179.2 14488012.8 500.0 0.0 -110.0
13 14 GWHyd14 1883179.2 14481451.2 500.0 0.0 -110.0
14 15 GWHyd15 1883179.2 14474889.6 500.0 0.0 -110.0
15 16 GWHyd16 1883179.2 14468328.0 500.0 0.0 -110.0
16 17 GWHyd17 1883179.2 14461766.4 500.0 0.0 -110.0
17 18 GWHyd18 1883179.2 14455204.8 500.0 0.0 -110.0
18 19 GWHyd19 1883179.2 14448643.2 500.0 0.0 -110.0
19 20 GWHyd20 1883179.2 14442081.6 500.0 0.0 -110.0
20 21 GWHyd21 1883179.2 14435520.0 500.0 0.0 -110.0
21 22 GWHyd22 1883179.2 14566752.0 500.0 0.0 -100.0
22 23 GWHyd23 1883179.2 14560190.4 500.0 0.0 -100.0
23 24 GWHyd24 1883179.2 14553628.8 500.0 0.0 -100.0
24 25 GWHyd25 1883179.2 14547067.2 500.0 0.0 -100.0
25 26 GWHyd26 1883179.2 14540505.6 500.0 0.0 -100.0
26 27 GWHyd27 1883179.2 14533944.0 500.0 0.0 -100.0
27 28 GWHyd28 1883179.2 14527382.4 500.0 0.0 -100.0
28 29 GWHyd29 1883179.2 14520820.8 500.0 0.0 -100.0
29 30 GWHyd30 1883179.2 14514259.2 500.0 0.0 -100.0
30 31 GWHyd31 1883179.2 14507697.6 500.0 0.0 -100.0
31 32 GWHyd32 1883179.2 14501136.0 500.0 0.0 -110.0
32 33 GWHyd33 1883179.2 14494574.4 500.0 0.0 -110.0
33 34 GWHyd34 1883179.2 14488012.8 500.0 0.0 -110.0
34 35 GWHyd35 1883179.2 14481451.2 500.0 0.0 -110.0
35 36 GWHyd36 1883179.2 14474889.6 500.0 0.0 -110.0
36 37 GWHyd37 1883179.2 14468328.0 500.0 0.0 -110.0
37 38 GWHyd38 1883179.2 14461766.4 500.0 0.0 -110.0
38 39 GWHyd39 1883179.2 14455204.8 500.0 0.0 -110.0
39 40 GWHyd40 1883179.2 14448643.2 500.0 0.0 -110.0
40 41 GWHyd41 1883179.2 14442081.6 500.0 0.0 -110.0
41 42 GWHyd42 1883179.2 14435520.0 500.0 0.0 -110.0
>>> model.kill()
>>> model.close_log_file()
"""
hydrograph_ids = self.get_groundwater_hydrograph_ids()
(
hydrograph_x_coord,
hydrograph_y_coord,
) = self.get_groundwater_hydrograph_coordinates()
hydrograph_names = self.get_groundwater_hydrograph_names()
df = pd.DataFrame(
{
"ID": hydrograph_ids,
"Name": hydrograph_names,
"X": hydrograph_x_coord,
"Y": hydrograph_y_coord,
}
)
columns = ["GSE"] + [
"BTM_Lay{}".format(layer + 1) for layer in range(self.get_n_layers())
]
func = lambda row: self.get_stratigraphy_atXYcoordinate(row["X"], row["Y"], 1.0)
df[columns] = df.apply(func, axis=1, result_type="expand")
return df
def get_node_info(self):
"""
Return node id, x-, and y-coordinates for each node in an IWFM model
Returns
-------
pd.DataFrame
DataFrame containing IDs, x-coordinates, and y-coordinates for all nodes in an IWFM model
See Also
--------
IWFMModel.get_n_nodes : Return the number of nodes in an IWFM model
IWFMModel.get_node_ids : Return an array of node IDs in an IWFM model
IWFMModel.get_node_coordinates : Return the x,y coordinates of the nodes in an IWFM model
Example
-------
>>> from pywfm import IWFMModel
>>> pp_file = '../Preprocessor/PreProcessor_MAIN.IN'
>>> sim_file = 'Simulation_MAIN.IN'
>>> model = IWFMModel(pp_file, sim_file)
>>> model.get_node_info()
NodeID X Y
0 1 1804440.0 14435520.0
1 2 1811001.6 14435520.0
2 3 1817563.2 14435520.0
3 4 1824124.8 14435520.0
4 5 1830686.4 14435520.0
... ... ... ...
436 437 1909425.6 14566752.0
437 438 1915987.2 14566752.0
438 439 1922548.8 14566752.0
439 440 1929110.4 14566752.0
440 441 1935672.0 14566752.0
>>> model.kill()
>>> model.close_log_file()
"""
# get array of node ids
node_ids = self.get_node_ids()
# get arrays of x- and y- coordinates for each node id
x, y = self.get_node_coordinates()
# create DataFrame object to manage node info
node_info = pd.DataFrame({"NodeID": node_ids, "X": x, "Y": y})
return node_info
def get_element_info(self):
"""
Return element configuration information for all
elements in an IWFM model
Returns
-------
pd.DataFrame
DataFrame containing subregion IDs, node order, and node IDs for each element ID
See Also
--------
IWFMModel.get_n_elements : Return the number of elements in an IWFM model
IWFMModel.get_element_ids : Return an array of element IDs in an IWFM model
IWFMModel.get_element_config : Return an array of node IDs for an IWFM element
IWFMModel.get_subregions_by_element : Return an array of IWFM elements contained in each subregion
Example
-------
>>> from pywfm import IWFMModel
>>> pp_file = '../Preprocessor/PreProcessor_MAIN.IN'
>>> sim_file = 'Simulation_MAIN.IN'
>>> model = IWFMModel(pp_file, sim_file)
>>> model.get_element_info()
IE SR NodeNum NodeID
0 1 1 Node1 1
1 1 1 Node2 2
2 1 1 Node3 23
3 1 1 Node4 22
4 2 1 Node1 2
... ... ... ... ...
1595 399 2 Node4 439
1596 400 2 Node1 419
1597 400 2 Node2 420
1598 400 2 Node3 441
1599 400 2 Node4 440
>>> model.kill()
>>> model.close_log_file()
"""
df = pd.DataFrame({"IE": self.get_element_ids()})
# generate column names for node id configuration
columns = ["Node{}".format(i + 1) for i in range(4)]
df[columns] = df.apply(
lambda row: self.get_element_config(row["IE"]), axis=1, result_type="expand"
)
df["SR"] = self.get_subregions_by_element()
stacked_df = df.set_index(["IE", "SR"]).stack().reset_index()
stacked_df.rename(columns={"level_2": "NodeNum", 0: "NodeID"}, inplace=True)
return stacked_df[stacked_df["NodeID"] != 0]
def get_boundary_nodes(self, subregions=False, remove_duplicates=False):
"""
Return nodes that make up the boundary of an IWFM model
Parameters
----------
subregions : boolean, default=False
if True will return the nodes for the model subregion boundaries
if False will return the nodes for the model boundary
remove_duplicates : boolean, default=False
if True will return only the unique nodes
if False will return the start and end nodes
Returns
-------
pd.DataFrame
DataFrame of Node IDs for the model boundary
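        Note
        ----
        Boundary segments are identified by counting how many elements share each
        pair of adjacent nodes: interior edges appear in two elements, while edges
        on the model (or subregion) boundary appear in only one.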
"""
element_segments = self.get_element_info()
# add columns to dataframe
element_segments["start_node"] = element_segments["NodeID"]
element_segments["end_node"] = 0
element_segments["count"] = 0
# update end_node column with values for each element
for element in element_segments["IE"].unique():
element_nodes = element_segments[element_segments["IE"] == element][
"NodeID"
].to_numpy()
element_segments.loc[
element_segments["IE"] == element, "end_node"
] = np.roll(element_nodes, -1, axis=0)
# duplicate start_node and end_node
element_segments["orig_start_node"] = element_segments["start_node"]
element_segments["orig_end_node"] = element_segments["end_node"]
# order start_nodes and end_nodes low to high
condition = element_segments["start_node"] > element_segments["end_node"]
element_segments.loc[
condition, ["start_node", "end_node"]
] = element_segments.loc[condition, ["end_node", "start_node"]].values
if not subregions:
            # count segments: interior segments are shared by two elements (count == 2),
            # while boundary segments belong to only one element (count == 1)
grouped = (
element_segments.groupby(["start_node", "end_node"])["count"]
.count()
.reset_index()
)
# filter only the edge segments with count = 1
boundary_nodes = grouped[grouped["count"] == 1][["start_node", "end_node"]]
if remove_duplicates:
# organize nodes in single column and remove duplicates
boundary_nodes = (
boundary_nodes.stack()
.reset_index()
.drop(["level_0", "level_1"], axis=1)
)
boundary_nodes.rename(columns={0: "NodeID"}, inplace=True)
boundary_nodes.drop_duplicates("NodeID", inplace=True)
return boundary_nodes
return pd.merge(
element_segments, boundary_nodes, on=["start_node", "end_node"]
)[["orig_start_node", "orig_end_node"]]
else:
            # count segments: interior segments are shared by two elements (count == 2),
            # while boundary segments belong to only one element (count == 1)
grouped = (
element_segments.groupby(["SR", "start_node", "end_node"])["count"]
.count()
.reset_index()
)
# filter only the edge segments with count = 1
boundary_nodes = grouped[grouped["count"] == 1][
["SR", "start_node", "end_node"]
]
if remove_duplicates:
# organize nodes in single column and remove duplicates
boundary_nodes = (
boundary_nodes.set_index("SR", append=True)
.stack()
.reset_index()
.drop(["level_0", "level_2"], axis=1)
)
boundary_nodes.rename(columns={0: "NodeID"}, inplace=True)
boundary_nodes.drop_duplicates("NodeID", inplace=True)
return boundary_nodes
return pd.merge(
element_segments, boundary_nodes, on=["SR", "start_node", "end_node"]
)[["SR", "orig_start_node", "orig_end_node"]]
def get_element_spatial_info(self):
"""
Return element configuration information including x-y
coordinates for nodes
Returns
-------
pd.DataFrame
DataFrame containing element IDs, Subregions, NodeID for each element with x-y coordinates
See Also
--------
IWFMModel.get_element_info : Return element configuration information for all elements in an IWFM model
IWFMModel.get_node_info : Return node id, x-, and y-coordinates for each node in an IWFM model
Example
-------
>>> from pywfm import IWFMModel
>>> pp_file = '../Preprocessor/PreProcessor_MAIN.IN'
>>> sim_file = 'Simulation_MAIN.IN'
>>> model = IWFMModel(pp_file, sim_file)
>>> model.get_element_spatial_info()
IE SR NodeNum NodeID X Y
0 1 1 Node1 1 1804440.0 14435520.0
1 1 1 Node2 2 1811001.6 14435520.0
2 1 1 Node3 23 1811001.6 14442081.6
3 1 1 Node4 22 1804440.0 14442081.6
4 2 1 Node1 2 1811001.6 14435520.0
... ... ... ... ... ... ...
1595 399 2 Node4 439 1922548.8 14566752.0
1596 400 2 Node1 419 1929110.4 14560190.4
1597 400 2 Node2 420 1935672.0 14560190.4
1598 400 2 Node3 441 1935672.0 14566752.0
1599 400 2 Node4 440 1929110.4 14566752.0
>>> model.kill()
>>> model.close_log_file()
"""
node_info = self.get_node_info()
element_info = self.get_element_info()
# merge element info with nodes to assign coordinates to each element vertex
element_geometry = pd.merge(element_info, node_info, on="NodeID")
element_geometry.sort_values(by=["IE", "NodeNum"], inplace=True)
return element_geometry
def get_depth_to_water(self, layer_number, begin_date=None, end_date=None):
"""
        Calculate the depth to water for an IWFM model layer for all dates between
        the provided begin date and end date.
        Parameters
        ----------
        layer_number : int
            layer number id for a given layer in an IWFM model. Must be equal to or
            less than the total number of model layers
        begin_date : str, default=None
            IWFM format date for the first date used to return simulated heads
        end_date : str, default=None
            IWFM format date for the last date used to return simulated heads
Returns
-------
pd.DataFrame
depth to water by model node and date
See Also
--------
IWFMModel.get_ground_surface_elevation : Return the ground surface elevation for each node specified in the IWFM model
IWFMModel.get_gwheads_foralayer : Return the simulated groundwater heads for a single user-specified model layer for every model node over a user-specified time interval.
Example
-------
>>> from pywfm import IWFMModel
>>> pp_file = '../Preprocessor/PreProcessor_MAIN.IN'
>>> sim_file = 'Simulation_MAIN.IN'
>>> model = IWFMModel(pp_file, sim_file)
>>> model.get_depth_to_water(1, '09/01/2000_24:00')
Date NodeID DTW X Y
0 2000-09-01 1 210.0 1804440.0 14435520.0
1 2000-09-02 1 210.0 1804440.0 14435520.0
2 2000-09-03 1 210.0 1804440.0 14435520.0
3 2000-09-04 1 210.0 1804440.0 14435520.0
4 2000-09-05 1 210.0 1804440.0 14435520.0
... ... ... ... ... ...
13225 2000-09-26 441 150.0 1935672.0 14566752.0
13226 2000-09-27 441 150.0 1935672.0 14566752.0
13227 2000-09-28 441 150.0 1935672.0 14566752.0
13228 2000-09-29 441 150.0 1935672.0 14566752.0
13229 2000-09-30 441 150.0 1935672.0 14566752.0
>>> model.kill()
>>> model.close_log_file()
"""
# get ground surface elevations
gs_elevs = self.get_ground_surface_elevation()
# get groundwater heads
dts, heads = self.get_gwheads_foralayer(layer_number, begin_date, end_date)
# calculate depth to water
depth_to_water = gs_elevs - heads
# convert to dataframe object
dtw_df = pd.DataFrame(
data=depth_to_water,
index=pd.to_datetime(dts),
columns=np.arange(1, self.get_n_nodes() + 1),
)
        # reshape from wide (index of dates, one column per node) to long format with
        # one row per date and node
dtw_df = dtw_df.stack().reset_index()
dtw_df.rename(
columns={"level_0": "Date", "level_1": "NodeID", 0: "DTW"}, inplace=True
)
return pd.merge(dtw_df, self.get_node_info(), on="NodeID")
def get_stream_network(self):
"""
Return the stream nodes and groundwater nodes for every reach in an IWFM model
Returns
-------
pd.DataFrame
stream nodes, groundwater nodes, and name for each reach in the IWFM model
Note
----
For IWFM models using the wide stream feature in Stream Package Version #4.2,
only the first groundwater node will be returned.
See Also
--------
IWFMModel.get_stream_reach_ids : Return an array of stream reach IDs in an IWFM model
IWFMModel.get_stream_reach_stream_nodes : Return the stream node IDs corresponding to stream nodes in a specified reach
IWFMModel.get_stream_reach_groundwater_nodes : Return the groundwater node IDs corresponding to stream nodes in a specified reach
Example
-------
>>> from pywfm import IWFMModel
>>> pp_file = '../Preprocessor/PreProcessor_MAIN.IN'
>>> sim_file = 'Simulation_MAIN.IN'
>>> model = IWFMModel(pp_file, sim_file)
>>> model.get_stream_network()
StreamReach StreamNodes GroundwaterNodes ReachName
0 1 1 433 Reach1
1 1 2 412 Reach1
2 1 3 391 Reach1
3 1 4 370 Reach1
4 1 5 349 Reach1
5 1 6 328 Reach1
6 1 7 307 Reach1
7 1 8 286 Reach1
8 1 9 265 Reach1
9 1 10 264 Reach1
10 2 11 222 Reach2
11 2 12 223 Reach2
12 2 13 202 Reach2
13 2 14 181 Reach2
14 2 15 160 Reach2
15 2 16 139 Reach2
16 3 17 139 Reach3
17 3 18 118 Reach3
18 3 19 97 Reach3
19 3 20 76 Reach3
20 3 21 55 Reach3
21 3 22 34 Reach3
22 3 23 13 Reach3
>>> model.kill()
>>> model.close_log_file()
"""
# get stream reach IDs
stream_reach_ids = self.get_stream_reach_ids()
# get stream nodes and groundwater nodes for each stream reach
dfs = []
for rch in stream_reach_ids:
stream_nodes = self.get_stream_reach_stream_nodes(int(rch))
groundwater_nodes = self.get_stream_reach_groundwater_nodes(int(rch))
df = pd.DataFrame(
{"StreamNodes": stream_nodes, "GroundwaterNodes": groundwater_nodes}
)
df["StreamReach"] = rch
dfs.append(df)
# assemble all stream reaches into a single DataFrame
stream_network = pd.concat(dfs)
# get stream reach names
stream_reach_names = self.get_stream_reach_names()
reach_names = pd.DataFrame(
{"StreamReach": stream_reach_ids, "ReachName": stream_reach_names}
)
stream_network = pd.merge(stream_network, reach_names, on="StreamReach")
stream_network.sort_values(
by=["StreamReach", "StreamNodes"], ignore_index=True, inplace=True
)
return stream_network[
["StreamReach", "StreamNodes", "GroundwaterNodes", "ReachName"]
]
### plotting methods
def plot_nodes(
self,
axes,
values=None,
cmap="jet",
scale_factor=10000,
buffer_distance=10000,
write_to_file=False,
file_name=None,
):
"""
plots model nodes on predefined axes
Parameters
----------
axes : plt.Axes
axes object for matplotlib figure
values : list, tuple, np.ndarray, or None, default=None
values to display color
cmap : str or `~matplotlib.colors.Colormap`, default='jet'
colormap used to map normalized data values to RGBA colors
scale_factor : int, default=10000
used to scale the limits of the x and y axis of the plot
e.g. scale_factor=1 rounds the x and y min and max values
down and up, respectively to the nearest whole number
buffer_distance : int, default=10000
value used to buffer the min and max axis values by a
number of units
write_to_file : boolean, default=False
save plot to file. if True, file_name is required
file_name : str
file path and name (with extension for valid matplotlib.pyplot
savefig output type)
Returns
-------
None
matplotlib figure is generated
"""
if not isinstance(axes, plt.Axes):
raise TypeError("axes must be an instance of matplotlib.pyplot.Axes")
if values is not None:
if isinstance(values, list):
values = np.array(values)
if not isinstance(values, np.ndarray):
raise TypeError("values must be either a list or np.ndarray")
if len(values) != self.get_n_nodes():
raise ValueError(
"length of values must be the same as the number of nodes"
)
if not isinstance(scale_factor, int):
raise TypeError("scale_factor must be an integer")
if not isinstance(buffer_distance, int):
raise TypeError("buffer distance must be an integer")
if not isinstance(write_to_file, bool):
raise TypeError("write_to_file must be True or False")
if write_to_file and file_name is None:
raise ValueError("to save figure, user must specify a file_name")
if file_name is not None:
if not isinstance(file_name, str):
raise TypeError("file_name must be a string")
else:
if not os.path.isdir(os.path.dirname(file_name)):
raise ValueError(
"file path: {} does not exist".format(
os.path.dirname(file_name)
)
)
model_data = self.get_node_info()
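        # compute plot extents: round the min/max coordinates down/up to the nearest
        # multiple of scale_factor, then pad by buffer_distance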
xmin = (
math.floor(model_data["X"].min() / scale_factor) * scale_factor
- buffer_distance
)
xmax = (
math.ceil(model_data["X"].max() / scale_factor) * scale_factor
+ buffer_distance
)
ymin = (
math.floor(model_data["Y"].min() / scale_factor) * scale_factor
- buffer_distance
)
ymax = (
math.ceil(model_data["Y"].max() / scale_factor) * scale_factor
+ buffer_distance
)
axes.scatter(model_data["X"], model_data["Y"], s=2, c=values, cmap=cmap)
axes.set_xlim(xmin, xmax)
axes.set_ylim(ymin, ymax)
# axes.grid()
if write_to_file:
plt.savefig(file_name)
def plot_elements(
self,
axes,
values=None,
cmap="jet",
scale_factor=10000,
buffer_distance=10000,
write_to_file=False,
file_name=None,
):
"""
plots model elements on predefined axes
Parameters
----------
axes : plt.Axes
axes object for matplotlib figure
values : list, tuple, np.ndarray, or None, default=None
values to display color
cmap : str or `~matplotlib.colors.Colormap`, default='jet'
colormap used to map normalized data values to RGBA colors
scale_factor : int, default=10000
used to scale the limits of the x and y axis of the plot
e.g. scale_factor=1 rounds the x and y min and max values
down and up, respectively to the nearest whole number
buffer_distance : int, default=10000
value used to buffer the min and max axis values by a
number of units
write_to_file : boolean, default=False
save plot to file. if True, file_name is required
file_name : str
file path and name (with extension for valid matplotlib.pyplot
savefig output type)
Returns
-------
None
matplotlib figure is generated
"""
if not isinstance(axes, plt.Axes):
raise TypeError("axes must be an instance of matplotlib.pyplot.Axes")
if not isinstance(scale_factor, int):
raise TypeError("scale_factor must be an integer")
if not isinstance(buffer_distance, int):
raise TypeError("buffer distance must be an integer")
if not isinstance(write_to_file, bool):
raise TypeError("write_to_file must be True or False")
if write_to_file and file_name is None:
raise ValueError("to save figure, user must specify a file_name")
if file_name is not None:
if not isinstance(file_name, str):
raise TypeError("file_name must be a string")
else:
if not os.path.isdir(os.path.dirname(file_name)):
raise ValueError(
"file path: {} does not exist".format(
os.path.dirname(file_name)
)
)
model_data = self.get_element_spatial_info()
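        # compute plot extents: round the min/max coordinates down/up to the nearest
        # multiple of scale_factor, then pad by buffer_distance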
xmin = (
math.floor(model_data["X"].min() / scale_factor) * scale_factor
- buffer_distance
)
xmax = (
math.ceil(model_data["X"].max() / scale_factor) * scale_factor
+ buffer_distance
)
ymin = (
math.floor(model_data["Y"].min() / scale_factor) * scale_factor
- buffer_distance
)
ymax = (
math.ceil(model_data["Y"].max() / scale_factor) * scale_factor
+ buffer_distance
)
dfs = []
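        # close each element polygon by appending its first node and coordinates again
        # so the outline can be drawn as a closed ring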
for e in model_data["IE"].unique():
node_ids = model_data[model_data["IE"] == e]["NodeID"].to_numpy()
node_ids = np.append(node_ids, node_ids[0])
x = model_data[model_data["IE"] == e]["X"].to_numpy()
x = np.append(x, x[0])
y = model_data[model_data["IE"] == e]["Y"].to_numpy()
y = np.append(y, y[0])
            elem = pd.DataFrame({"NodeID": node_ids, "X": x, "Y": y})
import numpy
import pandas as pd
import math as m
#Moving Average
def MA(df, n):
MA = pd.Series(df['Close'].rolling(n).mean(), name = 'MA_' + str(n))
df = df.join(MA)
return df
def MACD(df, n_fast, n_slow):
"""Calculate MACD, MACD Signal and MACD difference
:param df: pandas.DataFrame
:param n_fast:
:param n_slow:
:return: pandas.DataFrame
"""
EMAfast = pd.Series(df['Close'].ewm(span=n_fast, min_periods=n_slow).mean())
EMAslow = pd.Series(df['Close'].ewm(span=n_slow, min_periods=n_slow).mean())
MACD = pd.Series(EMAfast - EMAslow, name='MACD_' + str(n_fast) + '_' + str(n_slow))
MACDsign = pd.Series(MACD.ewm(span=9, min_periods=9).mean(), name='MACDsign_' + str(n_fast) + '_' + str(n_slow))
MACDdiff = pd.Series(MACD - MACDsign, name='MACDdiff_' + str(n_fast) + '_' + str(n_slow))
df = df.join(MACD)
df = df.join(MACDsign)
df = df.join(MACDdiff)
return df
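# Example usage (assuming `ohlc` is a DataFrame with a 'Close' column):
#   ohlc = MA(ohlc, 20)        # adds an 'MA_20' column
#   ohlc = MACD(ohlc, 12, 26)  # adds 'MACD_12_26', 'MACDsign_12_26' and 'MACDdiff_12_26'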
#Average Directional Movement Index
def ADX(df, n, n_ADX):
"""Calculate the Average Directional Movement Index for given data.
:param df: pandas.DataFrame
:param n:
:param n_ADX:
:return: pandas.DataFrame
"""
i = 0
UpI = []
DoI = []
while i + 1 <= df.index[-1]:
UpMove = df.loc[i + 1, 'High'] - df.loc[i, 'High']
DoMove = df.loc[i, 'Low'] - df.loc[i + 1, 'Low']
if UpMove > DoMove and UpMove > 0:
UpD = UpMove
else:
UpD = 0
UpI.append(UpD)
if DoMove > UpMove and DoMove > 0:
DoD = DoMove
else:
DoD = 0
DoI.append(DoD)
i = i + 1
i = 0
TR_l = [0]
while i < df.index[-1]:
TR = max(df.loc[i + 1, 'High'], df.loc[i, 'Close']) - min(df.loc[i + 1, 'Low'], df.loc[i, 'Close'])
TR_l.append(TR)
i = i + 1
TR_s = pd.Series(TR_l)
ATR = pd.Series(TR_s.ewm(span=n, min_periods=n).mean())
    UpI = pd.Series(UpI)
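    # A typical completion of this indicator (an assumption, not part of the source) would be:
    # DoI = pd.Series(DoI)
    # PosDI = pd.Series(UpI.ewm(span=n, min_periods=n).mean() / ATR)
    # NegDI = pd.Series(DoI.ewm(span=n, min_periods=n).mean() / ATR)
    # ADX = pd.Series((abs(PosDI - NegDI) / (PosDI + NegDI)).ewm(span=n_ADX, min_periods=n_ADX).mean(),
    #                 name='ADX_' + str(n) + '_' + str(n_ADX))
    # df = df.join(ADX)
    # return df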
from typing import List, Optional, Union
from pandas import DataFrame
from mstrio.api import documents
from mstrio.project_objects.document import Document
from mstrio.server.environment import Environment
from mstrio.utils import helper
from mstrio.connection import Connection
def list_dossiers(connection: Connection, name: Optional[str] = None, to_dictionary: bool = False,
to_dataframe: bool = False, limit: Optional[int] = None,
**filters) -> List["Dossier"]:
"""Get all Dossiers stored on the server.
Optionally use `to_dictionary` or `to_dataframe` to choose output format.
If `to_dictionary` is True, `to_dataframe` is omitted.
Args:
connection(object): MicroStrategy connection object returned
by 'connection.Connection()'
name: exact name of the document to list
to_dictionary(bool, optional): if True, return Dossiers as
list of dicts
to_dataframe(bool, optional): if True, return Dossiers as
pandas DataFrame
limit(int): limit the number of elements returned to a sample of
dossiers
**filters: Available filter parameters: ['name', 'id', 'type',
'subtype', 'date_created', 'date_modified', 'version', 'acg',
'owner', 'ext_type', 'view_media', 'certified_info', 'project_id']
Returns:
List of dossiers.
"""
if connection.project_id is None:
msg = ("Please log into a specific project to load dossiers within it. To load "
"all dossiers across the whole environment use "
f"{list_dossiers_across_projects.__name__} function")
raise ValueError(msg)
return Dossier._list_all(connection, to_dictionary=to_dictionary, name=name, limit=limit,
to_dataframe=to_dataframe, **filters)
def list_dossiers_across_projects(connection: Connection, name: Optional[str] = None,
to_dictionary: bool = False, to_dataframe: bool = False,
limit: Optional[int] = None, **filters) -> List["Dossier"]:
"""Get all Dossiers stored on the server.
Optionally use `to_dictionary` or `to_dataframe` to choose output format.
If `to_dictionary` is True, `to_dataframe` is omitted.
Args:
connection(object): MicroStrategy connection object returned
by 'connection.Connection()'
name: exact names of the dossiers to list
to_dictionary(bool, optional): if True, return Dossiers as
list of dicts
to_dataframe(bool, optional): if True, return Dossiers as
pandas DataFrame
limit: limit the number of elements returned. If `None` (default), all
objects are returned.
**filters: Available filter parameters: ['name', 'id', 'type',
'subtype', 'date_created', 'date_modified', 'version', 'acg',
'owner', 'ext_type', 'view_media', 'certified_info', 'project_id']
Returns:
List of documents.
"""
project_id_before = connection.project_id
env = Environment(connection)
projects = env.list_projects()
output = []
for project in projects:
connection.select_project(project_id=project.id)
output.extend(
Dossier._list_all(connection, to_dictionary=to_dictionary, name=name, limit=limit,
to_dataframe=to_dataframe, **filters))
output = list(set(output))
connection.select_project(project_id=project_id_before)
return output
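# Example usage (hypothetical URL and credentials; exact Connection arguments may differ by mstrio version):
#   conn = Connection("https://demo.example.com/MicroStrategyLibrary/api", "username", "password",
#                     project_name="MicroStrategy Tutorial")
#   dossiers = list_dossiers(conn, to_dataframe=True)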
class Dossier(Document):
@classmethod
def _list_all(cls, connection: Connection, name: Optional[str] = None,
to_dictionary: bool = False, to_dataframe: bool = False,
limit: Optional[int] = None,
**filters) -> Union[List["Dossier"], List[dict], DataFrame]:
msg = "Error retrieving documents from the environment."
if to_dictionary and to_dataframe:
helper.exception_handler(
"Please select either to_dictionary=True or to_dataframe=True, but not both.",
ValueError)
objects = helper.fetch_objects_async(connection, api=documents.get_dossiers,
async_api=documents.get_dossiers_async,
dict_unpack_value='result', limit=limit,
chunk_size=1000, error_msg=msg, filters=filters,
search_term=name)
if to_dictionary:
return objects
elif to_dataframe:
            return DataFrame(objects)
from collections import deque
import numpy as np
import pandas as pd
from sunpy.util import SunpyUserWarning
__all__ = ['ELO']
class ELO:
"""
    Recreating the ELO rating algorithm for Sunspotter.
"""
def __init__(self, score_board: pd.DataFrame, *, k_value=32, default_score=1400,
max_comparisons=50, max_score_change=32, min_score_change=16, score_memory=10,
delimiter=';', column_map={"player 0": "image_id_0",
"player 1": "image_id_1",
"score for player 0": "image0_more_complex_image1"}):
"""
Parameters
----------
score_board : pandas.DataFrame
DataFrame holding the scores of individual matches.
k_value : int, optional
Initial K Value to be used for calculating new ratings, by default 32
default_score : int, optional
Initial rating, by default 1400
max_comparisons : int, optional
Max comparisions for any player, by default 50
max_score_change : int, optional
Upper limit on K Value updation, by default 32
min_score_change : int, optional
Lower limit on K Value updation, by default 16
score_memory : int, optional
Number of previous scores to consider while calculating
standard deviation and new K value, by default 10
column_map : dict, optional
Dictionary, for mapping the column names of the score_board dataframe
to variable names used in the ELO ranking system.
by default {"player 0": "image_id_0",
"player 1": "image_id_1",
"score for player 0": "image0_more_complex_image1"}
"""
self.score_board = score_board
self.k_value = k_value
self.default_score = default_score
self.score_change = {'min': min_score_change, 'max': max_score_change}
self.max_comparisions = max_comparisons
self.score_memory = score_memory
self.column_map = column_map
if not set(self.column_map.values()).issubset(self.score_board.columns):
missing_columns = set(self.column_map.values()) - set(self.column_map.values()).intersection(self.score_board.columns)
missing_columns = ", ".join(missing_columns)
raise SunpyUserWarning("The following columns mentioned in the column map"
f" are not present in the score board: {missing_columns}")
self._create_ranking()
def _create_ranking(self):
"""
Prepares the Ranking DataFrame.
"""
image_ids = set(self.score_board[self.column_map['player 0']]).union(self.score_board[self.column_map['player 1']])
self.rankings = pd.DataFrame(image_ids, columns=['player id'])
self.rankings.set_axis(self.rankings['player id'], inplace=True)
self.rankings['score'] = self.default_score
self.rankings['k value'] = self.k_value
self.rankings['count'] = 0
self.rankings['std dev'] = self.score_change['max']
self.rankings['last scores'] = str(self.default_score)
def expected_score(self, score_image_0, score_image_1):
"""
Given two AR scores, calculates expected probability of `image_0` being more complex.
Parameters
----------
score_image_0 : int
Score for first image
score_image_1 : int
Score for second image
Returns
-------
expected_0_score : float
Expected probability of `image_0` being more complex.
"""
expected_0_score = 1.0 / (1.0 + 10 ** ((score_image_1 - score_image_0) / 400.00))
return expected_0_score
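        # e.g. equal ratings give 0.5, while a 400-point advantage gives roughly 0.91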
def new_rating(self, rating_for_image, k_value, score_for_image, image_expected_score):
"""
Calculates new rating based on the ELO algorithm.
Parameters
----------
rating_for_image : float
Current Rating for the image
k_value : float
Current k_value for the image
score_for_image : int
Actual result of classification of the image in a pairwise match.
`0` denotes less complex, `1` denotes more complex
image_expected_score : float
Expected result of classification of image in a pairwise match
based on current rating of the image.
Returns
-------
new_image_rating : float
New rating of image after the classification match.
"""
new_image_rating = rating_for_image + k_value * (score_for_image - image_expected_score)
return new_image_rating
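        # e.g. rating 1400 with k=32, actual score 1 against expected 0.5 -> 1400 + 32*0.5 = 1416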
def score_update(self, image_0, image_1, score_for_image_0):
"""
Updates the ratings of the two images based on the complexity classification.
Parameters
----------
image_0 : int
Image id for first image
image_1 : int
Image id for second image
score_for_image_0 : int
Actual result of classification of the image 0 in a pairwise match.
`0` denotes less complex, `1` denotes more complex
Notes
-----
To make updates in the original rankings DataFrame, for each classification,
        two state dictionaries need to be maintained, corresponding to the two AR images.
The changes are made to these state dictionaries and then the ranking DataFrame is updated.
"""
# state dicts
state_dict_0 = self.rankings.loc[image_0].to_dict()
state_dict_0['last scores'] = deque(map(float, state_dict_0['last scores'].split(',')), maxlen=self.score_memory)
state_dict_1 = self.rankings.loc[image_1].to_dict()
state_dict_1['last scores'] = deque(map(float, state_dict_1['last scores'].split(',')), maxlen=self.score_memory)
expected_score_0 = self.expected_score(self.rankings.loc[image_0]['score'],
self.rankings.loc[image_1]['score'])
expected_score_1 = 1 - expected_score_0
_update_state_dict(state_dict_0, image_0, expected_score_0, score_for_image_0)
_update_state_dict(state_dict_1, image_1, expected_score_1, 1 - score_for_image_0)
# Making the Update DataFrames
        update_df = pd.DataFrame([state_dict_0, state_dict_1])
import librosa
import numpy as np
import pandas as pd
from os import listdir
from os.path import isfile, join
from audioread import NoBackendError
def extract_features(path, label, emotionId, startid):
"""
    Extract features from the audio files under `path` using the librosa library.
    :param path: directory containing the audio files
    :param label: emotion label
    :param emotionId: numeric id of the emotion
    :param startid: starting sequence number
    :return: feature matrix as a pandas.DataFrame
    """
    id = startid  # sequence number
    feature_set = pd.DataFrame()  # feature matrix
    # individual feature vectors
labels = pd.Series()
emotion_vector = pd.Series()
songname_vector = pd.Series()
tempo_vector = pd.Series()
total_beats = pd.Series()
average_beats = pd.Series()
chroma_stft_mean = pd.Series()
# chroma_stft_std = pd.Series()
chroma_stft_var = pd.Series()
# chroma_cq_mean = pd.Series()
# chroma_cq_std = pd.Series()
# chroma_cq_var = pd.Series()
# chroma_cens_mean = pd.Series()
# chroma_cens_std = pd.Series()
# chroma_cens_var = pd.Series()
mel_mean = pd.Series()
# mel_std = pd.Series()
mel_var = pd.Series()
mfcc_mean = pd.Series()
# mfcc_std = pd.Series()
mfcc_var = pd.Series()
mfcc_delta_mean = pd.Series()
# mfcc_delta_std = pd.Series()
mfcc_delta_var = pd.Series()
rmse_mean = pd.Series()
# rmse_std = pd.Series()
rmse_var = pd.Series()
cent_mean = pd.Series()
# cent_std = pd.Series()
cent_var = pd.Series()
    spec_bw_mean = pd.Series()
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Sat Jan 26 22:10:18 2019
@author: shashank
Takes a list of entries into a tournament you are running as:
ams.csv
amd.csv
awd.csv
axd.csv
bms.csv
etc.
and uses the rankings from the ranking.csv file to rank every entry in each
bracket against each other. Outputs plots and a CSV with the data to
sandbagging/--.csv where -- is the name of the bracket (eg. MD, WD, etc.)
The outputs of this code are the end goal to help tournament directors remove
players who are sandbagging--we can see if any players are outliers in skill in
their bracket.
"""
import pandas as pd
from scraper import Alias
from ranking import Ranking
from ranking import num_there
import numpy as np
import math
import matplotlib.pyplot as plt
import csv
def get_skills(leaderboard,event,height,exclude_errors=False):
"""
    if exclude_errors is False:
        this includes all players. Even if a player's partner isn't signed up,
        it will assume the player's partner's skill is average with the starting STD DEV
if exclude_errors is True:
this will exclude players whose partners are not signed up yet. It will include
teams in which one or both players aren't in the standings, and assume default
skill and STD DEV
if exclude_errors is all:
this will exclude teams in which either partner is not in the standings, or who
haven't yet signed up
if exclude_errors is assume partner:
this assumes that if a player is not in the standings, that player
has the same skill level as his partner, but with the default STD DEV
"""
aliases = Alias()
aliases.read_csv()
event_players = []
for index,row in event.iterrows():
if (not math.isnan(row['Number'])) and (math.isnan(event.iloc[index+1]['Number'])):
event_players.append([event.iloc[index]['player'],event.iloc[index+1]['player']])
CS_players = []
CS_values = []
not_in_rankings = []
for value in event_players:
if (not pd.isna(value[0])):
try:
player1_id = aliases.get_default_id(value[0])
player1_name = aliases.get_default_name(player1_id)
except IndexError:
player1_name=value[0]
not_in_rankings.append(value[0])
else:
player1_name="None"
if (not pd.isna(value[1])):
try:
player2_id = aliases.get_default_id(value[1])
player2_name = aliases.get_default_name(player2_id)
except IndexError:
player2_name=value[1]
not_in_rankings.append(value[1])
else:
player2_name="None"
player1 = np.where(leaderboard == player1_name)[0]
player2 = np.where(leaderboard == player2_name)[0]
if len(player1) == 1:
player1_name = leaderboard.iloc[player1[0]][0]
player1_average = leaderboard.iloc[player1[0]]['Average']
player1_std = leaderboard.iloc[player1[0]]['95% CI']
else:
player1_average = 25
player1_std = 8.333*2
if len(player2) == 1:
player2_name = leaderboard.iloc[player2[0]][0]
player2_average = leaderboard.iloc[player2[0]]['Average']
player2_std = leaderboard.iloc[player2[0]]['95% CI']
else:
player2_average = 25
player2_std = 8.333*2
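        # the team value computed below is a conservative skill estimate: the sum over both
        # partners of (mean skill - 95% CI); the fallback values 25 and 2*8.333 appear to be
        # TrueSkill's default mu and two-sigma spread (an inference; the ranking module is not shown)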
if exclude_errors:
if player1_name!='None' and player2_name!='None':
CS_players.append([player1_name,player2_name])
CS_values.append(player1_average-player1_std+player2_average-player2_std)
elif exclude_errors=='all':
if (not player1_name in not_in_rankings) and (not player2_name in not_in_rankings):
CS_players.append([player1_name,player2_name])
CS_values.append(player1_average-player1_std+player2_average-player2_std)
elif exclude_errors=='assume partner':
if (player1_name in not_in_rankings) and (not player2_name in not_in_rankings):
CS_players.append([player1_name,player2_name])
CS_values.append(player2_average-player1_std+player2_average-player2_std)
elif (not player1_name in not_in_rankings) and (player2_name in not_in_rankings):
CS_players.append([player1_name,player2_name])
CS_values.append(player1_average-player1_std+player1_average-player2_std)
elif (not player1_name in not_in_rankings) and (not player2_name in not_in_rankings):
CS_players.append([player1_name,player2_name])
CS_values.append(player1_average-player1_std+player2_average-player2_std)
else:
CS_players.append([player1_name,player2_name])
CS_values.append(player1_average-player1_std+player2_average-player2_std)
ones = []
for i in range(len(CS_values)):
ones.append(height)
CS_skill, CS_players = zip(*sorted(zip(CS_values, CS_players)))
return CS_skill,CS_players,ones,not_in_rankings
def get_skills_singles(leaderboard,event,height,exclude_errors=False):
"""
    if exclude_errors is False:
        this includes all players. Even if a player's partner isn't signed up,
        it will assume the player's partner's skill is average with the starting STD DEV
if exclude_errors is True:
this will exclude players whose partners are not signed up yet. It will include
teams in which one or both players aren't in the standings, and assume default
skill and STD DEV
if exclude_errors is all:
this will exclude teams in which either partner is not in the standings, or who
haven't yet signed up
if exclude_errors is assume partner:
this assumes that if a player is not in the standings, that player
has the same skill level as his/her partner, but with the default STD DEV
"""
aliases = Alias()
aliases.read_csv()
event_players = []
for index,row in event.iterrows():
event_players.append(event.iloc[index]['player'])
CS_players = []
CS_values = []
not_in_rankings = []
for value in event_players:
try:
player_id = aliases.get_default_id(value)
player_name = aliases.get_default_name(player_id)
except IndexError:
player_name=value
not_in_rankings.append(value)
player = np.where(leaderboard == player_name)[0]
if len(player) == 1:
player_name = leaderboard.iloc[player[0]][0]
player_average = leaderboard.iloc[player[0]]['Average']
player_std = leaderboard.iloc[player[0]]['95% CI']
else:
player_average = 25
player_std = 8.333*2
if exclude_errors:
if not (player_name in not_in_rankings):
CS_players.append(player_name)
CS_values.append(player_average-player_std)
else:
CS_players.append(player_name)
CS_values.append(player_average-player_std)
ones = []
for i in range(len(CS_values)):
ones.append(height)
CS_skill, CS_players = zip(*sorted(zip(CS_values, CS_players)))
return CS_skill,CS_players,ones,not_in_rankings
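# Example (hypothetical bracket file, mirroring the reads below):
#   ams = pd.read_csv("sandbagging/ams.csv", names=['Number', 'player'])
#   skills, players, heights, missing = get_skills_singles(leaderboard, ams, height=1)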
leaderboard = pd.read_csv("ranking.csv")
amd = pd.read_csv("sandbagging/amd.csv",names=['Number','player'])
from difflib import SequenceMatcher
import functools
from typing import Optional
import pandas
__doc__ = """Get specialty codes and consolidate data from different sources in basic_data."""
COLUMNS = ['first_name', 'last_name', 'city', 'postal_code', 'state', 'specialty_code']
GENERIC_OPHTHALMOLOGY_CODE = '207W00000X'
GENERIC_ENDOCRINOLOGY_CODE = '207RE0101X'
def _convert_zip9_to_zip5(z: Optional[str]) -> Optional[str]:
if pandas.isnull(z):
return
return z.split('-')[0].zfill(5)
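# e.g. _convert_zip9_to_zip5('2115-4301') -> '02115'; a null input returns None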
def clean_asoprs(in_df: pandas.DataFrame) -> pandas.DataFrame:
out_df: pandas.DataFrame = in_df.loc[:, ['Full Name_firstName', 'Full Name_lastName']]
all_address_columns = {col for col in in_df.columns if 'address' in col.lower()}
all_address_subfields = {col.split('_')[-1] for col in all_address_columns}
def get_first_address_subfield(row: pandas.Series):
out_dict = {i: None for i in all_address_subfields}
for col in set(row.index).intersection(all_address_columns):
subfield = col.split('_')[-1]
            if not pandas.isnull(value := row[col]):
# -*- coding: utf-8 -*-
import numpy as np
import pandas as pd
from pandas.api.types import is_string_dtype
from pandas.api.types import is_numeric_dtype
import re
import warnings
import multiprocessing as mp
import matplotlib.pyplot as plt
import time
import os
import platform
from .condition_fun import *
from .info_value import *
# converting vector (breaks & special_values) to dataframe
def split_vec_todf(vec):
'''
Create a dataframe based on provided vector.
    Split the rows that include '%,%' into multiple rows.
Replace 'missing' by np.nan.
Params
------
vec: list
Returns
------
pandas.DataFrame
returns a dataframe with three columns
        {'bin_chr': original vec, 'rowid': index of vec, 'value': split vec}
'''
if vec is not None:
vec = [str(i) for i in vec]
a = pd.DataFrame({'bin_chr':vec}).assign(rowid=lambda x:x.index)
b = pd.DataFrame([i.split('%,%') for i in vec], index=vec)\
.stack().replace('missing', np.nan) \
.reset_index(name='value')\
.rename(columns={'level_0':'bin_chr'})[['bin_chr','value']]
# return
return pd.merge(a,b,on='bin_chr')
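# e.g. split_vec_todf(['missing', '1%,%2']) gives rows
#   ('missing', 0, NaN), ('1%,%2', 1, '1') and ('1%,%2', 1, '2')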
def add_missing_spl_val(dtm, breaks, spl_val):
'''
add missing to spl_val if there is nan in dtm.value and
missing is not specified in breaks and spl_val
Params
------
dtm: melt dataframe
breaks: breaks list
    spl_val: special values list
Returns
------
list
returns spl_val list
'''
if dtm.value.isnull().any():
if breaks is None:
if spl_val is None:
spl_val=['missing']
elif any([('missing' in str(i)) for i in spl_val]):
spl_val=spl_val
else:
spl_val=['missing']+spl_val
elif any([('missing' in str(i)) for i in breaks]):
spl_val=spl_val
else:
if spl_val is None:
spl_val=['missing']
elif any([('missing' in i) for i in spl_val]):
spl_val=spl_val
else:
spl_val=['missing']+spl_val
# return
return spl_val
# count number of good or bad in y
def n0(x): return sum(x==0)
def n1(x): return sum(x==1)
# split dtm into bin_sv and dtm (without speical_values)
def dtm_binning_sv(dtm, breaks, spl_val):
'''
Split the orginal dtm (melt dataframe) into
binning_sv (binning of special_values) and
a new dtm (without special_values).
Params
------
dtm: melt dataframe
    spl_val: special values list
Returns
------
list
returns a list with binning_sv and dtm
'''
spl_val = add_missing_spl_val(dtm, breaks, spl_val)
if spl_val is not None:
# special_values from vector to dataframe
sv_df = split_vec_todf(spl_val)
# value
if is_numeric_dtype(dtm['value']):
sv_df['value'] = sv_df['value'].astype(dtm['value'].dtypes)
# sv_df['bin_chr'] = sv_df['bin_chr'].astype(dtm['value'].dtypes).astype(str)
sv_df['bin_chr'] = np.where(
np.isnan(sv_df['value']), sv_df['bin_chr'],
sv_df['value'].astype(dtm['value'].dtypes).astype(str))
# sv_df = sv_df.assign(value = lambda x: x.value.astype(dtm['value'].dtypes))
# dtm_sv & dtm
dtm_sv = pd.merge(dtm.fillna("missing"), sv_df[['value']].fillna("missing"), how='inner', on='value', right_index=True)
dtm = dtm[~dtm.index.isin(dtm_sv.index)].reset_index() if len(dtm_sv.index) < len(dtm.index) else None
# dtm_sv = dtm.query('value in {}'.format(sv_df['value'].tolist()))
# dtm = dtm.query('value not in {}'.format(sv_df['value'].tolist()))
if dtm_sv.shape[0] == 0:
return {'binning_sv':None, 'dtm':dtm}
# binning_sv
binning_sv = pd.merge(
dtm_sv.fillna('missing').groupby(['variable','value'])['y'].agg([n0, n1])\
.reset_index().rename(columns={'n0':'good','n1':'bad'}),
sv_df.fillna('missing'),
on='value'
).groupby(['variable', 'rowid', 'bin_chr']).agg({'bad':sum,'good':sum})\
.reset_index().rename(columns={'bin_chr':'bin'})\
.drop('rowid', axis=1)
else:
binning_sv = None
# return
return {'binning_sv':binning_sv, 'dtm':dtm}
# check empty bins for numeric variable
def check_empty_bins(dtm, binning):
# check empty bins
bin_list = np.unique(dtm.bin.astype(str)).tolist()
if 'nan' in bin_list:
bin_list.remove('nan')
binleft = set([re.match(r'\[(.+),(.+)\)', i).group(1) for i in bin_list]).difference(set(['-inf', 'inf']))
binright = set([re.match(r'\[(.+),(.+)\)', i).group(2) for i in bin_list]).difference(set(['-inf', 'inf']))
if binleft != binright:
bstbrks = sorted(list(map(float, ['-inf'] + list(binright) + ['inf'])))
labels = ['[{},{})'.format(bstbrks[i], bstbrks[i+1]) for i in range(len(bstbrks)-1)]
dtm.loc[:,'bin'] = pd.cut(dtm['value'], bstbrks, right=False, labels=labels)
binning = dtm.groupby(['variable','bin'])['y'].agg([n0, n1])\
.reset_index().rename(columns={'n0':'good','n1':'bad'})
# warnings.warn("The break points are modified into '[{}]'. There are empty bins based on the provided break points.".format(','.join(binright)))
# binning
# dtm['bin'] = dtm['bin'].astype(str)
# return
return binning
# required in woebin2 # return binning if breaks provided
#' @import data.table
def woebin2_breaks(dtm, breaks, spl_val):
'''
get binning if breaks is provided
Params
------
dtm: melt dataframe
breaks: breaks list
    spl_val: special values list
Returns
------
DataFrame
returns a binning datafram
'''
# breaks from vector to dataframe
bk_df = split_vec_todf(breaks)
# dtm $ binning_sv
dtm_binsv_list = dtm_binning_sv(dtm, breaks, spl_val)
dtm = dtm_binsv_list['dtm']
binning_sv = dtm_binsv_list['binning_sv']
if dtm is None: return {'binning_sv':binning_sv, 'binning':None}
# binning
if is_numeric_dtype(dtm['value']):
# best breaks
bstbrks = ['-inf'] + list(set(bk_df.value.tolist()).difference(set([np.nan, '-inf', 'inf', 'Inf', '-Inf']))) + ['inf']
bstbrks = sorted(list(map(float, bstbrks)))
# cut
labels = ['[{},{})'.format(bstbrks[i], bstbrks[i+1]) for i in range(len(bstbrks)-1)]
dtm.loc[:,'bin'] = pd.cut(dtm['value'], bstbrks, right=False, labels=labels)
dtm['bin'] = dtm['bin'].astype(str)
binning = dtm.groupby(['variable','bin'])['y'].agg([n0, n1])\
.reset_index().rename(columns={'n0':'good','n1':'bad'})
        # check empty bins for numeric variable
binning = check_empty_bins(dtm, binning)
# sort bin
binning = pd.merge(
binning.assign(value=lambda x: [float(re.search(r"^\[(.*),(.*)\)", i).group(2)) if i != 'nan' else np.nan for i in binning['bin']] ),
bk_df.assign(value=lambda x: x.value.astype(float)),
how='left',on='value'
).sort_values(by="rowid").reset_index(drop=True)
# merge binning and bk_df if nan isin value
if bk_df['value'].isnull().any():
binning = binning.assign(bin=lambda x: [i if i != 'nan' else 'missing' for i in x['bin']])\
.fillna('missing').groupby(['variable','rowid'])\
.agg({'bin':lambda x: '%,%'.join(x), 'good':sum, 'bad':sum})\
.reset_index()
else:
# merge binning with bk_df
binning = pd.merge(
dtm,
bk_df.assign(bin=lambda x: x.bin_chr),
how='left', on='value'
).fillna('missing').groupby(['variable', 'rowid', 'bin'])['y'].agg([n0,n1])\
.rename(columns={'n0':'good','n1':'bad'})\
.reset_index().drop('rowid', axis=1)
# return
return {'binning_sv':binning_sv, 'binning':binning}
# required in woebin2_init_bin # return pretty breakpoints
def pretty(low, high, n):
'''
pretty breakpoints, the same as pretty function in R
Params
------
low: minimal value
    high: maximal value
n: number of intervals
Returns
------
numpy.ndarray
returns a breakpoints array
'''
# nicenumber
def nicenumber(x):
exp = np.trunc(np.log10(abs(x)))
f = abs(x) / 10**exp
if f < 1.5:
nf = 1.
elif f < 3.:
nf = 2.
elif f < 7.:
nf = 5.
else:
nf = 10.
return np.sign(x) * nf * 10.**exp
# pretty breakpoints
d = abs(nicenumber((high-low)/(n-1)))
miny = np.floor(low / d) * d
maxy = np.ceil (high / d) * d
return np.arange(miny, maxy+0.5*d, d)
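# e.g. pretty(0.13, 9.7, 10) -> array([0., 1., 2., ..., 10.])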
# required in woebin2 # return initial binning
def woebin2_init_bin(dtm, init_count_distr, breaks, spl_val):
'''
initial binning
Params
------
dtm: melt dataframe
    init_count_distr: the minimal percentage in the fine binning process
    breaks: breaks list
    spl_val: special values list
Returns
------
dict
returns a dict with initial binning and special_value binning
'''
# dtm $ binning_sv
dtm_binsv_list = dtm_binning_sv(dtm, breaks, spl_val)
dtm = dtm_binsv_list['dtm']
binning_sv = dtm_binsv_list['binning_sv']
if dtm is None: return {'binning_sv':binning_sv, 'initial_binning':None}
# binning
if is_numeric_dtype(dtm['value']): # numeric variable
xvalue = dtm['value'].astype(float)
# breaks vector & outlier
iq = xvalue.quantile([0.25, 0.5, 0.75])
iqr = iq[0.75] - iq[0.25]
xvalue_rm_outlier = xvalue if iqr == 0 else xvalue[(xvalue >= iq[0.25]-3*iqr) & (xvalue <= iq[0.75]+3*iqr)]
# number of initial binning
n = np.trunc(1/init_count_distr)
len_uniq_x = len(np.unique(xvalue_rm_outlier))
if len_uniq_x < n: n = len_uniq_x
# initial breaks
brk = np.unique(xvalue_rm_outlier) if len_uniq_x < 10 else pretty(min(xvalue_rm_outlier), max(xvalue_rm_outlier), n)
brk = list(filter(lambda x: x>np.nanmin(xvalue) and x<np.nanmax(xvalue), brk))
brk = [float('-inf')] + sorted(brk) + [float('inf')]
# initial binning datatable
# cut
labels = ['[{},{})'.format(brk[i], brk[i+1]) for i in range(len(brk)-1)]
dtm.loc[:,'bin'] = pd.cut(dtm['value'], brk, right=False, labels=labels)#.astype(str)
# init_bin
init_bin = dtm.groupby('bin')['y'].agg([n0, n1])\
.reset_index().rename(columns={'n0':'good','n1':'bad'})
        # check empty bins for numeric variable
init_bin = check_empty_bins(dtm, init_bin)
init_bin = init_bin.assign(
variable = dtm['variable'].values[0],
brkp = lambda x: [float(re.match('^\[(.*),.+', i).group(1)) for i in x['bin']],
badprob = lambda x: x['bad']/(x['bad']+x['good'])
)[['variable', 'bin', 'brkp', 'good', 'bad', 'badprob']]
else: # other type variable
# initial binning datatable
init_bin = dtm.groupby('value')['y'].agg([n0,n1])\
.rename(columns={'n0':'good','n1':'bad'})\
.assign(
variable = dtm['variable'].values[0],
badprob = lambda x: x['bad']/(x['bad']+x['good'])
).reset_index()
# order by badprob if is.character
if dtm.value.dtype.name not in ['category', 'bool']:
init_bin = init_bin.sort_values(by='badprob').reset_index()
# add index as brkp column
init_bin = init_bin.assign(brkp = lambda x: x.index)\
[['variable', 'value', 'brkp', 'good', 'bad', 'badprob']]\
.rename(columns={'value':'bin'})
# remove brkp that good == 0 or bad == 0 ------
while len(init_bin.query('(good==0) or (bad==0)')) > 0:
# brkp needs to be removed if good==0 or bad==0
rm_brkp = init_bin.assign(count = lambda x: x['good']+x['bad'])\
.assign(
count_lag = lambda x: x['count'].shift(1).fillna(len(dtm)+1),
count_lead = lambda x: x['count'].shift(-1).fillna(len(dtm)+1)
).assign(merge_tolead = lambda x: x['count_lag'] > x['count_lead'])\
.query('(good==0) or (bad==0)')\
.query('count == count.min()').iloc[0,]
# set brkp to lead's or lag's
shift_period = -1 if rm_brkp['merge_tolead'] else 1
init_bin = init_bin.assign(brkp2 = lambda x: x['brkp'].shift(shift_period))\
.assign(brkp = lambda x:np.where(x['brkp'] == rm_brkp['brkp'], x['brkp2'], x['brkp']))
# groupby brkp
init_bin = init_bin.groupby('brkp').agg({
'variable':lambda x: np.unique(x),
'bin': lambda x: '%,%'.join(x),
'good': sum,
'bad': sum
}).assign(badprob = lambda x: x['bad']/(x['good']+x['bad']))\
.reset_index()
# format init_bin
if is_numeric_dtype(dtm['value']):
init_bin = init_bin\
.assign(bin = lambda x: [re.sub(r'(?<=,).+%,%.+,', '', i) if ('%,%' in i) else i for i in x['bin']])\
.assign(brkp = lambda x: [float(re.match('^\[(.*),.+', i).group(1)) for i in x['bin']])
# return
return {'binning_sv':binning_sv, 'initial_binning':init_bin}
# required in woebin2_tree # add 1 best break for tree-like binning
def woebin2_tree_add_1brkp(dtm, initial_binning, count_distr_limit, bestbreaks=None):
'''
add a breakpoint into provided bestbreaks
Params
------
dtm
initial_binning
count_distr_limit
bestbreaks
Returns
------
DataFrame
a binning dataframe with updated breaks
'''
# dtm removed values in spl_val
# total_iv for all best breaks
def total_iv_all_breaks(initial_binning, bestbreaks, dtm_rows):
# best breaks set
breaks_set = set(initial_binning.brkp).difference(set(list(map(float, ['-inf', 'inf']))))
if bestbreaks is not None: breaks_set = breaks_set.difference(set(bestbreaks))
breaks_set = sorted(breaks_set)
# loop on breaks_set
init_bin_all_breaks = initial_binning.copy(deep=True)
for i in breaks_set:
# best break + i
bestbreaks_i = [float('-inf')]+sorted(bestbreaks+[i] if bestbreaks is not None else [i])+[float('inf')]
# best break datatable
labels = ['[{},{})'.format(bestbreaks_i[i], bestbreaks_i[i+1]) for i in range(len(bestbreaks_i)-1)]
init_bin_all_breaks.loc[:,'bstbin'+str(i)] = pd.cut(init_bin_all_breaks['brkp'], bestbreaks_i, right=False, labels=labels)#.astype(str)
# best break dt
total_iv_all_brks = pd.melt(
init_bin_all_breaks, id_vars=["variable", "good", "bad"], var_name='bstbin',
value_vars=['bstbin'+str(i) for i in breaks_set])\
.groupby(['variable', 'bstbin', 'value'])\
.agg({'good':sum, 'bad':sum}).reset_index()\
.assign(count=lambda x: x['good']+x['bad'])
total_iv_all_brks['count_distr'] = total_iv_all_brks.groupby(['variable', 'bstbin'])\
['count'].apply(lambda x: x/dtm_rows).reset_index(drop=True)
total_iv_all_brks['min_count_distr'] = total_iv_all_brks.groupby(['variable', 'bstbin'])\
['count_distr'].transform(lambda x: min(x))
total_iv_all_brks = total_iv_all_brks\
.assign(bstbin = lambda x: [float(re.sub('^bstbin', '', i)) for i in x['bstbin']] )\
.groupby(['variable','bstbin','min_count_distr'])\
.apply(lambda x: iv_01(x['good'], x['bad'])).reset_index(name='total_iv')
# return
return total_iv_all_brks
# binning add 1best break
def binning_add_1bst(initial_binning, bestbreaks):
if bestbreaks is None:
bestbreaks_inf = [float('-inf'),float('inf')]
else:
bestbreaks_inf = [float('-inf')]+sorted(bestbreaks)+[float('inf')]
labels = ['[{},{})'.format(bestbreaks_inf[i], bestbreaks_inf[i+1]) for i in range(len(bestbreaks_inf)-1)]
binning_1bst_brk = initial_binning.assign(
bstbin = lambda x: pd.cut(x['brkp'], bestbreaks_inf, right=False, labels=labels)
)
if is_numeric_dtype(dtm['value']):
binning_1bst_brk = binning_1bst_brk.groupby(['variable', 'bstbin'])\
.agg({'good':sum, 'bad':sum}).reset_index().assign(bin=lambda x: x['bstbin'])\
[['bstbin', 'variable', 'bin', 'good', 'bad']]
else:
binning_1bst_brk = binning_1bst_brk.groupby(['variable', 'bstbin'])\
.agg({'good':sum, 'bad':sum, 'bin':lambda x:'%,%'.join(x)}).reset_index()\
[['bstbin', 'variable', 'bin', 'good', 'bad']]
# format
binning_1bst_brk['total_iv'] = iv_01(binning_1bst_brk.good, binning_1bst_brk.bad)
binning_1bst_brk['bstbrkp'] = [float(re.match("^\[(.*),.+", i).group(1)) for i in binning_1bst_brk['bstbin']]
# return
return binning_1bst_brk
# dtm_rows
dtm_rows = len(dtm.index)
# total_iv for all best breaks
total_iv_all_brks = total_iv_all_breaks(initial_binning, bestbreaks, dtm_rows)
# bestbreaks: total_iv == max(total_iv) & min(count_distr) >= count_distr_limit
bstbrk_maxiv = total_iv_all_brks.loc[lambda x: x['min_count_distr'] >= count_distr_limit]
if len(bstbrk_maxiv.index) > 0:
bstbrk_maxiv = bstbrk_maxiv.loc[lambda x: x['total_iv']==max(x['total_iv'])]
bstbrk_maxiv = bstbrk_maxiv['bstbin'].tolist()[0]
else:
bstbrk_maxiv = None
# bestbreaks
if bstbrk_maxiv is not None:
# add 1best break to bestbreaks
bestbreaks = bestbreaks+[bstbrk_maxiv] if bestbreaks is not None else [bstbrk_maxiv]
# binning add 1best break
bin_add_1bst = binning_add_1bst(initial_binning, bestbreaks)
# return
return bin_add_1bst
# required in woebin2 # return tree-like binning
def woebin2_tree(dtm, init_count_distr=0.02, count_distr_limit=0.05,
stop_limit=0.1, bin_num_limit=8, breaks=None, spl_val=None):
'''
binning using tree-like method
Params
------
dtm:
init_count_distr:
count_distr_limit:
stop_limit:
bin_num_limit:
breaks:
spl_val:
Returns
------
dict
returns a dict with initial binning and special_value binning
'''
# initial binning
bin_list = woebin2_init_bin(dtm, init_count_distr=init_count_distr, breaks=breaks, spl_val=spl_val)
initial_binning = bin_list['initial_binning']
binning_sv = bin_list['binning_sv']
if len(initial_binning.index)==1:
return {'binning_sv':binning_sv, 'binning':initial_binning}
# initialize parameters
len_brks = len(initial_binning.index)
bestbreaks = None
IVt1 = IVt2 = 1e-10
IVchg = 1 ## IV gain ratio
step_num = 1
# best breaks from three to n+1 bins
binning_tree = None
while (IVchg >= stop_limit) and (step_num+1 <= min([bin_num_limit, len_brks])):
binning_tree = woebin2_tree_add_1brkp(dtm, initial_binning, count_distr_limit, bestbreaks)
# best breaks
bestbreaks = binning_tree.loc[lambda x: x['bstbrkp'] != float('-inf'), 'bstbrkp'].tolist()
# information value
IVt2 = binning_tree['total_iv'].tolist()[0]
IVchg = IVt2/IVt1-1 ## ratio gain
IVt1 = IVt2
# step_num
step_num = step_num + 1
if binning_tree is None: binning_tree = initial_binning
# return
return {'binning_sv':binning_sv, 'binning':binning_tree}
# examples
# import time
# start = time.time()
# # binning_dict = woebin2_init_bin(dtm, init_count_distr=0.02, breaks=None, spl_val=None)
# # woebin2_tree_add_1brkp(dtm, binning_dict['initial_binning'], count_distr_limit=0.05)
# # woebin2_tree(dtm, binning_dict['initial_binning'], count_distr_limit=0.05)
# end = time.time()
# print(end - start)
# required in woebin2 # return chimerge binning
#' @importFrom stats qchisq
def woebin2_chimerge(dtm, init_count_distr=0.02, count_distr_limit=0.05,
stop_limit=0.1, bin_num_limit=8, breaks=None, spl_val=None):
'''
binning using chimerge method
Params
------
dtm:
init_count_distr:
count_distr_limit:
stop_limit:
bin_num_limit:
breaks:
spl_val:
Returns
------
dict
returns a dict with initial binning and special_value binning
'''
# [chimerge](http://blog.csdn.net/qunxingvip/article/details/50449376)
# [ChiMerge:Discretization of numeric attributs](http://www.aaai.org/Papers/AAAI/1992/AAAI92-019.pdf)
# chisq = function(a11, a12, a21, a22) {
# A = list(a1 = c(a11, a12), a2 = c(a21, a22))
# Adf = do.call(rbind, A)
#
# Edf =
# matrix(rowSums(Adf), ncol = 1) %*%
# matrix(colSums(Adf), nrow = 1) /
# sum(Adf)
#
# sum((Adf-Edf)^2/Edf)
# }
# function to create a chisq column in initial_binning
def add_chisq(initial_binning):
chisq_df = pd.melt(initial_binning,
id_vars=["brkp", "variable", "bin"], value_vars=["good", "bad"],
var_name='goodbad', value_name='a')\
.sort_values(by=['goodbad', 'brkp']).reset_index(drop=True)
###
chisq_df['a_lag'] = chisq_df.groupby('goodbad')['a'].apply(lambda x: x.shift(1))#.reset_index(drop=True)
chisq_df['a_rowsum'] = chisq_df.groupby('brkp')['a'].transform(lambda x: sum(x))#.reset_index(drop=True)
chisq_df['a_lag_rowsum'] = chisq_df.groupby('brkp')['a_lag'].transform(lambda x: sum(x))#.reset_index(drop=True)
###
chisq_df = pd.merge(
chisq_df.assign(a_colsum = lambda df: df.a+df.a_lag),
chisq_df.groupby('brkp').apply(lambda df: sum(df.a+df.a_lag)).reset_index(name='a_sum'))\
.assign(
e = lambda df: df.a_rowsum*df.a_colsum/df.a_sum,
e_lag = lambda df: df.a_lag_rowsum*df.a_colsum/df.a_sum
).assign(
ae = lambda df: (df.a-df.e)**2/df.e + (df.a_lag-df.e_lag)**2/df.e_lag
).groupby('brkp').apply(lambda x: sum(x.ae)).reset_index(name='chisq')
# return
return pd.merge(initial_binning.assign(count = lambda x: x['good']+x['bad']), chisq_df, how='left')
# initial binning
bin_list = woebin2_init_bin(dtm, init_count_distr=init_count_distr, breaks=breaks, spl_val=spl_val)
initial_binning = bin_list['initial_binning']
binning_sv = bin_list['binning_sv']
# dtm_rows
dtm_rows = len(dtm.index)
# chisq limit
from scipy.special import chdtri
chisq_limit = chdtri(1, stop_limit)
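    # chdtri(1, p) inverts the chi-square survival function with 1 degree of freedom,
    # i.e. the equivalent of R's qchisq(1 - p, df = 1)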
# binning with chisq column
binning_chisq = add_chisq(initial_binning)
# param
bin_chisq_min = binning_chisq.chisq.min()
bin_count_distr_min = min(binning_chisq['count']/dtm_rows)
bin_nrow = len(binning_chisq.index)
# remove brkp if chisq < chisq_limit
while bin_chisq_min < chisq_limit or bin_count_distr_min < count_distr_limit or bin_nrow > bin_num_limit:
# brkp needs to be removed
if bin_chisq_min < chisq_limit:
rm_brkp = binning_chisq.assign(merge_tolead = False).sort_values(by=['chisq', 'count']).iloc[0,]
elif bin_count_distr_min < count_distr_limit:
rm_brkp = binning_chisq.assign(
count_distr = lambda x: x['count']/sum(x['count']),
chisq_lead = lambda x: x['chisq'].shift(-1).fillna(float('inf'))
).assign(merge_tolead = lambda x: x['chisq'] > x['chisq_lead'])
# replace merge_tolead as True
rm_brkp.loc[np.isnan(rm_brkp['chisq']), 'merge_tolead']=True
# order select 1st
rm_brkp = rm_brkp.sort_values(by=['count_distr']).iloc[0,]
elif bin_nrow > bin_num_limit:
rm_brkp = binning_chisq.assign(merge_tolead = False).sort_values(by=['chisq', 'count']).iloc[0,]
# set brkp to lead's or lag's
shift_period = -1 if rm_brkp['merge_tolead'] else 1
binning_chisq = binning_chisq.assign(brkp2 = lambda x: x['brkp'].shift(shift_period))\
.assign(brkp = lambda x:np.where(x['brkp'] == rm_brkp['brkp'], x['brkp2'], x['brkp']))
# groupby brkp
binning_chisq = binning_chisq.groupby('brkp').agg({
'variable':lambda x:np.unique(x),
'bin': lambda x: '%,%'.join(x),
'good': sum,
'bad': sum
}).assign(badprob = lambda x: x['bad']/(x['good']+x['bad']))\
.reset_index()
# update
## add chisq to new binning dataframe
binning_chisq = add_chisq(binning_chisq)
## param
bin_chisq_min = binning_chisq.chisq.min()
bin_count_distr_min = min(binning_chisq['count']/dtm_rows)
bin_nrow = len(binning_chisq.index)
# format init_bin # remove (.+\\)%,%\\[.+,)
if is_numeric_dtype(dtm['value']):
binning_chisq = binning_chisq\
.assign(bin = lambda x: [re.sub(r'(?<=,).+%,%.+,', '', i) if ('%,%' in i) else i for i in x['bin']])\
.assign(brkp = lambda x: [float(re.match('^\[(.*),.+', i).group(1)) for i in x['bin']])
# return
return {'binning_sv':binning_sv, 'binning':binning_chisq}
# required in woebin2 # # format binning output
def binning_format(binning):
'''
format binning dataframe
Params
------
binning: with columns of variable, bin, good, bad
Returns
------
DataFrame
binning dataframe with columns of 'variable', 'bin',
'count', 'count_distr', 'good', 'bad', 'badprob', 'woe',
'bin_iv', 'total_iv', 'breaks', 'is_special_values'
'''
binning['count'] = binning['good'] + binning['bad']
binning['count_distr'] = binning['count']/sum(binning['count'])
binning['badprob'] = binning['bad']/binning['count']
# binning = binning.assign(
# count = lambda x: (x['good']+x['bad']),
# count_distr = lambda x: (x['good']+x['bad'])/sum(x['good']+x['bad']),
# badprob = lambda x: x['bad']/(x['good']+x['bad']))
# new columns: woe, iv, breaks, is_sv
binning['woe'] = woe_01(binning['good'],binning['bad'])
binning['bin_iv'] = miv_01(binning['good'],binning['bad'])
binning['total_iv'] = binning['bin_iv'].sum()
# breaks
binning['breaks'] = binning['bin']
if any([r'[' in str(i) for i in binning['bin']]):
def re_extract_all(x):
gp23 = re.match(r"^\[(.*), *(.*)\)((%,%missing)*)", x)
breaks_string = x if gp23 is None else gp23.group(2)+gp23.group(3)
return breaks_string
binning['breaks'] = [re_extract_all(i) for i in binning['bin']]
# is_sv
binning['is_special_values'] = binning['is_sv']
# return
return binning[['variable', 'bin', 'count', 'count_distr', 'good', 'bad', 'badprob', 'woe', 'bin_iv', 'total_iv', 'breaks', 'is_special_values']]
# woebin2
# This function provides woe binning for only two columns (one x and one y) dataframe.
def woebin2(dtm, breaks=None, spl_val=None,
init_count_distr=0.02, count_distr_limit=0.05,
stop_limit=0.1, bin_num_limit=8, method="tree"):
'''
provides woe binning for only two series
Params
------
Returns
------
DataFrame
'''
# binning
if breaks is not None:
# 1.return binning if breaks provided
bin_list = woebin2_breaks(dtm=dtm, breaks=breaks, spl_val=spl_val)
else:
if stop_limit == 'N':
# binning of initial & specialvalues
bin_list = woebin2_init_bin(dtm, init_count_distr=init_count_distr, breaks=breaks, spl_val=spl_val)
else:
if method == 'tree':
# 2.tree-like optimal binning
bin_list = woebin2_tree(
dtm, init_count_distr=init_count_distr, count_distr_limit=count_distr_limit,
stop_limit=stop_limit, bin_num_limit=bin_num_limit, breaks=breaks, spl_val=spl_val)
elif method == "chimerge":
# 2.chimerge optimal binning
bin_list = woebin2_chimerge(
dtm, init_count_distr=init_count_distr, count_distr_limit=count_distr_limit,
stop_limit=stop_limit, bin_num_limit=bin_num_limit, breaks=breaks, spl_val=spl_val)
# rbind binning_sv and binning
binning = pd.concat(bin_list, keys=bin_list.keys()).reset_index()\
.assign(is_sv = lambda x: x.level_0 =='binning_sv')
# return
return binning_format(binning)
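# --- Hedged usage sketch (illustration only, not part of the original module) ---
# woebin2() is called on a two-column "dtm" frame. Based on the references to
# dtm['value'] above, the assumed layout is columns 'y' (binary target),
# 'variable' (variable name) and 'value' (raw values); the exact schema is set
# by the upstream dtm constructor, so the example is left commented out.
# dtm_demo = pd.DataFrame({
#     'y': np.random.binomial(1, 0.2, size=1000),
#     'variable': 'age',
#     'value': np.random.randint(18, 70, size=1000)
# })
# bins_demo = woebin2(dtm_demo, method='chimerge', bin_num_limit=5)
# print(bins_demo[['bin', 'count', 'badprob', 'woe', 'bin_iv', 'total_iv']])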
def bins_to_breaks(bins, dt, to_string=False, save_string=None):
if isinstance(bins, dict):
bins = pd.concat(bins, ignore_index=True)
# x variables
xs_all = bins['variable'].unique()
# dtypes of variables
vars_class = pd.DataFrame({
'variable': xs_all,
'not_numeric': [not is_numeric_dtype(dt[i]) for i in xs_all]
})
# breakslist of bins
bins_breakslist = bins[~bins['breaks'].isin(["-inf","inf","missing"]) & ~bins['is_special_values']]
bins_breakslist = pd.merge(bins_breakslist[['variable', 'breaks']], vars_class, how='left', on='variable')
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Mar 18 13:51:22 2020
@author: adiallo
<NAME>
Pans Project 2020
"""
import os #operating system import to get paths
import matplotlib.pyplot as plt #to plot graphs
import pandas as pd #to read/write data
#Reading the dataset
base_path = os.getcwd()
listOfFolders = os.listdir(os.getcwd())
folder = 'Iris-data'
path = ''
if folder in listOfFolders:
    path = os.path.join(base_path, folder)
    print('found')
else:
    print('Create folder with Iris-data and put iris.csv in it.')
# Read data from file 'filename.csv'
data = pd.read_csv(os.path.join(path, 'iris.csv'))
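# Optional quick look at what was loaded (assumes the usual iris.csv layout with
# four measurement columns plus a species column); commented out so the script
# behaves exactly as before.
# print(data.head())
# print(data.describe())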
# -*- coding: utf-8 -*-
"""
bootstrapping based on event rate on one animal
num_bs_replicates=50000 takes too much time, I did 1000 instead.
Results with narrowest CIs: (space: 60%,75%,90% exceedance, 40-90 state threshold )
CASE: SpikerateCoact
animal    winner exceedance    winner state_threshold    (combination giving the narrowest CI)
H1 0p9 70
H2 0p9 90
H3 0p75 90
H4 0p9 90
H5 0p9 90
H6 0p9 90
H7 0p9 90
H8 0p9 90
H9 0p9 90
H10 0p9 90
H11 0p9 90
N1 0p9 60
N2 0p9 90
N3 0p9 80
N4 0p9 90
N5 0p9 90
N6 0p9 90
CASE: SpikestdCoact
animal    winner exceedance    winner state_threshold    (combination giving the narrowest CI)
H1 0p9 90
H2 0p9 90
H3 0p9 90
H4 0p9 90
H5 0p9 60
H6 0p9 90
H7 0p9 90
H8 0p9 90
H9 0p9 90
H10 0p9 90
H11 0p9 90
N1 0p9 90
N2 0p9 90
N3 0p9 90
N4 0p9 90
N5 0p9 90
N6 0p9 90
"""
# In[ ]:
from string import ascii_letters
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import os
# In[ ]: fns
def getResampledStats(time, stats):
resampled_index = np.random.choice(np.arange(len(time)), len(time))
print(resampled_index)
return np.array(time)[resampled_index], np.array(stats)[resampled_index]
def getEventRate(denom, stats,threshold):
#time - resampled time
#stats - resample stats with same index as resampled time
state_array = np.zeros(len(stats))
for i in range(len(stats)):
if stats[i] > threshold:
state_array[i] = 1
transition_timestamp = []
for i in range(len(state_array) - 1):
if (state_array[i] - state_array[i + 1]) == -1:
transition_timestamp.append([i + 1])
return len(transition_timestamp) / (denom)
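# Worked mini-example of the event definition above: with threshold = 50 and
# stats = [10, 60, 20, 70], state_array becomes [0, 1, 0, 1]; the two 0 -> 1
# transitions (at indices 1 and 3) give len(transition_timestamp) = 2, so the
# returned event rate is 2 / denom.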
def draw_bs_replicates(denom,time,stats,size):
"""creates a bootstrap sample, computes replicates and returns replicates array"""
# Create an empty array to store replicates
bs_replicates = np.empty(size)
# Create bootstrap replicates as much as size
for i in range(size):
# Create a bootstrap sample
#bs_sample = np.random.choice(data,size=len(data))
_, bb = getResampledStats(time, stats)
rate = getEventRate(denom,bb,threshold)
# Get bootstrap replicate and append to bs_replicates
bs_replicates[i] = rate
return bs_replicates
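# Note: draw_bs_replicates() relies on the module-level `threshold` variable
# inside getEventRate(), so `threshold` must be assigned before calling it
# (it is set in the cells below).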
# In[ ]: EXAMPLE ONE ANIMAL
df = pd.read_csv('../HeartFailureAnimals/H10/SpikerateCoact_output_1min_20minbuff_0p6/coactivity_stats.csv')
time = df['time']
stats = df['coactivity_stat']
endbaseline_1844 = 19430.61330
threshold = 50
num_bs_replicates=100 #Change to 50000
#time before end of baseline
index = time < endbaseline_1844
time = time[index]
stats = stats[index]
#convert to lists
time = time.tolist()
stats = stats.tolist()
if len(stats) > 0:
# Draw N bootstrap replicates
denom = time[-1] - time[0] #hard coded
bs_replicates_values = draw_bs_replicates(denom,time, stats, num_bs_replicates)
# Print empirical mean
#print("Empirical mean: " + str(np.mean(values)))
# Print the mean of bootstrap replicates
#print("Bootstrap replicates mean: " + str(np.mean(bs_replicates_values)))
########################### COMMENT IF PLOTTING STATES ######################################################
# Plot the PDF for bootstrap replicates as histogram & save fig
plt.hist(bs_replicates_values,bins=30)
lower=5
upper=95
# Showing the related percentiles
plt.axvline(x=np.percentile(bs_replicates_values,[lower]), ymin=0, ymax=1,label='5th percentile',c='y')
plt.axvline(x=np.percentile(bs_replicates_values,[upper]), ymin=0, ymax=1,label='95th percentile',c='r')
plt.xlabel("Event rate")
plt.ylabel("Probability Density Function")
#plt.title("pig" + current_animal + " SpikerateCoact_output_1min_20minbuff_0p6" +" Th: " + str(threshold))
plt.legend()
#str_PDF_savefig_pdf= "pig" + str(current_animal) + "_PDF_SpikerateCoact_output_1min_20minbuff_0p6" + "_Threshold" + str(threshold) + "_bootstrap" + str(num_bs_replicates) + "_baseline.pdf"
#plt.savefig(str_PDF_savefig_pdf)
plt.show()
# Get the corresponding values of 5th and 95th CI
CI_BS = np.percentile(bs_replicates_values,[lower,upper])
CI_width = np.diff(CI_BS)
# Print stuff
print("event rate replicates: ",bs_replicates_values)
print("event rate replicates mean: ",np.mean(bs_replicates_values))
print("event rate replicates std: ",np.std(bs_replicates_values))
print("The confidence interval: ",CI_BS)
print("CI width: ",CI_width)
else:
print("has no transition timestamps for threshold = " + str(threshold))
# In[ ]:calculation params
#End of Baseline timestamps
EndBaseline_HF = [15157.47730, 13782.64500, 14479.24235, 15010.85545, 20138.13390, 14126.76400, 22447.50400, 19488.27205, 19001.37350, 16823.12835, 19430.61330]
EndBaseline_Normal = [18081.77015, 14387.14405, 17091.46195, 21465.20400, 28360.64150, 22006.09015]
#End of Baseline linewidth
lw_EndBaseline = 3
# In[ ]: Data: HF Animals
HF_path = '../HeartFailureAnimals/'
filenames = os.listdir(HF_path)
filenames = [f for f in filenames if (f.startswith("H"))]
print(filenames)
# ['H1', 'H2', 'H3', 'H4', 'H5', 'H6', 'H7', 'H8', 'H9', 'H10', 'H11']
# In[ ]: SpikerateCoact_output_1min_20minbuff_0p6 : EACH In[] AFTER THIS ONE IS REPEAT
threshold = 10
animals = list()
coactivity_stats_filepaths = list()
state_timestamp_HF = []
bsstats_all = list()
split_char_animal="H"
num_bs_replicates=1000 #Change to 50000
# fig, ax_HF = plt.subplots(figsize = (22,12), nrows = len(filenames), ncols = 1)
# str_HF_state_title= "States for HF animals from SpikerateCoact_output_1min_20minbuff_0p6, threshold = " + str(threshold)
# fig.suptitle(str_HF_state_title, fontsize=16)
count = 0
for filename in filenames:
current_path = os.path.join(HF_path, filename)
current_path_SpikerateCoact_output_1min_20minbuff_0p6 = os.path.join(current_path, 'SpikerateCoact_output_1min_20minbuff_0p6').replace("\\","/")
current_animal = filename.split(split_char_animal)[1]
animals.append(filename)
for root, dirs, files in os.walk(current_path_SpikerateCoact_output_1min_20minbuff_0p6):
#print(files)
for name in files:
if name.startswith(("coactivity_stats.csv")):
coactivity_stats_filepath = os.path.join(current_path_SpikerateCoact_output_1min_20minbuff_0p6, name).replace("\\","/") ## FOR WINDOWS BACKSLASH
coactivity_stats_filepaths.append(coactivity_stats_filepath)
str_current = "current path = " + coactivity_stats_filepath
print(str_current)
df = pd.read_csv(coactivity_stats_filepath)
time = df['time']
stats = df['coactivity_stat']
#limit time before end of baseline
index = time < EndBaseline_HF[count]
time = time[index]
stats = stats[index]
#convert to lists
time = time.tolist()
stats = stats.tolist()
########################### UNCOMMENT TO PLOT STATES (COMMENT THE FIGURES BELOW FIRST) ######################################################
# fill figure
# ax_HF[count].plot(time/3600, state_array ,'--', color = 'orchid', alpha=0.8)
# ax_HF[count].set_xticks(np.array(transition_timestamp)/3600)
# ax_HF[count].set_xlim(time[0]/3600,EndBaseline_HF[count]/3600) #limiting to baseline data only
# #ax_HF[count].axvline(x=EndBaseline_HF[count]/3600, color = 'black', linewidth = lw_EndBaseline) #full exp, mark end of baseline for each
# ax_HF[count].tick_params(axis="x", labelsize=3)
# ax_HF[count].set_yticks([0,1])
# ax_HF[count].spines["top"].set_visible(False)
# ax_HF[count].spines["right"].set_visible(False)
# ax_HF[count].spines["bottom"].set_visible(False)
# ax_HF[count].set_ylabel((''.join(filter(lambda i: i.isdigit(), current_animal))), fontsize=12)
count = count + 1
########################## BOOTSTRAP (not sure what to bootstrap, was event rate definition len(events)/duration?) #################################
#values = np.diff(np.array(transition_timestamp)) #data to bootstrap
if len(stats) > 0:
# Draw N bootstrap replicates
denom = time[-1] - time[0] #hard coded
bs_replicates_values = draw_bs_replicates(denom,time, stats, num_bs_replicates)
########################### COMMENT IF PLOTTING STATES ######################################################
# Plot the PDF for bootstrap replicates as histogram & save fig
plt.hist(bs_replicates_values,bins=30)
lower=5
upper=95
# Showing the related percentiles
plt.axvline(x=np.percentile(bs_replicates_values,[lower]), ymin=0, ymax=1,label='5th percentile',c='y')
plt.axvline(x=np.percentile(bs_replicates_values,[upper]), ymin=0, ymax=1,label='95th percentile',c='r')
plt.xlabel("Event rate")
plt.ylabel("Probability Density Function")
plt.title("pig" + current_animal + " SpikerateCoact_output_1min_20minbuff_0p6" +" Th: " + str(threshold))
plt.legend()
str_PDF_savefig_pdf= "pig" + str(current_animal) + "_PDF_SpikerateCoact_output_1min_20minbuff_0p6" + "_Thr" + str(threshold) + "_BS" + str(num_bs_replicates) + "EvRate_base.pdf"
plt.savefig(str_PDF_savefig_pdf)
plt.show()
# Get the bootstrapped stats
bs_mean = np.mean(bs_replicates_values)
bs_std = np.std(bs_replicates_values)
ci = np.percentile(bs_replicates_values,[lower,upper])
ci_width = np.diff(ci)
# Print stuff
#print("event rate replicates: ",bs_replicates_values)
print("pig" + str(current_animal)+ " bootstrapped mean: ",bs_mean)
print( "pig" + str(current_animal) + " bootstrapped std: ",bs_std)
print("pig" + str(current_animal) + " bootstrapped ci: ",ci)
print("pig" + str(current_animal) + " bootstrapped ci width: ",ci_width)
bsstats_concat = np.concatenate((np.array([bs_mean]),np.array([bs_std]),ci,ci_width))
bsstats_all.append(bsstats_concat)
else:
print(current_animal + "has no transition timestamps for threshold = " + str(threshold))
bsstats_concat = [999,999,999,999,999]
bsstats_all.append(bsstats_concat)
df_bsstats = pd.DataFrame(bsstats_all)
#rename columns
df_bsstats.rename(columns = {0 :'mean', 1 :'std', 2 :'lower', 3 :'upper', 4 :'ci_width'}, inplace = True)
df_bsstats['state_threshold'] = threshold
df_bsstats['exceedance'] = "SpikerateCoact_output_1min_20minbuff_0p6"
df_bsstats['animal'] = filenames
#reindex column titles
column_titles = ['animal', 'mean', 'std', 'lower','upper','ci_width','state_threshold','exceedance']
df_bsstats=df_bsstats.reindex(columns=column_titles)
# add details to title
str_csv = "bsstats_HF_SpikerateCoact_output_1min_20minbuff_0p6" + "_Thr" + str(threshold) + "_BS" + str(num_bs_replicates) + "EvRate_base.csv"
# save csv
df_bsstats.to_csv(str_csv, index=False)
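# The next two cells repeat this bootstrap verbatim for the 0p75 and 0p9
# exceedance folders; only the folder name and the exceedance label used in the
# figure/CSV names change.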
# In[ ]: SpikerateCoact_output_1min_20minbuff_0p75
#threshold = 40
animals = list()
coactivity_stats_filepaths = list()
state_timestamp_HF = []
bsstats_all = list()
split_char_animal="pig"
num_bs_replicates=1000 #Change to 50000
# fig, ax_HF = plt.subplots(figsize = (22,12), nrows = len(filenames), ncols = 1)
# str_HF_state_title= "States for HF animals from SpikerateCoact_output_1min_20minbuff_0p75, threshold = " + str(threshold)
# fig.suptitle(str_HF_state_title, fontsize=16)
count = 0
for filename in filenames:
current_path = os.path.join(HF_path, filename)
current_path_SpikerateCoact_output_1min_20minbuff_0p75 = os.path.join(current_path, 'SpikerateCoact_output_1min_20minbuff_0p75').replace("\\","/")
current_animal = filename.split(split_char_animal)[1]
animals.append(filename)
for root, dirs, files in os.walk(current_path_SpikerateCoact_output_1min_20minbuff_0p75):
#print(files)
for name in files:
if name.startswith(("coactivity_stats.csv")):
coactivity_stats_filepath = os.path.join(current_path_SpikerateCoact_output_1min_20minbuff_0p75, name).replace("\\","/") ## FOR WINDOWS BACKSLASH
coactivity_stats_filepaths.append(coactivity_stats_filepath)
str_current = "current path = " + coactivity_stats_filepath
print(str_current)
df = pd.read_csv(coactivity_stats_filepath)
time = df['time']
stats = df['coactivity_stat']
#limit time before end of baseline
index = time < EndBaseline_HF[count]
time = time[index]
stats = stats[index]
#convert to lists
time = time.tolist()
stats = stats.tolist()
########################### UNCOMMENT TO PLOT STATES (COMMENT THE FIGURES BELOW FIRST) ######################################################
# fill figure
# ax_HF[count].plot(time/3600, state_array ,'--', color = 'orchid', alpha=0.8)
# ax_HF[count].set_xticks(np.array(transition_timestamp)/3600)
# ax_HF[count].set_xlim(time[0]/3600,EndBaseline_HF[count]/3600) #limiting to baseline data only
# #ax_HF[count].axvline(x=EndBaseline_HF[count]/3600, color = 'black', linewidth = lw_EndBaseline) #full exp, mark end of baseline for each
# ax_HF[count].tick_params(axis="x", labelsize=3)
# ax_HF[count].set_yticks([0,1])
# ax_HF[count].spines["top"].set_visible(False)
# ax_HF[count].spines["right"].set_visible(False)
# ax_HF[count].spines["bottom"].set_visible(False)
# ax_HF[count].set_ylabel((''.join(filter(lambda i: i.isdigit(), current_animal))), fontsize=12)
count = count + 1
########################## BOOTSTRAP (not sure what to bootstrap, was event rate definition len(events)/duration?) #################################
#values = np.diff(np.array(transition_timestamp)) #data to bootstrap
if len(stats) > 0:
# Draw N bootstrap replicates
denom = time[-1] - time[0] #hard coded
bs_replicates_values = draw_bs_replicates(denom,time, stats, num_bs_replicates)
########################### COMMENT IF PLOTTING STATES ######################################################
# Plot the PDF for bootstrap replicates as histogram & save fig
plt.hist(bs_replicates_values,bins=30)
lower=5
upper=95
# Showing the related percentiles
plt.axvline(x=np.percentile(bs_replicates_values,[lower]), ymin=0, ymax=1,label='5th percentile',c='y')
plt.axvline(x=np.percentile(bs_replicates_values,[upper]), ymin=0, ymax=1,label='95th percentile',c='r')
plt.xlabel("Event rate")
plt.ylabel("Probability Density Function")
plt.title("pig" + current_animal + " SpikerateCoact_output_1min_20minbuff_0p75" +" Th: " + str(threshold))
plt.legend()
str_PDF_savefig_pdf= "pig" + str(current_animal) + "_PDF_SpikerateCoact_output_1min_20minbuff_0p75" + "_Thr" + str(threshold) + "_BS" + str(num_bs_replicates) + "EvRate_base.pdf"
plt.savefig(str_PDF_savefig_pdf)
plt.show()
# Get the bootstrapped stats
bs_mean = np.mean(bs_replicates_values)
bs_std = np.std(bs_replicates_values)
ci = np.percentile(bs_replicates_values,[lower,upper])
ci_width = np.diff(ci)
# Print stuff
#print("event rate replicates: ",bs_replicates_values)
print("pig" + str(current_animal)+ " bootstrapped mean: ",bs_mean)
print( "pig" + str(current_animal) + " bootstrapped std: ",bs_std)
print("pig" + str(current_animal) + " bootstrapped ci: ",ci)
print("pig" + str(current_animal) + " bootstrapped ci width: ",ci_width)
bsstats_concat = np.concatenate((np.array([bs_mean]),np.array([bs_std]),ci,ci_width))
bsstats_all.append(bsstats_concat)
else:
print(current_animal + "has no transition timestamps for threshold = " + str(threshold))
bsstats_concat = [999,999,999,999,999]
bsstats_all.append(bsstats_concat)
df_bsstats = pd.DataFrame(bsstats_all)
#rename columns
df_bsstats.rename(columns = {0 :'mean', 1 :'std', 2 :'lower', 3 :'upper', 4 :'ci_width'}, inplace = True)
df_bsstats['state_threshold'] = threshold
df_bsstats['exceedance'] = "SpikerateCoact_output_1min_20minbuff_0p75"
df_bsstats['animal'] = filenames
#reindex column titles
column_titles = ['animal', 'mean', 'std', 'lower','upper','ci_width','state_threshold','exceedance']
df_bsstats=df_bsstats.reindex(columns=column_titles)
# add details to title
str_csv = "bsstats_HF_SpikerateCoact_output_1min_20minbuff_0p75" + "_Thr" + str(threshold) + "_BS" + str(num_bs_replicates) + "EvRate_base.csv"
# save csv
df_bsstats.to_csv(str_csv, index=False)
# In[ ]: SpikerateCoact_output_1min_20minbuff_0p9
#threshold = 50
animals = list()
coactivity_stats_filepaths = list()
state_timestamp_HF = []
bsstats_all = list()
split_char_animal="pig"
num_bs_replicates=1000 #Change to 50000
# fig, ax_HF = plt.subplots(figsize = (22,12), nrows = len(filenames), ncols = 1)
# str_HF_state_title= "States for HF animals from SpikerateCoact_output_1min_20minbuff_0p9, threshold = " + str(threshold)
# fig.suptitle(str_HF_state_title, fontsize=16)
count = 0
for filename in filenames:
current_path = os.path.join(HF_path, filename)
current_path_SpikerateCoact_output_1min_20minbuff_0p9 = os.path.join(current_path, 'SpikerateCoact_output_1min_20minbuff_0p9').replace("\\","/")
current_animal = filename.split(split_char_animal)[1]
animals.append(filename)
for root, dirs, files in os.walk(current_path_SpikerateCoact_output_1min_20minbuff_0p9):
#print(files)
for name in files:
if name.startswith(("coactivity_stats.csv")):
coactivity_stats_filepath = os.path.join(current_path_SpikerateCoact_output_1min_20minbuff_0p9, name).replace("\\","/") ## FOR WINDOWS BACKSLASH
coactivity_stats_filepaths.append(coactivity_stats_filepath)
str_current = "current path = " + coactivity_stats_filepath
print(str_current)
df = pd.read_csv(coactivity_stats_filepath)
time = df['time']
stats = df['coactivity_stat']
#limit time before end of baseline
index = time < EndBaseline_HF[count]
time = time[index]
stats = stats[index]
#convert to lists
time = time.tolist()
stats = stats.tolist()
########################### UNCOMMENT TO PLOT STATES (COMMENT THE FIGURES BELOW FIRST) ######################################################
# fill figure
# ax_HF[count].plot(time/3600, state_array ,'--', color = 'orchid', alpha=0.8)
# ax_HF[count].set_xticks(np.array(transition_timestamp)/3600)
# ax_HF[count].set_xlim(time[0]/3600,EndBaseline_HF[count]/3600) #limiting to baseline data only
# #ax_HF[count].axvline(x=EndBaseline_HF[count]/3600, color = 'black', linewidth = lw_EndBaseline) #full exp, mark end of baseline for each
# ax_HF[count].tick_params(axis="x", labelsize=3)
# ax_HF[count].set_yticks([0,1])
# ax_HF[count].spines["top"].set_visible(False)
# ax_HF[count].spines["right"].set_visible(False)
# ax_HF[count].spines["bottom"].set_visible(False)
# ax_HF[count].set_ylabel((''.join(filter(lambda i: i.isdigit(), current_animal))), fontsize=12)
count = count + 1
########################## BOOTSTRAP (not sure what to bootstrap, was event rate definition len(events)/duration?) #################################
#values = np.diff(np.array(transition_timestamp)) #data to bootstrap
if len(stats) > 0:
# Draw N bootstrap replicates
denom = time[-1] - time[0] #hard coded
bs_replicates_values = draw_bs_replicates(denom,time, stats, num_bs_replicates)
########################### COMMENT IF PLOTTING STATES ######################################################
# Plot the PDF for bootstrap replicates as histogram & save fig
plt.hist(bs_replicates_values,bins=30)
lower=5
upper=95
# Showing the related percentiles
plt.axvline(x=np.percentile(bs_replicates_values,[lower]), ymin=0, ymax=1,label='5th percentile',c='y')
plt.axvline(x=np.percentile(bs_replicates_values,[upper]), ymin=0, ymax=1,label='95th percentile',c='r')
plt.xlabel("Event rate")
plt.ylabel("Probability Density Function")
plt.title("pig" + current_animal + " SpikerateCoact_output_1min_20minbuff_0p9" +" Th: " + str(threshold))
plt.legend()
str_PDF_savefig_pdf= "pig" + str(current_animal) + "_PDF_SpikerateCoact_output_1min_20minbuff_0p9" + "_Thr" + str(threshold) + "_BS" + str(num_bs_replicates) + "EvRate_base.pdf"
plt.savefig(str_PDF_savefig_pdf)
plt.show()
# Get the bootstrapped stats
bs_mean = np.mean(bs_replicates_values)
bs_std = np.std(bs_replicates_values)
ci = np.percentile(bs_replicates_values,[lower,upper])
ci_width = np.diff(ci)
# Print stuff
#print("event rate replicates: ",bs_replicates_values)
print("pig" + str(current_animal)+ " bootstrapped mean: ",bs_mean)
print( "pig" + str(current_animal) + " bootstrapped std: ",bs_std)
print("pig" + str(current_animal) + " bootstrapped ci: ",ci)
print("pig" + str(current_animal) + " bootstrapped ci width: ",ci_width)
bsstats_concat = np.concatenate((np.array([bs_mean]),np.array([bs_std]),ci,ci_width))
bsstats_all.append(bsstats_concat)
else:
print(current_animal + "has no transition timestamps for threshold = " + str(threshold))
bsstats_concat = [999,999,999,999,999]
bsstats_all.append(bsstats_concat)
df_bsstats = pd.DataFrame(bsstats_all)
"""
Generate figures for the DeepCytometer paper for v8 of the pipeline.
Environment: cytometer_tensorflow_v2.
We repeat the phenotyping from klf14_b6ntac_exp_0110_paper_figures_v8.py, but change the stratification of the data so
that we have Control (PATs + WT MATs) vs. Het MATs.
The comparisons we do are:
* Control vs. MAT WT
* MAT WT vs. MAT Het
This script partly deprecates klf14_b6ntac_exp_0099_paper_figures_v7.py:
* Figures have been updated to have v8 of the pipeline in the paper.
This script partly deprecates klf14_b6ntac_exp_0110_paper_figures_v8.py:
* We repeat the phenotyping, but change the stratification of the data so that we have Control (PATs + WT MATs) vs.
Het MATs.
"""
"""
This file is part of Cytometer
Copyright 2021 Medical Research Council
SPDX-License-Identifier: Apache-2.0
Author: <NAME> <<EMAIL>>
"""
# script name to identify this experiment
experiment_id = 'klf14_b6ntac_exp_0111_paper_figures'
# cross-platform home directory
from pathlib import Path
home = str(Path.home())
import os
import sys
sys.path.extend([os.path.join(home, 'Software/cytometer')])
DEBUG = False
SAVE_FIGS = False
# post-processing parameters
min_area = 203 / 2 # (pix^2) smaller objects are rejected
max_area = 44879 * 3 # (pix^2) larger objects are rejected
xres_ref = 0.4538234626730202
yres_ref = 0.4537822752643282
min_area_um2 = min_area * xres_ref * yres_ref
max_area_um2 = max_area * xres_ref * yres_ref
# json_annotation_files_dict here needs to have the same files as in
# klf14_b6ntac_exp_0098_full_slide_size_analysis_v7.py
# SQWAT: list of annotation files
json_annotation_files_dict = {}
json_annotation_files_dict['sqwat'] = [
'KLF14-B6NTAC 36.1d PAT 99-16 C1 - 2016-02-11 11.48.31.json',
'KLF14-B6NTAC-MAT-16.2d 214-16 C1 - 2016-02-17 16.02.46.json',
'KLF14-B6NTAC-MAT-17.1a 44-16 C1 - 2016-02-01 11.14.17.json',
'KLF14-B6NTAC-MAT-17.1e 48-16 C1 - 2016-02-01 16.27.05.json',
'KLF14-B6NTAC-MAT-18.2a 57-16 C1 - 2016-02-03 09.10.17.json',
'KLF14-B6NTAC-PAT-37.3c 414-16 C1 - 2016-03-15 17.15.41.json',
'KLF14-B6NTAC-MAT-18.1d 53-16 C1 - 2016-02-02 14.32.03.json',
'KLF14-B6NTAC-MAT-17.2b 65-16 C1 - 2016-02-04 10.24.22.json',
'KLF14-B6NTAC-MAT-17.2g 69-16 C1 - 2016-02-04 16.15.05.json',
'KLF14-B6NTAC 37.1a PAT 106-16 C1 - 2016-02-12 16.21.00.json',
'KLF14-B6NTAC-36.1b PAT 97-16 C1 - 2016-02-10 17.38.06.json',
# 'KLF14-B6NTAC-PAT-37.2d 411-16 C1 - 2016-03-15 12.42.26.json',
'KLF14-B6NTAC-MAT-17.2a 64-16 C1 - 2016-02-04 09.17.52.json',
'KLF14-B6NTAC-MAT-16.2f 216-16 C1 - 2016-02-18 10.28.27.json',
'KLF14-B6NTAC-MAT-17.1d 47-16 C1 - 2016-02-01 15.25.53.json',
'KLF14-B6NTAC-MAT-16.2e 215-16 C1 - 2016-02-18 09.19.26.json',
'KLF14-B6NTAC 36.1g PAT 102-16 C1 - 2016-02-11 17.20.14.json',
'KLF14-B6NTAC-37.1g PAT 112-16 C1 - 2016-02-16 13.33.09.json',
'KLF14-B6NTAC-38.1e PAT 94-16 C1 - 2016-02-10 12.13.10.json',
'KLF14-B6NTAC-MAT-18.2d 60-16 C1 - 2016-02-03 13.13.57.json',
'KLF14-B6NTAC-MAT-18.2g 63-16 C1 - 2016-02-03 16.58.52.json',
'KLF14-B6NTAC-MAT-18.2f 62-16 C1 - 2016-02-03 15.46.15.json',
'KLF14-B6NTAC-MAT-18.1b 51-16 C1 - 2016-02-02 09.59.16.json',
'KLF14-B6NTAC-MAT-19.2c 220-16 C1 - 2016-02-18 17.03.38.json',
'KLF14-B6NTAC-MAT-18.1f 55-16 C1 - 2016-02-02 16.14.30.json',
'KLF14-B6NTAC-PAT-36.3b 412-16 C1 - 2016-03-15 14.37.55.json',
'KLF14-B6NTAC-MAT-16.2c 213-16 C1 - 2016-02-17 14.51.18.json',
'KLF14-B6NTAC-PAT-37.4a 417-16 C1 - 2016-03-16 15.55.32.json',
'KLF14-B6NTAC 36.1e PAT 100-16 C1 - 2016-02-11 14.06.56.json',
'KLF14-B6NTAC-MAT-18.1c 52-16 C1 - 2016-02-02 12.26.58.json',
'KLF14-B6NTAC-MAT-18.2b 58-16 C1 - 2016-02-03 11.10.52.json',
'KLF14-B6NTAC-36.1a PAT 96-16 C1 - 2016-02-10 16.12.38.json',
'KLF14-B6NTAC-PAT-39.2d 454-16 C1 - 2016-03-17 14.33.38.json',
'KLF14-B6NTAC 36.1c PAT 98-16 C1 - 2016-02-11 10.45.00.json',
'KLF14-B6NTAC-MAT-18.2e 61-16 C1 - 2016-02-03 14.19.35.json',
'KLF14-B6NTAC-MAT-19.2g 222-16 C1 - 2016-02-25 15.13.00.json',
'KLF14-B6NTAC-PAT-37.2a 406-16 C1 - 2016-03-14 12.01.56.json',
'KLF14-B6NTAC 36.1j PAT 105-16 C1 - 2016-02-12 14.33.33.json',
'KLF14-B6NTAC-37.1b PAT 107-16 C1 - 2016-02-15 11.43.31.json',
'KLF14-B6NTAC-MAT-17.1c 46-16 C1 - 2016-02-01 14.02.04.json',
'KLF14-B6NTAC-MAT-19.2f 217-16 C1 - 2016-02-18 11.48.16.json',
'KLF14-B6NTAC-MAT-17.2d 67-16 C1 - 2016-02-04 12.34.32.json',
'KLF14-B6NTAC-MAT-18.3c 218-16 C1 - 2016-02-18 13.12.09.json',
'KLF14-B6NTAC-PAT-37.3a 413-16 C1 - 2016-03-15 15.54.12.json',
'KLF14-B6NTAC-MAT-19.1a 56-16 C1 - 2016-02-02 17.23.31.json',
'KLF14-B6NTAC-37.1h PAT 113-16 C1 - 2016-02-16 15.14.09.json',
'KLF14-B6NTAC-MAT-18.3d 224-16 C1 - 2016-02-26 11.13.53.json',
'KLF14-B6NTAC-PAT-37.2g 415-16 C1 - 2016-03-16 11.47.52.json',
'KLF14-B6NTAC-37.1e PAT 110-16 C1 - 2016-02-15 17.33.11.json',
'KLF14-B6NTAC-MAT-17.2f 68-16 C1 - 2016-02-04 15.05.54.json',
'KLF14-B6NTAC 36.1h PAT 103-16 C1 - 2016-02-12 10.15.22.json',
# 'KLF14-B6NTAC-PAT-39.1h 453-16 C1 - 2016-03-17 11.38.04.json',
'KLF14-B6NTAC-MAT-16.2b 212-16 C1 - 2016-02-17 12.49.00.json',
'KLF14-B6NTAC-MAT-17.1f 49-16 C1 - 2016-02-01 17.51.46.json',
'KLF14-B6NTAC-PAT-36.3d 416-16 C1 - 2016-03-16 14.44.11.json',
'KLF14-B6NTAC-MAT-16.2a 211-16 C1 - 2016-02-17 11.46.42.json',
'KLF14-B6NTAC-38.1f PAT 95-16 C1 - 2016-02-10 14.41.44.json',
'KLF14-B6NTAC-PAT-36.3a 409-16 C1 - 2016-03-15 10.18.46.json',
'KLF14-B6NTAC-MAT-19.2b 219-16 C1 - 2016-02-18 15.41.38.json',
'KLF14-B6NTAC-MAT-17.1b 45-16 C1 - 2016-02-01 12.23.50.json',
'KLF14-B6NTAC 36.1f PAT 101-16 C1 - 2016-02-11 15.23.06.json',
'KLF14-B6NTAC-MAT-18.1e 54-16 C1 - 2016-02-02 15.26.33.json',
'KLF14-B6NTAC-37.1d PAT 109-16 C1 - 2016-02-15 15.19.08.json',
'KLF14-B6NTAC-MAT-18.2c 59-16 C1 - 2016-02-03 11.56.52.json',
'KLF14-B6NTAC-PAT-37.2f 405-16 C1 - 2016-03-14 10.58.34.json',
'KLF14-B6NTAC-PAT-37.2e 408-16 C1 - 2016-03-14 16.23.30.json',
'KLF14-B6NTAC-MAT-19.2e 221-16 C1 - 2016-02-25 14.00.14.json',
# 'KLF14-B6NTAC-PAT-37.2c 407-16 C1 - 2016-03-14 14.13.54.json',
# 'KLF14-B6NTAC-PAT-37.2b 410-16 C1 - 2016-03-15 11.24.20.json',
'KLF14-B6NTAC-PAT-37.4b 419-16 C1 - 2016-03-17 10.22.54.json',
'KLF14-B6NTAC-37.1c PAT 108-16 C1 - 2016-02-15 14.49.45.json',
'KLF14-B6NTAC-MAT-18.1a 50-16 C1 - 2016-02-02 09.12.41.json',
'KLF14-B6NTAC 36.1i PAT 104-16 C1 - 2016-02-12 12.14.38.json',
'KLF14-B6NTAC-PAT-37.2h 418-16 C1 - 2016-03-16 17.01.17.json',
'KLF14-B6NTAC-MAT-17.2c 66-16 C1 - 2016-02-04 11.46.39.json',
'KLF14-B6NTAC-MAT-18.3b 223-16 C2 - 2016-02-26 10.35.52.json',
'KLF14-B6NTAC-37.1f PAT 111-16 C2 - 2016-02-16 11.26 (1).json',
'KLF14-B6NTAC-PAT 37.2b 410-16 C4 - 2020-02-14 10.27.23.json',
'KLF14-B6NTAC-PAT 37.2c 407-16 C4 - 2020-02-14 10.15.57.json',
# 'KLF14-B6NTAC-PAT 37.2d 411-16 C4 - 2020-02-14 10.34.10.json'
]
# GWAT: list of annotation files
json_annotation_files_dict['gwat'] = [
'KLF14-B6NTAC-36.1a PAT 96-16 B1 - 2016-02-10 15.32.31.json',
'KLF14-B6NTAC-36.1b PAT 97-16 B1 - 2016-02-10 17.15.16.json',
'KLF14-B6NTAC-36.1c PAT 98-16 B1 - 2016-02-10 18.32.40.json',
'KLF14-B6NTAC 36.1d PAT 99-16 B1 - 2016-02-11 11.29.55.json',
'KLF14-B6NTAC 36.1e PAT 100-16 B1 - 2016-02-11 12.51.11.json',
'KLF14-B6NTAC 36.1f PAT 101-16 B1 - 2016-02-11 14.57.03.json',
'KLF14-B6NTAC 36.1g PAT 102-16 B1 - 2016-02-11 16.12.01.json',
'KLF14-B6NTAC 36.1h PAT 103-16 B1 - 2016-02-12 09.51.08.json',
# 'KLF14-B6NTAC 36.1i PAT 104-16 B1 - 2016-02-12 11.37.56.json',
'KLF14-B6NTAC 36.1j PAT 105-16 B1 - 2016-02-12 14.08.19.json',
'KLF14-B6NTAC 37.1a PAT 106-16 B1 - 2016-02-12 15.33.02.json',
'KLF14-B6NTAC-37.1b PAT 107-16 B1 - 2016-02-15 11.25.20.json',
'KLF14-B6NTAC-37.1c PAT 108-16 B1 - 2016-02-15 12.33.10.json',
'KLF14-B6NTAC-37.1d PAT 109-16 B1 - 2016-02-15 15.03.44.json',
'KLF14-B6NTAC-37.1e PAT 110-16 B1 - 2016-02-15 16.16.06.json',
'KLF14-B6NTAC-37.1g PAT 112-16 B1 - 2016-02-16 12.02.07.json',
'KLF14-B6NTAC-37.1h PAT 113-16 B1 - 2016-02-16 14.53.02.json',
'KLF14-B6NTAC-38.1e PAT 94-16 B1 - 2016-02-10 11.35.53.json',
'KLF14-B6NTAC-38.1f PAT 95-16 B1 - 2016-02-10 14.16.55.json',
'KLF14-B6NTAC-MAT-16.2a 211-16 B1 - 2016-02-17 11.21.54.json',
'KLF14-B6NTAC-MAT-16.2b 212-16 B1 - 2016-02-17 12.33.18.json',
'KLF14-B6NTAC-MAT-16.2c 213-16 B1 - 2016-02-17 14.01.06.json',
'KLF14-B6NTAC-MAT-16.2d 214-16 B1 - 2016-02-17 15.43.57.json',
'KLF14-B6NTAC-MAT-16.2e 215-16 B1 - 2016-02-17 17.14.16.json',
'KLF14-B6NTAC-MAT-16.2f 216-16 B1 - 2016-02-18 10.05.52.json',
# 'KLF14-B6NTAC-MAT-17.1a 44-16 B1 - 2016-02-01 09.19.20.json',
'KLF14-B6NTAC-MAT-17.1b 45-16 B1 - 2016-02-01 12.05.15.json',
'KLF14-B6NTAC-MAT-17.1c 46-16 B1 - 2016-02-01 13.01.30.json',
'KLF14-B6NTAC-MAT-17.1d 47-16 B1 - 2016-02-01 15.11.42.json',
'KLF14-B6NTAC-MAT-17.1e 48-16 B1 - 2016-02-01 16.01.09.json',
'KLF14-B6NTAC-MAT-17.1f 49-16 B1 - 2016-02-01 17.12.31.json',
'KLF14-B6NTAC-MAT-17.2a 64-16 B1 - 2016-02-04 08.57.34.json',
'KLF14-B6NTAC-MAT-17.2b 65-16 B1 - 2016-02-04 10.06.00.json',
'KLF14-B6NTAC-MAT-17.2c 66-16 B1 - 2016-02-04 11.14.28.json',
'KLF14-B6NTAC-MAT-17.2d 67-16 B1 - 2016-02-04 12.20.20.json',
'KLF14-B6NTAC-MAT-17.2f 68-16 B1 - 2016-02-04 14.01.40.json',
'KLF14-B6NTAC-MAT-17.2g 69-16 B1 - 2016-02-04 15.52.52.json',
'KLF14-B6NTAC-MAT-18.1a 50-16 B1 - 2016-02-02 08.49.06.json',
'KLF14-B6NTAC-MAT-18.1b 51-16 B1 - 2016-02-02 09.46.31.json',
'KLF14-B6NTAC-MAT-18.1c 52-16 B1 - 2016-02-02 11.24.31.json',
'KLF14-B6NTAC-MAT-18.1d 53-16 B1 - 2016-02-02 14.11.37.json',
# 'KLF14-B6NTAC-MAT-18.1e 54-16 B1 - 2016-02-02 15.06.05.json',
'KLF14-B6NTAC-MAT-18.2a 57-16 B1 - 2016-02-03 08.54.27.json',
'KLF14-B6NTAC-MAT-18.2b 58-16 B1 - 2016-02-03 09.58.06.json',
'KLF14-B6NTAC-MAT-18.2c 59-16 B1 - 2016-02-03 11.41.32.json',
'KLF14-B6NTAC-MAT-18.2d 60-16 B1 - 2016-02-03 12.56.49.json',
'KLF14-B6NTAC-MAT-18.2e 61-16 B1 - 2016-02-03 14.02.25.json',
'KLF14-B6NTAC-MAT-18.2f 62-16 B1 - 2016-02-03 15.00.17.json',
'KLF14-B6NTAC-MAT-18.2g 63-16 B1 - 2016-02-03 16.40.37.json',
'KLF14-B6NTAC-MAT-18.3b 223-16 B1 - 2016-02-25 16.53.42.json',
'KLF14-B6NTAC-MAT-18.3c 218-16 B1 - 2016-02-18 12.51.46.json',
'KLF14-B6NTAC-MAT-18.3d 224-16 B1 - 2016-02-26 10.48.56.json',
'KLF14-B6NTAC-MAT-19.1a 56-16 B1 - 2016-02-02 16.57.46.json',
'KLF14-B6NTAC-MAT-19.2b 219-16 B1 - 2016-02-18 14.21.50.json',
'KLF14-B6NTAC-MAT-19.2c 220-16 B1 - 2016-02-18 16.40.48.json',
'KLF14-B6NTAC-MAT-19.2e 221-16 B1 - 2016-02-25 13.15.27.json',
'KLF14-B6NTAC-MAT-19.2f 217-16 B1 - 2016-02-18 11.23.22.json',
'KLF14-B6NTAC-MAT-19.2g 222-16 B1 - 2016-02-25 14.51.57.json',
'KLF14-B6NTAC-PAT-36.3a 409-16 B1 - 2016-03-15 09.24.54.json',
'KLF14-B6NTAC-PAT-36.3b 412-16 B1 - 2016-03-15 14.11.47.json',
'KLF14-B6NTAC-PAT-36.3d 416-16 B1 - 2016-03-16 14.22.04.json',
# 'KLF14-B6NTAC-PAT-37.2a 406-16 B1 - 2016-03-14 11.46.47.json',
'KLF14-B6NTAC-PAT-37.2b 410-16 B1 - 2016-03-15 11.12.01.json',
'KLF14-B6NTAC-PAT-37.2c 407-16 B1 - 2016-03-14 12.54.55.json',
'KLF14-B6NTAC-PAT-37.2d 411-16 B1 - 2016-03-15 12.01.13.json',
'KLF14-B6NTAC-PAT-37.2e 408-16 B1 - 2016-03-14 16.06.43.json',
'KLF14-B6NTAC-PAT-37.2f 405-16 B1 - 2016-03-14 09.49.45.json',
'KLF14-B6NTAC-PAT-37.2g 415-16 B1 - 2016-03-16 11.04.45.json',
'KLF14-B6NTAC-PAT-37.2h 418-16 B1 - 2016-03-16 16.42.16.json',
'KLF14-B6NTAC-PAT-37.3a 413-16 B1 - 2016-03-15 15.31.26.json',
'KLF14-B6NTAC-PAT-37.3c 414-16 B1 - 2016-03-15 16.49.22.json',
'KLF14-B6NTAC-PAT-37.4a 417-16 B1 - 2016-03-16 15.25.38.json',
'KLF14-B6NTAC-PAT-37.4b 419-16 B1 - 2016-03-17 09.10.42.json',
'KLF14-B6NTAC-PAT-38.1a 90-16 B1 - 2016-02-04 17.27.42.json',
'KLF14-B6NTAC-PAT-39.1h 453-16 B1 - 2016-03-17 11.15.50.json',
'KLF14-B6NTAC-PAT-39.2d 454-16 B1 - 2016-03-17 12.16.06.json'
]
########################################################################################################################
## Common code to the rest of this script:
## Import packages and auxiliary functions
## USED IN PAPER
########################################################################################################################
# import pickle
from toolz import interleave
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
# import scipy
import scipy.stats as stats
# import skimage
import sklearn.neighbors, sklearn.model_selection
import statsmodels.api as sm
# import statsmodels.formula.api as smf
from statsmodels.stats.multitest import multipletests
import seaborn as sns
# import openslide
import PIL
# from PIL import Image, ImageDraw
import cytometer.data
import cytometer.stats
import shapely
# directories
klf14_root_data_dir = os.path.join(home, 'Data/cytometer_data/klf14')
hand_traced_dir = os.path.join(klf14_root_data_dir, 'klf14_b6ntac_training_v2')
annotations_dir = os.path.join(home, 'Data/cytometer_data/aida_data_Klf14_v8/annotations')
histo_dir = os.path.join(home, 'scan_srv2_cox/Maz Yon')
dataframe_dir = os.path.join(home, 'GoogleDrive/Research/20190727_cytometer_paper')
paper_dir = os.path.join(home, 'GoogleDrive/Research/20190727_cytometer_paper')
figures_dir = os.path.join(home, 'GoogleDrive/Research/20190727_cytometer_paper/figures')
metainfo_dir = os.path.join(home, 'Data/cytometer_data/klf14')
area2quantile_dir = os.path.join(home, 'Data/cytometer_data/deepcytometer_pipeline_v8')
saved_models_dir = os.path.join(home, 'Data/cytometer_data/deepcytometer_pipeline_v8')
DEBUG = False
method = 'corrected'
# k-folds file with hand traced filenames
saved_kfolds_filename = 'klf14_b6ntac_exp_0094_generate_extra_training_images.pickle'
# CSV file with metainformation of all mice
metainfo_csv_file = os.path.join(metainfo_dir, 'klf14_b6ntac_meta_info.csv')
metainfo = pd.read_csv(metainfo_csv_file)
# make sure that in the boxplots PAT comes before MAT
metainfo['sex'] = metainfo['sex'].astype(pd.api.types.CategoricalDtype(categories=['f', 'm'], ordered=True))
metainfo['ko_parent'] = metainfo['ko_parent'].astype(
pd.api.types.CategoricalDtype(categories=['PAT', 'MAT'], ordered=True))
metainfo['genotype'] = metainfo['genotype'].astype(
pd.api.types.CategoricalDtype(categories=['KLF14-KO:WT', 'KLF14-KO:Het'], ordered=True))
metainfo['functional_ko'] = 'Control'
metainfo.loc[(metainfo['ko_parent'] == 'MAT') & (metainfo['genotype'] == 'KLF14-KO:Het'), 'functional_ko'] = 'FKO'
metainfo.loc[(metainfo['ko_parent'] == 'MAT') & (metainfo['genotype'] == 'KLF14-KO:WT'), 'functional_ko'] = 'MAT_WT'
metainfo['functional_ko'] = metainfo['functional_ko'].astype(
pd.api.types.CategoricalDtype(categories=['Control', 'MAT_WT', 'FKO'], ordered=True))
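# Optional sanity check of the stratification used throughout this script:
# Control = all PAT mice (either genotype), MAT_WT = MAT wild types,
# FKO = MAT Hets. Uncomment to print the group sizes per sex.
# print(metainfo.groupby(['sex', 'functional_ko']).size())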
# remove BW=NaNs
metainfo = metainfo[~np.isnan(metainfo['BW'])]
metainfo = metainfo.reset_index()
# load dataframe with cell population quantiles and histograms
dataframe_areas_filename = os.path.join(dataframe_dir, 'klf14_b6ntac_exp_0110_dataframe_areas_' + method + '.pkl')
df_all = pd.read_pickle(dataframe_areas_filename)
df_all = df_all.reset_index()
df_all['sex'] = df_all['sex'].astype(pd.api.types.CategoricalDtype(categories=['f', 'm'], ordered=True))
df_all['ko_parent'] = df_all['ko_parent'].astype(
pd.api.types.CategoricalDtype(categories=['PAT', 'MAT'], ordered=True))
df_all['genotype'] = df_all['genotype'].astype(
pd.api.types.CategoricalDtype(categories=['KLF14-KO:WT', 'KLF14-KO:Het'], ordered=True))
df_all['functional_ko'] = 'Control'
df_all.loc[(df_all['ko_parent'] == 'MAT') & (df_all['genotype'] == 'KLF14-KO:Het'), 'functional_ko'] = 'FKO'
df_all.loc[(df_all['ko_parent'] == 'MAT') & (df_all['genotype'] == 'KLF14-KO:WT'), 'functional_ko'] = 'MAT_WT'
df_all['functional_ko'] = df_all['functional_ko'].astype(
pd.api.types.CategoricalDtype(categories=['Control', 'MAT_WT', 'FKO'], ordered=True))
# load extra info needed for the histograms
dataframe_areas_extra_filename = os.path.join(dataframe_dir, 'klf14_b6ntac_exp_0110_dataframe_areas_extra.npz')
with np.load(dataframe_areas_extra_filename) as aux:
quantiles = aux['quantiles']
area_bin_edges = aux['area_bin_edges']
area_bin_centers = aux['area_bin_centers']
# list of hand traced contours
# The list contains 126 XCF (Gimp format) files with the contours that were used for training DeepCytometer,
# plus 5 files (131 in total) with extra contours for 2 mice where the cell population was not well
# represented.
hand_file_svg_list = [
'KLF14-B6NTAC 36.1c PAT 98-16 C1 - 2016-02-11 10.45.00_row_010512_col_006912.svg',
'KLF14-B6NTAC 36.1c PAT 98-16 C1 - 2016-02-11 10.45.00_row_012848_col_016240.svg',
'KLF14-B6NTAC 36.1c PAT 98-16 C1 - 2016-02-11 10.45.00_row_016812_col_017484.svg',
'KLF14-B6NTAC 36.1c PAT 98-16 C1 - 2016-02-11 10.45.00_row_019228_col_015060.svg',
'KLF14-B6NTAC 36.1c PAT 98-16 C1 - 2016-02-11 10.45.00_row_029472_col_015520.svg',
'KLF14-B6NTAC 36.1i PAT 104-16 C1 - 2016-02-12 12.14.38_row_005348_col_019844.svg',
'KLF14-B6NTAC 36.1i PAT 104-16 C1 - 2016-02-12 12.14.38_row_006652_col_061724.svg',
'KLF14-B6NTAC 36.1i PAT 104-16 C1 - 2016-02-12 12.14.38_row_006900_col_071980.svg',
'KLF14-B6NTAC 36.1i PAT 104-16 C1 - 2016-02-12 12.14.38_row_010732_col_016692.svg',
'KLF14-B6NTAC 36.1i PAT 104-16 C1 - 2016-02-12 12.14.38_row_012828_col_018388.svg',
'KLF14-B6NTAC 36.1i PAT 104-16 C1 - 2016-02-12 12.14.38_row_013600_col_022880.svg',
'KLF14-B6NTAC 36.1i PAT 104-16 C1 - 2016-02-12 12.14.38_row_014768_col_022576.svg',
'KLF14-B6NTAC 36.1i PAT 104-16 C1 - 2016-02-12 12.14.38_row_014980_col_027052.svg',
'KLF14-B6NTAC 36.1i PAT 104-16 C1 - 2016-02-12 12.14.38_row_027388_col_018468.svg',
'KLF14-B6NTAC 36.1i PAT 104-16 C1 - 2016-02-12 12.14.38_row_028864_col_024512.svg',
'KLF14-B6NTAC 36.1i PAT 104-16 C1 - 2016-02-12 12.14.38_row_041392_col_026032.svg',
'KLF14-B6NTAC-36.1a PAT 96-16 C1 - 2016-02-10 16.12.38_row_009588_col_028676.svg',
'KLF14-B6NTAC-36.1a PAT 96-16 C1 - 2016-02-10 16.12.38_row_011680_col_013984.svg',
'KLF14-B6NTAC-36.1a PAT 96-16 C1 - 2016-02-10 16.12.38_row_015856_col_012416.svg',
'KLF14-B6NTAC-36.1a PAT 96-16 C1 - 2016-02-10 16.12.38_row_018720_col_031152.svg',
'KLF14-B6NTAC-36.1a PAT 96-16 C1 - 2016-02-10 16.12.38_row_021796_col_055852.svg',
'KLF14-B6NTAC-36.1b PAT 97-16 C1 - 2016-02-10 17.38.06_row_011852_col_071620.svg',
'KLF14-B6NTAC-36.1b PAT 97-16 C1 - 2016-02-10 17.38.06_row_013300_col_055476.svg',
'KLF14-B6NTAC-36.1b PAT 97-16 C1 - 2016-02-10 17.38.06_row_014320_col_007600.svg',
'KLF14-B6NTAC-36.1b PAT 97-16 C1 - 2016-02-10 17.38.06_row_015200_col_021536.svg',
'KLF14-B6NTAC-36.1b PAT 97-16 C1 - 2016-02-10 17.38.06_row_020256_col_002880.svg',
'KLF14-B6NTAC-36.1b PAT 97-16 C1 - 2016-02-10 17.38.06_row_021136_col_010880.svg',
'KLF14-B6NTAC-37.1c PAT 108-16 C1 - 2016-02-15 14.49.45_row_001292_col_004348.svg',
'KLF14-B6NTAC-37.1c PAT 108-16 C1 - 2016-02-15 14.49.45_row_005600_col_004224.svg',
'KLF14-B6NTAC-37.1c PAT 108-16 C1 - 2016-02-15 14.49.45_row_007216_col_008896.svg',
'KLF14-B6NTAC-37.1c PAT 108-16 C1 - 2016-02-15 14.49.45_row_007372_col_008556.svg',
'KLF14-B6NTAC-37.1c PAT 108-16 C1 - 2016-02-15 14.49.45_row_011904_col_005280.svg',
'KLF14-B6NTAC-37.1d PAT 109-16 C1 - 2016-02-15 15.19.08_row_010048_col_001856.svg',
'KLF14-B6NTAC-37.1d PAT 109-16 C1 - 2016-02-15 15.19.08_row_012172_col_049588.svg',
'KLF14-B6NTAC-37.1d PAT 109-16 C1 - 2016-02-15 15.19.08_row_013232_col_009008.svg',
'KLF14-B6NTAC-37.1d PAT 109-16 C1 - 2016-02-15 15.19.08_row_016068_col_007276.svg',
'KLF14-B6NTAC-37.1d PAT 109-16 C1 - 2016-02-15 15.19.08_row_019680_col_016480.svg',
'KLF14-B6NTAC-MAT-16.2d 214-16 C1 - 2016-02-17 16.02.46_row_004124_col_012524.svg',
'KLF14-B6NTAC-MAT-16.2d 214-16 C1 - 2016-02-17 16.02.46_row_004384_col_005456.svg',
'KLF14-B6NTAC-MAT-16.2d 214-16 C1 - 2016-02-17 16.02.46_row_006040_col_005272.svg',
'KLF14-B6NTAC-MAT-16.2d 214-16 C1 - 2016-02-17 16.02.46_row_006640_col_008848.svg',
'KLF14-B6NTAC-MAT-16.2d 214-16 C1 - 2016-02-17 16.02.46_row_008532_col_009804.svg',
'KLF14-B6NTAC-MAT-16.2d 214-16 C1 - 2016-02-17 16.02.46_row_013952_col_002624.svg',
'KLF14-B6NTAC-MAT-16.2d 214-16 C1 - 2016-02-17 16.02.46_row_017044_col_031228.svg',
'KLF14-B6NTAC-MAT-16.2d 214-16 C1 - 2016-02-17 16.02.46_row_021804_col_035412.svg',
'KLF14-B6NTAC-MAT-17.1c 46-16 C1 - 2016-02-01 14.02.04_row_010716_col_008924.svg',
'KLF14-B6NTAC-MAT-17.1c 46-16 C1 - 2016-02-01 14.02.04_row_016832_col_016944.svg',
'KLF14-B6NTAC-MAT-17.1c 46-16 C1 - 2016-02-01 14.02.04_row_018784_col_010912.svg',
'KLF14-B6NTAC-MAT-17.1c 46-16 C1 - 2016-02-01 14.02.04_row_024528_col_014688.svg',
'KLF14-B6NTAC-MAT-17.1c 46-16 C1 - 2016-02-01 14.02.04_row_026108_col_068956.svg',
'KLF14-B6NTAC-MAT-17.2c 66-16 C1 - 2016-02-04 11.46.39_row_009840_col_008736.svg',
'KLF14-B6NTAC-MAT-17.2c 66-16 C1 - 2016-02-04 11.46.39_row_017792_col_017504.svg',
'KLF14-B6NTAC-MAT-17.2c 66-16 C1 - 2016-02-04 11.46.39_row_020032_col_018640.svg',
'KLF14-B6NTAC-MAT-17.2c 66-16 C1 - 2016-02-04 11.46.39_row_030820_col_022204.svg',
'KLF14-B6NTAC-MAT-17.2f 68-16 C1 - 2016-02-04 15.05.54_row_007500_col_050372.svg',
'KLF14-B6NTAC-MAT-17.2f 68-16 C1 - 2016-02-04 15.05.54_row_008000_col_003680.svg',
'KLF14-B6NTAC-MAT-17.2f 68-16 C1 - 2016-02-04 15.05.54_row_013348_col_019316.svg',
'KLF14-B6NTAC-MAT-17.2f 68-16 C1 - 2016-02-04 15.05.54_row_019168_col_019600.svg',
'KLF14-B6NTAC-MAT-17.2f 68-16 C1 - 2016-02-04 15.05.54_row_022960_col_007808.svg',
'KLF14-B6NTAC-MAT-17.2f 68-16 C1 - 2016-02-04 15.05.54_row_026132_col_012148.svg',
'KLF14-B6NTAC-MAT-17.2f 68-16 C1 - 2016-02-04 15.05.54_row_027968_col_011200.svg',
'KLF14-B6NTAC-MAT-18.1a 50-16 C1 - 2016-02-02 09.12.41_row_003584_col_017280.svg',
'KLF14-B6NTAC-MAT-18.1a 50-16 C1 - 2016-02-02 09.12.41_row_012908_col_010212.svg',
'KLF14-B6NTAC-MAT-18.1a 50-16 C1 - 2016-02-02 09.12.41_row_013984_col_012576.svg',
'KLF14-B6NTAC-MAT-18.1a 50-16 C1 - 2016-02-02 09.12.41_row_014448_col_019088.svg',
'KLF14-B6NTAC-MAT-18.1a 50-16 C1 - 2016-02-02 09.12.41_row_015200_col_015920.svg',
'KLF14-B6NTAC-MAT-18.1a 50-16 C1 - 2016-02-02 09.12.41_row_028156_col_018596.svg',
'KLF14-B6NTAC-MAT-18.1e 54-16 C1 - 2016-02-02 15.26.33_row_001920_col_014048.svg',
'KLF14-B6NTAC-MAT-18.1e 54-16 C1 - 2016-02-02 15.26.33_row_005344_col_019360.svg',
'KLF14-B6NTAC-MAT-18.1e 54-16 C1 - 2016-02-02 15.26.33_row_009236_col_018316.svg',
'KLF14-B6NTAC-MAT-18.1e 54-16 C1 - 2016-02-02 15.26.33_row_012680_col_023936.svg',
'KLF14-B6NTAC-MAT-18.1e 54-16 C1 - 2016-02-02 15.26.33_row_013256_col_007952.svg',
'KLF14-B6NTAC-MAT-18.1e 54-16 C1 - 2016-02-02 15.26.33_row_014800_col_020976.svg',
'KLF14-B6NTAC-MAT-18.1e 54-16 C1 - 2016-02-02 15.26.33_row_016756_col_063692.svg',
'KLF14-B6NTAC-MAT-18.1e 54-16 C1 - 2016-02-02 15.26.33_row_017360_col_024712.svg',
'KLF14-B6NTAC-MAT-18.1e 54-16 C1 - 2016-02-02 15.26.33_row_020824_col_018688.svg',
'KLF14-B6NTAC-MAT-18.1e 54-16 C1 - 2016-02-02 15.26.33_row_024128_col_010112.svg',
'KLF14-B6NTAC-MAT-18.1e 54-16 C1 - 2016-02-02 15.26.33_row_024836_col_055124.svg',
'KLF14-B6NTAC-MAT-18.2b 58-16 C1 - 2016-02-03 11.10.52_row_005424_col_006896.svg',
'KLF14-B6NTAC-MAT-18.2b 58-16 C1 - 2016-02-03 11.10.52_row_006268_col_013820.svg',
'KLF14-B6NTAC-MAT-18.2b 58-16 C1 - 2016-02-03 11.10.52_row_013820_col_057052.svg',
'KLF14-B6NTAC-MAT-18.2b 58-16 C1 - 2016-02-03 11.10.52_row_014272_col_008064.svg',
'KLF14-B6NTAC-MAT-18.2b 58-16 C1 - 2016-02-03 11.10.52_row_017808_col_012400.svg',
'KLF14-B6NTAC-MAT-18.2d 60-16 C1 - 2016-02-03 13.13.57_row_007296_col_010640.svg',
'KLF14-B6NTAC-MAT-18.2d 60-16 C1 - 2016-02-03 13.13.57_row_013856_col_014128.svg',
'KLF14-B6NTAC-MAT-18.2d 60-16 C1 - 2016-02-03 13.13.57_row_018380_col_063068.svg',
'KLF14-B6NTAC-MAT-18.2d 60-16 C1 - 2016-02-03 13.13.57_row_020448_col_013824.svg',
'KLF14-B6NTAC-MAT-18.2d 60-16 C1 - 2016-02-03 13.13.57_row_024076_col_020404.svg',
'KLF14-B6NTAC-MAT-18.2g 63-16 C1 - 2016-02-03 16.58.52_row_010128_col_013536.svg',
'KLF14-B6NTAC-MAT-18.2g 63-16 C1 - 2016-02-03 16.58.52_row_015776_col_010976.svg',
'KLF14-B6NTAC-MAT-18.2g 63-16 C1 - 2016-02-03 16.58.52_row_015984_col_026832.svg',
'KLF14-B6NTAC-MAT-18.3b 223-16 C2 - 2016-02-26 10.35.52_row_005428_col_058372.svg',
'KLF14-B6NTAC-MAT-18.3b 223-16 C2 - 2016-02-26 10.35.52_row_012404_col_054316.svg',
'KLF14-B6NTAC-MAT-18.3b 223-16 C2 - 2016-02-26 10.35.52_row_013604_col_024644.svg',
'KLF14-B6NTAC-MAT-18.3b 223-16 C2 - 2016-02-26 10.35.52_row_014628_col_069148.svg',
'KLF14-B6NTAC-MAT-18.3b 223-16 C2 - 2016-02-26 10.35.52_row_018384_col_014688.svg',
'KLF14-B6NTAC-MAT-18.3b 223-16 C2 - 2016-02-26 10.35.52_row_019340_col_017348.svg',
'KLF14-B6NTAC-MAT-18.3b 223-16 C2 - 2016-02-26 10.35.52_row_020128_col_010096.svg',
'KLF14-B6NTAC-MAT-18.3b 223-16 C2 - 2016-02-26 10.35.52_row_022000_col_015568.svg',
'KLF14-B6NTAC-MAT-18.3d 224-16 C1 - 2016-02-26 11.13.53_row_006880_col_017808.svg',
'KLF14-B6NTAC-MAT-18.3d 224-16 C1 - 2016-02-26 11.13.53_row_008212_col_015364.svg',
'KLF14-B6NTAC-MAT-18.3d 224-16 C1 - 2016-02-26 11.13.53_row_011004_col_005988.svg',
'KLF14-B6NTAC-MAT-18.3d 224-16 C1 - 2016-02-26 11.13.53_row_015004_col_010364.svg',
'KLF14-B6NTAC-MAT-18.3d 224-16 C1 - 2016-02-26 11.13.53_row_018992_col_005952.svg',
'KLF14-B6NTAC-MAT-18.3d 224-16 C1 - 2016-02-26 11.13.53_row_019556_col_057972.svg',
'KLF14-B6NTAC-MAT-18.3d 224-16 C1 - 2016-02-26 11.13.53_row_021812_col_022916.svg',
'KLF14-B6NTAC-MAT-18.3d 224-16 C1 - 2016-02-26 11.13.53_row_022208_col_018128.svg',
'KLF14-B6NTAC-PAT-36.3d 416-16 C1 - 2016-03-16 14.44.11_row_010084_col_058476.svg',
'KLF14-B6NTAC-PAT-36.3d 416-16 C1 - 2016-03-16 14.44.11_row_012208_col_007472.svg',
'KLF14-B6NTAC-PAT-36.3d 416-16 C1 - 2016-03-16 14.44.11_row_013680_col_019152.svg',
'KLF14-B6NTAC-PAT-36.3d 416-16 C1 - 2016-03-16 14.44.11_row_016260_col_058300.svg',
'KLF14-B6NTAC-PAT-36.3d 416-16 C1 - 2016-03-16 14.44.11_row_019220_col_061724.svg',
'KLF14-B6NTAC-PAT-36.3d 416-16 C1 - 2016-03-16 14.44.11_row_020048_col_028896.svg',
'KLF14-B6NTAC-PAT-36.3d 416-16 C1 - 2016-03-16 14.44.11_row_021012_col_057844.svg',
'KLF14-B6NTAC-PAT-36.3d 416-16 C1 - 2016-03-16 14.44.11_row_023236_col_011084.svg',
'KLF14-B6NTAC-PAT-37.2g 415-16 C1 - 2016-03-16 11.47.52_row_006124_col_082236.svg',
'KLF14-B6NTAC-PAT-37.2g 415-16 C1 - 2016-03-16 11.47.52_row_007436_col_019092.svg',
'KLF14-B6NTAC-PAT-37.2g 415-16 C1 - 2016-03-16 11.47.52_row_009296_col_029664.svg',
'KLF14-B6NTAC-PAT-37.2g 415-16 C1 - 2016-03-16 11.47.52_row_015872_col_019456.svg',
'KLF14-B6NTAC-PAT-37.2g 415-16 C1 - 2016-03-16 11.47.52_row_016556_col_010292.svg',
'KLF14-B6NTAC-PAT-37.2g 415-16 C1 - 2016-03-16 11.47.52_row_023100_col_009220.svg',
'KLF14-B6NTAC-PAT-37.2g 415-16 C1 - 2016-03-16 11.47.52_row_023728_col_011904.svg',
'KLF14-B6NTAC-PAT-37.2g 415-16 C1 - 2016-03-16 11.47.52_row_031860_col_033476.svg',
'KLF14-B6NTAC-PAT-37.4a 417-16 C1 - 2016-03-16 15.55.32_row_004256_col_017552.svg',
'KLF14-B6NTAC-PAT-37.4a 417-16 C1 - 2016-03-16 15.55.32_row_005424_col_010432.svg',
'KLF14-B6NTAC-PAT-37.4a 417-16 C1 - 2016-03-16 15.55.32_row_006412_col_012484.svg',
'KLF14-B6NTAC-PAT-37.4a 417-16 C1 - 2016-03-16 15.55.32_row_012144_col_007056.svg',
'KLF14-B6NTAC-PAT-37.4a 417-16 C1 - 2016-03-16 15.55.32_row_013012_col_019820.svg',
'KLF14-B6NTAC-PAT-37.4a 417-16 C1 - 2016-03-16 15.55.32_row_031172_col_025996.svg',
'KLF14-B6NTAC-PAT-37.4a 417-16 C1 - 2016-03-16 15.55.32_row_034628_col_040116.svg',
'KLF14-B6NTAC-PAT-37.4a 417-16 C1 - 2016-03-16 15.55.32_row_035948_col_041492.svg'
]
# get v2 of the hand traced contours
hand_file_svg_list = [os.path.join(hand_traced_dir, x) for x in hand_file_svg_list]
## auxiliary functions
def table_of_hand_traced_regions(file_svg_list):
"""
Open SVG files in a list, and count the number of different types of regions (Cells, Other, Background, Windows,
Windows with cells) and create a table with them for the paper
:param file_svg_list: list of filenames
:return: pd.Dataframe
"""
# init dataframe to aggregate training numbers of each mouse
table = pd.DataFrame(columns=['Cells', 'Other', 'Background', 'Windows', 'Windows with cells'])
# loop files with hand traced contours
for i, file_svg in enumerate(file_svg_list):
print('file ' + str(i) + '/' + str(len(file_svg_list) - 1) + ': ' + os.path.basename(file_svg))
# read the ground truth cell contours in the SVG file. This produces a list [contour_0, ..., contour_N-1]
# where each contour_i = [(X_0, Y_0), ..., (X_P-1, X_P-1)]
cell_contours = cytometer.data.read_paths_from_svg_file(file_svg, tag='Cell', add_offset_from_filename=False,
minimum_npoints=3)
other_contours = cytometer.data.read_paths_from_svg_file(file_svg, tag='Other', add_offset_from_filename=False,
minimum_npoints=3)
brown_contours = cytometer.data.read_paths_from_svg_file(file_svg, tag='Brown', add_offset_from_filename=False,
minimum_npoints=3)
background_contours = cytometer.data.read_paths_from_svg_file(file_svg, tag='Background',
add_offset_from_filename=False,
minimum_npoints=3)
contours = cell_contours + other_contours + brown_contours + background_contours
# make a list with the type of cell each contour is classified as
contour_type = [np.zeros(shape=(len(cell_contours),), dtype=np.uint8), # 0: white-adipocyte
np.ones(shape=(len(other_contours),), dtype=np.uint8), # 1: other types of tissue
np.ones(shape=(len(brown_contours),), dtype=np.uint8),
# 1: brown cells (treated as "other" tissue)
np.zeros(shape=(len(background_contours),), dtype=np.uint8)] # 0: background
contour_type = np.concatenate(contour_type)
print('Cells: ' + str(len(cell_contours)) + '. Other: ' + str(len(other_contours))
+ '. Brown: ' + str(len(brown_contours)) + '. Background: ' + str(len(background_contours)))
# create dataframe for this image
df_common = cytometer.data.tag_values_with_mouse_info(metainfo=metainfo, s=os.path.basename(file_svg),
values=[i, ], values_tag='i',
tags_to_keep=['id', 'ko_parent', 'sex'])
# mouse ID as a string
id = df_common['id'].values[0]
sex = df_common['sex'].values[0]
ko = df_common['ko_parent'].values[0]
# row to add to the table
df = pd.DataFrame(
[(sex, ko,
len(cell_contours), len(other_contours) + len(brown_contours), len(background_contours), 1,
int(len(cell_contours) > 0))],
columns=['Sex', 'Genotype', 'Cells', 'Other', 'Background', 'Windows', 'Windows with cells'], index=[id])
if id in table.index:
num_cols = ['Cells', 'Other', 'Background', 'Windows', 'Windows with cells']
table.loc[id, num_cols] = (table.loc[id, num_cols] + df.loc[id, num_cols])
else:
table = table.append(df, sort=False, ignore_index=False, verify_integrity=True)
# alphabetical order by mouse IDs
table = table.sort_index()
return table
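# --- Hedged usage sketch ---
# table_of_hand_traced_regions() only needs the SVG paths defined above; the
# call is left commented out because the hand-traced SVG files live on the
# original file system.
# hand_traced_table = table_of_hand_traced_regions(hand_file_svg_list)
# print(hand_traced_table)
# print(hand_traced_table[['Cells', 'Other', 'Background', 'Windows', 'Windows with cells']].sum(axis=0))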
print('PAT WT: ' + str(np.count_nonzero((metainfo['genotype'] == 'KLF14-KO:WT') & (metainfo['ko_parent'] == 'PAT'))))
print('PAT Het: ' + str(np.count_nonzero((metainfo['genotype'] == 'KLF14-KO:Het') & (metainfo['ko_parent'] == 'PAT'))))
print('MAT WT: ' + str(np.count_nonzero((metainfo['genotype'] == 'KLF14-KO:WT') & (metainfo['ko_parent'] == 'MAT'))))
print('MAT Het: ' + str(np.count_nonzero((metainfo['genotype'] == 'KLF14-KO:Het') & (metainfo['ko_parent'] == 'MAT'))))
########################################################################################################################
## Whole animal studies (cull age, body weight, depot weight)
## USED IN PAPER
########################################################################################################################
## some data preparations
print('Min cull age: ' + str(metainfo['cull_age'].min()) + ' days')
print('Max cull age: ' + str(metainfo['cull_age'].max()) + ' days')
# we need numerical instead of categorical values for logistic regression
metainfo['ko_parent_num'] = (metainfo['ko_parent'] == 'MAT').astype(np.float32)
metainfo['genotype_num'] = (metainfo['genotype'] == 'KLF14-KO:Het').astype(np.float32)
# scale cull_age to avoid large condition numbers
metainfo['cull_age__'] = (metainfo['cull_age'] - np.mean(metainfo['cull_age'])) / np.std(metainfo['cull_age'])
# for convenience create two dataframes (female and male) with the data for the current depot
metainfo_f = metainfo[metainfo['sex'] == 'f']
metainfo_m = metainfo[metainfo['sex'] == 'm']
## effect of sex on body weight
########################################################################################################################
df_all = df_all[~np.isnan(df_all['BW'])]
bw_model = sm.RLM.from_formula('BW ~ C(sex)', data=metainfo, subset=metainfo['ko_parent']=='PAT', M=sm.robust.norms.HuberT()).fit()
print(bw_model.summary())
print(bw_model.pvalues)
print('Males are ' + str(bw_model.params['C(sex)[T.m]'] / bw_model.params['Intercept'] * 100)
+ ' % larger than females')
## BW ~ functional_ko
########################################################################################################################
# BW ~ functional_ko for female/male
bw_model_f = sm.OLS.from_formula('BW ~ C(functional_ko)', data=metainfo_f).fit()
bw_model_m = sm.OLS.from_formula('BW ~ C(functional_ko)', data=metainfo_m).fit()
print(bw_model_f.summary())
print(bw_model_m.summary())
extra_tests_f = bw_model_f.t_test('Intercept + C(functional_ko)[T.MAT_WT], Intercept + C(functional_ko)[T.FKO]')
extra_tests_m = bw_model_m.t_test('Intercept + C(functional_ko)[T.MAT_WT], Intercept + C(functional_ko)[T.FKO]')
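# The two contrasts above return the model-implied group means for MAT_WT and
# FKO (the Intercept alone is the Control mean); the empirical means computed
# below are what the horizontal bars in the swarm plot show.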
# mean BW
bwmean_control_f = np.mean(metainfo_f[metainfo_f['ko_parent'] == 'PAT']['BW'])
bwmean_matwt_f = np.mean(metainfo_f[(metainfo_f['ko_parent'] == 'MAT') & (metainfo_f['genotype'] == 'KLF14-KO:WT')]['BW'])
bwmean_fko_f = np.mean(metainfo_f[(metainfo_f['ko_parent'] == 'MAT') & (metainfo_f['genotype'] == 'KLF14-KO:Het')]['BW'])
bwmean_control_m = np.mean(metainfo_m[metainfo_m['ko_parent'] == 'PAT']['BW'])
bwmean_matwt_m = np.mean(metainfo_m[(metainfo_m['ko_parent'] == 'MAT') & (metainfo_m['genotype'] == 'KLF14-KO:WT')]['BW'])
bwmean_fko_m = np.mean(metainfo_m[(metainfo_m['ko_parent'] == 'MAT') & (metainfo_m['genotype'] == 'KLF14-KO:Het')]['BW'])
# Tukey HSD
multicomp_f = sm.stats.multicomp.MultiComparison(metainfo_f['BW'], metainfo_f['functional_ko'])
tukeyhsd_f = multicomp_f.tukeyhsd()
tukeyhsd_f = pd.DataFrame(data=tukeyhsd_f._results_table.data[1:], columns=tukeyhsd_f._results_table.data[0])
print(tukeyhsd_f)
multicomp_m = sm.stats.multicomp.MultiComparison(metainfo_m['BW'], metainfo_m['functional_ko'])
tukeyhsd_m = multicomp_m.tukeyhsd()
tukeyhsd_m = pd.DataFrame(data=tukeyhsd_m._results_table.data[1:], columns=tukeyhsd_m._results_table.data[0])
print(tukeyhsd_m)
if SAVE_FIGS:
plt.clf()
plt.gcf().set_size_inches([5.48, 4.8 ])
ax = sns.swarmplot(x='sex', y='BW', hue='functional_ko', data=metainfo, dodge=True, palette=['C2', 'C3', 'C4'])
plt.xlabel('')
plt.ylabel('Body weight (g)', fontsize=14)
plt.tick_params(labelsize=14)
plt.xticks([0, 1], labels=['Female', 'Male'])
ax.get_legend().set_title('')
ax.legend(['Control (PAT)', 'MAT WT', 'FKO (MAT Het)'], loc='lower right', fontsize=12)
# mean values
plt.plot([-0.35, -0.15], [bwmean_control_f,]*2, 'k', linewidth=2)
plt.plot([-0.10, 0.10], [bwmean_matwt_f,]*2, 'k', linewidth=2)
plt.plot([ 0.17, 0.35], [bwmean_fko_f,]*2, 'k', linewidth=2)
plt.plot([ 0.65, 0.85], [bwmean_control_m,]*2, 'k', linewidth=2)
plt.plot([ 0.90, 1.10], [bwmean_matwt_m,]*2, 'k', linewidth=2)
plt.plot([ 1.17, 1.35], [bwmean_fko_m,]*2, 'k', linewidth=2)
# female
plt.plot([-0.3, -0.3, 0.0, 0.0], [42, 44, 44, 42], 'k', lw=1.5)
idx = (tukeyhsd_f['group1'] == 'Control') & (tukeyhsd_f['group2'] == 'MAT_WT')
pval = list(tukeyhsd_f.loc[idx, 'p-adj'])[0]
pval_text = '{0:.3f}'.format(pval) + ' ' + cytometer.stats.pval_to_asterisk(pval)
plt.text(-0.15, 44.5, pval_text, ha='center', va='bottom', fontsize=14)
plt.plot([0.0, 0.0, 0.3, 0.3], [47, 49, 49, 47], 'k', lw=1.5)
idx = (tukeyhsd_f['group1'] == 'FKO') & (tukeyhsd_f['group2'] == 'MAT_WT')
pval = list(tukeyhsd_f.loc[idx, 'p-adj'])[0]
pval_text = '{0:.2f}'.format(pval) + ' ' + cytometer.stats.pval_to_asterisk(pval)
plt.text(0.15, 49.5, pval_text, ha='center', va='bottom', fontsize=14)
plt.plot([-0.3, -0.3, 0.3, 0.3], [52, 54, 54, 52], 'k', lw=1.5)
idx = (tukeyhsd_f['group1'] == 'Control') & (tukeyhsd_f['group2'] == 'FKO')
pval = list(tukeyhsd_f.loc[idx, 'p-adj'])[0]
pval_text = '{0:.2f}'.format(pval) + ' ' + cytometer.stats.pval_to_asterisk(pval)
plt.text(0.0, 54.5, pval_text, ha='center', va='bottom', fontsize=14)
# male
plt.plot([1.0, 1.0, 1.3, 1.3], [47.5, 49.5, 49.5, 47.5], 'k', lw=1.5)
idx = (tukeyhsd_m['group1'] == 'FKO') & (tukeyhsd_m['group2'] == 'MAT_WT')
pval = list(tukeyhsd_m.loc[idx, 'p-adj'])[0]
pval_text = '{0:.2f}'.format(pval) + ' ' + cytometer.stats.pval_to_asterisk(pval)
plt.text(1.15, 50, pval_text, ha='center', va='bottom', fontsize=14)
plt.plot([0.7, 0.7, 1.0, 1.0], [52.5, 54.5, 54.5, 52.5], 'k', lw=1.5)
idx = (tukeyhsd_m['group1'] == 'Control') & (tukeyhsd_m['group2'] == 'MAT_WT')
pval = list(tukeyhsd_m.loc[idx, 'p-adj'])[0]
pval_text = '{0:.2f}'.format(pval) + ' ' + cytometer.stats.pval_to_asterisk(pval)
plt.text(0.85, 55, pval_text, ha='center', va='bottom', fontsize=14)
plt.plot([0.7, 0.7, 1.3, 1.3], [57.5, 59.5, 59.5, 57.5], 'k', lw=1.5)
idx = (tukeyhsd_m['group1'] == 'Control') & (tukeyhsd_m['group2'] == 'FKO')
pval = list(tukeyhsd_m.loc[idx, 'p-adj'])[0]
pval_text = '{0:.2f}'.format(pval) + ' ' + cytometer.stats.pval_to_asterisk(pval)
plt.text(1.00, 60, pval_text, ha='center', va='bottom', fontsize=14)
plt.ylim(18, 65)
plt.tight_layout()
plt.savefig(os.path.join(figures_dir, 'klf14_b6ntac_exp_0111_paper_figures_swarm_bw_fko.png'))
plt.savefig(os.path.join(figures_dir, 'klf14_b6ntac_exp_0111_paper_figures_swarm_bw_fko.svg'))
## DW ~ functional_ko
########################################################################################################################
# mean DW Gonadal
dwmean_control_f_gwat = np.mean(metainfo_f[metainfo_f['ko_parent'] == 'PAT']['gWAT'])
dwmean_matwt_f_gwat = np.mean(metainfo_f[(metainfo_f['ko_parent'] == 'MAT') & (metainfo_f['genotype'] == 'KLF14-KO:WT')]['gWAT'])
dwmean_fko_f_gwat = np.mean(metainfo_f[(metainfo_f['ko_parent'] == 'MAT') & (metainfo_f['genotype'] == 'KLF14-KO:Het')]['gWAT'])
dwmean_control_m_gwat = np.mean(metainfo_m[metainfo_m['ko_parent'] == 'PAT']['gWAT'])
dwmean_matwt_m_gwat = np.mean(metainfo_m[(metainfo_m['ko_parent'] == 'MAT') & (metainfo_m['genotype'] == 'KLF14-KO:WT')]['gWAT'])
dwmean_fko_m_gwat = np.mean(metainfo_m[(metainfo_m['ko_parent'] == 'MAT') & (metainfo_m['genotype'] == 'KLF14-KO:Het')]['gWAT'])
# Tukey HSD for gWAT ~ functional_ko
multicomp_f = sm.stats.multicomp.MultiComparison(metainfo_f['gWAT'], metainfo_f['functional_ko'])
tukeyhsd_f = multicomp_f.tukeyhsd()
tukeyhsd_f = pd.DataFrame(data=tukeyhsd_f._results_table.data[1:], columns=tukeyhsd_f._results_table.data[0])
print(tukeyhsd_f)
multicomp_m = sm.stats.multicomp.MultiComparison(metainfo_m['gWAT'], metainfo_m['functional_ko'])
tukeyhsd_m = multicomp_m.tukeyhsd()
tukeyhsd_m = pd.DataFrame(data=tukeyhsd_m._results_table.data[1:], columns=tukeyhsd_m._results_table.data[0])
print(tukeyhsd_m)
if SAVE_FIGS:
plt.clf()
plt.gcf().set_size_inches([5.48, 4.8 ])
ax = sns.swarmplot(x='sex', y='gWAT', hue='functional_ko', data=metainfo, dodge=True, palette=['C2', 'C3', 'C4'])
plt.xlabel('')
plt.ylabel('Gonadal depot weight (g)', fontsize=14)
plt.tick_params(labelsize=14)
plt.xticks([0, 1], labels=['Female', 'Male'])
ax.get_legend().set_title('')
ax.legend([])
# ax.legend(['Control (PAT)', 'MAT WT', 'FKO (MAT Het)'], loc='lower right', fontsize=12)
# mean values
plt.plot([-0.35, -0.15], [dwmean_control_f_gwat,]*2, 'k', linewidth=2)
plt.plot([-0.10, 0.10], [dwmean_matwt_f_gwat,]*2, 'k', linewidth=2)
plt.plot([ 0.17, 0.35], [dwmean_fko_f_gwat,]*2, 'k', linewidth=2)
plt.plot([ 0.65, 0.85], [dwmean_control_m_gwat,]*2, 'k', linewidth=2)
plt.plot([ 0.90, 1.10], [dwmean_matwt_m_gwat,]*2, 'k', linewidth=2)
plt.plot([ 1.17, 1.35], [dwmean_fko_m_gwat,]*2, 'k', linewidth=2)
# female
plt.plot([-0.3, -0.3, 0.0, 0.0], [1.45, 1.55, 1.55, 1.45], 'k', lw=1.5)
idx = (tukeyhsd_f['group1'] == 'Control') & (tukeyhsd_f['group2'] == 'MAT_WT')
pval = list(tukeyhsd_f.loc[idx, 'p-adj'])[0]
pval_text = '{0:.2f}'.format(pval) + ' ' + cytometer.stats.pval_to_asterisk(pval)
plt.text(-0.15, 1.56, pval_text, ha='center', va='bottom', fontsize=14)
plt.plot([0.0, 0.0, 0.3, 0.3], [1.75, 1.85, 1.85, 1.75], 'k', lw=1.5)
idx = (tukeyhsd_f['group1'] == 'FKO') & (tukeyhsd_f['group2'] == 'MAT_WT')
pval = list(tukeyhsd_f.loc[idx, 'p-adj'])[0]
pval_text = '{0:.2f}'.format(pval) + ' ' + cytometer.stats.pval_to_asterisk(pval)
plt.text(0.15, 1.86, pval_text, ha='center', va='bottom', fontsize=14)
plt.plot([-0.3, -0.3, 0.3, 0.3], [2.05, 2.15, 2.15, 2.05], 'k', lw=1.5)
idx = (tukeyhsd_f['group1'] == 'Control') & (tukeyhsd_f['group2'] == 'FKO')
pval = list(tukeyhsd_f.loc[idx, 'p-adj'])[0]
pval_text = '{0:.2f}'.format(pval) + ' ' + cytometer.stats.pval_to_asterisk(pval)
plt.text(0.0, 2.16, pval_text, ha='center', va='bottom', fontsize=14)
# male
plt.plot([1.0, 1.0, 1.3, 1.3], [1.75, 1.85, 1.85, 1.75], 'k', lw=1.5)
idx = (tukeyhsd_m['group1'] == 'FKO') & (tukeyhsd_m['group2'] == 'MAT_WT')
pval = list(tukeyhsd_m.loc[idx, 'p-adj'])[0]
pval_text = '{0:.2f}'.format(pval) + ' ' + cytometer.stats.pval_to_asterisk(pval)
plt.text(1.15, 1.86, pval_text, ha='center', va='bottom', fontsize=14)
plt.plot([0.7, 0.7, 1.0, 1.0], [2.05, 2.15, 2.15, 2.05], 'k', lw=1.5)
idx = (tukeyhsd_m['group1'] == 'Control') & (tukeyhsd_m['group2'] == 'MAT_WT')
pval = list(tukeyhsd_m.loc[idx, 'p-adj'])[0]
pval_text = '{0:.2f}'.format(pval) + ' ' + cytometer.stats.pval_to_asterisk(pval)
plt.text(0.85, 2.16, pval_text, ha='center', va='bottom', fontsize=14)
plt.plot([0.7, 0.7, 1.3, 1.3], [2.35, 2.45, 2.45, 2.35], 'k', lw=1.5)
idx = (tukeyhsd_m['group1'] == 'Control') & (tukeyhsd_m['group2'] == 'FKO')
pval = list(tukeyhsd_m.loc[idx, 'p-adj'])[0]
pval_text = '{0:.2f}'.format(pval) + ' ' + cytometer.stats.pval_to_asterisk(pval)
plt.text(1.00, 2.46, pval_text, ha='center', va='bottom', fontsize=14)
plt.ylim(0, 2.7)
plt.tight_layout()
plt.savefig(os.path.join(figures_dir, 'klf14_b6ntac_exp_0111_paper_figures_swarm_gwat_fko.png'))
plt.savefig(os.path.join(figures_dir, 'klf14_b6ntac_exp_0111_paper_figures_swarm_gwat_fko.svg'))
# mean DW Subcut.
dwmean_control_f_sqwat = np.mean(metainfo_f[metainfo_f['ko_parent'] == 'PAT']['SC'])
dwmean_matwt_f_sqwat = np.mean(metainfo_f[(metainfo_f['ko_parent'] == 'MAT') & (metainfo_f['genotype'] == 'KLF14-KO:WT')]['SC'])
dwmean_fko_f_sqwat = np.mean(metainfo_f[(metainfo_f['ko_parent'] == 'MAT') & (metainfo_f['genotype'] == 'KLF14-KO:Het')]['SC'])
dwmean_control_m_sqwat = np.mean(metainfo_m[metainfo_m['ko_parent'] == 'PAT']['SC'])
dwmean_matwt_m_sqwat = np.mean(metainfo_m[(metainfo_m['ko_parent'] == 'MAT') & (metainfo_m['genotype'] == 'KLF14-KO:WT')]['SC'])
dwmean_fko_m_sqwat = np.mean(metainfo_m[(metainfo_m['ko_parent'] == 'MAT') & (metainfo_m['genotype'] == 'KLF14-KO:Het')]['SC'])
# Tukey HSD for SC ~ functional_ko
multicomp_f = sm.stats.multicomp.MultiComparison(metainfo_f['SC'], metainfo_f['functional_ko'])
tukeyhsd_f = multicomp_f.tukeyhsd()
tukeyhsd_f = pd.DataFrame(data=tukeyhsd_f._results_table.data[1:], columns=tukeyhsd_f._results_table.data[0])
print(tukeyhsd_f)
multicomp_m = sm.stats.multicomp.MultiComparison(metainfo_m['SC'], metainfo_m['functional_ko'])
tukeyhsd_m = multicomp_m.tukeyhsd()
tukeyhsd_m = pd.DataFrame(data=tukeyhsd_m._results_table.data[1:], columns=tukeyhsd_m._results_table.data[0])
print(tukeyhsd_m)
if SAVE_FIGS:
plt.clf()
plt.gcf().set_size_inches([5.48, 4.8 ])
ax = sns.swarmplot(x='sex', y='SC', hue='functional_ko', data=metainfo, dodge=True, palette=['C2', 'C3', 'C4'])
plt.xlabel('')
plt.ylabel('Subcutaneous depot weight (g)', fontsize=14)
plt.tick_params(labelsize=14)
plt.xticks([0, 1], labels=['Female', 'Male'])
ax.get_legend().set_title('')
ax.legend([])
# ax.legend(['Control (PAT)', 'MAT WT', 'FKO (MAT Het)'], loc='lower right', fontsize=12)
# mean values
plt.plot([-0.35, -0.15], [dwmean_control_f_sqwat,]*2, 'k', linewidth=2)
plt.plot([-0.10, 0.10], [dwmean_matwt_f_sqwat,]*2, 'k', linewidth=2)
plt.plot([ 0.17, 0.35], [dwmean_fko_f_sqwat,]*2, 'k', linewidth=2)
plt.plot([ 0.65, 0.85], [dwmean_control_m_sqwat,]*2, 'k', linewidth=2)
plt.plot([ 0.90, 1.10], [dwmean_matwt_m_sqwat,]*2, 'k', linewidth=2)
plt.plot([ 1.17, 1.35], [dwmean_fko_m_sqwat,]*2, 'k', linewidth=2)
# female
plt.plot([0.0, 0.0, 0.3, 0.3], [1.05, 1.15, 1.15, 1.05], 'k', lw=1.5)
idx = (tukeyhsd_f['group1'] == 'FKO') & (tukeyhsd_f['group2'] == 'MAT_WT')
pval = list(tukeyhsd_f.loc[idx, 'p-adj'])[0]
pval_text = '{0:.2f}'.format(pval) + ' ' + cytometer.stats.pval_to_asterisk(pval)
plt.text(0.15, 1.16, pval_text, ha='center', va='bottom', fontsize=14)
plt.plot([-0.3, -0.3, 0.0, 0.0], [1.65, 1.75, 1.75, 1.65], 'k', lw=1.5)
idx = (tukeyhsd_f['group1'] == 'Control') & (tukeyhsd_f['group2'] == 'MAT_WT')
pval = list(tukeyhsd_f.loc[idx, 'p-adj'])[0]
pval_text = '{0:.2f}'.format(pval) + ' ' + cytometer.stats.pval_to_asterisk(pval)
plt.text(-0.15, 1.76, pval_text, ha='center', va='bottom', fontsize=14)
plt.plot([-0.3, -0.3, 0.3, 0.3], [1.95, 2.05, 2.05, 1.95], 'k', lw=1.5)
idx = (tukeyhsd_f['group1'] == 'Control') & (tukeyhsd_f['group2'] == 'FKO')
pval = list(tukeyhsd_f.loc[idx, 'p-adj'])[0]
pval_text = '{0:.2f}'.format(pval) + ' ' + cytometer.stats.pval_to_asterisk(pval)
plt.text(0.0, 2.06, pval_text, ha='center', va='bottom', fontsize=14)
# male
plt.plot([1.0, 1.0, 1.3, 1.3], [1.3, 1.4, 1.4, 1.3], 'k', lw=1.5)
idx = (tukeyhsd_m['group1'] == 'FKO') & (tukeyhsd_m['group2'] == 'MAT_WT')
pval = list(tukeyhsd_m.loc[idx, 'p-adj'])[0]
pval_text = '{0:.2f}'.format(pval) + ' ' + cytometer.stats.pval_to_asterisk(pval)
plt.text(1.15, 1.41, pval_text, ha='center', va='bottom', fontsize=14)
plt.plot([0.7, 0.7, 1.0, 1.0], [1.6, 1.7, 1.7, 1.6], 'k', lw=1.5)
idx = (tukeyhsd_m['group1'] == 'Control') & (tukeyhsd_m['group2'] == 'MAT_WT')
pval = list(tukeyhsd_m.loc[idx, 'p-adj'])[0]
pval_text = '{0:.2f}'.format(pval) + ' ' + cytometer.stats.pval_to_asterisk(pval)
plt.text(0.85, 1.71, pval_text, ha='center', va='bottom', fontsize=14)
plt.plot([0.7, 0.7, 1.3, 1.3], [1.9, 2.0, 2.0, 1.9], 'k', lw=1.5)
idx = (tukeyhsd_m['group1'] == 'Control') & (tukeyhsd_m['group2'] == 'FKO')
pval = list(tukeyhsd_m.loc[idx, 'p-adj'])[0]
pval_text = '{0:.2f}'.format(pval) + ' ' + cytometer.stats.pval_to_asterisk(pval)
plt.text(1.00, 2.01, pval_text, ha='center', va='bottom', fontsize=14)
plt.ylim(0, 2.3)
plt.tight_layout()
plt.savefig(os.path.join(figures_dir, 'klf14_b6ntac_exp_0111_paper_figures_swarm_sqwat_fko.png'))
plt.savefig(os.path.join(figures_dir, 'klf14_b6ntac_exp_0111_paper_figures_swarm_sqwat_fko.svg'))
print('mean DW')
print('\tfemale gonadal Control: ' + str(dwmean_control_f_gwat))
print('\tfemale gonadal MAT WT: ' + str(dwmean_matwt_f_gwat))
print('\tfemale gonadal FKO: ' + str(dwmean_fko_f_gwat))
print('\tfemale subcut. Control: ' + str(dwmean_control_f_sqwat))
print('\tfemale subcut. MAT WT: ' + str(dwmean_matwt_f_sqwat))
print('\tfemale subcut. FKO: ' + str(dwmean_fko_f_sqwat))
print('\tmale gonadal Control: ' + str(dwmean_control_m_gwat))
print('\tmale gonadal MAT WT: ' + str(dwmean_matwt_m_gwat))
print('\tmale gonadal FKO: ' + str(dwmean_fko_m_gwat))
print('\tmale subcut. Control: ' + str(dwmean_control_m_sqwat))
print('\tmale subcut. MAT WT: ' + str(dwmean_matwt_m_sqwat))
print('\tmale subcut. FKO: ' + str(dwmean_fko_m_sqwat))
## DW ~ BW * functional_ko
########################################################################################################################
# scale BW to avoid large condition numbers
BW_mean = metainfo['BW'].mean()
metainfo['BW__'] = metainfo['BW'] / BW_mean
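# sanity check (illustration): after dividing by its mean, BW__ should average approximately 1
if DEBUG:
    print('mean BW__ = ' + str(metainfo['BW__'].mean()))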
# auxiliary variables to create the null models for the (Control vs. MAT WT), (MAT WT vs. FKO) and (Control vs. FKO) comparisons
metainfo['functional_ko_a'] = metainfo['functional_ko'].astype(
pd.api.types.CategoricalDtype(categories=['Control_MAT_WT', 'FKO'], ordered=True))
metainfo.loc[metainfo['functional_ko'] != 'FKO', 'functional_ko_a'] = 'Control_MAT_WT'
metainfo['functional_ko_b'] = metainfo['functional_ko'].astype(
pd.api.types.CategoricalDtype(categories=['Control', 'MAT_WT_FKO'], ordered=True))
metainfo.loc[metainfo['functional_ko'] != 'Control', 'functional_ko_b'] = 'MAT_WT_FKO'
metainfo['functional_ko_c'] = metainfo['functional_ko'].astype(
pd.api.types.CategoricalDtype(categories=['Control_FKO', 'MAT_WT'], ordered=True))
metainfo.loc[metainfo['functional_ko'] != 'MAT_WT', 'functional_ko_c'] = 'Control_FKO'
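# sanity check (illustration): cross-tabulate the original factor against each auxiliary factor to
# confirm which of the three groups get merged in each null model
if DEBUG:
    print(pd.crosstab(metainfo['functional_ko'], metainfo['functional_ko_a']))
    print(pd.crosstab(metainfo['functional_ko'], metainfo['functional_ko_b']))
    print(pd.crosstab(metainfo['functional_ko'], metainfo['functional_ko_c']))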
# for convenience, create two dataframes (female and male) with the whole-animal metadata
metainfo_f = metainfo[metainfo['sex'] == 'f']
metainfo_m = metainfo[metainfo['sex'] == 'm']
## depot ~ BW * fko models
# global models fitted to 3 strata (Control, MAT WT and FKO):
# these are the models we use to test for correlation; they also serve as the alternative (full) models in the LRTs below
model_gwat_f_global = sm.OLS.from_formula('gWAT ~ BW__ * C(functional_ko)', data=metainfo_f).fit()
model_sqwat_f_global = sm.OLS.from_formula('SC ~ BW__ * C(functional_ko)', data=metainfo_f).fit()
model_gwat_m_global = sm.OLS.from_formula('gWAT ~ BW__ * C(functional_ko)', data=metainfo_m).fit()
model_sqwat_m_global = sm.OLS.from_formula('SC ~ BW__ * C(functional_ko)', data=metainfo_m).fit()
# models fitted to 2 strata (combining Control and MAT WT) to be used as null models
model_gwat_f_control_matwt = sm.OLS.from_formula('gWAT ~ BW__ * C(functional_ko_a)', data=metainfo_f).fit()
model_sqwat_f_control_matwt = sm.OLS.from_formula('SC ~ BW__ * C(functional_ko_a)', data=metainfo_f).fit()
model_gwat_m_control_matwt = sm.OLS.from_formula('gWAT ~ BW__ * C(functional_ko_a)', data=metainfo_m).fit()
model_sqwat_m_control_matwt = sm.OLS.from_formula('SC ~ BW__ * C(functional_ko_a)', data=metainfo_m).fit()
# models fitted to 2 strata (combining MAT WT and FKO) to be used as null models
model_gwat_f_matwt_fko = sm.OLS.from_formula('gWAT ~ BW__ * C(functional_ko_b)', data=metainfo_f).fit()
model_sqwat_f_matwt_fko = sm.OLS.from_formula('SC ~ BW__ * C(functional_ko_b)', data=metainfo_f).fit()
model_gwat_m_matwt_fko = sm.OLS.from_formula('gWAT ~ BW__ * C(functional_ko_b)', data=metainfo_m).fit()
model_sqwat_m_matwt_fko = sm.OLS.from_formula('SC ~ BW__ * C(functional_ko_b)', data=metainfo_m).fit()
# models fitted to 2 strata (combining Control and FKO) to be used as null models
model_gwat_f_control_fko = sm.OLS.from_formula('gWAT ~ BW__ * C(functional_ko_c)', data=metainfo_f).fit()
model_sqwat_f_control_fko = sm.OLS.from_formula('SC ~ BW__ * C(functional_ko_c)', data=metainfo_f).fit()
model_gwat_m_control_fko = sm.OLS.from_formula('gWAT ~ BW__ * C(functional_ko_c)', data=metainfo_m).fit()
model_sqwat_m_control_fko = sm.OLS.from_formula('SC ~ BW__ * C(functional_ko_c)', data=metainfo_m).fit()
# compute LRTs and extract p-values and LRs
lrt = pd.DataFrame(columns=['lr', 'pval', 'pval_ast'])
lr, pval = cytometer.stats.lrtest(model_gwat_f_control_matwt.llf, model_gwat_f_global.llf)
lrt.loc['model_gwat_f_control_matwt', :] = (lr, pval, cytometer.stats.pval_to_asterisk(pval))
lr, pval = cytometer.stats.lrtest(model_sqwat_f_control_matwt.llf, model_sqwat_f_global.llf)
lrt.loc['model_sqwat_f_control_matwt', :] = (lr, pval, cytometer.stats.pval_to_asterisk(pval))
lr, pval = cytometer.stats.lrtest(model_gwat_m_control_matwt.llf, model_gwat_m_global.llf)
lrt.loc['model_gwat_m_control_matwt', :] = (lr, pval, cytometer.stats.pval_to_asterisk(pval))
lr, pval = cytometer.stats.lrtest(model_sqwat_m_control_matwt.llf, model_sqwat_m_global.llf)
lrt.loc['model_sqwat_m_control_matwt', :] = (lr, pval, cytometer.stats.pval_to_asterisk(pval))
lr, pval = cytometer.stats.lrtest(model_gwat_f_matwt_fko.llf, model_gwat_f_global.llf)
lrt.loc['model_gwat_f_matwt_fko', :] = (lr, pval, cytometer.stats.pval_to_asterisk(pval))
lr, pval = cytometer.stats.lrtest(model_sqwat_f_matwt_fko.llf, model_sqwat_f_global.llf)
lrt.loc['model_sqwat_f_matwt_fko', :] = (lr, pval, cytometer.stats.pval_to_asterisk(pval))
lr, pval = cytometer.stats.lrtest(model_gwat_m_matwt_fko.llf, model_gwat_m_global.llf)
lrt.loc['model_gwat_m_matwt_fko', :] = (lr, pval, cytometer.stats.pval_to_asterisk(pval))
lr, pval = cytometer.stats.lrtest(model_sqwat_m_matwt_fko.llf, model_sqwat_m_global.llf)
lrt.loc['model_sqwat_m_matwt_fko', :] = (lr, pval, cytometer.stats.pval_to_asterisk(pval))
lr, pval = cytometer.stats.lrtest(model_gwat_f_control_fko.llf, model_gwat_f_global.llf)
lrt.loc['model_gwat_f_control_fko', :] = (lr, pval, cytometer.stats.pval_to_asterisk(pval))
lr, pval = cytometer.stats.lrtest(model_sqwat_f_control_fko.llf, model_sqwat_f_global.llf)
lrt.loc['model_sqwat_f_control_fko', :] = (lr, pval, cytometer.stats.pval_to_asterisk(pval))
lr, pval = cytometer.stats.lrtest(model_gwat_m_control_fko.llf, model_gwat_m_global.llf)
lrt.loc['model_gwat_m_control_fko', :] = (lr, pval, cytometer.stats.pval_to_asterisk(pval))
lr, pval = cytometer.stats.lrtest(model_sqwat_m_control_fko.llf, model_sqwat_m_global.llf)
lrt.loc['model_sqwat_m_control_fko', :] = (lr, pval, cytometer.stats.pval_to_asterisk(pval))
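# For reference, a minimal illustrative sketch of a likelihood-ratio test between a nested null
# model and the full model, assuming the usual chi-square approximation. The analysis above uses
# cytometer.stats.lrtest, whose exact implementation may differ; the degrees of freedom must equal
# the number of parameters dropped in the null model (here, one dummy and one interaction term).
import scipy.stats

def lrtest_sketch(llf_null, llf_full, df=2):
    # likelihood-ratio statistic: twice the difference in log-likelihoods
    lr = 2 * (llf_full - llf_null)
    # p-value from the upper tail of the chi-square distribution with df degrees of freedom
    pval = scipy.stats.chi2.sf(lr, df)
    return lr, pval

# e.g. lrtest_sketch(model_gwat_f_control_matwt.llf, model_gwat_f_global.llf, df=2)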
# multitest correction using Benjamini-Krieger-Yekutieli
_, lrt['pval_adj'], _, _ = multipletests(lrt['pval'], method='fdr_tsbky', alpha=0.05, returnsorted=False)
lrt['pval_adj_ast'] = cytometer.stats.pval_to_asterisk(lrt['pval_adj'])
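# illustration: inspect the LRT table with raw and BKY-adjusted p-values
if DEBUG:
    print(lrt)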
if SAVE_FIGS:
lrt.to_csv(os.path.join(figures_dir, 'klf14_b6ntac_exp_0111_depot_weight_models_lrt_fko.csv'), na_rep='nan')
# Likelihood ratio tests: Control vs. MAT WT
print('Likelihood Ratio Tests: Control vs. MAT WT')
print('Female')
lr, pval, pval_ast, pval_adj, pval_adj_ast = lrt.loc['model_gwat_f_control_matwt', :]
pval_text = 'LR=' + '{0:.2f}'.format(lr) + ', p=' + '{0:.2g}'.format(pval) + ' ' + pval_ast \
+ ', p-adj=' + '{0:.2g}'.format(pval_adj) + ' ' + pval_adj_ast
print('Gonadal: ' + pval_text)
lr, pval, pval_ast, pval_adj, pval_adj_ast = lrt.loc['model_sqwat_f_control_matwt', :]
pval_text = 'LR=' + '{0:.2f}'.format(lr) + ', p=' + '{0:.2g}'.format(pval) + ' ' + pval_ast \
+ ', p-adj=' + '{0:.2g}'.format(pval_adj) + ' ' + pval_adj_ast
print('Subcutaneous: ' + pval_text)
print('Male')
lr, pval, pval_ast, pval_adj, pval_adj_ast = lrt.loc['model_gwat_m_control_matwt', :]
pval_text = 'LR=' + '{0:.2f}'.format(lr) + ', p=' + '{0:.2g}'.format(pval) + ' ' + pval_ast \
+ ', p-adj=' + '{0:.2g}'.format(pval_adj) + ' ' + pval_adj_ast
print('Gonadal: ' + pval_text)
lr, pval, pval_ast, pval_adj, pval_adj_ast = lrt.loc['model_sqwat_m_control_matwt', :]
pval_text = 'LR=' + '{0:.2f}'.format(lr) + ', p=' + '{0:.2g}'.format(pval) + ' ' + pval_ast \
+ ', p-adj=' + '{0:.2g}'.format(pval_adj) + ' ' + pval_adj_ast
print('Subcutaneous: ' + pval_text)
# Likelihood ratio tests: MAT WT vs. FKO (MAT Het)
print('')
print('Likelihood Ratio Tests: MAT WT vs. FKO (MAT Het)')
print('Female')
lr, pval, pval_ast, pval_adj, pval_adj_ast = lrt.loc['model_gwat_f_matwt_fko', :]
pval_text = 'LR=' + '{0:.2f}'.format(lr) + ', p=' + '{0:.2g}'.format(pval) + ' ' + pval_ast \
+ ', p-adj=' + '{0:.2g}'.format(pval_adj) + ' ' + pval_adj_ast
print('Gonadal: ' + pval_text)
lr, pval, pval_ast, pval_adj, pval_adj_ast = lrt.loc['model_sqwat_f_matwt_fko', :]
pval_text = 'LR=' + '{0:.2f}'.format(lr) + ', p=' + '{0:.2g}'.format(pval) + ' ' + pval_ast \
+ ', p-adj=' + '{0:.2g}'.format(pval_adj) + ' ' + pval_adj_ast
print('Subcutaneous: ' + pval_text)
print('Male')
lr, pval, pval_ast, pval_adj, pval_adj_ast = lrt.loc['model_gwat_m_matwt_fko', :]
pval_text = 'LR=' + '{0:.2f}'.format(lr) + ', p=' + '{0:.2g}'.format(pval) + ' ' + pval_ast \
+ ', p-adj=' + '{0:.2g}'.format(pval_adj) + ' ' + pval_adj_ast
print('Gonadal: ' + pval_text)
lr, pval, pval_ast, pval_adj, pval_adj_ast = lrt.loc['model_sqwat_m_matwt_fko', :]
pval_text = 'LR=' + '{0:.2f}'.format(lr) + ', p=' + '{0:.2g}'.format(pval) + ' ' + pval_ast \
+ ', p-adj=' + '{0:.2g}'.format(pval_adj) + ' ' + pval_adj_ast
print('Subcutaneous: ' + pval_text)
# Likelihood ratio tests: Control vs. FKO (MAT Het)
print('')
print('Likelihood Ratio Tests: Control vs. FKO (MAT Het)')
print('Female')
lr, pval, pval_ast, pval_adj, pval_adj_ast = lrt.loc['model_gwat_f_control_fko', :]
pval_text = 'LR=' + '{0:.2f}'.format(lr) + ', p=' + '{0:.2g}'.format(pval) + ' ' + pval_ast \
+ ', p-adj=' + '{0:.2g}'.format(pval_adj) + ' ' + pval_adj_ast
print('Gonadal: ' + pval_text)
lr, pval, pval_ast, pval_adj, pval_adj_ast = lrt.loc['model_sqwat_f_control_fko', :]
pval_text = 'LR=' + '{0:.2f}'.format(lr) + ', p=' + '{0:.2g}'.format(pval) + ' ' + pval_ast \
+ ', p-adj=' + '{0:.2g}'.format(pval_adj) + ' ' + pval_adj_ast
print('Subcutaneous: ' + pval_text)
print('Male')
lr, pval, pval_ast, pval_adj, pval_adj_ast = lrt.loc['model_gwat_m_control_fko', :]
pval_text = 'LR=' + '{0:.2f}'.format(lr) + ', p=' + '{0:.2g}'.format(pval) + ' ' + pval_ast \
+ ', p-adj=' + '{0:.2g}'.format(pval_adj) + ' ' + pval_adj_ast
print('Gonadal: ' + pval_text)
lr, pval, pval_ast, pval_adj, pval_adj_ast = lrt.loc['model_sqwat_m_control_fko', :]
pval_text = 'LR=' + '{0:.2f}'.format(lr) + ', p=' + '{0:.2g}'.format(pval) + ' ' + pval_ast \
+ ', p-adj=' + '{0:.2g}'.format(pval_adj) + ' ' + pval_adj_ast
print('Subcutaneous: ' + pval_text)
# extract coefficients, errors and p-values from models
model_names = ['model_gwat_f_global', 'model_sqwat_f_global', 'model_gwat_m_global', 'model_sqwat_m_global']
extra_hypotheses='Intercept+C(functional_ko)[T.MAT_WT],Intercept+C(functional_ko)[T.FKO]'\
+ ',BW__+BW__:C(functional_ko)[T.MAT_WT],BW__+BW__:C(functional_ko)[T.FKO]'
df_coeff, df_ci_lo, df_ci_hi, df_pval = \
cytometer.stats.models_coeff_ci_pval(
[model_gwat_f_global, model_sqwat_f_global, model_gwat_m_global, model_sqwat_m_global],
extra_hypotheses=extra_hypotheses,
model_names=model_names)
# multitest correction using Benjamini-Krieger-Yekutieli
# we only need to correct the slopes' p-values, because we are not testing the values of the intercepts
col = ['BW__', 'BW__+BW__:C(functional_ko)[T.MAT_WT]', 'BW__+BW__:C(functional_ko)[T.FKO]']
df_corrected_pval = df_pval.copy()
_, aux, _, _ = multipletests(np.array(df_pval[col]).flatten(), method='fdr_tsbky', alpha=0.05, returnsorted=False)
df_corrected_pval[:] = np.nan
df_corrected_pval[col] = aux.reshape(df_corrected_pval[col].shape)
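# sanity check (illustration): the corrected block must keep the same shape as the raw p-values
if DEBUG:
    print('corrected p-value block shape matches: ' + str(df_corrected_pval[col].shape == df_pval[col].shape))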
# convert p-values to asterisks
df_pval_ast = pd.DataFrame(cytometer.stats.pval_to_asterisk(df_pval, brackets=False), columns=df_coeff.columns,
index=model_names)
df_corrected_pval_ast = pd.DataFrame(cytometer.stats.pval_to_asterisk(df_corrected_pval, brackets=False),
columns=df_coeff.columns, index=model_names)
if SAVE_FIGS:
df_concat = pd.concat([df_coeff, df_ci_lo, df_ci_hi, df_pval, df_pval_ast, df_corrected_pval, df_corrected_pval_ast], axis=1)
idx = list(interleave(np.array_split(range(df_concat.shape[1]), 7)))
df_concat = df_concat.iloc[:, idx]
df_concat.to_csv(os.path.join(figures_dir, 'klf14_b6ntac_exp_0111_depot_weight_models_coeffs_pvals_fko.csv'), na_rep='nan')
if SAVE_FIGS:
plt.clf()
plt.subplot(221)
sex = 'f'
cytometer.stats.plot_linear_regression(model_gwat_f_global, metainfo_f, 'BW__',
other_vars={'sex':sex, 'functional_ko':'Control'},
dep_var='gWAT', sx=BW_mean, c='C2', marker='x',
line_label='Control')
cytometer.stats.plot_linear_regression(model_gwat_f_global, metainfo_f, 'BW__',
other_vars={'sex':sex, 'functional_ko':'MAT_WT'},
dep_var='gWAT', sx=BW_mean, c='C3', marker='+',
line_label='MAT WT')
cytometer.stats.plot_linear_regression(model_gwat_f_global, metainfo_f, 'BW__',
other_vars={'sex':sex, 'functional_ko':'FKO'},
dep_var='gWAT', sx=BW_mean, c='C4', marker='o',
line_label='FKO')
plt.yticks([0.0, 0.5, 1.0, 1.5, 2.0])
plt.ylim(0, 2.1)
plt.tick_params(labelsize=14)
plt.title('Female', fontsize=14)
plt.ylabel('Gonadal\ndepot weight (g)', fontsize=14)
plt.subplot(222)
sex = 'm'
cytometer.stats.plot_linear_regression(model_gwat_m_global, metainfo_m, 'BW__',
other_vars={'sex':sex, 'functional_ko':'Control'},
dep_var='gWAT', sx=BW_mean, c='C2', marker='x',
line_label='Control')
cytometer.stats.plot_linear_regression(model_gwat_m_global, metainfo_m, 'BW__',
other_vars={'sex':sex, 'functional_ko':'MAT_WT'},
dep_var='gWAT', sx=BW_mean, c='C3', marker='+',
line_label='MAT WT')
cytometer.stats.plot_linear_regression(model_gwat_m_global, metainfo_m, 'BW__',
other_vars={'sex':sex, 'functional_ko':'FKO'},
dep_var='gWAT', sx=BW_mean, c='C4', marker='o',
line_label='FKO')
plt.yticks([0.0, 0.5, 1.0, 1.5, 2.0])
plt.ylim(0, 2.1)
plt.tick_params(labelsize=14)
plt.title('Male', fontsize=14)
plt.subplot(223)
sex = 'f'
cytometer.stats.plot_linear_regression(model_sqwat_f_global, metainfo_f, 'BW__',
other_vars={'sex':sex, 'functional_ko':'Control'},
dep_var='SC', sx=BW_mean, c='C2', marker='x',
line_label='Control')
cytometer.stats.plot_linear_regression(model_sqwat_f_global, metainfo_f, 'BW__',
other_vars={'sex':sex, 'functional_ko':'MAT_WT'},
dep_var='SC', sx=BW_mean, c='C3', marker='+',
line_label='MAT WT')
cytometer.stats.plot_linear_regression(model_sqwat_f_global, metainfo_f, 'BW__',
other_vars={'sex':sex, 'functional_ko':'FKO'},
dep_var='SC', sx=BW_mean, c='C4', marker='o',
line_label='FKO')
plt.yticks([0.0, 0.5, 1.0, 1.5, 2.0])
plt.tick_params(labelsize=14)
plt.ylim(0, 2.1)
plt.xlabel('Body weight (g)', fontsize=14)
plt.ylabel('Subcutaneous\ndepot weight (g)', fontsize=14)
plt.legend(loc='upper right')
plt.subplot(224)
sex = 'm'
cytometer.stats.plot_linear_regression(model_sqwat_m_global, metainfo_m, 'BW__',
other_vars={'sex':sex, 'functional_ko':'Control'},
dep_var='SC', sx=BW_mean, c='C2', marker='x',
line_label='Control')
cytometer.stats.plot_linear_regression(model_sqwat_m_global, metainfo_m, 'BW__',
other_vars={'sex':sex, 'functional_ko':'MAT_WT'},
dep_var='SC', sx=BW_mean, c='C3', marker='+',
line_label='MAT WT')
cytometer.stats.plot_linear_regression(model_sqwat_m_global, metainfo_m, 'BW__',
other_vars={'sex':sex, 'functional_ko':'FKO'},
dep_var='SC', sx=BW_mean, c='C4', marker='o',
line_label='FKO')
plt.yticks([0.0, 0.5, 1.0, 1.5, 2.0])
plt.ylim(0, 2.1)
plt.tick_params(labelsize=14)
plt.xlabel('Body weight (g)', fontsize=14)
plt.tight_layout()
plt.savefig(os.path.join(figures_dir, 'klf14_b6ntac_exp_0111_paper_figures_depot_linear_model_fko.png'))
plt.savefig(os.path.join(figures_dir, 'klf14_b6ntac_exp_0111_paper_figures_depot_linear_model_fko.jpg'))
plt.savefig(os.path.join(figures_dir, 'klf14_b6ntac_exp_0111_paper_figures_depot_linear_model_fko.svg'))
########################################################################################################################
## Analyse cell populations from automatically segmented images in two depots: SQWAT and GWAT:
########################################################################################################################
## area_at_quantile ~ functional_ko
########################################################################################################################
# (only the 25%-, 50%- and 75%-quantiles are used below, for illustration purposes and debugging)
# quantiles: 0.00, 0.05, 0.10, ..., 0.95, 1.00
quantiles = np.linspace(0, 1, 21)
# indices of the quantiles we are going to model
i_quantiles = [5, 10, 15] # Q1, Q2, Q3
# we are going to compare median values, like in Small et al.
i_q = i_quantiles[1]
# choose one area_at_quantile value as the output of the linear model
df_all['area_at_quantile'] = np.array(df_all['area_at_quantiles'].to_list())[:, i_q]
df_all['area_at_quantile_10e3'] = df_all['area_at_quantile'] * 1e-3
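# sanity check (illustration): i_q = i_quantiles[1] should point at the median (quantile 0.5)
if DEBUG:
    print('area_at_quantile computed at quantile ' + str(quantiles[i_q]))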
# for convenience create auxiliary dataframes
df_gwat = df_all[df_all['depot'] == 'gwat']
df_sqwat = df_all[df_all['depot'] == 'sqwat']
df_f_gwat = df_all[(df_all['sex'] == 'f') & (df_all['depot'] == 'gwat')]
df_m_gwat = df_all[(df_all['sex'] == 'm') & (df_all['depot'] == 'gwat')]
df_f_sqwat = df_all[(df_all['sex'] == 'f') & (df_all['depot'] == 'sqwat')]
df_m_sqwat = df_all[(df_all['sex'] == 'm') & (df_all['depot'] == 'sqwat')]
# mean areaq Gonadal
areaqmean_control_f_gwat = np.mean(df_f_gwat[df_f_gwat['ko_parent'] == 'PAT']['area_at_quantile_10e3'])
areaqmean_matwt_f_gwat = np.mean(df_f_gwat[(df_f_gwat['ko_parent'] == 'MAT') & (df_f_gwat['genotype'] == 'KLF14-KO:WT')]['area_at_quantile_10e3'])
areaqmean_fko_f_gwat = np.mean(df_f_gwat[(df_f_gwat['ko_parent'] == 'MAT') & (df_f_gwat['genotype'] == 'KLF14-KO:Het')]['area_at_quantile_10e3'])
areaqmean_control_m_gwat = np.mean(df_m_gwat[df_m_gwat['ko_parent'] == 'PAT']['area_at_quantile_10e3'])
areaqmean_matwt_m_gwat = np.mean(df_m_gwat[(df_m_gwat['ko_parent'] == 'MAT') & (df_m_gwat['genotype'] == 'KLF14-KO:WT')]['area_at_quantile_10e3'])
areaqmean_fko_m_gwat = np.mean(df_m_gwat[(df_m_gwat['ko_parent'] == 'MAT') & (df_m_gwat['genotype'] == 'KLF14-KO:Het')]['area_at_quantile_10e3'])
# mean areaq Subcut.
areaqmean_control_f_sqwat = np.mean(df_f_sqwat[df_f_sqwat['ko_parent'] == 'PAT']['area_at_quantile_10e3'])
areaqmean_matwt_f_sqwat = np.mean(df_f_sqwat[(df_f_sqwat['ko_parent'] == 'MAT') & (df_f_sqwat['genotype'] == 'KLF14-KO:WT')]['area_at_quantile_10e3'])
areaqmean_fko_f_sqwat = np.mean(df_f_sqwat[(df_f_sqwat['ko_parent'] == 'MAT') & (df_f_sqwat['genotype'] == 'KLF14-KO:Het')]['area_at_quantile_10e3'])
areaqmean_control_m_sqwat = np.mean(df_m_sqwat[df_m_sqwat['ko_parent'] == 'PAT']['area_at_quantile_10e3'])
areaqmean_matwt_m_sqwat = np.mean(df_m_sqwat[(df_m_sqwat['ko_parent'] == 'MAT') & (df_m_sqwat['genotype'] == 'KLF14-KO:WT')]['area_at_quantile_10e3'])
areaqmean_fko_m_sqwat = np.mean(df_m_sqwat[(df_m_sqwat['ko_parent'] == 'MAT') & (df_m_sqwat['genotype'] == 'KLF14-KO:Het')]['area_at_quantile_10e3'])
# Tukey HSD for area_at_quantile ~ functional_ko
multicomp_gwat_f = sm.stats.multicomp.MultiComparison(df_f_gwat['area_at_quantile_10e3'], df_f_gwat['functional_ko'])
tukeyhsd_gwat_f = multicomp_gwat_f.tukeyhsd()
tukeyhsd_gwat_f = pd.DataFrame(data=tukeyhsd_gwat_f._results_table.data[1:], columns=tukeyhsd_gwat_f._results_table.data[0])
print(tukeyhsd_gwat_f)
multicomp_gwat_m = sm.stats.multicomp.MultiComparison(df_m_gwat['area_at_quantile_10e3'], df_m_gwat['functional_ko'])
tukeyhsd_gwat_m = multicomp_gwat_m.tukeyhsd()
tukeyhsd_gwat_m = pd.DataFrame(data=tukeyhsd_gwat_m._results_table.data[1:], columns=tukeyhsd_gwat_m._results_table.data[0])
print(tukeyhsd_gwat_m)
multicomp_sqwat_f = sm.stats.multicomp.MultiComparison(df_f_sqwat['area_at_quantile_10e3'], df_f_sqwat['functional_ko'])
tukeyhsd_sqwat_f = multicomp_sqwat_f.tukeyhsd()
tukeyhsd_sqwat_f = pd.DataFrame(data=tukeyhsd_sqwat_f._results_table.data[1:], columns=tukeyhsd_sqwat_f._results_table.data[0])
print(tukeyhsd_sqwat_f)
multicomp_sqwat_m = sm.stats.multicomp.MultiComparison(df_m_sqwat['area_at_quantile_10e3'], df_m_sqwat['functional_ko'])
tukeyhsd_sqwat_m = multicomp_sqwat_m.tukeyhsd()
tukeyhsd_sqwat_m = pd.DataFrame(data=tukeyhsd_sqwat_m._results_table.data[1:], columns=tukeyhsd_sqwat_m._results_table.data[0])
print(tukeyhsd_sqwat_m)
if SAVE_FIGS:
plt.clf()
plt.gcf().set_size_inches([5.48, 4.8 ])
ax = sns.swarmplot(x='sex', y='area_at_quantile_10e3', hue='functional_ko', data=df_gwat, dodge=True, palette=['C2', 'C3', 'C4'])
plt.xlabel('')
plt.ylabel('Area$_{\mathrm{Q2}}$ ($10^3 \ \mu m^2$)', fontsize=14)
plt.tick_params(labelsize=14)
plt.xticks([0, 1], labels=['Female', 'Male'])
ax.get_legend().set_title('')
ax.legend(['Control (PAT)', 'MAT WT', 'FKO (MAT Het)'], loc='lower right', fontsize=12)
# mean values
plt.plot([-0.35, -0.15], [areaqmean_control_f_gwat,]*2, 'k', linewidth=2)
plt.plot([-0.10, 0.10], [areaqmean_matwt_f_gwat,]*2, 'k', linewidth=2)
plt.plot([ 0.17, 0.35], [areaqmean_fko_f_gwat,]*2, 'k', linewidth=2)
plt.plot([ 0.65, 0.85], [areaqmean_control_m_gwat,]*2, 'k', linewidth=2)
plt.plot([ 0.90, 1.10], [areaqmean_matwt_m_gwat,]*2, 'k', linewidth=2)
plt.plot([ 1.17, 1.35], [areaqmean_fko_m_gwat,]*2, 'k', linewidth=2)
# female
plt.plot([-0.3, -0.3, 0.0, 0.0], [7.350, 7.550, 7.550, 7.350], 'k', lw=1.5)
idx = (tukeyhsd_gwat_f['group1'] == 'Control') & (tukeyhsd_gwat_f['group2'] == 'MAT_WT')
pval = list(tukeyhsd_gwat_f.loc[idx, 'p-adj'])[0]
pval_text = '{0:.3f}'.format(pval) + ' ' + cytometer.stats.pval_to_asterisk(pval)
plt.text(-0.15, 7.600, pval_text, ha='center', va='bottom', fontsize=14)
plt.plot([0.0, 0.0, 0.3, 0.3], [8.050, 8.250, 8.250, 8.050], 'k', lw=1.5)
idx = (tukeyhsd_gwat_f['group1'] == 'FKO') & (tukeyhsd_gwat_f['group2'] == 'MAT_WT')
pval = list(tukeyhsd_gwat_f.loc[idx, 'p-adj'])[0]
pval_text = '{0:.2f}'.format(pval) + ' ' + cytometer.stats.pval_to_asterisk(pval)
plt.text(0.15, 8.300, pval_text, ha='center', va='bottom', fontsize=14)
plt.plot([-0.3, -0.3, 0.3, 0.3], [8.750, 8.950, 8.950, 8.750], 'k', lw=1.5)
idx = (tukeyhsd_gwat_f['group1'] == 'Control') & (tukeyhsd_gwat_f['group2'] == 'FKO')
pval = list(tukeyhsd_gwat_f.loc[idx, 'p-adj'])[0]
pval_text = '{0:.2f}'.format(pval) + ' ' + cytometer.stats.pval_to_asterisk(pval)
plt.text(0.0, 9.000, pval_text, ha='center', va='bottom', fontsize=14)
# male
plt.plot([0.7, 0.7, 1.0, 1.0], [7.700, 7.900, 7.900, 7.700], 'k', lw=1.5)
idx = (tukeyhsd_gwat_m['group1'] == 'Control') & (tukeyhsd_gwat_m['group2'] == 'MAT_WT')
pval = list(tukeyhsd_gwat_m.loc[idx, 'p-adj'])[0]
pval_text = '{0:.2f}'.format(pval) + ' ' + cytometer.stats.pval_to_asterisk(pval)
plt.text(0.85, 7.950, pval_text, ha='center', va='bottom', fontsize=14)
plt.plot([1.0, 1.0, 1.3, 1.3], [8.400, 8.600, 8.600, 8.400], 'k', lw=1.5)
idx = (tukeyhsd_gwat_m['group1'] == 'FKO') & (tukeyhsd_gwat_m['group2'] == 'MAT_WT')
pval = list(tukeyhsd_gwat_m.loc[idx, 'p-adj'])[0]
pval_text = '{0:.2f}'.format(pval) + ' ' + cytometer.stats.pval_to_asterisk(pval)
plt.text(1.15, 8.650, pval_text, ha='center', va='bottom', fontsize=14)
plt.plot([0.7, 0.7, 1.3, 1.3], [9.100, 9.300, 9.300, 9.100], 'k', lw=1.5)
idx = (tukeyhsd_gwat_m['group1'] == 'Control') & (tukeyhsd_gwat_m['group2'] == 'FKO')
pval = list(tukeyhsd_gwat_m.loc[idx, 'p-adj'])[0]
pval_text = '{0:.2f}'.format(pval) + ' ' + cytometer.stats.pval_to_asterisk(pval)
plt.text(1.00, 9.350, pval_text, ha='center', va='bottom', fontsize=14)
plt.ylim(1.000, 10.500)
plt.title('Gonadal', fontsize=14)
plt.tight_layout()
plt.savefig(os.path.join(figures_dir, 'klf14_b6ntac_exp_0111_paper_figures_swarm_areaq_fko_gwat.png'))
plt.savefig(os.path.join(figures_dir, 'klf14_b6ntac_exp_0111_paper_figures_swarm_areaq_fko_gwat.svg'))
if SAVE_FIGS:
plt.clf()
plt.gcf().set_size_inches([5.48, 4.8 ])
ax = sns.swarmplot(x='sex', y='area_at_quantile_10e3', hue='functional_ko', data=df_sqwat, dodge=True, palette=['C2', 'C3', 'C4'])
plt.xlabel('')
plt.ylabel('Area$_{\mathrm{Q2}}$ ($10^3 \ \mu m^2$)', fontsize=14)
plt.tick_params(labelsize=14)
plt.xticks([0, 1], labels=['Female', 'Male'])
ax.get_legend().set_title('')
ax.get_legend().remove()
# mean values
plt.plot([-0.35, -0.15], [areaqmean_control_f_sqwat,]*2, 'k', linewidth=2)
plt.plot([-0.10, 0.10], [areaqmean_matwt_f_sqwat,]*2, 'k', linewidth=2)
plt.plot([ 0.17, 0.35], [areaqmean_fko_f_sqwat,]*2, 'k', linewidth=2)
plt.plot([ 0.65, 0.85], [areaqmean_control_m_sqwat,]*2, 'k', linewidth=2)
plt.plot([ 0.90, 1.10], [areaqmean_matwt_m_sqwat,]*2, 'k', linewidth=2)
plt.plot([ 1.17, 1.35], [areaqmean_fko_m_sqwat,]*2, 'k', linewidth=2)
# female
plt.plot([-0.3, -0.3, 0.0, 0.0], [5.4, 5.6, 5.6, 5.4], 'k', lw=1.5)
idx = (tukeyhsd_sqwat_f['group1'] == 'Control') & (tukeyhsd_sqwat_f['group2'] == 'MAT_WT')
pval = list(tukeyhsd_sqwat_f.loc[idx, 'p-adj'])[0]
pval_text = '{0:.2f}'.format(pval) + ' ' + cytometer.stats.pval_to_asterisk(pval)
plt.text(-0.15, 5.65, pval_text, ha='center', va='bottom', fontsize=14)
plt.plot([0.0, 0.0, 0.3, 0.3], [6.1, 6.3, 6.3, 6.1], 'k', lw=1.5)
idx = (tukeyhsd_sqwat_f['group1'] == 'FKO') & (tukeyhsd_sqwat_f['group2'] == 'MAT_WT')
pval = list(tukeyhsd_sqwat_f.loc[idx, 'p-adj'])[0]
pval_text = '{0:.2f}'.format(pval) + ' ' + cytometer.stats.pval_to_asterisk(pval)
plt.text(0.15, 6.35, pval_text, ha='center', va='bottom', fontsize=14)
plt.plot([-0.3, -0.3, 0.3, 0.3], [6.8, 7.0, 7.0, 6.8], 'k', lw=1.5)
idx = (tukeyhsd_sqwat_f['group1'] == 'Control') & (tukeyhsd_sqwat_f['group2'] == 'FKO')
pval = list(tukeyhsd_sqwat_f.loc[idx, 'p-adj'])[0]
pval_text = '{0:.2f}'.format(pval) + ' ' + cytometer.stats.pval_to_asterisk(pval)
plt.text(0.0, 7.05, pval_text, ha='center', va='bottom', fontsize=14)
# male
plt.plot([0.7, 0.7, 1.0, 1.0], [5.15, 5.35, 5.35, 5.15], 'k', lw=1.5)
idx = (tukeyhsd_sqwat_m['group1'] == 'Control') & (tukeyhsd_sqwat_m['group2'] == 'MAT_WT')
pval = list(tukeyhsd_sqwat_m.loc[idx, 'p-adj'])[0]
pval_text = '{0:.2f}'.format(pval) + ' ' + cytometer.stats.pval_to_asterisk(pval)
plt.text(0.85, 5.4, pval_text, ha='center', va='bottom', fontsize=14)
plt.plot([1.0, 1.0, 1.3, 1.3], [5.85, 6.05, 6.05, 5.85], 'k', lw=1.5)
idx = (tukeyhsd_sqwat_m['group1'] == 'FKO') & (tukeyhsd_sqwat_m['group2'] == 'MAT_WT')
pval = list(tukeyhsd_sqwat_m.loc[idx, 'p-adj'])[0]
pval_text = '{0:.2f}'.format(pval) + ' ' + cytometer.stats.pval_to_asterisk(pval)
plt.text(1.15, 6.1, pval_text, ha='center', va='bottom', fontsize=14)
plt.plot([0.7, 0.7, 1.3, 1.3], [6.55, 6.75, 6.75, 6.55], 'k', lw=1.5)
idx = (tukeyhsd_sqwat_m['group1'] == 'Control') & (tukeyhsd_sqwat_m['group2'] == 'FKO')
pval = list(tukeyhsd_sqwat_m.loc[idx, 'p-adj'])[0]
pval_text = '{0:.2f}'.format(pval) + ' ' + cytometer.stats.pval_to_asterisk(pval)
plt.text(1.00, 6.8, pval_text, ha='center', va='bottom', fontsize=14)
plt.ylim(1.000, 10.500)
plt.title('Subcutaneous', fontsize=14)
plt.tight_layout()
plt.savefig(os.path.join(figures_dir, 'klf14_b6ntac_exp_0111_paper_figures_swarm_areaq_fko_sqwat.png'))
plt.savefig(os.path.join(figures_dir, 'klf14_b6ntac_exp_0111_paper_figures_swarm_areaq_fko_sqwat.svg'))
print('mean areaq')
print('\tfemale gonadal Control: ' + str(areaqmean_control_f_gwat))
print('\tfemale gonadal MAT WT: ' + str(areaqmean_matwt_f_gwat))
print('\tfemale gonadal FKO: ' + str(areaqmean_fko_f_gwat))
print('\tfemale subcut. Control: ' + str(areaqmean_control_f_sqwat))
print('\tfemale subcut. MAT WT: ' + str(areaqmean_matwt_f_sqwat))
print('\tfemale subcut. FKO: ' + str(areaqmean_fko_f_sqwat))
print('\tmale gonadal Control: ' + str(areaqmean_control_m_gwat))
print('\tmale gonadal MAT WT: ' + str(areaqmean_matwt_m_gwat))
print('\tmale gonadal FKO: ' + str(areaqmean_fko_m_gwat))
print('\tmale subcut. Control: ' + str(areaqmean_control_m_sqwat))
print('\tmale subcut. MAT WT: ' + str(areaqmean_matwt_m_sqwat))
print('\tmale subcut. FKO: ' + str(areaqmean_fko_m_sqwat))
## one data point per animal
## linear regression analysis of quantile_area ~ DW * functional_ko
## USED IN PAPER
########################################################################################################################
## (only the 25%-, 50%- and 75%-quantiles are modelled, for illustration purposes and debugging)
# quantiles: 0.00, 0.05, 0.10, ..., 0.95, 1.00
quantiles = np.linspace(0, 1, 21)
# indices of the quantiles we are going to model
i_quantiles = [5, 10, 15] # Q1, Q2, Q3
# auxiliary variables for LRT null-models
df_all['functional_ko_a'] = df_all['functional_ko'].astype(
pd.api.types.CategoricalDtype(categories=['Control_MAT_WT', 'FKO'], ordered=True))
df_all.loc[df_all['functional_ko'] != 'FKO', 'functional_ko_a'] = 'Control_MAT_WT'
df_all['functional_ko_b'] = df_all['functional_ko'].astype(
pd.api.types.CategoricalDtype(categories=['Control', 'MAT_WT_FKO'], ordered=True))
df_all.loc[df_all['functional_ko'] != 'Control', 'functional_ko_b'] = 'MAT_WT_FKO'
df_all['functional_ko_c'] = df_all['functional_ko'].astype(
pd.api.types.CategoricalDtype(categories=['Control_FKO', 'MAT_WT'], ordered=True))
df_all.loc[df_all['functional_ko'] != 'MAT_WT', 'functional_ko_c'] = 'Control_FKO'
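# sanity check (illustration): each auxiliary factor should collapse the three functional_ko
# groups into just two levels
if DEBUG:
    for aux_col in ['functional_ko_a', 'functional_ko_b', 'functional_ko_c']:
        print(aux_col + ': ' + str(list(df_all[aux_col].unique())))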
# fit linear models to area quantiles
models_gwat_f_global = []
models_gwat_m_global = []
models_sqwat_f_global = []
models_sqwat_m_global = []
models_gwat_f_control_matwt = []
models_gwat_m_control_matwt = []
models_sqwat_f_control_matwt = []
models_sqwat_m_control_matwt = []
models_gwat_f_matwt_fko = []
models_gwat_m_matwt_fko = []
models_sqwat_f_matwt_fko = []
models_sqwat_m_matwt_fko = []
models_gwat_f_control_fko = []
models_gwat_m_control_fko = []
models_sqwat_f_control_fko = []
models_sqwat_m_control_fko = []
for i_q in i_quantiles:
# choose one area_at_quantile value as the output of the linear model
df_all['area_at_quantile'] = np.array(df_all['area_at_quantiles'].to_list())[:, i_q]
# for convenience create two dataframes (female and male) with the data for the current depot
df_f_gwat = df_all[(df_all['sex'] == 'f') & (df_all['depot'] == 'gwat')]
df_m_gwat = df_all[(df_all['sex'] == 'm') & (df_all['depot'] == 'gwat')]
df_f_sqwat = df_all[(df_all['sex'] == 'f') & (df_all['depot'] == 'sqwat')]
df_m_sqwat = df_all[(df_all['sex'] == 'm') & (df_all['depot'] == 'sqwat')]
# global models fitted to 3 strata (Control, MAT WT and FKO):
# These are the models that we are going to use to test for correlation, apart from the LRTs
model_gwat_f_global = sm.OLS.from_formula('area_at_quantile ~ DW * C(functional_ko)', data=df_f_gwat).fit()
model_gwat_m_global = sm.OLS.from_formula('area_at_quantile ~ DW * C(functional_ko)', data=df_m_gwat).fit()
model_sqwat_f_global = sm.OLS.from_formula('area_at_quantile ~ DW * C(functional_ko)', data=df_f_sqwat).fit()
model_sqwat_m_global = sm.OLS.from_formula('area_at_quantile ~ DW * C(functional_ko)', data=df_m_sqwat).fit()
# models fitted to 2 strata (combining Control and MAT WT) to be used as null models
model_gwat_f_control_matwt = sm.OLS.from_formula('area_at_quantile ~ DW * C(functional_ko_a)', data=df_f_gwat).fit()
model_gwat_m_control_matwt = sm.OLS.from_formula('area_at_quantile ~ DW * C(functional_ko_a)', data=df_m_gwat).fit()
model_sqwat_f_control_matwt = sm.OLS.from_formula('area_at_quantile ~ DW * C(functional_ko_a)', data=df_f_sqwat).fit()
model_sqwat_m_control_matwt = sm.OLS.from_formula('area_at_quantile ~ DW * C(functional_ko_a)', data=df_m_sqwat).fit()
# models fitted to 2 strata (combining MAT WT and FKO) to be used as null models
model_gwat_f_matwt_fko = sm.OLS.from_formula('area_at_quantile ~ DW * C(functional_ko_b)', data=df_f_gwat).fit()
model_gwat_m_matwt_fko = sm.OLS.from_formula('area_at_quantile ~ DW * C(functional_ko_b)', data=df_m_gwat).fit()
model_sqwat_f_matwt_fko = sm.OLS.from_formula('area_at_quantile ~ DW * C(functional_ko_b)', data=df_f_sqwat).fit()
model_sqwat_m_matwt_fko = sm.OLS.from_formula('area_at_quantile ~ DW * C(functional_ko_b)', data=df_m_sqwat).fit()
# models fitted to 2 strata (combining Control and FKO) to be used as null models
model_gwat_f_control_fko = sm.OLS.from_formula('area_at_quantile ~ DW * C(functional_ko_c)', data=df_f_gwat).fit()
model_gwat_m_control_fko = sm.OLS.from_formula('area_at_quantile ~ DW * C(functional_ko_c)', data=df_m_gwat).fit()
model_sqwat_f_control_fko = sm.OLS.from_formula('area_at_quantile ~ DW * C(functional_ko_c)', data=df_f_sqwat).fit()
model_sqwat_m_control_fko = sm.OLS.from_formula('area_at_quantile ~ DW * C(functional_ko_c)', data=df_m_sqwat).fit()
models_gwat_f_global.append(model_gwat_f_global)
models_gwat_m_global.append(model_gwat_m_global)
models_sqwat_f_global.append(model_sqwat_f_global)
models_sqwat_m_global.append(model_sqwat_m_global)
models_gwat_f_control_matwt.append(model_gwat_f_control_matwt)
models_gwat_m_control_matwt.append(model_gwat_m_control_matwt)
models_sqwat_f_control_matwt.append(model_sqwat_f_control_matwt)
models_sqwat_m_control_matwt.append(model_sqwat_m_control_matwt)
models_gwat_f_matwt_fko.append(model_gwat_f_matwt_fko)
models_gwat_m_matwt_fko.append(model_gwat_m_matwt_fko)
models_sqwat_f_matwt_fko.append(model_sqwat_f_matwt_fko)
models_sqwat_m_matwt_fko.append(model_sqwat_m_matwt_fko)
models_gwat_f_control_fko.append(model_gwat_f_control_fko)
models_gwat_m_control_fko.append(model_gwat_m_control_fko)
models_sqwat_f_control_fko.append(model_sqwat_f_control_fko)
models_sqwat_m_control_fko.append(model_sqwat_m_control_fko)
if DEBUG:
print(model_gwat_f_global.summary())
print(model_gwat_m_global.summary())
print(model_sqwat_f_global.summary())
print(model_sqwat_m_global.summary())
print(model_gwat_f_control_matwt.summary())
print(model_gwat_m_control_matwt.summary())
print(model_sqwat_f_control_matwt.summary())
print(model_sqwat_m_control_matwt.summary())
print(model_gwat_f_matwt_fko.summary())
print(model_gwat_m_matwt_fko.summary())
print(model_sqwat_f_matwt_fko.summary())
print(model_sqwat_m_matwt_fko.summary())
print(model_gwat_f_control_fko.summary())
print(model_gwat_m_control_fko.summary())
print(model_sqwat_f_control_fko.summary())
print(model_sqwat_m_control_fko.summary())
# extract coefficients, errors and p-values from the global models (Q1, Q2, Q3)
model_names = ['model_gwat_f_global_q1', 'model_gwat_f_global_q2', 'model_gwat_f_global_q3',
'model_sqwat_f_global_q1', 'model_sqwat_f_global_q2', 'model_sqwat_f_global_q3',
'model_gwat_m_global_q1', 'model_gwat_m_global_q2', 'model_gwat_m_global_q3',
'model_sqwat_m_global_q1', 'model_sqwat_m_global_q2', 'model_sqwat_m_global_q3'
]
extra_hypotheses='Intercept+C(functional_ko)[T.MAT_WT],Intercept+C(functional_ko)[T.FKO]'\
+ ',DW+DW:C(functional_ko)[T.MAT_WT],DW+DW:C(functional_ko)[T.FKO]'
df_coeff, df_ci_lo, df_ci_hi, df_pval = \
cytometer.stats.models_coeff_ci_pval(
[models_gwat_f_global[0], models_gwat_f_global[1], models_gwat_f_global[2],
models_sqwat_f_global[0], models_sqwat_f_global[1], models_sqwat_f_global[2],
models_gwat_m_global[0], models_gwat_m_global[1], models_gwat_m_global[2],
models_sqwat_m_global[0], models_sqwat_m_global[1], models_sqwat_m_global[2]],
extra_hypotheses=extra_hypotheses,
model_names=model_names)
# multitest correction using Benjamini-Krieger-Yekutieli
# we only need to correct the slopes' p-values, because we are not testing the values of the intercepts
col = ['DW', 'DW+DW:C(functional_ko)[T.MAT_WT]', 'DW+DW:C(functional_ko)[T.FKO]']
df_corrected_pval = df_pval.copy()
_, aux, _, _ = multipletests(np.array(df_pval[col]).flatten(), method='fdr_tsbky', alpha=0.05, returnsorted=False)
df_corrected_pval[:] = np.nan
df_corrected_pval[col] = aux.reshape(df_corrected_pval[col].shape)
# convert p-values to asterisks
df_pval_ast = pd.DataFrame(cytometer.stats.pval_to_asterisk(df_pval, brackets=False), columns=df_coeff.columns,
index=model_names)
df_corrected_pval_ast = pd.DataFrame(cytometer.stats.pval_to_asterisk(df_corrected_pval, brackets=False),
columns=df_coeff.columns, index=model_names)
if SAVE_FIGS:
df_concat = pd.concat(
[df_coeff, df_ci_lo, df_ci_hi, df_pval, df_pval_ast, df_corrected_pval, df_corrected_pval_ast], axis=1)
idx = list(interleave(np.array_split(range(df_concat.shape[1]), 7)))
df_concat = df_concat.iloc[:, idx]
df_concat.to_csv(os.path.join(figures_dir, 'klf14_b6ntac_exp_0111_area_at_quartiles_fko_models_coeffs_pvals.csv'),
na_rep='nan')
# plot
if SAVE_FIGS:
plt.clf()
plt.gcf().set_size_inches([6.4, 7.6])
depot = 'gwat'
plt.subplot(321)
# Q1 Female
i = 0 # quantile index for "i_quantiles"
i_q = i_quantiles[i] # quantile index for "quantiles"
sex = 'f'
idx = (df_all['sex'] == sex) & (df_all['depot'] == depot)
df = df_all[idx].copy()
df['area_at_quantile'] = np.array(df['area_at_quantiles'].to_list())[:, i_q] # vector of areas at current quantile
cytometer.stats.plot_linear_regression(models_gwat_f_global[i], df, 'DW',
other_vars={'depot': depot, 'sex': sex, 'functional_ko': 'Control'},
dep_var='area_at_quantile', sy=1e-3, c='C2', marker='x',
line_label='Control')
cytometer.stats.plot_linear_regression(models_gwat_f_global[i], df, 'DW',
other_vars={'depot': depot, 'sex': sex, 'functional_ko': 'MAT_WT'},
dep_var='area_at_quantile', sy=1e-3, c='C3', marker='+',
line_label='MAT WT')
cytometer.stats.plot_linear_regression(models_gwat_f_global[i], df, 'DW',
other_vars={'depot': depot, 'sex': sex, 'functional_ko': 'FKO'},
dep_var='area_at_quantile', sy=1e-3, c='C4', marker='o',
line_label='FKO')
plt.tick_params(labelsize=14)
plt.ylabel('Area$_{\mathrm{Q1}}$ ($10^3\ \mu m^2$)', fontsize=14)
plt.title('Female', fontsize=14)
plt.legend(loc='best', fontsize=12)
plt.ylim(0.9, 5)
plt.subplot(322)
# Q1 Male
i = 0 # quantile index for "i_quantiles"
i_q = i_quantiles[i] # quantile index for "quantiles"
sex = 'm'
idx = (df_all['sex'] == sex) & (df_all['depot'] == depot)
df = df_all[idx].copy()
df['area_at_quantile'] = np.array(df['area_at_quantiles'].to_list())[:, i_q] # vector of areas at current quantile
cytometer.stats.plot_linear_regression(models_gwat_m_global[i], df, 'DW',
other_vars={'depot': depot, 'sex': sex, 'functional_ko': 'Control'},
dep_var='area_at_quantile', sy=1e-3, c='C2', marker='x',
line_label='Control')
cytometer.stats.plot_linear_regression(models_gwat_m_global[i], df, 'DW',
other_vars={'depot': depot, 'sex': sex, 'functional_ko': 'MAT_WT'},
dep_var='area_at_quantile', sy=1e-3, c='C3', marker='+',
line_label='MAT WT')
cytometer.stats.plot_linear_regression(models_gwat_m_global[i], df, 'DW',
other_vars={'depot': depot, 'sex': sex, 'functional_ko': 'FKO'},
dep_var='area_at_quantile', sy=1e-3, c='C4', marker='o',
line_label='FKO')
plt.tick_params(labelsize=14)
plt.title('Male', fontsize=14)
plt.ylim(0.9, 5)
plt.subplot(323)
# Q2 Female
i = 1 # quantile index for "i_quantiles"
i_q = i_quantiles[i] # quantile index for "quantiles"
sex = 'f'
idx = (df_all['sex'] == sex) & (df_all['depot'] == depot)
df = df_all[idx].copy()
df['area_at_quantile'] = np.array(df['area_at_quantiles'].to_list())[:, i_q] # vector of areas at current quantile
cytometer.stats.plot_linear_regression(models_gwat_f_global[i], df, 'DW',
other_vars={'depot': depot, 'sex': sex, 'functional_ko': 'Control'},
dep_var='area_at_quantile', sy=1e-3, c='C2', marker='x',
line_label='Control')
cytometer.stats.plot_linear_regression(models_gwat_f_global[i], df, 'DW',
other_vars={'depot': depot, 'sex': sex, 'functional_ko': 'MAT_WT'},
dep_var='area_at_quantile', sy=1e-3, c='C3', marker='+',
line_label='MAT WT')
cytometer.stats.plot_linear_regression(models_gwat_f_global[i], df, 'DW',
other_vars={'depot': depot, 'sex': sex, 'functional_ko': 'FKO'},
dep_var='area_at_quantile', sy=1e-3, c='C4', marker='o',
line_label='FKO')
plt.tick_params(labelsize=14)
plt.ylabel('Area$_{\mathrm{Q2}}$ ($10^3\ \mu m^2$)', fontsize=14)
plt.ylim(1.4, 8.5)
plt.subplot(324)
# Q2 Male
i = 1 # quantile index for "i_quantiles"
i_q = i_quantiles[i] # quantile index for "quantiles"
sex = 'm'
idx = (df_all['sex'] == sex) & (df_all['depot'] == depot)
df = df_all[idx].copy()
df['area_at_quantile'] = np.array(df['area_at_quantiles'].to_list())[:, i_q] # vector of areas at current quantile
cytometer.stats.plot_linear_regression(models_gwat_m_global[i], df, 'DW',
other_vars={'depot': depot, 'sex': sex, 'functional_ko': 'Control'},
dep_var='area_at_quantile', sy=1e-3, c='C2', marker='x',
line_label='Control')
cytometer.stats.plot_linear_regression(models_gwat_m_global[i], df, 'DW',
other_vars={'depot': depot, 'sex': sex, 'functional_ko': 'MAT_WT'},
dep_var='area_at_quantile', sy=1e-3, c='C3', marker='+',
line_label='MAT WT')
cytometer.stats.plot_linear_regression(models_gwat_m_global[i], df, 'DW',
other_vars={'depot': depot, 'sex': sex, 'functional_ko': 'FKO'},
dep_var='area_at_quantile', sy=1e-3, c='C4', marker='o',
line_label='FKO')
plt.tick_params(labelsize=14)
plt.ylim(1.4, 8.5)
plt.subplot(325)
# Q3 Female
i = 2 # quantile index for "i_quantiles"
i_q = i_quantiles[i] # quantile index for "quantiles"
sex = 'f'
idx = (df_all['sex'] == sex) & (df_all['depot'] == depot)
df = df_all[idx].copy()
df['area_at_quantile'] = np.array(df['area_at_quantiles'].to_list())[:, i_q] # vector of areas at current quantile
cytometer.stats.plot_linear_regression(models_gwat_f_global[i], df, 'DW',
other_vars={'depot': depot, 'sex': sex, 'functional_ko': 'Control'},
dep_var='area_at_quantile', sy=1e-3, c='C2', marker='x',
line_label='Control')
cytometer.stats.plot_linear_regression(models_gwat_f_global[i], df, 'DW',
other_vars={'depot': depot, 'sex': sex, 'functional_ko': 'MAT_WT'},
dep_var='area_at_quantile', sy=1e-3, c='C3', marker='+',
line_label='MAT WT')
cytometer.stats.plot_linear_regression(models_gwat_f_global[i], df, 'DW',
other_vars={'depot': depot, 'sex': sex, 'functional_ko': 'FKO'},
dep_var='area_at_quantile', sy=1e-3, c='C4', marker='o',
line_label='FKO')
plt.tick_params(labelsize=14)
plt.ylabel('Area$_{\mathrm{Q3}}$ ($10^3\ \mu m^2$)', fontsize=14)
plt.xlabel('Depot weight (g)', fontsize=14)
plt.ylim(1, 14)
plt.subplot(326)
# Q3 Male
i = 2 # quantile index for "i_quantiles"
i_q = i_quantiles[i] # quantile index for "quantiles"
sex = 'm'
idx = (df_all['sex'] == sex) & (df_all['depot'] == depot)
df = df_all[idx].copy()
df['area_at_quantile'] = np.array(df['area_at_quantiles'].to_list())[:, i_q] # vector of areas at current quantile
cytometer.stats.plot_linear_regression(models_gwat_m_global[i], df, 'DW',
other_vars={'depot': depot, 'sex': sex, 'functional_ko': 'Control'},
dep_var='area_at_quantile', sy=1e-3, c='C2', marker='x',
line_label='Control')
cytometer.stats.plot_linear_regression(models_gwat_m_global[i], df, 'DW',
other_vars={'depot': depot, 'sex': sex, 'functional_ko': 'MAT_WT'},
dep_var='area_at_quantile', sy=1e-3, c='C3', marker='+',
line_label='MAT WT')
cytometer.stats.plot_linear_regression(models_gwat_m_global[i], df, 'DW',
other_vars={'depot': depot, 'sex': sex, 'functional_ko': 'FKO'},
dep_var='area_at_quantile', sy=1e-3, c='C4', marker='o',
line_label='FKO')
plt.tick_params(labelsize=14)
plt.xlabel('Depot weight (g)', fontsize=14)
plt.ylim(1, 14)
depot_title = depot.replace('gwat', 'Gonadal').replace('sqwat', 'Subcutaneous')
plt.suptitle(depot_title, fontsize=14)
plt.tight_layout(rect=[0, 0.03, 1, 0.95])
plt.savefig(os.path.join(figures_dir, 'klf14_b6ntac_exp_0111_area_at_quartile_genotype_models_' + depot + '.png'))
plt.savefig(os.path.join(figures_dir, 'klf14_b6ntac_exp_0111_area_at_quartile_genotype_models_' + depot + '.svg'))
if SAVE_FIGS:
plt.clf()
plt.gcf().set_size_inches([6.4, 7.6])
depot = 'sqwat'
plt.subplot(321)
# Q1 Female
i = 0 # quantile index for "i_quantiles"
i_q = i_quantiles[i] # quantile index for "quantiles"
sex = 'f'
idx = (df_all['sex'] == sex) & (df_all['depot'] == depot)
df = df_all[idx].copy()
df['area_at_quantile'] = np.array(df['area_at_quantiles'].to_list())[:, i_q] # vector of areas at current quantile
cytometer.stats.plot_linear_regression(models_sqwat_f_global[i], df, 'DW',
other_vars={'depot': depot, 'sex': sex, 'functional_ko': 'Control'},
dep_var='area_at_quantile', sy=1e-3, c='C2', marker='x',
line_label='Control')
cytometer.stats.plot_linear_regression(models_sqwat_f_global[i], df, 'DW',
other_vars={'depot': depot, 'sex': sex, 'functional_ko': 'MAT_WT'},
dep_var='area_at_quantile', sy=1e-3, c='C3', marker='+',
line_label='MAT WT')
cytometer.stats.plot_linear_regression(models_sqwat_f_global[i], df, 'DW',
other_vars={'depot': depot, 'sex': sex, 'functional_ko': 'FKO'},
dep_var='area_at_quantile', sy=1e-3, c='C4', marker='o',
line_label='FKO')
plt.tick_params(labelsize=14)
plt.ylabel('Area$_{\mathrm{Q1}}$ ($10^3\ \mu m^2$)', fontsize=14)
plt.title('Female', fontsize=14)
plt.ylim(0.5, 3)
plt.subplot(322)
# Q1 Male
i = 0 # quantile index for "i_quantiles"
i_q = i_quantiles[i] # quantile index for "quantiles"
sex = 'm'
idx = (df_all['sex'] == sex) & (df_all['depot'] == depot)
df = df_all[idx].copy()
df['area_at_quantile'] = np.array(df['area_at_quantiles'].to_list())[:, i_q] # vector of areas at current quantile
cytometer.stats.plot_linear_regression(models_sqwat_m_global[i], df, 'DW',
other_vars={'depot': depot, 'sex': sex, 'functional_ko': 'Control'},
dep_var='area_at_quantile', sy=1e-3, c='C2', marker='x',
line_label='Control')
cytometer.stats.plot_linear_regression(models_sqwat_m_global[i], df, 'DW',
other_vars={'depot': depot, 'sex': sex, 'functional_ko': 'MAT_WT'},
dep_var='area_at_quantile', sy=1e-3, c='C3', marker='+',
line_label='MAT WT')
cytometer.stats.plot_linear_regression(models_sqwat_m_global[i], df, 'DW',
other_vars={'depot': depot, 'sex': sex, 'functional_ko': 'FKO'},
dep_var='area_at_quantile', sy=1e-3, c='C4', marker='o',
line_label='FKO')
plt.tick_params(labelsize=14)
plt.title('Male', fontsize=14)
plt.ylim(0.5, 3)
plt.subplot(323)
# Q2 Female
i = 1 # quantile index for "i_quantiles"
i_q = i_quantiles[i] # quantile index for "quantiles"
sex = 'f'
idx = (df_all['sex'] == sex) & (df_all['depot'] == depot)
df = df_all[idx].copy()
df['area_at_quantile'] = np.array(df['area_at_quantiles'].to_list())[:, i_q] # vector of areas at current quantile
cytometer.stats.plot_linear_regression(models_sqwat_f_global[i], df, 'DW',
other_vars={'depot': depot, 'sex': sex, 'functional_ko': 'Control'},
dep_var='area_at_quantile', sy=1e-3, c='C2', marker='x',
line_label='Control')
cytometer.stats.plot_linear_regression(models_sqwat_f_global[i], df, 'DW',
other_vars={'depot': depot, 'sex': sex, 'functional_ko': 'MAT_WT'},
dep_var='area_at_quantile', sy=1e-3, c='C3', marker='+',
line_label='MAT WT')
cytometer.stats.plot_linear_regression(models_sqwat_f_global[i], df, 'DW',
other_vars={'depot': depot, 'sex': sex, 'functional_ko': 'FKO'},
dep_var='area_at_quantile', sy=1e-3, c='C4', marker='o',
line_label='FKO')
plt.tick_params(labelsize=14)
plt.ylabel(r'Area$_{\mathrm{Q2}}$ ($10^3\ \mu m^2$)', fontsize=14)
plt.ylim(0.8, 6)
plt.subplot(324)
# Q2 Male
i = 1 # quantile index for "i_quantiles"
i_q = i_quantiles[i] # quantile index for "quantiles"
sex = 'm'
idx = (df_all['sex'] == sex) & (df_all['depot'] == depot)
df = df_all[idx].copy()
df['area_at_quantile'] = np.array(df['area_at_quantiles'].to_list())[:, i_q] # vector of areas at current quantile
cytometer.stats.plot_linear_regression(models_sqwat_m_global[i], df, 'DW',
other_vars={'depot': depot, 'sex': sex, 'functional_ko': 'Control'},
dep_var='area_at_quantile', sy=1e-3, c='C2', marker='x',
line_label='Control')
cytometer.stats.plot_linear_regression(models_sqwat_m_global[i], df, 'DW',
other_vars={'depot': depot, 'sex': sex, 'functional_ko': 'MAT_WT'},
dep_var='area_at_quantile', sy=1e-3, c='C3', marker='+',
line_label='MAT WT')
cytometer.stats.plot_linear_regression(models_sqwat_m_global[i], df, 'DW',
other_vars={'depot': depot, 'sex': sex, 'functional_ko': 'FKO'},
dep_var='area_at_quantile', sy=1e-3, c='C4', marker='o',
line_label='FKO')
plt.tick_params(labelsize=14)
plt.ylim(0.8, 6)
plt.subplot(325)
# Q3 Female
i = 2 # quantile index for "i_quantiles"
i_q = i_quantiles[i] # quantile index for "quantiles"
sex = 'f'
idx = (df_all['sex'] == sex) & (df_all['depot'] == depot)
df = df_all[idx].copy()
df['area_at_quantile'] = np.array(df['area_at_quantiles'].to_list())[:, i_q] # vector of areas at current quantile
cytometer.stats.plot_linear_regression(models_sqwat_f_global[i], df, 'DW',
other_vars={'depot': depot, 'sex': sex, 'functional_ko': 'Control'},
dep_var='area_at_quantile', sy=1e-3, c='C2', marker='x',
line_label='Control')
cytometer.stats.plot_linear_regression(models_sqwat_f_global[i], df, 'DW',
other_vars={'depot': depot, 'sex': sex, 'functional_ko': 'MAT_WT'},
dep_var='area_at_quantile', sy=1e-3, c='C3', marker='+',
line_label='MAT WT')
cytometer.stats.plot_linear_regression(models_sqwat_f_global[i], df, 'DW',
other_vars={'depot': depot, 'sex': sex, 'functional_ko': 'FKO'},
dep_var='area_at_quantile', sy=1e-3, c='C4', marker='o',
line_label='FKO')
plt.tick_params(labelsize=14)
plt.ylabel(r'Area$_{\mathrm{Q3}}$ ($10^3\ \mu m^2$)', fontsize=14)
plt.xlabel('Depot weight (g)', fontsize=14)
plt.ylim(1, 10.5)
plt.subplot(326)
# Q3 Male
i = 2 # quantile index for "i_quantiles"
i_q = i_quantiles[i] # quantile index for "quantiles"
sex = 'm'
idx = (df_all['sex'] == sex) & (df_all['depot'] == depot)
df = df_all[idx].copy()
df['area_at_quantile'] = np.array(df['area_at_quantiles'].to_list())[:, i_q] # vector of areas at current quantile
cytometer.stats.plot_linear_regression(models_sqwat_m_global[i], df, 'DW',
other_vars={'depot': depot, 'sex': sex, 'functional_ko': 'Control'},
dep_var='area_at_quantile', sy=1e-3, c='C2', marker='x',
line_label='Control')
cytometer.stats.plot_linear_regression(models_sqwat_m_global[i], df, 'DW',
other_vars={'depot': depot, 'sex': sex, 'functional_ko': 'MAT_WT'},
dep_var='area_at_quantile', sy=1e-3, c='C3', marker='+',
line_label='MAT WT')
cytometer.stats.plot_linear_regression(models_sqwat_m_global[i], df, 'DW',
other_vars={'depot': depot, 'sex': sex, 'functional_ko': 'FKO'},
dep_var='area_at_quantile', sy=1e-3, c='C4', marker='o',
line_label='FKO')
plt.tick_params(labelsize=14)
plt.xlabel('Depot weight (g)', fontsize=14)
plt.ylim(1, 10.5)
depot_title = depot.replace('gwat', 'Gonadal').replace('sqwat', 'Subcutaneous')
plt.suptitle(depot_title, fontsize=14)
plt.tight_layout(rect=[0, 0.03, 1, 0.95])
plt.savefig(os.path.join(figures_dir, 'klf14_b6ntac_exp_0111_area_at_quartile_genotype_models_' + depot + '.png'))
plt.savefig(os.path.join(figures_dir, 'klf14_b6ntac_exp_0111_area_at_quartile_genotype_models_' + depot + '.svg'))
# compute LRTs and extract p-values and LRs
lrt = pd.DataFrame(columns=['lr', 'pval', 'pval_ast'])
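# (for reference: a likelihood-ratio test of a restricted model against a more general one uses
#  LR = 2 * (llf_general - llf_restricted), which is approximately chi-square distributed under the null;
#  the argument order below follows whatever convention cytometer.stats.lrtest expects)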
# Control vs. MAT WT
lr, pval = cytometer.stats.lrtest(models_gwat_f_control_matwt[0].llf, models_gwat_f_global[0].llf)
lrt.loc['model_gwat_f_control_matwt_Q1', :] = (lr, pval, cytometer.stats.pval_to_asterisk(pval))
lr, pval = cytometer.stats.lrtest(models_gwat_f_control_matwt[1].llf, models_gwat_f_global[1].llf)
lrt.loc['model_gwat_f_control_matwt_Q2', :] = (lr, pval, cytometer.stats.pval_to_asterisk(pval))
lr, pval = cytometer.stats.lrtest(models_gwat_f_control_matwt[2].llf, models_gwat_f_global[2].llf)
lrt.loc['model_gwat_f_control_matwt_Q3', :] = (lr, pval, cytometer.stats.pval_to_asterisk(pval))
lr, pval = cytometer.stats.lrtest(models_sqwat_f_control_matwt[0].llf, models_sqwat_f_global[0].llf)
lrt.loc['model_sqwat_f_control_matwt_Q1', :] = (lr, pval, cytometer.stats.pval_to_asterisk(pval))
lr, pval = cytometer.stats.lrtest(models_sqwat_f_control_matwt[1].llf, models_sqwat_f_global[1].llf)
lrt.loc['model_sqwat_f_control_matwt_Q2', :] = (lr, pval, cytometer.stats.pval_to_asterisk(pval))
lr, pval = cytometer.stats.lrtest(models_sqwat_f_control_matwt[2].llf, models_sqwat_f_global[2].llf)
lrt.loc['model_sqwat_f_control_matwt_Q3', :] = (lr, pval, cytometer.stats.pval_to_asterisk(pval))
lr, pval = cytometer.stats.lrtest(models_gwat_m_control_matwt[0].llf, models_gwat_m_global[0].llf)
lrt.loc['model_gwat_m_control_matwt_Q1', :] = (lr, pval, cytometer.stats.pval_to_asterisk(pval))
lr, pval = cytometer.stats.lrtest(models_gwat_m_control_matwt[1].llf, models_gwat_m_global[1].llf)
lrt.loc['model_gwat_m_control_matwt_Q2', :] = (lr, pval, cytometer.stats.pval_to_asterisk(pval))
lr, pval = cytometer.stats.lrtest(models_gwat_m_control_matwt[2].llf, models_gwat_m_global[2].llf)
lrt.loc['model_gwat_m_control_matwt_Q3', :] = (lr, pval, cytometer.stats.pval_to_asterisk(pval))
lr, pval = cytometer.stats.lrtest(models_sqwat_m_control_matwt[0].llf, models_sqwat_m_global[0].llf)
lrt.loc['model_sqwat_m_control_matwt_Q1', :] = (lr, pval, cytometer.stats.pval_to_asterisk(pval))
lr, pval = cytometer.stats.lrtest(models_sqwat_m_control_matwt[1].llf, models_sqwat_m_global[1].llf)
lrt.loc['model_sqwat_m_control_matwt_Q2', :] = (lr, pval, cytometer.stats.pval_to_asterisk(pval))
lr, pval = cytometer.stats.lrtest(models_sqwat_m_control_matwt[2].llf, models_sqwat_m_global[2].llf)
lrt.loc['model_sqwat_m_control_matwt_Q3', :] = (lr, pval, cytometer.stats.pval_to_asterisk(pval))
# MAT WT vs FKO (MAT Het)
lr, pval = cytometer.stats.lrtest(models_gwat_f_matwt_fko[0].llf, models_gwat_f_global[0].llf)
lrt.loc['model_gwat_f_matwt_fko_Q1', :] = (lr, pval, cytometer.stats.pval_to_asterisk(pval))
lr, pval = cytometer.stats.lrtest(models_gwat_f_matwt_fko[1].llf, models_gwat_f_global[1].llf)
lrt.loc['model_gwat_f_matwt_fko_Q2', :] = (lr, pval, cytometer.stats.pval_to_asterisk(pval))
lr, pval = cytometer.stats.lrtest(models_gwat_f_matwt_fko[2].llf, models_gwat_f_global[2].llf)
lrt.loc['model_gwat_f_matwt_fko_Q3', :] = (lr, pval, cytometer.stats.pval_to_asterisk(pval))
lr, pval = cytometer.stats.lrtest(models_sqwat_f_matwt_fko[0].llf, models_sqwat_f_global[0].llf)
lrt.loc['model_sqwat_f_matwt_fko_Q1', :] = (lr, pval, cytometer.stats.pval_to_asterisk(pval))
lr, pval = cytometer.stats.lrtest(models_sqwat_f_matwt_fko[1].llf, models_sqwat_f_global[1].llf)
lrt.loc['model_sqwat_f_matwt_fko_Q2', :] = (lr, pval, cytometer.stats.pval_to_asterisk(pval))
lr, pval = cytometer.stats.lrtest(models_sqwat_f_matwt_fko[2].llf, models_sqwat_f_global[2].llf)
lrt.loc['model_sqwat_f_matwt_fko_Q3', :] = (lr, pval, cytometer.stats.pval_to_asterisk(pval))
lr, pval = cytometer.stats.lrtest(models_gwat_m_matwt_fko[0].llf, models_gwat_m_global[0].llf)
lrt.loc['model_gwat_m_matwt_fko_Q1', :] = (lr, pval, cytometer.stats.pval_to_asterisk(pval))
lr, pval = cytometer.stats.lrtest(models_gwat_m_matwt_fko[1].llf, models_gwat_m_global[1].llf)
lrt.loc['model_gwat_m_matwt_fko_Q2', :] = (lr, pval, cytometer.stats.pval_to_asterisk(pval))
lr, pval = cytometer.stats.lrtest(models_gwat_m_matwt_fko[2].llf, models_gwat_m_global[2].llf)
lrt.loc['model_gwat_m_matwt_fko_Q3', :] = (lr, pval, cytometer.stats.pval_to_asterisk(pval))
lr, pval = cytometer.stats.lrtest(models_sqwat_m_matwt_fko[0].llf, models_sqwat_m_global[0].llf)
lrt.loc['model_sqwat_m_matwt_fko_Q1', :] = (lr, pval, cytometer.stats.pval_to_asterisk(pval))
lr, pval = cytometer.stats.lrtest(models_sqwat_m_matwt_fko[1].llf, models_sqwat_m_global[1].llf)
lrt.loc['model_sqwat_m_matwt_fko_Q2', :] = (lr, pval, cytometer.stats.pval_to_asterisk(pval))
lr, pval = cytometer.stats.lrtest(models_sqwat_m_matwt_fko[2].llf, models_sqwat_m_global[2].llf)
lrt.loc['model_sqwat_m_matwt_fko_Q3', :] = (lr, pval, cytometer.stats.pval_to_asterisk(pval))
# Control vs FKO (MAT Het)
lr, pval = cytometer.stats.lrtest(models_gwat_f_control_fko[0].llf, models_gwat_f_global[0].llf)
lrt.loc['model_gwat_f_control_fko_Q1', :] = (lr, pval, cytometer.stats.pval_to_asterisk(pval))
lr, pval = cytometer.stats.lrtest(models_gwat_f_control_fko[1].llf, models_gwat_f_global[1].llf)
lrt.loc['model_gwat_f_control_fko_Q2', :] = (lr, pval, cytometer.stats.pval_to_asterisk(pval))
lr, pval = cytometer.stats.lrtest(models_gwat_f_control_fko[2].llf, models_gwat_f_global[2].llf)
lrt.loc['model_gwat_f_control_fko_Q3', :] = (lr, pval, cytometer.stats.pval_to_asterisk(pval))
lr, pval = cytometer.stats.lrtest(models_sqwat_f_control_fko[0].llf, models_sqwat_f_global[0].llf)
lrt.loc['model_sqwat_f_control_fko_Q1', :] = (lr, pval, cytometer.stats.pval_to_asterisk(pval))
lr, pval = cytometer.stats.lrtest(models_sqwat_f_control_fko[1].llf, models_sqwat_f_global[1].llf)
lrt.loc['model_sqwat_f_control_fko_Q2', :] = (lr, pval, cytometer.stats.pval_to_asterisk(pval))
lr, pval = cytometer.stats.lrtest(models_sqwat_f_control_fko[2].llf, models_sqwat_f_global[2].llf)
lrt.loc['model_sqwat_f_control_fko_Q3', :] = (lr, pval, cytometer.stats.pval_to_asterisk(pval))
lr, pval = cytometer.stats.lrtest(models_gwat_m_control_fko[0].llf, models_gwat_m_global[0].llf)
lrt.loc['model_gwat_m_control_fko_Q1', :] = (lr, pval, cytometer.stats.pval_to_asterisk(pval))
lr, pval = cytometer.stats.lrtest(models_gwat_m_control_fko[1].llf, models_gwat_m_global[1].llf)
lrt.loc['model_gwat_m_control_fko_Q2', :] = (lr, pval, cytometer.stats.pval_to_asterisk(pval))
lr, pval = cytometer.stats.lrtest(models_gwat_m_control_fko[2].llf, models_gwat_m_global[2].llf)
lrt.loc['model_gwat_m_control_fko_Q3', :] = (lr, pval, cytometer.stats.pval_to_asterisk(pval))
lr, pval = cytometer.stats.lrtest(models_sqwat_m_control_fko[0].llf, models_sqwat_m_global[0].llf)
lrt.loc['model_sqwat_m_control_fko_Q1', :] = (lr, pval, cytometer.stats.pval_to_asterisk(pval))
lr, pval = cytometer.stats.lrtest(models_sqwat_m_control_fko[1].llf, models_sqwat_m_global[1].llf)
lrt.loc['model_sqwat_m_control_fko_Q2', :] = (lr, pval, cytometer.stats.pval_to_asterisk(pval))
lr, pval = cytometer.stats.lrtest(models_sqwat_m_control_fko[2].llf, models_sqwat_m_global[2].llf)
lrt.loc['model_sqwat_m_control_fko_Q3', :] = (lr, pval, cytometer.stats.pval_to_asterisk(pval))
# multiple-testing correction using the two-stage Benjamini-Krieger-Yekutieli FDR procedure ('fdr_tsbky' in statsmodels)
_, lrt['pval_adj'], _, _ = multipletests(lrt['pval'], method='fdr_tsbky', alpha=0.05, returnsorted=False)
lrt['pval_adj_ast'] = cytometer.stats.pval_to_asterisk(lrt['pval_adj'])
if SAVE_FIGS:
lrt.to_csv(os.path.join(figures_dir, 'klf14_b6ntac_exp_0111_area_at_quartiles_models_lrt_fko.csv'), na_rep='nan')
########################################################################################################################
## smoothed histograms
##
## We can use all animals for this, even the ones where BW=NaN, because we don't need BW or DW
## USED IN THE PAPER
########################################################################################################################
## only training windows used for hand tracing (there are only Control and MAT Het mice in the dataset)
# a previous version of this section was in klf14_b6ntac_exp_0109_pipeline_v8_validation.py, but now we have updated it
# so that plots are labelled with Control, MAT WT, FKO instead of PAT, MAT
# list of hand traced contours
# The list contains 126 XCF (Gimp format) files with the contours that were used for training DeepCytometer,
# plus 5 files (131 in total) with extra contours for 2 mice where the cell population was not well
# represented.
hand_file_svg_list = [
'KLF14-B6NTAC 36.1c PAT 98-16 C1 - 2016-02-11 10.45.00_row_010512_col_006912.svg',
'KLF14-B6NTAC 36.1c PAT 98-16 C1 - 2016-02-11 10.45.00_row_012848_col_016240.svg',
'KLF14-B6NTAC 36.1c PAT 98-16 C1 - 2016-02-11 10.45.00_row_016812_col_017484.svg',
'KLF14-B6NTAC 36.1c PAT 98-16 C1 - 2016-02-11 10.45.00_row_019228_col_015060.svg',
'KLF14-B6NTAC 36.1c PAT 98-16 C1 - 2016-02-11 10.45.00_row_029472_col_015520.svg',
'KLF14-B6NTAC 36.1i PAT 104-16 C1 - 2016-02-12 12.14.38_row_005348_col_019844.svg',
'KLF14-B6NTAC 36.1i PAT 104-16 C1 - 2016-02-12 12.14.38_row_006652_col_061724.svg',
'KLF14-B6NTAC 36.1i PAT 104-16 C1 - 2016-02-12 12.14.38_row_006900_col_071980.svg',
'KLF14-B6NTAC 36.1i PAT 104-16 C1 - 2016-02-12 12.14.38_row_010732_col_016692.svg',
'KLF14-B6NTAC 36.1i PAT 104-16 C1 - 2016-02-12 12.14.38_row_012828_col_018388.svg',
'KLF14-B6NTAC 36.1i PAT 104-16 C1 - 2016-02-12 12.14.38_row_013600_col_022880.svg',
'KLF14-B6NTAC 36.1i PAT 104-16 C1 - 2016-02-12 12.14.38_row_014768_col_022576.svg',
'KLF14-B6NTAC 36.1i PAT 104-16 C1 - 2016-02-12 12.14.38_row_014980_col_027052.svg',
'KLF14-B6NTAC 36.1i PAT 104-16 C1 - 2016-02-12 12.14.38_row_027388_col_018468.svg',
'KLF14-B6NTAC 36.1i PAT 104-16 C1 - 2016-02-12 12.14.38_row_028864_col_024512.svg',
'KLF14-B6NTAC 36.1i PAT 104-16 C1 - 2016-02-12 12.14.38_row_041392_col_026032.svg',
'KLF14-B6NTAC-36.1a PAT 96-16 C1 - 2016-02-10 16.12.38_row_009588_col_028676.svg',
'KLF14-B6NTAC-36.1a PAT 96-16 C1 - 2016-02-10 16.12.38_row_011680_col_013984.svg',
'KLF14-B6NTAC-36.1a PAT 96-16 C1 - 2016-02-10 16.12.38_row_015856_col_012416.svg',
'KLF14-B6NTAC-36.1a PAT 96-16 C1 - 2016-02-10 16.12.38_row_018720_col_031152.svg',
'KLF14-B6NTAC-36.1a PAT 96-16 C1 - 2016-02-10 16.12.38_row_021796_col_055852.svg',
'KLF14-B6NTAC-36.1b PAT 97-16 C1 - 2016-02-10 17.38.06_row_011852_col_071620.svg',
'KLF14-B6NTAC-36.1b PAT 97-16 C1 - 2016-02-10 17.38.06_row_013300_col_055476.svg',
'KLF14-B6NTAC-36.1b PAT 97-16 C1 - 2016-02-10 17.38.06_row_014320_col_007600.svg',
'KLF14-B6NTAC-36.1b PAT 97-16 C1 - 2016-02-10 17.38.06_row_015200_col_021536.svg',
'KLF14-B6NTAC-36.1b PAT 97-16 C1 - 2016-02-10 17.38.06_row_020256_col_002880.svg',
'KLF14-B6NTAC-36.1b PAT 97-16 C1 - 2016-02-10 17.38.06_row_021136_col_010880.svg',
'KLF14-B6NTAC-37.1c PAT 108-16 C1 - 2016-02-15 14.49.45_row_001292_col_004348.svg',
'KLF14-B6NTAC-37.1c PAT 108-16 C1 - 2016-02-15 14.49.45_row_005600_col_004224.svg',
'KLF14-B6NTAC-37.1c PAT 108-16 C1 - 2016-02-15 14.49.45_row_007216_col_008896.svg',
'KLF14-B6NTAC-37.1c PAT 108-16 C1 - 2016-02-15 14.49.45_row_007372_col_008556.svg',
'KLF14-B6NTAC-37.1c PAT 108-16 C1 - 2016-02-15 14.49.45_row_011904_col_005280.svg',
'KLF14-B6NTAC-37.1d PAT 109-16 C1 - 2016-02-15 15.19.08_row_010048_col_001856.svg',
'KLF14-B6NTAC-37.1d PAT 109-16 C1 - 2016-02-15 15.19.08_row_012172_col_049588.svg',
'KLF14-B6NTAC-37.1d PAT 109-16 C1 - 2016-02-15 15.19.08_row_013232_col_009008.svg',
'KLF14-B6NTAC-37.1d PAT 109-16 C1 - 2016-02-15 15.19.08_row_016068_col_007276.svg',
'KLF14-B6NTAC-37.1d PAT 109-16 C1 - 2016-02-15 15.19.08_row_019680_col_016480.svg',
'KLF14-B6NTAC-MAT-16.2d 214-16 C1 - 2016-02-17 16.02.46_row_004124_col_012524.svg',
'KLF14-B6NTAC-MAT-16.2d 214-16 C1 - 2016-02-17 16.02.46_row_004384_col_005456.svg',
'KLF14-B6NTAC-MAT-16.2d 214-16 C1 - 2016-02-17 16.02.46_row_006040_col_005272.svg',
'KLF14-B6NTAC-MAT-16.2d 214-16 C1 - 2016-02-17 16.02.46_row_006640_col_008848.svg',
'KLF14-B6NTAC-MAT-16.2d 214-16 C1 - 2016-02-17 16.02.46_row_008532_col_009804.svg',
'KLF14-B6NTAC-MAT-16.2d 214-16 C1 - 2016-02-17 16.02.46_row_013952_col_002624.svg',
'KLF14-B6NTAC-MAT-16.2d 214-16 C1 - 2016-02-17 16.02.46_row_017044_col_031228.svg',
'KLF14-B6NTAC-MAT-16.2d 214-16 C1 - 2016-02-17 16.02.46_row_021804_col_035412.svg',
'KLF14-B6NTAC-MAT-17.1c 46-16 C1 - 2016-02-01 14.02.04_row_010716_col_008924.svg',
'KLF14-B6NTAC-MAT-17.1c 46-16 C1 - 2016-02-01 14.02.04_row_016832_col_016944.svg',
'KLF14-B6NTAC-MAT-17.1c 46-16 C1 - 2016-02-01 14.02.04_row_018784_col_010912.svg',
'KLF14-B6NTAC-MAT-17.1c 46-16 C1 - 2016-02-01 14.02.04_row_024528_col_014688.svg',
'KLF14-B6NTAC-MAT-17.1c 46-16 C1 - 2016-02-01 14.02.04_row_026108_col_068956.svg',
'KLF14-B6NTAC-MAT-17.2c 66-16 C1 - 2016-02-04 11.46.39_row_009840_col_008736.svg',
'KLF14-B6NTAC-MAT-17.2c 66-16 C1 - 2016-02-04 11.46.39_row_017792_col_017504.svg',
'KLF14-B6NTAC-MAT-17.2c 66-16 C1 - 2016-02-04 11.46.39_row_020032_col_018640.svg',
'KLF14-B6NTAC-MAT-17.2c 66-16 C1 - 2016-02-04 11.46.39_row_030820_col_022204.svg',
'KLF14-B6NTAC-MAT-17.2f 68-16 C1 - 2016-02-04 15.05.54_row_007500_col_050372.svg',
'KLF14-B6NTAC-MAT-17.2f 68-16 C1 - 2016-02-04 15.05.54_row_008000_col_003680.svg',
'KLF14-B6NTAC-MAT-17.2f 68-16 C1 - 2016-02-04 15.05.54_row_013348_col_019316.svg',
'KLF14-B6NTAC-MAT-17.2f 68-16 C1 - 2016-02-04 15.05.54_row_019168_col_019600.svg',
'KLF14-B6NTAC-MAT-17.2f 68-16 C1 - 2016-02-04 15.05.54_row_022960_col_007808.svg',
'KLF14-B6NTAC-MAT-17.2f 68-16 C1 - 2016-02-04 15.05.54_row_026132_col_012148.svg',
'KLF14-B6NTAC-MAT-17.2f 68-16 C1 - 2016-02-04 15.05.54_row_027968_col_011200.svg',
'KLF14-B6NTAC-MAT-18.1a 50-16 C1 - 2016-02-02 09.12.41_row_003584_col_017280.svg',
'KLF14-B6NTAC-MAT-18.1a 50-16 C1 - 2016-02-02 09.12.41_row_012908_col_010212.svg',
'KLF14-B6NTAC-MAT-18.1a 50-16 C1 - 2016-02-02 09.12.41_row_013984_col_012576.svg',
'KLF14-B6NTAC-MAT-18.1a 50-16 C1 - 2016-02-02 09.12.41_row_014448_col_019088.svg',
'KLF14-B6NTAC-MAT-18.1a 50-16 C1 - 2016-02-02 09.12.41_row_015200_col_015920.svg',
'KLF14-B6NTAC-MAT-18.1a 50-16 C1 - 2016-02-02 09.12.41_row_028156_col_018596.svg',
'KLF14-B6NTAC-MAT-18.1e 54-16 C1 - 2016-02-02 15.26.33_row_001920_col_014048.svg',
'KLF14-B6NTAC-MAT-18.1e 54-16 C1 - 2016-02-02 15.26.33_row_005344_col_019360.svg',
'KLF14-B6NTAC-MAT-18.1e 54-16 C1 - 2016-02-02 15.26.33_row_009236_col_018316.svg',
'KLF14-B6NTAC-MAT-18.1e 54-16 C1 - 2016-02-02 15.26.33_row_012680_col_023936.svg',
'KLF14-B6NTAC-MAT-18.1e 54-16 C1 - 2016-02-02 15.26.33_row_013256_col_007952.svg',
'KLF14-B6NTAC-MAT-18.1e 54-16 C1 - 2016-02-02 15.26.33_row_014800_col_020976.svg',
'KLF14-B6NTAC-MAT-18.1e 54-16 C1 - 2016-02-02 15.26.33_row_016756_col_063692.svg',
'KLF14-B6NTAC-MAT-18.1e 54-16 C1 - 2016-02-02 15.26.33_row_017360_col_024712.svg',
'KLF14-B6NTAC-MAT-18.1e 54-16 C1 - 2016-02-02 15.26.33_row_020824_col_018688.svg',
'KLF14-B6NTAC-MAT-18.1e 54-16 C1 - 2016-02-02 15.26.33_row_024128_col_010112.svg',
'KLF14-B6NTAC-MAT-18.1e 54-16 C1 - 2016-02-02 15.26.33_row_024836_col_055124.svg',
'KLF14-B6NTAC-MAT-18.2b 58-16 C1 - 2016-02-03 11.10.52_row_005424_col_006896.svg',
'KLF14-B6NTAC-MAT-18.2b 58-16 C1 - 2016-02-03 11.10.52_row_006268_col_013820.svg',
'KLF14-B6NTAC-MAT-18.2b 58-16 C1 - 2016-02-03 11.10.52_row_013820_col_057052.svg',
'KLF14-B6NTAC-MAT-18.2b 58-16 C1 - 2016-02-03 11.10.52_row_014272_col_008064.svg',
'KLF14-B6NTAC-MAT-18.2b 58-16 C1 - 2016-02-03 11.10.52_row_017808_col_012400.svg',
'KLF14-B6NTAC-MAT-18.2d 60-16 C1 - 2016-02-03 13.13.57_row_007296_col_010640.svg',
'KLF14-B6NTAC-MAT-18.2d 60-16 C1 - 2016-02-03 13.13.57_row_013856_col_014128.svg',
'KLF14-B6NTAC-MAT-18.2d 60-16 C1 - 2016-02-03 13.13.57_row_018380_col_063068.svg',
'KLF14-B6NTAC-MAT-18.2d 60-16 C1 - 2016-02-03 13.13.57_row_020448_col_013824.svg',
'KLF14-B6NTAC-MAT-18.2d 60-16 C1 - 2016-02-03 13.13.57_row_024076_col_020404.svg',
'KLF14-B6NTAC-MAT-18.2g 63-16 C1 - 2016-02-03 16.58.52_row_010128_col_013536.svg',
'KLF14-B6NTAC-MAT-18.2g 63-16 C1 - 2016-02-03 16.58.52_row_015776_col_010976.svg',
'KLF14-B6NTAC-MAT-18.2g 63-16 C1 - 2016-02-03 16.58.52_row_015984_col_026832.svg',
'KLF14-B6NTAC-MAT-18.3b 223-16 C2 - 2016-02-26 10.35.52_row_005428_col_058372.svg',
'KLF14-B6NTAC-MAT-18.3b 223-16 C2 - 2016-02-26 10.35.52_row_012404_col_054316.svg',
'KLF14-B6NTAC-MAT-18.3b 223-16 C2 - 2016-02-26 10.35.52_row_013604_col_024644.svg',
'KLF14-B6NTAC-MAT-18.3b 223-16 C2 - 2016-02-26 10.35.52_row_014628_col_069148.svg',
'KLF14-B6NTAC-MAT-18.3b 223-16 C2 - 2016-02-26 10.35.52_row_018384_col_014688.svg',
'KLF14-B6NTAC-MAT-18.3b 223-16 C2 - 2016-02-26 10.35.52_row_019340_col_017348.svg',
'KLF14-B6NTAC-MAT-18.3b 223-16 C2 - 2016-02-26 10.35.52_row_020128_col_010096.svg',
'KLF14-B6NTAC-MAT-18.3b 223-16 C2 - 2016-02-26 10.35.52_row_022000_col_015568.svg',
'KLF14-B6NTAC-MAT-18.3d 224-16 C1 - 2016-02-26 11.13.53_row_006880_col_017808.svg',
'KLF14-B6NTAC-MAT-18.3d 224-16 C1 - 2016-02-26 11.13.53_row_008212_col_015364.svg',
'KLF14-B6NTAC-MAT-18.3d 224-16 C1 - 2016-02-26 11.13.53_row_011004_col_005988.svg',
'KLF14-B6NTAC-MAT-18.3d 224-16 C1 - 2016-02-26 11.13.53_row_015004_col_010364.svg',
'KLF14-B6NTAC-MAT-18.3d 224-16 C1 - 2016-02-26 11.13.53_row_018992_col_005952.svg',
'KLF14-B6NTAC-MAT-18.3d 224-16 C1 - 2016-02-26 11.13.53_row_019556_col_057972.svg',
'KLF14-B6NTAC-MAT-18.3d 224-16 C1 - 2016-02-26 11.13.53_row_021812_col_022916.svg',
'KLF14-B6NTAC-MAT-18.3d 224-16 C1 - 2016-02-26 11.13.53_row_022208_col_018128.svg',
'KLF14-B6NTAC-PAT-36.3d 416-16 C1 - 2016-03-16 14.44.11_row_010084_col_058476.svg',
'KLF14-B6NTAC-PAT-36.3d 416-16 C1 - 2016-03-16 14.44.11_row_012208_col_007472.svg',
'KLF14-B6NTAC-PAT-36.3d 416-16 C1 - 2016-03-16 14.44.11_row_013680_col_019152.svg',
'KLF14-B6NTAC-PAT-36.3d 416-16 C1 - 2016-03-16 14.44.11_row_016260_col_058300.svg',
'KLF14-B6NTAC-PAT-36.3d 416-16 C1 - 2016-03-16 14.44.11_row_019220_col_061724.svg',
'KLF14-B6NTAC-PAT-36.3d 416-16 C1 - 2016-03-16 14.44.11_row_020048_col_028896.svg',
'KLF14-B6NTAC-PAT-36.3d 416-16 C1 - 2016-03-16 14.44.11_row_021012_col_057844.svg',
'KLF14-B6NTAC-PAT-36.3d 416-16 C1 - 2016-03-16 14.44.11_row_023236_col_011084.svg',
'KLF14-B6NTAC-PAT-37.2g 415-16 C1 - 2016-03-16 11.47.52_row_006124_col_082236.svg',
'KLF14-B6NTAC-PAT-37.2g 415-16 C1 - 2016-03-16 11.47.52_row_007436_col_019092.svg',
'KLF14-B6NTAC-PAT-37.2g 415-16 C1 - 2016-03-16 11.47.52_row_009296_col_029664.svg',
'KLF14-B6NTAC-PAT-37.2g 415-16 C1 - 2016-03-16 11.47.52_row_015872_col_019456.svg',
'KLF14-B6NTAC-PAT-37.2g 415-16 C1 - 2016-03-16 11.47.52_row_016556_col_010292.svg',
'KLF14-B6NTAC-PAT-37.2g 415-16 C1 - 2016-03-16 11.47.52_row_023100_col_009220.svg',
'KLF14-B6NTAC-PAT-37.2g 415-16 C1 - 2016-03-16 11.47.52_row_023728_col_011904.svg',
'KLF14-B6NTAC-PAT-37.2g 415-16 C1 - 2016-03-16 11.47.52_row_031860_col_033476.svg',
'KLF14-B6NTAC-PAT-37.4a 417-16 C1 - 2016-03-16 15.55.32_row_004256_col_017552.svg',
'KLF14-B6NTAC-PAT-37.4a 417-16 C1 - 2016-03-16 15.55.32_row_005424_col_010432.svg',
'KLF14-B6NTAC-PAT-37.4a 417-16 C1 - 2016-03-16 15.55.32_row_006412_col_012484.svg',
'KLF14-B6NTAC-PAT-37.4a 417-16 C1 - 2016-03-16 15.55.32_row_012144_col_007056.svg',
'KLF14-B6NTAC-PAT-37.4a 417-16 C1 - 2016-03-16 15.55.32_row_013012_col_019820.svg',
'KLF14-B6NTAC-PAT-37.4a 417-16 C1 - 2016-03-16 15.55.32_row_031172_col_025996.svg',
'KLF14-B6NTAC-PAT-37.4a 417-16 C1 - 2016-03-16 15.55.32_row_034628_col_040116.svg',
'KLF14-B6NTAC-PAT-37.4a 417-16 C1 - 2016-03-16 15.55.32_row_035948_col_041492.svg'
]
# get v2 of the hand traced contours
hand_file_svg_list = [os.path.join(hand_traced_dir, x) for x in hand_file_svg_list]
# filename of the dataframe with the hand traced cell data
df_hand_all_filename = os.path.join(paper_dir, 'klf14_b6ntac_exp_0111_pipeline_v8_validation_smoothed_histo_hand_' + depot + '.csv')
if os.path.isfile(df_hand_all_filename):
# load dataframe with the hand traced data
df_hand_all = pd.read_csv(df_hand_all_filename)
else: # compute dataframe with the hand traced data
# loop over the hand traced files and build a dataframe with the cell sizes
df_hand_all = pd.DataFrame()
for i, file_svg in enumerate(hand_file_svg_list):
print('File ' + str(i) + '/' + str(len(hand_file_svg_list) - 1) + ': ' + os.path.basename(file_svg))
# load hand traced contours
cells = cytometer.data.read_paths_from_svg_file(file_svg, tag='Cell', add_offset_from_filename=False,
minimum_npoints=3)
print('Cells: ' + str(len(cells)))
if len(cells) == 0:
continue
# load training image
file_im = file_svg.replace('.svg', '.tif')
im = PIL.Image.open(file_im)
# read pixel size information
xres = 0.0254 / im.info['dpi'][0] * 1e6 # um
yres = 0.0254 / im.info['dpi'][1] * 1e6 # um
im = np.array(im)
if DEBUG:
plt.clf()
plt.imshow(im)
for j in range(len(cells)):
cell = np.array(cells[j])
plt.fill(cell[:, 0], cell[:, 1], edgecolor='C0', fill=False)
plt.text(np.mean(cell[:, 0]), np.mean(cell[:, 1]), str(j))
# compute cell areas
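# (shapely returns areas in pixel^2 because the contour coordinates are in pixels; multiplying by
#  xres * yres converts them to um^2)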
cell_areas = np.array([shapely.geometry.Polygon(x).area for x in cells]) * xres * yres
df = cytometer.data.tag_values_with_mouse_info(metainfo=metainfo, s=os.path.basename(file_svg),
values=cell_areas, values_tag='area',
tags_to_keep=['id', 'ko_parent', 'sex', 'genotype'])
# figure out what depot these cells belong to
# NOTE: this code is here only for completeness, because there are no gonadal slides in the training dataset, only
# subcutaneous ones
aux = os.path.basename(file_svg).replace('KLF14-B6NTAC', '')
if 'B' in aux and 'C' in aux:
raise ValueError('Slice appears to be both gonadal and subcutaneous')
elif 'B' in aux:
depot = 'gwat'
elif 'C' in aux:
depot = 'sqwat'
else:
raise ValueError('Slice is neither gonadal nor subcutaneous')
df['depot'] = depot
df_hand_all = pd.concat([df_hand_all, df], ignore_index=True)  # DataFrame.append() was removed in pandas 2.0
# save dataframe for later use
df_hand_all.to_csv(df_hand_all_filename, index=False)
print('Min cell size = ' + '{0:.1f}'.format(np.min(df_hand_all['area'])) + ' um^2 = '
+ '{0:.1f}'.format(np.min(df_hand_all['area']) / xres_ref / yres_ref) + ' pixels')
print('Max cell size = ' + '{0:.1f}'.format(np.max(df_hand_all['area'])) + ' um^2 = '
+ '{0:.1f}'.format(np.max(df_hand_all['area']) / xres_ref / yres_ref) + ' pixels')
# these are the same quantiles as the ones for automatic segmentations in exp 0110
quantiles = np.linspace(0, 1, 21)
area_bin_edges = np.linspace(min_area_um2, max_area_um2, 201)
area_bin_centers = (area_bin_edges[0:-1] + area_bin_edges[1:]) / 2.0
# 1 - alpha is the confidence level, e.g. alpha=0.05 => 95% CI
alpha = 0.05
k = stats.norm.ppf(1 - alpha / 2, loc=0, scale=1) # multiplier for CI length (~1.96 for 95% CI)
if SAVE_FIGS:
plt.clf()
plt.subplot(221)
idx = (df_hand_all['depot'] == 'sqwat') & (df_hand_all['sex'] == 'f') & (df_hand_all['ko_parent'] == 'PAT')
kde = sklearn.neighbors.KernelDensity(bandwidth=100, kernel='gaussian').fit(
np.array(df_hand_all[idx]['area']).reshape(-1, 1))
log_dens = kde.score_samples((area_bin_centers).reshape(-1, 1))
pdf = np.exp(log_dens)
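# note: the KDE curve is divided by its peak below purely for display, so all panels share a common
# [0, 1] vertical scale; the rescaled curve is no longer a normalised density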
plt.plot((area_bin_centers) * 1e-3, pdf / pdf.max())
plt.tick_params(labelsize=14)
plt.tick_params(axis='y', left=False, labelleft=False, right=False, reset=True)
plt.text(0.9, 0.9, 'Female Control', fontsize=14, transform=plt.gca().transAxes, va='top', ha='right')
plt.xticks([0, 10, 20])
plt.xlim(-1.2, max_area_um2 * 1e-3)
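# Harrell-Davis estimates of the quartiles of the hand-traced areas (in 10^3 um^2) and their jackknife standard errors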
area_q = stats.mstats.hdquantiles(df_hand_all[idx]['area'] * 1e-3, prob=[0.25, 0.50, 0.75], axis=0)
area_stderr = stats.mstats.hdquantiles_sd(df_hand_all[idx]['area'] * 1e-3, prob=[0.25, 0.50, 0.75], axis=0)
ci_lo = area_q - k * area_stderr
ci_hi = area_q + k * area_stderr
print('female Control')
print('\tQ1: ' + '{0:.2f}'.format(area_q[0]) + ' (' + '{0:.2f}'.format(ci_lo[0]) + ', ' + '{0:.2f}'.format(ci_hi[0]) + ')')
print('\tQ2: ' + '{0:.2f}'.format(area_q[1]) + ' (' + '{0:.2f}'.format(ci_lo[1]) + ', ' + '{0:.2f}'.format(ci_hi[1]) + ')')
print('\tQ3: ' + '{0:.2f}'.format(area_q[2]) + ' (' + '{0:.2f}'.format(ci_lo[2]) + ', ' + '{0:.2f}'.format(ci_hi[2]) + ')')
plt.plot([area_q[0], area_q[0]], [0, 1], 'k', linewidth=1)
plt.plot([area_q[1], area_q[1]], [0, 1], 'k', linewidth=1)
plt.plot([area_q[2], area_q[2]], [0, 1], 'k', linewidth=1)
plt.subplot(222)
idx = (df_hand_all['depot'] == 'sqwat') & (df_hand_all['sex'] == 'm') & (df_hand_all['ko_parent'] == 'PAT')
kde = sklearn.neighbors.KernelDensity(bandwidth=100, kernel='gaussian').fit(
np.array(df_hand_all[idx]['area']).reshape(-1, 1))
log_dens = kde.score_samples(area_bin_centers.reshape(-1, 1))
pdf = np.exp(log_dens)
plt.plot(area_bin_centers * 1e-3, pdf / pdf.max())
plt.tick_params(labelsize=14)
plt.tick_params(axis='y', left=False, labelleft=False, right=False, reset=True)
plt.text(0.9, 0.9, 'Male Control', fontsize=14, transform=plt.gca().transAxes, va='top', ha='right')
plt.xticks([0, 10, 20])
plt.xlim(-1.2, max_area_um2 * 1e-3)
area_q = stats.mstats.hdquantiles(df_hand_all[idx]['area'] * 1e-3, prob=[0.25, 0.50, 0.75], axis=0)
area_stderr = stats.mstats.hdquantiles_sd(df_hand_all[idx]['area'] * 1e-3, prob=[0.25, 0.50, 0.75], axis=0)
ci_lo = area_q - k * area_stderr
ci_hi = area_q + k * area_stderr
print('male Control')
print('\tQ1: ' + '{0:.2f}'.format(area_q[0]) + ' (' + '{0:.2f}'.format(ci_lo[0]) + ', ' + '{0:.2f}'.format(ci_hi[0]) + ')')
print('\tQ2: ' + '{0:.2f}'.format(area_q[1]) + ' (' + '{0:.2f}'.format(ci_lo[1]) + ', ' + '{0:.2f}'.format(ci_hi[1]) + ')')
print('\tQ3: ' + '{0:.2f}'.format(area_q[2]) + ' (' + '{0:.2f}'.format(ci_lo[2]) + ', ' + '{0:.2f}'.format(ci_hi[2]) + ')')
plt.plot([area_q[0], area_q[0]], [0, 1], 'k', linewidth=1)
plt.plot([area_q[1], area_q[1]], [0, 1], 'k', linewidth=1)
plt.plot([area_q[2], area_q[2]], [0, 1], 'k', linewidth=1)
plt.subplot(223)
idx = (df_hand_all['depot'] == 'sqwat') & (df_hand_all['sex'] == 'f') & (df_hand_all['ko_parent'] == 'MAT') \
& (df_hand_all['genotype'] == 'KLF14-KO:Het')
kde = sklearn.neighbors.KernelDensity(bandwidth=100, kernel='gaussian').fit(
np.array(df_hand_all[idx]['area']).reshape(-1, 1))
log_dens = kde.score_samples(area_bin_centers.reshape(-1, 1))
pdf = np.exp(log_dens)
plt.plot(area_bin_centers * 1e-3, pdf / pdf.max())
plt.tick_params(labelsize=14)
plt.tick_params(axis='y', left=False, labelleft=False, right=False, reset=True)
plt.text(0.9, 0.9, 'Female FKO\n(MAT Het)', fontsize=14, transform=plt.gca().transAxes, va='top', ha='right')
plt.xticks([0, 10, 20])
plt.xlim(-1.2, max_area_um2 * 1e-3)
plt.xlabel(r'Area ($\cdot 10^3\ \mu m^2$)', fontsize=14)
area_q = stats.mstats.hdquantiles(df_hand_all[idx]['area'] * 1e-3, prob=[0.25, 0.50, 0.75], axis=0)
area_stderr = stats.mstats.hdquantiles_sd(df_hand_all[idx]['area'] * 1e-3, prob=[0.25, 0.50, 0.75], axis=0)
ci_lo = area_q - k * area_stderr
ci_hi = area_q + k * area_stderr
print('female FKO (MAT Het)')
print('\tQ1: ' + '{0:.2f}'.format(area_q[0]) + ' (' + '{0:.2f}'.format(ci_lo[0]) + ', ' + '{0:.2f}'.format(ci_hi[0]) + ')')
print('\tQ2: ' + '{0:.2f}'.format(area_q[1]) + ' (' + '{0:.2f}'.format(ci_lo[1]) + ', ' + '{0:.2f}'.format(ci_hi[1]) + ')')
print('\tQ3: ' + '{0:.2f}'.format(area_q[2]) + ' (' + '{0:.2f}'.format(ci_lo[2]) + ', ' + '{0:.2f}'.format(ci_hi[2]) + ')')
plt.plot([area_q[0], area_q[0]], [0, 1], 'k', linewidth=1)
plt.plot([area_q[1], area_q[1]], [0, 1], 'k', linewidth=1)
plt.plot([area_q[2], area_q[2]], [0, 1], 'k', linewidth=1)
plt.subplot(224)
idx = (df_hand_all['depot'] == 'sqwat') & (df_hand_all['sex'] == 'm') & (df_hand_all['ko_parent'] == 'MAT') \
& (df_hand_all['genotype'] == 'KLF14-KO:Het')
kde = sklearn.neighbors.KernelDensity(bandwidth=100, kernel='gaussian').fit(
np.array(df_hand_all[idx]['area']).reshape(-1, 1))
log_dens = kde.score_samples(area_bin_centers.reshape(-1, 1))
pdf = np.exp(log_dens)
plt.plot(area_bin_centers * 1e-3, pdf / pdf.max())
plt.tick_params(labelsize=14)
plt.tick_params(axis='y', left=False, labelleft=False, right=False, reset=True)
plt.text(0.9, 0.9, 'Male FKO\n(MAT Het)', fontsize=14, transform=plt.gca().transAxes, va='top', ha='right')
plt.xticks([0, 10, 20])
plt.xlim(-1.2, max_area_um2 * 1e-3)
plt.xlabel(r'Area ($\cdot 10^3\ \mu m^2$)', fontsize=14)
area_q = stats.mstats.hdquantiles(df_hand_all[idx]['area'] * 1e-3, prob=[0.25, 0.50, 0.75], axis=0)
area_stderr = stats.mstats.hdquantiles_sd(df_hand_all[idx]['area'] * 1e-3, prob=[0.25, 0.50, 0.75], axis=0)
ci_lo = area_q - k * area_stderr
ci_hi = area_q + k * area_stderr
print('male FKO (MAT Het)')
print('\tQ1: ' + '{0:.2f}'.format(area_q[0]) + ' (' + '{0:.2f}'.format(ci_lo[0]) + ', ' + '{0:.2f}'.format(ci_hi[0]) + ')')
print('\tQ2: ' + '{0:.2f}'.format(area_q[1]) + ' (' + '{0:.2f}'.format(ci_lo[1]) + ', ' + '{0:.2f}'.format(ci_hi[1]) + ')')
print('\tQ3: ' + '{0:.2f}'.format(area_q[2]) + ' (' + '{0:.2f}'.format(ci_lo[2]) + ', ' + '{0:.2f}'.format(ci_hi[2]) + ')')
plt.plot([area_q[0], area_q[0]], [0, 1], 'k', linewidth=1)
plt.plot([area_q[1], area_q[1]], [0, 1], 'k', linewidth=1)
plt.plot([area_q[2], area_q[2]], [0, 1], 'k', linewidth=1)
if depot == 'gwat':
plt.suptitle('Gonadal', fontsize=14)
elif depot == 'sqwat':
plt.suptitle('Subcutaneous', fontsize=14)
plt.tight_layout(rect=[0, 0.03, 1, 0.95])
plt.savefig(os.path.join(figures_dir, 'klf14_b6ntac_exp_0111_pipeline_v8_validation_smoothed_histo_hand_' + depot + '.png'))
plt.savefig(os.path.join(figures_dir, 'klf14_b6ntac_exp_0111_pipeline_v8_validation_smoothed_histo_hand_' + depot + '.svg'))
## whole slides used for hand tracing (there are only Control and MAT Het mice in the dataset)
# all hand traced slides are subcutaneous, so we only need to compare against subcutaneous
depot = 'sqwat'
# identify whole slides used for the hand traced dataset
idx_used_in_hand_traced = np.full((df_all.shape[0],), False)
for hand_file_svg in hand_file_svg_list:
df = cytometer.data.tag_values_with_mouse_info(metainfo=metainfo, s=os.path.basename(hand_file_svg),
values=[depot, ], values_tag='depot',
tags_to_keep=['id', 'ko_parent', 'sex', 'genotype'])
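# a df_all row was used for hand tracing if its values for these columns (depot, id, ko_parent, sex, genotype)
# match the ones derived from the hand-traced filename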
idx_used_in_hand_traced[(df_all[df.columns] == df.values).all(1)] = True
if SAVE_FIGS:
plt.clf()
# f PAT
df = df_all[idx_used_in_hand_traced & (df_all['depot'] == depot) & (df_all['sex'] == 'f') & (df_all['ko_parent'] == 'PAT')]
df = df.reset_index()
histo = np.array(df['smoothed_histo'].tolist())
plt.subplot(221)
plt.plot(area_bin_centers * 1e-3, np.transpose(histo) / histo.max().max())
plt.tick_params(labelsize=14)
plt.tick_params(axis='y', left=False, labelleft=False, right=False, reset=True)
plt.text(0.9, 0.9, 'Female Control', fontsize=14, transform=plt.gca().transAxes, va='top', ha='right')
plt.xticks([0, 10, 20])
plt.xlim(-1.2, max_area_um2 * 1e-3)
# m PAT
df = df_all[idx_used_in_hand_traced & (df_all['depot'] == depot) & (df_all['sex'] == 'm') & (df_all['ko_parent'] == 'PAT')]
df = df.reset_index()
histo = np.array(df['smoothed_histo'].tolist())
plt.subplot(222)
plt.plot(area_bin_centers * 1e-3, np.transpose(histo) / histo.max().max())
plt.tick_params(labelsize=14)
plt.tick_params(axis='y', left=False, labelleft=False, right=False, reset=True)
plt.text(0.9, 0.9, 'Male Control', fontsize=14, transform=plt.gca().transAxes, va='top', ha='right')
plt.xticks([0, 10, 20])
plt.xlim(-1.2, max_area_um2 * 1e-3)
# f MAT Het
df = df_all[idx_used_in_hand_traced & (df_all['depot'] == depot) & (df_all['sex'] == 'f') & (df_all['ko_parent'] == 'MAT')
& (df_all['genotype'] == 'KLF14-KO:Het')]
df = df.reset_index()
histo = np.array(df['smoothed_histo'].tolist())
plt.subplot(223)
plt.plot(area_bin_centers * 1e-3, np.transpose(histo) / histo.max().max())
plt.tick_params(labelsize=14)
plt.tick_params(axis='y', left=False, labelleft=False, right=False, reset=True)
plt.text(0.9, 0.9, 'Female FKO\n(MAT Het)', fontsize=14, transform=plt.gca().transAxes, va='top', ha='right')
plt.xticks([0, 10, 20])
plt.xlim(-1.2, max_area_um2 * 1e-3)
plt.xlabel(r'Area ($\cdot 10^3\ \mu m^2$)', fontsize=14)
# m MAT Het
df = df_all[idx_used_in_hand_traced & (df_all['depot'] == depot) & (df_all['sex'] == 'm') & (df_all['ko_parent'] == 'MAT')
& (df_all['genotype'] == 'KLF14-KO:Het')]
df = df.reset_index()
histo = np.array(df['smoothed_histo'].tolist())
plt.subplot(224)
plt.plot(area_bin_centers * 1e-3, np.transpose(histo) / histo.max().max())
plt.tick_params(labelsize=14)
plt.tick_params(axis='y', left=False, labelleft=False, right=False, reset=True)
plt.xticks([0, 10, 20])
plt.text(0.9, 0.9, 'Male FKO\n(MAT Het)', fontsize=14, transform=plt.gca().transAxes, va='top', ha='right')
plt.xlim(-1.2, max_area_um2 * 1e-3)
plt.xlabel(r'Area ($\cdot 10^3\ \mu m^2$)', fontsize=14)
plt.suptitle('Subcutaneous', fontsize=14)
plt.tight_layout(rect=[0, 0.03, 1, 0.95])
plt.savefig(os.path.join(figures_dir, 'klf14_b6ntac_exp_0111_paper_figures_smoothed_histo_' + depot + '_hand_subset.png'))
plt.savefig(os.path.join(figures_dir, 'klf14_b6ntac_exp_0111_paper_figures_smoothed_histo_' + depot + '_hand_subset.svg'))
if SAVE_FIGS:
plt.clf()
# f PAT
df = df_all[idx_used_in_hand_traced & (df_all['depot'] == depot) & (df_all['sex'] == 'f') & (df_all['ko_parent'] == 'PAT')]
histo = np.array(df['smoothed_histo'].tolist())
histo_beg = stats.mstats.hdquantiles(histo, prob=0.025, axis=0)
histo_q1 = stats.mstats.hdquantiles(histo, prob=0.25, axis=0)
histo_q2 = stats.mstats.hdquantiles(histo, prob=0.50, axis=0)
histo_q3 = stats.mstats.hdquantiles(histo, prob=0.75, axis=0)
histo_end = stats.mstats.hdquantiles(histo, prob=0.975, axis=0)
plt.subplot(221)
hist_max = histo_end.max()
plt.fill_between(area_bin_centers * 1e-3, histo_beg[0,] / hist_max, histo_end[0,] / hist_max,
alpha=0.5, color='C0')
plt.fill_between(area_bin_centers * 1e-3, histo_q1[0,] / hist_max, histo_q3[0,] / hist_max,
alpha=0.5, color='C0')
plt.plot(area_bin_centers * 1e-3, histo_q2[0,] / hist_max, 'C0', linewidth=2)
plt.tick_params(axis='y', left=False, labelleft=False, right=False, reset=True)
plt.tick_params(labelsize=14)
plt.text(0.9, 0.9, 'Female Control', fontsize=14, transform=plt.gca().transAxes, va='top', ha='right')
plt.xlim(-1.2, max_area_um2 * 1e-3)
# m PAT
df = df_all[idx_used_in_hand_traced & (df_all['depot'] == depot) & (df_all['sex'] == 'm') & (df_all['ko_parent'] == 'PAT')]
histo = np.array(df['smoothed_histo'].tolist())
histo_beg = stats.mstats.hdquantiles(histo, prob=0.025, axis=0)
histo_q1 = stats.mstats.hdquantiles(histo, prob=0.25, axis=0)
histo_q2 = stats.mstats.hdquantiles(histo, prob=0.50, axis=0)
histo_q3 = stats.mstats.hdquantiles(histo, prob=0.75, axis=0)
histo_end = stats.mstats.hdquantiles(histo, prob=0.975, axis=0)
plt.subplot(222)
hist_max = histo_end.max()
plt.fill_between(area_bin_centers * 1e-3, histo_beg[0,] / hist_max, histo_end[0,] / hist_max,
alpha=0.5, color='C0')
plt.fill_between(area_bin_centers * 1e-3, histo_q1[0,] / hist_max, histo_q3[0,] / hist_max,
alpha=0.5, color='C0')
plt.plot(area_bin_centers * 1e-3, histo_q2[0,] / hist_max, 'C0', linewidth=2)
plt.tick_params(axis='y', left=False, labelleft=False, reset=True)
plt.tick_params(labelsize=14)
plt.text(0.9, 0.9, 'Male Control', fontsize=14, transform=plt.gca().transAxes, va='top', ha='right')
plt.xlim(-1.2, max_area_um2 * 1e-3)
# f MAT Het
df = df_all[idx_used_in_hand_traced & (df_all['depot'] == depot) & (df_all['sex'] == 'f') & (df_all['ko_parent'] == 'MAT')
& (df_all['genotype'] == 'KLF14-KO:Het')]
histo = np.array(df['smoothed_histo'].tolist())
histo_beg = stats.mstats.hdquantiles(histo, prob=0.025, axis=0)
histo_q1 = stats.mstats.hdquantiles(histo, prob=0.25, axis=0)
histo_q2 = stats.mstats.hdquantiles(histo, prob=0.50, axis=0)
histo_q3 = stats.mstats.hdquantiles(histo, prob=0.75, axis=0)
histo_end = stats.mstats.hdquantiles(histo, prob=0.975, axis=0)
plt.subplot(223)
hist_max = histo_end.max()
plt.fill_between(area_bin_centers * 1e-3, histo_beg[0,] / hist_max, histo_end[0,] / hist_max,
alpha=0.5, color='C0')
plt.fill_between(area_bin_centers * 1e-3, histo_q1[0,] / hist_max, histo_q3[0,] / hist_max,
alpha=0.5, color='C0')
plt.plot(area_bin_centers * 1e-3, histo_q2[0,] / hist_max, 'C0', linewidth=2)
plt.tick_params(axis='y', left=False, labelleft=False, reset=True)
plt.tick_params(labelsize=14)
plt.text(0.9, 0.9, 'Female FKO\n(MAT Het)', fontsize=14, transform=plt.gca().transAxes, va='top', ha='right')
plt.xlim(-1.2, max_area_um2 * 1e-3)
plt.xlabel(r'Area ($\cdot 10^3\ \mu m^2$)', fontsize=14)
# m MAT Het
df = df_all[idx_used_in_hand_traced & (df_all['depot'] == depot) & (df_all['sex'] == 'm') & (df_all['ko_parent'] == 'MAT')
& (df_all['genotype'] == 'KLF14-KO:Het')]
histo = np.array(df['smoothed_histo'].tolist())
histo_beg = stats.mstats.hdquantiles(histo, prob=0.025, axis=0)
histo_q1 = stats.mstats.hdquantiles(histo, prob=0.25, axis=0)
histo_q2 = stats.mstats.hdquantiles(histo, prob=0.50, axis=0)
histo_q3 = stats.mstats.hdquantiles(histo, prob=0.75, axis=0)
histo_end = stats.mstats.hdquantiles(histo, prob=0.975, axis=0)
plt.subplot(224)
hist_max = histo_end.max()
plt.fill_between(area_bin_centers * 1e-3, histo_beg[0,] / hist_max, histo_end[0,] / hist_max,
alpha=0.5, color='C0')
plt.fill_between(area_bin_centers * 1e-3, histo_q1[0,] / hist_max, histo_q3[0,] / hist_max,
alpha=0.5, color='C0')
plt.plot(area_bin_centers * 1e-3, histo_q2[0,] / hist_max, 'C0', linewidth=2)
plt.tick_params(axis='y', left=False, labelleft=False, reset=True)
plt.tick_params(labelsize=14)
plt.xlabel(r'Area ($\cdot 10^3\ \mu m^2$)', fontsize=14)
plt.text(0.9, 0.9, 'Male FKO\n(MAT Het)', fontsize=14, transform=plt.gca().transAxes, va='top', ha='right')
plt.xlim(-1.2, max_area_um2 * 1e-3)
plt.suptitle('Subcutaneous', fontsize=14)
plt.tight_layout(rect=[0, 0.03, 1, 0.95])
# numerical quartiles and CIs associated to the histograms
idx_q1 = np.where(quantiles == 0.25)[0][0]
idx_q2 = np.where(quantiles == 0.50)[0][0]
idx_q3 = np.where(quantiles == 0.75)[0][0]
# f PAT
df = df_all[idx_used_in_hand_traced & (df_all['depot'] == depot) & (df_all['sex'] == 'f') & (df_all['ko_parent'] == 'PAT')]
areas_at_quantiles = np.array(df['area_at_quantiles'].to_list())
stderr_at_quantiles = np.array(df['stderr_at_quantiles'].to_list())
stderr_at_quantiles[:, [0, -1]] = np.nan ## first and last values are artifacts of saving to the CSV file
# inverse-variance method to combine quantile and stderr values from multiple mice
areas_at_quantiles_hat, stderr_at_quantiles_hat = \
cytometer.stats.inverse_variance_method(areas_at_quantiles, stderr_at_quantiles)
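# (sketch of the pooling assumed here: with per-mouse estimates x_i and standard errors se_i, fixed-effect
#  inverse-variance weighting uses w_i = 1 / se_i^2, pooled x = sum(w_i * x_i) / sum(w_i) and
#  pooled se = sqrt(1 / sum(w_i)), applied per quantile; the exact implementation is in cytometer.stats)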
# compute combined value and CIs in 10^3 um^2 units
alpha = 0.05
q1_hat, q2_hat, q3_hat = areas_at_quantiles_hat[[idx_q1, idx_q2, idx_q3]] * 1e-3
k = stats.norm.ppf(1 - alpha/2, loc=0, scale=1) # multiplier for CI length (~1.96 for 95% CI)
q1_ci_lo, q2_ci_lo, q3_ci_lo = [q1_hat, q2_hat, q3_hat] - k * stderr_at_quantiles_hat[[idx_q1, idx_q2, idx_q3]] * 1e-3
q1_ci_hi, q2_ci_hi, q3_ci_hi = [q1_hat, q2_hat, q3_hat] + k * stderr_at_quantiles_hat[[idx_q1, idx_q2, idx_q3]] * 1e-3
print('f PAT')
print('\t' + '{0:.2f}'.format(q1_hat) + ' (' + '{0:.2f}'.format(q1_ci_lo) + ', ' + '{0:.2f}'.format(q1_ci_hi) + ')')
print('\t' + '{0:.2f}'.format(q2_hat) + ' (' + '{0:.2f}'.format(q2_ci_lo) + ', ' + '{0:.2f}'.format(q2_ci_hi) + ')')
print('\t' + '{0:.2f}'.format(q3_hat) + ' (' + '{0:.2f}'.format(q3_ci_lo) + ', ' + '{0:.2f}'.format(q3_ci_hi) + ')')
if SAVE_FIGS:
plt.subplot(221)
plt.plot([q1_hat, q1_hat], [0, 1], 'k', linewidth=1)
plt.plot([q2_hat, q2_hat], [0, 1], 'k', linewidth=1)
plt.plot([q3_hat, q3_hat], [0, 1], 'k', linewidth=1)
# m PAT
df = df_all[idx_used_in_hand_traced & (df_all['depot'] == depot) & (df_all['sex'] == 'm') & (df_all['ko_parent'] == 'PAT')]
areas_at_quantiles = np.array(df['area_at_quantiles'].to_list())
stderr_at_quantiles = np.array(df['stderr_at_quantiles'].to_list())
stderr_at_quantiles[:, [0, -1]] = np.nan ## first and last values are artifacts of saving to the CSV file
# inverse-variance method to combine quantile and stderr values from multiple mice
areas_at_quantiles_hat, stderr_at_quantiles_hat = \
cytometer.stats.inverse_variance_method(areas_at_quantiles, stderr_at_quantiles)
# compute combined value and CIs in 10^3 um^2 units
alpha = 0.05
q1_hat, q2_hat, q3_hat = areas_at_quantiles_hat[[idx_q1, idx_q2, idx_q3]] * 1e-3
k = stats.norm.ppf(1 - alpha/2, loc=0, scale=1) # multiplier for CI length (~1.96 for 95% CI)
q1_ci_lo, q2_ci_lo, q3_ci_lo = [q1_hat, q2_hat, q3_hat] - k * stderr_at_quantiles_hat[[idx_q1, idx_q2, idx_q3]] * 1e-3
q1_ci_hi, q2_ci_hi, q3_ci_hi = [q1_hat, q2_hat, q3_hat] + k * stderr_at_quantiles_hat[[idx_q1, idx_q2, idx_q3]] * 1e-3
print('m PAT')
print('\t' + '{0:.2f}'.format(q1_hat) + ' (' + '{0:.2f}'.format(q1_ci_lo) + ', ' + '{0:.2f}'.format(q1_ci_hi) + ')')
print('\t' + '{0:.2f}'.format(q2_hat) + ' (' + '{0:.2f}'.format(q2_ci_lo) + ', ' + '{0:.2f}'.format(q2_ci_hi) + ')')
print('\t' + '{0:.2f}'.format(q3_hat) + ' (' + '{0:.2f}'.format(q3_ci_lo) + ', ' + '{0:.2f}'.format(q3_ci_hi) + ')')
if SAVE_FIGS:
plt.subplot(222)
plt.plot([q1_hat, q1_hat], [0, 1], 'k', linewidth=1)
plt.plot([q2_hat, q2_hat], [0, 1], 'k', linewidth=1)
plt.plot([q3_hat, q3_hat], [0, 1], 'k', linewidth=1)
# f MAT Het
df = df_all[idx_used_in_hand_traced & (df_all['depot'] == depot) & (df_all['sex'] == 'f') & (df_all['ko_parent'] == 'MAT')
& (df_all['genotype'] == 'KLF14-KO:Het')]
areas_at_quantiles = np.array(df['area_at_quantiles'].to_list())
stderr_at_quantiles = np.array(df['stderr_at_quantiles'].to_list())
stderr_at_quantiles[:, [0, -1]] = np.nan ## first and last values are artifacts of saving to the CSV file
# inverse-variance method to combine quantile and stderr values from multiple mice
areas_at_quantiles_hat, stderr_at_quantiles_hat = \
cytometer.stats.inverse_variance_method(areas_at_quantiles, stderr_at_quantiles)
# compute combined value and CIs in 10^3 um^2 units
alpha = 0.05
q1_hat, q2_hat, q3_hat = areas_at_quantiles_hat[[idx_q1, idx_q2, idx_q3]] * 1e-3
k = stats.norm.ppf(1 - alpha/2, loc=0, scale=1) # multiplier for CI length (~1.96 for 95% CI)
q1_ci_lo, q2_ci_lo, q3_ci_lo = [q1_hat, q2_hat, q3_hat] - k * stderr_at_quantiles_hat[[idx_q1, idx_q2, idx_q3]] * 1e-3
q1_ci_hi, q2_ci_hi, q3_ci_hi = [q1_hat, q2_hat, q3_hat] + k * stderr_at_quantiles_hat[[idx_q1, idx_q2, idx_q3]] * 1e-3
print('f MAT Het')
print('\t' + '{0:.2f}'.format(q1_hat) + ' (' + '{0:.2f}'.format(q1_ci_lo) + ', ' + '{0:.2f}'.format(q1_ci_hi) + ')')
print('\t' + '{0:.2f}'.format(q2_hat) + ' (' + '{0:.2f}'.format(q2_ci_lo) + ', ' + '{0:.2f}'.format(q2_ci_hi) + ')')
print('\t' + '{0:.2f}'.format(q3_hat) + ' (' + '{0:.2f}'.format(q3_ci_lo) + ', ' + '{0:.2f}'.format(q3_ci_hi) + ')')
if SAVE_FIGS:
plt.subplot(223)
plt.plot([q1_hat, q1_hat], [0, 1], 'k', linewidth=1)
plt.plot([q2_hat, q2_hat], [0, 1], 'k', linewidth=1)
plt.plot([q3_hat, q3_hat], [0, 1], 'k', linewidth=1)
# m MAT Het
df = df_all[idx_used_in_hand_traced & (df_all['depot'] == depot) & (df_all['sex'] == 'm') & (df_all['ko_parent'] == 'MAT')
& (df_all['genotype'] == 'KLF14-KO:Het')]
areas_at_quantiles = np.array(df['area_at_quantiles'].to_list())
stderr_at_quantiles = np.array(df['stderr_at_quantiles'].to_list())
stderr_at_quantiles[:, [0, -1]] = np.nan ## first and last values are artifacts of saving to the CSV file
# inverse-variance method to combine quantile and stderr values from multiple mice
areas_at_quantiles_hat, stderr_at_quantiles_hat = \
cytometer.stats.inverse_variance_method(areas_at_quantiles, stderr_at_quantiles)
# compute combined value and CIs in 10^3 um^2 units
alpha = 0.05
q1_hat, q2_hat, q3_hat = areas_at_quantiles_hat[[idx_q1, idx_q2, idx_q3]] * 1e-3
k = stats.norm.ppf(1 - alpha/2, loc=0, scale=1) # multiplier for CI length (~1.96 for 95% CI)
q1_ci_lo, q2_ci_lo, q3_ci_lo = [q1_hat, q2_hat, q3_hat] - k * stderr_at_quantiles_hat[[idx_q1, idx_q2, idx_q3]] * 1e-3
q1_ci_hi, q2_ci_hi, q3_ci_hi = [q1_hat, q2_hat, q3_hat] + k * stderr_at_quantiles_hat[[idx_q1, idx_q2, idx_q3]] * 1e-3
print('m MAT Het')
print('\t' + '{0:.2f}'.format(q1_hat) + ' (' + '{0:.2f}'.format(q1_ci_lo) + ', ' + '{0:.2f}'.format(q1_ci_hi) + ')')
print('\t' + '{0:.2f}'.format(q2_hat) + ' (' + '{0:.2f}'.format(q2_ci_lo) + ', ' + '{0:.2f}'.format(q2_ci_hi) + ')')
print('\t' + '{0:.2f}'.format(q3_hat) + ' (' + '{0:.2f}'.format(q3_ci_lo) + ', ' + '{0:.2f}'.format(q3_ci_hi) + ')')
if SAVE_FIGS:
plt.subplot(224)
plt.plot([q1_hat, q1_hat], [0, 1], 'k', linewidth=1)
plt.plot([q2_hat, q2_hat], [0, 1], 'k', linewidth=1)
plt.plot([q3_hat, q3_hat], [0, 1], 'k', linewidth=1)
plt.savefig(os.path.join(figures_dir, 'klf14_b6ntac_exp_0111_paper_figures_smoothed_histo_quartiles_' + depot + '_hand_subset.png'))
plt.savefig(os.path.join(figures_dir, 'klf14_b6ntac_exp_0111_paper_figures_smoothed_histo_quartiles_' + depot + '_hand_subset.svg'))
## all slides (75 subcutaneous and 72 gonadal whole slides)
# reload the dataframe with cell population quantiles and histograms, because previously we removed 3 slides that we
# didn't have the BW for. But for this section, we don't need BW, so we can use all the data
dataframe_areas_filename = os.path.join(dataframe_dir, 'klf14_b6ntac_exp_0110_dataframe_areas_' + method + '.pkl')
df_all = pd.read_pickle(dataframe_areas_filename)
# -*- coding: utf-8 -*-
import os
import logging
import tempfile
import uuid
import shutil
import numpy as np
import pandas as pd
from rastertodataframe import util, tiling
log = logging.getLogger(__name__)
def raster_to_dataframe(raster_path, vector_path=None):
"""Convert a raster to a Pandas DataFrame.
Parameters
----------
raster_path : str
Path to raster file.
vector_path : str
Optional path to vector file. If given, raster pixels will be extracted
from features in the vector. If None, all raster pixels are converted
to a DataFrame.
Returns
-------
pandas.core.frame.DataFrame
"""
# Placeholders for possible temporary files.
temp_dir = vector_mask_fname = None
# Get raster band names.
ras = util.open_raster(raster_path)
raster_band_names = util.get_raster_band_names(ras)
# Create a mask from the pixels touched by the vector.
if vector_path is not None:
# Create a temporary directory for files.
temp_dir = tempfile.mkdtemp()
vec_with_fid = os.path.join(temp_dir, '{}'.format(uuid.uuid1()))
# Add a dummy feature ID column to the vector.
# This is not always present in OGR features.
vec_gdf = util.open_vector(vector_path, with_geopandas=True)
mask_values = list(range(1, len(vec_gdf) + 1))
vec_gdf['__fid__'] = pd.Series(mask_values)
vec_gdf.to_file(vec_with_fid, driver='GeoJSON')
# Mask the vector using the feature ID column.
vector_mask_fname = os.path.join(temp_dir, '{}'.format(uuid.uuid1()))
vector_mask = util.burn_vector_mask_into_raster(
raster_path, vec_with_fid, vector_mask_fname,
vector_field='__fid__')
# Loop over mask values to extract pixels.
tile_dfs = [] # DataFrames of each tile.
mask_arr = vector_mask.GetRasterBand(1).ReadAsArray()
for ras_arr in tiling.tiles(ras):
mask_dfs = [] # DataFrames of each mask.
for mask_val in mask_values:
# Extract only masked pixels.
pixels = util.get_pixels(
ras_arr, mask_arr, mask_val=mask_val)\
.transpose()
fid_px = np.ones(pixels.shape[0]) * mask_val
# Create a DataFrame of masked pixels and their FID.
mask_df = pd.DataFrame(pixels, columns=raster_band_names)
mask_df['__fid__'] = fid_px
mask_dfs.append(mask_df)
# Concat the mask DataFrames.
mask_df = pd.concat(mask_dfs)
# Join with pixels with vector attributes using the FID.
tile_dfs.append(mask_df.merge(vec_gdf, how='left', on='__fid__'))
# Merge all the tiles.
out_df = pd.concat(tile_dfs)
else:
# No vector given, simply load the raster.
tile_dfs = [] # DataFrames of each tile.
for ras_arr in tiling.tiles(ras):
idx = (1, 2) # Assume multiband
if ras_arr.ndim == 2:
idx = (0, 1) # Handle single band rasters
mask_arr = np.ones((ras_arr.shape[idx[0]], ras_arr.shape[idx[1]]))
pixels = util.get_pixels(ras_arr, mask_arr).transpose()
tile_dfs.append(pd.DataFrame(pixels, columns=raster_band_names))
# Merge all the tiles.
out_df = pd.concat(tile_dfs)
# Clean up temporary files.
if temp_dir is not None:
    shutil.rmtree(temp_dir)
return out_df
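# Example usage (hypothetical paths, shown only as a sketch; assumes a GeoTIFF raster and a GeoJSON
# vector of polygons exist at these locations):
#   df = raster_to_dataframe('/tmp/scene.tif', vector_path='/tmp/fields.geojson')
#   print(df.head())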
# Copyright (c) 2021-2022, NVIDIA CORPORATION.
import numpy as np
import pandas as pd
import pytest
import cudf
from cudf.testing._utils import NUMERIC_TYPES, assert_eq
from cudf.utils.dtypes import np_dtypes_to_pandas_dtypes
def test_can_cast_safely_same_kind():
# 'i' -> 'i'
data = cudf.Series([1, 2, 3], dtype="int32")._column
to_dtype = np.dtype("int64")
assert data.can_cast_safely(to_dtype)
data = cudf.Series([1, 2, 3], dtype="int64")._column
to_dtype = np.dtype("int32")
assert data.can_cast_safely(to_dtype)
data = cudf.Series([1, 2, 2**31], dtype="int64")._column
assert not data.can_cast_safely(to_dtype)
# 'u' -> 'u'
data = cudf.Series([1, 2, 3], dtype="uint32")._column
to_dtype = np.dtype("uint64")
assert data.can_cast_safely(to_dtype)
data = cudf.Series([1, 2, 3], dtype="uint64")._column
to_dtype = np.dtype("uint32")
assert data.can_cast_safely(to_dtype)
data = cudf.Series([1, 2, 2**33], dtype="uint64")._column
assert not data.can_cast_safely(to_dtype)
# 'f' -> 'f'
data = cudf.Series([np.inf, 1.0], dtype="float64")._column
to_dtype = np.dtype("float32")
assert data.can_cast_safely(to_dtype)
data = cudf.Series(
[np.finfo("float32").max * 2, 1.0], dtype="float64"
)._column
to_dtype = np.dtype("float32")
assert not data.can_cast_safely(to_dtype)
def test_can_cast_safely_mixed_kind():
data = cudf.Series([1, 2, 3], dtype="int32")._column
to_dtype = np.dtype("float32")
assert data.can_cast_safely(to_dtype)
# too big to fit into f32 exactly
data = cudf.Series([1, 2, 2**24 + 1], dtype="int32")._column
assert not data.can_cast_safely(to_dtype)
data = cudf.Series([1, 2, 3], dtype="uint32")._column
to_dtype = np.dtype("float32")
assert data.can_cast_safely(to_dtype)
# too big to fit into f32 exactly
data = cudf.Series([1, 2, 2**24 + 1], dtype="uint32")._column
assert not data.can_cast_safely(to_dtype)
to_dtype = np.dtype("float64")
assert data.can_cast_safely(to_dtype)
data = cudf.Series([1.0, 2.0, 3.0], dtype="float32")._column
to_dtype = np.dtype("int32")
assert data.can_cast_safely(to_dtype)
# float values that are not integer-valued
data = cudf.Series([1.0, 2.0, 3.5], dtype="float32")._column
assert not data.can_cast_safely(to_dtype)
data = cudf.Series([10.0, 11.0, 2000.0], dtype="float64")._column
assert data.can_cast_safely(to_dtype)
# float out of int range
data = cudf.Series([1.0, 2.0, 1.0 * (2**31)], dtype="float32")._column
assert not data.can_cast_safely(to_dtype)
# negative signed integers casting to unsigned integers
data = cudf.Series([-1, 0, 1], dtype="int32")._column
to_dtype = np.dtype("uint32")
assert not data.can_cast_safely(to_dtype)
def test_to_pandas_nullable_integer():
gsr_not_null = cudf.Series([1, 2, 3])
gsr_has_null = cudf.Series([1, 2, None])
psr_not_null = pd.Series([1, 2, 3], dtype="int64")
psr_has_null = pd.Series([1, 2, None], dtype="Int64")
assert_eq(gsr_not_null.to_pandas(), psr_not_null)
assert_eq(gsr_has_null.to_pandas(nullable=True), psr_has_null)
def test_to_pandas_nullable_bool():
gsr_not_null = cudf.Series([True, False, True])
gsr_has_null = cudf.Series([True, False, None])
psr_not_null = pd.Series([True, False, True], dtype="bool")
psr_has_null = pd.Series([True, False, None], dtype="boolean")
assert_eq(gsr_not_null.to_pandas(), psr_not_null)
assert_eq(gsr_has_null.to_pandas(nullable=True), psr_has_null)
def test_can_cast_safely_has_nulls():
data = cudf.Series([1, 2, 3, None], dtype="float32")._column
to_dtype = np.dtype("int64")
assert data.can_cast_safely(to_dtype)
data = cudf.Series([1, 2, 3.1, None], dtype="float32")._column
assert not data.can_cast_safely(to_dtype)
@pytest.mark.parametrize(
"data",
[
[1, 2, 3],
(1.0, 2.0, 3.0),
[float("nan"), None],
np.array([1, 2.0, -3, float("nan")]),
pd.Series(["123", "2.0"]),
pd.Series(["1.0", "2.", "-.3", "1e6"]),
pd.Series(
["1", "2", "3"],
dtype=pd.CategoricalDtype(categories=["1", "2", "3"]),
),
pd.Series(
["1.0", "2.0", "3.0"],
dtype=pd.CategoricalDtype(categories=["1.0", "2.0", "3.0"]),
),
# Categories with nulls
pd.Series([1, 2, 3], dtype=pd.CategoricalDtype(categories=[1, 2])),
pd.Series(
[5.0, 6.0], dtype=pd.CategoricalDtype(categories=[5.0, 6.0])
),
pd.Series(
["2020-08-01 08:00:00", "1960-08-01 08:00:00"],
dtype=np.dtype("<M8[ns]"),
),
pd.Series(
[pd.Timedelta(days=1, seconds=1), pd.Timedelta("-3 seconds 4ms")],
dtype=np.dtype("<m8[ns]"),
),
[
"inf",
"-inf",
"+inf",
"infinity",
"-infinity",
"+infinity",
"inFInity",
],
],
)
def test_to_numeric_basic_1d(data):
expected = pd.to_numeric(data)
got = cudf.to_numeric(data)
assert_eq(expected, got)
@pytest.mark.parametrize(
"data",
[
[1, 2**11],
[1, 2**33],
[1, 2**63],
[np.iinfo(np.int64).max, np.iinfo(np.int64).min],
],
)
@pytest.mark.parametrize(
"downcast", ["integer", "signed", "unsigned", "float"]
)
def test_to_numeric_downcast_int(data, downcast):
ps = pd.Series(data)
gs = cudf.from_pandas(ps)
expected = pd.to_numeric(ps, downcast=downcast)
got = cudf.to_numeric(gs, downcast=downcast)
assert_eq(expected, got)
@pytest.mark.parametrize(
"data",
[
[1.0, 2.0**11],
[-1.0, -(2.0**11)],
[1.0, 2.0**33],
[-1.0, -(2.0**33)],
[1.0, 2.0**65],
[-1.0, -(2.0**65)],
[1.0, float("inf")],
[1.0, float("-inf")],
[1.0, float("nan")],
[1.0, 2.0, 3.0, 4.0],
[1.0, 1.5, 2.6, 3.4],
],
)
@pytest.mark.parametrize(
"downcast", ["signed", "integer", "unsigned", "float"]
)
def test_to_numeric_downcast_float(data, downcast):
ps = pd.Series(data)
gs = cudf.from_pandas(ps)
expected = pd.to_numeric(ps, downcast=downcast)
got = cudf.to_numeric(gs, downcast=downcast)
assert_eq(expected, got)
@pytest.mark.parametrize(
"data",
[
[1.0, 2.0**129],
[1.0, 2.0**257],
[1.0, 1.79e308],
[-1.0, -(2.0**129)],
[-1.0, -(2.0**257)],
[-1.0, -1.79e308],
],
)
@pytest.mark.parametrize("downcast", ["signed", "integer", "unsigned"])
def test_to_numeric_downcast_large_float(data, downcast):
ps = pd.Series(data)
gs = cudf.from_pandas(ps)
expected = pd.to_numeric(ps, downcast=downcast)
got = cudf.to_numeric(gs, downcast=downcast)
assert_eq(expected, got)
@pytest.mark.parametrize(
"data",
[
[1.0, 2.0**129],
[1.0, 2.0**257],
[1.0, 1.79e308],
[-1.0, -(2.0**129)],
[-1.0, -(2.0**257)],
[-1.0, -1.79e308],
],
)
@pytest.mark.parametrize("downcast", ["float"])
def test_to_numeric_downcast_large_float_pd_bug(data, downcast):
ps = pd.Series(data)
gs = cudf.from_pandas(ps)
expected = pd.to_numeric(ps, downcast=downcast)
got = cudf.to_numeric(gs, downcast=downcast)
# Pandas bug: https://github.com/pandas-dev/pandas/issues/19729
with pytest.raises(AssertionError, match="Series are different"):
assert_eq(expected, got)
@pytest.mark.parametrize(
"data",
[
["1", "2", "3"],
[str(np.iinfo(np.int64).max), str(np.iinfo(np.int64).min)],
],
)
@pytest.mark.parametrize(
"downcast", ["signed", "integer", "unsigned", "float"]
)
def test_to_numeric_downcast_string_int(data, downcast):
ps = pd.Series(data)
gs = cudf.from_pandas(ps)
expected = pd.to_numeric(ps, downcast=downcast)
got = cudf.to_numeric(gs, downcast=downcast)
assert_eq(expected, got)
@pytest.mark.parametrize(
"data",
[
[""], # pure empty strings
["10.0", "11.0", "2e3"],
["1.0", "2e3"],
["1", "10", "1.0", "2e3"], # int-float mixed
["1", "10", "1.0", "2e3", "2e+3", "2e-3"],
["1", "10", "1.0", "2e3", "", ""], # mixed empty strings
],
)
@pytest.mark.parametrize(
"downcast", ["signed", "integer", "unsigned", "float"]
)
def test_to_numeric_downcast_string_float(data, downcast):
ps = pd.Series(data)
gs = cudf.from_pandas(ps)
expected = | pd.to_numeric(ps, downcast=downcast) | pandas.to_numeric |
from __future__ import print_function, division
import GLM.constants, os, pdb, pandas, numpy, logging, crop_stats
import pygeoutil.util as util
class CropFunctionalTypes:
"""
"""
def __init__(self, res='q'):
"""
:param res: Resolution of output dataset: q=quarter, h=half, o=one
:return:
"""
# Dictionary of crop functional types
self.cft = {'C4Annual': ['maize.asc', 'millet.asc', 'sorghum.asc'],
'C4Perren': ['sugarcane.asc'],
'C3Perren': ['banana.asc', 'berry.asc', 'citrus.asc', 'fruittree.asc', 'grape.asc', 'palm.asc', 'tropevrgrn.asc'],
'Ntfixing': ['alfalfa.asc', 'bean.asc', 'legumehay.asc', 'peanut.asc', 'soybean.asc'],
'C3Annual': ['beet.asc', 'cassava.asc', 'cotton.asc', 'flax.asc', 'hops.asc', 'mixedcover.asc',
'nursflower.asc', 'oat.asc', 'potato.asc', 'rapeseed.asc', 'rice.asc', 'rye.asc',
'safflower.asc', 'sunflower.asc', 'tobacco.asc', 'vegetable.asc', 'wheat.asc'],
'TotlRice': ['rice.asc', 'xrice.asc']}
# Get shape of file
self.skiprows = 6
self.res = res
self.tmpdata = util.open_or_die(path_file=GLM.constants.MFD_DATA_DIR + os.sep + 'maize.asc',
skiprows=self.skiprows, delimiter=' ')
self.asc_hdr = util.get_ascii_header(path_file=GLM.constants.MFD_DATA_DIR + os.sep + 'maize.asc',
getrows=self.skiprows)
self.yshape = self.tmpdata.shape[0]
self.xshape = self.tmpdata.shape[1]
# Create empty numpy arrays
self.c4annual = numpy.zeros(shape=(self.yshape, self.xshape))
self.c4perren = numpy.zeros(shape=(self.yshape, self.xshape))
self.c3perren = numpy.zeros(shape=(self.yshape, self.xshape))
self.ntfixing = numpy.zeros(shape=(self.yshape, self.xshape))
self.c3annual = numpy.zeros(shape=(self.yshape, self.xshape))
self.totlrice = numpy.zeros(shape=(self.yshape, self.xshape))
self.totlcrop = numpy.zeros(shape=(self.yshape, self.xshape))
self.c4anarea = numpy.zeros(shape=(self.yshape, self.xshape))
self.c4prarea = numpy.zeros(shape=(self.yshape, self.xshape))
self.c3prarea = numpy.zeros(shape=(self.yshape, self.xshape))
self.ntfxarea = numpy.zeros(shape=(self.yshape, self.xshape))
self.c3anarea = numpy.zeros(shape=(self.yshape, self.xshape))
self.croparea = numpy.zeros(shape=(self.yshape, self.xshape))
# Area of each cell in Monfreda dataset
self.mfd_area = numpy.zeros(shape=(self.yshape, self.xshape))
# Ice-water fraction and other static data
self.icwtr = util.open_or_die(GLM.constants.path_GLM_stat)
# Read in area file based on res
if res == 'q':
self.area_data = util.open_or_die(path_file=GLM.constants.CELL_AREA_Q)
elif res == 'h':
self.area_data = util.open_or_die(path_file=GLM.constants.CELL_AREA_H)
elif res == 'o':
self.area_data = util.open_or_die(path_file=GLM.constants.CELL_AREA_O)
else:
logging.info('Incorrect resolution for output of Monfreda')
# Compute cell area (excluding ice-water fraction)
self.cell_area = util.open_or_die(GLM.constants.path_GLM_carea)
self.land_area = self.cell_area * (1.0 - self.icwtr.variables[GLM.constants.ice_water_frac][:, :])
# Get FAO country concordance list
self.fao_id = | pandas.read_csv(GLM.constants.FAO_CONCOR) | pandas.read_csv |
# -*- coding: utf-8 -*-
"""
Created on Wed Sep 5 15:51:36 2018
@author: huangjin
"""
import pandas as pd
from tqdm import tqdm
import os
def gen_data(df, time_start, time_end):
df = df.sort_values(by=['code','pt'])
df = df[(df['pt']<=time_end)&(df['pt']>=time_start)]
col = [c for c in df.columns if c not in ['code','pt']]
df_tem = df.groupby(['code']).shift(1).fillna(0)
all_data = df[['code','pt']]
for j in tqdm(range(len(col))):
tem = df[col[j]]-df_tem[col[j]]
all_data = pd.concat([all_data, tem], axis=1)
return all_data
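# Note (added for clarity): gen_data turns cumulative report values into per-period
# increments by differencing consecutive rows within each stock code, e.g. the row for
# pt='2011-06-30' becomes (Q2 cumulative - Q1 cumulative) for every feature column.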
def process_data():
industry_name = ['非银金融','纺织服装','有色金属','计算机','交通运输','医药生物','钢铁','家用电器',
'采掘','国防军工','房地产','建筑材料','休闲服务','综合','建筑装饰','银行',
'轻工制造','化工','电子','机械设备','商业贸易','通信','电气设备','公用事业','传媒',
'农林牧渔','食品饮料','汽车']
industry_name_english = ['Non bank finance', 'textile and clothing', 'non-ferrous metals',
'computer', 'transportation', 'medical biology', 'steel',
'household appliances','Excavation','Defense Force',
'Real Estate', 'Building Materials', 'Leisure Services',
'Comprehensive', 'Architectural Decoration', 'Bank',
'Light manufacturing', 'Chemical', 'Electronic', 'Mechanical equipment',
'Commercial trade', 'Communication', 'Electrical equipment', 'Utilities',
'Media','Agriculture and fishing', 'food and beverage', 'car']
for industry_name_i in range(len(industry_name)):
        # Market values
market_value = pd.read_csv('market_values_end.csv')
stocks_info = pd.read_csv('stocks_info.csv')
stock_info_tem = stocks_info[['code','level_0']]
stock_info_tem = stock_info_tem[stock_info_tem['level_0']==industry_name[industry_name_i]]
market_values = pd.merge(stock_info_tem, market_value, how='left', on = 'code')
market_values.drop('level_0', axis=1, inplace=True)
        # Balance sheet
data_all = pd.read_csv('financial_report_balance_sheet.csv')
stocks_info = pd.read_csv('stocks_info.csv')
stock_info_tem = stocks_info[['code','level_0']]
stock_info_tem = stock_info_tem[stock_info_tem['level_0']==industry_name[industry_name_i]]
balance_data = pd.merge(stock_info_tem, data_all, how='left', on = 'code')
balance_data.drop('level_0', axis=1, inplace=True)
print(balance_data.shape)
        # Income statement
data_all = pd.read_csv('quant_financial_report_profitloss.csv')
stocks_info = pd.read_csv('stocks_info.csv')
stock_info_tem = stocks_info[['code','level_0']]
stock_info_tem = stock_info_tem[stock_info_tem['level_0']==industry_name[industry_name_i]]
income_data = pd.merge(stock_info_tem, data_all, how='left', on = 'code')
income_data.drop('level_0', axis=1, inplace=True)
print(income_data.shape)
        # Cash flow statement
data_all = pd.read_csv('quant_financial_report_cashflows_statement.csv')
stocks_info = pd.read_csv('stocks_info.csv')
stock_info_tem = stocks_info[['code','level_0']]
stock_info_tem = stock_info_tem[stock_info_tem['level_0']==industry_name[industry_name_i]]
flow_data = pd.merge(stock_info_tem, data_all, how='left', on = 'code')
flow_data.drop('level_0', axis=1, inplace=True)
print(flow_data.shape)
tem1 = pd.merge(income_data, balance_data, on=['code','pt'], how = 'left')
data_all = pd.merge(tem1, flow_data, on=['code','pt'], how = 'left')
thresh = 0.8*len(data_all)
        print('Shape before dropping columns with missing values:', data_all.shape)
data_all.dropna(axis=1, thresh=thresh, inplace=True)
        print('Shape after dropping columns with missing values:', data_all.shape)
        # Drop tot_oper_rev, which duplicates the target value
data_all.drop('tot_oper_rev', axis=1, inplace=True)
        # Forward fill from the previous period
data_all = data_all.fillna(method='ffill')
        # Where there is no previous value, fill with the median
data_all = data_all.fillna(data_all.median())
# '2011-03-31', '2018-03-31'
print(data_all.shape)
        # Difference adjacent periods within each code, since the raw features are cumulative
data1 = gen_data(data_all, '2011-03-31', '2011-12-31')
data2 = gen_data(data_all, '2012-03-31', '2012-12-31')
data3 = gen_data(data_all, '2013-03-31', '2013-12-31')
data4 = gen_data(data_all, '2014-03-31', '2014-12-31')
data5 = gen_data(data_all, '2015-03-31', '2015-12-31')
data6 = gen_data(data_all, '2016-03-31', '2016-12-31')
data7 = gen_data(data_all, '2017-03-31', '2017-12-31')
data8 = gen_data(data_all, '2018-03-31', '2018-12-31')
data_all_end = | pd.DataFrame() | pandas.DataFrame |
import pandas as pd
from example_data_base import get_historical_data, get_connection, get_query
from sklearn.metrics.pairwise import cosine_similarity
STRONG_SIMILARITY_DIFFERENCE = 5
SMALL_SIMILARITY_DIFFERENCE = 10
FEATURES_AMOUNT = 6
# (strong similar features count, small similar features count)
records_similarities = [(6, 0), (5, 1), (5, 0), (4, 2), (4, 1), (4, 0), (3, 3), (3, 2), (3, 1), (2, 4), (2, 3), (2, 2),
(1, 5), (1, 4), (1, 3), (0, 6), (0, 5), (0, 4)]
# (strong similar features count, small similar features count, similarity weight)
records_similarities_with_wages = []
STRONG_SIM_MULTIPLIER = 3
MEDIUM_SIM_MULTIPLIER = 2
SMALL_SIM_MULTIPLIER = 1
# grade: prediction_modifier
grade_modifiers = {5: 0.5, 4: 0.25, 3: 0.0, 2: -0.25, 1: -0.5}
def are_strong_similar_values(value1, value2):
return abs(value1 - value2) <= STRONG_SIMILARITY_DIFFERENCE
def are_small_similar_values(value1, value2):
return abs(value1 - value2) <= SMALL_SIMILARITY_DIFFERENCE
def prepare_record_similarities():
s = len(records_similarities)
for index, e in enumerate(records_similarities):
w = 1.0 - index / s
records_similarities_with_wages.append((e[0], e[1], w))
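# Worked example (added, based on the loop above): with the 18 tuples in
# records_similarities, index 0 -> (6, 0, 1.0) and index 17 -> (0, 4, 1 - 17/18 ~ 0.056),
# so the similarity weight decreases linearly with the rank of the match pattern.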
def prepare_record(car_name, grade, strong_s_count, small_s_count):
index = records_similarities.index((strong_s_count, small_s_count))
return car_name, grade, records_similarities_with_wages[index][2]
def get_all_similar_records(predicted_features):
historical_data = get_historical_data()
prepare_record_similarities()
result = []
for historical_row in historical_data:
strong_similarity = 0
small_similarity = 0
for index, element in enumerate(predicted_features):
if are_strong_similar_values(element, historical_row[index]):
strong_similarity += 1
elif are_small_similar_values(element, historical_row[index]):
small_similarity += 1
if strong_similarity + small_similarity >= 4:
result.append(prepare_record(historical_row[-2], historical_row[-1], strong_similarity, small_similarity))
return result, len(historical_data)
def get_data_frames():
conn = get_connection()
sql = get_query(with_timestamp=True)
data = | pd.read_sql(sql, conn) | pandas.read_sql |
'''
This file includes all the locally differentially private mechanisms we designed for the SIGMOD work.
I am aware that this code could be cleaned up a bit and that there is some redundancy. But this helps keep the code plug-n-play.
I can simply copy a class and use it in a different context.
http://dimacs.rutgers.edu/~graham/pubs/papers/sigmod18.pdf
'''
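# Added note on conventions (not part of the original comment): throughout this file
# "e_eps" denotes exp(epsilon). The two-sided randomized-response classes below keep a
# bit/item truthfully with probability exp(eps/2) / (1 + exp(eps/2)), e.g. e_eps = 3.0
# (eps ~ 1.1) gives a keep probability of about 0.634.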
import numpy as np
import itertools
from scipy.linalg import hadamard
import pandas as pd
import xxhash
import sys
import random
#np.seterr(all='raise')
BIG_PRIME = 9223372036854775783
def rr2 (bit,bern):
if bern:
return bit
return -bit
def pop_probmat(prob,sz):
probmat =np.zeros((sz,sz))
d = np.log2(sz)
for i in range(0,sz):
for j in range(0,sz):
perturbed = count_1(np.bitwise_xor(i,j))
#print i,bin(i),j,bin(j) ,bin(np.bitwise_xor(i,j)),perturbed
probmat[i][j] = np.power(1.0-prob,perturbed) * np.power(prob,d-perturbed)
return probmat
def mps (num,bern,rnum):
if bern:
return num
return rnum
def L1(a,b):
a = np.abs(a)
b= np.abs(b)
return round(np.abs(a-b).sum(),4)
def count_1(num):
cnt =0
while num !=0:
num = np.bitwise_and(num,num-1)
cnt+=1
return cnt
def random_number():
return random.randrange(1, BIG_PRIME - 1)
def compute_marg(misc_vars
,irr_estimate
,ips_estimate
,iht_pert_ns_estimate
,iolh_estimate
,mps_pert_dict
,mrr_pert_dict
,mht_pert_dict
,icms_estimate
,icmsht_estimate
):
### These lists store L1 error for each k way marginal.
irr_l1_array = []
iht_l1_array = []
ips_l1_array =[]
iolh_l1_array =[]
icms_l1_array = []
icmsht_l1_array = []
mps_l1_array= []
mrr_l1_array=[]
mht_l1_array = []
s = misc_vars.allsubsetsint.shape[0]
temp_array2= np.zeros(s)
input_dist_margs = np.zeros(np.power(2,misc_vars.d))
marg_from_irr = np.zeros(np.power(2,misc_vars.d))
marg_from_iht = np.zeros(s)
marg_from_ips = np.zeros(np.power(2,misc_vars.d))
marg_from_iolh = np.zeros(np.power(2,misc_vars.d))
marg_from_icms = np.zeros(np.power(2,misc_vars.d))
marg_from_icmsht = np.zeros(np.power(2,misc_vars.d))
all_cords = np.array(range(0, np.power(2,misc_vars.d)))
temp_array = np.zeros(np.power(2, misc_vars.d))
### We now evaluate each marginal using the method described in Barak et al's paper.
for beta in misc_vars.allsubsetsint:
if count_1(beta) != misc_vars.k:
continue
alphas=misc_vars.alphas_cache[beta]["alphas"]
gammas = alphas
marg_from_irr.fill(0.0)
marg_from_ips.fill(0.0)
marg_from_iht.fill(0.0)
marg_from_iolh.fill(0.0)
marg_from_icms.fill(0.0)
marg_from_icmsht.fill(0.0)
input_dist_margs.fill(0.0)
real_indices = []
for alpha in alphas:
temp_array.fill(0.0)
temp_array2.fill(0.0)
try:
f_alpha = misc_vars.f[alpha]
except:
f_alpha = np.zeros(np.power(2,misc_vars.d))
for i in all_cords:
f_alpha[i] = np.power(-1.0, count_1(np.bitwise_and(alpha, i)))
misc_vars.f[alpha] = f_alpha
for gamma in gammas:
temp_array[gamma]+=misc_vars.f[alpha][gamma]
temp_array2[misc_vars.coef_dict[gamma]] +=np.power(-1.0,count_1(np.bitwise_and(gamma,alpha)))
try:
input_dist_margs += (temp_array * misc_vars.f[alpha].dot(misc_vars.input_dist))
marg_from_irr += (temp_array * misc_vars.f[alpha].dot(irr_estimate))
marg_from_ips += (temp_array * misc_vars.f[alpha].dot(ips_estimate))
marg_from_icms += (temp_array * misc_vars.f[alpha].dot(icms_estimate))
marg_from_icmsht += (temp_array * misc_vars.f[alpha].dot(icmsht_estimate))
marg_from_iolh += (temp_array * misc_vars.f[alpha].dot(iolh_estimate))
except:
print ("Unexpected error:", sys.exc_info())
marg_from_iht += (temp_array2 * iht_pert_ns_estimate[misc_vars.coef_dict[alpha]])
real_indices.append(misc_vars.coef_dict[alpha])
### input######
m_inp = np.abs(np.take(input_dist_margs,gammas)) ## Extracting counts from marginal indices specified by "gammas".
m_inp/=m_inp.sum()
#### INPUT_HT #############
m_inp_ht = np.abs(np.take(marg_from_iht,real_indices)) ## Extracting counts from marginal indices specified by "gammas".
m_inp_ht/=m_inp_ht.sum()
iht_l1_array.append(L1(m_inp_ht,m_inp))
######## INPUT_PS ###########
ips_marg = np.abs(np.take(marg_from_ips,gammas)) ## Extracting counts from marginal indices specified by "gammas".
ips_marg/=ips_marg.sum()
ips_l1_array.append(L1(ips_marg,m_inp))
######## INPUT_RR ##########
m_irr = np.abs(np.take(marg_from_irr, gammas)) ## Extracting counts from marginal indices specified by "gammas".
m_irr /= m_irr.sum()
irr_l1_array.append(L1(m_irr,m_inp))
######### INPUT_OLH ##########
try:
m_iolh = np.abs(np.take(marg_from_iolh,gammas)) ## Extracting counts from marginal indices specified by "gammas".
m_iolh/=m_iolh.sum()
iolh_l1_array.append(L1(m_iolh,m_inp))
except:
## incase we drop INPUT_OLH from execution.
#print ("Unexpected error:", sys.exc_info())
iolh_l1_array.append(0.0)
try:
icms_marg = np.abs(np.take(marg_from_icms,gammas)) ## Extracting counts from marginal indices specified by "gammas".
icms_marg/=icms_marg.sum()
icms_l1_array.append(L1(icms_marg,m_inp))
except:
# incase we drop INPUT_CMS from execution.
#print ("Unexpected error:", sys.exc_info())
icms_l1_array.append(0.0)
try:
icmsht_marg = np.abs(np.take(marg_from_icmsht,gammas)) ## Extracting counts from marginal indices specified by "gammas".
icmsht_marg/=icmsht_marg.sum()
icmsht_l1_array.append(L1(icmsht_marg,m_inp))
except:
# incase we drop INPUT_HTCMS from execution.
#print (icms_marg)
#print ("Unexpected error:", sys.exc_info())
icmsht_l1_array.append(0.0)
######### MARG_RR ###############
mrr_l1_array.append(L1(m_inp,mrr_pert_dict[np.binary_repr(beta,width=misc_vars.d)[::-1]]))
#print (m_inp)
######### MARG_HT #####################
mht_l1_array.append(L1(mht_pert_dict[np.binary_repr(beta,width=misc_vars.d)[::-1]],m_inp))
########## MARG_PS #####################
mps_l1_array.append(L1(mps_pert_dict[np.binary_repr(beta, width=misc_vars.d)[::-1]], m_inp))
irr_l1 = np.array(irr_l1_array).mean(axis=0)
ips_l1 = np.array(ips_l1_array).mean(axis=0)
iht_l1 = np.array(iht_l1_array).mean(axis=0)
iolh_l1 = np.array(iolh_l1_array).mean(axis=0)
icms_l1 = np.array(icms_l1_array).mean(axis=0)
icmsht_l1 = np.array(icmsht_l1_array).mean(axis=0)
mrr_l1 = np.array(mrr_l1_array).mean(axis=0)
mps_l1 = np.array(mps_l1_array).mean(axis=0)
mht_l1 = np.array(mht_l1_array).mean(axis=0)
#print (irr_l1_array,mrr_l1,iht_l1_array,mht_l1,ips_l1,mps_l1,iolh_l1_array,icms_l1_array,icmsht_l1_array)
return (irr_l1,mrr_l1,iht_l1, mht_l1, ips_l1, mps_l1, iolh_l1, icms_l1, icmsht_l1)
class INPUT_RR(object):
def perturb2(self):
return
def perturb(self,index_of_1,p):
i = 0
while i < self.sz:
item = 0.0
if i == index_of_1:
item = 1.0
if self.bern_irr[p][i]:
self.irr[i] += item
else:
self.irr[i] += (1.0 - item)
i += 1
## It is possible to simulate InputRR using Binomial distributions. We
## use this simulation for rapid completion.
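    ## (Added explanation): for each cell i, the number of users reporting a 1 splits into
    ## two independent binomials -- one over the n_i users whose true value hits cell i and
    ## one over the remaining N - n_i users -- so the whole population can be simulated with
    ## two np.random.binomial draws per cell instead of a per-user loop.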
def correction2(self,miscvar):
i=0
irr2 = np.zeros(self.sz)
while i < self.sz:
irr2[i] = np.random.binomial(miscvar.input_dist[i],0.5,size=1)[0] +\
np.random.binomial(self.population- miscvar.input_dist[i],1.0-self.prob,size=1)[0]
irr2[i]/=self.population
irr2[i] = (self.irr[i] + self.prob - 1.0) / (2.0 * self.prob - 1.0)
i+=1
np.copyto(self.irr,irr2)
#print (irr2)
## just repeat reconstruction of each index to reduce variance.
def correction3(self,miscvar):
i=0
while i <self.sz:
j=0
while j<5:
self.irr[i] += (np.random.binomial(miscvar.input_dist[i],0.5,size=1)[0] +\
np.random.binomial(self.population- miscvar.input_dist[i],self.prob,size=1)[0])
j+=1
self.irr[i]/=(5.0*self.population)
self.irr[i] = (self.irr[i]-self.prob) / (0.5 -self.prob);
#self.irr[i] = (self.irr[i] + self.prob - 1.0) / (2.0 * self.prob - 1.0)
i+=1
#print (self.irr)
def correction(self):
self.irr/=self.population
#print (self.irr)
for i in range(0,self.sz):
self.irr[i] = (self.irr[i]+self.prob-1.0)/(2.0*self.prob-1.0)
#self.irr/=self.irr.sum()
#print (self.irr.round(4))
def __init__(self,e_eps,d,population):
self.population=population
self.d = d
self.sz = np.power(2, self.d)
self.eps = np.log(e_eps)
self.e_eps = np.power(np.e,(self.eps/2.0))
self.prob = self.e_eps/(1.0+self.e_eps)
#print (self.prob,"input-RR")
self.problist = [self.prob,1.0-self.prob]
#self.bern_irr = np.random.choice([True,False], size=self.sz * self.population, p=self.problist).reshape(self.population, self.sz)
#self.sample_index = np.random.choice(range(0, self.sz), size=self.population)
self.irr = np.zeros(np.power(2,self.d))
class MARG_RR(object):
def perturb(self,index_of_1,p,rand_quests):
i = 0
if not rand_quests in self.marg_dict:
self.marg_dict[rand_quests] = np.zeros(self.sz)
self.marg_freq[rand_quests] = 0.0
self.marg_freq[rand_quests] += 1.0
while i < self.sz:
item = 0.0
if i == index_of_1:
item = 1.0
if self.bern[p][i]:
self.marg_dict[rand_quests][i] += item
else:
self.marg_dict[rand_quests][i] += (1.0 - item)
i += 1
def perturb2(self,index_of_1,p,rand_quests):
i = 0
if not rand_quests in self.marg_dict:
self.marg_dict[rand_quests] = np.zeros(self.sz)
self.marg_freq[rand_quests] = 0.0
self.marg_freq[rand_quests] += 1.0
while i < self.sz:
item = 0.0
b = self.bern_q
if i == index_of_1:
item = 1.0
b = self.bern_p
if b[p][i]:
self.marg_dict[rand_quests][i] += item
else:
self.marg_dict[rand_quests][i] += (1.0 - item)
i += 1
def perturb3(self,index_of_1,p,rand_quests):
try:
self.marg_freq[rand_quests] += 1.0
self.true_marg[rand_quests][index_of_1]+= 1.0
except:
self.marg_dict[rand_quests] = np.zeros(self.sz)
self.marg_freq[rand_quests] = 0.0
self.true_marg[rand_quests] = np.zeros(self.sz)
self.marg_freq[rand_quests] += 1.0
self.true_marg[rand_quests][index_of_1]+= 1.0
def correction(self):
#print ("--------------------------------")
for marg in self.marg_dict:
self.marg_dict[marg] /= self.marg_freq[marg]
for i in range(0,self.sz):
self.marg_dict[marg][i] = (self.marg_dict[marg][i]+self.prob-1.0)/(2.0*self.prob-1.0)
self.marg_dict[marg]/=self.marg_dict[marg].sum()
def correction2(self):
for marg in self.marg_dict:
#print ("--------------------------------")
self.marg_dict[marg] /= self.marg_freq[marg]
for i in range(0,self.sz):
#self.marg_dict[marg][i] = (self.marg_dict[marg][i]+self.prob-1.0)/(2.0*self.prob-1.0)
self.marg_dict[marg][i] = (self.marg_dict[marg][i]-(self.prob)) / (0.5 -(self.prob))
self.marg_dict[marg]/=self.marg_dict[marg].sum()
def correction3(self):
for marg in self.marg_dict:
#self.marg_dict[marg] /= self.marg_freq[marg]
i=0
#print (self.marg_dict[marg])
total = self.marg_freq[marg]
while i <self.sz:
j=0
while j <5:
self.marg_dict[marg][i] += (np.random.binomial(self.true_marg[marg][i],0.5,size=1)[0] +\
np.random.binomial(self.marg_freq[marg]- self.true_marg[marg][i],self.prob,size=1)[0])
j+=1
self.marg_dict[marg][i] /= (5.0*total)
#self.marg_dict[marg][i] = (self.marg_dict[marg][i]+self.prob-1.0)/(2.0*self.prob-1.0)
self.marg_dict[marg][i] = (self.marg_dict[marg][i]-(self.prob)) / (0.5 -(self.prob))
i+=1
self.marg_dict[marg]/=self.marg_dict[marg].sum()
def __init__(self,d,k,e_eps,population,k_way):
self.d = d
self.k = k
self.population= population
self.k_way = k_way
self.sz = np.power(2,self.k)
self.eps = np.log(e_eps)
self.e_eps = np.power(np.e,self.eps/2.0)
self.prob = self.e_eps / (1.0+self.e_eps)
#print (self.prob,"marg-RR")
self.problist = [self.prob,1.0-self.prob]
self.k_way_marg_ps = np.random.choice(self.k_way,size=self.population)
self.bern = np.random.choice([True, False], size=self.sz * self.population, p=self.problist).reshape(self.population, self.sz)
self.bern_p = np.random.choice([True, False], size=self.sz * self.population).reshape(self.population, self.sz)
self.bern_q = np.random.choice([True, False], size=self.sz * self.population, p=self.problist[::-1]).reshape(self.population, self.sz)
self.marg_dict = {}
self.marg_freq={}
self.true_marg={}
class MARG_HT(object):
def perturb(self,index_of_1,p,rand_quests):
if not rand_quests in self.marg_dict:
self.marg_dict[rand_quests] = np.zeros(self.sz)
self.marg_freq[rand_quests] = np.zeros(self.sz)
cf =self.rand_coef[p]
self.marg_freq[rand_quests][cf] += 1.0
htc = self.f[index_of_1][cf]
if self.bern[p]:
self.marg_dict[rand_quests][cf] += htc
else:
self.marg_dict[rand_quests][cf] += -htc
def correction(self):
for rm in self.marg_dict:
self.marg_freq[rm][self.marg_freq[rm] == 0.0] = 1.0
self.marg_dict[rm]/=self.marg_freq[rm]
self.marg_dict[rm]/=(2.0*self.prob-1.0)
self.marg_dict[rm][0]=1.0
#print ("-------------------")
#print (self.marg_dict[rm])
self.marg_dict[rm]= np.abs(self.marg_dict[rm].dot(self.f))
self.marg_dict[rm]/=self.marg_dict[rm].sum()
#print (self.marg_dict[rm].round(4))
def pop_probmat(self):
probmat =np.zeros((self.sz,self.sz))
for i in range(0,self.sz):
for j in range(0,self.sz):
if i ==j:
probmat[i][j]= self.prob
else:
probmat[i][j]= (1.0-self.prob)/(self.sz-1.0)
return probmat
def compute_all_marginals(self):
for marg_int in self.k_way:
self.correct_noise_mps(marg_int)
def __init__(self,d,k,e_eps,population,k_way,cls):
self.d = d
self.k = k
self.population= population
self.sz = np.power(2,self.k)
self.e_eps = e_eps
self.f = hadamard(self.sz).astype("float64")
self.prob = (self.e_eps/(1.0+self.e_eps))
self.problist = [self.prob,1.0-self.prob]
self.coef_dist = np.zeros(cls)
self.k_way = k_way
self.k_way_marg_ps = np.random.choice(self.k_way,size=self.population)
self.rand_coef= np.random.choice(range(0,self.sz),size=population)
self.bern = np.random.choice([True, False], size= self.population, p=self.problist)#.reshape(self.population, self.sz)
self.marg_freq = {}
self.marg_dict = {}
self.marg_noisy = np.zeros(self.sz)
class MARG_PS(object):
def perturb(self,index_of_1,p,rand_quests):
try:
freq = self.rand_cache[index_of_1]["freq"]
except:
i = 0
while i < self.sz:
options = list(range(0, self.sz))
options.remove(i)
self.rand_cache[i] = {"rnum": np.random.choice(np.array(options), size=10000), "freq": 0}
i += 1
freq = self.rand_cache[index_of_1]["freq"]
if freq > 9990:
options = list(range(0, self.sz))
options.remove(index_of_1)
self.rand_cache[index_of_1]["rnum"] = np.random.choice(np.array(options), size=10000)
self.rand_cache[index_of_1]["freq"] = 0
rnum = self.rand_cache[index_of_1]["rnum"][freq]
try:
self.marg_ps_pert_aggr[rand_quests].append(mps(index_of_1, self.bern[p], rnum))
except:
self.marg_ps_pert_aggr[rand_quests] = [mps(index_of_1, self.bern[p], rnum)]
self.rand_cache[index_of_1]["freq"] += 1
def correct_noise_mps(self,marg_int):
self.marg_int=marg_int
self.marg_ps_noisy.fill(0.0)
if type(self.marg_ps_pert_aggr[marg_int]) != "numpy.ndarray":
for rm in self.marg_ps_pert_aggr:
self.marg_ps_pert_aggr[rm] = np.array(self.marg_ps_pert_aggr[rm])
#print (self.marg_ps_pert_aggr.keys())
for index in self.marg_ps_pert_aggr[marg_int]:
self.marg_ps_noisy[index]+=1.0
self.marg_ps_noisy/=self.marg_ps_noisy.sum()
#marg_ps_recon = np.copy(marg_noisy)
self.marg_ps_recon = self.mat_inv.dot(self.marg_ps_noisy)
self.marg_ps_recon/=self.marg_ps_recon.sum()
#print (self.marg_ps_recon.round(4))
return self.marg_ps_recon
def pop_probmat(self):
probmat =np.zeros((self.sz,self.sz))
for i in range(0,self.sz):
for j in range(0,self.sz):
if i ==j:
probmat[i][j]= self.prob
else:
probmat[i][j]= (1.0-self.prob)/(self.sz-1.0)
return probmat
def compute_all_marginals(self):
for marg_int in self.k_way:
self.marg_dict[marg_int]=self.correct_noise_mps(marg_int)
def __init__(self,d,k,e_eps,population,k_way):
self.d = d
self.k = k
self.population= population
self.k_way = k_way
self.sz = np.power(2,self.k)
#self.data = data
self.e_eps = e_eps
self.prob = (self.e_eps/(self.e_eps+self.sz-1.0))
#print self.prob,"marg-ps"
self.probmat = self.pop_probmat()
self.problist = [self.prob,1.0-self.prob]
self.mat = self.pop_probmat()
self.mat_inv = np.linalg.inv(self.mat)
self.k_way_marg_ps = np.random.choice(self.k_way,size=self.population)
self.bern = np.random.choice([True, False], p=self.problist, size=self.population)
self.marg_ps_pert_aggr = {}
self.rand_cache = {}
self.marg_int = None
self.marg_ps_noisy = np.zeros(self.sz)
self.marg_dict = {}
## From Wang et al.'s USENIX paper.
## https://www.usenix.org/system/files/conference/usenixsecurity17/sec17-wang-tianhao.pdf
## This algorithm indeed does well for high order marginals but doesn't outperform INPUT_HT
## for small k's, i.e. 2 or 3, the ones that are the most interesting.
## We trade the gain in accuracy for computational cost. The encoding (or decoding) cost is O(dN).
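## (Added sketch of the estimator used below, assuming the implementation in correction()):
## each user hashes its value into g = ceil(e_eps + 1) buckets by default and reports the
## true bucket with probability p = e_eps / (e_eps + g - 1), otherwise a uniform bucket; the
## aggregator counts, for every domain point v, how many reports match v's hashed bucket
## (c_v) and debiases it as (c_v - N/g) / (p - 1/g).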
class INPUT_OLH(object):
def __init__(self,e_eps, d, population,g=1):
self.d = d
self.population= population
self.sz = int(np.power(2,self.d))
#self.data = data
self.e_eps = e_eps
if g == 1:
self.g = int(np.ceil(e_eps+1.0))
else:
self.g = g
#print (self.g)
self.prob = (self.e_eps/(self.e_eps+self.g-1.0))
self.problist = [self.prob,1.0-self.prob]
self.bern_ps = np.random.choice([False,True], size=self.population, p=self.problist)
self.uni_dist = np.random.choice(range(self.g),size=self.population).astype("int32")
#self.hash_cache = np.array( map(str,range(self.sz)),dtype="str") ## works with Python2
self.hash_cache = np.array(range(self.sz),dtype="str")
#self.hashed_pdist = np.zeros(self.population)
self.estimate = np.zeros(self.sz)
def perturb(self,x,p):
if self.bern_ps[p]:
#x_hash= (xxhash.xxh32(self.hash_cache[x], seed=p).intdigest()) % self.g
pert_val= (xxhash.xxh32(self.hash_cache[x], seed=p).intdigest()) % self.g
else:
pert_val=self.uni_dist[p]
dom_index = 0
while dom_index<self.sz:
if pert_val == (xxhash.xxh32(self.hash_cache[dom_index], seed=p).intdigest() % self.g):
self.estimate[dom_index]+=1.0
dom_index+=1
def correction(self):
p=0
while p <self.sz:
self.estimate[p]=(self.estimate[p] - (self.population/self.g))/(self.prob -(1.0/self.g))
p+=1
self.estimate/=self.estimate.sum()
#print(self.estimate.round(4))
class INPUT_HT(object):
def perturb(self,index_of_1,p):
rc = self.rand_coefs[p]
index = self.misc_vars.coef_dict[rc]
self.coef_dist[index] += 1.0
cf = np.power(-1.0, count_1(np.bitwise_and(index_of_1, rc)))
self.iht_pert_ns_estimate[index] += rr2(cf, self.bern_ht[p])
def correction(self):
self.coef_dist[self.coef_dist==0.0]=1.0
self.iht_pert_ns_estimate/=self.coef_dist
self.iht_pert_ns_estimate/=(2.0*self.prob-1.0)
self.iht_pert_ns_estimate[0] = 1.0
self.coef_dist[self.coef_dist<=0.0]=0.0
def __init__(self,d,k,e_eps,population,misc_vars):
self.d = d
self.k = k
self.misc_vars = misc_vars
self.population= population
self.sz = np.power(2,self.k)
self.e_eps = e_eps
self.prob = self.e_eps/(1.0+self.e_eps)
self.problist = [self.prob,1.0-self.prob]
self.bern_ht = np.random.choice([True,False],p=self.problist,size=self.population)
self.rand_coefs = np.random.choice(self.misc_vars.allsubsetsint,size=self.population)
self.iht_pert_ns_estimate = np.zeros(self.misc_vars.allsubsetsint.shape[0])
#iht_pert_ns_estimate.fill(0.0)
self.coef_dist = np.zeros(self.misc_vars.cls)
## From Apple's paper.
## https://machinelearning.apple.com/2017/12/06/learning-with-privacy-at-scale.html
## This algorithm might be a bad performer, but we add it just for comparison.
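## (Added sketch, assuming the CMS variant implemented below): the server keeps a d x w
## count matrix M; each user is assigned one of the d hash functions, hashes its value into
## one of w buckets, and submits a privatized +/-1 vector of width w for that row. A query
## for key v averages the debiased counters M[l][h_l(v)] over the d rows and rescales by
## w / (w - 1) after subtracting the expected noise floor N / w.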
class INPUT_CMS:
def __init__(self, w, d,population,e_eps,domain):
'''
if delta <= 0 or delta >= 1:
raise ValueError("delta must be between 0 and 1, exclusive")
if epsilonh <= 0 or epsilonh >= 1:
raise ValueError("epsilon must be between 0 and 1, exclusive")
#self.w = int(np.ceil(np.e / epsilonh))
#self.d = int(np.ceil(np.log(1 / delta)))
'''
self.w=w
self.d =d
self.population=population
self.hash_functions = [self.__generate_hash_function() for i in range(self.d)]
self.M = np.zeros(shape=(self.d, self.w))
#print (self.w,self.d,self.w*self.d,self.M.shape)
self.hash_chooser = np.random.choice(range(self.d),size=self.population)
self.epsilon = np.log(e_eps)
self.flip_prob = 1.0/(1.0+np.power(np.e,self.epsilon/2.0))
problist = [self.flip_prob,1.0-self.flip_prob]
self.bern = np.random.choice([True,False],p=problist,size=self.population*self.w).reshape(self.population,self.w)
self.c_eps = (np.power(np.e,self.epsilon/2.0)+1.0)/(np.power(np.e,self.epsilon/2.0)-1.0)
self.estimate = np.zeros(int(np.power(2,domain)))
def __generate_hash_function(self):
a = random_number()
b= random_number()
return lambda x: (a * x + b) % BIG_PRIME % self.w
def perturb(self, key,p):
hash_choice = self.hash_chooser[p]
hashed_key = self.hash_functions[hash_choice](abs(hash(str(key))))
cnt = 0
while cnt< self.w:
item = -1.0
if cnt == hashed_key:
item = 1.0
if self.bern[p][cnt]:
item = -item
self.M[hash_choice][cnt]+=(self.d * (item*self.c_eps*0.5+0.5))
cnt+=1
def query(self,key):
l =0
avg=0.0
hsh_str= abs(hash(str(key)))
while l < self.d:
hashed_key = self.hash_functions[l](hsh_str)
avg+=self.M[l][hashed_key]
l+=1
avg/=self.d
est = ((1.0*self.w)/(self.w-1.0))* (avg- (1.0*self.population)/self.w)
return est
def correction(self):
cnt=0
while cnt <self.estimate.shape[0]:
self.estimate[cnt]=self.query(cnt)
cnt+=1
self.estimate[self.estimate < 0.0] = 0.0
self.estimate/=self.estimate.sum()
## From Apple's paper.
## https://machinelearning.apple.com/2017/12/06/learning-with-privacy-at-scale.html
## This algorithm might be a bad performer, but we add it just for comparison.
class INPUT_HTCMS:
#def __init__(self, delta, epsilonh,population,e_eps):
def __init__(self, w, d,population,e_eps,domain):
self.w=int(w)
self.d =int(d)
self.ht = hadamard(self.w, dtype="float32")
self.population=population
self.hash_functions = [self.__generate_hash_function() for i in range(self.d)]
self.M = np.zeros(shape=(self.d, self.w))
#print (self.w,self.d,self.w*self.d,self.M.shape)
self.hash_chooser = np.random.choice(range(self.d),size=self.population).astype("int32")
self.coef_chooser = np.random.choice(range(self.w),size=self.population).astype("int32")
#self.hash_choice_counter = np.zeros(self.d)
self.flip_prob = 1.0/(1.0+e_eps)
problist = [self.flip_prob,1.0-self.flip_prob]
self.bern = np.random.choice([True,False],p=problist,size=self.population)
self.c_eps = (e_eps+1.0)/(e_eps-1.0)
self.estimate = np.zeros(int(np.power(2,domain)))
def __generate_hash_function(self):
a = random_number()
b= random_number()
return lambda x: (a * x + b) % BIG_PRIME % self.w
def perturb(self, key,p):
hash_choice = self.hash_chooser[p]
#self.hash_choice_counter[hash_choice]+=1.0
hashed_key = self.hash_functions[hash_choice](abs(hash(str(key))))
rand_coef = self.coef_chooser[p]
item = self.ht[rand_coef][hashed_key]
if self.bern[p]:
item = -item
self.M[hash_choice][rand_coef]+=(self.d * item*self.c_eps)
def correction(self):
cnt = 0
while cnt < self.d:
#print self.M[cnt]
self.M[cnt] = self.ht.dot(self.M[cnt])
cnt+=1
cnt=0
while cnt <self.estimate.shape[0]:
self.estimate[cnt]=self.query(cnt)
cnt+=1
self.estimate[self.estimate < 0.0] = 0.0
self.estimate/=self.estimate.sum()
def query(self,key):
l =0
avg=0.0
hsh_str= abs(hash(str(key)))
while l < self.d:
hashed_key = self.hash_functions[l](hsh_str)
avg+=self.M[l][hashed_key]
l+=1
avg/=self.d
est = ((1.0*self.w)/(self.w-1.0))* (avg- (1.0*self.population)/self.w)
return est
class INPUT_PS(object):
def perturb2(self,index_of_1,p):
if self.bern_ps[p]:
self.ips_ps_pert_aggr[index_of_1] += 1.0
else:
self.ips_ps_pert_aggr[self.rand_coef_ps[p]] += 1.0
def perturb(self,index_of_1,p):
try:
freq = self.rand_cache[index_of_1]["freq"]
except:
i = 0
while i < self.sz:
options = list(range(0, self.sz))
options.remove(i)
self.rand_cache[i] = {"rnum": np.random.choice(np.array(options), size=10000), "freq": 0}
i += 1
freq = self.rand_cache[index_of_1]["freq"]
if freq > 9990:
options = list(range(0, self.sz))
options.remove(index_of_1)
self.rand_cache[index_of_1]["rnum"] = np.random.choice(np.array(options), size=10000)
self.rand_cache[index_of_1]["freq"] = 0
rnum = self.rand_cache[index_of_1]["rnum"][freq]
ips_output = mps(index_of_1, self.bern[p], rnum)
self.ips_ps_pert_aggr[ips_output] += 1.0
self.rand_cache[index_of_1]["freq"] += 1
def correction2(self):
self.ips_ps_pert_aggr /= self.population
#print self.ips_ps_pert_aggr, "pert",self.ips_ps_pert_aggr.sum()
for i in range(0, self.sz):
self.ips_ps_pert_aggr[i] = (self.ips_ps_pert_aggr[i] * self.sz + self.probps - 1.0) / (self.probps * (self.sz + 1.0) - 1.0)
#print self.ips_ps_pert_aggr.round(4)
def correction(self):
self.ips_ps_pert_aggr /= self.ips_ps_pert_aggr.sum()
for i in range(0,self.sz):
self.ips_ps_pert_aggr[i] = (self.ips_ps_pert_aggr[i]*self.sz+self.prob-1.0)/(self.prob*(self.sz+1.0)-1.0)
#print self.marg_ps_recon.round(4)
'''
self.ips_ps_pert_aggr /= self.ips_ps_pert_aggr.sum()
# marg_ps_recon = np.copy(marg_noisy)
self.ips_ps_pert_aggr = np.abs(self.mat_inv.dot(self.ips_ps_pert_aggr))
self.ips_ps_pert_aggr /= self.ips_ps_pert_aggr.sum()
'''
#return self.ips_ps_pert_aggr
def pop_probmat(self):
probmat =np.zeros((self.sz,self.sz))
for i in range(0,self.sz):
for j in range(0,self.sz):
if i ==j:
probmat[i][j]= self.prob
else:
probmat[i][j]= (1.0-self.prob)/(self.sz-1.0)
return probmat
def __init__(self,d,k,e_eps,population,misc_vars):
self.d = d
self.k = k
self.population= population
self.k_way = misc_vars.k_way
self.sz = np.power(2,self.d)
self.e_eps = e_eps
self.prob = (self.e_eps/(self.e_eps+self.sz-1.0))
#print (self.prob,"input-ps")
self.problist = [self.prob,1.0-self.prob]
self.probps = (self.e_eps - 1.0) / (self.e_eps + self.sz - 1.0)
self.problist2 = [self.probps, 1.0 - self.probps]
self.rand_coef_ps = np.random.choice(np.array(range(0, self.sz)), size=self.population)
self.bern_ps = np.random.choice([True, False], size=self.population, p=[self.probps, 1.0 - self.probps])
#self.mat = self.pop_probmat()
        #self.mat_inv = np.linalg.inv(self.mat)
self.bern = np.random.choice([True, False], p=self.problist, size=self.population)
self.ips_ps_pert_aggr = np.zeros(self.sz)
self.rand_cache = {}
self.marg_int = None
self.rand_cache = {}
#inp_trans_menthods.loc[l]=np.array([population,d,len(iway),input_ht_pert,iht_pert_ns_estimate,had_coefs,input_ps,input_rr],dtype="object")
def change_mapping(d):
if d:
return "1"
return "0"
def get_real_data(population,d):
data = pd.read_pickle("data/nyc_taxi_bin_sample.pkl").sample(population,replace=True)
data = data.as_matrix()
f = np.vectorize(change_mapping)
i = data.shape[1]
remainder = d % i
ncopies = d/i
copies = []
j = 0
while j < ncopies:
copies.append(data)
j+=1
#print data[:,range(0,remainder)]
copies.append(data[:,range(0,remainder)])
#rand_perm = np.random.choice(range(0,d),replace=False,size=d)
#print rand_perm
data_high = np.concatenate(tuple(copies),axis=1)#[:,rand_perm]
#print (data_high.shape)
#columns= data.columns.tolist()
#print columns
#data = f(data_high)
return f(data_high).astype("str")
class MARGINAL_VARS(object):
#We cache the set of necessary and sufficient indices to evaluate each <= k way marginal.
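    # (Added example): for d = 3 and beta = 0b011, the cached alphas are {0b000, 0b001, 0b010,
    # 0b011}, i.e. every alpha with (alpha & beta) == alpha; these index the Hadamard/Fourier
    # coefficients needed to reconstruct the marginal over the two bits selected by beta.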
def compute_downward_closure(self):
all_cords = np.array(range(0, np.power(2, self.d)))
## iterate over all possible <=k way marginals.
for beta in self.allsubsetsint:
marg_str = bin(beta)[2:]
marg_str = "0" * (self.d - len(marg_str)) + marg_str
parity = np.power(2, count_1(beta))
alphas = np.zeros(parity, dtype="int64")
cnt = 0
for alpha in all_cords:
if np.bitwise_and(alpha, beta) == alpha:
alphas[cnt] = alpha
cnt += 1
            ### we add marginals in string format as well, in case they are needed.
self.alphas_cache[marg_str] = {"alphas": alphas, "probps": ((self.e_eps - 1.0) / (parity + self.e_eps - 1.0))}
self.alphas_cache[beta] = {"alphas": alphas, "probps": ((self.e_eps - 1.0) / (parity + self.e_eps - 1.0))}
## This method finds the set of <=k way marginal indices i.e. list of all subsets of length <=k from d.
def get_k_way_marginals(self):
j = 0
marginal = np.array(["0"] * self.d)
while j <= self.k:
subsets = list(itertools.combinations(range(0, self.d), j))
subsets = np.array([list(elem) for elem in subsets])
for s in subsets:
marginal.fill("0")
for b in s:
marginal[b] = "1"
self.allsubsetsint.append(int("".join(marginal)[::-1], 2))
if j == self.k:
# k_way.append(int("".join(marginal),2))
self.k_way.append("".join(marginal)[::-1])
self.k_way_bit_pos.append(s)
# print s,marginal,"".join(marginal)
j += 1
self.allsubsetsint = np.array(self.allsubsetsint, dtype="int64")
self.k_way = np.array(self.k_way, dtype="str")
self.k_way_bit_pos = np.array(self.k_way_bit_pos, dtype="int64")
self.allsubsetsint.sort()
#print (self.allsubsetsint)
## We tie marginals indices and corresponding bit positions together.
#print (dict(zip(self.k_way, self.k_way_bit_pos)))
return dict(zip(self.k_way, self.k_way_bit_pos))
def __init__(self,d,k,e_eps):
self.d = d
self.k = k
self.input_dist = np.zeros(np.power(2, self.d))
self.allsubsetsint = []
self.k_way = []
self.k_way_bit_pos = []
self.e_eps = e_eps
#self.f = hadamard(np.power(2,self.d)).astype("float64")
self.f = {}
self.alphas_cache = {}
self.k_way_bit_pos_dict =self.get_k_way_marginals()
self.cls = self.allsubsetsint.shape[0]
self.coef_dict = dict(zip(self.allsubsetsint, np.array(range(0, self.cls), dtype="int64")))
self.compute_downward_closure()
'''
Main driver routine that accepts all parameters and
runs perturbation simulation.
'''
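# Illustrative call (added; the values follow vary_k() below and are otherwise assumptions):
#   mv = MARGINAL_VARS(d=9, k=3, e_eps=3.0)
#   errs = driver(9, 3, 3.0, 100000, mv)   # 9-tuple of averaged L1 errors, one per mechanism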
def driver(d,k,e_eps,population,misc_vars):
width = 256
no_hash = 5
###### Use the NYC Taxi data.
#data = get_real_data(population, d)
####### Use synthetic data if you don't have the taxi data. ########
data = np.random.choice(["1","0"],p=[0.3,0.7],size=d*population).reshape(population,d)
misc_vars.input_dist.fill(0.0)
##### Input Based Algorithms ########
iht_obj = INPUT_HT(d, k, e_eps, population, misc_vars)
ips_obj = INPUT_PS(d, k, e_eps, population, misc_vars)
irr_obj = INPUT_RR(e_eps, d, population)
iolh_obj = INPUT_OLH(e_eps, d, population)
icms_obj = INPUT_CMS(width, no_hash,population,e_eps,d)
icmsht_obj = INPUT_HTCMS(width, no_hash,population,e_eps,d)
############ Marginal Based Algorithms #########
mps_obj = MARG_PS(d, k, e_eps, population, misc_vars.k_way)
mrr_obj = MARG_RR(d, k, e_eps, population, misc_vars.k_way)
mht_obj = MARG_HT(d, k, e_eps, population, misc_vars.k_way, misc_vars.cls)
p = 0
while p < population:
x = data[p]
index_of_1 = int("".join(x), 2)
misc_vars.input_dist[index_of_1] += 1.0
############# input_RR###############
#irr_obj.perturb(index_of_1,p)
#irr_obj.perturb2()
#########################input-PS #################################
ips_obj.perturb2(index_of_1,p)
########################################
iht_obj.perturb(index_of_1, p)
##########################INPUT_OLH ###############################
#INPUT_OLH is a compute intense scheme. Hence we don't run it for larger d's.
if d < 10:
iolh_obj.perturb(index_of_1,p)
##########################inp_CMS ########################
icms_obj.perturb(index_of_1,p)
##########################inp_HTCMS ########################
icmsht_obj.perturb(index_of_1,p)
########### marg-ps ###########
rand_questions = mps_obj.k_way_marg_ps[p]
responses = misc_vars.k_way_bit_pos_dict[rand_questions]
# print rand_questions,responses
index_of_1 = int("".join(data[p][responses]), 2)
mps_obj.perturb(index_of_1, p, rand_questions)
######################### marg-ht ############################
rand_questions = mht_obj.k_way_marg_ps[p]
responses = misc_vars.k_way_bit_pos_dict[rand_questions]
# print rand_quests,responses
index_of_1 = int("".join(data[p][responses]), 2)
mht_obj.perturb(index_of_1, p, rand_questions)
######################### marg-rs #################################
rand_questions = mrr_obj.k_way_marg_ps[p]
responses = misc_vars.k_way_bit_pos_dict[rand_questions]
index_of_1 = int("".join(data[p][responses]), 2)
mrr_obj.perturb3(index_of_1, p, rand_questions)
p += 1
irr_obj.correction3(misc_vars)
#irr_obj.correction2(misc_vars)
misc_vars.input_dist /= population
#irr_obj.correction()
#print (misc_vars.input_dist.round(4))
ips_obj.correction()
iht_obj.correction()
if d < 10:
iolh_obj.correction()
icms_obj.correction()
icmsht_obj.correction()
#print(icmsht_obj.estimate)
mht_obj.correction()
mrr_obj.correction3()
mps_obj.compute_all_marginals()
return compute_marg(misc_vars
, irr_obj.irr
, ips_obj.ips_ps_pert_aggr
, iht_obj.iht_pert_ns_estimate
, iolh_obj.estimate
, mps_obj.marg_dict
, mrr_obj.marg_dict
, mht_obj.marg_dict
, icms_obj.estimate
, icmsht_obj.estimate
)
'''
Call this method when you want to vary k, keeping d and eps fixed.
eps = 1.1
d = 9
'''
def vary_k():
## number of repetitions.
rpt = 5
e_eps = 3.0
d = 9
counter = 0
## dfmean and dfstd store the results. We use them in our plotting script.
l1 = np.zeros((rpt, 9))
dfmean = pd.DataFrame(columns=["population", "d", "k", "e_eps", "irr_l1", "mrr_l1", "iht_l1", "mht_l1", "ips_l1", "mps_l1","iolh_l1","icms_l1","icmsht_l1"])
dfstd = | pd.DataFrame(columns=["irr_l1_std", "mrr_l1_std", "iht_l1_std", "mht_l1_std", "ips_l1_std", "mps_l1_std","iolh_l1_std","icms_l1_std","icmsht_l1_std"]) | pandas.DataFrame |
import os
import numpy as np
import pandas as pd
from scipy import interp
from statsmodels.distributions import ECDF
import matplotlib
matplotlib.use('Agg')
import seaborn as sns
import matplotlib.pyplot as plt
from . import FIGURES_DIR
sns.set(context='notebook', font_scale=3.0, font='sans-serif')
sns.set_palette(sns.color_palette('Set1', n_colors=5)[::-1])
POHMM_COLOR = sns.xkcd_rgb['denim blue']
HMM_COLOR = sns.xkcd_rgb['reddish brown']
def save_fig(name, ext='pdf'):
plt.savefig(os.path.join(FIGURES_DIR, name + '.%s' % ext), bbox_inches='tight')
plt.close()
return
def plot_error(roc):
"""
Plot far and frr as a function of threshold
"""
roc = roc.copy()
min_thresh, max_thresh = roc['threshold'].min(), roc['threshold'].max()
far = roc[['fold', 'threshold', 'far']]
far.columns = ['fold', 'threshold', 'value']
far['Error'] = 'FAR'
frr = roc[['fold', 'threshold', 'frr']]
frr.columns = ['fold', 'threshold', 'value']
frr['Error'] = 'FRR'
roc = pd.DataFrame(pd.concat([frr, far]))
sns.set(style='darkgrid')
sns.set_context('notebook', font_scale=3.0)
plt.figure(figsize=(8, 6))
g = sns.tsplot(roc, time='threshold', unit='fold', condition='Error', value='value', color='cubehelix', ci=95)
g.set_xlabel('Threshold')
g.set_ylabel('Error')
g.set_xlim(min_thresh, max_thresh)
g.set_ylim(0, 1)
plt.legend(title=None, loc='lower left')
return
def plot_roc(roc, condition, pivot):
"""
Plot an roc curve with confidence bands
"""
# Interpolate to get tpr for each fpr
far = np.linspace(0, 1, 1000)
def _interp(df):
df = pd.DataFrame({'far': far, 'frr': interp(far[::-1], df['far'][::-1], df['frr'][::-1])[::-1]})
return df
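    # (Added note): each fold's curve is interpolated onto the common `far` grid above so that
    # tsplot can aggregate FRR values across folds and draw the 95% confidence band.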
if type(roc) == list:
rocs = []
for name, r in roc:
r = r.groupby(pivot).apply(_interp).reset_index(level=1, drop=True).reset_index()
r[condition] = name
rocs.append(r)
roc = pd.concat(rocs)
else:
roc = roc.groupby(pivot).apply(_interp).reset_index(level=1, drop=True).reset_index()
fig = plt.figure(figsize=(8, 8))
ax = fig.add_subplot(111, aspect='equal')
g = sns.tsplot(roc, time='far', unit=pivot, condition=condition, value='frr', ci=95)
g.set_xlabel('False acceptance rate')
g.set_ylabel('False rejection rate')
g.set_xlim(0, 1)
g.set_ylim(0, 1)
plt.legend(title=None)
return
def plot_penalty_example(penalty):
"""
Show the penalty function for genuine and impostor users
"""
genuine_idx = penalty['reference_user'] == penalty['query_user']
genuine = penalty[genuine_idx]
impostor = penalty[~genuine_idx]
thresh = genuine['penalty'].max()
impostor.loc[:, 'type'] = 'Impostor'
sns.set(style='darkgrid')
sns.set_context('notebook', font_scale=3.0)
plt.figure(figsize=(16, 10))
g = sns.tsplot(impostor, time='event_idx', unit='reference_user', condition='type', value='penalty',
color='cubehelix', ci=95)
g.set_xlabel('Event')
g.set_ylabel('Penalty')
plt.plot(genuine['event_idx'], genuine['penalty'], label='Genuine')
plt.axhline(thresh, linestyle='--', color='k', label='Threshold')
plt.legend(title=None, loc='lower right')
plt.xticks(np.linspace(0, 500, 6))
return
def plot_penalty_distribution_example(penalty):
"""
    Show the penalty distribution for genuine and impostor users
"""
genuine_idx = penalty['reference_user'] == penalty['query_user']
genuine = penalty[genuine_idx]
impostor = penalty[genuine_idx == False]
thresh = genuine['penalty'].max()
impostor.loc[:, 'type'] = 'Impostor'
max_penalty = penalty['penalty'].max()
# Add the 0 terms at t=0
genuine = np.concatenate([[0], genuine['penalty'].values])
impostor = np.concatenate([[0] * len(impostor['reference_user'].unique()), impostor['penalty'].values])
sns.kdeplot(genuine, color='g',
shade=True, label='Genuine')
sns.kdeplot(impostor, color='b',
shade=True, label='Impostor')
plt.xlabel('Penalty')
plt.ylabel('Density')
plt.axvline(thresh, linestyle='--', color='k', label='Threshold')
plt.legend(title=None, loc='upper right')
plt.xlim(0, max_penalty)
return
def plot_powerlaw_examples(fits, names):
def plot_fn(ax, i):
fits[i].plot_ccdf(ax=ax, linewidth=1, color='k')
fits[i].lognormal.plot_ccdf(ax=ax, linewidth=1, linestyle=':', color='b')
fits[i].truncated_power_law.plot_ccdf(ax=ax, linewidth=1, linestyle='--', color='r')
ax.text(0.5, 0.01, names[i], va='bottom', ha='center', transform=ax.transAxes, color='gray', fontsize=15,
backgroundcolor=ax.get_axis_bgcolor())
ax.set_xscale('log', base=10)
ax.set_yscale('log', base=10)
return
return plot6(plot_fn, xlabel='$ \\tau $', ylabel='$ \mathbb{P} (\\tau) $', sharey=True)
def plot_powerlaw_exponents(alphas, names, bins):
def plot_fn(ax, i):
sns.distplot(alphas[i], ax=ax, bins=bins, norm_hist=True, color=sns.xkcd_rgb['denim blue'])
ax.text(0.5, 0.95, names[i], va='top', ha='center', transform=ax.transAxes, color='gray', fontsize=15,
backgroundcolor=ax.get_axis_bgcolor())
ax.set_xlim(0, 8)
ax.set_ylim(0, 1.6)
return
return plot6(plot_fn, xlabel='$ \\alpha $', ylabel='$ \\mathbb{P} (\\alpha) $', sharex=True, sharey=True)
def plot_allan_factor_examples(Ts, AFs, names, xlabels):
def plot_fn(ax, i):
ax.plot(Ts[i], AFs[i], color='k', linewidth=0.1)
ax.loglog()
ax.set_xlabel(xlabels[i])
ax.text(0.5, 0.95, names[i], va='top', ha='center', transform=ax.transAxes, color='gray', fontsize=15,
backgroundcolor=ax.get_axis_bgcolor())
return
return plot6(plot_fn, xlabel=None, ylabel='AF$ (T) $', sharey=True)
def plot_uniformity_hists(dfs, names, xlabels, max_ticks=10):
def plot_fn(ax, i):
if i <= 1:
sns.distplot(dfs[i].values, ax=ax, bins=np.linspace(0, 1, 11), norm_hist=True,
color=sns.xkcd_rgb['denim blue'])
ax.set_xlim(0, 1)
ax.set_ylim(0, 2)
elif i == 2:
sns.distplot(dfs[i].values, ax=ax, bins=np.linspace(0, 1, 13), norm_hist=True,
color=sns.xkcd_rgb['denim blue'])
ax.set_xlim(0, 1)
ax.set_ylim(0, 2)
ax.set_xticks([0, 0.25, 0.5, 0.75, 1])
ax.set_xticklabels(['00:00', '06:00', '12:00', '18:00', '24:00'])
else:
sns.barplot(dfs[i].index.values, dfs[i].values, color=sns.xkcd_rgb['denim blue'])
ax.set_xticklabels(['M', 'T', 'W', 'Th', 'F', 'Sa', 'Su'])
ax.set_ylim(0, 0.3)
ax.set_xlabel(xlabels[i])
ax.text(0.5, 0.95, names[i], va='top', ha='center', transform=ax.transAxes, color='gray', fontsize=15,
backgroundcolor=ax.get_axis_bgcolor())
return
return plot6(plot_fn, xlabel=None, ylabel='Density')
def plot_stationarity_examples(m, names):
import matplotlib.cm as cm
def plot_fn(ax, i):
plt.grid(False)
plt.imshow(m[i], origin='lower', interpolation='none', cmap=cm.Greys,
extent=[0.5, m[i].shape[0] + 0.5, 0.5, m[i].shape[1] + 0.5])
ax.set_xticks(np.arange(1, m[i].shape[0] + 1))
ax.set_yticks(np.arange(1, m[i].shape[1] + 1))
plt.clim(m[i].values.mean() - 4 * m[i].values.std(), m[i].values.mean() + 4 * m[i].values.std())
ax.text(0.5, 0.95, names[i], va='top', ha='center', transform=ax.transAxes, color='black', fontsize=15)
return
return plot6(plot_fn, xlabel='Train sample', ylabel='Predict sample')
def plot_model_empirical_pdf(df, m, xlim):
states = m.predict_states_df(df)['state']
tau_0 = df.loc[states == 0, 'tau'].values
tau_1 = df.loc[states == 1, 'tau'].values
upper = xlim
x = np.linspace(0, upper, 1000)
fun_0 = m.pdf_fn('tau', hstate=0)
fun_1 = m.pdf_fn('tau', hstate=1)
plt.figure(figsize=(8, 6))
plt.plot(x, fun_0(x), label='Active state', color=sns.xkcd_rgb['denim blue'])
plt.hist(tau_0, bins=np.linspace(0, upper, 21), normed=True, color=sns.xkcd_rgb['denim blue'], alpha=0.3)
plt.plot(x, fun_1(x), label='Passive state', color=sns.xkcd_rgb['light red'])
plt.hist(tau_1, bins=np.linspace(0, upper, 21), normed=True, color=sns.xkcd_rgb['light red'], alpha=0.3)
plt.xticks(np.linspace(0, upper, 5))
plt.xlim(0, upper)
plt.xlabel('$ \\tau $ (ms)')
plt.ylabel('Density')
plt.legend(loc='upper right')
return
def plot_marginal_pdf_examples(dfs, names, xlims):
def plot_fn(ax, i):
m, df = dfs[i]
_, states = m.predict_states_df(df)
tau_0 = df.loc[states == 0, 'tau'].values
tau_1 = df.loc[states == 1, 'tau'].values
upper = xlims[i]
x = np.linspace(0, upper, 1000)
fun_0 = m.pdf_fn('tau', hstate=0)
fun_1 = m.pdf_fn('tau', hstate=1)
ax.plot(x, fun_0(x), label='State 0', color=sns.xkcd_rgb['denim blue'])
ax.hist(tau_0, bins=np.linspace(0, upper, 21), normed=True, color=sns.xkcd_rgb['denim blue'], alpha=0.3)
ax.plot(x, fun_1(x), label='State 1', color=sns.xkcd_rgb['light red'])
ax.hist(tau_1, bins=np.linspace(0, upper, 21), normed=True, color=sns.xkcd_rgb['light red'], alpha=0.3)
ax.text(0.5, 0.95, names[i], va='top', ha='center', transform=ax.transAxes, color='gray', fontsize=15)
ax.set_xticks(np.linspace(0, upper, 5))
ax.set_xlim(0, upper)
return
return plot6(plot_fn, xlabel='$ \\tau $', ylabel='Density')
def plot6(plot_fn, xlabel=None, ylabel=None, figsize=(12, 6), sharex=None, sharey=None):
"""
Generic function to make a 3x2 plot
"""
fig = plt.figure(figsize=figsize)
for i in range(6):
if i == 0:
ax = fig.add_subplot(2, 3, i + 1)
if sharex:
sharex = ax
if sharey:
sharey = ax
else:
ax = fig.add_subplot(2, 3, i + 1, sharex=sharex, sharey=sharey)
plot_fn(ax, i)
if xlabel is not None and (not sharex or i >= 3):
ax.set_xlabel(xlabel)
if ylabel is not None and (not sharey or i % 3 == 0):
ax.set_ylabel(ylabel)
plt.tight_layout()
return
def gen_roc():
from sklearn import svm, datasets
from sklearn.metrics import roc_curve, auc
from sklearn.cross_validation import train_test_split
from sklearn.preprocessing import label_binarize
from sklearn.multiclass import OneVsRestClassifier
# Import some data to play with
iris = datasets.load_iris()
X = iris.data
y = iris.target
# Binarize the output
y = label_binarize(y, classes=[0, 1, 2])
n_classes = y.shape[1]
# Add noisy features to make the problem harder
random_state = np.random.RandomState(0)
n_samples, n_features = X.shape
X = np.c_[X, random_state.randn(n_samples, 200 * n_features)]
# shuffle and split training and test sets
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=.5,
random_state=0)
# Learn to predict each class against the other
classifier = OneVsRestClassifier(svm.SVC(kernel='linear', probability=True,
random_state=random_state))
y_score = classifier.fit(X_train, y_train).decision_function(X_test)
# Compute ROC curve and ROC area for each class
fpr = dict()
tpr = dict()
thresh = dict()
roc_auc = dict()
for i in range(n_classes):
fpr[i], tpr[i], thresh[i] = roc_curve(y_test[:, i], y_score[:, i])
roc_auc[i] = auc(fpr[i], tpr[i])
far = fpr[0]
frr = 1 - tpr[0]
roc = | pd.DataFrame({'threshold': thresh[0], 'far': far, 'frr': frr}) | pandas.DataFrame |
import pandas as pd
import math
from sklearn.preprocessing import MinMaxScaler
class DataProcessor:
def __init__(self):
self.df_train = None
self.df_test = None
self.df_store = None
self.scale_y = None
'''importing data'''
def load_data(self, path):
self.df_train = pd.read_csv(path+'/train.csv', parse_dates=['Date'], dtype={'StateHoliday': 'category'})
self.df_test = pd.read_csv(path+'/test.csv', parse_dates=['Date'], dtype={'StateHoliday': 'category'})
self.df_store = pd.read_csv(path+'/store.csv')
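    # Illustrative usage (added; the directory name and n_input value are assumptions, but the
    # three CSV file names are those expected by load_data above):
    #   dp = DataProcessor()
    #   dp.load_data('data/rossmann')
    #   dp.preprocessing(n_input=30)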
'''sorting data to prepare a timeseries'''
def preprocessing(self, n_input):
self.df_train.sort_values('Date', ascending=True, inplace=True)
self.df_test.sort_values('Date', ascending=True, inplace=True)
'''joining train and test data to manipulate them together'''
train_test = pd.concat([self.df_train, self.df_test])
'''to keep the shape same between train and test, filling in na for all test rows'''
train_test.loc[train_test['Sales'].isna(), 'Sales'] = -1
'''Splitting date into day, month, year and weekofyear'''
train_test['Month'] = train_test['Date'].dt.month
train_test['Year'] = train_test['Date'].dt.year
train_test['Day'] = train_test['Date'].dt.day
train_test['WeekOfYear'] = train_test['Date'].dt.weekofyear
'''
# df_open = self.df_train.loc[df_train['Open'] == 0]
# df_open.groupby('DayOfWeek')['Open'].describe()
# df_open = self.df_test.loc[df_test['Open'] == 0]
# df_open.groupby('DayOfWeek')['Open'].describe()
In the test set, values are missing for the Open column. Based on the trend in the train set, shops
mostly remain open on weekdays, so these missing values are filled with 1.
'''
train_test['Open'].fillna(1, inplace=True)
'''store file has 3 missing values for CompetitionDistance, filling in these with the median'''
self.df_store['CompetitionDistance'].fillna(self.df_store['CompetitionDistance'].median(), inplace=True)
'''merging store data with train and test concatenated dataset'''
train_test_merged = pd.merge(train_test, self.df_store, on='Store', how='left')
'''Evaluating CompetitionOpenMonths and PromoOpenMonths'''
train_test_merged['CompetitionOpenMonths'] = 12 * (
train_test_merged['Year'] - train_test_merged['CompetitionOpenSinceYear']) + train_test_merged[
'Month'] - \
train_test_merged['CompetitionOpenSinceMonth']
train_test_merged['PromoOpenMonths'] = 12 * (
train_test_merged['Year'] - train_test_merged['Promo2SinceYear']) + (
train_test_merged['WeekOfYear'] - train_test_merged[
'Promo2SinceWeek']) / 4.0
train_test_merged['CompetitionOpenSinceMonth'].fillna(0, inplace=True)
train_test_merged['CompetitionOpenSinceYear'].fillna(0, inplace=True)
train_test_merged['Promo2SinceWeek'].fillna(0, inplace=True)
train_test_merged['Promo2SinceYear'].fillna(0, inplace=True)
train_test_merged['PromoInterval'].fillna(0, inplace=True)
train_test_merged['CompetitionOpenMonths'].fillna(0, inplace=True)
train_test_merged['PromoOpenMonths'].fillna(0, inplace=True)
'''Splitting train and test for separate evaluation and processing'''
train_data = train_test_merged.loc[:self.df_train.index.size - 1, :]
test_data = train_test_merged.loc[self.df_train.index.size:, :]
'''
#train_data[train_data['Customers'] != 0].groupby(['StoreType', 'DayOfWeek'])['Sales', 'Customers'].sum()
Based on the above result, finding
1. average Sales per storetype per dayofweek
2. average number of customers per storetype per dayofweek
'''
df_avg = pd.DataFrame(train_data[train_data['Customers'] != 0].groupby(['StoreType', 'DayOfWeek']).apply(
lambda x: x['Sales'].sum() / x['Customers'].sum()))
df_avg_cust = pd.DataFrame(
train_data[train_data['Customers'] != 0].groupby(['StoreType', 'DayOfWeek'])['Customers'].mean())
df_avg_cust.columns = ['AvgCustomer']
df_avg.columns = ['AvgSalesPCustomer']
train_data = train_data.merge(df_avg, on=['StoreType', 'DayOfWeek'], how='left')
train_data = train_data.merge(df_avg_cust, on=['StoreType', 'DayOfWeek'], how='left')
test_data = test_data.merge(df_avg, on=['StoreType', 'DayOfWeek'], how='left')
test_data = test_data.merge(df_avg_cust, on=['StoreType', 'DayOfWeek'], how='left')
'''Filling Na'''
test_data['AvgCustomer'].fillna(0, inplace=True)
test_data['AvgSalesPCustomer'].fillna(0, inplace=True)
train_data['AvgCustomer'].fillna(0, inplace=True)
train_data['AvgSalesPCustomer'].fillna(0, inplace=True)
'''With the help of a key map for the months, finding out those months in which promo was active'''
month2str = {1: 'Jan', 2: 'Feb', 3: 'Mar', 4: 'Apr', 5: 'May', 6: 'Jun',
7: 'Jul', 8: 'Aug', 9: 'Sept', 10: 'Oct', 11: 'Nov', 12: 'Dec'}
train_data['monthStr'] = train_data.Month.map(month2str)
test_data['monthStr'] = test_data.Month.map(month2str)
train_data['IsPromoMonth'] = 0
for interval in train_data.PromoInterval.unique():
interval = str(interval)
if interval != '':
for month in interval.split(','):
train_data.loc[
(train_data.monthStr == month) & (train_data.PromoInterval == interval), 'IsPromoMonth'] = 1
test_data['IsPromoMonth'] = 0
for interval in test_data.PromoInterval.unique():
interval = str(interval)
if interval != '':
for month in interval.split(','):
test_data.loc[
(test_data.monthStr == month) & (test_data.PromoInterval == interval), 'IsPromoMonth'] = 1
'''Checking data types at this state to make sure everything in float for model to process
#test_data.dtypes
For StateHoliday, one value is numeric while the others are strings.
To get dummies, the numeric value is converted to a string.
'''
train_data.loc[train_data['StateHoliday'] == 0, 'StateHoliday'] = 'd'
test_data.loc[test_data['StateHoliday'] == 0, 'StateHoliday'] = 'd'
train_data = pd.get_dummies(train_data, columns=["StateHoliday", "StoreType", "Assortment"], drop_first=False)
test_data = pd.get_dummies(test_data, columns=["StateHoliday", "StoreType", "Assortment"], drop_first=False)
'''Preparing a list of columns in order, to feed into the model'''
cols_num = ["Sales", "DayOfWeek", "Open", "Promo", "SchoolHoliday", "CompetitionDistance",
"CompetitionOpenSinceMonth", "Promo2",
"Promo2SinceWeek", "AvgSalesPCustomer", "AvgCustomer", "Month", "Day",
"CompetitionOpenMonths", "PromoOpenMonths", "IsPromoMonth", "Store", 'StateHoliday_0',
'StateHoliday_a',
'StateHoliday_b', 'StateHoliday_c', 'StoreType_a', 'StoreType_b',
'StoreType_c', 'StoreType_d', 'Assortment_a', 'Assortment_b', 'Assortment_c']
'''In the test data, StateHoliday types b and c are missing; to keep the shape the same
between the train and test sets, add zero-filled columns'''
test_data['StateHoliday_b'] = 0
test_data['StateHoliday_c'] = 0
'''Forming desired data sets for train and test with all the necessary columns in desired order'''
train_data1 = train_data[cols_num]
test_data1 = test_data[cols_num]
'''Adding data worth n_input size from train to test to get prediction for the time series'''
test_data1 = pd.concat([train_data1.iloc[-n_input:, :], test_data1])
'''Applying min max to normalize the data.
Keeping different fitness function for train features, train label and test features
'''
scale_x = MinMaxScaler()
self.scale_y = MinMaxScaler()
scale_test = MinMaxScaler()
x_train = scale_x.fit_transform(train_data1.astype(float))
y_train = self.scale_y.fit_transform(train_data['Sales'].astype(float).values.reshape(-1, 1))
x_test = scale_test.fit_transform(test_data1.astype(float))
'''Splitting train data into train and validation set'''
split_idx = math.floor(len(x_train) * 0.8)
x_val = x_train[split_idx:]
y_val = y_train[split_idx:]
x_train = x_train[:split_idx]
y_train = y_train[:split_idx]
return x_train, x_val, y_train, y_val, x_test
def generate_result(self, predict, path):
predict1 = self.scale_y.inverse_transform(predict)
df_result = | pd.DataFrame() | pandas.DataFrame |
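# Illustrative usage sketch (not part of the original class): the excerpt of
# generate_result above is truncated; it presumably collects the inverse-scaled
# predictions into a DataFrame and writes them to `path`. The folder name below
# is hypothetical.
def _demo_data_processor():
    processor = DataProcessor()
    processor.load_data('data/rossmann')
    x_train, x_val, y_train, y_val, x_test = processor.preprocessing(n_input=30)
    print(x_train.shape, x_val.shape, x_test.shape)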
import os
import six
import inspect
import threading
import pandas as pd
import json
from tornado.gen import coroutine, Return, sleep
from tornado.httpclient import AsyncHTTPClient
from gramex.config import locate, app_log, merge, variables
import joblib  # sklearn.externals.joblib was removed from modern scikit-learn
from sklearn.preprocessing import StandardScaler
# Expose joblib.load via gramex.ml
load = joblib.load # noqa
class Classifier(object):
'''
:arg data DataFrame: data to train / re-train the model with
:arg model_class str: model class to use (default: ``sklearn.naive_bayes.BernoulliNB``)
:arg model_kwargs dict: kwargs to pass to model class constructor (defaults: ``{}``)
:arg output str: output column name (default: last column in training data)
:arg input list: input column names (default: all columns except ``output``)
:arg labels list: list of possible output values (default: unique ``output`` in training)
'''
def __init__(self, **kwargs):
vars(self).update(kwargs)
self.model_class = kwargs.get('model_class', 'sklearn.naive_bayes.BernoulliNB')
self.trained = False # Boolean Flag
def __str__(self):
return repr(vars(self))
def update_params(self, params):
model_keys = ('model_class', 'url', 'input', 'output', 'trained', 'query', 'model_kwargs')
model_params = {k: v[0] if isinstance(v, list) and k != 'input' else v
for k, v in params.items() if k in model_keys}
if model_params:
self.trained = params.get('trained', False)
vars(self).update(model_params)
def train(self, data):
'''
:arg data DataFrame: data to train / re-train the model with
:arg model_class str: model class to use (default: ``sklearn.naive_bayes.BernoulliNB``)
:arg model_kwargs dict: kwargs to pass to model class constructor (defaults: ``{}``)
:arg output str: output column name (default: last column in training data)
:arg input list: input column names (default: all columns except ``output``)
:arg labels list: list of possible output values (default: unique ``output`` in training)
Notes:
- If model has already been trained, extend the model. Else create it
'''
self.output = vars(self).get('output', data.columns[-1])
self.input = vars(self).get('input', list(data.columns[:-1]))
self.model_kwargs = vars(self).get('model_kwargs', {})
self.labels = vars(self).get('labels', None)
# If model_kwargs have changed since we trained last, re-train model.
if not self.trained and hasattr(self, 'model'):
vars(self).pop('model')
if not hasattr(self, 'model'):
# Split it into input (x) and output (y)
x, y = data[self.input], data[self.output]
# Transform the data
self.scaler = StandardScaler()
self.scaler.fit(x)
# Train the classifier. Partially, if possible
try:
clf = locate(self.model_class)(**self.model_kwargs)
except TypeError:
raise ValueError('{0} is not a correct model class'.format(self.model_class))
if self.labels and hasattr(clf, 'partial_fit'):
try:
clf.partial_fit(self.scaler.transform(x),
y, classes=self.labels)
except AttributeError:
raise ValueError('{0} does not support partial fit'.format(self.model_class))
else:
clf.fit(self.scaler.transform(x), y)
self.model = clf
# Extend the model
else:
x, y = data[self.input], data[self.output]
classes = set(self.model.classes_)
classes |= set(y)
self.model.partial_fit(self.scaler.transform(x), y)
def predict(self, data):
'''
Return a Series that has the results of the classification of data
'''
# Convert list of lists or numpy arrays into DataFrame. Assume columns are as per input
if not isinstance(data, pd.DataFrame):
data = pd.DataFrame(data, columns=self.input)
# Take only trained input columns
return self.model.predict(self.scaler.transform(data))
def save(self, path):
'''
Serializes the model and associated parameters
'''
joblib.dump(self, path, compress=9)
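# Usage sketch (illustrative): train on a tiny frame, predict on new rows and
# persist the model. Column names and the file path are hypothetical.
def _demo_classifier():
    df = pd.DataFrame({'x1': [0, 1, 0, 1], 'x2': [1, 1, 0, 0],
                       'y': ['a', 'b', 'a', 'b']})
    clf = Classifier(input=['x1', 'x2'], output='y')
    clf.train(df)
    print(clf.predict([[0, 1], [1, 0]]))
    clf.save('model.pkl')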
def _conda_r_home():
'''
Returns the R home directory for Conda R if it is installed. Else None.
Typically, people install Conda AND R (in any order), and use the system R
(rather than the conda R) by placing it before Conda in the PATH.
But the system R does not work with Conda rpy2. So we check if Conda R
exists and return its path, so that it can be used as R_HOME.
'''
try:
from conda.base.context import context
except ImportError:
app_log.error('Anaconda not installed. Cannot use Anaconda R')
return None
r_home = os.path.normpath(os.path.join(context.root_prefix, 'lib', 'R'))
if os.path.isdir(os.path.join(r_home, 'bin')):
return r_home
app_log.error('Anaconda R not installed')
return None
def r(code=None, path=None, rel=True, conda=True, convert=True,
repo='https://cran.microsoft.com/', **kwargs):
'''
Runs the R script and returns the result.
:arg str code: R code to execute.
:arg str path: R script path. Cannot be used if code is specified
:arg bool rel: True treats path as relative to the caller function's file
:arg bool conda: True overrides R_HOME to use the Conda R
:arg bool convert: True converts R objects to Pandas and vice versa
:arg str repo: CRAN repo URL
All other keyword arguments as passed as parameters
'''
# Use Conda R if possible
if conda:
r_home = _conda_r_home()
if r_home:
os.environ['R_HOME'] = r_home
# Import the global R session
try:
from rpy2.robjects import r, pandas2ri, globalenv
except ImportError:
app_log.error('rpy2 not installed. Run "conda install rpy2"')
raise
except RuntimeError:
app_log.error('Cannot find R. Set R_HOME env variable')
raise
# Set a repo so that install.packages() need not ask for one
r('local({r <- getOption("repos"); r["CRAN"] <- "%s"; options(repos = r)})' % repo)
# Activate or de-activate automatic conversion
# https://pandas.pydata.org/pandas-docs/version/0.22.0/r_interface.html
if convert:
pandas2ri.activate()
else:
pandas2ri.deactivate()
# Pass all other kwargs as global environment variables
for key, val in kwargs.items():
globalenv[key] = val
if code and path:
raise RuntimeError('Use r(code=) or r(path=...), not both')
if path:
# if rel=True, load path relative to parent directory
if rel:
stack = inspect.getouterframes(inspect.currentframe(), 2)
folder = os.path.dirname(os.path.abspath(stack[1][1]))
path = os.path.join(folder, path)
result = r.source(path, chdir=True)
# source() returns a withVisible: $value and $visible. Use only the first
result = result[0]
else:
result = r(code)
return result
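# Usage sketch (illustrative): run a one-line R expression, or source a script
# with keyword arguments exposed as R globals. The script path is hypothetical
# and rpy2 must be installed for this to run.
def _demo_r():
    total = r(code='sum(1:10)')          # simple expression
    fit = r(path='analyse.R', n=100)     # `n` is available as a global inside analyse.R
    return total, fit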
def groupmeans(data, groups, numbers, cutoff=.01, quantile=.95, minsize=None,
weight=None):
'''
Yields the significant differences in average between every pair of
groups and numbers.
:arg DataFrame data: pandas.DataFrame to analyze
:arg list groups: category column names to group data by
:arg list numbers: numeric column names in to summarize data by
:arg float cutoff: ignore anything with prob > cutoff.
cutoff=None ignores significance checks, speeding it up a LOT.
:arg float quantile: number that represents target improvement. Defaults to .95.
The ``diff`` returned is the % impact of everyone moving to the 95th
percentile
:arg int minsize: each group should contain at least minsize values.
If minsize=None, automatically set the minimum size to
1% of the dataset, or 10, whichever is larger.
'''
from scipy.stats.mstats import ttest_ind
if minsize is None:
minsize = max(len(data.index) // 100, 10)
if weight is None:
means = data[numbers].mean()
else:
means = weighted_avg(data, numbers, weight)
results = []
for group in groups:
grouped = data.groupby(group, sort=False)
if weight is None:
ave = grouped[numbers].mean()
else:
ave = grouped.apply(lambda v: weighted_avg(v, numbers, weight))
ave['#'] = sizes = grouped.size()
# Each group should contain at least minsize values
biggies = sizes[sizes >= minsize].index
# ... and at least 2 groups overall, to compare.
if len(biggies) < 2:
continue
for number in numbers:
if number == group:
continue
sorted_cats = ave[number][biggies].dropna().sort_values()
if len(sorted_cats) < 2:
continue
lo = data[number][grouped.groups[sorted_cats.index[0]]].values
hi = data[number][grouped.groups[sorted_cats.index[-1]]].values
_, prob = ttest_ind(
pd.np.ma.masked_array(lo, pd.np.isnan(lo)),
pd.np.ma.masked_array(hi, pd.np.isnan(hi)))
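# Note: the groupmeans excerpt is truncated here; in the full implementation the
# significant (group, number) pairs are collected and returned. A call would look
# roughly like the following (column names are hypothetical):
#
#     diffs = groupmeans(sales_df, groups=['region'], numbers=['revenue'])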
import datetime
import hashlib
import os
import time
from warnings import (
catch_warnings,
simplefilter,
)
import numpy as np
import pytest
import pandas as pd
from pandas import (
DataFrame,
DatetimeIndex,
Index,
MultiIndex,
Series,
Timestamp,
concat,
date_range,
timedelta_range,
)
import pandas._testing as tm
from pandas.tests.io.pytables.common import (
_maybe_remove,
ensure_clean_path,
ensure_clean_store,
safe_close,
)
_default_compressor = "blosc"
ignore_natural_naming_warning = pytest.mark.filterwarnings(
"ignore:object name:tables.exceptions.NaturalNameWarning"
)
from pandas.io.pytables import (
HDFStore,
read_hdf,
)
pytestmark = pytest.mark.single_cpu
def test_context(setup_path):
with tm.ensure_clean(setup_path) as path:
try:
with HDFStore(path) as tbl:
raise ValueError("blah")
except ValueError:
pass
with tm.ensure_clean(setup_path) as path:
with HDFStore(path) as tbl:
tbl["a"] = tm.makeDataFrame()
assert len(tbl) == 1
assert type(tbl["a"]) == DataFrame
def test_no_track_times(setup_path):
# GH 32682
# allows setting track_times (see the `pytables` `create_table` documentation)
def checksum(filename, hash_factory=hashlib.md5, chunk_num_blocks=128):
h = hash_factory()
with open(filename, "rb") as f:
for chunk in iter(lambda: f.read(chunk_num_blocks * h.block_size), b""):
h.update(chunk)
return h.digest()
def create_h5_and_return_checksum(track_times):
with ensure_clean_path(setup_path) as path:
df = DataFrame({"a": [1]})
with HDFStore(path, mode="w") as hdf:
hdf.put(
"table",
df,
format="table",
data_columns=True,
index=None,
track_times=track_times,
)
return checksum(path)
checksum_0_tt_false = create_h5_and_return_checksum(track_times=False)
checksum_0_tt_true = create_h5_and_return_checksum(track_times=True)
# sleep is necessary to create h5 with different creation time
time.sleep(1)
checksum_1_tt_false = create_h5_and_return_checksum(track_times=False)
checksum_1_tt_true = create_h5_and_return_checksum(track_times=True)
# checksums are the same if track_time = False
assert checksum_0_tt_false == checksum_1_tt_false
# checksums are NOT same if track_time = True
assert checksum_0_tt_true != checksum_1_tt_true
def test_iter_empty(setup_path):
with ensure_clean_store(setup_path) as store:
# GH 12221
assert list(store) == []
def test_repr(setup_path):
with ensure_clean_store(setup_path) as store:
repr(store)
store.info()
store["a"] = tm.makeTimeSeries()
store["b"] = tm.makeStringSeries()
store["c"] = tm.makeDataFrame()
df = tm.makeDataFrame()
df["obj1"] = "foo"
df["obj2"] = "bar"
df["bool1"] = df["A"] > 0
df["bool2"] = df["B"] > 0
df["bool3"] = True
df["int1"] = 1
df["int2"] = 2
df["timestamp1"] = Timestamp("20010102")
df["timestamp2"] = Timestamp("20010103")
df["datetime1"] = datetime.datetime(2001, 1, 2, 0, 0)
df["datetime2"] = datetime.datetime(2001, 1, 3, 0, 0)
df.loc[df.index[3:6], ["obj1"]] = np.nan
df = df._consolidate()._convert(datetime=True)
with catch_warnings(record=True):
simplefilter("ignore", pd.errors.PerformanceWarning)
store["df"] = df
# make a random group in hdf space
store._handle.create_group(store._handle.root, "bah")
assert store.filename in repr(store)
assert store.filename in str(store)
store.info()
# storers
with ensure_clean_store(setup_path) as store:
df = tm.makeDataFrame()
store.append("df", df)
s = store.get_storer("df")
repr(s)
str(s)
@pytest.mark.filterwarnings("ignore:object name:tables.exceptions.NaturalNameWarning")
def test_contains(setup_path):
with ensure_clean_store(setup_path) as store:
store["a"] = tm.makeTimeSeries()
store["b"] = tm.makeDataFrame()
store["foo/bar"] = tm.makeDataFrame()
assert "a" in store
assert "b" in store
assert "c" not in store
assert "foo/bar" in store
assert "/foo/bar" in store
assert "/foo/b" not in store
assert "bar" not in store
# gh-2694: tables.NaturalNameWarning
with catch_warnings(record=True):
store["node())"] = tm.makeDataFrame()
assert "node())" in store
def test_versioning(setup_path):
with ensure_clean_store(setup_path) as store:
store["a"] = tm.makeTimeSeries()
store["b"] = tm.makeDataFrame()
df = tm.makeTimeDataFrame()
_maybe_remove(store, "df1")
store.append("df1", df[:10])
store.append("df1", df[10:])
assert store.root.a._v_attrs.pandas_version == "0.15.2"
assert store.root.b._v_attrs.pandas_version == "0.15.2"
assert store.root.df1._v_attrs.pandas_version == "0.15.2"
# write a file and wipe its versioning
_maybe_remove(store, "df2")
store.append("df2", df)
# this is an error because its table_type is appendable, but no
# version info
store.get_node("df2")._v_attrs.pandas_version = None
msg = "'NoneType' object has no attribute 'startswith'"
with pytest.raises(Exception, match=msg):
store.select("df2")
@pytest.mark.parametrize(
"where, expected",
[
(
"/",
{
"": ({"first_group", "second_group"}, set()),
"/first_group": (set(), {"df1", "df2"}),
"/second_group": ({"third_group"}, {"df3", "s1"}),
"/second_group/third_group": (set(), {"df4"}),
},
),
(
"/second_group",
{
"/second_group": ({"third_group"}, {"df3", "s1"}),
"/second_group/third_group": (set(), {"df4"}),
},
),
],
)
def test_walk(where, expected):
# GH10143
objs = {
"df1": DataFrame([1, 2, 3]),
"df2": DataFrame([4, 5, 6]),
"df3": DataFrame([6, 7, 8]),
"df4": DataFrame([9, 10, 11]),
"s1": Series([10, 9, 8]),
# Next 3 items aren't pandas objects and should be ignored
"a1": np.array([[1, 2, 3], [4, 5, 6]]),
"tb1": np.array([(1, 2, 3), (4, 5, 6)], dtype="i,i,i"),
"tb2": np.array([(7, 8, 9), (10, 11, 12)], dtype="i,i,i"),
}
with ensure_clean_store("walk_groups.hdf", mode="w") as store:
store.put("/first_group/df1", objs["df1"])
store.put("/first_group/df2", objs["df2"])
store.put("/second_group/df3", objs["df3"])
store.put("/second_group/s1", objs["s1"])
store.put("/second_group/third_group/df4", objs["df4"])
# Create non-pandas objects
store._handle.create_array("/first_group", "a1", objs["a1"])
store._handle.create_table("/first_group", "tb1", obj=objs["tb1"])
store._handle.create_table("/second_group", "tb2", obj=objs["tb2"])
assert len(list(store.walk(where=where))) == len(expected)
for path, groups, leaves in store.walk(where=where):
assert path in expected
expected_groups, expected_frames = expected[path]
assert expected_groups == set(groups)
assert expected_frames == set(leaves)
for leaf in leaves:
frame_path = "/".join([path, leaf])
obj = store.get(frame_path)
if "df" in leaf:
tm.assert_frame_equal(obj, objs[leaf])
else:
tm.assert_series_equal(obj, objs[leaf])
def test_getattr(setup_path):
with ensure_clean_store(setup_path) as store:
s = tm.makeTimeSeries()
store["a"] = s
# test attribute access
result = store.a
tm.assert_series_equal(result, s)
result = getattr(store, "a")
tm.assert_series_equal(result, s)
df = tm.makeTimeDataFrame()
store["df"] = df
result = store.df
tm.assert_frame_equal(result, df)
# errors
for x in ["d", "mode", "path", "handle", "complib"]:
msg = f"'HDFStore' object has no attribute '{x}'"
with pytest.raises(AttributeError, match=msg):
getattr(store, x)
# not stores
for x in ["mode", "path", "handle", "complib"]:
getattr(store, f"_{x}")
def test_store_dropna(setup_path):
df_with_missing = DataFrame(
{"col1": [0.0, np.nan, 2.0], "col2": [1.0, np.nan, np.nan]},
index=list("abc"),
)
df_without_missing = DataFrame(
{"col1": [0.0, 2.0], "col2": [1.0, np.nan]}, index=list("ac")
)
# # Test to make sure defaults are to not drop.
# # Corresponding to Issue 9382
with ensure_clean_path(setup_path) as path:
df_with_missing.to_hdf(path, "df", format="table")
reloaded = read_hdf(path, "df")
tm.assert_frame_equal(df_with_missing, reloaded)
with ensure_clean_path(setup_path) as path:
df_with_missing.to_hdf(path, "df", format="table", dropna=False)
reloaded = read_hdf(path, "df")
tm.assert_frame_equal(df_with_missing, reloaded)
with ensure_clean_path(setup_path) as path:
df_with_missing.to_hdf(path, "df", format="table", dropna=True)
reloaded = read_hdf(path, "df")
tm.assert_frame_equal(df_without_missing, reloaded)
def test_to_hdf_with_min_itemsize(setup_path):
with ensure_clean_path(setup_path) as path:
# min_itemsize in index with to_hdf (GH 10381)
df = tm.makeMixedDataFrame().set_index("C")
df.to_hdf(path, "ss3", format="table", min_itemsize={"index": 6})
# just make sure there is a longer string:
df2 = df.copy().reset_index().assign(C="longer").set_index("C")
df2.to_hdf(path, "ss3", append=True, format="table")
tm.assert_frame_equal(read_hdf(path, "ss3"), concat([df, df2]))
# same as above, with a Series
df["B"].to_hdf(path, "ss4", format="table", min_itemsize={"index": 6})
df2["B"].to_hdf(path, "ss4", append=True, format="table")
tm.assert_series_equal(read_hdf(path, "ss4"), concat([df["B"], df2["B"]]))
@pytest.mark.parametrize("format", ["fixed", "table"])
def test_to_hdf_errors(format, setup_path):
data = ["\ud800foo"]
ser = Series(data, index=Index(data))
with ensure_clean_path(setup_path) as path:
# GH 20835
ser.to_hdf(path, "table", format=format, errors="surrogatepass")
result = read_hdf(path, "table", errors="surrogatepass")
tm.assert_series_equal(result, ser)
def test_create_table_index(setup_path):
with ensure_clean_store(setup_path) as store:
with catch_warnings(record=True):
def col(t, column):
return getattr(store.get_storer(t).table.cols, column)
# data columns
df = tm.makeTimeDataFrame()
df["string"] = "foo"
df["string2"] = "bar"
store.append("f", df, data_columns=["string", "string2"])
assert col("f", "index").is_indexed is True
assert col("f", "string").is_indexed is True
assert col("f", "string2").is_indexed is True
# specify index=columns
store.append("f2", df, index=["string"], data_columns=["string", "string2"])
assert col("f2", "index").is_indexed is False
assert col("f2", "string").is_indexed is True
assert col("f2", "string2").is_indexed is False
# try to index a non-table
_maybe_remove(store, "f2")
store.put("f2", df)
msg = "cannot create table index on a Fixed format store"
with pytest.raises(TypeError, match=msg):
store.create_table_index("f2")
def test_create_table_index_data_columns_argument(setup_path):
# GH 28156
with ensure_clean_store(setup_path) as store:
with catch_warnings(record=True):
def col(t, column):
return getattr(store.get_storer(t).table.cols, column)
# data columns
df = tm.makeTimeDataFrame()
df["string"] = "foo"
df["string2"] = "bar"
store.append("f", df, data_columns=["string"])
assert col("f", "index").is_indexed is True
assert col("f", "string").is_indexed is True
msg = "'Cols' object has no attribute 'string2'"
with pytest.raises(AttributeError, match=msg):
col("f", "string2").is_indexed
# try to index a col which isn't a data_column
msg = (
"column string2 is not a data_column.\n"
"In order to read column string2 you must reload the dataframe \n"
"into HDFStore and include string2 with the data_columns argument."
)
with pytest.raises(AttributeError, match=msg):
store.create_table_index("f", columns=["string2"])
def test_mi_data_columns(setup_path):
# GH 14435
idx = MultiIndex.from_arrays(
[date_range("2000-01-01", periods=5), range(5)], names=["date", "id"]
)
df = DataFrame({"a": [1.1, 1.2, 1.3, 1.4, 1.5]}, index=idx)
with ensure_clean_store(setup_path) as store:
store.append("df", df, data_columns=True)
actual = store.select("df", where="id == 1")
expected = df.iloc[[1], :]
tm.assert_frame_equal(actual, expected)
def test_table_mixed_dtypes(setup_path):
# frame
df = tm.makeDataFrame()
df["obj1"] = "foo"
df["obj2"] = "bar"
df["bool1"] = df["A"] > 0
df["bool2"] = df["B"] > 0
df["bool3"] = True
df["int1"] = 1
df["int2"] = 2
df["timestamp1"] = Timestamp("20010102")
df["timestamp2"] = Timestamp("20010103")
df["datetime1"] = datetime.datetime(2001, 1, 2, 0, 0)
df["datetime2"] = datetime.datetime(2001, 1, 3, 0, 0)
df.loc[df.index[3:6], ["obj1"]] = np.nan
df = df._consolidate()._convert(datetime=True)
with ensure_clean_store(setup_path) as store:
store.append("df1_mixed", df)
tm.assert_frame_equal(store.select("df1_mixed"), df)
def test_calendar_roundtrip_issue(setup_path):
# 8591
# doc example from tseries holiday section
weekmask_egypt = "Sun Mon Tue Wed Thu"
holidays = [
"2012-05-01",
datetime.datetime(2013, 5, 1),
np.datetime64("2014-05-01"),
]
bday_egypt = pd.offsets.CustomBusinessDay(
holidays=holidays, weekmask=weekmask_egypt
)
dt = datetime.datetime(2013, 4, 30)
dts = date_range(dt, periods=5, freq=bday_egypt)
s = Series(dts.weekday, dts).map(Series("Mon Tue Wed Thu Fri Sat Sun".split()))
with ensure_clean_store(setup_path) as store:
store.put("fixed", s)
result = store.select("fixed")
tm.assert_series_equal(result, s)
store.append("table", s)
result = store.select("table")
tm.assert_series_equal(result, s)
def test_remove(setup_path):
with ensure_clean_store(setup_path) as store:
ts = tm.makeTimeSeries()
df = tm.makeDataFrame()
store["a"] = ts
store["b"] = df
_maybe_remove(store, "a")
assert len(store) == 1
tm.assert_frame_equal(df, store["b"])
_maybe_remove(store, "b")
assert len(store) == 0
# nonexistence
with pytest.raises(
KeyError, match="'No object named a_nonexistent_store in the file'"
):
store.remove("a_nonexistent_store")
# pathing
store["a"] = ts
store["b/foo"] = df
_maybe_remove(store, "foo")
_maybe_remove(store, "b/foo")
assert len(store) == 1
store["a"] = ts
store["b/foo"] = df
_maybe_remove(store, "b")
assert len(store) == 1
# __delitem__
store["a"] = ts
store["b"] = df
del store["a"]
del store["b"]
assert len(store) == 0
def test_same_name_scoping(setup_path):
with ensure_clean_store(setup_path) as store:
df = DataFrame(np.random.randn(20, 2), index=date_range("20130101", periods=20))
store.put("df", df, format="table")
expected = df[df.index > Timestamp("20130105")]
result = store.select("df", "index>datetime.datetime(2013,1,5)")
tm.assert_frame_equal(result, expected)
# changes what 'datetime' points to in the namespace where
# 'select' does the lookup
from datetime import datetime # noqa:F401
# technically an error, but allow it
result = store.select("df", "index>datetime.datetime(2013,1,5)")
tm.assert_frame_equal(result, expected)
result = store.select("df", "index>datetime(2013,1,5)")
tm.assert_frame_equal(result, expected)
def test_store_index_name(setup_path):
df = tm.makeDataFrame()
df.index.name = "foo"
with ensure_clean_store(setup_path) as store:
store["frame"] = df
recons = store["frame"]
tm.assert_frame_equal(recons, df)
@pytest.mark.parametrize("table_format", ["table", "fixed"])
def test_store_index_name_numpy_str(table_format, setup_path):
# GH #13492
idx = Index(
pd.to_datetime([datetime.date(2000, 1, 1), datetime.date(2000, 1, 2)]),
name="cols\u05d2",
)
idx1 = Index(
pd.to_datetime([datetime.date(2010, 1, 1), datetime.date(2010, 1, 2)]),
name="rows\u05d0",
)
df = DataFrame(np.arange(4).reshape(2, 2), columns=idx, index=idx1)
# This used to fail, returning numpy strings instead of python strings.
with ensure_clean_path(setup_path) as path:
df.to_hdf(path, "df", format=table_format)
df2 = read_hdf(path, "df")
tm.assert_frame_equal(df, df2, check_names=True)
assert type(df2.index.name) == str
assert type(df2.columns.name) == str
def test_store_series_name(setup_path):
df = tm.makeDataFrame()
series = df["A"]
with ensure_clean_store(setup_path) as store:
store["series"] = series
recons = store["series"]
tm.assert_series_equal(recons, series)
@pytest.mark.filterwarnings("ignore:\\nduplicate:pandas.io.pytables.DuplicateWarning")
def test_overwrite_node(setup_path):
with ensure_clean_store(setup_path) as store:
store["a"] = tm.makeTimeDataFrame()
ts = tm.makeTimeSeries()
store["a"] = ts
tm.assert_series_equal(store["a"], ts)
@pytest.mark.filterwarnings(
"ignore:\\nthe :pandas.io.pytables.AttributeConflictWarning"
)
def test_coordinates(setup_path):
df = tm.makeTimeDataFrame()
with ensure_clean_store(setup_path) as store:
_maybe_remove(store, "df")
store.append("df", df)
# all
c = store.select_as_coordinates("df")
assert (c.values == np.arange(len(df.index))).all()
# get coordinates back & test vs frame
_maybe_remove(store, "df")
df = DataFrame({"A": range(5), "B": range(5)})
store.append("df", df)
c = store.select_as_coordinates("df", ["index<3"])
assert (c.values == np.arange(3)).all()
result = store.select("df", where=c)
expected = df.loc[0:2, :]
tm.assert_frame_equal(result, expected)
c = store.select_as_coordinates("df", ["index>=3", "index<=4"])
assert (c.values == np.arange(2) + 3).all()
result = store.select("df", where=c)
expected = df.loc[3:4, :]
tm.assert_frame_equal(result, expected)
assert isinstance(c, Index)
# multiple tables
_maybe_remove(store, "df1")
_maybe_remove(store, "df2")
df1 = tm.makeTimeDataFrame()
df2 = tm.makeTimeDataFrame().rename(columns="{}_2".format)
store.append("df1", df1, data_columns=["A", "B"])
store.append("df2", df2)
c = store.select_as_coordinates("df1", ["A>0", "B>0"])
df1_result = store.select("df1", c)
df2_result = store.select("df2", c)
result = concat([df1_result, df2_result], axis=1)
expected = concat([df1, df2], axis=1)
expected = expected[(expected.A > 0) & (expected.B > 0)]
tm.assert_frame_equal(result, expected, check_freq=False)
# FIXME: 2021-01-18 on some (mostly windows) builds we get freq=None
# but expect freq="18B"
# pass array/mask as the coordinates
with ensure_clean_store(setup_path) as store:
df = DataFrame(
np.random.randn(1000, 2), index=date_range("20000101", periods=1000)
)
store.append("df", df)
c = store.select_column("df", "index")
where = c[DatetimeIndex(c).month == 5].index
expected = df.iloc[where]
# locations
result = store.select("df", where=where)
tm.assert_frame_equal(result, expected)
# boolean
result = store.select("df", where=where)
tm.assert_frame_equal(result, expected)
# invalid
msg = (
"where must be passed as a string, PyTablesExpr, "
"or list-like of PyTablesExpr"
)
with pytest.raises(TypeError, match=msg):
store.select("df", where=np.arange(len(df), dtype="float64"))
with pytest.raises(TypeError, match=msg):
store.select("df", where=np.arange(len(df) + 1))
with pytest.raises(TypeError, match=msg):
store.select("df", where=np.arange(len(df)), start=5)
with pytest.raises(TypeError, match=msg):
store.select("df", where=np.arange(len(df)), start=5, stop=10)
# selection with filter
selection = date_range("20000101", periods=500)
result = store.select("df", where="index in selection")
expected = df[df.index.isin(selection)]
tm.assert_frame_equal(result, expected)
# list
df = DataFrame(np.random.randn(10, 2))
store.append("df2", df)
result = store.select("df2", where=[0, 3, 5])
expected = df.iloc[[0, 3, 5]]
tm.assert_frame_equal(result, expected)
# boolean
where = [True] * 10
where[-2] = False
result = store.select("df2", where=where)
expected = df.loc[where]
tm.assert_frame_equal(result, expected)
# start/stop
result = store.select("df2", start=5, stop=10)
expected = df[5:10]
tm.assert_frame_equal(result, expected)
def test_start_stop_table(setup_path):
with ensure_clean_store(setup_path) as store:
# table
df = DataFrame({"A": np.random.rand(20), "B": np.random.rand(20)})
store.append("df", df)
result = store.select("df", "columns=['A']", start=0, stop=5)
expected = df.loc[0:4, ["A"]]
tm.assert_frame_equal(result, expected)
# out of range
result = store.select("df", "columns=['A']", start=30, stop=40)
assert len(result) == 0
expected = df.loc[30:40, ["A"]]
tm.assert_frame_equal(result, expected)
def test_start_stop_multiple(setup_path):
# GH 16209
with ensure_clean_store(setup_path) as store:
df = DataFrame({"foo": [1, 2], "bar": [1, 2]})
store.append_to_multiple(
{"selector": ["foo"], "data": None}, df, selector="selector"
)
result = store.select_as_multiple(
["selector", "data"], selector="selector", start=0, stop=1
)
expected = df.loc[[0], ["foo", "bar"]]
tm.assert_frame_equal(result, expected)
def test_start_stop_fixed(setup_path):
with ensure_clean_store(setup_path) as store:
# fixed, GH 8287
df = DataFrame(
{"A": np.random.rand(20), "B": np.random.rand(20)},
index=date_range("20130101", periods=20),
)
store.put("df", df)
result = store.select("df", start=0, stop=5)
expected = df.iloc[0:5, :]
tm.assert_frame_equal(result, expected)
result = store.select("df", start=5, stop=10)
expected = df.iloc[5:10, :]
tm.assert_frame_equal(result, expected)
# out of range
result = store.select("df", start=30, stop=40)
expected = df.iloc[30:40, :]
tm.assert_frame_equal(result, expected)
# series
s = df.A
store.put("s", s)
result = store.select("s", start=0, stop=5)
expected = s.iloc[0:5]
tm.assert_series_equal(result, expected)
result = store.select("s", start=5, stop=10)
expected = s.iloc[5:10]
tm.assert_series_equal(result, expected)
# sparse; not implemented
df = tm.makeDataFrame()
df.iloc[3:5, 1:3] = np.nan
df.iloc[8:10, -2] = np.nan
def test_select_filter_corner(setup_path):
df = DataFrame(np.random.randn(50, 100))
df.index = [f"{c:3d}" for c in df.index]
df.columns = [f"{c:3d}" for c in df.columns]
with ensure_clean_store(setup_path) as store:
store.put("frame", df, format="table")
crit = "columns=df.columns[:75]"
result = store.select("frame", [crit])
tm.assert_frame_equal(result, df.loc[:, df.columns[:75]])
crit = "columns=df.columns[:75:2]"
result = store.select("frame", [crit])
tm.assert_frame_equal(result, df.loc[:, df.columns[:75:2]])
def test_path_pathlib():
df = tm.makeDataFrame()
result = tm.round_trip_pathlib(
lambda p: df.to_hdf(p, "df"), lambda p: read_hdf(p, "df")
)
tm.assert_frame_equal(df, result)
@pytest.mark.parametrize("start, stop", [(0, 2), (1, 2), (None, None)])
def test_contiguous_mixed_data_table(start, stop, setup_path):
# GH 17021
df = DataFrame(
{
"a": Series([20111010, 20111011, 20111012]),
"b": Series(["ab", "cd", "ab"]),
}
)
with ensure_clean_store(setup_path) as store:
store.append("test_dataset", df)
result = store.select("test_dataset", start=start, stop=stop)
tm.assert_frame_equal(df[start:stop], result)
def test_path_pathlib_hdfstore():
df = tm.makeDataFrame()
def writer(path):
with HDFStore(path) as store:
df.to_hdf(store, "df")
def reader(path):
with HDFStore(path) as store:
return read_hdf(store, "df")
result = tm.round_trip_pathlib(writer, reader)
tm.assert_frame_equal(df, result)
def test_pickle_path_localpath():
df = tm.makeDataFrame()
result = tm.round_trip_pathlib(
lambda p: df.to_hdf(p, "df"), lambda p: read_hdf(p, "df")
)
tm.assert_frame_equal(df, result)
def test_path_localpath_hdfstore():
df = tm.makeDataFrame()
def writer(path):
with HDFStore(path) as store:
df.to_hdf(store, "df")
def reader(path):
with HDFStore(path) as store:
return read_hdf(store, "df")
result = tm.round_trip_localpath(writer, reader)
tm.assert_frame_equal(df, result)
def test_copy():
with catch_warnings(record=True):
def do_copy(f, new_f=None, keys=None, propindexes=True, **kwargs):
try:
store = HDFStore(f, "r")
if new_f is None:
import tempfile
fd, new_f = tempfile.mkstemp()
tstore = store.copy(new_f, keys=keys, propindexes=propindexes, **kwargs)
# check keys
if keys is None:
keys = store.keys()
assert set(keys) == set(tstore.keys())
# check indices & nrows
for k in tstore.keys():
if tstore.get_storer(k).is_table:
new_t = tstore.get_storer(k)
orig_t = store.get_storer(k)
assert orig_t.nrows == new_t.nrows
# check propindexes
if propindexes:
for a in orig_t.axes:
if a.is_indexed:
assert new_t[a.name].is_indexed
finally:
safe_close(store)
safe_close(tstore)
try:
os.close(fd)
except (OSError, ValueError):
pass
os.remove(new_f) # noqa: PDF008
# new table
df = tm.makeDataFrame()
with tm.ensure_clean() as path:
st = HDFStore(path)
# -*- coding: utf-8 -*-
from __future__ import print_function
import pytest
import random
import numpy as np
import pandas as pd
from pandas.compat import lrange
from pandas.api.types import CategoricalDtype
from pandas import (DataFrame, Series, MultiIndex, Timestamp,
date_range, NaT, IntervalIndex, Categorical)
from pandas.util.testing import assert_series_equal, assert_frame_equal
import pandas.util.testing as tm
from pandas.tests.frame.common import TestData
class TestDataFrameSorting(TestData):
def test_sort_values(self):
frame = DataFrame([[1, 1, 2], [3, 1, 0], [4, 5, 6]],
index=[1, 2, 3], columns=list('ABC'))
# by column (axis=0)
sorted_df = frame.sort_values(by='A')
indexer = frame['A'].argsort().values
expected = frame.loc[frame.index[indexer]]
assert_frame_equal(sorted_df, expected)
sorted_df = frame.sort_values(by='A', ascending=False)
indexer = indexer[::-1]
expected = frame.loc[frame.index[indexer]]
assert_frame_equal(sorted_df, expected)
sorted_df = frame.sort_values(by='A', ascending=False)
assert_frame_equal(sorted_df, expected)
# GH4839
sorted_df = frame.sort_values(by=['A'], ascending=[False])
assert_frame_equal(sorted_df, expected)
# multiple bys
sorted_df = frame.sort_values(by=['B', 'C'])
expected = frame.loc[[2, 1, 3]]
assert_frame_equal(sorted_df, expected)
sorted_df = frame.sort_values(by=['B', 'C'], ascending=False)
assert_frame_equal(sorted_df, expected[::-1])
sorted_df = frame.sort_values(by=['B', 'A'], ascending=[True, False])
assert_frame_equal(sorted_df, expected)
pytest.raises(ValueError, lambda: frame.sort_values(
by=['A', 'B'], axis=2, inplace=True))
# by row (axis=1): GH 10806
sorted_df = frame.sort_values(by=3, axis=1)
expected = frame
assert_frame_equal(sorted_df, expected)
sorted_df = frame.sort_values(by=3, axis=1, ascending=False)
expected = frame.reindex(columns=['C', 'B', 'A'])
assert_frame_equal(sorted_df, expected)
sorted_df = frame.sort_values(by=[1, 2], axis='columns')
expected = frame.reindex(columns=['B', 'A', 'C'])
assert_frame_equal(sorted_df, expected)
sorted_df = frame.sort_values(by=[1, 3], axis=1,
ascending=[True, False])
assert_frame_equal(sorted_df, expected)
sorted_df = frame.sort_values(by=[1, 3], axis=1, ascending=False)
expected = frame.reindex(columns=['C', 'B', 'A'])
assert_frame_equal(sorted_df, expected)
msg = r'Length of ascending \(5\) != length of by \(2\)'
with tm.assert_raises_regex(ValueError, msg):
frame.sort_values(by=['A', 'B'], axis=0, ascending=[True] * 5)
def test_sort_values_inplace(self):
frame = DataFrame(np.random.randn(4, 4), index=[1, 2, 3, 4],
columns=['A', 'B', 'C', 'D'])
sorted_df = frame.copy()
sorted_df.sort_values(by='A', inplace=True)
expected = frame.sort_values(by='A')
assert_frame_equal(sorted_df, expected)
sorted_df = frame.copy()
sorted_df.sort_values(by=1, axis=1, inplace=True)
expected = frame.sort_values(by=1, axis=1)
assert_frame_equal(sorted_df, expected)
sorted_df = frame.copy()
sorted_df.sort_values(by='A', ascending=False, inplace=True)
expected = frame.sort_values(by='A', ascending=False)
assert_frame_equal(sorted_df, expected)
sorted_df = frame.copy()
sorted_df.sort_values(by=['A', 'B'], ascending=False, inplace=True)
expected = frame.sort_values(by=['A', 'B'], ascending=False)
assert_frame_equal(sorted_df, expected)
def test_sort_nan(self):
# GH3917
nan = np.nan
df = DataFrame({'A': [1, 2, nan, 1, 6, 8, 4],
'B': [9, nan, 5, 2, 5, 4, 5]})
# sort one column only
expected = DataFrame(
{'A': [nan, 1, 1, 2, 4, 6, 8],
'B': [5, 9, 2, nan, 5, 5, 4]},
index=[2, 0, 3, 1, 6, 4, 5])
sorted_df = df.sort_values(['A'], na_position='first')
assert_frame_equal(sorted_df, expected)
expected = DataFrame(
{'A': [nan, 8, 6, 4, 2, 1, 1],
'B': [5, 4, 5, 5, nan, 9, 2]},
index=[2, 5, 4, 6, 1, 0, 3])
sorted_df = df.sort_values(['A'], na_position='first', ascending=False)
assert_frame_equal(sorted_df, expected)
expected = df.reindex(columns=['B', 'A'])
sorted_df = df.sort_values(by=1, axis=1, na_position='first')
assert_frame_equal(sorted_df, expected)
# na_position='last', order
expected = DataFrame(
{'A': [1, 1, 2, 4, 6, 8, nan],
'B': [2, 9, nan, 5, 5, 4, 5]},
index=[3, 0, 1, 6, 4, 5, 2])
sorted_df = df.sort_values(['A', 'B'])
assert_frame_equal(sorted_df, expected)
# na_position='first', order
expected = DataFrame(
{'A': [nan, 1, 1, 2, 4, 6, 8],
'B': [5, 2, 9, nan, 5, 5, 4]},
index=[2, 3, 0, 1, 6, 4, 5])
sorted_df = df.sort_values(['A', 'B'], na_position='first')
assert_frame_equal(sorted_df, expected)
# na_position='first', not order
expected = DataFrame(
{'A': [nan, 1, 1, 2, 4, 6, 8],
'B': [5, 9, 2, nan, 5, 5, 4]},
index=[2, 0, 3, 1, 6, 4, 5])
sorted_df = df.sort_values(['A', 'B'], ascending=[
1, 0], na_position='first')
assert_frame_equal(sorted_df, expected)
# na_position='last', not order
expected = DataFrame(
{'A': [8, 6, 4, 2, 1, 1, nan],
'B': [4, 5, 5, nan, 2, 9, 5]},
index=[5, 4, 6, 1, 3, 0, 2])
sorted_df = df.sort_values(['A', 'B'], ascending=[
0, 1], na_position='last')
assert_frame_equal(sorted_df, expected)
# Test DataFrame with nan label
df = DataFrame({'A': [1, 2, nan, 1, 6, 8, 4],
'B': [9, nan, 5, 2, 5, 4, 5]},
index=[1, 2, 3, 4, 5, 6, nan])
# NaN label, ascending=True, na_position='last'
sorted_df = df.sort_index(
kind='quicksort', ascending=True, na_position='last')
expected = DataFrame({'A': [1, 2, nan, 1, 6, 8, 4],
'B': [9, nan, 5, 2, 5, 4, 5]},
index=[1, 2, 3, 4, 5, 6, nan])
assert_frame_equal(sorted_df, expected)
# NaN label, ascending=True, na_position='first'
sorted_df = df.sort_index(na_position='first')
expected = DataFrame({'A': [4, 1, 2, nan, 1, 6, 8],
'B': [5, 9, nan, 5, 2, 5, 4]},
index=[nan, 1, 2, 3, 4, 5, 6])
assert_frame_equal(sorted_df, expected)
# NaN label, ascending=False, na_position='last'
sorted_df = df.sort_index(kind='quicksort', ascending=False)
expected = DataFrame({'A': [8, 6, 1, nan, 2, 1, 4],
'B': [4, 5, 2, 5, nan, 9, 5]},
index=[6, 5, 4, 3, 2, 1, nan])
assert_frame_equal(sorted_df, expected)
# NaN label, ascending=False, na_position='first'
sorted_df = df.sort_index(
kind='quicksort', ascending=False, na_position='first')
expected = DataFrame({'A': [4, 8, 6, 1, nan, 2, 1],
'B': [5, 4, 5, 2, 5, nan, 9]},
index=[nan, 6, 5, 4, 3, 2, 1])
assert_frame_equal(sorted_df, expected)
def test_stable_descending_sort(self):
# GH #6399
df = DataFrame([[2, 'first'], [2, 'second'], [1, 'a'], [1, 'b']],
columns=['sort_col', 'order'])
sorted_df = df.sort_values(by='sort_col', kind='mergesort',
ascending=False)
assert_frame_equal(df, sorted_df)
def test_stable_descending_multicolumn_sort(self):
nan = np.nan
df = DataFrame({'A': [1, 2, nan, 1, 6, 8, 4],
'B': [9, nan, 5, 2, 5, 4, 5]})
# test stable mergesort
expected = DataFrame(
{'A': [nan, 8, 6, 4, 2, 1, 1],
'B': [5, 4, 5, 5, nan, 2, 9]},
index=[2, 5, 4, 6, 1, 3, 0])
sorted_df = df.sort_values(['A', 'B'], ascending=[0, 1],
na_position='first',
kind='mergesort')
assert_frame_equal(sorted_df, expected)
expected = DataFrame(
{'A': [nan, 8, 6, 4, 2, 1, 1],
'B': [5, 4, 5, 5, nan, 9, 2]},
index=[2, 5, 4, 6, 1, 0, 3])
sorted_df = df.sort_values(['A', 'B'], ascending=[0, 0],
na_position='first',
kind='mergesort')
assert_frame_equal(sorted_df, expected)
def test_stable_categorial(self):
# GH 16793
df = DataFrame({
'x': pd.Categorical(np.repeat([1, 2, 3, 4], 5), ordered=True)
})
expected = df.copy()
sorted_df = df.sort_values('x', kind='mergesort')
assert_frame_equal(sorted_df, expected)
def test_sort_datetimes(self):
# GH 3461, argsort / lexsort differences for a datetime column
df = DataFrame(['a', 'a', 'a', 'b', 'c', 'd', 'e', 'f', 'g'],
columns=['A'],
index=date_range('20130101', periods=9))
dts = [Timestamp(x)
for x in ['2004-02-11', '2004-01-21', '2004-01-26',
'2005-09-20', '2010-10-04', '2009-05-12',
'2008-11-12', '2010-09-28', '2010-09-28']]
df['B'] = dts[::2] + dts[1::2]
df['C'] = 2.
df['A1'] = 3.
df1 = df.sort_values(by='A')
df2 = df.sort_values(by=['A'])
assert_frame_equal(df1, df2)
df1 = df.sort_values(by='B')
df2 = df.sort_values(by=['B'])
assert_frame_equal(df1, df2)
df1 = df.sort_values(by='B')
df2 = df.sort_values(by=['C', 'B'])
assert_frame_equal(df1, df2)
def test_frame_column_inplace_sort_exception(self):
s = self.frame['A']
with tm.assert_raises_regex(ValueError, "This Series is a view"):
s.sort_values(inplace=True)
cp = s.copy()
cp.sort_values() # it works!
def test_sort_nat_values_in_int_column(self):
# GH 14922: "sorting with large float and multiple columns incorrect"
# cause was that the int64 value NaT was considered as "na". Which is
# only correct for datetime64 columns.
int_values = (2, int(NaT))
float_values = (2.0, -1.797693e308)
df = DataFrame(dict(int=int_values, float=float_values),
columns=["int", "float"])
df_reversed = DataFrame(dict(int=int_values[::-1],
float=float_values[::-1]),
columns=["int", "float"],
index=[1, 0])
# NaT is not a "na" for int64 columns, so na_position must not
# influence the result:
df_sorted = df.sort_values(["int", "float"], na_position="last")
assert_frame_equal(df_sorted, df_reversed)
df_sorted = df.sort_values(["int", "float"], na_position="first")
assert_frame_equal(df_sorted, df_reversed)
# reverse sorting order
df_sorted = df.sort_values(["int", "float"], ascending=False)
assert_frame_equal(df_sorted, df)
# and now check if NaT is still considered as "na" for datetime64
# columns:
df = DataFrame(dict(datetime=[Timestamp("2016-01-01"), NaT],
float=float_values), columns=["datetime", "float"])
df_reversed = DataFrame(dict(datetime=[NaT, Timestamp("2016-01-01")],
float=float_values[::-1]),
columns=["datetime", "float"],
index=[1, 0])
#!/usr/bin/env python
# coding: utf-8
# <h2>Introduction:</h2>
# This is my first kernel; I have attempted to understand which features contribute to the price of the houses.
# <br>A shoutout to SRK and Anisotropic, from whom I've learned a lot about data visualisation.</br>
# <h2>Let's import the libraries we need for now</h2>
# In[ ]:
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import matplotlib.pyplot as plt
import seaborn as sns
color = sns.color_palette()
get_ipython().run_line_magic('matplotlib', 'inline')
pd.options.mode.chained_assignment = None
pd.options.display.max_columns = 999
import plotly.offline as py
py.init_notebook_mode(connected=True)
import plotly.graph_objs as go
import plotly.tools as tls
#
# <h2>Now we import the dataset</h2>
#
# In[ ]:
data = pd.read_csv('../input/kc_house_data.csv')
# In[ ]:
# Let's check it out
data.head()
# <h2>Now let's check how many NaN values there are</h2>
# In[ ]:
data.isnull().sum()
# Wow! So we don't need to bother with an Imputer or with handling NaN values.
# <br>Now let's take a look at how the data actually looks</br>
# In[ ]:
data.info()
print("**" * 40)
print(data.describe())
# Oops, we forgot to convert the date to datetime; let's get that done first
# In[ ]:
data['date'] = pd.to_datetime(data['date'])
#!/usr/bin/env python
# coding: utf-8
# In[1]:
import requests
import numpy as np
import pandas as pd
from io import BytesIO
import PyPDF2
from bs4 import BeautifulSoup
from functools import reduce
from alphacast import Alphacast
from dotenv import dotenv_values
API_KEY = dotenv_values(".env").get("API_KEY")
alphacast = Alphacast(API_KEY)
# In[2]:
##############################
###### Poultry meat ############
##############################
url_aviar= 'https://www.magyp.gob.ar/sitio/areas/aves/estadistica/carne/index.php'
html_aviar = requests.get(url_aviar).text
# In[3]:
soup_aviar = BeautifulSoup(html_aviar, 'html.parser')
links_aviar = soup_aviar.find_all('a', href=True)
# In[4]:
# Loop over the links to extract the download URL
for link in links_aviar:
if ('Indicadores Oferta y Demanda Carne Aviar' in link) and '.xls' in link.get('href'):
aviar_xls = 'https://www.magyp.gob.ar/sitio/areas/aves/estadistica/carne/' + link.get('href')
# In[5]:
# Download the file, drop fully empty columns and drop rows with no data in the second column
df_aviar = pd.read_excel(requests.utils.requote_uri(aviar_xls), engine='openpyxl')
df_aviar.dropna(how='all', axis=0, inplace=True)
df_aviar.dropna(subset=[df_aviar.columns[1]], inplace=True)
# In[6]:
# Forward-fill the NaNs in the first row, then build the column names
df_aviar.iloc[0].fillna(method='ffill', inplace=True)
df_aviar.columns = df_aviar.iloc[0, :] + ' - ' + df_aviar.iloc[1, :]
# Drop the first 2 rows
df_aviar = df_aviar.iloc[2:, :]
# In[7]:
# Create a column with the year and fill the NaNs
df_aviar['Year'] = pd.to_numeric(df_aviar[df_aviar.columns[0]], errors='coerce')
import pandas as pd
import streamlit as st
import yfinance as yf
@st.experimental_memo(max_entries=1000, show_spinner=False)
def get_asset_splits(ticker, cache_date):
return yf.Ticker(ticker).actions.loc[:, 'Stock Splits']
@st.experimental_memo(max_entries=50, show_spinner=False)
def get_historical_prices(tickers, start, cache_date):
assert len(tickers) == len(set(tickers))
# fix for penny sterling edgecase
if 'GBXUSD=X' in tickers:
index = tickers.index('GBXUSD=X')
if 'GBPUSD=X' not in tickers:
tickers[index] = 'GBPUSD=X'
return (
yf.download(tickers, start=start)
.loc[:, 'Close']
.assign(**{'GBXUSD=X': lambda x: x['GBPUSD=X'] / 100})
.drop(columns='GBPUSD=X')
)
else:
del tickers[index]
return (
yf.download(tickers, start=start)
.loc[:, 'Close']
.assign(**{'GBXUSD=X': lambda x: x['GBPUSD=X'] / 100})
)
return yf.download(tickers, start=start).loc[:, 'Close']
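# Usage sketch (illustrative): `cache_date` exists only to invalidate the
# st.experimental_memo cache once per day, mirroring how get_asset_splits is
# called further below. The start date is hypothetical.
def _demo_get_prices(tickers):
    today = str(pd.Timestamp.now().date())
    return get_historical_prices(list(tickers), start='2020-01-01', cache_date=today)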
def correct_asset_amount_affected_by_split(df: pd.DataFrame, ticker_types: pd.Series):
df = df.copy()
for ticker in df.ticker.unique():
if ticker_types[ticker] == 'CRYPTO':
continue
# there can only be one split a day, so cache_date ensures we only download split data once a day
splits = get_asset_splits(ticker, cache_date=str(pd.Timestamp.now().date()))
for date, split in splits.iteritems():
if split == 0:
continue
df.loc[
lambda x: (pd.to_datetime(x.date) <= date) & (x.ticker == ticker),
'amount'
] *= split
return df
def resample(df, freq):
time_format = '%Y-%m' if freq == 'M' else '%Y-%m-%d'
return (
df
.reset_index()
.groupby(pd.Grouper(key='Date', freq=freq))
.mean()
.reset_index()
.assign(Date=lambda x: x.Date.apply(lambda x: x.strftime(time_format)))
.set_index('Date')
)
def calculate_historical_value(s, purchase_df):
if s.name not in purchase_df.ticker.unique():
return s
ticker_purchase_df = (
purchase_df
.assign(date=lambda x: pd.to_datetime(x.date))
.query(f'ticker == "{s.name}"')
.sort_values('date')
)
output = s * 0
for index, row in ticker_purchase_df.iterrows():
if row['operation'] == 'purchase':
output.loc[row['date']:] += s.loc[row['date']:] * row['amount']
elif row['operation'] == 'sale':
output.loc[row['date']:] -= s.loc[row['date']:] * row['amount']
else:
raise ValueError('unexpected operation')
return output
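# Illustrative sketch of the purchase_df shape assumed by the functions above
# (columns inferred from the code: ticker, date, operation, amount; values are
# hypothetical):
def _demo_purchase_df():
    return pd.DataFrame({
        'ticker': ['AAPL', 'AAPL'],
        'date': ['2021-01-04', '2021-06-01'],
        'operation': ['purchase', 'sale'],
        'amount': [10, 4],
    })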
def calculate_current_assets_from_purchases_and_sales(purchase_df, ticker_info_df):
return (
purchase_df
.groupby('ticker')
.apply(lambda x: x.query('operation == "purchase"').amount.sum() - x.query('operation == "sale"').amount.sum())
.to_frame()
.rename(columns={0: 'amount'})
.join(ticker_info_df, how='left')
.assign(type=lambda x: x.type.str.replace('CRYPTOCURRENCY', 'CRYPTO'))
)
def add_latest_asset_prices(df, historical_prices):
latest_prices = historical_prices.ffill().iloc[-1]
latest_currency_prices_in_usd = (
latest_prices
.loc[lambda x: x.index.str.endswith('USD=X')]
.rename(lambda x: x.split('USD=X')[0])
)
return (
df
.assign(
price=latest_prices,
currency_rate=lambda x: x.currency.map(latest_currency_prices_in_usd.to_dict()),
total_usd=lambda x: x.currency_rate * x.amount * x.price,
total_pln=lambda x: x.total_usd / latest_currency_prices_in_usd['PLN'],
)
.sort_values(['type', 'total_pln'], ascending=False)
.round(2)
)
def calculate_historical_value_in_pln(historical_prices, purchase_df, assets_df, months_n=None, frequency='D'):
if months_n:
historical_prices = historical_prices.loc[pd.Timestamp.now() - pd.DateOffset(months=months_n):]  # reconstructed: keep only the last months_n months
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from geoedfframework.utils.GeoEDFError import GeoEDFError
from geoedfframework.GeoEDFPlugin import GeoEDFPlugin
import pandas as pd
""" Module for implementing the DateTimeFilter. This supports a date time string pattern
that specifies the kinds of values that will be returned from the filter. The user
also provides a start date (in mm/dd/yyyy format) with optional timestamp as hh:mm:ss;
and an optional end date. If an end date is provided, it is assumed that the user wants
to generate all dates between the start and end subject to a period parameter. The period
is a number followed by a character such as D,M,Y etc. for day, month and year. E.g. 2M is
a period of 2 months. The DateTimeFilter is a pre filter that can be used to generate
a string representation of all intervening dates.
"""
class DateTimeFilter(GeoEDFPlugin):
# has_time is Boolean, False by default
# if end is provided, period also needs to be provided
__optional_params = ['end','period','has_time']
__required_params = ['pattern','start']
# we use just kwargs since we need to be able to process the list of attributes
# and their values to create the dependency graph in the GeoEDFConnectorPlugin super class
def __init__(self, **kwargs):
# list to hold all the parameter names; will be accessed in super to
# construct dependency graph
self.provided_params = self.__required_params + self.__optional_params
# check that all required params have been provided
for param in self.__required_params:
if param not in kwargs:
raise GeoEDFError('Required parameter %s for DateTimeFilter not provided' % param)
# specific check for conditionally required params
# if end is provided, also need a period
if 'end' in kwargs:
if 'period' not in kwargs:
raise GeoEDFError('Period is required for DateTimeFilter when both start and end are provided.')
# set all required parameters
for key in self.__required_params:
setattr(self,key,kwargs.get(key))
# set optional parameters
for key in self.__optional_params:
# if key not provided in optional arguments, defaults value to None
setattr(self,key,kwargs.get(key,None))
# if has_time is not provided, set to False
if key == 'has_time':
if self.has_time is None:
self.has_time = False
# initialize filter values array
self.values = []
# class super class init
super().__init__()
# each Filter plugin needs to implement this method
# if error, raise exception; if not, set values attribute
# assume this method is called only when all params have been fully instantiated
def filter(self):
# convert the start and end dates from strings to Pandas DateTime
try:
# check if time is present
if self.has_time:
start_date = pd.to_datetime(self.start,format='%m/%d/%Y %H:%M:%S')
else:
start_date = pd.to_datetime(self.start,format='%m/%d/%Y')
if self.end is not None:
if self.has_time:
end_date = pd.to_datetime(self.end,format='%m/%d/%Y %H:%M:%S')
"""Load remote meet data to DB."""
import copy
import datetime
import json
import logging
import os
import sys
from io import StringIO
from urllib.error import HTTPError
from urllib.request import Request, urlopen
import pandas as pd
from codetiming import Timer
from data.models.meets import Meet
from data.models.syncs import Sync, NotSyncedItem
from data.models.users import User
from enums.sa import SyncStatus, SyncEndReason, NotSyncedItemReason, SyncType
from services import sync_service, user_service
from utils import py as py_utils
# add_module_to_sys_path
directory = os.path.abspath(
os.path.join(os.path.dirname(__file__), '..'))
sys.path.insert(0, directory)
import settings
import data.db_session as db_session
# `results` is related only to data parsing.
req, resp, last_sync, actual_sync, errors, parsing_results = (
None, None, None, None, [], {})
#TODO: Add memcache mechanisms
#TODO: Consider cases when hashes for users and meets are not consistent
# from 3rd party server (need to assign internal checks using utils.db.to_hash
# or more simple md5)
#TODO: Add type hints
#TODO: Consider avoid using global scope vars
#TODO: Add progressbar flow and time repr
#TODO: Refactor module and extract based on responsibility parts
#TODO: Add proper logging and errors, results collecting
# @Timer(text=f"Time consumption for {'run'}: {{:.3f}}")
def run(sync_type, forced=False):
"""
    # The pandas.DataFrame.to_sql approach is not used here, although it would
    # complete the task quickly and could even skip duplicates. Collecting the
    # items one by one is more accurate (e.g. for future investigation) and
    # keeps the solution consistent with the ORM models used across the whole
    # project.
# engine = db_session.create_engine()
# df_users.to_sql('superstore', engine)
"""
global last_sync, actual_sync, req, resp, errors, parsing_results
logging.basicConfig(stream=sys.stdout, level=logging.INFO)
logging.info("Load meet data to DB")
actual_sync_kwargs = {}
sync = None
with db_session.create_session() as session:
remote_data = get_remote_data(session, forced, sync_type)
if remote_data:
# Parsing and storing to DB got data.
df_users, df_meets = extract_pandas_data_frames(remote_data)
# Storing users data to DB.
db_users_synced, db_not_synced_items_users = insert_df_users_to_db(
session, df_users)
# Storing meets data to DB.
db_meets_synced, db_not_synced_items_meets = insert_df_meets_to_db(
session, df_meets)
#TODO: Log results of DB inserting
# Store parsing results.
actual_sync_kwargs.update(**dict(
end_date=datetime.datetime.now(),
status=SyncStatus.finished,
end_reason=SyncEndReason.data_parsing_end,
parsing_results=parsing_results))
py_utils.set_obj_attr_values(actual_sync, actual_sync_kwargs)
session.commit()
sync = copy.deepcopy(actual_sync)
    # Reset the shared global state after each sync
req, resp, last_sync, actual_sync, errors = None, None, None, None, []
return sync
@Timer(text=f"Time consumption for {'insert_df_users_to_db'}: {{:.3f}}")
def insert_df_users_to_db(session, df_users):
logging.info("Inserting users data to DB")
(df_users_unique, df_users_duplicated_data,
df_users_duplicated_none) = aggregate_users_df(df_users)
    # Insert the new portion of unique users.
sync_users = build_sync_users(session, df_users_unique)
session.add_all(sync_users)
    # Insert not-synced user items (previous syncs are not checked, even with
    # the forced option), so every sync may add duplicate entries but keeps
    # the information available for investigation.
sync_users_duplicated_data_items = build_sync_not_synced_items(
df_users_duplicated_data, NotSyncedItemReason.duplicated_data.value)
sync_users_duplicated_none_items = build_sync_not_synced_items(
df_users_duplicated_none, NotSyncedItemReason.duplicated_none.value)
db_not_synced_items_users = list(py_utils.flatten(
[sync_users_duplicated_data_items, sync_users_duplicated_none_items]))
session.add_all(db_not_synced_items_users)
session.commit()
logging.info("Inserting users data to DB is finished.")
return sync_users, db_not_synced_items_users
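# For reference: aggregate_users_df is defined elsewhere in the original module and is not
# shown in this excerpt. A minimal sketch of what it is assumed to return (unique rows,
# rows duplicated with real data, and duplicated rows whose payload is empty):
#
#   def aggregate_users_df(df_users):
#       df_none = df_users[df_users['user_name'].isna() & df_users.duplicated('user_id', keep=False)]
#       df_rest = df_users.drop(df_none.index)
#       df_unique = df_rest.drop_duplicates('user_id')
#       df_duplicated_data = df_rest.drop(df_unique.index)
#       return df_unique, df_duplicated_data, df_none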
@Timer(text=f"Time consumption for {'insert_df_meets_to_db'}: {{:.3f}}")
def insert_df_meets_to_db(session, df_meets):
logging.info("Inserting user meets data to DB")
(df_meets_unique, df_meets_duplicated_data,
df_meets_duplicated_none) = aggregate_meets_df(df_meets)
    # Insert the new portion of unique meets.
sync_meets, df_meets_not_recognized_data = build_sync_meets(
session, df_meets_unique)
session.add_all(sync_meets)
    # Insert not-synced meet items (previous syncs are not checked, even with
    # the forced option), so every sync may add duplicate entries but keeps
    # the information available for investigation.
sync_meets_duplicated_data_items = build_sync_not_synced_items(
df_meets_duplicated_data, NotSyncedItemReason.duplicated_data.value)
sync_meets_duplicated_none_items = build_sync_not_synced_items(
df_meets_duplicated_none, NotSyncedItemReason.duplicated_none.value)
sync_meets_not_recognized_data = build_sync_not_synced_items(
df_meets_not_recognized_data,
NotSyncedItemReason.not_recognized_data.value)
db_not_synced_items_meets = list(py_utils.flatten(
[sync_meets_duplicated_data_items,
sync_meets_duplicated_none_items,
sync_meets_not_recognized_data]))
session.add_all(db_not_synced_items_meets)
session.commit()
logging.info("Inserting meets data to DB is finished.")
return sync_meets, db_not_synced_items_meets
@Timer(text=f"Time consumption for {'build_sync_users'}: {{:.3f}}")
def build_sync_users(session, df_users):
global actual_sync
users = []
# df_users_unique_kwargs = df_users.to_dict('records')
for df_user in df_users.itertuples():
user = User.as_unique(
session=session,
hash_id=df_user.user_id)
user.sync_id = actual_sync.id
user.name = df_user.user_name
users.append(user)
return users
@Timer(text=f"Time consumption for {'build_sync_meets'}: {{:.3f}}")
def build_sync_meets(session, df_meets):
global actual_sync
meets = []
empty_df_meet = pd.DataFrame(
columns=['user_id','meet_start_date','meet_end_date','meet_id'])
df_meets_not_recognized_data = [empty_df_meet]
db_users_qr = user_service.get_users_qr(session)
for df_meet in df_meets.itertuples():
# Try to get user from DB based on meets df data.
db_meet_user = user_service.get_user_hash_id(
user_hash_id=df_meet.user_id, users_qr=db_users_qr)
if not db_meet_user:
            df_meets_not_recognized_data.append(pd.DataFrame([df_meet]))
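            # Hypothetical continuation (the append above is the last original line): skip
            # unrecognised users, otherwise build a Meet the same way build_sync_users builds
            # users. The Meet field names below are assumptions based on the dataframe columns.
            continue
        meet = Meet.as_unique(
            session=session,
            hash_id=df_meet.meet_id)
        meet.sync_id = actual_sync.id
        meet.user_id = db_meet_user.id
        meet.start_date = df_meet.meet_start_date
        meet.end_date = df_meet.meet_end_date
        meets.append(meet)
    df_meets_not_recognized_data = pd.concat(df_meets_not_recognized_data)
    return meets, df_meets_not_recognized_data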
# ---
# jupyter:
# jupytext:
# cell_metadata_filter: -all
# comment_magics: true
# formats: ipynb,py:percent
# text_representation:
# extension: .py
# format_name: percent
# format_version: '1.3'
# jupytext_version: 1.13.8
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# %% [markdown]
# # Topic visualisation
# Visualising topics of app reviews, overlaying them with their clusters
# %%
from mapping_parenting_tech import logging, PROJECT_DIR
from mapping_parenting_tech.utils import lda_modelling_utils as lmu
from mapping_parenting_tech.utils import play_store_utils as psu
from tqdm import tqdm
import pandas as pd
import numpy as np
import altair as alt
import pickle
import umap
OUTPUT_DIR = PROJECT_DIR / "outputs/data"
REVIEWS_DIR = OUTPUT_DIR / "app_reviews"
INPUT_DIR = PROJECT_DIR / "inputs/data/play_store"
TPM_DIR = OUTPUT_DIR / "tpm"
MODEL_NAME = "play_store_reviews"
# %%
import utils
# %%
import mapping_parenting_tech.utils.embeddings_utils as eu
from sentence_transformers import SentenceTransformer
from mapping_parenting_tech.utils import plotting_utils as pu
# %%
# Functionality for saving charts
import mapping_parenting_tech.utils.altair_save_utils as alt_save
AltairSaver = alt_save.AltairSaver()
# %% [markdown]
# ## Load and process data
# %%
# Load ids for those apps that are relevant
relevant_apps = pd.read_csv(INPUT_DIR / "relevant_app_ids.csv")
# %%
# Load in app details
details = pd.read_json(OUTPUT_DIR / "all_app_details.json", orient="index")
details.reset_index(inplace=True, drop=True)
# details.rename(columns={"index": "appId"}, inplace=True)
details.shape
# %%
# Add apps' cluster to their details
# NB: left join to preserve apps that aren't relevant - they're needed to map onto the embedding...
details = details.merge(
relevant_apps,
how="left",
on="appId",
)
# %%
# get the index of those apps that aren't relevant
remove_apps = details[details["cluster"].isna()].index
# %% [markdown]
# ## Plot embeddings
# %%
app_details = details[~details.cluster.isnull()]
# %%
# Embedding model name
EMBEDDING_MODEL = "all-mpnet-base-v2"
# File names
vector_filename = "app_description_vectors_2022_04_27"
embedding_model = EMBEDDING_MODEL
EMBEDINGS_DIR = PROJECT_DIR / "outputs/data"
# %%
# model = SentenceTransformer(EMBEDDING_MODEL)
# %%
v = eu.Vectors(
filename=vector_filename, model_name=EMBEDDING_MODEL, folder=EMBEDINGS_DIR
)
v.vectors.shape
# %%
# app_details[app_details.title.str.contains('AppClose')]
# %%
description_embeddings = v.select_vectors(app_details.appId.to_list())
description_embeddings.shape
# %%
# # remove 'irrelevant' apps from the dataframe and the embedding
# description_embeddings = np.delete(description_embeddings, remove_apps, 0)
# details.drop(remove_apps, inplace=True)
# %%
# Reduce the embedding to 2 dimensions
reducer = umap.UMAP(
n_components=2,
random_state=1,
n_neighbors=8,
min_dist=0.3,
spread=0.5,
)
embedding = reducer.fit_transform(description_embeddings)
# %%
# Reduce the embedding to 2 dimensions
reducer = umap.UMAP(
n_components=2,
random_state=2,
n_neighbors=20,
min_dist=0.5,
spread=0.7,
)
embedding = reducer.fit_transform(description_embeddings)
# %%
# Prepare dataframe for visualisation
df = (
app_details.copy()
.assign(x=embedding[:, 0])
.assign(y=embedding[:, 1])
.assign(circle_size=lambda x: 0.2 * (x.score + 1))
.assign(user=lambda x: x.cluster.apply(utils.map_cluster_to_user))
)
# %% [markdown]
# ## Prepare
# %%
from bs4 import BeautifulSoup
def clean_html(html):
soup = BeautifulSoup(html)
text = soup.get_text()
return text
def shorten_text(text, l=250):
return text[0:l] + "..."
def shorten_and_clean(html):
return shorten_text(clean_html(html))
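# %%
# Illustrative example (not in the original notebook): BeautifulSoup drops the markup and
# shorten_text truncates long descriptions to 250 characters.
clean_html("<p>A <b>parenting</b> app that helps with bedtime routines.</p>")
# -> 'A parenting app that helps with bedtime routines.'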
# %%
filename = "app_landscape_v2022_05_04"
df_viz = pd.read_csv(AltairSaver.path + f"/tables/{filename}.csv")
import random
from copy import deepcopy
import logging
import json, gzip
from tqdm import tqdm
import pandas as pd
from LeapOfThought.artiset import ArtiSet
from LeapOfThought.resources.teachai_kb import TeachAIKB
from LeapOfThought.common.data_utils import pandas_multi_column_agg
from LeapOfThought.common.file_utils import cached_path
logger = logging.getLogger(__name__) # pylint: disable=invalid-name
class Counting(ArtiSet):
def __init__(self, args):
self.artiset_name = 'Counting'
self.variant = args.variant
self.experiment_version = args.experiment_version
logger.info("loading...")
super().__init__(args)
def print_stats(self):
super().print_stats()
# counting specific stats
meta = pd.DataFrame(self.examples_meta)
logger.info(f"Is_hypothetical_statement :\n{pandas_multi_column_agg(meta, ['split', 'is_hypothetical_statement'])}\n")
logger.info(f"Duplicate statements :\n{len(meta['phrase']) - len(set(meta['phrase']))}\n")
        # analyzing the distribution of total_num_of_instances and % counted_instances
if 'counted_instances' in meta:
meta['counted_instances'] = meta['counted_instances'].fillna('')
# meta = meta[meta['counted_instances'].fillna('')]
meta['counted_instances_perc'] = meta['counted_instances'].apply(len) \
/ meta['total_num_of_instances']
meta = meta[meta['is_hypothetical_statement'] == False]
logger.info(f"counted_instances_perc mean answer :\n{meta.groupby('counted_instances_perc')['answer'].mean().round(2)}\n")
logger.info(f"counted_instances_perc count :\n{meta.groupby('counted_instances_perc')['answer'].count()}\n")
# calculating total number of instances per predicate
agg = pandas_multi_column_agg(pd.DataFrame([{'predicate': e['statement']['predicate'], \
'total_num_of_instances': e['total_num_of_instances'], 'z': 1} \
for e in self.examples_meta if 'total_num_of_instances' in e]), \
['predicate', 'total_num_of_instances'])
logger.info(f"total_num_of_instances pre predicate:\n{agg}\n")
# calculating total number of instances per predicate
meta['predicate'] = meta['statement'].apply(lambda x: x['predicate'])
agg = pandas_multi_column_agg(meta[['total_num_of_instances', 'counted_instances_perc', 'answer', 'phrase']], \
['total_num_of_instances', 'counted_instances_perc'])
logger.info(f"counted_instances_perc pre predicate:\n{agg}\n")
def building_non_balanced_counting_examples(self, counted_instances, false_statements, split):
examples = []
for counted_instances, false_statements in zip(counted_instances.to_list(), false_statements.to_list()):
counted_instances, total_num_of_instances = counted_instances
# for each set of counted_instances and false_statements we now choose how many of the
            # counted instances we should remove before choosing the statement (where 0 means we are keeping all
# the counted instances)
all_counted_instaces = deepcopy(counted_instances)
counted_instances_in_context = []
total_count_rule = {'subject': counted_instances[0]['object'], 'predicate': 'has ' + str(total_num_of_instances),
'object': counted_instances[0]['predicate'], 'validity': 'always true'}
for num_of_counted_instances in range(total_num_of_instances + 1):
if total_num_of_instances == 1 and len(counted_instances_in_context) == 0 and random.sample([0, 0, 1], 1)[0]:
instances_to_sample_statement = counted_instances
else:
instances_to_sample_statement = counted_instances + [false_statements.pop()]
# in the dev set, for balancing labels, let's take cases in which the count reaches the max.
if split == 'dev' and num_of_counted_instances == total_num_of_instances:
num_to_sample = random.sample([0, 1, 1], 1)[0]
else:
# usually sample 2 statement for each num_of_counted_instances
num_to_sample = random.sample([1] + [min(len(instances_to_sample_statement), 2)] * 2, 1)[0]
statements = random.sample(instances_to_sample_statement, num_to_sample)
for statement in statements:
if self.variant == 'increment_counted_instances':
for increment_counted_instances_num in range(total_num_of_instances + 1):
if statement['validity'] == 'always true' and increment_counted_instances_num == total_num_of_instances:
continue
examples.append({'statement': deepcopy(statement),
'counted_instances': deepcopy(all_counted_instaces[0:increment_counted_instances_num]),
'total_num_of_instances': total_num_of_instances,
'total_count_rule': total_count_rule})
else:
examples.append({'statement': deepcopy(statement),
'counted_instances': deepcopy(counted_instances_in_context),
'total_num_of_instances': total_num_of_instances,
'total_count_rule': total_count_rule})
if len(counted_instances) > 0:
counted_instances_in_context.append(counted_instances.pop())
return examples
def read_predictions_and_meta(self, name, variant, base_model, model, full_path=None):
if full_path is None:
pred_path = cached_path("https://aigame.s3-us-west-2.amazonaws.com/predictions/" +
base_model + "/" + model + "/" + name + "_" + variant + ".json")
else:
pred_path = full_path
preds = []
with open(pred_path) as f:
all_results = json.load(f)
print(f"total EM for {base_model} {model} is {all_results['EM']}")
preds = all_results['predictions']
        preds = pd.DataFrame(preds)
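        # Hypothetical continuation (the DataFrame construction above is the last original
        # line): attaching the example metadata before returning is an assumption implied by
        # the method name.
        meta = pd.DataFrame(self.examples_meta)
        return pd.concat([meta.reset_index(drop=True), preds.reset_index(drop=True)], axis=1)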
#!/usr/bin/env python
# coding: utf-8
# 1 Import libraries and Set path
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import statsmodels.api as sm
import scipy.stats as scs
from scipy.stats.mstats import winsorize
from scipy.stats.mstats import gmean
from tabulate import tabulate
# 2 Set path of my sub-directory
from pathlib import Path
# key in your own file path below
myfolder = Path('key in your own file path here')
# 3 Set up files to write output and charts
from matplotlib.backends.backend_pdf import PdfPages
outfile = open('output.txt', 'w')
chartfile = PdfPages('chart-retreg.pdf')
# Stock returns data
# 4 Read Compustat monthly stock returns data
df1 = pd.read_csv(myfolder / 'stock-returns.csv', parse_dates = ['datadate'])
df1 = df1.sort_values(by=['gvkey','datadate'])
df1 = df1.dropna()
# 5 Create portfolio formation year (pfy) variable, where
# pfy = current year for Jul-Dec dates and previous year for Jan-Jun dates.
# This is to facilitate compounding returns over Jul-Jun by pfy later below.
df1['year'], df1['month'] = df1['datadate'].dt.year, df1['datadate'].dt.month
df1['pfy'] = np.where(df1.month > 6, df1.year, df1.year - 1)
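# e.g. a return dated 2015-07-31 and one dated 2016-03-31 both get pfy = 2015, so the twelve
# monthly returns from Jul-2015 through Jun-2016 compound into the 2015 portfolio year.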
# 6 Compute monthly return compounding factor (1+monthly return)
# trt1m is the monthly return, express as percentage, need to convert to % by / 100
df1['mretfactor'] = 1 + df1.trt1m/100
df1 = df1.sort_values(by=['gvkey','pfy'])
df2 = df1[['gvkey', 'conm', 'datadate', 'pfy', 'mretfactor']]
# 7 Compound monthly returns to get annual returns at end-June of each pfy,
# ensuring only firm-years with 12 mths of return data from Jul-Jun are selected.
df2['yret'] = df2.groupby(['gvkey', 'pfy'])['mretfactor'].cumprod() - 1
df3 = df2.groupby(['gvkey', 'pfy']).nth(11)
df3['yret'] = winsorize(df3['yret'], limits=[0.025,0.025])
df3 = df3.drop(['mretfactor'], axis=1) # "axis=1" means to drop column
# Accounting data
# 8 Read Compustat accounting data
df4 = pd.read_csv(myfolder / 'accounting-data2.csv', parse_dates = ['datadate'])
df4 = df4.sort_values(by=['gvkey','datadate'])
# 9 Create portfolio formation year (pfy) variable, portfolio formation in April where
# pfy = current year for Jan-Mar year-end dates and next year for Apr-Dec year-end dates.
# This is to facilitate compounding returns over July-June by pfy below.
# dt.year is pandas method to extract year from 'datadate' variable
# dt.month is pandas method to extract month from 'datadate' variable
df4['year'], df4['month'] = df4['datadate'].dt.year, df4['datadate'].dt.month
df4['pfy'] = np.where(df4.month < 4, df4.year, df4.year + 1)
# 10 Compute accounting variables from Compustat data, keep relevant variables, delete missing values
# Profitability
df4['ROA'] = df4['ni'] / df4['at']
df4['ROA_prev'] = df4.groupby('gvkey')['ROA'].shift(1)
# Leverage
df4['Leverage_ratio'] = df4['dltt'] / df4['seq']
df4['Leverage_ratio_prev'] = df4.groupby('gvkey')['Leverage_ratio'].shift(1)
df4['Current_ratio'] = df4['act'] / df4['lct']
df4['Current_ratio_prev'] = df4.groupby('gvkey')['Current_ratio'].shift(1)
df4['csho_prev'] = df4.groupby('gvkey')['csho'].shift(1)
df4['Shares_issued'] = df4['csho'] - df4['csho_prev']
# Operating
df4['GP_margin'] = df4['gp'] / df4['revt']
df4['GP_margin_prev'] = df4.groupby('gvkey')['GP_margin'].shift(1)
df4['at_prev'] = df4.groupby('gvkey')['at'].shift(1)
df4['at_average']= (df4['at'] + df4['at_prev'])/2
df4['Asset_TO'] = df4['revt'] / df4['at_average']
df4['Asset_TO_prev'] = df4.groupby('gvkey')['Asset_TO'].shift(1)
df4['GP_profitability'] = df4['gp']/df4['at']
df4 = df4[['ib', 'gvkey', 'pfy', 'ni', 'oancf', 'mkvalt', 'gsector', 'ROA', 'ROA_prev', 'Leverage_ratio', 'Leverage_ratio_prev', 'Current_ratio',
'Current_ratio_prev', 'csho_prev', 'Shares_issued', 'GP_margin', 'GP_margin_prev', 'at_prev',
'at_average', 'Asset_TO', 'Asset_TO_prev', 'GP_profitability' ]]
df4 = df4[np.isfinite(df4)]
df4 = df4.dropna()
# 11 EDA before winsorize
dfeda = df4[['ROA', 'ROA_prev', 'oancf', 'ib', 'Leverage_ratio', 'Current_ratio', 'Shares_issued',
'GP_margin', 'Asset_TO', 'mkvalt', 'ni']]
dfeda['PE'] = dfeda['mkvalt'] / dfeda['ni']
dfeda['CROA'] = dfeda['ROA'] - dfeda['ROA_prev']
dfeda['Cquality'] = np.where(dfeda['oancf']> dfeda['ib'], 1, 0)
dfeda2 = dfeda[['ROA', 'oancf', 'CROA', 'Cquality', 'Leverage_ratio', 'Current_ratio', 'Shares_issued',
'GP_margin', 'Asset_TO', 'PE']]
print('EDA before winsorize \n\n', dfeda2.describe(), '\n'*5, file=outfile)
# 12 Winsorize variables at 2.5% of left and right tails
for var in ['ib', 'ni', 'oancf', 'mkvalt', 'ROA', 'ROA_prev', 'Leverage_ratio', 'Leverage_ratio_prev', 'Current_ratio',
'Current_ratio_prev', 'csho_prev', 'Shares_issued', 'GP_margin', 'GP_margin_prev', 'at_prev',
'at_average', 'Asset_TO', 'Asset_TO_prev', 'GP_profitability']:
df4[var] = winsorize(df4[var], limits=[0.025,0.025])
# 13 EDA after winsorize
dfeda3 = df4[['ROA', 'ROA_prev', 'oancf', 'ib', 'Leverage_ratio', 'Current_ratio', 'Shares_issued',
'GP_margin', 'Asset_TO', 'mkvalt', 'ni']]
dfeda3['PE'] = dfeda3['mkvalt'] / dfeda3['ni']
dfeda3['CROA'] = dfeda3['ROA'] - dfeda3['ROA_prev']
dfeda3['Cquality'] = np.where(dfeda3['oancf']> dfeda3['ib'], 1, 0)
dfeda4 = dfeda3[['ROA', 'oancf', 'CROA', 'Cquality', 'Leverage_ratio', 'Current_ratio', 'Shares_issued',
'GP_margin', 'Asset_TO', 'PE']]
print('EDA after winsorize \n\n', dfeda4.describe(), '\n'*5, file=outfile)
# Merge Stock returns data with Accounting data
# 14 Merge accounting dataset (df4) with returns dataset (df3)
# "inner" means to merge only observations that have data in BOTH datasets
df5 = pd.merge(df3, df4, how='inner', on=['gvkey', 'pfy'])
df5 = df5[['ib', 'gvkey', 'conm', 'pfy', 'yret', 'ni', 'mkvalt', 'oancf', 'gsector', 'ROA', 'ROA_prev', 'Leverage_ratio', 'Leverage_ratio_prev', 'Current_ratio',
'Current_ratio_prev', 'csho_prev', 'Shares_issued', 'GP_margin', 'GP_margin_prev', 'at_prev',
'at_average', 'Asset_TO', 'Asset_TO_prev', 'GP_profitability']]
# Compute F-score
# 15 Compute 9 F-score ratios
# Profitability
df5['F_income'] = np.where(df5['ROA']> 0, 1, 0)
df5['F_opcash'] = np.where(df5['oancf']> 0, 1, 0)
df5['F_ROA'] = np.where(df5['ROA']>df5['ROA_prev'], 1, 0)
df5['F_quality'] = np.where(df5['oancf']> df5['ib'], 1, 0)
# Leverage
df5['F_leverage'] = np.where(df5['Leverage_ratio']< df5['Leverage_ratio_prev'], 1, 0)
df5['F_currentratio'] = np.where(df5['Current_ratio']> df5['Current_ratio_prev'], 1, 0)
df5['F_dilute'] = np.where(df5['Shares_issued']< 0 , 1, 0)
# Operating
df5['F_GPM'] = np.where(df5['GP_margin']< df5['GP_margin_prev'], 1, 0)
df5['F_ATO'] = np.where(df5['Asset_TO']< df5['Asset_TO_prev'], 1, 0)
# 16 Group F-score based on categories
df5['F-profitability'] = df5['F_income'] + df5['F_opcash'] + df5['F_ROA'] + df5['F_quality']
df5['F_leverage_liquidity'] = df5['F_leverage'] + df5['F_currentratio'] + df5['F_dilute']
df5['F_operating'] = df5['F_GPM'] + df5['F_ATO']
df5['F_score'] = df5['F-profitability'] + df5['F_leverage_liquidity'] + df5['F_operating']
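# (Illustrative note) F_score therefore ranges from 0 to 9; e.g. a firm-year passing only
# F_income and F_opcash scores 2, while the long portfolio below keeps only scores of 8 or 9.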
# Long Portfolio
# 17 Filter out F_score more than 7
df6 = df5[df5.F_score > 7]
# 18 Average PE per pfy per gsector
df6['PE'] = df6['mkvalt'] / df6['ni']
df7 = df6.groupby(['pfy','gsector'], as_index=False)['PE'].mean()
# 19 Filter for stocks with PE lower than gsector average
df8 = df6.merge(df7, on = ['pfy','gsector'], how='left')
df8['y_x'] = df8['PE_y'] - df8['PE_x']
df11 = df8[df8['y_x'] > 0]
# 20 Finding the number of unique company/gvkey in our long portfolio
df12 = df11['gvkey'].unique()
# 21 Mean yret of each pfy
df23 = pd.DataFrame(df11.groupby(['pfy'], as_index=False)['yret'].mean())
df23.rename(columns={'yret':'pyret'}, inplace = True)
# 22 add pfy count number
df24 = df11.groupby(['pfy'], as_index=False)['yret'].count()
df25 = pd.merge(df23, df24, how='inner', on=['pfy'])
df25.rename(columns={'yret':'count'}, inplace = True)
# 23 Compute yearly return compounding factor (1+yearly return)
df25['ppyret'] = df25['pyret'] + 1
# Risk free rate
# 24 Calculate risk free rate using UStreasury 1month
import quandl
from datetime import datetime
# Key in your quandl api key below
QUANDL_API_KEY = 'key in your quandl api key here'
quandl.ApiConfig.api_key = QUANDL_API_KEY
start = datetime(2002, 1, 1)
end = datetime(2020, 12, 31)
rf = quandl.get('USTREASURY/YIELD.1',start_date=start, end_date=end)
risk_free = rf['1 MO']
rfr = risk_free.mean()/100
# 25 Annualise the total return, based on average and total
Lportfolio_annualised_return_rut = scs.gmean(df25.loc[:,"ppyret"])-1
# 26 Calculate annualized volatility from the standard deviation
Lportfolio_vola_rut = np.std(df25['pyret'], ddof=1)
# 27 Calculate the Sharpe ratio
Lportfolio_sharpe_rut = ((Lportfolio_annualised_return_rut - rfr)/ Lportfolio_vola_rut)
# 28 Define negative returns and compute standard deviation
Lportfolio_negative_ret_rut = df25.loc[df25['pyret'] < 0]
Lportfolio_expected_ret_rut = np.mean(df25['pyret'])
Lportfolio_downside_std_rut = Lportfolio_negative_ret_rut['pyret'].std()
# 29 Compute Sortino Ratio
Lportfolio_sortino_rut = (Lportfolio_expected_ret_rut - rfr)/Lportfolio_downside_std_rut
# 30 Compute Worst and Best pfy return
Lpcolumn = df25["pyret"]
Lpmax_value = Lpcolumn.max()
Lpmin_value = Lpcolumn.min()
# 31 Compute % of profitable pfy
Lpprofitable_pfy = len(df25[df25['pyret']>0]['pyret'])/len(df25['pyret'])
# 32 Compute long portfolio monthly price
# Merge long portfolio df11 with stock return to get monthly close price
col = ['pfy','gvkey']
df21 = df11[col]
df26 = pd.merge(df1, df21, how='inner', on=['gvkey', 'pfy'])
# Calculate long portfolio monthly price
df27 = df26.groupby(['pfy','month'], as_index=False)['prccm'].mean()
# 33 Compute max drawdown and duration
# Initialize variables: hwm (high watermark), drawdown, duration
lphwm = np.zeros(len(df27))
lpdrawdown = np.zeros(len(df27))
lpduration = 0
# 34 Determine maximum drawdown (maxDD)
for t in range(len(df27)):
lphwm[t] = max(lphwm[t-1], df27['prccm'][t])
lpdrawdown[t] = ((lphwm[t] - df27.prccm[t]) / lphwm[t]) * 100
lpmaxDD = lpdrawdown.max()
# 35 Determine maximum drawdown duration
# numpy.allclose compares whether two floating values are equal to the absolute
# tolerance (atol) precision (1e-8 is 1x10^-8)
for j in range(len(df27)):
if np.allclose(lpdrawdown[j], lpmaxDD, atol=1e-8):
for k in range(j):
if np.allclose(df27.prccm[k], lphwm[j], atol=1e-8):
lpduration = j - k
else:
continue
else:
continue
# Short portfolio
# 36 Filter out F_score less than 2
df28 = df5[df5.F_score < 2]
# 37 Average PE per pfy per gsector
df28['PE'] = df28['mkvalt'] / df28['ni']
df29 = df28.groupby(['pfy','gsector'], as_index=False)['PE'].mean()
# 38 Filter for stocks with PE lower than gsector average
df30 = df28.merge(df29, on = ['pfy','gsector'], how='left')
df30['y_x'] = df30['PE_y'] - df30['PE_x']
df33 = df30[df30['y_x'] > 0]
# 39 Finding the number of unique company/gvkey in our short portfolio
df34 = df33['gvkey'].unique()
# 40 Mean yret of each pfy
df37 = pd.DataFrame(df33.groupby(['pfy'], as_index=False)['yret'].mean())
df37.rename(columns={'yret':'pyret'}, inplace = True)
# 41 add pfy count number
df38 = df33.groupby(['pfy'], as_index=False)['yret'].count()
df39 = pd.merge(df37, df38, how='inner', on=['pfy'])
df39.rename(columns={'yret':'count'}, inplace = True)
# 42 Reverse return sign due to short portfolio
df39['spyret'] = df39['pyret'] * -1
# 43 Compute yearly return compounding factor (1+yearly return)
df39['sppyret'] = df39['spyret'] + 1
# 44 Annualise the total return, based on average and total
Sportfolio_annualised_return_rut = scs.gmean(df39.loc[:,"sppyret"])-1
# 45 Calculate annualized volatility from the standard deviation
Sportfolio_vola_rut = np.std(df39['spyret'], ddof=1)
# 46 Calculate the Sharpe ratio
Sportfolio_sharpe_rut = ((Sportfolio_annualised_return_rut - rfr)/ Sportfolio_vola_rut)
# 47 Define negative returns and compute standard deviation
Sportfolio_negative_ret_rut = df39.loc[df39['spyret'] < 0]
Sportfolio_expected_ret_rut = np.mean(df39['spyret'])
Sportfolio_downside_std_rut = Sportfolio_negative_ret_rut['spyret'].std()
# 48 Compute Sortino Ratio
Sportfolio_sortino_rut = (Sportfolio_expected_ret_rut - rfr)/Sportfolio_downside_std_rut
# 49 Compute Worst and Best pfy return
Spcolumn = df39["spyret"]
Spmax_value = Spcolumn.max()
Spmin_value = Spcolumn.min()
# 50 Compute % of profitable pfy
Spprofitable_pfy = len(df39[df39['spyret']>0]['spyret'])/len(df39['spyret'])
# 51 Compute short portfolio monthly price
# Prepare the short portfolio df33 to merge with stock return data
col = ['pfy','gvkey']
df40 = df33[col]
# Merge short portfolio df33 with stock return to get monthly close price
df41 = pd.merge(df1, df40, how='inner', on=['gvkey', 'pfy'])
# Calculate short portfolio monthly price
df42 = df41.groupby(['pfy','month'], as_index=False)['prccm'].mean()
# 52 Compute max drawdown and duration
# Initialize variables: hwm (high watermark), drawdown, duration
sphwm = np.zeros(len(df42))
spdrawdown = np.zeros(len(df42))
spduration = 0
# 53 Determine maximum drawdown (maxDD)
for t in range(len(df42)):
sphwm[t] = max(sphwm[t-1], df42['prccm'][t])
spdrawdown[t] = ((sphwm[t] - df42.prccm[t]) / sphwm[t]) * 100
spmaxDD = spdrawdown.max()
# 54 Determine maximum drawdown duration
# numpy.allclose compares whether two floating values are equal to the absolute
# tolerance (atol) precision (1e-8 is 1x10^-8)
for j in range(len(df42)):
if np.allclose(spdrawdown[j], spmaxDD, atol=1e-8):
for k in range(j):
if np.allclose(df42.prccm[k], sphwm[j], atol=1e-8):
spduration = j - k
else:
continue
else:
continue
# Long & Short Portfolio
# 55 Merge long and short portfolio
df43 = df25[['pfy','pyret']]
df44 = df39[['pfy','spyret']]
df45 = pd.merge(df43, df44, how='inner', on=['pfy'])
# 56 compute long short return
df45['lspyret'] = df45['pyret']/2 + df45['spyret']/2
# Compute yearly return compounding factor (1+yearly return)
df45['lsppyret'] = df45['lspyret'] + 1
# 57 Annualise the total return, based on average and total
LSportfolio_annualised_return_rut = scs.gmean(df45.loc[:,"lsppyret"])-1
# 58 Calculate annualized volatility from the standard deviation
LSportfolio_vola_rut = np.std(df45['lspyret'], ddof=1)
# 59 Calculate the Sharpe ratio
LSportfolio_sharpe_rut = ((LSportfolio_annualised_return_rut - rfr)/ LSportfolio_vola_rut)
# 60 Define negative returns and compute standard deviation
LSportfolio_negative_ret_rut = df45.loc[df45['lspyret'] < 0]
LSportfolio_expected_ret_rut = np.mean(df45['lspyret'])
LSportfolio_downside_std_rut = LSportfolio_negative_ret_rut['lspyret'].std()
# 61 Compute Sortino Ratio
LSportfolio_sortino_rut = (LSportfolio_expected_ret_rut - rfr)/LSportfolio_downside_std_rut
# 62 Compute Worst and Best pfy return
LSpcolumn = df45["lspyret"]
LSpmax_value = LSpcolumn.max()
LSpmin_value = LSpcolumn.min()
# 63 Compute % of profitable pfy
LSpprofitable_pfy = len(df45[df45['lspyret']>0]['lspyret'])/len(df45['lspyret'])
# 64 Merge long and short portfolio monthly price
df46 = pd.merge(df27, df42, how='inner', on=['pfy', 'month'])
df46['lsprccm'] = df46['prccm_x']/2 + df46['prccm_y']/2
# 65 Compute max drawdown and duration
# Initialize variables: hwm (high watermark), drawdown, duration
lsphwm = np.zeros(len(df46))
lspdrawdown = np.zeros(len(df46))
lspduration = 0
# 66 Determine maximum drawdown (maxDD)
for t in range(len(df46)):
lsphwm[t] = max(lsphwm[t-1], df46['lsprccm'][t])
lspdrawdown[t] = ((lsphwm[t] - df46.lsprccm[t]) / lsphwm[t]) * 100
lspmaxDD = lspdrawdown.max()
# 67 Determine maximum drawdown duration
# numpy.allclose compares whether two floating values are equal to the absolute
# tolerance (atol) precision (1e-8 is 1x10^-8)
for j in range(len(df46)):
if np.allclose(lspdrawdown[j], lspmaxDD, atol=1e-8):
for k in range(j):
if np.allclose(df46.lsprccm[k], lsphwm[j], atol=1e-8):
lspduration = j - k
else:
continue
else:
continue
# Market return
# 68 Monthly return of Russell 3000
rut = pd.read_csv(myfolder / '^RUA.csv', parse_dates=['Date'])
rut['rutret'] = rut.sort_values(by='Date')['Adj Close'].pct_change()
# 69 Create portfolio formation year (pfy) variable, where
# pfy = current year for Jul-Dec dates and previous year for Jan-Jun dates.
# This is to facilitate compounding returns over Jul-Jun by pfy later below.
rut['year'], rut['month'] = rut['Date'].dt.year, rut['Date'].dt.month
rut['pfy'] = np.where(rut.month > 6, rut.year, rut.year - 1)
rut
# 70 Compute monthly return compounding factor (1+monthly return)
rut['mretfactor'] = 1 + rut.rutret
rut2 = rut[['Date','Adj Close','rutret', 'pfy', 'mretfactor']]
# 71 Compound monthly returns to get annual returns at end-June of each pfy,
# ensuring only firm-years with 12 mths of return data from Jul-Jun are selected.
rut2['rutyret'] = rut2.groupby(['pfy'])['mretfactor'].cumprod() - 1
rut3 = rut2.groupby(['pfy']).nth(11)
# 72 Compute yearly return compounding factor (1+yearly return)
rut3['rrutyret'] = rut3['rutyret'] + 1
# 73 Compute Returns, Sharpe and Sortino ratio
# 74 Compute monthly stock returns from price data
rut4 = rut3[['Date', 'Adj Close','rutyret']]
rut4 = rut3.rename(columns = {'Adj Close': 'price'})
# 75 Annualise the total return, based on average and total
annualised_return_rut = scs.gmean(rut3.loc[:,"rrutyret"])-1
# 76 Calculate annualized volatility from the standard deviation
vola_rut = np.std(rut4['rutyret'], ddof=1)
# 77 Calculate the Sharpe ratio
sharpe_rut = ((annualised_return_rut - rfr)/ vola_rut)
# 78 Define negative returns and compute standard deviation
negative_ret_rut = rut4.loc[rut4['rutyret'] < 0]
expected_ret_rut = np.mean(rut4['rutyret'])
downside_std_rut = negative_ret_rut['rutyret'].std()
# 79 Compute Sortino Ratio
sortino_rut = (expected_ret_rut - rfr)/downside_std_rut
# 80 Compute Worst and Best pfy return
rcolumn = rut4["rutyret"]
rmax_value = rcolumn.max()
rmin_value = rcolumn.min()
# 81 Compute % of profitable pfy
rprofitable_pfy = len(rut4[rut4['rutyret']>0]['rutyret'])/len(rut4['rutyret'])
# Compute Max drawdown and duration
# 82 Rename to price
rut5 = rut2.rename(columns = {'Adj Close': 'price'})
# 83 Initialize variables: hwm (high watermark), drawdown, duration
rhwm = np.zeros(len(rut5))
rdrawdown = np.zeros(len(rut5))
rduration = 0
# 84 Determine maximum drawdown (maxDD)
for t in range(len(rut5)):
rhwm[t] = max(rhwm[t-1], rut5['price'][t])
rdrawdown[t] = ((rhwm[t] - rut5.price[t]) / rhwm[t]) * 100
rmaxDD = rdrawdown.max()
# 85 Determine maximum drawdown duration
# numpy.allclose compares whether two floating values are equal to the absolute
# tolerance (atol) precision (1e-8 is 1x10^-8)
for j in range(len(rut5)):
if np.allclose(rdrawdown[j], rmaxDD, atol=1e-8):
for k in range(j):
if np.allclose(rut5.price[k], rhwm[j], atol=1e-8):
rduration = j - k
else:
continue
else:
continue
# Investment peformance
# 86 Plot Portfolio and Russell 3000 Returns
rut6 = rut4.drop(['Date', 'price', 'rutret', 'mretfactor', 'rrutyret'], axis=1) # "axis=1" means to drop column
df47 = df45.iloc[: , :-1]
df48 = pd.merge(df47, rut6, how='inner', on=['pfy'])
df48.rename(columns={'pyret':'Long Portfolio', 'spyret':'Short Portfolio', 'lspyret':'Long Short Portfolio','rutyret':'Market Index'}, inplace = True)
df48_plot = pd.melt(df48,id_vars='pfy', var_name='Returns',value_name='returns')
fig, ax = plt.subplots(figsize=(8,6))
ax = sns.lineplot(data=df48_plot, x='pfy', y='returns', hue='Returns')
ax.set(xlabel = 'pfy', ylabel = 'Returns')
ax.set_title('Plot of Portfolio and Russell 3000 Returns')
plt.show()
chartfile.savefig(fig)
# 87 Calculate market Wealth Index
#rut7 = rut.drop(['Date', 'Open', 'High', 'Low', 'Close', 'Volume', 'rutret', 'year', 'Adj Close'], axis=1)
rut3['RUT_WI'] = (rut3['rrutyret']).cumprod()
rut3 = rut3.reset_index()
rut8 = rut3.drop(['Date', 'Adj Close', 'rutret', 'mretfactor', 'rutyret', 'rrutyret'], axis=1)
# 88 Calculate long portfolio Wealth Index
df25['P_WI'] = (df25['ppyret']).cumprod()
df49 = df25.drop(['pyret', 'count', 'ppyret'], axis=1)
# 89 Calculate short portfolio Wealth Index
df39['S_WI'] = (df39['sppyret']).cumprod()
df50 = df39.drop(['pyret', 'count', 'spyret', 'sppyret'], axis=1)
# 90 Calculate long short portfolio Wealth Index
df45['LS_WI'] = (df45['lsppyret']).cumprod()
df52 = df45.drop(['pyret', 'spyret', 'lspyret', 'lsppyret'], axis=1)
# 91 Plot Portfolio and Russell 3000 Wealth Index Line plot
df53 = pd.merge(df49, df50, how='right', on=['pfy'])
df54 = pd.merge(df53, df52, how='left', on=['pfy'])
df55 = pd.merge(df54, rut8, how='left', on=['pfy'])
df55.rename(columns={'P_WI':'Long Portfolio WI', 'S_WI':'Short Portfolio WI', 'LS_WI':'Long Short Portfolio WI','RUT_WI':'Market Index WI'}, inplace = True)
df55_plot = pd.melt(df55,id_vars='pfy', var_name='Wealth Index',value_name='wealth index')
fig2, ax2 = plt.subplots(figsize=(8,6))
ax2 = sns.lineplot(data=df55_plot, x='pfy', y='wealth index', hue='Wealth Index')
ax2.set(xlabel = 'pfy', ylabel = 'Wealth Index')
ax2.set_title('Plot of Portfolio and Russell 3000 Wealth Index')
plt.show()
chartfile.savefig(fig2)
# 92 Print Investment Performance in a table
table = [['Performance Matrix', 'Long', 'Short', 'Long & Short', 'Russell 3000 Index'],
['Compounded annual return', Lportfolio_annualised_return_rut, Sportfolio_annualised_return_rut, LSportfolio_annualised_return_rut, annualised_return_rut],
['Standard deviation of return', Lportfolio_vola_rut, Sportfolio_vola_rut, LSportfolio_vola_rut, vola_rut],
['Downside deviation of return', Lportfolio_downside_std_rut, Sportfolio_downside_std_rut, LSportfolio_downside_std_rut, downside_std_rut],
['Sharpe ratio', Lportfolio_sharpe_rut, Sportfolio_sharpe_rut, LSportfolio_sharpe_rut, sharpe_rut],
['Sortino ratio', Lportfolio_sortino_rut, Sportfolio_sortino_rut, LSportfolio_sortino_rut, sortino_rut],
['Maximum drawdown %', lpdrawdown.round(2).max(), spdrawdown.round(2).max(), lspdrawdown.round(2).max(), rdrawdown.round(2).max()],
['Worst-pfy return', Lpmin_value, Spmin_value, LSpmin_value, rmin_value],
['Best-pfy return', Lpmax_value, Spmax_value, LSpmax_value, rmax_value],
['% of profitable pfy', Lpprofitable_pfy, Spprofitable_pfy, LSpprofitable_pfy, rprofitable_pfy]]
print(tabulate(table, headers='firstrow', tablefmt='fancy_grid'))
print(tabulate(table, headers='firstrow', tablefmt='fancy_grid'), '\n\n', file=outfile)
# 93 t-test between Long Portfolio and Russell 3000
a = df48['Market Index'][pd.isnull(df48['Market Index'])==False]
b = df48['Long Portfolio'][pd.isnull(df48['Long Portfolio'])==False]
model1 = scs.ttest_ind(a, b, equal_var=False, alternative="greater")
print('Long Portfolio returns vs. Russell 3000 returns using t-test \n\n',
model1, '\n'*5, file=outfile)
# 94 t-test between Short Portfolio and Russell 3000
a = df48['Market Index'][pd.isnull(df48['Market Index'])==False]
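# Assumed continuation mirroring step 93 (the original script is cut off above):
b = df48['Short Portfolio'][pd.isnull(df48['Short Portfolio'])==False]
model2 = scs.ttest_ind(a, b, equal_var=False, alternative="greater")
print('Short Portfolio returns vs. Russell 3000 returns using t-test \n\n',
      model2, '\n'*5, file=outfile)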
#!/usr/bin/env python
# -*- coding: utf-8; -*-
# Copyright (c) 2020, 2022 Oracle and/or its affiliates.
# Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl/
import logging
import time
import sys
import warnings
from abc import ABC, abstractmethod, abstractproperty
import math
import pandas as pd
import numpy as np
from scipy.stats import sem
from sklearn import set_config
from sklearn.dummy import DummyClassifier, DummyRegressor
import matplotlib.pyplot as plt
import ads
from ads.common.utils import (
ml_task_types,
wrap_lines,
is_documentation_mode,
is_notebook,
)
from ads.dataset.label_encoder import DataFrameLabelEncoder
from IPython.core.display import display, HTML
from ads.common import logger, utils
class AutoMLProvider(ABC):
"""
Abstract Base Class defining the structure of an AutoML solution. The solution needs to
implement train() and get_transformer_pipeline().
"""
def __init__(self):
self.X_train = None
self.y_train = None
self.X_valid = None
self.y_valid = None
self.client = None
self.ml_task_type = None
self.class_names = None
self.transformer_pipeline = None
self.est = None
def setup(
self,
X_train,
y_train,
ml_task_type,
X_valid=None,
y_valid=None,
class_names=None,
client=None,
):
"""
Setup arguments to the AutoML instance.
Parameters
----------
X_train : DataFrame
Training features
y_train : DataFrame
Training labels
ml_task_type : One of ml_task_type.{REGRESSION,BINARY_CLASSIFICATION,
MULTI_CLASS_CLASSIFICATION,BINARY_TEXT_CLASSIFICATION,MULTI_CLASS_TEXT_CLASSIFICATION}
X_valid : DataFrame
Validation features
y_valid : DataFrame
Validation labels
class_names : list
Unique values in y_train
client : object
Dask client instance for distributed execution
"""
self.X_train = X_train
self.y_train = y_train
self.X_valid = X_valid
self.y_valid = y_valid
self.ml_task_type = ml_task_type
self.client = client
self.class_names = class_names
@property
def est(self):
"""
Returns the estimator.
The estimator can be a standard sklearn estimator or any object that implement methods from
(BaseEstimator, RegressorMixin) for regression or (BaseEstimator, ClassifierMixin) for classification.
Returns
-------
est : An instance of estimator
"""
return self.__est
@est.setter
def est(self, est):
self.__est = est
@abstractmethod
def train(self, **kwargs):
"""
Calls fit on estimator.
This method is expected to set the 'est' property.
Parameters
----------
kwargs: dict, optional
kwargs to decide the estimator and arguments for the fit method
"""
pass
@abstractmethod
def get_transformer_pipeline(self):
"""
Returns a list of transformers representing the transformations done on data before model prediction.
This method is optional to implement, and is used only for visualizing transformations on data using
ADSModel#visualize_transforms().
Returns
-------
transformers_list : list of transformers implementing fit and transform
"""
pass
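# Illustrative sketch (not part of ADS): a concrete provider only has to set self.est in
# train() and report the transformers it applied, e.g.
#
#   class MySklearnProvider(AutoMLProvider):
#       def train(self, **kwargs):
#           from sklearn.linear_model import LogisticRegression
#           self.est = LogisticRegression().fit(self.X_train, self.y_train)
#       def get_transformer_pipeline(self):
#           return []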
class BaselineModel(object):
"""
A BaselineModel object that supports fit/predict/predict_proba/transform
interface. Labels (y) are encoded using DataFrameLabelEncoder.
"""
def __init__(self, est):
self.est = est
self.df_label_encoder = DataFrameLabelEncoder()
def predict(self, X):
"""
Runs the Baselines predict function and returns the result.
Parameters
----------
X: Dataframe or list-like
A Dataframe or list-like object holding data to be predicted on
Returns
-------
List: A list of predictions performed on the input data.
"""
X = self.transform(X)
return self.est.predict(X)
def predict_proba(self, X):
"""
Runs the Baselines predict_proba function and returns the result.
Parameters
----------
X: Dataframe or list-like
A Dataframe or list-like object holding data to be predicted on
Returns
-------
List: A list of probabilities of being part of a class
"""
X = self.transform(X)
return self.est.predict_proba(X)
def fit(self, X, y):
"""
Fits the baseline estimator.
Parameters
----------
X: Dataframe or list-like
A Dataframe or list-like object holding data to be predicted on
Y: Dataframe, Series, or list-like
A Dataframe, series, or list-like object holding the labels
Returns
-------
estimator: The fitted estimator
"""
self.est.fit(X, y)
return self
def transform(self, X):
"""
Runs the Baselines transform function and returns the result.
Parameters
---------
X: Dataframe or list-like
A Dataframe or list-like object holding data to be transformed
Returns
-------
Dataframe or list-like: The transformed Dataframe. Currently, no transformation is performed by the default Baseline Estimator.
"""
return X
def __getattr__(self, item):
return getattr(self.est, item)
def __getstate__(self):
return self.__dict__
def __setstate__(self, state):
self.__dict__ = state
def __repr__(self):
set_config()
return str(self.est)[:-2]
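# Example usage (illustrative only; X_train, y_train, X_valid stand for any feature frame,
# label series and holdout frame):
#
#   baseline = BaselineModel(DummyClassifier(strategy="prior"))
#   baseline.fit(X_train, y_train)
#   baseline.predict(X_valid)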
class BaselineAutoMLProvider(AutoMLProvider):
def get_transformer_pipeline(self):
"""
Returns a list of transformers representing the transformations done on data before model prediction.
This method is used only for visualizing transformations on data using
ADSModel#visualize_transforms().
Returns
-------
transformers_list : list of transformers implementing fit and transform
"""
msg = "Baseline"
return [("automl_preprocessing", AutoMLPreprocessingTransformer(msg))]
def __init__(self, est):
"""
Generates a baseline model using the Zero Rule algorithm by default. For a classification
predictive modeling problem where a categorical value is predicted, the Zero
Rule algorithm predicts the class value that has the most observations in the training dataset.
Parameters
----------
est : BaselineModel
An estimator that supports the fit/predict/predict_proba interface.
By default, DummyClassifier/DummyRegressor are used as estimators
"""
super(BaselineAutoMLProvider, self).__init__()
self.est = est
def __repr__(self):
set_config()
return str(self.est)[:-2]
def train(self, **kwargs):
self.est = self.decide_estimator(**kwargs)
if self.est is None:
raise ValueError(
"Baseline model for (%s) is not supported" % self.ml_task_type
)
try:
self.est.fit(self.X_train, self.y_train)
except Exception as e:
warning_message = f"The baseline estimator failed to fit the data. It could not evaluate {self.est} and gave the exception {e}."
logger.warning(warning_message)
def decide_estimator(self, **kwargs):
"""
Decides which type of BaselineModel to generate.
Returns
-------
        model: BaselineModel
A baseline model generated for the particular ML task being performed
"""
if self.est is not None:
return self.est
else:
if self.ml_task_type == ml_task_types.REGRESSION:
return BaselineModel(DummyRegressor())
elif self.ml_task_type in [
ml_task_types.BINARY_CLASSIFICATION,
ml_task_types.MULTI_CLASS_CLASSIFICATION,
ml_task_types.BINARY_TEXT_CLASSIFICATION,
ml_task_types.MULTI_CLASS_TEXT_CLASSIFICATION,
]:
return BaselineModel(DummyClassifier())
# An installation of oracle labs automl is required only for this class
class OracleAutoMLProvider(AutoMLProvider, ABC):
def __init__(self, n_jobs=-1, loglevel=None, logger_override=None):
"""
The Oracle AutoML Provider automatically provides a tuned ML pipeline that best models the given a training
dataset and a prediction task at hand.
Parameters
----------
n_jobs : int
Specifies the degree of parallelism for Oracle AutoML. -1 (default) means that AutoML will use all
available cores.
loglevel : int
The verbosity of output for Oracle AutoML. Can be specified using the Python logging module
(https://docs.python.org/3/library/logging.html#logging-levels).
"""
try:
self.automl = __import__("automl")
self.cpuinfo = __import__("cpuinfo")
except ModuleNotFoundError as e:
utils._log_missing_module("automl", "ads[labs]")
raise e
super(OracleAutoMLProvider, self).__init__()
if loglevel is None:
loglevel = logging.DEBUG if ads.debug_mode else logging.ERROR
else:
loglevel = loglevel
if logger_override:
logr = logger_override
else:
logr = logging.getLogger(__name__)
if "AMD" in self.cpuinfo.get_cpu_info().get("brand", "UNKNOWN-BRAND"):
# Disable intra-model parallelism for LightGBM and XGBoost libraries
# which seem to be unstable currently on AMD shapes
self.automl.init(
engine="local",
engine_opts={"n_jobs": n_jobs, "model_n_jobs": 1},
loglevel=loglevel,
)
else:
self.automl.init(
engine="local", engine_opts={"n_jobs": n_jobs}, logger=logr
)
def __repr__(self):
super(OracleAutoMLProvider, self).__repr__()
def get_transformer_pipeline(self):
"""
Returns a list of transformers representing the transformations done on data before model prediction.
This method is used only for visualizing transformations on data using
ADSModel#visualize_transforms().
Returns
-------
transformers_list : list of transformers implementing fit and transform
"""
if hasattr(self.est, "text") and not self.est.text:
msg1 = wrap_lines(
self.est.selected_features_names_, heading="Select features:"
)
return [("automl_feature_selection", AutoMLFeatureSelection(msg1))]
else:
msg = "Apply Tfidf Vectorization\n"
msg += "Normalize features\n"
msg += "Label encode target"
return [("automl_preprocessing", AutoMLPreprocessingTransformer(msg))]
def selected_model_name(self):
"""
Return the name of the selected model by AutoML.
"""
return self.est.selected_model_
def print_summary(
self,
max_rows=None,
sort_column="Mean Validation Score",
ranking_table_only=False,
):
"""
Prints a summary of the Oracle AutoML Pipeline in the last train() call.
Parameters
----------
max_rows : int
Number of trials to print. Pass in None to print all trials
sort_column: string
Column to sort results by. Must be one of ['Algorithm', '#Samples', '#Features', 'Mean Validation Score',
'Hyperparameters', 'All Validation Scores', 'CPU Time']
ranking_table_only: bool
Table to be displayed. Pass in False to display the complete table.
Pass in True to display the ranking table only.
"""
if is_notebook(): # pragma: no cover
logger.info(
f"Training time was ({(time.time() - self.train_start_time):.2f} seconds.)"
)
if len(self.est.tuning_trials_) == 0 or len(self.est.train_shape_) == 0:
logger.error(
"Unfortunately, there were no trials found, so we cannot visualize it."
)
return
info = [
["Training Dataset size", self.X_train.shape],
[
"Validation Dataset size",
self.X_valid.shape if self.X_valid is not None else None,
],
["CV", self.est.num_cv_folds_],
["Target variable", self.y_train.name],
["Optimization Metric", self.est.inferred_score_metric],
["Initial number of Features", self.est.train_shape_[1]],
["Selected number of Features", len(self.est.selected_features_names_)],
["Selected Features", self.est.selected_features_names_],
["Selected Algorithm", self.est.selected_model_],
[
"End-to-end Elapsed Time (seconds)",
self.train_end_time - self.train_start_time,
],
["Selected Hyperparameters", self.est.selected_model_params_],
["Mean Validation Score", self.est.tuning_trials_[0][3]],
["AutoML n_jobs", self.est.n_jobs_],
["AutoML version", self.automl.__version__],
["Python version", sys.version],
]
            info_df = pd.DataFrame(info)
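            # Hypothetical continuation (the summary table above is the last original line):
            # rendering it with the IPython display helpers imported at the top of the module
            # is an assumption.
            display(HTML(info_df.to_html(index=False, header=False)))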
import itertools
import numpy as np
import pandas as pd
import pytest
from staircase import Stairs
def _expand_interval_definition(start, end=None, value=1):
return start, end, value
def _compare_iterables(it1, it2):
it1 = [i for i in it1 if i is not None]
it2 = [i for i in it2 if i is not None]
if len(it2) != len(it1):
return False
for e1, e2 in zip(it1, it2):
if e1 != e2:
return False
return True
def s1(closed="left"):
int_seq1 = Stairs(initial_value=0, closed=closed)
int_seq1.layer(1, 10, 2)
int_seq1.layer(-4, 5, -1.75)
int_seq1.layer(3, 5, 2.5)
int_seq1.layer(6, 7, -2.5)
int_seq1.layer(7, 10, -2.5)
return int_seq1
def s2():
int_seq2 = Stairs(initial_value=0)
int_seq2.layer(1, 7, -2.5)
int_seq2.layer(8, 10, 5)
int_seq2.layer(2, 5, 4.5)
int_seq2.layer(2.5, 4, -2.5)
int_seq2.layer(-2, 1, -1.75)
return int_seq2
def s3(): # boolean
int_seq = Stairs(initial_value=0)
int_seq.layer(-10, 10, 1)
int_seq.layer(-8, -7, -1)
int_seq.layer(-5, -2, -1)
int_seq.layer(0.5, 1, -1)
int_seq.layer(3, 3.5, -1)
int_seq.layer(7, 9.5, -1)
return int_seq
def s4(): # boolean
int_seq = Stairs(initial_value=0)
int_seq.layer(-11, 9, 1)
int_seq.layer(-9.5, -8, -1)
int_seq.layer(-7.5, -7, -1)
int_seq.layer(0, 3, -1)
int_seq.layer(6, 6.5, -1)
int_seq.layer(7, 8.5, -1)
return int_seq
@pytest.fixture
def s1_fix():
return s1()
@pytest.fixture
def s2_fix():
return s2()
@pytest.fixture
def s3_fix():
return s3()
@pytest.fixture
def s4_fix():
return s4()
def test_init():
assert Stairs(initial_value=0).identical(Stairs())
assert Stairs().identical(Stairs(initial_value=0))
@pytest.mark.parametrize("init_value", [0, 1.25, -1.25, 2, -2])
def test_init2(init_value):
int_seq = Stairs(initial_value=init_value)
assert (
int_seq.number_of_steps == 0
), "Initialised Stairs should have exactly one interval"
@pytest.mark.parametrize("init_value", [0, 1.25, -1.25, 2, -2])
def test_init3(init_value):
int_seq = Stairs(initial_value=init_value)
assert (
len(int_seq.step_points) == 0
), "Initialised Stairs should not have any finite interval endpoints"
@pytest.mark.parametrize("init_value", [0, 1.25, -1.25, 2, -2])
def test_init4(init_value):
int_seq = Stairs(initial_value=init_value)
assert (
int_seq(-1) == init_value
), "Initialised Stairs should have initial value everywhere"
assert (
int_seq(0) == init_value
), "Initialised Stairs should have initial value everywhere"
assert (
int_seq(1) == init_value
), "Initialised Stairs should have initial value everywhere"
@pytest.mark.parametrize(
"init_value, added_interval",
itertools.product(
[0, 1.25, -1.25],
[(-2, 1), (3, 5, 2), (1, 5, -1), (-5, -3, 3), (3,), (2, None, 2)],
),
)
def test_one_finite_interval(init_value, added_interval):
e = 0.0001
int_seq = Stairs(initial_value=init_value)
int_seq.layer(*added_interval)
start, end, value = _expand_interval_definition(*added_interval)
assert int_seq.number_of_steps == 2 - (
end is None
), "One finite interval added to initial infinite interval should result in 3 intervals"
assert _compare_iterables(
int_seq.step_points, (start, end)
), "Finite endpoints are not what is expected"
assert (
int_seq(float("-inf")) == init_value
), "Adding finite interval should not change initial value"
assert int_seq(float("inf")) == init_value + value * (
end is None
), "Adding finite interval should not change final value"
assert int_seq(start - e) == init_value
assert int_seq(start) == init_value + value
assert int_seq(start + e) == init_value + value
if end is not None:
assert int_seq(end - e) == init_value + value
assert int_seq(end) == init_value
@pytest.mark.parametrize(
"init_value, endpoints, value",
itertools.product(
[0, 1.25, -1.25, 2, -2],
[(-2, 1, 3), (-2, -1, 3), (-3, -2, -1), (1, 2, 3)],
[-1, 2, 3],
),
)
def test_two_adjacent_finite_interval_same_value(init_value, endpoints, value):
e = 0.0001
int_seq = Stairs(initial_value=init_value)
point1, point2, point3 = endpoints
int_seq.layer(point1, point2, value)
int_seq.layer(point2, point3, value)
assert int_seq.number_of_steps == 2, "Expected result to be 3 intervals"
assert _compare_iterables(
int_seq.step_points, (point1, point3)
), "Finite endpoints are not what is expected"
assert (
int_seq(float("-inf")) == init_value
), "Adding finite interval should not change initial value"
assert (
int_seq(float("inf")) == init_value
), "Adding finite interval should not change final value"
assert int_seq(point1 - e) == init_value
assert int_seq(point1) == init_value + value
assert int_seq(point2) == init_value + value
assert int_seq(point3 - e) == init_value + value
assert int_seq(point3) == init_value
@pytest.mark.parametrize(
"init_value, endpoints, value, delta",
itertools.product(
[0, 1.25, -1.25, 2, -2],
[(-2, 1, 3), (-2, -1, 3), (-3, -2, -1), (1, 2, 3)],
[-1, 2, 4],
[3, -3, 1.5, -1.5],
),
)
def test_two_adjacent_finite_interval_different_value(
init_value, endpoints, value, delta
):
e = 0.0001
int_seq = Stairs(initial_value=init_value)
point1, point2, point3 = endpoints
int_seq.layer(point1, point2, value)
int_seq.layer(point2, point3, value + delta)
assert int_seq.number_of_steps == 3, "Expected result to be 4 intervals"
assert _compare_iterables(
int_seq.step_points, (point1, point2, point3)
), "Finite endpoints are not what is expected"
assert (
int_seq(float("-inf")) == init_value
), "Adding finite interval should not change initial value"
assert (
int_seq(float("inf")) == init_value
), "Adding finite interval should not change final value"
assert int_seq(point1 - e) == init_value
assert int_seq(point1) == init_value + value
assert int_seq(point2) == init_value + value + delta
assert int_seq(point3 - e) == init_value + value + delta
assert int_seq(point3) == init_value
@pytest.mark.parametrize(
"init_value, endpoints, value, delta",
itertools.product(
[0, 1.25, -1.25, 2, -2],
[(-2, 1, 2, 3), (-3, -2, -1, 3), (-4, -3, -2, -1), (0, 1, 2, 3)],
[-1, 2, 4],
[3, -3, 1.5, -1.5],
),
)
def test_two_overlapping_finite_interval(init_value, endpoints, value, delta):
e = 0.0001
int_seq = Stairs(initial_value=init_value)
point1, point2, point3, point4 = endpoints
int_seq.layer(point1, point3, value)
int_seq.layer(point2, point4, value + delta)
assert int_seq.number_of_steps == 4, "Expected result to be 5 intervals"
assert _compare_iterables(
int_seq.step_points, (point1, point2, point3, point4)
), "Finite endpoints are not what is expected"
assert (
int_seq(float("-inf")) == init_value
), "Adding finite interval should not change initial value"
assert (
int_seq(float("inf")) == init_value
), "Adding finite interval should not change final value"
assert int_seq(point1 - e) == init_value
assert int_seq(point1) == init_value + value
assert int_seq(point2) == init_value + 2 * value + delta
assert int_seq(point3 - e) == init_value + 2 * value + delta
assert int_seq(point3) == init_value + value + delta
assert int_seq(point4 - e) == init_value + value + delta
assert int_seq(point4) == init_value
@pytest.mark.parametrize(
"init_value, endpoints, value, delta",
itertools.product(
[0, 1.25, -1.25, 2, -2],
[(-2, 1, 2, 3), (-3, -2, -1, 3), (-4, -3, -2, -1), (0, 1, 2, 3)],
[-1, 2, 4],
[3, -3, 1.5, -1.5],
),
)
def test_two_finite_interval_one_subinterval(init_value, endpoints, value, delta):
e = 0.0001
int_seq = Stairs(initial_value=init_value)
point1, point2, point3, point4 = endpoints
int_seq.layer(point1, point4, value)
int_seq.layer(point2, point3, value + delta)
assert int_seq.number_of_steps == 4, "Expected result to be 5 intervals"
assert _compare_iterables(
int_seq.step_points, (point1, point2, point3, point4)
), "Finite endpoints are not what is expected"
assert (
int_seq.initial_value == init_value
), "Adding finite interval should not change initial value"
assert (
int_seq(float("inf")) == init_value
), "Adding finite interval should not change final value"
assert int_seq(point1 - e) == init_value
assert int_seq(point1) == init_value + value
assert int_seq(point2) == init_value + 2 * value + delta
assert int_seq(point3 - e) == init_value + 2 * value + delta
assert int_seq(point3) == init_value + value
assert int_seq(point4 - e) == init_value + value
assert int_seq(point4) == init_value
@pytest.mark.parametrize("init_value", [0, 1.25, -1.25, 2, -2])
def test_layer1(init_value):
intervals_to_add = [(-2, 1), (3, 5), (1, 5), (-5, -3), (None, 0), (0, None)]
int_seq = Stairs(initial_value=init_value)
int_seq2 = Stairs(initial_value=init_value)
for start, end in intervals_to_add:
int_seq.layer(start, end)
starts, ends = list(zip(*intervals_to_add))
starts = [{None: np.nan}.get(x, x) for x in starts]
ends = [{None: np.nan}.get(x, x) for x in ends]
int_seq2.layer(starts, ends)
assert int_seq.identical(int_seq2)
assert int_seq2.identical(int_seq)
@pytest.mark.parametrize("init_value", [0, 1.25, -1.25, 2, -2])
def test_layer2(init_value):
intervals_to_add = [(-2, 1, 1), (3, 5, 2), (1, 5, -1), (-5, -3, 3)]
int_seq = Stairs(initial_value=init_value)
int_seq2 = Stairs(initial_value=init_value)
for interval in intervals_to_add:
int_seq.layer(*interval)
starts, ends, values = list(zip(*intervals_to_add))
int_seq2.layer(starts, ends, values)
assert int_seq.identical(int_seq2)
assert int_seq2.identical(int_seq)
def test_layering_index(s1_fix):
result = Stairs(
start=pd.Index([1, -4, 3, 6, 7]),
end=pd.Index([10, 5, 5, 7, 10]),
value=pd.Index([2, -1.75, 2.5, -2.5, -2.5]),
)
assert result.identical(s1_fix)
def test_layering_frame(s1_fix):
df = pd.DataFrame(
{
"start": [1, -4, 3, 6, 7],
"end": [10, 5, 5, 7, 10],
"value": [2, -1.75, 2.5, -2.5, -2.5],
}
)
assert Stairs(df, "start", "end", "value").identical(s1_fix)
@pytest.mark.parametrize(
"closed",
["left", "right"],
)
@pytest.mark.parametrize(
"initial_value",
[0, -1, 1],
)
def test_from_values(initial_value, closed):
# this corresponds to the step function produced by S1 method
values = pd.Series([-1.75, 0.25, 2.75, 2.00, -0.5, 0], index=[-4, 1, 3, 5, 6, 10])
sf = Stairs.from_values(
initial_value=initial_value,
values=values + initial_value,
closed=closed,
)
assert sf.identical(s1(closed) + initial_value)
@pytest.mark.parametrize(
"index, values",
[
([-np.inf], [0]),
([np.inf], [0]),
([1, 0], [10, 20]),
([0], ["1"]),
([], []),
],
)
def test_from_values_exception(index, values):
with pytest.raises(ValueError):
Stairs.from_values(
initial_value=0,
values=pd.Series(values, index=index),
closed="left",
)
def test_layering_trivial_1(s1_fix):
assert s1_fix.copy().layer(1, 1).identical(s1_fix)
def test_layering_series_with_different_index():
# GH112
result = Stairs(
start=pd.Series([0, 2, 4], index=[0, 2, 4]),
end=pd.Series([1, 3, 5], index=[1, 3, 5]),
)
expected = pd.Series([1, 0, 1, 0, 1, 0])
"""
Prelim script for looking at netcdf files and producing some trends
Broken into three parts
Part 1 pull out the NDVI from the relevant sites
"""
#==============================================================================
__title__ = "Time Series Chow Test"
__author__ = "<NAME>"
__version__ = "v1.0(27.02.2019)"
__email__ = "<EMAIL>"
#==============================================================================
# +++++ Check the paths and set ex path to fireflies folder +++++
import os
import sys
if not os.getcwd().endswith("fireflies"):
if "fireflies" in os.getcwd():
p1, p2, _ = os.getcwd().partition("fireflies")
os.chdir(p1+p2)
else:
raise OSError(
"This script was called from an unknown path. CWD can not be set"
)
sys.path.append(os.getcwd())
#==============================================================================
# Import packages
import numpy as np
import pandas as pd
import argparse
import datetime as dt
from collections import OrderedDict
import warnings as warn
from netCDF4 import Dataset, num2date
from scipy import stats
import xarray as xr
from numba import jit
import bottleneck as bn
import scipy as sp
from scipy import stats
import statsmodels.api as sm
import statsmodels.formula.api as smf
# Import plotting and colorpackages
from statsmodels.sandbox.regression.predstd import wls_prediction_std
import matplotlib.pyplot as plt
import matplotlib.colors as mpc
import matplotlib as mpl
import palettable
import seaborn as sns
import cartopy.crs as ccrs
import cartopy.feature as cpf
from cartopy.mpl.gridliner import LONGITUDE_FORMATTER, LATITUDE_FORMATTER
# Import debugging packages
import ipdb
# +++++ Import my packages +++++
import myfunctions.stats as sf
# import MyModules.CoreFunctions as cf
# import MyModules.PlotFunctions as pf
# import MyModules.NetCDFFunctions as ncf
#==============================================================================
def main():
# ========== Get the key information from the args ==========
# fdpath = args.fdpath
warn.warn(
'''
This is currently only in alpha testing form.
I will replace all the variables and information
for experiments in a dataframe so I can look at
different approaches.
''')
# ========== set the filenames ==========
data= OrderedDict()
data["MODISaqua"] = ({
"fname":"./data/veg/MODIS/aqua/processed/MYD13Q1_A*_final.nc",
'var':"ndvi", "gridres":"250m", "region":"SIBERIA", "timestep":"16day",
"start":2002, "end":2018
})
data["COPERN"] = ({
'fname':"./data/veg/COPERN/NDVI_MonthlyMax_1999to2018_at_1kmRUSSIA.nc",
'var':"NDVI", "gridres":"1km", "region":"RUSSIA", "timestep":"Monthly",
"start":1999, "end":2018
})
data["GIMMS"] = ({
"fname":"./data/veg/GIMMS31g/GIMMS31v1/timecorrected/ndvi3g_geo_v1_1_1981to2017_mergetime_compressed.nc",
'var':"ndvi", "gridres":"8km", "region":"Global", "timestep":"16day",
"start":1981, "end":2017
})
win = 3.0 # Window from the start and end of the time series
for syear in [2017, 2018]:
# ========== Pull out info needed from the field data ==========
SiteInfo = Field_data(year = syear)
Firedict = OrderedDict()
Chowdict = OrderedDict()
# ========== Loop over each of the included vegetation datasets ==========
for dsn in data:
# ========== Get the veg values ==========
infile = ("./data/field/exportedNDVI/NDVI_%dsites_%s_%dto%d_%s_"
% (syear, dsn,data[dsn]["start"], data[dsn]["end"], data[dsn]["gridres"]))
df = pd.read_csv(infile+"AnnualMax.csv", index_col="sn")
# %% [markdown]
#
# # Comprehensive Exam
#
# ## Coding Artifact
#
# <NAME>
#
# Nov 20, 2020
# ## Model Selection
#
# Base selection of regressors is performed by fitting multiple regressors without
# performing any parameter tuning, then comparing the resulting errors across
# functional groups. Models with lower errors will be marked for further investigation.
# %%
import os
import sys
import math
import logging
from pathlib import Path
from IPython.display import display
import numpy as np
import sklearn
from sklearn.ensemble import (
AdaBoostRegressor,
GradientBoostingRegressor,
RandomForestRegressor,
)
from sklearn.linear_model import LinearRegression, Ridge
from sklearn.multioutput import MultiOutputRegressor
from sklearn.preprocessing import StandardScaler
from sklearn.tree import DecisionTreeRegressor
from tqdm.auto import tqdm
# !%load_ext autoreload
# !%autoreload 2
import matplotlib as mpl
import matplotlib.pyplot as plt
# !%matplotlib inline
# !%config InlineBackend.figure_format = 'retina'
# import seaborn as sns
import pandas as pd
import artifact
from artifact.datasets import load_tkr, tkr_group_lut
from artifact.helpers import RegressionProfile, REGRESSION_PROFILE_PATH
# %%
plt.rcParams["figure.figsize"] = (9, 5.5)
mpl.rcParams["mathtext.fontset"] = "stix"
mpl.rcParams["font.size"] = 14
mpl.rcParams["font.family"] = "Times New Roman"
# sns.set_context("poster")
# sns.set(rc={'figure.figsize': (16, 9.)})
# sns.set_style("whitegrid")
pd.set_option("display.max_rows", 120)
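# %% [markdown]
# A minimal sketch (not part of the original artifact code) of the untuned-regressor
# comparison described in the Model Selection cell above. The feature matrix `X`,
# single target `y`, the 75/25 split, and the MAE metric are assumptions made for
# illustration only; the artifact's TKR targets are multi-output and loaded via
# `load_tkr`, so the actual comparison differs in its data handling.

# %%
import pandas as pd
from sklearn.ensemble import GradientBoostingRegressor, RandomForestRegressor
from sklearn.linear_model import LinearRegression, Ridge
from sklearn.metrics import mean_absolute_error
from sklearn.model_selection import train_test_split
from sklearn.tree import DecisionTreeRegressor


def compare_untuned_regressors(X, y, random_state=0):
    """Fit several default-parameter regressors and return their test MAE."""
    X_train, X_test, y_train, y_test = train_test_split(
        X, y, test_size=0.25, random_state=random_state
    )
    candidates = {
        "linear": LinearRegression(),
        "ridge": Ridge(),
        "tree": DecisionTreeRegressor(random_state=random_state),
        "forest": RandomForestRegressor(random_state=random_state),
        "boosting": GradientBoostingRegressor(random_state=random_state),
    }
    errors = {}
    for name, model in candidates.items():
        model.fit(X_train, y_train)
        errors[name] = mean_absolute_error(y_test, model.predict(X_test))
    # lower is better; sort so the strongest candidates appear first
    return pd.Series(errors).sort_values()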
import pandas as pd
import numpy as np
import git
import os
import sys
from pathlib import Path
import matplotlib.pyplot as plt
#-- Setup paths
# Get parent directory using git
repo = git.Repo("./", search_parent_directories=True)
homedir = repo.working_dir
# Change working directory to parent directory
os.chdir(homedir)
# Add 'Dan' directory to the search path for imports
sys.path.append('Dan')
# Import our custom cube managing functions
import cube_formatter as cf
#-- Setup interactive matplotlib
#%matplotlib widget
#-- Control parameters
# Top N clusters to plot with the most deaths
# Set to -1 to plot all
plotN = 20
# Cluster fips to plot
# If isShowAllocations=True, all counties from the following cluster will be plotted
clst2Show = 80 # "FIPS" of cluster to show
# Data Manipulation flags (should match those used in creating submission file)
isComputeDaily = False # Flag to translate cumulative data to daily counts
#- Plot-type control flags
isCumul = True # Flag to denote that the plot should be cumulative, not daily deaths
# NOTE: the following two flags are independent of each other (ie. you can run either, or, or both)
isShowClusters = True # Flag to denote that each cluster should be plotted on its own
isShowAllocations = True # Flag to denote that the counties within clst2Show should be shown
# Key days (should match those used in creating the cube)
global_dayzero = pd.to_datetime('2020 Jan 21')
# Day until which model was trained (train_til in epid model)
# Leave as None to not display a boundary
boundary = '2020 May 10'
# Day to use for allocating to counties
# Leave as None to use most recent date
# OR use '2020-04-23' format to allocate based on proportions from that day
alloc_day = '2020-05-10'
# Flag to choose whether to save .svg of figures
is_saveSVG = False
# Filename (including path) for saving .svg files when is_saveSVG=True
# county, state, and fips will be appended to the name to differentiate plots
svg_flm = 'Dan/MidtermFigs/CountyWideDaily2/'
#-- Files to utilize
# Filename for cube of model data
# should be (row=sample, col=day, pane=state) with state FIPS as beef in row1
mat_model = 'Alex\\PracticeOutputs\\fresh.mat'#'Dan\\train_til_today.csv'
# Reference file to treat as "true" death counts
csv_true = 'data\\us\\covid\\nyt_us_counties_daily.csv' # daily county counts (also used for allocating deaths when req.)
csv_ST_true = 'data\\us\\covid\\nyt_us_states.csv' # this is cumulative ONLY; no _daily version exists
csv_CT_cumul_true = 'data\\us\\covid\\nyt_us_counties.csv' # county cumulative counts
# reference file for clustering df
# This assignment as done below assumes that the right file just has _clusters.csv appended.
# You can enter the actual path manually if you'd like
cluster_ref_fln=os.path.splitext(mat_model)[0] + '_clusters.csv'
#-- Read and format true data to have correct columns
# Read correct file for requested setup
if isCumul:
# plotting cumulative county-wide so pull this file
true_df = pd.read_csv(csv_CT_cumul_true)
# The nyt_us_counties.csv file is SUPER FLAWED so we need to fix this:
# - has some empty values in the fips column causing problems with .astype(int)
# - Straight up doesn't have fips entry for NYC so need to hardcode its fips
# Replace empty value on NYC with 36061
true_df.loc[true_df.county=='New York City', 'fips'] = 36061
# Remove rows with nans from the df (these are the counties we don't care about)
true_df = true_df[true_df['fips'].notna()]
else:
# plotting daily county-wide so pull this file
true_df = pd.read_csv(csv_true)
# Reformat some columns
true_df['fips'] = true_df['fips'].astype(int)
true_df['id'] = true_df['date'] + '-' + true_df['fips'].astype(str)
# Add column of dates in datetime format
true_df['dateDT'] = pd.to_datetime(true_df['date'].values)
#-- Read model data and compute daily if requested
# read raw cube from epid. code
full_cube = cf.read_cube(mat_model)
# compute daily values
if isComputeDaily:
full_cube = cf.calc_daily(full_cube)
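#-- Illustrative sketch only: cf.calc_daily is a project helper in cube_formatter
# whose actual implementation is not shown here and may differ. Assuming the cube
# layout noted above (row 0 = FIPS header row, remaining rows = samples,
# columns = days, panes = regions), a minimal cumulative-to-daily conversion
# could look like the helper below.
def _calc_daily_sketch(cube):
    daily = cube.copy()
    # difference the sample rows along the day axis (columns)
    daily[1:, 1:, :] = np.diff(cube[1:, :, :], axis=1)
    # the first day keeps its cumulative value as that day's count
    daily[1:, 0, :] = cube[1:, 0, :]
    return daily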
#-- Remove panes that are not related to clusters (this code only deals with clusters)
# Assumes cluster fips are <1000 and counties are >1000
clst_cube = full_cube[:,:,full_cube[0,0,:] < 1000]
###################################
#### Plot Clusters ################
###################################
#-- Calculate quantiles
# Quantiles to consider
perc_list = [10, 20, 30, 40, 50, 60, 70, 80, 90]
# Calculate along each column ignoring the first row of beef
clst_cube_quants = np.percentile(clst_cube[1:,:,:],perc_list,0)
# cubes now have 9 rows, one for each of the quantiles requested
# The cols and panes are the same format as the input cube
#-- Order results by peak deaths/day predicted AND extract clusters for plotting from the cube
# Get maximum deaths/day ever hit by each cluster
# Use row index 4 (the 50th percentile, ie. the central prediction)
peak_daily_deaths = np.max(clst_cube_quants[4,:,:],0)
# Get indices of sorted (descending) vector
# NOTE: argsort only works in ascending order so use [::-1] to reverse
peak_inds = np.argsort(peak_daily_deaths)[::-1]
# Take the largest plotN clusters (since these are the only ones requested by the user)
peak_inds = peak_inds[:plotN]
# Extract the resulting clusters
# results will be implicitly sorted due to use of argsort
clst_cube_quants = clst_cube_quants[:,:,peak_inds] # Get quantiles
clst_cube_fips = clst_cube[0,0,peak_inds] # Get fips ID's
#-- Read in cluster-to-fips translation
# Load cluster data
clst_to_fips = pd.read_csv(cluster_ref_fln)
import numpy as np
import pandas as pd
import nibabel as nib
from nilearn import plotting
class SubjectAnalyzer:
def __init__(self,subject_nii_path,mean_nii_path,sd_nii_path,atlas_nii_path):
'''Get paths for files'''
self.subject_nii_path = subject_nii_path
self.mean_nii_path = mean_nii_path
self.sd_nii_path = sd_nii_path
self.atlas_nii_path = atlas_nii_path
# Read nii images:
self.load_data()
# If data is OK, continue to analysis:
if self.is_data_proper:
self.calculate_zscore() # Calculate voxel z-scores
self.calculate_atlas_results() # Calculate atlas areas mean values and z-scores
else: # If data dimensions do not fit, output an error message detailing the error
self.error_message = \
"The following inputs: {}{}{}have a dimension mismatch with the subject".format(
'mean map, ' if not self.is_mean_proper else '',
'st. dev. map, ' if not self.is_sd_proper else '',
'atlas, ' if not self.is_atlas_proper else '')
def load_data(self):
# Load nifti data of subject, mean and sd of "population" and atlas:
self.subject_img = nib.load(self.subject_nii_path)
self.mean_img = nib.load(self.mean_nii_path)
self.sd_img = nib.load(self.sd_nii_path)
self.atlas_img = nib.load(self.atlas_nii_path)
self.shape = self.subject_img.shape # get dimensions of subject's data
self.is_mean_proper = self.mean_img.shape == self.shape # test that the mean data is the same shape
self.is_sd_proper = self.sd_img.shape == self.shape # test that the sd data is the same shape
self.is_atlas_proper = self.atlas_img.shape == self.shape # test that the atlas data is the same shape
# set is_data_proper to false if one of the inputs is not in the same dimensions as the subject
self.is_data_proper = self.is_mean_proper and self.is_sd_proper and self.is_atlas_proper
self.subject_data = self.subject_img.get_data() # get subject data from image
self.mean_data = self.mean_img.get_data() # get mean data from image
self.sd_data = self.sd_img.get_data() # get SD data from image
self.atlas_data = self.atlas_img.get_data() # get atlas data from image
# set zero values to nan for subject, mean and sd data
self.subject_data[self.subject_data==0] = np.nan
self.mean_data[self.mean_data == 0] = np.nan
self.sd_data[self.sd_data == 0] = np.nan
def calculate_zscore(self):
'''
calculates the zscore for each subject voxel based on the control mean and sd
finds only significant voxels and saves them as "zs.nii.gz"
'''
self.zscores = (self.subject_data - self.mean_data) / self.sd_data # calculate zscores
zscores = self.zscores
zscores[np.isnan(zscores)] = 0 # replace nans with zeros temporarily
self.significant_zscores = np.where(np.abs(zscores)<=1.96,np.nan,zscores) # finds non significant values and replaces them with nan for new variable
self.significant_zscores_nii = nib.Nifti1Image(self.significant_zscores,self.subject_img.affine) # creates nifti template
nib.save(self.significant_zscores_nii, 'zs.nii.gz') # save nifti template
zs_nii_path = self.significant_zscores_nii
plotting.plot_glass_brain(zs_nii_path, threshold=1.96, colorbar=True, plot_abs=False,
output_file='Z_map.png',vmax=5)
def calculate_atlas_results(self):
'''
for each area in the atlas supplied, calculate the average value and z-score
'''
vals = np.zeros(self.atlas_data.max()) # initialize values array
zs = np.zeros(self.atlas_data.max()) # initialize zscores array
for i in range(1,self.atlas_data.max()+1): # for every area
vals[i-1] = np.nanmean(self.subject_data[self.atlas_data == i]) # calculate mean value in area
zs[i-1] = np.nanmean(self.zscores[self.atlas_data == i]) # calculate mean z-score in area
vals = pd.Series(vals,index = np.arange(1,self.atlas_data.max()+1)) # create values series
zs_s = pd.Series(zs,index = np.arange(1,self.atlas_data.max()+1)) # create zscore series
self.area_data = pd.DataFrame({'Values': vals, 'Z-scores': zs_s}) # create dataframe from both
self.area_data.index.name = 'Area' # change index name to area
subject_data = self.area_data
decimals = pd.Series([4, 2], index=['Values', 'Z-scores'])
# -*- coding: utf-8 -*-
# pylint: disable=W0612,E1101
from datetime import datetime
import operator
import nose
from functools import wraps
import numpy as np
import pandas as pd
from pandas import Series, DataFrame, Index, isnull, notnull, pivot, MultiIndex
from pandas.core.datetools import bday
from pandas.core.nanops import nanall, nanany
from pandas.core.panel import Panel
from pandas.core.series import remove_na
import pandas.core.common as com
from pandas import compat
from pandas.compat import range, lrange, StringIO, OrderedDict, signature
from pandas import SparsePanel
from pandas.util.testing import (assert_panel_equal, assert_frame_equal,
assert_series_equal, assert_almost_equal,
assert_produces_warning, ensure_clean,
assertRaisesRegexp, makeCustomDataframe as
mkdf, makeMixedDataFrame)
import pandas.core.panel as panelm
import pandas.util.testing as tm
def ignore_sparse_panel_future_warning(func):
"""
decorator to ignore FutureWarning if we have a SparsePanel
can be removed when SparsePanel is fully removed
"""
@wraps(func)
def wrapper(self, *args, **kwargs):
if isinstance(self.panel, SparsePanel):
with assert_produces_warning(FutureWarning,
check_stacklevel=False):
return func(self, *args, **kwargs)
else:
return func(self, *args, **kwargs)
return wrapper
class PanelTests(object):
panel = None
def test_pickle(self):
unpickled = self.round_trip_pickle(self.panel)
assert_frame_equal(unpickled['ItemA'], self.panel['ItemA'])
def test_rank(self):
self.assertRaises(NotImplementedError, lambda: self.panel.rank())
def test_cumsum(self):
cumsum = self.panel.cumsum()
assert_frame_equal(cumsum['ItemA'], self.panel['ItemA'].cumsum())
def not_hashable(self):
c_empty = Panel()
c = Panel(Panel([[[1]]]))
self.assertRaises(TypeError, hash, c_empty)
self.assertRaises(TypeError, hash, c)
class SafeForLongAndSparse(object):
_multiprocess_can_split_ = True
def test_repr(self):
repr(self.panel)
@ignore_sparse_panel_future_warning
def test_copy_names(self):
for attr in ('major_axis', 'minor_axis'):
getattr(self.panel, attr).name = None
cp = self.panel.copy()
getattr(cp, attr).name = 'foo'
self.assertIsNone(getattr(self.panel, attr).name)
def test_iter(self):
tm.equalContents(list(self.panel), self.panel.items)
def test_count(self):
f = lambda s: notnull(s).sum()
self._check_stat_op('count', f, obj=self.panel, has_skipna=False)
def test_sum(self):
self._check_stat_op('sum', np.sum)
def test_mean(self):
self._check_stat_op('mean', np.mean)
def test_prod(self):
self._check_stat_op('prod', np.prod)
def test_median(self):
def wrapper(x):
if isnull(x).any():
return np.nan
return np.median(x)
self._check_stat_op('median', wrapper)
def test_min(self):
self._check_stat_op('min', np.min)
def test_max(self):
self._check_stat_op('max', np.max)
def test_skew(self):
try:
from scipy.stats import skew
except ImportError:
raise nose.SkipTest("no scipy.stats.skew")
def this_skew(x):
if len(x) < 3:
return np.nan
return skew(x, bias=False)
self._check_stat_op('skew', this_skew)
# def test_mad(self):
# f = lambda x: np.abs(x - x.mean()).mean()
# self._check_stat_op('mad', f)
def test_var(self):
def alt(x):
if len(x) < 2:
return np.nan
return np.var(x, ddof=1)
self._check_stat_op('var', alt)
def test_std(self):
def alt(x):
if len(x) < 2:
return np.nan
return np.std(x, ddof=1)
self._check_stat_op('std', alt)
def test_sem(self):
def alt(x):
if len(x) < 2:
return np.nan
return np.std(x, ddof=1) / np.sqrt(len(x))
self._check_stat_op('sem', alt)
# def test_skew(self):
# from scipy.stats import skew
# def alt(x):
# if len(x) < 3:
# return np.nan
# return skew(x, bias=False)
# self._check_stat_op('skew', alt)
def _check_stat_op(self, name, alternative, obj=None, has_skipna=True):
if obj is None:
obj = self.panel
# # set some NAs
# obj.ix[5:10] = np.nan
# obj.ix[15:20, -2:] = np.nan
f = getattr(obj, name)
if has_skipna:
def skipna_wrapper(x):
nona = remove_na(x)
if len(nona) == 0:
return np.nan
return alternative(nona)
def wrapper(x):
return alternative(np.asarray(x))
for i in range(obj.ndim):
result = f(axis=i, skipna=False)
assert_frame_equal(result, obj.apply(wrapper, axis=i))
else:
skipna_wrapper = alternative
wrapper = alternative
for i in range(obj.ndim):
result = f(axis=i)
if not tm._incompat_bottleneck_version(name):
assert_frame_equal(result, obj.apply(skipna_wrapper, axis=i))
self.assertRaises(Exception, f, axis=obj.ndim)
# Unimplemented numeric_only parameter.
if 'numeric_only' in signature(f).args:
self.assertRaisesRegexp(NotImplementedError, name, f,
numeric_only=True)
class SafeForSparse(object):
_multiprocess_can_split_ = True
@classmethod
def assert_panel_equal(cls, x, y):
assert_panel_equal(x, y)
def test_get_axis(self):
assert (self.panel._get_axis(0) is self.panel.items)
assert (self.panel._get_axis(1) is self.panel.major_axis)
assert (self.panel._get_axis(2) is self.panel.minor_axis)
def test_set_axis(self):
new_items = Index(np.arange(len(self.panel.items)))
new_major = Index(np.arange(len(self.panel.major_axis)))
new_minor = Index(np.arange(len(self.panel.minor_axis)))
# ensure propagate to potentially prior-cached items too
item = self.panel['ItemA']
self.panel.items = new_items
if hasattr(self.panel, '_item_cache'):
self.assertNotIn('ItemA', self.panel._item_cache)
self.assertIs(self.panel.items, new_items)
# TODO: unused?
item = self.panel[0] # noqa
self.panel.major_axis = new_major
self.assertIs(self.panel[0].index, new_major)
self.assertIs(self.panel.major_axis, new_major)
# TODO: unused?
item = self.panel[0] # noqa
self.panel.minor_axis = new_minor
self.assertIs(self.panel[0].columns, new_minor)
self.assertIs(self.panel.minor_axis, new_minor)
def test_get_axis_number(self):
self.assertEqual(self.panel._get_axis_number('items'), 0)
self.assertEqual(self.panel._get_axis_number('major'), 1)
self.assertEqual(self.panel._get_axis_number('minor'), 2)
def test_get_axis_name(self):
self.assertEqual(self.panel._get_axis_name(0), 'items')
self.assertEqual(self.panel._get_axis_name(1), 'major_axis')
self.assertEqual(self.panel._get_axis_name(2), 'minor_axis')
def test_get_plane_axes(self):
# what to do here?
index, columns = self.panel._get_plane_axes('items')
index, columns = self.panel._get_plane_axes('major_axis')
index, columns = self.panel._get_plane_axes('minor_axis')
index, columns = self.panel._get_plane_axes(0)
@ignore_sparse_panel_future_warning
def test_truncate(self):
dates = self.panel.major_axis
start, end = dates[1], dates[5]
trunced = self.panel.truncate(start, end, axis='major')
expected = self.panel['ItemA'].truncate(start, end)
assert_frame_equal(trunced['ItemA'], expected)
trunced = self.panel.truncate(before=start, axis='major')
expected = self.panel['ItemA'].truncate(before=start)
assert_frame_equal(trunced['ItemA'], expected)
trunced = self.panel.truncate(after=end, axis='major')
expected = self.panel['ItemA'].truncate(after=end)
assert_frame_equal(trunced['ItemA'], expected)
# XXX test other axes
def test_arith(self):
self._test_op(self.panel, operator.add)
self._test_op(self.panel, operator.sub)
self._test_op(self.panel, operator.mul)
self._test_op(self.panel, operator.truediv)
self._test_op(self.panel, operator.floordiv)
self._test_op(self.panel, operator.pow)
self._test_op(self.panel, lambda x, y: y + x)
self._test_op(self.panel, lambda x, y: y - x)
self._test_op(self.panel, lambda x, y: y * x)
self._test_op(self.panel, lambda x, y: y / x)
self._test_op(self.panel, lambda x, y: y ** x)
self._test_op(self.panel, lambda x, y: x + y) # panel + 1
self._test_op(self.panel, lambda x, y: x - y) # panel - 1
self._test_op(self.panel, lambda x, y: x * y) # panel * 1
self._test_op(self.panel, lambda x, y: x / y) # panel / 1
self._test_op(self.panel, lambda x, y: x ** y) # panel ** 1
self.assertRaises(Exception, self.panel.__add__, self.panel['ItemA'])
@staticmethod
def _test_op(panel, op):
result = op(panel, 1)
assert_frame_equal(result['ItemA'], op(panel['ItemA'], 1))
def test_keys(self):
tm.equalContents(list(self.panel.keys()), self.panel.items)
def test_iteritems(self):
# Test panel.iteritems(), aka panel.iteritems()
# just test that it works
for k, v in self.panel.iteritems():
pass
self.assertEqual(len(list(self.panel.iteritems())),
len(self.panel.items))
@ignore_sparse_panel_future_warning
def test_combineFrame(self):
def check_op(op, name):
# items
df = self.panel['ItemA']
func = getattr(self.panel, name)
result = func(df, axis='items')
assert_frame_equal(result['ItemB'], op(self.panel['ItemB'], df))
# major
xs = self.panel.major_xs(self.panel.major_axis[0])
result = func(xs, axis='major')
idx = self.panel.major_axis[1]
assert_frame_equal(result.major_xs(idx),
op(self.panel.major_xs(idx), xs))
# minor
xs = self.panel.minor_xs(self.panel.minor_axis[0])
result = func(xs, axis='minor')
idx = self.panel.minor_axis[1]
assert_frame_equal(result.minor_xs(idx),
op(self.panel.minor_xs(idx), xs))
ops = ['add', 'sub', 'mul', 'truediv', 'floordiv']
if not compat.PY3:
ops.append('div')
# pow, mod not supported for SparsePanel as flex ops (for now)
if not isinstance(self.panel, SparsePanel):
ops.extend(['pow', 'mod'])
else:
idx = self.panel.minor_axis[1]
with assertRaisesRegexp(ValueError, "Simple arithmetic.*scalar"):
self.panel.pow(self.panel.minor_xs(idx), axis='minor')
with assertRaisesRegexp(ValueError, "Simple arithmetic.*scalar"):
self.panel.mod(self.panel.minor_xs(idx), axis='minor')
for op in ops:
try:
check_op(getattr(operator, op), op)
except:
com.pprint_thing("Failing operation: %r" % op)
raise
if compat.PY3:
try:
check_op(operator.truediv, 'div')
except:
com.pprint_thing("Failing operation: %r" % 'div')
raise
@ignore_sparse_panel_future_warning
def test_combinePanel(self):
result = self.panel.add(self.panel)
self.assert_panel_equal(result, self.panel * 2)
@ignore_sparse_panel_future_warning
def test_neg(self):
self.assert_panel_equal(-self.panel, self.panel * -1)
# issue 7692
def test_raise_when_not_implemented(self):
p = Panel(np.arange(3 * 4 * 5).reshape(3, 4, 5),
items=['ItemA', 'ItemB', 'ItemC'],
major_axis=pd.date_range('20130101', periods=4),
minor_axis=list('ABCDE'))
d = p.sum(axis=1).ix[0]
ops = ['add', 'sub', 'mul', 'truediv', 'floordiv', 'div', 'mod', 'pow']
for op in ops:
with self.assertRaises(NotImplementedError):
getattr(p, op)(d, axis=0)
@ignore_sparse_panel_future_warning
def test_select(self):
p = self.panel
# select items
result = p.select(lambda x: x in ('ItemA', 'ItemC'), axis='items')
expected = p.reindex(items=['ItemA', 'ItemC'])
self.assert_panel_equal(result, expected)
# select major_axis
result = p.select(lambda x: x >= datetime(2000, 1, 15), axis='major')
new_major = p.major_axis[p.major_axis >= datetime(2000, 1, 15)]
expected = p.reindex(major=new_major)
self.assert_panel_equal(result, expected)
# select minor_axis
result = p.select(lambda x: x in ('D', 'A'), axis=2)
expected = p.reindex(minor=['A', 'D'])
self.assert_panel_equal(result, expected)
# corner case, empty thing
result = p.select(lambda x: x in ('foo', ), axis='items')
self.assert_panel_equal(result, p.reindex(items=[]))
def test_get_value(self):
for item in self.panel.items:
for mjr in self.panel.major_axis[::2]:
for mnr in self.panel.minor_axis:
result = self.panel.get_value(item, mjr, mnr)
expected = self.panel[item][mnr][mjr]
assert_almost_equal(result, expected)
@ignore_sparse_panel_future_warning
def test_abs(self):
result = self.panel.abs()
result2 = abs(self.panel)
expected = np.abs(self.panel)
self.assert_panel_equal(result, expected)
self.assert_panel_equal(result2, expected)
df = self.panel['ItemA']
result = df.abs()
result2 = abs(df)
expected = np.abs(df)
assert_frame_equal(result, expected)
assert_frame_equal(result2, expected)
s = df['A']
result = s.abs()
result2 = abs(s)
expected = np.abs(s)
assert_series_equal(result, expected)
assert_series_equal(result2, expected)
self.assertEqual(result.name, 'A')
self.assertEqual(result2.name, 'A')
class CheckIndexing(object):
_multiprocess_can_split_ = True
def test_getitem(self):
self.assertRaises(Exception, self.panel.__getitem__, 'ItemQ')
def test_delitem_and_pop(self):
expected = self.panel['ItemA']
result = self.panel.pop('ItemA')
assert_frame_equal(expected, result)
self.assertNotIn('ItemA', self.panel.items)
del self.panel['ItemB']
self.assertNotIn('ItemB', self.panel.items)
self.assertRaises(Exception, self.panel.__delitem__, 'ItemB')
values = np.empty((3, 3, 3))
values[0] = 0
values[1] = 1
values[2] = 2
panel = Panel(values, lrange(3), lrange(3), lrange(3))
# did we delete the right row?
panelc = panel.copy()
del panelc[0]
assert_frame_equal(panelc[1], panel[1])
assert_frame_equal(panelc[2], panel[2])
panelc = panel.copy()
del panelc[1]
assert_frame_equal(panelc[0], panel[0])
assert_frame_equal(panelc[2], panel[2])
panelc = panel.copy()
del panelc[2]
assert_frame_equal(panelc[1], panel[1])
assert_frame_equal(panelc[0], panel[0])
def test_setitem(self):
# LongPanel with one item
lp = self.panel.filter(['ItemA', 'ItemB']).to_frame()
with tm.assertRaises(ValueError):
self.panel['ItemE'] = lp
# DataFrame
df = self.panel['ItemA'][2:].filter(items=['A', 'B'])
self.panel['ItemF'] = df
self.panel['ItemE'] = df
df2 = self.panel['ItemF']
assert_frame_equal(df, df2.reindex(index=df.index, columns=df.columns))
# scalar
self.panel['ItemG'] = 1
self.panel['ItemE'] = True
self.assertEqual(self.panel['ItemG'].values.dtype, np.int64)
self.assertEqual(self.panel['ItemE'].values.dtype, np.bool_)
# object dtype
self.panel['ItemQ'] = 'foo'
self.assertEqual(self.panel['ItemQ'].values.dtype, np.object_)
# boolean dtype
self.panel['ItemP'] = self.panel['ItemA'] > 0
self.assertEqual(self.panel['ItemP'].values.dtype, np.bool_)
self.assertRaises(TypeError, self.panel.__setitem__, 'foo',
self.panel.ix[['ItemP']])
# bad shape
p = Panel(np.random.randn(4, 3, 2))
with tm.assertRaisesRegexp(ValueError,
"shape of value must be \(3, 2\), "
"shape of given object was \(4, 2\)"):
p[0] = np.random.randn(4, 2)
def test_setitem_ndarray(self):
from pandas import date_range, datetools
timeidx = date_range(start=datetime(2009, 1, 1),
end=datetime(2009, 12, 31),
freq=datetools.MonthEnd())
lons_coarse = np.linspace(-177.5, 177.5, 72)
lats_coarse = np.linspace(-87.5, 87.5, 36)
P = Panel(items=timeidx, major_axis=lons_coarse,
minor_axis=lats_coarse)
data = np.random.randn(72 * 36).reshape((72, 36))
key = datetime(2009, 2, 28)
P[key] = data
assert_almost_equal(P[key].values, data)
def test_set_minor_major(self):
# GH 11014
df1 = DataFrame(['a', 'a', 'a', np.nan, 'a', np.nan])
df2 = DataFrame([1.0, np.nan, 1.0, np.nan, 1.0, 1.0])
panel = Panel({'Item1': df1, 'Item2': df2})
newminor = notnull(panel.iloc[:, :, 0])
panel.loc[:, :, 'NewMinor'] = newminor
assert_frame_equal(panel.loc[:, :, 'NewMinor'],
newminor.astype(object))
newmajor = notnull(panel.iloc[:, 0, :])
panel.loc[:, 'NewMajor', :] = newmajor
assert_frame_equal(panel.loc[:, 'NewMajor', :],
newmajor.astype(object))
def test_major_xs(self):
ref = self.panel['ItemA']
idx = self.panel.major_axis[5]
xs = self.panel.major_xs(idx)
result = xs['ItemA']
assert_series_equal(result, ref.xs(idx), check_names=False)
self.assertEqual(result.name, 'ItemA')
# not contained
idx = self.panel.major_axis[0] - bday
self.assertRaises(Exception, self.panel.major_xs, idx)
def test_major_xs_mixed(self):
self.panel['ItemD'] = 'foo'
xs = self.panel.major_xs(self.panel.major_axis[0])
self.assertEqual(xs['ItemA'].dtype, np.float64)
self.assertEqual(xs['ItemD'].dtype, np.object_)
def test_minor_xs(self):
ref = self.panel['ItemA']
idx = self.panel.minor_axis[1]
xs = self.panel.minor_xs(idx)
assert_series_equal(xs['ItemA'], ref[idx], check_names=False)
# not contained
self.assertRaises(Exception, self.panel.minor_xs, 'E')
def test_minor_xs_mixed(self):
self.panel['ItemD'] = 'foo'
xs = self.panel.minor_xs('D')
self.assertEqual(xs['ItemA'].dtype, np.float64)
self.assertEqual(xs['ItemD'].dtype, np.object_)
def test_xs(self):
itemA = self.panel.xs('ItemA', axis=0)
expected = self.panel['ItemA']
assert_frame_equal(itemA, expected)
# get a view by default
itemA_view = self.panel.xs('ItemA', axis=0)
itemA_view.values[:] = np.nan
self.assertTrue(np.isnan(self.panel['ItemA'].values).all())
# mixed-type yields a copy
self.panel['strings'] = 'foo'
result = self.panel.xs('D', axis=2)
self.assertIsNotNone(result.is_copy)
def test_getitem_fancy_labels(self):
p = self.panel
items = p.items[[1, 0]]
dates = p.major_axis[::2]
cols = ['D', 'C', 'F']
# all 3 specified
assert_panel_equal(p.ix[items, dates, cols],
p.reindex(items=items, major=dates, minor=cols))
# 2 specified
assert_panel_equal(p.ix[:, dates, cols],
p.reindex(major=dates, minor=cols))
assert_panel_equal(p.ix[items, :, cols],
p.reindex(items=items, minor=cols))
assert_panel_equal(p.ix[items, dates, :],
p.reindex(items=items, major=dates))
# only 1
assert_panel_equal(p.ix[items, :, :], p.reindex(items=items))
assert_panel_equal(p.ix[:, dates, :], p.reindex(major=dates))
assert_panel_equal(p.ix[:, :, cols], p.reindex(minor=cols))
def test_getitem_fancy_slice(self):
pass
def test_getitem_fancy_ints(self):
p = self.panel
# #1603
result = p.ix[:, -1, :]
expected = p.ix[:, p.major_axis[-1], :]
assert_frame_equal(result, expected)
def test_getitem_fancy_xs(self):
p = self.panel
item = 'ItemB'
date = p.major_axis[5]
col = 'C'
# get DataFrame
# item
assert_frame_equal(p.ix[item], p[item])
assert_frame_equal(p.ix[item, :], p[item])
assert_frame_equal(p.ix[item, :, :], p[item])
# major axis, axis=1
assert_frame_equal(p.ix[:, date], p.major_xs(date))
assert_frame_equal(p.ix[:, date, :], p.major_xs(date))
# minor axis, axis=2
assert_frame_equal(p.ix[:, :, 'C'], p.minor_xs('C'))
# get Series
assert_series_equal(p.ix[item, date], p[item].ix[date])
assert_series_equal(p.ix[item, date, :], p[item].ix[date])
assert_series_equal(p.ix[item, :, col], p[item][col])
assert_series_equal(p.ix[:, date, col], p.major_xs(date).ix[col])
def test_getitem_fancy_xs_check_view(self):
item = 'ItemB'
date = self.panel.major_axis[5]
# make sure it's always a view
NS = slice(None, None)
# DataFrames
comp = assert_frame_equal
self._check_view(item, comp)
self._check_view((item, NS), comp)
self._check_view((item, NS, NS), comp)
self._check_view((NS, date), comp)
self._check_view((NS, date, NS), comp)
self._check_view((NS, NS, 'C'), comp)
# Series
comp = assert_series_equal
self._check_view((item, date), comp)
self._check_view((item, date, NS), comp)
self._check_view((item, NS, 'C'), comp)
self._check_view((NS, date, 'C'), comp)
def test_ix_setitem_slice_dataframe(self):
a = Panel(items=[1, 2, 3], major_axis=[11, 22, 33],
minor_axis=[111, 222, 333])
b = DataFrame(np.random.randn(2, 3), index=[111, 333],
columns=[1, 2, 3])
a.ix[:, 22, [111, 333]] = b
assert_frame_equal(a.ix[:, 22, [111, 333]], b)
def test_ix_align(self):
from pandas import Series
b = Series(np.random.randn(10), name=0)
b.sort()
df_orig = Panel(np.random.randn(3, 10, 2))
df = df_orig.copy()
df.ix[0, :, 0] = b
assert_series_equal(df.ix[0, :, 0].reindex(b.index), b)
df = df_orig.swapaxes(0, 1)
df.ix[:, 0, 0] = b
assert_series_equal(df.ix[:, 0, 0].reindex(b.index), b)
df = df_orig.swapaxes(1, 2)
df.ix[0, 0, :] = b
assert_series_equal(df.ix[0, 0, :].reindex(b.index), b)
def test_ix_frame_align(self):
p_orig = tm.makePanel()
df = p_orig.ix[0].copy()
assert_frame_equal(p_orig['ItemA'], df)
p = p_orig.copy()
p.ix[0, :, :] = df
assert_panel_equal(p, p_orig)
p = p_orig.copy()
p.ix[0] = df
assert_panel_equal(p, p_orig)
p = p_orig.copy()
p.iloc[0, :, :] = df
assert_panel_equal(p, p_orig)
p = p_orig.copy()
p.iloc[0] = df
assert_panel_equal(p, p_orig)
p = p_orig.copy()
p.loc['ItemA'] = df
assert_panel_equal(p, p_orig)
p = p_orig.copy()
p.loc['ItemA', :, :] = df
assert_panel_equal(p, p_orig)
p = p_orig.copy()
p['ItemA'] = df
assert_panel_equal(p, p_orig)
p = p_orig.copy()
p.ix[0, [0, 1, 3, 5], -2:] = df
out = p.ix[0, [0, 1, 3, 5], -2:]
assert_frame_equal(out, df.iloc[[0, 1, 3, 5], [2, 3]])
# GH3830, panel assignment by values/frame
for dtype in ['float64', 'int64']:
panel = Panel(np.arange(40).reshape((2, 4, 5)),
items=['a1', 'a2'], dtype=dtype)
df1 = panel.iloc[0]
df2 = panel.iloc[1]
tm.assert_frame_equal(panel.loc['a1'], df1)
tm.assert_frame_equal(panel.loc['a2'], df2)
# Assignment by Value Passes for 'a2'
panel.loc['a2'] = df1.values
tm.assert_frame_equal(panel.loc['a1'], df1)
tm.assert_frame_equal(panel.loc['a2'], df1)
# Assignment by DataFrame Ok w/o loc 'a2'
panel['a2'] = df2
tm.assert_frame_equal(panel.loc['a1'], df1)
tm.assert_frame_equal(panel.loc['a2'], df2)
# Assignment by DataFrame Fails for 'a2'
panel.loc['a2'] = df2
tm.assert_frame_equal(panel.loc['a1'], df1)
tm.assert_frame_equal(panel.loc['a2'], df2)
def _check_view(self, indexer, comp):
cp = self.panel.copy()
obj = cp.ix[indexer]
obj.values[:] = 0
self.assertTrue((obj.values == 0).all())
comp(cp.ix[indexer].reindex_like(obj), obj)
def test_logical_with_nas(self):
d = Panel({'ItemA': {'a': [np.nan, False]},
'ItemB': {'a': [True, True]}})
result = d['ItemA'] | d['ItemB']
expected = DataFrame({'a': [np.nan, True]})
assert_frame_equal(result, expected)
# this is autodowncasted here
result = d['ItemA'].fillna(False) | d['ItemB']
expected = DataFrame({'a': [True, True]})
assert_frame_equal(result, expected)
def test_neg(self):
# what to do?
assert_panel_equal(-self.panel, -1 * self.panel)
def test_invert(self):
assert_panel_equal(-(self.panel < 0), ~(self.panel < 0))
def test_comparisons(self):
p1 = tm.makePanel()
p2 = tm.makePanel()
tp = p1.reindex(items=p1.items + ['foo'])
df = p1[p1.items[0]]
def test_comp(func):
# versus same index
result = func(p1, p2)
self.assert_numpy_array_equal(result.values,
func(p1.values, p2.values))
# versus non-indexed same objs
self.assertRaises(Exception, func, p1, tp)
# versus different objs
self.assertRaises(Exception, func, p1, df)
# versus scalar
result3 = func(self.panel, 0)
self.assert_numpy_array_equal(result3.values,
func(self.panel.values, 0))
test_comp(operator.eq)
test_comp(operator.ne)
test_comp(operator.lt)
test_comp(operator.gt)
test_comp(operator.ge)
test_comp(operator.le)
def test_get_value(self):
for item in self.panel.items:
for mjr in self.panel.major_axis[::2]:
for mnr in self.panel.minor_axis:
result = self.panel.get_value(item, mjr, mnr)
expected = self.panel[item][mnr][mjr]
assert_almost_equal(result, expected)
with tm.assertRaisesRegexp(TypeError,
"There must be an argument for each axis"):
self.panel.get_value('a')
def test_set_value(self):
for item in self.panel.items:
for mjr in self.panel.major_axis[::2]:
for mnr in self.panel.minor_axis:
self.panel.set_value(item, mjr, mnr, 1.)
assert_almost_equal(self.panel[item][mnr][mjr], 1.)
# resize
res = self.panel.set_value('ItemE', 'foo', 'bar', 1.5)
tm.assertIsInstance(res, Panel)
self.assertIsNot(res, self.panel)
self.assertEqual(res.get_value('ItemE', 'foo', 'bar'), 1.5)
res3 = self.panel.set_value('ItemE', 'foobar', 'baz', 5)
self.assertTrue(com.is_float_dtype(res3['ItemE'].values))
with tm.assertRaisesRegexp(TypeError,
"There must be an argument for each axis"
" plus the value provided"):
self.panel.set_value('a')
_panel = tm.makePanel()
tm.add_nans(_panel)
class TestPanel(tm.TestCase, PanelTests, CheckIndexing, SafeForLongAndSparse,
SafeForSparse):
_multiprocess_can_split_ = True
@classmethod
def assert_panel_equal(cls, x, y):
assert_panel_equal(x, y)
def setUp(self):
self.panel = _panel.copy()
self.panel.major_axis.name = None
self.panel.minor_axis.name = None
self.panel.items.name = None
def test_panel_warnings(self):
with tm.assert_produces_warning(FutureWarning):
shifted1 = self.panel.shift(lags=1)
with tm.assert_produces_warning(False):
shifted2 = self.panel.shift(periods=1)
tm.assert_panel_equal(shifted1, shifted2)
with tm.assert_produces_warning(False):
shifted3 = self.panel.shift()
tm.assert_panel_equal(shifted1, shifted3)
def test_constructor(self):
# with BlockManager
wp = Panel(self.panel._data)
self.assertIs(wp._data, self.panel._data)
wp = Panel(self.panel._data, copy=True)
self.assertIsNot(wp._data, self.panel._data)
assert_panel_equal(wp, self.panel)
# strings handled properly
wp = Panel([[['foo', 'foo', 'foo', ], ['foo', 'foo', 'foo']]])
self.assertEqual(wp.values.dtype, np.object_)
vals = self.panel.values
# no copy
wp = Panel(vals)
self.assertIs(wp.values, vals)
# copy
wp = Panel(vals, copy=True)
self.assertIsNot(wp.values, vals)
# GH #8285, test when scalar data is used to construct a Panel
# if dtype is not passed, it should be inferred
value_and_dtype = [(1, 'int64'), (3.14, 'float64'),
('foo', np.object_)]
for (val, dtype) in value_and_dtype:
wp = Panel(val, items=range(2), major_axis=range(3),
minor_axis=range(4))
vals = np.empty((2, 3, 4), dtype=dtype)
vals.fill(val)
assert_panel_equal(wp, Panel(vals, dtype=dtype))
# test the case when dtype is passed
wp = Panel(1, items=range(2), major_axis=range(3), minor_axis=range(4),
dtype='float32')
vals = np.empty((2, 3, 4), dtype='float32')
vals.fill(1)
assert_panel_equal(wp, Panel(vals, dtype='float32'))
def test_constructor_cast(self):
zero_filled = self.panel.fillna(0)
casted = Panel(zero_filled._data, dtype=int)
casted2 = Panel(zero_filled.values, dtype=int)
exp_values = zero_filled.values.astype(int)
assert_almost_equal(casted.values, exp_values)
assert_almost_equal(casted2.values, exp_values)
casted = Panel(zero_filled._data, dtype=np.int32)
casted2 = Panel(zero_filled.values, dtype=np.int32)
exp_values = zero_filled.values.astype(np.int32)
assert_almost_equal(casted.values, exp_values)
assert_almost_equal(casted2.values, exp_values)
# can't cast
data = [[['foo', 'bar', 'baz']]]
self.assertRaises(ValueError, Panel, data, dtype=float)
def test_constructor_empty_panel(self):
empty = Panel()
self.assertEqual(len(empty.items), 0)
self.assertEqual(len(empty.major_axis), 0)
self.assertEqual(len(empty.minor_axis), 0)
def test_constructor_observe_dtype(self):
# GH #411
panel = Panel(items=lrange(3), major_axis=lrange(3),
minor_axis=lrange(3), dtype='O')
self.assertEqual(panel.values.dtype, np.object_)
def test_constructor_dtypes(self):
# GH #797
def _check_dtype(panel, dtype):
for i in panel.items:
self.assertEqual(panel[i].values.dtype.name, dtype)
# only nan holding types allowed here
for dtype in ['float64', 'float32', 'object']:
panel = Panel(items=lrange(2), major_axis=lrange(10),
minor_axis=lrange(5), dtype=dtype)
_check_dtype(panel, dtype)
for dtype in ['float64', 'float32', 'int64', 'int32', 'object']:
panel = Panel(np.array(np.random.randn(2, 10, 5), dtype=dtype),
items=lrange(2),
major_axis=lrange(10),
minor_axis=lrange(5), dtype=dtype)
_check_dtype(panel, dtype)
for dtype in ['float64', 'float32', 'int64', 'int32', 'object']:
panel = Panel(np.array(np.random.randn(2, 10, 5), dtype='O'),
items=lrange(2),
major_axis=lrange(10),
minor_axis=lrange(5), dtype=dtype)
_check_dtype(panel, dtype)
for dtype in ['float64', 'float32', 'int64', 'int32', 'object']:
panel = Panel(np.random.randn(2, 10, 5), items=lrange(
2), major_axis=lrange(10), minor_axis=lrange(5), dtype=dtype)
_check_dtype(panel, dtype)
for dtype in ['float64', 'float32', 'int64', 'int32', 'object']:
df1 = DataFrame(np.random.randn(2, 5),
index=lrange(2), columns=lrange(5))
df2 = DataFrame(np.random.randn(2, 5),
index=lrange(2), columns=lrange(5))
panel = Panel.from_dict({'a': df1, 'b': df2}, dtype=dtype)
_check_dtype(panel, dtype)
def test_constructor_fails_with_not_3d_input(self):
with tm.assertRaisesRegexp(ValueError,
"The number of dimensions required is 3"):
Panel(np.random.randn(10, 2))
def test_consolidate(self):
self.assertTrue(self.panel._data.is_consolidated())
self.panel['foo'] = 1.
self.assertFalse(self.panel._data.is_consolidated())
panel = self.panel.consolidate()
self.assertTrue(panel._data.is_consolidated())
def test_ctor_dict(self):
itema = self.panel['ItemA']
itemb = self.panel['ItemB']
d = {'A': itema, 'B': itemb[5:]}
d2 = {'A': itema._series, 'B': itemb[5:]._series}
d3 = {'A': None,
'B': DataFrame(itemb[5:]._series),
'C': DataFrame(itema._series)}
wp = Panel.from_dict(d)
wp2 = Panel.from_dict(d2) # nested Dict
# TODO: unused?
wp3 = Panel.from_dict(d3) # noqa
self.assertTrue(wp.major_axis.equals(self.panel.major_axis))
assert_panel_equal(wp, wp2)
# intersect
wp = Panel.from_dict(d, intersect=True)
self.assertTrue(wp.major_axis.equals(itemb.index[5:]))
# use constructor
assert_panel_equal(Panel(d), Panel.from_dict(d))
assert_panel_equal(Panel(d2), Panel.from_dict(d2))
assert_panel_equal(Panel(d3), Panel.from_dict(d3))
# a pathological case
d4 = {'A': None, 'B': None}
# TODO: unused?
wp4 = Panel.from_dict(d4) # noqa
assert_panel_equal(Panel(d4), Panel(items=['A', 'B']))
# cast
dcasted = dict((k, v.reindex(wp.major_axis).fillna(0))
for k, v in compat.iteritems(d))
result = Panel(dcasted, dtype=int)
expected = Panel(dict((k, v.astype(int))
for k, v in compat.iteritems(dcasted)))
assert_panel_equal(result, expected)
result = Panel(dcasted, dtype=np.int32)
expected = Panel(dict((k, v.astype(np.int32))
for k, v in compat.iteritems(dcasted)))
assert_panel_equal(result, expected)
def test_constructor_dict_mixed(self):
data = dict((k, v.values) for k, v in self.panel.iteritems())
result = Panel(data)
exp_major = Index(np.arange(len(self.panel.major_axis)))
self.assertTrue(result.major_axis.equals(exp_major))
result = Panel(data, items=self.panel.items,
major_axis=self.panel.major_axis,
minor_axis=self.panel.minor_axis)
assert_panel_equal(result, self.panel)
data['ItemC'] = self.panel['ItemC']
result = Panel(data)
assert_panel_equal(result, self.panel)
# corner, blow up
data['ItemB'] = data['ItemB'][:-1]
self.assertRaises(Exception, Panel, data)
data['ItemB'] = self.panel['ItemB'].values[:, :-1]
self.assertRaises(Exception, Panel, data)
def test_ctor_orderedDict(self):
keys = list(set(np.random.randint(0, 5000, 100)))[
:50] # unique random int keys
d = OrderedDict([(k, mkdf(10, 5)) for k in keys])
p = Panel(d)
self.assertTrue(list(p.items) == keys)
p = Panel.from_dict(d)
self.assertTrue(list(p.items) == keys)
def test_constructor_resize(self):
data = self.panel._data
items = self.panel.items[:-1]
major = self.panel.major_axis[:-1]
minor = self.panel.minor_axis[:-1]
result = Panel(data, items=items, major_axis=major, minor_axis=minor)
expected = self.panel.reindex(items=items, major=major, minor=minor)
assert_panel_equal(result, expected)
result = Panel(data, items=items, major_axis=major)
expected = self.panel.reindex(items=items, major=major)
assert_panel_equal(result, expected)
result = Panel(data, items=items)
expected = self.panel.reindex(items=items)
assert_panel_equal(result, expected)
result = Panel(data, minor_axis=minor)
expected = self.panel.reindex(minor=minor)
assert_panel_equal(result, expected)
def test_from_dict_mixed_orient(self):
df = tm.makeDataFrame()
df['foo'] = 'bar'
data = {'k1': df, 'k2': df}
panel = Panel.from_dict(data, orient='minor')
self.assertEqual(panel['foo'].values.dtype, np.object_)
self.assertEqual(panel['A'].values.dtype, np.float64)
def test_constructor_error_msgs(self):
def testit():
Panel(np.random.randn(3, 4, 5), lrange(4), lrange(5), lrange(5))
assertRaisesRegexp(ValueError,
"Shape of passed values is \(3, 4, 5\), "
"indices imply \(4, 5, 5\)",
testit)
def testit():
Panel(np.random.randn(3, 4, 5), lrange(5), lrange(4), lrange(5))
assertRaisesRegexp(ValueError,
"Shape of passed values is \(3, 4, 5\), "
"indices imply \(5, 4, 5\)",
testit)
def testit():
Panel(np.random.randn(3, 4, 5), lrange(5), lrange(5), lrange(4))
assertRaisesRegexp(ValueError,
"Shape of passed values is \(3, 4, 5\), "
"indices imply \(5, 5, 4\)",
testit)
def test_conform(self):
df = self.panel['ItemA'][:-5].filter(items=['A', 'B'])
conformed = self.panel.conform(df)
assert (conformed.index.equals(self.panel.major_axis))
assert (conformed.columns.equals(self.panel.minor_axis))
def test_convert_objects(self):
# GH 4937
p = Panel(dict(A=dict(a=['1', '1.0'])))
expected = Panel(dict(A=dict(a=[1, 1.0])))
result = p._convert(numeric=True, coerce=True)
assert_panel_equal(result, expected)
def test_dtypes(self):
result = self.panel.dtypes
expected = Series(np.dtype('float64'), index=self.panel.items)
assert_series_equal(result, expected)
def test_apply(self):
# GH1148
# ufunc
applied = self.panel.apply(np.sqrt)
self.assertTrue(assert_almost_equal(applied.values, np.sqrt(
self.panel.values)))
# ufunc same shape
result = self.panel.apply(lambda x: x * 2, axis='items')
expected = self.panel * 2
assert_panel_equal(result, expected)
result = self.panel.apply(lambda x: x * 2, axis='major_axis')
expected = self.panel * 2
assert_panel_equal(result, expected)
result = self.panel.apply(lambda x: x * 2, axis='minor_axis')
expected = self.panel * 2
assert_panel_equal(result, expected)
# reduction to DataFrame
result = self.panel.apply(lambda x: x.dtype, axis='items')
expected = DataFrame(np.dtype('float64'), index=self.panel.major_axis,
columns=self.panel.minor_axis)
assert_frame_equal(result, expected)
result = self.panel.apply(lambda x: x.dtype, axis='major_axis')
expected = DataFrame(np.dtype('float64'), index=self.panel.minor_axis,
columns=self.panel.items)
assert_frame_equal(result, expected)
result = self.panel.apply(lambda x: x.dtype, axis='minor_axis')
expected = DataFrame(np.dtype('float64'), index=self.panel.major_axis,
columns=self.panel.items)
assert_frame_equal(result, expected)
# reductions via other dims
expected = self.panel.sum(0)
result = self.panel.apply(lambda x: x.sum(), axis='items')
assert_frame_equal(result, expected)
expected = self.panel.sum(1)
result = self.panel.apply(lambda x: x.sum(), axis='major_axis')
assert_frame_equal(result, expected)
expected = self.panel.sum(2)
result = self.panel.apply(lambda x: x.sum(), axis='minor_axis')
assert_frame_equal(result, expected)
# pass kwargs
result = self.panel.apply(lambda x, y: x.sum() + y, axis='items', y=5)
expected = self.panel.sum(0) + 5
assert_frame_equal(result, expected)
def test_apply_slabs(self):
# same shape as original
result = self.panel.apply(lambda x: x * 2,
axis=['items', 'major_axis'])
expected = (self.panel * 2).transpose('minor_axis', 'major_axis',
'items')
assert_panel_equal(result, expected)
result = self.panel.apply(lambda x: x * 2,
axis=['major_axis', 'items'])
assert_panel_equal(result, expected)
result = self.panel.apply(lambda x: x * 2,
axis=['items', 'minor_axis'])
expected = (self.panel * 2).transpose('major_axis', 'minor_axis',
'items')
assert_panel_equal(result, expected)
result = self.panel.apply(lambda x: x * 2,
axis=['minor_axis', 'items'])
assert_panel_equal(result, expected)
result = self.panel.apply(lambda x: x * 2,
axis=['major_axis', 'minor_axis'])
expected = self.panel * 2
assert_panel_equal(result, expected)
result = self.panel.apply(lambda x: x * 2,
axis=['minor_axis', 'major_axis'])
assert_panel_equal(result, expected)
# reductions
result = self.panel.apply(lambda x: x.sum(0), axis=[
'items', 'major_axis'
])
expected = self.panel.sum(1).T
assert_frame_equal(result, expected)
result = self.panel.apply(lambda x: x.sum(1), axis=[
'items', 'major_axis'
])
expected = self.panel.sum(0)
assert_frame_equal(result, expected)
# transforms
f = lambda x: ((x.T - x.mean(1)) / x.std(1)).T
# make sure that we don't trigger any warnings
with tm.assert_produces_warning(False):
result = self.panel.apply(f, axis=['items', 'major_axis'])
expected = Panel(dict([(ax, f(self.panel.loc[:, :, ax]))
for ax in self.panel.minor_axis]))
assert_panel_equal(result, expected)
result = self.panel.apply(f, axis=['major_axis', 'minor_axis'])
expected = Panel(dict([(ax, f(self.panel.loc[ax]))
for ax in self.panel.items]))
assert_panel_equal(result, expected)
result = self.panel.apply(f, axis=['minor_axis', 'items'])
expected = Panel(dict([(ax, f(self.panel.loc[:, ax]))
for ax in self.panel.major_axis]))
assert_panel_equal(result, expected)
# with multi-indexes
# GH7469
index = MultiIndex.from_tuples([('one', 'a'), ('one', 'b'), (
'two', 'a'), ('two', 'b')])
dfa = DataFrame(np.array(np.arange(12, dtype='int64')).reshape(
4, 3), columns=list("ABC"), index=index)
dfb = DataFrame(np.array(np.arange(10, 22, dtype='int64')).reshape(
4, 3), columns=list("ABC"), index=index)
p = Panel({'f': dfa, 'g': dfb})
result = p.apply(lambda x: x.sum(), axis=0)
# on windows this will be int32
result = result.astype('int64')
expected = p.sum(0)
assert_frame_equal(result, expected)
def test_apply_no_or_zero_ndim(self):
# GH10332
self.panel = Panel(np.random.rand(5, 5, 5))
result_int = self.panel.apply(lambda df: 0, axis=[1, 2])
result_float = self.panel.apply(lambda df: 0.0, axis=[1, 2])
result_int64 = self.panel.apply(lambda df: np.int64(0), axis=[1, 2])
result_float64 = self.panel.apply(lambda df: np.float64(0.0),
axis=[1, 2])
expected_int = expected_int64 = Series([0] * 5)
expected_float = expected_float64 = Series([0.0] * 5)
assert_series_equal(result_int, expected_int)
assert_series_equal(result_int64, expected_int64)
assert_series_equal(result_float, expected_float)
assert_series_equal(result_float64, expected_float64)
def test_reindex(self):
ref = self.panel['ItemB']
# items
result = self.panel.reindex(items=['ItemA', 'ItemB'])
assert_frame_equal(result['ItemB'], ref)
# major
new_major = list(self.panel.major_axis[:10])
result = self.panel.reindex(major=new_major)
assert_frame_equal(result['ItemB'], ref.reindex(index=new_major))
# raise exception when both major and major_axis are passed
self.assertRaises(Exception, self.panel.reindex, major_axis=new_major,
major=new_major)
# minor
new_minor = list(self.panel.minor_axis[:2])
result = self.panel.reindex(minor=new_minor)
assert_frame_equal(result['ItemB'], ref.reindex(columns=new_minor))
# this ok
result = self.panel.reindex()
assert_panel_equal(result, self.panel)
self.assertFalse(result is self.panel)
# with filling
smaller_major = self.panel.major_axis[::5]
smaller = self.panel.reindex(major=smaller_major)
larger = smaller.reindex(major=self.panel.major_axis, method='pad')
assert_frame_equal(larger.major_xs(self.panel.major_axis[1]),
smaller.major_xs(smaller_major[0]))
# don't necessarily copy
result = self.panel.reindex(major=self.panel.major_axis, copy=False)
assert_panel_equal(result, self.panel)
self.assertTrue(result is self.panel)
def test_reindex_multi(self):
# with and without copy full reindexing
result = self.panel.reindex(items=self.panel.items,
major=self.panel.major_axis,
minor=self.panel.minor_axis, copy=False)
self.assertIs(result.items, self.panel.items)
self.assertIs(result.major_axis, self.panel.major_axis)
self.assertIs(result.minor_axis, self.panel.minor_axis)
result = self.panel.reindex(items=self.panel.items,
major=self.panel.major_axis,
minor=self.panel.minor_axis, copy=False)
assert_panel_equal(result, self.panel)
# multi-axis indexing consistency
# GH 5900
df = DataFrame(np.random.randn(4, 3))
p = Panel({'Item1': df})
expected = Panel({'Item1': df})
expected['Item2'] = np.nan
items = ['Item1', 'Item2']
major_axis = np.arange(4)
minor_axis = np.arange(3)
results = []
results.append(p.reindex(items=items, major_axis=major_axis,
copy=True))
results.append(p.reindex(items=items, major_axis=major_axis,
copy=False))
results.append(p.reindex(items=items, minor_axis=minor_axis,
copy=True))
results.append(p.reindex(items=items, minor_axis=minor_axis,
copy=False))
results.append(p.reindex(items=items, major_axis=major_axis,
minor_axis=minor_axis, copy=True))
results.append(p.reindex(items=items, major_axis=major_axis,
minor_axis=minor_axis, copy=False))
for i, r in enumerate(results):
assert_panel_equal(expected, r)
def test_reindex_like(self):
# reindex_like
smaller = self.panel.reindex(items=self.panel.items[:-1],
major=self.panel.major_axis[:-1],
minor=self.panel.minor_axis[:-1])
smaller_like = self.panel.reindex_like(smaller)
assert_panel_equal(smaller, smaller_like)
def test_take(self):
# axis == 0
result = self.panel.take([2, 0, 1], axis=0)
expected = self.panel.reindex(items=['ItemC', 'ItemA', 'ItemB'])
assert_panel_equal(result, expected)
# axis >= 1
result = self.panel.take([3, 0, 1, 2], axis=2)
expected = self.panel.reindex(minor=['D', 'A', 'B', 'C'])
assert_panel_equal(result, expected)
# negative indices are ok
expected = self.panel.reindex(minor=['D', 'D', 'B', 'C'])
result = self.panel.take([3, -1, 1, 2], axis=2)
assert_panel_equal(result, expected)
self.assertRaises(Exception, self.panel.take, [4, 0, 1, 2], axis=2)
def test_sort_index(self):
import random
ritems = list(self.panel.items)
rmajor = list(self.panel.major_axis)
rminor = list(self.panel.minor_axis)
random.shuffle(ritems)
random.shuffle(rmajor)
random.shuffle(rminor)
random_order = self.panel.reindex(items=ritems)
sorted_panel = random_order.sort_index(axis=0)
assert_panel_equal(sorted_panel, self.panel)
# descending
random_order = self.panel.reindex(items=ritems)
sorted_panel = random_order.sort_index(axis=0, ascending=False)
assert_panel_equal(sorted_panel,
self.panel.reindex(items=self.panel.items[::-1]))
random_order = self.panel.reindex(major=rmajor)
sorted_panel = random_order.sort_index(axis=1)
assert_panel_equal(sorted_panel, self.panel)
random_order = self.panel.reindex(minor=rminor)
sorted_panel = random_order.sort_index(axis=2)
assert_panel_equal(sorted_panel, self.panel)
def test_fillna(self):
filled = self.panel.fillna(0)
self.assertTrue(np.isfinite(filled.values).all())
filled = self.panel.fillna(method='backfill')
assert_frame_equal(filled['ItemA'],
self.panel['ItemA'].fillna(method='backfill'))
panel = self.panel.copy()
panel['str'] = 'foo'
filled = panel.fillna(method='backfill')
assert_frame_equal(filled['ItemA'],
panel['ItemA'].fillna(method='backfill'))
empty = self.panel.reindex(items=[])
filled = empty.fillna(0)
assert_panel_equal(filled, empty)
self.assertRaises(ValueError, self.panel.fillna)
self.assertRaises(ValueError, self.panel.fillna, 5, method='ffill')
self.assertRaises(TypeError, self.panel.fillna, [1, 2])
self.assertRaises(TypeError, self.panel.fillna, (1, 2))
# limit not implemented when only value is specified
p = Panel(np.random.randn(3, 4, 5))
p.iloc[0:2, 0:2, 0:2] = np.nan
self.assertRaises(NotImplementedError, lambda: p.fillna(999, limit=1))
def test_ffill_bfill(self):
assert_panel_equal(self.panel.ffill(),
self.panel.fillna(method='ffill'))
assert_panel_equal(self.panel.bfill(),
self.panel.fillna(method='bfill'))
def test_truncate_fillna_bug(self):
# #1823
result = self.panel.truncate(before=None, after=None, axis='items')
# it works!
result.fillna(value=0.0)
def test_swapaxes(self):
result = self.panel.swapaxes('items', 'minor')
self.assertIs(result.items, self.panel.minor_axis)
result = self.panel.swapaxes('items', 'major')
self.assertIs(result.items, self.panel.major_axis)
result = self.panel.swapaxes('major', 'minor')
self.assertIs(result.major_axis, self.panel.minor_axis)
panel = self.panel.copy()
result = panel.swapaxes('major', 'minor')
panel.values[0, 0, 1] = np.nan
expected = panel.swapaxes('major', 'minor')
assert_panel_equal(result, expected)
# this should also work
result = self.panel.swapaxes(0, 1)
self.assertIs(result.items, self.panel.major_axis)
# this works, but returns a copy
result = self.panel.swapaxes('items', 'items')
assert_panel_equal(self.panel, result)
self.assertNotEqual(id(self.panel), id(result))
def test_transpose(self):
result = self.panel.transpose('minor', 'major', 'items')
expected = self.panel.swapaxes('items', 'minor')
assert_panel_equal(result, expected)
# test kwargs
result = self.panel.transpose(items='minor', major='major',
minor='items')
expected = self.panel.swapaxes('items', 'minor')
assert_panel_equal(result, expected)
# test mixture of args
result = self.panel.transpose('minor', major='major', minor='items')
expected = self.panel.swapaxes('items', 'minor')
assert_panel_equal(result, expected)
result = self.panel.transpose('minor', 'major', minor='items')
expected = self.panel.swapaxes('items', 'minor')
assert_panel_equal(result, expected)
# duplicate axes
with tm.assertRaisesRegexp(TypeError,
'not enough/duplicate arguments'):
self.panel.transpose('minor', maj='major', minor='items')
with tm.assertRaisesRegexp(ValueError, 'repeated axis in transpose'):
self.panel.transpose('minor', 'major', major='minor',
minor='items')
result = self.panel.transpose(2, 1, 0)
assert_panel_equal(result, expected)
result = self.panel.transpose('minor', 'items', 'major')
expected = self.panel.swapaxes('items', 'minor')
expected = expected.swapaxes('major', 'minor')
assert_panel_equal(result, expected)
result = self.panel.transpose(2, 0, 1)
assert_panel_equal(result, expected)
self.assertRaises(ValueError, self.panel.transpose, 0, 0, 1)
def test_transpose_copy(self):
panel = self.panel.copy()
result = panel.transpose(2, 0, 1, copy=True)
expected = panel.swapaxes('items', 'minor')
expected = expected.swapaxes('major', 'minor')
assert_panel_equal(result, expected)
panel.values[0, 1, 1] = np.nan
self.assertTrue(notnull(result.values[1, 0, 1]))
@ignore_sparse_panel_future_warning
def test_to_frame(self):
# filtered
filtered = self.panel.to_frame()
expected = self.panel.to_frame().dropna(how='any')
assert_frame_equal(filtered, expected)
# unfiltered
unfiltered = self.panel.to_frame(filter_observations=False)
assert_panel_equal(unfiltered.to_panel(), self.panel)
# names
self.assertEqual(unfiltered.index.names, ('major', 'minor'))
# unsorted, round trip
df = self.panel.to_frame(filter_observations=False)
unsorted = df.take(np.random.permutation(len(df)))
pan = unsorted.to_panel()
assert_panel_equal(pan, self.panel)
# preserve original index names
df = DataFrame(np.random.randn(6, 2),
index=[['a', 'a', 'b', 'b', 'c', 'c'],
[0, 1, 0, 1, 0, 1]],
columns=['one', 'two'])
df.index.names = ['foo', 'bar']
df.columns.name = 'baz'
rdf = df.to_panel().to_frame()
self.assertEqual(rdf.index.names, df.index.names)
self.assertEqual(rdf.columns.names, df.columns.names)
def test_to_frame_mixed(self):
panel = self.panel.fillna(0)
panel['str'] = 'foo'
panel['bool'] = panel['ItemA'] > 0
lp = panel.to_frame()
wp = lp.to_panel()
self.assertEqual(wp['bool'].values.dtype, np.bool_)
# Previously, this was mutating the underlying index and changing its
# name
assert_frame_equal(wp['bool'], panel['bool'], check_names=False)
# GH 8704
# with categorical
df = panel.to_frame()
df['category'] = df['str'].astype('category')
# to_panel
# TODO: this converts back to object
p = df.to_panel()
expected = panel.copy()
expected['category'] = 'foo'
assert_panel_equal(p, expected)
def test_to_frame_multi_major(self):
idx = MultiIndex.from_tuples([(1, 'one'), (1, 'two'), (2, 'one'), (
2, 'two')])
df = DataFrame([[1, 'a', 1], [2, 'b', 1], [3, 'c', 1], [4, 'd', 1]],
columns=['A', 'B', 'C'], index=idx)
wp = Panel({'i1': df, 'i2': df})
expected_idx = MultiIndex.from_tuples(
[
(1, 'one', 'A'), (1, 'one', 'B'),
(1, 'one', 'C'), (1, 'two', 'A'),
(1, 'two', 'B'), (1, 'two', 'C'),
(2, 'one', 'A'), (2, 'one', 'B'),
(2, 'one', 'C'), (2, 'two', 'A'),
(2, 'two', 'B'), (2, 'two', 'C')
],
names=[None, None, 'minor'])
expected = DataFrame({'i1': [1, 'a', 1, 2, 'b', 1, 3,
'c', 1, 4, 'd', 1],
'i2': [1, 'a', 1, 2, 'b',
1, 3, 'c', 1, 4, 'd', 1]},
index=expected_idx)
result = wp.to_frame()
assert_frame_equal(result, expected)
wp.iloc[0, 0].iloc[0] = np.nan # BUG on setting. GH #5773
result = wp.to_frame()
assert_frame_equal(result, expected[1:])
idx = MultiIndex.from_tuples([(1, 'two'), (1, 'one'), (2, 'one'), (
np.nan, 'two')])
df = DataFrame([[1, 'a', 1], [2, 'b', 1], [3, 'c', 1], [4, 'd', 1]],
columns=['A', 'B', 'C'], index=idx)
wp = Panel({'i1': df, 'i2': df})
ex_idx = MultiIndex.from_tuples([(1, 'two', 'A'), (1, 'two', 'B'),
(1, 'two', 'C'),
(1, 'one', 'A'),
(1, 'one', 'B'),
(1, 'one', 'C'),
(2, 'one', 'A'),
(2, 'one', 'B'),
(2, 'one', 'C'),
(np.nan, 'two', 'A'),
(np.nan, 'two', 'B'),
(np.nan, 'two', 'C')],
names=[None, None, 'minor'])
expected.index = ex_idx
result = wp.to_frame()
assert_frame_equal(result, expected)
def test_to_frame_multi_major_minor(self):
cols = MultiIndex(levels=[['C_A', 'C_B'], ['C_1', 'C_2']],
labels=[[0, 0, 1, 1], [0, 1, 0, 1]])
idx = MultiIndex.from_tuples([(1, 'one'), (1, 'two'), (2, 'one'), (
2, 'two'), (3, 'three'), (4, 'four')])
df = DataFrame([[1, 2, 11, 12], [3, 4, 13, 14],
['a', 'b', 'w', 'x'],
['c', 'd', 'y', 'z'], [-1, -2, -3, -4],
[-5, -6, -7, -8]], columns=cols, index=idx)
wp = Panel({'i1': df, 'i2': df})
exp_idx = MultiIndex.from_tuples(
[(1, 'one', 'C_A', 'C_1'), (1, 'one', 'C_A', 'C_2'),
(1, 'one', 'C_B', 'C_1'), (1, 'one', 'C_B', 'C_2'),
(1, 'two', 'C_A', 'C_1'), (1, 'two', 'C_A', 'C_2'),
(1, 'two', 'C_B', 'C_1'), (1, 'two', 'C_B', 'C_2'),
(2, 'one', 'C_A', 'C_1'), (2, 'one', 'C_A', 'C_2'),
(2, 'one', 'C_B', 'C_1'), (2, 'one', 'C_B', 'C_2'),
(2, 'two', 'C_A', 'C_1'), (2, 'two', 'C_A', 'C_2'),
(2, 'two', 'C_B', 'C_1'), (2, 'two', 'C_B', 'C_2'),
(3, 'three', 'C_A', 'C_1'), (3, 'three', 'C_A', 'C_2'),
(3, 'three', 'C_B', 'C_1'), (3, 'three', 'C_B', 'C_2'),
(4, 'four', 'C_A', 'C_1'), (4, 'four', 'C_A', 'C_2'),
(4, 'four', 'C_B', 'C_1'), (4, 'four', 'C_B', 'C_2')],
names=[None, None, None, None])
exp_val = [[1, 1], [2, 2], [11, 11], [12, 12], [3, 3], [4, 4],
[13, 13], [14, 14], ['a', 'a'], ['b', 'b'], ['w', 'w'],
['x', 'x'], ['c', 'c'], ['d', 'd'], ['y', 'y'], ['z', 'z'],
[-1, -1], [-2, -2], [-3, -3], [-4, -4], [-5, -5], [-6, -6],
[-7, -7], [-8, -8]]
result = wp.to_frame()
expected = DataFrame(exp_val, columns=['i1', 'i2'], index=exp_idx)
assert_frame_equal(result, expected)
def test_to_frame_multi_drop_level(self):
idx = MultiIndex.from_tuples([(1, 'one'), (2, 'one'), (2, 'two')])
df = DataFrame({'A': [np.nan, 1, 2]}, index=idx)
wp = Panel({'i1': df, 'i2': df})
result = wp.to_frame()
exp_idx = MultiIndex.from_tuples([(2, 'one', 'A'), (2, 'two', 'A')],
names=[None, None, 'minor'])
expected = DataFrame({'i1': [1., 2], 'i2': [1., 2]}, index=exp_idx)
assert_frame_equal(result, expected)
def test_to_panel_na_handling(self):
df = DataFrame(np.random.randint(0, 10, size=20).reshape((10, 2)),
index=[[0, 0, 0, 0, 0, 0, 1, 1, 1, 1],
[0, 1, 2, 3, 4, 5, 2, 3, 4, 5]])
panel = df.to_panel()
self.assertTrue(isnull(panel[0].ix[1, [0, 1]]).all())
def test_to_panel_duplicates(self):
# #2441
df = DataFrame({'a': [0, 0, 1], 'b': [1, 1, 1], 'c': [1, 2, 3]})
idf = df.set_index(['a', 'b'])
assertRaisesRegexp(ValueError, 'non-uniquely indexed', idf.to_panel)
def test_panel_dups(self):
# GH 4960
# duplicates in an index
# items
data = np.random.randn(5, 100, 5)
no_dup_panel = Panel(data, items=list("ABCDE"))
panel = Panel(data, items=list("AACDE"))
expected = no_dup_panel['A']
result = panel.iloc[0]
assert_frame_equal(result, expected)
expected = no_dup_panel['E']
result = panel.loc['E']
assert_frame_equal(result, expected)
expected = no_dup_panel.loc[['A', 'B']]
expected.items = ['A', 'A']
result = panel.loc['A']
assert_panel_equal(result, expected)
# major
data = np.random.randn(5, 5, 5)
no_dup_panel = Panel(data, major_axis=list("ABCDE"))
panel = Panel(data, major_axis=list("AACDE"))
expected = no_dup_panel.loc[:, 'A']
result = panel.iloc[:, 0]
assert_frame_equal(result, expected)
expected = no_dup_panel.loc[:, 'E']
result = panel.loc[:, 'E']
assert_frame_equal(result, expected)
expected = no_dup_panel.loc[:, ['A', 'B']]
expected.major_axis = ['A', 'A']
result = panel.loc[:, 'A']
assert_panel_equal(result, expected)
# minor
data = np.random.randn(5, 100, 5)
no_dup_panel = Panel(data, minor_axis=list("ABCDE"))
panel = Panel(data, minor_axis=list("AACDE"))
expected = no_dup_panel.loc[:, :, 'A']
result = panel.iloc[:, :, 0]
assert_frame_equal(result, expected)
expected = no_dup_panel.loc[:, :, 'E']
result = panel.loc[:, :, 'E']
assert_frame_equal(result, expected)
expected = no_dup_panel.loc[:, :, ['A', 'B']]
expected.minor_axis = ['A', 'A']
result = panel.loc[:, :, 'A']
assert_panel_equal(result, expected)
def test_filter(self):
pass
def test_compound(self):
compounded = self.panel.compound()
assert_series_equal(compounded['ItemA'],
(1 + self.panel['ItemA']).product(0) - 1,
check_names=False)
def test_shift(self):
# major
idx = self.panel.major_axis[0]
idx_lag = self.panel.major_axis[1]
shifted = self.panel.shift(1)
assert_frame_equal(self.panel.major_xs(idx), shifted.major_xs(idx_lag))
# minor
idx = self.panel.minor_axis[0]
idx_lag = self.panel.minor_axis[1]
shifted = self.panel.shift(1, axis='minor')
assert_frame_equal(self.panel.minor_xs(idx), shifted.minor_xs(idx_lag))
# items
idx = self.panel.items[0]
idx_lag = self.panel.items[1]
shifted = self.panel.shift(1, axis='items')
assert_frame_equal(self.panel[idx], shifted[idx_lag])
# negative numbers, #2164
result = self.panel.shift(-1)
expected = Panel(dict((i, f.shift(-1)[:-1])
for i, f in self.panel.iteritems()))
assert_panel_equal(result, expected)
# mixed dtypes #6959
data = [('item ' + ch, makeMixedDataFrame()) for ch in list('abcde')]
data = dict(data)
mixed_panel = Panel.from_dict(data, orient='minor')
shifted = mixed_panel.shift(1)
assert_series_equal(mixed_panel.dtypes, shifted.dtypes)
def test_tshift(self):
# PeriodIndex
ps = tm.makePeriodPanel()
shifted = ps.tshift(1)
unshifted = shifted.tshift(-1)
assert_panel_equal(unshifted, ps)
shifted2 = ps.tshift(freq='B')
assert_panel_equal(shifted, shifted2)
shifted3 = ps.tshift(freq=bday)
assert_panel_equal(shifted, shifted3)
assertRaisesRegexp(ValueError, 'does not match', ps.tshift, freq='M')
# DatetimeIndex
panel = _panel
shifted = panel.tshift(1)
unshifted = shifted.tshift(-1)
assert_panel_equal(panel, unshifted)
shifted2 = panel.tshift(freq=panel.major_axis.freq)
assert_panel_equal(shifted, shifted2)
inferred_ts = Panel(panel.values, items=panel.items,
major_axis=Index(np.asarray(panel.major_axis)),
minor_axis=panel.minor_axis)
shifted = inferred_ts.tshift(1)
unshifted = shifted.tshift(-1)
assert_panel_equal(shifted, panel.tshift(1))
assert_panel_equal(unshifted, inferred_ts)
no_freq = panel.ix[:, [0, 5, 7], :]
self.assertRaises(ValueError, no_freq.tshift)
def test_pct_change(self):
df1 = DataFrame({'c1': [1, 2, 5], 'c2': [3, 4, 6]})
df2 = df1 + 1
df3 = DataFrame({'c1': [3, 4, 7], 'c2': [5, 6, 8]})
wp = Panel({'i1': df1, 'i2': df2, 'i3': df3})
# major, 1
result = wp.pct_change() # axis='major'
expected = Panel({'i1': df1.pct_change(),
'i2': df2.pct_change(),
'i3': df3.pct_change()})
assert_panel_equal(result, expected)
result = wp.pct_change(axis=1)
| assert_panel_equal(result, expected) | pandas.util.testing.assert_panel_equal |
# Copyright (c) 2020-2022, NVIDIA CORPORATION.
import datetime
import operator
import re
import cupy as cp
import numpy as np
import pandas as pd
import pytest
import cudf
from cudf.core._compat import PANDAS_GE_120
from cudf.testing import _utils as utils
from cudf.testing._utils import assert_eq, assert_exceptions_equal
_TIMEDELTA_DATA = [
[1000000, 200000, 3000000],
[1000000, 200000, None],
[],
[None],
[None, None, None, None, None],
[12, 12, 22, 343, 4353534, 435342],
np.array([10, 20, 30, None, 100]),
cp.asarray([10, 20, 30, 100]),
[1000000, 200000, 3000000],
[1000000, 200000, None],
[1],
[12, 11, 232, 223432411, 2343241, 234324, 23234],
[12, 11, 2.32, 2234.32411, 2343.241, 23432.4, 23234],
[1.321, 1132.324, 23223231.11, 233.41, 0.2434, 332, 323],
[
136457654736252,
134736784364431,
245345345545332,
223432411,
2343241,
3634548734,
23234,
],
[12, 11, 2.32, 2234.32411, 2343.241, 23432.4, 23234],
]
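# Shared inputs for the parametrized tests below: plain ints, floats, None
# entries, and numpy/cupy arrays, so each case covers nullable and
# device-backed data alike.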
_TIMEDELTA_DATA_NON_OVERFLOW = [
[1000000, 200000, 3000000],
[1000000, 200000, None],
[],
[None],
[None, None, None, None, None],
[12, 12, 22, 343, 4353534, 435342],
np.array([10, 20, 30, None, 100]),
cp.asarray([10, 20, 30, 100]),
[1000000, 200000, 3000000],
[1000000, 200000, None],
[1],
[12, 11, 232, 223432411, 2343241, 234324, 23234],
[12, 11, 2.32, 2234.32411, 2343.241, 23432.4, 23234],
[1.321, 1132.324, 23223231.11, 233.41, 0.2434, 332, 323],
[12, 11, 2.32, 2234.32411, 2343.241, 23432.4, 23234],
]
@pytest.mark.parametrize(
"data",
[
[1000000, 200000, 3000000],
[1000000, 200000, None],
[],
[None],
[None, None, None, None, None],
[12, 12, 22, 343, 4353534, 435342],
[0.3534, 12, 22, 343, 43.53534, 4353.42],
np.array([10, 20, 30, None, 100]),
cp.asarray([10, 20, 30, 100]),
],
)
@pytest.mark.parametrize("dtype", utils.TIMEDELTA_TYPES)
def test_timedelta_series_create(data, dtype):
if dtype not in ("timedelta64[ns]",):
pytest.skip(
"Bug in pandas: https://github.com/pandas-dev/pandas/issues/35465"
)
psr = pd.Series(
cp.asnumpy(data) if isinstance(data, cp.ndarray) else data, dtype=dtype
)
gsr = cudf.Series(data, dtype=dtype)
assert_eq(psr, gsr)
@pytest.mark.parametrize(
"data",
[
[1000000, 200000, 3000000],
[12, 12, 22, 343, 4353534, 435342],
[0.3534, 12, 22, 343, 43.53534, 4353.42],
cp.asarray([10, 20, 30, 100]),
],
)
@pytest.mark.parametrize("dtype", utils.TIMEDELTA_TYPES)
@pytest.mark.parametrize("cast_dtype", ["int64", "category"])
def test_timedelta_from_typecast(data, dtype, cast_dtype):
if dtype not in ("timedelta64[ns]",):
pytest.skip(
"Bug in pandas: https://github.com/pandas-dev/pandas/issues/35465"
)
psr = pd.Series(
cp.asnumpy(data) if isinstance(data, cp.ndarray) else data, dtype=dtype
)
gsr = cudf.Series(data, dtype=dtype)
if cast_dtype == "int64":
assert_eq(psr.values.view(cast_dtype), gsr.astype(cast_dtype).values)
else:
assert_eq(psr.astype(cast_dtype), gsr.astype(cast_dtype))
@pytest.mark.parametrize(
"data",
[
[1000000, 200000, 3000000],
[12, 12, 22, 343, 4353534, 435342],
[0.3534, 12, 22, 343, 43.53534, 4353.42],
cp.asarray([10, 20, 30, 100]),
],
)
@pytest.mark.parametrize("cast_dtype", utils.TIMEDELTA_TYPES)
def test_timedelta_to_typecast(data, cast_dtype):
psr = pd.Series(cp.asnumpy(data) if isinstance(data, cp.ndarray) else data)
gsr = cudf.Series(data)
assert_eq(psr.astype(cast_dtype), gsr.astype(cast_dtype))
@pytest.mark.parametrize(
"data",
[
[1000000, 200000, 3000000],
[1000000, 200000, None],
[],
[None],
[None, None, None, None, None],
[12, 12, 22, 343, 4353534, 435342],
[0.3534, 12, 22, 343, 43.53534, 4353.42],
np.array([10, 20, 30, None, 100]),
cp.asarray([10, 20, 30, 100]),
],
)
@pytest.mark.parametrize("dtype", utils.TIMEDELTA_TYPES)
def test_timedelta_from_pandas(data, dtype):
psr = pd.Series(
cp.asnumpy(data) if isinstance(data, cp.ndarray) else data, dtype=dtype
)
gsr = cudf.from_pandas(psr)
assert_eq(psr, gsr)
@pytest.mark.parametrize(
"data",
[
[1000000, 200000, 3000000],
[1000000, 200000, None],
[],
[None],
[None, None, None, None, None],
[12, 12, 22, 343, 4353534, 435342],
np.array([10, 20, 30, None, 100]),
cp.asarray([10, 20, 30, 100]),
],
)
@pytest.mark.parametrize("dtype", utils.TIMEDELTA_TYPES)
def test_timedelta_series_to_numpy(data, dtype):
gsr = cudf.Series(data, dtype=dtype)
expected = np.array(
cp.asnumpy(data) if isinstance(data, cp.ndarray) else data, dtype=dtype
)
expected = expected[~np.isnan(expected)]
actual = gsr.dropna().to_numpy()
np.testing.assert_array_equal(expected, actual)
@pytest.mark.parametrize(
"data",
[
[1000000, 200000, 3000000],
[1000000, 200000, None],
[],
[None],
[None, None, None, None, None],
[12, 12, 22, 343, 4353534, 435342],
np.array([10, 20, 30, None, 100]),
cp.asarray([10, 20, 30, 100]),
],
)
@pytest.mark.parametrize("dtype", utils.TIMEDELTA_TYPES)
def test_timedelta_series_to_pandas(data, dtype):
gsr = cudf.Series(data, dtype=dtype)
expected = np.array(
cp.asnumpy(data) if isinstance(data, cp.ndarray) else data, dtype=dtype
)
expected = pd.Series(expected)
actual = gsr.to_pandas()
assert_eq(expected, actual)
@pytest.mark.parametrize(
"data,other",
[
([1000000, 200000, 3000000], [1000000, 200000, 3000000]),
([1000000, 200000, None], [1000000, 200000, None]),
([], []),
([None], [None]),
([None, None, None, None, None], [None, None, None, None, None]),
(
[12, 12, 22, 343, 4353534, 435342],
[12, 12, 22, 343, 4353534, 435342],
),
(np.array([10, 20, 30, None, 100]), np.array([10, 20, 30, None, 100])),
(cp.asarray([10, 20, 30, 100]), cp.asarray([10, 20, 30, 100])),
([1000000, 200000, 3000000], [200000, 34543, 3000000]),
([1000000, 200000, None], [1000000, 200000, 3000000]),
([None], [1]),
(
[12, 12, 22, 343, 4353534, 435342],
[None, 1, 220, 3, 34, 4353423287],
),
(np.array([10, 20, 30, None, 100]), np.array([10, 20, 30, None, 100])),
(cp.asarray([10, 20, 30, 100]), cp.asarray([10, 20, 30, 100])),
],
)
@pytest.mark.parametrize("dtype", utils.TIMEDELTA_TYPES)
@pytest.mark.parametrize(
"ops",
[
"eq",
"ne",
"lt",
"gt",
"le",
"ge",
"add",
"radd",
"sub",
"rsub",
"floordiv",
"truediv",
"mod",
],
)
def test_timedelta_ops_misc_inputs(data, other, dtype, ops):
gsr = cudf.Series(data, dtype=dtype)
other_gsr = cudf.Series(other, dtype=dtype)
psr = gsr.to_pandas()
other_psr = other_gsr.to_pandas()
expected = getattr(psr, ops)(other_psr)
actual = getattr(gsr, ops)(other_gsr)
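# cudf propagates nulls through comparisons while pandas returns False
# (True for "ne"), so the nulls are filled before comparing; this reading
# is inferred from the fill logic below rather than stated in the test.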
if ops in ("eq", "lt", "gt", "le", "ge"):
actual = actual.fillna(False)
elif ops == "ne":
actual = actual.fillna(True)
if ops == "floordiv":
expected[actual.isna().to_pandas()] = np.nan
assert_eq(expected, actual)
@pytest.mark.parametrize(
"datetime_data,timedelta_data",
[
([1000000, 200000, 3000000], [1000000, 200000, 3000000]),
([1000000, 200000, None], [1000000, 200000, None]),
([], []),
([None], [None]),
([None, None, None, None, None], [None, None, None, None, None]),
(
[12, 12, 22, 343, 4353534, 435342],
[12, 12, 22, 343, 4353534, 435342],
),
(np.array([10, 20, 30, None, 100]), np.array([10, 20, 30, None, 100])),
(cp.asarray([10, 20, 30, 100]), cp.asarray([10, 20, 30, 100])),
([1000000, 200000, 3000000], [200000, 34543, 3000000]),
([1000000, 200000, None], [1000000, 200000, 3000000]),
([None], [1]),
(
[12, 12, 22, 343, 4353534, 435342],
[None, 1, 220, 3, 34, 4353423287],
),
(np.array([10, 20, 30, None, 100]), np.array([10, 20, 30, None, 100])),
(cp.asarray([10, 20, 30, 100]), cp.asarray([10, 20, 30, 100])),
(
[12, 11, 232, 223432411, 2343241, 234324, 23234],
[11, 1132324, 2322323111, 23341, 2434, 332, 323],
),
(
[12, 11, 2.32, 2234.32411, 2343.241, 23432.4, 23234],
[11, 1132324, 2322323111, 23341, 2434, 332, 323],
),
(
[11, 1132324, 2322323111, 23341, 2434, 332, 323],
[12, 11, 2.32, 2234.32411, 2343.241, 23432.4, 23234],
),
(
[1.321, 1132.324, 23223231.11, 233.41, 0.2434, 332, 323],
[12, 11, 2.32, 2234.32411, 2343.241, 23432.4, 23234],
),
],
)
@pytest.mark.parametrize("datetime_dtype", utils.DATETIME_TYPES)
@pytest.mark.parametrize("timedelta_dtype", utils.TIMEDELTA_TYPES)
@pytest.mark.parametrize(
"ops",
["add", "sub"],
)
def test_timedelta_ops_datetime_inputs(
datetime_data, timedelta_data, datetime_dtype, timedelta_dtype, ops
):
gsr_datetime = cudf.Series(datetime_data, dtype=datetime_dtype)
gsr_timedelta = cudf.Series(timedelta_data, dtype=timedelta_dtype)
psr_datetime = gsr_datetime.to_pandas()
psr_timedelta = gsr_timedelta.to_pandas()
expected = getattr(psr_datetime, ops)(psr_timedelta)
actual = getattr(gsr_datetime, ops)(gsr_timedelta)
assert_eq(expected, actual)
if ops == "add":
expected = getattr(psr_timedelta, ops)(psr_datetime)
actual = getattr(gsr_timedelta, ops)(gsr_datetime)
assert_eq(expected, actual)
elif ops == "sub":
assert_exceptions_equal(
lfunc=operator.sub,
rfunc=operator.sub,
lfunc_args_and_kwargs=([psr_timedelta, psr_datetime],),
rfunc_args_and_kwargs=([gsr_timedelta, gsr_datetime],),
compare_error_message=False,
)
@pytest.mark.parametrize(
"df",
[
pd.DataFrame(
{
"A": pd.Series(pd.date_range("2012-1-1", periods=3, freq="D")),
"B": pd.Series([pd.Timedelta(days=i) for i in range(3)]),
}
),
pd.DataFrame(
{
"A": pd.Series(
pd.date_range("1994-1-1", periods=50, freq="D")
),
"B": pd.Series([pd.Timedelta(days=i) for i in range(50)]),
}
),
],
)
@pytest.mark.parametrize("op", ["add", "sub"])
def test_timedelta_dataframe_ops(df, op):
pdf = df
gdf = cudf.from_pandas(pdf)
if op == "add":
pdf["C"] = pdf["A"] + pdf["B"]
gdf["C"] = gdf["A"] + gdf["B"]
elif op == "sub":
pdf["C"] = pdf["A"] - pdf["B"]
gdf["C"] = gdf["A"] - gdf["B"]
assert_eq(pdf, gdf)
@pytest.mark.parametrize(
"data",
[
[1000000, 200000, 3000000],
[1000000, 200000, None],
[],
[None],
[None, None, None, None, None],
[12, 12, 22, 343, 4353534, 435342],
np.array([10, 20, 30, None, 100]),
cp.asarray([10, 20, 30, 100]),
[1000000, 200000, 3000000],
[1000000, 200000, None],
[1],
[12, 11, 232, 223432411, 2343241, 234324, 23234],
[12, 11, 2.32, 2234.32411, 2343.241, 23432.4, 23234],
pytest.param(
[1.321, 1132.324, 23223231.11, 233.41, 0.2434, 332, 323],
marks=pytest.mark.xfail(
reason="https://github.com/rapidsai/cudf/issues/5938"
),
),
[12, 11, 2.32, 2234.32411, 2343.241, 23432.4, 23234],
],
)
@pytest.mark.parametrize(
"other_scalars",
[
datetime.timedelta(days=768),
datetime.timedelta(seconds=768),
datetime.timedelta(microseconds=7),
datetime.timedelta(minutes=447),
datetime.timedelta(hours=447),
datetime.timedelta(weeks=734),
np.timedelta64(4, "s"),
np.timedelta64(456, "D"),
np.timedelta64(46, "h"),
pytest.param(
np.timedelta64("nat"),
marks=pytest.mark.xfail(
condition=not PANDAS_GE_120,
reason="https://github.com/pandas-dev/pandas/issues/35529",
),
),
np.timedelta64(1, "s"),
np.timedelta64(1, "ms"),
np.timedelta64(1, "us"),
np.timedelta64(1, "ns"),
],
)
@pytest.mark.parametrize("dtype", utils.TIMEDELTA_TYPES)
@pytest.mark.parametrize(
"op",
[
"add",
"sub",
"truediv",
"mod",
pytest.param(
"floordiv",
marks=pytest.mark.xfail(
condition=not PANDAS_GE_120,
reason="https://github.com/pandas-dev/pandas/issues/35529",
),
),
],
)
def test_timedelta_series_ops_with_scalars(data, other_scalars, dtype, op):
gsr = cudf.Series(data=data, dtype=dtype)
psr = gsr.to_pandas()
if op == "add":
expected = psr + other_scalars
actual = gsr + other_scalars
elif op == "sub":
expected = psr - other_scalars
actual = gsr - other_scalars
elif op == "truediv":
expected = psr / other_scalars
actual = gsr / other_scalars
elif op == "floordiv":
expected = psr // other_scalars
actual = gsr // other_scalars
elif op == "mod":
expected = psr % other_scalars
actual = gsr % other_scalars
assert_eq(expected, actual)
if op == "add":
expected = other_scalars + psr
actual = other_scalars + gsr
elif op == "sub":
expected = other_scalars - psr
actual = other_scalars - gsr
elif op == "truediv":
expected = other_scalars / psr
actual = other_scalars / gsr
elif op == "floordiv":
expected = other_scalars // psr
actual = other_scalars // gsr
elif op == "mod":
expected = other_scalars % psr
actual = other_scalars % gsr
assert_eq(expected, actual)
@pytest.mark.parametrize(
"data",
[
[1000000, 200000, 3000000],
[1000000, 200000, None],
[],
[None],
[None, None, None, None, None],
[12, 12, 22, 343, 4353534, 435342],
np.array([10, 20, 30, None, 100]),
cp.asarray([10, 20, 30, 100]),
[1000000, 200000, 3000000],
[1000000, 200000, None],
[1],
[12, 11, 232, 223432411, 2343241, 234324, 23234],
[12, 11, 2.32, 2234.32411, 2343.241, 23432.4, 23234],
pytest.param(
[1.321, 1132.324, 23223231.11, 233.41, 0.2434, 332, 323],
marks=pytest.mark.xfail(
reason="https://github.com/rapidsai/cudf/issues/5938"
),
),
[12, 11, 2.32, 2234.32411, 2343.241, 23432.4, 23234],
],
)
@pytest.mark.parametrize(
"cpu_scalar",
[
datetime.timedelta(seconds=768),
datetime.timedelta(microseconds=7),
np.timedelta64(4, "s"),
pytest.param(
np.timedelta64("nat", "s"),
marks=pytest.mark.xfail(
condition=not PANDAS_GE_120,
reason="https://github.com/pandas-dev/pandas/issues/35529",
),
),
np.timedelta64(1, "s"),
np.timedelta64(1, "ms"),
np.timedelta64(1, "us"),
np.timedelta64("nat", "ns"),
np.timedelta64(1, "ns"),
],
)
@pytest.mark.parametrize("dtype", utils.TIMEDELTA_TYPES)
@pytest.mark.parametrize(
"op",
[
"add",
"sub",
"truediv",
"mod",
pytest.param(
"floordiv",
marks=pytest.mark.xfail(
condition=not PANDAS_GE_120,
reason="https://github.com/pandas-dev/pandas/issues/35529",
),
),
],
)
def test_timedelta_series_ops_with_cudf_scalars(data, cpu_scalar, dtype, op):
gpu_scalar = cudf.Scalar(cpu_scalar)
gsr = cudf.Series(data=data, dtype=dtype)
psr = gsr.to_pandas()
if op == "add":
expected = psr + cpu_scalar
actual = gsr + gpu_scalar
elif op == "sub":
expected = psr - cpu_scalar
actual = gsr - gpu_scalar
elif op == "truediv":
expected = psr / cpu_scalar
actual = gsr / gpu_scalar
elif op == "floordiv":
expected = psr // cpu_scalar
actual = gsr // gpu_scalar
elif op == "mod":
expected = psr % cpu_scalar
actual = gsr % gpu_scalar
assert_eq(expected, actual)
if op == "add":
expected = cpu_scalar + psr
actual = gpu_scalar + gsr
elif op == "sub":
expected = cpu_scalar - psr
actual = gpu_scalar - gsr
elif op == "truediv":
expected = cpu_scalar / psr
actual = gpu_scalar / gsr
elif op == "floordiv":
expected = cpu_scalar // psr
actual = gpu_scalar // gsr
elif op == "mod":
expected = cpu_scalar % psr
actual = gpu_scalar % gsr
assert_eq(expected, actual)
@pytest.mark.parametrize(
"data",
[
[1000000, 200000, 3000000],
[1000000, 200000, None],
[],
[None],
[None, None, None, None, None],
[12, 12, 22, 343, 4353534, 435342],
np.array([10, 20, 30, None, 100]),
cp.asarray([10, 20, 30, 100]),
[1000000, 200000, 3000000],
[1000000, 200000, None],
[1],
[12, 11, 232, 223432411, 2343241, 234324, 23234],
[12, 11, 2.32, 2234.32411, 2343.241, 23432.4, 23234],
[1.321, 1132.324, 23223231.11, 233.41, 0.2434, 332, 323],
[12, 11, 2.32, 2234.32411, 2343.241, 23432.4, 23234],
],
)
@pytest.mark.parametrize("dtype", utils.TIMEDELTA_TYPES)
@pytest.mark.parametrize("reduction_op", ["sum", "mean", "median", "quantile"])
def test_timedelta_reduction_ops(data, dtype, reduction_op):
gsr = cudf.Series(data, dtype=dtype)
psr = gsr.to_pandas()
if len(psr) > 0 and psr.isnull().all() and reduction_op == "median":
with pytest.warns(RuntimeWarning, match="Mean of empty slice"):
expected = getattr(psr, reduction_op)()
else:
expected = getattr(psr, reduction_op)()
actual = getattr(gsr, reduction_op)()
if pd.isna(expected) and pd.isna(actual):
pass
elif isinstance(expected, pd.Timedelta) and isinstance(
actual, pd.Timedelta
):
assert (
expected.round(gsr._column.time_unit).value
== actual.round(gsr._column.time_unit).value
)
else:
assert_eq(expected, actual)
@pytest.mark.parametrize(
"data",
_TIMEDELTA_DATA,
)
@pytest.mark.parametrize("dtype", utils.TIMEDELTA_TYPES)
def test_timedelta_dt_components(data, dtype):
gsr = cudf.Series(data, dtype=dtype)
psr = gsr.to_pandas()
expected = psr.dt.components
actual = gsr.dt.components
if gsr.isnull().any():
assert_eq(expected, actual.astype("float"))
else:
assert_eq(expected, actual)
@pytest.mark.parametrize(
"data",
_TIMEDELTA_DATA,
)
@pytest.mark.parametrize("dtype", utils.TIMEDELTA_TYPES)
def test_timedelta_dt_properties(data, dtype):
gsr = cudf.Series(data, dtype=dtype)
psr = gsr.to_pandas()
def local_assert(expected, actual):
if gsr.isnull().any():
assert_eq(expected, actual.astype("float"))
else:
assert_eq(expected, actual)
expected_days = psr.dt.days
actual_days = gsr.dt.days
local_assert(expected_days, actual_days)
expected_seconds = psr.dt.seconds
actual_seconds = gsr.dt.seconds
local_assert(expected_seconds, actual_seconds)
expected_microseconds = psr.dt.microseconds
actual_microseconds = gsr.dt.microseconds
local_assert(expected_microseconds, actual_microseconds)
expected_nanoseconds = psr.dt.nanoseconds
actual_nanoseconds = gsr.dt.nanoseconds
local_assert(expected_nanoseconds, actual_nanoseconds)
@pytest.mark.parametrize(
"data",
_TIMEDELTA_DATA,
)
@pytest.mark.parametrize("dtype", utils.TIMEDELTA_TYPES)
def test_timedelta_index(data, dtype):
gdi = cudf.Index(data, dtype=dtype)
pdi = gdi.to_pandas()
assert_eq(pdi, gdi)
@pytest.mark.parametrize("data", _TIMEDELTA_DATA_NON_OVERFLOW)
@pytest.mark.parametrize("datetime_dtype", utils.DATETIME_TYPES)
@pytest.mark.parametrize("timedelta_dtype", utils.TIMEDELTA_TYPES)
def test_timedelta_index_datetime_index_ops(
data, datetime_dtype, timedelta_dtype
):
gdt = cudf.Index(data, dtype=datetime_dtype)
gtd = cudf.Index(data, dtype=timedelta_dtype)
pdt = gdt.to_pandas()
ptd = gtd.to_pandas()
assert_eq(gdt - gtd, pdt - ptd)
assert_eq(gdt + gtd, pdt + ptd)
@pytest.mark.parametrize(
"datetime_data,timedelta_data",
[
([1000000, 200000, 3000000], [1000000, 200000, 3000000]),
([1000000, 200000, None], [1000000, 200000, None]),
([], []),
([None], [None]),
([None, None, None, None, None], [None, None, None, None, None]),
(
[12, 12, 22, 343, 4353534, 435342],
[12, 12, 22, 343, 4353534, 435342],
),
(np.array([10, 20, 30, None, 100]), np.array([10, 20, 30, None, 100])),
(cp.asarray([10, 20, 30, 100]), cp.asarray([10, 20, 30, 100])),
([1000000, 200000, 3000000], [200000, 34543, 3000000]),
([1000000, 200000, None], [1000000, 200000, 3000000]),
([None], [1]),
(
[12, 12, 22, 343, 4353534, 435342],
[None, 1, 220, 3, 34, 4353423287],
),
(np.array([10, 20, 30, None, 100]), np.array([10, 20, 30, None, 100])),
(cp.asarray([10, 20, 30, 100]), cp.asarray([10, 20, 30, 100])),
(
[12, 11, 232, 223432411, 2343241, 234324, 23234],
[11, 1132324, 2322323111, 23341, 2434, 332, 323],
),
(
[12, 11, 2.32, 2234.32411, 2343.241, 23432.4, 23234],
[11, 1132324, 2322323111, 23341, 2434, 332, 323],
),
(
[11, 1132324, 2322323111, 23341, 2434, 332, 323],
[12, 11, 2.32, 2234.32411, 2343.241, 23432.4, 23234],
),
(
[1.321, 1132.324, 23223231.11, 233.41, 0.2434, 332, 323],
[12, 11, 2.32, 2234.32411, 2343.241, 23432.4, 23234],
),
],
)
@pytest.mark.parametrize("datetime_dtype", utils.DATETIME_TYPES)
@pytest.mark.parametrize("timedelta_dtype", utils.TIMEDELTA_TYPES)
def test_timedelta_datetime_index_ops_misc(
datetime_data, timedelta_data, datetime_dtype, timedelta_dtype
):
gdt = cudf.Index(datetime_data, dtype=datetime_dtype)
gtd = cudf.Index(timedelta_data, dtype=timedelta_dtype)
pdt = gdt.to_pandas()
ptd = gtd.to_pandas()
assert_eq(gdt - gtd, pdt - ptd)
assert_eq(gdt + gtd, pdt + ptd)
@pytest.mark.parametrize("data", _TIMEDELTA_DATA_NON_OVERFLOW)
@pytest.mark.parametrize(
"other_scalars",
[
pd.Timedelta(1513393355.5, unit="s"),
| pd.Timedelta(34765, unit="D") | pandas.Timedelta |
import requests
from bs4 import BeautifulSoup
import pandas as pd
import re
from datetime import timedelta
from datetime import date
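# Accumulators for the scraped fields; each list receives one entry per job
# card so the lists stay aligned when combined into a DataFrame later.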
scraped_job_titles = []
scraped_job_locations = []
scraped_company_names = []
scraped_salaries = []
scraped_ratings = []
scraped_apply_urls = []
scraped_days = []
scraped_description = []
# PMs
# https://ie.indeed.com/jobs?q=Project+Manager&l=Dublin
# https://ie.indeed.com/jobs?q=Project+Manager&l=Dublin&sort=date&sr=directhire&fromage=7
# BAs
# https://ie.indeed.com/jobs?q=business+analyst&l=Dublin
# https://ie.indeed.com/jobs?q=business+analyst&l=Dublin&sort=date&sr=directhire&fromage=7
# Testers
# https://ie.indeed.com/jobs?q=testers&l=Dublin&sort=date
# https://ie.indeed.com/jobs?q=testers&l=Dublin&sort=date&sr=directhire&fromage=7
# PMOs
# https://ie.indeed.com/jobs?q=pmo&l=Dublin&sort=date
# https://ie.indeed.com/jobs?q=pmo&l=Dublin&sr=directhire&fromage=7&sort=date
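# Current query: data-analyst roles in Dublin; "fromage=7" appears to restrict
# results to postings from the last 7 days.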
start_url = "https://ie.indeed.com/jobs?q=data+analyst&l=Dublin&fromage=7"
link = requests.get(start_url)
site = BeautifulSoup(link.text, "html.parser")
return_res = str(site.find_all('div', attrs={'id': 'searchCountPages'}))
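# The "searchCountPages" div renders text such as "Page 1 of 87 jobs"; the
# code below assumes the total job count is the sixth whitespace-separated
# token of the stringified tag and that Indeed lists roughly 15 jobs per page.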
res = "".join(return_res)
results = int(int(res.split()[5])/15)
url = "https://ie.indeed.com/jobs?q=data+analyst&l=Dublin&rbl=Dublin&%2480%2C000&sort=date&fromage=7&start="
for i in range(results):
if i == 0:
j = "0"
else:
j = str(i) + "0"
url1 = url + str(j)
link = requests.get(url1)
site = BeautifulSoup(link.text, "html.parser")
jobs_a = site.find_all(name='a', attrs={'data-tn-element': 'jobTitle'})
for job in jobs_a:
job_attrs = job.attrs
scraped_job_titles.append(job_attrs['title'])
loc_div = site.find_all('div', attrs={'class': 'recJobLoc'})
for loc in loc_div:
loc_attrs = loc.attrs
scraped_job_locations.append(loc_attrs['data-rc-loc'])
company_span = site.find_all('span', attrs={'class': 'company'})
for span in company_span:
scraped_company_names.append(span.text.strip())
jobs_divs = site.find_all('div', attrs={'class': 'jobsearch-SerpJobCard'})
for div in jobs_divs:
salary_span = div.find('span', attrs={'class': 'salaryText'})
if salary_span:
scraped_salaries.append(salary_span.string.strip())
else:
scraped_salaries.append('Not shown')
jobs_divs = site.find_all('div', attrs={'class': 'jobsearch-SerpJobCard'})
for div in jobs_divs:
rating_span = div.find('span', attrs={'class': 'ratingsContent'})
if rating_span:
scraped_ratings.append(float(rating_span.text.strip().replace(',', '.')))
else:
scraped_ratings.append(None)
view_job_url = "https://ie.indeed.com/viewjob?jk="
jobs_div = site.find_all(name='div', attrs={'class': 'jobsearch-SerpJobCard'})
for div in jobs_div:
job_id = div.attrs['data-jk']
apply_url = view_job_url + job_id
scraped_apply_urls.append(apply_url)
days_spans = site.find_all('span', attrs={'class': 'date'})
for day in days_spans:
day_string = day.text.strip()
if re.findall('[0-9]+', day_string):
parsed_day = re.findall('[0-9]+', day_string)[0]
if 'hour' in day_string:
job_posted_since = (date.today() - timedelta(int(parsed_day) / 24)).strftime("%d/%m/%Y")
elif 'day' in day_string:
job_posted_since = (date.today() - timedelta(int(parsed_day))).strftime("%d/%m/%Y")
elif 'week' in day_string:
job_posted_since = (date.today() - timedelta(int(parsed_day) * 7)).strftime("%d/%m/%Y")
elif 'month' in day_string:
job_posted_since = (date.today() - timedelta(int(parsed_day) * 30)).strftime("%d/%m/%Y")
else:
job_posted_since = str(day_string)
else:
job_posted_since = date.today().strftime("%d/%m/%Y")
scraped_days.append(job_posted_since)
jobs_list = | pd.DataFrame() | pandas.DataFrame |
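# The original script is truncated at the DataFrame constructor above. A
# minimal sketch of how the parallel lists could be combined and saved
# (the column names and output path are assumptions, not from the source):
# jobs_list = pd.DataFrame({
#     'Title': scraped_job_titles,
#     'Location': scraped_job_locations,
#     'Company': scraped_company_names,
#     'Salary': scraped_salaries,
#     'Rating': scraped_ratings,
#     'Apply URL': scraped_apply_urls,
#     'Posted': scraped_days,
# })
# jobs_list.to_csv('indeed_jobs.csv', index=False)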
"""
Copyright 2020 The Google Earth Engine Community Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
https://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from absl import app
from absl import logging
import h5py
import numpy as np
import pandas as pd
import os
meta_variables = (
'cover',
'pai',
'fhd_normal',
'pgap_theta',
'beam',
'shot_number',
'l2b_quality_flag',
'algorithmrun_flag',
'selected_rg_algorithm',
'selected_l2a_algorithm',
'sensitivity',
'geolocation/degrade_flag',
'geolocation/delta_time',
'geolocation/lat_lowestmode',
'geolocation/lon_lowestmode',
'geolocation/local_beam_azimuth',
'geolocation/local_beam_elevation',
'geolocation/solar_azimuth',
'geolocation/solar_elevation',
)
cover_names = [f'cover_z{d}' for d in range(30)]
pai_names = [f'pai_z{d}' for d in range(30)]
pavd_names = [f'pavd_z{d}' for d in range(30)]
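# Each canopy metric above is expanded into 30 per-slice columns,
# e.g. cover_z0 ... cover_z29, matching the 30-slice height profile used in write_csv().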
# pylint:disable=line-too-long
def extract_values(input_path, output_path):
"""Extracts all relative height values from all algorithms and some qa flags.
Args:
input_path: string, GEDI L2B file path
output_path: string, csv output file path
"""
basename = os.path.basename(input_path)
if not basename.startswith('GEDI') or not basename.endswith('.h5'):
logging.error('Input path is not a GEDI filename: %s', input_path)
return
with h5py.File(input_path, 'r') as hdf_fh:
with open(output_path, 'w') as csv_fh:
write_csv(hdf_fh, csv_fh)
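# Example call (the filename is hypothetical):
# extract_values('GEDI02_B_example_granule.h5', 'gedi_l2b_metrics.csv')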
def write_csv(hdf_fh, csv_file):
"""Writes a single CSV file based on the contents of HDF file."""
is_first = True
# Iterating over metrics using a height profile defined for 30 slices.
for k in hdf_fh.keys():
if not k.startswith('BEAM'):
continue
print('\t', k)
df = pd.DataFrame()
for v in meta_variables:
if v.startswith('#'):
continue
name = v.split('/')[-1]
ds = hdf_fh[f'{k}/{v}']
df[name] = ds[:]
df[name].replace([np.inf, -np.inf], np.nan, inplace=True)
if ds.attrs.get('_FillValue') is not None:
# We need to use pd.NA that works with integer types (np.nan does not)
df[name].replace(ds.attrs.get('_FillValue'), pd.NA, inplace=True)
ds = hdf_fh[f'{k}/cover_z']
cover_z = pd.DataFrame(ds, columns=cover_names)
cover_z.replace(ds.attrs.get('_FillValue'), np.nan, True)
ds = hdf_fh[f'{k}/pai_z']
pai_z = | pd.DataFrame(ds, columns=pai_names) | pandas.DataFrame |
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
Wrappers around spark that correspond to common pandas functions.
"""
from typing import (
Any,
Callable,
Dict,
List,
Optional,
Set,
Sized,
Tuple,
Type,
Union,
cast,
no_type_check,
)
from collections.abc import Iterable
from datetime import tzinfo
from functools import reduce
from io import BytesIO
import json
import warnings
import numpy as np
import pandas as pd
from pandas.api.types import is_datetime64_dtype, is_datetime64tz_dtype, is_list_like # type: ignore[attr-defined]
from pandas.tseries.offsets import DateOffset
import pyarrow as pa
import pyarrow.parquet as pq
from pyspark.sql import functions as F, Column, DataFrame as SparkDataFrame
from pyspark.sql.functions import pandas_udf
from pyspark.sql.types import (
ByteType,
ShortType,
IntegerType,
LongType,
FloatType,
DoubleType,
BooleanType,
TimestampType,
TimestampNTZType,
DecimalType,
StringType,
DateType,
StructType,
DataType,
)
from pyspark import pandas as ps
from pyspark.pandas._typing import Axis, Dtype, Label, Name
from pyspark.pandas.base import IndexOpsMixin
from pyspark.pandas.utils import (
align_diff_frames,
default_session,
is_name_like_tuple,
is_name_like_value,
name_like_string,
same_anchor,
scol_for,
validate_axis,
log_advice,
)
from pyspark.pandas.frame import DataFrame, _reduce_spark_multi
from pyspark.pandas.internal import (
InternalFrame,
DEFAULT_SERIES_NAME,
HIDDEN_COLUMNS,
SPARK_INDEX_NAME_FORMAT,
)
from pyspark.pandas.series import Series, first_series
from pyspark.pandas.spark import functions as SF
from pyspark.pandas.spark.utils import as_nullable_spark_type, force_decimal_precision_scale
from pyspark.pandas.indexes import Index, DatetimeIndex, TimedeltaIndex
from pyspark.pandas.indexes.multi import MultiIndex
__all__ = [
"from_pandas",
"range",
"read_csv",
"read_delta",
"read_table",
"read_spark_io",
"read_parquet",
"read_clipboard",
"read_excel",
"read_html",
"to_datetime",
"date_range",
"to_timedelta",
"timedelta_range",
"get_dummies",
"concat",
"melt",
"isna",
"isnull",
"notna",
"notnull",
"read_sql_table",
"read_sql_query",
"read_sql",
"read_json",
"merge",
"merge_asof",
"to_numeric",
"broadcast",
"read_orc",
]
def from_pandas(pobj: Union[pd.DataFrame, pd.Series, pd.Index]) -> Union[Series, DataFrame, Index]:
"""Create a pandas-on-Spark DataFrame, Series or Index from a pandas DataFrame, Series or Index.
This is similar to Spark's `SparkSession.createDataFrame()` with pandas DataFrame,
but this also works with pandas Series and picks the index.
Parameters
----------
pobj : pandas.DataFrame, pandas.Series or pandas.Index
pandas DataFrame, Series or Index to read.
Returns
-------
Series, DataFrame or Index
If a pandas Series is passed in, this function returns a pandas-on-Spark Series.
If a pandas DataFrame is passed in, this function returns a pandas-on-Spark DataFrame.
If a pandas Index is passed in, this function returns a pandas-on-Spark Index.
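Examples
--------
A minimal conversion sketch; the output assumes the default integer index.
>>> ps.from_pandas(pd.Series([1, 2, 3]))
0    1
1    2
2    3
dtype: int64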
"""
if isinstance(pobj, pd.Series):
return Series(pobj)
elif isinstance(pobj, pd.DataFrame):
return DataFrame(pobj)
elif isinstance(pobj, pd.Index):
return DataFrame(pd.DataFrame(index=pobj)).index
else:
raise TypeError("Unknown data type: {}".format(type(pobj).__name__))
_range = range # built-in range
def range(
start: int, end: Optional[int] = None, step: int = 1, num_partitions: Optional[int] = None
) -> DataFrame:
"""
Create a DataFrame with some range of numbers.
The resulting DataFrame has a single int64 column named `id`, containing elements in a range
from ``start`` to ``end`` (exclusive) with step value ``step``. If only the first parameter
(i.e. start) is specified, we treat it as the end value with the start value being 0.
This is similar to the range function in SparkSession and is used primarily for testing.
Parameters
----------
start : int
the start value (inclusive)
end : int, optional
the end value (exclusive)
step : int, optional, default 1
the incremental step
num_partitions : int, optional
the number of partitions of the DataFrame
Returns
-------
DataFrame
Examples
--------
When the first parameter is specified, we generate a range of values up till that number.
>>> ps.range(5)
id
0 0
1 1
2 2
3 3
4 4
When start, end, and step are specified:
>>> ps.range(start = 100, end = 200, step = 20)
id
0 100
1 120
2 140
3 160
4 180
"""
sdf = default_session().range(start=start, end=end, step=step, numPartitions=num_partitions)
return DataFrame(sdf)
def read_csv(
path: str,
sep: str = ",",
header: Union[str, int, None] = "infer",
names: Optional[Union[str, List[str]]] = None,
index_col: Optional[Union[str, List[str]]] = None,
usecols: Optional[Union[List[int], List[str], Callable[[str], bool]]] = None,
squeeze: bool = False,
mangle_dupe_cols: bool = True,
dtype: Optional[Union[str, Dtype, Dict[str, Union[str, Dtype]]]] = None,
nrows: Optional[int] = None,
parse_dates: bool = False,
quotechar: Optional[str] = None,
escapechar: Optional[str] = None,
comment: Optional[str] = None,
encoding: Optional[str] = None,
**options: Any,
) -> Union[DataFrame, Series]:
"""Read CSV (comma-separated) file into DataFrame or Series.
Parameters
----------
path : str
The path string storing the CSV file to be read.
sep : str, default ‘,’
Delimiter to use. Must be a single character.
header : int, str, or None, default ‘infer’
Row number to use as the column names, and the start of the data.
Default behavior is to infer the column names: if no names are passed
the behavior is identical to `header=0` and column names are inferred from
the first line of the file; if column names are passed explicitly then
the behavior is identical to `header=None`. Explicitly pass `header=0` to be
able to replace existing names.
names : str or array-like, optional
List of column names to use. If file contains no header row, then you should
explicitly pass `header=None`. Duplicates in this list will cause an error to be issued.
If a string is given, it should be a DDL-formatted string in Spark SQL, which is
preferred to avoid schema inference for better performance.
index_col: str or list of str, optional, default: None
Index column of table in Spark.
usecols : list-like or callable, optional
Return a subset of the columns. If list-like, all elements must either be
positional (i.e. integer indices into the document columns) or strings that
correspond to column names provided either by the user in names or inferred
from the document header row(s).
If callable, the callable function will be evaluated against the column names,
returning names where the callable function evaluates to `True`.
squeeze : bool, default False
If the parsed data only contains one column then return a Series.
mangle_dupe_cols : bool, default True
Duplicate columns will be specified as 'X0', 'X1', ... 'XN', rather
than 'X' ... 'X'. Passing in False will cause data to be overwritten if
there are duplicate names in the columns.
Currently only `True` is allowed.
dtype : Type name or dict of column -> type, default None
Data type for data or columns. E.g. {‘a’: np.float64, ‘b’: np.int32}. Use str or object
together with suitable na_values settings to preserve and not interpret dtype.
nrows : int, default None
Number of rows to read from the CSV file.
parse_dates : boolean or list of ints or names or list of lists or dict, default `False`.
Currently only `False` is allowed.
quotechar : str (length 1), optional
The character used to denote the start and end of a quoted item. Quoted items can include
the delimiter and it will be ignored.
escapechar : str (length 1), default None
One-character string used to escape the delimiter.
comment: str, optional
A single character marking lines that should not be parsed.
encoding: str, optional
Indicates the encoding to use when reading the file.
options : dict
All other options passed directly into Spark's data source.
Returns
-------
DataFrame or Series
See Also
--------
DataFrame.to_csv : Write DataFrame to a comma-separated values (csv) file.
Examples
--------
>>> ps.read_csv('data.csv') # doctest: +SKIP
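For example (illustrative only; the path and column names here are placeholders),
a DDL-formatted `names` string and an `index_col` can be passed to skip
schema inference:
>>> ps.read_csv('data.csv', names='col0 int, col1 double',
...             index_col='col0')  # doctest: +SKIP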
"""
# Since latin-1 is the same encoding as iso-8859-1, it is mapped to iso-8859-1.
encoding_mapping = {"latin-1": "iso-8859-1"}
if "options" in options and isinstance(options.get("options"), dict) and len(options) == 1:
options = options.get("options")
if mangle_dupe_cols is not True:
raise ValueError("mangle_dupe_cols can only be `True`: %s" % mangle_dupe_cols)
if parse_dates is not False:
raise ValueError("parse_dates can only be `False`: %s" % parse_dates)
if usecols is not None and not callable(usecols):
usecols = list(usecols) # type: ignore[assignment]
if usecols is None or callable(usecols) or len(usecols) > 0:
reader = default_session().read
reader.option("inferSchema", True)
reader.option("sep", sep)
if header == "infer":
header = 0 if names is None else None
if header == 0:
reader.option("header", True)
elif header is None:
reader.option("header", False)
else:
raise ValueError("Unknown header argument {}".format(header))
if quotechar is not None:
reader.option("quote", quotechar)
if escapechar is not None:
reader.option("escape", escapechar)
if comment is not None:
if not isinstance(comment, str) or len(comment) != 1:
raise ValueError("Only length-1 comment characters supported")
reader.option("comment", comment)
reader.options(**options)
if encoding is not None:
reader.option("encoding", encoding_mapping.get(encoding, encoding))
column_labels: Dict[Any, str]
if isinstance(names, str):
sdf = reader.schema(names).csv(path)
column_labels = {col: col for col in sdf.columns}
else:
sdf = reader.csv(path)
if is_list_like(names):
names = list(names)
if len(set(names)) != len(names):
raise ValueError("Found non-unique column index")
if len(names) != len(sdf.columns):
raise ValueError(
"The number of names [%s] does not match the number "
"of columns [%d]. Try passing names as a Spark SQL "
"DDL-formatted string." % (len(names), len(sdf.schema))
)
column_labels = dict(zip(names, sdf.columns))
elif header is None:
column_labels = dict(enumerate(sdf.columns))
else:
column_labels = {col: col for col in sdf.columns}
if usecols is not None:
missing: List[Union[int, str]]
if callable(usecols):
column_labels = {
label: col for label, col in column_labels.items() if usecols(label)
}
missing = []
elif all(isinstance(col, int) for col in usecols):
usecols_ints = cast(List[int], usecols)
new_column_labels = {
label: col
for i, (label, col) in enumerate(column_labels.items())
if i in usecols_ints
}
missing = [
col
for col in usecols_ints
if (
col >= len(column_labels)
or list(column_labels)[col] not in new_column_labels
)
]
column_labels = new_column_labels
elif all(isinstance(col, str) for col in usecols):
new_column_labels = {
label: col for label, col in column_labels.items() if label in usecols
}
missing = [col for col in usecols if col not in new_column_labels]
column_labels = new_column_labels
else:
raise ValueError(
"'usecols' must either be list-like of all strings, "
"all unicode, all integers or a callable."
)
if len(missing) > 0:
raise ValueError(
"Usecols do not match columns, columns expected but not " "found: %s" % missing
)
if len(column_labels) > 0:
sdf = sdf.select([scol_for(sdf, col) for col in column_labels.values()])
else:
sdf = default_session().createDataFrame([], schema=StructType())
else:
sdf = default_session().createDataFrame([], schema=StructType())
column_labels = {}
if nrows is not None:
sdf = sdf.limit(nrows)
index_spark_column_names: List[str]
index_names: List[Label]
if index_col is not None:
if isinstance(index_col, (str, int)):
index_col = [index_col]
for col in index_col:
if col not in column_labels:
raise KeyError(col)
index_spark_column_names = [column_labels[col] for col in index_col]
index_names = [(col,) for col in index_col]
column_labels = {
label: col for label, col in column_labels.items() if label not in index_col
}
else:
log_advice(
"If `index_col` is not specified for `read_csv`, "
"the default index is attached which can cause additional overhead."
)
index_spark_column_names = []
index_names = []
psdf: DataFrame = DataFrame(
InternalFrame(
spark_frame=sdf,
index_spark_columns=[scol_for(sdf, col) for col in index_spark_column_names],
index_names=index_names,
column_labels=[
label if is_name_like_tuple(label) else (label,) for label in column_labels
],
data_spark_columns=[scol_for(sdf, col) for col in column_labels.values()],
)
)
if dtype is not None:
if isinstance(dtype, dict):
for col, tpe in dtype.items():
psdf[col] = psdf[col].astype(tpe)
else:
for col in psdf.columns:
psdf[col] = psdf[col].astype(dtype)
if squeeze and len(psdf.columns) == 1:
return first_series(psdf)
else:
return psdf
def read_json(
path: str, lines: bool = True, index_col: Optional[Union[str, List[str]]] = None, **options: Any
) -> DataFrame:
"""
Convert a JSON string to DataFrame.
Parameters
----------
path : string
File path
lines : bool, default True
Read the file as a json object per line. It should be always True for now.
index_col : str or list of str, optional, default: None
Index column of table in Spark.
options : dict
All other options passed directly into Spark's data source.
Examples
--------
>>> df = ps.DataFrame([['a', 'b'], ['c', 'd']],
... columns=['col 1', 'col 2'])
>>> df.to_json(path=r'%s/read_json/foo.json' % path, num_files=1)
>>> ps.read_json(
... path=r'%s/read_json/foo.json' % path
... ).sort_values(by="col 1")
col 1 col 2
0 a b
1 c d
>>> df.to_json(path=r'%s/read_json/foo.json' % path, num_files=1, lineSep='___')
>>> ps.read_json(
... path=r'%s/read_json/foo.json' % path, lineSep='___'
... ).sort_values(by="col 1")
col 1 col 2
0 a b
1 c d
You can preserve the index in the roundtrip as below.
>>> df.to_json(path=r'%s/read_json/bar.json' % path, num_files=1, index_col="index")
>>> ps.read_json(
... path=r'%s/read_json/bar.json' % path, index_col="index"
... ).sort_values(by="col 1") # doctest: +NORMALIZE_WHITESPACE
col 1 col 2
index
0 a b
1 c d
"""
if index_col is None:
log_advice(
"If `index_col` is not specified for `read_json`, "
"the default index is attached which can cause additional overhead."
)
if "options" in options and isinstance(options.get("options"), dict) and len(options) == 1:
options = options.get("options")
if not lines:
raise NotImplementedError("lines=False is not implemented yet.")
return read_spark_io(path, format="json", index_col=index_col, **options)
def read_delta(
path: str,
version: Optional[str] = None,
timestamp: Optional[str] = None,
index_col: Optional[Union[str, List[str]]] = None,
**options: Any,
) -> DataFrame:
"""
Read a Delta Lake table on some file system and return a DataFrame.
If the Delta Lake table is already stored in the catalog (aka the metastore), use 'read_table'.
Parameters
----------
path : string
Path to the Delta Lake table.
version : string, optional
Specifies the table version (based on Delta's internal transaction version) to read from,
using Delta's time travel feature. This sets Delta's 'versionAsOf' option. Note that
this parameter and `timestamp` parameter cannot be used together, otherwise it will raise a
`ValueError`.
timestamp : string, optional
Specifies the table version (based on timestamp) to read from,
using Delta's time travel feature. This must be a valid date or timestamp string in Spark,
and sets Delta's 'timestampAsOf' option. Note that this parameter and `version` parameter
cannot be used together, otherwise it will raise a `ValueError`.
index_col : str or list of str, optional, default: None
Index column of table in Spark.
options
Additional options that can be passed onto Delta.
Returns
-------
DataFrame
See Also
--------
DataFrame.to_delta
read_table
read_spark_io
read_parquet
Examples
--------
>>> ps.range(1).to_delta('%s/read_delta/foo' % path) # doctest: +SKIP
>>> ps.read_delta('%s/read_delta/foo' % path) # doctest: +SKIP
id
0 0
>>> ps.range(10, 15, num_partitions=1).to_delta('%s/read_delta/foo' % path,
... mode='overwrite') # doctest: +SKIP
>>> ps.read_delta('%s/read_delta/foo' % path) # doctest: +SKIP
id
0 10
1 11
2 12
3 13
4 14
>>> ps.read_delta('%s/read_delta/foo' % path, version=0) # doctest: +SKIP
id
0 0
You can preserve the index in the roundtrip as below.
>>> ps.range(10, 15, num_partitions=1).to_delta(
... '%s/read_delta/bar' % path, index_col="index") # doctest: +SKIP
>>> ps.read_delta('%s/read_delta/bar' % path, index_col="index") # doctest: +SKIP
id
index
0 10
1 11
2 12
3 13
4 14
"""
if index_col is None:
log_advice(
"If `index_col` is not specified for `read_delta`, "
"the default index is attached which can cause additional overhead."
)
if version is not None and timestamp is not None:
raise ValueError("version and timestamp cannot be used together.")
if "options" in options and isinstance(options.get("options"), dict) and len(options) == 1:
options = options.get("options")
if version is not None:
options["versionAsOf"] = version
if timestamp is not None:
options["timestampAsOf"] = timestamp
return read_spark_io(path, format="delta", index_col=index_col, **options)
def read_table(name: str, index_col: Optional[Union[str, List[str]]] = None) -> DataFrame:
"""
Read a Spark table and return a DataFrame.
Parameters
----------
name : string
Table name in Spark.
index_col : str or list of str, optional, default: None
Index column of table in Spark.
Returns
-------
DataFrame
See Also
--------
DataFrame.to_table
read_delta
read_parquet
read_spark_io
Examples
--------
>>> ps.range(1).to_table('%s.my_table' % db)
>>> ps.read_table('%s.my_table' % db)
id
0 0
>>> ps.range(1).to_table('%s.my_table' % db, index_col="index")
>>> ps.read_table('%s.my_table' % db, index_col="index") # doctest: +NORMALIZE_WHITESPACE
id
index
0 0
"""
if index_col is None:
log_advice(
"If `index_col` is not specified for `read_table`, "
"the default index is attached which can cause additional overhead."
)
sdf = default_session().read.table(name)
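# _get_index_map resolves `index_col` into the matching Spark columns and index
# names; when `index_col` is None it yields (None, None) and InternalFrame attaches
# the default sequential index instead (hence the advice logged above).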
index_spark_columns, index_names = _get_index_map(sdf, index_col)
return DataFrame(
InternalFrame(
spark_frame=sdf, index_spark_columns=index_spark_columns, index_names=index_names
)
)
def read_spark_io(
path: Optional[str] = None,
format: Optional[str] = None,
schema: Optional[Union[str, "StructType"]] = None,
index_col: Optional[Union[str, List[str]]] = None,
**options: Any,
) -> DataFrame:
"""Load a DataFrame from a Spark data source.
Parameters
----------
path : string, optional
Path to the data source.
format : string, optional
Specifies the input data source format. Some common ones are:
- 'delta'
- 'parquet'
- 'orc'
- 'json'
- 'csv'
schema : string or StructType, optional
Input schema. If none, Spark tries to infer the schema automatically.
The schema can either be a Spark StructType, or a DDL-formatted string like
`col0 INT, col1 DOUBLE`.
index_col : str or list of str, optional, default: None
Index column of table in Spark.
options : dict
All other options passed directly into Spark's data source.
See Also
--------
DataFrame.to_spark_io
read_table
read_delta
read_parquet
Examples
--------
>>> ps.range(1).to_spark_io('%s/read_spark_io/data.parquet' % path)
>>> ps.read_spark_io(
... '%s/read_spark_io/data.parquet' % path, format='parquet', schema='id long')
id
0 0
>>> ps.range(10, 15, num_partitions=1).to_spark_io('%s/read_spark_io/data.json' % path,
... format='json', lineSep='__')
>>> ps.read_spark_io(
... '%s/read_spark_io/data.json' % path, format='json', schema='id long', lineSep='__')
id
0 10
1 11
2 12
3 13
4 14
You can preserve the index in the roundtrip as below.
>>> ps.range(10, 15, num_partitions=1).to_spark_io('%s/read_spark_io/data.orc' % path,
... format='orc', index_col="index")
>>> ps.read_spark_io(
... path=r'%s/read_spark_io/data.orc' % path, format="orc", index_col="index")
... # doctest: +NORMALIZE_WHITESPACE
id
index
0 10
1 11
2 12
3 13
4 14
"""
if "options" in options and isinstance(options.get("options"), dict) and len(options) == 1:
options = options.get("options")
sdf = default_session().read.load(path=path, format=format, schema=schema, **options)
index_spark_columns, index_names = _get_index_map(sdf, index_col)
return DataFrame(
InternalFrame(
spark_frame=sdf, index_spark_columns=index_spark_columns, index_names=index_names
)
)
def read_parquet(
path: str,
columns: Optional[List[str]] = None,
index_col: Optional[List[str]] = None,
pandas_metadata: bool = False,
**options: Any,
) -> DataFrame:
"""Load a parquet object from the file path, returning a DataFrame.
Parameters
----------
path : string
File path
columns : list, default=None
If not None, only these columns will be read from the file.
index_col : str or list of str, optional, default: None
Index column of table in Spark.
pandas_metadata : bool, default: False
If True, try to respect the metadata if the Parquet file is written from pandas.
options : dict
All other options passed directly into Spark's data source.
Returns
-------
DataFrame
See Also
--------
DataFrame.to_parquet
read_table
read_delta
read_spark_io
Examples
--------
>>> ps.range(1).to_parquet('%s/read_spark_io/data.parquet' % path)
>>> ps.read_parquet('%s/read_spark_io/data.parquet' % path, columns=['id'])
id
0 0
You can preserve the index in the roundtrip as below.
>>> ps.range(1).to_parquet('%s/read_spark_io/data.parquet' % path, index_col="index")
>>> ps.read_parquet('%s/read_spark_io/data.parquet' % path, columns=['id'], index_col="index")
... # doctest: +NORMALIZE_WHITESPACE
id
index
0 0
"""
if index_col is None:
log_advice(
"If `index_col` is not specified for `read_parquet`, "
"the default index is attached which can cause additional overhead."
)
if "options" in options and isinstance(options.get("options"), dict) and len(options) == 1:
options = options.get("options")
if columns is not None:
columns = list(columns)
index_names = None
if index_col is None and pandas_metadata:
# Try to read pandas metadata
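# The UDF below receives the raw bytes of a single parquet file (loaded through
# Spark's "binaryFile" source), opens its footer with pyarrow, and pulls out the
# index columns/names that pandas recorded under the b"pandas" metadata key.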
@no_type_check
@pandas_udf("index_col array<string>, index_names array<string>")
def read_index_metadata(pser: pd.Series) -> pd.DataFrame:
binary = pser.iloc[0]
metadata = pq.ParquetFile(pa.BufferReader(binary)).metadata.metadata
if b"pandas" in metadata:
pandas_metadata = json.loads(metadata[b"pandas"].decode("utf8"))
if all(isinstance(col, str) for col in pandas_metadata["index_columns"]):
index_col = []
index_names = []
for col in pandas_metadata["index_columns"]:
index_col.append(col)
for column in pandas_metadata["columns"]:
if column["field_name"] == col:
index_names.append(column["name"])
break
else:
index_names.append(None)
return pd.DataFrame({"index_col": [index_col], "index_names": [index_names]})
return pd.DataFrame({"index_col": [None], "index_names": [None]})
index_col, index_names = (
default_session()
.read.format("binaryFile")
.load(path)
.limit(1)
.select(read_index_metadata("content").alias("index_metadata"))
.select("index_metadata.*")
.head()
)
psdf = read_spark_io(path=path, format="parquet", options=options, index_col=index_col)
if columns is not None:
new_columns = [c for c in columns if c in psdf.columns]
if len(new_columns) > 0:
psdf = psdf[new_columns]
else:
sdf = default_session().createDataFrame([], schema=StructType())
index_spark_columns, index_names = _get_index_map(sdf, index_col)
psdf = DataFrame(
InternalFrame(
spark_frame=sdf,
index_spark_columns=index_spark_columns,
index_names=index_names,
)
)
if index_names is not None:
psdf.index.names = index_names
return psdf
def read_clipboard(sep: str = r"\s+", **kwargs: Any) -> DataFrame:
r"""
Read text from clipboard and pass to read_csv. See read_csv for the
full argument list
Parameters
----------
sep : str, default '\s+'
A string or regex delimiter. The default of '\s+' denotes
one or more whitespace characters.
See Also
--------
DataFrame.to_clipboard : Write text out to clipboard.
Returns
-------
parsed : DataFrame
"""
return cast(DataFrame, from_pandas(pd.read_clipboard(sep, **kwargs)))
def read_excel(
io: Union[str, Any],
sheet_name: Union[str, int, List[Union[str, int]], None] = 0,
header: Union[int, List[int]] = 0,
names: Optional[List] = None,
index_col: Optional[List[int]] = None,
usecols: Optional[Union[int, str, List[Union[int, str]], Callable[[str], bool]]] = None,
squeeze: bool = False,
dtype: Optional[Dict[str, Union[str, Dtype]]] = None,
engine: Optional[str] = None,
converters: Optional[Dict] = None,
true_values: Optional[Any] = None,
false_values: Optional[Any] = None,
skiprows: Optional[Union[int, List[int]]] = None,
nrows: Optional[int] = None,
na_values: Optional[Any] = None,
keep_default_na: bool = True,
verbose: bool = False,
parse_dates: Union[bool, List, Dict] = False,
date_parser: Optional[Callable] = None,
thousands: Optional[str] = None,
comment: Optional[str] = None,
skipfooter: int = 0,
convert_float: bool = True,
mangle_dupe_cols: bool = True,
**kwds: Any,
) -> Union[DataFrame, Series, Dict[str, Union[DataFrame, Series]]]:
"""
Read an Excel file into a pandas-on-Spark DataFrame or Series.
Support both `xls` and `xlsx` file extensions from a local filesystem or URL.
Support an option to read a single sheet or a list of sheets.
Parameters
----------
io : str, file descriptor, pathlib.Path, ExcelFile or xlrd.Book
The string could be a URL. The value URL must be available in Spark's DataFrameReader.
.. note::
If the underlying Spark is below 3.0, the parameter as a string is not supported.
You can use `ps.from_pandas(pd.read_excel(...))` as a workaround.
sheet_name : str, int, list, or None, default 0
Strings are used for sheet names. Integers are used in zero-indexed
sheet positions. Lists of strings/integers are used to request
multiple sheets. Specify None to get all sheets.
Available cases:
* Defaults to ``0``: 1st sheet as a `DataFrame`
* ``1``: 2nd sheet as a `DataFrame`
* ``"Sheet1"``: Load sheet with name "Sheet1"
* ``[0, 1, "Sheet5"]``: Load first, second and sheet named "Sheet5"
as a dict of `DataFrame`
* None: All sheets.
header : int, list of int, default 0
Row (0-indexed) to use for the column labels of the parsed
DataFrame. If a list of integers is passed those row positions will
be combined into a ``MultiIndex``. Use None if there is no header.
names : array-like, default None
List of column names to use. If file contains no header row,
then you should explicitly pass header=None.
index_col : int, list of int, default None
Column (0-indexed) to use as the row labels of the DataFrame.
Pass None if there is no such column. If a list is passed,
those columns will be combined into a ``MultiIndex``. If a
subset of data is selected with ``usecols``, index_col
is based on the subset.
usecols : int, str, list-like, or callable, default None
Return a subset of the columns.
* If None, then parse all columns.
* If str, then indicates comma separated list of Excel column letters
and column ranges (e.g. "A:E" or "A,C,E:F"). Ranges are inclusive of
both sides.
* If list of int, then indicates list of column numbers to be parsed.
* If list of string, then indicates list of column names to be parsed.
* If callable, then evaluate each column name against it and parse the
column if the callable returns ``True``.
squeeze : bool, default False
If the parsed data only contains one column then return a Series.
dtype : Type name or dict of column -> type, default None
Data type for data or columns. E.g. {'a': np.float64, 'b': np.int32}
Use `object` to preserve data as stored in Excel and not interpret dtype.
If converters are specified, they will be applied INSTEAD
of dtype conversion.
engine : str, default None
If io is not a buffer or path, this must be set to identify io.
Acceptable values are None or xlrd.
converters : dict, default None
Dict of functions for converting values in certain columns. Keys can
either be integers or column labels, values are functions that take one
input argument, the Excel cell content, and return the transformed
content.
true_values : list, default None
Values to consider as True.
false_values : list, default None
Values to consider as False.
skiprows : list-like
Rows to skip at the beginning (0-indexed).
nrows : int, default None
Number of rows to parse.
na_values : scalar, str, list-like, or dict, default None
Additional strings to recognize as NA/NaN. If dict passed, specific
per-column NA values. By default the following values are interpreted
as NaN.
keep_default_na : bool, default True
If na_values are specified and keep_default_na is False the default NaN
values are overridden, otherwise they're appended to.
verbose : bool, default False
Indicate number of NA values placed in non-numeric columns.
parse_dates : bool, list-like, or dict, default False
The behavior is as follows:
* bool. If True -> try parsing the index.
* list of int or names. e.g. If [1, 2, 3] -> try parsing columns 1, 2, 3
each as a separate date column.
* list of lists. e.g. If [[1, 3]] -> combine columns 1 and 3 and parse as
a single date column.
* dict, e.g. {'foo': [1, 3]} -> parse columns 1, 3 as date and call
result 'foo'
If a column or index contains an unparseable date, the entire column or
index will be returned unaltered as an object data type. For non-standard
datetime parsing, use ``pd.to_datetime`` after ``pd.read_csv``
Note: A fast-path exists for iso8601-formatted dates.
date_parser : function, optional
Function to use for converting a sequence of string columns to an array of
datetime instances. The default uses ``dateutil.parser.parser`` to do the
conversion. pandas-on-Spark will try to call `date_parser` in three different ways,
advancing to the next if an exception occurs: 1) Pass one or more arrays
(as defined by `parse_dates`) as arguments; 2) concatenate (row-wise) the
string values from the columns defined by `parse_dates` into a single array
and pass that; and 3) call `date_parser` once for each row using one or
more strings (corresponding to the columns defined by `parse_dates`) as
arguments.
thousands : str, default None
Thousands separator for parsing string columns to numeric. Note that
this parameter is only necessary for columns stored as TEXT in Excel,
any numeric columns will automatically be parsed, regardless of display
format.
comment : str, default None
Comments out remainder of line. Pass a character or characters to this
argument to indicate comments in the input file. Any data between the
comment string and the end of the current line is ignored.
skipfooter : int, default 0
Rows at the end to skip (0-indexed).
convert_float : bool, default True
Convert integral floats to int (i.e., 1.0 --> 1). If False, all numeric
data will be read in as floats: Excel stores all numbers as floats
internally.
mangle_dupe_cols : bool, default True
Duplicate columns will be specified as 'X', 'X.1', ...'X.N', rather than
'X'...'X'. Passing in False will cause data to be overwritten if there
are duplicate names in the columns.
**kwds : optional
Optional keyword arguments can be passed to ``TextFileReader``.
Returns
-------
DataFrame or dict of DataFrames
DataFrame from the passed in Excel file. See notes in sheet_name
argument for more information on when a dict of DataFrames is returned.
See Also
--------
DataFrame.to_excel : Write DataFrame to an Excel file.
DataFrame.to_csv : Write DataFrame to a comma-separated values (csv) file.
read_csv : Read a comma-separated values (csv) file into DataFrame.
Examples
--------
The file can be read using the file name as string or an open file object:
>>> ps.read_excel('tmp.xlsx', index_col=0) # doctest: +SKIP
Name Value
0 string1 1
1 string2 2
2 #Comment 3
>>> ps.read_excel(open('tmp.xlsx', 'rb'),
... sheet_name='Sheet3') # doctest: +SKIP
Unnamed: 0 Name Value
0 0 string1 1
1 1 string2 2
2 2 #Comment 3
Index and header can be specified via the `index_col` and `header` arguments
>>> ps.read_excel('tmp.xlsx', index_col=None, header=None) # doctest: +SKIP
0 1 2
0 NaN Name Value
1 0.0 string1 1
2 1.0 string2 2
3 2.0 #Comment 3
Column types are inferred but can be explicitly specified
>>> ps.read_excel('tmp.xlsx', index_col=0,
... dtype={'Name': str, 'Value': float}) # doctest: +SKIP
Name Value
0 string1 1.0
1 string2 2.0
2 #Comment 3.0
True, False, and NA values, and thousands separators have defaults,
but can be explicitly specified, too. Supply the values you would like
as strings or lists of strings!
>>> ps.read_excel('tmp.xlsx', index_col=0,
... na_values=['string1', 'string2']) # doctest: +SKIP
Name Value
0 None 1
1 None 2
2 #Comment 3
Comment lines in the excel input file can be skipped using the `comment` kwarg
>>> ps.read_excel('tmp.xlsx', index_col=0, comment='#') # doctest: +SKIP
Name Value
0 string1 1.0
1 string2 2.0
2 None NaN
"""
def pd_read_excel(
io_or_bin: Any, sn: Union[str, int, List[Union[str, int]], None], sq: bool
) -> pd.DataFrame:
return pd.read_excel(
io=BytesIO(io_or_bin) if isinstance(io_or_bin, (bytes, bytearray)) else io_or_bin,
sheet_name=sn,
header=header,
names=names,
index_col=index_col,
usecols=usecols,
squeeze=sq,
dtype=dtype,
engine=engine,
converters=converters,
true_values=true_values,
false_values=false_values,
skiprows=skiprows,
nrows=nrows,
na_values=na_values,
keep_default_na=keep_default_na,
verbose=verbose,
parse_dates=parse_dates, # type: ignore[arg-type]
date_parser=date_parser,
thousands=thousands,
comment=comment,
skipfooter=skipfooter,
convert_float=convert_float,
mangle_dupe_cols=mangle_dupe_cols,
**kwds,
)
if isinstance(io, str):
# 'binaryFile' format is available since Spark 3.0.0.
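# Load the raw bytes of at most two files: the first is parsed with pandas (and
# returned directly when it is the only file); the presence of a second row means
# the full set of files must be parsed distributedly on Spark further below.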
binaries = default_session().read.format("binaryFile").load(io).select("content").head(2)
io_or_bin = binaries[0][0]
single_file = len(binaries) == 1
else:
io_or_bin = io
single_file = True
pdf_or_psers = pd_read_excel(io_or_bin, sn=sheet_name, sq=squeeze)
if single_file:
if isinstance(pdf_or_psers, dict):
return {
sn: cast(Union[DataFrame, Series], from_pandas(pdf_or_pser))
for sn, pdf_or_pser in pdf_or_psers.items()
}
else:
return cast(Union[DataFrame, Series], from_pandas(pdf_or_psers))
else:
def read_excel_on_spark(
pdf_or_pser: Union[pd.DataFrame, pd.Series],
sn: Union[str, int, List[Union[str, int]], None],
) -> Union[DataFrame, Series]:
if isinstance(pdf_or_pser, pd.Series):
pdf = pdf_or_pser.to_frame()
else:
pdf = pdf_or_pser
psdf = cast(DataFrame, from_pandas(pdf))
return_schema = force_decimal_precision_scale(
as_nullable_spark_type(psdf._internal.spark_frame.drop(*HIDDEN_COLUMNS).schema)
)
def output_func(pdf: pd.DataFrame) -> pd.DataFrame:
pdf = pd.concat(
[pd_read_excel(bin, sn=sn, sq=False) for bin in pdf[pdf.columns[0]]]
)
reset_index = pdf.reset_index()
for name, col in reset_index.iteritems():
dt = col.dtype
if is_datetime64_dtype(dt) or is_datetime64tz_dtype(dt):
continue
reset_index[name] = col.replace({np.nan: None})
pdf = reset_index
# Just positionally map the column names to the given schema's names.
return pdf.rename(columns=dict(zip(pdf.columns, return_schema.names)))
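# Re-read every matched file's bytes on the executors and parse them with pandas
# via mapInPandas, mapping the parsed columns positionally onto the schema that
# was inferred from the first file above.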
sdf = (
default_session()
.read.format("binaryFile")
.load(io)
.select("content")
.mapInPandas(lambda iterator: map(output_func, iterator), schema=return_schema)
)
psdf = DataFrame(psdf._internal.with_new_sdf(sdf))
if squeeze and len(psdf.columns) == 1:
return first_series(psdf)
else:
return psdf
if isinstance(pdf_or_psers, dict):
return {
sn: read_excel_on_spark(pdf_or_pser, sn) for sn, pdf_or_pser in pdf_or_psers.items()
}
else:
return read_excel_on_spark(pdf_or_psers, sheet_name)
def read_html(
io: Union[str, Any],
match: str = ".+",
flavor: Optional[str] = None,
header: Optional[Union[int, List[int]]] = None,
index_col: Optional[Union[int, List[int]]] = None,
skiprows: Optional[Union[int, List[int], slice]] = None,
attrs: Optional[Dict[str, str]] = None,
parse_dates: bool = False,
thousands: str = ",",
encoding: Optional[str] = None,
decimal: str = ".",
converters: Optional[Dict] = None,
na_values: Optional[Any] = None,
keep_default_na: bool = True,
displayed_only: bool = True,
) -> List[DataFrame]:
r"""Read HTML tables into a ``list`` of ``DataFrame`` objects.
Parameters
----------
io : str or file-like
A URL, a file-like object, or a raw string containing HTML. Note that
lxml only accepts the http, ftp and file url protocols. If you have a
URL that starts with ``'https'`` you might try removing the ``'s'``.
match : str or compiled regular expression, optional
The set of tables containing text matching this regex or string will be
returned. Unless the HTML is extremely simple you will probably need to
pass a non-empty string here. Defaults to '.+' (match any non-empty
string). The default value will return all tables contained on a page.
This value is converted to a regular expression so that there is
consistent behavior between Beautiful Soup and lxml.
flavor : str or None, container of strings
The parsing engine to use. 'bs4' and 'html5lib' are synonymous with
each other, they are both there for backwards compatibility. The
default of ``None`` tries to use ``lxml`` to parse and if that fails it
falls back on ``bs4`` + ``html5lib``.
header : int or list-like or None, optional
The row (or list of rows for a :class:`~ps.MultiIndex`) to use to
make the columns headers.
index_col : int or list-like or None, optional
The column (or list of columns) to use to create the index.
skiprows : int or list-like or slice or None, optional
0-based. Number of rows to skip after parsing the column integer. If a
sequence of integers or a slice is given, will skip the rows indexed by
that sequence. Note that a single element sequence means 'skip the nth
row' whereas an integer means 'skip n rows'.
attrs : dict or None, optional
This is a dictionary of attributes that you can pass to use to identify
the table in the HTML. These are not checked for validity before being
passed to lxml or Beautiful Soup. However, these attributes must be
valid HTML table attributes to work correctly. For example, ::
attrs = {'id': 'table'}
is a valid attribute dictionary because the 'id' HTML tag attribute is
a valid HTML attribute for *any* HTML tag as per `this document
<http://www.w3.org/TR/html-markup/global-attributes.html>`__. ::
attrs = {'asdf': 'table'}
is *not* a valid attribute dictionary because 'asdf' is not a valid
HTML attribute even if it is a valid XML attribute. Valid HTML 4.01
table attributes can be found `here
<http://www.w3.org/TR/REC-html40/struct/tables.html#h-11.2>`__. A
working draft of the HTML 5 spec can be found `here
<http://www.w3.org/TR/html-markup/table.html>`__. It contains the
latest information on table attributes for the modern web.
parse_dates : bool, optional
See :func:`~ps.read_csv` for more details.
thousands : str, optional
Separator to use to parse thousands. Defaults to ``','``.
encoding : str or None, optional
The encoding used to decode the web page. Defaults to ``None``. ``None``
preserves the previous encoding behavior, which depends on the
underlying parser library (e.g., the parser library will try to use
the encoding provided by the document).
decimal : str, default '.'
Character to recognize as decimal point (example: use ',' for European
data).
converters : dict, default None
Dict of functions for converting values in certain columns. Keys can
either be integers or column labels, values are functions that take one
input argument, the cell (not column) content, and return the
transformed content.
na_values : iterable, default None
Custom NA values
keep_default_na : bool, default True
If na_values are specified and keep_default_na is False the default NaN
values are overridden, otherwise they're appended to
displayed_only : bool, default True
Whether elements with "display: none" should be parsed
Returns
-------
dfs : list of DataFrames
See Also
--------
read_csv
DataFrame.to_html
"""
pdfs = pd.read_html(
io=io,
match=match,
flavor=flavor,
header=header,
index_col=index_col,
skiprows=skiprows,
attrs=attrs,
parse_dates=parse_dates,
thousands=thousands,
encoding=encoding,
decimal=decimal,
converters=converters,
na_values=na_values,
keep_default_na=keep_default_na,
displayed_only=displayed_only,
)
return cast(List[DataFrame], [from_pandas(pdf) for pdf in pdfs])
# TODO: add `coerce_float` and 'parse_dates' parameters
def read_sql_table(
table_name: str,
con: str,
schema: Optional[str] = None,
index_col: Optional[Union[str, List[str]]] = None,
columns: Optional[Union[str, List[str]]] = None,
**options: Any,
) -> DataFrame:
"""
Read SQL database table into a DataFrame.
Given a table name and a JDBC URI, returns a DataFrame.
Parameters
----------
table_name : str
Name of SQL table in database.
con : str
A JDBC URI can be provided as a str.
.. note:: The URI must be JDBC URI instead of Python's database URI.
schema : str, default None
Name of SQL schema in database to query (if database flavor
supports this). Uses default schema if None (default).
index_col : str or list of str, optional, default: None
Column(s) to set as index(MultiIndex).
columns : list, default None
List of column names to select from SQL table.
options : dict
All other options passed directly into Spark's JDBC data source.
Returns
-------
DataFrame
A SQL table is returned as a two-dimensional data structure with labeled
axes.
See Also
--------
read_sql_query : Read SQL query into a DataFrame.
read_sql : Read SQL query or database table into a DataFrame.
Examples
--------
>>> ps.read_sql_table('table_name', 'jdbc:postgresql:db_name') # doctest: +SKIP
"""
if "options" in options and isinstance(options.get("options"), dict) and len(options) == 1:
options = options.get("options")
reader = default_session().read
reader.option("dbtable", table_name)
reader.option("url", con)
if schema is not None:
reader.schema(schema)
reader.options(**options)
sdf = reader.format("jdbc").load()
index_spark_columns, index_names = _get_index_map(sdf, index_col)
psdf: DataFrame = DataFrame(
InternalFrame(
spark_frame=sdf, index_spark_columns=index_spark_columns, index_names=index_names
)
)
if columns is not None:
if isinstance(columns, str):
columns = [columns]
psdf = psdf[columns]
return psdf
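# A minimal usage sketch (hypothetical JDBC URL, table name and credentials; kept
# as a comment so nothing runs at import time):
#
#   import pyspark.pandas as ps
#   psdf = ps.read_sql_table(
#       "my_table",
#       con="jdbc:postgresql://localhost:5432/db_name",
#       columns=["id", "name"],
#       user="spark_user",    # extra keywords are forwarded to Spark's JDBC source
#       password="secret",
#   )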
# TODO: add `coerce_float`, `params`, and 'parse_dates' parameters
def read_sql_query(
sql: str, con: str, index_col: Optional[Union[str, List[str]]] = None, **options: Any
) -> DataFrame:
"""Read SQL query into a DataFrame.
Returns a DataFrame corresponding to the result set of the query
string. Optionally provide an `index_col` parameter to use one of the
columns as the index, otherwise default index will be used.
.. note:: Some databases might hit the Spark issue SPARK-27596.
Parameters
----------
sql : string SQL query
SQL query to be executed.
con : str
A JDBC URI can be provided as a str.
.. note:: The URI must be JDBC URI instead of Python's database URI.
index_col : string or list of strings, optional, default: None
Column(s) to set as index(MultiIndex).
options : dict
All other options passed directly into Spark's JDBC data source.
Returns
-------
DataFrame
See Also
--------
read_sql_table : Read SQL database table into a DataFrame.
read_sql
Examples
--------
>>> ps.read_sql_query('SELECT * FROM table_name', 'jdbc:postgresql:db_name') # doctest: +SKIP
"""
if "options" in options and isinstance(options.get("options"), dict) and len(options) == 1:
options = options.get("options")
reader = default_session().read
reader.option("query", sql)
reader.option("url", con)
reader.options(**options)
sdf = reader.format("jdbc").load()
index_spark_columns, index_names = _get_index_map(sdf, index_col)
return DataFrame(
InternalFrame(
spark_frame=sdf, index_spark_columns=index_spark_columns, index_names=index_names
)
)
# TODO: add `coerce_float`, `params`, and 'parse_dates' parameters
def read_sql(
sql: str,
con: str,
index_col: Optional[Union[str, List[str]]] = None,
columns: Optional[Union[str, List[str]]] = None,
**options: Any,
) -> DataFrame:
"""
Read SQL query or database table into a DataFrame.
This function is a convenience wrapper around ``read_sql_table`` and
``read_sql_query`` (for backward compatibility). It will delegate
to the specific function depending on the provided input. A SQL query
will be routed to ``read_sql_query``, while a database table name will
be routed to ``read_sql_table``. Note that the delegated function might
have more specific notes about their functionality not listed here.
.. note:: Some databases might hit the Spark issue SPARK-27596.
Parameters
----------
sql : string
SQL query to be executed or a table name.
con : str
A JDBC URI can be provided as a str.
.. note:: The URI must be JDBC URI instead of Python's database URI.
index_col : string or list of strings, optional, default: None
Column(s) to set as index(MultiIndex).
columns : list, default: None
List of column names to select from SQL table (only used when reading
a table).
options : dict
All other options passed directly into Spark's JDBC data source.
Returns
-------
DataFrame
See Also
--------
read_sql_table : Read SQL database table into a DataFrame.
read_sql_query : Read SQL query into a DataFrame.
Examples
--------
>>> ps.read_sql('table_name', 'jdbc:postgresql:db_name') # doctest: +SKIP
>>> ps.read_sql('SELECT * FROM table_name', 'jdbc:postgresql:db_name') # doctest: +SKIP
"""
if "options" in options and isinstance(options.get("options"), dict) and len(options) == 1:
options = options.get("options")
stripped = sql.strip()
if " " not in stripped:  # TODO: identify the table name or not more precisely.
return read_sql_table(sql, con, index_col=index_col, columns=columns, **options)
else:
return read_sql_query(sql, con, index_col=index_col, **options)
@no_type_check
def to_datetime(
arg,
errors: str = "raise",
format: Optional[str] = None,
unit: Optional[str] = None,
infer_datetime_format: bool = False,
origin: str = "unix",
):
"""
Convert argument to datetime.
Parameters
----------
arg : integer, float, string, datetime, list, tuple, 1-d array, Series
or DataFrame/dict-like
errors : {'ignore', 'raise', 'coerce'}, default 'raise'
- If 'raise', then invalid parsing will raise an exception
- If 'coerce', then invalid parsing will be set as NaT
- If 'ignore', then invalid parsing will return the input
format : string, default None
strftime to parse time, eg "%d/%m/%Y", note that "%f" will parse
all the way up to nanoseconds.
unit : string, default None
The unit of the arg (D, s, ms, us, ns) when `arg` is an integer or float
number. This will be based on the origin. For example, with unit='ms'
and origin='unix' (the default), this
would calculate the number of milliseconds to the unix epoch start.
infer_datetime_format : boolean, default False
If True and no `format` is given, attempt to infer the format of the
datetime strings, and if it can be inferred, switch to a faster
method of parsing them. In some cases this can increase the parsing
speed by ~5-10x.
origin : scalar, default 'unix'
Define the reference date. The numeric values would be parsed as number
of units (defined by `unit`) since this reference date.
- If 'unix' (or POSIX) time; origin is set to 1970-01-01.
- If 'julian', unit must be 'D', and origin is set to beginning of
Julian Calendar. Julian day number 0 is assigned to the day starting
at noon on January 1, 4713 BC.
- If Timestamp convertible, origin is set to Timestamp identified by
origin.
Returns
-------
ret : datetime if parsing succeeded.
Return type depends on input:
- list-like: DatetimeIndex
- Series: Series of datetime64 dtype
- scalar: Timestamp
In case when it is not possible to return designated types (e.g. when
any element of input is before Timestamp.min or after Timestamp.max)
return will have datetime.datetime type (or corresponding
array/Series).
Examples
--------
Assembling a datetime from multiple columns of a DataFrame. The keys can be
common abbreviations like ['year', 'month', 'day', 'minute', 'second',
'ms', 'us', 'ns'] or plurals of the same
>>> df = ps.DataFrame({'year': [2015, 2016],
... 'month': [2, 3],
... 'day': [4, 5]})
>>> ps.to_datetime(df)
0 2015-02-04
1 2016-03-05
dtype: datetime64[ns]
If a date does not meet the `timestamp limitations
<http://pandas.pydata.org/pandas-docs/stable/timeseries.html
#timeseries-timestamp-limits>`_, passing errors='ignore'
will return the original input instead of raising any exception.
Passing errors='coerce' will force an out-of-bounds date to NaT,
in addition to forcing non-dates (or non-parseable dates) to NaT.
>>> ps.to_datetime('13000101', format='%Y%m%d', errors='ignore')
datetime.datetime(1300, 1, 1, 0, 0)
>>> ps.to_datetime('13000101', format='%Y%m%d', errors='coerce')
NaT
Passing infer_datetime_format=True can often speed up parsing
if the format is not exactly ISO8601 but is regular.
>>> s = ps.Series(['3/11/2000', '3/12/2000', '3/13/2000'] * 1000)
>>> s.head()
0 3/11/2000
1 3/12/2000
2 3/13/2000
3 3/11/2000
4 3/12/2000
dtype: object
>>> import timeit
>>> timeit.timeit(
... lambda: repr(ps.to_datetime(s, infer_datetime_format=True)),
... number = 1) # doctest: +SKIP
0.35832712500000063
>>> timeit.timeit(
... lambda: repr(ps.to_datetime(s, infer_datetime_format=False)),
... number = 1) # doctest: +SKIP
0.8895321660000004
Using a unix epoch time
>>> ps.to_datetime(1490195805, unit='s')
Timestamp('2017-03-22 15:16:45')
>>> ps.to_datetime(1490195805433502912, unit='ns')
Timestamp('2017-03-22 15:16:45.433502912')
Using a non-unix epoch origin
>>> ps.to_datetime([1, 2, 3], unit='D', origin=pd.Timestamp('1960-01-01'))
DatetimeIndex(['1960-01-02', '1960-01-03', '1960-01-04'], dtype='datetime64[ns]', freq=None)
"""
# mappings for assembling units
# From pandas: pandas.core.tools.datetimes
_unit_map = {
"year": "year",
"years": "year",
"month": "month",
"months": "month",
"day": "day",
"days": "day",
"hour": "h",
"hours": "h",
"minute": "m",
"minutes": "m",
"second": "s",
"seconds": "s",
"ms": "ms",
"millisecond": "ms",
"milliseconds": "ms",
"us": "us",
"microsecond": "us",
"microseconds": "us",
}
def pandas_to_datetime(
pser_or_pdf: Union[pd.DataFrame, pd.Series], cols: Optional[List[str]] = None
) -> Series[np.datetime64]:
if isinstance(pser_or_pdf, pd.DataFrame):
pser_or_pdf = pser_or_pdf[cols]
return pd.to_datetime(
pser_or_pdf,
errors=errors,
format=format,
unit=unit,
infer_datetime_format=infer_datetime_format,
origin=origin,
)
if isinstance(arg, Series):
return arg.pandas_on_spark.transform_batch(pandas_to_datetime)
if isinstance(arg, DataFrame):
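# Assemble a datetime from the DataFrame's unit-like columns: map common column
# names (e.g. 'year', 'months', 'day') to canonical units, require year/month/day,
# append any optional time components that are present, and let pandas do the
# actual assembly per batch through transform_batch.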
unit = {k: _unit_map[k.lower()] for k in arg.keys() if k.lower() in _unit_map}
unit_rev = {v: k for k, v in unit.items()}
list_cols = [unit_rev["year"], unit_rev["month"], unit_rev["day"]]
for u in ["h", "m", "s", "ms", "us"]:
value = unit_rev.get(u)
if value is not None and value in arg:
list_cols.append(value)
psdf = arg[list_cols]
return psdf.pandas_on_spark.transform_batch(pandas_to_datetime, list_cols)
return pd.to_datetime(
arg,
errors=errors,
format=format,
unit=unit,
infer_datetime_format=infer_datetime_format,
origin=origin,
)
def date_range(
start: Union[str, Any] = None,
end: Union[str, Any] = None,
periods: Optional[int] = None,
freq: Optional[Union[str, DateOffset]] = None,
tz: Optional[Union[str, tzinfo]] = None,
normalize: bool = False,
name: Optional[str] = None,
closed: Optional[str] = None,
**kwargs: Any,
) -> DatetimeIndex:
"""
Return a fixed frequency DatetimeIndex.
Parameters
----------
start : str or datetime-like, optional
Left bound for generating dates.
end : str or datetime-like, optional
Right bound for generating dates.
periods : int, optional
Number of periods to generate.
freq : str or DateOffset, default 'D'
Frequency strings can have multiples, e.g. '5H'.
tz : str or tzinfo, optional
Time zone name for returning localized DatetimeIndex, for example
'Asia/Hong_Kong'. By default, the resulting DatetimeIndex is
timezone-naive.
normalize : bool, default False
Normalize start/end dates to midnight before generating date range.
name : str, default None
Name of the resulting DatetimeIndex.
closed : {None, 'left', 'right'}, optional
Make the interval closed with respect to the given frequency to
the 'left', 'right', or both sides (None, the default).
**kwargs
For compatibility. Has no effect on the result.
Returns
-------
rng : DatetimeIndex
See Also
--------
DatetimeIndex : An immutable container for datetimes.
Notes
-----
Of the four parameters ``start``, ``end``, ``periods``, and ``freq``,
exactly three must be specified. If ``freq`` is omitted, the resulting
``DatetimeIndex`` will have ``periods`` linearly spaced elements between
``start`` and ``end`` (closed on both sides).
To learn more about the frequency strings, please see `this link
<https://pandas.pydata.org/pandas-docs/stable/user_guide/timeseries.html#offset-aliases>`__.
Examples
--------
**Specifying the values**
The next four examples generate the same `DatetimeIndex`, but vary
the combination of `start`, `end` and `periods`.
Specify `start` and `end`, with the default daily frequency.
>>> ps.date_range(start='1/1/2018', end='1/08/2018') # doctest: +NORMALIZE_WHITESPACE
DatetimeIndex(['2018-01-01', '2018-01-02', '2018-01-03', '2018-01-04',
'2018-01-05', '2018-01-06', '2018-01-07', '2018-01-08'],
dtype='datetime64[ns]', freq=None)
Specify `start` and `periods`, the number of periods (days).
>>> ps.date_range(start='1/1/2018', periods=8) # doctest: +NORMALIZE_WHITESPACE
DatetimeIndex(['2018-01-01', '2018-01-02', '2018-01-03', '2018-01-04',
'2018-01-05', '2018-01-06', '2018-01-07', '2018-01-08'],
dtype='datetime64[ns]', freq=None)
Specify `end` and `periods`, the number of periods (days).
>>> ps.date_range(end='1/1/2018', periods=8) # doctest: +NORMALIZE_WHITESPACE
DatetimeIndex(['2017-12-25', '2017-12-26', '2017-12-27', '2017-12-28',
'2017-12-29', '2017-12-30', '2017-12-31', '2018-01-01'],
dtype='datetime64[ns]', freq=None)
Specify `start`, `end`, and `periods`; the frequency is generated
automatically (linearly spaced).
>>> ps.date_range(
... start='2018-04-24', end='2018-04-27', periods=3
... ) # doctest: +NORMALIZE_WHITESPACE
DatetimeIndex(['2018-04-24 00:00:00', '2018-04-25 12:00:00',
'2018-04-27 00:00:00'],
dtype='datetime64[ns]', freq=None)
**Other Parameters**
Changed the `freq` (frequency) to ``'M'`` (month end frequency).
>>> ps.date_range(start='1/1/2018', periods=5, freq='M') # doctest: +NORMALIZE_WHITESPACE
DatetimeIndex(['2018-01-31', '2018-02-28', '2018-03-31', '2018-04-30',
'2018-05-31'],
dtype='datetime64[ns]', freq=None)
Multiples are allowed
>>> ps.date_range(start='1/1/2018', periods=5, freq='3M') # doctest: +NORMALIZE_WHITESPACE
DatetimeIndex(['2018-01-31', '2018-04-30', '2018-07-31', '2018-10-31',
'2019-01-31'],
dtype='datetime64[ns]', freq=None)
`freq` can also be specified as an Offset object.
>>> ps.date_range(
... start='1/1/2018', periods=5, freq=pd.offsets.MonthEnd(3)
... ) # doctest: +NORMALIZE_WHITESPACE
DatetimeIndex(['2018-01-31', '2018-04-30', '2018-07-31', '2018-10-31',
'2019-01-31'],
dtype='datetime64[ns]', freq=None)
`closed` controls whether to include `start` and `end` that are on the
boundary. The default includes boundary points on either end.
>>> ps.date_range(
... start='2017-01-01', end='2017-01-04', closed=None
... ) # doctest: +NORMALIZE_WHITESPACE
DatetimeIndex(['2017-01-01', '2017-01-02', '2017-01-03', '2017-01-04'],
dtype='datetime64[ns]', freq=None)
Use ``closed='left'`` to exclude `end` if it falls on the boundary.
>>> ps.date_range(
... start='2017-01-01', end='2017-01-04', closed='left'
... ) # doctest: +NORMALIZE_WHITESPACE
DatetimeIndex(['2017-01-01', '2017-01-02', '2017-01-03'], dtype='datetime64[ns]', freq=None)
Use ``closed='right'`` to exclude `start` if it falls on the boundary.
>>> ps.date_range(
... start='2017-01-01', end='2017-01-04', closed='right'
... ) # doctest: +NORMALIZE_WHITESPACE
DatetimeIndex(['2017-01-02', '2017-01-03', '2017-01-04'], dtype='datetime64[ns]', freq=None)
"""
assert freq not in ["N", "ns"], "nanoseconds is not supported"
assert tz is None, "Localized DatetimeIndex is not supported"
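# The range itself is generated by pandas on the driver and then distributed with
# ps.from_pandas; the asserts above reflect what pandas-on-Spark can represent
# after that conversion.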
return cast(
DatetimeIndex,
ps.from_pandas(
pd.date_range(
start=start,
end=end,
periods=periods,
freq=freq,
tz=tz,
normalize=normalize,
name=name,
closed=closed,
**kwargs,
)
),
)
@no_type_check
def to_timedelta(
arg,
unit: Optional[str] = None,
errors: str = "raise",
):
"""
Convert argument to timedelta.
Parameters
----------
arg : str, timedelta, list-like or Series
The data to be converted to timedelta.
unit : str, optional
Denotes the unit of the arg for numeric `arg`. Defaults to ``"ns"``.
Possible values:
* 'W'
* 'D' / 'days' / 'day'
* 'hours' / 'hour' / 'hr' / 'h'
* 'm' / 'minute' / 'min' / 'minutes' / 'T'
* 'S' / 'seconds' / 'sec' / 'second'
* 'ms' / 'milliseconds' / 'millisecond' / 'milli' / 'millis' / 'L'
* 'us' / 'microseconds' / 'microsecond' / 'micro' / 'micros' / 'U'
* 'ns' / 'nanoseconds' / 'nano' / 'nanos' / 'nanosecond' / 'N'
Must not be specified when `arg` contains strings and ``errors="raise"``.
errors : {'ignore', 'raise', 'coerce'}, default 'raise'
- If 'raise', then invalid parsing will raise an exception.
- If 'coerce', then invalid parsing will be set as NaT.
- If 'ignore', then invalid parsing will return the input.
Returns
-------
ret : timedelta64, TimedeltaIndex or Series of timedelta64 if parsing succeeded.
See Also
--------
DataFrame.astype : Cast argument to a specified dtype.
to_datetime : Convert argument to datetime.
Notes
-----
If the precision is higher than nanoseconds, the precision of the duration is
truncated to nanoseconds for string inputs.
Examples
--------
Parsing a single string to a Timedelta:
>>> ps.to_timedelta('1 days 06:05:01.00003')
Timedelta('1 days 06:05:01.000030')
>>> ps.to_timedelta('15.5us')
Timedelta('0 days 00:00:00.000015500')
Parsing a list or array of strings:
>>> ps.to_timedelta(['1 days 06:05:01.00003', '15.5us', 'nan']) # doctest: +NORMALIZE_WHITESPACE
TimedeltaIndex(['1 days 06:05:01.000030', '0 days 00:00:00.000015500', NaT],
dtype='timedelta64[ns]', freq=None)
Converting numbers by specifying the `unit` keyword argument:
>>> ps.to_timedelta(np.arange(5), unit='s') # doctest: +NORMALIZE_WHITESPACE
TimedeltaIndex(['0 days 00:00:00', '0 days 00:00:01', '0 days 00:00:02',
'0 days 00:00:03', '0 days 00:00:04'],
dtype='timedelta64[ns]', freq=None)
>>> ps.to_timedelta(np.arange(5), unit='d') # doctest: +NORMALIZE_WHITESPACE
TimedeltaIndex(['0 days', '1 days', '2 days', '3 days', '4 days'],
dtype='timedelta64[ns]', freq=None)
"""
def pandas_to_timedelta(pser: pd.Series) -> np.timedelta64:
return pd.to_timedelta(
arg=pser,
unit=unit,
errors=errors,
)
if isinstance(arg, Series):
return arg.transform(pandas_to_timedelta)
else:
return pd.to_timedelta(
arg=arg,
unit=unit,
errors=errors,
)
def timedelta_range(
start: Union[str, Any] = None,
end: Union[str, Any] = None,
periods: Optional[int] = None,
freq: Optional[Union[str, DateOffset]] = None,
name: Optional[str] = None,
closed: Optional[str] = None,
) -> TimedeltaIndex:
"""
Return a fixed frequency TimedeltaIndex, with day as the default frequency.
Parameters
----------
start : str or timedelta-like, optional
Left bound for generating timedeltas.
end : str or timedelta-like, optional
Right bound for generating timedeltas.
periods : int, optional
Number of periods to generate.
freq : str or DateOffset, default 'D'
Frequency strings can have multiples, e.g. '5H'.
name : str, default None
Name of the resulting TimedeltaIndex.
closed : {None, 'left', 'right'}, optional
Make the interval closed with respect to the given frequency to
the 'left', 'right', or both sides (None, the default).
Returns
-------
TimedeltaIndex
Notes
-----
Of the four parameters ``start``, ``end``, ``periods``, and ``freq``,
exactly three must be specified. If ``freq`` is omitted, the resulting
``TimedeltaIndex`` will have ``periods`` linearly spaced elements between
``start`` and ``end`` (closed on both sides).
To learn more about the frequency strings, please see `this link
<https://pandas.pydata.org/pandas-docs/stable/user_guide/timeseries.html#offset-aliases>`__.
Examples
--------
>>> ps.timedelta_range(start='1 day', periods=4) # doctest: +NORMALIZE_WHITESPACE
TimedeltaIndex(['1 days', '2 days', '3 days', '4 days'], dtype='timedelta64[ns]', freq=None)
The closed parameter specifies which endpoint is included.
The default behavior is to include both endpoints.
>>> ps.timedelta_range(start='1 day', periods=4, closed='right') # doctest: +NORMALIZE_WHITESPACE
TimedeltaIndex(['2 days', '3 days', '4 days'], dtype='timedelta64[ns]', freq=None)
The freq parameter specifies the frequency of the TimedeltaIndex.
Only fixed frequencies can be passed; non-fixed frequencies such as 'M' (month end) will raise.
>>> ps.timedelta_range(start='1 day', end='2 days', freq='6H') # doctest: +NORMALIZE_WHITESPACE
TimedeltaIndex(['1 days 00:00:00', '1 days 06:00:00', '1 days 12:00:00',
'1 days 18:00:00', '2 days 00:00:00'],
dtype='timedelta64[ns]', freq=None)
Specify start, end, and periods; the frequency is generated automatically (linearly spaced).
>>> ps.timedelta_range(start='1 day', end='5 days', periods=4) # doctest: +NORMALIZE_WHITESPACE
TimedeltaIndex(['1 days 00:00:00', '2 days 08:00:00', '3 days 16:00:00',
'5 days 00:00:00'],
dtype='timedelta64[ns]', freq=None)
"""
assert freq not in ["N", "ns"], "nanoseconds is not supported"
return cast(
TimedeltaIndex,
ps.from_pandas(
pd.timedelta_range(
start=start,
end=end,
periods=periods,
freq=freq,
name=name,
closed=closed,
)
),
)
def get_dummies(
data: Union[DataFrame, Series],
prefix: Optional[Union[str, List[str], Dict[str, str]]] = None,
prefix_sep: str = "_",
dummy_na: bool = False,
columns: Optional[Union[Name, List[Name]]] = None,
sparse: bool = False,
drop_first: bool = False,
dtype: Optional[Union[str, Dtype]] = None,
) -> DataFrame:
"""
Convert categorical variable into dummy/indicator variables, also
known as one hot encoding.
Parameters
----------
data : array-like, Series, or DataFrame
prefix : string, list of strings, or dict of strings, default None
String to append to DataFrame column names.
Pass a list with length equal to the number of columns
when calling get_dummies on a DataFrame. Alternatively, `prefix`
can be a dictionary mapping column names to prefixes.
prefix_sep : string, default '_'
If appending prefix, separator/delimiter to use. Or pass a
list or dictionary as with `prefix`.
dummy_na : bool, default False
Add a column to indicate NaNs, if False NaNs are ignored.
columns : list-like, default None
Column names in the DataFrame to be encoded.
If `columns` is None then all the columns with
`object` or `category` dtype will be converted.
sparse : bool, default False
Whether the dummy-encoded columns should be backed by
a :class:`SparseArray` (True) or a regular NumPy array (False).
In pandas-on-Spark, this value must be "False".
drop_first : bool, default False
Whether to get k-1 dummies out of k categorical levels by removing the
first level.
dtype : dtype, default np.uint8
Data type for new columns. Only a single dtype is allowed.
Returns
-------
dummies : DataFrame
See Also
--------
Series.str.get_dummies
Examples
--------
>>> s = ps.Series(list('abca'))
>>> ps.get_dummies(s)
a b c
0 1 0 0
1 0 1 0
2 0 0 1
3 1 0 0
>>> df = ps.DataFrame({'A': ['a', 'b', 'a'], 'B': ['b', 'a', 'c'],
... 'C': [1, 2, 3]},
... columns=['A', 'B', 'C'])
>>> ps.get_dummies(df, prefix=['col1', 'col2'])
C col1_a col1_b col2_a col2_b col2_c
0 1 1 0 0 1 0
1 2 0 1 1 0 0
2 3 1 0 0 0 1
>>> ps.get_dummies(ps.Series(list('abcaa')))
a b c
0 1 0 0
1 0 1 0
2 0 0 1
3 1 0 0
4 1 0 0
>>> ps.get_dummies(ps.Series(list('abcaa')), drop_first=True)
b c
0 0 0
1 1 0
2 0 1
3 0 0
4 0 0
>>> ps.get_dummies(ps.Series(list('abc')), dtype=float)
a b c
0 1.0 0.0 0.0
1 0.0 1.0 0.0
2 0.0 0.0 1.0
"""
if sparse is not False:
raise NotImplementedError("get_dummies currently does not support sparse")
if columns is not None:
if not is_list_like(columns):
raise TypeError("Input must be a list-like for parameter `columns`")
if dtype is None:
dtype = "byte"
if isinstance(data, Series):
if prefix is not None:
prefix = [str(prefix)]
psdf = data.to_frame()
column_labels = psdf._internal.column_labels
remaining_columns = []
else:
if isinstance(prefix, str):
raise NotImplementedError(
"get_dummies currently does not support prefix as string types"
)
psdf = data.copy()
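# Decide which columns get encoded: with `columns=None`, every column whose Spark
# type is in _get_dummies_default_accept_types; otherwise resolve the caller's
# `columns`, which may be a single (possibly multi-level) label or a list of labels.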
if columns is None:
column_labels = [
label
for label in psdf._internal.column_labels
if isinstance(
psdf._internal.spark_type_for(label), _get_dummies_default_accept_types
)
]
else:
if is_name_like_tuple(columns):
column_labels = [
label
for label in psdf._internal.column_labels
if label[: len(columns)] == columns
]
if len(column_labels) == 0:
raise KeyError(name_like_string(columns))
if prefix is None:
prefix = [
str(label[len(columns) :])
if len(label) > len(columns) + 1
else label[len(columns)]
if len(label) == len(columns) + 1
else ""
for label in column_labels
]
elif any(isinstance(col, tuple) for col in columns) and any(
not is_name_like_tuple(col) for col in columns
):
raise ValueError(
"Expected tuple, got {}".format(
type(set(col for col in columns if not is_name_like_tuple(col)).pop())
)
)
else:
column_labels = [
label
for key in columns
for label in psdf._internal.column_labels
if label == key or label[0] == key
]
if len(column_labels) == 0:
if columns is None:
return psdf
raise KeyError("{} not in index".format(columns))
if prefix is None:
prefix = [str(label) if len(label) > 1 else label[0] for label in column_labels]
column_labels_set = set(column_labels)
remaining_columns = [
(
psdf[label]
if psdf._internal.column_labels_level == 1
else psdf[label].rename(name_like_string(label))
)
for label in psdf._internal.column_labels
if label not in column_labels_set
]
if any(
not isinstance(psdf._internal.spark_type_for(label), _get_dummies_acceptable_types)
for label in column_labels
):
raise NotImplementedError(
"get_dummies currently only accept {} values".format(
", ".join(
[cast(Type[DataType], t).typeName() for t in _get_dummies_acceptable_types]
)
)
)
if prefix is not None and len(column_labels) != len(prefix):
raise ValueError(
"Length of 'prefix' ({}) did not match the length of "
"the columns being encoded ({}).".format(len(prefix), len(column_labels))
)
elif isinstance(prefix, dict):
prefix = [prefix[column_label[0]] for column_label in column_labels]
all_values = _reduce_spark_multi(
psdf._internal.spark_frame,
[F.collect_set(psdf._internal.spark_column_for(label)) for label in column_labels],
)
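# Collect the distinct values of every encoded column in a single Spark job; each
# dummy column below is then a per-value boolean comparison cast to `dtype`.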
for i, label in enumerate(column_labels):
values = all_values[i]
if isinstance(values, np.ndarray):
values = values.tolist()
values = sorted(values)
if drop_first:
values = values[1:]
def column_name(v: Any) -> Name:
if prefix is None or cast(List[str], prefix)[i] == "":
return v
else:
return "{}{}{}".format(cast(List[str], prefix)[i], prefix_sep, v)
for value in values:
remaining_columns.append(
(psdf[label].notnull() & (psdf[label] == value))
.astype(dtype)
.rename(column_name(value))
)
if dummy_na:
remaining_columns.append(psdf[label].isnull().astype(dtype).rename(column_name(np.nan)))
return psdf[remaining_columns]
# TODO: there are many parameters to implement and support. See pandas's pd.concat.
def concat(
objs: List[Union[DataFrame, Series]],
axis: Axis = 0,
join: str = "outer",
ignore_index: bool = False,
sort: bool = False,
) -> Union[Series, DataFrame]:
"""
Concatenate pandas-on-Spark objects along a particular axis with optional set logic
along the other axes.
Parameters
----------
objs : a sequence of Series or DataFrame
Any None objects will be dropped silently unless
they are all None, in which case a ValueError will be raised
axis : {0/'index', 1/'columns'}, default 0
The axis to concatenate along.
join : {'inner', 'outer'}, default 'outer'
How to handle indexes on other axis (or axes).
ignore_index : bool, default False
If True, do not use the index values along the concatenation axis. The
resulting axis will be labeled 0, ..., n - 1. This is useful if you are
concatenating objects where the concatenation axis does not have
meaningful indexing information. Note the index values on the other
axes are still respected in the join.
sort : bool, default False
Sort non-concatenation axis if it is not already aligned.
Returns
-------
object, type of objs
When concatenating all ``Series`` along the index (axis=0), a
``Series`` is returned. When ``objs`` contains at least one
``DataFrame``, a ``DataFrame`` is returned. When concatenating along
the columns (axis=1), a ``DataFrame`` is returned.
See Also
--------
Series.append : Concatenate Series.
DataFrame.join : Join DataFrames using indexes.
DataFrame.merge : Merge DataFrames by indexes or columns.
Examples
--------
>>> from pyspark.pandas.config import set_option, reset_option
>>> set_option("compute.ops_on_diff_frames", True)
Combine two ``Series``.
>>> s1 = ps.Series(['a', 'b'])
>>> s2 = ps.Series(['c', 'd'])
>>> ps.concat([s1, s2])
0 a
1 b
0 c
1 d
dtype: object
Clear the existing index and reset it in the result
by setting the ``ignore_index`` option to ``True``.
>>> ps.concat([s1, s2], ignore_index=True)
0 a
1 b
2 c
3 d
dtype: object
Combine two ``DataFrame`` objects with identical columns.
>>> df1 = ps.DataFrame([['a', 1], ['b', 2]],
... columns=['letter', 'number'])
>>> df1
letter number
0 a 1
1 b 2
>>> df2 = ps.DataFrame([['c', 3], ['d', 4]],
... columns=['letter', 'number'])
>>> df2
letter number
0 c 3
1 d 4
>>> ps.concat([df1, df2])
letter number
0 a 1
1 b 2
0 c 3
1 d 4
Combine ``DataFrame`` and ``Series`` objects with different columns.
>>> ps.concat([df2, s1])
letter number 0
0 c 3.0 None
1 d 4.0 None
0 None NaN a
1 None NaN b
Combine ``DataFrame`` objects with overlapping columns
and return everything. Columns outside the intersection will
be filled with ``None`` values.
>>> df3 = ps.DataFrame([['c', 3, 'cat'], ['d', 4, 'dog']],
... columns=['letter', 'number', 'animal'])
>>> df3
letter number animal
0 c 3 cat
1 d 4 dog
>>> ps.concat([df1, df3])
letter number animal
0 a 1 None
1 b 2 None
0 c 3 cat
1 d 4 dog
Sort the columns.
>>> ps.concat([df1, df3], sort=True)
animal letter number
0 None a 1
1 None b 2
0 cat c 3
1 dog d 4
Combine ``DataFrame`` objects with overlapping columns
and return only those that are shared by passing ``inner`` to
the ``join`` keyword argument.
>>> ps.concat([df1, df3], join="inner")
letter number
0 a 1
1 b 2
0 c 3
1 d 4
>>> df4 = ps.DataFrame([['bird', 'polly'], ['monkey', 'george']],
... columns=['animal', 'name'])
Combine with column axis.
>>> ps.concat([df1, df4], axis=1)
letter number animal name
0 a 1 bird polly
1 b 2 monkey george
>>> reset_option("compute.ops_on_diff_frames")
"""
if isinstance(objs, (DataFrame, IndexOpsMixin)) or not isinstance(
objs, Iterable
): # TODO: support dict
raise TypeError(
"first argument must be an iterable of pandas-on-Spark "
"objects, you passed an object of type "
'"{name}"'.format(name=type(objs).__name__)
)
if len(cast(Sized, objs)) == 0:
raise ValueError("No objects to concatenate")
objs = list(filter(lambda obj: obj is not None, objs))
if len(objs) == 0:
raise ValueError("All objects passed were None")
for obj in objs:
if not isinstance(obj, (Series, DataFrame)):
raise TypeError(
"cannot concatenate object of type "
"'{name}'"
"; only ps.Series "
"and ps.DataFrame are valid".format(name=type(obj).__name__)
)
if join not in ["inner", "outer"]:
raise ValueError("Only can inner (intersect) or outer (union) join the other axis.")
axis = validate_axis(axis)
psdf: DataFrame
if axis == 1:
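# Column-axis concatenation: normalize the column label levels across frames, then
# append columns directly for frames sharing the same anchor and align the rest
# through an index join (align_diff_frames) further below.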
psdfs: List[DataFrame] = [
obj.to_frame() if isinstance(obj, Series) else obj for obj in objs
]
level: int = min(psdf._internal.column_labels_level for psdf in psdfs)
psdfs = [
DataFrame._index_normalized_frame(level, psdf)
if psdf._internal.column_labels_level > level
else psdf
for psdf in psdfs
]
concat_psdf = psdfs[0]
column_labels: List[Label] = concat_psdf._internal.column_labels.copy()
psdfs_not_same_anchor = []
for psdf in psdfs[1:]:
duplicated = [label for label in psdf._internal.column_labels if label in column_labels]
if len(duplicated) > 0:
pretty_names = [name_like_string(label) for label in duplicated]
raise ValueError(
"Labels have to be unique; however, got duplicated labels %s." % pretty_names
)
column_labels.extend(psdf._internal.column_labels)
if same_anchor(concat_psdf, psdf):
concat_psdf = DataFrame(
concat_psdf._internal.with_new_columns(
[
concat_psdf._psser_for(label)
for label in concat_psdf._internal.column_labels
]
+ [psdf._psser_for(label) for label in psdf._internal.column_labels]
)
)
else:
psdfs_not_same_anchor.append(psdf)
if len(psdfs_not_same_anchor) > 0:
@no_type_check
def resolve_func(psdf, this_column_labels, that_column_labels):
raise AssertionError("This should not happen.")
for psdf in psdfs_not_same_anchor:
if join == "inner":
concat_psdf = align_diff_frames(
resolve_func,
concat_psdf,
psdf,
fillna=False,
how="inner",
)
elif join == "outer":
concat_psdf = align_diff_frames(
resolve_func,
concat_psdf,
psdf,
fillna=False,
how="full",
)
concat_psdf = concat_psdf[column_labels]
if ignore_index:
concat_psdf.columns = list(map(str, _range(len(concat_psdf.columns)))) # type: ignore[assignment]
if sort:
concat_psdf = concat_psdf.sort_index()
return concat_psdf
# Series, Series ...
# We should return Series if objects are all Series.
should_return_series = all(map(lambda obj: isinstance(obj, Series), objs))
# DataFrame, Series ... & Series, Series ...
# In this case, we should return DataFrame.
new_objs: List[DataFrame] = []
num_series = 0
series_names = set()
for obj in objs:
if isinstance(obj, Series):
num_series += 1
series_names.add(obj.name)
new_objs.append(obj.to_frame(DEFAULT_SERIES_NAME))
else:
assert isinstance(obj, DataFrame)
new_objs.append(obj)
column_labels_levels: Set[int] = set(obj._internal.column_labels_level for obj in new_objs)
if len(column_labels_levels) != 1:
raise ValueError("MultiIndex columns should have the same levels")
# DataFrame, DataFrame, ...
# All Series are converted into DataFrame and then compute concat.
if not ignore_index:
indices_of_psdfs = [psdf.index for psdf in new_objs]
index_of_first_psdf = indices_of_psdfs[0]
for index_of_psdf in indices_of_psdfs:
if index_of_first_psdf.names != index_of_psdf.names:
raise ValueError(
"Index type and names should be same in the objects to concatenate. "
"You passed different indices "
"{index_of_first_psdf} and {index_of_psdf}".format(
index_of_first_psdf=index_of_first_psdf.names,
index_of_psdf=index_of_psdf.names,
)
)
column_labels_of_psdfs = [psdf._internal.column_labels for psdf in new_objs]
index_names_of_psdfs: List[List[Optional[Label]]]
if ignore_index:
index_names_of_psdfs = [[] for _ in new_objs]
else:
index_names_of_psdfs = [psdf._internal.index_names for psdf in new_objs]
if all(name == index_names_of_psdfs[0] for name in index_names_of_psdfs) and all(
idx == column_labels_of_psdfs[0] for idx in column_labels_of_psdfs
):
# If all columns are in the same order and values, use it.
psdfs = new_objs
else:
if join == "inner":
interested_columns = set.intersection(*map(lambda x: set(x), column_labels_of_psdfs))
# Keep the column order of the first DataFrame.
merged_columns = [
label for label in column_labels_of_psdfs[0] if label in interested_columns
]
# When multi-index column, although pandas is flaky if `join="inner" and sort=False`,
# always sort to follow the `join="outer"` case behavior.
if (len(merged_columns) > 0 and len(merged_columns[0]) > 1) or sort:
# FIXME: better ordering
merged_columns = sorted(merged_columns, key=name_like_string)
psdfs = [psdf[merged_columns] for psdf in new_objs]
elif join == "outer":
merged_columns = []
for labels in column_labels_of_psdfs:
merged_columns.extend(label for label in labels if label not in merged_columns)
assert len(merged_columns) > 0
# Always sort when the columns are a MultiIndex or there is more than one Series,
# and if there is exactly one Series, never sort.
sort = len(merged_columns[0]) > 1 or num_series > 1 or (num_series != 1 and sort)
if sort:
# FIXME: better ordering
merged_columns = sorted(merged_columns, key=name_like_string)
psdfs = []
for psdf in new_objs:
columns_to_add = list(set(merged_columns) - set(psdf._internal.column_labels))
# TODO: NaN and None difference for missing values. pandas seems filling NaN.
sdf = psdf._internal.resolved_copy.spark_frame
for label in columns_to_add:
sdf = sdf.withColumn(name_like_string(label), SF.lit(None))
data_columns = psdf._internal.data_spark_column_names + [
name_like_string(label) for label in columns_to_add
]
psdf = DataFrame(
psdf._internal.copy(
spark_frame=sdf,
index_spark_columns=[
scol_for(sdf, col) for col in psdf._internal.index_spark_column_names
],
column_labels=(psdf._internal.column_labels + columns_to_add),
data_spark_columns=[scol_for(sdf, col) for col in data_columns],
data_fields=(psdf._internal.data_fields + ([None] * len(columns_to_add))),
)
)
psdfs.append(psdf[merged_columns])
if ignore_index:
sdfs = [
psdf._internal.spark_frame.select(psdf._internal.data_spark_columns) for psdf in psdfs
]
else:
sdfs = [
psdf._internal.spark_frame.select(
psdf._internal.index_spark_columns + psdf._internal.data_spark_columns
)
for psdf in psdfs
]
concatenated = reduce(lambda x, y: x.union(y), sdfs)
if ignore_index:
index_spark_column_names = []
index_names = []
index_fields = []
else:
index_spark_column_names = psdfs[0]._internal.index_spark_column_names
index_names = psdfs[0]._internal.index_names
index_fields = psdfs[0]._internal.index_fields
result_psdf: DataFrame = DataFrame(
psdfs[0]._internal.copy(
spark_frame=concatenated,
index_spark_columns=[scol_for(concatenated, col) for col in index_spark_column_names],
index_names=index_names,
index_fields=index_fields,
data_spark_columns=[
scol_for(concatenated, col) for col in psdfs[0]._internal.data_spark_column_names
],
data_fields=None, # TODO: dtypes?
)
)
if should_return_series:
# If all input were Series, we should return Series.
if len(series_names) == 1:
name = series_names.pop()
else:
name = None
return first_series(result_psdf).rename(name)
else:
return result_psdf
def melt(
frame: DataFrame,
id_vars: Optional[Union[Name, List[Name]]] = None,
value_vars: Optional[Union[Name, List[Name]]] = None,
var_name: Optional[Union[str, List[str]]] = None,
value_name: str = "value",
) -> DataFrame:
return DataFrame.melt(frame, id_vars, value_vars, var_name, value_name)
melt.__doc__ = DataFrame.melt.__doc__
@no_type_check
def isna(obj):
"""
Detect missing values for an array-like object.
This function takes a scalar or array-like object and indicates
whether values are missing (``NaN`` in numeric arrays, ``None`` or ``NaN``
in object arrays).
Parameters
----------
obj : scalar or array-like
Object to check for null or missing values.
Returns
-------
bool or array-like of bool
For scalar input, returns a scalar boolean.
For array input, returns an array of boolean indicating whether each
corresponding element is missing.
See Also
--------
Series.isna : Detect missing values in a Series.
Series.isnull : Detect missing values in a Series.
DataFrame.isna : Detect missing values in a DataFrame.
DataFrame.isnull : Detect missing values in a DataFrame.
Index.isna : Detect missing values in an Index.
Index.isnull : Detect missing values in an Index.
Examples
--------
Scalar arguments (including strings) result in a scalar boolean.
>>> ps.isna('dog')
False
>>> ps.isna(np.nan)
True
ndarrays result in an ndarray of booleans.
>>> array = np.array([[1, np.nan, 3], [4, 5, np.nan]])
>>> array
array([[ 1., nan, 3.],
[ 4., 5., nan]])
>>> ps.isna(array)
array([[False, True, False],
[False, False, True]])
For Series and DataFrame, the same type is returned, containing booleans.
>>> df = ps.DataFrame({'a': ['ant', 'bee', 'cat'], 'b': ['dog', None, 'fly']})
>>> df
a b
0 ant dog
1 bee None
2 cat fly
>>> ps.isna(df)
a b
0 False False
1 False True
2 False False
>>> ps.isnull(df.b)
0 False
1 True
2 False
Name: b, dtype: bool
"""
# TODO: Add back:
# notnull : Boolean inverse of pandas.isnull.
# into the See Also in the docstring. It does not find the method in the latest numpydoc.
if isinstance(obj, (DataFrame, Series)):
return obj.isnull()
else:
return pd.isnull(obj)
isnull = isna
@no_type_check
def notna(obj):
"""
Detect existing (non-missing) values.
Return a boolean same-sized object indicating if the values are not NA.
Non-missing values get mapped to True. NA values, such as None or
:attr:`numpy.NaN`, get mapped to False values.
Returns
-------
bool or array-like of bool
Mask of bool values for each element that
indicates whether an element is not an NA value.
See Also
--------
isna : Detect missing values for an array-like object.
Series.notna : Boolean inverse of Series.isna.
DataFrame.notnull : Boolean inverse of DataFrame.isnull.
Index.notna : Boolean inverse of Index.isna.
Index.notnull : Boolean inverse of Index.isnull.
Examples
--------
Show which entries in a DataFrame are not NA.
>>> df = ps.DataFrame({'age': [5, 6, np.NaN],
... 'born': [pd.NaT, pd.Timestamp('1939-05-27'),
... pd.Timestamp('1940-04-25')],
... 'name': ['Alfred', 'Batman', ''],
... 'toy': [None, 'Batmobile', 'Joker']})
>>> df
age born name toy
0 5.0 NaT Alfred None
1 6.0 1939-05-27 Batman Batmobile
2 NaN 1940-04-25 Joker
>>> df.notnull()
age born name toy
0 True False True False
1 True True True True
2 False True True True
Show which entries in a Series are not NA.
>>> ser = ps.Series([5, 6, np.NaN])
>>> ser
0 5.0
1 6.0
2 NaN
dtype: float64
>>> ps.notna(ser)
0 True
1 True
2 False
dtype: bool
>>> ps.notna(ser.index)
True
"""
# TODO: Add back:
# Series.notnull :Boolean inverse of Series.isnull.
# DataFrame.notna :Boolean inverse of DataFrame.isna.
# into the See Also in the docstring. It does not find the method in the latest numpydoc.
if isinstance(obj, (DataFrame, Series)):
return obj.notna()
else:
return pd.notna(obj)
notnull = notna
def merge(
obj: DataFrame,
right: DataFrame,
how: str = "inner",
on: Optional[Union[Name, List[Name]]] = None,
left_on: Optional[Union[Name, List[Name]]] = None,
right_on: Optional[Union[Name, List[Name]]] = None,
left_index: bool = False,
right_index: bool = False,
suffixes: Tuple[str, str] = ("_x", "_y"),
) -> "DataFrame":
"""
Merge DataFrame objects with a database-style join.
The index of the resulting DataFrame will be one of the following:
- 0...n if no index is used for merging
- Index of the left DataFrame if merged only on the index of the right DataFrame
- Index of the right DataFrame if merged only on the index of the left DataFrame
- All involved indices if merged using the indices of both DataFrames
e.g. if `left` with indices (a, x) and `right` with indices (b, x), the result will
be an index (x, a, b)
Parameters
----------
right: Object to merge with.
how: Type of merge to be performed.
{'left', 'right', 'outer', 'inner'}, default 'inner'
left: use only keys from left frame, similar to a SQL left outer join; preserve key
order.
right: use only keys from right frame, similar to a SQL right outer join; preserve key
order.
outer: use union of keys from both frames, similar to a SQL full outer join; sort keys
lexicographically.
inner: use intersection of keys from both frames, similar to a SQL inner join;
preserve the order of the left keys.
on: Column or index level names to join on. These must be found in both DataFrames. If on
is None and not merging on indexes then this defaults to the intersection of the
columns in both DataFrames.
left_on: Column or index level names to join on in the left DataFrame. Can also
be an array or list of arrays of the length of the left DataFrame.
These arrays are treated as if they are columns.
right_on: Column or index level names to join on in the right DataFrame. Can also
be an array or list of arrays of the length of the right DataFrame.
These arrays are treated as if they are columns.
left_index: Use the index from the left DataFrame as the join key(s). If it is a
MultiIndex, the number of keys in the other DataFrame (either the index or a number of
columns) must match the number of levels.
right_index: Use the index from the right DataFrame as the join key. Same caveats as
left_index.
suffixes: Suffix to apply to overlapping column names in the left and right side,
respectively.
Returns
-------
DataFrame
A DataFrame of the two merged objects.
Examples
--------
>>> df1 = ps.DataFrame({'lkey': ['foo', 'bar', 'baz', 'foo'],
... 'value': [1, 2, 3, 5]},
... columns=['lkey', 'value'])
>>> df2 = ps.DataFrame({'rkey': ['foo', 'bar', 'baz', 'foo'],
... 'value': [5, 6, 7, 8]},
... columns=['rkey', 'value'])
>>> df1
lkey value
0 foo 1
1 bar 2
2 baz 3
3 foo 5
>>> df2
rkey value
0 foo 5
1 bar 6
2 baz 7
3 foo 8
Merge df1 and df2 on the lkey and rkey columns. The value columns have
the default suffixes, _x and _y, appended.
>>> merged = ps.merge(df1, df2, left_on='lkey', right_on='rkey')
>>> merged.sort_values(by=['lkey', 'value_x', 'rkey', 'value_y']) # doctest: +ELLIPSIS
lkey value_x rkey value_y
...bar 2 bar 6
...baz 3 baz 7
...foo 1 foo 5
...foo 1 foo 8
...foo 5 foo 5
...foo 5 foo 8
>>> left_psdf = ps.DataFrame({'A': [1, 2]})
>>> right_psdf = ps.DataFrame({'B': ['x', 'y']}, index=[1, 2])
>>> ps.merge(left_psdf, right_psdf, left_index=True, right_index=True).sort_index()
A B
1 2 x
>>> ps.merge(left_psdf, right_psdf, left_index=True, right_index=True, how='left').sort_index()
A B
0 1 None
1 2 x
>>> ps.merge(left_psdf, right_psdf, left_index=True, right_index=True, how='right').sort_index()
A B
1 2.0 x
2 NaN y
>>> ps.merge(left_psdf, right_psdf, left_index=True, right_index=True, how='outer').sort_index()
A B
0 1.0 None
1 2.0 x
2 NaN y
Notes
-----
As described in #263, joining string columns currently returns None for missing values
instead of NaN.
"""
return obj.merge(
right,
how=how,
on=on,
left_on=left_on,
right_on=right_on,
left_index=left_index,
right_index=right_index,
suffixes=suffixes,
)
def merge_asof(
left: Union[DataFrame, Series],
right: Union[DataFrame, Series],
on: Optional[Name] = None,
left_on: Optional[Name] = None,
right_on: Optional[Name] = None,
left_index: bool = False,
right_index: bool = False,
by: Optional[Union[Name, List[Name]]] = None,
left_by: Optional[Union[Name, List[Name]]] = None,
right_by: Optional[Union[Name, List[Name]]] = None,
suffixes: Tuple[str, str] = ("_x", "_y"),
tolerance: Optional[Any] = None,
allow_exact_matches: bool = True,
direction: str = "backward",
) -> DataFrame:
"""
Perform an asof merge.
This is similar to a left-join except that we match on nearest
key rather than equal keys.
For each row in the left DataFrame:
- A "backward" search selects the last row in the right DataFrame whose
'on' key is less than or equal to the left's key.
- A "forward" search selects the first row in the right DataFrame whose
'on' key is greater than or equal to the left's key.
- A "nearest" search selects the row in the right DataFrame whose 'on'
key is closest in absolute distance to the left's key.
Optionally match on equivalent keys with 'by' before searching with 'on'.
.. versionadded:: 3.3.0
Parameters
----------
left : DataFrame or named Series
right : DataFrame or named Series
on : label
Field name to join on. Must be found in both DataFrames.
The data MUST be ordered. Furthermore this must be a numeric column,
such as datetimelike, integer, or float. On or left_on/right_on
must be given.
left_on : label
Field name to join on in left DataFrame.
right_on : label
Field name to join on in right DataFrame.
left_index : bool
Use the index of the left DataFrame as the join key.
right_index : bool
Use the index of the right DataFrame as the join key.
by : column name or list of column names
Match on these columns before performing merge operation.
left_by : column name
Field names to match on in the left DataFrame.
right_by : column name
Field names to match on in the right DataFrame.
suffixes : 2-length sequence (tuple, list, ...)
Suffix to apply to overlapping column names in the left and right
side, respectively.
tolerance : int or Timedelta, optional, default None
Select asof tolerance within this range; must be compatible
with the merge index.
allow_exact_matches : bool, default True
- If True, allow matching with the same 'on' value
(i.e. less-than-or-equal-to / greater-than-or-equal-to)
- If False, don't match the same 'on' value
(i.e., strictly less-than / strictly greater-than).
direction : 'backward' (default), 'forward', or 'nearest'
Whether to search for prior, subsequent, or closest matches.
Returns
-------
merged : DataFrame
See Also
--------
merge : Merge with a database-style join.
merge_ordered : Merge with optional filling/interpolation.
Examples
--------
>>> left = ps.DataFrame({"a": [1, 5, 10], "left_val": ["a", "b", "c"]})
>>> left
a left_val
0 1 a
1 5 b
2 10 c
>>> right = ps.DataFrame({"a": [1, 2, 3, 6, 7], "right_val": [1, 2, 3, 6, 7]})
>>> right
a right_val
0 1 1
1 2 2
2 3 3
3 6 6
4 7 7
>>> ps.merge_asof(left, right, on="a").sort_values("a").reset_index(drop=True)
a left_val right_val
0 1 a 1
1 5 b 3
2 10 c 7
>>> ps.merge_asof(
... left,
... right,
... on="a",
... allow_exact_matches=False
... ).sort_values("a").reset_index(drop=True)
a left_val right_val
0 1 a NaN
1 5 b 3.0
2 10 c 7.0
>>> ps.merge_asof(
... left,
... right,
... on="a",
... direction="forward"
... ).sort_values("a").reset_index(drop=True)
a left_val right_val
0 1 a 1.0
1 5 b 6.0
2 10 c NaN
>>> ps.merge_asof(
... left,
... right,
... on="a",
... direction="nearest"
... ).sort_values("a").reset_index(drop=True)
a left_val right_val
0 1 a 1
1 5 b 6
2 10 c 7
We can use indexed DataFrames as well.
>>> left = ps.DataFrame({"left_val": ["a", "b", "c"]}, index=[1, 5, 10])
>>> left
left_val
1 a
5 b
10 c
>>> right = ps.DataFrame({"right_val": [1, 2, 3, 6, 7]}, index=[1, 2, 3, 6, 7])
>>> right
right_val
1 1
2 2
3 3
6 6
7 7
>>> ps.merge_asof(left, right, left_index=True, right_index=True).sort_index()
left_val right_val
1 a 1
5 b 3
10 c 7
Here is a real-world time-series example
>>> quotes = ps.DataFrame(
... {
... "time": [
... pd.Timestamp("2016-05-25 13:30:00.023"),
... pd.Timestamp("2016-05-25 13:30:00.023"),
... pd.Timestamp("2016-05-25 13:30:00.030"),
... pd.Timestamp("2016-05-25 13:30:00.041"),
... pd.Timestamp("2016-05-25 13:30:00.048"),
... pd.Timestamp("2016-05-25 13:30:00.049"),
... pd.Timestamp("2016-05-25 13:30:00.072"),
... pd.Timestamp("2016-05-25 13:30:00.075")
... ],
... "ticker": [
... "GOOG",
... "MSFT",
... "MSFT",
... "MSFT",
... "GOOG",
... "AAPL",
... "GOOG",
... "MSFT"
... ],
... "bid": [720.50, 51.95, 51.97, 51.99, 720.50, 97.99, 720.50, 52.01],
... "ask": [720.93, 51.96, 51.98, 52.00, 720.93, 98.01, 720.88, 52.03]
... }
... )
>>> quotes
time ticker bid ask
0 2016-05-25 13:30:00.023 GOOG 720.50 720.93
1 2016-05-25 13:30:00.023 MSFT 51.95 51.96
2 2016-05-25 13:30:00.030 MSFT 51.97 51.98
3 2016-05-25 13:30:00.041 MSFT 51.99 52.00
4 2016-05-25 13:30:00.048 GOOG 720.50 720.93
5 2016-05-25 13:30:00.049 AAPL 97.99 98.01
6 2016-05-25 13:30:00.072 GOOG 720.50 720.88
7 2016-05-25 13:30:00.075 MSFT 52.01 52.03
>>> trades = ps.DataFrame(
... {
... "time": [
... pd.Timestamp("2016-05-25 13:30:00.023"),
... pd.Timestamp("2016-05-25 13:30:00.038"),
... pd.Timestamp("2016-05-25 13:30:00.048"),
... pd.Timestamp("2016-05-25 13:30:00.048"),
... pd.Timestamp("2016-05-25 13:30:00.048")
... ],
... "ticker": ["MSFT", "MSFT", "GOOG", "GOOG", "AAPL"],
... "price": [51.95, 51.95, 720.77, 720.92, 98.0],
... "quantity": [75, 155, 100, 100, 100]
... }
... )
>>> trades
time ticker price quantity
0 2016-05-25 13:30:00.023 MSFT 51.95 75
1 2016-05-25 13:30:00.038 MSFT 51.95 155
2 2016-05-25 13:30:00.048 GOOG 720.77 100
3 2016-05-25 13:30:00.048 GOOG 720.92 100
4 2016-05-25 13:30:00.048 AAPL 98.00 100
By default we are taking the asof of the quotes
>>> ps.merge_asof(
... trades, quotes, on="time", by="ticker"
... ).sort_values(["time", "ticker", "price"]).reset_index(drop=True)
time ticker price quantity bid ask
0 2016-05-25 13:30:00.023 MSFT 51.95 75 51.95 51.96
1 2016-05-25 13:30:00.038 MSFT 51.95 155 51.97 51.98
2 2016-05-25 13:30:00.048 AAPL 98.00 100 NaN NaN
3 2016-05-25 13:30:00.048 GOOG 720.77 100 720.50 720.93
4 2016-05-25 13:30:00.048 GOOG 720.92 100 720.50 720.93
We only asof within 2ms between the quote time and the trade time
>>> ps.merge_asof(
... trades,
... quotes,
... on="time",
... by="ticker",
... tolerance=F.expr("INTERVAL 2 MILLISECONDS") # pd.Timedelta("2ms")
... ).sort_values(["time", "ticker", "price"]).reset_index(drop=True)
time ticker price quantity bid ask
0 2016-05-25 13:30:00.023 MSFT 51.95 75 51.95 51.96
1 2016-05-25 13:30:00.038 MSFT 51.95 155 NaN NaN
2 2016-05-25 13:30:00.048 AAPL 98.00 100 NaN NaN
3 2016-05-25 13:30:00.048 GOOG 720.77 100 720.50 720.93
4 2016-05-25 13:30:00.048 GOOG 720.92 100 720.50 720.93
We only asof within 10ms between the quote time and the trade time
and we exclude exact matches on time. However *prior* data will
propagate forward
>>> ps.merge_asof(
... trades,
... quotes,
... on="time",
... by="ticker",
... tolerance=F.expr("INTERVAL 10 MILLISECONDS"), # pd.Timedelta("10ms")
... allow_exact_matches=False
... ).sort_values(["time", "ticker", "price"]).reset_index(drop=True)
time ticker price quantity bid ask
0 2016-05-25 13:30:00.023 MSFT 51.95 75 NaN NaN
1 2016-05-25 13:30:00.038 MSFT 51.95 155 51.97 51.98
2 2016-05-25 13:30:00.048 AAPL 98.00 100 NaN NaN
3 2016-05-25 13:30:00.048 GOOG 720.77 100 NaN NaN
4 2016-05-25 13:30:00.048 GOOG 720.92 100 NaN NaN
"""
def to_list(os: Optional[Union[Name, List[Name]]]) -> List[Label]:
if os is None:
return []
elif is_name_like_tuple(os):
return [cast(Label, os)]
elif is_name_like_value(os):
return [(os,)]
else:
return [o if is_name_like_tuple(o) else (o,) for o in os]
if isinstance(left, Series):
left = left.to_frame()
if isinstance(right, Series):
right = right.to_frame()
if on:
if left_on or right_on:
raise ValueError(
'Can only pass argument "on" OR "left_on" and "right_on", '
"not a combination of both."
)
left_as_of_names = list(map(left._internal.spark_column_name_for, to_list(on)))
right_as_of_names = list(map(right._internal.spark_column_name_for, to_list(on)))
else:
if left_index:
if isinstance(left.index, MultiIndex):
raise ValueError("left can only have one index")
left_as_of_names = left._internal.index_spark_column_names
else:
left_as_of_names = list(map(left._internal.spark_column_name_for, to_list(left_on)))
if right_index:
if isinstance(right.index, MultiIndex):
raise ValueError("right can only have one index")
right_as_of_names = right._internal.index_spark_column_names
else:
right_as_of_names = list(map(right._internal.spark_column_name_for, to_list(right_on)))
if left_as_of_names and not right_as_of_names:
raise ValueError("Must pass right_on or right_index=True")
if right_as_of_names and not left_as_of_names:
raise ValueError("Must pass left_on or left_index=True")
if not left_as_of_names and not right_as_of_names:
common = list(left.columns.intersection(right.columns))
if len(common) == 0:
raise ValueError(
"No common columns to perform merge on. Merge options: "
"left_on=None, right_on=None, left_index=False, right_index=False"
)
left_as_of_names = list(map(left._internal.spark_column_name_for, to_list(common)))
right_as_of_names = list(map(right._internal.spark_column_name_for, to_list(common)))
if len(left_as_of_names) != 1:
raise ValueError("can only asof on a key for left")
if len(right_as_of_names) != 1:
raise ValueError("can only asof on a key for right")
if by:
if left_by or right_by:
raise ValueError('Can only pass argument "by" OR "left_by" and "right_by".')
left_join_on_names = list(map(left._internal.spark_column_name_for, to_list(by)))
right_join_on_names = list(map(right._internal.spark_column_name_for, to_list(by)))
else:
left_join_on_names = list(map(left._internal.spark_column_name_for, to_list(left_by)))
right_join_on_names = list(map(right._internal.spark_column_name_for, to_list(right_by)))
if left_join_on_names and not right_join_on_names:
raise ValueError("missing right_by")
if right_join_on_names and not left_join_on_names:
raise ValueError("missing left_by")
if len(left_join_on_names) != len(right_join_on_names):
raise ValueError("left_by and right_by must be same length")
# We should distinguish the name to avoid ambiguous column name after merging.
right_prefix = "__right_"
right_as_of_names = [right_prefix + right_as_of_name for right_as_of_name in right_as_of_names]
right_join_on_names = [
right_prefix + right_join_on_name for right_join_on_name in right_join_on_names
]
left_as_of_name = left_as_of_names[0]
right_as_of_name = right_as_of_names[0]
def resolve(internal: InternalFrame, side: str) -> InternalFrame:
rename = lambda col: "__{}_{}".format(side, col)
internal = internal.resolved_copy
sdf = internal.spark_frame
sdf = sdf.select(
*[
scol_for(sdf, col).alias(rename(col))
for col in sdf.columns
if col not in HIDDEN_COLUMNS
],
*HIDDEN_COLUMNS,
)
return internal.copy(
spark_frame=sdf,
index_spark_columns=[
scol_for(sdf, rename(col)) for col in internal.index_spark_column_names
],
index_fields=[field.copy(name=rename(field.name)) for field in internal.index_fields],
data_spark_columns=[
scol_for(sdf, rename(col)) for col in internal.data_spark_column_names
],
data_fields=[field.copy(name=rename(field.name)) for field in internal.data_fields],
)
left_internal = left._internal.resolved_copy
right_internal = resolve(right._internal, "right")
left_table = left_internal.spark_frame.alias("left_table")
right_table = right_internal.spark_frame.alias("right_table")
left_as_of_column = scol_for(left_table, left_as_of_name)
right_as_of_column = scol_for(right_table, right_as_of_name)
if left_join_on_names:
left_join_on_columns = [scol_for(left_table, label) for label in left_join_on_names]
right_join_on_columns = [scol_for(right_table, label) for label in right_join_on_names]
on = reduce(
lambda l, r: l & r,
[l == r for l, r in zip(left_join_on_columns, right_join_on_columns)],
)
else:
on = None
if tolerance is not None and not isinstance(tolerance, Column):
tolerance = SF.lit(tolerance)
as_of_joined_table = left_table._joinAsOf(
right_table,
leftAsOfColumn=left_as_of_column,
rightAsOfColumn=right_as_of_column,
on=on,
how="left",
tolerance=tolerance,
allowExactMatches=allow_exact_matches,
direction=direction,
)
# Unpack suffixes tuple for convenience
left_suffix = suffixes[0]
right_suffix = suffixes[1]
# Append suffixes to columns with the same name to avoid conflicts later
duplicate_columns = set(left_internal.column_labels) & set(right_internal.column_labels)
exprs = []
data_columns = []
column_labels = []
left_scol_for = lambda label: scol_for(
as_of_joined_table, left_internal.spark_column_name_for(label)
)
right_scol_for = lambda label: scol_for(
as_of_joined_table, right_internal.spark_column_name_for(label)
)
for label in left_internal.column_labels:
col = left_internal.spark_column_name_for(label)
scol = left_scol_for(label)
if label in duplicate_columns:
spark_column_name = left_internal.spark_column_name_for(label)
if spark_column_name in (left_as_of_names + left_join_on_names) and (
(right_prefix + spark_column_name) in (right_as_of_names + right_join_on_names)
):
pass
else:
col = col + left_suffix
scol = scol.alias(col)
label = tuple([str(label[0]) + left_suffix] + list(label[1:]))
exprs.append(scol)
data_columns.append(col)
column_labels.append(label)
for label in right_internal.column_labels:
# recover `right_prefix` here.
col = right_internal.spark_column_name_for(label)[len(right_prefix) :]
scol = right_scol_for(label).alias(col)
if label in duplicate_columns:
spark_column_name = left_internal.spark_column_name_for(label)
if spark_column_name in left_as_of_names + left_join_on_names and (
(right_prefix + spark_column_name) in right_as_of_names + right_join_on_names
):
continue
else:
col = col + right_suffix
scol = scol.alias(col)
label = tuple([str(label[0]) + right_suffix] + list(label[1:]))
exprs.append(scol)
data_columns.append(col)
column_labels.append(label)
# Retain indices if they are used for joining
if left_index or right_index:
index_spark_column_names = [
SPARK_INDEX_NAME_FORMAT(i) for i in range(len(left_internal.index_spark_column_names))
]
left_index_scols = [
scol.alias(name)
for scol, name in zip(left_internal.index_spark_columns, index_spark_column_names)
]
exprs.extend(left_index_scols)
index_names = left_internal.index_names
else:
index_spark_column_names = []
index_names = []
selected_columns = as_of_joined_table.select(*exprs)
internal = InternalFrame(
spark_frame=selected_columns,
index_spark_columns=[scol_for(selected_columns, col) for col in index_spark_column_names],
index_names=index_names,
column_labels=column_labels,
data_spark_columns=[scol_for(selected_columns, col) for col in data_columns],
)
return DataFrame(internal)
@no_type_check
def to_numeric(arg, errors="raise"):
"""
Convert argument to a numeric type.
Parameters
----------
arg : scalar, list, tuple, 1-d array, or Series
Argument to be converted.
errors : {'raise', 'coerce'}, default 'raise'
* If 'coerce', then invalid parsing will be set as NaN.
* If 'raise', then invalid parsing will raise an exception.
* If 'ignore', then invalid parsing will return the input.
.. note:: 'ignore' doesn't work yet when `arg` is pandas-on-Spark Series.
Returns
-------
ret : numeric if parsing succeeded.
See Also
--------
DataFrame.astype : Cast argument to a specified dtype.
to_datetime : Convert argument to datetime.
to_timedelta : Convert argument to timedelta.
numpy.ndarray.astype : Cast a numpy array to a specified type.
Examples
--------
>>> psser = ps.Series(['1.0', '2', '-3'])
>>> psser
0 1.0
1 2
2 -3
dtype: object
>>> ps.to_numeric(psser)
0 1.0
1 2.0
2 -3.0
dtype: float32
If the given Series contains values that cannot be cast to float, they are cast to
`np.nan` when `errors` is set to "coerce".
>>> psser = ps.Series(['apple', '1.0', '2', '-3'])
>>> psser
0 apple
1 1.0
2 2
3 -3
dtype: object
>>> ps.to_numeric(psser, errors="coerce")
0 NaN
1 1.0
2 2.0
3 -3.0
dtype: float32
Also support for list, tuple, np.array, or a scalar
>>> ps.to_numeric(['1.0', '2', '-3'])
array([ 1., 2., -3.])
>>> ps.to_numeric(('1.0', '2', '-3'))
array([ 1., 2., -3.])
>>> ps.to_numeric(np.array(['1.0', '2', '-3']))
array([ 1., 2., -3.])
>>> ps.to_numeric('1.0')
1.0
"""
if isinstance(arg, Series):
if errors == "coerce":
return arg._with_new_scol(arg.spark.column.cast("float"))
elif errors == "raise":
scol = arg.spark.column
scol_casted = scol.cast("float")
cond = F.when(
F.assert_true(scol.isNull() | scol_casted.isNotNull()).isNull(), scol_casted
)
return arg._with_new_scol(cond)
elif errors == "ignore":
raise NotImplementedError("'ignore' is not implemented yet, when the `arg` is Series.")
else:
raise ValueError("invalid error value specified")
else:
return pd.to_numeric(arg, errors=errors)
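# A minimal standalone sketch (plain pandas, illustration only) of the errors="coerce"
# semantics that the Series branch above reproduces with a Spark float cast; note that
# plain pandas yields float64 here, while the cast above produces float32.
import pandas as pd

s = pd.Series(["apple", "1.0", "2", "-3"])
print(pd.to_numeric(s, errors="coerce"))  # "apple" -> NaN, the rest -> floats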
from util import load_csv_as_dataframe
import pandas as pd
from feature_extractor import FeatureExtractor
import numpy as np
import pickle
from dateutil import parser
import monthdelta
import csv
from util import read_csv_file
from dateutil.relativedelta import relativedelta
import timeit
class LeaderBoard():
def __init__(self, lb1_lb2_file='data/LeaderBoardData/TADPOLE_LB1_LB2.csv', d1_file='data/d1_data.csv'):
lb1_lb2 = load_csv_as_dataframe(lb1_lb2_file)
lb1_lb2['LB1'] = pd.to_numeric(lb1_lb2['LB1'])
lb1_lb2['LB2'] = pd.to_numeric(lb1_lb2['LB2'])
# Copyright 2018-2019 QuantumBlack Visual Analytics Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
# OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, AND
# NONINFRINGEMENT. IN NO EVENT WILL THE LICENSOR OR OTHER CONTRIBUTORS
# BE LIABLE FOR ANY CLAIM, DAMAGES, OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF, OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
# The QuantumBlack Visual Analytics Limited ("QuantumBlack") name and logo
# (either separately or in combination, "QuantumBlack Trademarks") are
# trademarks of QuantumBlack. The License does not grant you any right or
# license to the QuantumBlack Trademarks. You may not use the QuantumBlack
# Trademarks or any confusingly similar mark as a trademark for your product,
# or use the QuantumBlack Trademarks in any other manner that might cause
# confusion in the marketplace, including but not limited to in advertising,
# on websites, or on software.
#
# See the License for the specific language governing permissions and
# limitations under the License.
# pylint: disable=unused-argument
import io
from unittest.mock import patch
import pandas as pd
import pytest
from pandas.util.testing import assert_frame_equal
from kedro.extras.datasets.pandas import CSVBlobDataSet
from kedro.io import DataSetError, Version
from kedro.io.core import VersionNotFoundError, generate_timestamp
TEST_FILE_NAME = "test.csv"
TEST_CONTAINER_NAME = "test_bucket"
TEST_CREDENTIALS = {"account_name": "ACCOUNT_NAME", "account_key": "ACCOUNT_KEY"}
@pytest.fixture(params=[None])
def load_version(request):
return request.param
@pytest.fixture(params=[None])
def save_version(request):
return request.param or generate_timestamp()
@pytest.fixture()
def dummy_dataframe():
return pd.DataFrame({"col1": [1, 2], "col2": [4, 5], "col3": [5, 6]})
@pytest.fixture
def blob_csv_data_set():
def make_data_set(load_args=None, save_args=None):
return CSVBlobDataSet(
filepath=TEST_FILE_NAME,
container_name=TEST_CONTAINER_NAME,
blob_to_text_args={"to_extra": 42},
blob_from_text_args={"from_extra": 42},
credentials=TEST_CREDENTIALS,
load_args=load_args,
save_args=save_args,
)
return make_data_set
@pytest.fixture
def versioned_blob_csv_data_set(load_version, save_version):
return CSVBlobDataSet(
filepath=TEST_FILE_NAME,
container_name=TEST_CONTAINER_NAME,
credentials=TEST_CREDENTIALS,
blob_to_text_args={"to_extra": 41},
blob_from_text_args={"from_extra": 42},
version=Version(load_version, save_version),
)
@pytest.fixture
def save_path(save_version):
return "{0}/{1}/{0}".format(TEST_FILE_NAME, save_version)
class TestCSVBlobDataSetVersioned:
# pylint: disable=too-many-arguments
def test_save(
self,
versioned_blob_csv_data_set,
dummy_dataframe,
save_path,
save_version,
mocker,
):
"""Test that saving saves with a correct version"""
mocker.patch.object(
versioned_blob_csv_data_set,
"_lookup_load_version",
return_value=save_version,
)
mocker.patch.object(
versioned_blob_csv_data_set._blob_service, "exists", return_value=False
)
save_mock = mocker.patch.object(
versioned_blob_csv_data_set._blob_service, "create_blob_from_text"
)
versioned_blob_csv_data_set.save(dummy_dataframe)
save_mock.assert_called_with(
container_name=TEST_CONTAINER_NAME,
blob_name=save_path,
text=dummy_dataframe.to_csv(index=False),
from_extra=42,
)
def test_load(self, mocker, versioned_blob_csv_data_set):
mocked_load_version = "mocked_load_version"
mocker.patch.object(
versioned_blob_csv_data_set,
"_lookup_load_version",
return_value=mocked_load_version,
)
get_blob_mock = mocker.patch.object(
versioned_blob_csv_data_set._blob_service,
"get_blob_to_text",
return_value=BlobMock(),
)
# load_mock.return_value = TEST_FILE_NAME
# get_blob_mock.return_value = BlobMock()
result = versioned_blob_csv_data_set.load()
get_blob_mock.assert_called_once_with(
container_name=TEST_CONTAINER_NAME,
blob_name="{f}/{lv}/{f}".format(f=TEST_FILE_NAME, lv=mocked_load_version),
to_extra=41,
)
expected = pd.read_csv(io.StringIO(BlobMock().content))
assert_frame_equal(result, expected)
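# BlobMock is referenced by the tests above but not defined in this excerpt; a minimal
# stand-in (an assumption about the real fixture) only needs a ``content`` attribute that
# holds the CSV text of the dummy dataframe:
class BlobMock:
    def __init__(self, content="col1,col2,col3\n1,4,5\n2,5,6\n"):
        # Mirrors the ``content`` attribute of the Azure SDK blob object returned by
        # ``get_blob_to_text``.
        self.content = content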
import datetime
import os
import re
import requests
import urllib.parse
import time
from bs4 import BeautifulSoup
import html2text
import numpy as np
import pandas
search_key_word = 'climate'
search_key = 's'
url = r'https://thebfd.co.nz/'
link_list_data_file_path = 'url-data.csv'
delay_time_min = 0.
delay_time_max = 0.1
quick_save_period = 10
def read_page_entries(page_soup, time_stamp, page_index, page_lists, do_print = True):
entries = page_soup.find_all('div', 'td_module_16')
if(do_print):
print(f'Page {page_index} has {len(entries)} entries')
for entry_index, entry in enumerate(entries):
title_html = entry.find('h3', 'entry-title')
link_html = title_html.find('a')
page_lists['time_stamp'].append(time_stamp)
page_lists['page'].append(page_index)
page_lists['index_in_page'].append(entry_index)
page_lists['title'].append(link_html.attrs['title'])
page_lists['link'].append(link_html.attrs['href'])
def quick_save(page_lists, data_file_path):
print(f'saving...')
data = pandas.DataFrame(page_lists)
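# The source row is truncated here; a plausible completion (assumption) would persist
# the collected links, e.g.:
#   data.to_csv(data_file_path, index=False)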
import re
import numpy as np
import pandas as pd
import altair as alt
import streamlit as st
from vega_datasets import data
df = pd.read_csv('suicide_population.csv')
def getCountry(s):
# Get country name from country-year string
country = ""
return country.join(re.findall(r"\D",s))
def getYear(s):
# Get year from country-year string
year = ""
return year.join(re.findall(r"\d",s))
def getSumData():
df = pd.read_csv('suicide_population.csv')
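# Truncated in the source; a plausible continuation (hypothetical column names from the
# Kaggle suicide dataset) would aggregate counts per country and year using the helpers above:
#   df['country'] = df['country-year'].apply(getCountry)
#   df['year'] = df['country-year'].apply(getYear)
#   return df.groupby(['country', 'year'], as_index=False)['suicides_no'].sum()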
#Rule 24 - Description and text cannot be same.
def description_text(fle, fleName, target):
import re
import os
import sys
import json
import openpyxl
import pandas as pd
from pandas import ExcelWriter
from pandas import ExcelFile
file_name="Description_text_not_same.py"
configFile = 'https://s3.us-east.cloud-object-storage.appdomain.cloud/sharad-saurav-bucket/Configuration.xlsx'
rule="Description_text_not_same"
config=pd.read_excel(configFile)
newdf=config[config['RULE']==rule]
to_check=''
for index,row in newdf.iterrows():
to_check=row['TO_CHECK']
to_check=json.loads(to_check)
files_to_apply=to_check['files_to_apply']
columns_to_apply=to_check['columns_to_apply']
print('true test-----------------------------------',files_to_apply=='ALL' , fleName + ".xlsx" in files_to_apply, files_to_apply=='ALL' or fleName + ".xlsx" in files_to_apply)
if(files_to_apply=='ALL' or fleName in files_to_apply):
data=[]
df = pd.read_excel(fle)
df.index = range(2,df.shape[0]+2)
for index,row in df.iterrows():
text=row['TEXT']
description=row['DESCRIPTION']
if(description==text):
entry=[index,fleName,'Both description and text have same contents']
print('The row '+str(index)+' in the file '+fleName+' have same contents in both description and text')
data.append(entry)
df1 = pd.DataFrame(data, columns = ['ROW_NO', 'FILE_NAME', 'COMMENTS'])
if(ExcelFile(target).sheet_names[0] == 'Sheet1'):
with ExcelWriter(target, engine='openpyxl', mode='w') as writer:
df1.to_excel(writer,sheet_name=rule,index=False)
else:
with ExcelWriter(target, engine='openpyxl', mode='a') as writer:
df1.to_excel(writer,sheet_name=rule,index=False)
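# Illustrative invocation (file names and paths are hypothetical):
#   description_text('input/sample.xlsx', 'sample.xlsx', 'output/validation_report.xlsx')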
# pylint: disable-msg=E1101,W0612
from datetime import datetime, timedelta
import nose
import numpy as np
import pandas as pd
from pandas import (Index, Series, DataFrame, Timestamp, isnull, notnull,
bdate_range, date_range, _np_version_under1p7)
import pandas.core.common as com
from pandas.compat import StringIO, lrange, range, zip, u, OrderedDict, long
from pandas import compat, to_timedelta, tslib
from pandas.tseries.timedeltas import _coerce_scalar_to_timedelta_type as ct
from pandas.util.testing import (assert_series_equal,
assert_frame_equal,
assert_almost_equal,
ensure_clean)
import pandas.util.testing as tm
def _skip_if_numpy_not_friendly():
# not friendly for < 1.7
if _np_version_under1p7:
raise nose.SkipTest("numpy < 1.7")
class TestTimedeltas(tm.TestCase):
_multiprocess_can_split_ = True
def setUp(self):
pass
def test_numeric_conversions(self):
_skip_if_numpy_not_friendly()
self.assertEqual(ct(0), np.timedelta64(0,'ns'))
self.assertEqual(ct(10), np.timedelta64(10,'ns'))
self.assertEqual(ct(10,unit='ns'), np.timedelta64(10,'ns').astype('m8[ns]'))
self.assertEqual(ct(10,unit='us'), np.timedelta64(10,'us').astype('m8[ns]'))
self.assertEqual(ct(10,unit='ms'), np.timedelta64(10,'ms').astype('m8[ns]'))
self.assertEqual(ct(10,unit='s'), np.timedelta64(10,'s').astype('m8[ns]'))
self.assertEqual(ct(10,unit='d'), np.timedelta64(10,'D').astype('m8[ns]'))
def test_timedelta_conversions(self):
_skip_if_numpy_not_friendly()
self.assertEqual(ct(timedelta(seconds=1)), np.timedelta64(1,'s').astype('m8[ns]'))
self.assertEqual(ct(timedelta(microseconds=1)), np.timedelta64(1,'us').astype('m8[ns]'))
self.assertEqual(ct(timedelta(days=1)), np.timedelta64(1,'D').astype('m8[ns]'))
def test_short_format_converters(self):
_skip_if_numpy_not_friendly()
def conv(v):
return v.astype('m8[ns]')
self.assertEqual(ct('10'), np.timedelta64(10,'ns'))
self.assertEqual(ct('10ns'), np.timedelta64(10,'ns'))
self.assertEqual(ct('100'), np.timedelta64(100,'ns'))
self.assertEqual(ct('100ns'), np.timedelta64(100,'ns'))
self.assertEqual(ct('1000'), np.timedelta64(1000,'ns'))
self.assertEqual(ct('1000ns'), np.timedelta64(1000,'ns'))
self.assertEqual(ct('1000NS'), np.timedelta64(1000,'ns'))
self.assertEqual(ct('10us'), np.timedelta64(10000,'ns'))
self.assertEqual(ct('100us'), np.timedelta64(100000,'ns'))
self.assertEqual(ct('1000us'), np.timedelta64(1000000,'ns'))
self.assertEqual(ct('1000Us'), np.timedelta64(1000000,'ns'))
self.assertEqual(ct('1000uS'), np.timedelta64(1000000,'ns'))
self.assertEqual(ct('1ms'), np.timedelta64(1000000,'ns'))
self.assertEqual(ct('10ms'), np.timedelta64(10000000,'ns'))
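# Standalone sanity check (numpy only, illustration) of the unit arithmetic asserted above:
# 10 ms equals 10_000_000 ns once cast to nanosecond resolution.
import numpy as np
assert np.timedelta64(10, "ms").astype("m8[ns]") == np.timedelta64(10000000, "ns")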
import json
import os
import dash
import dash_core_components as dcc
import dash_html_components as html
from dash.dependencies import Input, Output, State, MATCH
import plotly.express as px
import pandas as pd
## DATA FROM https://github.com/CSSEGISandData/COVID-19/tree/master/csse_covid_19_data/csse_covid_19_time_series
data_urls = {
"cases": "https://raw.githubusercontent.com/CSSEGISandData/COVID-19/master/csse_covid_19_data/csse_covid_19_time_series/time_series_covid19_confirmed_global.csv",
"death": "https://raw.githubusercontent.com/CSSEGISandData/COVID-19/master/csse_covid_19_data/csse_covid_19_time_series/time_series_covid19_deaths_global.csv",
"recovery": "https://raw.githubusercontent.com/CSSEGISandData/COVID-19/master/csse_covid_19_data/csse_covid_19_time_series/time_series_covid19_recovered_global.csv",
}
def _read_and_melt(url):
df = pd.read_csv(url)
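# The source row is truncated here; a plausible continuation (assuming the usual JHU CSSE
# column layout) reshapes the wide per-date columns into long form:
#   df = df.melt(id_vars=["Province/State", "Country/Region", "Lat", "Long"],
#                var_name="date", value_name="count")
#   return df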
import logging
import os
import os.path as op
import sys
from copy import copy, deepcopy
import cobra.flux_analysis
import cobra.manipulation
import numpy as np
import pandas as pd
from Bio import SeqIO
from cobra.core import DictList
from slugify import Slugify
import ssbio.core.modelpro
import ssbio.databases.ncbi
import ssbio.databases.patric
import ssbio.protein.sequence.properties.residues
import ssbio.protein.sequence.utils.alignment
import ssbio.protein.sequence.utils.blast
import ssbio.protein.sequence.utils.fasta
from ssbio import utils
from ssbio.core.object import Object
from ssbio.pipeline.gempro import GEMPRO
try:
from StringIO import StringIO
except ImportError:
from io import StringIO
try:
from IPython.display import clear_output
have_ipython = True
from tqdm import tqdm_notebook as tqdm
except ImportError:
have_ipython = False
from tqdm import tqdm
date = utils.Date()
custom_slugify = Slugify(safe_chars='-_.')
logging.basicConfig(stream=sys.stdout, level=logging.INFO)
log = logging.getLogger(__name__)
class ATLAS(Object):
"""Class to represent an ATLAS workflow to carry out multi-strain comparisons
Main steps are:
#. Strain-specific model construction based on orthologous genes & systems modeling
#. Phylogenetic analysis to pick out important genes
#. GEM-PRO of the "base strain"
#. Structure property calculation & integrated structural systems analysis
Each step may generate a report and also request additional files if something is missing
"""
def __init__(self, atlas_name, root_dir, reference_gempro, reference_genome_path=None, description=None):
"""Prepare a GEM-PRO model for ATLAS analysis
Args:
atlas_name (str): Name of your ATLAS project
root_dir (str): Path to where the folder named after ``atlas_name`` will be created.
reference_gempro (GEMPRO): GEM-PRO model to use as the reference genome
reference_genome_path (str): Path to reference genome FASTA file
description (str): Optional string to describe your project
"""
Object.__init__(self, id=atlas_name, description=description)
# Create directories
self._root_dir = None
self.root_dir = root_dir
self.strains = DictList()
self.df_orthology_matrix = pd.DataFrame()
# Mark if the orthology matrix has gene IDs (thus we need to retrieve seqs from the genome file) or if
# it is in the orthology matrix itself
self._orthology_matrix_has_sequences = False
# Load the GEM-PRO (could be a model, could just be a list of genes)
# Check if there is a genome file associated with this model - if not, write all sequences and use that
self.reference_gempro = reference_gempro
if not reference_genome_path and not self.reference_gempro.genome_path:
self.reference_gempro.genome_path = self.reference_gempro.write_representative_sequences_file(outname=self.reference_gempro.id)
else:
self.reference_gempro.genome_path = reference_genome_path
# TODO: must also check if reference_genome_path gene IDs can be matched to the reference_gempro
# Also create an attribute
self._empty_reference_gempro = None
if self.reference_gempro.model:
# If there is a SBML model associated with the GEMPRO, copy that model
self._empty_reference_gempro = GEMPRO(gem_name='Copied reference GEM-PRO', gem=self.reference_gempro.model.copy())
# Reset the GenePro attributes
for x in self._empty_reference_gempro.genes:
x.reset_protein()
else:
# Otherwise, just copy the list of genes over and rename the IDs
strain_genes = [x.id for x in self.reference_gempro.genes]
if len(strain_genes) == 0:
raise ValueError('GEM-PRO has no genes, unable to run multi-strain analysis')
self._empty_reference_gempro = GEMPRO(gem_name='Copied reference GEM-PRO', genes_list=strain_genes)
@property
def root_dir(self):
"""str: Directory where ATLAS project folder named after the attribute ``base_dir`` is located"""
return self._root_dir
@root_dir.setter
def root_dir(self, path):
if not path:
raise ValueError('No path specified')
if not op.exists(path):
raise ValueError('{}: folder does not exist'.format(path))
if self._root_dir:
log.info('Changing root directory of project "{}" from {} to {}'.format(self.id, self.root_dir, path))
if not op.exists(op.join(path, self.id)):
raise IOError('Project "{}" does not exist in folder {}'.format(self.id, path))
else:
log.info('Creating project directory in folder {}'.format(path))
self._root_dir = path
for d in [self.base_dir, self.model_dir, self.data_dir,
self.sequences_dir, self.sequences_by_gene_dir, self.sequences_by_organism_dir]:
ssbio.utils.make_dir(d)
log.info('{}: project location'.format(self.base_dir))
@property
def base_dir(self):
"""str: ATLAS project folder"""
if self.root_dir:
return op.join(self.root_dir, self.id)
else:
return None
@property
def model_dir(self):
"""str: Directory where strain-specific GEMs are stored"""
if self.base_dir:
return op.join(self.base_dir, 'model')
else:
return None
@property
def data_dir(self):
"""str: Directory where all data (dataframes and more) will be stored"""
if self.base_dir:
return op.join(self.base_dir, 'data')
else:
return None
@property
def sequences_dir(self):
"""str: Base directory for genome protein sequences and alignments"""
if self.base_dir:
return op.join(self.base_dir, 'sequences')
else:
return None
@property
def sequences_by_gene_dir(self):
"""str: Directory where all gene specific information and pairwise alignments are stored"""
if self.sequences_dir:
return op.join(self.sequences_dir, 'by_gene')
else:
return None
@property
def sequences_by_organism_dir(self):
"""str: Directory where all strain specific genome and BLAST files are stored"""
if self.sequences_dir:
return op.join(self.sequences_dir, 'by_organism')
else:
return None
# def _copy_reference_gempro(self, new_id):
# """Copy the base strain GEM-PRO into a new GEM-PRO with a specified ID.
#
# Appends the model to the strains attribute.
#
# Args:
# new_id (str): New ID to be assigned to the copied model
#
# Returns:
# GEMPRO: copied GEM-PRO to represent the new strain
#
# """
# logging.disable(logging.WARNING)
# if self.reference_gempro.model:
# # If there is a SBML model associated with the GEMPRO, copy that model
# copied_model = GEMPRO(gem_name=new_id, gem=self._model_to_copy.model.copy())
# copied_model.model.id = new_id
# else:
# # Otherwise, just copy the list of genes over and rename the IDs
# strain_genes = [x.id for x in self._model_to_copy.genes]
# copied_model = GEMPRO(gem_name=new_id, genes_list=strain_genes)
# # Re-enable logging
# logging.disable(logging.NOTSET)
#
# self.strains.append(copied_model)
# log.debug('{}: new model copied from base model'.format(new_id))
#
# return self.strains.get_by_id(new_id)
def load_strain(self, strain_id, strain_genome_file):
"""Load a strain as a new GEM-PRO by its ID and associated genome file. Stored in the ``strains`` attribute.
Args:
strain_id (str): Strain ID
strain_genome_file (str): Path to strain genome file
"""
# logging.disable(logging.WARNING)
strain_gp = GEMPRO(gem_name=strain_id, genome_path=strain_genome_file, write_protein_fasta_files=False)
# logging.disable(logging.NOTSET)
self.strains.append(strain_gp)
return self.strains.get_by_id(strain_id)
def download_patric_genomes(self, ids, force_rerun=False):
"""Download genome files from PATRIC given a list of PATRIC genome IDs and load them as strains.
Args:
ids (str, list): PATRIC ID or list of PATRIC IDs
force_rerun (bool): If genome files should be downloaded again even if they exist
"""
ids = ssbio.utils.force_list(ids)
counter = 0
log.info('Downloading sequences from PATRIC...')
for patric_id in tqdm(ids):
f = ssbio.databases.patric.download_coding_sequences(patric_id=patric_id, seqtype='protein',
outdir=self.sequences_by_organism_dir,
force_rerun=force_rerun)
if f:
self.load_strain(patric_id, f)
counter += 1
log.debug('{}: downloaded sequence'.format(patric_id))
else:
log.warning('{}: unable to download sequence'.format(patric_id))
log.info('Created {} new strain GEM-PROs, accessible at "strains" attribute'.format(counter))
def get_orthology_matrix(self, pid_cutoff=None, bitscore_cutoff=None, evalue_cutoff=None, filter_condition='OR',
remove_strains_with_no_orthology=True,
remove_strains_with_no_differences=False,
remove_genes_not_in_base_model=True):
"""Create the orthology matrix by finding best bidirectional BLAST hits. Genes = rows, strains = columns
Runs run_makeblastdb, run_bidirectional_blast, and calculate_bbh for protein sequences.
Args:
pid_cutoff (float): Minimum percent identity between BLAST hits to filter for in the range [0, 100]
bitscore_cutoff (float): Minimum bitscore allowed between BLAST hits
evalue_cutoff (float): Maximum E-value allowed between BLAST hits
filter_condition (str): 'OR' or 'AND', how to combine cutoff filters. 'OR' gives more results since it
is less stringent, as you will be filtering for hits with (>80% PID or >30 bitscore or <0.0001 evalue).
remove_strains_with_no_orthology (bool): Remove strains which have no orthologous genes found
remove_strains_with_no_differences (bool): Remove strains which have all the same genes as the base model.
Default is False because since orthology is found using a PID cutoff, all genes may be present but
differences may be on the sequence level.
remove_genes_not_in_base_model (bool): Remove genes from the orthology matrix which are not present in our
base model. This happens if we use a genome file for our model that has other genes in it.
Returns:
DataFrame: Orthology matrix calculated from best bidirectional BLAST hits.
"""
# TODO: document and test other cutoffs
# Get the path to the reference genome
r_file = self.reference_gempro.genome_path
bbh_files = {}
log.info('Running bidirectional BLAST and finding best bidirectional hits (BBH)...')
for strain_gempro in tqdm(self.strains):
g_file = strain_gempro.genome_path
# Run bidirectional BLAST
log.debug('{} vs {}: Running bidirectional BLAST'.format(self.reference_gempro.id, strain_gempro.id))
r_vs_g, g_vs_r = ssbio.protein.sequence.utils.blast.run_bidirectional_blast(reference=r_file,
other_genome=g_file,
dbtype='prot',
outdir=self.sequences_by_organism_dir)
# Using the BLAST files, find the BBH
log.debug('{} vs {}: Finding BBHs'.format(self.reference_gempro.id, strain_gempro.id))
bbh = ssbio.protein.sequence.utils.blast.calculate_bbh(blast_results_1=r_vs_g, blast_results_2=g_vs_r,
outdir=self.sequences_by_organism_dir)
bbh_files[strain_gempro.id] = bbh
# Make the orthologous genes matrix
log.info('Creating orthology matrix from BBHs...')
ortho_matrix = ssbio.protein.sequence.utils.blast.create_orthology_matrix(r_name=self.reference_gempro.id,
genome_to_bbh_files=bbh_files,
pid_cutoff=pid_cutoff,
bitscore_cutoff=bitscore_cutoff,
evalue_cutoff=evalue_cutoff,
filter_condition=filter_condition,
outname='{}_{}_orthology.csv'.format(self.reference_gempro.id, 'prot'),
outdir=self.data_dir)
log.info('Saved orthology matrix at {}. See the "df_orthology_matrix" attribute.'.format(ortho_matrix))
self.df_orthology_matrix = pd.read_csv(ortho_matrix, index_col=0)
# Filter the matrix to genes only in our analysis, and also check for strains with no differences or no orthologous genes
self._filter_orthology_matrix(remove_strains_with_no_orthology=remove_strains_with_no_orthology,
remove_strains_with_no_differences=remove_strains_with_no_differences,
remove_genes_not_in_base_model=remove_genes_not_in_base_model)
# def load_manual_orthology_matrix(self, df, clean_names=True,
# remove_strains_with_no_orthology=True,
# remove_strains_with_no_differences=False,
# remove_genes_not_in_base_model=True):
# """Load a manually curated orthology matrix to use in ATLAS. Genes = rows, strains = columns.
#
# Args:
# df (DataFrame): Pandas DataFrame with genes as the rows and strains as the columns
# clean_names (bool): Remove unwanted characters from gene names and strain IDs
# remove_strains_with_no_orthology (bool): Remove strains which have no orthologous genes found
# remove_strains_with_no_differences (bool): Remove strains which have all the same genes as the base model.
# Default is False because since orthology is found using a PID cutoff, all genes may be present but
# differences may be on the sequence level.
# remove_genes_not_in_base_model (bool): Remove genes from the orthology matrix which are not present in our
# base model. This happens if we use a genome file for our model that has other genes in it.
#
# """
# self._orthology_matrix_has_sequences = True
#
# if clean_names:
# new_rows = [custom_slugify(x) for x in df.index]
# new_cols = [custom_slugify(y) for y in df.columns]
# df.index = new_rows
# df.columns = new_cols
#
# self.df_orthology_matrix = df
#
# # Make the copies of the base model
# for strain_id in tqdm(self.df_orthology_matrix.columns):
# self._copy_reference_gempro(new_id=strain_id)
#
# # Filter the strains and orthology matrix
# self._filter_orthology_matrix(remove_strains_with_no_orthology=remove_strains_with_no_orthology,
# remove_strains_with_no_differences=remove_strains_with_no_differences,
# remove_genes_not_in_base_model=remove_genes_not_in_base_model)
def _filter_orthology_matrix(self,
remove_strains_with_no_orthology=True,
remove_strains_with_no_differences=False,
remove_genes_not_in_base_model=True):
"""Filters the orthology matrix by removing genes not in our base model, and also
removes strains from the analysis which have: 0 orthologous genes or no difference from the base strain.
Args:
remove_strains_with_no_orthology (bool): Remove strains which have no orthologous genes found
remove_strains_with_no_differences (bool): Remove strains which have all the same genes as the base model.
Default is False because since orthology is found using a PID cutoff, all genes may be present but
differences may be on the sequence level.
remove_genes_not_in_base_model (bool): Remove genes from the orthology matrix which are not present in our
base model. This happens if we use a genome file for our model that has other genes in it.
"""
if len(self.df_orthology_matrix) == 0:
raise RuntimeError('Empty orthology matrix')
initial_num_strains = len(self.strains)
# Adding names to the row and column of the orthology matrix
self.df_orthology_matrix = self.df_orthology_matrix.rename_axis('gene').rename_axis("strain", axis="columns")
# Gene filtering (of the orthology matrix)
if remove_genes_not_in_base_model:
# Check for gene IDs that are in the model and not in the orthology matrix
# This is probably because: the CDS FASTA file for the base strain did not contain the correct ID
# for the gene and consequently was not included in the orthology matrix
# Save these and report them
reference_strain_gene_ids = [x.id for x in self.reference_gempro.genes]
self.missing_in_orthology_matrix = [x for x in reference_strain_gene_ids if x not in self.df_orthology_matrix.index.tolist()]
self.missing_in_reference_strain = [y for y in self.df_orthology_matrix.index.tolist() if y not in reference_strain_gene_ids]
# Filter the matrix for genes within our base model only
self.df_orthology_matrix = self.df_orthology_matrix[self.df_orthology_matrix.index.isin(reference_strain_gene_ids)]
log.info('Filtered orthology matrix for genes present in base model')
log.warning('{} genes are in your base model but not your orthology matrix, see the attribute "missing_in_orthology_matrix"'.format(len(self.missing_in_orthology_matrix)))
log.warning('{} genes are in the orthology matrix but not your base model, see the attribute "missing_in_reference_strain"'.format(len(self.missing_in_reference_strain)))
# Strain filtering
for strain_gempro in self.strains.copy():
if remove_strains_with_no_orthology:
if strain_gempro.id not in self.df_orthology_matrix.columns:
self.strains.remove(strain_gempro)
log.info('{}: no orthologous genes found for this strain, removed from analysis.'.format(strain_gempro.id))
continue
elif self.df_orthology_matrix[strain_gempro.id].isnull().all():
self.strains.remove(strain_gempro)
log.info('{}: no orthologous genes found for this strain, removed from analysis.'.format(strain_gempro.id))
continue
if remove_strains_with_no_differences:
not_in_strain = self.df_orthology_matrix[pd.isnull(self.df_orthology_matrix[strain_gempro.id])][strain_gempro.id].index.tolist()
if len(not_in_strain) == 0:
self.strains.remove(strain_gempro)
log.info('{}: strain has no differences from the base, removed from analysis.'.format(strain_gempro.id))
continue
log.info('{} strains to be analyzed, {} strains removed'.format(len(self.strains), initial_num_strains - len(self.strains)))
def _pare_down_model(self, strain_gempro, genes_to_remove):
"""Mark genes as non-functional in a GEM-PRO. If there is a COBRApy model associated with it, the
COBRApy method delete_model_genes is utilized to delete genes.
Args:
strain_gempro (GEMPRO): GEMPRO object
genes_to_remove (list): List of gene IDs to remove from the model
"""
# Filter out genes in genes_to_remove which do not show up in the model
strain_genes = [x.id for x in strain_gempro.genes]
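# Genes that never made it into the orthology matrix are treated as absent from every strain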
genes_to_remove.extend(self.missing_in_orthology_matrix)
genes_to_remove = list(set(genes_to_remove).intersection(set(strain_genes)))
if len(genes_to_remove) == 0:
log.info('{}: no genes marked non-functional'.format(strain_gempro.id))
return
else:
log.debug('{}: {} genes to be marked non-functional'.format(strain_gempro.id, len(genes_to_remove)))
# If a COBRApy model exists, utilize the delete_model_genes method
if strain_gempro.model:
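# Reset COBRApy's trimming bookkeeping so the counts reported below reflect only this call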
strain_gempro.model._trimmed = False
strain_gempro.model._trimmed_genes = []
strain_gempro.model._trimmed_reactions = {}
# Delete genes!
cobra.manipulation.delete_model_genes(strain_gempro.model, genes_to_remove)
if strain_gempro.model._trimmed:
log.info('{}: marked {} genes as non-functional, '
'deactivating {} reactions'.format(strain_gempro.id, len(strain_gempro.model._trimmed_genes),
len(strain_gempro.model._trimmed_reactions)))
# Otherwise, just mark the genes as non-functional
else:
for g in genes_to_remove:
strain_gempro.genes.get_by_id(g).functional = False
log.info('{}: marked {} genes as non-functional'.format(strain_gempro.id, len(genes_to_remove)))
def _load_strain_sequences(self, strain_gempro):
"""Load strain sequences from the orthology matrix into the base model for comparisons, and into the
strain-specific model itself.
"""
if self._orthology_matrix_has_sequences: # Load directly from the orthology matrix if it contains sequences
strain_sequences = self.df_orthology_matrix[strain_gempro.id].to_dict()
else: # Otherwise load from the genome file if the orthology matrix contains gene IDs
# Load the genome FASTA file
log.debug('{}: loading strain genome CDS file'.format(strain_gempro.genome_path))
strain_sequences = SeqIO.index(strain_gempro.genome_path, 'fasta')
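# SeqIO.index returns a dict-like, lazily parsed lookup keyed by FASTA record ID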
for strain_gene in strain_gempro.genes:
if strain_gene.functional:
if self._orthology_matrix_has_sequences:
strain_gene_key = strain_gene.id
else:
# Pull the gene ID of the strain from the orthology matrix
strain_gene_key = self.df_orthology_matrix.loc[strain_gene.id, strain_gempro.id]
log.debug('{}: original gene ID to be pulled from strain fasta file'.format(strain_gene_key))
# Load into the base strain for comparisons
ref_gene = self.reference_gempro.genes.get_by_id(strain_gene.id)
new_id = '{}_{}'.format(strain_gene.id, strain_gempro.id)
if ref_gene.protein.sequences.has_id(new_id):
log.debug('{}: sequence already loaded into reference model'.format(new_id))
continue
ref_gene.protein.load_manual_sequence(seq=strain_sequences[strain_gene_key], ident=new_id,
set_as_representative=False)
log.debug('{}: loaded sequence into reference model'.format(new_id))
# Load into the strain GEM-PRO
strain_gene.protein.load_manual_sequence(seq=strain_sequences[strain_gene_key], ident=new_id,
set_as_representative=True)
log.debug('{}: loaded sequence into strain model'.format(new_id))
def build_strain_specific_models(self, save_models=False):
"""Using the orthologous genes matrix, create and modify the strain specific models based on if orthologous
genes exist.
Also store the sequences directly in the reference GEM-PRO protein sequence attribute for the strains.
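Example:
A minimal usage sketch (hypothetical object name ``atlas``; assumes the orthology matrix has already been built):
>>> atlas.build_strain_specific_models(save_models=True)  # doctest: +SKIP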
"""
if len(self.df_orthology_matrix) == 0:
raise RuntimeError('Empty orthology matrix')
# For each strain, start from the previously emptied copy of the reference GEM-PRO
for strain_gempro in tqdm(self.strains):
log.debug('{}: building strain specific model'.format(strain_gempro.id))
# For each genome, load the metabolic model or genes from the reference GEM-PRO
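# Warnings are silenced while the reference model/genes are copied, then logging is restored below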
logging.disable(logging.WARNING)
if self._empty_reference_gempro.model:
strain_gempro.load_cobra_model(self._empty_reference_gempro.model)
elif self._empty_reference_gempro.genes:
strain_gempro.genes = [x.id for x in self._empty_reference_gempro.genes]
logging.disable(logging.NOTSET)
# Get a list of genes which do not have orthology in the strain
not_in_strain = self.df_orthology_matrix[pd.isnull(self.df_orthology_matrix[strain_gempro.id])][strain_gempro.id].index.tolist()
# Mark genes non-functional
self._pare_down_model(strain_gempro=strain_gempro, genes_to_remove=not_in_strain)
# Load sequences into the base and strain models
self._load_strain_sequences(strain_gempro=strain_gempro)
if save_models:
cobra.io.save_json_model(model=strain_gempro.model,
filename=op.join(self.model_dir, '{}.json'.format(strain_gempro.id)))
strain_gempro.save_pickle(op.join(self.model_dir, '{}_gp.pckl'.format(strain_gempro.id)))
log.info('Created {} new strain-specific models and loaded in sequences'.format(len(self.strains)))
def align_orthologous_genes_pairwise(self, gapopen=10, gapextend=0.5):
"""For each gene in the base strain, run a pairwise alignment for all orthologous gene sequences to it."""
for ref_gene in tqdm(self.reference_gempro.genes):
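# Only align genes that have at least one strain sequence loaded in addition to the representative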
if len(ref_gene.protein.sequences) > 1:
alignment_dir = op.join(self.sequences_by_gene_dir, ref_gene.id)
if not op.exists(alignment_dir):
os.mkdir(alignment_dir)
ref_gene.protein.pairwise_align_sequences_to_representative(gapopen=gapopen, gapextend=gapextend,
outdir=alignment_dir, parse=True)
def align_orthologous_genes_multiple(self):
"""For each gene in the base strain, run a multiple alignment to all orthologous strain genes"""
pass
def get_atlas_summary_df(self):
"""Create a single data frame which summarizes all genes per row.
Returns:
DataFrame: Pandas DataFrame of the results
"""
all_info = []
for g in self.reference_gempro.genes_with_a_representative_sequence:
info = {}
info['Gene_ID'] = g.id
info['Gene_name'] = g.name
# Protein object
p = g.protein
info['Protein_sequences'] = len(p.sequences)
info['Protein_structures'] = len(p.structures)
# SeqProp
rseq = p.representative_sequence
info['RepSeq_ID'] = rseq.id
info['RepSeq_sequence_length'] = rseq.seq_len
info['RepSeq_num_sequence_alignments'] = len([x for x in p.sequence_alignments if x.annotations['ssbio_type'] == 'seqalign'])
info['RepSeq_num_structure_alignments'] = len([x for x in p.sequence_alignments if x.annotations['ssbio_type'] == 'structalign'])
# SeqRecord annotations (properties calculated that summarize the whole sequence)
for annotation_name, annotation in rseq.annotations.items():
info['RepSeq_' + annotation_name] = annotation
# SeqRecord alignment annotations
all_num_mutations = []
all_num_deletions = []
all_len_deletions = []
all_num_insertions = []
all_len_insertions = []
all_percent_identity = []
all_percent_similarity = []
for aln in p.sequence_alignments:
# Gather the strain-specific stats
if '{}_'.format(p.id) not in aln.annotations['b_seq']:
continue
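# Store the percent identity in a column named after the strain (the part of b_seq following '<gene id>_')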
info[aln.annotations['b_seq'].split('{}_'.format(p.id))[1]] = aln.annotations['percent_identity']
# Gather the percent identities/similarities
all_percent_identity.append(aln.annotations['percent_identity'])
all_percent_similarity.append(aln.annotations['percent_similarity'])
# Gather the number of residues that are mutated (filter for different mutations of same residue)
num_mutations = len(list(set([x[1] for x in aln.annotations['mutations']])))
all_num_mutations.append(num_mutations)
# Gather the number of deletions as well as the length of the deletion
if not aln.annotations['deletions']:
num_deletions = 0
len_deletions = [0]
else:
num_deletions = len(aln.annotations['deletions'])
len_deletions = [x[1] for x in aln.annotations['deletions']]
all_num_deletions.append(num_deletions)
# Get the total length of the deletion for this one strain
total_len_deletions = np.sum(len_deletions)
all_len_deletions.append(total_len_deletions)
# Gather the number of insertions as well as the length of the insertion
if not aln.annotations['insertions']:
num_insertions = 0
len_insertions = [0]
else:
num_insertions = len(aln.annotations['insertions'])
len_insertions = [x[1] for x in aln.annotations['insertions']]
all_num_insertions.append(num_insertions)
# Get the total length of insertion for this one strain
total_len_insertions = np.sum(len_insertions)
all_len_insertions.append(total_len_insertions)
info['ATLAS_mean_num_mutations'] = np.mean(all_num_mutations)
info['ATLAS_mean_num_deletions'] = np.mean(all_num_deletions)
info['ATLAS_mean_len_deletions'] = np.mean(all_len_deletions)
info['ATLAS_mean_num_insertions'] = np.mean(all_num_insertions)
info['ATLAS_mean_len_insertions'] = np.mean(all_len_insertions)
info['ATLAS_mean_percent_identity'] = np.mean(all_percent_identity)
info['ATLAS_mean_percent_similarity'] = np.mean(all_percent_similarity)
# Other mutation analysis
single, fingerprint = p.sequence_mutation_summary()
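# 'single' maps (reference residue, position, strain residue) tuples to the strains carrying that point mutation;
# 'fingerprint' maps each full set of mutations to the strains sharing that exact set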
# Mutations that show up in at least 1% of strains
singles = []
for k, v in single.items():
k = [str(x) for x in k]
if len(v) / len(p.sequence_alignments) >= 0.01:
singles.append(''.join(k)) # len(v) is the number of strains which have this mutation
info['ATLAS_popular_mutations'] = ';'.join(singles)
# Mutation groups that show up in at least 1% of strains
allfingerprints = []
for k, v in fingerprint.items():
if len(v) / len(p.sequence_alignments) >= 0.01:
fingerprints = []
for m in k:
y = [str(x) for x in m]
fingerprints.append(''.join(y))
allfingerprints.append('-'.join(fingerprints))
info['ATLAS_popular_mutation_groups'] = ';'.join(allfingerprints)
# StructProp
rstruct = p.representative_structure
if rstruct:
if rstruct.structure_file:
info['RepStruct_ID'] = rstruct.id
info['RepStruct_is_experimental'] = rstruct.is_experimental
info['RepStruct_description'] = rstruct.description
info['RepStruct_repseq_coverage'] = p.representative_chain_seq_coverage
# ChainProp
rchain = p.representative_chain
info['RepChain_ID'] = rchain
# ChainProp SeqRecord annotations
rchain_sr = rstruct.chains.get_by_id(rchain).seq_record
for annotation_name, annotation in rchain_sr.annotations.items():
info['RepChain_' + annotation_name] = annotation
all_info.append(info)
cols = ['Gene_ID', 'Gene_name', 'Protein_sequences', 'Protein_structures',
'RepSeq_ID', 'RepSeq_sequence_length',
'RepSeq_num_sequence_alignments', 'RepSeq_num_structure_alignments',
'RepStruct_ID', 'RepChain_ID', 'RepStruct_description',
'RepStruct_is_experimental', 'RepStruct_repseq_coverage',
'ATLAS_mean_percent_identity', 'ATLAS_mean_percent_similarity', 'ATLAS_mean_num_mutations',
'ATLAS_popular_mutations', 'ATLAS_popular_mutation_groups', 'ATLAS_mean_num_deletions',
'ATLAS_mean_num_insertions', 'ATLAS_mean_len_deletions', 'ATLAS_mean_len_insertions',
'RepSeq_aromaticity', 'RepSeq_instability_index', 'RepSeq_isoelectric_point', 'RepSeq_molecular_weight',
'RepSeq_monoisotopic', 'RepSeq_num_tm_helix-tmhmm', 'RepSeq_percent_acidic', 'RepSeq_percent_aliphatic',
'RepSeq_percent_aromatic', 'RepSeq_percent_B-sspro8', 'RepSeq_percent_basic',
'RepSeq_percent_buried-accpro', 'RepSeq_percent_buried-accpro20', 'RepSeq_percent_C-sspro',
'RepSeq_percent_C-sspro8', 'RepSeq_percent_charged', 'RepSeq_percent_E-sspro',
'RepSeq_percent_E-sspro8', 'RepSeq_percent_exposed-accpro', 'RepSeq_percent_exposed-accpro20',
'RepSeq_percent_G-sspro8', 'RepSeq_percent_H-sspro', 'RepSeq_percent_H-sspro8',
'RepSeq_percent_helix_naive', 'RepSeq_percent_I-sspro8', 'RepSeq_percent_non-polar',
'RepSeq_percent_polar', 'RepSeq_percent_S-sspro8', 'RepSeq_percent_small',
'RepSeq_percent_strand_naive', 'RepSeq_percent_T-sspro8', 'RepSeq_percent_tiny',
'RepSeq_percent_turn_naive', 'RepChain_percent_B-dssp', 'RepChain_percent_C-dssp',
'RepChain_percent_E-dssp', 'RepChain_percent_G-dssp', 'RepChain_percent_H-dssp',
'RepChain_percent_I-dssp', 'RepChain_percent_S-dssp', 'RepChain_percent_T-dssp',
'RepChain_SSBOND-biopython']
cols.extend([x.id for x in self.strains])
df_atlas_summary = pd.DataFrame(all_info, columns=cols)
# Drop columns that don't have anything in them
df_atlas_summary.dropna(axis=1, how='all', inplace=True)
return df_atlas_summary
def get_atlas_per_gene_mutation_df(self, gene_id):
"""Create a single data frame which summarizes a gene and its mutations.
Args:
gene_id (str): Gene ID in the base model
Returns:
DataFrame: Pandas DataFrame of the results
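Example:
A minimal usage sketch (hypothetical object and gene IDs; assumes pairwise alignments have been parsed):
>>> df_mut = atlas.get_atlas_per_gene_mutation_df('b0002')  # doctest: +SKIP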
"""
# TODO: also count: number of unique mutations (have to consider position, amino acid change)
# TODO: keep track of strain with most mutations, least mutations
# TODO: keep track of strains that conserve the length of the protein, others that extend or truncate it
# need statistical test for that too (how long is "extended"/"truncated"?)
# TODO: number of strains with at least 1 mutations
# TODO: number of strains with <5% mutated, 5-10%, etc
g = self.reference_gempro.genes.get_by_id(gene_id)
single, fingerprint = g.protein.sequence_mutation_summary(alignment_type='seqalign')
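# Each key in 'single' is a (reference residue, position, strain residue) tuple; its value lists the strain sequence IDs with that mutation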
structure_type_suffix = 'NA'
appender = []
for k, strains in single.items():
# Mutations in the strain
to_append = {}
orig_res = k[0]
resnum = int(k[1])
mutated_res = k[2]
num_strains_mutated = len(strains)
strain_ids = [str(x.split(g.id + '_')[1]) for x in strains]
to_append['ref_residue'] = orig_res
to_append['ref_resnum'] = resnum
to_append['strain_residue'] = mutated_res
to_append['num_strains_mutated'] = num_strains_mutated
to_append['strains_mutated'] = ';'.join(strain_ids)
to_append['at_disulfide_bridge'] = False
# Residue properties
origres_props = ssbio.protein.sequence.properties.residues.residue_biochemical_definition(orig_res)
mutres_props = ssbio.protein.sequence.properties.residues.residue_biochemical_definition(mutated_res)
to_append['ref_residue_prop'] = origres_props
to_append['strain_residue_prop'] = mutres_props
# Grantham score - score a mutation based on biochemical properties
grantham_s, grantham_txt = ssbio.protein.sequence.properties.residues.grantham_score(orig_res, mutated_res)
to_append['grantham_score'] = grantham_s
to_append['grantham_annotation'] = grantham_txt
# Get all per residue annotations - predicted from sequence and calculated from structure
to_append.update(g.protein.get_residue_annotations(seq_resnum=resnum, use_representatives=True))
# Check structure type
if g.protein.representative_structure:
if g.protein.representative_structure.is_experimental:
to_append['structure_type'] = 'EXP'
else:
to_append['structure_type'] = 'HOM'
# At disulfide bond?
repchain = g.protein.representative_chain
repchain_annotations = g.protein.representative_structure.chains.get_by_id(repchain).seq_record.annotations
if 'SSBOND-biopython' in repchain_annotations:
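# Map the sequence residue number onto the representative structure's numbering before checking the disulfide (SSBOND) pairs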
structure_resnum = g.protein.map_seqprop_resnums_to_structprop_resnums(resnums=resnum,
use_representatives=True)
if resnum in structure_resnum:
ssbonds = repchain_annotations['SSBOND-biopython']
ssbonds_res = []
for x in ssbonds:
ssbonds_res.append(x[0])
ssbonds_res.append(x[1])
if structure_resnum in ssbonds_res:
to_append['at_disulfide_bridge'] = True
appender.append(to_append)
if not appender:
return pd.DataFrame()
cols = ['ref_residue', 'ref_resnum', 'strain_residue', 'num_strains_mutated', 'strains_mutated',
'ref_residue_prop', 'strain_residue_prop', 'grantham_score', 'grantham_annotation',
'at_disulfide_bridge',
'seq_SS-sspro', 'seq_SS-sspro8', 'seq_RSA-accpro', 'seq_RSA-accpro20', 'seq_TM-tmhmm',
'struct_SS-dssp', 'struct_RSA-dssp', 'struct_ASA-dssp',
'struct_CA_DEPTH-msms', 'struct_RES_DEPTH-msms',
'struct_PHI-dssp', 'struct_PSI-dssp',
'struct_resnum', 'struct_residue']
df_gene_summary = pd.DataFrame.from_records(appender, columns=cols)
return df_gene_summary