prompt | completion | api
---|---|---
stringlengths 19 to 1.03M | stringlengths 4 to 2.12k | stringlengths 8 to 90
#!/usr/bin/env python
import argparse
import numpy as np
import os
import pandas as pd
import yaml
import micro_dl.inference.evaluation_metrics as metrics
import micro_dl.utils.aux_utils as aux_utils
import micro_dl.utils.preprocess_utils as preprocess_utils
import micro_dl.utils.image_utils as image_utils
import micro_dl.utils.normalize as normalize
def parse_args():
"""Parse command line arguments
In python namespaces are implemented as dictionaries
:return: namespace containing the arguments passed.
"""
parser = argparse.ArgumentParser()
parser.add_argument(
'--model_dir',
type=str,
required=True,
help='Directory containing model weights, config and csv files',
)
parser.add_argument(
'--model_fname',
type=str,
default=None,
help='File name of weights in model dir (.hdf5). If None grab newest.',
)
parser.add_argument(
'--test_data',
dest='test_data',
action='store_true',
help="Use test indices in split_samples.json",
)
parser.add_argument(
'--all_data',
dest='test_data',
action='store_false',
)
parser.set_defaults(test_data=True)
parser.add_argument(
'--image_dir',
type=str,
required=True,
help="Directory containing target images",
)
parser.add_argument(
'--metrics',
type=str,
required=True,
nargs='*',
help='Metrics for model evaluation'
)
parser.add_argument(
'--orientations',
type=str,
default='xyz',
nargs='*',
help='Evaluate metrics along these orientations (xy, xz, yz, xyz)'
)
parser.add_argument(
'--name_parser',
type=str,
default='parse_sms_name',
help="The function in aux_utils that will parse the file name for indices",
)
return parser.parse_args()
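# Illustrative invocation of this script (hypothetical script name, paths and metric
# names, shown only as a usage sketch and not taken from the source repository):
#
#   python compute_metrics.py \
#       --model_dir /models/run1 \
#       --image_dir /data/target_images \
#       --metrics ssim corr \
#       --orientations xy xyz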
def compute_metrics(model_dir,
image_dir,
metrics_list,
orientations_list,
test_data=True,
name_parser='parse_sms_name'):
"""
Compute specified metrics for given orientations for predictions, which
are assumed to be stored in model_dir/predictions. Targets are stored in
image_dir.
Writes metrics csv files for each orientation in model_dir/predictions.
:param str model_dir: Assumed to contain config, split_samples.json and
subdirectory predictions/
:param str image_dir: Directory containing target images with frames_meta.csv
:param list metrics_list: See inference/evaluation_metrics.py for options
:param list orientations_list: Any subset of {xy, xz, yz, xyz}
(see evaluation_metrics)
:param bool test_data: Uses test indices in split_samples.json,
otherwise all indices
:param str name_parser: Type of name parser (default or parse_idx_from_name)
"""
# Load config file
config_name = os.path.join(model_dir, 'config.yml')
with open(config_name, 'r') as f:
config = yaml.safe_load(f)
preprocess_config = preprocess_utils.get_preprocess_config(config['dataset']['data_dir'])
# Load frames metadata and determine indices
frames_meta = pd.read_csv(os.path.join(image_dir, 'frames_meta.csv'))
if isinstance(metrics_list, str):
metrics_list = [metrics_list]
metrics_inst = metrics.MetricsEstimator(metrics_list=metrics_list)
split_idx_name = config['dataset']['split_by_column']
    if test_data:
        idx_fname = os.path.join(model_dir, 'split_samples.json')
        try:
            split_samples = aux_utils.read_json(idx_fname)
            test_ids = np.sort(split_samples['test'])
        except FileNotFoundError:
            print("No split_samples file. Will predict all images in dir.")
            # Fall back to all indices so test_ids is always defined
            test_ids = np.sort(np.unique(frames_meta[split_idx_name]))
else:
test_ids = np.sort(np.unique(frames_meta[split_idx_name]))
# Find other indices to iterate over than split index name
# E.g. if split is position, we also need to iterate over time and slice
test_meta = pd.read_csv(os.path.join(model_dir, 'test_metadata.csv'))
metadata_ids = {split_idx_name: test_ids}
iter_ids = ['slice_idx', 'pos_idx', 'time_idx']
    for idx_name in iter_ids:
        if idx_name != split_idx_name:
            metadata_ids[idx_name] = np.sort(np.unique(test_meta[idx_name]))
# Create image subdirectory to write predicted images
pred_dir = os.path.join(model_dir, 'predictions')
target_channel = config['dataset']['target_channels'][0]
# If network depth is > 3 determine depth margins for +-z
depth = 1
if 'depth' in config['network']:
depth = config['network']['depth']
normalize_im = 'stack'
if 'normalize_im' in preprocess_config:
normalize_im = preprocess_config['normalize_im']
elif 'normalize_im' in preprocess_config['tile']:
normalize_im = preprocess_config['tile']['normalize_im']
# Get channel name and extension for predictions
parse_func = aux_utils.import_object('utils.aux_utils', name_parser, 'function')
pred_fnames = [f for f in os.listdir(pred_dir) if f.startswith('im')]
meta_row = parse_func(pred_fnames[0])
pred_channel = meta_row['channel_idx']
_, ext = os.path.splitext(pred_fnames[0])
if isinstance(orientations_list, str):
orientations_list = [orientations_list]
available_orientations = {'xy', 'xz', 'yz', 'xyz'}
assert set(orientations_list).issubset(available_orientations), \
"Orientations must be subset of {}".format(available_orientations)
fn_mapping = {
'xy': metrics_inst.estimate_xy_metrics,
'xz': metrics_inst.estimate_xz_metrics,
'yz': metrics_inst.estimate_yz_metrics,
'xyz': metrics_inst.estimate_xyz_metrics,
}
metrics_mapping = {
'xy': metrics_inst.get_metrics_xy,
'xz': metrics_inst.get_metrics_xz,
'yz': metrics_inst.get_metrics_yz,
'xyz': metrics_inst.get_metrics_xyz,
}
df_mapping = {
'xy': pd.DataFrame(),
'xz': | pd.DataFrame() | pandas.DataFrame |
import numpy as np
import pytest
import pandas as pd
from pandas import DataFrame, Series, date_range, timedelta_range
import pandas._testing as tm
class TestTimeSeries:
def test_contiguous_boolean_preserve_freq(self):
rng = date_range("1/1/2000", "3/1/2000", freq="B")
mask = np.zeros(len(rng), dtype=bool)
mask[10:20] = True
masked = rng[mask]
expected = rng[10:20]
assert expected.freq == rng.freq
tm.assert_index_equal(masked, expected)
mask[22] = True
masked = rng[mask]
assert masked.freq is None
def test_promote_datetime_date(self):
rng = date_range("1/1/2000", periods=20)
ts = Series(np.random.randn(20), index=rng)
ts_slice = ts[5:]
ts2 = ts_slice.copy()
ts2.index = [x.date() for x in ts2.index]
result = ts + ts2
result2 = ts2 + ts
expected = ts + ts[5:]
expected.index = expected.index._with_freq(None)
tm.assert_series_equal(result, expected)
tm.assert_series_equal(result2, expected)
# test asfreq
result = ts2.asfreq("4H", method="ffill")
expected = ts[5:].asfreq("4H", method="ffill")
tm.assert_series_equal(result, expected)
result = rng.get_indexer(ts2.index)
expected = rng.get_indexer(ts_slice.index)
tm.assert_numpy_array_equal(result, expected)
def test_series_map_box_timedelta(self):
# GH 11349
s = Series(timedelta_range("1 day 1 s", periods=5, freq="h"))
def f(x):
return x.total_seconds()
s.map(f)
s.apply(f)
DataFrame(s).applymap(f)
def test_view_tz(self):
# GH#24024
ser = Series(pd.date_range("2000", periods=4, tz="US/Central"))
result = ser.view("i8")
expected = Series(
[
946706400000000000,
946792800000000000,
946879200000000000,
946965600000000000,
]
)
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize("tz", [None, "US/Central"])
def test_asarray_object_dt64(self, tz):
ser = Series(pd.date_range("2000", periods=2, tz=tz))
with tm.assert_produces_warning(None):
# Future behavior (for tzaware case) with no warning
result = np.asarray(ser, dtype=object)
expected = np.array(
[pd.Timestamp("2000-01-01", tz=tz), pd.Timestamp("2000-01-02", tz=tz)]
)
tm.assert_numpy_array_equal(result, expected)
def test_asarray_tz_naive(self):
# This shouldn't produce a warning.
ser = Series(pd.date_range("2000", periods=2))
expected = np.array(["2000-01-01", "2000-01-02"], dtype="M8[ns]")
result = np.asarray(ser)
tm.assert_numpy_array_equal(result, expected)
def test_asarray_tz_aware(self):
tz = "US/Central"
ser = Series( | pd.date_range("2000", periods=2, tz=tz) | pandas.date_range |
import pandas as pd
import numpy as np
from pathlib import Path
from sklearn.manifold import TSNE
from sklearn.cluster import KMeans
import seaborn as sns
import matplotlib.pyplot as plt
import configparser
from dateutil.parser import parse
import os
from sklearn.metrics import roc_auc_score, f1_score, precision_score,\
recall_score, classification_report, accuracy_score
import logging
logger = logging.getLogger(__name__)
print = logger.info
def multilabel_from_tags(tag_list):
"""
function to generate pd dataframe for tags based on list of tag strings
tag_list: the raw list of tags from input. each row is "tag1, tag2, tag3..."
"""
# turn tag list strings into list for each row
tag_list = [[tag.strip() for tag in tag_text.split(',')] for tag_text in tag_list]
# obtain unique tags
unique_tags = list(set([tag for tags in tag_list for tag in tags]))
    try:
        unique_tags.remove('')
    except ValueError:
        print("No empty tag present; nothing to remove")
# create df based on tags
tag_dict = {}
for tag in unique_tags:
tag_dict[f"Tag_{tag}"] = [1 if tag in tags else 0 for tags in tag_list]
tag_df = pd.DataFrame(tag_dict)
return tag_df
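# Illustrative usage sketch for multilabel_from_tags (hypothetical tag strings, not
# taken from any real dataset). Each comma-separated string becomes one row of 0/1
# indicator columns; column order may vary because unique tags are collected via a set:
#
#   >>> multilabel_from_tags(["ringing, loud", "loud", ""])
#      Tag_ringing  Tag_loud
#   0            1         1
#   1            0         1
#   2            0         0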
def create_tag_columns(train_df, tag_col='Tags'):
"""
function to create tags columns for a training dataframe
train_df: pd DataFrame of training text and tags
tag_col: str. Column name of the column that houses the multilabel tags
"""
tag_list = train_df[tag_col].to_list()
tag_df = multilabel_from_tags(tag_list)
train_df = pd.concat([train_df, tag_df], axis=1)
return train_df
def binary_tag_to_tags(text_df, tag_values):
"""
+++INPUT+++
text_df: dataframe with binary tags, fillna with 0
tag_values: array of tag strings
example: tag_values = text_df.columns[2:].values
+++OUTPUT+++
text_df: with Tags column added containing tags
"""
tags_list = []
for row_index in range(len(text_df)):
selector = text_df.loc[row_index, tag_values].values.astype(bool)
selected_tags = tag_values[selector]
tags_string = ", ".join(selected_tags)
tags_list.append(tags_string)
text_df['Tags'] = tags_list
return text_df
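# Illustrative usage sketch for binary_tag_to_tags, the inverse operation
# (hypothetical columns, not taken from any real dataset):
#
#   >>> df = pd.DataFrame({"Text": ["a", "b"], "UID": [1, 2],
#   ...                    "Tag_x": [1, 0], "Tag_y": [1, 1]})
#   >>> binary_tag_to_tags(df, df.columns[2:].values)["Tags"].tolist()
#   ['Tag_x, Tag_y', 'Tag_y']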
def df_to_json_form(sample_df, tag_col='Tags', ui_dir='../input/',
ui_filename='text_tags.json'):
"""
function to save a sampled text df to directory for human tags
sample_df: pd.DataFrame. Has "Text" and "UID" columns
tag_col: str. The expected name of the tags column. Blank fields will be
populated for human input
ui_dir: str. directory of the human input json form
ui_filename: str. file name for the human input. should be in json
"""
    try:
        assert "Text" in sample_df.columns
        assert "UID" in sample_df.columns
    except AssertionError:
        print("Make sure the DF has Text and UID columns!")
        exit(1)
if tag_col not in sample_df.columns:
print(f"Column {tag_col} not in columns. Adding empty column for it.")
sample_df[tag_col] = ''
sample_df = sample_df.loc[:, ['Text', 'UID', tag_col]]
print("Saving the sampled texts as JSON for human tags")
Path(ui_dir).mkdir(parents=True, exist_ok=True)
sample_df.to_json(f'{ui_dir}{ui_filename}', orient='records', indent=2)
print("Done")
def kmeans_from_proba(scored_df, tsne_fig_name, score_col_prefix='proba_', random_state=0):
print("Extracting tag scores and training KMeans for clusters")
# extract tag scores into np.array
proba_scores = scored_df.loc[:, scored_df.columns.str.startswith(score_col_prefix)].values
# fit and extract kmeans clusters
kmeans = KMeans(n_clusters=proba_scores.shape[1] + 1, random_state=random_state)
kmeans.fit(proba_scores)
clusters = kmeans.predict(proba_scores).reshape((-1, 1))
print("Visualizing tag score-based KMeans clusters with tSNE")
# visualize the clusters using tsne
tsne_xy = TSNE(n_components=2).fit_transform(proba_scores)
visualize_df = pd.DataFrame(
np.concatenate((tsne_xy, clusters), axis=1), columns=['tsne_1', 'tsne_2', 'cluster_id'])
plt.figure(figsize=(10, 6))
sns.scatterplot(data=visualize_df,x='tsne_1',y='tsne_2',hue='cluster_id',
legend="full",alpha=0.5, palette='pastel')
plt.title("KMeans Cluster on TSNE 2D Transformation")
plt.savefig(tsne_fig_name, bbox_inches='tight')
plt.close()
# save cluster info back to scored_df
print("Saving cluster information back to dataframe")
scored_df['cluster_id'] = clusters
return scored_df, kmeans
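# Illustrative usage sketch for kmeans_from_proba (hypothetical probability scores, not
# taken from a real model). With three proba_ columns, KMeans is fit with
# n_clusters = 3 + 1 and a tSNE scatter plot is written to the given file name:
#
#   >>> demo = pd.DataFrame(np.random.rand(50, 3), columns=["proba_a", "proba_b", "proba_c"])
#   >>> demo["UID"] = range(50)
#   >>> demo, km = kmeans_from_proba(demo, "tsne_demo.png")
#   >>> demo["cluster_id"].nunique() <= 4
#   True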
def sample_by_cluster(scored_df, sample_size, cluster_col='cluster_id', row_key='UID'):
print("Sampling records based on cluster information...")
group_sample_n = sample_size // scored_df[cluster_col].nunique()
sample_df = scored_df.groupby(cluster_col).apply(lambda x: x.sample(n=group_sample_n)).reset_index(drop=True)
unsampled_count = sample_size - sample_df.shape[0]
print(f"A total of {sample_df.shape[0]:,} records were sampled based on clusters.")
if unsampled_count > 0:
print(f"{unsampled_count:,} remaining records are to be sampled from total population.")
unsampled_ids = scored_df[row_key][~np.isin(scored_df.UID, sample_df.UID)]
additional_ids = np.random.choice(unsampled_ids, unsampled_count, replace=False)
additional_df = scored_df.loc[np.isin(scored_df[row_key], additional_ids), :]
sample_df = pd.concat([sample_df, additional_df], ignore_index=True)
sample_df['Tags'] = ''
return sample_df
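# Illustrative usage sketch for sample_by_cluster (hypothetical scored frame, not taken
# from a real run). Assuming every cluster has at least sample_size // n_clusters rows,
# the stratified draw plus the random top-up returns exactly sample_size records:
#
#   >>> scored = pd.DataFrame({"UID": range(100),
#   ...                        "cluster_id": np.random.randint(0, 4, 100)})
#   >>> sample = sample_by_cluster(scored, sample_size=20)
#   >>> len(sample)
#   20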
def sample_by_random(scored_df, sample_size, cluster_col='cluster_id', row_key='UID'):
print("Sampling records based on pure randomness...")
print(f"{sample_size:,} records are to be sampled from total population.")
sample_ids = np.random.choice(scored_df[row_key], sample_size, replace=False)
sample_df = scored_df.loc[np.isin(scored_df[row_key], sample_ids), :].reset_index(drop=True)
sample_df['Tags'] = ''
return sample_df
def coder_sim(samples_df, answers_df):
assert "UID" in samples_df.columns
assert "UID" in answers_df.columns
assert "Tags" in samples_df.columns
assert "Tags" in answers_df.columns
samples_df['Tags'] = answers_df.set_index("UID").loc[samples_df.UID, ['Tags']].values.flatten()
print("Samples have been tagged using the provided answers dataframe")
return samples_df
class MetaProject(object):
def __init__(self, project_path, rundir='./wrapper_al/'):
"""
Simple MetaProject class to analyze project output
project_path: path to the project folder of the active learning run
rundir: the path where the active learning ran, default './wrapper_al/'
"""
print(">>> Instantiate MetaProject class...")
self.project_path = project_path
self.rundir = rundir
self.cfg_path = os.path.abspath(f'{self.project_path}orchestration_record.cfg')
self.log_path = os.path.abspath(f'{self.project_path}orchestration_log.log')
self._load_config()
self.total_rounds = int(self.config.get('active_learning', 'total_rounds'))
self.round_sample = int(self.config.get('sampling', 'sample_size'))
self.total_sample = self.total_rounds * self.round_sample
# get abspath of the answer file since the exec path of project is different from analytics path
self.answer_file = os.path.abspath(os.path.join(
self.rundir, self.config.get('coder_sim', 'answer_file')))
print(self.answer_file)
self.max_tags = int(self.config.get('training', 'max_tags'))
self.run_sim = int(self.config.get('active_learning', 'run_sim'))
self.run_time = self._parse_log(self.log_path)
        self._gen_tag_sum_df()  # reads self.answer_file internally, keeping the default 'Tag_' prefix
def _load_config(self):
print(">>> Loading project orchestration config")
self.config = configparser.ConfigParser(interpolation=configparser.ExtendedInterpolation())
self.config.read(self.cfg_path)
def _parse_log(self, log_path):
"""
Method to parse orchestration log file to obtain run duration in seconds
"""
print(">>> Parsing project execution run time")
with open(log_path, 'r') as logfile:
first_line = logfile.readline()
for last_line in logfile:
pass
try:
start_time = parse(first_line[:23])
end_time = parse(last_line[:23])
run_time = (end_time - start_time).seconds
except:
print(">>> Project did not run successfully based on log records!")
run_time = -1
return run_time
def _gen_tag_sum_df(self, tag_col='Tag_'):
"""
Method to generate tag positive ratios of a given DF (stored in JSON format)
"""
print(">>> Reading full dataset...")
df = pd.read_json(self.answer_file, orient='records')
df = create_tag_columns(df)
self.df = df
self.total_records = df.shape[0]
if self.run_sim == 1:
print(">>> Project ran as simulation...")
self.answer_tag_sum_df = df.loc[:, df.columns.str.startswith(tag_col)].sum().sort_values(
ascending=False).reset_index().rename(
{'index':'Tag_Name', 0: 'Pos_Count'}, axis=1)
self.answer_tag_sum_df['Pos_Rate'] = self.answer_tag_sum_df.Pos_Count / df.shape[0]
else:
print(">>> Project ran in real time with manual coders...")
self.answer_tag_sum_df = None
def describe(self):
"""
Method to describe the project with Meta Cfg and Logs
method only loads attributes of the object
"""
print(">>> Composing project high level description...")
self.stmts = []
self.stmts.append('INTRO\n-------')
self.stmts.append(f"\nThis Active Learning Run has a round count of {self.total_rounds:,},")
self.stmts.append(f"and a total of {self.total_sample:,} samples are included for model training.")
if self.run_sim == 1:
self.stmts.append("This run is a simulation with known tags already available.")
else:
self.stmts.append("This run is an actual application with manual coder input for tags on the fly.")
self.stmts.append(f"In each round, {int(self.config.get('sampling', 'sample_size')):,} samples are selected as additional training data.")
self.stmts.append(f"While the first round always runs random sampling to gather the samples,")
self.stmts.append(f"the second and beyond rounds use {self.config.get('sampling', 'sampling_method')} method.")
self.stmts.append('\n\nDATA\n-------')
self.stmts.append(f'\nThe input dataframe has a total of {self.total_records:,} records.')
if self.answer_tag_sum_df is not None:
self.stmts.append('The positive rates of each tag in the full answer dataset:')
self.stmts.append("\n" + self.answer_tag_sum_df.to_string())
self.stmts.append('\n\nMODELING\n-------')
self.stmts.append("\nThe training config for each round's Bi-Directional LSTM modeling is as below:")
for key, value in dict(self.config['training']).items():
self.stmts.append(f"\n\t{key}: {value}")
if self.config.get('training', 'random_embed') == 'True':
self.stmts.append('\nThe text embeddings are randomly initiated 300-length via Tensorflow 2.')
else:
self.stmts.append('\nThe text embeddings are GloVe 300-length text embeddings loaded via Spacy.')
self.stmts.append('\n\nRUNTIME\n-------')
if self.run_time > 0:
self.stmts.append(f"\nExecution of the run took {self.run_time / 60:,.2f} minutes to complete")
else:
self.stmts.append("Program log file indicates that this run was not successfully executed...")
self.description = " ".join(self.stmts)
print(">>> Displaying the description:")
print(self.description)
class RoundResult(object):
def __init__(self, round_path, answer_file, proba_cutoff, rundir='./wrapper_al/'):
self.round_path = os.path.abspath(os.path.join(rundir, round_path))
print(self.round_path)
self.config_dir = f"{self.round_path.rstrip('/')}/config/"
self.sample_dir = f"{self.round_path.rstrip('/')}/sample/"
self.label_dir = f"{self.round_path.rstrip('/')}/label/"
self.input_dir = f"{self.round_path.rstrip('/')}/input/"
self.output_dir = f"{self.round_path.rstrip('/')}/output/"
self.train_file = f"{self.output_dir.rstrip('/')}/train_df.csv"
self.scored_file = f"{self.output_dir.rstrip('/')}/scored/scored_output.json"
self.answer_file = os.path.abspath(os.path.join(rundir, answer_file))
self.proba_cutoff = proba_cutoff
self.load_outputs()
def load_outputs(self, proba_prefix='proba_', tag_prefix='Tag_', row_key='UID'):
# read the round related datasets
train_df = | pd.read_csv(self.train_file) | pandas.read_csv |
import numpy as np
import pytest
from pandas.core.dtypes.generic import ABCIndex
import pandas as pd
import pandas._testing as tm
from pandas.core.arrays.integer import (
Int8Dtype,
UInt32Dtype,
)
def test_dtypes(dtype):
# smoke tests on auto dtype construction
if dtype.is_signed_integer:
assert np.dtype(dtype.type).kind == "i"
else:
assert np.dtype(dtype.type).kind == "u"
assert dtype.name is not None
@pytest.mark.parametrize("op", ["sum", "min", "max", "prod"])
def test_preserve_dtypes(op):
# TODO(#22346): preserve Int64 dtype
# for ops that enable (mean would actually work here
# but generally it is a float return value)
df = pd.DataFrame(
{
"A": ["a", "b", "b"],
"B": [1, None, 3],
"C": pd.array([1, None, 3], dtype="Int64"),
}
)
# op
result = getattr(df.C, op)()
if op in {"sum", "prod", "min", "max"}:
assert isinstance(result, np.int64)
else:
assert isinstance(result, int)
# groupby
result = getattr(df.groupby("A"), op)()
expected = pd.DataFrame(
{"B": np.array([1.0, 3.0]), "C": pd.array([1, 3], dtype="Int64")},
index=pd.Index(["a", "b"], name="A"),
)
tm.assert_frame_equal(result, expected)
def test_astype_nansafe():
# see gh-22343
arr = pd.array([np.nan, 1, 2], dtype="Int8")
msg = "cannot convert to 'uint32'-dtype NumPy array with missing values."
with pytest.raises(ValueError, match=msg):
arr.astype("uint32")
@pytest.mark.parametrize("dropna", [True, False])
def test_construct_index(all_data, dropna):
# ensure that we do not coerce to Float64Index, rather
# keep as Index
all_data = all_data[:10]
if dropna:
other = np.array(all_data[~all_data.isna()])
else:
other = all_data
result = pd.Index(pd.array(other, dtype=all_data.dtype))
expected = pd.Index(other, dtype=object)
tm.assert_index_equal(result, expected)
@pytest.mark.parametrize("dropna", [True, False])
def test_astype_index(all_data, dropna):
# as an int/uint index to Index
all_data = all_data[:10]
if dropna:
other = all_data[~all_data.isna()]
else:
other = all_data
dtype = all_data.dtype
idx = pd.Index._with_infer(np.array(other))
assert isinstance(idx, ABCIndex)
result = idx.astype(dtype)
expected = idx.astype(object).astype(dtype)
tm.assert_index_equal(result, expected)
def test_astype(all_data):
all_data = all_data[:10]
ints = all_data[~all_data.isna()]
mixed = all_data
dtype = Int8Dtype()
# coerce to same type - ints
s = pd.Series(ints)
result = s.astype(all_data.dtype)
expected = pd.Series(ints)
tm.assert_series_equal(result, expected)
# coerce to same other - ints
s = pd.Series(ints)
result = s.astype(dtype)
expected = pd.Series(ints, dtype=dtype)
tm.assert_series_equal(result, expected)
# coerce to same numpy_dtype - ints
s = pd.Series(ints)
result = s.astype(all_data.dtype.numpy_dtype)
expected = pd.Series(ints._data.astype(all_data.dtype.numpy_dtype))
tm.assert_series_equal(result, expected)
# coerce to same type - mixed
s = pd.Series(mixed)
result = s.astype(all_data.dtype)
expected = pd.Series(mixed)
tm.assert_series_equal(result, expected)
# coerce to same other - mixed
s = pd.Series(mixed)
result = s.astype(dtype)
expected = pd.Series(mixed, dtype=dtype)
tm.assert_series_equal(result, expected)
# coerce to same numpy_dtype - mixed
s = pd.Series(mixed)
msg = r"cannot convert to .*-dtype NumPy array with missing values.*"
with pytest.raises(ValueError, match=msg):
s.astype(all_data.dtype.numpy_dtype)
# coerce to object
s = pd.Series(mixed)
result = s.astype("object")
expected = pd.Series(np.asarray(mixed))
tm.assert_series_equal(result, expected)
def test_astype_copy():
arr = pd.array([1, 2, 3, None], dtype="Int64")
orig = pd.array([1, 2, 3, None], dtype="Int64")
# copy=True -> ensure both data and mask are actual copies
result = arr.astype("Int64", copy=True)
assert result is not arr
assert not tm.shares_memory(result, arr)
result[0] = 10
tm.assert_extension_array_equal(arr, orig)
result[0] = pd.NA
tm.assert_extension_array_equal(arr, orig)
# copy=False
result = arr.astype("Int64", copy=False)
assert result is arr
assert np.shares_memory(result._data, arr._data)
assert np.shares_memory(result._mask, arr._mask)
result[0] = 10
assert arr[0] == 10
result[0] = pd.NA
assert arr[0] is pd.NA
# astype to different dtype -> always needs a copy -> even with copy=False
# we need to ensure that also the mask is actually copied
arr = pd.array([1, 2, 3, None], dtype="Int64")
orig = pd.array([1, 2, 3, None], dtype="Int64")
result = arr.astype("Int32", copy=False)
assert not tm.shares_memory(result, arr)
result[0] = 10
tm.assert_extension_array_equal(arr, orig)
result[0] = pd.NA
tm.assert_extension_array_equal(arr, orig)
def test_astype_to_larger_numpy():
a = pd.array([1, 2], dtype="Int32")
result = a.astype("int64")
expected = np.array([1, 2], dtype="int64")
tm.assert_numpy_array_equal(result, expected)
a = pd.array([1, 2], dtype="UInt32")
result = a.astype("uint64")
expected = np.array([1, 2], dtype="uint64")
tm.assert_numpy_array_equal(result, expected)
@pytest.mark.parametrize("dtype", [Int8Dtype(), "Int8", UInt32Dtype(), "UInt32"])
def test_astype_specific_casting(dtype):
s = pd.Series([1, 2, 3], dtype="Int64")
result = s.astype(dtype)
expected = | pd.Series([1, 2, 3], dtype=dtype) | pandas.Series |
import numpy as np
import pandas as pd
class DictUtil:
ks = []
ds = []
def to_kv(self, src):
for k, v in src.items():
self.ks.append(k)
if type(v) == dict:
self.to_kv(v)
else:
self.ds.append(np.array([".".join(self.ks), v]))
self.ks.pop(-1)
# _dict: nested dict
def dict_to_df(self, _dict):
self.ks = []
self.ds = []
self.to_kv(_dict)
return pd.DataFrame(np.array(self.ds), columns=["key", "val"])
def separate_head(self, df):
        tmp = df['key'].str.extract(r'(.+?)\.(.+)', expand=True)
df['t_key'] = tmp[0]
df['key'] = tmp[1]
return df
# df['key']: nested keys joined by dot.
# df['val']: dict value.
def df_to_dict(self, df):
t = {}
# set t_key
        other_df = df[ ~(df['key'].str.contains('.', regex=False)) ].copy()
        other_df['t_key'] = other_df['key']
        other_df['key'] = np.nan
        target_df = df[ df['key'].str.contains('.', regex=False) ].copy()
        target_df = self.separate_head(target_df)
merge_df = pd.concat([other_df, target_df]).sort_index()
head_keys = list(dict.fromkeys(list(merge_df['t_key'])))
for k in head_keys:
r = merge_df[merge_df['t_key'] == k]
if len( r[ ~(r['key'].isnull()) ] ) > 0:
r = self.df_to_dict(r)
else:
r = r['val'].iloc[0]
t[k] = r
return t
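    # Illustrative round trip for dict_to_df / df_to_dict (hypothetical nested dict, not
    # taken from real configuration data). Values come back as strings because
    # dict_to_df stores each key/value pair through np.array, which coerces them:
    #
    #   >>> util = DictUtil()
    #   >>> df = util.dict_to_df({"a": {"b": 1, "c": 2}, "d": 3})
    #   >>> df["key"].tolist()
    #   ['a.b', 'a.c', 'd']
    #   >>> util.df_to_dict(df)
    #   {'a': {'b': '1', 'c': '2'}, 'd': '3'}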
def diff_df(self, left_df, right_df):
left_df = left_df.rename(columns={'val': 'val_left'}).copy()
right_df = right_df.rename(columns={'val': 'val_right'}).copy()
key_df = pd.DataFrame((list(set( list(left_df['key']) + list(right_df['key']) ))), columns=['key'])
m_df = pd.merge(key_df, left_df, on='key', how='left')
m_df = | pd.merge(m_df, right_df, on='key', how='left') | pandas.merge |
import properties
from sklearn.neighbors import NearestNeighbors
from sklearn.metrics.pairwise import cosine_similarity
import json
import pandas as pd
import numpy as np
import utility
import ast
# Feature engineering family history
def create_cols_family_hist(x):
if x["tschq04-1"] == "YES":
if isinstance(x["tschq04-2"], str):
res = ast.literal_eval(x["tschq04-2"])
else:
res = x["tschq04-2"]
lst_sorted = sorted(res)
list_to_str = "_".join([val for val in lst_sorted])
return list_to_str
else:
return x["tschq04-1"]
def common_processing(df, item_list):
# Getting percentage between 0 to 1 rather than score values
if item_list in ["bg_tinnitus_history", "all"]:
df["tschq12"] = df["tschq12"].apply(lambda x: x / 100)
df["tschq16"] = df["tschq16"].apply(lambda x: x / 100)
df["tschq17"] = df["tschq17"].apply(lambda x: x / 100)
if item_list in ["bg_tinnitus_history", "all"]:
df["tschq04"] = df.apply(create_cols_family_hist, axis=1)
if item_list in ["modifying_influences", "related_conditions"]:
df["tschq12"] = df["tschq12"].apply(lambda x: x / 100)
return df
#Common elements
def get_common_cols(col1, col2):
common_elements = set(col1).intersection(col2)
return common_elements
from pathlib import Path
def check_access(location):
if location.exists() and location.is_file():
return True
else:
return False
def initial_processing(item_list, quest_cmbs=None, append_synthethic=False):
# Read the csv of the tschq data and make the necessary things
# tschq = pd.read_csv("data/input_csv/3_q.csv", index_col=0, na_filter=False)
tschq = pd.read_pickle(properties.registration_file_location)
hq = pd.read_pickle(properties.hearing_file_location)
# If append synthethic is true then add the synthethic data.
if append_synthethic:
path_access = Path(properties.simulate_registration_file_location)
hearing_path_access = Path(properties.simulate_hearing_file_location)
if check_access(path_access):
simulation_reg_file = pd.read_pickle(properties.simulate_registration_file_location)
# Append the simulation file alongside when True
tschq = tschq.append(simulation_reg_file)
else:
print("Simulated registration file is not created !!!")
if check_access(hearing_path_access):
simulation_hearing_file = | pd.read_pickle(properties.simulate_hearing_file_location) | pandas.read_pickle |
import matplotlib.pyplot as plt
# %matplotlib inline
# from utils import utils
# import utils.utils as utils
from utils.qedr.eval.hinton import hinton
import os
import numpy as np
from utils.qedr.eval.regression import normalize, entropic_scores, print_table_pretty, nrmse
from utils.qedr.zero_shot import get_gap_ids
from utils.qedr.utils import mkdir_p
import math
import pandas as pd
from utils import plot_utils
# split inputs and targets into sets: [train, dev, test, (zeroshot)]
def split_data(data, n_train, n_dev, n_test, zshot):
train = data[:n_train]
dev = data[n_train: n_train + n_dev]
test = data[n_train + n_dev: n_train + n_dev + n_test]
if zshot:
pass
# return [create_gap(train), create_gap(dev), create_gap(test), data[gap_ids]]
return [train, dev, test, None]
# normalize input and target datasets [train, dev, test, (zeroshot)]
def normalize_datasets(datasets, zshot):
datasets[0], mean, std, _ = normalize(datasets[0], remove_constant=False)
datasets[1], _, _, _ = normalize(datasets[1], mean, std, remove_constant=False)
datasets[2], _, _, _ = normalize(datasets[2], mean, std, remove_constant=False)
if zshot:
datasets[3], _, _, _ = normalize(datasets[3], mean, std, remove_constant=False)
return datasets
def fit_visualise_quantify(regressor, params, err_fn, importances_attr, test_time=False, save_plot=False, n_models=1, n_z=0, m_codes=None, gts = None, zshot=False, model_names = None, n_c=None, experiment_path = None, exp_params=None, used_data = None):#fig_dir
# lists to store scores
m_disent_scores = [] * n_models
m_complete_scores = [] * n_models
# arrays to store errors (+1 for avg)
train_errs = np.zeros((n_models, n_z + 1))
dev_errs = np.zeros((n_models, n_z + 1))
test_errs = np.zeros((n_models, n_z + 1))
zshot_errs = np.zeros((n_models, n_z + 1))
# init plot (Hinton diag)
fig, axs = plt.subplots(1, n_models, figsize=(12, 6), facecolor='w', edgecolor='k')
# axs = axs.ravel()
for i in range(n_models):
# init inputs
X_train, X_dev, X_test, X_zshot = m_codes[i][0], m_codes[i][1], m_codes[i][2], m_codes[i][3]
# R_ij = relative importance of c_i in predicting z_j
R = []
for j in range(n_z):
# init targets [shape=(n_samples, 1)]
y_train = gts[0].iloc[:, j]
y_dev = gts[1].iloc[:, j]
y_test = gts[2].iloc[:, j] if test_time else None
y_zshot = gts[3].iloc[:, j] if zshot else None
# fit model
model = regressor(**params[i][j])
model.fit(X_train, y_train.tolist())
# predict
y_train_pred = model.predict(X_train)
# print(model.feature_importance)
y_dev_pred = model.predict(X_dev)
y_test_pred = model.predict(X_test) if test_time else None
y_zshot_pred = model.predict(X_zshot) if zshot else None
# calculate errors
train_errs[i, j] = err_fn(y_train_pred, y_train)
print(train_errs)
dev_errs[i, j] = err_fn(y_dev_pred, y_dev)
test_errs[i, j] = err_fn(y_test_pred, y_test) if test_time else None
zshot_errs[i, j] = err_fn(y_zshot_pred, y_zshot) if zshot else None
# extract relative importance of each code variable in predicting z_j
r = getattr(model, importances_attr)[:, None] # [n_c, 1]
R.append(np.abs(r))
R = np.hstack(R) # columnwise, predictions of each z
# disentanglement
disent_scores = entropic_scores(R.T)
disent_scores = [0 if math.isnan(score) else score for score in disent_scores]
c_rel_importance = np.sum(R, 1) / np.sum(R) # relative importance of each code variable
disent_w_avg = np.sum(np.array(disent_scores) * c_rel_importance)
disent_scores.append(disent_w_avg)
m_disent_scores.append(disent_scores)
# completeness
complete_scores = entropic_scores(R)
complete_avg = np.mean(complete_scores)
complete_scores.append(complete_avg)
m_complete_scores.append(complete_scores)
# informativeness (append averages)
train_errs[i, -1] = np.mean(train_errs[i, :-1])
dev_errs[i, -1] = np.mean(dev_errs[i, :-1])
test_errs[i, -1] = np.mean(test_errs[i, :-1]) if test_time else None
zshot_errs[i, -1] = np.mean(zshot_errs[i, :-1]) if zshot else None
# visualise
colours = ['#efab00', 'black']#'dba45b',
backcolor = '#616161'
#$\text{\rotatebox[origin=c]{-90}
hinton(R, '$\mathbf{c}$', '$\mathbf{z}$', ax=axs,
fontsize=34, ls_own_colours = colours, background_color=backcolor)
# axs.set_title('{0}'.format(model_names[i]), fontsize=20)
title = model_names[0]
model_names =['VAE']
str_dis = print_table_pretty('Disentanglement', m_disent_scores, 'c', model_names)
print_table_pretty('Completeness', m_complete_scores, 'z', model_names)
print("Informativeness:")
print_table_pretty('Training Error', train_errs, 'z', model_names)
print_table_pretty('Validation Error', dev_errs, 'z', model_names)
if test_time:
print_table_pretty('Test Error', test_errs, 'z', model_names)
if zshot:
print_table_pretty('Zeroshot Error', zshot_errs, 'z', model_names)
plt.rc('text', usetex=True)
plt.title(title, fontsize=17, y=1.08)
if save_plot:
fig.tight_layout()
exp_params['used_data'] = used_data
plot_utils.save_figure(fig, experiment_path + 'images/', "hint_{0}".format(regressor.__name__),
dct_params=exp_params)
plot_utils.save_figure(fig, '../models/results/disentanglement-imgs/',
"hint_{0}".format(regressor.__name__), dct_params=exp_params)
plt.show()
plt.clf()
def run_disentanglement_eval(test_model, experiment_path, dct_params):
np_z_test = test_model.np_z_test
test_y = test_model.test_y
seed = 123
rng = np.random.RandomState(seed)
data_dir = '../data/lasso' # '../wgan/data/'
codes_dir = os.path.join(data_dir, 'codes/')
n_c = 10
zshot = False
description ="VAE \n"
for key, val in dct_params.items():
if(isinstance(val,float)):
val = str(val)[:4]
description = description + "{}:{} ".format(key,val)
description = description + "\n exp:{}".format(str(experiment_path).split(sep='/')[-2].replace('-', '-').replace('_','-'))
model_names = [description]
exp_names = [m.lower() for m in model_names]
n_models = len(model_names)
train_fract, dev_fract, test_fract = 0.8, 0.1, 0.1
# load inputs (model codes)
m_codes = []
for n in exp_names:
try:
# m_codes.append(np.load(os.path.join(codes_dir, n + '.npy')))
m_codes.append(np_z_test[:5000])
except IOError:
# .npz, e.g. pca with keys: codes, explained_variance
m_codes.append(np.load(os.path.join(codes_dir, n + '.npz'))['codes'])
# load targets (ground truths)
# gts = np.load(os.path.join(data_dir, 'teapots.npz'))['gts']
gts = test_y[:5000]
n_samples = len(gts)
n_train, n_dev, n_test = int(train_fract * n_samples), int(dev_fract * n_samples), int(test_fract * n_samples)
if(test_model.used_data == 'dsprites'):
gts = gts[:, 2:] # remove generative factor 'white' and 'shape'
n_z = gts.shape[1]
gts = | pd.DataFrame(data=gts) | pandas.DataFrame |
# import tabula
import pandas as pd
import numpy as np
# !pip install tabula-py
import camelot
import os
import string
import pytz
from datetime import datetime, timezone, timedelta
from tzlocal import get_localzone
from StatusMsg import StatusMsg
from tqdm import tqdm
from urllib.error import HTTPError
import re
import tabula
from tabulate import tabulate
import io
# from datetime import datetime,timedelta
# Program extracts the tables from the PDF files.
# Needs some preprocessing to convert to RAWCSV.
# Done for KA and HR for reference.
# a=b
#declare the path of your file
# file_path = r"../INPUT/2021-10-26/KA.pdf"
#Convert your file
# reads all the tables in the PDF
class FileFormatChanged(Exception):
pass
# def getAPData(file_path,date,StateCode):
# table = camelot.read_pdf(file_path,pages='1')
# if not os.path.isdir('../INPUT/{}/{}/'.format(date,StateCode)):
# os.mkdir('../INPUT/{}/{}/'.format(date,StateCode))
# table.export('../INPUT/{}/{}/foo.csv'.format(date,StateCode), f='csv')
# df_districts = pd.read_csv('../INPUT/{}/{}/foo-page-1-table-1.csv'.format(date,StateCode))
# df_districts.columns = df_districts.columns.str.replace("\n","")
# col_dict = {"TotalPositives":"Confirmed","TotalRecovered":"Recovered","TotalDeceased":"Deceased"}
# df_districts.rename(columns=col_dict,inplace=True)
# # df_districts.drop(columns=['S.No','PositivesLast 24 Hrs','TotalActive Cases'],inplace=True)
# df_districts = df_districts[df_districts['District']!="Total AP Cases"]
# df_summary = df_districts
# df_districts = df_districts[:-1]
# df_json = pd.read_json("../DistrictMappingMaster.json")
# dist_map = df_json['Andhra Pradesh'].to_dict()
# df_districts['District'].replace(dist_map,inplace=True)
# df_summary = df_summary.iloc[-1,:]
# # print(df_districts)
# # print(df_summary)
# # a=b
# return df_summary,df_districts
def combine_listItems(items):
    """Join list items into a single space-separated string."""
    combined_items = ' '.join([str(item) for item in items])
    return combined_items
def getAPData(file_path, date, StateCode):
try:
# print(file_path)
file = tabula.read_pdf(file_path,pages=1,stream = True)
# print(file)
table = tabulate(file)
# print(table)
df_districts = pd.read_fwf(io.StringIO(table))
# remove junk on top and reset the index
df_districts.drop(df_districts.head(4).index, inplace=True)
df_districts = df_districts.reset_index()
# remove bottom junk
df_districts.drop(df_districts.tail(2).index, inplace=True)
df_other_cols = df_districts
# print(df_districts)
# remove unnecessary columns
cols = [0, 4, 6]
df_districts.drop(df_districts.columns[cols], axis=1, inplace=True)
# add column names
df_districts.columns = ['S.No','District', 'cumulativeConfirmedNumberForDistrict', 'District_1', 'Cases_2']
df_districts.drop('S.No', axis=1, inplace=True)
new_df = df_districts
# splitting the dataframe
N = 2
splitted_list_df = np.split(df_districts, np.arange(N, len(df_districts.columns), N), axis=1)
part_A = splitted_list_df[0]
part_B = splitted_list_df[1]
# print(type(part_B))
part_B_cols = {"District_1": "District", "Cases_2": "cumulativeConfirmedNumberForDistrict"}
part_B.rename(columns=part_B_cols, inplace=True)
# concatenate two splitted DF's
df_districts = pd.concat([part_A, part_B], ignore_index=True, sort=False)
# print(df_districts)
# base_csv= '../RAWCSV/2022-04-05/myGov/AP_raw.csv'
# base_csv= '../RAWCSV/2022-04-17/myGov/AP_raw.csv'
base_csv= '../RAWCSV/2022-04-19/AP_raw.csv'
df_base_csv = pd.read_csv(base_csv)
# print(df_base_csv)
# df_base_csv.drop(df_base_csv.index[[0,7]],inplace=True)
# df_base_csv = df_base_csv.reset_index(drop=True)
# distri = df_base_csv['District']
# con = df_base_csv['cumulativeConfirmedNumberForDistrict']
# print(con, distri)
# base_csv_forState = '../RAWCSV/2022-04-06/myGov/AP_raw.csv'
base_csv_forState = '../RAWCSV/2022-04-20/myGov/AP_raw.csv'
df_base_csv_forState = pd.read_csv(base_csv_forState)
# df_base_csv_forState.drop(df_base_csv_forState.index[[0,7]],inplace=True)
# df_base_csv_forState = df_base_csv_forState.reset_index(drop=True)
# distri = df_base_csv_forState['District']
# con = df_base_csv_forState['cumulativeConfirmedNumberForDistrict']
# print(con, distri)
for index, row in df_districts.iterrows():
# print(index, row)
cases_col = row['cumulativeConfirmedNumberForDistrict'].split(' ')[1:]
cases_col = list(filter(str.strip, cases_col))
# print(cases_col, len(cases_col))
district_col = row['District'].split(' ')[1:]
district_col = list(filter(str.strip, district_col))
# print(district_col,len(district_col))
if len(district_col) == 1:
s = ''
new_district_col = s.join(district_col)
else:
new_district_col = combine_listItems(district_col)
if len(cases_col) == 1:
s = ''
new_cases_col = s.join(cases_col)
else:
new_cases_col = combine_listItems(cases_col)
df_districts.loc[index, "District"] = new_district_col
# print(type(new_district_col))
df_districts.loc[index, "cumulativeConfirmedNumberForDistrict"] = new_cases_col
# dropping rows having Nan
df_districts.drop(df_districts.index[[13,14,15,16,30,33]],inplace=True)
df_districts = df_districts.reset_index(drop=True)
df_districts['cumulativeConfirmedNumberForDistrict'] =df_districts['cumulativeConfirmedNumberForDistrict'].astype(int)
# df_summary = df_districts
df_districts = df_districts[:-2]
# print(df_districts)
df_json = pd.read_json("../DistrictMappingMaster.json")
dist_map = df_json['Andhra Pradesh'].to_dict()
df_districts['District'].replace(dist_map,inplace=True)
for index,row in df_districts.iterrows():
filtered_base_df = df_base_csv[df_base_csv['District']==row['District']]
# cumulativeConfirmedNumberForDistrict_value = filtered_base_df['cumulativeConfirmedNumberForDistrict']
# print('printing value .....')
# print(cumulativeConfirmedNumberForDistrict_value)
filtered_base_forState_df= df_base_csv_forState[df_base_csv_forState['District']==row['District']]
if len(filtered_base_df) == 1 and len(filtered_base_forState_df) == 1:
# if len(filtered_base_df) == 1:
# print('printing district names',filtered_district)
cumulative_confirmed_forDistrict = filtered_base_df.iloc[0]['cumulativeConfirmedNumberForDistrict'].astype(int)
# print('cumulative_confirmed_forDistrict',cumulative_confirmed_forDistrict)
df_districts.loc[index, 'cumulativeConfirmedNumberForDistrict'] = cumulative_confirmed_forDistrict+int(row['cumulativeConfirmedNumberForDistrict'])
df_districts['cumulativeDeceasedNumberForDistrict'] = '0'
df_districts['cumulativeRecoveredNumberForDistrict'] = '0'
df_districts['cumulativeTestedNumberForDistrict'] = '0'
df_districts['cumulativeConfirmedNumberForState'] = df_districts['cumulativeConfirmedNumberForDistrict'].sum()
cumulativeDeceasedNumberForState = filtered_base_forState_df.iloc[0]['cumulativeDeceasedNumberForState'].astype(int)
df_districts['cumulativeDeceasedNumberForState'] = cumulativeDeceasedNumberForState
cumulativeRecoveredNumberForState = filtered_base_forState_df.iloc[0]['cumulativeRecoveredNumberForState'].astype(int)
df_districts['cumulativeRecoveredNumberForState'] = cumulativeRecoveredNumberForState
# df_districts['cumulativeTestedNumberForState'] = '33462024'
df_summary = df_districts
# print('printing df districts.....')
# print(df_districts)
# df_summary['cumulativeTestedNumberForState'] = '33462024'
# df_summary['cumulativeTestedNumberForState'] = '33469666'
df_addTest = pd.read_csv("../INPUT/AP_Tested.csv")
print(df_addTest)
try:
df_summary['cumulativeTestedNumberForState'] = df_addTest[df_addTest["Date"] == date]["Cumulative_Tested"].item()
# print(df_summary['Tested'])
except:
print("Please Enter AP Tested values in ../Input/AP_Tested.csv")
raise
df_summary.to_csv("../RAWCSV/{}/{}_raw.csv".format(date, StateCode))
return df_summary, df_districts
except Exception as e:
raise
# print(e)
def getRJData(file_path,date,StateCode):
table = camelot.read_pdf(file_path,pages='1,2')
if not os.path.isdir('../INPUT/{}/{}/'.format(date,StateCode)):
os.mkdir('../INPUT/{}/{}/'.format(date,StateCode))
table.export('../INPUT/{}/{}/foo.csv'.format(date,StateCode), f='csv')
df_districts_1 = pd.read_csv('../INPUT/{}/{}/foo-page-1-table-1.csv'.format(date,StateCode),header=0)
df_districts_2 = pd.read_csv('../INPUT/{}/{}/foo-page-2-table-1.csv'.format(date,StateCode))
frames = [df_districts_1,df_districts_2]
df_districts = pd.concat(frames,ignore_index=True)
df_districts.columns = df_districts.columns.str.replace("\n","")
print(df_districts.columns)
#Cumulative Sample
col_dict = {"Unnamed: 2":"Tested", "Cumulative Positive":"Confirmed", "Cumulative Recovered/Discharged":"Recovered","Cumulative Death":"Deceased","CumulativePositive":"Confirmed",
"CumulativeDeath":"Deceased","CumulativeRecovered/ Discharged":"Recovered"}
df_districts.rename(columns=col_dict,inplace=True)
print(df_districts.columns)
# df_districts.drop(columns=['S.No','Today\'s Positive','Today\'sDeath','Today\'sRecovered/ Discharged', 'Active Case'],inplace=True)
df_districts.dropna(how="all",inplace=True)
# print(df_districts)
# a=b
# df_summary = df_districts
# df_districts = df_districts[:-1]
# df_districts = df_districts[:-4]
# print(df_districts)
# a=b
df_summary = df_districts
print(df_districts)
df_districts = df_districts[:-1]
# df_districts.drop(labels=[0,1],axis=0,inplace=True)
# df = df[]
df_districts['District'] = df_districts['District'].str.capitalize()
df_json = pd.read_json("../DistrictMappingMaster.json")
dist_map = df_json['Rajasthan'].to_dict()
df_districts['District'].replace(dist_map,inplace=True)
df_summary = df_summary.iloc[-1,:] #testcode needs to be updated later
# print(df_summary)
# a=b
return df_summary,df_districts
def getKAData(file_path,date,StateCode):
table = camelot.read_pdf(file_path,pages='1,5')
if not os.path.isdir('../INPUT/{}/{}/'.format(date,StateCode)):
os.mkdir('../INPUT/{}/{}/'.format(date,StateCode))
table.export('../INPUT/{}/{}/foo.csv'.format(date,StateCode), f='csv')
# table[5].to_excel('foo.xlsx')
df_districts = pd.read_csv('../INPUT/{}/{}/foo-page-5-table-1.csv'.format(date,StateCode),skiprows=3)
df_districts.columns = df_districts.columns.str.replace("\n","")
df_districts['District Name'] = df_districts['District Name'].str.replace("\n","")
df_districts['District Name'] = df_districts['District Name'].str.replace("#","")
df_districts['District Name'] = df_districts['District Name'].str.replace("*","")
df_districts['District Name'] = df_districts['District Name'].replace(r'\s+', ' ', regex=True)
# df_districts = df_districts.replace("nan",np.nan)
print(df_districts.columns)
# a=b
# df_summary = df_districts
# df_districts.columns = df_districts.columns.str.replace("\n","")
for idx in df_districts.index:
print(df_districts["Sl. No"][idx])
if df_districts["Sl. No"][idx] == "21 Mandya":
df_districts["Sl. No"][idx] = 21
df_districts["District Name"][idx] = "Mandya"
elif df_districts["Sl. No"][idx] == "22 Mysuru":
df_districts["Sl. No"][idx] = 22
df_districts["District Name"][idx] = "Mysuru"
if "Non-Covid" in df_districts.columns[-1]:
col_dict = {"District Name":"District","Total Positives":"Confirmed","Total Discharges":"Recovered","Total Covid Deaths":"Deceased" , df_districts.columns[-1]:"Other"}
else:
col_dict = {"District Name":"District","Total Positives":"Confirmed","Total Discharges":"Recovered","Total Covid Deaths":"Deceased" , df_districts.columns[-2]:"Other"}
df_districts.rename(columns=col_dict,inplace=True)
# print(df_districts.columns)
# df_districts.drop(columns=['Sl. No','Today’s Positives','Today’s Discharges','Total Active Cases','Today’s Reported Covid Deaths','Death due to Non-Covid reasons#'],inplace=True)
df_districts.dropna(how="all",inplace=True)
# print(df_districts)
# a=b
# a=b
for col in df_districts.columns:
df_districts[col] = df_districts[col].astype(str).str.replace("*","")
# df_districts.dropna(inplace=True)
# print(df_districts)
# a=b
df_summary = df_districts[df_districts["Sl. No"] == "Total"].iloc[0]
# df_summary = df_districts[df_districts["District"] == "Total"].iloc[0]
# print(df_summary)
# a=b
df_districts = df_districts[pd.to_numeric(df_districts['Sl. No'], errors='coerce').notnull()]
# print(df_districts)
# a=b
# df_districts = df_districts[:-1]
# print(df_districts)
# df = df[]
df_districts['notesForDistrict'] = df_districts['Other'].astype(str) + " cases were recorded as Deaths due to Non Covid Reasons"
df_summary['notesForState'] = df_summary['Other'] + " cases were recorded as Deaths due to Non Covid Reasons"
df_addTest = pd.read_csv("../INPUT/KA_Tested.csv")
try:
df_summary['Tested'] = df_addTest[df_addTest["Date"] == date]["Cumulative_Tested"].item()
except:
print("Please Enter KA Tested values in ../Input/KA_Tested.csv")
raise
df_json = pd.read_json("../DistrictMappingMaster.json")
dist_map = df_json['Karnataka'].to_dict()
df_districts['District'].replace(dist_map,inplace=True)
# df_summary.rename(columns={"District":"State/UT"},inplace=True)
# df_summary = df_summary.iloc[-1,:] #testcode needs to be updated later
# print(df_districts)
# print(df_summary)
# print(date)
# a=b
return df_summary,df_districts
def getTNData(file_path,date,StateCode):
table = camelot.read_pdf(file_path,pages='2,7')
if not os.path.isdir('../INPUT/{}/{}/'.format(date,StateCode)):
os.mkdir('../INPUT/{}/{}/'.format(date,StateCode))
table.export('../INPUT/{}/{}/foo.csv'.format(date,StateCode), f='csv')
# table[5].to_excel('foo.xlsx')
df_districts = pd.read_csv('../INPUT/{}/{}/foo-page-7-table-1.csv'.format(date,StateCode))
df_districts.columns = df_districts.columns.str.replace("\n","")
df_tests = pd.read_csv('../INPUT/{}/{}/foo-page-2-table-1.csv'.format(date,StateCode))
df_tests['COVID-19 STATISTICS'] = df_tests['COVID-19 STATISTICS'].str.replace("\n","")
df_tests['COVID-19 STATISTICS'] = df_tests['COVID-19 STATISTICS'].str.replace("*","")
# print(df_tests)
# print(df_tests[df_tests['COVID-19 STATISTICS'] == 'Total Number of samples tested by RT-PCR today/ till date']['DETAILS'].values)
# a=b
col_dict = {"Total Positive Cases":"Confirmed","Discharged":"Recovered","Death":"Deceased"}
df_districts.rename(columns=col_dict,inplace=True)
df_districts.drop(columns=['Sl. No','Active Cases'],inplace=True)
df_summary = df_districts
# print(df_districts)
# print(df_summary)
# a=b
df_districts = df_districts[:-4]
df_json = pd.read_json("../DistrictMappingMaster.json")
dist_map = df_json['Tamil Nadu'].to_dict()
df_districts['District'].replace(dist_map,inplace=True)
df_summary = df_summary.iloc[-1,:] #testcode needs to be updated later
df_summary = df_summary.dropna()
df_summary["Tested"] = df_tests[df_tests['COVID-19 STATISTICS'] == 'Total Number of samples tested by RT-PCR today/ till date']['DETAILS'].values[0]#df_tests.loc[4,"DETAILS"][:-1]
# print(df_summary["Tested"])
df_summary["Tested"] = df_summary["Tested"].replace("\n","").split()[-1].replace("@","")
df_summary = df_summary.str.replace(',', '').astype(int)
# df_districts["Tested"] = df_summary["Tested"]
# print(df_summary)
# a=b
return df_summary,df_districts
def getHRData(file_path,date,StateCode):
table = camelot.read_pdf(file_path,pages='1,2')
if not os.path.isdir('../INPUT/{}/{}/'.format(date,StateCode)):
os.mkdir('../INPUT/{}/{}/'.format(date,StateCode))
table.export('../INPUT/{}/{}/foo.csv'.format(date,StateCode), f='csv')
# table[5].to_excel('foo.xlsx')
df_districts = pd.read_csv('../INPUT/{}/{}/foo-page-2-table-1.csv'.format(date,StateCode))
df_districts.columns = df_districts.columns.str.replace("\n","")
df_tests = pd.read_csv('../INPUT/{}/{}/foo-page-1-table-1.csv'.format(date,StateCode),names = ["Details","Numbers"])
col_dict = {"Name of District":"District","Cumulative Positive Cases":"Confirmed","Cumulative Recovered/ Discharged Cases":"Recovered","No. of Deaths":"Deceased"}
df_districts.rename(columns=col_dict,inplace=True)
# df_districts.drop(columns=['Sr No','Positive Cases Today','Recovery Rate (%)','No of Active Cases','COVID-19, Vaccination Status (NHM, Haryana)'],inplace=True)
# print(df_districts)
# a=b
df_districts = df_districts[2:]
# print(df_districts.columns)
# print(df_districts['Recovered'].str.contains('['))
# if df_districts['Recovered'].str.contains('[').any():
df_districts["Recovered"] = df_districts["Recovered"].astype(str).str.split("[").str[0]
# if df_districts['Deceased'].str.contains('[').any():
df_districts["Deceased"] = df_districts["Deceased"].astype(str).str.split("[").str[0]
df_districts["Confirmed"] = df_districts["Confirmed"].astype(str).str.split("[").str[0]
df_summary = df_districts
df_districts = df_districts[:-1]
# df_districts.drop(labels=[0,1],axis=0,inplace=True)
# df = df[]
df_json = pd.read_json("../DistrictMappingMaster.json")
dist_map = df_json['Haryana'].to_dict()
df_districts['District'].replace(dist_map,inplace=True)
df_summary = df_summary.iloc[-1,:] #testcode needs to be updated later
# print(df_summary)
prevdate = str((datetime.strptime(date,"%Y-%m-%d")- timedelta(days=1)).date())
# print(prevdate ,type(prevdate))
df_prevDay = pd.read_csv('../RAWCSV/{}/{}_raw.csv'.format(prevdate,StateCode))
df_prevDay["cumulativeTestedNumberForState"]
print(int(df_tests.loc[3,"Numbers"]) , int(df_prevDay["cumulativeTestedNumberForState"][0]))
df_summary["Tested"] = int(df_tests.loc[0,"Numbers"]) + int(df_prevDay["cumulativeTestedNumberForState"][0])
# df_districts["Tested"] = df_summary["Tested"]
print(df_districts)
print(df_summary)
# a=b
return df_summary,df_districts
def getWBData(file_path,date,StateCode):
table = camelot.read_pdf(file_path,pages='1,2')
if not os.path.isdir('../INPUT/{}/{}/'.format(date,StateCode)):
os.mkdir('../INPUT/{}/{}/'.format(date,StateCode))
table.export('../INPUT/{}/{}/foo.csv'.format(date,StateCode), f='csv')
# table[5].to_excel('foo.xlsx')
df_districts = pd.read_csv('../INPUT/{}/{}/foo-page-2-table-1.csv'.format(date,StateCode))
df_districts.columns = df_districts.iloc[0]
df_districts = df_districts[1:]
df_districts.columns = df_districts.columns.str.replace("\n","")
df_tests = pd.read_csv('../INPUT/{}/{}/foo-page-1-table-2.csv'.format(date,StateCode))
# print(df_tests)
# a=b
col_dict = {"Total Cases":"Confirmed","Total Discharged":"Recovered","Total Deaths":"Deceased"}
df_districts.rename(columns=col_dict,inplace=True)
# df_districts.drop(columns=['S. No','Total Active Cases','Last Reported Case'],inplace=True)
df_districts["Confirmed"] = df_districts["Confirmed"].str.split("+").str[0]
df_districts["Recovered"] = df_districts["Recovered"].str.split("+").str[0]
df_districts["Deceased"] = df_districts["Deceased"].str.split("+").str[0]
df_districts["Confirmed"] = df_districts["Confirmed"].str.replace(",","")
df_districts["Recovered"] = df_districts["Recovered"].str.replace(",","")
df_districts["Deceased"] = df_districts["Deceased"].str.replace(",","")
df_summary = df_districts
df_districts = df_districts[:-1]
# df_districts.drop(labels=[0,1],axis=0,inplace=True)
# df = df[]
df_json = pd.read_json("../DistrictMappingMaster.json")
dist_map = df_json['West Bengal'].to_dict()
df_districts['District'].replace(dist_map,inplace=True)
df_summary = df_summary.iloc[-1,:] #testcode needs to be updated later
df_summary["Tested"] = df_tests.loc[1,"Number"]
# print(df_summary)
df_summary["Tested"] = int(df_summary["Tested"].replace(',', '')) #.astype(int)
# df_districts["Tested"] = df_summary["Tested"]
# print(df_summary)
# a=b
return df_summary,df_districts
def getMHData(file_path,date,StateCode):
table = camelot.read_pdf(file_path,'1,2,3')
if not os.path.isdir('../INPUT/{}/{}/'.format(date,StateCode)):
os.mkdir('../INPUT/{}/{}/'.format(date,StateCode))
table.export('../INPUT/{}/{}/foo.csv'.format(date,StateCode), f='csv')
# print(table)
# table[5].to_excel('foo.xlsx')
df_districts_1 = pd.read_csv('../INPUT/{}/{}/foo-page-1-table-1.csv'.format(date,StateCode))
df_districts_2 = pd.read_csv('../INPUT/{}/{}/foo-page-2-table-1.csv'.format(date,StateCode))
df_districts_3 = pd.read_csv('../INPUT/{}/{}/foo-page-3-table-1.csv'.format(date,StateCode))
frames = [df_districts_1,df_districts_2,df_districts_3]
df_districts = pd.concat(frames,ignore_index=True)
df_districts.columns = df_districts.columns.str.replace("\n","")
col_dict = {"District/Municipal Corporation":"District","COVID-19 cases":"Confirmed","Recovered patients":"Recovered","Deaths":"Deceased","District/MunicipalCorporation":"District"}
df_districts.rename(columns=col_dict,inplace=True)
# df_districts.drop(columns=['Sr. No.','Deaths due to other causes', 'Active cases'],inplace=True)
df_summary = df_districts
df_districts = df_districts[:-1]
print(df_districts)
# df_districts.drop(labels=[0,1],axis=0,inplace=True)
# df = df[]
df_addTest = pd.read_csv("../INPUT/MH_Tested.csv")
# print(df_addTest)
try:
df_summary['Tested'] = df_addTest[df_addTest["Date"] == date]["Cumulative_Tested"].item()
# print(df_summary['Tested'])
except:
print("Please Enter MH Tested values in ../Input/MH_Tested.csv")
raise
df_json = pd.read_json("../DistrictMappingMaster.json")
dist_map = df_json['Maharashtra'].to_dict()
df_districts['District'].replace(dist_map,inplace=True)
df_summary = df_summary.iloc[-1,:] #testcode needs to be updated later
return df_summary,df_districts
def getMLData(file_path,date,StateCode):
table = camelot.read_pdf(file_path,pages='1')
# print(file_path,date,StateCode)
# print(table)
if not os.path.isdir('../INPUT/{}/{}/'.format(date,StateCode)):
os.mkdir('../INPUT/{}/{}/'.format(date,StateCode))
table.export('../INPUT/{}/{}/foo.csv'.format(date,StateCode), f='csv')
df_districts = pd.read_csv('../INPUT/{}/{}/foo-page-1-table-1.csv'.format(date,StateCode))#,header=0)
# print(df_districts)
df_districts.columns = df_districts.columns.str.replace("\n","")
# print(df_districts)
prevdate = str((datetime.strptime(date,"%Y-%m-%d")- timedelta(days=1)).date())
prev_df = pd.read_csv("../RAWCSV/"+prevdate+"/ML_final.csv")
# print(df_districts.columns)
df_districts["Total Recoveries"] = 0
for idx in df_districts.index:
if df_districts["District Name"][idx] != "Total":
df_districts["Total Recoveries"][idx] = prev_df[prev_df["District"] == df_districts["District Name"][idx]]["cumulativeRecoveredNumberForDistrict"].item() + df_districts["New Recoveries"][idx].item()
else:
# print(df_districts["Total Recoveries"].sum())
df_districts["Total Recoveries"][idx] = df_districts["Total Recoveries"].sum()
col_dict = {"District Name":"District","Total Cases":"Confirmed","Total Recoveries":"Recovered","Total Deaths":"Deceased"}
df_districts.rename(columns=col_dict,inplace=True)
df_summary = df_districts
df_districts = df_districts[:-1]
df_json = pd.read_json("../DistrictMappingMaster.json")
dist_map = df_json['Meghalaya'].to_dict()
df_districts['District'].replace(dist_map,inplace=True)
df_summary = df_summary.iloc[-1,:]
print(df_districts)
return df_summary,df_districts
# def getPBData(file_path,date,StateCode):
# table = camelot.read_pdf(file_path,'1,4')
# if not os.path.isdir('../INPUT/{}/{}/'.format(date,StateCode)):
# os.mkdir('../INPUT/{}/{}/'.format(date,StateCode))
# table.export('../INPUT/{}/{}/foo.csv'.format(date,StateCode), f='csv')
# # table[5].to_excel('foo.xlsx')
# df_districts = pd.read_csv('../INPUT/{}/{}/foo-page-4-table-1.csv'.format(date,StateCode))
# df_tests = pd.read_csv('../INPUT/{}/{}/foo-page-1-table-1.csv'.format(date,StateCode),names=["Details","Numbers"])
# df_districts.columns = df_districts.columns.str.replace("\n","")
# col_dict = {"Total ConfirmedCases":"Confirmed","Total Cured":"Recovered","Deaths":"Deceased","Total Confirmed Cases":"Confirmed"}
# df_districts.rename(columns=col_dict,inplace=True)
# # print(df_districts)
# # a=b
# # df_districts.drop(columns=['S. No.','Total ActiveCases'],inplace=True)
# df_summary = df_districts
# df_districts = df_districts[:-1]
# # df_districts.drop(labels=[0,1],axis=0,inplace=True)
# # df = df[]
# df_json = pd.read_json("../DistrictMappingMaster.json")
# dist_map = df_json['Punjab'].to_dict()
# df_districts['District'].replace(dist_map,inplace=True)
# df_summary = df_summary.iloc[-1,:] #testcode needs to be updated later
# df_tests["Confirmed"] = df_districts["Confirmed"].astype(str).str.split("*").str[0].astype(int)
# if type(df_tests.loc[1,"Numbers"]) == str:
# df_summary["Tested"] = df_tests.loc[1,"Numbers"].split('*')[1]
# else:
# df_summary["Tested"] = df_tests.loc[1,"Numbers"]
# # df_districts["Tested"] = df_summary["Tested"]
# # print(df_districts)
# # a=b
# return df_summary,df_districts
def getPBData(file_path,date,StateCode):
table = camelot.read_pdf(file_path,'1,4')
if not os.path.isdir('../INPUT/{}/{}/'.format(date,StateCode)):
os.mkdir('../INPUT/{}/{}/'.format(date,StateCode))
table.export('../INPUT/{}/{}/foo.csv'.format(date,StateCode), f='csv')
df_districts = pd.read_csv('../INPUT/{}/{}/foo-page-4-table-1.csv'.format(date,StateCode))
df_tests = pd.read_csv('../INPUT/{}/{}/foo-page-1-table-1.csv'.format(date,StateCode),header=None)
# removing the header part in df tests
df_tests.drop(df_tests.head(1).index, inplace=True)
    # A new column was added in the new PDF format; drop that column.
cols = [2]
df_tests.drop(df_tests.columns[cols], axis=1, inplace=True)
col_dict = {0:"S.No",1:"Description",3:"Numbers"}
df_tests.rename(columns=col_dict,inplace=True)
# print(df_tests)
df_districts.columns = df_districts.columns.str.replace("\n","")
# print(df_districts.columns)
col_dict = {"District":"District",
"Unnamed: 3":"Confirmed",
"Unnamed: 5":"Recovered",
"Unnamed: 7":"Deceased"}
df_districts.rename(columns=col_dict,inplace=True)
df_summary = df_districts
df_districts = df_districts[:-1]
df_districts = df_districts.dropna()
    # Removing unwanted columns; these were added in the new PDF format.
# df_districts.drop(columns=['confirmed cases from 1st April','Recovered from 1st April',
# 'Deceased from 1st April',"Total Active Cases"],axis=0,inplace=True)
# print(df_districts.columns)
df_json = pd.read_json("../DistrictMappingMaster.json")
dist_map = df_json['Punjab'].to_dict()
df_districts['District'].replace(dist_map,inplace=True)
# df_summary = df_summary[:-1]
df_summary = df_summary.iloc[-1,:]
df_tests["Confirmed"] = df_districts["Confirmed"].astype(str).str.split("*").str[0].astype(int)
df_summary["Tested"] = df_tests.loc[1,"Numbers"]
print(df_tests.loc[1,"Numbers"])
return df_summary,df_districts
# def getUKData(file_path,date,StateCode):
# table = camelot.read_pdf(file_path,'2')
# if not os.path.isdir('../INPUT/{}/{}/'.format(date,StateCode)):
# os.mkdir('../INPUT/{}/{}/'.format(date,StateCode))
# table.export('../INPUT/{}/{}/foo.csv'.format(date,StateCode), f='csv')
# df_districts = pd.read_csv('../INPUT/{}/{}/foo-page-2-table-2.csv'.format(date,StateCode))
# df_districts.columns = df_districts.columns.str.replace("\n","")
# df_tests = pd.read_csv('../INPUT/{}/{}/foo-page-2-table-1.csv'.format(date,StateCode))
# df_tests.columns = df_tests.columns.str.replace("\n","")
# col_dict = {"Districts":"District","Cases till Date":"Confirmed","Treated/ Cured till Date":"Recovered","Deaths":"Deceased","Migrated/ Others":"Other","Cumulative Samples Tested":"Tested"}
# df_districts.rename(columns=col_dict,inplace=True)
# df_tests.rename(columns=col_dict,inplace=True)
# df_tests = df_tests[['District',"Tested"]]
# df_districts["Confirmed"] = df_districts["Confirmed"].astype(str).str.split("*").str[0].astype(int)
# df_districts["Recovered"] = df_districts["Recovered"].astype(str).str.split("*").str[0].astype(int)
# # df_districts['Recovered'] += df_districts['Migrated']
# # df_districts.drop(columns=['Active Cases','Migrated'],inplace=True)
# for col in df_districts.columns:
# df_districts[col] = df_districts[col].astype(str).str.replace("*","")
# df_summary = df_districts
# df_districts = df_districts[:-1]
# df_json = pd.read_json("../DistrictMappingMaster.json")
# dist_map = df_json['Uttarakhand'].to_dict()
# df_districts['District'].replace(dist_map,inplace=True)
# df_tests['District'].replace(dist_map,inplace=True)
# df_total = pd.merge(df_districts, df_tests, on='District', how='inner')
# # print(df_districts)
# # print(df_tests)
# print(df_total)
# # a=b
# df_summary = df_summary.iloc[-1,:] #testcode needs to be updated later
# df_summary["Tested"] = int(df_tests.iloc[-1,-1])
# df_total['notesForDistrict'] = df_total['Other'].astype(str) + " cases were recorded as Migrated / Others"
# df_summary['notesForState'] = df_summary['Other'] + " cases were recorded as Migrated / Others"
# return df_summary,df_total
def getUKData(file_path,date,StateCode):
try:
table = camelot.read_pdf(file_path,'2')
if not os.path.isdir('../INPUT/{}/{}/'.format(date,StateCode)):
os.mkdir('../INPUT/{}/{}/'.format(date,StateCode))
table.export('../INPUT/{}/{}/foo.csv'.format(date,StateCode), f='csv')
df_districts = pd.read_csv('../INPUT/{}/{}/foo-page-2-table-2.csv'.format(date,StateCode))
# change district name from U.S. nagar to Udham Singh Nagar
index_of_USnagar= df_districts[df_districts['Districts'] == 'U.S. Nagar'].index[0]
        df_districts.at[index_of_USnagar, 'Districts'] = 'Udham Singh Nagar'
df_districts.columns = df_districts.columns.str.replace("\n","")
df_tests = pd.read_csv('../INPUT/{}/{}/foo-page-2-table-1.csv'.format(date,StateCode))
df_tests.columns = df_tests.columns.str.replace("\n","")
except FileNotFoundError:
table = camelot.read_pdf(file_path,'3')
if not os.path.isdir('../INPUT/{}/{}/'.format(date,StateCode)):
os.mkdir('../INPUT/{}/{}/'.format(date,StateCode))
table.export('../INPUT/{}/{}/foo.csv'.format(date,StateCode), f='csv')
df_districts = pd.read_csv('../INPUT/{}/{}/foo-page-3-table-2.csv'.format(date,StateCode))
# change district name from U.S. nagar to Udham Singh Nagar
index_of_USnagar= df_districts[df_districts['Districts'] == 'U.S. Nagar'].index[0]
df_districts.at[index_of_USnagar, 'Districts'] = 'Udham Singh Nagar'
df_districts.columns = df_districts.columns.str.replace("\n","")
df_tests = pd.read_csv('../INPUT/{}/{}/foo-page-3-table-1.csv'.format(date,StateCode))
df_tests.columns = df_tests.columns.str.replace("\n","")
index_of_USnagar= df_tests[df_tests['Districts'] == 'US Nagar'].index[0]
df_tests.at[index_of_USnagar, 'Districts'] = 'Udham Singh Nagar'
col_dict = {"Districts":"District","No. of Positive Cases Since 01.01.2022":"Confirmed",
"No. of Positive Cases Treated/ Cured Since 01.01.2022":"Recovered",
"Deaths Since 01.01.2022":"Deceased","Migrated Positive Cases Since 01.01.2022":"Other","Cumulative Samples Tested Since 01.01.2022":"Tested"}
# "Cumulative Samples Tested":"Tested"
df_districts.rename(columns=col_dict,inplace=True)
df_tests.rename(columns=col_dict,inplace=True)
df_districts["Confirmed"] = df_districts["Confirmed"].astype(str).str.split("*").str[0].astype(int)
df_districts["Recovered"] = df_districts["Recovered"].astype(str).str.split("*").str[0].astype(int)
updated_data_frame = df_districts
    base_csv = '../RAWCSV/2021-12-31/UT_raw.csv'
    df_base_csv = pd.read_csv(base_csv)
# load libraries
from sklearn import preprocessing
from sklearn.pipeline import Pipeline
import pandas as pd
raw_data = {'first_name': ['Jason', 'Molly', 'Tina', 'Jake', 'Amy'],
'last_name': ['Miller', 'Jacobson', 'Ali', 'Milner', 'Cooze'],
'age': [42, 52, 36, 24, 73],
'city': ['San Francisco', 'Baltimore', 'Miami', 'Douglas', 'Boston']}
df = pd.DataFrame(raw_data, columns = ['first_name', 'last_name', 'age', 'city'])
# create dummy variables for every unique category in df.city
pd.get_dummies(df['city'])
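# A possible next step (an assumption, not part of the original snippet): keep
# the dummy columns next to the original dataframe by concatenating on axis=1.
df_with_dummies = pd.concat([df, pd.get_dummies(df['city'])], axis=1)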
import pandas as pd
# set the Quarter; should be in the form yearQn, e.g. "2020Q1"
Quarter = "2020Q1"
#read CSV file: quote and contract
quotes = pd.read_csv("./all quotes.csv")
import argparse, pandas, os, random, seaborn, sys, re
import numpy as np
from unicodedata import name
from numpy import median
import matplotlib.pyplot as plt
names_to_translate = {
'gflop_per_s_per_iter': 'Throughput [Gflop/s]',
'gbyte_per_s_per_iter': 'Bandwidth [GB/s]',
'runtime_problem_sizes_dict': 'Problem Size',
# Add benchmark function names.
'copy_2d': 'Copy2D',
'transpose_2d': 'Transpose2D',
'row_reduction_2d': 'RowRed2D',
'column_reduction_2d': 'ColRed2D',
'matmul_kmkn': '$A^TB$',
'matmul_mkkn': '$AB$',
'matmul_mknk': '$AB^T$',
'conv_1d_nwc_wcf_main': 'Conv1D',
'conv_2d_nhwc_hwcf_main': 'Conv2D',
'depthwise_conv_2d_nhwc_hwc': 'DepthwiseConv2D',
'depthwise_conv_1d_nwc_wc_1_1': 'str.=[1],dil.=[1]',
'depthwise_conv_1d_nwc_wc_1_2': 'str.=[1],dil.=[2]',
'depthwise_conv_1d_nwc_wc_2_1': 'str.=[2],dil.=[1]',
'depthwise_conv_1d_nwc_wc_2_2': 'str.=[2],dil.=[2]',
'conv_1d_nwc_wcf_main_1_1': 'str.=[1],dil.=[1]',
'conv_1d_nwc_wcf_main_2_1': 'str.=[2],dil.=[1]',
'conv_1d_nwc_wcf_main_1_2': 'str.=[1],dil.=[2]',
'conv_1d_nwc_wcf_main_2_2': 'str.=[2],dil.=[2]',
'conv_2d_nhwc_hwcf_main_11_11': 'str.=[1,1],dil.=[1,1]',
'conv_2d_nhwc_hwcf_main_22_11': 'str.=[2,2],dil.=[1,1]',
'conv_2d_nhwc_hwcf_main_11_22': 'str.=[1,1],dil.=[2,2]',
'conv_2d_nhwc_hwcf_main_22_22': 'str.=[2,2],dil.=[2,2]',
}
def _parse_arguments() -> argparse.Namespace:
"""Plot argument parser.
"""
parser = argparse.ArgumentParser(description="Plot")
parser.add_argument(
"--inputs",
type=str,
required=True,
help=
"comma-separated list of input data filenames (e.g., --input input1,input2)\n"
+ "The data for multiple files is concatenated into a single graph.")
parser.add_argument("--output",
type=str,
required=True,
help="output plot filename (e.g., --output output)")
parser.add_argument("--plot_name",
type=str,
required=True,
help="plot name (e.g., --plot_name name)")
parser.add_argument("--print_available_benchmarks",
type=bool,
required=False,
help="print the existing list of benchmarks in the data")
parser.add_argument("--benchmarks_to_plot",
type=str,
required=False,
help="comma-separated names of benchmarks to plot",
default='all')
parser.add_argument("--sizes_to_plot",
type=str,
required=False,
help="semicolon-separated lost of problem sizes to plot "
"(e.g., --sizes_to_plot=\"m=32,n=48;m=90,n=32\")",
default='all')
parser.add_argument("--num_sizes_to_plot",
type=int,
required=False,
help="sample the given number of problem sizes to plot",
default=-1)
parser.add_argument("--metric_to_plot",
type=str,
required=True,
choices=["gflop_per_s_per_iter", "gbyte_per_s_per_iter"])
parser.add_argument("--group_by_strides_and_dilations",
type=bool,
required=False,
help="plot separate bars for strides and dilations")
###############################################################################
# Not used atm
###############################################################################
parser.add_argument("--peak_compute",
type=int,
nargs="?",
help="peak compute (e.g., --peak_compute 192)",
default=192)
parser.add_argument("--peak_bandwidth_hi",\
type=int,
nargs="?",
help="high peak bandwidth (e.g., --peak_bandwidth_hi 281)",
default=281)
parser.add_argument("--peak_bandwidth_lo",
type=int,
nargs="?",
help="low peak bandwidth (e.g., -peak_bandwidth_lo 281)",
default=281)
return parser.parse_args(sys.argv[1:])
def add_peak_lines(args, plot, key):
if key == 'gflop_per_s_per_iter':
plot.set(ylim=(0, args.peak_compute + 10))
plot.axhline(args.peak_compute,
label=f'Peak Compute ({args.peak_compute} GFlop/s)')
elif key == 'gbyte_per_s_per_iter':
plot.set(ylim=(0, args.peak_bandwidth_hi * 1.1))
plot.axhline(args.peak_bandwidth_hi,
label=f'Peak BW ({args.peak_bandwidth_hi} GB/s)')
if args.peak_bandwidth_lo != args.peak_bandwidth_hi:
plot.axhline(args.peak_bandwidth_lo,
label=f'Peak BW ({args.peak_bandwidth_lo} GB/s (low range))')
###############################################################################
# End Not used atm
###############################################################################
#### Tools to compute labels
def compress_problem_sizes_label(labels):
"""Shorten the problem size lables by removing redundant information.
Plotting the entire problem size configuration for every axis tick
requires a lot of space and results in overlapping labels. The method
identifies the dimensions that take different values and filters out
the dimensions that are constant for the entire plot. Additionally,
  the dimension names (it suffices to plot them once) and the size values
  are returned separately.
Example:
["H=64,W=32", "H=64,W=64"]
->
["W"], ["32", "64"]
"""
label_dicts = []
for label in labels:
groups = re.findall(r"""([a-zA-Z]+)=(\d+|\[[0-9, ]+\])""", label)
label_dicts.append(dict(groups))
# Collect all values for a specific key.
value_dict = {}
for label_dict in label_dicts:
for k, v in label_dict.items():
if k in value_dict:
value_dict[k].add(v)
else:
value_dict[k] = set([v])
# Collect the keys that have multiple values.
keys = []
for k, v in value_dict.items():
if len(v) != 1:
keys.append(k)
# Collect the keys for every label
new_labels = []
for label_dict in label_dicts:
new_labels.append(",".join([label_dict[k] for k in keys]))
return keys, new_labels
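# Illustrative example (mirrors the docstring above, not part of the original
# script): only the dimension that varies across labels is kept.
#   compress_problem_sizes_label(["H=64,W=32", "H=64,W=64"])
#   -> (["W"], ["32", "64"])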
def get_strides_and_dilations(size):
match1 = re.search(r"""strides=\[([0-9, ]+)\]""", size)
match2 = re.search(r"""dilations=\[([0-9, ]+)\]""", size)
suffixes = []
if match1:
suffixes.append("".join([x.strip() for x in match1.group(1).split(",")]))
if match2:
suffixes.append("".join([x.strip() for x in match2.group(1).split(",")]))
if suffixes:
return "_" + "_".join([suffix.strip() for suffix in suffixes])
return ""
def remove_strides_and_dilations(size):
size = re.sub(r"""[,]*strides=\[[0-9, ]+\]""", "", size)
size = re.sub(r"""[,]*dilations=\[[0-9, ]+\]""", "", size)
return size
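# Illustrative examples (not part of the original script): the suffix produced
# here matches the "_<strides>_<dilations>" keys used in names_to_translate,
# e.g. 'conv_2d_nhwc_hwcf_main_22_11'.
#   get_strides_and_dilations("H=16,strides=[2, 2],dilations=[1, 1]")    -> "_22_11"
#   remove_strides_and_dilations("H=16,strides=[2, 2],dilations=[1, 1]") -> "H=16"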
#### Tools to query benchmarks info from dataframe
def benchmark_key(data):
return data.keys()[0]
def get_unique_benchmarks(data):
return np.unique(data[benchmark_key(data)].values)
def print_available_benchmarks_and_exit(data, args):
print(get_unique_benchmarks(data))
exit()
def get_benchmarks_to_plot(data, args):
if args.benchmarks_to_plot != 'all':
specified_benchmarks = args.benchmarks_to_plot.split(',')
print(f'Specified benchmark filter: {specified_benchmarks}')
available_benchmarks = get_unique_benchmarks(data)
print(f'Available benchmarks in the data set: {available_benchmarks}')
return list(
filter(lambda x: x in available_benchmarks, specified_benchmarks))
return list(get_unique_benchmarks(data))
def get_translated_name(name):
return names_to_translate.get(name, name)
#### Tools to query problem_size info from dataframe
def problem_size_key(data):
return data.keys()[1]
def get_unique_sizes(data):
return np.unique(data[problem_size_key(data)].values)
def print_available_sizes_and_exit(data, args):
  print(get_unique_sizes(data))
  exit()
def get_sizes_to_plot(data, args):
if args.sizes_to_plot != 'all':
specified_sizes = args.sizes_to_plot.split(';')
print(f'Specified size filter: {specified_sizes}')
available_sizes = get_unique_sizes(data)
print(f'Available sizes in the data set: {available_sizes}')
return list(filter(lambda x: x in available_sizes, specified_sizes))
if args.num_sizes_to_plot <= 0:
return get_unique_sizes(data)
random.seed(42)
return random.sample(list(get_unique_sizes(data)), args.num_sizes_to_plot)
#### Start
def main():
args = _parse_arguments()
data = None
for file in args.inputs.split(','):
print(f'Processing {file}')
if not os.path.exists(file):
print(f'{file} does not exist')
return
    read_data = pandas.read_json(file)
"""add comment in script explaining what its for
This is where the scripts to prepross the data go
save files in data/targets/
"""
import itertools
import json
import os
import sys
from datetime import datetime
import numpy as np
import pandas as pd
from google_drive_downloader import GoogleDriveDownloader as gdd
from autumn.settings import PROJECTS_PATH
from autumn.settings import INPUT_DATA_PATH
# shareable google drive links
PHL_doh_link = "1mt9SXyvEG78pQ3ZYPFjrPFciREbhDOxh" # sheet 05 daily report
PHL_fassster_link = "106ZJf_4rjuWQEORxEBT_HKsUrN12TEU2"
# destination folders filepaths
phl_inputs_dir = os.path.join(INPUT_DATA_PATH, "covid_phl")
PHL_doh_dest = os.path.join(phl_inputs_dir, "PHL_icu.csv")
PHL_fassster_dest = os.path.join(phl_inputs_dir, "PHL_ConfirmedCases.zip")
icu_dest = os.path.join(phl_inputs_dir, "PHL_icu_processed.csv")
deaths_dest = os.path.join(phl_inputs_dir, "PHL_deaths_processed.csv")
notifications_dest = os.path.join(phl_inputs_dir, "PHL_notifications_processed.csv")
# start date to calculate time since Dec 31, 2019
COVID_BASE_DATETIME = datetime(2019, 12, 31, 0, 0, 0)
def main():
fetch_phl_data()
fassster_filename = fassster_data_filepath()
# Process DoH data
working_df = pd.read_csv(PHL_doh_dest) # copy_davao_city_to_region(PHL_doh_dest)
working_df = rename_regions(
working_df,
"region",
"NATIONAL CAPITAL REGION (NCR)",
"REGION IV-A (CALABAR ZON)",
"REGION VII (CENTRAL VISAYAS)",
"REGION XI (DAVAO REGION)",
)
working_df = duplicate_data(working_df, "region")
working_df = filter_df_by_regions(working_df, "region")
process_icu_data(working_df)
# Now fassster data
working_df = pd.read_csv(fassster_filename) #copy_davao_city_to_region(fassster_filename)
working_df = rename_regions(working_df, "Region", "NCR", "4A", "07", "11")
working_df = duplicate_data(working_df, "Region")
working_df = filter_df_by_regions(working_df, "Region")
process_accumulated_death_data(working_df)
process_notifications_data(working_df)
update_calibration_phl()
remove_files(fassster_filename)
# function to fetch data
def fetch_phl_data():
gdd.download_file_from_google_drive(file_id=PHL_doh_link, dest_path=PHL_doh_dest)
gdd.download_file_from_google_drive(
file_id=PHL_fassster_link, dest_path=PHL_fassster_dest, unzip=True
)
def fassster_data_filepath():
fassster_filename = [
filename
for filename in os.listdir(phl_inputs_dir)
if filename.startswith("ConfirmedCases_Final_")
]
fassster_filename = os.path.join(phl_inputs_dir, fassster_filename[0])
return fassster_filename
def rename_regions(df: pd.DataFrame, regionSpelling, ncrName, calName, cenVisName, davName):
# df = pd.read_csv(filePath)
df[regionSpelling] = df[regionSpelling].replace(
{
ncrName: "manila",
calName: "calabarzon",
cenVisName: "central_visayas",
davName: "davao_region",
}
)
return df
def duplicate_data(df: pd.DataFrame, regionSpelling):
# df = pd.read_csv(filePath)
data_dup = df.copy()
data_dup[regionSpelling] = "philippines"
newdf = df.append(data_dup)
return newdf
def filter_df_by_regions(df: pd.DataFrame, regionSpelling):
regions = [
"calabarzon",
"central_visayas",
"manila",
"davao_city",
"davao_region",
"philippines",
]
df_regional = df[df[regionSpelling].isin(regions)]
return df_regional
def process_icu_data(df: pd.DataFrame):
df.loc[:, "reportdate"] = pd.to_datetime(df["reportdate"])
df["times"] = df.reportdate - COVID_BASE_DATETIME
df["times"] = df["times"] / np.timedelta64(1, "D")
icu_occ = df.groupby(["region", "times"], as_index=False).sum(min_count=1)[
["region", "times", "icu_o"]
]
icu_occ.to_csv(icu_dest)
def process_accumulated_death_data(df: pd.DataFrame):
fassster_data_deaths = df[df["Date_Died"].notna()]
fassster_data_deaths.loc[:, "Date_Died"] = pd.to_datetime(fassster_data_deaths["Date_Died"])
fassster_data_deaths.loc[:, "times"] = (
fassster_data_deaths.loc[:, "Date_Died"] - COVID_BASE_DATETIME
)
fassster_data_deaths["times"] = fassster_data_deaths["times"] / np.timedelta64(
1, "D"
) # warning
accum_deaths = fassster_data_deaths.groupby(["Region", "times"]).size()
accum_deaths = accum_deaths.to_frame(name="daily_deaths").reset_index()
accum_deaths["accum_deaths"] = accum_deaths.groupby("Region")["daily_deaths"].transform(
pd.Series.cumsum
)
cumulative_deaths = accum_deaths[["Region", "times", "accum_deaths"]]
cumulative_deaths.to_csv(deaths_dest)
def process_notifications_data(df: pd.DataFrame):
fassster_data_agg = df.groupby(["Region", "Report_Date"]).size()
fassster_data_agg = fassster_data_agg.to_frame(name="daily_notifications").reset_index()
fassster_data_agg["Report_Date"] = pd.to_datetime(fassster_data_agg["Report_Date"])
# make sure all dates within range are included
fassster_data_agg["times"] = fassster_data_agg.Report_Date - COVID_BASE_DATETIME
fassster_data_agg["times"] = fassster_data_agg["times"] / np.timedelta64(1, "D")
timeIndex = np.arange(
min(fassster_data_agg["times"]), max(fassster_data_agg["times"]), 1.0
).tolist()
regions = ["calabarzon", "central_visayas", "manila", "davao_city","davao_region", "philippines"]
all_regions_x_times = pd.DataFrame(
list(itertools.product(regions, timeIndex)), columns=["Region", "times"]
)
fassster_agg_complete = pd.merge(
fassster_data_agg, all_regions_x_times, on=["Region", "times"], how="outer"
)
    fassster_agg_complete.loc[
        fassster_agg_complete["daily_notifications"].isna(), "daily_notifications"
    ] = 0
# calculate a 7-day rolling window value
fassster_agg_complete = fassster_agg_complete.sort_values(
["Region", "times"], ascending=[True, True]
)
fassster_agg_complete["mean_daily_notifications"] = (
fassster_agg_complete.groupby("Region")
.rolling(7)["daily_notifications"]
.mean()
.reset_index(0, drop=True)
)
fassster_agg_complete["mean_daily_notifications"] = np.round(
fassster_agg_complete["mean_daily_notifications"]
)
fassster_data_final = fassster_agg_complete[fassster_agg_complete.times > 60]
fassster_data_final = fassster_data_final[
fassster_data_final.times < max(fassster_data_final.times)
]
fassster_data_final.to_csv(notifications_dest)
def update_calibration_phl():
phl_regions = ["calabarzon", "central_visayas", "manila", "davao_city","davao_region", "philippines"]
# read in csvs
    icu = pd.read_csv(icu_dest)
"""
json 불러와서 캡션 붙이는 것
"""
import json
import pandas as pd
path = './datasets/vqa/v2_OpenEnded_mscoco_train2014_questions.json'
with open(path) as question:
question = json.load(question)
# question['questions'][0]
# question['questions'][1]
# question['questions'][2]
df = pd.DataFrame(question['questions'])
df
caption_path = './datasets/caption/vis_st_trainval.json'
with open(caption_path) as cap:
cap = json.load(cap)
df_cap = pd.DataFrame(cap)
df_cap
df_addcap = pd.merge(df, df_cap, how='left', on='image_id')
del df_addcap['file_path']
########################################################################################################################
"""
pandas to json
"""
df_addcap.to_json('./datasets/caption/train_cap2.json', orient='table')
with open('./datasets/caption/train_cap2.json') as train_cap:
train_cap = json.load(train_cap)
########################################################################################################################
########################################################################################################################
"""
answer + cap
"""
path = '/home/nextgen/Desktop/mcan-vqa/datasets/vqa/v2_mscoco_train2014_annotations.json'
path = './datasets/vqa/v2_mscoco_val2014_annotations.json'
with open(path) as answer:
answer = json.load(answer)
answer['annotations'][0]
df_ans = pd.DataFrame(answer['annotations'])
df_ans[:0]
del df_ans['question_type']
del df_ans['answers']
del df_ans['answer_type']
del df_ans['image_id']
df_ans[df_ans['question_id']==458752000]
df_addcap2 = pd.merge(df_addcap, df_ans, how='left', on='question_id')
df_addcap2[:0]
df_addcap2['multiple_choice_answer']
# del df_addcap['file_path']
df_addcap2.to_json('./datasets/caption/val_qacap.json', orient='table')
with open('./datasets/caption/train_qacap.json') as train_qacap:
train_qacap = json.load(train_qacap)
########################################################################################################################
"""val test도 마찬가지"""
path = './datasets/vqa/v2_OpenEnded_mscoco_val2014_questions.json'
with open(path) as question:
question = json.load(question)
df = pd.DataFrame(question['questions'])
df
caption_path = './datasets/caption/vis_st_trainval.json'
with open(caption_path) as cap:
cap = json.load(cap)
df_cap = pd.DataFrame(cap)
df_cap
df_addcap = pd.merge(df, df_cap, how='left', on='image_id')
df_addcap[:0]
del df_addcap['file_path']
df_addcap.to_json('./datasets/caption/val_cap.json', orient='table')
#test
path = './datasets/vqa/v2_OpenEnded_mscoco_test-dev2015_questions.json'
with open(path) as question:
question = json.load(question)
df = pd.DataFrame(question['questions'])
df
df['image_id'] = df.image_id.astype(int)
caption_path = './datasets/caption/vis_st_test.json'
with open(caption_path) as cap:
cap = json.load(cap)
df_cap = pd.DataFrame(cap)
"""
A warehouse for constant values required to initilize the PUDL Database.
This constants module stores and organizes a bunch of constant values which are
used throughout PUDL to populate static lists within the data packages or for
data cleaning purposes.
"""
import importlib.resources
import pandas as pd
import sqlalchemy as sa
######################################################################
# Constants used within the init.py module.
######################################################################
prime_movers = [
'steam_turbine',
'gas_turbine',
'hydro',
'internal_combustion',
'solar_pv',
'wind_turbine'
]
"""list: A list of the types of prime movers"""
rto_iso = {
'CAISO': 'California ISO',
'ERCOT': 'Electric Reliability Council of Texas',
'MISO': 'Midcontinent ISO',
'ISO-NE': 'ISO New England',
'NYISO': 'New York ISO',
'PJM': 'PJM Interconnection',
'SPP': 'Southwest Power Pool'
}
"""dict: A dictionary containing ISO/RTO abbreviations (keys) and names (values)
"""
us_states = {
'AK': 'Alaska',
'AL': 'Alabama',
'AR': 'Arkansas',
'AS': 'American Samoa',
'AZ': 'Arizona',
'CA': 'California',
'CO': 'Colorado',
'CT': 'Connecticut',
'DC': 'District of Columbia',
'DE': 'Delaware',
'FL': 'Florida',
'GA': 'Georgia',
'GU': 'Guam',
'HI': 'Hawaii',
'IA': 'Iowa',
'ID': 'Idaho',
'IL': 'Illinois',
'IN': 'Indiana',
'KS': 'Kansas',
'KY': 'Kentucky',
'LA': 'Louisiana',
'MA': 'Massachusetts',
'MD': 'Maryland',
'ME': 'Maine',
'MI': 'Michigan',
'MN': 'Minnesota',
'MO': 'Missouri',
'MP': 'Northern Mariana Islands',
'MS': 'Mississippi',
'MT': 'Montana',
'NA': 'National',
'NC': 'North Carolina',
'ND': 'North Dakota',
'NE': 'Nebraska',
'NH': 'New Hampshire',
'NJ': 'New Jersey',
'NM': 'New Mexico',
'NV': 'Nevada',
'NY': 'New York',
'OH': 'Ohio',
'OK': 'Oklahoma',
'OR': 'Oregon',
'PA': 'Pennsylvania',
'PR': 'Puerto Rico',
'RI': 'Rhode Island',
'SC': 'South Carolina',
'SD': 'South Dakota',
'TN': 'Tennessee',
'TX': 'Texas',
'UT': 'Utah',
'VA': 'Virginia',
'VI': 'Virgin Islands',
'VT': 'Vermont',
'WA': 'Washington',
'WI': 'Wisconsin',
'WV': 'West Virginia',
'WY': 'Wyoming'
}
"""dict: A dictionary containing US state abbreviations (keys) and names
(values)
"""
canada_prov_terr = {
'AB': 'Alberta',
'BC': 'British Columbia',
'CN': 'Canada',
'MB': 'Manitoba',
'NB': 'New Brunswick',
'NS': 'Nova Scotia',
'NL': 'Newfoundland and Labrador',
'NT': 'Northwest Territories',
'NU': 'Nunavut',
'ON': 'Ontario',
'PE': 'Prince Edwards Island',
'QC': 'Quebec',
'SK': 'Saskatchewan',
'YT': 'Yukon Territory',
}
"""dict: A dictionary containing Canadian provinces' and territories'
abbreviations (keys) and names (values)
"""
cems_states = {k: v for k, v in us_states.items() if v not in
{'Alaska',
'American Samoa',
'Guam',
'Hawaii',
'Northern Mariana Islands',
'National',
'Puerto Rico',
'Virgin Islands'}
}
"""dict: A dictionary containing US state abbreviations (keys) and names
(values) that are present in the CEMS dataset
"""
# This is imperfect for states that have split timezones. See:
# https://en.wikipedia.org/wiki/List_of_time_offsets_by_U.S._state_and_territory
# For states that are split, I went with where there seem to be more people
# List of timezones in pytz.common_timezones
# Canada: https://en.wikipedia.org/wiki/Time_in_Canada#IANA_time_zone_database
state_tz_approx = {
"AK": "US/Alaska", # Alaska; Not in CEMS
"AL": "US/Central", # Alabama
"AR": "US/Central", # Arkansas
"AS": "Pacific/Pago_Pago", # American Samoa; Not in CEMS
"AZ": "US/Arizona", # Arizona
"CA": "US/Pacific", # California
"CO": "US/Mountain", # Colorado
"CT": "US/Eastern", # Connecticut
"DC": "US/Eastern", # District of Columbia
"DE": "US/Eastern", # Delaware
"FL": "US/Eastern", # Florida (split state)
"GA": "US/Eastern", # Georgia
"GU": "Pacific/Guam", # Guam; Not in CEMS
"HI": "US/Hawaii", # Hawaii; Not in CEMS
"IA": "US/Central", # Iowa
"ID": "US/Mountain", # Idaho (split state)
"IL": "US/Central", # Illinois
"IN": "US/Eastern", # Indiana (split state)
"KS": "US/Central", # Kansas (split state)
"KY": "US/Eastern", # Kentucky (split state)
"LA": "US/Central", # Louisiana
"MA": "US/Eastern", # Massachusetts
"MD": "US/Eastern", # Maryland
"ME": "US/Eastern", # Maine
"MI": "America/Detroit", # Michigan (split state)
"MN": "US/Central", # Minnesota
"MO": "US/Central", # Missouri
"MP": "Pacific/Saipan", # Northern Mariana Islands; Not in CEMS
"MS": "US/Central", # Mississippi
"MT": "US/Mountain", # Montana
"NC": "US/Eastern", # North Carolina
"ND": "US/Central", # North Dakota (split state)
"NE": "US/Central", # Nebraska (split state)
"NH": "US/Eastern", # New Hampshire
"NJ": "US/Eastern", # New Jersey
"NM": "US/Mountain", # New Mexico
"NV": "US/Pacific", # Nevada
"NY": "US/Eastern", # New York
"OH": "US/Eastern", # Ohio
"OK": "US/Central", # Oklahoma
"OR": "US/Pacific", # Oregon (split state)
"PA": "US/Eastern", # Pennsylvania
"PR": "America/Puerto_Rico", # Puerto Rico; Not in CEMS
"RI": "US/Eastern", # Rhode Island
"SC": "US/Eastern", # South Carolina
"SD": "US/Central", # South Dakota (split state)
"TN": "US/Central", # Tennessee
"TX": "US/Central", # Texas
"UT": "US/Mountain", # Utah
"VA": "US/Eastern", # Virginia
"VI": "America/Puerto_Rico", # Virgin Islands; Not in CEMS
"VT": "US/Eastern", # Vermont
"WA": "US/Pacific", # Washington
"WI": "US/Central", # Wisconsin
"WV": "US/Eastern", # West Virginia
"WY": "US/Mountain", # Wyoming
# Canada (none of these are in CEMS)
"AB": "America/Edmonton", # Alberta
"BC": "America/Vancouver", # British Columbia (split province)
"MB": "America/Winnipeg", # Manitoba
"NB": "America/Moncton", # New Brunswick
"NS": "America/Halifax", # Nova Scotia
"NL": "America/St_Johns", # Newfoundland and Labrador (split province)
"NT": "America/Yellowknife", # Northwest Territories (split province)
"NU": "America/Iqaluit", # Nunavut (split province)
"ON": "America/Toronto", # Ontario (split province)
"PE": "America/Halifax", # Prince Edwards Island
"QC": "America/Montreal", # Quebec (split province)
"SK": "America/Regina", # Saskatchewan (split province)
"YT": "America/Whitehorse", # Yukon Territory
}
"""dict: A dictionary containing US and Canadian state/territory abbreviations
(keys) and timezones (values)
"""
# Construct a dictionary mapping a canonical fuel name to a list of strings
# which are used to represent that fuel in the FERC Form 1 Reporting. Case is
# ignored, as all fuel strings can be converted to a lower case in the data
# set.
# Previous categories of ferc1_biomass_strings and ferc1_stream_strings have
# been deleted and their contents redistributed to ferc1_waste_strings and
# ferc1_other_strings
ferc1_coal_strings = [
'coal', 'coal-subbit', 'lignite', 'coal(sb)', 'coal (sb)', 'coal-lignite',
'coke', 'coa', 'lignite/coal', 'coal - subbit', 'coal-subb', 'coal-sub',
'coal-lig', 'coal-sub bit', 'coals', 'ciak', 'petcoke', 'coal.oil',
'coal/gas', 'bit coal', 'coal-unit #3', 'coal-subbitum', 'coal tons',
'coal mcf', 'coal unit #3', 'pet. coke', 'coal-u3', 'coal&coke', 'tons'
]
"""
list: A list of strings which are used to represent coal fuel in FERC Form 1
reporting.
"""
ferc1_oil_strings = [
'oil', '#6 oil', '#2 oil', 'fuel oil', 'jet', 'no. 2 oil', 'no.2 oil',
'no.6& used', 'used oil', 'oil-2', 'oil (#2)', 'diesel oil',
'residual oil', '# 2 oil', 'resid. oil', 'tall oil', 'oil/gas',
'no.6 oil', 'oil-fuel', 'oil-diesel', 'oil / gas', 'oil bbls', 'oil bls',
'no. 6 oil', '#1 kerosene', 'diesel', 'no. 2 oils', 'blend oil',
'#2oil diesel', '#2 oil-diesel', '# 2 oil', 'light oil', 'heavy oil',
'gas.oil', '#2', '2', '6', 'bbl', 'no 2 oil', 'no 6 oil', '#1 oil', '#6',
'oil-kero', 'oil bbl', 'biofuel', 'no 2', 'kero', '#1 fuel oil',
'no. 2 oil', 'blended oil', 'no 2. oil', '# 6 oil', 'nno. 2 oil',
'#2 fuel', 'oill', 'oils', 'gas/oil', 'no.2 oil gas', '#2 fuel oil',
'oli', 'oil (#6)', 'oil/diesel', '2 oil', '#6 hvy oil', 'jet fuel',
'diesel/compos', 'oil-8', 'oil {6}', 'oil-unit #1', 'bbl.', 'oil.',
'oil #6', 'oil (6)', 'oil(#2)', 'oil-unit1&2', 'oil-6', '#2 fue oil',
'dielel oil', 'dielsel oil', '#6 & used', 'barrels', 'oil un 1 & 2',
'jet oil', 'oil-u1&2', 'oiul', 'pil', 'oil - 2', '#6 & used', 'oial'
]
"""
list: A list of strings which are used to represent oil fuel in FERC Form 1
reporting.
"""
ferc1_gas_strings = [
'gas', 'gass', 'methane', 'natural gas', 'blast gas', 'gas mcf',
'propane', 'prop', 'natural gas', 'nat.gas', 'nat gas',
'nat. gas', 'natl gas', 'ga', 'gas`', 'syngas', 'ng', 'mcf',
'blast gaa', 'nat gas', 'gac', 'syngass', 'prop.', 'natural', 'coal.gas',
'n. gas', 'lp gas', 'natuaral gas', 'coke gas', 'gas #2016', 'propane**',
'* propane', 'propane **', 'gas expander', 'gas ct', '# 6 gas', '#6 gas',
'coke oven gas'
]
"""
list: A list of strings which are used to represent gas fuel in FERC Form 1
reporting.
"""
ferc1_solar_strings = []
ferc1_wind_strings = []
ferc1_hydro_strings = []
ferc1_nuke_strings = [
'nuclear', 'grams of uran', 'grams of', 'grams of ura',
'grams', 'nucleur', 'nulear', 'nucl', 'nucleart', 'nucelar',
'gr.uranium', 'grams of urm', 'nuclear (9)', 'nulcear', 'nuc',
'gr. uranium', 'nuclear mw da', 'grams of ura'
]
"""
list: A list of strings which are used to represent nuclear fuel in FERC Form
1 reporting.
"""
ferc1_waste_strings = [
'tires', 'tire', 'refuse', 'switchgrass', 'wood waste', 'woodchips',
'biomass', 'wood', 'wood chips', 'rdf', 'tires/refuse', 'tire refuse',
'waste oil', 'waste', 'woodships', 'tire chips'
]
"""
list: A list of strings which are used to represent waste fuel in FERC Form 1
reporting.
"""
ferc1_other_strings = [
'steam', 'purch steam', 'all', 'tdf', 'n/a', 'purch. steam', 'other',
'composite', 'composit', 'mbtus', 'total', 'avg', 'avg.', 'blo',
'all fuel', 'comb.', 'alt. fuels', 'na', 'comb', '/#=2\x80â\x91?',
'kã\xadgv¸\x9d?', "mbtu's", 'gas, oil', 'rrm', '3\x9c', 'average',
'furfural', '0', 'watson bng', 'toal', 'bng', '# 6 & used', 'combined',
'blo bls', 'compsite', '*', 'compos.', 'gas / oil', 'mw days', 'g', 'c',
'lime', 'all fuels', 'at right', '20', '1', 'comp oil/gas', 'all fuels to',
'the right are', 'c omposite', 'all fuels are', 'total pr crk',
'all fuels =', 'total pc', 'comp', 'alternative', 'alt. fuel', 'bio fuel',
'total prairie', ''
]
"""list: A list of strings which are used to represent other fuels in FERC Form
1 reporting.
"""
# There are also a bunch of other weird and hard to categorize strings
# that I don't know what to do with... hopefully they constitute only a
# small fraction of the overall generation.
ferc1_fuel_strings = {"coal": ferc1_coal_strings,
"oil": ferc1_oil_strings,
"gas": ferc1_gas_strings,
"solar": ferc1_solar_strings,
"wind": ferc1_wind_strings,
"hydro": ferc1_hydro_strings,
"nuclear": ferc1_nuke_strings,
"waste": ferc1_waste_strings,
"other": ferc1_other_strings
}
"""dict: A dictionary linking fuel types (keys) to lists of various strings
representing that fuel (values)
"""
# Similarly, dictionary for cleaning up fuel unit strings
ferc1_ton_strings = ['toms', 'taons', 'tones', 'col-tons', 'toncoaleq', 'coal',
'tons coal eq', 'coal-tons', 'ton', 'tons', 'tons coal',
'coal-ton', 'tires-tons', 'coal tons -2 ',
'coal tons 200', 'ton-2000', 'coal tons -2', 'coal tons',
'coal-tone', 'tire-ton', 'tire-tons', 'ton coal eqv']
"""list: A list of fuel unit strings for tons."""
ferc1_mcf_strings = \
['mcf', "mcf's", 'mcfs', 'mcf.', 'gas mcf', '"gas" mcf', 'gas-mcf',
'mfc', 'mct', ' mcf', 'msfs', 'mlf', 'mscf', 'mci', 'mcl', 'mcg',
'm.cu.ft.', 'kcf', '(mcf)', 'mcf *(4)', 'mcf00', 'm.cu.ft..']
"""list: A list of fuel unit strings for thousand cubic feet."""
ferc1_bbl_strings = \
['barrel', 'bbls', 'bbl', 'barrels', 'bbrl', 'bbl.', 'bbls.',
'oil 42 gal', 'oil-barrels', 'barrrels', 'bbl-42 gal',
'oil-barrel', 'bb.', 'barrells', 'bar', 'bbld', 'oil- barrel',
'barrels .', 'bbl .', 'barels', 'barrell', 'berrels', 'bb',
'bbl.s', 'oil-bbl', 'bls', 'bbl:', 'barrles', 'blb', 'propane-bbl',
'barriel', 'berriel', 'barrile', '(bbl.)', 'barrel *(4)', '(4) barrel',
'bbf', 'blb.', '(bbl)', 'bb1', 'bbsl', 'barrrel', 'barrels 100%',
'bsrrels', "bbl's", '*barrels', 'oil - barrels', 'oil 42 gal ba', 'bll',
'boiler barrel', 'gas barrel', '"boiler" barr', '"gas" barrel',
'"boiler"barre', '"boiler barre', 'barrels .']
"""list: A list of fuel unit strings for barrels."""
ferc1_gal_strings = ['gallons', 'gal.', 'gals', 'gals.', 'gallon', 'gal',
'galllons']
"""list: A list of fuel unit strings for gallons."""
ferc1_1kgal_strings = ['oil(1000 gal)', 'oil(1000)', 'oil (1000)', 'oil(1000',
'oil(1000ga)']
"""list: A list of fuel unit strings for thousand gallons."""
ferc1_gramsU_strings = [ # noqa: N816 (U-ranium is capitalized...)
'gram', 'grams', 'gm u', 'grams u235', 'grams u-235', 'grams of uran',
'grams: u-235', 'grams:u-235', 'grams:u235', 'grams u308', 'grams: u235',
'grams of', 'grams - n/a', 'gms uran', 's e uo2 grams', 'gms uranium',
'grams of urm', 'gms. of uran', 'grams (100%)', 'grams v-235',
'se uo2 grams'
]
"""list: A list of fuel unit strings for grams."""
ferc1_kgU_strings = [ # noqa: N816 (U-ranium is capitalized...)
'kg of uranium', 'kg uranium', 'kilg. u-235', 'kg u-235', 'kilograms-u23',
'kg', 'kilograms u-2', 'kilograms', 'kg of', 'kg-u-235', 'kilgrams',
'kilogr. u235', 'uranium kg', 'kg uranium25', 'kilogr. u-235',
'kg uranium 25', 'kilgr. u-235', 'kguranium 25', 'kg-u235'
]
"""list: A list of fuel unit strings for thousand grams."""
ferc1_mmbtu_strings = ['mmbtu', 'mmbtus', 'mbtus', '(mmbtu)',
"mmbtu's", 'nuclear-mmbtu', 'nuclear-mmbt']
"""list: A list of fuel unit strings for million British Thermal Units."""
ferc1_mwdth_strings = \
['mwd therman', 'mw days-therm', 'mwd thrml', 'mwd thermal',
'mwd/mtu', 'mw days', 'mwdth', 'mwd', 'mw day', 'dth', 'mwdaysthermal',
     'mw day therml', 'mw days thrml', 'nuclear mwd', 'mmwd', 'mw day/therml',
     'mw days/therm', 'mw days (thermal)']
"""list: A list of fuel unit strings for megawatt days thermal."""
ferc1_mwhth_strings = ['mwh them', 'mwh threm', 'nwh therm', 'mwhth',
'mwh therm', 'mwh', 'mwh therms.', 'mwh term.uts',
'mwh thermal', 'mwh thermals', 'mw hr therm',
'mwh therma', 'mwh therm.uts']
"""list: A list of fuel unit strings for megawatt hours thermal."""
ferc1_fuel_unit_strings = {'ton': ferc1_ton_strings,
'mcf': ferc1_mcf_strings,
'bbl': ferc1_bbl_strings,
'gal': ferc1_gal_strings,
'1kgal': ferc1_1kgal_strings,
'gramsU': ferc1_gramsU_strings,
'kgU': ferc1_kgU_strings,
'mmbtu': ferc1_mmbtu_strings,
'mwdth': ferc1_mwdth_strings,
'mwhth': ferc1_mwhth_strings
}
"""
dict: A dictionary linking fuel units (keys) to lists of various strings
representing those fuel units (values)
"""
# Categorizing the strings from the FERC Form 1 Plant Kind (plant_kind) field
# into lists. There are many strings that weren't categorized,
# Solar and Solar Project were not classified as these do not indicate if they
# are solar thermal or photovoltaic. Variants on Steam (e.g. "steam 72" and
# "steam and gas") were classified based on additional research of the plants
# on the Internet.
ferc1_plant_kind_steam_turbine = [
'coal', 'steam', 'steam units 1 2 3', 'steam units 4 5',
'steam fossil', 'steam turbine', 'steam a', 'steam 100',
'steam units 1 2 3', 'steams', 'steam 1', 'steam retired 2013', 'stream',
'steam units 1,2,3', 'steam units 4&5', 'steam units 4&6',
'steam conventional', 'unit total-steam', 'unit total steam',
'*resp. share steam', 'resp. share steam', 'steam (see note 1,',
    'steam (see note 3)', 'mpc 50%share steam', '40% share steam',
'steam (2)', 'steam (3)', 'steam (4)', 'steam (5)', 'steam (6)',
'steam (7)', 'steam (8)', 'steam units 1 and 2', 'steam units 3 and 4',
'steam (note 1)', 'steam (retired)', 'steam (leased)', 'coal-fired steam',
'oil-fired steam', 'steam/fossil', 'steam (a,b)', 'steam (a)', 'stean',
'steam-internal comb', 'steam (see notes)', 'steam units 4 & 6',
    'resp share stm note3', 'mpc50% share steam', 'mpc40%share steam',
'steam - 64%', 'steam - 100%', 'steam (1) & (2)', 'resp share st note3',
'mpc 50% shares steam', 'steam-64%', 'steam-100%', 'steam (see note 1)',
'mpc 50% share steam', 'steam units 1, 2, 3', 'steam units 4, 5',
'steam (2)', 'steam (1)', 'steam 4, 5', 'steam - 72%', 'steam (incl i.c.)',
'steam- 72%', 'steam;retired - 2013', "respondent's sh.-st.",
"respondent's sh-st", '40% share steam', 'resp share stm note3',
'mpc50% share steam', 'resp share st note 3', '\x02steam (1)',
]
"""
list: A list of strings from FERC Form 1 for the steam turbine plant kind.
"""
ferc1_plant_kind_combustion_turbine = [
'combustion turbine', 'gt', 'gas turbine',
'gas turbine # 1', 'gas turbine', 'gas turbine (note 1)',
'gas turbines', 'simple cycle', 'combustion turbine',
'comb.turb.peak.units', 'gas turbine', 'combustion turbine',
'com turbine peaking', 'gas turbine peaking', 'comb turb peaking',
'combustine turbine', 'comb. turine', 'conbustion turbine',
'combustine turbine', 'gas turbine (leased)', 'combustion tubine',
'gas turb', 'gas turbine peaker', 'gtg/gas', 'simple cycle turbine',
'gas-turbine', 'gas turbine-simple', 'gas turbine - note 1',
'gas turbine #1', 'simple cycle', 'gasturbine', 'combustionturbine',
'gas turbine (2)', 'comb turb peak units', 'jet engine',
'jet powered turbine', '*gas turbine', 'gas turb.(see note5)',
'gas turb. (see note', 'combutsion turbine', 'combustion turbin',
'gas turbine-unit 2', 'gas - turbine', 'comb turbine peaking',
'gas expander turbine', 'jet turbine', 'gas turbin (lease',
'gas turbine (leased', 'gas turbine/int. cm', 'comb.turb-gas oper.',
'comb.turb.gas/oil op', 'comb.turb.oil oper.', 'jet', 'comb. turbine (a)',
'gas turb.(see notes)', 'gas turb(see notes)', 'comb. turb-gas oper',
'comb.turb.oil oper', 'gas turbin (leasd)', 'gas turbne/int comb',
'gas turbine (note1)', 'combution turbin', '* gas turbine',
'add to gas turbine', 'gas turbine (a)', 'gas turbinint comb',
'gas turbine (note 3)', 'resp share gas note3', 'gas trubine',
'*gas turbine(note3)', 'gas turbine note 3,6', 'gas turbine note 4,6',
'gas turbine peakload', 'combusition turbine', 'gas turbine (lease)',
'comb. turb-gas oper.', 'combution turbine', 'combusion turbine',
'comb. turb. oil oper', 'combustion burbine', 'combustion and gas',
'comb. turb.', 'gas turbine (lease', 'gas turbine (leasd)',
'gas turbine/int comb', '*gas turbine(note 3)', 'gas turbine (see nos',
'i.c.e./gas turbine', 'gas turbine/intcomb', 'cumbustion turbine',
'gas turb, int. comb.', 'gas turb, diesel', 'gas turb, int. comb',
'i.c.e/gas turbine', 'diesel turbine', 'comubstion turbine',
'i.c.e. /gas turbine', 'i.c.e/ gas turbine', 'i.c.e./gas tubine',
]
"""list: A list of strings from FERC Form 1 for the combustion turbine plant
kind.
"""
ferc1_plant_kind_combined_cycle = [
'Combined cycle', 'combined cycle', 'combined', 'gas & steam turbine',
'gas turb. & heat rec', 'combined cycle', 'com. cyc', 'com. cycle',
'gas turb-combined cy', 'combined cycle ctg', 'combined cycle - 40%',
'com cycle gas turb', 'combined cycle oper', 'gas turb/comb. cyc',
'combine cycle', 'cc', 'comb. cycle', 'gas turb-combined cy',
'steam and cc', 'steam cc', 'gas steam', 'ctg steam gas',
    'steam comb cycle', 'gas/steam comb. cycl', 'steam (comb. cycle)',
'gas turbine/steam', 'steam & gas turbine', 'gas trb & heat rec',
'steam & combined ce', 'st/gas turb comb cyc', 'gas tur & comb cycl',
'combined cycle (a,b)', 'gas turbine/ steam', 'steam/gas turb.',
'steam & comb cycle', 'gas/steam comb cycle', 'comb cycle (a,b)', 'igcc',
'steam/gas turbine', 'gas turbine / steam', 'gas tur & comb cyc',
'comb cyc (a) (b)', 'comb cycle', 'comb cyc', 'combined turbine',
'combine cycle oper', 'comb cycle/steam tur', 'cc / gas turb',
'steam (comb. cycle)', 'steam & cc', 'gas turbine/steam',
'gas turb/cumbus cycl', 'gas turb/comb cycle', 'gasturb/comb cycle',
'gas turb/cumb. cyc', 'igcc/gas turbine', 'gas / steam', 'ctg/steam-gas',
'ctg/steam -gas'
]
"""
list: A list of strings from FERC Form 1 for the combined cycle plant kind.
"""
ferc1_plant_kind_nuke = [
    'nuclear', 'nuclear (3)', 'steam(nuclear)', 'nuclear(see note4)',
'nuclear steam', 'nuclear turbine', 'nuclear - steam',
'nuclear (a)(b)(c)', 'nuclear (b)(c)', '* nuclear', 'nuclear (b) (c)',
'nuclear (see notes)', 'steam (nuclear)', '* nuclear (note 2)',
'nuclear (note 2)', 'nuclear (see note 2)', 'nuclear(see note4)',
'nuclear steam', 'nuclear(see notes)', 'nuclear-steam',
'nuclear (see note 3)'
]
"""list: A list of strings from FERC Form 1 for the nuclear plant kind."""
ferc1_plant_kind_geothermal = [
'steam - geothermal', 'steam_geothermal', 'geothermal'
]
"""list: A list of strings from FERC Form 1 for the geothermal plant kind."""
ferc_1_plant_kind_internal_combustion = [
    'ic', 'internal combustion', 'internal comb.', 'internl combustion',
'diesel turbine', 'int combust (note 1)', 'int. combust (note1)',
'int.combustine', 'comb. cyc', 'internal comb', 'diesel', 'diesel engine',
'internal combustion', 'int combust - note 1', 'int. combust - note1',
'internal comb recip', 'reciprocating engine', 'comb. turbine',
'internal combust.', 'int. combustion (1)', '*int combustion (1)',
"*internal combust'n", 'internal', 'internal comb.', 'steam internal comb',
'combustion', 'int. combustion', 'int combust (note1)', 'int. combustine',
'internl combustion', '*int. combustion (1)'
]
"""
list: A list of strings from FERC Form 1 for the internal combustion plant
kind.
"""
ferc1_plant_kind_wind = [
'wind', 'wind energy', 'wind turbine', 'wind - turbine', 'wind generation'
]
"""list: A list of strings from FERC Form 1 for the wind plant kind."""
ferc1_plant_kind_photovoltaic = [
'solar photovoltaic', 'photovoltaic', 'solar', 'solar project'
]
"""list: A list of strings from FERC Form 1 for the photovoltaic plant kind."""
ferc1_plant_kind_solar_thermal = ['solar thermal']
"""
list: A list of strings from FERC Form 1 for the solar thermal plant kind.
"""
# Making a dictionary of lists from the lists of plant_fuel strings to create
# a dictionary of plant fuel lists.
ferc1_plant_kind_strings = {
'steam': ferc1_plant_kind_steam_turbine,
'combustion_turbine': ferc1_plant_kind_combustion_turbine,
'combined_cycle': ferc1_plant_kind_combined_cycle,
'nuclear': ferc1_plant_kind_nuke,
'geothermal': ferc1_plant_kind_geothermal,
'internal_combustion': ferc_1_plant_kind_internal_combustion,
'wind': ferc1_plant_kind_wind,
'photovoltaic': ferc1_plant_kind_photovoltaic,
'solar_thermal': ferc1_plant_kind_solar_thermal
}
"""
dict: A dictionary of plant kinds (keys) and associated lists of plant_fuel
strings (values).
"""
# This is an alternative set of strings for simplifying the plant kind field
# from Uday & Laura at CPI. For the moment we have reverted to using our own
# categorizations which are more detailed, but these are preserved here for
# comparison and testing, if need be.
cpi_diesel_strings = ['DIESEL', 'Diesel Engine', 'Diesel Turbine', ]
"""
list: A list of strings for fuel type diesel compiled by Climate Policy
Initiative.
"""
cpi_geothermal_strings = ['Steam - Geothermal', ]
"""
list: A list of strings for fuel type geothermal compiled by Climate Policy
Initiative.
"""
cpi_natural_gas_strings = [
'Combined Cycle', 'Combustion Turbine', 'GT',
'GAS TURBINE', 'Comb. Turbine', 'Gas Turbine #1', 'Combine Cycle Oper',
'Combustion', 'Combined', 'Gas Turbine/Steam', 'Gas Turbine Peaker',
'Gas Turbine - Note 1', 'Resp Share Gas Note3', 'Gas Turbines',
'Simple Cycle', 'Gas / Steam', 'GasTurbine', 'Combine Cycle',
'CTG/Steam-Gas', 'GTG/Gas', 'CTG/Steam -Gas', 'Steam/Gas Turbine',
'CombustionTurbine', 'Gas Turbine-Simple', 'STEAM & GAS TURBINE',
'Gas & Steam Turbine', 'Gas', 'Gas Turbine (2)', 'COMBUSTION AND GAS',
'Com Turbine Peaking', 'Gas Turbine Peaking', 'Comb Turb Peaking',
'JET ENGINE', 'Comb. Cyc', 'Com. Cyc', 'Com. Cycle',
'GAS TURB-COMBINED CY', 'Gas Turb', 'Combined Cycle - 40%',
'IGCC/Gas Turbine', 'CC', 'Combined Cycle Oper', 'Simple Cycle Turbine',
'Steam and CC', 'Com Cycle Gas Turb', 'I.C.E/ Gas Turbine',
'Combined Cycle CTG', 'GAS-TURBINE', 'Gas Expander Turbine',
'Gas Turbine (Leased)', 'Gas Turbine # 1', 'Gas Turbine (Note 1)',
'COMBUSTINE TURBINE', 'Gas Turb, Int. Comb.', 'Combined Turbine',
'Comb Turb Peak Units', 'Combustion Tubine', 'Comb. Cycle',
'COMB.TURB.PEAK.UNITS', 'Steam and CC', 'I.C.E. /Gas Turbine',
'Conbustion Turbine', 'Gas Turbine/Int Comb', 'Steam & CC',
'GAS TURB. & HEAT REC', 'Gas Turb/Comb. Cyc', 'Comb. Turine',
]
"""list: A list of strings for fuel type gas compiled by Climate Policy
Initiative.
"""
cpi_nuclear_strings = ['Nuclear', 'Nuclear (3)', ]
"""list: A list of strings for fuel type nuclear compiled by Climate Policy
Initiative.
"""
cpi_other_strings = [
'IC', 'Internal Combustion', 'Int Combust - Note 1',
'Resp. Share - Note 2', 'Int. Combust - Note1', 'Resp. Share - Note 4',
'Resp Share - Note 5', 'Resp. Share - Note 7', 'Internal Comb Recip',
'Reciprocating Engine', 'Internal Comb', 'Resp. Share - Note 8',
'Resp. Share - Note 9', 'Resp Share - Note 11', 'Resp. Share - Note 6',
'INT.COMBUSTINE', 'Steam (Incl I.C.)', 'Other', 'Int Combust (Note 1)',
'Resp. Share (Note 2)', 'Int. Combust (Note1)', 'Resp. Share (Note 8)',
'Resp. Share (Note 9)', 'Resp Share (Note 11)', 'Resp. Share (Note 4)',
'Resp. Share (Note 6)', 'Plant retired- 2013', 'Retired - 2013',
]
"""list: A list of strings for fuel type other compiled by Climate Policy
Initiative.
"""
cpi_steam_strings = [
'Steam', 'Steam Units 1, 2, 3', 'Resp Share St Note 3',
'Steam Turbine', 'Steam-Internal Comb', 'IGCC', 'Steam- 72%', 'Steam (1)',
'Steam (1)', 'Steam Units 1,2,3', 'Steam/Fossil', 'Steams', 'Steam - 72%',
'Steam - 100%', 'Stream', 'Steam Units 4, 5', 'Steam - 64%', 'Common',
'Steam (A)', 'Coal', 'Steam;Retired - 2013', 'Steam Units 4 & 6',
]
"""list: A list of strings for fuel type steam compiled by Climate Policy
Initiative.
"""
cpi_wind_strings = ['Wind', 'Wind Turbine', 'Wind - Turbine', 'Wind Energy', ]
"""list: A list of strings for fuel type wind compiled by Climate Policy
Initiative.
"""
cpi_solar_strings = [
'Solar Photovoltaic', 'Solar Thermal', 'SOLAR PROJECT', 'Solar',
'Photovoltaic',
]
"""list: A list of strings for fuel type photovoltaic compiled by Climate
Policy Initiative.
"""
cpi_plant_kind_strings = {
'natural_gas': cpi_natural_gas_strings,
'diesel': cpi_diesel_strings,
'geothermal': cpi_geothermal_strings,
'nuclear': cpi_nuclear_strings,
'steam': cpi_steam_strings,
'wind': cpi_wind_strings,
'solar': cpi_solar_strings,
'other': cpi_other_strings,
}
"""dict: A dictionary linking fuel types (keys) to lists of strings associated
by Climate Policy Institute with those fuel types (values).
"""
# Categorizing the strings from the FERC Form 1 Type of Plant Construction
# (construction_type) field into lists.
# There are many strings that weren't categorized, including crosses between
# conventional and outdoor, PV, wind, combined cycle, and internal combustion.
# The lists are broken out into the two types specified in Form 1:
# conventional and outdoor. These lists are inclusive so that variants of
# conventional (e.g. "conventional full") and outdoor (e.g. "outdoor full"
# and "outdoor hrsg") are included.
ferc1_const_type_outdoor = [
'outdoor', 'outdoor boiler', 'full outdoor', 'outdoor boiler',
'outdoor boilers', 'outboilers', 'fuel outdoor', 'full outdoor',
'outdoors', 'outdoor', 'boiler outdoor& full', 'boiler outdoor&full',
'outdoor boiler& full', 'full -outdoor', 'outdoor steam',
'outdoor boiler', 'ob', 'outdoor automatic', 'outdoor repower',
'full outdoor boiler', 'fo', 'outdoor boiler & ful', 'full-outdoor',
'fuel outdoor', 'outoor', 'outdoor', 'outdoor boiler&full',
'boiler outdoor &full', 'outdoor boiler &full', 'boiler outdoor & ful',
'outdoor-boiler', 'outdoor - boiler', 'outdoor const.',
'4 outdoor boilers', '3 outdoor boilers', 'full outdoor', 'full outdoors',
'full oudoors', 'outdoor (auto oper)', 'outside boiler',
'outdoor boiler&full', 'outdoor hrsg', 'outdoor hrsg',
'outdoor-steel encl.', 'boiler-outdr & full',
'con.& full outdoor', 'partial outdoor', 'outdoor (auto. oper)',
'outdoor (auto.oper)', 'outdoor construction', '1 outdoor boiler',
'2 outdoor boilers', 'outdoor enclosure', '2 outoor boilers',
'boiler outdr.& full', 'boiler outdr. & full', 'ful outdoor',
'outdoor-steel enclos', 'outdoor (auto oper.)', 'con. & full outdoor',
'outdore', 'boiler & full outdor', 'full & outdr boilers',
'outodoor (auto oper)', 'outdoor steel encl.', 'full outoor',
'boiler & outdoor ful', 'otdr. blr. & f. otdr', 'f.otdr & otdr.blr.',
'oudoor (auto oper)', 'outdoor constructin', 'f. otdr. & otdr. blr',
]
"""list: A list of strings from FERC Form 1 associated with the outdoor
construction type.
"""
ferc1_const_type_semioutdoor = [
'more than 50% outdoo', 'more than 50% outdos', 'over 50% outdoor',
'over 50% outdoors', 'semi-outdoor', 'semi - outdoor', 'semi outdoor',
'semi-enclosed', 'semi-outdoor boiler', 'semi outdoor boiler',
    'semi- outdoor', 'semi - outdoors', 'semi -outdoor',
'conven & semi-outdr', 'conv & semi-outdoor', 'conv & semi- outdoor',
'convent. semi-outdr', 'conv. semi outdoor', 'conv(u1)/semiod(u2)',
'conv u1/semi-od u2', 'conv-one blr-semi-od', 'convent semioutdoor',
'conv. u1/semi-od u2', 'conv - 1 blr semi od', 'conv. ui/semi-od u2',
'conv-1 blr semi-od', 'conven. semi-outdoor', 'conv semi-outdoor',
'u1-conv./u2-semi-od', 'u1-conv./u2-semi -od', 'convent. semi-outdoo',
'u1-conv. / u2-semi', 'conven & semi-outdr', 'semi -outdoor',
'outdr & conventnl', 'conven. full outdoor', 'conv. & outdoor blr',
'conv. & outdoor blr.', 'conv. & outdoor boil', 'conv. & outdr boiler',
'conv. & out. boiler', 'convntl,outdoor blr', 'outdoor & conv.',
'2 conv., 1 out. boil', 'outdoor/conventional', 'conv. boiler outdoor',
'conv-one boiler-outd', 'conventional outdoor', 'conventional outdor',
'conv. outdoor boiler', 'conv.outdoor boiler', 'conventional outdr.',
'conven,outdoorboiler', 'conven full outdoor', 'conven,full outdoor',
'1 out boil, 2 conv', 'conv. & full outdoor', 'conv. & outdr. boilr',
'conv outdoor boiler', 'convention. outdoor', 'conv. sem. outdoor',
'convntl, outdoor blr', 'conv & outdoor boil', 'conv & outdoor boil.',
'outdoor & conv', 'conv. broiler outdor', '1 out boilr, 2 conv',
'conv.& outdoor boil.', 'conven,outdr.boiler', 'conven,outdr boiler',
'outdoor & conventil', '1 out boilr 2 conv', 'conv & outdr. boilr',
'conven, full outdoor', 'conven full outdr.', 'conven, full outdr.',
'conv/outdoor boiler', "convnt'l outdr boilr", '1 out boil 2 conv',
'conv full outdoor', 'conven, outdr boiler', 'conventional/outdoor',
'conv&outdoor boiler', 'outdoor & convention', 'conv & outdoor boilr',
'conv & full outdoor', 'convntl. outdoor blr', 'conv - ob',
"1conv'l/2odboilers", "2conv'l/1odboiler", 'conv-ob', 'conv.-ob',
'1 conv/ 2odboilers', '2 conv /1 odboilers', 'conv- ob', 'conv -ob',
'con sem outdoor', 'cnvntl, outdr, boilr', 'less than 50% outdoo',
'under 50% outdoor', 'under 50% outdoors', '1cnvntnl/2odboilers',
'2cnvntnl1/1odboiler', 'con & ob', 'combination (b)', 'indoor & outdoor',
'conven. blr. & full', 'conv. & otdr. blr.', 'combination',
'indoor and outdoor', 'conven boiler & full', "2conv'l/10dboiler",
'4 indor/outdr boiler', '4 indr/outdr boilerr', '4 indr/outdr boiler',
'indoor & outdoof',
]
"""list: A list of strings from FERC Form 1 associated with the semi - outdoor
construction type, or a mix of conventional and outdoor construction.
"""
ferc1_const_type_conventional = [
'conventional', 'conventional', 'conventional boiler', 'conv-b',
'conventionall', 'convention', 'conventional', 'coventional',
    'conven full boiler', 'c0nventional', 'conventtional', 'convential',
'underground', 'conventional bulb', 'conventrional',
'*conventional', 'convential', 'convetional', 'conventioanl',
'conventioinal', 'conventaional', 'indoor construction', 'convenional',
'conventional steam', 'conventinal', 'convntional', 'conventionl',
'conventionsl', 'conventiional', 'convntl steam plants', 'indoor const.',
'full indoor', 'indoor', 'indoor automatic', 'indoor boiler',
'(peak load) indoor', 'conventionl,indoor', 'conventionl, indoor',
'conventional, indoor', 'comb. cycle indoor', '3 indoor boiler',
'2 indoor boilers', '1 indoor boiler', '2 indoor boiler',
'3 indoor boilers', 'fully contained', 'conv - b', 'conventional/boiler',
'cnventional', 'comb. cycle indooor', 'sonventional',
]
"""list: A list of strings from FERC Form 1 associated with the conventional
construction type.
"""
# Dictionary mapping each construction type category to its list of
# associated raw strings from above.
ferc1_const_type_strings = {
'outdoor': ferc1_const_type_outdoor,
'semioutdoor': ferc1_const_type_semioutdoor,
'conventional': ferc1_const_type_conventional,
}
"""dict: A dictionary of construction types (keys) and lists of construction
type strings associated with each type (values) from FERC Form 1.
"""
ferc1_power_purchase_type = {
'RQ': 'requirement',
'LF': 'long_firm',
'IF': 'intermediate_firm',
'SF': 'short_firm',
'LU': 'long_unit',
'IU': 'intermediate_unit',
'EX': 'electricity_exchange',
'OS': 'other_service',
'AD': 'adjustment'
}
"""dict: A dictionary of abbreviations (keys) and types (values) for power
purchase agreements from FERC Form 1.
"""
# Dictionary mapping DBF files (w/o .DBF file extension) to DB table names
ferc1_dbf2tbl = {
'F1_1': 'f1_respondent_id',
'F1_2': 'f1_acb_epda',
'F1_3': 'f1_accumdepr_prvsn',
'F1_4': 'f1_accumdfrrdtaxcr',
'F1_5': 'f1_adit_190_detail',
'F1_6': 'f1_adit_190_notes',
'F1_7': 'f1_adit_amrt_prop',
'F1_8': 'f1_adit_other',
'F1_9': 'f1_adit_other_prop',
'F1_10': 'f1_allowances',
'F1_11': 'f1_bal_sheet_cr',
'F1_12': 'f1_capital_stock',
'F1_13': 'f1_cash_flow',
'F1_14': 'f1_cmmn_utlty_p_e',
'F1_15': 'f1_comp_balance_db',
'F1_16': 'f1_construction',
'F1_17': 'f1_control_respdnt',
'F1_18': 'f1_co_directors',
'F1_19': 'f1_cptl_stk_expns',
'F1_20': 'f1_csscslc_pcsircs',
'F1_21': 'f1_dacs_epda',
'F1_22': 'f1_dscnt_cptl_stk',
'F1_23': 'f1_edcfu_epda',
'F1_24': 'f1_elctrc_erg_acct',
'F1_25': 'f1_elctrc_oper_rev',
'F1_26': 'f1_elc_oper_rev_nb',
'F1_27': 'f1_elc_op_mnt_expn',
'F1_28': 'f1_electric',
'F1_29': 'f1_envrnmntl_expns',
'F1_30': 'f1_envrnmntl_fclty',
'F1_31': 'f1_fuel',
'F1_32': 'f1_general_info',
'F1_33': 'f1_gnrt_plant',
'F1_34': 'f1_important_chg',
'F1_35': 'f1_incm_stmnt_2',
'F1_36': 'f1_income_stmnt',
'F1_37': 'f1_miscgen_expnelc',
'F1_38': 'f1_misc_dfrrd_dr',
'F1_39': 'f1_mthly_peak_otpt',
'F1_40': 'f1_mtrl_spply',
'F1_41': 'f1_nbr_elc_deptemp',
'F1_42': 'f1_nonutility_prop',
'F1_43': 'f1_note_fin_stmnt', # 37% of DB
'F1_44': 'f1_nuclear_fuel',
'F1_45': 'f1_officers_co',
'F1_46': 'f1_othr_dfrrd_cr',
'F1_47': 'f1_othr_pd_in_cptl',
'F1_48': 'f1_othr_reg_assets',
'F1_49': 'f1_othr_reg_liab',
'F1_50': 'f1_overhead',
'F1_51': 'f1_pccidica',
'F1_52': 'f1_plant_in_srvce',
'F1_53': 'f1_pumped_storage',
'F1_54': 'f1_purchased_pwr',
'F1_55': 'f1_reconrpt_netinc',
'F1_56': 'f1_reg_comm_expn',
'F1_57': 'f1_respdnt_control',
'F1_58': 'f1_retained_erng',
'F1_59': 'f1_r_d_demo_actvty',
'F1_60': 'f1_sales_by_sched',
'F1_61': 'f1_sale_for_resale',
'F1_62': 'f1_sbsdry_totals',
'F1_63': 'f1_schedules_list',
'F1_64': 'f1_security_holder',
'F1_65': 'f1_slry_wg_dstrbtn',
'F1_66': 'f1_substations',
'F1_67': 'f1_taxacc_ppchrgyr',
'F1_68': 'f1_unrcvrd_cost',
'F1_69': 'f1_utltyplnt_smmry',
'F1_70': 'f1_work',
'F1_71': 'f1_xmssn_adds',
'F1_72': 'f1_xmssn_elc_bothr',
'F1_73': 'f1_xmssn_elc_fothr',
'F1_74': 'f1_xmssn_line',
'F1_75': 'f1_xtraordnry_loss',
'F1_76': 'f1_codes_val',
'F1_77': 'f1_sched_lit_tbl',
'F1_78': 'f1_audit_log',
'F1_79': 'f1_col_lit_tbl',
'F1_80': 'f1_load_file_names',
'F1_81': 'f1_privilege',
'F1_82': 'f1_sys_error_log',
'F1_83': 'f1_unique_num_val',
'F1_84': 'f1_row_lit_tbl',
'F1_85': 'f1_footnote_data',
'F1_86': 'f1_hydro',
'F1_87': 'f1_footnote_tbl', # 52% of DB
'F1_88': 'f1_ident_attsttn',
'F1_89': 'f1_steam',
'F1_90': 'f1_leased',
'F1_91': 'f1_sbsdry_detail',
'F1_92': 'f1_plant',
'F1_93': 'f1_long_term_debt',
'F1_106_2009': 'f1_106_2009',
'F1_106A_2009': 'f1_106a_2009',
'F1_106B_2009': 'f1_106b_2009',
'F1_208_ELC_DEP': 'f1_208_elc_dep',
'F1_231_TRN_STDYCST': 'f1_231_trn_stdycst',
'F1_324_ELC_EXPNS': 'f1_324_elc_expns',
'F1_325_ELC_CUST': 'f1_325_elc_cust',
'F1_331_TRANSISO': 'f1_331_transiso',
'F1_338_DEP_DEPL': 'f1_338_dep_depl',
'F1_397_ISORTO_STL': 'f1_397_isorto_stl',
'F1_398_ANCL_PS': 'f1_398_ancl_ps',
'F1_399_MTH_PEAK': 'f1_399_mth_peak',
'F1_400_SYS_PEAK': 'f1_400_sys_peak',
'F1_400A_ISO_PEAK': 'f1_400a_iso_peak',
'F1_429_TRANS_AFF': 'f1_429_trans_aff',
'F1_ALLOWANCES_NOX': 'f1_allowances_nox',
'F1_CMPINC_HEDGE_A': 'f1_cmpinc_hedge_a',
'F1_CMPINC_HEDGE': 'f1_cmpinc_hedge',
'F1_EMAIL': 'f1_email',
'F1_RG_TRN_SRV_REV': 'f1_rg_trn_srv_rev',
'F1_S0_CHECKS': 'f1_s0_checks',
'F1_S0_FILING_LOG': 'f1_s0_filing_log',
'F1_SECURITY': 'f1_security'
# 'F1_PINS': 'f1_pins', # private data, not publicized.
# 'F1_FREEZE': 'f1_freeze', # private data, not publicized
}
"""dict: A dictionary mapping FERC Form 1 DBF files(w / o .DBF file extension)
(keys) to database table names (values).
"""
ferc1_huge_tables = {
'f1_footnote_tbl',
'f1_footnote_data',
'f1_note_fin_stmnt',
}
"""set: A set containing large FERC Form 1 tables.
"""
# Invert the map above so we can go either way as needed
ferc1_tbl2dbf = {v: k for k, v in ferc1_dbf2tbl.items()}
"""dict: A dictionary mapping database table names (keys) to FERC Form 1 DBF
files(w / o .DBF file extension) (values).
"""
# This dictionary maps the strings which are used to denote field types in the
# DBF objects to the corresponding generic SQLAlchemy Column types:
# These definitions come from a combination of the dbfread example program
# dbf2sqlite and this DBF file format documentation page:
# http://www.dbase.com/KnowledgeBase/int/db7_file_fmt.htm
# Un-mapped types left as 'XXX' which should obviously make an error...
dbf_typemap = {
'C': sa.String,
'D': sa.Date,
'F': sa.Float,
'I': sa.Integer,
'L': sa.Boolean,
'M': sa.Text, # 10 digit .DBT block number, stored as a string...
'N': sa.Float,
'T': sa.DateTime,
'0': sa.Integer, # based on dbf2sqlite mapping
'B': 'XXX', # .DBT block number, binary string
'@': 'XXX', # Timestamp... Date = Julian Day, Time is in milliseconds?
'+': 'XXX', # Autoincrement (e.g. for IDs)
'O': 'XXX', # Double, 8 bytes
'G': 'XXX', # OLE 10 digit/byte number of a .DBT block, stored as string
}
"""dict: A dictionary mapping field types in the DBF objects (keys) to the
corresponding generic SQLAlchemy Column types.
"""
# This is the set of tables which have been successfully integrated into PUDL:
ferc1_pudl_tables = (
'fuel_ferc1', # Plant-level data, linked to plants_steam_ferc1
'plants_steam_ferc1', # Plant-level data
'plants_small_ferc1', # Plant-level data
'plants_hydro_ferc1', # Plant-level data
'plants_pumped_storage_ferc1', # Plant-level data
'purchased_power_ferc1', # Inter-utility electricity transactions
'plant_in_service_ferc1', # Row-mapped plant accounting data.
# 'accumulated_depreciation_ferc1' # Requires row-mapping to be useful.
)
"""tuple: A tuple containing the FERC Form 1 tables that can be successfully
integrated into PUDL.
"""
ferc714_pudl_tables = (
"respondent_id_ferc714",
"id_certification_ferc714",
"gen_plants_ba_ferc714",
"demand_monthly_ba_ferc714",
"net_energy_load_ba_ferc714",
"adjacency_ba_ferc714",
"interchange_ba_ferc714",
"lambda_hourly_ba_ferc714",
"lambda_description_ferc714",
"description_pa_ferc714",
"demand_forecast_pa_ferc714",
"demand_hourly_pa_ferc714",
)
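"""tuple: A tuple containing the FERC Form 714 tables that can be pulled into
PUDL.
"""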
table_map_ferc1_pudl = {
'fuel_ferc1': 'f1_fuel',
'plants_steam_ferc1': 'f1_steam',
'plants_small_ferc1': 'f1_gnrt_plant',
'plants_hydro_ferc1': 'f1_hydro',
'plants_pumped_storage_ferc1': 'f1_pumped_storage',
'plant_in_service_ferc1': 'f1_plant_in_srvce',
'purchased_power_ferc1': 'f1_purchased_pwr',
# 'accumulated_depreciation_ferc1': 'f1_accumdepr_prvsn'
}
"""dict: A dictionary mapping PUDL table names (keys) to the corresponding FERC
Form 1 DBF table names.
"""
# This is the list of EIA923 tables that can be successfully pulled into PUDL
eia923_pudl_tables = ('generation_fuel_eia923',
'boiler_fuel_eia923',
'generation_eia923',
'coalmine_eia923',
'fuel_receipts_costs_eia923')
"""tuple: A tuple containing the EIA923 tables that can be successfully
integrated into PUDL.
"""
epaipm_pudl_tables = (
'transmission_single_epaipm',
'transmission_joint_epaipm',
'load_curves_epaipm',
'plant_region_map_epaipm',
)
"""tuple: A tuple containing the EPA IPM tables that can be successfully
integrated into PUDL.
"""
# List of entity tables
entity_tables = ['utilities_entity_eia',
'plants_entity_eia',
'generators_entity_eia',
'boilers_entity_eia',
'regions_entity_epaipm', ]
"""list: A list of PUDL entity tables.
"""
xlsx_maps_pkg = 'pudl.package_data.meta.xlsx_maps'
"""string: The location of the xlsx maps within the PUDL package data.
"""
##############################################################################
# EIA 923 Spreadsheet Metadata
##############################################################################
# patterns for matching columns to months:
month_dict_eia923 = {1: '_january$',
2: '_february$',
3: '_march$',
4: '_april$',
5: '_may$',
6: '_june$',
7: '_july$',
8: '_august$',
9: '_september$',
10: '_october$',
11: '_november$',
12: '_december$'}
"""dict: A dictionary mapping column numbers (keys) to months (values).
"""
##############################################################################
# EIA 860 Spreadsheet Metadata
##############################################################################
# This is the list of EIA860 tables that can be successfully pulled into PUDL
eia860_pudl_tables = (
'boiler_generator_assn_eia860',
'utilities_eia860',
'plants_eia860',
'generators_eia860',
'ownership_eia860'
)
"""tuple: A tuple containing the list of EIA 860 tables that can be
successfully pulled into PUDL.
"""
eia861_pudl_tables = (
"service_territory_eia861",
)
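"""tuple: A tuple containing the EIA 861 tables that can be successfully
integrated into PUDL.
"""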
# The set of FERC Form 1 tables that have the same composite primary keys: [
# respondent_id, report_year, report_prd, row_number, spplmnt_num ].
# TODO: THIS ONLY PERTAINS TO 2015 AND MAY NEED TO BE ADJUSTED BY YEAR...
ferc1_data_tables = (
'f1_acb_epda', 'f1_accumdepr_prvsn', 'f1_accumdfrrdtaxcr',
'f1_adit_190_detail', 'f1_adit_190_notes', 'f1_adit_amrt_prop',
'f1_adit_other', 'f1_adit_other_prop', 'f1_allowances', 'f1_bal_sheet_cr',
'f1_capital_stock', 'f1_cash_flow', 'f1_cmmn_utlty_p_e',
'f1_comp_balance_db', 'f1_construction', 'f1_control_respdnt',
'f1_co_directors', 'f1_cptl_stk_expns', 'f1_csscslc_pcsircs',
'f1_dacs_epda', 'f1_dscnt_cptl_stk', 'f1_edcfu_epda', 'f1_elctrc_erg_acct',
'f1_elctrc_oper_rev', 'f1_elc_oper_rev_nb', 'f1_elc_op_mnt_expn',
'f1_electric', 'f1_envrnmntl_expns', 'f1_envrnmntl_fclty', 'f1_fuel',
'f1_general_info', 'f1_gnrt_plant', 'f1_important_chg', 'f1_incm_stmnt_2',
'f1_income_stmnt', 'f1_miscgen_expnelc', 'f1_misc_dfrrd_dr',
'f1_mthly_peak_otpt', 'f1_mtrl_spply', 'f1_nbr_elc_deptemp',
'f1_nonutility_prop', 'f1_note_fin_stmnt', 'f1_nuclear_fuel',
'f1_officers_co', 'f1_othr_dfrrd_cr', 'f1_othr_pd_in_cptl',
'f1_othr_reg_assets', 'f1_othr_reg_liab', 'f1_overhead', 'f1_pccidica',
'f1_plant_in_srvce', 'f1_pumped_storage', 'f1_purchased_pwr',
'f1_reconrpt_netinc', 'f1_reg_comm_expn', 'f1_respdnt_control',
'f1_retained_erng', 'f1_r_d_demo_actvty', 'f1_sales_by_sched',
'f1_sale_for_resale', 'f1_sbsdry_totals', 'f1_schedules_list',
'f1_security_holder', 'f1_slry_wg_dstrbtn', 'f1_substations',
'f1_taxacc_ppchrgyr', 'f1_unrcvrd_cost', 'f1_utltyplnt_smmry', 'f1_work',
'f1_xmssn_adds', 'f1_xmssn_elc_bothr', 'f1_xmssn_elc_fothr',
'f1_xmssn_line', 'f1_xtraordnry_loss',
'f1_hydro', 'f1_steam', 'f1_leased', 'f1_sbsdry_detail',
'f1_plant', 'f1_long_term_debt', 'f1_106_2009', 'f1_106a_2009',
'f1_106b_2009', 'f1_208_elc_dep', 'f1_231_trn_stdycst', 'f1_324_elc_expns',
'f1_325_elc_cust', 'f1_331_transiso', 'f1_338_dep_depl',
'f1_397_isorto_stl', 'f1_398_ancl_ps', 'f1_399_mth_peak',
'f1_400_sys_peak', 'f1_400a_iso_peak', 'f1_429_trans_aff',
'f1_allowances_nox', 'f1_cmpinc_hedge_a', 'f1_cmpinc_hedge',
'f1_rg_trn_srv_rev')
"""tuple: A tuple containing the FERC Form 1 tables that have the same composite
primary keys: [respondent_id, report_year, report_prd, row_number,
spplmnt_num].
"""
# Line numbers, and corresponding FERC account number
# from FERC Form 1 pages 204-207, Electric Plant in Service.
# Descriptions from: https://www.law.cornell.edu/cfr/text/18/part-101
ferc_electric_plant_accounts = pd.DataFrame.from_records([
# 1. Intangible Plant
(2, '301', 'Intangible: Organization'),
(3, '302', 'Intangible: Franchises and consents'),
(4, '303', 'Intangible: Miscellaneous intangible plant'),
(5, 'subtotal_intangible', 'Subtotal: Intangible Plant'),
# 2. Production Plant
# A. steam production
(8, '310', 'Steam production: Land and land rights'),
(9, '311', 'Steam production: Structures and improvements'),
(10, '312', 'Steam production: Boiler plant equipment'),
(11, '313', 'Steam production: Engines and engine-driven generators'),
(12, '314', 'Steam production: Turbogenerator units'),
(13, '315', 'Steam production: Accessory electric equipment'),
(14, '316', 'Steam production: Miscellaneous power plant equipment'),
(15, '317', 'Steam production: Asset retirement costs for steam production\
plant'),
(16, 'subtotal_steam_production', 'Subtotal: Steam Production Plant'),
# B. nuclear production
(18, '320', 'Nuclear production: Land and land rights (Major only)'),
(19, '321', 'Nuclear production: Structures and improvements (Major\
only)'),
(20, '322', 'Nuclear production: Reactor plant equipment (Major only)'),
(21, '323', 'Nuclear production: Turbogenerator units (Major only)'),
(22, '324', 'Nuclear production: Accessory electric equipment (Major\
only)'),
(23, '325', 'Nuclear production: Miscellaneous power plant equipment\
(Major only)'),
(24, '326', 'Nuclear production: Asset retirement costs for nuclear\
production plant (Major only)'),
(25, 'subtotal_nuclear_produciton', 'Subtotal: Nuclear Production Plant'),
# C. hydraulic production
(27, '330', 'Hydraulic production: Land and land rights'),
(28, '331', 'Hydraulic production: Structures and improvements'),
(29, '332', 'Hydraulic production: Reservoirs, dams, and waterways'),
(30, '333', 'Hydraulic production: Water wheels, turbines and generators'),
(31, '334', 'Hydraulic production: Accessory electric equipment'),
(32, '335', 'Hydraulic production: Miscellaneous power plant equipment'),
(33, '336', 'Hydraulic production: Roads, railroads and bridges'),
(34, '337', 'Hydraulic production: Asset retirement costs for hydraulic\
production plant'),
(35, 'subtotal_hydraulic_production', 'Subtotal: Hydraulic Production\
Plant'),
# D. other production
(37, '340', 'Other production: Land and land rights'),
(38, '341', 'Other production: Structures and improvements'),
(39, '342', 'Other production: Fuel holders, producers, and accessories'),
(40, '343', 'Other production: Prime movers'),
(41, '344', 'Other production: Generators'),
(42, '345', 'Other production: Accessory electric equipment'),
(43, '346', 'Other production: Miscellaneous power plant equipment'),
(44, '347', 'Other production: Asset retirement costs for other production\
plant'),
(None, '348', 'Other production: Energy Storage Equipment'),
(45, 'subtotal_other_production', 'Subtotal: Other Production Plant'),
(46, 'subtotal_production', 'Subtotal: Production Plant'),
# 3. Transmission Plant,
(48, '350', 'Transmission: Land and land rights'),
(None, '351', 'Transmission: Energy Storage Equipment'),
(49, '352', 'Transmission: Structures and improvements'),
(50, '353', 'Transmission: Station equipment'),
(51, '354', 'Transmission: Towers and fixtures'),
(52, '355', 'Transmission: Poles and fixtures'),
(53, '356', 'Transmission: Overhead conductors and devices'),
(54, '357', 'Transmission: Underground conduit'),
(55, '358', 'Transmission: Underground conductors and devices'),
(56, '359', 'Transmission: Roads and trails'),
(57, '359.1', 'Transmission: Asset retirement costs for transmission\
plant'),
(58, 'subtotal_transmission', 'Subtotal: Transmission Plant'),
# 4. Distribution Plant
(60, '360', 'Distribution: Land and land rights'),
(61, '361', 'Distribution: Structures and improvements'),
(62, '362', 'Distribution: Station equipment'),
(63, '363', 'Distribution: Storage battery equipment'),
(64, '364', 'Distribution: Poles, towers and fixtures'),
(65, '365', 'Distribution: Overhead conductors and devices'),
(66, '366', 'Distribution: Underground conduit'),
(67, '367', 'Distribution: Underground conductors and devices'),
(68, '368', 'Distribution: Line transformers'),
(69, '369', 'Distribution: Services'),
(70, '370', 'Distribution: Meters'),
(71, '371', 'Distribution: Installations on customers\' premises'),
(72, '372', 'Distribution: Leased property on customers\' premises'),
(73, '373', 'Distribution: Street lighting and signal systems'),
(74, '374', 'Distribution: Asset retirement costs for distribution plant'),
(75, 'subtotal_distribution', 'Subtotal: Distribution Plant'),
# 5. Regional Transmission and Market Operation Plant
(77, '380', 'Regional transmission: Land and land rights'),
(78, '381', 'Regional transmission: Structures and improvements'),
(79, '382', 'Regional transmission: Computer hardware'),
(80, '383', 'Regional transmission: Computer software'),
(81, '384', 'Regional transmission: Communication Equipment'),
(82, '385', 'Regional transmission: Miscellaneous Regional Transmission\
and Market Operation Plant'),
(83, '386', 'Regional transmission: Asset Retirement Costs for Regional\
Transmission and Market Operation\
Plant'),
(84, 'subtotal_regional_transmission', 'Subtotal: Transmission and Market\
Operation Plant'),
(None, '387', 'Regional transmission: [Reserved]'),
# 6. General Plant
(86, '389', 'General: Land and land rights'),
(87, '390', 'General: Structures and improvements'),
(88, '391', 'General: Office furniture and equipment'),
(89, '392', 'General: Transportation equipment'),
(90, '393', 'General: Stores equipment'),
(91, '394', 'General: Tools, shop and garage equipment'),
(92, '395', 'General: Laboratory equipment'),
(93, '396', 'General: Power operated equipment'),
(94, '397', 'General: Communication equipment'),
(95, '398', 'General: Miscellaneous equipment'),
(96, 'subtotal_general', 'Subtotal: General Plant'),
(97, '399', 'General: Other tangible property'),
(98, '399.1', 'General: Asset retirement costs for general plant'),
(99, 'total_general', 'TOTAL General Plant'),
(100, '101_and_106', 'Electric plant in service (Major only)'),
(101, '102_purchased', 'Electric plant purchased'),
(102, '102_sold', 'Electric plant sold'),
(103, '103', 'Experimental plant unclassified'),
(104, 'total_electric_plant', 'TOTAL Electric Plant in Service')],
columns=['row_number', 'ferc_account_id', 'ferc_account_description'])
"""list: A list of tuples containing row numbers, FERC account IDs, and FERC
account descriptions from FERC Form 1 pages 204 - 207, Electric Plant in
Service.
"""
# Line numbers, and corresponding FERC account number
# from FERC Form 1 page 219, ACCUMULATED PROVISION FOR DEPRECIATION
# OF ELECTRIC UTILITY PLANT (Account 108).
ferc_accumulated_depreciation = pd.DataFrame.from_records([
# Section A. Balances and Changes During Year
(1, 'balance_beginning_of_year', 'Balance Beginning of Year'),
(3, 'depreciation_expense', '(403) Depreciation Expense'),
(4, 'depreciation_expense_asset_retirement', \
'(403.1) Depreciation Expense for Asset Retirement Costs'),
(5, 'expense_electric_plant_leased_to_others', \
'(413) Exp. of Elec. Plt. Leas. to Others'),
(6, 'transportation_expenses_clearing',\
'Transportation Expenses-Clearing'),
(7, 'other_clearing_accounts', 'Other Clearing Accounts'),
(8, 'other_accounts_specified',\
'Other Accounts (Specify, details in footnote):'),
# blank: might also be other charges like line 17.
(9, 'other_charges', 'Other Charges:'),
(10, 'total_depreciation_provision_for_year',\
'TOTAL Deprec. Prov for Year (Enter Total of lines 3 thru 9)'),
(11, 'net_charges_for_plant_retired', 'Net Charges for Plant Retired:'),
(12, 'book_cost_of_plant_retired', 'Book Cost of Plant Retired'),
(13, 'cost_of_removal', 'Cost of Removal'),
(14, 'salvage_credit', 'Salvage (Credit)'),
(15, 'total_net_charges_for_plant_retired',\
'TOTAL Net Chrgs. for Plant Ret. (Enter Total of lines 12 thru 14)'),
(16, 'other_debit_or_credit_items',\
'Other Debit or Cr. Items (Describe, details in footnote):'),
# blank: can be "Other Charges", e.g. in 2012 for PSCo.
(17, 'other_charges_2', 'Other Charges 2'),
(18, 'book_cost_or_asset_retirement_costs_retired',\
'Book Cost or Asset Retirement Costs Retired'),
(19, 'balance_end_of_year', \
'Balance End of Year (Enter Totals of lines 1, 10, 15, 16, and 18)'),
# Section B. Balances at End of Year According to Functional Classification
(20, 'steam_production_end_of_year', 'Steam Production'),
(21, 'nuclear_production_end_of_year', 'Nuclear Production'),
(22, 'hydraulic_production_end_of_year',\
'Hydraulic Production-Conventional'),
(23, 'pumped_storage_end_of_year', 'Hydraulic Production-Pumped Storage'),
(24, 'other_production', 'Other Production'),
(25, 'transmission', 'Transmission'),
(26, 'distribution', 'Distribution'),
(27, 'regional_transmission_and_market_operation',
'Regional Transmission and Market Operation'),
(28, 'general', 'General'),
(29, 'total', 'TOTAL (Enter Total of lines 20 thru 28)')],
columns=['row_number', 'line_id', 'ferc_account_description'])
"""list: A list of tuples containing row numbers, FERC account IDs, and FERC
account descriptions from FERC Form 1 page 219, Accumulated Provision for
Depreciation of electric utility plant(Account 108).
"""
######################################################################
# Constants from EIA From 923 used within init.py module
######################################################################
# From Page 7 of EIA Form 923, Census Region a US state is located in
census_region = {
'NEW': 'New England',
'MAT': 'Middle Atlantic',
'SAT': 'South Atlantic',
'ESC': 'East South Central',
'WSC': 'West South Central',
'ENC': 'East North Central',
'WNC': 'West North Central',
'MTN': 'Mountain',
'PACC': 'Pacific Contiguous (OR, WA, CA)',
'PACN': 'Pacific Non-Contiguous (AK, HI)',
}
"""dict: A dictionary mapping Census Region abbreviations (keys) to Census
Region names (values).
"""
# From Page 7 of EIA Form923
# Static list of NERC (North American Electric Reliability Corporation)
# regions, used for where plant is located
nerc_region = {
'NPCC': 'Northeast Power Coordinating Council',
'ASCC': 'Alaska Systems Coordinating Council',
'HICC': 'Hawaiian Islands Coordinating Council',
'MRO': 'Midwest Reliability Organization',
'SERC': 'SERC Reliability Corporation',
'RFC': 'Reliability First Corporation',
'SPP': 'Southwest Power Pool',
'TRE': 'Texas Regional Entity',
'FRCC': 'Florida Reliability Coordinating Council',
'WECC': 'Western Electricity Coordinating Council'
}
"""dict: A dictionary mapping NERC Region abbreviations (keys) to NERC
Region names (values).
"""
# From Page 7 of EIA Form 923 EIA’s internal consolidated NAICS sectors.
# For internal purposes, EIA consolidates NAICS categories into seven groups.
sector_eia = {
# traditional regulated electric utilities
'1': 'Electric Utility',
# Independent power producers which are not cogenerators
'2': 'NAICS-22 Non-Cogen',
# Independent power producers which are cogenerators, but whose
# primary business purpose is the sale of electricity to the public
'3': 'NAICS-22 Cogen',
# Commercial non-cogeneration facilities that produce electric power,
    # are connected to the grid, and can sell power to the public
'4': 'Commercial NAICS Non-Cogen',
# Commercial cogeneration facilities that produce electric power, are
# connected to the grid, and can sell power to the public
'5': 'Commercial NAICS Cogen',
# Industrial non-cogeneration facilities that produce electric power, are
    # connected to the grid, and can sell power to the public
'6': 'Industrial NAICS Non-Cogen',
# Industrial cogeneration facilities that produce electric power, are
    # connected to the grid, and can sell power to the public
'7': 'Industrial NAICS Cogen'
}
"""dict: A dictionary mapping EIA numeric codes (keys) to EIA’s internal
consolidated NAICS sectors (values).
"""
# EIA 923: EIA Type of prime mover:
prime_movers_eia923 = {
'BA': 'Energy Storage, Battery',
'BT': 'Turbines Used in a Binary Cycle. Including those used for geothermal applications',
'CA': 'Combined-Cycle -- Steam Part',
'CE': 'Energy Storage, Compressed Air',
'CP': 'Energy Storage, Concentrated Solar Power',
'CS': 'Combined-Cycle Single-Shaft Combustion Turbine and Steam Turbine share of single',
'CT': 'Combined-Cycle Combustion Turbine Part',
'ES': 'Energy Storage, Other (Specify on Schedule 9, Comments)',
'FC': 'Fuel Cell',
'FW': 'Energy Storage, Flywheel',
'GT': 'Combustion (Gas) Turbine. Including Jet Engine design',
'HA': 'Hydrokinetic, Axial Flow Turbine',
'HB': 'Hydrokinetic, Wave Buoy',
'HK': 'Hydrokinetic, Other',
'HY': 'Hydraulic Turbine. Including turbines associated with delivery of water by pipeline.',
'IC': 'Internal Combustion (diesel, piston, reciprocating) Engine',
'PS': 'Energy Storage, Reversible Hydraulic Turbine (Pumped Storage)',
'OT': 'Other',
'ST': 'Steam Turbine. Including Nuclear, Geothermal, and Solar Steam (does not include Combined Cycle).',
'PV': 'Photovoltaic',
'WT': 'Wind Turbine, Onshore',
'WS': 'Wind Turbine, Offshore'
}
"""dict: A dictionary mapping EIA 923 prime mover codes (keys) and prime mover
names / descriptions (values).
"""
# EIA 923: The fuel code reported to EIA. Two or three letter alphanumeric:
fuel_type_eia923 = {
'AB': 'Agricultural By-Products',
'ANT': 'Anthracite Coal',
'BFG': 'Blast Furnace Gas',
'BIT': 'Bituminous Coal',
'BLQ': 'Black Liquor',
'CBL': 'Coal, Blended',
'DFO': 'Distillate Fuel Oil. Including diesel, No. 1, No. 2, and No. 4 fuel oils.',
'GEO': 'Geothermal',
'JF': 'Jet Fuel',
'KER': 'Kerosene',
'LFG': 'Landfill Gas',
'LIG': 'Lignite Coal',
'MSB': 'Biogenic Municipal Solid Waste',
'MSN': 'Non-biogenic Municipal Solid Waste',
'MSW': 'Municipal Solid Waste',
'MWH': 'Electricity used for energy storage',
'NG': 'Natural Gas',
'NUC': 'Nuclear. Including Uranium, Plutonium, and Thorium.',
'OBG': 'Other Biomass Gas. Including digester gas, methane, and other biomass gases.',
'OBL': 'Other Biomass Liquids',
'OBS': 'Other Biomass Solids',
'OG': 'Other Gas',
'OTH': 'Other Fuel',
'PC': 'Petroleum Coke',
'PG': 'Gaseous Propane',
'PUR': 'Purchased Steam',
'RC': 'Refined Coal',
'RFO': 'Residual Fuel Oil. Including No. 5 & 6 fuel oils and bunker C fuel oil.',
'SC': 'Coal-based Synfuel. Including briquettes, pellets, or extrusions, which are formed by binding materials or processes that recycle materials.',
'SGC': 'Coal-Derived Synthesis Gas',
'SGP': 'Synthesis Gas from Petroleum Coke',
'SLW': 'Sludge Waste',
'SUB': 'Subbituminous Coal',
'SUN': 'Solar',
'TDF': 'Tire-derived Fuels',
'WAT': 'Water at a Conventional Hydroelectric Turbine and water used in Wave Buoy Hydrokinetic Technology, current Hydrokinetic Technology, Tidal Hydrokinetic Technology, and Pumping Energy for Reversible (Pumped Storage) Hydroelectric Turbines.',
'WC': 'Waste/Other Coal. Including anthracite culm, bituminous gob, fine coal, lignite waste, waste coal.',
'WDL': 'Wood Waste Liquids, excluding Black Liquor. Including red liquor, sludge wood, spent sulfite liquor, and other wood-based liquids.',
    'WDS': 'Wood/Wood Waste Solids. Including paper pellets, railroad ties, utility poles, wood chips, bark, and other wood waste solids.',
'WH': 'Waste Heat not directly attributed to a fuel source',
'WND': 'Wind',
    'WO': 'Waste/Other Oil. Including crude oil, liquid butane, liquid propane, naphtha, oil waste, re-refined motor oil, sludge oil, tar oil, or other petroleum-based liquid wastes.'
}
"""dict: A dictionary mapping EIA 923 fuel type codes (keys) and fuel type
names / descriptions (values).
"""
# Fuel type strings for EIA 923 generator fuel table
fuel_type_eia923_gen_fuel_coal_strings = [
'ant', 'bit', 'cbl', 'lig', 'pc', 'rc', 'sc', 'sub', 'wc', ]
"""list: The list of EIA 923 Generation Fuel strings associated with coal fuel.
"""
fuel_type_eia923_gen_fuel_oil_strings = [
'dfo', 'rfo', 'wo', 'jf', 'ker', ]
"""list: The list of EIA 923 Generation Fuel strings associated with oil fuel.
"""
fuel_type_eia923_gen_fuel_gas_strings = [
'bfg', 'lfg', 'ng', 'og', 'obg', 'pg', 'sgc', 'sgp', ]
"""list: The list of EIA 923 Generation Fuel strings associated with gas fuel.
"""
fuel_type_eia923_gen_fuel_solar_strings = ['sun', ]
"""list: The list of EIA 923 Generation Fuel strings associated with solar
power.
"""
fuel_type_eia923_gen_fuel_wind_strings = ['wnd', ]
"""list: The list of EIA 923 Generation Fuel strings associated with wind
power.
"""
fuel_type_eia923_gen_fuel_hydro_strings = ['wat', ]
"""list: The list of EIA 923 Generation Fuel strings associated with hydro
power.
"""
fuel_type_eia923_gen_fuel_nuclear_strings = ['nuc', ]
"""list: The list of EIA 923 Generation Fuel strings associated with nuclear
power.
"""
fuel_type_eia923_gen_fuel_waste_strings = [
'ab', 'blq', 'msb', 'msn', 'msw', 'obl', 'obs', 'slw', 'tdf', 'wdl', 'wds']
"""list: The list of EIA 923 Generation Fuel strings associated with solid waste
fuel.
"""
fuel_type_eia923_gen_fuel_other_strings = ['geo', 'mwh', 'oth', 'pur', 'wh', ]
"""list: The list of EIA 923 Generation Fuel strings associated with geothermal
power.
"""
fuel_type_eia923_gen_fuel_simple_map = {
'coal': fuel_type_eia923_gen_fuel_coal_strings,
'oil': fuel_type_eia923_gen_fuel_oil_strings,
'gas': fuel_type_eia923_gen_fuel_gas_strings,
'solar': fuel_type_eia923_gen_fuel_solar_strings,
'wind': fuel_type_eia923_gen_fuel_wind_strings,
'hydro': fuel_type_eia923_gen_fuel_hydro_strings,
'nuclear': fuel_type_eia923_gen_fuel_nuclear_strings,
'waste': fuel_type_eia923_gen_fuel_waste_strings,
'other': fuel_type_eia923_gen_fuel_other_strings,
}
"""dict: A dictionary mapping EIA 923 Generation Fuel fuel types (keys) to lists
of strings associated with that fuel type (values).
"""
# Fuel type strings for EIA 923 boiler fuel table
fuel_type_eia923_boiler_fuel_coal_strings = [
'ant', 'bit', 'lig', 'pc', 'rc', 'sc', 'sub', 'wc', ]
"""list: A list of strings from EIA 923 Boiler Fuel associated with fuel type
coal.
"""
fuel_type_eia923_boiler_fuel_oil_strings = ['dfo', 'rfo', 'wo', 'jf', 'ker', ]
"""list: A list of strings from EIA 923 Boiler Fuel associated with fuel type
oil.
"""
fuel_type_eia923_boiler_fuel_gas_strings = [
'bfg', 'lfg', 'ng', 'og', 'obg', 'pg', 'sgc', 'sgp', ]
"""list: A list of strings from EIA 923 Boiler Fuel associated with fuel type
gas.
"""
fuel_type_eia923_boiler_fuel_waste_strings = [
'ab', 'blq', 'msb', 'msn', 'obl', 'obs', 'slw', 'tdf', 'wdl', 'wds', ]
"""list: A list of strings from EIA 923 Boiler Fuel associated with fuel type
waste.
"""
fuel_type_eia923_boiler_fuel_other_strings = ['oth', 'pur', 'wh', ]
"""list: A list of strings from EIA 923 Boiler Fuel associated with fuel type
other.
"""
fuel_type_eia923_boiler_fuel_simple_map = {
'coal': fuel_type_eia923_boiler_fuel_coal_strings,
'oil': fuel_type_eia923_boiler_fuel_oil_strings,
'gas': fuel_type_eia923_boiler_fuel_gas_strings,
'waste': fuel_type_eia923_boiler_fuel_waste_strings,
'other': fuel_type_eia923_boiler_fuel_other_strings,
}
"""dict: A dictionary mapping EIA 923 Boiler Fuel fuel types (keys) to lists
of strings associated with that fuel type (values).
"""
# PUDL consolidation of EIA923 AER fuel type strings into same categories as
# 'energy_source_eia923' plus additional renewable and nuclear categories.
# These classifications are not currently used, as the EIA fuel type and energy
# source designations provide more detailed information.
aer_coal_strings = ['col', 'woc', 'pc']
"""list: A list of EIA 923 AER fuel type strings associated with coal.
"""
aer_gas_strings = ['mlg', 'ng', 'oog']
"""list: A list of EIA 923 AER fuel type strings associated with gas.
"""
aer_oil_strings = ['dfo', 'rfo', 'woo']
"""list: A list of EIA 923 AER fuel type strings associated with oil.
"""
aer_solar_strings = ['sun']
"""list: A list of EIA 923 AER fuel type strings associated with solar power.
"""
aer_wind_strings = ['wnd']
"""list: A list of EIA 923 AER fuel type strings associated with wind power.
"""
aer_hydro_strings = ['hps', 'hyc']
"""list: A list of EIA 923 AER fuel type strings associated with hydro power.
"""
aer_nuclear_strings = ['nuc']
"""list: A list of EIA 923 AER fuel type strings associated with nuclear power.
"""
aer_waste_strings = ['www']
"""list: A list of EIA 923 AER fuel type strings associated with waste.
"""
aer_other_strings = ['geo', 'orw', 'oth']
"""list: A list of EIA 923 AER fuel type strings associated with other fuel.
"""
aer_fuel_type_strings = {
'coal': aer_coal_strings,
'gas': aer_gas_strings,
'oil': aer_oil_strings,
'solar': aer_solar_strings,
'wind': aer_wind_strings,
'hydro': aer_hydro_strings,
'nuclear': aer_nuclear_strings,
'waste': aer_waste_strings,
'other': aer_other_strings
}
"""dict: A dictionary mapping EIA 923 AER fuel types (keys) to lists
of strings associated with that fuel type (values).
"""
# EIA 923: A partial aggregation of the reported fuel type codes into
# larger categories used by EIA in, for example,
# the Annual Energy Review (AER). Two or three letter alphanumeric.
# See the Fuel Code table (Table 5), below:
fuel_type_aer_eia923 = {
'SUN': 'Solar PV and thermal',
'COL': 'Coal',
'DFO': 'Distillate Petroleum',
'GEO': 'Geothermal',
'HPS': 'Hydroelectric Pumped Storage',
'HYC': 'Hydroelectric Conventional',
'MLG': 'Biogenic Municipal Solid Waste and Landfill Gas',
'NG': 'Natural Gas',
'NUC': 'Nuclear',
'OOG': 'Other Gases',
'ORW': 'Other Renewables',
'OTH': 'Other (including nonbiogenic MSW)',
'PC': 'Petroleum Coke',
'RFO': 'Residual Petroleum',
'WND': 'Wind',
'WOC': 'Waste Coal',
'WOO': 'Waste Oil',
'WWW': 'Wood and Wood Waste'
}
"""dict: A dictionary mapping EIA 923 AER fuel types (keys) to lists
of strings associated with that fuel type (values).
"""
fuel_type_eia860_coal_strings = ['ant', 'bit', 'cbl', 'lig', 'pc', 'rc', 'sc',
'sub', 'wc', 'coal', 'petroleum coke', 'col',
'woc']
"""list: A list of strings from EIA 860 associated with fuel type coal.
"""
fuel_type_eia860_oil_strings = ['dfo', 'jf', 'ker', 'rfo', 'wo', 'woo',
'petroleum']
"""list: A list of strings from EIA 860 associated with fuel type oil.
"""
fuel_type_eia860_gas_strings = ['bfg', 'lfg', 'mlg', 'ng', 'obg', 'og', 'pg',
'sgc', 'sgp', 'natural gas', 'other gas',
'oog', 'sg']
"""list: A list of strings from EIA 860 associated with fuel type gas.
"""
fuel_type_eia860_solar_strings = ['sun', 'solar']
"""list: A list of strings from EIA 860 associated with solar power.
"""
fuel_type_eia860_wind_strings = ['wnd', 'wind', 'wt']
"""list: A list of strings from EIA 860 associated with wind power.
"""
fuel_type_eia860_hydro_strings = ['wat', 'hyc', 'hps', 'hydro']
"""list: A list of strings from EIA 860 associated with hydro power.
"""
fuel_type_eia860_nuclear_strings = ['nuc', 'nuclear']
"""list: A list of strings from EIA 860 associated with nuclear power.
"""
fuel_type_eia860_waste_strings = ['ab', 'blq', 'bm', 'msb', 'msn', 'obl',
'obs', 'slw', 'tdf', 'wdl', 'wds', 'biomass',
'msw', 'www']
"""list: A list of strings from EIA 860 associated with fuel type waste.
"""
fuel_type_eia860_other_strings = ['mwh', 'oth', 'pur', 'wh', 'geo', 'none',
'orw', 'other']
"""list: A list of strings from EIA 860 associated with fuel type other.
"""
fuel_type_eia860_simple_map = {
'coal': fuel_type_eia860_coal_strings,
'oil': fuel_type_eia860_oil_strings,
'gas': fuel_type_eia860_gas_strings,
'solar': fuel_type_eia860_solar_strings,
'wind': fuel_type_eia860_wind_strings,
'hydro': fuel_type_eia860_hydro_strings,
'nuclear': fuel_type_eia860_nuclear_strings,
'waste': fuel_type_eia860_waste_strings,
'other': fuel_type_eia860_other_strings,
}
"""dict: A dictionary mapping EIA 860 fuel types (keys) to lists
of strings associated with that fuel type (values).
"""
# EIA 923/860: Lumping of energy source categories.
energy_source_eia_simple_map = {
'coal': ['ANT', 'BIT', 'LIG', 'PC', 'SUB', 'WC', 'RC'],
'oil': ['DFO', 'JF', 'KER', 'RFO', 'WO'],
'gas': ['BFG', 'LFG', 'NG', 'OBG', 'OG', 'PG', 'SG', 'SGC', 'SGP'],
'solar': ['SUN'],
'wind': ['WND'],
'hydro': ['WAT'],
'nuclear': ['NUC'],
'waste': ['AB', 'BLQ', 'MSW', 'OBL', 'OBS', 'SLW', 'TDF', 'WDL', 'WDS'],
'other': ['GEO', 'MWH', 'OTH', 'PUR', 'WH']
}
"""dict: A dictionary mapping EIA fuel types (keys) to fuel codes (values).
"""
fuel_group_eia923_simple_map = {
'coal': ['coal', 'petroleum coke'],
'oil': ['petroleum'],
'gas': ['natural gas', 'other gas']
}
"""dict: A dictionary mapping EIA 923 simple fuel types("oil", "coal", "gas")
(keys) to fuel types (values).
"""
# EIA 923: The type of physical units fuel consumption is reported in.
# All consumption is reported in either short tons for solids,
# thousands of cubic feet for gases, and barrels for liquids.
fuel_units_eia923 = {
'mcf': 'Thousands of cubic feet (for gases)',
'short_tons': 'Short tons (for solids)',
'barrels': 'Barrels (for liquids)'
}
"""dict: A dictionary mapping EIA 923 fuel units (keys) to fuel unit
descriptions (values).
"""
# EIA 923: Designates the purchase type under which receipts occurred
# in the reporting month. One or two character alphanumeric:
contract_type_eia923 = {
'C': 'Contract - Fuel received under a purchase order or contract with a term of one year or longer. Contracts with a shorter term are considered spot purchases ',
'NC': 'New Contract - Fuel received under a purchase order or contract with duration of one year or longer, under which deliveries were first made during the reporting month',
'S': 'Spot Purchase',
'T': 'Tolling Agreement – Fuel received under a tolling agreement (bartering arrangement of fuel for generation)'
}
"""dict: A dictionary mapping EIA 923 contract codes (keys) to contract
descriptions (values) for each month in the Fuel Receipts and Costs table.
"""
# EIA 923: The fuel code associated with the fuel receipt.
# Defined on Page 7 of EIA Form 923
# Two or three character alphanumeric:
energy_source_eia923 = {
'ANT': 'Anthracite Coal',
'BFG': 'Blast Furnace Gas',
'BM': 'Biomass',
'BIT': 'Bituminous Coal',
'DFO': 'Distillate Fuel Oil. Including diesel, No. 1, No. 2, and No. 4 fuel oils.',
'JF': 'Jet Fuel',
'KER': 'Kerosene',
'LIG': 'Lignite Coal',
'NG': 'Natural Gas',
'PC': 'Petroleum Coke',
    'PG': 'Gaseous Propane',
'OG': 'Other Gas',
'RC': 'Refined Coal',
'RFO': 'Residual Fuel Oil. Including No. 5 & 6 fuel oils and bunker C fuel oil.',
'SG': 'Synthesis Gas from Petroleum Coke',
'SGP': 'Petroleum Coke Derived Synthesis Gas',
'SC': 'Coal-based Synfuel. Including briquettes, pellets, or extrusions, which are formed by binding materials or processes that recycle materials.',
'SUB': 'Subbituminous Coal',
'WC': 'Waste/Other Coal. Including anthracite culm, bituminous gob, fine coal, lignite waste, waste coal.',
    'WO': 'Waste/Other Oil. Including crude oil, liquid butane, liquid propane, naphtha, oil waste, re-refined motor oil, sludge oil, tar oil, or other petroleum-based liquid wastes.',
}
"""dict: A dictionary mapping fuel codes (keys) to fuel descriptions (values)
for each fuel receipt from the EIA 923 Fuel Receipts and Costs table.
"""
# EIA 923 Fuel Group, from Page 7 EIA Form 923
# Groups fossil fuel energy sources into fuel groups that are located in the
# Electric Power Monthly: Coal, Natural Gas, Petroleum, Petroleum Coke.
fuel_group_eia923 = (
'coal',
'natural_gas',
'petroleum',
'petroleum_coke',
'other_gas'
)
"""tuple: A tuple containing EIA 923 fuel groups.
"""
# EIA 923: Type of Coal Mine as defined on Page 7 of EIA Form 923
coalmine_type_eia923 = {
'P': 'Preparation Plant',
'S': 'Surface',
'U': 'Underground',
'US': 'Both an underground and surface mine with most coal extracted from underground',
'SU': 'Both an underground and surface mine with most coal extracted from surface',
}
"""dict: A dictionary mapping EIA 923 coal mine type codes (keys) to
descriptions (values).
"""
# EIA 923: State abbreviation related to coal mine location.
# Country abbreviations are also used in this category, but they are
# non-standard because of collisions with US state names. Instead of using
# the provided non-standard names, we convert to ISO-3166-1 three letter
# country codes https://en.wikipedia.org/wiki/ISO_3166-1_alpha-3
coalmine_country_eia923 = {
'AU': 'AUS', # Australia
'CL': 'COL', # Colombia
'CN': 'CAN', # Canada
'IS': 'IDN', # Indonesia
'PL': 'POL', # Poland
'RS': 'RUS', # Russia
'UK': 'GBR', # United Kingdom of Great Britain
'VZ': 'VEN', # Venezuela
'OC': 'other_country',
'IM': 'unknown'
}
"""dict: A dictionary mapping coal mine country codes (keys) to ISO-3166-1 three
letter country codes (values).
"""
# EIA 923: Mode for the longest / second longest distance.
transport_modes_eia923 = {
'RR': 'Rail: Shipments of fuel moved to consumers by rail \
(private or public/commercial). Included is coal hauled to or \
away from a railroad siding by truck if the truck did not use public\
roads.',
'RV': 'River: Shipments of fuel moved to consumers via river by barge. \
Not included are shipments to Great Lakes coal loading docks, \
tidewater piers, or coastal ports.',
'GL': 'Great Lakes: Shipments of coal moved to consumers via \
the Great Lakes. These shipments are moved via the Great Lakes \
coal loading docks, which are identified by name and location as \
follows: Conneaut Coal Storage & Transfer, Conneaut, Ohio; \
NS Coal Dock (Ashtabula Coal Dock), Ashtabula, Ohio; \
Sandusky Coal Pier, Sandusky, Ohio; Toledo Docks, Toledo, Ohio; \
KCBX Terminals Inc., Chicago, Illinois; \
Superior Midwest Energy Terminal, Superior, Wisconsin',
'TP': 'Tidewater Piers and Coastal Ports: Shipments of coal moved to \
Tidewater Piers and Coastal Ports for further shipments to consumers \
via coastal water or ocean. The Tidewater Piers and Coastal Ports \
are identified by name and location as follows: Dominion Terminal \
Associates, Newport News, Virginia; McDuffie Coal Terminal, Mobile, \
Alabama; IC Railmarine Terminal, Convent, Louisiana; \
International Marine Terminals, Myrtle Grove, Louisiana; \
Cooper/T. Smith Stevedoring Co. Inc., Darrow, Louisiana; \
Seward Terminal Inc., Seward, Alaska; Los Angeles Export Terminal, \
Inc., Los Angeles, California; Levin-Richmond Terminal Corp., \
Richmond, California; Baltimore Terminal, Baltimore, Maryland; \
Norfolk Southern Lamberts Point P-6, Norfolk, Virginia; \
Chesapeake Bay Piers, Baltimore, Maryland; Pier IX Terminal Company, \
Newport News, Virginia; Electro-Coal Transport Corp., Davant, \
Louisiana',
'WT': 'Water: Shipments of fuel moved to consumers by other waterways.',
'TR': 'Truck: Shipments of fuel moved to consumers by truck. \
Not included is fuel hauled to or away from a railroad siding by \
truck on non-public roads.',
'tr': 'Truck: Shipments of fuel moved to consumers by truck. \
Not included is fuel hauled to or away from a railroad siding by \
truck on non-public roads.',
'TC': 'Tramway/Conveyor: Shipments of fuel moved to consumers \
by tramway or conveyor.',
'SP': 'Slurry Pipeline: Shipments of coal moved to consumers \
by slurry pipeline.',
'PL': 'Pipeline: Shipments of fuel moved to consumers by pipeline'
}
"""dict: A dictionary mapping primary and secondary transportation mode codes
(keys) to descriptions (values).
"""
# We need to include all of the columns which we want to keep for either the
# entity or annual tables. The order here matters: we need to harvest the plant
# location before harvesting the location of the utilities, for example.
entities = {
'plants': [ # base cols
['plant_id_eia'],
# static cols
['balancing_authority_code', 'balancing_authority_name',
'city', 'county', 'ferc_cogen_status',
'ferc_exempt_wholesale_generator', 'ferc_small_power_producer',
'grid_voltage_2_kv', 'grid_voltage_3_kv', 'grid_voltage_kv',
'iso_rto_code', 'latitude', 'longitude',
'nerc_region', 'plant_name_eia', 'primary_purpose_naics_id',
'sector_id', 'sector_name', 'state', 'street_address', 'zip_code'],
# annual cols
['ash_impoundment', 'ash_impoundment_lined', 'ash_impoundment_status',
'energy_storage', 'ferc_cogen_docket_no', 'water_source',
'ferc_exempt_wholesale_generator_docket_no',
'ferc_small_power_producer_docket_no',
'liquefied_natural_gas_storage',
'natural_gas_local_distribution_company', 'natural_gas_storage',
'natural_gas_pipeline_name_1', 'natural_gas_pipeline_name_2',
'natural_gas_pipeline_name_3', 'net_metering', 'pipeline_notes',
'regulatory_status_code', 'transmission_distribution_owner_id',
'transmission_distribution_owner_name',
'transmission_distribution_owner_state', 'utility_id_eia'],
# need type fixing
{},
# {'plant_id_eia': 'int64',
# 'grid_voltage_2_kv': 'float64',
# 'grid_voltage_3_kv': 'float64',
# 'grid_voltage_kv': 'float64',
# 'longitude': 'float64',
# 'latitude': 'float64',
# 'primary_purpose_naics_id': 'float64',
# 'sector_id': 'float64',
# 'zip_code': 'float64',
# 'utility_id_eia': 'float64'},
],
'generators': [ # base cols
['plant_id_eia', 'generator_id'],
# static cols
['prime_mover_code', 'duct_burners', 'operating_date',
'topping_bottoming_code', 'solid_fuel_gasification',
'pulverized_coal_tech', 'fluidized_bed_tech', 'subcritical_tech',
'supercritical_tech', 'ultrasupercritical_tech', 'stoker_tech',
'other_combustion_tech', 'bypass_heat_recovery',
'rto_iso_lmp_node_id', 'rto_iso_location_wholesale_reporting_id',
'associated_combined_heat_power', 'original_planned_operating_date',
'operating_switch', 'previously_canceled'],
# annual cols
['capacity_mw', 'fuel_type_code_pudl', 'multiple_fuels',
'ownership_code', 'deliver_power_transgrid', 'summer_capacity_mw',
'winter_capacity_mw', 'minimum_load_mw', 'technology_description',
'energy_source_code_1', 'energy_source_code_2',
'energy_source_code_3', 'energy_source_code_4',
'energy_source_code_5', 'energy_source_code_6',
'startup_source_code_1', 'startup_source_code_2',
'startup_source_code_3', 'startup_source_code_4',
'time_cold_shutdown_full_load_code', 'syncronized_transmission_grid',
'turbines_num', 'operational_status_code', 'operational_status',
'planned_modifications', 'planned_net_summer_capacity_uprate_mw',
'planned_net_winter_capacity_uprate_mw', 'planned_new_capacity_mw',
'planned_uprate_date', 'planned_net_summer_capacity_derate_mw',
'planned_net_winter_capacity_derate_mw', 'planned_derate_date',
'planned_new_prime_mover_code', 'planned_energy_source_code_1',
'planned_repower_date', 'other_planned_modifications',
'other_modifications_date', 'planned_retirement_date',
'carbon_capture', 'cofire_fuels', 'switch_oil_gas',
'turbines_inverters_hydrokinetics', 'nameplate_power_factor',
'uprate_derate_during_year', 'uprate_derate_completed_date',
'current_planned_operating_date', 'summer_estimated_capability_mw',
'winter_estimated_capability_mw', 'retirement_date', 'utility_id_eia'],
# need type fixing
{}
# {'plant_id_eia': 'int64',
# 'generator_id': 'str'},
],
# utilities must come after plants. plant location needs to be
# removed before the utility locations are compiled
'utilities': [ # base cols
['utility_id_eia'],
# static cols
['utility_name_eia',
'entity_type'],
# annual cols
['street_address', 'city', 'state', 'zip_code',
'plants_reported_owner', 'plants_reported_operator',
'plants_reported_asset_manager', 'plants_reported_other_relationship',
],
# need type fixing
{'utility_id_eia': 'int64', }, ],
'boilers': [ # base cols
['plant_id_eia', 'boiler_id'],
# static cols
['prime_mover_code'],
# annual cols
[],
# need type fixing
{}, ]}
"""dict: A dictionary containing table name strings (keys) and lists of columns
to keep for those tables (values).
"""
# EPA CEMS constants #####
epacems_rename_dict = {
"STATE": "state",
# "FACILITY_NAME": "plant_name", # Not reading from CSV
"ORISPL_CODE": "plant_id_eia",
"UNITID": "unitid",
# These op_date, op_hour, and op_time variables get converted to
# operating_date, operating_datetime and operating_time_interval in
# transform/epacems.py
"OP_DATE": "op_date",
"OP_HOUR": "op_hour",
"OP_TIME": "operating_time_hours",
"GLOAD (MW)": "gross_load_mw",
"GLOAD": "gross_load_mw",
"SLOAD (1000 lbs)": "steam_load_1000_lbs",
"SLOAD (1000lb/hr)": "steam_load_1000_lbs",
"SLOAD": "steam_load_1000_lbs",
"SO2_MASS (lbs)": "so2_mass_lbs",
"SO2_MASS": "so2_mass_lbs",
"SO2_MASS_MEASURE_FLG": "so2_mass_measurement_code",
# "SO2_RATE (lbs/mmBtu)": "so2_rate_lbs_mmbtu", # Not reading from CSV
# "SO2_RATE": "so2_rate_lbs_mmbtu", # Not reading from CSV
# "SO2_RATE_MEASURE_FLG": "so2_rate_measure_flg", # Not reading from CSV
"NOX_RATE (lbs/mmBtu)": "nox_rate_lbs_mmbtu",
"NOX_RATE": "nox_rate_lbs_mmbtu",
"NOX_RATE_MEASURE_FLG": "nox_rate_measurement_code",
"NOX_MASS (lbs)": "nox_mass_lbs",
"NOX_MASS": "nox_mass_lbs",
"NOX_MASS_MEASURE_FLG": "nox_mass_measurement_code",
"CO2_MASS (tons)": "co2_mass_tons",
"CO2_MASS": "co2_mass_tons",
"CO2_MASS_MEASURE_FLG": "co2_mass_measurement_code",
# "CO2_RATE (tons/mmBtu)": "co2_rate_tons_mmbtu", # Not reading from CSV
# "CO2_RATE": "co2_rate_tons_mmbtu", # Not reading from CSV
# "CO2_RATE_MEASURE_FLG": "co2_rate_measure_flg", # Not reading from CSV
"HEAT_INPUT (mmBtu)": "heat_content_mmbtu",
"HEAT_INPUT": "heat_content_mmbtu",
"FAC_ID": "facility_id",
"UNIT_ID": "unit_id_epa",
}
"""dict: A dictionary containing EPA CEMS column names (keys) and replacement
names to use when reading those columns into PUDL (values).
"""
# Any column that exactly matches one of these won't be read
epacems_columns_to_ignore = {
"FACILITY_NAME",
"SO2_RATE (lbs/mmBtu)",
"SO2_RATE",
"SO2_RATE_MEASURE_FLG",
"CO2_RATE (tons/mmBtu)",
"CO2_RATE",
"CO2_RATE_MEASURE_FLG",
}
"""set: The set of EPA CEMS columns to ignore when reading data.
"""
# Specify dtypes to for reading the CEMS CSVs
epacems_csv_dtypes = {
"STATE": pd.StringDtype(),
# "FACILITY_NAME": str, # Not reading from CSV
"ORISPL_CODE": pd.Int64Dtype(),
"UNITID": pd.StringDtype(),
# These op_date, op_hour, and op_time variables get converted to
# operating_date, operating_datetime and operating_time_interval in
# transform/epacems.py
"OP_DATE": pd.StringDtype(),
"OP_HOUR": pd.Int64Dtype(),
"OP_TIME": float,
"GLOAD (MW)": float,
"GLOAD": float,
"SLOAD (1000 lbs)": float,
"SLOAD (1000lb/hr)": float,
"SLOAD": float,
"SO2_MASS (lbs)": float,
"SO2_MASS": float,
"SO2_MASS_MEASURE_FLG": pd.StringDtype(),
# "SO2_RATE (lbs/mmBtu)": float, # Not reading from CSV
# "SO2_RATE": float, # Not reading from CSV
# "SO2_RATE_MEASURE_FLG": str, # Not reading from CSV
"NOX_RATE (lbs/mmBtu)": float,
"NOX_RATE": float,
"NOX_RATE_MEASURE_FLG": pd.StringDtype(),
"NOX_MASS (lbs)": float,
"NOX_MASS": float,
"NOX_MASS_MEASURE_FLG": pd.StringDtype(),
"CO2_MASS (tons)": float,
"CO2_MASS": float,
"CO2_MASS_MEASURE_FLG": pd.StringDtype(),
# "CO2_RATE (tons/mmBtu)": float, # Not reading from CSV
# "CO2_RATE": float, # Not reading from CSV
# "CO2_RATE_MEASURE_FLG": str, # Not reading from CSV
"HEAT_INPUT (mmBtu)": float,
"HEAT_INPUT": float,
"FAC_ID": pd.Int64Dtype(),
"UNIT_ID": pd.Int64Dtype(),
}
"""dict: A dictionary containing column names (keys) and data types (values)
for EPA CEMS.
"""
epacems_tables = ("hourly_emissions_epacems")
"""tuple: A tuple containing tables of EPA CEMS data to pull into PUDL.
"""
epacems_additional_plant_info_file = importlib.resources.open_text(
'pudl.package_data.epa.cems', 'plant_info_for_additional_cems_plants.csv')
"""typing.TextIO:
Todo:
Return to
"""
files_dict_epaipm = {
'transmission_single_epaipm': '*table_3-21*',
'transmission_joint_epaipm': '*transmission_joint_ipm*',
'load_curves_epaipm': '*table_2-2_*',
'plant_region_map_epaipm': '*needs_v6*',
}
"""dict: A dictionary of EPA IPM tables and strings that files of those tables
contain.
"""
epaipm_url_ext = {
'transmission_single_epaipm': 'table_3-21_annual_transmission_capabilities_of_u.s._model_regions_in_epa_platform_v6_-_2021.xlsx',
'load_curves_epaipm': 'table_2-2_load_duration_curves_used_in_epa_platform_v6.xlsx',
'plant_region_map_epaipm': 'needs_v6_november_2018_reference_case_0.xlsx',
}
"""dict: A dictionary of EPA IPM tables and associated URLs extensions for
downloading that table's data.
"""
read_excel_epaipm_dict = {
'transmission_single_epaipm': dict(
skiprows=3,
usecols='B:F',
index_col=[0, 1],
),
'transmission_joint_epaipm': {},
'load_curves_epaipm': dict(
skiprows=3,
usecols='B:AB',
),
'plant_region_map_epaipm_active': dict(
sheet_name='NEEDS v6_Active',
usecols='C,I',
),
'plant_region_map_epaipm_retired': dict(
sheet_name='NEEDS v6_Retired_Through2021',
usecols='C,I',
),
}
"""
dict: A dictionary of dictionaries containing EPA IPM tables and associated
information for reading those tables into PUDL (values).
"""
epaipm_region_names = [
'ERC_PHDL', 'ERC_REST', 'ERC_FRNT', 'ERC_GWAY', 'ERC_WEST',
'FRCC', 'NENG_CT', 'NENGREST', 'NENG_ME', 'MIS_AR', 'MIS_IL',
'MIS_INKY', 'MIS_IA', 'MIS_MIDA', 'MIS_LA', 'MIS_LMI', 'MIS_MNWI',
'MIS_D_MS', 'MIS_MO', 'MIS_MAPP', 'MIS_AMSO', 'MIS_WOTA',
'MIS_WUMS', 'NY_Z_A', 'NY_Z_B', 'NY_Z_C&E', 'NY_Z_D', 'NY_Z_F',
'NY_Z_G-I', 'NY_Z_J', 'NY_Z_K', 'PJM_West', 'PJM_AP', 'PJM_ATSI',
'PJM_COMD', 'PJM_Dom', 'PJM_EMAC', 'PJM_PENE', 'PJM_SMAC',
'PJM_WMAC', 'S_C_KY', 'S_C_TVA', 'S_D_AECI', 'S_SOU', 'S_VACA',
'SPP_NEBR', 'SPP_N', 'SPP_SPS', 'SPP_WEST', 'SPP_KIAM', 'SPP_WAUE',
'WECC_AZ', 'WEC_BANC', 'WECC_CO', 'WECC_ID', 'WECC_IID',
'WEC_LADW', 'WECC_MT', 'WECC_NM', 'WEC_CALN', 'WECC_NNV',
'WECC_PNW', 'WEC_SDGE', 'WECC_SCE', 'WECC_SNV', 'WECC_UT',
'WECC_WY', 'CN_AB', 'CN_BC', 'CN_NL', 'CN_MB', 'CN_NB', 'CN_NF',
'CN_NS', 'CN_ON', 'CN_PE', 'CN_PQ', 'CN_SK',
]
"""list: A list of EPA IPM region names."""
epaipm_region_aggregations = {
'PJM': [
'PJM_AP', 'PJM_ATSI', 'PJM_COMD', 'PJM_Dom',
'PJM_EMAC', 'PJM_PENE', 'PJM_SMAC', 'PJM_WMAC'
],
'NYISO': [
'NY_Z_A', 'NY_Z_B', 'NY_Z_C&E', 'NY_Z_D',
'NY_Z_F', 'NY_Z_G-I', 'NY_Z_J', 'NY_Z_K'
],
'ISONE': ['NENG_CT', 'NENGREST', 'NENG_ME'],
'MISO': [
'MIS_AR', 'MIS_IL', 'MIS_INKY', 'MIS_IA',
'MIS_MIDA', 'MIS_LA', 'MIS_LMI', 'MIS_MNWI', 'MIS_D_MS',
'MIS_MO', 'MIS_MAPP', 'MIS_AMSO', 'MIS_WOTA', 'MIS_WUMS'
],
'SPP': [
'SPP_NEBR', 'SPP_N', 'SPP_SPS', 'SPP_WEST', 'SPP_KIAM', 'SPP_WAUE'
],
'WECC_NW': [
'WECC_CO', 'WECC_ID', 'WECC_MT', 'WECC_NNV',
'WECC_PNW', 'WECC_UT', 'WECC_WY'
]
}
"""
dict: A dictionary containing EPA IPM regions (keys) and lists of their
associated abbreviations (values).
"""
epaipm_rename_dict = {
'transmission_single_epaipm': {
'From': 'region_from',
'To': 'region_to',
'Capacity TTC (MW)': 'firm_ttc_mw',
'Energy TTC (MW)': 'nonfirm_ttc_mw',
'Transmission Tariff (2016 mills/kWh)': 'tariff_mills_kwh',
},
'load_curves_epaipm': {
'day': 'day_of_year',
'region': 'region_id_epaipm',
},
'plant_region_map_epaipm': {
'ORIS Plant Code': 'plant_id_eia',
'Region Name': 'region',
},
}
"""
dict: A dictionary of dictionaries containing EPA IPM tables (keys) and items
for each table to be renamed along with the replacement name (values).
"""
glue_pudl_tables = ('plants_eia', 'plants_ferc', 'plants', 'utilities_eia',
                    'utilities_ferc', 'utilities', 'utility_plant_assn')
"""tuple: A tuple containing the glue tables that link FERC Form 1 and EIA
plants and utilities within PUDL.
"""
data_sources = (
'eia860',
'eia861',
'eia923',
'epacems',
'epaipm',
'ferc1',
# 'pudl'
)
"""tuple: A tuple containing the data sources we are able to pull into PUDL."""
# All the years for which we ought to be able to download these data sources
data_years = {
'eia860': tuple(range(2001, 2019)),
'eia861': tuple(range(1990, 2019)),
'eia923': tuple(range(2001, 2020)),
'epacems': tuple(range(1995, 2019)),
'epaipm': (None, ),
'ferc1': tuple(range(1994, 2019)),
}
"""
dict: A dictionary of data sources (keys) and tuples containing the years
that we expect to be able to download for each data source (values).
"""
# The full set of years we currently expect to be able to ingest, per source:
working_years = {
'eia860': tuple(range(2009, 2019)),
'eia861': tuple(range(1999, 2019)),
'eia923': tuple(range(2009, 2019)),
'epacems': tuple(range(1995, 2019)),
'epaipm': (None, ),
'ferc1': tuple(range(1994, 2019)),
}
"""
dict: A dictionary of data sources (keys) and tuples containing the years for
each data source that are able to be ingested into PUDL.
"""
pudl_tables = {
'eia860': eia860_pudl_tables,
'eia861': eia861_pudl_tables,
'eia923': eia923_pudl_tables,
'epacems': epacems_tables,
'epaipm': epaipm_pudl_tables,
'ferc1': ferc1_pudl_tables,
'ferc714': ferc714_pudl_tables,
'glue': glue_pudl_tables,
}
"""
dict: A dictionary containing data sources (keys) and the list of associated
tables from that datasource that can be pulled into PUDL (values).
"""
base_data_urls = {
'eia860': 'https://www.eia.gov/electricity/data/eia860',
'eia861': 'https://www.eia.gov/electricity/data/eia861/zip',
'eia923': 'https://www.eia.gov/electricity/data/eia923',
'epacems': 'ftp://newftp.epa.gov/dmdnload/emissions/hourly/monthly',
'ferc1': 'ftp://eforms1.ferc.gov/f1allyears',
'ferc714': 'https://www.ferc.gov/docs-filing/forms/form-714/data',
'ferceqr': 'ftp://eqrdownload.ferc.gov/DownloadRepositoryProd/BulkNew/CSV',
'msha': 'https://arlweb.msha.gov/OpenGovernmentData/DataSets',
'epaipm': 'https://www.epa.gov/sites/production/files/2019-03',
'pudl': 'https://catalyst.coop/pudl/'
}
"""
dict: A dictionary containing data sources (keys) and their base data URLs
(values).
"""
need_fix_inting = {
'plants_steam_ferc1': ('construction_year', 'installation_year'),
'plants_small_ferc1': ('construction_year', 'ferc_license_id'),
'plants_hydro_ferc1': ('construction_year', 'installation_year',),
'plants_pumped_storage_ferc1': ('construction_year', 'installation_year',),
'hourly_emissions_epacems': ('facility_id', 'unit_id_epa',),
}
"""
dict: A dictionary of tables (keys) and the names of their integer-type
columns (values) whose null values need fixing.
"""
contributors = {
"catalyst-cooperative": {
"title": "C<NAME>ooperative",
"path": "https://catalyst.coop/",
"role": "publisher",
"email": "<EMAIL>",
"organization": "Catalyst Cooperative",
},
"zane-selvans": {
"title": "<NAME>",
"email": "<EMAIL>",
"path": "https://amateurearthling.org/",
"role": "wrangler",
"organization": "Catalyst Cooperative"
},
"christina-gosnell": {
"title": "<NAME>",
"email": "<EMAIL>",
"role": "contributor",
"organization": "Catalyst Cooperative",
},
"steven-winter": {
"title": "<NAME>",
"email": "<EMAIL>",
"role": "contributor",
"organization": "Catalyst Cooperative",
},
"alana-wilson": {
"title": "<NAME>",
"email": "<EMAIL>",
"role": "contributor",
"organization": "Catalyst Cooperative",
},
"karl-dunkle-werner": {
"title": "<NAME>",
"email": "<EMAIL>",
"path": "https://karldw.org/",
"role": "contributor",
"organization": "UC Berkeley",
},
'greg-schivley': {
"title": "<NAME>",
"role": "contributor",
},
}
"""
dict: A dictionary of dictionaries containing contributor identifiers (keys)
and their attributes (values).
"""
data_source_info = {
"eia860": {
"title": "EIA Form 860",
"path": "https://www.eia.gov/electricity/data/eia860/",
},
"eia861": {
"title": "EIA Form 861",
"path": "https://www.eia.gov/electricity/data/eia861/",
},
"eia923": {
"title": "EIA Form 923",
"path": "https://www.eia.gov/electricity/data/eia923/",
},
"eiawater": {
"title": "EIA Water Use for Power",
"path": "https://www.eia.gov/electricity/data/water/",
},
"epacems": {
"title": "EPA Air Markets Program Data",
"path": "https://ampd.epa.gov/ampd/",
},
"epaipm": {
"title": "EPA Integrated Planning Model",
"path": "https://www.epa.gov/airmarkets/national-electric-energy-data-system-needs-v6",
},
"ferc1": {
"title": "FERC Form 1",
"path": "https://www.ferc.gov/docs-filing/forms/form-1/data.asp",
},
"ferc714": {
"title": "FERC Form 714",
"path": "https://www.ferc.gov/docs-filing/forms/form-714/data.asp",
},
"ferceqr": {
"title": "FERC Electric Quarterly Report",
"path": "https://www.ferc.gov/docs-filing/eqr.asp",
},
"msha": {
"title": "Mining Safety and Health Administration",
"path": "https://www.msha.gov/mine-data-retrieval-system",
},
"phmsa": {
"title": "Pipelines and Hazardous Materials Safety Administration",
"path": "https://www.phmsa.dot.gov/data-and-statistics/pipeline/data-and-statistics-overview",
},
"pudl": {
"title": "The Public Utility Data Liberation Project (PUDL)",
"path": "https://catalyst.coop/pudl/",
"email": "<EMAIL>",
},
}
"""
dict: A dictionary of dictionaries containing data sources (keys) and their
associated attributes (values).
"""
contributors_by_source = {
"pudl": [
"catalyst-cooperative",
"zane-selvans",
"christina-gosnell",
"steven-winter",
"alana-wilson",
"karl-dunkle-werner",
],
"eia923": [
"catalyst-cooperative",
"zane-selvans",
"christina-gosnell",
"steven-winter",
],
"eia860": [
"catalyst-cooperative",
"zane-selvans",
"christina-gosnell",
"steven-winter",
"alana-wilson",
],
"ferc1": [
"catalyst-cooperative",
"zane-selvans",
"christina-gosnell",
"steven-winter",
"alana-wilson",
],
"epacems": [
"catalyst-cooperative",
"karl-dunkle-werner",
"zane-selvans",
],
"epaipm": [
"greg-schivley",
],
}
"""
dict: A dictionary of data sources (keys) and lists of contributors (values).
"""
licenses = {
"cc-by-4.0": {
"name": "CC-BY-4.0",
"title": "Creative Commons Attribution 4.0",
"path": "https://creativecommons.org/licenses/by/4.0/"
},
"us-govt": {
"name": "other-pd",
"title": "U.S. Government Work",
"path": "http://www.usa.gov/publicdomain/label/1.0/",
}
}
"""
dict: A dictionary of dictionaries containing license types and their
attributes.
"""
output_formats = [
'sqlite',
'parquet',
'datapkg',
'notebook',
]
"""list: A list of types of PUDL output formats."""
keywords_by_data_source = {
'pudl': [
'us', 'electricity',
],
'eia860': [
'electricity', 'electric', 'boiler', 'generator', 'plant', 'utility',
'fuel', 'coal', 'natural gas', 'prime mover', 'eia860', 'retirement',
'capacity', 'planned', 'proposed', 'energy', 'hydro', 'solar', 'wind',
'nuclear', 'form 860', 'eia', 'annual', 'gas', 'ownership', 'steam',
'turbine', 'combustion', 'combined cycle', 'eia',
'energy information administration'
],
'eia923': [
'fuel', 'boiler', 'generator', 'plant', 'utility', 'cost', 'price',
'natural gas', 'coal', 'eia923', 'energy', 'electricity', 'form 923',
'receipts', 'generation', 'net generation', 'monthly', 'annual', 'gas',
'fuel consumption', 'MWh', 'energy information administration', 'eia',
'mercury', 'sulfur', 'ash', 'lignite', 'bituminous', 'subbituminous',
'heat content'
],
'epacems': [
'epa', 'us', 'emissions', 'pollution', 'ghg', 'so2', 'co2', 'sox',
'nox', 'load', 'utility', 'electricity', 'plant', 'generator', 'unit',
'generation', 'capacity', 'output', 'power', 'heat content', 'mmbtu',
'steam', 'cems', 'continuous emissions monitoring system', 'hourly',
'environmental protection agency', 'ampd', 'air markets program data',
],
'ferc1': [
'electricity', 'electric', 'utility', 'plant', 'steam', 'generation',
'cost', 'expense', 'price', 'heat content', 'ferc', 'form 1',
'federal energy regulatory commission', 'capital', 'accounting',
'depreciation', 'finance', 'plant in service', 'hydro', 'coal',
'natural gas', 'gas', 'opex', 'capex', 'accounts', 'investment',
'capacity'
],
'epaipm': [
'epaipm', 'integrated planning',
]
}
"""dict: A dictionary of datasets (keys) and keywords (values). """
column_dtypes = {
"ferc1": { # Obviously this is not yet a complete list...
"construction_year": pd.Int64Dtype(),
"installation_year": pd.Int64Dtype(),
'utility_id_ferc1': pd.Int64Dtype(),
'plant_id_pudl': pd.Int64Dtype(),
'plant_id_ferc1': pd.Int64Dtype(),
'utility_id_pudl': pd.Int64Dtype(),
'report_year': pd.Int64Dtype(),
'report_date': 'datetime64[ns]',
},
"ferc714": { # INCOMPLETE
"report_year": pd.Int64Dtype(),
"utility_id_ferc714": pd.Int64Dtype(),
"utility_id_eia": pd.Int64Dtype(),
"utility_name_ferc714": pd.StringDtype(),
"timezone": pd.CategoricalDtype(categories=[
"America/New_York", "America/Chicago", "America/Denver",
"America/Los_Angeles", "America/Anchorage", "Pacific/Honolulu"]),
"utc_datetime": "datetime64[ns]",
"peak_demand_summer_mw": float,
"peak_demand_winter_mw": float,
},
"epacems": {
'state': pd.StringDtype(),
'plant_id_eia': pd.Int64Dtype(), # Nullable Integer
'unitid': pd.StringDtype(),
'operating_datetime_utc': "datetime64[ns]",
'operating_time_hours': float,
'gross_load_mw': float,
'steam_load_1000_lbs': float,
'so2_mass_lbs': float,
'so2_mass_measurement_code': pd.StringDtype(),
'nox_rate_lbs_mmbtu': float,
'nox_rate_measurement_code': pd.StringDtype(),
'nox_mass_lbs': float,
'nox_mass_measurement_code': pd.StringDtype(),
'co2_mass_tons': float,
'co2_mass_measurement_code': pd.StringDtype(),
'heat_content_mmbtu': float,
'facility_id': pd.Int64Dtype(), # Nullable Integer
'unit_id_epa': pd.Int64Dtype(), # Nullable Integer
},
"eia": {
'ash_content_pct': float,
'ash_impoundment': pd.BooleanDtype(),
'ash_impoundment_lined': pd.BooleanDtype(),
# TODO: convert this field to more descriptive words
'ash_impoundment_status': pd.StringDtype(),
'associated_combined_heat_power': pd.BooleanDtype(),
'balancing_authority_code': pd.StringDtype(),
'balancing_authority_id_eia': pd.Int64Dtype(),
'balancing_authority_name': | pd.StringDtype() | pandas.StringDtype |
# Import the required libraries
import requests
import json
import time
import pandas as pd
# import fool
from PIL import Image,ImageSequence
import numpy as np
from wordcloud import WordCloud,ImageColorGenerator
import matplotlib.pyplot as plt
# Get the IDs of the target account's Weibo posts
def getWeibo_id():
content_parameter = [] # holds the weibo_id values
# Get the id of each Weibo post
url = 'https://m.weibo.cn/api/container/getIndex?uid=1773294041&luicode=10000011&lfid=100103type%3D1%26q%3D%E7%8E%8B%E8%8F%8A&\featurecode=20000320&type=uid&value=1773294041&containerid=1076031773294041'
c_r = requests.get(url)
for i in range(2, 11):
c_parameter = (json.loads(c_r.text))['data']['cards'][i]['mblog']['id']
content_parameter.append(c_parameter)
return content_parameter
# Build the comment URL for each Weibo post
def getWeibo_comment_url(content_parameter):
comment_url = [] # holds the comment URLs
# Build the paged comment url for each post
c_url_base = 'https://m.weibo.cn/api/comments/show?id='
for parameter in content_parameter:
# 101
for page in range(1, 3): # only the first 100 comment pages per post can be fetched (known in advance)
c_url = c_url_base + str(parameter) + '&page=' + str(page)
comment_url.append(c_url)
return comment_url
# Get user_id and comment text
def getWeibo_user_idAndComment(comment_url):
# Get each user_id and comment
user_id = [] # holds the user ids
comment = [] # holds the comment text
for url in comment_url:
u_c_r = requests.get(url)
try:
for m in range(0, 9): # each url is known to contain 9 user records
one_id = json.loads(u_c_r.text)["data"]["data"][m]["user"]["id"]
user_id.append(one_id)
one_comment = json.loads(u_c_r.text)["data"]["data"][m]["text"]
comment.append(one_comment)
except:
pass
return user_id,comment
# Get each user's containerid
def getUserContainerid(user_id):
containerid = []
user_base_url = "https://m.weibo.cn/api/container/getIndex?type=uid&value="
for id in set(user_id): # user_id needs de-duplication first
containerid_url = user_base_url + str(id)
try:
con_r = requests.get(containerid_url)
one_containerid = json.loads(con_r.text)["data"]['tabsInfo']['tabs'][0]["containerid"]
containerid.append(one_containerid)
except:
containerid.append(0)
return containerid
# Get basic user profile information
def getUserInfo(user_id,containerid):
# headers (and normally a cookie) are needed here to simulate a logged-in session
feature = [] # holds basic user info
comment = [] # comment text (not populated in this function)
id_lose = [] # holds the ids of failed requests
user_agent = "Mozilla/5.0 (Linux; Android 6.0; Nexus 5 Build/MRA58N) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/67.0.3396.99 Mobile Safari/537.36"
headers = {"User-Agent": user_agent}
m = 1
for num in zip(user_id, containerid):
# url = "https://m.weibo.cn/api/container/getIndex?uid=" + str(
# num[0]) + "&luicode=10000011&lfid=100103type%3D1%26q%3D&featurecode=20000320&type=uid&value=" + str(
# num[0]) + "&containerid=" + str(num[1])
# https://m.weibo.cn/api/container/getIndex?containerid=2302831662434310_-_INFO&luicode=10000011&lfid=2302831662434310&featurecode=20000320
url = "https://m.weibo.cn/api/container/getIndex?containerid=" + str(num[1]) + "_-_INFO&luicode=10000011&lfid=2302831662434310&featurecode=20000320"
print(url)
try:
# r = requests.get(url, headers=headers, cookies=cookie)
r = requests.get(url, headers=headers, cookies=None)
feature.append([json.loads(r.text)["data"]["cards"][1]["card_group"][1]["item_content"], "999岁", '未知', json.loads(r.text)["data"]["cards"][1]["card_group"][2]["item_content"]])
# feature.append("999岁")
# feature.append('未知')
# feature.append(json.loads(r.text)["data"]["cards"][1]["card_group"][2]["item_content"])
# comment.append() # disabled: append() requires an argument, and the comment list is not used below
print("成功第{}条".format(m))
m = m + 1
time.sleep(1) # sleep for one second to avoid being blocked
except:
id_lose.append(num[0])
# Build feature into a DataFrame for later analysis
print('=====',feature)
user_info = | pd.DataFrame(feature, columns=["性别", "年龄", "星座", "国家城市"]) | pandas.DataFrame |
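# Minimal usage sketch of how the crawl functions above chain together. This is an
# illustration only; it hits live m.weibo.cn endpoints, which may rate-limit or
# change their response format at any time.
def _crawl_example():
    weibo_ids = getWeibo_id()                              # ids of the account's posts
    urls = getWeibo_comment_url(weibo_ids)                 # paged comment API urls
    user_ids, comments = getWeibo_user_idAndComment(urls)  # commenters and their text
    container_ids = getUserContainerid(user_ids)           # profile containerids
    getUserInfo(user_ids, container_ids)                   # builds the user_info DataFrame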
import copy
import os
import pandas as pd
import numpy as np
import tempfile
import skimage.io as io
from toffy import rosetta
import toffy.rosetta_test_cases as test_cases
from ark.utils import test_utils
from ark.utils.load_utils import load_imgs_from_tree
from ark.utils.io_utils import list_folders, list_files
from toffy.rosetta import create_rosetta_matrices
import pytest
from pytest_cases import parametrize_with_cases
parametrize = pytest.mark.parametrize
def test_compensate_matrix_simple():
inputs = np.ones((2, 40, 40, 4))
# each channel is an increasing multiple of original
inputs[0, :, :, 1] *= 2
inputs[0, :, :, 2] *= 3
inputs[0, :, :, 3] *= 4
# second fov is 10x greater than first
inputs[1] = inputs[0] * 10
# define coefficient matrix; each channel has a 2x higher multiplier than previous
coeffs = np.array([[0.01, 0, 0, 0.02], [0.02, 0, 0, 0.040],
[0.04, 0, 0, 0.08], [0.08, 0, 0, 0.16]])
# calculate amount that should be removed from first channel
total_comp = (coeffs[0, 0] * inputs[0, 0, 0, 0] + coeffs[1, 0] * inputs[0, 0, 0, 1] +
coeffs[2, 0] * inputs[0, 0, 0, 2] + coeffs[3, 0] * inputs[0, 0, 0, 3])
out_indices = np.arange(inputs.shape[-1])
out = rosetta._compensate_matrix_simple(inputs, coeffs, out_indices)
# non-affected channels are identical
assert np.all(out[:, :, :, 1:-1] == inputs[:, :, :, 1:-1])
# first channel is changed by baseline amount
assert np.all(out[0, :, :, 0] == inputs[0, :, :, 0] - total_comp)
# first channel in second fov is changed by baseline amount * 10 due to fov multiplier
assert np.all(out[1, :, :, 0] == inputs[1, :, :, 0] - total_comp * 10)
# last channel is changed by baseline amount * 2 due to multiplier in coefficient matrix
assert np.all(out[0, :, :, -1] == inputs[0, :, :, -1] - total_comp * 2)
# last channel in second fov is changed by baseline * 2 * 10 due to fov and coefficient
assert np.all(out[1, :, :, -1] == inputs[1, :, :, -1] - total_comp * 10 * 2)
# don't generate output for first channel
out_indices = out_indices[1:]
out = rosetta._compensate_matrix_simple(inputs, coeffs, out_indices)
# non-affected channels are identical
assert np.all(out[:, :, :, :-1] == inputs[:, :, :, 1:-1])
# last channel is changed by baseline amount * 2 due to multiplier in coefficient matrix
assert np.all(out[0, :, :, -1] == inputs[0, :, :, -1] - total_comp * 2)
# last channel in second fov is changed by baseline * 2 * 10 due to fov and coefficient
assert np.all(out[1, :, :, -1] == inputs[1, :, :, -1] - total_comp * 10 * 2)
def test_validate_inputs():
with tempfile.TemporaryDirectory() as top_level_dir:
data_dir = os.path.join(top_level_dir, 'data_dir')
os.makedirs(data_dir)
# make fake data for testing
fovs, chans = test_utils.gen_fov_chan_names(num_fovs=3, num_chans=3)
filelocs, data_xr = test_utils.create_paired_xarray_fovs(
data_dir, fovs, chans, img_shape=(10, 10), fills=True)
# generate default, correct values for all parameters
masses = [71, 76, 101]
comp_mat_vals = np.random.rand(len(masses), len(masses)) / 100
comp_mat = pd.DataFrame(comp_mat_vals, columns=masses, index=masses)
acquired_masses = copy.copy(masses)
acquired_targets = copy.copy(chans)
input_masses = copy.copy(masses)
output_masses = copy.copy(masses)
all_masses = copy.copy(masses)
save_format = 'raw'
raw_data_sub_folder = ''
batch_size = 1
gaus_rad = 1
input_dict = {'raw_data_dir': data_dir, 'comp_mat': comp_mat,
'acquired_masses': acquired_masses, 'acquired_targets': acquired_targets,
'input_masses': input_masses,
'output_masses': output_masses, 'all_masses': all_masses, 'fovs': fovs,
'save_format': save_format, 'raw_data_sub_folder': raw_data_sub_folder,
'batch_size': batch_size, 'gaus_rad': gaus_rad}
# check that masses are sorted
input_dict_disorder = copy.copy(input_dict)
input_dict_disorder['acquired_masses'] = masses[1:] + masses[:1]
with pytest.raises(ValueError, match='Masses must be sorted'):
rosetta.validate_inputs(**input_dict_disorder)
# check that all masses are present
input_dict_missing = copy.copy(input_dict)
input_dict_missing['acquired_masses'] = masses[1:]
with pytest.raises(ValueError, match='acquired masses and list compensation masses'):
rosetta.validate_inputs(**input_dict_missing)
# check that images and channels are the same
input_dict_img_name = copy.copy(input_dict)
input_dict_img_name['acquired_targets'] = chans + ['chan15']
with pytest.raises(ValueError, match='given in list listed channels'):
rosetta.validate_inputs(**input_dict_img_name)
# check that input masses are valid
input_dict_input_mass = copy.copy(input_dict)
input_dict_input_mass['input_masses'] = masses + [17]
with pytest.raises(ValueError, match='list input masses'):
rosetta.validate_inputs(**input_dict_input_mass)
# check that output masses are valid
input_dict_output_mass = copy.copy(input_dict)
input_dict_output_mass['output_masses'] = masses + [17]
with pytest.raises(ValueError, match='list output masses'):
rosetta.validate_inputs(**input_dict_output_mass)
# check that comp_mat has no NAs
input_dict_na = copy.copy(input_dict)
comp_mat_na = copy.copy(comp_mat)
comp_mat_na.iloc[0, 2] = np.nan
input_dict_na['comp_mat'] = comp_mat_na
with pytest.raises(ValueError, match='no missing values'):
rosetta.validate_inputs(**input_dict_na)
# check that save_format is valid
input_dict_save_format = copy.copy(input_dict)
input_dict_save_format['save_format'] = 'bad'
with pytest.raises(ValueError, match='list save format'):
rosetta.validate_inputs(**input_dict_save_format)
# check that batch_size is valid
input_dict_batch_size = copy.copy(input_dict)
input_dict_batch_size['batch_size'] = 1.5
with pytest.raises(ValueError, match='batch_size parameter'):
rosetta.validate_inputs(**input_dict_batch_size)
# check that gaus_rad is valid
input_dict_gaus_rad = copy.copy(input_dict)
input_dict_gaus_rad['gaus_rad'] = -1
with pytest.raises(ValueError, match='gaus_rad parameter'):
rosetta.validate_inputs(**input_dict_gaus_rad)
def test_flat_field_correction():
input_img = np.random.rand(10, 10)
corrected_img = rosetta.flat_field_correction(img=input_img)
assert corrected_img.shape == input_img.shape
assert not np.array_equal(corrected_img, input_img)
def test_get_masses_from_channel_names():
targets = ['chan1', 'chan2', 'chan3']
masses = [1, 2, 3]
test_df = pd.DataFrame({'Target': targets,
'Mass': masses})
all_masses = rosetta.get_masses_from_channel_names(targets, test_df)
assert np.array_equal(masses, all_masses)
subset_masses = rosetta.get_masses_from_channel_names(targets[:2], test_df)
assert np.array_equal(masses[:2], subset_masses)
with pytest.raises(ValueError, match='channel names'):
rosetta.get_masses_from_channel_names(['chan4'], test_df)
@parametrize('output_masses', [None, [25, 50, 101], [25, 50]])
@parametrize('input_masses', [None, [25, 50, 101], [25, 50]])
@parametrize('gaus_rad', [0, 1, 2])
@parametrize('save_format', ['raw', 'normalized', 'both'])
@parametrize_with_cases('panel_info', cases=test_cases.CompensateImageDataPanel)
@parametrize_with_cases('comp_mat', cases=test_cases.CompensateImageDataMat)
def test_compensate_image_data(output_masses, input_masses, gaus_rad, save_format, panel_info,
comp_mat):
with tempfile.TemporaryDirectory() as top_level_dir:
data_dir = os.path.join(top_level_dir, 'data_dir')
output_dir = os.path.join(top_level_dir, 'output_dir')
os.makedirs(data_dir)
os.makedirs(output_dir)
# make fake data for testing
fovs, chans = test_utils.gen_fov_chan_names(num_fovs=3, num_chans=3)
filelocs, data_xr = test_utils.create_paired_xarray_fovs(
data_dir, fovs, chans, img_shape=(10, 10), fills=True)
# create compensation matrix
comp_mat_path = os.path.join(data_dir, 'comp_mat.csv')
comp_mat.to_csv(comp_mat_path)
# call function
rosetta.compensate_image_data(data_dir, output_dir, comp_mat_path, panel_info,
input_masses=input_masses, output_masses=output_masses,
save_format=save_format, gaus_rad=gaus_rad,
ffc_channels=['chan1'], correct_streaks=True,
streak_chan='chan1')
# all folders created
output_folders = list_folders(output_dir)
assert set(fovs) == set(output_folders)
# determine output directory structure
format_folders = ['raw', 'normalized']
if save_format in format_folders:
format_folders = [save_format]
for folder in format_folders:
# check that all files were created
output_files = list_files(os.path.join(output_dir, fovs[0], folder), '.tif')
output_files = [chan.split('.tif')[0] for chan in output_files]
if output_masses is None or len(output_masses) == 3:
assert set(output_files) == set(panel_info['Target'].values)
else:
assert set(output_files) == set(panel_info['Target'].values[:-1])
output_data = load_imgs_from_tree(data_dir=output_dir, img_sub_folder=folder)
assert np.issubdtype(output_data.dtype, np.floating)
# all channels are smaller than original
for i in range(output_data.shape[0]):
for j in range(output_data.shape[-1]):
assert np.sum(output_data.values[i, :, :, j]) <= \
np.sum(data_xr.values[i, :, :, j])
@parametrize('dir_num', [2, 3])
def test_create_tiled_comparison(dir_num):
with tempfile.TemporaryDirectory() as top_level_dir:
num_chans = 3
num_fovs = 4
output_dir = os.path.join(top_level_dir, 'output_dir')
os.makedirs(output_dir)
dir_names = ['input_dir_{}'.format(i) for i in range(dir_num)]
# create matching input directories
for input_dir in dir_names:
full_path = os.path.join(top_level_dir, input_dir)
os.makedirs(full_path)
fovs, chans = test_utils.gen_fov_chan_names(num_fovs=num_fovs, num_chans=num_chans)
filelocs, data_xr = test_utils.create_paired_xarray_fovs(
full_path, fovs, chans, img_shape=(10, 10), fills=True, sub_dir='normalized')
# pass full paths to function
paths = [os.path.join(top_level_dir, img_dir) for img_dir in dir_names]
rosetta.create_tiled_comparison(paths, output_dir)
# check that each tiled image was created
for i in range(num_chans):
chan_name = 'chan{}_comparison.tiff'.format(i)
chan_img = io.imread(os.path.join(output_dir, chan_name))
row_len = num_fovs * 10
col_len = dir_num * 10
assert chan_img.shape == (col_len, row_len)
# check that directories with different images are okay if overlapping channels specified
for i in range(num_fovs):
os.remove(os.path.join(top_level_dir, dir_names[1], 'fov{}'.format(i),
'normalized/chan0.tiff'))
# no error raised if subset directory is specified
rosetta.create_tiled_comparison(paths, output_dir, channels=['chan1', 'chan2'])
# but one is raised if no subset directory is specified
with pytest.raises(ValueError, match='1 of 1'):
rosetta.create_tiled_comparison(paths, output_dir)
def test_add_source_channel_to_tiled_image():
with tempfile.TemporaryDirectory() as top_level_dir:
num_fovs = 5
num_chans = 4
im_size = 10
# create directory containing raw images
raw_dir = os.path.join(top_level_dir, 'raw_dir')
os.makedirs(raw_dir)
fovs, chans = test_utils.gen_fov_chan_names(num_fovs=num_fovs, num_chans=num_chans)
filelocs, data_xr = test_utils.create_paired_xarray_fovs(
raw_dir, fovs, chans, img_shape=(im_size, im_size), fills=True)
# create directory containing stitched images
tiled_shape = (im_size * 3, im_size * num_fovs)
tiled_dir = os.path.join(top_level_dir, 'tiled_dir')
os.makedirs(tiled_dir)
for i in range(2):
vals = np.random.rand(im_size * 3 * im_size * num_fovs).reshape(tiled_shape)
io.imsave(os.path.join(tiled_dir, 'tiled_image_{}.tiff'.format(i)), vals)
output_dir = os.path.join(top_level_dir, 'output_dir')
os.makedirs(output_dir)
rosetta.add_source_channel_to_tiled_image(raw_img_dir=raw_dir, tiled_img_dir=tiled_dir,
output_dir=output_dir, source_channel='chan1')
# each image should now have an extra row added on top
tiled_images = list_files(output_dir)
for im_name in tiled_images:
image = io.imread(os.path.join(output_dir, im_name))
assert image.shape == (tiled_shape[0] + im_size, tiled_shape[1])
@parametrize('fovs', [None, ['fov1']])
@parametrize('replace', [True, False])
def test_replace_with_intensity_image(replace, fovs):
with tempfile.TemporaryDirectory() as top_level_dir:
# create directory containing raw images
run_dir = os.path.join(top_level_dir, 'run_dir')
os.makedirs(run_dir)
fov_names, chans = test_utils.gen_fov_chan_names(num_fovs=2, num_chans=2)
chans = [chan + '_intensity' for chan in chans]
filelocs, data_xr = test_utils.create_paired_xarray_fovs(
run_dir, fov_names, chans, img_shape=(10, 10), fills=True,
sub_dir='intensities')
rosetta.replace_with_intensity_image(run_dir=run_dir, channel='chan1',
replace=replace, fovs=fovs)
# loop through all fovs to check that correct image was written
for current_fov in range(2):
if fovs is not None and current_fov == 0:
# this fov was skipped, no images should be present here
files = list_files(os.path.join(run_dir, 'fov0'))
assert len(files) == 0
else:
# ensure correct extension is present
if replace:
suffix = '.tiff'
else:
suffix = '_intensity.tiff'
file = os.path.join(run_dir, 'fov{}'.format(current_fov), 'chan1' + suffix)
assert os.path.exists(file)
def test_remove_sub_dirs():
with tempfile.TemporaryDirectory() as temp_dir:
fovs = ['fov1', 'fov2', 'fov3']
sub_dirs = ['sub1', 'sub2', 'sub3']
# make directory structure
for fov in fovs:
os.makedirs(os.path.join(temp_dir, fov))
for sub_dir in sub_dirs:
os.makedirs(os.path.join(temp_dir, fov, sub_dir))
rosetta.remove_sub_dirs(run_dir=temp_dir, sub_dirs=sub_dirs[1:], fovs=fovs[:-1])
# check that last fov has all sub_dirs, all other fovs have appropriate sub_dirs removed
for fov in fovs:
if fov == fovs[-1]:
expected_dirs = sub_dirs
else:
expected_dirs = sub_dirs[:1]
for sub_dir in sub_dirs:
if sub_dir in expected_dirs:
assert os.path.exists(os.path.join(temp_dir, fov, sub_dir))
else:
assert not os.path.exists(os.path.join(temp_dir, fov, sub_dir))
def test_create_rosetta_matrices():
with tempfile.TemporaryDirectory() as temp_dir:
# create baseline rosetta matrix
test_channels = [23, 71, 89, 113, 141, 142, 143]
base_matrix = np.random.randint(1, 50, size=[len(test_channels), len(test_channels)])
base_rosetta = pd.DataFrame(base_matrix, index=test_channels, columns=test_channels)
base_rosetta_path = os.path.join(temp_dir, 'rosetta_matrix.csv')
base_rosetta.to_csv(base_rosetta_path)
# validate output when all channels are included
multipliers = [0.5, 2, 4]
create_rosetta_matrices(base_rosetta_path, temp_dir, multipliers)
for multiplier in multipliers:
rosetta_path = os.path.join(temp_dir, 'rosetta_matrix_mult_%s.csv'
% (str(multiplier)))
# grabs output of create_rosetta_matrices
test_matrix = | pd.read_csv(rosetta_path, index_col=0) | pandas.read_csv |
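# Usage sketch based only on the call pattern exercised by the test above:
# create_rosetta_matrices() writes one scaled compensation matrix per multiplier,
# named 'rosetta_matrix_mult_<multiplier>.csv'. The paths below are hypothetical
# placeholders.
def _rosetta_matrices_example(base_matrix_csv='rosetta_matrix.csv', out_dir='.'):
    create_rosetta_matrices(base_matrix_csv, out_dir, [0.5, 2, 4])
    return pd.read_csv(os.path.join(out_dir, 'rosetta_matrix_mult_2.csv'), index_col=0)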
"""This module contains the HydroMonitor object for reading groundwater
head measurements from a HydroMonitor csv export file
"""
from collections import OrderedDict
import warnings
import os.path
import errno
import os
import matplotlib as mpl
import matplotlib.pyplot as plt
from pandas import Series, DataFrame
import pandas as pd
import numpy as np
import acequia as aq
class HydroMonitor:
"""Read and manage data from hydromonitor csv export file
Parameters:
---------
header : pandas dataframe, optional
header data from hydromonitor file as pandas dataframe
metadata : pandas dataframe, optional
metadata data from hydromonitor file as pandas dataframe
data : pandas dataframe, optional
measurements from hydromonitor file as pandas dataframe
Examples:
-------
Read hydromonitor csv export file:
>>>hm = HydroMonitor.from_csv(filepath=<path>)
Convert to list of GwSeries objects:
>>>mylist = hm.to_list()
Iterate over all series and return GwSeries objects one at a time:
>>>for i in range(len(hm)):
gw = next(hm)
Iterate over raw data and returnGwSeries objects:
>>>for (loc,fil),data in hm.iterdata():
gw = hm.get_series(data=data,loc=loc,fil=fil)
Save all series to json files in <filedir>:
>>>hm.to_json(<filedir>)
"""
CSVSEP = ";"
METATAG = 'StartDateTime' #;XCoordinate;YCoordinate;'
DATATAG = 'DateTime' #;LoggerHead;ManualHead;'
mapping_tubeprops = OrderedDict([
('startdate','startdatetime'),
('mplevel','welltoplevel'),
('filtop','filtertoplevel'),
('filbot','filterbottomlevel'),
('surfacedate',''),
('surfacelevel','surfacelevel'),
])
def __repr__(self):
return (f'{self.__class__.__name__} instance')
def __init__(self,header=None,metadata=None,data=None):
""" Initialize HydroMonitor object """
if isinstance(header, pd.Series):
self.header = header
else:
if not header:
self.header = DataFrame()
else:
message = 'header must be None or a pandas Series'
raise TypeError(message)
if isinstance(metadata, pd.DataFrame):
self.metadata = metadata
else:
if not metadata:
self.metadata = DataFrame()
else:
message = 'meta must be type None or type DataFrame'
raise TypeError(message)
if isinstance(data, pd.DataFrame):
self.data = data
else:
if not data:
self.data = DataFrame()
else:
message = 'data must be type None or type DataFrame'
raise TypeError(message)
# create generator
data = self.delete_duplicate_data()
self.srgen = data.groupby(self.idkeys()).__iter__()
self.itercount = 0
@classmethod
def from_csv(cls,filepath):
"""
read menyanthes hydromonitor csv export file
parameters
----------
filepath : str
path to hydromonitor csv export file
returns
-------
HydroMonitor object
"""
header,metadata,data = cls._readcsv(cls,filepath)
return cls(header=header,metadata=metadata,data=data)
def _readcsv(self,filepath):
""" Read hydromonitor csv export file """
try:
self.filepath = filepath
textfile = open(self.filepath)
except IOError:
raise FileNotFoundError(
errno.ENOENT, os.strerror(errno.ENOENT), filepath)
textfile = None
else:
self.header,self.line_numbers = self._read_header(self,textfile)
textfile.close()
content_list = [x.lower() for x in self.header['file_contents']]
if 'metadata' in content_list:
self.metadata = self._read_metadata(self)
else:
self.metadata = None
warnings.warn("Metadata not specified.")
if 'data' in content_list:
if self.metadata is None:
raise ValueError(f'Heads from {filepath} can not be read '
f'because metadata are not available.')
self.data = self._read_data(self)
else:
self.data = None
warnings.warn("Data not specified.")
#finally:
return self.header,self.metadata,self.data
def _read_header(self,textfile):
"""
read header and linenumbers from hydromonitor export file
returns
-------
header : pandas dataframe
header items as pandas dataframe
filelines : tuple
line numbers
"""
metatag_found = False
datatag_found = False
header = OrderedDict()
for i,line in enumerate(textfile):
line = line.rstrip()
linelist = line.split(self.CSVSEP)
# read header tags
if line.startswith('Format Name'):
header['format_name']=linelist[1]
elif line.startswith('Format Version'):
header['format_version']=linelist[1]
elif line.startswith('Format Definition'):
header['format_definition']=linelist[1]
elif line.startswith('File Type'):
header['file_type']=linelist[1]
elif line.startswith('File Contents'):
values = list(filter(None, linelist[1:]))
header['file_contents']=values
elif line.startswith('Object Type'):
header['object_type']=linelist[1]
elif line.startswith('Object Identification'):
values = list(filter(None, linelist[1:]))
header['object_identification']=values
# read metadata line number and column names
elif self.METATAG in line:
metatag_found = True
self.metafirst = i+2
self.metacols = [x.lower() for x in linelist]
# read data line number column names
elif self.DATATAG in line:
datatag_found = True
self.datafirst = i+2
self.metalast = i-2
self.datacols = [x.lower() for x in linelist]
break # avoid iterating over lines after metadata
# warnings
if not metatag_found:
msg = f'Metadata header {self.METATAG} not found.'
warnings.warn(msg)
if not datatag_found:
msg = f'Data header {self.DATATAG} not found.'
warnings.warn(msg)
# return variables
self.header = Series(header)
self.line_numbers = (self.metafirst,self.metalast,self.datafirst)
return self.header,self.line_numbers
def _read_metadata(self):
""" read metadata from hydromonitor csv export file """
#Name;NITGCode;OLGACode;FilterNo;StartDateTime;XCoordinate;
#YCoordinate;SurfaceLevel;WellTopLevel;FilterTopLevel;
#FilterBottomLevel;WellBottomLevel;LoggerSerial;LoggerDepth;
#Comment;CommentBy;Organization;Status;
meta_nrows = self.metalast - self.metafirst + 1
dfmeta = pd.read_csv(
self.filepath,
sep=self.CSVSEP,
index_col=False,
header=None,
names=self.metacols,
skiprows=self.metafirst,
nrows=meta_nrows,
dtype=str,
encoding='latin-1',
)
# delete empty last column
if '' in list(dfmeta.columns):
dfmeta = dfmeta.drop([''], axis=1)
dfmeta['startdatetime'] = pd.to_datetime(dfmeta['startdatetime'],
format='%d-%m-%Y %H:%M',
errors='coerce')
numcols = ['xcoordinate','ycoordinate','surfacelevel',
'welltoplevel','filtertoplevel','filterbottomlevel',
'wellbottomlevel',]
for col in numcols:
dfmeta[col] = pd.to_numeric(dfmeta[col],errors='coerce')
return dfmeta
def _read_data(self):
""" read data from hydromonitor csv export file """
#read data
dfdata = pd.read_csv(
self.filepath,
sep=self.CSVSEP,
index_col=False,
header=None,
names=self.datacols,
skiprows=self.datafirst,
#parse_dates=['datetime'], # don't, this takes a lot of time
dtype=str,
encoding='latin-1',
)
if 'loggerhead' not in self.datacols:
# when no loggerhead is available, menyanthes only exports
# the column manualheads and the column loggerheads is simply missing
# this happens when loggerdata without manual control measurments
# are imported from another source; Menyanthes marks these
# measurements as manual heads.
pos = dfdata.columns.get_loc('datetime')+1
dfdata.insert(loc=pos,column='loggerhead',value=np.nan)
msg = f'Missing data column loggerhead added and filled with NaNs'
warnings.warn(msg)
if 'manualhead' not in self.datacols:
# this is a variation on the previous missing loggerhead issue
pos = len(dfdata.columns)
dfdata.insert(loc=pos,column='manualhead',value=np.nan)
#dfdata['manualhead'] = np.nan
msg = 'Missing data column manualhead added and filled with NaNs'
warnings.warn(msg)
# delete empty last column
if '' in list(dfdata.columns):
dfdata = dfdata.drop([''], axis=1)
# delete repeating headers deep down list of data as a result of
# annoying bugs in menyanthes export module
dfdata = dfdata[dfdata.nitgcode!='NITGCode'].copy()
dfdata = dfdata[dfdata.nitgcode!='[String]'].copy()
# parsing these dates is very time consuming
dfdata['datetime'] = pd.to_datetime(dfdata['datetime'],
dayfirst=True,format='%d-%m-%Y %H:%M',errors='coerce')
dfdata['loggerhead'] = pd.to_numeric(dfdata['loggerhead'],
errors='coerce')
dfdata['manualhead'] = pd.to_numeric(dfdata['manualhead'],
errors='coerce')
return dfdata
def idkeys(self):
"""Return column names that give a unique identification of a
series """
# bug in hydromonitor export? id is allways nitgcode, filterno
# idkeys = ['nitgcode','filterno']
if len(self.header['object_identification'])>2:
warnings.warn('More than two object identification keys given')
return [x.lower() for x in self.header['object_identification']]
def delete_duplicate_data(self):
"""Remove duplicate data from data and return pd.DataFrame
Duplicates occur in groundwater head measurments in
hydromonitor export when loggervalue and manual control
measurement have the same timestamp."""
sortcols = self.idkeys() + ['datetime','manualhead']
data = self.data.sort_values(by=sortcols)
dropcols = self.idkeys() + ['datetime']
self.data_no_dups = data.drop_duplicates(subset=dropcols,
keep='first').copy()
return self.data_no_dups
def get_series(self,data=None,loc=None,fil=None):
"""Return GwSeries object from HydroMonitor object
Parameters
----------
data : pd.Dataframe
Table with groundwater head values
loc : str
Well location name
fil : str
Tube name
Returns
-------
GwSeries object
"""
gws = aq.GwSeries()
# select metadata for series
bool1 = self.metadata[self.idkeys()[0]]==loc
bool2 = self.metadata[self.idkeys()[1]]==fil
metadata = self.metadata[bool1&bool2]
firstindex = metadata.index[0]
# set tubeprops from hydromonitor metadata
for prop in list(gws._tubeprops):
metakey = self.mapping_tubeprops[prop]
if len(metakey)>0:
gws._tubeprops[prop] = metadata[metakey].values
if prop not in self.mapping_tubeprops.keys():
warnings.warn(f"Unknown property {prop} in {type(gws)}")
# set locprops from hydromonitor metadata
for prop in list(gws._locprops.index):
if prop=='locname':
gws._locprops[prop] = metadata.at[firstindex,self.idkeys()[0]]
if prop=='filname':
gws._locprops[prop] = metadata.at[firstindex,self.idkeys()[1]]
if prop=='alias':
if 'nitgcode' in self.idkeys():
alias_key = 'name'
else:
alias_key = 'nitgcode'
gws._locprops[prop] = metadata.at[firstindex,alias_key]
if prop=='xcr':
gws._locprops[prop] = metadata.at[firstindex,'xcoordinate']
if prop=='ycr':
gws._locprops[prop] = metadata.at[firstindex,'ycoordinate']
if prop=='height_datum':
gws._locprops[prop] = 'mnap'
if prop=='grid_reference':
gws._locprops[prop] = 'rd'
if prop not in ['locname','filname','alias','xcr','ycr',
'height_datum','grid_reference']:
warnings.warn(f"Unknown property {prop} in {type(gws)}")
# set gwseries
datetimes = data['datetime'].values
heads = np.where(
np.isnan(data['loggerhead'].values),
data['manualhead'].values,
data['loggerhead'].values)
heads = | Series(data=heads,index=datetimes) | pandas.Series |
import pandas as pd
import transformer.result.generator as generator
from transformer.result.result_config import ResultFormatterConfig, ResultFieldFormat
class AbstractResultFormatter:
def run(self, config: ResultFormatterConfig, frames: dict[str, pd.DataFrame]): pass
class DefaultArrayResultFormatter(AbstractResultFormatter):
def run(self, config: ResultFormatterConfig, frames: dict[str, pd.DataFrame]) -> list:
if not config.formats:
return self._map_default(frames)
else:
data = []
max_count = 0
for f in frames:
current_length = len(frames[f].index)
if current_length > max_count:
max_count = current_length
for key in config.formats:
d = self._map_segment(config.formats[key], frames)
if len(d.index) == 1:
# Multiple Content
data.append(pd.DataFrame({
key: d.to_dict('records') * max_count
}))
else:
data.append(pd.DataFrame({
key: d.to_dict('records')
}))
if len(data) == 1:
return data[0].to_dict('records')
else:
return | pd.concat(data, axis=1) | pandas.concat |
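# Sketch of the column-wise concat / records pattern used above, with plain pandas
# frames standing in for the formatter's per-key segments (no transformer config
# involved).
def _records_example():
    people = pd.DataFrame({'person': [{'name': 'Ann'}, {'name': 'Bo'}]})
    orders = pd.DataFrame({'order': [{'id': 1}, {'id': 2}]})
    return pd.concat([people, orders], axis=1).to_dict('records')
    # -> [{'person': {'name': 'Ann'}, 'order': {'id': 1}},
    #     {'person': {'name': 'Bo'}, 'order': {'id': 2}}]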
#!/usr/bin/env python3
"""Module to calculate reliability of samples of raw accelerometer files."""
import pandas as pd
import matplotlib.pyplot as plt
import datetime
import argparse
import os
def main():
"""
Application entry point responsible for parsing command line requests
"""
parser = argparse.ArgumentParser(description='Process accelerometer data.')
parser.add_argument('input_file', metavar='file', type=str, nargs='+',
help='filename for csv accelerometer data')
# parse command line arguments
args = parser.parse_args()
for file in args.input_file:
reliability_score(file)
def reliability_score(input_file):
""" calculate reliability score based on input file
:param str input_file: CSV from provided dataset
:return: None. Writes a new CSV following the output naming convention and a PNG of the plot
:rtype: void
"""
sampling_rate=20 # Sample rate (Hz) for target device data
# save file name
base_input_name = os.path.splitext(input_file)[0]
# timestamp for filename
now = datetime.datetime.now()
timestamp = str(now.strftime("%Y%m%d_%H-%M-%S"))
df = | pd.read_csv(input_file) | pandas.read_csv |
import os
import numpy as np
import pandas as pd
from numpy import abs
from numpy import log
from numpy import sign
from scipy.stats import rankdata
import scipy as sp
import statsmodels.api as sm
from data_source import local_source
from tqdm import tqdm as pb
# region Auxiliary functions
def ts_sum(df, window=10):
"""
Wrapper function to estimate rolling sum.
:param df: a pandas DataFrame.
:param window: the rolling window.
:return: a pandas DataFrame with the time-series sum over the past 'window' days.
"""
return df.rolling(window).sum()
def ts_prod(df, window=10):
"""
Wrapper function to estimate rolling product.
:param df: a pandas DataFrame.
:param window: the rolling window.
:return: a pandas DataFrame with the time-series product over the past 'window' days.
"""
return df.rolling(window).prod()
def sma(df, window=10): #simple moving average
"""
Wrapper function to estimate SMA.
:param df: a pandas DataFrame.
:param window: the rolling window.
:return: a pandas DataFrame with the time-series SMA over the past 'window' days.
"""
return df.rolling(window).mean()
def ema(df, n, m): #exponential moving average
"""
Wrapper function to estimate EMA.
:param df: a pandas DataFrame.
:return: ema_{t}=(m/n)*a_{t}+((n-m)/n)*ema_{t-1}
"""
result = df.copy()
for i in range(1,len(df)):
result.iloc[i] = (m*df.iloc[i] + (n-m)*result.iloc[i-1]) / n
return result
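# Minimal usage sketch for ema(): SMA(A, n, m)-style exponential smoothing on a
# short, made-up price series.
def _ema_example():
    prices = pd.Series([10.0, 10.5, 10.2, 10.8, 11.0])
    return ema(prices, n=12, m=2)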
def wma(df, n):
"""
Wrapper function to estimate WMA.
:param df: a pandas DataFrame.
:return: wma_{t}=0.9*a_{t}+1.8*a_{t-1}+...+0.9*n*a_{t-n+1}
"""
weights = pd.Series(0.9*np.flipud(np.arange(1,n+1)))
result = pd.Series(np.nan, index=df.index)
for i in range(n-1,len(df)):
result.iloc[i]= sum(df[i-n+1:i+1].reset_index(drop=True)*weights.reset_index(drop=True))
return result
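# Minimal usage sketch for wma(): within each n-day window the oldest value gets
# weight 0.9*n and the newest gets 0.9, per the docstring above.
def _wma_example():
    prices = pd.Series([10.0, 10.5, 10.2, 10.8, 11.0])
    return wma(prices, n=3)  # the first n-1 entries stay NaN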
def stddev(df, window=10):
"""
Wrapper function to estimate rolling standard deviation.
:param df: a pandas DataFrame.
:param window: the rolling window.
:return: a pandas DataFrame with the rolling standard deviation over the past 'window' days.
"""
return df.rolling(window).std()
def correlation(x, y, window=10):
"""
Wrapper function to estimate rolling correlations.
:param x, y: pandas DataFrames or Series.
:param window: the rolling window.
:return: a pandas DataFrame with the rolling correlation of x and y over the past 'window' days.
"""
return x.rolling(window).corr(y)
def covariance(x, y, window=10):
"""
Wrapper function to estimate rolling covariance.
:param x, y: pandas DataFrames or Series.
:param window: the rolling window.
:return: a pandas DataFrame with the rolling covariance of x and y over the past 'window' days.
"""
return x.rolling(window).cov(y)
def rolling_rank(na):
"""
Auxiliary function to be used in pd.rolling_apply
:param na: numpy array.
:return: The rank of the last value in the array.
"""
return rankdata(na)[-1]
def ts_rank(df, window=10):
"""
Wrapper function to estimate rolling rank.
:param df: a pandas DataFrame.
:param window: the rolling window.
:return: a pandas DataFrame with the time-series rank over the past window days.
"""
return df.rolling(window).apply(rolling_rank)
def rolling_prod(na):
"""
Auxiliary function to be used in pd.rolling_apply
:param na: numpy array.
:return: The product of the values in the array.
"""
return np.prod(na)
def product(df, window=10):
"""
Wrapper function to estimate rolling product.
:param df: a pandas DataFrame.
:param window: the rolling window.
:return: a pandas DataFrame with the time-series product over the past 'window' days.
"""
return df.rolling(window).apply(rolling_prod)
def ts_min(df, window=10):
"""
Wrapper function to estimate rolling min.
:param df: a pandas DataFrame.
:param window: the rolling window.
:return: a pandas DataFrame with the time-series min over the past 'window' days.
"""
return df.rolling(window).min()
def ts_max(df, window=10):
"""
Wrapper function to estimate rolling max.
:param df: a pandas DataFrame.
:param window: the rolling window.
:return: a pandas DataFrame with the time-series max over the past 'window' days.
"""
return df.rolling(window).max()
def delta(df, period=1):
"""
Wrapper function to estimate difference.
:param df: a pandas DataFrame.
:param period: the difference grade.
:return: a pandas DataFrame with today’s value minus the value 'period' days ago.
"""
return df.diff(period)
def delay(df, period=1):
"""
Wrapper function to estimate lag.
:param df: a pandas DataFrame.
:param period: the lag grade.
:return: a pandas DataFrame with lagged time series
"""
return df.shift(period)
def rank(df):
"""
Cross sectional rank
:param df: a pandas DataFrame.
:return: a pandas DataFrame with rank along columns.
"""
#return df.rank(axis=1, pct=True)
return df.rank(pct=True)
def scale(df, k=1):
"""
Scaling time serie.
:param df: a pandas DataFrame.
:param k: scaling factor.
:return: a pandas DataFrame rescaled df such that sum(abs(df)) = k
"""
return df.mul(k).div(np.abs(df).sum())
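# Minimal usage sketch for scale(): after rescaling, the absolute values of the
# series sum to k.
def _scale_example():
    s = pd.Series([1.0, -2.0, 3.0])
    return scale(s, k=2)  # absolute values now sum to k (2.0, up to float rounding)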
def ts_argmax(df, window=10):
"""
Wrapper function to estimate which day ts_max(df, window) occurred on
:param df: a pandas DataFrame.
:param window: the rolling window.
:return: well.. that :)
"""
return df.rolling(window).apply(np.argmax) + 1
def ts_argmin(df, window=10):
"""
Wrapper function to estimate which day ts_min(df, window) occurred on
:param df: a pandas DataFrame.
:param window: the rolling window.
:return: well.. that :)
"""
return df.rolling(window).apply(np.argmin) + 1
def decay_linear(df, period=10):
"""
Linear weighted moving average implementation.
:param df: a pandas DataFrame.
:param period: the LWMA period
:return: a pandas DataFrame with the LWMA.
"""
try:
df = df.to_frame() #Series is not supported for the calculations below.
except:
pass
# Clean data
if df.isnull().values.any():
df.fillna(method='ffill', inplace=True)
df.fillna(method='bfill', inplace=True)
df.fillna(value=0, inplace=True)
na_lwma = np.zeros_like(df)
na_lwma[:period, :] = df.iloc[:period, :]
na_series = df.values
divisor = period * (period + 1) / 2
y = (np.arange(period) + 1) * 1.0 / divisor
# Estimate the actual lwma with the actual close.
# The backtest engine should assure to be snooping bias free.
for row in range(period - 1, df.shape[0]):
x = na_series[row - period + 1: row + 1, :]
na_lwma[row, :] = (np.dot(x.T, y))
return | pd.DataFrame(na_lwma, index=df.index, columns=['CLOSE']) | pandas.DataFrame |
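# Minimal usage sketch for decay_linear(): a 10-period linearly weighted moving
# average of a synthetic close series; the most recent observation carries the
# largest weight.
def _decay_linear_example():
    close = pd.Series(np.linspace(10.0, 12.0, 30))
    return decay_linear(close, period=10)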
import torch
import json
import pandas as pd
import numpy as np
from tqdm import tqdm
import src.config as config
import src.model_utils as mutils
from src.dataset import CustomDataset
def predict(df, model, device, label_list, description_col=config.TEXT_COLUMN):
test_dataset = CustomDataset(
description=df[description_col].values
)
test_sampler = torch.utils.data.SequentialSampler(test_dataset)
test_data_loader = torch.utils.data.DataLoader(
test_dataset,
batch_size=config.VALID_BATCH_SIZE,
sampler=test_sampler,
num_workers=2
)
all_logits = None
model.eval()
tk0 = tqdm(test_data_loader, total=len(test_data_loader))
for step, batch in enumerate(tk0):
input_ids = batch['input_id']
input_mask= batch['input_mask']
segment_ids = batch['segment_id']
input_ids = input_ids.to(device)
input_mask = input_mask.to(device)
segment_ids = segment_ids.to(device)
with torch.no_grad():
logits, _ = model(input_ids, segment_ids, input_mask)
logits = logits.sigmoid()
if all_logits is None:
all_logits = logits.detach().cpu().numpy()
else:
all_logits = np.concatenate((all_logits, logits.detach().cpu().numpy()), axis=0)
return pd.merge(df, pd.DataFrame(all_logits, columns=label_list), left_index=True, right_index=True)
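# Sketch: converting the per-label probabilities appended by predict() into binary
# flags with a fixed threshold. scored_df and label_list are assumed to be the
# return value and label columns from predict(); the helper name is illustrative.
def threshold_predictions(scored_df, label_list, thresh=0.5):
    flags = (scored_df[label_list] >= thresh).astype(int)
    return pd.concat([scored_df.drop(columns=label_list), flags], axis=1)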
def serve_inference(data, label_list: list, model: str = None, description_col=config.TEXT_COLUMN, thresh=None):
if isinstance(data, str):
df = | pd.read_csv(data) | pandas.read_csv |
from scipy import signal
import numpy as np
import pandas as pd
from scipy.signal import filtfilt, butter
import sympy as sp
import math
def interpolate(data):
print("STATUS: Filling NaNs")
unfixed = 0
for index in range(0, data.shape[0]):
amount_before = data.loc[index, "interval_data"].isnull().sum().sum()
if amount_before > 0:
# First we perform a linear fill
data.set_value(index, "interval_data",
data.loc[index, "interval_data"].astype(float).interpolate(
method='linear'))
# Then we cover our last tracks with a backward fill (forward fill not required)
data.set_value(index, "interval_data",
data.loc[index, "interval_data"].fillna(method='bfill'))
# Code to check if any NaN remain
amount_after = data.loc[index, "interval_data"].isnull().sum().sum()
if amount_after > 0:
unfixed += amount_after
print("STATUS: Completed filling NaNs")
print("There are {} unfixed NaN".format(unfixed))
return data
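# Sketch of the fill order used above on a single column: linear interpolation
# closes interior gaps, then the backward fill covers any leading NaN.
def _fill_example():
    col = pd.Series([np.nan, 1.0, np.nan, 3.0])
    return col.interpolate(method='linear').fillna(method='bfill')
    # -> [1.0, 1.0, 2.0, 3.0]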
# TODO: check this code
# TODO: <NAME>
def reduce_noise(data, technique):
if technique == "butter":
for index in range(0, data.shape[0]):
interval_data = data.loc[index, "interval_data"]
b, a= butter(3, 0.3)
# Use filtfilt to apply the filter.
data.set_value(index, "interval_data", interval_data.apply(lambda x: filtfilt(b, a, x), axis=0))
elif technique == "gaussian":
for index in range(0, data.shape[0]):
filtered_interval_data = []
for x in range(12):
interval_data = data.loc[index, "interval_data"]
window = signal.general_gaussian(51, p=0.5, sig=4)
filtered = signal.fftconvolve(window, interval_data.iloc[:, x])
filtered = np.average(interval_data.iloc[:, x]) / np.average(filtered) * filtered
filtered_interval_data.append(filtered)
data.set_value(index, "interval_data", | pd.DataFrame(filtered_interval_data) | pandas.DataFrame |
"""
Provide a generic structure to support window functions,
similar to how we have a Groupby object.
"""
from collections import defaultdict
from datetime import timedelta
from textwrap import dedent
from typing import List, Optional, Set
import warnings
import numpy as np
import pandas._libs.window as libwindow
from pandas.compat._optional import import_optional_dependency
from pandas.compat.numpy import function as nv
from pandas.util._decorators import Appender, Substitution, cache_readonly
from pandas.core.dtypes.common import (
ensure_float64,
is_bool,
is_float_dtype,
is_integer,
is_integer_dtype,
is_list_like,
is_scalar,
is_timedelta64_dtype,
needs_i8_conversion,
)
from pandas.core.dtypes.generic import (
ABCDataFrame,
ABCDateOffset,
ABCDatetimeIndex,
ABCPeriodIndex,
ABCSeries,
ABCTimedeltaIndex,
)
from pandas._typing import Axis, FrameOrSeries
from pandas.core.base import DataError, PandasObject, SelectionMixin
import pandas.core.common as com
from pandas.core.generic import _shared_docs
from pandas.core.groupby.base import GroupByMixin
_shared_docs = dict(**_shared_docs)
_doc_template = """
Returns
-------
Series or DataFrame
Return type is determined by the caller.
See Also
--------
Series.%(name)s : Series %(name)s.
DataFrame.%(name)s : DataFrame %(name)s.
"""
class _Window(PandasObject, SelectionMixin):
_attributes = [
"window",
"min_periods",
"center",
"win_type",
"axis",
"on",
"closed",
] # type: List[str]
exclusions = set() # type: Set[str]
def __init__(
self,
obj,
window=None,
min_periods: Optional[int] = None,
center: Optional[bool] = False,
win_type: Optional[str] = None,
axis: Axis = 0,
on: Optional[str] = None,
closed: Optional[str] = None,
**kwargs
):
self.__dict__.update(kwargs)
self.obj = obj
self.on = on
self.closed = closed
self.window = window
self.min_periods = min_periods
self.center = center
self.win_type = win_type
self.win_freq = None
self.axis = obj._get_axis_number(axis) if axis is not None else None
self.validate()
@property
def _constructor(self):
return Window
@property
def is_datetimelike(self) -> Optional[bool]:
return None
@property
def _on(self):
return None
@property
def is_freq_type(self) -> bool:
return self.win_type == "freq"
def validate(self):
if self.center is not None and not is_bool(self.center):
raise ValueError("center must be a boolean")
if self.min_periods is not None and not is_integer(self.min_periods):
raise ValueError("min_periods must be an integer")
if self.closed is not None and self.closed not in [
"right",
"both",
"left",
"neither",
]:
raise ValueError("closed must be 'right', 'left', 'both' or " "'neither'")
def _create_blocks(self):
"""
Split data into blocks & return conformed data.
"""
obj = self._selected_obj
# filter out the on from the object
if self.on is not None:
if obj.ndim == 2:
obj = obj.reindex(columns=obj.columns.difference([self.on]), copy=False)
blocks = obj._to_dict_of_blocks(copy=False).values()
return blocks, obj
def _gotitem(self, key, ndim, subset=None):
"""
Sub-classes to define. Return a sliced object.
Parameters
----------
key : str / list of selections
ndim : 1,2
requested ndim of result
subset : object, default None
subset to act on
"""
# create a new object to prevent aliasing
if subset is None:
subset = self.obj
self = self._shallow_copy(subset)
self._reset_cache()
if subset.ndim == 2:
if is_scalar(key) and key in subset or is_list_like(key):
self._selection = key
return self
def __getattr__(self, attr):
if attr in self._internal_names_set:
return object.__getattribute__(self, attr)
if attr in self.obj:
return self[attr]
raise AttributeError(
"%r object has no attribute %r" % (type(self).__name__, attr)
)
def _dir_additions(self):
return self.obj._dir_additions()
def _get_window(self, other=None):
return self.window
@property
def _window_type(self) -> str:
return self.__class__.__name__
def __repr__(self) -> str:
"""
Provide a nice str repr of our rolling object.
"""
attrs = (
"{k}={v}".format(k=k, v=getattr(self, k))
for k in self._attributes
if getattr(self, k, None) is not None
)
return "{klass} [{attrs}]".format(
klass=self._window_type, attrs=",".join(attrs)
)
def __iter__(self):
url = "https://github.com/pandas-dev/pandas/issues/11704"
raise NotImplementedError("See issue #11704 {url}".format(url=url))
def _get_index(self) -> Optional[np.ndarray]:
"""
Return index as an ndarray.
Returns
-------
None or ndarray
"""
if self.is_freq_type:
return self._on.asi8
return None
def _prep_values(self, values: Optional[np.ndarray] = None) -> np.ndarray:
"""Convert input to numpy arrays for Cython routines"""
if values is None:
values = getattr(self._selected_obj, "values", self._selected_obj)
# GH #12373 : rolling functions error on float32 data
# make sure the data is coerced to float64
if is_float_dtype(values.dtype):
values = ensure_float64(values)
elif is_integer_dtype(values.dtype):
values = ensure_float64(values)
elif needs_i8_conversion(values.dtype):
raise NotImplementedError(
"ops for {action} for this "
"dtype {dtype} are not "
"implemented".format(action=self._window_type, dtype=values.dtype)
)
else:
try:
values = ensure_float64(values)
except (ValueError, TypeError):
raise TypeError(
"cannot handle this type -> {0}" "".format(values.dtype)
)
# Always convert inf to nan
values[np.isinf(values)] = np.NaN
return values
def _wrap_result(self, result, block=None, obj=None) -> FrameOrSeries:
"""
Wrap a single result.
"""
if obj is None:
obj = self._selected_obj
index = obj.index
if isinstance(result, np.ndarray):
# coerce if necessary
if block is not None:
if is_timedelta64_dtype(block.values.dtype):
from pandas import to_timedelta
result = to_timedelta(result.ravel(), unit="ns").values.reshape(
result.shape
)
if result.ndim == 1:
from pandas import Series
return Series(result, index, name=obj.name)
return type(obj)(result, index=index, columns=block.columns)
return result
def _wrap_results(self, results, blocks, obj, exclude=None) -> FrameOrSeries:
"""
Wrap the results.
Parameters
----------
results : list of ndarrays
blocks : list of blocks
obj : conformed data (may be resampled)
exclude: list of columns to exclude, default to None
"""
from pandas import Series, concat
from pandas.core.index import ensure_index
final = []
for result, block in zip(results, blocks):
result = self._wrap_result(result, block=block, obj=obj)
if result.ndim == 1:
return result
final.append(result)
# if we have an 'on' column
# we want to put it back into the results
# in the same location
columns = self._selected_obj.columns
if self.on is not None and not self._on.equals(obj.index):
name = self._on.name
final.append(Series(self._on, index=obj.index, name=name))
if self._selection is not None:
selection = ensure_index(self._selection)
# need to reorder to include original location of
# the on column (if its not already there)
if name not in selection:
columns = self.obj.columns
indexer = columns.get_indexer(selection.tolist() + [name])
columns = columns.take(sorted(indexer))
# exclude nuisance columns so that they are not reindexed
if exclude is not None and exclude:
columns = [c for c in columns if c not in exclude]
if not columns:
raise DataError("No numeric types to aggregate")
if not len(final):
return obj.astype("float64")
return concat(final, axis=1).reindex(columns=columns, copy=False)
def _center_window(self, result, window) -> np.ndarray:
"""
Center the result in the window.
"""
if self.axis > result.ndim - 1:
raise ValueError(
"Requested axis is larger then no. of argument " "dimensions"
)
offset = _offset(window, True)
if offset > 0:
if isinstance(result, (ABCSeries, ABCDataFrame)):
result = result.slice_shift(-offset, axis=self.axis)
else:
lead_indexer = [slice(None)] * result.ndim
lead_indexer[self.axis] = slice(offset, None)
result = np.copy(result[tuple(lead_indexer)])
return result
def aggregate(self, func, *args, **kwargs):
result, how = self._aggregate(func, *args, **kwargs)
if result is None:
return self.apply(func, raw=False, args=args, kwargs=kwargs)
return result
agg = aggregate
_shared_docs["sum"] = dedent(
"""
Calculate %(name)s sum of given DataFrame or Series.
Parameters
----------
*args, **kwargs
For compatibility with other %(name)s methods. Has no effect
on the computed value.
Returns
-------
Series or DataFrame
Same type as the input, with the same index, containing the
%(name)s sum.
See Also
--------
Series.sum : Reducing sum for Series.
DataFrame.sum : Reducing sum for DataFrame.
Examples
--------
>>> s = pd.Series([1, 2, 3, 4, 5])
>>> s
0 1
1 2
2 3
3 4
4 5
dtype: int64
>>> s.rolling(3).sum()
0 NaN
1 NaN
2 6.0
3 9.0
4 12.0
dtype: float64
>>> s.expanding(3).sum()
0 NaN
1 NaN
2 6.0
3 10.0
4 15.0
dtype: float64
>>> s.rolling(3, center=True).sum()
0 NaN
1 6.0
2 9.0
3 12.0
4 NaN
dtype: float64
For DataFrame, each %(name)s sum is computed column-wise.
>>> df = pd.DataFrame({"A": s, "B": s ** 2})
>>> df
A B
0 1 1
1 2 4
2 3 9
3 4 16
4 5 25
>>> df.rolling(3).sum()
A B
0 NaN NaN
1 NaN NaN
2 6.0 14.0
3 9.0 29.0
4 12.0 50.0
"""
)
_shared_docs["mean"] = dedent(
"""
Calculate the %(name)s mean of the values.
Parameters
----------
    *args
        For compatibility with other %(name)s methods. Has no effect
        on the computed value.
    **kwargs
        For compatibility with other %(name)s methods. Has no effect
        on the computed value.
Returns
-------
Series or DataFrame
Returned object type is determined by the caller of the %(name)s
calculation.
See Also
--------
Series.%(name)s : Calling object with Series data.
DataFrame.%(name)s : Calling object with DataFrames.
Series.mean : Equivalent method for Series.
DataFrame.mean : Equivalent method for DataFrame.
Examples
--------
The below examples will show rolling mean calculations with window sizes of
two and three, respectively.
>>> s = pd.Series([1, 2, 3, 4])
>>> s.rolling(2).mean()
0 NaN
1 1.5
2 2.5
3 3.5
dtype: float64
>>> s.rolling(3).mean()
0 NaN
1 NaN
2 2.0
3 3.0
dtype: float64
"""
)
class Window(_Window):
"""
Provide rolling window calculations.
.. versionadded:: 0.18.0
Parameters
----------
window : int, or offset
Size of the moving window. This is the number of observations used for
calculating the statistic. Each window will be a fixed size.
        If it is an offset then this will be the time period of each window. Each
        window will be of variable size based on the observations included in
        the time-period. This is only valid for datetimelike indexes. This is
        new in 0.19.0.
min_periods : int, default None
Minimum number of observations in window required to have a value
(otherwise result is NA). For a window that is specified by an offset,
`min_periods` will default to 1. Otherwise, `min_periods` will default
to the size of the window.
center : bool, default False
Set the labels at the center of the window.
win_type : str, default None
Provide a window type. If ``None``, all points are evenly weighted.
See the notes below for further information.
on : str, optional
For a DataFrame, a datetime-like column on which to calculate the rolling
window, rather than the DataFrame's index. Provided integer column is
ignored and excluded from result since an integer index is not used to
calculate the rolling window.
    axis : int or str, default 0
        The axis along which the rolling calculation is performed.
closed : str, default None
Make the interval closed on the 'right', 'left', 'both' or
'neither' endpoints.
For offset-based windows, it defaults to 'right'.
For fixed windows, defaults to 'both'. Remaining cases not implemented
for fixed windows.
.. versionadded:: 0.20.0
Returns
-------
a Window or Rolling sub-classed for the particular operation
See Also
--------
expanding : Provides expanding transformations.
ewm : Provides exponential weighted functions.
Notes
-----
By default, the result is set to the right edge of the window. This can be
changed to the center of the window by setting ``center=True``.
To learn more about the offsets & frequency strings, please see `this link
<http://pandas.pydata.org/pandas-docs/stable/user_guide/timeseries.html#offset-aliases>`__.
The recognized win_types are:
* ``boxcar``
* ``triang``
* ``blackman``
* ``hamming``
* ``bartlett``
* ``parzen``
* ``bohman``
* ``blackmanharris``
* ``nuttall``
* ``barthann``
* ``kaiser`` (needs beta)
* ``gaussian`` (needs std)
* ``general_gaussian`` (needs power, width)
* ``slepian`` (needs width)
* ``exponential`` (needs tau), center is set to None.
If ``win_type=None`` all points are evenly weighted. To learn more about
different window types see `scipy.signal window functions
<https://docs.scipy.org/doc/scipy/reference/signal.html#window-functions>`__.
Examples
--------
>>> df = pd.DataFrame({'B': [0, 1, 2, np.nan, 4]})
>>> df
B
0 0.0
1 1.0
2 2.0
3 NaN
4 4.0
Rolling sum with a window length of 2, using the 'triang'
window type.
>>> df.rolling(2, win_type='triang').sum()
B
0 NaN
1 0.5
2 1.5
3 NaN
4 NaN
Rolling sum with a window length of 2, min_periods defaults
to the window length.
>>> df.rolling(2).sum()
B
0 NaN
1 1.0
2 3.0
3 NaN
4 NaN
Same as above, but explicitly set the min_periods
>>> df.rolling(2, min_periods=1).sum()
B
0 0.0
1 1.0
2 3.0
3 2.0
4 4.0
A ragged (meaning not-a-regular frequency), time-indexed DataFrame
>>> df = pd.DataFrame({'B': [0, 1, 2, np.nan, 4]},
... index = [pd.Timestamp('20130101 09:00:00'),
... pd.Timestamp('20130101 09:00:02'),
... pd.Timestamp('20130101 09:00:03'),
... pd.Timestamp('20130101 09:00:05'),
... pd.Timestamp('20130101 09:00:06')])
>>> df
B
2013-01-01 09:00:00 0.0
2013-01-01 09:00:02 1.0
2013-01-01 09:00:03 2.0
2013-01-01 09:00:05 NaN
2013-01-01 09:00:06 4.0
Contrasting to an integer rolling window, this will roll a variable
length window corresponding to the time period.
The default for min_periods is 1.
>>> df.rolling('2s').sum()
B
2013-01-01 09:00:00 0.0
2013-01-01 09:00:02 1.0
2013-01-01 09:00:03 3.0
2013-01-01 09:00:05 NaN
2013-01-01 09:00:06 4.0
"""
def validate(self):
super().validate()
window = self.window
if isinstance(window, (list, tuple, np.ndarray)):
pass
elif is_integer(window):
if window <= 0:
raise ValueError("window must be > 0 ")
import_optional_dependency(
"scipy", extra="Scipy is required to generate window weight."
)
import scipy.signal as sig
if not isinstance(self.win_type, str):
raise ValueError("Invalid win_type {0}".format(self.win_type))
if getattr(sig, self.win_type, None) is None:
raise ValueError("Invalid win_type {0}".format(self.win_type))
else:
raise ValueError("Invalid window {0}".format(window))
def _prep_window(self, **kwargs):
"""
        Provide validation for the window type and return the window
        weights that have already been validated.
"""
window = self._get_window()
if isinstance(window, (list, tuple, np.ndarray)):
return com.asarray_tuplesafe(window).astype(float)
elif is_integer(window):
import scipy.signal as sig
# the below may pop from kwargs
def _validate_win_type(win_type, kwargs):
arg_map = {
"kaiser": ["beta"],
"gaussian": ["std"],
"general_gaussian": ["power", "width"],
"slepian": ["width"],
"exponential": ["tau"],
}
if win_type in arg_map:
win_args = _pop_args(win_type, arg_map[win_type], kwargs)
if win_type == "exponential":
# exponential window requires the first arg (center)
# to be set to None (necessary for symmetric window)
win_args.insert(0, None)
return tuple([win_type] + win_args)
return win_type
def _pop_args(win_type, arg_names, kwargs):
msg = "%s window requires %%s" % win_type
all_args = []
for n in arg_names:
if n not in kwargs:
raise ValueError(msg % n)
all_args.append(kwargs.pop(n))
return all_args
win_type = _validate_win_type(self.win_type, kwargs)
# GH #15662. `False` makes symmetric window, rather than periodic.
return sig.get_window(win_type, window, False).astype(float)
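    # Illustration of the weights _prep_window returns (values come from
    # scipy.signal.get_window and are shown only as a sketch; they may differ
    # slightly between SciPy versions):
    #   rolling(2, win_type='triang') -> array([0.5, 0.5])
    #   rolling(3, win_type='triang') -> array([0.5, 1.0, 0.5])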
def _apply_window(self, mean=True, **kwargs):
"""
        Applies a moving window of type ``win_type`` on the data.
Parameters
----------
mean : bool, default True
If True computes weighted mean, else weighted sum
Returns
-------
y : same type as input argument
"""
window = self._prep_window(**kwargs)
center = self.center
blocks, obj = self._create_blocks()
block_list = list(blocks)
results = []
exclude = []
for i, b in enumerate(blocks):
try:
values = self._prep_values(b.values)
except (TypeError, NotImplementedError):
if isinstance(obj, ABCDataFrame):
exclude.extend(b.columns)
del block_list[i]
continue
else:
raise DataError("No numeric types to aggregate")
if values.size == 0:
results.append(values.copy())
continue
offset = _offset(window, center)
additional_nans = np.array([np.NaN] * offset)
def f(arg, *args, **kwargs):
minp = _use_window(self.min_periods, len(window))
return libwindow.roll_window(
np.concatenate((arg, additional_nans)) if center else arg,
window,
minp,
avg=mean,
)
result = np.apply_along_axis(f, self.axis, values)
if center:
result = self._center_window(result, window)
results.append(result)
return self._wrap_results(results, block_list, obj, exclude)
_agg_see_also_doc = dedent(
"""
See Also
--------
pandas.DataFrame.rolling.aggregate
pandas.DataFrame.aggregate
"""
)
_agg_examples_doc = dedent(
"""
Examples
--------
>>> df = pd.DataFrame(np.random.randn(10, 3), columns=['A', 'B', 'C'])
>>> df
A B C
0 -2.385977 -0.102758 0.438822
1 -1.004295 0.905829 -0.954544
2 0.735167 -0.165272 -1.619346
3 -0.702657 -1.340923 -0.706334
4 -0.246845 0.211596 -0.901819
5 2.463718 3.157577 -1.380906
6 -1.142255 2.340594 -0.039875
7 1.396598 -1.647453 1.677227
8 -0.543425 1.761277 -0.220481
9 -0.640505 0.289374 -1.550670
>>> df.rolling(3, win_type='boxcar').agg('mean')
A B C
0 NaN NaN NaN
1 NaN NaN NaN
2 -0.885035 0.212600 -0.711689
3 -0.323928 -0.200122 -1.093408
4 -0.071445 -0.431533 -1.075833
5 0.504739 0.676083 -0.996353
6 0.358206 1.903256 -0.774200
7 0.906020 1.283573 0.085482
8 -0.096361 0.818139 0.472290
9 0.070889 0.134399 -0.031308
"""
)
@Substitution(
see_also=_agg_see_also_doc,
examples=_agg_examples_doc,
versionadded="",
klass="Series/DataFrame",
axis="",
)
@Appender(_shared_docs["aggregate"])
def aggregate(self, arg, *args, **kwargs):
result, how = self._aggregate(arg, *args, **kwargs)
if result is None:
# these must apply directly
result = arg(self)
return result
agg = aggregate
@Substitution(name="window")
@Appender(_shared_docs["sum"])
def sum(self, *args, **kwargs):
nv.validate_window_func("sum", args, kwargs)
return self._apply_window(mean=False, **kwargs)
@Substitution(name="window")
@Appender(_shared_docs["mean"])
def mean(self, *args, **kwargs):
nv.validate_window_func("mean", args, kwargs)
return self._apply_window(mean=True, **kwargs)
class _GroupByMixin(GroupByMixin):
"""
Provide the groupby facilities.
"""
def __init__(self, obj, *args, **kwargs):
parent = kwargs.pop("parent", None) # noqa
groupby = kwargs.pop("groupby", None)
if groupby is None:
groupby, obj = obj, obj.obj
self._groupby = groupby
self._groupby.mutated = True
self._groupby.grouper.mutated = True
super().__init__(obj, *args, **kwargs)
count = GroupByMixin._dispatch("count")
corr = GroupByMixin._dispatch("corr", other=None, pairwise=None)
cov = GroupByMixin._dispatch("cov", other=None, pairwise=None)
def _apply(
self, func, name=None, window=None, center=None, check_minp=None, **kwargs
):
"""
Dispatch to apply; we are stripping all of the _apply kwargs and
performing the original function call on the grouped object.
"""
def f(x, name=name, *args):
x = self._shallow_copy(x)
if isinstance(name, str):
return getattr(x, name)(*args, **kwargs)
return x.apply(name, *args, **kwargs)
return self._groupby.apply(f)
class _Rolling(_Window):
@property
def _constructor(self):
return Rolling
def _apply(
self, func, name=None, window=None, center=None, check_minp=None, **kwargs
):
"""
Rolling statistical measure using supplied function.
Designed to be used with passed-in Cython array-based functions.
Parameters
----------
func : str/callable to apply
name : str, optional
name of this function
window : int/array, default to _get_window()
center : bool, default to self.center
check_minp : function, default to _use_window
Returns
-------
y : type of input
"""
if center is None:
center = self.center
if window is None:
window = self._get_window()
if check_minp is None:
check_minp = _use_window
blocks, obj = self._create_blocks()
block_list = list(blocks)
index_as_array = self._get_index()
results = []
exclude = []
for i, b in enumerate(blocks):
try:
values = self._prep_values(b.values)
except (TypeError, NotImplementedError):
if isinstance(obj, ABCDataFrame):
exclude.extend(b.columns)
del block_list[i]
continue
else:
raise DataError("No numeric types to aggregate")
if values.size == 0:
results.append(values.copy())
continue
# if we have a string function name, wrap it
if isinstance(func, str):
cfunc = getattr(libwindow, func, None)
if cfunc is None:
raise ValueError(
"we do not support this function "
"in libwindow.{func}".format(func=func)
)
def func(arg, window, min_periods=None, closed=None):
minp = check_minp(min_periods, window)
# ensure we are only rolling on floats
arg = ensure_float64(arg)
return cfunc(arg, window, minp, index_as_array, closed, **kwargs)
# calculation function
if center:
offset = _offset(window, center)
additional_nans = np.array([np.NaN] * offset)
def calc(x):
return func(
np.concatenate((x, additional_nans)),
window,
min_periods=self.min_periods,
closed=self.closed,
)
else:
def calc(x):
return func(
x, window, min_periods=self.min_periods, closed=self.closed
)
with np.errstate(all="ignore"):
if values.ndim > 1:
result = np.apply_along_axis(calc, self.axis, values)
else:
result = calc(values)
if center:
result = self._center_window(result, window)
results.append(result)
return self._wrap_results(results, block_list, obj, exclude)
class _Rolling_and_Expanding(_Rolling):
_shared_docs["count"] = dedent(
r"""
The %(name)s count of any non-NaN observations inside the window.
Returns
-------
Series or DataFrame
Returned object type is determined by the caller of the %(name)s
calculation.
See Also
--------
Series.%(name)s : Calling object with Series data.
DataFrame.%(name)s : Calling object with DataFrames.
DataFrame.count : Count of the full DataFrame.
Examples
--------
>>> s = pd.Series([2, 3, np.nan, 10])
>>> s.rolling(2).count()
0 1.0
1 2.0
2 1.0
3 1.0
dtype: float64
>>> s.rolling(3).count()
0 1.0
1 2.0
2 2.0
3 2.0
dtype: float64
>>> s.rolling(4).count()
0 1.0
1 2.0
2 2.0
3 3.0
dtype: float64
"""
)
def count(self):
blocks, obj = self._create_blocks()
# Validate the index
self._get_index()
window = self._get_window()
window = min(window, len(obj)) if not self.center else window
results = []
for b in blocks:
result = b.notna().astype(int)
result = self._constructor(
result,
window=window,
min_periods=0,
center=self.center,
axis=self.axis,
closed=self.closed,
).sum()
results.append(result)
return self._wrap_results(results, blocks, obj)
_shared_docs["apply"] = dedent(
r"""
The %(name)s function's apply function.
Parameters
----------
func : function
Must produce a single value from an ndarray input if ``raw=True``
or a single value from a Series if ``raw=False``.
raw : bool, default None
* ``False`` : passes each row or column as a Series to the
function.
* ``True`` or ``None`` : the passed function will receive ndarray
objects instead.
If you are just applying a NumPy reduction function this will
achieve much better performance.
The `raw` parameter is required and will show a FutureWarning if
not passed. In the future `raw` will default to False.
.. versionadded:: 0.23.0
*args, **kwargs
Arguments and keyword arguments to be passed into func.
Returns
-------
Series or DataFrame
Return type is determined by the caller.
See Also
--------
Series.%(name)s : Series %(name)s.
DataFrame.%(name)s : DataFrame %(name)s.
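    Examples
    --------
    A simple NumPy reduction with ``raw=True`` (the applied function
    receives plain ndarrays):
    >>> s = pd.Series([1, 2, 3, 4])
    >>> s.rolling(2).apply(np.sum, raw=True)
    0    NaN
    1    3.0
    2    5.0
    3    7.0
    dtype: float64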
"""
)
def apply(self, func, raw=None, args=(), kwargs={}):
from pandas import Series
kwargs.pop("_level", None)
window = self._get_window()
offset = _offset(window, self.center)
index_as_array = self._get_index()
# TODO: default is for backward compat
# change to False in the future
if raw is None:
warnings.warn(
"Currently, 'apply' passes the values as ndarrays to the "
"applied function. In the future, this will change to passing "
"it as Series objects. You need to specify 'raw=True' to keep "
"the current behaviour, and you can pass 'raw=False' to "
"silence this warning",
FutureWarning,
stacklevel=3,
)
raw = True
def f(arg, window, min_periods, closed):
minp = _use_window(min_periods, window)
if not raw:
arg = Series(arg, index=self.obj.index)
return libwindow.roll_generic(
arg,
window,
minp,
index_as_array,
closed,
offset,
func,
raw,
args,
kwargs,
)
return self._apply(f, func, args=args, kwargs=kwargs, center=False, raw=raw)
def sum(self, *args, **kwargs):
nv.validate_window_func("sum", args, kwargs)
return self._apply("roll_sum", "sum", **kwargs)
_shared_docs["max"] = dedent(
"""
Calculate the %(name)s maximum.
Parameters
----------
*args, **kwargs
Arguments and keyword arguments to be passed into func.
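    Returns
    -------
    Series or DataFrame
        Returned object type is determined by the caller of the %(name)s
        calculation.
    See Also
    --------
    Series.%(name)s : Calling object with Series data.
    DataFrame.%(name)s : Calling object with DataFrames.
    Series.max : Similar method for Series.
    DataFrame.max : Similar method for DataFrame.
    Examples
    --------
    Performing a rolling maximum with a window size of 2.
    >>> s = pd.Series([1, 2, 3, 4])
    >>> s.rolling(2).max()
    0    NaN
    1    2.0
    2    3.0
    3    4.0
    dtype: float64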
"""
)
def max(self, *args, **kwargs):
nv.validate_window_func("max", args, kwargs)
return self._apply("roll_max", "max", **kwargs)
_shared_docs["min"] = dedent(
"""
Calculate the %(name)s minimum.
Parameters
----------
**kwargs
        For compatibility with other %(name)s methods. Has no effect
        on the computed minimum.
Returns
-------
Series or DataFrame
Returned object type is determined by the caller of the %(name)s
calculation.
See Also
--------
Series.%(name)s : Calling object with a Series.
DataFrame.%(name)s : Calling object with a DataFrame.
Series.min : Similar method for Series.
DataFrame.min : Similar method for DataFrame.
Examples
--------
Performing a rolling minimum with a window size of 3.
>>> s = pd.Series([4, 3, 5, 2, 6])
>>> s.rolling(3).min()
0 NaN
1 NaN
2 3.0
3 2.0
4 2.0
dtype: float64
"""
)
def min(self, *args, **kwargs):
nv.validate_window_func("min", args, kwargs)
return self._apply("roll_min", "min", **kwargs)
def mean(self, *args, **kwargs):
nv.validate_window_func("mean", args, kwargs)
return self._apply("roll_mean", "mean", **kwargs)
_shared_docs["median"] = dedent(
"""
Calculate the %(name)s median.
Parameters
----------
**kwargs
For compatibility with other %(name)s methods. Has no effect
on the computed median.
Returns
-------
Series or DataFrame
Returned type is the same as the original object.
See Also
--------
Series.%(name)s : Calling object with Series data.
DataFrame.%(name)s : Calling object with DataFrames.
Series.median : Equivalent method for Series.
DataFrame.median : Equivalent method for DataFrame.
Examples
--------
Compute the rolling median of a series with a window size of 3.
>>> s = pd.Series([0, 1, 2, 3, 4])
>>> s.rolling(3).median()
0 NaN
1 NaN
2 1.0
3 2.0
4 3.0
dtype: float64
"""
)
def median(self, **kwargs):
return self._apply("roll_median_c", "median", **kwargs)
_shared_docs["std"] = dedent(
"""
Calculate %(name)s standard deviation.
Normalized by N-1 by default. This can be changed using the `ddof`
argument.
Parameters
----------
ddof : int, default 1
Delta Degrees of Freedom. The divisor used in calculations
is ``N - ddof``, where ``N`` represents the number of elements.
*args, **kwargs
For NumPy compatibility. No additional arguments are used.
Returns
-------
Series or DataFrame
Returns the same object type as the caller of the %(name)s calculation.
See Also
--------
Series.%(name)s : Calling object with Series data.
DataFrame.%(name)s : Calling object with DataFrames.
Series.std : Equivalent method for Series.
DataFrame.std : Equivalent method for DataFrame.
numpy.std : Equivalent method for Numpy array.
Notes
-----
The default `ddof` of 1 used in Series.std is different than the default
`ddof` of 0 in numpy.std.
A minimum of one period is required for the rolling calculation.
Examples
--------
>>> s = pd.Series([5, 5, 6, 7, 5, 5, 5])
>>> s.rolling(3).std()
0 NaN
1 NaN
2 0.577350
3 1.000000
4 1.000000
5 1.154701
6 0.000000
dtype: float64
>>> s.expanding(3).std()
0 NaN
1 NaN
2 0.577350
3 0.957427
4 0.894427
5 0.836660
6 0.786796
dtype: float64
"""
)
def std(self, ddof=1, *args, **kwargs):
nv.validate_window_func("std", args, kwargs)
window = self._get_window()
index_as_array = self._get_index()
def f(arg, *args, **kwargs):
minp = _require_min_periods(1)(self.min_periods, window)
return _zsqrt(
libwindow.roll_var(arg, window, minp, index_as_array, self.closed, ddof)
)
return self._apply(
f, "std", check_minp=_require_min_periods(1), ddof=ddof, **kwargs
)
_shared_docs["var"] = dedent(
"""
Calculate unbiased %(name)s variance.
Normalized by N-1 by default. This can be changed using the `ddof`
argument.
Parameters
----------
ddof : int, default 1
Delta Degrees of Freedom. The divisor used in calculations
is ``N - ddof``, where ``N`` represents the number of elements.
*args, **kwargs
For NumPy compatibility. No additional arguments are used.
Returns
-------
Series or DataFrame
Returns the same object type as the caller of the %(name)s calculation.
See Also
--------
Series.%(name)s : Calling object with Series data.
DataFrame.%(name)s : Calling object with DataFrames.
Series.var : Equivalent method for Series.
DataFrame.var : Equivalent method for DataFrame.
numpy.var : Equivalent method for Numpy array.
Notes
-----
The default `ddof` of 1 used in :meth:`Series.var` is different than the
default `ddof` of 0 in :func:`numpy.var`.
A minimum of 1 period is required for the rolling calculation.
Examples
--------
>>> s = pd.Series([5, 5, 6, 7, 5, 5, 5])
>>> s.rolling(3).var()
0 NaN
1 NaN
2 0.333333
3 1.000000
4 1.000000
5 1.333333
6 0.000000
dtype: float64
>>> s.expanding(3).var()
0 NaN
1 NaN
2 0.333333
3 0.916667
4 0.800000
5 0.700000
6 0.619048
dtype: float64
"""
)
def var(self, ddof=1, *args, **kwargs):
nv.validate_window_func("var", args, kwargs)
return self._apply(
"roll_var", "var", check_minp=_require_min_periods(1), ddof=ddof, **kwargs
)
_shared_docs[
"skew"
] = """
Unbiased %(name)s skewness.
Parameters
----------
**kwargs
Keyword arguments to be passed into func.
"""
def skew(self, **kwargs):
return self._apply(
"roll_skew", "skew", check_minp=_require_min_periods(3), **kwargs
)
_shared_docs["kurt"] = dedent(
"""
Calculate unbiased %(name)s kurtosis.
This function uses Fisher's definition of kurtosis without bias.
Parameters
----------
**kwargs
        For compatibility with other %(name)s methods. Has no effect
        on the computed kurtosis.
Returns
-------
Series or DataFrame
Returned object type is determined by the caller of the %(name)s
calculation.
See Also
--------
Series.%(name)s : Calling object with Series data.
DataFrame.%(name)s : Calling object with DataFrames.
Series.kurt : Equivalent method for Series.
DataFrame.kurt : Equivalent method for DataFrame.
scipy.stats.skew : Third moment of a probability density.
scipy.stats.kurtosis : Reference SciPy method.
Notes
-----
A minimum of 4 periods is required for the %(name)s calculation.
"""
)
def kurt(self, **kwargs):
return self._apply(
"roll_kurt", "kurt", check_minp=_require_min_periods(4), **kwargs
)
_shared_docs["quantile"] = dedent(
"""
Calculate the %(name)s quantile.
Parameters
----------
quantile : float
Quantile to compute. 0 <= quantile <= 1.
interpolation : {'linear', 'lower', 'higher', 'midpoint', 'nearest'}
.. versionadded:: 0.23.0
This optional parameter specifies the interpolation method to use,
when the desired quantile lies between two data points `i` and `j`:
* linear: `i + (j - i) * fraction`, where `fraction` is the
fractional part of the index surrounded by `i` and `j`.
* lower: `i`.
* higher: `j`.
* nearest: `i` or `j` whichever is nearest.
* midpoint: (`i` + `j`) / 2.
**kwargs:
For compatibility with other %(name)s methods. Has no effect on
the result.
Returns
-------
Series or DataFrame
Returned object type is determined by the caller of the %(name)s
calculation.
See Also
--------
Series.quantile : Computes value at the given quantile over all data
in Series.
DataFrame.quantile : Computes values at the given quantile over
requested axis in DataFrame.
Examples
--------
>>> s = pd.Series([1, 2, 3, 4])
>>> s.rolling(2).quantile(.4, interpolation='lower')
0 NaN
1 1.0
2 2.0
3 3.0
dtype: float64
>>> s.rolling(2).quantile(.4, interpolation='midpoint')
0 NaN
1 1.5
2 2.5
3 3.5
dtype: float64
"""
)
def quantile(self, quantile, interpolation="linear", **kwargs):
window = self._get_window()
index_as_array = self._get_index()
def f(arg, *args, **kwargs):
minp = _use_window(self.min_periods, window)
if quantile == 1.0:
return libwindow.roll_max(
arg, window, minp, index_as_array, self.closed
)
elif quantile == 0.0:
return libwindow.roll_min(
arg, window, minp, index_as_array, self.closed
)
else:
return libwindow.roll_quantile(
arg,
window,
minp,
index_as_array,
self.closed,
quantile,
interpolation,
)
return self._apply(f, "quantile", quantile=quantile, **kwargs)
_shared_docs[
"cov"
] = """
Calculate the %(name)s sample covariance.
Parameters
----------
other : Series, DataFrame, or ndarray, optional
If not supplied then will default to self and produce pairwise
output.
pairwise : bool, default None
If False then only matching columns between self and other will be
used and the output will be a DataFrame.
If True then all pairwise combinations will be calculated and the
output will be a MultiIndexed DataFrame in the case of DataFrame
inputs. In the case of missing elements, only complete pairwise
observations will be used.
ddof : int, default 1
Delta Degrees of Freedom. The divisor used in calculations
is ``N - ddof``, where ``N`` represents the number of elements.
**kwargs
Keyword arguments to be passed into func.
"""
def cov(self, other=None, pairwise=None, ddof=1, **kwargs):
if other is None:
other = self._selected_obj
# only default unset
pairwise = True if pairwise is None else pairwise
other = self._shallow_copy(other)
# GH 16058: offset window
if self.is_freq_type:
window = self.win_freq
else:
window = self._get_window(other)
def _get_cov(X, Y):
# GH #12373 : rolling functions error on float32 data
# to avoid potential overflow, cast the data to float64
X = X.astype("float64")
Y = Y.astype("float64")
mean = lambda x: x.rolling(
window, self.min_periods, center=self.center
).mean(**kwargs)
count = (X + Y).rolling(window=window, center=self.center).count(**kwargs)
bias_adj = count / (count - ddof)
return (mean(X * Y) - mean(X) * mean(Y)) * bias_adj
return _flex_binary_moment(
self._selected_obj, other._selected_obj, _get_cov, pairwise=bool(pairwise)
)
_shared_docs["corr"] = dedent(
"""
Calculate %(name)s correlation.
Parameters
----------
other : Series, DataFrame, or ndarray, optional
If not supplied then will default to self.
pairwise : bool, default None
Calculate pairwise combinations of columns within a
DataFrame. If `other` is not specified, defaults to `True`,
otherwise defaults to `False`.
Not relevant for :class:`~pandas.Series`.
**kwargs
Unused.
Returns
-------
Series or DataFrame
Returned object type is determined by the caller of the
%(name)s calculation.
See Also
--------
Series.%(name)s : Calling object with Series data.
DataFrame.%(name)s : Calling object with DataFrames.
Series.corr : Equivalent method for Series.
DataFrame.corr : Equivalent method for DataFrame.
%(name)s.cov : Similar method to calculate covariance.
numpy.corrcoef : NumPy Pearson's correlation calculation.
Notes
-----
This function uses Pearson's definition of correlation
(https://en.wikipedia.org/wiki/Pearson_correlation_coefficient).
When `other` is not specified, the output will be self correlation (e.g.
all 1's), except for :class:`~pandas.DataFrame` inputs with `pairwise`
set to `True`.
Function will return ``NaN`` for correlations of equal valued sequences;
this is the result of a 0/0 division error.
When `pairwise` is set to `False`, only matching columns between `self` and
`other` will be used.
When `pairwise` is set to `True`, the output will be a MultiIndex DataFrame
with the original index on the first level, and the `other` DataFrame
columns on the second level.
In the case of missing elements, only complete pairwise observations
will be used.
Examples
--------
The below example shows a rolling calculation with a window size of
four matching the equivalent function call using :meth:`numpy.corrcoef`.
>>> v1 = [3, 3, 3, 5, 8]
>>> v2 = [3, 4, 4, 4, 8]
>>> fmt = "{0:.6f}" # limit the printed precision to 6 digits
>>> # numpy returns a 2X2 array, the correlation coefficient
>>> # is the number at entry [0][1]
>>> print(fmt.format(np.corrcoef(v1[:-1], v2[:-1])[0][1]))
0.333333
>>> print(fmt.format(np.corrcoef(v1[1:], v2[1:])[0][1]))
0.916949
>>> s1 = pd.Series(v1)
>>> s2 = pd.Series(v2)
>>> s1.rolling(4).corr(s2)
0 NaN
1 NaN
2 NaN
3 0.333333
4 0.916949
dtype: float64
The below example shows a similar rolling calculation on a
DataFrame using the pairwise option.
>>> matrix = np.array([[51., 35.], [49., 30.], [47., 32.],\
[46., 31.], [50., 36.]])
>>> print(np.corrcoef(matrix[:-1,0], matrix[:-1,1]).round(7))
[[1. 0.6263001]
[0.6263001 1. ]]
>>> print(np.corrcoef(matrix[1:,0], matrix[1:,1]).round(7))
[[1. 0.5553681]
[0.5553681 1. ]]
>>> df = pd.DataFrame(matrix, columns=['X','Y'])
>>> df
X Y
0 51.0 35.0
1 49.0 30.0
2 47.0 32.0
3 46.0 31.0
4 50.0 36.0
>>> df.rolling(4).corr(pairwise=True)
X Y
0 X NaN NaN
Y NaN NaN
1 X NaN NaN
Y NaN NaN
2 X NaN NaN
Y NaN NaN
3 X 1.000000 0.626300
Y 0.626300 1.000000
4 X 1.000000 0.555368
Y 0.555368 1.000000
"""
)
def corr(self, other=None, pairwise=None, **kwargs):
if other is None:
other = self._selected_obj
# only default unset
pairwise = True if pairwise is None else pairwise
other = self._shallow_copy(other)
window = self._get_window(other)
def _get_corr(a, b):
a = a.rolling(
window=window, min_periods=self.min_periods, center=self.center
)
b = b.rolling(
window=window, min_periods=self.min_periods, center=self.center
)
return a.cov(b, **kwargs) / (a.std(**kwargs) * b.std(**kwargs))
return _flex_binary_moment(
self._selected_obj, other._selected_obj, _get_corr, pairwise=bool(pairwise)
)
class Rolling(_Rolling_and_Expanding):
@cache_readonly
def is_datetimelike(self):
return isinstance(
self._on, (ABCDatetimeIndex, ABCTimedeltaIndex, ABCPeriodIndex)
)
@cache_readonly
def _on(self):
if self.on is None:
return self.obj.index
elif isinstance(self.obj, ABCDataFrame) and self.on in self.obj.columns:
from pandas import Index
return Index(self.obj[self.on])
else:
raise ValueError(
"invalid on specified as {0}, "
"must be a column (if DataFrame) "
"or None".format(self.on)
)
def validate(self):
super().validate()
# we allow rolling on a datetimelike index
if (self.obj.empty or self.is_datetimelike) and isinstance(
self.window, (str, ABCDateOffset, timedelta)
):
self._validate_monotonic()
freq = self._validate_freq()
# we don't allow center
if self.center:
raise NotImplementedError(
"center is not implemented "
"for datetimelike and offset "
"based windows"
)
# this will raise ValueError on non-fixed freqs
self.win_freq = self.window
self.window = freq.nanos
self.win_type = "freq"
# min_periods must be an integer
if self.min_periods is None:
self.min_periods = 1
elif not is_integer(self.window):
raise ValueError("window must be an integer")
elif self.window < 0:
raise ValueError("window must be non-negative")
if not self.is_datetimelike and self.closed is not None:
raise ValueError(
"closed only implemented for datetimelike " "and offset based windows"
)
def _validate_monotonic(self):
"""
Validate on is_monotonic.
"""
if not self._on.is_monotonic:
formatted = self.on or "index"
raise ValueError("{0} must be " "monotonic".format(formatted))
def _validate_freq(self):
"""
Validate & return window frequency.
"""
from pandas.tseries.frequencies import to_offset
try:
return to_offset(self.window)
except (TypeError, ValueError):
raise ValueError(
"passed window {0} is not "
"compatible with a datetimelike "
"index".format(self.window)
)
_agg_see_also_doc = dedent(
"""
See Also
--------
Series.rolling
DataFrame.rolling
"""
)
_agg_examples_doc = dedent(
"""
Examples
--------
>>> df = pd.DataFrame(np.random.randn(10, 3), columns=['A', 'B', 'C'])
>>> df
A B C
0 -2.385977 -0.102758 0.438822
1 -1.004295 0.905829 -0.954544
2 0.735167 -0.165272 -1.619346
3 -0.702657 -1.340923 -0.706334
4 -0.246845 0.211596 -0.901819
5 2.463718 3.157577 -1.380906
6 -1.142255 2.340594 -0.039875
7 1.396598 -1.647453 1.677227
8 -0.543425 1.761277 -0.220481
9 -0.640505 0.289374 -1.550670
>>> df.rolling(3).sum()
A B C
0 NaN NaN NaN
1 NaN NaN NaN
2 -2.655105 0.637799 -2.135068
3 -0.971785 -0.600366 -3.280224
4 -0.214334 -1.294599 -3.227500
5 1.514216 2.028250 -2.989060
6 1.074618 5.709767 -2.322600
7 2.718061 3.850718 0.256446
8 -0.289082 2.454418 1.416871
9 0.212668 0.403198 -0.093924
>>> df.rolling(3).agg({'A':'sum', 'B':'min'})
A B
0 NaN NaN
1 NaN NaN
2 -2.655105 -0.165272
3 -0.971785 -1.340923
4 -0.214334 -1.340923
5 1.514216 -1.340923
6 1.074618 0.211596
7 2.718061 -1.647453
8 -0.289082 -1.647453
9 0.212668 -1.647453
"""
)
@Substitution(
see_also=_agg_see_also_doc,
examples=_agg_examples_doc,
versionadded="",
klass="Series/Dataframe",
axis="",
)
@Appender(_shared_docs["aggregate"])
def aggregate(self, arg, *args, **kwargs):
return super().aggregate(arg, *args, **kwargs)
agg = aggregate
@Substitution(name="rolling")
@Appender(_shared_docs["count"])
def count(self):
# different impl for freq counting
if self.is_freq_type:
return self._apply("roll_count", "count")
return super().count()
@Substitution(name="rolling")
@Appender(_shared_docs["apply"])
def apply(self, func, raw=None, args=(), kwargs={}):
return super().apply(func, raw=raw, args=args, kwargs=kwargs)
    @Substitution(name="rolling")
from __future__ import unicode_literals, division, print_function
import os
import unittest
import pandas as pd
import numpy as np
import warnings
from itertools import product
from pymatgen.core.structure import Structure
from pymatgen.util.testing import PymatgenTest
from sklearn.dummy import DummyRegressor, DummyClassifier
from matminer.utils.caching import _get_all_nearest_neighbors
from matminer.featurizers.base import BaseFeaturizer, MultipleFeaturizer, \
StackedFeaturizer
from matminer.featurizers.function import FunctionFeaturizer
from matminer.featurizers.structure import SiteStatsFingerprint
class SingleFeaturizer(BaseFeaturizer):
def feature_labels(self):
return ['y']
def featurize(self, x):
return [x + 1]
def citations(self):
return ["A"]
def implementors(self):
return ["Us"]
class SingleFeaturizerMultiArgs(SingleFeaturizer):
def featurize(self, *x):
return [x[0] + x[1]]
class MultipleFeatureFeaturizer(BaseFeaturizer):
def feature_labels(self):
return ['w', 'z']
def featurize(self, x):
return [x - 1, x + 2]
def citations(self):
return ["A"]
def implementors(self):
return ["Them"]
class MatrixFeaturizer(BaseFeaturizer):
def feature_labels(self):
return ['representation']
def featurize(self, *x):
return [np.eye(2, 2)]
def citations(self):
return ["C"]
def implementors(self):
return ["Everyone"]
class MultiArgs2(SingleFeaturizerMultiArgs):
def featurize(self, *x):
# Making a 2D array to test whether MutliFeaturizer
# can handle featurizers that have both 1D vectors with
# singleton dimensions (e.g., shape==(4,1)) and those
# without (e.g., shape==(4,))
return [super(MultiArgs2, self).featurize(*x)]
def feature_labels(self):
return ['y2']
class FittableFeaturizer(BaseFeaturizer):
"""
This test featurizer tests fitting qualities of BaseFeaturizer, including
refittability and different results based on different fits.
"""
def fit(self, X, y=None, **fit_kwargs):
self._features = ['a', 'b', 'c'][:len(X)]
return self
def featurize(self, x):
return [x + 3, x + 4, 2 * x][:len(self._features)]
def feature_labels(self):
return self._features
def citations(self):
return ["Q"]
def implementors(self):
return ["A competing research group"]
class MultiTypeFeaturizer(BaseFeaturizer):
"""A featurizer that returns multiple dtypes"""
def featurize(self, *x):
return ['a', 1]
def feature_labels(self):
return ['label', 'int_label']
class TestBaseClass(PymatgenTest):
def setUp(self):
self.single = SingleFeaturizer()
self.multi = MultipleFeatureFeaturizer()
self.matrix = MatrixFeaturizer()
self.multiargs = SingleFeaturizerMultiArgs()
self.fittable = FittableFeaturizer()
@staticmethod
def make_test_data():
return pd.DataFrame({'x': [1, 2, 3]})
def test_dataframe(self):
data = self.make_test_data()
data = self.single.featurize_dataframe(data, 'x')
self.assertArrayAlmostEqual(data['y'], [2, 3, 4])
data = self.multi.featurize_dataframe(data, 'x')
self.assertArrayAlmostEqual(data['w'], [0, 1, 2])
self.assertArrayAlmostEqual(data['z'], [3, 4, 5])
def test_matrix(self):
"""Test the ability to add features that are matrices to a dataframe"""
data = self.make_test_data()
data = self.matrix.featurize_dataframe(data, 'x')
self.assertArrayAlmostEqual(np.eye(2, 2), data['representation'][0])
def test_inplace(self):
data = self.make_test_data()
self.single.featurize_dataframe(data, 'x', inplace=False)
self.assertNotIn('y', data.columns)
self.single.featurize_dataframe(data, 'x', inplace=True)
self.assertIn('y', data)
def test_indices(self):
data = self.make_test_data()
data.index = [4, 6, 6]
data = self.single.featurize_dataframe(data, 'x')
self.assertArrayAlmostEqual(data['y'], [2, 3, 4])
def test_multiple(self):
# test iterating over both entries and featurizers
for iter_entries in [True, False]:
multi_f = MultipleFeaturizer([self.single, self.multi],
iterate_over_entries=iter_entries)
data = self.make_test_data()
self.assertArrayAlmostEqual([2, 0, 3], multi_f.featurize(1))
self.assertArrayEqual(['A'], multi_f.citations())
implementors = multi_f.implementors()
self.assertIn('Us', implementors)
self.assertIn('Them', implementors)
self.assertEqual(2, len(implementors))
            # Ensure BaseFeaturizer operation without overridden featurize_dataframe
with warnings.catch_warnings(record=True) as w:
multi_f.featurize_dataframe(data, 'x')
self.assertEqual(len(w), 0)
self.assertArrayAlmostEqual(data['y'], [2, 3, 4])
self.assertArrayAlmostEqual(data['w'], [0, 1, 2])
self.assertArrayAlmostEqual(data['z'], [3, 4, 5])
f = MatrixFeaturizer()
multi_f = MultipleFeaturizer([self.single, self.multi, f])
data = self.make_test_data()
with warnings.catch_warnings(record=True) as w:
multi_f.featurize_dataframe(data, 'x')
self.assertEqual(len(w), 0)
self.assertArrayAlmostEqual(data['representation'][0],
[[1.0, 0.0], [0.0, 1.0]])
    # Leaving this here for now in case this issue crops up again.
# def test_multifeatures(self):
# multiargs2 = MultiArgs2()
#
# # test iterating over both entries and featurizers
# for iter_entries in [True, False]:
# # Make a test dataset with two input variables
# data = self.make_test_data()
# data['x2'] = [4, 5, 6]
#
# # Create featurizer
# multi_f = MultipleFeaturizer([self.multiargs, multiargs2],
# iterate_over_entries=iter_entries)
#
# # Test featurize with multiple arguments
# features = multi_f.featurize(0, 2)
# self.assertArrayAlmostEqual([2, 2], features)
#
# # Test dataframe
# data = multi_f.featurize_dataframe(data, ['x', 'x2'])
# self.assertEqual(['y', 'y2'], multi_f.feature_labels())
# self.assertArrayAlmostEqual([[5, 5], [7, 7], [9, 9]],
# data[['y', 'y2']])
def test_featurize_many(self):
# Single argument
s = self.single
s.set_n_jobs(2)
mat = s.featurize_many([1, 2, 3])
self.assertArrayAlmostEqual(mat, [[2], [3], [4]])
# Multi-argument
s = self.multiargs
s.set_n_jobs(2)
mat = s.featurize_many([[1, 4], [2, 5], [3, 6]])
self.assertArrayAlmostEqual(mat, [[5], [7], [9]])
def test_multiprocessing_df(self):
# Single argument
s = self.single
data = self.make_test_data()
s.set_n_jobs(2)
data = s.featurize_dataframe(data, 'x')
self.assertArrayAlmostEqual(data['y'], [2, 3, 4])
# Multi-argument
s = self.multiargs
data = self.make_test_data()
s.set_n_jobs(2)
data['x2'] = [4, 5, 6]
data = s.featurize_dataframe(data, ['x', 'x2'])
self.assertArrayAlmostEqual(data['y'], [5, 7, 9])
def test_fittable(self):
data = self.make_test_data()
ft = self.fittable
# Test fit and featurize separately
ft.fit(data['x'][:2])
ft.featurize_dataframe(data, 'x')
self.assertArrayAlmostEqual(data['a'], [4, 5, 6])
self.assertRaises(Exception, data.__getattr__, 'c')
# Test fit + featurize methods on new fits
data = self.make_test_data()
transformed = ft.fit_transform([data['x'][1]])
self.assertArrayAlmostEqual(transformed[0], [5])
data = self.make_test_data()
ft.fit_featurize_dataframe(data, 'x')
self.assertArrayAlmostEqual(data['a'], [4, 5, 6])
self.assertArrayAlmostEqual(data['b'], [5, 6, 7])
self.assertArrayAlmostEqual(data['c'], [2, 4, 6])
def test_stacked_featurizer(self):
data = self.make_test_data()
data['y'] = [1, 2, 3]
# Test for a regressor
model = DummyRegressor()
model.fit(self.multi.featurize_many(data['x']), data['y'])
# Test the predictions
f = StackedFeaturizer(self.single, model)
self.assertEqual([2], f.featurize(data['x'][0]))
# Test the feature names
self.assertEqual(['prediction'], f.feature_labels())
f.name = 'ML'
self.assertEqual(['ML prediction'], f.feature_labels())
# Test classifier
        model = DummyClassifier(strategy="prior")
data['y'] = [0, 0, 1]
model.fit(self.multi.featurize_many(data['x']), data['y'])
# Test the prediction
f.model = model
self.assertEqual([2. / 3], f.featurize(data['x'][0]))
# Test the feature labels
self.assertRaises(ValueError, f.feature_labels)
f.class_names = ['A', 'B']
self.assertEqual(['ML P(A)'], f.feature_labels())
# Test with three classes
data['y'] = [0, 2, 1]
model.fit(self.multi.featurize_many(data['x']), data['y'])
self.assertArrayAlmostEqual([1. / 3] * 2, f.featurize(data['x'][0]))
f.class_names = ['A', 'B', 'C']
self.assertEqual(['ML P(A)', 'ML P(B)'], f.feature_labels())
def test_multiindex_inplace(self):
df_1lvl = pd.DataFrame({'x': [1, 2, 3]})
df_2lvl = pd.DataFrame({'x': [1, 2, 3]})
df_2lvl.columns = pd.MultiIndex.from_product((["Custom"],
df_2lvl.columns.values))
df_3lvl = pd.DataFrame({'x': [1, 2, 3]})
df_3lvl.columns = pd.MultiIndex.from_product((["Custom"],
["Custom2"],
df_3lvl.columns.values))
# If input dataframe has flat column index
self.multi.featurize_dataframe(df_1lvl, 'x', multiindex=True)
self.assertEqual(df_1lvl[("Input Data", "x")].iloc[0], 1)
self.assertEqual(df_1lvl[("MultipleFeatureFeaturizer", "w")].iloc[0], 0)
# If input dataframe has 2-lvl column index
self.multi.featurize_dataframe(df_2lvl, ("Custom", 'x'),
multiindex=True)
self.assertEqual(df_2lvl[("Custom", "x")].iloc[0], 1)
self.assertEqual(df_2lvl[("MultipleFeatureFeaturizer", "w")].iloc[0], 0)
# If input dataframe has 2+ lvl column index
with self.assertRaises(IndexError):
self.multi.featurize_dataframe(df_3lvl, ("Custom", "Custom2", 'x'),
multiindex=True)
# Make sure error is thrown when input df is multiindexed, but multiindex not enabled
df_compoundkey = pd.DataFrame({'x': [1, 2, 3]})
df_compoundkey.columns = pd.MultiIndex.from_product((["CK"],
df_compoundkey.columns.values))
with self.assertRaises(ValueError):
self.multi.featurize_dataframe(df_compoundkey, ("CK", "x"))
def test_multiindex_return(self):
# For inplace=False, where the method of assigning keys is different
df_1lvl = pd.DataFrame({'x': [1, 2, 3]})
df_2lvl = pd.DataFrame({'x': [1, 2, 3]})
df_2lvl.columns = pd.MultiIndex.from_product((["Custom"],
df_2lvl.columns.values))
df_3lvl = pd.DataFrame({'x': [1, 2, 3]})
df_3lvl.columns = pd.MultiIndex.from_product((["Custom"],
["Custom2"],
df_3lvl.columns.values))
# If input dataframe has flat column index
df_1lvl = self.multi.featurize_dataframe(df_1lvl, 'x', inplace=False,
multiindex=True)
self.assertEqual(df_1lvl[("Input Data", "x")].iloc[0], 1)
self.assertEqual(df_1lvl[("MultipleFeatureFeaturizer", "w")].iloc[0], 0)
# If input dataframe has 2-lvl column index
df_2lvl = self.multi.featurize_dataframe(df_2lvl, ("Custom", 'x'),
inplace=False, multiindex=True)
self.assertEqual(df_2lvl[("Custom", "x")].iloc[0], 1)
self.assertEqual(df_2lvl[("MultipleFeatureFeaturizer", "w")].iloc[0], 0)
# If input dataframe has 2+ lvl column index
with self.assertRaises(IndexError):
_ = self.multi.featurize_dataframe(df_3lvl,
("Custom", "Custom2", 'x'),
inplace=False, multiindex=True)
def test_multiindex_in_multifeaturizer(self):
# Make sure multiplefeaturizer returns the correct sub-featurizer multiindex keys
# test both iteration over entries and featurizers
for iter_entries in [True, False]:
mf = MultipleFeaturizer([self.multi, self.single],
iterate_over_entries=iter_entries)
df_1lvl = | pd.DataFrame({'x': [1, 2, 3]}) | pandas.DataFrame |
#!/usr/bin/python3
import numpy as np
import pandas as pd
# Functions to handle Input
#############################################################################################
def read_csv():
# simple function to read data from a file
data = pd.read_csv('out.csv', sep=';')
return data
def read_sacct():
# function to read the data directly from sacct
print('not implemented yet')
# Functions to handle string/value conversion
#############################################################################################
# function converts format (DD-)HH:MM:SS to seconds
def ave2sec(x):
if ( '-' in x ):
vals = x.split('-')
times = vals[1].split(':')
sec = 24*3600*int(vals[0])+3600*int(times[0])+60*int(times[1])+int(times[2])
else:
times = x.split(':')
sec = 3600*int(times[0])+60*int(times[1])+int(times[2])
return (sec)
def scalingref(x):
# returns reference scaling factor for MPI jobs based on 1.5 factor:
# doubling cores should make parformance x1.5 (or better)
if int(x) == 1:
ref = 1
else:
ref = np.power(1/1.5,np.log2(int(x)))
return ref
def rss2g(x):
return int(float(x[:-1]))/1024
def reqmem2g(x):
return int(float(x[:-2]))/1024
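# Illustrative sanity checks for the converters above (values worked out by
# hand; rss2g/reqmem2g simply strip the last one or two characters as the unit
# suffix and divide by 1024, so the caller must know which unit sacct reports):
#   ave2sec('1-02:03:04')  -> 93784    (1 day + 2 h + 3 min + 4 s)
#   ave2sec('02:03:04')    -> 7384
#   scalingref(4)          -> ~0.444   ((1/1.5) ** log2(4))
#   rss2g('2048M')         -> 2.0      (strips 'M', divides by 1024)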
# Functions to handle DataFrames
#############################################################################################
def parse_df(data):
# convert multi-line DataFrame to more compact form for analysis
from datetime import datetime
data[['id','subid']] = data.JobID.str.split('_',1, expand=True)
data.drop(['subid'],axis=1, inplace=True)
df= | pd.DataFrame() | pandas.DataFrame |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Dec 21 11:14:57 2021
@author: carlos
"""
import pandas as pd
from wordcloud import WordCloud
import matplotlib.pyplot as plt
from parse_abstracts import *
import seaborn as sns
import numpy as np
import json
import re
import nltk
from nltk.corpus import stopwords
import pingouin as pg
# project root directory (the second assignment below overrides the first; keep the one that matches your machine)
root = "/home/javier/git_repos/sepex_ontology/"
root = "/Users/carlos/documents/GitHub/sepex_ontology/"
def is_wj_here(year,lexicon):
print(f"Analyzing {year} SEPEX {lexicon}...")
# create "SEPEX lexicon" by loading abstracts and checking overlap with specific lexicon
abstracts = read_abstracts(year)
prevalence = search_lexicon(abstracts,lexicon)
prevalence_any = prevalence[prevalence[0] != 0]
masked_concepts = list(prevalence_any.index)
features = pd.DataFrame(columns=masked_concepts)
# load raw wj
paragraphs = json.load(open(root + "wj/william_james.json","r"))
count = 1
for pid,paragraph in paragraphs.items():
if len(paragraph) > 0:
words = processText(paragraph)
text = " ".join(words)
#print("Parsing paragraph %s, %s of %s" %(pid,count,len(paragraphs)))
# search for each cognitive atlas term, take a count
for concept in features.columns:
processed_concept = " ".join(processText(str(concept)))
features.loc[pid,concept] = len(re.findall(processed_concept,text))
#print("Found %s total concept occurrences for %s" %(features.loc[pid].sum(),pid))
count +=1
else:
#print("No text found for paragraph %s" %(pid))
features.loc[pid] = np.zeros(len(masked_concepts))
# Get concepts that overlap between WJ and SEPEX lexicon
prevalence = features.sum()
prevalence_any = prevalence.loc[~(prevalence==0)]
departures = prevalence.loc[(prevalence==0)]
overlap = (len(prevalence_any) / len(prevalence))*100
overlap_length = len(prevalence_any) # to compute weighted overlap
print(f"Proportion of {year} SEPEX {lexicon} in WJ's principles: {overlap}%.")
print(f"Prevalence of {year} SEPEX {lexicon} in WJ's principles: {overlap_length}.")
return overlap, overlap_length
sepex_editions = [2012, 2014, 2016, 2018, 2022]
lexicons = ["cognitive-atlas_disorders",
"cognitive-atlas_concepts",
"cognitive-atlas_tasks",
"NIF-GrossAnatomy_edited"]
overlap = pd.DataFrame(columns=lexicons)
overlap_weighted = pd.DataFrame(columns=lexicons)
for idx_sepex,sepex_year in enumerate(sepex_editions):
overlap_length = pd.DataFrame(columns=lexicons)
for idx_lexicon,lexicon in enumerate(lexicons):
overlap.loc[idx_sepex,lexicon], overlap_length.loc[0,lexicon] = is_wj_here(sepex_year, lexicon)
# normalize by corpus size
overlap_weighted.loc[idx_sepex,:] = np.array([(overlap_length.loc[0])]) / int(overlap_length.sum(axis=1))
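# Note: each row of overlap_weighted is the per-lexicon share of all matched
# terms for that edition. For example (made-up counts), if the four lexicons
# matched 5, 40, 30 and 25 William James terms in one edition, that row
# becomes [0.05, 0.40, 0.30, 0.25].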
''' PLOT WEIGHTED RESULTS and do ANOVA'''
# rename columns
overlap_weighted.columns = ['disorders', 'concepts', 'tasks', 'anatomy']
# average across editions
g = plt.figure()
sns.set_style("white")
sns.set_context("poster",font_scale=0.75,rc={"figure.figsize":(20, 20)})
g = sns.catplot(data=overlap_weighted,kind="bar")
g.despine(right=True)
g.set_axis_labels("", "weighted overlap with James")
g.figure.savefig(root + 'figures_nospanish/w_overlap_WJ_across_editions.png',dpi=600,bbox_inches="tight")
# transform to long for stats purposes
overlap_weighted_wide = overlap_weighted.copy()
overlap_weighted_wide["year"] = [2012, 2014, 2016, 2018, 2022]
overlap_weighted_wide = overlap_weighted_wide.apply(pd.to_numeric)
overlap_weighted_long = pd.melt(overlap_weighted_wide,
id_vars=['year'],
var_name = "lexicon",
value_name = "WPI")
# perform one-way anova
aov = pg.rm_anova(dv='WPI', within='lexicon',
subject='year', data=overlap_weighted_long, detailed=True,
effsize="np2", correction = True)
aov.round(3)
# and post hoc tests
post_hocs = pg.pairwise_ttests(dv='WPI',
within='lexicon',
subject='year',
data=overlap_weighted_long,
padjust='bonf',
effsize = 'cohen')
post_hocs.round(3)
g = plt.figure()
sns.set_style("white")
sns.set_context("notebook",font_scale=1.5,rc={"figure.figsize":(20, 20)})
g = sns.lineplot(data=overlap_weighted)
g.set_xticks(range(len(sepex_editions))) # <--- set the ticks first
g.set_xticklabels(sepex_editions)
g.set_ylabel("weighted prevalence")
g.set_xlabel("year")
g.get_legend().remove()
g.figure.savefig(root + 'figures_nospanish/w_WJ_prevalence.png',dpi=600,bbox_inches="tight")
''' PLOT RELATIVE RESULTS'''
# convert to percentage
labels = ['disorders', 'concepts', 'tasks', 'anatomy']
overlap_percentage = overlap_weighted.copy()
overlap_percentage[labels] = overlap_percentage[labels].div(overlap_percentage[labels].sum(axis=1), axis=0).multiply(100)
overlap_percentage['year']= ['2012','2014','2016','2018','2022'] # add year column
# transform to long format
overlap_percentage_long = pd.melt(overlap_percentage,
id_vars=['year'],
value_vars=labels,
var_name='lexicon',
value_name='percentage')
# plot stacked bar plot
g = plt.figure()
sns.set_style("white")
sns.set_context("notebook",font_scale=1.5,rc={"figure.figsize":(20, 20)})
ax = sns.histplot(overlap_percentage_long,
x='year', hue='lexicon', weights='percentage', multiple='stack', shrink=0.8)
legend = ax.get_legend()
legend.set_bbox_to_anchor((1, 1))
ax.set_ylabel(" relative overlap with James (%)")
ax.figure.savefig(root + 'figures_nospanish/w_WJ_prevalence_stacked.png',dpi=600,bbox_inches="tight")
# create "SEPEX lexicon" by loading abstracts and checking overlap with cogat
abstracts = read_abstracts(2018)
cogneuro_prevalence = search_lexicon(abstracts,"cognitive-atlas_concepts") # get prevalence of cogat concepts in sepex abstracts
cogneuro_prevalence_any = cogneuro_prevalence[cogneuro_prevalence[0] != 0]
masked_sepex_concepts = list(cogneuro_prevalence_any.index)
features_concepts = | pd.DataFrame(columns=masked_sepex_concepts) | pandas.DataFrame |
import numpy as np
import pandas as pd
import datetime as dt
import pickle
import bz2
from .analyzer import summarize_returns
DATA_PATH = '../backtest/'
class Portfolio():
"""
Portfolio is the core class for event-driven backtesting. It conducts the
backtesting in the following order:
1. Initialization:
Set the capital base we invest and the securities we
want to trade.
2. Receive the price information with .receive_price():
        Insert the new price information for each security so that the
        Portfolio class will calculate and update the relevant status such
        as the portfolio value and position weights.
3. Rebalance with .rebalance():
Depending on the signal, we can choose to change the position
        on each security.
4. Keep position with .keep_position():
If we don't rebalance the portfolio, we need to tell it to keep
        current position at the end of the trading day.
Example
-------
see Vol_MA.ipynb, Vol_MA_test_robustness.ipynb
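    A minimal sketch of the intended call sequence (the argument shapes shown
    here are illustrative assumptions; see the notebooks above and the
    individual method docstrings for the actual signatures):
        import datetime as dt
        import pandas as pd
        pf = Portfolio(capital=1e6,
                       inception=dt.datetime(2015, 1, 2),
                       components=['AAPL'])
        prices = pd.DataFrame({'AAPL': [100.0]},
                              index=[dt.datetime(2015, 1, 2)])
        pf.receive_price(prices)   # step 2: push new prices into the portfolio
        pf.keep_position()         # step 4: or pf.rebalance(...) on a signal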
Parameters
----------
capital: numeric
        capital base we put into the portfolio
inception: datetime.datetime
the time when we start backtesting
components: list of str
        tickers of securities to trade, such as ['AAPL', 'MSFT', 'AMZN']
name: str
name of the portfolio
is_share_integer: boolean
If true, the shares of securities will be rounded to integers.
"""
def __init__(self, capital, inception, components,
name='portfolio', is_share_integer=False):
# -----------------------------------------------
# initialize parameters
# -----------------------------------------------
self.capital = capital # initial money invested
if isinstance(components, str):
components = [components] # should be list
self.components = components # equities in the portfolio
# self.commission_rate = commission_rate
self.inception = inception
self.component_prices = pd.DataFrame(columns=self.components)
self.name = name
self.is_share_integer = is_share_integer
# self.benchmark = benchmark
# -----------------------------------------------
# record portfolio status to series and dataFrames
# -----------------------------------------------
# temoprary values
self._nav = pd.Series(capital,index=[inception])
self._cash = pd.Series(capital,index=[inception])
self._security = pd.Series(0,index=[inception])
self._component_prices = pd.DataFrame(columns=self.components) # empty
self._shares = pd.DataFrame(0, index=[inception], columns=self.components)
self._positions = pd.DataFrame(0, index=[inception], columns=self.components)
self._weights = pd.DataFrame(0, index=[inception], columns=self.components)
self._share_changes = pd.DataFrame(columns=self.components) # empty
self._now = self.inception
self._max_nav = pd.Series(capital,index=[inception])
self._drawdown = pd.Series(0, index=[inception])
self._relative_drawdown = pd.Series(0, index=[inception])
# series
self.nav_open = pd.Series()
self.nav_close = pd.Series()
self.cash_open = pd.Series()
self.cash_close = pd.Series()
self.security_open = pd.Series()
self.security_close = pd.Series()
self.max_nav = pd.Series()
self.drawdown_open = pd.Series()
self.drawdown_close = pd.Series()
self.relative_drawdown_open = | pd.Series() | pandas.Series |
# -*- coding: UTF-8 -*-
'''
@author: Andrewzhj
@contact: <EMAIL>
@file: comment_words_cloud.py
@time: 10/16/18 3:53 PM
@desc: extract the review comments and visualize the most frequent (hot) words as a word cloud
@note:
'''
import jieba
from wordcloud import WordCloud, ImageColorGenerator
import pandas as pd
from pymongo import MongoClient
import numpy
from PIL import Image
import matplotlib.pyplot as plt
import os
'''
https://blog.csdn.net/qq_34777600/article/details/77460380
'''
conn = MongoClient('10.1.210.50', 27017)
ctrip_db = conn.ctrip
def word_cloud():
producer_list = ctrip_db.comment.find().distinct("producer")
print(producer_list)
for producer in producer_list:
comment_list = ctrip_db.comment.find({"producer": str(producer)})
words_df = | pd.DataFrame() | pandas.DataFrame |
# Copyright 2018-2021 The Salish Sea NEMO Project and
# The University of British Columbia
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# may require Python version 3.5 or higher for recursive glob
"""Flexible functions for model evalution tasks
"""
import datetime as dt
import numpy as np
import netCDF4 as nc
import pandas as pd
import glob
from salishsea_tools import geo_tools, places
import gsw
import os
import pytz
import pickle
import matplotlib.pyplot as plt
import matplotlib.dates as mdates
import cmocean as cmo
import warnings
import re
import f90nml
import sys
import xarray as xr
# Check which Excel reader engine is available, if any, and set variable excelEngine
try:
import openpyxl
excelEngine='openpyxl'
except ImportError as iE:
try:
import xlrd
excelEngine='xlrd'
except ImportError as iE:
excelEngine=None
warnings.warn("Neither Python Excel module ('openpyxl','xlrd') found",UserWarning)
# :arg dict varmap: dictionary mapping names of data columns to variable names, string to string, model:data
def matchData(
data,
filemap,
fdict,
mod_start=None,
mod_end=None,
mod_nam_fmt='nowcast',
mod_basedir='/results/SalishSea/nowcast-green/',
mod_flen=1,
method='bin',
meshPath=None,
maskName='tmask',
wrapSearch=False,
fastSearch=False,
wrapTol=1,
e3tvar='e3t',
fid=None,
sdim=3,
quiet=False,
preIndexed=False
):
"""Given a discrete sample dataset, find match model output
note: only one grid mask is loaded so all model variables must be on same grid; defaults to tmask;
call multiple times for different grids (eg U,W)
:arg data: pandas dataframe containing data to compare to. Must include the following:
'dtUTC': column with UTC date and time
'Lat': decimal latitude
'Lon': decimal longitude
'Z': depth, positive NOT required if method='ferry' or sdim=2
:type :py:class:`pandas.DataFrame`
:arg dict filemap: dictionary mapping names of model variables to filetypes containing them
:arg dict fdict: dictionary mapping filetypes to their time resolution in hours
:arg mod_start: first date of time range to match
:type :py:class:`datetime.datetime`
:arg mod_end: end of date range to match (not included)
:type :py:class:`datetime.datetime`
:arg str mod_nam_fmt: naming format for model files. options are 'nowcast' or 'long'
'nowcast' example: 05may15/SalishSea_1h_20150505_20150505_ptrc_T.nc
'long' example: SalishSea_1h_20150206_20150804_ptrc_T_20150427-20150506.nc
'long' will recursively search subdirectories (to match Vicky's storage style)
:arg str mod_basedir: path to search for model files; defaults to nowcast-green
:arg int mod_flen: length of model files in days; defaults to 1, which is how nowcast data is stored
:arg str method: method to use for matching. options are:
'bin'- return model value from grid/time interval containing observation
'vvlBin' - same as 'bin' but consider tidal change in vertical grid
'vvlZ' - consider tidal change in vertical grid and interpolate in the vertical
'ferry' - match observations to top model layer
'vertNet' - match observations to mean over a vertical range defined by
Z_upper and Z_lower; first try will include entire cell containing end points
and use e3t_0 rather than time-varying e3t
:arg str meshPath: path to mesh file; defaults to None, in which case set to:
'/results/forcing/atmospheric/GEM2.5/operational/ops_y2015m01d01.nc' if maskName is 'ops'
'/ocean/eolson/MEOPAR/NEMO-forcing/grid/mesh_mask201702_noLPE.nc' else (SalishSeaCast)
:arg str maskName: variable name for mask in mesh file (check code for consistency if not tmask)
for ops vars use 'ops'
:arg boolean wrapSearch: if True, use wrapper on find_closest_model_point that assumes
nearness of subsequent values
:arg int wrapTol: assumed search radius from previous grid point if wrapSearch=True
    :arg str e3tvar: name of the t-grid layer thickness variable; only used by the 'vvlZ' and 'vvlBin' methods, which only work on the t grid
:arg Dataset fid: optionally include name of a single dataset when looping is not necessary and all matches come from
a single file
:arg int sdim: optionally enter number of spatial dimensions (must be the same for all variables per call);
defaults to 3; use to match to 2d fields like ssh
:arg boolean quiet: if True, suppress non-critical warnings
:arg boolean preIndexed: set True if horizontal grid indices already in input dataframe; for
speed; not implemented with all options
"""
# define dictionaries of mesh lat and lon variables to use with different grids:
lonvar={'tmask':'nav_lon','umask':'glamu','vmask':'glamv','fmask':'glamf'}
latvar={'tmask':'nav_lat','umask':'gphiu','vmask':'gphiv','fmask':'gphif'}
# check that required columns are in dataframe:
if method == 'ferry' or sdim==2:
reqsubset=['dtUTC','Lat','Lon']
if preIndexed:
reqsubset=['dtUTC','i','j']
elif method == 'vertNet':
reqsubset=['dtUTC','Lat','Lon','Z_upper','Z_lower']
if preIndexed:
reqsubset=['dtUTC','i','j','Z_upper','Z_lower']
else:
reqsubset=['dtUTC','Lat','Lon','Z']
if preIndexed:
reqsubset=['dtUTC','i','j','k']
if not set(reqsubset) <= set(data.keys()):
raise Exception('{} missing from data'.format([el for el in set(reqsubset)-set(data.keys())],'%s'))
fkeysVar=list(filemap.keys()) # list of model variables to return
# don't load more files than necessary:
ftypes=list(fdict.keys())
for ikey in ftypes:
if ikey not in set(filemap.values()):
fdict.pop(ikey)
if len(set(filemap.values())-set(fdict.keys()))>0:
print('Error: file(s) missing from fdict:',set(filemap.values())-set(fdict.keys()))
    ftypes=list(fdict.keys()) # list of filetypes containing the desired model variables
# create inverted version of filemap dict mapping file types to the variables they contain
filemap_r=dict()
for ift in ftypes:
filemap_r[ift]=list()
for ikey in filemap:
filemap_r[filemap[ikey]].append(ikey)
# if mod_start and mod_end not provided, use min and max of data datetimes
if mod_start is None:
mod_start=np.min(data['dtUTC'])
print(mod_start)
if mod_end is None:
mod_end=np.max(data['dtUTC'])
print(mod_end)
# adjustments to data dataframe to avoid unnecessary calculations
data=data.loc[(data.dtUTC>=mod_start)&(data.dtUTC<mod_end)].copy(deep=True)
data=data.dropna(how='any',subset=reqsubset) #.dropna(how='all',subset=[*varmap.keys()])
if maskName=='ops':
# set default mesh file for ops data (atmos forcing)
if meshPath==None:
meshPath='/results/forcing/atmospheric/GEM2.5/operational/ops_y2015m01d01.nc'
# load lat, lon, and mask (all ones for ops - no land in sky)
with nc.Dataset(meshPath) as fmesh:
navlon=np.squeeze(np.copy(fmesh.variables['nav_lon'][:,:]-360))
navlat=np.squeeze(np.copy(fmesh.variables['nav_lat'][:,:]))
omask=np.expand_dims(np.ones(np.shape(navlon)),axis=(0,1))
nemops='GEM2.5'
else:
# set default mesh file for SalishSeaCast data
if meshPath==None:
meshPath='/ocean/eolson/MEOPAR/NEMO-forcing/grid/mesh_mask201702_noLPE.nc'
# load lat lon and ocean mask
with nc.Dataset(meshPath) as fmesh:
omask=np.copy(fmesh.variables[maskName])
navlon=np.squeeze(np.copy(fmesh.variables[lonvar[maskName]][:,:]))
navlat=np.squeeze(np.copy(fmesh.variables[latvar[maskName]][:,:]))
if method == 'vertNet':
e3t0=np.squeeze(np.copy(fmesh.variables['e3t_0'][0,:,:,:]))
if maskName != 'tmask':
print('Warning: Using tmask thickness for variable on different grid')
nemops='NEMO'
# handle horizontal gridding as necessary; make sure data is in order of ascending time
if not preIndexed:
# find location of each obs on model grid and add to data as additional columns 'i' and 'j'
data=_gridHoriz(data,omask,navlon,navlat,wrapSearch,wrapTol,fastSearch, quiet=quiet,nemops=nemops)
data=data.sort_values(by=[ix for ix in ['dtUTC','Z','j','i'] if ix in reqsubset]) # preserve list order
else:
data=data.sort_values(by=[ix for ix in ['dtUTC','k','j','i'] if ix in reqsubset]) # preserve list order
data.reset_index(drop=True,inplace=True)
# set up columns to accept model values; prepend 'mod' to distinguish from obs names
for ivar in filemap.keys():
data['mod_'+ivar]=np.full(len(data),np.nan)
# create dictionary of dataframes of filename, start time, and end time for each file type
flist=dict()
for ift in ftypes:
flist[ift]=index_model_files(mod_start,mod_end,mod_basedir,mod_nam_fmt,mod_flen,ift,fdict[ift])
# call a function to carry out vertical matching based on specified method
if method == 'bin':
data = _binmatch(data,flist,ftypes,filemap_r,omask,maskName,sdim,preIndexed=preIndexed)
elif method == 'ferry':
print('data is matched to shallowest model level')
data = _ferrymatch(data,flist,ftypes,filemap_r,omask,fdict)
elif method == 'vvlZ':
data = _interpvvlZ(data,flist,ftypes,filemap,filemap_r,omask,fdict,e3tvar)
elif method == 'vvlBin':
data= _vvlBin(data,flist,ftypes,filemap,filemap_r,omask,fdict,e3tvar)
elif method == 'vertNet':
data = _vertNetmatch(data,flist,ftypes,filemap_r,omask,e3t0,maskName)
else:
print('option '+method+' not written yet')
return
data.reset_index(drop=True,inplace=True)
return data
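# A minimal usage sketch for matchData (hedged: the variable-to-filetype mapping and
# the availability of the default nowcast-green archive are assumptions).
def _example_matchData():
    """Match a single hypothetical observation to hourly model output."""
    obs = pd.DataFrame({'dtUTC': [dt.datetime(2015, 5, 5, 12)],
                        'Lat': [49.2], 'Lon': [-123.5], 'Z': [5.0]})
    filemap = {'nitrate': 'ptrc_T', 'vosaline': 'grid_T'}  # model variable -> filetype (assumed)
    fdict = {'ptrc_T': 1, 'grid_T': 1}                     # filetype -> output interval in hours
    # returns obs with added columns 'mod_nitrate' and 'mod_vosaline'
    return matchData(obs, filemap, fdict,
                     mod_start=dt.datetime(2015, 5, 5),
                     mod_end=dt.datetime(2015, 5, 6))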
def _gridHoriz(data,omask,navlon,navlat,wrapSearch,wrapTol,fastSearch=False, resetIndex=False,quiet=False,nemops='NEMO'):
""" this function finds the horizontal grid (i,j) indices for each model point and adds them
to the dataframe 'data' as additional columns
NOTE: points that are matched are dropped from the dataFrame; with quiet=False, the unmatched
lats and lons are printed
"""
lmask=-1*(omask[0,0,:,:]-1) # NEMO masks have ocean = 1, but the functions called below require land = 1
if wrapSearch:
# this speeds up the matching process for ferry data where there is a high likelihood each point
# is close to the point before it
jj,ii = geo_tools.closestPointArray(data['Lon'].values,data['Lat'].values,navlon,navlat,
tol2=wrapTol,land_mask = lmask)
data['j']=[-1 if np.isnan(mm) else int(mm) for mm in jj]
data['i']=[-1 if np.isnan(mm) else int(mm) for mm in ii]
elif fastSearch:
jjii = xr.open_dataset('~/MEOPAR/grid/grid_from_lat_lon_mask999.nc')
print (data['Lat'])
mylats = xr.DataArray(data['Lat'])
mylons = xr.DataArray(data['Lon'])
jj = jjii.jj.sel(lats=mylats, lons=mylons, method='nearest').values
ii = jjii.ii.sel(lats=mylats, lons=mylons, method='nearest').values
print (jj.shape, jj)
data['j'] = [-1 if mm==-999 else mm for mm in jj]
data['i'] = [-1 if mm==-999 else mm for mm in ii]
else:
data['j']=-1*np.ones((len(data))).astype(int)
data['i']=-1*np.ones((len(data))).astype(int)
for la,lo in np.unique(data.loc[:,['Lat','Lon']].values,axis=0):
try:
jj, ii = geo_tools.find_closest_model_point(lo, la, navlon,
navlat, grid=nemops,land_mask = lmask,checkTol=True)
except:
print('lo:',lo,'la:',la)
raise
if isinstance(jj,int):
data.loc[(data.Lat==la)&(data.Lon==lo),['j','i']]=jj,ii
else:
if not quiet:
print('(Lat,Lon)=',la,lo,' not matched to domain')
data.drop(data.loc[(data.i==-1)|(data.j==-1)].index, inplace=True)
if resetIndex==True:
data.reset_index(drop=True,inplace=True)
return data
def _vertNetmatch(data,flist,ftypes,filemap_r,gridmask,e3t0,maskName='tmask'):
""" basic vertical matching of model output to data
returns model value from model grid cell that would contain the observation point with
    no interpolation; no consideration of the changing of grid thickness with the tides (vvl)
    strategy: loop through data, opening and closing model files as needed and storing model data
"""
if len(data)>5000:
pprint=True
lendat=len(data)
else:
pprint= False
# set up columns to hold indices for upper and lower end of range to average over
data['k_upper']=-1*np.ones((len(data))).astype(int)
data['k_lower']=-1*np.ones((len(data))).astype(int)
for ind, row in data.iterrows():
if (pprint==True and ind%5000==0):
print('progress: {}%'.format(ind/lendat*100))
if ind==0: # special case for start of loop; load first files
fid=dict()
fend=dict()
torig=dict()
for ift in ftypes:
fid,fend=_nextfile_bin(ift,row['dtUTC'],flist[ift],fid,fend,flist)
# handle NEMO files time reference
if 'time_centered' in fid[ftypes[0]].variables.keys():
torig[ift]=dt.datetime.strptime(fid[ftypes[0]].variables['time_centered'].time_origin,'%Y-%m-%d %H:%M:%S')
else:
torig[ift]=dt.datetime.strptime(fid[ftypes[0]].variables['time_counter'].time_origin,'%Y-%m-%d %H:%M:%S')
# loop through each file type to extract data from the appropriate time and location
for ift in ftypes:
if row['dtUTC']>=fend[ift]:
fid,fend=_nextfile_bin(ift,row['dtUTC'],flist[ift],fid,fend,flist)
# now read data
# find time index
try:
if 'time_centered_bounds' in fid[ift].variables.keys(): # no problem!
ih=_getTimeInd_bin(row['dtUTC'],fid[ift],torig[ift])
else: # annoying!
hpf=(flist[ift]['t_n'][0]-flist[ift]['t_0'][0]).total_seconds()/3600 #hours per file
ih=_getTimeInd_bin(row['dtUTC'],fid[ift],torig[ift],hpf=hpf)
except:
print(row['dtUTC'],ift,torig[ift])
tlist=fid[ift].variables['time_centered_bounds'][:,:]
for el in tlist:
print(el)
print((row['dtUTC']-torig[ift]).total_seconds())
print(tlist[-1,1])
raise
# find depth indices (assume they may be reversed)
z_l=max(row['Z_upper'],row['Z_lower'])
z_u=min(row['Z_upper'],row['Z_lower'])
if len(set(fid[ift].variables.keys()).intersection(set(('deptht_bounds','depthu_bounds','depthv_bounds'))))>0: # no problem!
ik_l=_getZInd_bin(z_l,fid[ift],maskName=maskName)
ik_u=_getZInd_bin(z_u,fid[ift],maskName=maskName)
else: # workaround for missing variable
ik_l=_getZInd_bin(z_l,fid[ift],boundsFlag=True,maskName=maskName)
ik_u=_getZInd_bin(z_u,fid[ift],boundsFlag=True,maskName=maskName)
# assign values for each var assoc with ift
if (not np.isnan(ik_l)) and (not np.isnan(ik_u)) and \
(gridmask[0,ik_u,row['j'],row['i']]==1):
data.loc[ind,['k_upper']]=int(ik_u)
data.loc[ind,['k_lower']]=int(ik_l)
for ivar in filemap_r[ift]:
var=fid[ift].variables[ivar][ih,ik_u:(ik_l+1),row['j'],row['i']]
e3t=e3t0[ik_u:(ik_l+1),row['j'],row['i']]
imask=gridmask[0,ik_u:(ik_l+1),row['j'],row['i']]
meanvar=np.sum(var*e3t*imask)/np.sum(e3t*imask)
data.loc[ind,['mod_'+ivar]]=meanvar
if gridmask[0,ik_l,row['j'],row['i']]==0:
print(f"Warning: lower limit is not an ocean value:",
f" i={row['i']}, j={row['j']}, k_upper={ik_u}, k_lower={ik_l},",
f"k_seafloor={np.sum(imask)}",
f"Lon={row['Lon']}, Lat={row['Lat']}, dtUTC={row['dtUTC']}")
else:
print(f"Warning: upper limit is not an ocean value:",
f" i={row['i']}, j={row['j']}, k_upper={ik_u},Lat={row['Lat']},",
f"Lon={row['Lon']},dtUTC={row['dtUTC']}")
return data
def _binmatch(data,flist,ftypes,filemap_r,gridmask,maskName='tmask',sdim=3,preIndexed=False):
""" basic vertical matching of model output to data
returns model value from model grid cell that would contain the observation point with
    no interpolation; no consideration of the changing of grid thickness with the tides (vvl)
    strategy: loop through data, opening and closing model files as needed and storing model data
"""
if len(data)>5000:
pprint=True
lendat=len(data)
else:
pprint= False
if not preIndexed:
data['k']=-1*np.ones((len(data))).astype(int)
for ind, row in data.iterrows():
if (pprint==True and ind%5000==0):
print('progress: {}%'.format(ind/lendat*100))
if ind==0: # special case for start of loop; load first files
fid=dict()
fend=dict()
torig=dict()
for ift in ftypes:
fid,fend=_nextfile_bin(ift,row['dtUTC'],flist[ift],fid,fend,flist)
if ift=='ops': # specially handle time origin for ops forcing files
torig[ift]=dt.datetime.strptime(fid[ftypes[0]].variables['time_counter'].time_origin,'%Y-%b-%d %H:%M:%S')
else: # handle NEMO files time reference
if 'time_centered' in fid[ftypes[0]].variables.keys():
torig[ift]=dt.datetime.strptime(fid[ftypes[0]].variables['time_centered'].time_origin,'%Y-%m-%d %H:%M:%S')
else:
torig[ift]=dt.datetime.strptime(fid[ftypes[0]].variables['time_counter'].time_origin,'%Y-%m-%d %H:%M:%S')
# loop through each file type to extract data from the appropriate time and location
for ift in ftypes:
if row['dtUTC']>=fend[ift]:
fid,fend=_nextfile_bin(ift,row['dtUTC'],flist[ift],fid,fend,flist)
# now read data
# find time index
if ift=='ops': # special handling for ops atm forcing files
ih=_getTimeInd_bin_ops(row['dtUTC'],fid[ift],torig[ift])
else: # NEMO files
try:
if 'time_centered_bounds' in fid[ift].variables.keys(): # no problem!
ih=_getTimeInd_bin(row['dtUTC'],fid[ift],torig[ift])
else: # annoying!
hpf=(flist[ift]['t_n'][0]-flist[ift]['t_0'][0]).total_seconds()/3600 #hours per file
ih=_getTimeInd_bin(row['dtUTC'],fid[ift],torig[ift],hpf=hpf)
except:
print('fend',fend)
print('flist[ift]',flist[ift]['paths'][0])
print(row['dtUTC'],ift,torig[ift])
tlist=fid[ift].variables['time_centered_bounds'][:,:]
for el in tlist:
print(el)
print((row['dtUTC']-torig[ift]).total_seconds())
print(tlist[-1,1])
raise
# find depth index if vars are 3d
if sdim==3:
if preIndexed:
ik=row['k']
# assign values for each var assoc with ift
if (not np.isnan(ik)) and (gridmask[0,ik,row['j'],row['i']]==1):
for ivar in filemap_r[ift]:
try:
data.loc[ind,['mod_'+ivar]]=fid[ift].variables[ivar][ih,ik,row['j'],row['i']]
except:
print(ind,ift,ih,ik,row['j'],row['i'])
raise
else:
if len(set(fid[ift].variables.keys()).intersection(set(('deptht_bounds','depthu_bounds','depthv_bounds'))))>0: # no problem!
ik=_getZInd_bin(row['Z'],fid[ift],maskName=maskName)
else: #workaround for missing variables in postprocessed files
ik=_getZInd_bin(row['Z'],fid[ift],boundsFlag=True,maskName=maskName)
# assign values for each var assoc with ift
if (not np.isnan(ik)) and (gridmask[0,ik,row['j'],row['i']]==1):
data.loc[ind,['k']]=int(ik)
for ivar in filemap_r[ift]:
data.loc[ind,['mod_'+ivar]]=fid[ift].variables[ivar][ih,ik,row['j'],row['i']]
elif sdim==2:
# assign values for each var assoc with ift
if (gridmask[0,0,row['j'],row['i']]==1):
for ivar in filemap_r[ift]:
data.loc[ind,['mod_'+ivar]]=fid[ift].variables[ivar][ih,row['j'],row['i']]
else:
                raise ValueError('invalid sdim')
return data
def _vvlBin(data,flist,ftypes,filemap,filemap_r,tmask,fdict,e3tvar):
""" vertical matching of model output to data by bin method but considering vvl change in
grid thickness with tides
"""
data['k']=-1*np.ones((len(data))).astype(int)
ifte3t=filemap[e3tvar]
pere3t=fdict[ifte3t]
pers=np.unique([i for i in fdict.values()])
# reverse fdict
fdict_r=dict()
for iii in pers:
fdict_r[iii]=list()
for ikey in fdict:
fdict_r[fdict[ikey]].append(ikey)
# so far we have only allowed for 1 file duration for all input files, so all indices equivalent
# also, we are only dealing with data saved at same interval as e3t
test=fdict_r.copy()
test.pop(pere3t)
if len(test)>0: # loop through and print eliminated variables
print('Warning: variables excluded because save interval mismatched with e3t:')
for aa in test:
for bb in fdict_r[aa]:
print(filemap_r[bb])
data['indf'] = [int(flist[ifte3t].loc[(aa>=flist[ifte3t].t_0)&(aa<flist[ifte3t].t_n)].index[0])
for aa in data['dtUTC']]
t2=[flist[ifte3t].loc[aa,['t_0']].values[0] for aa in data['indf'].values]
data['ih']=[int(np.floor((aa-bb).total_seconds()/(pere3t*3600))) for aa,bb in zip(data['dtUTC'],t2)]
# now get appropriate e3t for each set of data points:
for indf,grp0 in data.groupby(['indf']):
with nc.Dataset(flist[ifte3t].loc[indf,['paths']].values[0]) as fe3t:
ff=dict()
for ift in fdict_r[pere3t]:
ff[ift]=nc.Dataset(flist[ift].loc[indf,['paths']].values[0])
for (ih,jj,ii),grp1 in grp0.groupby(['ih','j','i']):
e3t=fe3t.variables[e3tvar][ih,:,jj,ii][tmask[0,:,jj,ii]==1]
zl=np.zeros((len(e3t),2))
zl[1:,0]=np.cumsum(e3t[:-1])
zl[:,1]=np.cumsum(e3t)
ztar=grp1['Z'].values
for ift in fdict_r[pere3t]:
for iz, iind in zip(ztar,grp1.index):
ik=[iii for iii,hhh in enumerate(zl) if hhh[1]>iz][0] # return first index where latter endpoint is larger
# assign values for each var assoc with ift
if (not np.isnan(ik)) and (tmask[0,ik,jj,ii]==1):
data.loc[iind,['k']]=int(ik)
for ivar in filemap_r[ift]:
data.loc[iind,['mod_'+ivar]]=ff[ift].variables[ivar][ih,ik,jj,ii]
for ift in fdict_r[pere3t]:
ff[ift].close()
return data
def _interpvvlZ(data,flist,ftypes,filemap,filemap_r,tmask,fdict,e3tvar):
""" vertical interpolation of model output to observation depths considering vvl change in
grid thickness with tides
"""
ifte3t=filemap.pop(e3tvar)
pere3t=fdict.pop(ifte3t)
pers=np.unique([i for i in fdict.values()])
# reverse fdict
fdict_r=dict()
for iii in pers:
fdict_r[iii]=list()
for ikey in fdict:
fdict_r[fdict[ikey]].append(ikey)
# so far we have only allowed for 1 file duration for all input files, so all indices equivalent
# also, we are only dealing with data saved at same interval as e3t
test=fdict_r.copy()
test.pop(pere3t)
if len(test)>0: # loop through and print eliminated variables
print('Warning: variables excluded because save interval mismatched with e3t:')
for aa in test:
for bb in fdict_r[aa]:
print(filemap_r[bb])
data['indf'] = [int(flist[ifte3t].loc[(aa>=flist[ifte3t].t_0)&(aa<flist[ifte3t].t_n)].index[0])
for aa in data['dtUTC']]
t2=[flist[ifte3t].loc[aa,['t_0']].values[0] for aa in data['indf'].values]
data['ih']=[int(np.floor((aa-bb).total_seconds()/(pere3t*3600))) for aa,bb in zip(data['dtUTC'],t2)]
# now get appropriate e3t for each set of data points:
for indf,grp0 in data.groupby(['indf']):
with nc.Dataset(flist[ifte3t].loc[indf,['paths']].values[0]) as fe3t:
ff=dict()
for ift in fdict_r[pere3t]:
ff[ift]=nc.Dataset(flist[ift].loc[indf,['paths']].values[0])
for (ih,jj,ii),grp1 in grp0.groupby(['ih','j','i']):
e3t=fe3t.variables[e3tvar][ih,:,jj,ii][tmask[0,:,jj,ii]==1]
zs=np.cumsum(e3t)-.5*e3t
ztar=grp1['Z'].values
for ift in fdict_r[pere3t]:
for ivar in filemap_r[ift]:
vals=ff[ift].variables[ivar][ih,:,jj,ii][tmask[0,:,jj,ii]==1]
data.loc[grp1.index,['mod_'+ivar]]=np.where(ztar<np.sum(e3t),np.interp(ztar,zs,vals),np.nan)
for ift in fdict_r[pere3t]:
ff[ift].close()
return data
def _ferrymatch(data,flist,ftypes,filemap_r,gridmask,fdict):
""" matching of model output to top grid cells (for ferry underway measurements)
"""
    # loop through data, opening and closing model files as needed and storing model data
    # extract the value from the top (shallowest) model level
# set file name and hour
if len(data)>5000:
pprint=True
lendat=len(data)
else:
pprint= False
for ift in ftypes:
data['indf_'+ift] = [int(flist[ift].loc[(aa>=flist[ift].t_0)&(aa<flist[ift].t_n)].index[0]) for aa in data['dtUTC']]
t2=[flist[ift].loc[aa,['t_0']].values[0] for aa in data['indf_'+ift].values]
data['ih_'+ift]=[int(np.floor((aa-bb).total_seconds()/(fdict[ift]*3600))) for aa,bb in zip(data['dtUTC'],t2)]
print('done index '+ift,dt.datetime.now())
indflast=-1
for ind, row in data.iterrows():
if (pprint==True and ind%np.round(lendat/10)==0):
print(ift,'progress: {}%'.format(ind/lendat*100))
if not row['indf_'+ift]==indflast:
if not indflast==-1:
fid.close()
fid=nc.Dataset(flist[ift].loc[row['indf_'+ift],['paths']].values[0])
indflast=row['indf_'+ift]
for ivar in filemap_r[ift]:
data.loc[ind,['mod_'+ivar]] = fid.variables[ivar][row['ih_'+ift], 0, row['j'], row['i']]
return data
def _nextfile_bin(ift,idt,ifind,fid,fend,flist): # to do: replace flist[ift] with ifind and get rid of flist argument
""" close last file and open the next one"""
if ift in fid.keys():
fid[ift].close()
frow=flist[ift].loc[(ifind.t_0<=idt)&(ifind.t_n>idt)]
#print('idt:',idt)
#print(frow)
#print('switched files: ',frow['paths'].values[0])
fid[ift]=nc.Dataset(frow['paths'].values[0])
fend[ift]=frow['t_n'].values[0]
return fid, fend
def _getTimeInd_bin(idt,ifid,torig,hpf=None):
""" find time index for SalishSeaCast output interval including observation time """
if 'time_centered_bounds' in ifid.variables.keys():
tlist=ifid.variables['time_centered_bounds'][:,:]
# return first index where latter endpoint is larger
ih=[iii for iii,hhh in enumerate(tlist) if hhh[1]>(idt-torig).total_seconds()][0]
else: # hacky fix because time_centered_bounds missing from post-processed daily files
nt=len(ifid.variables['time_counter'][:])
tlist=[ii+hpf/(nt*2)*3600 for ii in ifid.variables['time_counter'][:]]
ih=[iii for iii,hhh in enumerate(tlist) if hhh>(idt-torig).total_seconds()][0]
return ih
def _getTimeInd_bin_ops(idt,ifid,torig):
""" find time index for ops file"""
tlist=ifid.variables['time_counter'][:].data
tinterval=ifid.variables['time_counter'].time_step
#ih=[iii for iii,hhh in enumerate(tlist) if (hhh+tinterval/2)>(idt-torig).total_seconds()][0]
## NEMO is reading in files as if they were on the half hour so do the same:
# return first index where latter endpoint is larger
ih=[iii for iii,hhh in enumerate(tlist) if (hhh+tinterval)>(idt-torig).total_seconds()][0]
return ih
def _getZInd_bin(idt,ifid=None,boundsFlag=False,maskName='tmask'):
""" get vertical index of cell containing observation depth """
if boundsFlag==True:
if maskName=='tmask':
with nc.Dataset('/results/SalishSea/nowcast-green.201812/01jan16/SalishSea_1h_20160101_20160101_ptrc_T.nc') as ftemp:
tlist=ftemp.variables['deptht_bounds'][:,:]
elif maskName=='umask':
with nc.Dataset('/results/SalishSea/nowcast-green.201812/01jan16/SalishSea_1h_20160101_20160101_grid_U.nc') as ftemp:
tlist=ftemp.variables['depthu_bounds'][:,:]
elif maskName=='vmask':
with nc.Dataset('/results/SalishSea/nowcast-green.201812/01jan16/SalishSea_1h_20160101_20160101_grid_V.nc') as ftemp:
tlist=ftemp.variables['depthv_bounds'][:,:]
else:
            raise ValueError('choice not coded')
else:
dboundvar={'tmask':'deptht_bounds','umask':'depthu_bounds','vmask':'depthv_bounds'}
tlist=ifid.variables[dboundvar[maskName]][:,:]
if idt<=np.max(tlist):
ih=[iii for iii,hhh in enumerate(tlist) if hhh[1]>idt][0] # return first index where latter endpoint is larger
else:
ih=np.nan
return ih
def index_model_files(start,end,basedir,nam_fmt,flen,ftype=None,tres=1):
"""
See inputs for matchData above.
    outputs pandas dataframe containing columns 'paths','t_0', and 't_n'
where paths are all the model output files of a given type in the time interval (start,end)
with end not included
"""
if ftype not in ('ptrc_T','grid_T','grid_W','grid_U','grid_V','dia1_T','carp_T','None',None):
print('ftype={}, are you sure? (if yes, add to list)'.format(ftype))
if tres==24:
ftres='1d'
else:
ftres=str(int(tres))+'h'
ffmt='%Y%m%d'
dfmt='%d%b%y'
wfmt='y%Ym%md%d'
if nam_fmt=='nowcast':
stencil='{0}/SalishSea_'+ftres+'_{1}_{2}_'+ftype+'.nc'
elif nam_fmt=='long':
stencil='**/SalishSea_'+ftres+'*'+ftype+'_{1}-{2}.nc'
elif nam_fmt=='sockeye':
stencil=f'*/SalishSea_{ftres}*{ftype}_{{1}}-{{2}}.nc'
elif nam_fmt == 'optimum':
stencil = f'???????/SalishSea_{ftres}*{ftype}_{{1}}-{{2}}.nc'
elif nam_fmt=='wind':
stencil='ops_{3}.nc'
elif nam_fmt=='ops':
stencil='ops_{3}.nc'
elif nam_fmt=='gemlam':
stencil='gemlam_{3}.nc'
elif nam_fmt=='forcing': # use ftype as prefix
stencil=ftype+'_{3}.nc'
else:
raise Exception('nam_fmt '+nam_fmt+' is not defined')
    # Note: to avoid errors if hour and second are included with the start and end times, strip them
iits=dt.datetime(start.year,start.month,start.day)
iite=iits+dt.timedelta(days=(flen-1))
# check if start is a file start date and if not, try to identify the file including it
# (in case start date is in the middle of a multi-day file)
nday=0
while True:
try:
ipathstr=os.path.join(basedir,stencil.format(iits.strftime(dfmt).lower(),
iits.strftime(ffmt),iite.strftime(ffmt),iits.strftime(wfmt)))
iifstr=glob.glob(ipathstr,recursive=True)[0]
if nday>0:
print('first file starts on ',iits)
break # file has been found
except IndexError:
nday=nday+1
if nday==flen:
iits_str=iits.strftime('%Y %b %d')
start_str=start.strftime('%Y %b %d')
if flen==1:
exc_msg= (f'\nFile not found:\n{ipathstr}\n'
f'Check that results directory is accessible and the start date entered is included in the run. \n')
else:
exc_msg= (f'\nDays per output file is set to {flen}. \n'
f'No file found with start date in range {iits_str} to {start_str} \n'
f'of form {ipathstr}\n'
f'Check that results directory is accessible and the start date entered is included in run. \n')
raise Exception(exc_msg) # file has not been found
iits=start-dt.timedelta(days=nday)
iite=iits+dt.timedelta(days=(flen-1))
ind=0
inds=list()
paths=list()
t_0=list()
t_n=list()
while iits<end:
iite=iits+dt.timedelta(days=(flen-1))
iitn=iits+dt.timedelta(days=flen)
try:
iifstr=glob.glob(os.path.join(basedir,stencil.format(iits.strftime(dfmt).lower(),
iits.strftime(ffmt),iite.strftime(ffmt),iits.strftime(wfmt))),recursive=True)[0]
except IndexError:
raise Exception('file does not exist: '+os.path.join(basedir,stencil.format(iits.strftime(dfmt).lower(),
iits.strftime(ffmt),iite.strftime(ffmt),iits.strftime(wfmt))))
inds.append(ind)
paths.append(iifstr)
t_0.append(iits)
t_n.append(iitn)
iits=iitn
ind=ind+1
return pd.DataFrame(data=np.swapaxes([paths,t_0,t_n],0,1),index=inds,columns=['paths','t_0','t_n'])
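# A minimal usage sketch, assuming a standard daily nowcast-green archive layout:
#   flist = index_model_files(dt.datetime(2015, 5, 1), dt.datetime(2015, 5, 8),
#                             '/results/SalishSea/nowcast-green/', 'nowcast',
#                             flen=1, ftype='ptrc_T', tres=1)
#   flist['paths']          # one file per day in the requested window
#   flist[['t_0', 't_n']]   # start (inclusive) and end (exclusive) time of each file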
def index_model_files_flex(basedir,ftype,freq='1d',nam_fmt='nowcast',start=None,end=None):
"""
See inputs for matchData above.
    outputs pandas dataframe containing columns 'paths','t_0', and 't_n'
Requires file naming convention with start date and end date as YYYYMMDD_YYYYMMDD
lists all files of a particular filetype and output frequency from a given results file structure
useful if there are missing files
If start and end are provided, date start is included but end is not.
"""
paths=glob.glob(os.path.join(basedir,'???????','*'+ftype+'*')) # assume if there are subdirectories, they must have nowcast yymmmdd format
if len(paths)==0: # in case of no subdirectories
paths=glob.glob(os.path.join(basedir,'*'+ftype+'*'))
paths=[el for el in paths if re.search(freq,el)] # restrict to files with desired output frequency
t_0=list()
t_n=list()
for ifl in paths:
if nam_fmt=='nowcast':
            dates=re.findall(r'\d{8}',re.search(r'\d{8}_\d{8}',ifl)[0])
        elif nam_fmt=='long':
            dates=re.findall(r'\d{8}',re.search(r'\d{8}-\d{8}',ifl)[0])
else:
raise Exception('option not implemented: nam_fmt=',nam_fmt)
t_0.append(dt.datetime.strptime(dates[0],'%Y%m%d'))
t_n.append(dt.datetime.strptime(dates[1],'%Y%m%d')+dt.timedelta(days=1))
idf=pd.DataFrame(data=np.swapaxes([paths,t_0,t_n],0,1),index=range(0,len(paths)),columns=['paths','t_0','t_n'])
if start is not None and end is not None:
ilocs=(idf['t_n']>start)&(idf['t_0']<end)
idf=idf.loc[ilocs,:].copy(deep=True)
idf=idf.sort_values(['t_0']).reset_index(drop=True)
return idf
def loadDFOCTD(basedir='/ocean/shared/SalishSeaCastData/DFO/CTD/', dbname='DFO_CTD.sqlite',
datelims=()):
"""
load DFO CTD data stored in SQLite database (exclude most points outside Salish Sea)
basedir is location of database
dbname is database name
datelims, if provided, loads only data between first and second datetime in tuple
"""
try:
from sqlalchemy import create_engine, case
from sqlalchemy.orm import create_session
from sqlalchemy.ext.automap import automap_base
from sqlalchemy.sql import and_, or_, not_, func
except ImportError:
raise ImportError('You need to install sqlalchemy in your environment to use this function.')
# definitions
# if db does not exist, exit
if not os.path.isfile(os.path.join(basedir, dbname)):
raise Exception(f'ERROR: {dbname} does not exist in {basedir}')
engine = create_engine('sqlite:///' + basedir + dbname, echo = False)
Base = automap_base()
# reflect the tables in salish.sqlite:
Base.prepare(engine, reflect=True)
# mapped classes have been created
# existing tables:
StationTBL=Base.classes.StationTBL
ObsTBL=Base.classes.ObsTBL
CalcsTBL=Base.classes.CalcsTBL
session = create_session(bind = engine, autocommit = False, autoflush = True)
SA=case([(CalcsTBL.Salinity_T0_C0_SA!=None, CalcsTBL.Salinity_T0_C0_SA)], else_=
case([(CalcsTBL.Salinity_T1_C1_SA!=None, CalcsTBL.Salinity_T1_C1_SA)], else_=
case([(CalcsTBL.Salinity_SA!=None, CalcsTBL.Salinity_SA)], else_= None)))
CT=case([(CalcsTBL.Temperature_Primary_CT!=None, CalcsTBL.Temperature_Primary_CT)], else_=
case([(CalcsTBL.Temperature_Secondary_CT!=None, CalcsTBL.Temperature_Secondary_CT)], else_=CalcsTBL.Temperature_CT))
ZD=case([(ObsTBL.Depth!=None,ObsTBL.Depth)], else_= CalcsTBL.Z)
FL=case([(ObsTBL.Fluorescence_URU_Seapoint!=None,ObsTBL.Fluorescence_URU_Seapoint)], else_= ObsTBL.Fluorescence_URU_Wetlabs)
if len(datelims)<2:
qry=session.query(StationTBL.StartYear.label('Year'),StationTBL.StartMonth.label('Month'),
StationTBL.StartDay.label('Day'),StationTBL.StartHour.label('Hour'),
StationTBL.Lat,StationTBL.Lon,ZD.label('Z'),SA.label('SA'),CT.label('CT'),FL.label('Fluor'),
ObsTBL.Oxygen_Dissolved_SBE.label('DO_mLL'),ObsTBL.Oxygen_Dissolved_SBE_1.label('DO_umolkg')).\
select_from(StationTBL).join(ObsTBL,ObsTBL.StationTBLID==StationTBL.ID).\
join(CalcsTBL,CalcsTBL.ObsTBLID==ObsTBL.ID).filter(and_(StationTBL.Lat>47-3/2.5*(StationTBL.Lon+123.5),
StationTBL.Lat<47-3/2.5*(StationTBL.Lon+121),
StationTBL.Include==True,ObsTBL.Include==True,CalcsTBL.Include==True))
else:
start_y=datelims[0].year
start_m=datelims[0].month
start_d=datelims[0].day
end_y=datelims[1].year
end_m=datelims[1].month
end_d=datelims[1].day
qry=session.query(StationTBL.StartYear.label('Year'),StationTBL.StartMonth.label('Month'),
StationTBL.StartDay.label('Day'),StationTBL.StartHour.label('Hour'),
StationTBL.Lat,StationTBL.Lon,ZD.label('Z'),SA.label('SA'),CT.label('CT'),FL.label('Fluor')).\
select_from(StationTBL).join(ObsTBL,ObsTBL.StationTBLID==StationTBL.ID).\
join(CalcsTBL,CalcsTBL.ObsTBLID==ObsTBL.ID).filter(and_(or_(StationTBL.StartYear>start_y,
and_(StationTBL.StartYear==start_y, StationTBL.StartMonth>start_m),
and_(StationTBL.StartYear==start_y, StationTBL.StartMonth==start_m, StationTBL.StartDay>=start_d)),
or_(StationTBL.StartYear<end_y,
and_(StationTBL.StartYear==end_y,StationTBL.StartMonth<end_m),
and_(StationTBL.StartYear==end_y,StationTBL.StartMonth==end_m, StationTBL.StartDay<end_d)),
StationTBL.Lat>47-3/2.5*(StationTBL.Lon+123.5),
StationTBL.Lat<47-3/2.5*(StationTBL.Lon+121),
StationTBL.Include==True,ObsTBL.Include==True,CalcsTBL.Include==True))
df1=pd.read_sql_query(qry.statement, engine)
df1['dtUTC']=[dt.datetime(int(y),int(m),int(d))+dt.timedelta(hours=h) for y,m,d,h in zip(df1['Year'],df1['Month'],df1['Day'],df1['Hour'])]
session.close()
engine.dispose()
return df1
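# A minimal usage sketch, assuming the default database location is readable:
#   dfCTD = loadDFOCTD(datelims=(dt.datetime(2015, 1, 1), dt.datetime(2016, 1, 1)))
#   # dfCTD carries Lat, Lon, Z, SA, CT, Fluor and dtUTC, the columns matchData expects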
def loadDFO(basedir='/ocean/eolson/MEOPAR/obs/DFOOPDB/', dbname='DFO_OcProfDB.sqlite',
datelims=(),excludeSaanich=True):
"""
load DFO data stored in SQLite database
basedir is location of database
dbname is database name
datelims, if provided, loads only data between first and second datetime in tuple
"""
try:
from sqlalchemy import create_engine, case
from sqlalchemy.orm import create_session
from sqlalchemy.ext.automap import automap_base
from sqlalchemy.sql import and_, or_, not_, func
except ImportError:
raise ImportError('You need to install sqlalchemy in your environment to use this function.')
# definitions
# if db does not exist, exit
if not os.path.isfile(os.path.join(basedir, dbname)):
        raise Exception(f'ERROR: {dbname} does not exist in {basedir}')
engine = create_engine('sqlite:///' + basedir + dbname, echo = False)
Base = automap_base()
# reflect the tables in salish.sqlite:
Base.prepare(engine, reflect=True)
# mapped classes have been created
# existing tables:
StationTBL=Base.classes.StationTBL
ObsTBL=Base.classes.ObsTBL
CalcsTBL=Base.classes.CalcsTBL
session = create_session(bind = engine, autocommit = False, autoflush = True)
SA=case([(CalcsTBL.Salinity_Bottle_SA!=None, CalcsTBL.Salinity_Bottle_SA)], else_=
case([(CalcsTBL.Salinity_T0_C0_SA!=None, CalcsTBL.Salinity_T0_C0_SA)], else_=
case([(CalcsTBL.Salinity_T1_C1_SA!=None, CalcsTBL.Salinity_T1_C1_SA)], else_=
case([(CalcsTBL.Salinity_SA!=None, CalcsTBL.Salinity_SA)], else_=
case([(CalcsTBL.Salinity__Unknown_SA!=None, CalcsTBL.Salinity__Unknown_SA)],
else_=CalcsTBL.Salinity__Pre1978_SA)
))))
Tem=case([(ObsTBL.Temperature!=None, ObsTBL.Temperature)], else_=
case([(ObsTBL.Temperature_Primary!=None, ObsTBL.Temperature_Primary)], else_=
case([(ObsTBL.Temperature_Secondary!=None, ObsTBL.Temperature_Secondary)], else_=ObsTBL.Temperature_Reversing)))
TemUnits=case([(ObsTBL.Temperature!=None, ObsTBL.Temperature_units)], else_=
case([(ObsTBL.Temperature_Primary!=None, ObsTBL.Temperature_Primary_units)], else_=
case([(ObsTBL.Temperature_Secondary!=None, ObsTBL.Temperature_Secondary_units)],
else_=ObsTBL.Temperature_Reversing_units)))
TemFlag=ObsTBL.Quality_Flag_Temp
CT=case([(CalcsTBL.Temperature_CT!=None, CalcsTBL.Temperature_CT)], else_=
case([(CalcsTBL.Temperature_Primary_CT!=None, CalcsTBL.Temperature_Primary_CT)], else_=
case([(CalcsTBL.Temperature_Secondary_CT!=None, CalcsTBL.Temperature_Secondary_CT)],
else_=CalcsTBL.Temperature_Reversing_CT)
))
if len(datelims)<2:
qry=session.query(StationTBL.StartYear.label('Year'),StationTBL.StartMonth.label('Month'),
StationTBL.StartDay.label('Day'),StationTBL.StartHour.label('Hour'),
StationTBL.Lat,StationTBL.Lon,
ObsTBL.Pressure,ObsTBL.Depth,ObsTBL.Chlorophyll_Extracted,
ObsTBL.Chlorophyll_Extracted_units,ObsTBL.Nitrate_plus_Nitrite.label('N'),
ObsTBL.Silicate.label('Si'),ObsTBL.Silicate_units,SA.label('AbsSal'),CT.label('ConsT'),
ObsTBL.Oxygen_Dissolved,ObsTBL.Oxygen_Dissolved_units).\
select_from(StationTBL).join(ObsTBL,ObsTBL.StationTBLID==StationTBL.ID).\
join(CalcsTBL,CalcsTBL.ObsID==ObsTBL.ID).filter(and_(StationTBL.Lat>47-3/2.5*(StationTBL.Lon+123.5),
StationTBL.Lat<47-3/2.5*(StationTBL.Lon+121)))
else:
start_y=datelims[0].year
start_m=datelims[0].month
start_d=datelims[0].day
end_y=datelims[1].year
end_m=datelims[1].month
end_d=datelims[1].day
qry=session.query(StationTBL.StartYear.label('Year'),StationTBL.StartMonth.label('Month'),
StationTBL.StartDay.label('Day'),StationTBL.StartHour.label('Hour'),
StationTBL.Lat,StationTBL.Lon,
ObsTBL.Pressure,ObsTBL.Depth,ObsTBL.Chlorophyll_Extracted,
ObsTBL.Chlorophyll_Extracted_units,ObsTBL.Nitrate_plus_Nitrite.label('N'),
ObsTBL.Silicate.label('Si'),ObsTBL.Silicate_units,SA.label('AbsSal'),CT.label('ConsT'),
ObsTBL.Oxygen_Dissolved,ObsTBL.Oxygen_Dissolved_units).\
select_from(StationTBL).join(ObsTBL,ObsTBL.StationTBLID==StationTBL.ID).\
join(CalcsTBL,CalcsTBL.ObsID==ObsTBL.ID).filter(and_(or_(StationTBL.StartYear>start_y,
and_(StationTBL.StartYear==start_y, StationTBL.StartMonth>start_m),
and_(StationTBL.StartYear==start_y, StationTBL.StartMonth==start_m, StationTBL.StartDay>=start_d)),
or_(StationTBL.StartYear<end_y,
and_(StationTBL.StartYear==end_y,StationTBL.StartMonth<end_m),
and_(StationTBL.StartYear==end_y,StationTBL.StartMonth==end_m, StationTBL.StartDay<end_d)),
StationTBL.Lat>47-3/2.5*(StationTBL.Lon+123.5),
StationTBL.Lat<47-3/2.5*(StationTBL.Lon+121)))#,
#not_(and_(StationTBL.Lat>48.77,StationTBL.Lat<49.27,
# StationTBL.Lon<-123.43))))
if excludeSaanich:
qry1=qry.filter(not_(and_(StationTBL.Lat>48.47,StationTBL.Lat<48.67,
StationTBL.Lon>-123.6,StationTBL.Lon<-123.43)))
df1=pd.read_sql_query(qry1.statement, engine)
else:
df1=pd.read_sql_query(qry.statement, engine)
df1['Z']=np.where(df1['Depth']>=0,df1['Depth'],-1.0*gsw.z_from_p(p=df1['Pressure'].values,lat=df1['Lat'].values))
df1['dtUTC']=[dt.datetime(int(y),int(m),int(d))+dt.timedelta(hours=h) for ind, (y,m,d,h) in df1.loc[:,['Year','Month','Day','Hour']].iterrows()]
session.close()
engine.dispose()
return df1
def _lt0convert(arg):
# convert text '<0' to numeric zero since nutrient concentrations cannot be negative
if arg=='<0':
val=0.0
else:
val=pd.to_numeric(arg, errors='coerce',downcast=None)
return float(val)
def loadPSF(datelims=(),loadChl=True,loadCTD=False):
""" load PSF data from spreadsheets, optionally loading matched T and S data from nearest CTD casts """
dfs=list()
dfchls=list()
if len(datelims)<2:
datelims=(dt.datetime(2014,1,1),dt.datetime(2020,1,1))
if loadCTD:
ctddfs=dict()
if datelims[0].year<2016:
# load 2015
f2015 = pd.read_excel('/ocean/eolson/MEOPAR/obs/PSFCitSci/All_Yrs_Nutrients_2018-01-31_EOEdit.xlsx',
sheet_name = '2015 N+P+Si',dtype={'date (dd/mm/yyyy)':str},engine=excelEngine)
f2015=f2015.drop(f2015.loc[(f2015['lon']<-360)|(f2015['lon']>360)].index)
f2015 = f2015.dropna(subset = ['date (dd/mm/yyyy)', 'Time (Local)', 'lat', 'lon', 'depth'], how='any')
ds=f2015['date (dd/mm/yyyy)'].values
ts=f2015['Time (Local)'].values
dts=[pytz.timezone('Canada/Pacific').localize(dt.datetime.strptime(ii,'%Y-%m-%d %H:%M:%S')+dt.timedelta(hours=jj.hour,minutes=jj.minute,seconds=jj.second)
).astimezone(pytz.utc).replace(tzinfo=None) for ii,jj in zip(ds,ts)]
f2015['dtUTC']=dts
f2015.rename(columns={'lat':'Lat','lon':'Lon','depth':'Z','station':'Station','no23':'NO23','po4':'PO4','si':'Si'},inplace=True)
f2015.drop(['num','date (dd/mm/yyyy)','Time (Local)'],axis=1,inplace=True)
f2015_g=f2015.groupby(['Station','Lat','Lon','dtUTC','Z'],as_index=False)
f2015_m=f2015_g.mean()
f2015=f2015_m.reindex()
dfs.append(f2015)
if loadChl:
# load 2015 chl
Chl2015=pd.read_csv('/ocean/eolson/MEOPAR/obs/PSFCitSci/Chla_2015PSFSalish_Sea_22.01.2018vers_8_CN_edits.csv',encoding='latin-1',
dtype={'Date sampled (mm/dd/yyyy)':str, 'Time of Day (Local)':str,
'Latitude':str,'Longitude':str,'Chl a':float,'Phaeophytin':float,'Depth':float},parse_dates=False)
degminlat=[ii.split('ç') for ii in Chl2015['Latitude'].values]
Chl2015['Lat']=[float(ii[0])+float(ii[1])/60 for ii in degminlat]
degminlon=[ii.split('ç') for ii in Chl2015['Longitude'].values]
Chl2015['Lon']=[-1.0*(float(ii[0])+float(ii[1])/60) for ii in degminlon]
Chl2015 = Chl2015.dropna(subset = ['Date sampled (mm/dd/yyyy)', 'Time of Day (Local)', 'Lat', 'Lon', 'Depth'], how='any')
ds=Chl2015['Date sampled (mm/dd/yyyy)']
ts=Chl2015['Time of Day (Local)']
dts=[pytz.timezone('Canada/Pacific').localize(dt.datetime.strptime(ii+'T'+jj,'%m/%d/%yT%I:%M:%S %p')).astimezone(pytz.utc).replace(tzinfo=None)
for ii,jj in zip(ds,ts)]
Chl2015['dtUTC']=dts
Chl2015['Z']=[float(ii) for ii in Chl2015['Depth']]
Chl2015.drop(['Date sampled (mm/dd/yyyy)','Time of Day (Local)','Latitude','Longitude','Depth'],axis=1,inplace=True)
Chl2015.rename(columns={'Chl a':'Chl','Phaeophytin':'Phaeo','Station Name':'Station'},inplace=True)
Chl2015_g=Chl2015.groupby(['Station','Lat','Lon','dtUTC','Z'],as_index=False)
Chl2015_m=Chl2015_g.mean()
Chl2015=Chl2015_m.reindex()
dfchls.append(Chl2015)
if loadCTD:
phys2015=pd.read_csv('/ocean/eolson/MEOPAR/obs/PSFCitSci/phys/CitSci2015_20180621.csv',skiprows=lambda x: x in [0,1,2,3,4,6],delimiter=',',
dtype={'Patrol': str,'ID':str,'station':str,'datetime':str,'latitude':float,'longitude':float},
converters={'pressure': lambda x: float(x),'depth': lambda x: float(x),'temperature': lambda x: float(x),
'conductivity': lambda x: float(x),'salinity': lambda x: float(x),
'o2SAT': lambda x: float(x),'o2uM':lambda x: float(x),'chl':lambda x: float(x)})
ctddfs[2015]=dict()
ctddfs[2015]['df']=phys2015
ctddfs[2015]['dtlims']=(dt.datetime(2014,12,31),dt.datetime(2016,1,1))
if (datelims[0].year<2017) and (datelims[1].year>2015):
# load 2016
f2016N = pd.read_excel('/ocean/eolson/MEOPAR/obs/PSFCitSci/All_Yrs_Nutrients_2018-01-31_EOEdit.xlsx',
sheet_name = '2016 N+P',dtype={'NO3+NO':str,'PO4':str},na_values=('nan','NaN','30..09'),
engine=excelEngine)
f2016N = f2016N.drop(f2016N.keys()[11:], axis=1)
f2016N['NO23']=[_lt0convert(ii) for ii in f2016N['NO3+NO']]
f2016N['PO4_2']=[_lt0convert(ii) for ii in f2016N['PO4']]
f2016N = f2016N.dropna(subset = ['Date (dd/mm/yyyy)', 'Time (Local)', 'Latitude', 'Longitude', 'Depth'], how='any')
ds=f2016N['Date (dd/mm/yyyy)']
ts=f2016N['Time (Local)']
dts=[pytz.timezone('Canada/Pacific').localize(dt.datetime(ii.year,ii.month,ii.day)+dt.timedelta(hours=jj.hour,minutes=jj.minute,seconds=jj.second)
).astimezone(pytz.utc).replace(tzinfo=None) for ii,jj in zip(ds,ts)]
f2016N['dtUTC']=dts
f2016N.drop(['Crew','Date (dd/mm/yyyy)','Time (Local)', 'Lat_reported',
'Long_reported','PO4','NO3+NO'],axis=1,inplace=True)
f2016N.rename(columns={'PO4_2':'PO4','Latitude':'Lat','Longitude':'Lon','Depth':'Z'},inplace=True)
f2016N_g=f2016N.groupby(['Station','Lat','Lon','dtUTC','Z'],as_index=False)
f2016N_m=f2016N_g.mean()
f2016Si = pd.read_excel('/ocean/eolson/MEOPAR/obs/PSFCitSci/All_Yrs_Nutrients_2018-01-31_EOEdit.xlsx',
sheet_name = '2016 SiO2',engine=excelEngine)
f2016Si = f2016Si.drop(f2016Si.keys()[9:], axis=1)
f2016Si = f2016Si.dropna(subset = ['DDMMYYYY', 'Time (Local)', 'Latitude', 'Longitude', 'Depth'], how='any')
ds=f2016Si['DDMMYYYY']
ts=f2016Si['Time (Local)']
dts=[pytz.timezone('Canada/Pacific').localize(dt.datetime(ii.year,ii.month,ii.day)+dt.timedelta(hours=jj.hour,minutes=jj.minute,seconds=jj.second)
).astimezone(pytz.utc).replace(tzinfo=None) for ii,jj in zip(ds,ts)]
f2016Si['dtUTC']=dts
z=[0 if (iii=='S') else float(iii) for iii in f2016Si['Depth'].values]
f2016Si['Z']=z
f2016Si.rename(columns={'Latitude':'Lat','Longitude':'Lon','SiO2 µM':'Si','Site ID':'Station'},inplace=True)
f2016Si.drop(['DDMMYYYY','Time (Local)', 'Lat_reported',
'Long_reported','Depth'],axis=1,inplace=True)
f2016Si_g=f2016Si.groupby(['Station','Lat','Lon','dtUTC','Z'],as_index=False)
f2016Si_m=f2016Si_g.mean()
f2016 = pd.merge(f2016N_m, f2016Si_m, how='outer', left_on=['Station','Lat','Lon','dtUTC','Z'], right_on = ['Station','Lat','Lon','dtUTC','Z'])
dfs.append(f2016)
if loadChl:
# load 2016 chl
Chl2016Dat=pd.read_csv('/ocean/eolson/MEOPAR/obs/PSFCitSci/2016ChlorophyllChlData.csv')#,encoding='latin-1')
Chl2016Sta=pd.read_csv('/ocean/eolson/MEOPAR/obs/PSFCitSci/2016ChlorophyllStationData.csv')
Chl2016Sta.rename(columns={'DateCollected ':'DateCollected','Latitude':'Lat','Longitude':'Lon'},inplace=True)
Chl2016Sta.dropna(subset = ['DateCollected', 'TimeCollected', 'Lat','Lon', 'Depth_m'], how='any',inplace=True)
Chl2016Sta.drop_duplicates(inplace=True)
Chl2016Dat.drop(Chl2016Dat.loc[Chl2016Dat.quality_flag>3].index,axis=0,inplace=True)
Chl2016Dat.drop(['Chla_ugL','Phaeophytin_ugL','quality_flag','ShipBoat'],axis=1,inplace=True)
Chl2016Dat.rename(columns={'MeanChla_ugL':'Chl','MeanPhaeophytin_ugL':'Phaeo'},inplace=True)
Chl2016=pd.merge(Chl2016Sta,Chl2016Dat,how='inner', left_on=['DateCollected','Station','Depth_m'], right_on = ['DateCollected','Station','Depth_m'])
Chl2016['Z']=[float(ii) for ii in Chl2016['Depth_m']]
ds=Chl2016['DateCollected']
ts=Chl2016['TimeCollected']
dts=[pytz.timezone('Canada/Pacific').localize(dt.datetime.strptime(ii+'T'+jj,'%m-%d-%YT%I:%M:%S %p')).astimezone(pytz.utc).replace(tzinfo=None)
for ii,jj in zip(ds,ts)]
Chl2016['dtUTC']=dts
Chl2016.drop(['DateCollected','TimeCollected','CV'],axis=1,inplace=True)
dfchls.append(Chl2016)
if loadCTD:
phys2016=pd.read_csv('/ocean/eolson/MEOPAR/obs/PSFCitSci/phys/CitSci2016_20180621.csv',skiprows=lambda x: x in [0,1,2,3,4,5,6,7,9],delimiter=',',
dtype={'Patrol': str,'ID':str,'station':str,'datetime':str,'latitude':float,'longitude':float},
converters={'pressure': lambda x: float(x),'depth': lambda x: float(x),'temperature': lambda x: float(x),
'conductivity': lambda x: float(x),'salinity': lambda x: float(x),
'o2SAT': lambda x: float(x),'o2uM':lambda x: float(x),'chl':lambda x: float(x)})
ctddfs[2016]=dict()
ctddfs[2016]['df']=phys2016
ctddfs[2016]['dtlims']=(dt.datetime(2015,12,31),dt.datetime(2017,1,1))
if (datelims[1].year>2016):
# load 2017
f2017 = pd.read_excel('/ocean/eolson/MEOPAR/obs/PSFCitSci/All_Yrs_Nutrients_2018-01-31_EOEdit.xlsx',
sheet_name = '2017 N+P+Si',skiprows=3,dtype={'Date (dd/mm/yyyy)':dt.date,'Time (Local)':dt.time,
'NO3+NO':str,'PO4':str,'Si':str},engine=excelEngine)
f2017['NO23']=[_lt0convert(ii) for ii in f2017['NO3+NO']]
f2017['PO4_2']=[_lt0convert(ii) for ii in f2017['PO4']]
f2017['Si_2']=[_lt0convert(ii) for ii in f2017['Si']]
degminlat=[ii.split('°') for ii in f2017['Latitude'].values]
f2017['Lat']=[float(ii[0])+float(ii[1])/60 for ii in degminlat]
degminlon=[ii.split('°') for ii in f2017['Longitude'].values]
f2017['Lon']=[-1.0*(float(ii[0])+float(ii[1])/60) for ii in degminlon]
f2017 = f2017.dropna(subset = ['Date (dd/mm/yyyy)', 'Time (Local)', 'Lat', 'Lon', 'Depth'], how='any')
ds=f2017['Date (dd/mm/yyyy)']
ts=f2017['Time (Local)']
dts=[pytz.timezone('Canada/Pacific').localize(dt.datetime(ii.year,ii.month,ii.day)+dt.timedelta(hours=jj.hour,minutes=jj.minute,seconds=jj.second)
).astimezone(pytz.utc).replace(tzinfo=None) for ii,jj in zip(ds,ts)]
f2017['dtUTC']=dts
f2017.drop(['Crew','Date (dd/mm/yyyy)','Time (Local)','Comments','Latitude','Longitude','NO3+NO'],axis=1,inplace=True)
f2017.rename(columns={'Depth':'Z','PO4_2':'PO4','Si_2':'Si'},inplace=True)
f2017_g=f2017.groupby(['Station','Lat','Lon','dtUTC','Z'],as_index=False)
f2017_m=f2017_g.mean()
f2017=f2017_m.reindex()
dfs.append(f2017)
if loadChl:
# load 2017 chl
Chl2017=pd.read_excel('/ocean/eolson/MEOPAR/obs/PSFCitSci/PSF 2017 Chla_Data_Final_v-January 22-2018_CN_edits.xlsx',
sheet_name='avg-mean-cv%',skiprows=15,usecols=[1,3,4,5,7,9,11],
names=['Date','Station','Time','Z0','Chl','Qflag','Phaeo'],engine=excelEngine)
Chl2017.dropna(subset=['Station','Date','Time','Z0'],how='any',inplace=True)
Chl2017.dropna(subset=['Chl','Phaeo'],how='all',inplace=True)
Chl2017.drop(Chl2017.loc[Chl2017.Qflag>3].index,axis=0,inplace=True)
ds=Chl2017['Date']
ts=Chl2017['Time']
dts=[pytz.timezone('Canada/Pacific').localize(dt.datetime(ii.year,ii.month,ii.day)+dt.timedelta(hours=jj.hour,minutes=jj.minute,seconds=jj.second)).astimezone(pytz.utc).replace(tzinfo=None)
for ii,jj in zip(ds,ts)]
Chl2017['dtUTC']=dts
staMap2017=f2017.loc[:,['Station','Lat','Lon']].copy(deep=True)
staMap2017.drop_duplicates(inplace=True)
            Chl2017=pd.merge(Chl2017,staMap2017,how='inner', left_on=['Station'], right_on = ['Station'])
# Interactive dashboard with Streamlit, Folium and Plotly for real-time monitoring of Covid-19 cases
# Run in the terminal: streamlit run Mini-Projeto1.py
# Imports
import json
import folium
import requests
import mimetypes
import http.client
import pandas as pd
import streamlit as st
import plotly
import plotly.express as px
from streamlit_folium import folium_static
from folium.plugins import HeatMap
from pandas.io.json import json_normalize
import warnings
warnings.filterwarnings("ignore", category = FutureWarning)
# Função Main
def main():
    # Main area title
st.markdown("<h1 style='text-align: center; color: #fa634d;'><strong><u>Real-Time Covid-19 Dashboard</u></strong></h1>", unsafe_allow_html = True)
    # Sidebar menu title
st.sidebar.markdown("<h1 style='text-align: center; color: #baccee;'><strong><u>Monitoramento de Casos de Covid-19</u></strong></h1>", unsafe_allow_html = True)
    # Main area subtitles
st.markdown("O Dashboard Utiliza Dados Reais da Johns Hopkins CSSE.", unsafe_allow_html = True)
st.markdown("Os Dados São Atualizados Diariamente, com uma janela de 24 horas.", unsafe_allow_html = True)
    # Connect to real-time data via the API
# https://coronavirus.jhu.edu/map.html
# https://covid19api.com/
conn = http.client.HTTPSConnection("api.covid19api.com")
payload = ''
headers = {}
conn.request("GET","/summary",payload,headers)
res = conn.getresponse()
data = res.read().decode('UTF-8')
covid = json.loads(data)
    # Generate the dataframe
df = pd.DataFrame(covid['Countries'])
# Data Cleaning
# Drop unnecessary features
covid1 = df.drop(columns = ['CountryCode', 'Slug', 'Premium'], axis = 1)
# Feature Engineering
covid1['ActiveCases'] = covid1['TotalConfirmed'] - covid1['TotalRecovered']
covid1['ActiveCases'] = covid1['ActiveCases'] - covid1['TotalDeaths']
# New Dataframes
dfn = covid1.drop(['NewConfirmed', 'NewDeaths', 'NewRecovered'], axis = 1)
dfn = dfn.groupby('Country')['TotalConfirmed','TotalDeaths','TotalRecovered','ActiveCases'].sum().sort_values(by = 'TotalConfirmed', ascending = False)
dfn.style.background_gradient(cmap = 'Oranges')
dfc = covid1.groupby('Country')['TotalConfirmed', 'TotalDeaths', 'TotalRecovered', 'ActiveCases'].max().sort_values(by = 'TotalConfirmed', ascending = False).reset_index()
    # Map 1
m1 = folium.Map(tiles = 'Stamen Terrain', min_zoom = 1.5)
url = 'https://raw.githubusercontent.com/python-visualization/folium/master/examples/data'
country_shapes = f'{url}/world-countries.json'
folium.Choropleth(geo_data = country_shapes,
min_zoom = 2,
name = 'Covid-19',
data = covid1,
columns = ['Country','TotalConfirmed'],
key_on = 'feature.properties.name',
fill_color = 'YlOrRd',
nan_fill_color = 'white',
legend_name = 'Total de Casos Confirmados',).add_to(m1)
    # Map 2
m2 = folium.Map(tiles = 'Stamen Terrain', min_zoom = 1.5)
url='https://raw.githubusercontent.com/python-visualization/folium/master/examples/data'
country_shapes = f'{url}/world-countries.json'
folium.Choropleth(geo_data = country_shapes,
min_zoom = 2,
name = 'Covid-19',
data = covid1,
columns = ['Country','TotalRecovered'],
key_on = 'feature.properties.name',
fill_color = 'PuBu',
nan_fill_color = 'white',
legend_name = 'Total de Casos Recuperados',).add_to(m2)
    # Map 3
m3 = folium.Map(tiles = 'Stamen Terrain', min_zoom = 1.5)
url = 'https://raw.githubusercontent.com/python-visualization/folium/master/examples/data'
country_shapes = f'{url}/world-countries.json'
folium.Choropleth(geo_data = country_shapes,
min_zoom = 2,
name = 'Covid-19',
data = covid1,
columns = ['Country','ActiveCases'],
key_on = 'feature.properties.name',
fill_color = 'YlGnBu',
nan_fill_color = 'white',
legend_name = 'Total de Casos Ativos',).add_to(m3)
    # Country coordinates
    coordinates = pd.read_csv('dados/country-coordinates-world.csv')
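    # A minimal sketch of how the remaining pieces could fit together; the column
    # names 'Country', 'latitude' and 'longitude' in the coordinates file are
    # assumptions about that CSV, not taken from the original script.
    covid_geo = pd.merge(covid1, coordinates, on='Country', how='inner')
    m4 = folium.Map(tiles='Stamen Terrain', min_zoom=1.5)
    HeatMap(covid_geo[['latitude', 'longitude', 'TotalConfirmed']].values.tolist(),
            radius=15).add_to(m4)
    # Render the maps and a summary chart inside the Streamlit page
    st.markdown('### Casos Confirmados')
    folium_static(m1)
    st.markdown('### Casos Recuperados')
    folium_static(m2)
    st.markdown('### Casos Ativos')
    folium_static(m3)
    st.markdown('### Mapa de Calor - Casos Confirmados')
    folium_static(m4)
    fig = px.bar(dfc.head(10), x='Country', y='TotalConfirmed',
                 title='Top 10 - Casos Confirmados')
    st.plotly_chart(fig)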
#### Master Script 5: Assess CPM_MNLR and CPM_POLR performance ####
#
# <NAME>
# University of Cambridge
# email address: <EMAIL>
#
### Contents:
# I. Initialisation
# II. Create bootstrapping resamples (that will be used for all model performance evaluation)
# III. Prepare compiled CPM_MNLR and CPM_POLR testing set predictions
# IV. Calculate and save performance metrics
### I. Initialisation
# Fundamental libraries
import os
import re
import sys
import time
import glob
import random
import datetime
import warnings
import itertools
import numpy as np
import pandas as pd
import pickle as cp
import seaborn as sns
import multiprocessing
from scipy import stats
from pathlib import Path
from ast import literal_eval
import matplotlib.pyplot as plt
from scipy.special import logit
from collections import Counter
from argparse import ArgumentParser
from pandas.api.types import CategoricalDtype
os.environ['CUDA_LAUNCH_BLOCKING'] = "1"
warnings.filterwarnings(action="ignore")
# SciKit-Learn methods
from sklearn.metrics import confusion_matrix, accuracy_score, roc_auc_score, roc_curve
from sklearn.preprocessing import LabelEncoder, KBinsDiscretizer, OneHotEncoder, StandardScaler, minmax_scale
from sklearn.model_selection import StratifiedShuffleSplit
from sklearn.utils import resample
from sklearn.utils.class_weight import compute_class_weight
# StatsModel methods
from statsmodels.nonparametric.smoothers_lowess import lowess
from statsmodels.miscmodels.ordinal_model import OrderedModel
from statsmodels.discrete.discrete_model import Logit
from statsmodels.tools.tools import add_constant
# TQDM for progress tracking
from tqdm import tqdm
# Custom methods
from functions.analysis import calc_bs_ORC, calc_bs_gen_c, calc_bs_thresh_AUC, calc_bs_cm, calc_bs_accuracy, calc_bs_thresh_accuracy, calc_bs_thresh_ROC, calc_bs_thresh_calibration, calc_bs_thresh_calib_metrics
# Define version for assessment
VERSION = 'LOGREG_v1-0'
model_dir = '../CPM_outputs/'+VERSION
### II. Create bootstrapping resamples (that will be used for all model performance evaluation)
# Establish number of resamples for bootstrapping
NUM_RESAMP = 1000
# Establish number of cores for all parallel processing
NUM_CORES = multiprocessing.cpu_count() - 2
# Create directory to store model performance results
os.makedirs('../model_performance',exist_ok=True)
# Load cross-validation information to get GOSE and GUPIs
cv_splits = pd.read_csv('../cross_validation_splits.csv')
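# A minimal sketch of drawing the bootstrap resamples described in section II; the
# 'GUPI' and 'GOSE' column names in cv_splits are assumptions, so the block is left
# commented out rather than presented as the original implementation.
# uniq_patients = cv_splits[['GUPI', 'GOSE']].drop_duplicates().reset_index(drop=True)
# bs_resamples = [resample(uniq_patients.GUPI.values,
#                          replace=True,
#                          n_samples=len(uniq_patients),
#                          random_state=i)
#                 for i in range(NUM_RESAMP)]
# with open('../model_performance/bs_resamples.pkl', 'wb') as f:
#     cp.dump(bs_resamples, f)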
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Plot the parents' portfolio returns over unseen (out-of-sample) periods.
"""
import pickle
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from Simulacion import Optimizacion
from Simulacion import Graficos
from Simulacion import Genetico
from Simulacion import Kclusters
simulacion = Optimizacion.simulacion
grafico = Graficos.simulacion
genetico = Genetico.genetico
k_clusters = Kclusters.k_clusters
#%% Data from csv files
csv = ['AMXL.MX','WALMEX.MX','TLEVISACPO.MX','GMEXICOB.MX','GFNORTEO.MX','CEMEXCPO.MX','PENOLES.MX','GFINBURO.MX','ELEKTRA.MX','BIMBOA.MX','AC.MX','KIMBERA.MX','LABB.MX','LIVEPOL1.MX','ASURB.MX','GAPB.MX','ALPEKA.MX','GRUMAB.MX','ALSEA.MX','GCARSOA1.MX','PINFRA.MX']
for i in np.arange(len(csv)):
    # csv[i] = '../Test/%s.csv'%csv[i] # Use all of the data when running the tests
csv[i] = '../Train/%s.csv'%csv[i]
cetes = 'cetes_diarios.csv'
ndias = [5,20,40,125]
n_clusters = 4
nombre = 'model_close'
model_close = pickle.load(open('model_close.sav','rb'))
#%% Load the results generated by the genetic algorithm.
nombre = 'Intento3_2'
[punt,padres,hist_mean,hist_std,hist_cal,hist_padres] = pickle.load(open(nombre + '.sav','rb'))
#%% To generate a heat map of the decisions of all the parents:
#hm_padres = np.zeros((hist_padres[0].shape[0]*len(hist_padres),hist_padres[0].shape[1])) # Create the matrix used to build the heat map.
#for i in np.arange(len(hist_padres)):
# hm_padres[i*hist_padres[0].shape[0]:(i+1)*hist_padres[0].shape[0],:] = hist_padres[i]
#%% Draw the heatmap
#fig = plt.figure(figsize=(12,12))
#plt.imshow(hm_padres)
#%% Second heatmap, this time with the average of the parents.
#hm_padres2 = np.zeros((len(hist_padres),hist_padres[0].shape[1])) # Create the matrix used to build the heat map.
#for i in np.arange(len(hist_padres)):
# hm_padres2[i,:] = np.round(hist_padres[i].mean(axis=0)) # Rounded
# hm_padres2[i,:] = hist_padres[i].mean(axis=0) # Not rounded
#%% Dibujamos el heatmap
#fig = plt.figure(figsize=(24,4))
#plt.imshow(hm_padres2)
#%%############################################################################
#################### Unseen periods. Same parents. #####################
ult = 176
hist_padres[10]
rf = 0.0471/252
gen = 5
#%%
#Vals = np.zeros((len(hist_padres[0])*gen,6))
#cont = 0
#for i in np.arange(gen)+1:
# for j in range(len(hist_padres[0])):
# Vp = simulacion(csv,ndias,model_close,hist_padres[-i][-j],cetes)
# plt.plot(Vp[:,-ult:-1].T/Vp[:,-ult].T) # Plot our parent's behaviour for each individual asset
# plt.plot(np.mean(Vp[:,-ult:-1].T/Vp[:,-ult].T,axis=1)) # Plot our parent's behaviour for the whole portfolio
#
# pct = (Vp[:,1:]/Vp[:,0:-1]-1)
# g_pct = pct.mean(axis=0)
#
# mean1 = g_pct[:-ult].mean()
# mean2 = g_pct[-ult:].mean()
# std1 = g_pct[:-ult].std()
# std2 = g_pct[-ult:].std()
# shpe1 = (mean1-rf)/std1
# shpe2 = (mean2-rf)/std2
# Vals[cont,:] = [mean1,mean2,std1,std2,shpe1,shpe2]
# cont += 1
#%%
#pickle.dump(Vals, open('Vals.sav','wb'))
#pickle.load(open('Vals.sav','rb'))
#%% ### Decision-making consensus. ###
SP = pd.value_counts(padres[:,0])
for i in range(len(padres[0])-1):
    SP = pd.concat([SP, pd.value_counts(padres[:,i+1])], axis=1)
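# Illustrative sketch (not in the original script): with the per-asset decision
# counts collected in SP (assumed to be concatenated column-wise above), the
# consensus decision for each asset is simply the most frequent value.
def _demo_consensus_decisions(SP):
    return SP.fillna(0).idxmax(axis=0)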
#!/usr/bin/env python3.7
# -*- coding: utf-8 -*-
"""
Created on Mon Nov 23 11:46:57 2020
@author: reideej1
:DESCRIPTION: Evaluate coaching data for the last 50 years of college football
- the goal is to determine how coaches who struggle in their first 3 years
fare over time at the same program
:REQUIRES: scrape_sports_reference.py located in: cfbAnalysis\src\data
:TODO:
"""
#==============================================================================
# Package Import
#==============================================================================
import datetime
import glob
import os
import numpy as np
import pandas as pd
import pathlib
import time
import tqdm
from src.data.scrape_sports_reference import *
#==============================================================================
# Reference Variable Declaration
#==============================================================================
#==============================================================================
# Function Definitions
#==============================================================================
def renameSchool(df, name_var):
'''
Purpose: Rename a school/university to a standard name as specified in
the file `school_abbreviations.csv`
Inputs
------
df : Pandas Dataframe
DataFrame containing a school-name variable for which the names
need to be standardized
name_var : string
Name of the variable which is to be renamed/standardized
Outputs
-------
list(row)[0] : string
Standardized version of the school's name based on the first value
in the row in the file `school_abbreviations.csv`
'''
# read in school name information
df_school_names = pd.read_csv(r'references\names_pictures_ncaa.csv')
# convert the dataframe to a dictionary such that the keys are the
# optional spelling of each school and the value is the standardized
# name of the school
dict_school_names = {}
for index, row in df_school_names.iterrows():
# isolate the alternative name columns
names = row[[x for x in row.index if 'Name' in x]]
# convert the row to a list that doesn't include NaN values
list_names = [x for x in names.values.tolist() if str(x) != 'nan']
# add the nickname to the team names as an alternative name
nickname = row['Nickname']
list_names_nicknames = list_names.copy()
for name in list_names:
list_names_nicknames.append(name + ' ' + nickname)
# extract the standardized team name
name_standardized = row['Team']
# add the standardized name
list_names_nicknames.append(name_standardized)
# add the nickname to the standardized name
list_names_nicknames.append(name_standardized + ' ' + nickname)
# for every alternative spelling of the team, set the value to be
# the standardized name
for name_alternate in list_names_nicknames:
dict_school_names[name_alternate] = name_standardized
# df[name_var] = df[name_var].apply(
# lambda x: dict_school_names[x] if str(x) != 'nan' else '')
df[name_var] = df[name_var].apply(
lambda x: rename_school_helper(x, dict_school_names))
return df
def rename_school_helper(name_school, dict_school_names):
try:
if str(name_school) != 'nan':
return dict_school_names[name_school]
else:
return ''
except:
print(f'School not found in school abbreviations .csv file: {name_school} ')
return name_school
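# Illustrative usage sketch (not part of the original module): renameSchool is
# applied once per school-name column, mirroring its use further down in
# create_week_by_week_dataframe.
def _demo_standardize_names(df_games):
    df_games = renameSchool(df_games, 'School')
    df_games = renameSchool(df_games, 'Opponent')
    return df_games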
def create_coach_dataframe(df_schools):
'''
Purpose: Given historic school data, create a dataframe of coaches and
their performance data on a year-by-year basis
Inputs
------
df_schools : Pandas DataFrame
Contains year-by-year results for each school (with coaches' names)
Outputs
-------
df_coaches : Pandas DataFrame
A dataframe containing all historic season data from a coaching perspective
'''
# Create a dictionary that assigns each school to its current conference
df_conf = df_schools.groupby(['School', 'Conf']).head(1).groupby('School').head(1).reset_index(drop = True)
df_conf = df_conf[['School', 'Conf']]
df_conf['Power5'] = df_conf.apply(lambda row: True if row['Conf'] in [
'SEC', 'Pac-12', 'Big 12', 'ACC', 'Big Ten'] else False, axis = 1)
df_conf = df_conf.set_index('School')
dict_conf = df_conf.to_dict(orient = 'index')
# Create a coaching dataframe by iterating over every year for every school
list_coaches = []
for index, row in df_schools.iterrows():
# handle every coach that coached that season
for coach in row['Coach(es)'].split(', '):
dict_coach_year = {}
dict_coach_year['coach'] = coach.split(' (')[0].strip()
dict_coach_year['year'] = row['Year']
dict_coach_year['school'] = row['School']
dict_coach_year['ranking_pre'] = row['AP_Pre']
dict_coach_year['ranking_high'] = row['AP_High']
dict_coach_year['ranking_post'] = row['AP_Post']
dict_coach_year['ranked_pre'] = not pd.isna(row['AP_Pre'])
dict_coach_year['ranked_post'] = not pd.isna(row['AP_Post'])
try:
dict_coach_year['ranked_top_10'] = row['AP_Post'] <= 10
except:
print(row['AP_Post'])
dict_coach_year['ranked_top_5'] = row['AP_Post'] <= 5
# handle bowl games
if pd.isna(row['Bowl']):
dict_coach_year['bowl'] = False
dict_coach_year['bowl_name'] = ''
dict_coach_year['bowl_win'] = False
else:
dict_coach_year['bowl'] = True
dict_coach_year['bowl_name'] = row['Bowl'].split('-')[0]
if '-' in str(row['Bowl']):
try:
if row['Bowl'].split('-')[1] == 'W':
dict_coach_year['bowl_win'] = True
except:
print(row['Bowl'])
# handle wins and losses
if len(coach.split('(')[1].split('-')) > 2:
dict_coach_year['W'] = coach.split('(')[1].split('-')[0]
dict_coach_year['L'] = coach.split('(')[1].split('-')[1].strip(')')
dict_coach_year['T'] = coach.split('(')[1].split('-')[2].strip(')')
else:
dict_coach_year['W'] = coach.split('(')[1].split('-')[0]
dict_coach_year['L'] = coach.split('(')[1].split('-')[1].strip(')')
# assign conference information
dict_coach_year['conf'] = dict_conf[row['School']]['Conf']
dict_coach_year['power5'] = dict_conf[row['School']]['Power5']
list_coaches.append(dict_coach_year)
# Convert list to DataFrame
df_coaches = pd.DataFrame(list_coaches)
# Convert all Tie Nans to 0
df_coaches['T'] = df_coaches['T'].fillna(0)
# Identify all unique coaches in the dataframe
list_coaches = list(df_coaches['coach'].unique())
# Cast Win and Loss columns to ints
df_coaches['W'] = df_coaches['W'].astype('int')
df_coaches['L'] = df_coaches['L'].astype('int')
df_coaches['T'] = df_coaches['T'].astype('int')
# Add a column for games coached in the season
df_coaches['GP'] = df_coaches.apply(lambda row: row['W'] + row['L'] + row['T'], axis = 1)
return df_coaches
def add_coach_metadata(df_stint):
'''
Purpose: Iterate over a coach's historic data and tabulate totals on a
year-by-year basis
Inputs
------
df_stint : Pandas DataFrame
Contains year-by-year results for a coach
** Note: This is continuous years only. Breaks in coaching stints
are treated as separate coaching histories **
Outputs
-------
df_coach : Pandas DataFrame
Coaching data with updated year-by-year totals
'''
df_coach = df_stint.copy()
# 1. Year # at school
df_coach['season'] = list(range(1,len(df_coach)+1))
# 2. Cumulative games coached at school (on a year-by-year basis)
df_coach['cum_GP'] = df_coach['GP'].cumsum(axis = 0)
# 3. Cumulative wins at school (on a year-by-year basis)
df_coach['cum_W'] = df_coach['W'].cumsum(axis = 0)
# 4. Cumulative losses at school (on a year-by-year basis)
df_coach['cum_L'] = df_coach['L'].cumsum(axis = 0)
# 5. Cumulative ties at school (on a year-by-year basis)
df_coach['cum_T'] = df_coach['T'].cumsum(axis = 0)
# 6. Cumulative Win Pct at school (on a year-by-year basis)
if len(df_coach) == 1:
if int(df_coach['cum_GP']) == 0:
df_coach['cum_win_pct'] = 0
else:
df_coach['cum_win_pct'] = df_coach.apply(lambda row: row['cum_W'] / row['cum_GP'] if row['cum_GP'] != 0 else 0, axis = 1)
else:
df_coach['cum_win_pct'] = df_coach.apply(lambda row: row['cum_W'] / row['cum_GP'] if row['cum_GP'] != 0 else 0, axis = 1)
# 7. Total bowl games at school
df_coach['total_bowl'] = df_coach['bowl'].sum(axis = 0)
# 8. Total bowl wins at school
df_coach['total_bowl_win'] = df_coach['bowl_win'].sum(axis = 0)
# 9. Total AP Preseason rankings
df_coach['total_ranked_pre'] = df_coach['ranked_pre'].sum(axis = 0)
# 10. Total AP Postseason rankings
df_coach['total_ranked_post'] = df_coach['ranked_post'].sum(axis = 0)
# 11. Total Top 10 finishes
df_coach['total_top_10'] = df_coach['ranked_top_10'].sum(axis = 0)
# 12. Total Top 5 finishes
df_coach['total_top_5'] = df_coach['ranked_top_5'].sum(axis = 0)
# 13. Total Seasons Coached at School
df_coach['total_seasons'] = df_coach.iloc[len(df_coach)-1]['season']
# 14. Total Games Coached at School
df_coach['total_games'] = df_coach.iloc[len(df_coach)-1]['cum_GP']
# 15. Total Wins at School
df_coach['total_wins'] = df_coach.iloc[len(df_coach)-1]['cum_W']
# 16. Total Losses at School
df_coach['total_losses'] = df_coach.iloc[len(df_coach)-1]['cum_L']
# 17. Total Win Pct at School
df_coach['total_win_pct'] = df_coach.iloc[len(df_coach)-1]['cum_win_pct']
return df_coach
def calculate_year_by_year(df_coaches):
'''
Purpose: Given the data for coaches in a historical perspective, iterate
through their coaching stints and calculate year-by-year totals in an
        effort to understand their progress over time
Inputs
------
df_coaches : Pandas DataFrame
A dataframe containing all historic season data from a coaching perspective
Outputs
-------
df_yr_by_yr : Pandas DataFrame
Coaching data with updated year-by-year totals separated by stints
at schools in each coach's career
'''
# make an empty dataframe for storing new coach info
df_yr_by_yr = pd.DataFrame()
# Coach-by-coach --> Year by year, determine the following:
gps = df_coaches.groupby(['coach', 'school'])
for combo, df_coach in tqdm.tqdm(gps):
# sort the dataframe by earliest year to latest
df_coach = df_coach.sort_values(by = 'year')
# look for gaps in years
num_stints = 1
list_stint_end = []
list_years = list(df_coach['year'])
for num_ele in list(range(0,len(list_years))):
if (num_ele == 0):
pass
else:
if list_years[num_ele] - list_years[num_ele-1] > 1:
# print(f"Gap detected for coach: {df_coach.iloc[0]['coach']}")
# print(f" -- Gap between {list_years[num_ele]} and {list_years[num_ele-1]}")
list_stint_end.append(list_years[num_ele-1])
num_stints = num_stints + 1
# handle coaches with multiple stints
if num_stints >= 2:
for stint_count in list(range(0,num_stints)):
# split the coaches data into stints
if stint_count == 0:
year_stint_end = list_stint_end[stint_count]
df_stint = df_coach[df_coach['year'] <= year_stint_end]
elif stint_count < num_stints-1:
year_stint_end = list_stint_end[stint_count]
year_stint_end_prev = list_stint_end[stint_count-1]
df_stint = df_coach[df_coach['year'] <= year_stint_end]
df_stint = df_stint[df_stint['year'] > year_stint_end_prev]
else:
year_stint_end_prev = list_stint_end[stint_count-1]
df_stint = df_coach[df_coach['year'] > year_stint_end_prev]
# process the data on a year by year basis
df_stint = add_coach_metadata(df_stint)
# Add coach dataframe to overall dataframe
if len(df_yr_by_yr) == 0:
df_yr_by_yr = df_stint.copy()
else:
df_yr_by_yr = df_yr_by_yr.append(df_stint)
else:
# process the data on a year by year basis
df_coach = add_coach_metadata(df_coach)
# Add coach dataframe to overall dataframe
if len(df_yr_by_yr) == 0:
df_yr_by_yr = df_coach.copy()
else:
df_yr_by_yr = df_yr_by_yr.append(df_coach)
# reset dataframe index
df_yr_by_yr = df_yr_by_yr.reset_index(drop = True)
return df_yr_by_yr
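# Illustrative pipeline sketch (not part of the original module): the intended
# call order, assuming df_schools comes from the scrape_sports_reference helpers.
def _demo_coach_history(df_schools):
    df_coaches = create_coach_dataframe(df_schools)
    return calculate_year_by_year(df_coaches)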
def create_week_by_week_dataframe(df_all_games, df_schools, games_sf):
'''
Purpose: Combine the week-by-week results for each school with the
end-of-year school/coach information to create a week-by-week
dataframe detailing who coached each team when. This will facilitate
analysis of coaching tenures.
Inputs
------
df_all_games : Pandas DataFrame
Contains week-by-week results for each school
df_schools : Pandas DataFrame
Contains year-by-year results for each school (with coaches' names)
games_sf : int
Scott Frost's current number of games
Outputs
-------
df_engineered : Pandas DataFrame
A dataframe containing all historic week-by-week results infused
with coaches' names
'''
# standardize team names
df_all_games = renameSchool(df_all_games, 'School')
df_all_games = renameSchool(df_all_games, 'Opponent')
df_schools = renameSchool(df_schools, 'School')
# merge data together
df_coaches = pd.merge(df_all_games,
df_schools[['School', 'Year', 'Conf', 'Conf_W', 'Conf_L',
'Conf_T', 'AP_Pre', 'AP_High', 'AP_Post',
'Coach(es)', 'Bowl']],
how = 'left',
on = ['School', 'Year'])
# rename columns
df_coaches = df_coaches.rename(columns = {'Conf_x':'Conf_Opp', 'Conf_y':'Conf'})
# sort dataframe to ensure no issues with groupby
df_coaches = df_coaches.sort_values(by = ['School', 'Year', 'G'])
# Break out coaches on a week-by-week basis
list_coaches = []
table_coaches = pd.DataFrame(columns = ['School', 'Year', 'Coach', 'Games'])
for school, grp in tqdm.tqdm(df_coaches.groupby(['School', 'Year'])):
dict_coaches = {}
        # Handle Utah 2004
if school[0] == 'Utah' and school[1] == 2004:
dict_coaches['Urban Meyer'] = 12
# Handle Utah St. 2021
elif school[0] == 'Utah St.' and school[1] == 2021:
coach_name = '<NAME>'
coach_games = grp['G'].count()
dict_coaches[coach_name] = coach_games
# Handle USC 2021
elif school[0] == 'USC' and school[1] == 2021:
dict_coaches['C<NAME>'] = 2
dict_coaches['<NAME>'] = len(grp) - 2
# handle every coach that coached that season for that team
else:
# for every coach a team has, calculate how many games they coached that season
for coach in grp['Coach(es)'].iloc[0].split(', '):
coach_name = coach.split(' (')[0]
coach_record = coach.split(' (')[1].replace(')','')
# first attempt to account for ties in a coaches' record
try:
coach_games = int(coach_record.split('-')[0]) + int(coach_record.split('-')[1]) + int(coach_record.split('-')[2])
# otherwise assume they only have wins-losses in their record
except:
coach_games = int(coach_record.split('-')[0]) + int(coach_record.split('-')[1])
dict_coaches[coach_name] = coach_games
# add coaches to master list
num_games = 0
for coach in dict_coaches.keys():
list_coaches = list_coaches + ([coach] * dict_coaches[coach])
table_coaches = table_coaches.append(pd.DataFrame(
[[school[0], school[1], coach, dict_coaches[coach]]],
columns = ['School', 'Year', 'Coach', 'Games']))
num_games = dict_coaches[coach] + num_games
if num_games != len(grp):
print('oops!')
break
df_coaches['Coach'] = list_coaches
# test for any values of "coach" that weren't in the original data
for index, row in tqdm.tqdm(df_coaches.iterrows()):
if not pd.isna(row['Coach(es)']):
if row['Coach'] not in row['Coach(es)']:
print(f"{row['Coach']} not found in {row['Coach(es)']}")
# add power5 status to dataframe
df_school_info = pd.read_csv(r'references\names_pictures_ncaa.csv')
df_school_info = df_school_info.rename(columns = {'Team':'School'})
df_coaches = pd.merge(df_coaches, df_school_info[['School', 'Power5']], how = 'left', on = 'School')
df_school_info = df_school_info.rename(columns = {'School':'Opponent', 'Power5':'Power5_Opp'})
df_coaches = pd.merge(df_coaches, df_school_info[['Opponent', 'Power5_Opp']], how = 'left', on = 'Opponent')
# rename columns
df_coaches = df_coaches.rename(columns = {'G':'Week',
'Year':'Season',
'Opp':'Pts_Opp',
'Cum_W':'W_Sn',
'Cum_L':'L_Sn',
'T':'T_Sn'})
# add opponent's record for the year to the table
df_team_records = pd.merge(df_coaches[['Season', 'Opponent']],
df_schools[['School', 'Year', 'Overall_Pct', 'Conf_Pct']],
left_on = ['Season', 'Opponent'],
right_on = ['Year', 'School'])
df_team_records = df_team_records.drop_duplicates()
df_team_records = df_team_records[['Season', 'School', 'Overall_Pct', 'Conf_Pct']]
df_team_records = df_team_records.rename(columns = {'Overall_Pct':'Win_Pct_Opp',
'Conf_Pct':'Win_Pct_Conf_Opp',
'School':'Opponent'})
df_coaches = pd.merge(df_coaches, df_team_records, how = 'left', on = ['Season', 'Opponent'])
# add flag if opponent's overall record was > .500
df_coaches['Opp_Winning_Record'] = list(df_coaches.apply(
lambda row: True if row['Win_Pct_Opp'] > .5 else False, axis = 1))
# add flag if opponent's conference record was > .500
df_coaches['Opp_Conf_Winning_Record'] = list(df_coaches.apply(
lambda row: True if row['Win_Pct_Conf_Opp'] > .5 else False, axis = 1))
# reorder columns
df_coaches = df_coaches[['Season', 'Week', 'Date', 'Day', 'Rank', 'School',
'Coach', 'Conf', 'Power5', 'Home_Away', 'Rank_Opp', 'Opponent',
'Conf_Opp', 'Power5_Opp', 'Win_Pct_Opp', 'Opp_Winning_Record',
'Win_Pct_Conf_Opp', 'Opp_Conf_Winning_Record',
'Result', 'Pts', 'Pts_Opp', 'W_Sn',
'L_Sn', 'T_Sn', 'AP_Pre', 'AP_High', 'AP_Post',
'Notes', 'Bowl', 'url_boxscore']]
    # Engineer variables for each coach's stint/tenure at a given school
    df_engineered = pd.DataFrame()
# coding=utf-8
import numpy as np
import pandas as pd
# from pandas.tools.plotting import bootstrap_plot
import matplotlib.pyplot as plt
import dataviz.utils as utils
import matplotlib
matplotlib.style.use('ggplot')
def files2dataframe(root_dir: str, expression: str, offset: int, sort_columns: dict, size: int) -> pd.DataFrame:
"""
    gets the last :offset data points from files at :root_dir that follow the pattern
    :expression and returns a pandas.DataFrame with the data
"""
files = utils.get_files(root_dir, expression, [key for key, values in sort_columns.items()], size)
data_dict = {}
for key, values in files.items():
i = 0
run_dict = {}
for file in values:
run_dict[i] = np.load(file)[0][-offset:]
i += 1
data_dict[key] = run_dict
# idx = pd.Index([i for i in range(0, offset)])
return pd.DataFrame(pd.DataFrame(data_dict), columns=sorted(sort_columns, key=sort_columns.get))
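# Illustrative usage sketch (hypothetical directory, pattern and column names):
# load the last 500 values of every matching run and order the columns by the
# ranking given in sort_columns.
def _demo_load_runs():
    sort_columns = {'baseline': 0, 'tuned': 1}
    return files2dataframe('results/', r'run_\d+\.npy', 500, sort_columns, size=10)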
def file2dataframe(root_dir: str, expression: str, offset: int, value_name: str, size: int) -> pd.DataFrame:
"""
    gets the last :offset data points from files at :root_dir that follow the pattern
    :expression and returns a pandas.DataFrame with the data
"""
data_dict = file2array(root_dir, expression, offset, value_name, size)
# idx = pd.Index([i for i in range(0, offset)])
return pd.DataFrame(pd.DataFrame(data_dict))
def file2array(root_dir: str, expression: str, offset: int, value_name: str, size: int):
files = utils.get_files(root_dir, expression, value_name, size)
data_dict = {}
for key, values in files.items():
i = 0
run_dict = {}
for file in values:
run_dict[i] = np.load(file)[-offset:]
i += 1
data_dict[key] = run_dict
return data_dict
def calculate_hist(data: np.ndarray, bins: list, density=False) -> (np.ndarray, np.ndarray):
hist_data = []
for i in range(len(data)):
hist_data.append(np.histogram(data[i], bins=bins, density=density)[0])
means = np.mean(hist_data, axis=0)
errors = np.std(hist_data, axis=0)
return means, errors
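# Illustrative example (not part of the original module): averaging histograms
# over several synthetic runs that share the same bin edges.
def _demo_calculate_hist():
    rng = np.random.default_rng(0)
    runs = [rng.normal(size=1000) for _ in range(5)]  # five synthetic runs
    bins = list(np.linspace(-4, 4, 17))               # sixteen equal-width bins
    means, errors = calculate_hist(runs, bins)
    return means, errors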
def plot_hist_with_errors(means: np.ndarray, errors: np.ndarray, index: list, column: list, save_path: str,
name="_actions_hist.svg"):
means_df = pd.DataFrame(means, columns=column)
    errors_df = pd.DataFrame(errors, columns=column)
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
qualify donor data
"""
# %% REQUIRED LIBRARIES
import os
import argparse
import json
import ast
import pandas as pd
import datetime as dt
import numpy as np
# %% USER INPUTS (choices to be made in order to run the code)
codeDescription = "qualify donor data"
parser = argparse.ArgumentParser(description=codeDescription)
parser.add_argument(
"-d",
"--date-stamp",
dest="date_stamp",
default=dt.datetime.now().strftime("%Y-%m-%d"),
help="date, in '%Y-%m-%d' format, of the date when " +
"donors were accepted"
)
parser.add_argument(
"-u",
"--userid",
dest="userid",
default=np.nan,
help="userid of account shared with the donor group or master account"
)
parser.add_argument(
"-o",
"--output-data-path",
dest="data_path",
default=os.path.abspath(
os.path.join(
os.path.dirname(__file__), "..", "data"
)
),
help="the output path where the data is stored"
)
parser.add_argument("-q",
"--qualification-criteria",
dest="qualificationCriteria",
default=os.path.abspath(
os.path.join(
os.path.dirname(__file__),
"tidepool-qualification-criteria.json")
),
type=argparse.FileType('r'),
help="JSON file to be processed, see " +
"tidepool-qualification-critier.json " +
"for a list of required fields")
parser.add_argument(
"-s",
"--save-dayStats",
dest="save_dayStats",
default="False",
help="save the day stats used for qualifying (True/False)"
)
args = parser.parse_args()
# %% FUNCTIONS
def defineStartAndEndIndex(args, nDonors):
startIndex = int(args.startIndex)
endIndex = int(args.endIndex)
if endIndex == -1:
if startIndex == 0:
endIndex = nDonors
else:
endIndex = startIndex + 1
if endIndex == -2:
endIndex = nDonors
return startIndex, endIndex
def removeNegativeDurations(df):
if "duration" in list(df):
nNegativeDurations = sum(df.duration.astype(float) < 0)
if nNegativeDurations > 0:
df = df[~(df.duration.astype(float) < 0)]
else:
nNegativeDurations = np.nan
return df, nNegativeDurations
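# Illustrative example (not part of the original script): a toy frame with one
# negative duration, which removeNegativeDurations drops and counts.
def _demo_remove_negative_durations():
    toy = pd.DataFrame({"duration": ["300000", "-5", "1800000"]})
    cleaned, n_removed = removeNegativeDurations(toy)
    return cleaned, n_removed  # n_removed == 1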
def add_uploadDateTime(df):
if "upload" in data.type.unique():
uploadTimes = pd.DataFrame(
df[df.type == "upload"].groupby("uploadId").time.describe()["top"]
)
else:
uploadTimes = pd.DataFrame(columns=["top"])
# if an upload does not have an upload date, then add one
# NOTE: this is a new fix introduced with healthkit data...we now have
# data that does not have an upload record
unique_uploadIds = set(df["uploadId"].unique())
unique_uploadRecords = set(
df.loc[df["type"] == "upload", "uploadId"].unique()
)
uploadIds_missing_uploadRecords = unique_uploadIds - unique_uploadRecords
for upId in uploadIds_missing_uploadRecords:
last_upload_time = df.loc[df["uploadId"] == upId, "time"].max()
uploadTimes.loc[upId, "top"] = last_upload_time
uploadTimes.reset_index(inplace=True)
uploadTimes.rename(
columns={
"top": "uploadTime",
"index": "uploadId"
},
inplace=True
)
df = pd.merge(df, uploadTimes, how='left', on='uploadId')
df["uploadTime"] = pd.to_datetime(df["uploadTime"])
return df
def filterAndSort(groupedDF, filterByField, sortByField):
filterDF = groupedDF.get_group(filterByField).dropna(axis=1, how="all")
filterDF = filterDF.sort_values(sortByField)
return filterDF
def getClosedLoopDays(groupedData, qualCriteria, metadata):
# filter by basal data and sort by time
if "basal" in groupedData.type.unique():
basalData = filterAndSort(groupedData, "basal", "time")
# get closed loop days
nTB = qualCriteria["nTempBasalsPerDayIsClosedLoop"]
tbDataFrame = basalData.loc[basalData.deliveryType == "temp", ["time"]]
tbDataFrame.index = pd.to_datetime(tbDataFrame["time"])
tbDataFrame = tbDataFrame.drop(["time"], axis=1)
tbDataFrame["basal.temp.count"] = 1
nTempBasalsPerDay = tbDataFrame.resample("D").sum()
closedLoopDF = pd.DataFrame(nTempBasalsPerDay,
index=nTempBasalsPerDay.index.date)
closedLoopDF["date"] = nTempBasalsPerDay.index.date
closedLoopDF["basal.closedLoopDays"] = \
closedLoopDF["basal.temp.count"] >= nTB
nClosedLoopDays = closedLoopDF["basal.closedLoopDays"].sum()
# get the number of days with 670g
basalData["date"] = pd.to_datetime(basalData.time).dt.date
bdGroup = basalData.groupby("date")
topPump = bdGroup.deviceId.describe()["top"]
med670g = pd.DataFrame(topPump.str.contains("1780")).rename(columns={"top":"670g"})
med670g.reset_index(inplace=True)
n670gDays = med670g["670g"].sum()
else:
closedLoopDF = pd.DataFrame(columns=["basal.closedLoopDays", "date"])
med670g = pd.DataFrame(columns=["670g", "date"])
nClosedLoopDays = 0
n670gDays = 0
metadata["basal.closedLoopDays.count"] = nClosedLoopDays
metadata["med670gDays.count"] = n670gDays
return closedLoopDF, med670g, metadata
def removeInvalidCgmValues(df):
nBefore = len(df)
# remove values < 38 and > 402 mg/dL
df = df.query("(value >= 2.109284236597303) and" +
"(value <= 22.314006924003046)")
nRemoved = nBefore - len(df)
return df, nRemoved
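# Worked note (illustrative, not from the original script): the query bounds in
# removeInvalidCgmValues are 38 mg/dL and 402 mg/dL expressed in mmol/L, using
# the conventional conversion factor of 18.01559 (mg/dL per mmol/L).
MGDL_PER_MMOLL = 18.01559
assert abs(38 / MGDL_PER_MMOLL - 2.109284236597303) < 1e-6
assert abs(402 / MGDL_PER_MMOLL - 22.314006924003046) < 1e-6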
def removeDuplicates(df, criteriaDF):
nBefore = len(df)
df = df.loc[~(df[criteriaDF].duplicated())]
df = df.reset_index(drop=True)
nDuplicatesRemoved = nBefore - len(df)
return df, nDuplicatesRemoved
def removeCgmDuplicates(df, timeCriterion):
if timeCriterion in df:
df.sort_values(by=[timeCriterion, "uploadTime"],
ascending=[False, False],
inplace=True)
dfIsNull = df[df[timeCriterion].isnull()]
dfNotNull = df[df[timeCriterion].notnull()]
dfNotNull, nDuplicatesRemoved = removeDuplicates(dfNotNull, [timeCriterion, "value"])
df = pd.concat([dfIsNull, dfNotNull])
df.sort_values(by=[timeCriterion, "uploadTime"],
ascending=[False, False],
inplace=True)
else:
nDuplicatesRemoved = 0
return df, nDuplicatesRemoved
def getStartAndEndTimes(df, dateTimeField):
dfBeginDate = df[dateTimeField].min()
dfEndDate = df[dateTimeField].max()
return dfBeginDate, dfEndDate
def getCalculatorCounts(groupedData, metadata):
if "wizard" in groupedData.type.unique():
# filter by calculator data and sort by time
calculatorData = filterAndSort(groupedData, "wizard", "time")
# add dayIndex
calculatorData["dayIndex"] = pd.DatetimeIndex(calculatorData["time"]).date
# get rid of duplicates
calculatorData, nDuplicatesRemoved = \
removeDuplicates(calculatorData, ["time", "bolus"])
metadata["calculator.duplicatesRemoved.count"] = nDuplicatesRemoved
# get start and end times
calculatorBeginDate, calculatorEndDate = getStartAndEndTimes(calculatorData, "dayIndex")
metadata["calculator.beginDate"] = calculatorBeginDate
metadata["calculator.endDate"] = calculatorEndDate
# group by day and get number of calculator boluses
catDF = calculatorData.groupby(calculatorData["dayIndex"])
        calculatorPerDay = pd.DataFrame()
# -*- coding: utf-8 -*-
"""
Created on Tue May 3 10:49:58 2016
Auger peak finding and quantitative routines ... batch processing
@author: tkc
First get it working for single file.
"""
#%%
import pandas as pd
import numpy as np
import os, sys, shutil, glob, re
if 'C:\\Users\\tkc\\Documents\\Python_Scripts' not in sys.path:
sys.path.append('C:\\Users\\tkc\\Documents\\Python_Scripts')
import Auger_smdifquant_functions as AESsmquant
import Auger_integquant_functions as AESintquant
import Auger_utility_functions as AESutils
import Auger_plot_functions as AESplot
''' AESsmquant contains functions related to peak finding in smooth-differentiated spectra
whereas AESquant contains background fitting and integration over peaks for direct from counts '''
# import Auger_integquant_functions as AESquant
#%% REFIT of problematic peaks
# Manual refitting of failed fits on single peaks (usually Ca)
# filter with SPE list above
AugerParamLog=pd.read_csv('Augerparamlog.csv', encoding='cp437')
Smdifpeakslog=pd.read_csv('Smdifpeakslog.csv', encoding='cp437')
Integquantlog=pd.read_csv('Integquantlog.csv', encoding='cp437')
Backfitlog=pd.read_csv('Backfitlog.csv', encoding='cp437')
AESquantparams=pd.read_csv('C:\\Users\\tkc\\Documents\\Python_Scripts\\AESquantparams.csv', encoding='utf-8')
# -*- coding: utf-8 -*-
"""hood_event_scrape_module
Authors:
<NAME> <EMAIL>
<NAME> <EMAIL>
<NAME> <EMAIL>
Imports to:
WhatsUp_main_gui.py
"""
# !pip install rtree
# !pip install geopandas
# !pip install beautifulsoup4
# !pip install censusgeocode
# Import libraries
import rtree
import geopandas as gp
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import requests
import zipfile
import io
from shapely.geometry import Point, Polygon
from bs4 import BeautifulSoup
import re
import unicodedata
from geopy.extra.rate_limiter import RateLimiter
from geopy.geocoders import Nominatim
from geopandas import GeoDataFrame
import censusgeocode as cg
import time
# %% Useful functions and constants
# clean_neighborhood_info function for making neighborhood names congruent among multiple data sets
def clean_neighborhood_info(x):
"""This function should be applied to a neighborhood name column via a lambda function to return edited column values
Several neighborhoods in Pittsburgh have multiple column aliases. The function checks a column for these aliases and
replaces them with the neighborhood name contained in the City of Pittsburgh's official data'"""
if x == 'Arlington/Arlington Heights':
return 'Arlington Heights'
if x == 'Crafton':
return 'Crafton Heights'
if x == 'Downtown':
return 'Central Business District'
if x == 'East Allegheny - Deutschtown':
return 'East Allegheny'
if x == 'Hays or Hays Woods':
return 'Hays'
if x == 'Mt. Washington':
return 'Mount Washington'
if x == 'Sprint Hill':
return 'Spring Hill - City View'
if x == 'The Hill District':
return 'Hill District'
if x == 'The Strip District':
return 'Strip District'
if x == 'West End Village':
return 'West End'
else:
return x
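# Illustrative usage sketch (not part of the original module), mirroring the
# docstring: apply the cleaner to a neighborhood-name column via a lambda.
def _demo_clean_neighborhoods(df):
    df = df.copy()
    df['Neighborhood'] = df['Neighborhood'].apply(lambda x: clean_neighborhood_info(x))
    return df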
# fix_neighborhood_info ensures that scraped data that applies to multiple neighborhoods that are sometimes referred to
# under the alias of a single neighborhood are assigned to each of those neighborhoods
def fix_neighbor_info(my_df):
"""This function takes in a dataframe containing neighborhood aliases that refer to multiple Pittsburgh Neighborhoods
and returns a dataframe with duplicated neighborhood info assigned to each specific neighborhood.
i.e. df values for Oakland -> df values for North Oakland, South Oakland, and West Oakland"""
multi_desc = pd.DataFrame()
for i in range(len(my_df)):
if my_df.loc[i]['Neighborhood'] == 'Lawrenceville':
copy_1 = my_df.loc[i].copy()
copy_1['Neighborhood'] = 'Upper Lawrenceville'
multi_desc=multi_desc.append(copy_1)
copy_2 = my_df.loc[i].copy()
copy_2['Neighborhood'] = 'Lower Lawrenceville'
multi_desc=multi_desc.append(copy_2)
if my_df.loc[i]['Neighborhood'] == 'Oakland':
copy_1 = my_df.loc[i].copy()
copy_1['Neighborhood'] = 'West Oakland'
multi_desc=multi_desc.append(copy_1)
copy_2 = my_df.loc[i].copy()
copy_2['Neighborhood'] = 'North Oakland'
multi_desc=multi_desc.append(copy_2)
copy_3 = my_df.loc[i].copy()
copy_3['Neighborhood'] = 'South Oakland'
multi_desc=multi_desc.append(copy_3)
if my_df.loc[i]['Neighborhood'] == 'Squirrel Hill':
copy_1 = my_df.loc[i].copy()
copy_1['Neighborhood'] = 'Squirrel Hill North'
multi_desc=multi_desc.append(copy_1)
copy_2 = my_df.loc[i].copy()
copy_2['Neighborhood'] = 'Squirrel Hill South'
multi_desc=multi_desc.append(copy_2)
if my_df.loc[i]['Neighborhood'] == 'South Side':
copy_1 = my_df.loc[i].copy()
copy_1['Neighborhood'] = 'South Side Slopes'
copy_2 = my_df.loc[i].copy()
copy_2['Neighborhood'] = 'South Side Flats'
if my_df.loc[i]['Neighborhood'] == 'The South Side':
copy_3 = my_df.loc[i].copy()
copy_1['description'] = copy_3['description']
multi_desc=multi_desc.append(copy_1)
copy_4 = my_df.loc[i].copy()
copy_2['description'] = copy_4['description']
multi_desc=multi_desc.append(copy_2)
return multi_desc
# flatten function works with data produced by fix_neighbor_info function to compact multiple rows with the same
# neighborhood into one by empty columns with values from other rows of the same neighborhood
def flatten(g):
return g.fillna(method='bfill').iloc[0]
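# Illustrative usage sketch (not part of the original module): after
# fix_neighbor_info duplicates rows per neighborhood, each group can be
# collapsed back into a single filled-in row like this.
def _demo_flatten_by_neighborhood(multi_desc):
    return multi_desc.groupby('Neighborhood').apply(flatten).reset_index(drop=True)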
# get_address_nm geocodes data with nominatim geocoder
def get_address_nm(my_address):
"""This function should be applied to the address column of a dataframe to geocode the addresses, and
the function returns either a np.nan value for addresses that could not be geocoded or a POINT object
corresponding with the EPSG:4326 projection"""
try:
#time.sleep(1)
locator = Nominatim(user_agent="myGeocoder", timeout=10)
geocode = RateLimiter(locator.geocode, min_delay_seconds=1)
my_loc = geocode(my_address).point
my_loc_list = []
lat = my_loc[0]
long=my_loc[1]
my_point = Point(long, lat)
return my_point
except:
return np.nan
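# Illustrative usage sketch (not part of the original module): geocode a
# hypothetical 'address' column; rows Nominatim cannot resolve come back as NaN.
def _demo_geocode_addresses(df):
    df = df.copy()
    df['geometry'] = df['address'].apply(get_address_nm)
    return df.dropna(subset=['geometry'])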
# Set header constant for use in scraping from Beautiful Pittsburgh because the website is sensitive to scraping without
# a user agent and to multiple calls
header = {'user-agent': 'Mozilla/5.0 (Windows NT 10.0) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/42.0.2311.135 Safari/537.36 Edge/12.10136',
"Accept-Encoding": "*",
"Connection": "keep-alive"}
# %% Scrape GeoData
#### Downloading and processing inital pgh neighborhoods shapefile
# Download and get shapefile for pgh neighborhoods
url = 'https://pghgishub-pittsburghpa.opendata.arcgis.com/datasets/dbd133a206cc4a3aa915cb28baa60fd4_0.zip?outSR=%7B%22latestWkid%22%3A2272%2C%22wkid%22%3A102729%7D'
local_path = 'tmp/'
r = requests.get(url)
z = zipfile.ZipFile(io.BytesIO(r.content))
z.extractall(path=local_path)
filenames = [y for y in sorted(z.namelist()) for ending in ['dbf', 'prj', 'shp', 'shx'] if y.endswith(ending)]
# Read in with geopandas
thehoods=gp.read_file(local_path+'Neighborhoods_.shp')
# Make geodataframe keeping only potentially useful info from neighborhoods data
thehoods_clean=thehoods[['objectid', 'geoid10', 'sqmiles', 'hood', 'hood_no', 'unique_id', 'geometry']].copy()
#### Downloading and processing csv of public art recorded by the city
# Get data on public art in pittsburgh neighborhoods from western PA regional data center via download
art_url = 'https://data.wprdc.org/datastore/dump/00d74e83-8a23-486e-841b-286e1332a151'
# Read data into normal dataframe
public_art = pd.read_csv(art_url)
# Clean and extract useful information
public_art_clean = public_art[['id', 'title', 'neighborhood', 'latitude', 'longitude']].copy().dropna().reset_index(drop=True)
# public_art_clean dataframe
public_art_clean.head(10)
# Count by number of city recorded public art works in each neighborhood
public_art_counting = public_art_clean.copy()
public_art_counting["art_count"] = 1
public_art_count = public_art_counting.groupby("neighborhood")[["art_count"]].sum().reset_index()
# Note that only 55 neighborhoods have art
#### Downloading and processing csv of playgrounds in the city
playgrounds_url = 'https://data.wprdc.org/datastore/dump/47350364-44a8-4d15-b6e0-5f79ddff9367'
# Read data into normal dataframe
playgrounds = pd.read_csv(playgrounds_url)
# Clean and extract useful information
playgrounds_clean = playgrounds[['id', 'name', 'neighborhood', 'latitude', 'longitude']].copy().dropna().reset_index(drop=True)
# playgrounds_clean dataframe
playgrounds_clean.head(10)
# Count by number of city recorded playgrounds in each neighborhood
playgrounds_counting = playgrounds_clean.copy()
playgrounds_counting["playground_count"] = 1
playgrounds_count = playgrounds_counting.groupby("neighborhood")[["playground_count"]].sum().reset_index()
# Note that there are only 68 neighborhoods with playgrounds
# %% Scrape Walkscore Information"""
### Scrape table of walkscore, bikescore, transitscore, population for pgh neighborhoods from walkscore.com
# Get html for each line of table
walkscore_data = pd.DataFrame()
import datetime
from datetime import timedelta
from distutils.version import LooseVersion
from io import BytesIO
import os
import re
from warnings import catch_warnings, simplefilter
import numpy as np
import pytest
from pandas.compat import is_platform_little_endian, is_platform_windows
import pandas.util._test_decorators as td
from pandas.core.dtypes.common import is_categorical_dtype
import pandas as pd
from pandas import (
Categorical,
CategoricalIndex,
DataFrame,
DatetimeIndex,
Index,
Int64Index,
MultiIndex,
RangeIndex,
Series,
Timestamp,
bdate_range,
concat,
date_range,
isna,
timedelta_range,
)
from pandas.tests.io.pytables.common import (
_maybe_remove,
create_tempfile,
ensure_clean_path,
ensure_clean_store,
safe_close,
safe_remove,
tables,
)
import pandas.util.testing as tm
from pandas.io.pytables import (
ClosedFileError,
HDFStore,
PossibleDataLossError,
Term,
read_hdf,
)
from pandas.io import pytables as pytables # noqa: E402 isort:skip
from pandas.io.pytables import TableIterator # noqa: E402 isort:skip
_default_compressor = "blosc"
ignore_natural_naming_warning = pytest.mark.filterwarnings(
"ignore:object name:tables.exceptions.NaturalNameWarning"
)
@pytest.mark.single
class TestHDFStore:
def test_format_kwarg_in_constructor(self, setup_path):
# GH 13291
with ensure_clean_path(setup_path) as path:
with pytest.raises(ValueError):
HDFStore(path, format="table")
def test_context(self, setup_path):
path = create_tempfile(setup_path)
try:
with HDFStore(path) as tbl:
raise ValueError("blah")
except ValueError:
pass
finally:
safe_remove(path)
try:
with HDFStore(path) as tbl:
tbl["a"] = tm.makeDataFrame()
with HDFStore(path) as tbl:
assert len(tbl) == 1
assert type(tbl["a"]) == DataFrame
finally:
safe_remove(path)
def test_conv_read_write(self, setup_path):
path = create_tempfile(setup_path)
try:
def roundtrip(key, obj, **kwargs):
obj.to_hdf(path, key, **kwargs)
return read_hdf(path, key)
o = tm.makeTimeSeries()
tm.assert_series_equal(o, roundtrip("series", o))
o = tm.makeStringSeries()
tm.assert_series_equal(o, roundtrip("string_series", o))
o = tm.makeDataFrame()
tm.assert_frame_equal(o, roundtrip("frame", o))
# table
df = DataFrame(dict(A=range(5), B=range(5)))
df.to_hdf(path, "table", append=True)
result = read_hdf(path, "table", where=["index>2"])
tm.assert_frame_equal(df[df.index > 2], result)
finally:
safe_remove(path)
def test_long_strings(self, setup_path):
# GH6166
df = DataFrame(
{"a": tm.rands_array(100, size=10)}, index=tm.rands_array(100, size=10)
)
with ensure_clean_store(setup_path) as store:
store.append("df", df, data_columns=["a"])
result = store.select("df")
tm.assert_frame_equal(df, result)
def test_api(self, setup_path):
# GH4584
# API issue when to_hdf doesn't accept append AND format args
with ensure_clean_path(setup_path) as path:
df = tm.makeDataFrame()
df.iloc[:10].to_hdf(path, "df", append=True, format="table")
df.iloc[10:].to_hdf(path, "df", append=True, format="table")
tm.assert_frame_equal(read_hdf(path, "df"), df)
# append to False
df.iloc[:10].to_hdf(path, "df", append=False, format="table")
df.iloc[10:].to_hdf(path, "df", append=True, format="table")
tm.assert_frame_equal(read_hdf(path, "df"), df)
with ensure_clean_path(setup_path) as path:
df = tm.makeDataFrame()
df.iloc[:10].to_hdf(path, "df", append=True)
df.iloc[10:].to_hdf(path, "df", append=True, format="table")
tm.assert_frame_equal(read_hdf(path, "df"), df)
# append to False
df.iloc[:10].to_hdf(path, "df", append=False, format="table")
df.iloc[10:].to_hdf(path, "df", append=True)
tm.assert_frame_equal(read_hdf(path, "df"), df)
with ensure_clean_path(setup_path) as path:
df = tm.makeDataFrame()
df.to_hdf(path, "df", append=False, format="fixed")
tm.assert_frame_equal(read_hdf(path, "df"), df)
df.to_hdf(path, "df", append=False, format="f")
tm.assert_frame_equal(read_hdf(path, "df"), df)
df.to_hdf(path, "df", append=False)
tm.assert_frame_equal(read_hdf(path, "df"), df)
df.to_hdf(path, "df")
tm.assert_frame_equal(read_hdf(path, "df"), df)
with ensure_clean_store(setup_path) as store:
path = store._path
df = tm.makeDataFrame()
_maybe_remove(store, "df")
store.append("df", df.iloc[:10], append=True, format="table")
store.append("df", df.iloc[10:], append=True, format="table")
tm.assert_frame_equal(store.select("df"), df)
# append to False
_maybe_remove(store, "df")
store.append("df", df.iloc[:10], append=False, format="table")
store.append("df", df.iloc[10:], append=True, format="table")
tm.assert_frame_equal(store.select("df"), df)
# formats
_maybe_remove(store, "df")
store.append("df", df.iloc[:10], append=False, format="table")
store.append("df", df.iloc[10:], append=True, format="table")
tm.assert_frame_equal(store.select("df"), df)
_maybe_remove(store, "df")
store.append("df", df.iloc[:10], append=False, format="table")
store.append("df", df.iloc[10:], append=True, format=None)
tm.assert_frame_equal(store.select("df"), df)
with ensure_clean_path(setup_path) as path:
# Invalid.
df = tm.makeDataFrame()
with pytest.raises(ValueError):
df.to_hdf(path, "df", append=True, format="f")
with pytest.raises(ValueError):
df.to_hdf(path, "df", append=True, format="fixed")
with pytest.raises(TypeError):
df.to_hdf(path, "df", append=True, format="foo")
with pytest.raises(TypeError):
df.to_hdf(path, "df", append=False, format="bar")
# File path doesn't exist
path = ""
with pytest.raises(FileNotFoundError):
read_hdf(path, "df")
def test_api_default_format(self, setup_path):
# default_format option
with ensure_clean_store(setup_path) as store:
df = tm.makeDataFrame()
pd.set_option("io.hdf.default_format", "fixed")
_maybe_remove(store, "df")
store.put("df", df)
assert not store.get_storer("df").is_table
with pytest.raises(ValueError):
store.append("df2", df)
pd.set_option("io.hdf.default_format", "table")
_maybe_remove(store, "df")
store.put("df", df)
assert store.get_storer("df").is_table
_maybe_remove(store, "df2")
store.append("df2", df)
assert store.get_storer("df").is_table
pd.set_option("io.hdf.default_format", None)
with ensure_clean_path(setup_path) as path:
df = tm.makeDataFrame()
pd.set_option("io.hdf.default_format", "fixed")
df.to_hdf(path, "df")
with HDFStore(path) as store:
assert not store.get_storer("df").is_table
with pytest.raises(ValueError):
df.to_hdf(path, "df2", append=True)
pd.set_option("io.hdf.default_format", "table")
df.to_hdf(path, "df3")
with HDFStore(path) as store:
assert store.get_storer("df3").is_table
df.to_hdf(path, "df4", append=True)
with HDFStore(path) as store:
assert store.get_storer("df4").is_table
pd.set_option("io.hdf.default_format", None)
def test_keys(self, setup_path):
with ensure_clean_store(setup_path) as store:
store["a"] = tm.makeTimeSeries()
store["b"] = tm.makeStringSeries()
store["c"] = tm.makeDataFrame()
assert len(store) == 3
expected = {"/a", "/b", "/c"}
assert set(store.keys()) == expected
assert set(store) == expected
def test_keys_ignore_hdf_softlink(self, setup_path):
# GH 20523
# Puts a softlink into HDF file and rereads
with ensure_clean_store(setup_path) as store:
df = DataFrame(dict(A=range(5), B=range(5)))
store.put("df", df)
assert store.keys() == ["/df"]
store._handle.create_soft_link(store._handle.root, "symlink", "df")
# Should ignore the softlink
assert store.keys() == ["/df"]
def test_iter_empty(self, setup_path):
with ensure_clean_store(setup_path) as store:
# GH 12221
assert list(store) == []
def test_repr(self, setup_path):
with ensure_clean_store(setup_path) as store:
repr(store)
store.info()
store["a"] = tm.makeTimeSeries()
store["b"] = tm.makeStringSeries()
store["c"] = tm.makeDataFrame()
df = tm.makeDataFrame()
df["obj1"] = "foo"
df["obj2"] = "bar"
df["bool1"] = df["A"] > 0
df["bool2"] = df["B"] > 0
df["bool3"] = True
df["int1"] = 1
df["int2"] = 2
df["timestamp1"] = Timestamp("20010102")
df["timestamp2"] = Timestamp("20010103")
df["datetime1"] = datetime.datetime(2001, 1, 2, 0, 0)
df["datetime2"] = datetime.datetime(2001, 1, 3, 0, 0)
df.loc[3:6, ["obj1"]] = np.nan
df = df._consolidate()._convert(datetime=True)
with catch_warnings(record=True):
simplefilter("ignore", pd.errors.PerformanceWarning)
store["df"] = df
# make a random group in hdf space
store._handle.create_group(store._handle.root, "bah")
assert store.filename in repr(store)
assert store.filename in str(store)
store.info()
# storers
with ensure_clean_store(setup_path) as store:
df = tm.makeDataFrame()
store.append("df", df)
s = store.get_storer("df")
repr(s)
str(s)
@ignore_natural_naming_warning
def test_contains(self, setup_path):
with ensure_clean_store(setup_path) as store:
store["a"] = tm.makeTimeSeries()
store["b"] = tm.makeDataFrame()
store["foo/bar"] = tm.makeDataFrame()
assert "a" in store
assert "b" in store
assert "c" not in store
assert "foo/bar" in store
assert "/foo/bar" in store
assert "/foo/b" not in store
assert "bar" not in store
# gh-2694: tables.NaturalNameWarning
with catch_warnings(record=True):
store["node())"] = tm.makeDataFrame()
assert "node())" in store
def test_versioning(self, setup_path):
with ensure_clean_store(setup_path) as store:
store["a"] = tm.makeTimeSeries()
store["b"] = tm.makeDataFrame()
df = tm.makeTimeDataFrame()
_maybe_remove(store, "df1")
store.append("df1", df[:10])
store.append("df1", df[10:])
assert store.root.a._v_attrs.pandas_version == "0.15.2"
assert store.root.b._v_attrs.pandas_version == "0.15.2"
assert store.root.df1._v_attrs.pandas_version == "0.15.2"
# write a file and wipe its versioning
_maybe_remove(store, "df2")
store.append("df2", df)
# this is an error because its table_type is appendable, but no
# version info
store.get_node("df2")._v_attrs.pandas_version = None
with pytest.raises(Exception):
store.select("df2")
def test_mode(self, setup_path):
df = tm.makeTimeDataFrame()
def check(mode):
with ensure_clean_path(setup_path) as path:
# constructor
if mode in ["r", "r+"]:
with pytest.raises(IOError):
HDFStore(path, mode=mode)
else:
store = HDFStore(path, mode=mode)
assert store._handle.mode == mode
store.close()
with ensure_clean_path(setup_path) as path:
# context
if mode in ["r", "r+"]:
with pytest.raises(IOError):
with HDFStore(path, mode=mode) as store: # noqa
pass
else:
with HDFStore(path, mode=mode) as store:
assert store._handle.mode == mode
with ensure_clean_path(setup_path) as path:
# conv write
if mode in ["r", "r+"]:
with pytest.raises(IOError):
df.to_hdf(path, "df", mode=mode)
df.to_hdf(path, "df", mode="w")
else:
df.to_hdf(path, "df", mode=mode)
# conv read
if mode in ["w"]:
with pytest.raises(ValueError):
read_hdf(path, "df", mode=mode)
else:
result = read_hdf(path, "df", mode=mode)
tm.assert_frame_equal(result, df)
def check_default_mode():
# read_hdf uses default mode
with ensure_clean_path(setup_path) as path:
df.to_hdf(path, "df", mode="w")
result = read_hdf(path, "df")
tm.assert_frame_equal(result, df)
check("r")
check("r+")
check("a")
check("w")
check_default_mode()
def test_reopen_handle(self, setup_path):
with ensure_clean_path(setup_path) as path:
store = HDFStore(path, mode="a")
store["a"] = tm.makeTimeSeries()
# invalid mode change
with pytest.raises(PossibleDataLossError):
store.open("w")
store.close()
assert not store.is_open
# truncation ok here
store.open("w")
assert store.is_open
assert len(store) == 0
store.close()
assert not store.is_open
store = HDFStore(path, mode="a")
store["a"] = tm.makeTimeSeries()
# reopen as read
store.open("r")
assert store.is_open
assert len(store) == 1
assert store._mode == "r"
store.close()
assert not store.is_open
# reopen as append
store.open("a")
assert store.is_open
assert len(store) == 1
assert store._mode == "a"
store.close()
assert not store.is_open
# reopen as append (again)
store.open("a")
assert store.is_open
assert len(store) == 1
assert store._mode == "a"
store.close()
assert not store.is_open
def test_open_args(self, setup_path):
with ensure_clean_path(setup_path) as path:
df = tm.makeDataFrame()
# create an in memory store
store = HDFStore(
path, mode="a", driver="H5FD_CORE", driver_core_backing_store=0
)
store["df"] = df
store.append("df2", df)
tm.assert_frame_equal(store["df"], df)
tm.assert_frame_equal(store["df2"], df)
store.close()
# the file should not have actually been written
assert not os.path.exists(path)
def test_flush(self, setup_path):
with ensure_clean_store(setup_path) as store:
store["a"] = tm.makeTimeSeries()
store.flush()
store.flush(fsync=True)
def test_get(self, setup_path):
with ensure_clean_store(setup_path) as store:
store["a"] = tm.makeTimeSeries()
left = store.get("a")
right = store["a"]
tm.assert_series_equal(left, right)
left = store.get("/a")
right = store["/a"]
tm.assert_series_equal(left, right)
with pytest.raises(KeyError, match="'No object named b in the file'"):
store.get("b")
@pytest.mark.parametrize(
"where, expected",
[
(
"/",
{
"": ({"first_group", "second_group"}, set()),
"/first_group": (set(), {"df1", "df2"}),
"/second_group": ({"third_group"}, {"df3", "s1"}),
"/second_group/third_group": (set(), {"df4"}),
},
),
(
"/second_group",
{
"/second_group": ({"third_group"}, {"df3", "s1"}),
"/second_group/third_group": (set(), {"df4"}),
},
),
],
)
def test_walk(self, where, expected, setup_path):
# GH10143
objs = {
"df1": pd.DataFrame([1, 2, 3]),
"df2": pd.DataFrame([4, 5, 6]),
"df3": pd.DataFrame([6, 7, 8]),
"df4": pd.DataFrame([9, 10, 11]),
"s1": pd.Series([10, 9, 8]),
# Next 3 items aren't pandas objects and should be ignored
"a1": np.array([[1, 2, 3], [4, 5, 6]]),
"tb1": np.array([(1, 2, 3), (4, 5, 6)], dtype="i,i,i"),
"tb2": np.array([(7, 8, 9), (10, 11, 12)], dtype="i,i,i"),
}
with ensure_clean_store("walk_groups.hdf", mode="w") as store:
store.put("/first_group/df1", objs["df1"])
store.put("/first_group/df2", objs["df2"])
store.put("/second_group/df3", objs["df3"])
store.put("/second_group/s1", objs["s1"])
store.put("/second_group/third_group/df4", objs["df4"])
# Create non-pandas objects
store._handle.create_array("/first_group", "a1", objs["a1"])
store._handle.create_table("/first_group", "tb1", obj=objs["tb1"])
store._handle.create_table("/second_group", "tb2", obj=objs["tb2"])
assert len(list(store.walk(where=where))) == len(expected)
for path, groups, leaves in store.walk(where=where):
assert path in expected
expected_groups, expected_frames = expected[path]
assert expected_groups == set(groups)
assert expected_frames == set(leaves)
for leaf in leaves:
frame_path = "/".join([path, leaf])
obj = store.get(frame_path)
if "df" in leaf:
tm.assert_frame_equal(obj, objs[leaf])
else:
tm.assert_series_equal(obj, objs[leaf])
def test_getattr(self, setup_path):
with ensure_clean_store(setup_path) as store:
s = tm.makeTimeSeries()
store["a"] = s
# test attribute access
result = store.a
tm.assert_series_equal(result, s)
result = getattr(store, "a")
tm.assert_series_equal(result, s)
df = tm.makeTimeDataFrame()
store["df"] = df
result = store.df
tm.assert_frame_equal(result, df)
# errors
for x in ["d", "mode", "path", "handle", "complib"]:
with pytest.raises(AttributeError):
getattr(store, x)
# not stores
for x in ["mode", "path", "handle", "complib"]:
getattr(store, "_{x}".format(x=x))
def test_put(self, setup_path):
with ensure_clean_store(setup_path) as store:
ts = tm.makeTimeSeries()
df = tm.makeTimeDataFrame()
store["a"] = ts
store["b"] = df[:10]
store["foo/bar/bah"] = df[:10]
store["foo"] = df[:10]
store["/foo"] = df[:10]
store.put("c", df[:10], format="table")
# not OK, not a table
with pytest.raises(ValueError):
store.put("b", df[10:], append=True)
# node does not currently exist, test _is_table_type returns False
# in this case
_maybe_remove(store, "f")
with pytest.raises(ValueError):
store.put("f", df[10:], append=True)
# can't put to a table (use append instead)
with pytest.raises(ValueError):
store.put("c", df[10:], append=True)
# overwrite table
store.put("c", df[:10], format="table", append=False)
tm.assert_frame_equal(df[:10], store["c"])
def test_put_string_index(self, setup_path):
with ensure_clean_store(setup_path) as store:
index = Index(
["I am a very long string index: {i}".format(i=i) for i in range(20)]
)
s = Series(np.arange(20), index=index)
df = DataFrame({"A": s, "B": s})
store["a"] = s
tm.assert_series_equal(store["a"], s)
store["b"] = df
tm.assert_frame_equal(store["b"], df)
# mixed length
index = Index(
["abcdefghijklmnopqrstuvwxyz1234567890"]
+ ["I am a very long string index: {i}".format(i=i) for i in range(20)]
)
s = Series(np.arange(21), index=index)
df = DataFrame({"A": s, "B": s})
store["a"] = s
tm.assert_series_equal(store["a"], s)
store["b"] = df
tm.assert_frame_equal(store["b"], df)
def test_put_compression(self, setup_path):
with ensure_clean_store(setup_path) as store:
df = tm.makeTimeDataFrame()
store.put("c", df, format="table", complib="zlib")
tm.assert_frame_equal(store["c"], df)
# can't compress if format='fixed'
with pytest.raises(ValueError):
store.put("b", df, format="fixed", complib="zlib")
@td.skip_if_windows_python_3
def test_put_compression_blosc(self, setup_path):
df = tm.makeTimeDataFrame()
with ensure_clean_store(setup_path) as store:
# can't compress if format='fixed'
with pytest.raises(ValueError):
store.put("b", df, format="fixed", complib="blosc")
store.put("c", df, format="table", complib="blosc")
tm.assert_frame_equal(store["c"], df)
def test_complibs_default_settings(self, setup_path):
# GH15943
df = tm.makeDataFrame()
# Set complevel and check if complib is automatically set to
# default value
with ensure_clean_path(setup_path) as tmpfile:
df.to_hdf(tmpfile, "df", complevel=9)
result = pd.read_hdf(tmpfile, "df")
tm.assert_frame_equal(result, df)
with tables.open_file(tmpfile, mode="r") as h5file:
for node in h5file.walk_nodes(where="/df", classname="Leaf"):
assert node.filters.complevel == 9
assert node.filters.complib == "zlib"
# Set complib and check to see if compression is disabled
with ensure_clean_path(setup_path) as tmpfile:
df.to_hdf(tmpfile, "df", complib="zlib")
result = pd.read_hdf(tmpfile, "df")
tm.assert_frame_equal(result, df)
with tables.open_file(tmpfile, mode="r") as h5file:
for node in h5file.walk_nodes(where="/df", classname="Leaf"):
assert node.filters.complevel == 0
assert node.filters.complib is None
# Check if not setting complib or complevel results in no compression
with ensure_clean_path(setup_path) as tmpfile:
df.to_hdf(tmpfile, "df")
result = pd.read_hdf(tmpfile, "df")
tm.assert_frame_equal(result, df)
with tables.open_file(tmpfile, mode="r") as h5file:
for node in h5file.walk_nodes(where="/df", classname="Leaf"):
assert node.filters.complevel == 0
assert node.filters.complib is None
# Check if file-defaults can be overridden on a per table basis
with ensure_clean_path(setup_path) as tmpfile:
store = pd.HDFStore(tmpfile)
store.append("dfc", df, complevel=9, complib="blosc")
store.append("df", df)
store.close()
with tables.open_file(tmpfile, mode="r") as h5file:
for node in h5file.walk_nodes(where="/df", classname="Leaf"):
assert node.filters.complevel == 0
assert node.filters.complib is None
for node in h5file.walk_nodes(where="/dfc", classname="Leaf"):
assert node.filters.complevel == 9
assert node.filters.complib == "blosc"
def test_complibs(self, setup_path):
# GH14478
df = tm.makeDataFrame()
# Building list of all complibs and complevels tuples
all_complibs = tables.filters.all_complibs
# Remove lzo if its not available on this platform
if not tables.which_lib_version("lzo"):
all_complibs.remove("lzo")
# Remove bzip2 if its not available on this platform
if not tables.which_lib_version("bzip2"):
all_complibs.remove("bzip2")
all_levels = range(0, 10)
all_tests = [(lib, lvl) for lib in all_complibs for lvl in all_levels]
for (lib, lvl) in all_tests:
with ensure_clean_path(setup_path) as tmpfile:
gname = "foo"
# Write and read file to see if data is consistent
df.to_hdf(tmpfile, gname, complib=lib, complevel=lvl)
result = pd.read_hdf(tmpfile, gname)
tm.assert_frame_equal(result, df)
# Open file and check metadata
# for correct amount of compression
h5table = tables.open_file(tmpfile, mode="r")
for node in h5table.walk_nodes(where="/" + gname, classname="Leaf"):
assert node.filters.complevel == lvl
if lvl == 0:
assert node.filters.complib is None
else:
assert node.filters.complib == lib
h5table.close()
def test_put_integer(self, setup_path):
# non-date, non-string index
df = DataFrame(np.random.randn(50, 100))
self._check_roundtrip(df, tm.assert_frame_equal, setup_path)
@td.xfail_non_writeable
def test_put_mixed_type(self, setup_path):
df = tm.makeTimeDataFrame()
df["obj1"] = "foo"
df["obj2"] = "bar"
df["bool1"] = df["A"] > 0
df["bool2"] = df["B"] > 0
df["bool3"] = True
df["int1"] = 1
df["int2"] = 2
df["timestamp1"] = Timestamp("20010102")
df["timestamp2"] = Timestamp("20010103")
df["datetime1"] = datetime.datetime(2001, 1, 2, 0, 0)
df["datetime2"] = datetime.datetime(2001, 1, 3, 0, 0)
df.loc[3:6, ["obj1"]] = np.nan
df = df._consolidate()._convert(datetime=True)
with ensure_clean_store(setup_path) as store:
_maybe_remove(store, "df")
# PerformanceWarning
with catch_warnings(record=True):
simplefilter("ignore", pd.errors.PerformanceWarning)
store.put("df", df)
expected = store.get("df")
tm.assert_frame_equal(expected, df)
@pytest.mark.filterwarnings(
"ignore:object name:tables.exceptions.NaturalNameWarning"
)
def test_append(self, setup_path):
with ensure_clean_store(setup_path) as store:
            # this is allowed but you almost always don't want to do it
# tables.NaturalNameWarning):
with catch_warnings(record=True):
df = tm.makeTimeDataFrame()
_maybe_remove(store, "df1")
store.append("df1", df[:10])
store.append("df1", df[10:])
tm.assert_frame_equal(store["df1"], df)
_maybe_remove(store, "df2")
store.put("df2", df[:10], format="table")
store.append("df2", df[10:])
tm.assert_frame_equal(store["df2"], df)
_maybe_remove(store, "df3")
store.append("/df3", df[:10])
store.append("/df3", df[10:])
tm.assert_frame_equal(store["df3"], df)
                # this is allowed but you almost always don't want to do it
# tables.NaturalNameWarning
_maybe_remove(store, "/df3 foo")
store.append("/df3 foo", df[:10])
store.append("/df3 foo", df[10:])
tm.assert_frame_equal(store["df3 foo"], df)
                # dtype issues - mixed type in a single object column
df = DataFrame(data=[[1, 2], [0, 1], [1, 2], [0, 0]])
df["mixed_column"] = "testing"
df.loc[2, "mixed_column"] = np.nan
_maybe_remove(store, "df")
store.append("df", df)
tm.assert_frame_equal(store["df"], df)
# uints - test storage of uints
uint_data = DataFrame(
{
"u08": Series(
np.random.randint(0, high=255, size=5), dtype=np.uint8
),
"u16": Series(
np.random.randint(0, high=65535, size=5), dtype=np.uint16
),
"u32": Series(
np.random.randint(0, high=2 ** 30, size=5), dtype=np.uint32
),
"u64": Series(
[2 ** 58, 2 ** 59, 2 ** 60, 2 ** 61, 2 ** 62],
dtype=np.uint64,
),
},
index=np.arange(5),
)
_maybe_remove(store, "uints")
store.append("uints", uint_data)
tm.assert_frame_equal(store["uints"], uint_data)
# uints - test storage of uints in indexable columns
_maybe_remove(store, "uints")
# 64-bit indices not yet supported
store.append("uints", uint_data, data_columns=["u08", "u16", "u32"])
tm.assert_frame_equal(store["uints"], uint_data)
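    # A minimal sketch (not part of the original suite): appending a frame in
    # two chunks builds a single table that round-trips to the original frame.
    # The key name "chunked" is hypothetical.
    def _example_chunked_append(self, setup_path):
        df = tm.makeTimeDataFrame()
        with ensure_clean_store(setup_path) as store:
            store.append("chunked", df[:10])
            store.append("chunked", df[10:])
            tm.assert_frame_equal(store["chunked"], df)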
def test_append_series(self, setup_path):
with ensure_clean_store(setup_path) as store:
# basic
ss = tm.makeStringSeries()
ts = tm.makeTimeSeries()
ns = Series(np.arange(100))
store.append("ss", ss)
result = store["ss"]
tm.assert_series_equal(result, ss)
assert result.name is None
store.append("ts", ts)
result = store["ts"]
tm.assert_series_equal(result, ts)
assert result.name is None
ns.name = "foo"
store.append("ns", ns)
result = store["ns"]
tm.assert_series_equal(result, ns)
assert result.name == ns.name
# select on the values
expected = ns[ns > 60]
result = store.select("ns", "foo>60")
tm.assert_series_equal(result, expected)
# select on the index and values
expected = ns[(ns > 70) & (ns.index < 90)]
result = store.select("ns", "foo>70 and index<90")
tm.assert_series_equal(result, expected)
# multi-index
mi = DataFrame(np.random.randn(5, 1), columns=["A"])
mi["B"] = np.arange(len(mi))
mi["C"] = "foo"
mi.loc[3:5, "C"] = "bar"
mi.set_index(["C", "B"], inplace=True)
s = mi.stack()
s.index = s.index.droplevel(2)
store.append("mi", s)
tm.assert_series_equal(store["mi"], s)
def test_store_index_types(self, setup_path):
# GH5386
# test storing various index types
with ensure_clean_store(setup_path) as store:
def check(format, index):
df = DataFrame(np.random.randn(10, 2), columns=list("AB"))
df.index = index(len(df))
_maybe_remove(store, "df")
store.put("df", df, format=format)
tm.assert_frame_equal(df, store["df"])
for index in [
tm.makeFloatIndex,
tm.makeStringIndex,
tm.makeIntIndex,
tm.makeDateIndex,
]:
check("table", index)
check("fixed", index)
# period index currently broken for table
# see GH7796 FIXME
check("fixed", tm.makePeriodIndex)
# check('table',tm.makePeriodIndex)
# unicode
index = tm.makeUnicodeIndex
check("table", index)
check("fixed", index)
@pytest.mark.skipif(
not is_platform_little_endian(), reason="platform is not little endian"
)
def test_encoding(self, setup_path):
with ensure_clean_store(setup_path) as store:
df = DataFrame(dict(A="foo", B="bar"), index=range(5))
df.loc[2, "A"] = np.nan
df.loc[3, "B"] = np.nan
_maybe_remove(store, "df")
store.append("df", df, encoding="ascii")
tm.assert_frame_equal(store["df"], df)
expected = df.reindex(columns=["A"])
result = store.select("df", Term("columns=A", encoding="ascii"))
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize(
"val",
[
[b"E\xc9, 17", b"", b"a", b"b", b"c"],
[b"E\xc9, 17", b"a", b"b", b"c"],
[b"EE, 17", b"", b"a", b"b", b"c"],
[b"E\xc9, 17", b"\xf8\xfc", b"a", b"b", b"c"],
[b"", b"a", b"b", b"c"],
[b"\xf8\xfc", b"a", b"b", b"c"],
[b"A\xf8\xfc", b"", b"a", b"b", b"c"],
[np.nan, b"", b"b", b"c"],
[b"A\xf8\xfc", np.nan, b"", b"b", b"c"],
],
)
@pytest.mark.parametrize("dtype", ["category", object])
def test_latin_encoding(self, setup_path, dtype, val):
enc = "latin-1"
nan_rep = ""
key = "data"
val = [x.decode(enc) if isinstance(x, bytes) else x for x in val]
ser = pd.Series(val, dtype=dtype)
with ensure_clean_path(setup_path) as store:
ser.to_hdf(store, key, format="table", encoding=enc, nan_rep=nan_rep)
retr = read_hdf(store, key)
s_nan = ser.replace(nan_rep, np.nan)
if is_categorical_dtype(s_nan):
assert is_categorical_dtype(retr)
tm.assert_series_equal(
s_nan, retr, check_dtype=False, check_categorical=False
)
else:
tm.assert_series_equal(s_nan, retr)
# FIXME: don't leave commented-out
# fails:
# for x in examples:
# roundtrip(s, nan_rep=b'\xf8\xfc')
def test_append_some_nans(self, setup_path):
with ensure_clean_store(setup_path) as store:
df = DataFrame(
{
"A": Series(np.random.randn(20)).astype("int32"),
"A1": np.random.randn(20),
"A2": np.random.randn(20),
"B": "foo",
"C": "bar",
"D": Timestamp("20010101"),
"E": datetime.datetime(2001, 1, 2, 0, 0),
},
index=np.arange(20),
)
# some nans
_maybe_remove(store, "df1")
df.loc[0:15, ["A1", "B", "D", "E"]] = np.nan
store.append("df1", df[:10])
store.append("df1", df[10:])
tm.assert_frame_equal(store["df1"], df)
# first column
df1 = df.copy()
df1.loc[:, "A1"] = np.nan
_maybe_remove(store, "df1")
store.append("df1", df1[:10])
store.append("df1", df1[10:])
tm.assert_frame_equal(store["df1"], df1)
# 2nd column
df2 = df.copy()
df2.loc[:, "A2"] = np.nan
_maybe_remove(store, "df2")
store.append("df2", df2[:10])
store.append("df2", df2[10:])
tm.assert_frame_equal(store["df2"], df2)
# datetimes
df3 = df.copy()
df3.loc[:, "E"] = np.nan
_maybe_remove(store, "df3")
store.append("df3", df3[:10])
store.append("df3", df3[10:])
tm.assert_frame_equal(store["df3"], df3)
def test_append_all_nans(self, setup_path):
with ensure_clean_store(setup_path) as store:
df = DataFrame(
{"A1": np.random.randn(20), "A2": np.random.randn(20)},
index=np.arange(20),
)
df.loc[0:15, :] = np.nan
# nan some entire rows (dropna=True)
_maybe_remove(store, "df")
store.append("df", df[:10], dropna=True)
store.append("df", df[10:], dropna=True)
tm.assert_frame_equal(store["df"], df[-4:])
# nan some entire rows (dropna=False)
_maybe_remove(store, "df2")
store.append("df2", df[:10], dropna=False)
store.append("df2", df[10:], dropna=False)
tm.assert_frame_equal(store["df2"], df)
# tests the option io.hdf.dropna_table
pd.set_option("io.hdf.dropna_table", False)
_maybe_remove(store, "df3")
store.append("df3", df[:10])
store.append("df3", df[10:])
tm.assert_frame_equal(store["df3"], df)
pd.set_option("io.hdf.dropna_table", True)
_maybe_remove(store, "df4")
store.append("df4", df[:10])
store.append("df4", df[10:])
tm.assert_frame_equal(store["df4"], df[-4:])
# nan some entire rows (string are still written!)
df = DataFrame(
{
"A1": np.random.randn(20),
"A2": np.random.randn(20),
"B": "foo",
"C": "bar",
},
index=np.arange(20),
)
df.loc[0:15, :] = np.nan
_maybe_remove(store, "df")
store.append("df", df[:10], dropna=True)
store.append("df", df[10:], dropna=True)
tm.assert_frame_equal(store["df"], df)
_maybe_remove(store, "df2")
store.append("df2", df[:10], dropna=False)
store.append("df2", df[10:], dropna=False)
tm.assert_frame_equal(store["df2"], df)
# nan some entire rows (but since we have dates they are still
# written!)
df = DataFrame(
{
"A1": np.random.randn(20),
"A2": np.random.randn(20),
"B": "foo",
"C": "bar",
"D": Timestamp("20010101"),
"E": datetime.datetime(2001, 1, 2, 0, 0),
},
index=np.arange(20),
)
df.loc[0:15, :] = np.nan
_maybe_remove(store, "df")
store.append("df", df[:10], dropna=True)
store.append("df", df[10:], dropna=True)
tm.assert_frame_equal(store["df"], df)
_maybe_remove(store, "df2")
store.append("df2", df[:10], dropna=False)
store.append("df2", df[10:], dropna=False)
tm.assert_frame_equal(store["df2"], df)
# Test to make sure defaults are to not drop.
# Corresponding to Issue 9382
df_with_missing = DataFrame(
{"col1": [0, np.nan, 2], "col2": [1, np.nan, np.nan]}
)
with ensure_clean_path(setup_path) as path:
df_with_missing.to_hdf(path, "df_with_missing", format="table")
reloaded = read_hdf(path, "df_with_missing")
tm.assert_frame_equal(df_with_missing, reloaded)
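    # A minimal sketch (not part of the original suite) of the
    # io.hdf.dropna_table option: with the option False (the default, per
    # GH 9382 above), all-NaN rows are preserved on append. Using
    # pd.option_context restores the previous value afterwards; the frame
    # and key name are illustrative.
    def _example_dropna_option(self, setup_path):
        df = DataFrame({"a": [np.nan, np.nan], "b": [np.nan, 1.0]})
        with ensure_clean_store(setup_path) as store:
            with pd.option_context("io.hdf.dropna_table", False):
                store.append("df", df)
            tm.assert_frame_equal(store["df"], df)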
def test_read_missing_key_close_store(self, setup_path):
# GH 25766
with ensure_clean_path(setup_path) as path:
df = pd.DataFrame({"a": range(2), "b": range(2)})
df.to_hdf(path, "k1")
with pytest.raises(KeyError, match="'No object named k2 in the file'"):
pd.read_hdf(path, "k2")
# smoke test to test that file is properly closed after
# read with KeyError before another write
df.to_hdf(path, "k2")
def test_read_missing_key_opened_store(self, setup_path):
# GH 28699
with ensure_clean_path(setup_path) as path:
df = pd.DataFrame({"a": range(2), "b": range(2)})
df.to_hdf(path, "k1")
store = pd.HDFStore(path, "r")
with pytest.raises(KeyError, match="'No object named k2 in the file'"):
pd.read_hdf(store, "k2")
# Test that the file is still open after a KeyError and that we can
# still read from it.
pd.read_hdf(store, "k1")
def test_append_frame_column_oriented(self, setup_path):
with ensure_clean_store(setup_path) as store:
# column oriented
df = tm.makeTimeDataFrame()
_maybe_remove(store, "df1")
store.append("df1", df.iloc[:, :2], axes=["columns"])
store.append("df1", df.iloc[:, 2:])
tm.assert_frame_equal(store["df1"], df)
result = store.select("df1", "columns=A")
expected = df.reindex(columns=["A"])
tm.assert_frame_equal(expected, result)
# selection on the non-indexable
result = store.select("df1", ("columns=A", "index=df.index[0:4]"))
expected = df.reindex(columns=["A"], index=df.index[0:4])
tm.assert_frame_equal(expected, result)
# this isn't supported
with pytest.raises(TypeError):
store.select("df1", "columns=A and index>df.index[4]")
def test_append_with_different_block_ordering(self, setup_path):
# GH 4096; using same frames, but different block orderings
with ensure_clean_store(setup_path) as store:
for i in range(10):
df = DataFrame(np.random.randn(10, 2), columns=list("AB"))
df["index"] = range(10)
df["index"] += i * 10
df["int64"] = Series([1] * len(df), dtype="int64")
df["int16"] = Series([1] * len(df), dtype="int16")
if i % 2 == 0:
del df["int64"]
df["int64"] = Series([1] * len(df), dtype="int64")
if i % 3 == 0:
a = df.pop("A")
df["A"] = a
df.set_index("index", inplace=True)
store.append("df", df)
# test a different ordering but with more fields (like invalid
# combination)
with ensure_clean_store(setup_path) as store:
df = DataFrame(np.random.randn(10, 2), columns=list("AB"), dtype="float64")
df["int64"] = Series([1] * len(df), dtype="int64")
df["int16"] = Series([1] * len(df), dtype="int16")
store.append("df", df)
# store additional fields in different blocks
df["int16_2"] = Series([1] * len(df), dtype="int16")
with pytest.raises(ValueError):
store.append("df", df)
# store multiple additional fields in different blocks
df["float_3"] = Series([1.0] * len(df), dtype="float64")
with pytest.raises(ValueError):
store.append("df", df)
def test_append_with_strings(self, setup_path):
with ensure_clean_store(setup_path) as store:
with catch_warnings(record=True):
def check_col(key, name, size):
assert (
getattr(store.get_storer(key).table.description, name).itemsize
== size
)
# avoid truncation on elements
df = DataFrame([[123, "asdqwerty"], [345, "dggnhebbsdfbdfb"]])
store.append("df_big", df)
tm.assert_frame_equal(store.select("df_big"), df)
check_col("df_big", "values_block_1", 15)
# appending smaller string ok
df2 = DataFrame([[124, "asdqy"], [346, "dggnhefbdfb"]])
store.append("df_big", df2)
expected = concat([df, df2])
tm.assert_frame_equal(store.select("df_big"), expected)
check_col("df_big", "values_block_1", 15)
# avoid truncation on elements
df = DataFrame([[123, "asdqwerty"], [345, "dggnhebbsdfbdfb"]])
store.append("df_big2", df, min_itemsize={"values": 50})
tm.assert_frame_equal(store.select("df_big2"), df)
check_col("df_big2", "values_block_1", 50)
# bigger string on next append
store.append("df_new", df)
df_new = DataFrame(
[[124, "abcdefqhij"], [346, "abcdefghijklmnopqrtsuvwxyz"]]
)
with pytest.raises(ValueError):
store.append("df_new", df_new)
# min_itemsize on Series index (GH 11412)
df = tm.makeMixedDataFrame().set_index("C")
store.append("ss", df["B"], min_itemsize={"index": 4})
tm.assert_series_equal(store.select("ss"), df["B"])
# same as above, with data_columns=True
store.append(
"ss2", df["B"], data_columns=True, min_itemsize={"index": 4}
)
tm.assert_series_equal(store.select("ss2"), df["B"])
# min_itemsize in index without appending (GH 10381)
store.put("ss3", df, format="table", min_itemsize={"index": 6})
# just make sure there is a longer string:
df2 = df.copy().reset_index().assign(C="longer").set_index("C")
store.append("ss3", df2)
tm.assert_frame_equal(store.select("ss3"), pd.concat([df, df2]))
# same as above, with a Series
store.put("ss4", df["B"], format="table", min_itemsize={"index": 6})
store.append("ss4", df2["B"])
tm.assert_series_equal(
store.select("ss4"), pd.concat([df["B"], df2["B"]])
)
# with nans
_maybe_remove(store, "df")
df = tm.makeTimeDataFrame()
df["string"] = "foo"
df.loc[1:4, "string"] = np.nan
df["string2"] = "bar"
df.loc[4:8, "string2"] = np.nan
df["string3"] = "bah"
df.loc[1:, "string3"] = np.nan
store.append("df", df)
result = store.select("df")
tm.assert_frame_equal(result, df)
with ensure_clean_store(setup_path) as store:
def check_col(key, name, size):
assert (
getattr(store.get_storer(key).table.description, name).itemsize
== size
)
df = DataFrame(dict(A="foo", B="bar"), index=range(10))
# a min_itemsize that creates a data_column
_maybe_remove(store, "df")
store.append("df", df, min_itemsize={"A": 200})
check_col("df", "A", 200)
assert store.get_storer("df").data_columns == ["A"]
# a min_itemsize that creates a data_column2
_maybe_remove(store, "df")
store.append("df", df, data_columns=["B"], min_itemsize={"A": 200})
check_col("df", "A", 200)
assert store.get_storer("df").data_columns == ["B", "A"]
# a min_itemsize that creates a data_column2
_maybe_remove(store, "df")
store.append("df", df, data_columns=["B"], min_itemsize={"values": 200})
check_col("df", "B", 200)
check_col("df", "values_block_0", 200)
assert store.get_storer("df").data_columns == ["B"]
# infer the .typ on subsequent appends
_maybe_remove(store, "df")
store.append("df", df[:5], min_itemsize=200)
store.append("df", df[5:], min_itemsize=200)
tm.assert_frame_equal(store["df"], df)
# invalid min_itemsize keys
df = DataFrame(["foo", "foo", "foo", "barh", "barh", "barh"], columns=["A"])
_maybe_remove(store, "df")
with pytest.raises(ValueError):
store.append("df", df, min_itemsize={"foo": 20, "foobar": 20})
def test_append_with_empty_string(self, setup_path):
with ensure_clean_store(setup_path) as store:
# with all empty strings (GH 12242)
df = DataFrame({"x": ["a", "b", "c", "d", "e", "f", ""]})
store.append("df", df[:-1], min_itemsize={"x": 1})
store.append("df", df[-1:], min_itemsize={"x": 1})
tm.assert_frame_equal(store.select("df"), df)
def test_to_hdf_with_min_itemsize(self, setup_path):
with ensure_clean_path(setup_path) as path:
# min_itemsize in index with to_hdf (GH 10381)
df = tm.makeMixedDataFrame().set_index("C")
df.to_hdf(path, "ss3", format="table", min_itemsize={"index": 6})
# just make sure there is a longer string:
df2 = df.copy().reset_index().assign(C="longer").set_index("C")
df2.to_hdf(path, "ss3", append=True, format="table")
tm.assert_frame_equal(pd.read_hdf(path, "ss3"), pd.concat([df, df2]))
# same as above, with a Series
df["B"].to_hdf(path, "ss4", format="table", min_itemsize={"index": 6})
df2["B"].to_hdf(path, "ss4", append=True, format="table")
tm.assert_series_equal(
pd.read_hdf(path, "ss4"), pd.concat([df["B"], df2["B"]])
)
@pytest.mark.parametrize(
"format", [pytest.param("fixed", marks=td.xfail_non_writeable), "table"]
)
def test_to_hdf_errors(self, format, setup_path):
data = ["\ud800foo"]
ser = pd.Series(data, index=pd.Index(data))
with ensure_clean_path(setup_path) as path:
# GH 20835
ser.to_hdf(path, "table", format=format, errors="surrogatepass")
result = pd.read_hdf(path, "table", errors="surrogatepass")
tm.assert_series_equal(result, ser)
def test_append_with_data_columns(self, setup_path):
with ensure_clean_store(setup_path) as store:
df = tm.makeTimeDataFrame()
df.iloc[0, df.columns.get_loc("B")] = 1.0
_maybe_remove(store, "df")
store.append("df", df[:2], data_columns=["B"])
store.append("df", df[2:])
tm.assert_frame_equal(store["df"], df)
# check that we have indices created
assert store._handle.root.df.table.cols.index.is_indexed is True
assert store._handle.root.df.table.cols.B.is_indexed is True
# data column searching
result = store.select("df", "B>0")
expected = df[df.B > 0]
tm.assert_frame_equal(result, expected)
# data column searching (with an indexable and a data_columns)
result = store.select("df", "B>0 and index>df.index[3]")
df_new = df.reindex(index=df.index[4:])
expected = df_new[df_new.B > 0]
tm.assert_frame_equal(result, expected)
# data column selection with a string data_column
df_new = df.copy()
df_new["string"] = "foo"
df_new.loc[1:4, "string"] = np.nan
df_new.loc[5:6, "string"] = "bar"
_maybe_remove(store, "df")
store.append("df", df_new, data_columns=["string"])
result = store.select("df", "string='foo'")
expected = df_new[df_new.string == "foo"]
tm.assert_frame_equal(result, expected)
# using min_itemsize and a data column
def check_col(key, name, size):
assert (
getattr(store.get_storer(key).table.description, name).itemsize
== size
)
with ensure_clean_store(setup_path) as store:
_maybe_remove(store, "df")
store.append(
"df", df_new, data_columns=["string"], min_itemsize={"string": 30}
)
check_col("df", "string", 30)
_maybe_remove(store, "df")
store.append("df", df_new, data_columns=["string"], min_itemsize=30)
check_col("df", "string", 30)
_maybe_remove(store, "df")
store.append(
"df", df_new, data_columns=["string"], min_itemsize={"values": 30}
)
check_col("df", "string", 30)
with ensure_clean_store(setup_path) as store:
df_new["string2"] = "foobarbah"
df_new["string_block1"] = "foobarbah1"
df_new["string_block2"] = "foobarbah2"
_maybe_remove(store, "df")
store.append(
"df",
df_new,
data_columns=["string", "string2"],
min_itemsize={"string": 30, "string2": 40, "values": 50},
)
check_col("df", "string", 30)
check_col("df", "string2", 40)
check_col("df", "values_block_1", 50)
with ensure_clean_store(setup_path) as store:
# multiple data columns
df_new = df.copy()
df_new.iloc[0, df_new.columns.get_loc("A")] = 1.0
df_new.iloc[0, df_new.columns.get_loc("B")] = -1.0
df_new["string"] = "foo"
sl = df_new.columns.get_loc("string")
df_new.iloc[1:4, sl] = np.nan
df_new.iloc[5:6, sl] = "bar"
df_new["string2"] = "foo"
sl = df_new.columns.get_loc("string2")
df_new.iloc[2:5, sl] = np.nan
df_new.iloc[7:8, sl] = "bar"
_maybe_remove(store, "df")
store.append("df", df_new, data_columns=["A", "B", "string", "string2"])
result = store.select(
"df", "string='foo' and string2='foo' and A>0 and B<0"
)
expected = df_new[
(df_new.string == "foo")
& (df_new.string2 == "foo")
& (df_new.A > 0)
& (df_new.B < 0)
]
tm.assert_frame_equal(result, expected, check_index_type=False)
# yield an empty frame
result = store.select("df", "string='foo' and string2='cool'")
expected = df_new[(df_new.string == "foo") & (df_new.string2 == "cool")]
tm.assert_frame_equal(result, expected, check_index_type=False)
with ensure_clean_store(setup_path) as store:
# doc example
df_dc = df.copy()
df_dc["string"] = "foo"
df_dc.loc[4:6, "string"] = np.nan
df_dc.loc[7:9, "string"] = "bar"
df_dc["string2"] = "cool"
df_dc["datetime"] = Timestamp("20010102")
df_dc = df_dc._convert(datetime=True)
df_dc.loc[3:5, ["A", "B", "datetime"]] = np.nan
_maybe_remove(store, "df_dc")
store.append(
"df_dc", df_dc, data_columns=["B", "C", "string", "string2", "datetime"]
)
result = store.select("df_dc", "B>0")
expected = df_dc[df_dc.B > 0]
tm.assert_frame_equal(result, expected, check_index_type=False)
result = store.select("df_dc", ["B > 0", "C > 0", "string == foo"])
expected = df_dc[(df_dc.B > 0) & (df_dc.C > 0) & (df_dc.string == "foo")]
tm.assert_frame_equal(result, expected, check_index_type=False)
with ensure_clean_store(setup_path) as store:
# doc example part 2
np.random.seed(1234)
index = date_range("1/1/2000", periods=8)
df_dc = DataFrame(
np.random.randn(8, 3), index=index, columns=["A", "B", "C"]
)
df_dc["string"] = "foo"
df_dc.loc[4:6, "string"] = np.nan
df_dc.loc[7:9, "string"] = "bar"
df_dc.loc[:, ["B", "C"]] = df_dc.loc[:, ["B", "C"]].abs()
df_dc["string2"] = "cool"
# on-disk operations
store.append("df_dc", df_dc, data_columns=["B", "C", "string", "string2"])
result = store.select("df_dc", "B>0")
expected = df_dc[df_dc.B > 0]
tm.assert_frame_equal(result, expected)
result = store.select("df_dc", ["B > 0", "C > 0", 'string == "foo"'])
expected = df_dc[(df_dc.B > 0) & (df_dc.C > 0) & (df_dc.string == "foo")]
tm.assert_frame_equal(result, expected)
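    # A minimal sketch (not part of the original suite): columns declared as
    # data_columns can be queried directly in a where clause on select.
    # The key name "dc" is hypothetical.
    def _example_data_column_query(self, setup_path):
        df = tm.makeTimeDataFrame()
        df["string"] = "foo"
        df.loc[df.index[:3], "string"] = "bar"
        with ensure_clean_store(setup_path) as store:
            store.append("dc", df, data_columns=["A", "string"])
            result = store.select("dc", "A>0 and string='bar'")
            expected = df[(df.A > 0) & (df.string == "bar")]
            tm.assert_frame_equal(result, expected)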
def test_create_table_index(self, setup_path):
with ensure_clean_store(setup_path) as store:
with catch_warnings(record=True):
def col(t, column):
return getattr(store.get_storer(t).table.cols, column)
# data columns
df = tm.makeTimeDataFrame()
df["string"] = "foo"
df["string2"] = "bar"
store.append("f", df, data_columns=["string", "string2"])
assert col("f", "index").is_indexed is True
assert col("f", "string").is_indexed is True
assert col("f", "string2").is_indexed is True
# specify index=columns
store.append(
"f2", df, index=["string"], data_columns=["string", "string2"]
)
assert col("f2", "index").is_indexed is False
assert col("f2", "string").is_indexed is True
assert col("f2", "string2").is_indexed is False
# try to index a non-table
_maybe_remove(store, "f2")
store.put("f2", df)
with pytest.raises(TypeError):
store.create_table_index("f2")
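    # A minimal sketch (not part of the original suite): when a table is
    # written with index=False, a PyTables index can still be created later on
    # a data column via create_table_index. The key name "f3" is hypothetical.
    def _example_create_table_index(self, setup_path):
        df = tm.makeTimeDataFrame()
        with ensure_clean_store(setup_path) as store:
            store.append("f3", df, data_columns=["A"], index=False)
            store.create_table_index("f3", columns=["A"])
            assert store.get_storer("f3").table.cols.A.is_indexed is True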
def test_append_hierarchical(self, setup_path):
index = MultiIndex(
levels=[["foo", "bar", "baz", "qux"], ["one", "two", "three"]],
codes=[[0, 0, 0, 1, 1, 2, 2, 3, 3, 3], [0, 1, 2, 0, 1, 1, 2, 0, 1, 2]],
names=["foo", "bar"],
)
df = DataFrame(np.random.randn(10, 3), index=index, columns=["A", "B", "C"])
with ensure_clean_store(setup_path) as store:
store.append("mi", df)
result = store.select("mi")
tm.assert_frame_equal(result, df)
# GH 3748
result = store.select("mi", columns=["A", "B"])
expected = df.reindex(columns=["A", "B"])
tm.assert_frame_equal(result, expected)
with ensure_clean_path("test.hdf") as path:
df.to_hdf(path, "df", format="table")
result = read_hdf(path, "df", columns=["A", "B"])
expected = df.reindex(columns=["A", "B"])
tm.assert_frame_equal(result, expected)
def test_column_multiindex(self, setup_path):
# GH 4710
# recreate multi-indexes properly
index = MultiIndex.from_tuples(
[("A", "a"), ("A", "b"), ("B", "a"), ("B", "b")], names=["first", "second"]
)
df = DataFrame(np.arange(12).reshape(3, 4), columns=index)
expected = df.copy()
if isinstance(expected.index, RangeIndex):
expected.index = Int64Index(expected.index)
with ensure_clean_store(setup_path) as store:
store.put("df", df)
tm.assert_frame_equal(
store["df"], expected, check_index_type=True, check_column_type=True
)
store.put("df1", df, format="table")
tm.assert_frame_equal(
store["df1"], expected, check_index_type=True, check_column_type=True
)
with pytest.raises(ValueError):
store.put("df2", df, format="table", data_columns=["A"])
with pytest.raises(ValueError):
store.put("df3", df, format="table", data_columns=True)
# appending multi-column on existing table (see GH 6167)
with ensure_clean_store(setup_path) as store:
store.append("df2", df)
store.append("df2", df)
tm.assert_frame_equal(store["df2"], concat((df, df)))
# non_index_axes name
df = DataFrame(
np.arange(12).reshape(3, 4), columns=Index(list("ABCD"), name="foo")
)
expected = df.copy()
if isinstance(expected.index, RangeIndex):
expected.index = Int64Index(expected.index)
with ensure_clean_store(setup_path) as store:
store.put("df1", df, format="table")
tm.assert_frame_equal(
store["df1"], expected, check_index_type=True, check_column_type=True
)
def test_store_multiindex(self, setup_path):
# validate multi-index names
# GH 5527
with ensure_clean_store(setup_path) as store:
def make_index(names=None):
return MultiIndex.from_tuples(
[
(datetime.datetime(2013, 12, d), s, t)
for d in range(1, 3)
for s in range(2)
for t in range(3)
],
names=names,
)
# no names
_maybe_remove(store, "df")
df = DataFrame(np.zeros((12, 2)), columns=["a", "b"], index=make_index())
store.append("df", df)
tm.assert_frame_equal(store.select("df"), df)
# partial names
_maybe_remove(store, "df")
df = DataFrame(
np.zeros((12, 2)),
columns=["a", "b"],
index=make_index(["date", None, None]),
)
store.append("df", df)
tm.assert_frame_equal(store.select("df"), df)
# series
_maybe_remove(store, "s")
s = Series(np.zeros(12), index=make_index(["date", None, None]))
store.append("s", s)
xp = Series(np.zeros(12), index=make_index(["date", "level_1", "level_2"]))
tm.assert_series_equal(store.select("s"), xp)
# dup with column
_maybe_remove(store, "df")
df = DataFrame(
np.zeros((12, 2)),
columns=["a", "b"],
index=make_index(["date", "a", "t"]),
)
with pytest.raises(ValueError):
store.append("df", df)
# dup within level
_maybe_remove(store, "df")
df = DataFrame(
np.zeros((12, 2)),
columns=["a", "b"],
index=make_index(["date", "date", "date"]),
)
with pytest.raises(ValueError):
store.append("df", df)
# fully named
_maybe_remove(store, "df")
df = DataFrame(
np.zeros((12, 2)),
columns=["a", "b"],
index=make_index(["date", "s", "t"]),
)
store.append("df", df)
tm.assert_frame_equal(store.select("df"), df)
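    # A minimal sketch (not part of the original suite): a fully named
    # MultiIndex round-trips through a table store with its level names
    # intact. The key name "mi_named" and the index values are illustrative.
    def _example_multiindex_names(self, setup_path):
        index = MultiIndex.from_product(
            [["foo", "bar"], [1, 2]], names=["key", "num"]
        )
        df = DataFrame(np.random.randn(4, 2), index=index, columns=["A", "B"])
        with ensure_clean_store(setup_path) as store:
            store.append("mi_named", df)
            result = store.select("mi_named")
            tm.assert_frame_equal(result, df)
            assert list(result.index.names) == ["key", "num"]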
def test_select_columns_in_where(self, setup_path):
# GH 6169
# recreate multi-indexes when columns is passed
# in the `where` argument
index = MultiIndex(
levels=[["foo", "bar", "baz", "qux"], ["one", "two", "three"]],
codes=[[0, 0, 0, 1, 1, 2, 2, 3, 3, 3], [0, 1, 2, 0, 1, 1, 2, 0, 1, 2]],
names=["foo_name", "bar_name"],
)
# With a DataFrame
df = DataFrame(np.random.randn(10, 3), index=index, columns=["A", "B", "C"])
with ensure_clean_store(setup_path) as store:
store.put("df", df, format="table")
expected = df[["A"]]
tm.assert_frame_equal(store.select("df", columns=["A"]), expected)
tm.assert_frame_equal(store.select("df", where="columns=['A']"), expected)
# With a Series
s = Series(np.random.randn(10), index=index, name="A")
with ensure_clean_store(setup_path) as store:
store.put("s", s, format="table")
tm.assert_series_equal(store.select("s", where="columns=['A']"), s)
def test_mi_data_columns(self, setup_path):
# GH 14435
idx = pd.MultiIndex.from_arrays(
[date_range("2000-01-01", periods=5), range(5)], names=["date", "id"]
)
df = pd.DataFrame({"a": [1.1, 1.2, 1.3, 1.4, 1.5]}, index=idx)
with ensure_clean_store(setup_path) as store:
store.append("df", df, data_columns=True)
actual = store.select("df", where="id == 1")
expected = df.iloc[[1], :]
tm.assert_frame_equal(actual, expected)
def test_pass_spec_to_storer(self, setup_path):
df = tm.makeDataFrame()
with ensure_clean_store(setup_path) as store:
store.put("df", df)
with pytest.raises(TypeError):
store.select("df", columns=["A"])
with pytest.raises(TypeError):
store.select("df", where=[("columns=A")])
@td.xfail_non_writeable
def test_append_misc(self, setup_path):
with ensure_clean_store(setup_path) as store:
df = tm.makeDataFrame()
store.append("df", df, chunksize=1)
result = store.select("df")
tm.assert_frame_equal(result, df)
store.append("df1", df, expectedrows=10)
result = store.select("df1")
tm.assert_frame_equal(result, df)
# more chunksize in append tests
def check(obj, comparator):
for c in [10, 200, 1000]:
with ensure_clean_store(setup_path, mode="w") as store:
store.append("obj", obj, chunksize=c)
result = store.select("obj")
comparator(result, obj)
df = tm.makeDataFrame()
df["string"] = "foo"
df["float322"] = 1.0
df["float322"] = df["float322"].astype("float32")
df["bool"] = df["float322"] > 0
df["time1"] = Timestamp("20130101")
df["time2"] = Timestamp("20130102")
check(df, tm.assert_frame_equal)
# empty frame, GH4273
with ensure_clean_store(setup_path) as store:
# 0 len
df_empty = DataFrame(columns=list("ABC"))
store.append("df", df_empty)
with pytest.raises(KeyError, match="'No object named df in the file'"):
store.select("df")
# repeated append of 0/non-zero frames
df = DataFrame(np.random.rand(10, 3), columns=list("ABC"))
store.append("df", df)
tm.assert_frame_equal(store.select("df"), df)
store.append("df", df_empty)
tm.assert_frame_equal(store.select("df"), df)
# store
df = DataFrame(columns=list("ABC"))
store.put("df2", df)
tm.assert_frame_equal(store.select("df2"), df)
def test_append_raise(self, setup_path):
with ensure_clean_store(setup_path) as store:
# test append with invalid input to get good error messages
# list in column
df = tm.makeDataFrame()
df["invalid"] = [["a"]] * len(df)
assert df.dtypes["invalid"] == np.object_
with pytest.raises(TypeError):
store.append("df", df)
# multiple invalid columns
df["invalid2"] = [["a"]] * len(df)
df["invalid3"] = [["a"]] * len(df)
with pytest.raises(TypeError):
store.append("df", df)
# datetime with embedded nans as object
df = tm.makeDataFrame()
s = Series(datetime.datetime(2001, 1, 2), index=df.index)
s = s.astype(object)
s[0:5] = np.nan
df["invalid"] = s
assert df.dtypes["invalid"] == np.object_
with pytest.raises(TypeError):
store.append("df", df)
# directly ndarray
with pytest.raises(TypeError):
store.append("df", np.arange(10))
# series directly
with pytest.raises(TypeError):
store.append("df", Series(np.arange(10)))
# appending an incompatible table
df = tm.makeDataFrame()
store.append("df", df)
df["foo"] = "foo"
with pytest.raises(ValueError):
store.append("df", df)
def test_table_index_incompatible_dtypes(self, setup_path):
df1 = DataFrame({"a": [1, 2, 3]})
df2 = DataFrame({"a": [4, 5, 6]}, index=date_range("1/1/2000", periods=3))
with ensure_clean_store(setup_path) as store:
store.put("frame", df1, format="table")
with pytest.raises(TypeError):
store.put("frame", df2, format="table", append=True)
def test_table_values_dtypes_roundtrip(self, setup_path):
with ensure_clean_store(setup_path) as store:
df1 = DataFrame({"a": [1, 2, 3]}, dtype="f8")
store.append("df_f8", df1)
tm.assert_series_equal(df1.dtypes, store["df_f8"].dtypes)
df2 = DataFrame({"a": [1, 2, 3]}, dtype="i8")
store.append("df_i8", df2)
tm.assert_series_equal(df2.dtypes, store["df_i8"].dtypes)
# incompatible dtype
with pytest.raises(ValueError):
store.append("df_i8", df1)
# check creation/storage/retrieval of float32 (a bit hacky to
# actually create them though)
df1 = DataFrame(np.array([[1], [2], [3]], dtype="f4"), columns=["A"])
store.append("df_f4", df1)
tm.assert_series_equal(df1.dtypes, store["df_f4"].dtypes)
assert df1.dtypes[0] == "float32"
# check with mixed dtypes
df1 = DataFrame(
{
c: Series(np.random.randint(5), dtype=c)
for c in ["float32", "float64", "int32", "int64", "int16", "int8"]
}
)
df1["string"] = "foo"
df1["float322"] = 1.0
df1["float322"] = df1["float322"].astype("float32")
df1["bool"] = df1["float32"] > 0
df1["time1"] = Timestamp("20130101")
df1["time2"] = Timestamp("20130102")
store.append("df_mixed_dtypes1", df1)
result = store.select("df_mixed_dtypes1").dtypes.value_counts()
result.index = [str(i) for i in result.index]
expected = Series(
{
"float32": 2,
"float64": 1,
"int32": 1,
"bool": 1,
"int16": 1,
"int8": 1,
"int64": 1,
"object": 1,
"datetime64[ns]": 2,
}
)
result = result.sort_index()
expected = expected.sort_index()
tm.assert_series_equal(result, expected)
def test_table_mixed_dtypes(self, setup_path):
# frame
df = tm.makeDataFrame()
df["obj1"] = "foo"
df["obj2"] = "bar"
df["bool1"] = df["A"] > 0
df["bool2"] = df["B"] > 0
df["bool3"] = True
df["int1"] = 1
df["int2"] = 2
df["timestamp1"] = Timestamp("20010102")
df["timestamp2"] = Timestamp("20010103")
df["datetime1"] = datetime.datetime(2001, 1, 2, 0, 0)
df["datetime2"] = datetime.datetime(2001, 1, 3, 0, 0)
df.loc[3:6, ["obj1"]] = np.nan
df = df._consolidate()._convert(datetime=True)
with ensure_clean_store(setup_path) as store:
store.append("df1_mixed", df)
tm.assert_frame_equal(store.select("df1_mixed"), df)
def test_unimplemented_dtypes_table_columns(self, setup_path):
with ensure_clean_store(setup_path) as store:
dtypes = [("date", datetime.date(2001, 1, 2))]
# currently unsupported dtypes
for n, f in dtypes:
df = tm.makeDataFrame()
df[n] = f
with pytest.raises(TypeError):
store.append("df1_{n}".format(n=n), df)
# frame
df = tm.makeDataFrame()
df["obj1"] = "foo"
df["obj2"] = "bar"
df["datetime1"] = datetime.date(2001, 1, 2)
df = df._consolidate()._convert(datetime=True)
with ensure_clean_store(setup_path) as store:
# this fails because we have a date in the object block
with pytest.raises(TypeError):
store.append("df_unimplemented", df)
@td.xfail_non_writeable
@pytest.mark.skipif(
LooseVersion(np.__version__) == LooseVersion("1.15.0"),
reason=(
"Skipping pytables test when numpy version is "
"exactly equal to 1.15.0: gh-22098"
),
)
def test_calendar_roundtrip_issue(self, setup_path):
# 8591
# doc example from tseries holiday section
weekmask_egypt = "Sun Mon Tue Wed Thu"
holidays = [
"2012-05-01",
datetime.datetime(2013, 5, 1),
np.datetime64("2014-05-01"),
]
bday_egypt = pd.offsets.CustomBusinessDay(
holidays=holidays, weekmask=weekmask_egypt
)
dt = datetime.datetime(2013, 4, 30)
dts = date_range(dt, periods=5, freq=bday_egypt)
s = Series(dts.weekday, dts).map(Series("Mon Tue Wed Thu Fri Sat Sun".split()))
with ensure_clean_store(setup_path) as store:
store.put("fixed", s)
result = store.select("fixed")
tm.assert_series_equal(result, s)
store.append("table", s)
result = store.select("table")
tm.assert_series_equal(result, s)
def test_roundtrip_tz_aware_index(self, setup_path):
# GH 17618
time = pd.Timestamp("2000-01-01 01:00:00", tz="US/Eastern")
df = pd.DataFrame(data=[0], index=[time])
with ensure_clean_store(setup_path) as store:
store.put("frame", df, format="fixed")
recons = store["frame"]
tm.assert_frame_equal(recons, df)
assert recons.index[0].value == 946706400000000000
def test_append_with_timedelta(self, setup_path):
# GH 3577
# append timedelta
df = DataFrame(
dict(
A=Timestamp("20130101"),
B=[
Timestamp("20130101") + timedelta(days=i, seconds=10)
for i in range(10)
],
)
)
df["C"] = df["A"] - df["B"]
df.loc[3:5, "C"] = np.nan
with ensure_clean_store(setup_path) as store:
# table
_maybe_remove(store, "df")
store.append("df", df, data_columns=True)
result = store.select("df")
tm.assert_frame_equal(result, df)
result = store.select("df", where="C<100000")
tm.assert_frame_equal(result, df)
result = store.select("df", where="C<pd.Timedelta('-3D')")
tm.assert_frame_equal(result, df.iloc[3:])
result = store.select("df", "C<'-3D'")
tm.assert_frame_equal(result, df.iloc[3:])
# a bit hacky here as we don't really deal with the NaT properly
result = store.select("df", "C<'-500000s'")
result = result.dropna(subset=["C"])
tm.assert_frame_equal(result, df.iloc[6:])
result = store.select("df", "C<'-3.5D'")
result = result.iloc[1:]
tm.assert_frame_equal(result, df.iloc[4:])
# fixed
_maybe_remove(store, "df2")
store.put("df2", df)
result = store.select("df2")
tm.assert_frame_equal(result, df)
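    # A minimal sketch (not part of the original suite): a timedelta data
    # column can be compared against a string offset such as '-3D' in a where
    # clause, matching the equivalent pandas mask. The key name "td" and the
    # frame layout mirror the test above but are illustrative.
    def _example_timedelta_query(self, setup_path):
        df = DataFrame(
            dict(
                A=Timestamp("20130101"),
                B=[Timestamp("20130101") + timedelta(days=i) for i in range(10)],
            )
        )
        df["C"] = df["A"] - df["B"]
        with ensure_clean_store(setup_path) as store:
            store.append("td", df, data_columns=True)
            result = store.select("td", "C<'-3D'")
            tm.assert_frame_equal(result, df[df.C < pd.Timedelta("-3D")])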
def test_remove(self, setup_path):
with ensure_clean_store(setup_path) as store:
ts = tm.makeTimeSeries()
df = tm.makeDataFrame()
store["a"] = ts
store["b"] = df
_maybe_remove(store, "a")
assert len(store) == 1
tm.assert_frame_equal(df, store["b"])
_maybe_remove(store, "b")
assert len(store) == 0
# nonexistence
with pytest.raises(
KeyError, match="'No object named a_nonexistent_store in the file'"
):
store.remove("a_nonexistent_store")
# pathing
store["a"] = ts
store["b/foo"] = df
_maybe_remove(store, "foo")
_maybe_remove(store, "b/foo")
assert len(store) == 1
store["a"] = ts
store["b/foo"] = df
_maybe_remove(store, "b")
assert len(store) == 1
# __delitem__
store["a"] = ts
store["b"] = df
del store["a"]
del store["b"]
assert len(store) == 0
def test_invalid_terms(self, setup_path):
with ensure_clean_store(setup_path) as store:
with catch_warnings(record=True):
df = tm.makeTimeDataFrame()
df["string"] = "foo"
df.loc[0:4, "string"] = "bar"
store.put("df", df, format="table")
# some invalid terms
with pytest.raises(TypeError):
Term()
# more invalid
with pytest.raises(ValueError):
store.select("df", "df.index[3]")
with pytest.raises(SyntaxError):
store.select("df", "index>")
# from the docs
with ensure_clean_path(setup_path) as path:
dfq = DataFrame(
np.random.randn(10, 4),
columns=list("ABCD"),
index=date_range("20130101", periods=10),
)
dfq.to_hdf(path, "dfq", format="table", data_columns=True)
# check ok
read_hdf(
path, "dfq", where="index>Timestamp('20130104') & columns=['A', 'B']"
)
read_hdf(path, "dfq", where="A>0 or C>0")
# catch the invalid reference
with ensure_clean_path(setup_path) as path:
dfq = DataFrame(
np.random.randn(10, 4),
columns=list("ABCD"),
index=date_range("20130101", periods=10),
)
dfq.to_hdf(path, "dfq", format="table")
with pytest.raises(ValueError):
read_hdf(path, "dfq", where="A>0 or C>0")
def test_same_name_scoping(self, setup_path):
with ensure_clean_store(setup_path) as store:
import pandas as pd
df = DataFrame(
np.random.randn(20, 2), index=pd.date_range("20130101", periods=20)
)
store.put("df", df, format="table")
expected = df[df.index > pd.Timestamp("20130105")]
import datetime # noqa
result = store.select("df", "index>datetime.datetime(2013,1,5)")
tm.assert_frame_equal(result, expected)
from datetime import datetime # noqa
# technically an error, but allow it
result = store.select("df", "index>datetime.datetime(2013,1,5)")
tm.assert_frame_equal(result, expected)
result = store.select("df", "index>datetime(2013,1,5)")
tm.assert_frame_equal(result, expected)
def test_series(self, setup_path):
s = tm.makeStringSeries()
self._check_roundtrip(s, tm.assert_series_equal, path=setup_path)
ts = tm.makeTimeSeries()
self._check_roundtrip(ts, tm.assert_series_equal, path=setup_path)
ts2 = Series(ts.index, Index(ts.index, dtype=object))
self._check_roundtrip(ts2, tm.assert_series_equal, path=setup_path)
ts3 = Series(ts.values, Index(np.asarray(ts.index, dtype=object), dtype=object))
self._check_roundtrip(
ts3, tm.assert_series_equal, path=setup_path, check_index_type=False
)
def test_float_index(self, setup_path):
# GH #454
index = np.random.randn(10)
s = Series(np.random.randn(10), index=index)
self._check_roundtrip(s, tm.assert_series_equal, path=setup_path)
@td.xfail_non_writeable
def test_tuple_index(self, setup_path):
# GH #492
col = np.arange(10)
idx = [(0.0, 1.0), (2.0, 3.0), (4.0, 5.0)]
data = np.random.randn(30).reshape((3, 10))
DF = DataFrame(data, index=idx, columns=col)
with catch_warnings(record=True):
simplefilter("ignore", pd.errors.PerformanceWarning)
self._check_roundtrip(DF, tm.assert_frame_equal, path=setup_path)
@td.xfail_non_writeable
@pytest.mark.filterwarnings("ignore::pandas.errors.PerformanceWarning")
def test_index_types(self, setup_path):
with catch_warnings(record=True):
values = np.random.randn(2)
func = lambda l, r: tm.assert_series_equal(
l, r, check_dtype=True, check_index_type=True, check_series_type=True
)
with catch_warnings(record=True):
ser = Series(values, [0, "y"])
self._check_roundtrip(ser, func, path=setup_path)
with catch_warnings(record=True):
ser = Series(values, [datetime.datetime.today(), 0])
self._check_roundtrip(ser, func, path=setup_path)
with catch_warnings(record=True):
ser = Series(values, ["y", 0])
self._check_roundtrip(ser, func, path=setup_path)
with catch_warnings(record=True):
ser = Series(values, [datetime.date.today(), "a"])
self._check_roundtrip(ser, func, path=setup_path)
with catch_warnings(record=True):
ser = Series(values, [0, "y"])
self._check_roundtrip(ser, func, path=setup_path)
ser = Series(values, [datetime.datetime.today(), 0])
self._check_roundtrip(ser, func, path=setup_path)
ser = Series(values, ["y", 0])
self._check_roundtrip(ser, func, path=setup_path)
ser = Series(values, [datetime.date.today(), "a"])
self._check_roundtrip(ser, func, path=setup_path)
ser = Series(values, [1.23, "b"])
self._check_roundtrip(ser, func, path=setup_path)
ser = Series(values, [1, 1.53])
self._check_roundtrip(ser, func, path=setup_path)
ser = Series(values, [1, 5])
self._check_roundtrip(ser, func, path=setup_path)
ser = Series(
values, [datetime.datetime(2012, 1, 1), datetime.datetime(2012, 1, 2)]
)
self._check_roundtrip(ser, func, path=setup_path)
def test_timeseries_preepoch(self, setup_path):
dr = bdate_range("1/1/1940", "1/1/1960")
ts = Series(np.random.randn(len(dr)), index=dr)
try:
self._check_roundtrip(ts, tm.assert_series_equal, path=setup_path)
except OverflowError:
pytest.skip("known failer on some windows platforms")
@td.xfail_non_writeable
@pytest.mark.parametrize(
"compression", [False, pytest.param(True, marks=td.skip_if_windows_python_3)]
)
def test_frame(self, compression, setup_path):
df = tm.makeDataFrame()
# put in some random NAs
df.values[0, 0] = np.nan
df.values[5, 3] = np.nan
self._check_roundtrip_table(
df, tm.assert_frame_equal, path=setup_path, compression=compression
)
self._check_roundtrip(
df, tm.assert_frame_equal, path=setup_path, compression=compression
)
tdf = tm.makeTimeDataFrame()
self._check_roundtrip(
tdf, tm.assert_frame_equal, path=setup_path, compression=compression
)
with ensure_clean_store(setup_path) as store:
# not consolidated
df["foo"] = np.random.randn(len(df))
store["df"] = df
recons = store["df"]
assert recons._data.is_consolidated()
# empty
self._check_roundtrip(df[:0], tm.assert_frame_equal, path=setup_path)
@td.xfail_non_writeable
def test_empty_series_frame(self, setup_path):
s0 = Series(dtype=object)
s1 = Series(name="myseries", dtype=object)
df0 = DataFrame()
df1 = DataFrame(index=["a", "b", "c"])
df2 = DataFrame(columns=["d", "e", "f"])
self._check_roundtrip(s0, tm.assert_series_equal, path=setup_path)
self._check_roundtrip(s1, tm.assert_series_equal, path=setup_path)
self._check_roundtrip(df0, tm.assert_frame_equal, path=setup_path)
self._check_roundtrip(df1, tm.assert_frame_equal, path=setup_path)
self._check_roundtrip(df2, tm.assert_frame_equal, path=setup_path)
@td.xfail_non_writeable
@pytest.mark.parametrize(
"dtype", [np.int64, np.float64, np.object, "m8[ns]", "M8[ns]"]
)
def test_empty_series(self, dtype, setup_path):
s = Series(dtype=dtype)
self._check_roundtrip(s, tm.assert_series_equal, path=setup_path)
def test_can_serialize_dates(self, setup_path):
rng = [x.date() for x in bdate_range("1/1/2000", "1/30/2000")]
frame = DataFrame(np.random.randn(len(rng), 4), index=rng)
self._check_roundtrip(frame, tm.assert_frame_equal, path=setup_path)
def test_store_hierarchical(self, setup_path):
index = MultiIndex(
levels=[["foo", "bar", "baz", "qux"], ["one", "two", "three"]],
codes=[[0, 0, 0, 1, 1, 2, 2, 3, 3, 3], [0, 1, 2, 0, 1, 1, 2, 0, 1, 2]],
names=["foo", "bar"],
)
frame = DataFrame(np.random.randn(10, 3), index=index, columns=["A", "B", "C"])
self._check_roundtrip(frame, tm.assert_frame_equal, path=setup_path)
self._check_roundtrip(frame.T, tm.assert_frame_equal, path=setup_path)
self._check_roundtrip(frame["A"], tm.assert_series_equal, path=setup_path)
# check that the names are stored
with ensure_clean_store(setup_path) as store:
store["frame"] = frame
recons = store["frame"]
tm.assert_frame_equal(recons, frame)
def test_store_index_name(self, setup_path):
df = tm.makeDataFrame()
df.index.name = "foo"
with ensure_clean_store(setup_path) as store:
store["frame"] = df
recons = store["frame"]
tm.assert_frame_equal(recons, df)
def test_store_index_name_with_tz(self, setup_path):
# GH 13884
df = pd.DataFrame({"A": [1, 2]})
df.index = pd.DatetimeIndex([1234567890123456787, 1234567890123456788])
df.index = df.index.tz_localize("UTC")
df.index.name = "foo"
with ensure_clean_store(setup_path) as store:
store.put("frame", df, format="table")
recons = store["frame"]
tm.assert_frame_equal(recons, df)
@pytest.mark.parametrize("table_format", ["table", "fixed"])
def test_store_index_name_numpy_str(self, table_format, setup_path):
# GH #13492
idx = pd.Index(
pd.to_datetime([datetime.date(2000, 1, 1), datetime.date(2000, 1, 2)]),
name="cols\u05d2",
)
idx1 = pd.Index(
pd.to_datetime([datetime.date(2010, 1, 1), datetime.date(2010, 1, 2)]),
name="rows\u05d0",
)
df = pd.DataFrame(np.arange(4).reshape(2, 2), columns=idx, index=idx1)
# This used to fail, returning numpy strings instead of python strings.
with ensure_clean_path(setup_path) as path:
df.to_hdf(path, "df", format=table_format)
df2 = read_hdf(path, "df")
tm.assert_frame_equal(df, df2, check_names=True)
assert type(df2.index.name) == str
assert type(df2.columns.name) == str
def test_store_series_name(self, setup_path):
df = tm.makeDataFrame()
series = df["A"]
with ensure_clean_store(setup_path) as store:
store["series"] = series
recons = store["series"]
tm.assert_series_equal(recons, series)
@td.xfail_non_writeable
@pytest.mark.parametrize(
"compression", [False, pytest.param(True, marks=td.skip_if_windows_python_3)]
)
def test_store_mixed(self, compression, setup_path):
def _make_one():
df = tm.makeDataFrame()
df["obj1"] = "foo"
df["obj2"] = "bar"
df["bool1"] = df["A"] > 0
df["bool2"] = df["B"] > 0
df["int1"] = 1
df["int2"] = 2
return df._consolidate()
df1 = _make_one()
df2 = _make_one()
self._check_roundtrip(df1, tm.assert_frame_equal, path=setup_path)
self._check_roundtrip(df2, tm.assert_frame_equal, path=setup_path)
with ensure_clean_store(setup_path) as store:
store["obj"] = df1
tm.assert_frame_equal(store["obj"], df1)
store["obj"] = df2
tm.assert_frame_equal(store["obj"], df2)
# check that we can store Series of all of these types
self._check_roundtrip(
df1["obj1"],
tm.assert_series_equal,
path=setup_path,
compression=compression,
)
self._check_roundtrip(
df1["bool1"],
tm.assert_series_equal,
path=setup_path,
compression=compression,
)
self._check_roundtrip(
df1["int1"],
tm.assert_series_equal,
path=setup_path,
compression=compression,
)
@pytest.mark.filterwarnings(
"ignore:\\nduplicate:pandas.io.pytables.DuplicateWarning"
)
def test_select_with_dups(self, setup_path):
# single dtypes
df = DataFrame(np.random.randn(10, 4), columns=["A", "A", "B", "B"])
df.index = date_range("20130101 9:30", periods=10, freq="T")
with ensure_clean_store(setup_path) as store:
store.append("df", df)
result = store.select("df")
expected = df
tm.assert_frame_equal(result, expected, by_blocks=True)
result = store.select("df", columns=df.columns)
expected = df
tm.assert_frame_equal(result, expected, by_blocks=True)
result = store.select("df", columns=["A"])
expected = df.loc[:, ["A"]]
tm.assert_frame_equal(result, expected)
# dups across dtypes
df = concat(
[
DataFrame(np.random.randn(10, 4), columns=["A", "A", "B", "B"]),
DataFrame(
np.random.randint(0, 10, size=20).reshape(10, 2), columns=["A", "C"]
),
],
axis=1,
)
df.index = date_range("20130101 9:30", periods=10, freq="T")
with ensure_clean_store(setup_path) as store:
store.append("df", df)
result = store.select("df")
expected = df
tm.assert_frame_equal(result, expected, by_blocks=True)
result = store.select("df", columns=df.columns)
expected = df
tm.assert_frame_equal(result, expected, by_blocks=True)
expected = df.loc[:, ["A"]]
result = store.select("df", columns=["A"])
tm.assert_frame_equal(result, expected, by_blocks=True)
expected = df.loc[:, ["B", "A"]]
result = store.select("df", columns=["B", "A"])
tm.assert_frame_equal(result, expected, by_blocks=True)
# duplicates on both index and columns
with ensure_clean_store(setup_path) as store:
store.append("df", df)
store.append("df", df)
expected = df.loc[:, ["B", "A"]]
expected = concat([expected, expected])
result = store.select("df", columns=["B", "A"])
tm.assert_frame_equal(result, expected, by_blocks=True)
def test_overwrite_node(self, setup_path):
with ensure_clean_store(setup_path) as store:
store["a"] = tm.makeTimeDataFrame()
ts = tm.makeTimeSeries()
store["a"] = ts
tm.assert_series_equal(store["a"], ts)
def test_select(self, setup_path):
with ensure_clean_store(setup_path) as store:
with catch_warnings(record=True):
# select with columns=
df = tm.makeTimeDataFrame()
_maybe_remove(store, "df")
store.append("df", df)
result = store.select("df", columns=["A", "B"])
expected = df.reindex(columns=["A", "B"])
tm.assert_frame_equal(expected, result)
# equivalently
result = store.select("df", [("columns=['A', 'B']")])
expected = df.reindex(columns=["A", "B"])
tm.assert_frame_equal(expected, result)
# with a data column
_maybe_remove(store, "df")
store.append("df", df, data_columns=["A"])
result = store.select("df", ["A > 0"], columns=["A", "B"])
expected = df[df.A > 0].reindex(columns=["A", "B"])
tm.assert_frame_equal(expected, result)
# all as data_columns
_maybe_remove(store, "df")
store.append("df", df, data_columns=True)
result = store.select("df", ["A > 0"], columns=["A", "B"])
expected = df[df.A > 0].reindex(columns=["A", "B"])
tm.assert_frame_equal(expected, result)
# with a data column, but different columns
_maybe_remove(store, "df")
store.append("df", df, data_columns=["A"])
result = store.select("df", ["A > 0"], columns=["C", "D"])
expected = df[df.A > 0].reindex(columns=["C", "D"])
tm.assert_frame_equal(expected, result)
def test_select_dtypes(self, setup_path):
with ensure_clean_store(setup_path) as store:
# with a Timestamp data column (GH #2637)
df = DataFrame(
dict(ts=bdate_range("2012-01-01", periods=300), A=np.random.randn(300))
)
_maybe_remove(store, "df")
store.append("df", df, data_columns=["ts", "A"])
result = store.select("df", "ts>=Timestamp('2012-02-01')")
expected = df[df.ts >= Timestamp("2012-02-01")]
tm.assert_frame_equal(expected, result)
# bool columns (GH #2849)
df = DataFrame(np.random.randn(5, 2), columns=["A", "B"])
df["object"] = "foo"
df.loc[4:5, "object"] = "bar"
df["boolv"] = df["A"] > 0
_maybe_remove(store, "df")
store.append("df", df, data_columns=True)
expected = df[df.boolv == True].reindex(columns=["A", "boolv"]) # noqa
for v in [True, "true", 1]:
result = store.select(
"df", "boolv == {v!s}".format(v=v), columns=["A", "boolv"]
)
tm.assert_frame_equal(expected, result)
expected = df[df.boolv == False].reindex(columns=["A", "boolv"]) # noqa
for v in [False, "false", 0]:
result = store.select(
"df", "boolv == {v!s}".format(v=v), columns=["A", "boolv"]
)
tm.assert_frame_equal(expected, result)
# integer index
df = DataFrame(dict(A=np.random.rand(20), B=np.random.rand(20)))
_maybe_remove(store, "df_int")
store.append("df_int", df)
result = store.select("df_int", "index<10 and columns=['A']")
expected = df.reindex(index=list(df.index)[0:10], columns=["A"])
tm.assert_frame_equal(expected, result)
# float index
df = DataFrame(
dict(
A=np.random.rand(20),
B=np.random.rand(20),
index=np.arange(20, dtype="f8"),
)
)
_maybe_remove(store, "df_float")
store.append("df_float", df)
result = store.select("df_float", "index<10.0 and columns=['A']")
expected = df.reindex(index=list(df.index)[0:10], columns=["A"])
tm.assert_frame_equal(expected, result)
with ensure_clean_store(setup_path) as store:
# floats w/o NaN
df = DataFrame(dict(cols=range(11), values=range(11)), dtype="float64")
df["cols"] = (df["cols"] + 10).apply(str)
store.append("df1", df, data_columns=True)
result = store.select("df1", where="values>2.0")
expected = df[df["values"] > 2.0]
tm.assert_frame_equal(expected, result)
# floats with NaN
df.iloc[0] = np.nan
expected = df[df["values"] > 2.0]
store.append("df2", df, data_columns=True, index=False)
result = store.select("df2", where="values>2.0")
tm.assert_frame_equal(expected, result)
# https://github.com/PyTables/PyTables/issues/282
# bug in selection when 0th row has a np.nan and an index
# store.append('df3',df,data_columns=True)
# result = store.select(
# 'df3', where='values>2.0')
# tm.assert_frame_equal(expected, result)
# NaN not in the first position: float with NaN is ok too
df = DataFrame(dict(cols=range(11), values=range(11)), dtype="float64")
df["cols"] = (df["cols"] + 10).apply(str)
df.iloc[1] = np.nan
expected = df[df["values"] > 2.0]
store.append("df4", df, data_columns=True)
result = store.select("df4", where="values>2.0")
tm.assert_frame_equal(expected, result)
# test selection with comparison against numpy scalar
# GH 11283
with ensure_clean_store(setup_path) as store:
df = tm.makeDataFrame()
expected = df[df["A"] > 0]
store.append("df", df, data_columns=True)
np_zero = np.float64(0) # noqa
result = store.select("df", where=["A>np_zero"])
tm.assert_frame_equal(expected, result)
def test_select_with_many_inputs(self, setup_path):
with ensure_clean_store(setup_path) as store:
df = DataFrame(
dict(
ts=bdate_range("2012-01-01", periods=300),
A=np.random.randn(300),
B=range(300),
users=["a"] * 50
+ ["b"] * 50
+ ["c"] * 100
+ ["a{i:03d}".format(i=i) for i in range(100)],
)
)
_maybe_remove(store, "df")
store.append("df", df, data_columns=["ts", "A", "B", "users"])
# regular select
result = store.select("df", "ts>=Timestamp('2012-02-01')")
expected = df[df.ts >= Timestamp("2012-02-01")]
tm.assert_frame_equal(expected, result)
# small selector
result = store.select(
"df", "ts>=Timestamp('2012-02-01') & users=['a','b','c']"
)
expected = df[
(df.ts >= Timestamp("2012-02-01")) & df.users.isin(["a", "b", "c"])
]
tm.assert_frame_equal(expected, result)
# big selector along the columns
selector = ["a", "b", "c"] + ["a{i:03d}".format(i=i) for i in range(60)]
result = store.select(
"df", "ts>=Timestamp('2012-02-01') and users=selector"
)
expected = df[(df.ts >= Timestamp("2012-02-01")) & df.users.isin(selector)]
tm.assert_frame_equal(expected, result)
selector = range(100, 200)
result = store.select("df", "B=selector")
expected = df[df.B.isin(selector)]
tm.assert_frame_equal(expected, result)
assert len(result) == 100
# big selector along the index
selector = Index(df.ts[0:100].values)
result = store.select("df", "ts=selector")
expected = df[df.ts.isin(selector.values)]
tm.assert_frame_equal(expected, result)
assert len(result) == 100
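    # A minimal sketch (not part of the original suite): a Python list bound
    # to a local name can be referenced inside a where string, which behaves
    # like an isin-style selection on a data column. The key name "users_df"
    # is hypothetical.
    def _example_list_selector(self, setup_path):
        df = DataFrame({"users": list("aabbccdd"), "A": np.arange(8)})
        with ensure_clean_store(setup_path) as store:
            store.append("users_df", df, data_columns=["users"])
            selector = ["a", "c"]  # noqa - referenced inside the where string
            result = store.select("users_df", "users=selector")
            tm.assert_frame_equal(result, df[df.users.isin(selector)])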
def test_select_iterator(self, setup_path):
# single table
with ensure_clean_store(setup_path) as store:
df = tm.makeTimeDataFrame(500)
_maybe_remove(store, "df")
store.append("df", df)
expected = store.select("df")
results = list(store.select("df", iterator=True))
result = concat(results)
tm.assert_frame_equal(expected, result)
results = list(store.select("df", chunksize=100))
assert len(results) == 5
result = concat(results)
tm.assert_frame_equal(expected, result)
results = list(store.select("df", chunksize=150))
result = concat(results)
tm.assert_frame_equal(result, expected)
with ensure_clean_path(setup_path) as path:
df = tm.makeTimeDataFrame(500)
df.to_hdf(path, "df_non_table")
with pytest.raises(TypeError):
read_hdf(path, "df_non_table", chunksize=100)
with pytest.raises(TypeError):
read_hdf(path, "df_non_table", iterator=True)
with ensure_clean_path(setup_path) as path:
df = tm.makeTimeDataFrame(500)
df.to_hdf(path, "df", format="table")
results = list(read_hdf(path, "df", chunksize=100))
result = concat(results)
assert len(results) == 5
tm.assert_frame_equal(result, df)
tm.assert_frame_equal(result, read_hdf(path, "df"))
# multiple
with ensure_clean_store(setup_path) as store:
df1 = tm.makeTimeDataFrame(500)
store.append("df1", df1, data_columns=True)
df2 = tm.makeTimeDataFrame(500).rename(columns="{}_2".format)
df2["foo"] = "bar"
store.append("df2", df2)
df = concat([df1, df2], axis=1)
# full selection
expected = store.select_as_multiple(["df1", "df2"], selector="df1")
results = list(
store.select_as_multiple(["df1", "df2"], selector="df1", chunksize=150)
)
result = concat(results)
tm.assert_frame_equal(expected, result)
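    # A minimal sketch (not part of the original suite): read_hdf with
    # chunksize returns an iterator of frames whose concatenation equals the
    # full table. The key name "iter_df" and the chunk size are illustrative.
    def _example_chunked_read(self, setup_path):
        df = tm.makeTimeDataFrame(50)
        with ensure_clean_path(setup_path) as path:
            df.to_hdf(path, "iter_df", format="table")
            chunks = list(read_hdf(path, "iter_df", chunksize=20))
            assert len(chunks) == 3  # 20 + 20 + 10 rows
            tm.assert_frame_equal(concat(chunks), df)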
def test_select_iterator_complete_8014(self, setup_path):
# GH 8014
# using iterator and where clause
chunksize = 1e4
# no iterator
with ensure_clean_store(setup_path) as store:
expected = tm.makeTimeDataFrame(100064, "S")
_maybe_remove(store, "df")
store.append("df", expected)
beg_dt = expected.index[0]
end_dt = expected.index[-1]
# select w/o iteration and no where clause works
result = store.select("df")
tm.assert_frame_equal(expected, result)
# select w/o iterator and where clause, single term, begin
# of range, works
where = "index >= '{beg_dt}'".format(beg_dt=beg_dt)
result = store.select("df", where=where)
tm.assert_frame_equal(expected, result)
# select w/o iterator and where clause, single term, end
# of range, works
where = "index <= '{end_dt}'".format(end_dt=end_dt)
result = store.select("df", where=where)
tm.assert_frame_equal(expected, result)
# select w/o iterator and where clause, inclusive range,
# works
where = "index >= '{beg_dt}' & index <= '{end_dt}'".format(
beg_dt=beg_dt, end_dt=end_dt
)
result = store.select("df", where=where)
tm.assert_frame_equal(expected, result)
# with iterator, full range
with ensure_clean_store(setup_path) as store:
expected = tm.makeTimeDataFrame(100064, "S")
_maybe_remove(store, "df")
store.append("df", expected)
beg_dt = expected.index[0]
end_dt = expected.index[-1]
# select w/iterator and no where clause works
results = list(store.select("df", chunksize=chunksize))
result = concat(results)
tm.assert_frame_equal(expected, result)
# select w/iterator and where clause, single term, begin of range
where = "index >= '{beg_dt}'".format(beg_dt=beg_dt)
results = list(store.select("df", where=where, chunksize=chunksize))
result = concat(results)
tm.assert_frame_equal(expected, result)
# select w/iterator and where clause, single term, end of range
where = "index <= '{end_dt}'".format(end_dt=end_dt)
results = list(store.select("df", where=where, chunksize=chunksize))
result = concat(results)
tm.assert_frame_equal(expected, result)
# select w/iterator and where clause, inclusive range
where = "index >= '{beg_dt}' & index <= '{end_dt}'".format(
beg_dt=beg_dt, end_dt=end_dt
)
results = list(store.select("df", where=where, chunksize=chunksize))
result = concat(results)
tm.assert_frame_equal(expected, result)
def test_select_iterator_non_complete_8014(self, setup_path):
# GH 8014
# using iterator and where clause
chunksize = 1e4
# with iterator, non complete range
with ensure_clean_store(setup_path) as store:
expected = tm.makeTimeDataFrame(100064, "S")
_maybe_remove(store, "df")
store.append("df", expected)
beg_dt = expected.index[1]
end_dt = expected.index[-2]
# select w/iterator and where clause, single term, begin of range
where = "index >= '{beg_dt}'".format(beg_dt=beg_dt)
results = list(store.select("df", where=where, chunksize=chunksize))
result = concat(results)
rexpected = expected[expected.index >= beg_dt]
tm.assert_frame_equal(rexpected, result)
# select w/iterator and where clause, single term, end of range
where = "index <= '{end_dt}'".format(end_dt=end_dt)
results = list(store.select("df", where=where, chunksize=chunksize))
result = concat(results)
rexpected = expected[expected.index <= end_dt]
tm.assert_frame_equal(rexpected, result)
# select w/iterator and where clause, inclusive range
where = "index >= '{beg_dt}' & index <= '{end_dt}'".format(
beg_dt=beg_dt, end_dt=end_dt
)
results = list(store.select("df", where=where, chunksize=chunksize))
result = concat(results)
rexpected = expected[
(expected.index >= beg_dt) & (expected.index <= end_dt)
]
tm.assert_frame_equal(rexpected, result)
# with iterator, empty where
with ensure_clean_store(setup_path) as store:
expected = tm.makeTimeDataFrame(100064, "S")
_maybe_remove(store, "df")
store.append("df", expected)
end_dt = expected.index[-1]
# select w/iterator and where clause, single term, begin of range
where = "index > '{end_dt}'".format(end_dt=end_dt)
results = list(store.select("df", where=where, chunksize=chunksize))
assert 0 == len(results)
def test_select_iterator_many_empty_frames(self, setup_path):
# GH 8014
# using iterator and where clause can return many empty
# frames.
chunksize = int(1e4)
# with iterator, range limited to the first chunk
with ensure_clean_store(setup_path) as store:
expected = tm.makeTimeDataFrame(100000, "S")
_maybe_remove(store, "df")
store.append("df", expected)
beg_dt = expected.index[0]
end_dt = expected.index[chunksize - 1]
# select w/iterator and where clause, single term, begin of range
where = "index >= '{beg_dt}'".format(beg_dt=beg_dt)
results = list(store.select("df", where=where, chunksize=chunksize))
result = concat(results)
rexpected = expected[expected.index >= beg_dt]
tm.assert_frame_equal(rexpected, result)
# select w/iterator and where clause, single term, end of range
where = "index <= '{end_dt}'".format(end_dt=end_dt)
results = list(store.select("df", where=where, chunksize=chunksize))
assert len(results) == 1
result = concat(results)
rexpected = expected[expected.index <= end_dt]
tm.assert_frame_equal(rexpected, result)
# select w/iterator and where clause, inclusive range
where = "index >= '{beg_dt}' & index <= '{end_dt}'".format(
beg_dt=beg_dt, end_dt=end_dt
)
results = list(store.select("df", where=where, chunksize=chunksize))
# should be 1, is 10
assert len(results) == 1
result = concat(results)
rexpected = expected[
(expected.index >= beg_dt) & (expected.index <= end_dt)
]
tm.assert_frame_equal(rexpected, result)
# select w/iterator and where clause which selects
# *nothing*.
#
# To be consistent with Python idiom I suggest this should
# return [] e.g. `for e in []: print True` never prints
# True.
where = "index <= '{beg_dt}' & index >= '{end_dt}'".format(
beg_dt=beg_dt, end_dt=end_dt
)
results = list(store.select("df", where=where, chunksize=chunksize))
# should be []
assert len(results) == 0
@pytest.mark.filterwarnings(
"ignore:\\nthe :pandas.io.pytables.AttributeConflictWarning"
)
def test_retain_index_attributes(self, setup_path):
# GH 3499, losing frequency info on index recreation
df = DataFrame(
dict(A=Series(range(3), index=date_range("2000-1-1", periods=3, freq="H")))
)
with ensure_clean_store(setup_path) as store:
_maybe_remove(store, "data")
store.put("data", df, format="table")
result = store.get("data")
tm.assert_frame_equal(df, result)
for attr in ["freq", "tz", "name"]:
for idx in ["index", "columns"]:
assert getattr(getattr(df, idx), attr, None) == getattr(
getattr(result, idx), attr, None
)
# try to append a table with a different frequency
with catch_warnings(record=True):
df2 = DataFrame(
dict(
A=Series(
range(3), index=date_range("2002-1-1", periods=3, freq="D")
)
)
)
store.append("data", df2)
assert store.get_storer("data").info["index"]["freq"] is None
# this is ok
_maybe_remove(store, "df2")
df2 = DataFrame(
dict(
A=Series(
range(3),
index=[
Timestamp("20010101"),
Timestamp("20010102"),
Timestamp("20020101"),
],
)
)
)
store.append("df2", df2)
df3 = DataFrame(
dict(
A=Series(
range(3), index=date_range("2002-1-1", periods=3, freq="D")
)
)
)
store.append("df2", df3)
@pytest.mark.filterwarnings(
"ignore:\\nthe :pandas.io.pytables.AttributeConflictWarning"
)
def test_retain_index_attributes2(self, setup_path):
with ensure_clean_path(setup_path) as path:
with catch_warnings(record=True):
df = DataFrame(
dict(
A=Series(
range(3), index=date_range("2000-1-1", periods=3, freq="H")
)
)
)
df.to_hdf(path, "data", mode="w", append=True)
df2 = DataFrame(
dict(
A=Series(
range(3), index=date_range("2002-1-1", periods=3, freq="D")
)
)
)
df2.to_hdf(path, "data", append=True)
idx = date_range("2000-1-1", periods=3, freq="H")
idx.name = "foo"
df = DataFrame(dict(A=Series(range(3), index=idx)))
df.to_hdf(path, "data", mode="w", append=True)
assert read_hdf(path, "data").index.name == "foo"
with catch_warnings(record=True):
idx2 = date_range("2001-1-1", periods=3, freq="H")
idx2.name = "bar"
df2 = DataFrame(dict(A=Series(range(3), index=idx2)))
df2.to_hdf(path, "data", append=True)
assert read_hdf(path, "data").index.name is None
def test_frame_select(self, setup_path):
df = tm.makeTimeDataFrame()
with ensure_clean_store(setup_path) as store:
store.put("frame", df, format="table")
date = df.index[len(df) // 2]
crit1 = Term("index>=date")
assert crit1.env.scope["date"] == date
crit2 = "columns=['A', 'D']"
crit3 = "columns=A"
result = store.select("frame", [crit1, crit2])
expected = df.loc[date:, ["A", "D"]]
tm.assert_frame_equal(result, expected)
result = store.select("frame", [crit3])
expected = df.loc[:, ["A"]]
tm.assert_frame_equal(result, expected)
# invalid terms
df = tm.makeTimeDataFrame()
store.append("df_time", df)
with pytest.raises(ValueError):
store.select("df_time", "index>0")
# can't select if not written as table
# store['frame'] = df
# with pytest.raises(ValueError):
# store.select('frame', [crit1, crit2])
def test_frame_select_complex(self, setup_path):
# select via complex criteria
df = tm.makeTimeDataFrame()
df["string"] = "foo"
df.loc[df.index[0:4], "string"] = "bar"
with ensure_clean_store(setup_path) as store:
store.put("df", df, format="table", data_columns=["string"])
# empty
result = store.select("df", 'index>df.index[3] & string="bar"')
expected = df.loc[(df.index > df.index[3]) & (df.string == "bar")]
tm.assert_frame_equal(result, expected)
result = store.select("df", 'index>df.index[3] & string="foo"')
expected = df.loc[(df.index > df.index[3]) & (df.string == "foo")]
tm.assert_frame_equal(result, expected)
# or
result = store.select("df", 'index>df.index[3] | string="bar"')
expected = df.loc[(df.index > df.index[3]) | (df.string == "bar")]
tm.assert_frame_equal(result, expected)
result = store.select(
"df", "(index>df.index[3] & " 'index<=df.index[6]) | string="bar"'
)
expected = df.loc[
((df.index > df.index[3]) & (df.index <= df.index[6]))
| (df.string == "bar")
]
tm.assert_frame_equal(result, expected)
# invert
result = store.select("df", 'string!="bar"')
expected = df.loc[df.string != "bar"]
tm.assert_frame_equal(result, expected)
# invert not implemented in numexpr :(
with pytest.raises(NotImplementedError):
store.select("df", '~(string="bar")')
# invert ok for filters
result = store.select("df", "~(columns=['A','B'])")
expected = df.loc[:, df.columns.difference(["A", "B"])]
tm.assert_frame_equal(result, expected)
# in
result = store.select("df", "index>df.index[3] & columns in ['A','B']")
expected = df.loc[df.index > df.index[3]].reindex(columns=["A", "B"])
tm.assert_frame_equal(result, expected)
def test_frame_select_complex2(self, setup_path):
with ensure_clean_path(["parms.hdf", "hist.hdf"]) as paths:
pp, hh = paths
# use non-trivial selection criteria
parms = DataFrame({"A": [1, 1, 2, 2, 3]})
parms.to_hdf(pp, "df", mode="w", format="table", data_columns=["A"])
selection = read_hdf(pp, "df", where="A=[2,3]")
hist = DataFrame(
np.random.randn(25, 1),
columns=["data"],
index=MultiIndex.from_tuples(
[(i, j) for i in range(5) for j in range(5)], names=["l1", "l2"]
),
)
hist.to_hdf(hh, "df", mode="w", format="table")
expected = read_hdf(hh, "df", where="l1=[2, 3, 4]")
# scope with list like
l = selection.index.tolist() # noqa
store = HDFStore(hh)
result = store.select("df", where="l1=l")
tm.assert_frame_equal(result, expected)
store.close()
result = read_hdf(hh, "df", where="l1=l")
tm.assert_frame_equal(result, expected)
# index
index = selection.index # noqa
result = read_hdf(hh, "df", where="l1=index")
tm.assert_frame_equal(result, expected)
result = read_hdf(hh, "df", where="l1=selection.index")
tm.assert_frame_equal(result, expected)
result = read_hdf(hh, "df", where="l1=selection.index.tolist()")
tm.assert_frame_equal(result, expected)
result = read_hdf(hh, "df", where="l1=list(selection.index)")
tm.assert_frame_equal(result, expected)
# scope with index
store = HDFStore(hh)
result = store.select("df", where="l1=index")
tm.assert_frame_equal(result, expected)
result = store.select("df", where="l1=selection.index")
tm.assert_frame_equal(result, expected)
result = store.select("df", where="l1=selection.index.tolist()")
tm.assert_frame_equal(result, expected)
result = store.select("df", where="l1=list(selection.index)")
tm.assert_frame_equal(result, expected)
store.close()
def test_invalid_filtering(self, setup_path):
# can't use more than one filter (atm)
df = tm.makeTimeDataFrame()
with ensure_clean_store(setup_path) as store:
store.put("df", df, format="table")
# not implemented
with pytest.raises(NotImplementedError):
store.select("df", "columns=['A'] | columns=['B']")
# in theory we could deal with this
with pytest.raises(NotImplementedError):
store.select("df", "columns=['A','B'] & columns=['C']")
def test_string_select(self, setup_path):
# GH 2973
with ensure_clean_store(setup_path) as store:
df = tm.makeTimeDataFrame()
# test string ==/!=
df["x"] = "none"
df.loc[2:7, "x"] = ""
store.append("df", df, data_columns=["x"])
result = store.select("df", "x=none")
expected = df[df.x == "none"]
tm.assert_frame_equal(result, expected)
result = store.select("df", "x!=none")
expected = df[df.x != "none"]
tm.assert_frame_equal(result, expected)
df2 = df.copy()
df2.loc[df2.x == "", "x"] = np.nan
store.append("df2", df2, data_columns=["x"])
result = store.select("df2", "x!=none")
expected = df2[isna(df2.x)]
tm.assert_frame_equal(result, expected)
# int ==/!=
df["int"] = 1
df.loc[2:7, "int"] = 2
store.append("df3", df, data_columns=["int"])
result = store.select("df3", "int=2")
expected = df[df.int == 2]
tm.assert_frame_equal(result, expected)
result = store.select("df3", "int!=2")
expected = df[df.int != 2]
tm.assert_frame_equal(result, expected)
def test_read_column(self, setup_path):
df = tm.makeTimeDataFrame()
with ensure_clean_store(setup_path) as store:
_maybe_remove(store, "df")
# GH 17912
# HDFStore.select_column should raise a KeyError
# exception if the key is not a valid store
with pytest.raises(KeyError, match="No object named df in the file"):
store.select_column("df", "index")
store.append("df", df)
# error
with pytest.raises(
KeyError, match=re.escape("'column [foo] not found in the table'")
):
store.select_column("df", "foo")
with pytest.raises(Exception):
store.select_column("df", "index", where=["index>5"])
# valid
result = store.select_column("df", "index")
tm.assert_almost_equal(result.values, Series(df.index).values)
assert isinstance(result, Series)
# not a data indexable column
with pytest.raises(ValueError):
store.select_column("df", "values_block_0")
# a data column
df2 = df.copy()
df2["string"] = "foo"
store.append("df2", df2, data_columns=["string"])
result = store.select_column("df2", "string")
tm.assert_almost_equal(result.values, df2["string"].values)
# a data column with NaNs, result excludes the NaNs
df3 = df.copy()
df3["string"] = "foo"
df3.loc[4:6, "string"] = np.nan
store.append("df3", df3, data_columns=["string"])
result = store.select_column("df3", "string")
tm.assert_almost_equal(result.values, df3["string"].values)
# start/stop
result = store.select_column("df3", "string", start=2)
tm.assert_almost_equal(result.values, df3["string"].values[2:])
result = store.select_column("df3", "string", start=-2)
tm.assert_almost_equal(result.values, df3["string"].values[-2:])
result = store.select_column("df3", "string", stop=2)
tm.assert_almost_equal(result.values, df3["string"].values[:2])
result = store.select_column("df3", "string", stop=-2)
tm.assert_almost_equal(result.values, df3["string"].values[:-2])
result = store.select_column("df3", "string", start=2, stop=-2)
tm.assert_almost_equal(result.values, df3["string"].values[2:-2])
result = store.select_column("df3", "string", start=-2, stop=2)
tm.assert_almost_equal(result.values, df3["string"].values[-2:2])
# GH 10392 - make sure column name is preserved
df4 = DataFrame({"A": np.random.randn(10), "B": "foo"})
store.append("df4", df4, data_columns=True)
expected = df4["B"]
result = store.select_column("df4", "B")
tm.assert_series_equal(result, expected)
def test_coordinates(self, setup_path):
df = tm.makeTimeDataFrame()
with ensure_clean_store(setup_path) as store:
_maybe_remove(store, "df")
store.append("df", df)
# all
c = store.select_as_coordinates("df")
assert (c.values == np.arange(len(df.index))).all()
# get coordinates back & test vs frame
_maybe_remove(store, "df")
df = DataFrame(dict(A=range(5), B=range(5)))
store.append("df", df)
c = store.select_as_coordinates("df", ["index<3"])
assert (c.values == np.arange(3)).all()
result = store.select("df", where=c)
expected = df.loc[0:2, :]
tm.assert_frame_equal(result, expected)
c = store.select_as_coordinates("df", ["index>=3", "index<=4"])
assert (c.values == np.arange(2) + 3).all()
result = store.select("df", where=c)
expected = df.loc[3:4, :]
tm.assert_frame_equal(result, expected)
assert isinstance(c, Index)
# multiple tables
_maybe_remove(store, "df1")
_maybe_remove(store, "df2")
df1 = tm.makeTimeDataFrame()
df2 = tm.makeTimeDataFrame().rename(columns="{}_2".format)
store.append("df1", df1, data_columns=["A", "B"])
store.append("df2", df2)
c = store.select_as_coordinates("df1", ["A>0", "B>0"])
df1_result = store.select("df1", c)
df2_result = store.select("df2", c)
result = concat([df1_result, df2_result], axis=1)
expected = concat([df1, df2], axis=1)
expected = expected[(expected.A > 0) & (expected.B > 0)]
tm.assert_frame_equal(result, expected)
# pass array/mask as the coordinates
with ensure_clean_store(setup_path) as store:
df = DataFrame(
np.random.randn(1000, 2), index=date_range("20000101", periods=1000)
)
store.append("df", df)
c = store.select_column("df", "index")
where = c[DatetimeIndex(c).month == 5].index
expected = df.iloc[where]
# locations
result = store.select("df", where=where)
tm.assert_frame_equal(result, expected)
# boolean
result = store.select("df", where=where)
tm.assert_frame_equal(result, expected)
# invalid
with pytest.raises(ValueError):
store.select("df", where=np.arange(len(df), dtype="float64"))
with pytest.raises(ValueError):
store.select("df", where=np.arange(len(df) + 1))
with pytest.raises(ValueError):
store.select("df", where=np.arange(len(df)), start=5)
with pytest.raises(ValueError):
store.select("df", where=np.arange(len(df)), start=5, stop=10)
# selection with filter
selection = date_range("20000101", periods=500)
result = store.select("df", where="index in selection")
expected = df[df.index.isin(selection)]
tm.assert_frame_equal(result, expected)
# list
df = DataFrame(np.random.randn(10, 2))
store.append("df2", df)
result = store.select("df2", where=[0, 3, 5])
expected = df.iloc[[0, 3, 5]]
tm.assert_frame_equal(result, expected)
# boolean
where = [True] * 10
where[-2] = False
result = store.select("df2", where=where)
expected = df.loc[where]
tm.assert_frame_equal(result, expected)
# start/stop
result = store.select("df2", start=5, stop=10)
expected = df[5:10]
tm.assert_frame_equal(result, expected)
def test_append_to_multiple(self, setup_path):
df1 = tm.makeTimeDataFrame()
df2 = tm.makeTimeDataFrame().rename(columns="{}_2".format)
df2["foo"] = "bar"
df = concat([df1, df2], axis=1)
with ensure_clean_store(setup_path) as store:
# exceptions
with pytest.raises(ValueError):
store.append_to_multiple(
{"df1": ["A", "B"], "df2": None}, df, selector="df3"
)
with pytest.raises(ValueError):
store.append_to_multiple({"df1": None, "df2": None}, df, selector="df3")
with pytest.raises(ValueError):
store.append_to_multiple("df1", df, "df1")
# regular operation
store.append_to_multiple(
{"df1": ["A", "B"], "df2": None}, df, selector="df1"
)
result = store.select_as_multiple(
["df1", "df2"], where=["A>0", "B>0"], selector="df1"
)
expected = df[(df.A > 0) & (df.B > 0)]
tm.assert_frame_equal(result, expected)
def test_append_to_multiple_dropna(self, setup_path):
df1 = tm.makeTimeDataFrame()
df2 = tm.makeTimeDataFrame().rename(columns="{}_2".format)
df1.iloc[1, df1.columns.get_indexer(["A", "B"])] = np.nan
df = concat([df1, df2], axis=1)
with ensure_clean_store(setup_path) as store:
# dropna=True should guarantee rows are synchronized
store.append_to_multiple(
{"df1": ["A", "B"], "df2": None}, df, selector="df1", dropna=True
)
result = store.select_as_multiple(["df1", "df2"])
expected = df.dropna()
tm.assert_frame_equal(result, expected)
tm.assert_index_equal(store.select("df1").index, store.select("df2").index)
@pytest.mark.xfail(
run=False, reason="append_to_multiple_dropna_false is not raising as failed"
)
def test_append_to_multiple_dropna_false(self, setup_path):
df1 = tm.makeTimeDataFrame()
df2 = tm.makeTimeDataFrame().rename(columns="{}_2".format)
df1.iloc[1, df1.columns.get_indexer(["A", "B"])] = np.nan
df = concat([df1, df2], axis=1)
with ensure_clean_store(setup_path) as store:
# dropna=False shouldn't synchronize row indexes
store.append_to_multiple(
{"df1a": ["A", "B"], "df2a": None}, df, selector="df1a", dropna=False
)
with pytest.raises(ValueError):
store.select_as_multiple(["df1a", "df2a"])
assert not store.select("df1a").index.equals(store.select("df2a").index)
def test_select_as_multiple(self, setup_path):
df1 = tm.makeTimeDataFrame()
df2 = tm.makeTimeDataFrame().rename(columns="{}_2".format)
df2["foo"] = "bar"
with ensure_clean_store(setup_path) as store:
# no tables stored
with pytest.raises(Exception):
store.select_as_multiple(None, where=["A>0", "B>0"], selector="df1")
store.append("df1", df1, data_columns=["A", "B"])
store.append("df2", df2)
# exceptions
with pytest.raises(Exception):
store.select_as_multiple(None, where=["A>0", "B>0"], selector="df1")
with pytest.raises(Exception):
store.select_as_multiple([None], where=["A>0", "B>0"], selector="df1")
msg = "'No object named df3 in the file'"
with pytest.raises(KeyError, match=msg):
store.select_as_multiple(
["df1", "df3"], where=["A>0", "B>0"], selector="df1"
)
with pytest.raises(KeyError, match=msg):
store.select_as_multiple(["df3"], where=["A>0", "B>0"], selector="df1")
with pytest.raises(KeyError, match="'No object named df4 in the file'"):
store.select_as_multiple(
["df1", "df2"], where=["A>0", "B>0"], selector="df4"
)
# default select
result = store.select("df1", ["A>0", "B>0"])
expected = store.select_as_multiple(
["df1"], where=["A>0", "B>0"], selector="df1"
)
tm.assert_frame_equal(result, expected)
expected = store.select_as_multiple(
"df1", where=["A>0", "B>0"], selector="df1"
)
tm.assert_frame_equal(result, expected)
# multiple
result = store.select_as_multiple(
["df1", "df2"], where=["A>0", "B>0"], selector="df1"
)
expected = concat([df1, df2], axis=1)
expected = expected[(expected.A > 0) & (expected.B > 0)]
tm.assert_frame_equal(result, expected)
# multiple (diff selector)
result = store.select_as_multiple(
["df1", "df2"], where="index>df2.index[4]", selector="df2"
)
expected = concat([df1, df2], axis=1)
expected = expected[5:]
tm.assert_frame_equal(result, expected)
# test exception for diff rows
store.append("df3", tm.makeTimeDataFrame(nper=50))
with pytest.raises(ValueError):
store.select_as_multiple(
["df1", "df3"], where=["A>0", "B>0"], selector="df1"
)
@pytest.mark.skipif(
LooseVersion(tables.__version__) < LooseVersion("3.1.0"),
reason=("tables version does not support fix for nan selection bug: GH 4858"),
)
def test_nan_selection_bug_4858(self, setup_path):
with ensure_clean_store(setup_path) as store:
df = DataFrame(dict(cols=range(6), values=range(6)), dtype="float64")
df["cols"] = (df["cols"] + 10).apply(str)
df.iloc[0] = np.nan
expected = DataFrame(
dict(cols=["13.0", "14.0", "15.0"], values=[3.0, 4.0, 5.0]),
index=[3, 4, 5],
)
# write w/o the index on that particular column
store.append("df", df, data_columns=True, index=["cols"])
result = store.select("df", where="values>2.0")
tm.assert_frame_equal(result, expected)
def test_start_stop_table(self, setup_path):
with ensure_clean_store(setup_path) as store:
# table
df = DataFrame(dict(A=np.random.rand(20), B=np.random.rand(20)))
store.append("df", df)
result = store.select("df", "columns=['A']", start=0, stop=5)
expected = df.loc[0:4, ["A"]]
tm.assert_frame_equal(result, expected)
# out of range
result = store.select("df", "columns=['A']", start=30, stop=40)
assert len(result) == 0
expected = df.loc[30:40, ["A"]]
tm.assert_frame_equal(result, expected)
def test_start_stop_multiple(self, setup_path):
# GH 16209
with ensure_clean_store(setup_path) as store:
df = DataFrame({"foo": [1, 2], "bar": [1, 2]})
store.append_to_multiple(
{"selector": ["foo"], "data": None}, df, selector="selector"
)
result = store.select_as_multiple(
["selector", "data"], selector="selector", start=0, stop=1
)
expected = df.loc[[0], ["foo", "bar"]]
tm.assert_frame_equal(result, expected)
def test_start_stop_fixed(self, setup_path):
with ensure_clean_store(setup_path) as store:
# fixed, GH 8287
df = DataFrame(
dict(A=np.random.rand(20), B=np.random.rand(20)),
index=pd.date_range("20130101", periods=20),
)
store.put("df", df)
result = store.select("df", start=0, stop=5)
expected = df.iloc[0:5, :]
tm.assert_frame_equal(result, expected)
result = store.select("df", start=5, stop=10)
expected = df.iloc[5:10, :]
tm.assert_frame_equal(result, expected)
# out of range
result = store.select("df", start=30, stop=40)
expected = df.iloc[30:40, :]
tm.assert_frame_equal(result, expected)
# series
s = df.A
store.put("s", s)
result = store.select("s", start=0, stop=5)
expected = s.iloc[0:5]
tm.assert_series_equal(result, expected)
result = store.select("s", start=5, stop=10)
expected = s.iloc[5:10]
tm.assert_series_equal(result, expected)
# sparse; not implemented
df = tm.makeDataFrame()
df.iloc[3:5, 1:3] = np.nan
df.iloc[8:10, -2] = np.nan
def test_select_filter_corner(self, setup_path):
df = DataFrame(np.random.randn(50, 100))
df.index = ["{c:3d}".format(c=c) for c in df.index]
df.columns = ["{c:3d}".format(c=c) for c in df.columns]
with ensure_clean_store(setup_path) as store:
store.put("frame", df, format="table")
crit = "columns=df.columns[:75]"
result = store.select("frame", [crit])
tm.assert_frame_equal(result, df.loc[:, df.columns[:75]])
crit = "columns=df.columns[:75:2]"
result = store.select("frame", [crit])
tm.assert_frame_equal(result, df.loc[:, df.columns[:75:2]])
def test_path_pathlib(self, setup_path):
df = tm.makeDataFrame()
result = tm.round_trip_pathlib(
lambda p: df.to_hdf(p, "df"), lambda p: pd.read_hdf(p, "df")
)
tm.assert_frame_equal(df, result)
@pytest.mark.parametrize("start, stop", [(0, 2), (1, 2), (None, None)])
def test_contiguous_mixed_data_table(self, start, stop, setup_path):
# GH 17021
# ValueError when reading a contiguous mixed-data table ft. VLArray
df = DataFrame(
{
"a": Series([20111010, 20111011, 20111012]),
"b": Series(["ab", "cd", "ab"]),
}
)
with ensure_clean_store(setup_path) as store:
store.append("test_dataset", df)
result = store.select("test_dataset", start=start, stop=stop)
tm.assert_frame_equal(df[start:stop], result)
def test_path_pathlib_hdfstore(self, setup_path):
df = tm.makeDataFrame()
def writer(path):
with pd.HDFStore(path) as store:
df.to_hdf(store, "df")
def reader(path):
with pd.HDFStore(path) as store:
return pd.read_hdf(store, "df")
result = tm.round_trip_pathlib(writer, reader)
tm.assert_frame_equal(df, result)
def test_pickle_path_localpath(self, setup_path):
df = tm.makeDataFrame()
result = tm.round_trip_pathlib(
lambda p: df.to_hdf(p, "df"), lambda p: pd.read_hdf(p, "df")
)
tm.assert_frame_equal(df, result)
def test_path_localpath_hdfstore(self, setup_path):
df = tm.makeDataFrame()
def writer(path):
with pd.HDFStore(path) as store:
df.to_hdf(store, "df")
def reader(path):
with pd.HDFStore(path) as store:
return pd.read_hdf(store, "df")
result = tm.round_trip_localpath(writer, reader)
tm.assert_frame_equal(df, result)
def _check_roundtrip(self, obj, comparator, path, compression=False, **kwargs):
options = {}
if compression:
options["complib"] = _default_compressor
with ensure_clean_store(path, "w", **options) as store:
store["obj"] = obj
retrieved = store["obj"]
comparator(retrieved, obj, **kwargs)
def _check_double_roundtrip(
self, obj, comparator, path, compression=False, **kwargs
):
options = {}
if compression:
options["complib"] = compression or _default_compressor
with ensure_clean_store(path, "w", **options) as store:
store["obj"] = obj
retrieved = store["obj"]
comparator(retrieved, obj, **kwargs)
store["obj"] = retrieved
again = store["obj"]
comparator(again, obj, **kwargs)
def _check_roundtrip_table(self, obj, comparator, path, compression=False):
options = {}
if compression:
options["complib"] = _default_compressor
with ensure_clean_store(path, "w", **options) as store:
store.put("obj", obj, format="table")
retrieved = store["obj"]
comparator(retrieved, obj)
def test_multiple_open_close(self, setup_path):
# gh-4409: open & close multiple times
with ensure_clean_path(setup_path) as path:
df = tm.makeDataFrame()
df.to_hdf(path, "df", mode="w", format="table")
# single
store = HDFStore(path)
assert "CLOSED" not in store.info()
assert store.is_open
store.close()
assert "CLOSED" in store.info()
assert not store.is_open
with ensure_clean_path(setup_path) as path:
if pytables._table_file_open_policy_is_strict:
# multiples
store1 = HDFStore(path)
with pytest.raises(ValueError):
HDFStore(path)
store1.close()
else:
# multiples
store1 = HDFStore(path)
store2 = HDFStore(path)
assert "CLOSED" not in store1.info()
assert "CLOSED" not in store2.info()
assert store1.is_open
assert store2.is_open
store1.close()
assert "CLOSED" in store1.info()
assert not store1.is_open
assert "CLOSED" not in store2.info()
assert store2.is_open
store2.close()
assert "CLOSED" in store1.info()
assert "CLOSED" in store2.info()
assert not store1.is_open
assert not store2.is_open
# nested close
store = HDFStore(path, mode="w")
store.append("df", df)
store2 = HDFStore(path)
store2.append("df2", df)
store2.close()
assert "CLOSED" in store2.info()
assert not store2.is_open
store.close()
assert "CLOSED" in store.info()
assert not store.is_open
# double closing
store = HDFStore(path, mode="w")
store.append("df", df)
store2 = HDFStore(path)
store.close()
assert "CLOSED" in store.info()
assert not store.is_open
store2.close()
assert "CLOSED" in store2.info()
assert not store2.is_open
# ops on a closed store
with ensure_clean_path(setup_path) as path:
df = tm.makeDataFrame()
df.to_hdf(path, "df", mode="w", format="table")
store = HDFStore(path)
store.close()
with pytest.raises(ClosedFileError):
store.keys()
with pytest.raises(ClosedFileError):
"df" in store
with pytest.raises(ClosedFileError):
len(store)
with pytest.raises(ClosedFileError):
store["df"]
with pytest.raises(AttributeError):
store.df
with pytest.raises(ClosedFileError):
store.select("df")
with pytest.raises(ClosedFileError):
store.get("df")
with pytest.raises(ClosedFileError):
store.append("df2", df)
with pytest.raises(ClosedFileError):
store.put("df3", df)
with pytest.raises(ClosedFileError):
store.get_storer("df2")
with pytest.raises(ClosedFileError):
store.remove("df2")
with pytest.raises(ClosedFileError, match="file is not open"):
store.select("df")
def test_pytables_native_read(self, datapath, setup_path):
with ensure_clean_store(
datapath("io", "data", "legacy_hdf/pytables_native.h5"), mode="r"
) as store:
d2 = store["detector/readout"]
assert isinstance(d2, DataFrame)
@pytest.mark.skipif(
is_platform_windows(), reason="native2 read fails oddly on windows"
)
def test_pytables_native2_read(self, datapath, setup_path):
with ensure_clean_store(
datapath("io", "data", "legacy_hdf", "pytables_native2.h5"), mode="r"
) as store:
str(store)
d1 = store["detector"]
assert isinstance(d1, DataFrame)
@td.xfail_non_writeable
def test_legacy_table_fixed_format_read_py2(self, datapath, setup_path):
# GH 24510
# legacy table with fixed format written in Python 2
with ensure_clean_store(
datapath("io", "data", "legacy_hdf", "legacy_table_fixed_py2.h5"), mode="r"
) as store:
result = store.select("df")
expected = pd.DataFrame(
[[1, 2, 3, "D"]],
columns=["A", "B", "C", "D"],
index=pd.Index(["ABC"], name="INDEX_NAME"),
)
tm.assert_frame_equal(expected, result)
def test_legacy_table_read_py2(self, datapath, setup_path):
# issue: 24925
# legacy table written in Python 2
with ensure_clean_store(
datapath("io", "data", "legacy_hdf", "legacy_table_py2.h5"), mode="r"
) as store:
result = store.select("table")
expected = pd.DataFrame({"a": ["a", "b"], "b": [2, 3]})
tm.assert_frame_equal(expected, result)
def test_copy(self, setup_path):
with catch_warnings(record=True):
def do_copy(f, new_f=None, keys=None, propindexes=True, **kwargs):
try:
store = HDFStore(f, "r")
if new_f is None:
import tempfile
fd, new_f = tempfile.mkstemp()
tstore = store.copy(
new_f, keys=keys, propindexes=propindexes, **kwargs
)
# check keys
if keys is None:
keys = store.keys()
assert set(keys) == set(tstore.keys())
# check indices & nrows
for k in tstore.keys():
if tstore.get_storer(k).is_table:
new_t = tstore.get_storer(k)
orig_t = store.get_storer(k)
assert orig_t.nrows == new_t.nrows
# check propindexes
if propindexes:
for a in orig_t.axes:
if a.is_indexed:
assert new_t[a.name].is_indexed
finally:
safe_close(store)
safe_close(tstore)
try:
os.close(fd)
except (OSError, ValueError):
pass
safe_remove(new_f)
# new table
df = tm.makeDataFrame()
try:
path = create_tempfile(setup_path)
st = HDFStore(path)
st.append("df", df, data_columns=["A"])
st.close()
do_copy(f=path)
do_copy(f=path, propindexes=False)
finally:
safe_remove(path)
def test_store_datetime_fractional_secs(self, setup_path):
with ensure_clean_store(setup_path) as store:
dt = datetime.datetime(2012, 1, 2, 3, 4, 5, 123456)
series = Series([0], [dt])
store["a"] = series
assert store["a"].index[0] == dt
def test_tseries_indices_series(self, setup_path):
with ensure_clean_store(setup_path) as store:
idx = tm.makeDateIndex(10)
ser = Series(np.random.randn(len(idx)), idx)
store["a"] = ser
result = store["a"]
tm.assert_series_equal(result, ser)
assert result.index.freq == ser.index.freq
tm.assert_class_equal(result.index, ser.index, obj="series index")
idx = tm.makePeriodIndex(10)
ser = Series(np.random.randn(len(idx)), idx)
store["a"] = ser
result = store["a"]
tm.assert_series_equal(result, ser)
assert result.index.freq == ser.index.freq
tm.assert_class_equal(result.index, ser.index, obj="series index")
def test_tseries_indices_frame(self, setup_path):
with ensure_clean_store(setup_path) as store:
idx = tm.makeDateIndex(10)
df = DataFrame(np.random.randn(len(idx), 3), index=idx)
store["a"] = df
result = store["a"]
tm.assert_frame_equal(result, df)
assert result.index.freq == df.index.freq
tm.assert_class_equal(result.index, df.index, obj="dataframe index")
idx = tm.makePeriodIndex(10)
df = DataFrame(np.random.randn(len(idx), 3), idx)
store["a"] = df
result = store["a"]
tm.assert_frame_equal(result, df)
assert result.index.freq == df.index.freq
tm.assert_class_equal(result.index, df.index, obj="dataframe index")
#Copyright 2013 <NAME> (<EMAIL>)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Some parts of this script were written by other authors and in some cases
# modified by <NAME>. The original authors are quoted in each routine.
#
import os, glob
from pylab import *
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import matplotlib.mlab as mlab
import re
from scipy import interpolate
import time
import os
from scipy import ndimage
from numpy import sin,cos,round,isscalar,array,ndarray,ones_like,pi
from astropy.io.fits import open
from astropy.io import fits as pf
from astropy.table import Table
############################################################################
#
#
# Convert coordinates from equatorial to galactic coordinates.
#
# Author: <NAME>
# Modified by <NAME>
# Modified by <NAME>, introducing a correction to the DE calculation. Not fully tested yet.
#
############################################################################
def hms2deg(RA,DE):
'''
Convert RA = [hh,mm,ss] and DEC = [DD,mm,ss] to degrees.
Usage: hms2deg(RA,DEC).
Adapted by: <NAME>
'''
RA0 = array(RA).reshape(-1,3)
DE0 = array(DE).reshape(-1,3)
RA = 15.*RA0[:,0] + 15./60.*RA0[:,1] + 15./3600.*RA0[:,2]
# DE = DE0[:,0] + 1./60.*DE0[:,1] + 1./3600.*DE0[:,2]
if DE0[:,0][0] >= 0.:
DE=((DE0[:,2]/60+DE0[:,1])/60 + DE0[:,0])
elif DE0[:,0][0] < 0.:
DE=(-1*(DE0[:,2]/60+DE0[:,1])/60 + DE0[:,0])
return RA,DE
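# Illustrative use of hms2deg (the coordinates below are arbitrary example values,
# not taken from any catalogue used in this script):
# >>> ra_deg, de_deg = hms2deg([12, 49, 0.0], [27, 24, 0.0])
# >>> ra_deg, de_deg # (array([192.25]), array([27.4]))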
def eq2galCoords(RA,DE,units='observers'):
deg2rad = pi/180.
rad2deg = 180./pi
kpc2km = 3.085678e16
yr2s = 31557600.
# Direction to the North Galactic Pole in Equatorial coordinates
RA_GP = 15*(12.+49./60.+ 8.13e-4 *(2000.-1950.)) * deg2rad
DE_GP = (27.4 - 5.44e-3 *(2000.-1950.)) * deg2rad
# Galactic longitude of the North Celestial Pole
l_NCP = (123. - 1.33e-3 *(2000.-1950.)) * deg2rad
if units == 'observers':
(RA,DE)=hms2deg(RA,DE)
if units == 'deg':
RA = array(RA).reshape(-1)
DE = array(DE).reshape(-1)
sdp = sin(DE_GP)
cdp = sqrt(1. - sdp*sdp)
sdec= sin(DE*deg2rad)
cdec= sqrt(1. - sdec*sdec)
ras = RA*deg2rad - RA_GP
sgb = sdec*sdp + cdec*cdp*cos(ras)
b = arcsin(sgb)*rad2deg
cgb = sqrt(1. - sgb*sgb)
sine= cdec*sin(ras) / cgb
cose= (sdec-sdp*sgb) / (cdp*cgb)
l = (l_NCP - arctan2(sine,cose))*rad2deg
# lt0 = argwhere(l < 0.).reshape(-1)
# l[lt0] = l[lt0]+360.
return (l,b)
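# Illustrative call (same example values as above, purely for demonstration); the
# default units='observers' expects [hh,mm,ss]/[DD,mm,ss] lists instead of degrees:
# >>> l, b = eq2galCoords(192.25, 27.4, units='deg')
# >>> # l, b are arrays holding the galactic longitude and latitude in degrees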
############################################################################
#
#
# Reddening Laws
#
# Written/adapted by <NAME>
#
############################################################################
def ccm(wavelength,rv):
'''
CCM -- Compute the CCM extinction law A(lambda)/A(V) for a given wavelength (in Angstroms) and a given RV (typically 3.1).
Usage: ccm(wavelength,rv)
Implemented by <NAME>
'''
# Convert to inverse microns
x = 10000. / wavelength
# Compute a(x) and b(x)
if (x < 0.3):
print("Wavelength out of range of extinction function")
elif (x < 1.1):
y = x ** 1.61
a = 0.574 * y
b = -0.527 * y
elif (x < 3.3):
y = x - 1.82
a = 1 + y * (0.17699 + y * (-0.50447 + y * (-0.02427 +
y * (0.72085 + y * (0.01979 + y * (-0.77530 + y * 0.32999))))))
b = y * (1.41338 + y * (2.28305 + y * (1.07233 + y * (-5.38434 +
y * (-0.62251 + y * (5.30260 + y * (-2.09002)))))))
elif (x < 5.9):
y = (x - 4.67) ** 2
a = 1.752 - 0.316 * x - 0.104 / (y + 0.341)
b = -3.090 + 1.825 * x + 1.206 / (y + 0.263)
elif (x < 8.0):
y = (x - 4.67) ** 2
a = 1.752 - 0.316 * x - 0.104 / (y + 0.341)
b = -3.090 + 1.825 * x + 1.206 / (y + 0.263)
y = x - 5.9
a = a - 0.04473 * y**2 - 0.009779 * y**3
b = b + 0.2130 * y**2 + 0.1207 * y**3
elif (x <= 10.0):
y = x - 8
a = -1.072 - 0.628 * y + 0.137 * y**2 - 0.070 * y**3
b = 13.670 + 4.257 * y - 0.420 * y**2 + 0.374 * y**3
else:
print("\n\n >>>>> Lambda=",wavelength,": Wavelength out of range for the dust extintion law <<<<\n\n")
# Compute A(lambda)/A(V)
y = a + b / rv
return (y)
# print y
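# Quick sanity check (illustrative numbers only): near the V band (~5500 Angstrom) the
# law returns A(lambda)/A(V) close to 1, and deredden() below converts it into a flux
# correction factor via 10**(0.4 * Av * ccm(lambda, rv)):
# >>> ccm(5500., 3.1) # ~1.0
# >>> 10. ** (0.4 * 3.1 * 0.1 * ccm(5500., 3.1)) # correction factor for E(B-V)=0.1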
def ftzpt(lamb,flx,ebv):
'''
Fitzpatrick, <NAME> (1999, PASP, 111, 63) extinction law
usage: ftzpt(lamb,flx,ebv)
Implemented by: <NAME>
Adapted by: <NAME>
'''
x = (1.e4)/lamb
flx_cor = flx*10**(.4*ebv*(1.e-5 + .22707*x + 1.95243*x**2 - 2.67596*x**3 + 2.6507*x**4 - 1.26812*x**5 + 0.27549*x**6 - 0.02212*x**7))
return(flx_cor)
def seaton(wavelength,ebv):
'''
Seaton (1979, MNRAS, 187P, 73) extinction law
Implemented by: <NAME>
Adapted by: <NAME>
'''
X=[1.0, 1.1 ,1.2 ,1.3 ,1.4 ,1.5 ,1.6 ,1.7 ,1.8 ,1.9 ,2.0 ,2.1 ,2.2 ,2.3 ,2.4 ,2.5 ,2.6 ,2.7 ]
A=[1.36,1.44,1.84,2.04,2.24,2.44,2.66,2.88,3.14,3.36,3.56,3.77,3.96,4.15,4.26,4.40,4.52,4.64]
x=10000.0/wavelength
GetPts=interpolate.interp1d(X,A)
try:
y=GetPts(x)
except:
print("\n\n >>>>> Lambda=",wavelength,": Wavelength out of range for the dust extintion law <<<<\n\n")
cor= y * ebv
return (cor)
def calzetti(wavelength,ebv):
'''
Calzetti et al. (2000, ApJ, 533, 682) extinction law.
Implemented by: <NAME>
Adapted by: <NAME>
'''
x=wavelength/10000.0
if x< 0.12:
print("\n\n >>>>> Lambda=",wavelength,": Wavelength out of range for the dust extintion law <<<<\n\n")
elif 0.12 <= x <= 0.63:
k=2.659*(-2.156+(1.509/x)-(0.198/x**2)+(0.011/x**3)) + 4.05
elif 0.63 <= x <= 2.20:
k=2.659*(-1.857+(1.040/x)) + 4.05
else:
print("\n\n >>>>> Lambda=",wavelength,": Wavelength out of range for the dust extintion law <<<<\n\n")
cor = k*ebv
return (cor)
############################################################################
#
#
# Routine to apply reddening Laws
#
# Written by <NAME>
############################################################################
def deredden(spectra,law,ebv,rv=3.1,quiet=False):
'''
This routine applies reddening corrections using one of the following reddening laws:
- ccm - Cardelli, Clayton, and Mathis (1989, ApJ, 345, 245)
- calzetti - Calzetti et al. (2000, ApJ, 533, 682)
- ftzpt - Fitzpatrick, Edward L (1999, PASP, 111, 63)
- seaton - Seaton (1979, MNRAS, 187P, 73).
usage: deredden(spectra,law,ebv,rv)
spectra = file name of an ASCII file with 2 or 3 columns (lambda, flux, eflux) or an array with [lambda, flux and/or eflux]. If there is no eflux, it is assumed to be zero, and eflux=0 is returned in both cases (array or file name).
law= one of the above laws
ebv = E(B-V)
rv = 3.1 is the default.
example: deredden('spectra.dat','ccm',ebv=0.5,rv=3.0)
'''
av=rv*ebv
if type(spectra) == str:
filein=spectra
corrname=spectra
try:
(lambdas,flux,eflux)=np.loadtxt(filein, unpack=True)
except:
(lambdas,flux)=np.loadtxt(filein, unpack=True)
eflux=flux*0.0
# Trying to implement the correction for a color, but it does not work in the loops.
# elif type(spectra) == list:
# spectra=array(spectra).reshape(-2)
# try:
# lambdas=spectra[0]
# flux=spectra[1]
# eflux=spectra[2]
# except:
# lambdas=spectra[0]
# flux=spectra[1]
# eflux=flux*0.0
# corrname='a list of colors'
else:
try:
lambdas=spectra[:,0]
flux=spectra[:,1]
eflux=spectra[:,2]
except:
lambdas=spectra[:,0]
flux=spectra[:,1]
eflux=flux*0.0
corrname='an array'
fluxCor=[]
efluxCor=[]
if (law == 'ftzpt'):
for i in range(0,len(lambdas)):
fluxCor.append(ftzpt(lambdas[i],flux[i],ebv))
efluxCor.append(ftzpt(lambdas[i],eflux[i],ebv))
elif(law == 'ccm'):
for i in range(0,len(lambdas)):
cor = 10. ** (0.4 * av * ccm(lambdas[i], rv))
fluxCor.append(flux[i] * cor)
efluxCor.append(eflux[i] * cor)
elif(law == 'seaton'):
for i in range(0,len(lambdas)):
cor = 10. ** (0.4 *seaton(lambdas[i], ebv))
fluxCor.append(flux[i] * cor)
efluxCor.append(eflux[i] * cor)
elif(law== 'calzetti'):
for i in range(0,len(lambdas)):
cor = 10. ** (0.4 *calzetti(lambdas[i], ebv))
# print lambdas[i], flux[i], flux[i]*cor, calzetti(lambdas[i], ebv)
fluxCor.append(flux[i] * cor)
efluxCor.append(eflux[i] * cor)
if not quiet:
print("\n ----------------------------------------------------\n")
print("Reddening correction sucessfull applied to: ", corrname)
print("\nUsing: Law=",law,"; E(B-V)=",ebv," & Rv= ",rv)
print("\n ----------------------------------------------------\n")
return np.array(lambdas),np.array(fluxCor),np.array(efluxCor)
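# Minimal sketch of deredden() on an in-memory spectrum; all numbers are invented for
# illustration (real spectra come from the files handled further below):
# >>> lam = np.linspace(4000., 7000., 4)
# >>> spec = np.column_stack([lam, np.ones_like(lam), np.zeros_like(lam)])
# >>> lam_out, flux_cor, eflux_cor = deredden(spec, 'ccm', ebv=0.1, rv=3.1)
# The corrected flux rises toward the blue, as expected when removing Galactic extinction.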
############################################################################
#
#
# Routine to get ebv from Schlegel et al. dust maps http://irsa.ipac.caltech.edu/applications/DUST/
# Copyright 2008 <NAME>
############################################################################
#Copyright 2008 <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# This file was modified by <NAME> (<EMAIL>)
# in 2012 for the purpose of using it in the QR tool developed by LIneA.
#
# This file was modified by <NAME> (<EMAIL>)
# in 2013 for the purpose of using it in the ReddeningCorrections tool.
def get_SFD_dust(longitud,lat,dustmap='ebv',pathdustmap='./aux_files/DustMaps',interpolate=True):
"""
Gets map values from Schlegel, Finkbeiner, and Davis 1998 extinction maps.
`dustmap` can either be a filename (if '%s' appears in the string, it will be
replaced with 'ngp' or 'sgp'), or one of:
* 'i100'
100-micron map in MJy/Sr
* 'x'
X-map, temperature-correction factor
* 't'
Temperature map in degrees Kelvin for n=2 emissivity
* 'ebv'
E(B-V) in magnitudes
* 'mask'
Mask values
For these forms, the files are assumed to lie in the current directory.
Input coordinates are in degrees of galactic latitude and longitude - they can
be scalars or arrays.
if `interpolate` is an integer, it can be used to specify the order of the
interpolating polynomial
"""
if type(dustmap) is not str:
raise ValueError('dustmap is not a string')
dml=dustmap.lower()
if dml == 'ebv' or dml == 'eb-v' or dml == 'e(b-v)' :
dustmapfn=pathdustmap+'/SFD_dust_4096_%s.fits'
else:
dustmapfn=dustmap
if isscalar(longitud):
l=array([longitud])*pi/180
else:
l=array(longitud)*pi/180
if isscalar(lat):
b=array([lat])*pi/180
else:
b=array(lat)*pi/180
if not len(l)==len(b):
raise ValueError('input coordinate arrays are of different length')
if '%s' not in dustmapfn:
f=open(dustmapfn)
try:
mapds=[f[0].data]
finally:
f.close()
assert mapds[-1].shape[0] == mapds[-1].shape[1],'map dimensions not equal - incorrect map file?'
polename=dustmapfn.split('.')[0].split('_')[-1].lower()
if polename=='ngp':
n=[1]
if sum(b > 0) > 0:
print('using ngp file when lat < 0 present... put %s wherever "ngp" or "sgp" should go in filename')
elif polename=='sgp':
n=[-1]
if sum(b < 0) > 0:
print('using sgp file when lat > 0 present... put %s wherever "ngp" or "sgp" should go in filename')
else:
raise ValueError("couldn't determine South/North from filename - should have 'sgp' or 'ngp in it somewhere")
masks = [ones_like(b).astype(bool)]
else: #need to do things separately for north and south files
nmask = b >= 0
smask = ~nmask
masks = [nmask,smask]
ns = [1,-1]
mapds=[]
f=open(dustmapfn%'ngp')
try:
mapds.append(f[0].data)
finally:
f.close()
assert mapds[-1].shape[0] == mapds[-1].shape[1],'map dimensions not equal - incorrect map file?'
f=open(dustmapfn%'sgp')
try:
mapds.append(f[0].data)
finally:
f.close()
assert mapds[-1].shape[0] == mapds[-1].shape[1],'map dimensions not equal - incorrect map file?'
retvals=[]
for n,mapd,m in zip(ns,mapds,masks):
#project from galactic longitude/latitude to lambert pixels (see SFD98)
npix=mapd.shape[0]
x=npix/2*cos(l[m])*(1-n*sin(b[m]))**0.5+npix/2-0.5
y=-npix/2*n*sin(l[m])*(1-n*sin(b[m]))**0.5+npix/2-0.5
#now remap indices - numpy arrays have y and x convention switched from SFD98 appendix
x,y=y,x
if interpolate:
from scipy.ndimage import map_coordinates
if type(interpolate) is int:
retvals.append(map_coordinates(mapd,[x,y],order=interpolate))
else:
retvals.append(map_coordinates(mapd,[x,y]))
else:
x=round(x).astype(int)
y=round(y).astype(int)
retvals.append(mapd[x,y])
if isscalar(longitud) or isscalar(lat):
for r in retvals:
if len(r)>0:
return r[0]
assert False,'None of the return value arrays were populated - incorrect inputs?'
else:
#now recombine the possibly two arrays from above into one that looks like the original
retval=ndarray(l.shape)
for m,val in zip(masks,retvals):
retval[m] = val
return retval
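# Illustrative call (assumes the SFD_dust_4096_ngp/sgp FITS maps sit under
# ./aux_files/DustMaps, as used by the pipeline below; the coordinates are arbitrary):
# >>> ebv = get_SFD_dust([120.], [45.], dustmap='ebv', pathdustmap='./aux_files/DustMaps')
# >>> # returns the interpolated E(B-V), in magnitudes, at l=120 deg, b=+45 deg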
def linereddening(line1,line2,obs_ratio,theo_ratio,error_ratio_1,error_ratio_2,law,rv=3.1,verbose='y'):
'''
This routine derives the extinction coefficient C_ext from an observed versus theoretical emission-line ratio, using one of the following reddening laws:
- ccm - Cardelli, Clayton, and Mathis (1989, ApJ, 345, 245)
- calzetti - Calzetti et al. (2000, ApJ, 533, 682)
- ftzpt - Fitzpatrick, <NAME> (1999, PASP, 111, 63)
- seaton - Seaton (1979, MNRAS, 187P, 73).
usage: linereddening(line1,line2,obs_ratio,theo_ratio,error_ratio_1,error_ratio_2,law,rv=3.1)
line1 = Wavelength of the first line
line2 = Wavelength of the second line
obs_ratio = line1/line2 observed ratio
theo_ratio = line1/line2 theoretical ratio
error_ratio_1= Flux error / Flux for line1
error_ratio_2= Flux error / Flux for line2
law= one of the above laws
rv = 3.1 is the default.
verbose= y/n to print or not the input information
'''
if (law == 'ftzpt'):
f_lamb=(ftzpt(line1,rv) - ftzpt(line2,rv))
elif(law == 'ccm'):
f_lamb=(ccm(line1,rv) - ccm(line2,rv))
elif(law == 'seaton'):
f_lamb=(seaton(line1,rv) - seaton(line2,rv))
elif(law== 'calzetti'):
f_lamb=(calzetti(line1,rv) - calzetti(line2,rv))
C_ext=np.log10(obs_ratio/theo_ratio)/(-1.0*f_lamb)
sigR=np.sqrt((obs_ratio)**2*((error_ratio_1)**2 + (error_ratio_2)**2)) # error propagation
Cext_err = abs(1./(f_lamb*(obs_ratio))*np.log10(np.e)*sigR)
if verbose == 'y':
toprint=('\n*********\nline1='+str(line1)+'\n'+'line2='+str(line2)+'\n'+'obs_ratio='+str(obs_ratio)+'\n'
+'theo_ratio='+str(theo_ratio)+'\n'+'error_ratio_1='+str(error_ratio_1)+'\n'+'error_ratio_2='+str(error_ratio_2)+'\n'
+'law='+law +'\n'+'rv='+str(rv)+'\n-----\n'+'C_ext='+str(C_ext)+'\n'+'Cext_err='+str(Cext_err)+'\n*********\n')
print(toprint)
return C_ext, Cext_err,f_lamb
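# Example with hypothetical Balmer-decrement numbers (Halpha=6563A, Hbeta=4861A and a
# theoretical ratio of ~2.86 are standard values; the observed ratio and errors are invented):
# >>> C_ext, C_err, f_lamb = linereddening(6563., 4861., obs_ratio=3.2, theo_ratio=2.86,
# ... error_ratio_1=0.05, error_ratio_2=0.05, law='ccm', rv=3.1, verbose='n')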
###########################################################
# Interpolation: spline
# Converted from R (<NAME>.) to Python
###########################################################
def interp(xin,xout,yin):
Ninterpol = len(xout)
yout = np.array([])
for k in range(Ninterpol):
t = xin[xin < xout[k]]
tind = len(t)-1 # Check this -1: it fixes the problem, but it may have introduced other hidden issues
if tind <= 0: tind = 1
if tind >= len(xin): tind = len(xin) - 1
t1 = xin[tind - 1]
t2 = xin[tind]
t3 = xin[tind + 1]
tx = xout[k]
A = (tx - t1) / (t3 - t1)
B = (tx - t2) / (t3 - t2)
C = (tx - t3) / (t2 - t1)
D = (tx - t1) / (t3 - t2)
E = (tx - t2) / (t3 - t1)
G = (tx - t2) / (t3 - t2)
H = (tx - t1) / (t2 - t1)
if (t1 != t2) & (t2 != t3):
yout = np.append(yout, yin[tind+1] * A * B - yin[tind] * D * C + yin[tind-1] * E * C)
if (t1 == t2):
yout = np.append(yout, (yin[tind+1] - yin[tind]) * G + yin[tind])
if (t2 == t3):
yout = np.append(yout, (yin[tind] - yin[tind-1]) * H + yin[tind-1])
return(yout)
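# Quick check of the interpolation above (toy data, not from the pipeline); for points
# sampled from y = x**2 the interior estimates reproduce the parabola exactly:
# >>> xs = np.array([0., 1., 2., 3., 4.])
# >>> interp(xs, np.array([1.5, 2.5]), xs**2) # -> array([2.25, 6.25])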
##############################################################################################
# sdss2tocorrect_sdss
##############################################################################################
# PURPOSE
# Convert sdss .fits files to .csv files that are input for extinction and doppler correction codes
#
# CALLING SEQUENCE
# sdss2tocorrect_sdss('file_name')
#
# INPUT PARAMETERS
# String 'path/file_name' that is the file name containing a list of groups and extensions of sdss data separated by a space
#
# OUTPUT
# tocorrect_sdss_LCG %group% _ %extension% .csv
#
# REQUIRED SCRIPTS
#
# COMMENTS
# Developed by <NAME>
##############################################################################################
def sdss2tocorrect_sdss(file_list_name): # Name of the file containing the object list in "group extension" mode
data = pd.read_csv(file_list_name, sep=',')
selection = (data['flag_sdss'] == 1) & (data['onoff'] == 1)
data = data[selection]
data.index = range(len(data))
for i in range(len(data)):
group = str(data.lcgID[i])
extension = str(data.extension[i])
hdul = pf.open('files_sdss/' + group + '_' + extension + '.fits', memmap=True)
evt_data = Table(hdul[1].data)
try:
csv_file = pd.DataFrame([10**evt_data['loglam'], evt_data['flux'], 1/np.sqrt(evt_data['ivar'])]).T
except:
csv_file = pd.DataFrame([10**evt_data['LOGLAM'], evt_data['FLUX'], 1/np.sqrt(evt_data['IVAR'])]).T
csv_file.columns = ['lambda', 'flux', 'error']
selection = csv_file['lambda'] == float('inf')
selection += csv_file['error'] == float('inf')
dropinf = csv_file[selection]
for j in range(len(dropinf)):
item = dropinf.index[j]
csv_file = csv_file.drop(item)
csv_file.to_csv('files_tocorrect/tocorrect_sdss_LCG' + group + '_' + extension + '.csv', header=False, index=False, sep=' ')
print('tocorrect_sdss_LCG' + group + '_' + extension + '.csv')
##############################################################################################
# csv_gemini2tocorrect_gemini
##############################################################################################
# PURPOSE
# Convert gemini .csv files to .csv files that are input for extinction and doppler correction codes
#
# CALLING SEQUENCE
# csv_gemini2tocorrect_gemini('file_name')
#
# INPUT PARAMETERS
# String 'file_name' that is the file name containing a list of groups and extensions of sdss data separated by a space
#
# OUTPUT
# tocorrect_gemini_LCG %group% _ %extension% .csv
#
# REQUIRED SCRIPTS
#
# COMMENTS
# Developed by <NAME>
##############################################################################################
def csv_gemini2tocorrect_gemini(file_list_name):
spectra_z_file = file_list_name
data = pd.read_csv(spectra_z_file, sep=',')
selection = (data['flag_sdss'] == 0) & (data['onoff'] == 1)
data = data[selection]
data.index = range(len(data))
for i in range(len(data)):
group = str(data.lcgID[i])
extension = str(data.extension[i])
name_spec = 'files_gemini/csv_' + str(group) + '/csv_LCG' + str(group) + '_' + str(extension) + '.csv'
name_erro = 'files_gemini/csv_' + str(group) + '/csv_ERROR_LCG' + str(group) + '_' + str(extension) + '.csv'
spec = pd.read_csv(name_spec, header=None, delim_whitespace=True, names = ['lambda', 'flux'])
erro = pd.read_csv(name_erro, header=None, delim_whitespace=True, names = ['lambda', 'error'])
output = pd.DataFrame([spec['lambda'], spec['flux'], erro['error']]).T
output.to_csv('files_tocorrect/tocorrect_gemini_LCG' + str(group) + '_' + str(extension) + '.csv', index=False, header=False, sep=' ')
print('tocorrect_gemini_LCG' + str(group) + '_' + str(extension) + '.csv')
##############################################################################################
# radec_sdss_organizer
##############################################################################################
# PURPOSE
# In case of missing "allgalaxies_list" file, these commands will collect and organize all the useful galaxies of "lista_all_galaxies" (result from a sql query in casjobs)
#
# CALLING SEQUENCE
# radec_sdss_organizer('file_name')
#
# INPUT PARAMETERS
# String 'file_name' that is the file name containing the result of casjobs query called "lista_all_galaxies"
#
# OUTPUT
# allgalaxies_radec.csv, containing columns [lcgID, ra, dec] with no header
#
# REQUIRED SCRIPTS
#
# COMMENTS
# Developed by <NAME>
##############################################################################################
def radec_sdss_organizer(allgalaxies_info):
dados = pd.read_csv(allgalaxies_info)
radec_lcg = dados[['lcgID','ra_lcg','dec_lcg']]
radec_others = dados[['lcgID','ra','dec']]
radec_lcg.index = dados.lcgID
radec_others.index = dados.lcgID
radec_lcg.columns = [['lcgID','ra', 'dec']]
radec_others.columns = [['lcgID','ra', 'dec']]
radec_lcg.drop_duplicates(inplace = True)
radec = radec_lcg.append(radec_others)
radec.sort_index(inplace=True)
radec = radec[['ra','dec']]
radec.to_csv('allgalaxies_radec.csv', sep=',', header=False)
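# Typical call, assuming the casjobs query result lives under aux_files/ (the path and
# column layout are assumptions about that file):
# >>> radec_sdss_organizer('aux_files/allgalaxies_info.csv')
# >>> # writes allgalaxies_radec.csv with [lcgID, ra, dec] and no header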
######################################################################################################################
######################################################################################################################
# ISOLATED GROUPS OF EXTREMELY BLUE DWARF GALAXIES
######################################################################################################################
######################################################################################################################
###########################################################################
# Name of the file containing the list of galaxies to be analyzed
# Calling the functions that convert the SDSS and Gemini files to .csv
###########################################################################
spectra_z_file = 'aux_files/galaxy_list.csv'
sdss2tocorrect_sdss(spectra_z_file)
csv_gemini2tocorrect_gemini(spectra_z_file)
##################################################################################
# Pulling the coordinates from the files of interest and converting from equatorial to galactic
##################################################################################
data = pd.read_csv(spectra_z_file, sep=',')
selection = data['onoff'] == 1
data = data[selection]
data.index = range(len(data))
radec = pd.DataFrame([data.lcgID, data.ra, data.dec]).T
radec.index = range(len(radec))
radec.columns = ['lcgID', 'ra', 'dec']
allcoords = pd.DataFrame(eq2galCoords(radec[['ra']], radec[['dec']], units='deg')).T
allcoords = pd.concat([radec,allcoords], axis = 1)
allcoords.columns = ['lcgID', 'ra','dec','l','b']
###########################################################
# Applying E(B-V)
###########################################################
ebv_maps = pd.DataFrame(get_SFD_dust(allcoords.l,allcoords.b,dustmap='ebv', pathdustmap='./aux_files/DustMaps',interpolate=True))
ebv_maps.columns = ['ebv']
allcoords = pd.concat([allcoords,ebv_maps], axis=1)
######################################################################################################################
# Selecting and organizing the Gemini sample
# The slits of the LCGs, of the Sloan galaxies and of 6193 are == 1
# To change the files of interest, just change the "onoff" flag in the "galaxy_list.csv" file
######################################################################################################################
gemini = pd.DataFrame([data.extension, allcoords.ra, allcoords.dec, allcoords.l, allcoords.b, allcoords.ebv, data.z, data.flag_sdss])
import numpy as np
import pandas as pd
import time
import sys
def bill_calculator(load_profile, tariff):
def pre_processing_load(load_profile):
# placeholder for a quick quality-check function for the load profile
# make sure it is in kWh
# make sure it covers one year
# make sure it has no missing values, or change missing values to zero or to the average
# make sure the column name is Load
# make sure the time interval is half-hourly
return load_profile
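# A minimal sketch of what the checks listed above could look like; the column names,
# units and the '30T' half-hour frequency are assumptions about the metering data,
# not part of the original code:
# load_profile = load_profile.fillna(0) # missing readings -> 0
# load_profile = load_profile.resample('30T').sum() # enforce half-hourly kWh totals
# assert load_profile.index.normalize().nunique() <= 366 # roughly one year of data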
def fr_calc(load_profile, tariff):
f_load_profile = load_profile
imports = [np.nansum(f_load_profile[col].values[f_load_profile[col].values > 0])
for col in f_load_profile.columns if col != 'READING_DATETIME']
Results = pd.DataFrame(index=[col for col in f_load_profile.columns if col != 'READING_DATETIME'],
data=imports, columns=['Annual_kWh'])
Results['Annual_kWh_exp'] = [-1 * np.nansum(f_load_profile[col].values[f_load_profile[col].values < 0])
for col in f_load_profile.columns if col != 'READING_DATETIME']
if tariff['ProviderType'] == 'Retailer':
Results['DailyCharge'] = len(load_profile.index.normalize().unique()) * tariff['Parameters']['Daily']['Value']
Results['EnergyCharge'] = Results['Annual_kWh'] * tariff['Parameters']['Energy']['Value']
Results['EnergyCharge_Discounted'] = Results['EnergyCharge'] * (1 - tariff['Discount (%)'] / 100)
Results['Fit_Rebate'] = Results['Annual_kWh_exp'] * tariff['Parameters']['FiT']['Value']
Results['Bill'] = Results['DailyCharge'] + Results['EnergyCharge'] - Results['Fit_Rebate']
else:
for TarComp, TarCompVal in tariff['Parameters'].items():
Results[TarComp, 'DailyCharge'] = len(load_profile.index.normalize().unique()) * TarCompVal['Daily']['Value']
Results[TarComp, 'EnergyCharge'] = Results['Annual_kWh'] * TarCompVal['Energy']['Value']
Results['Bill'] = Results['NUOS','DailyCharge'] + Results['NUOS','EnergyCharge']
return Results
def block_annual(load_profile, tariff):
f_load_profile = load_profile
imports = [np.nansum(f_load_profile[col].values[f_load_profile[col].values > 0])
for col in f_load_profile.columns if col != 'READING_DATETIME']
Results = pd.DataFrame(index=[col for col in f_load_profile.columns if col != 'READING_DATETIME'],
data=imports, columns=['Annual_kWh'])
Results['Annual_kWh_exp'] = [-1 * np.nansum(f_load_profile[col].values[f_load_profile[col].values < 0])
for col in f_load_profile.columns if col != 'READING_DATETIME']
if tariff['ProviderType'] == 'Retailer':
tariff_temp = tariff.copy()
del tariff_temp['Parameters']
tariff_temp['Parameters'] = {'Retailer': tariff['Parameters']}
tariff = tariff_temp.copy()
for TarComp, TarCompVal in tariff['Parameters'].items():
Results[TarComp, 'DailyCharge'] = len(load_profile.index.normalize().unique()) * TarCompVal['Daily'][
'Value']
BlockUse = Results[['Annual_kWh']].copy()
BlockUseCharge = Results[['Annual_kWh']].copy()
lim = 0
for k, v in TarCompVal['Energy'].items():
BlockUse[k] = BlockUse['Annual_kWh']
BlockUse[k][BlockUse[k] > v['HighBound']]= v['HighBound']
BlockUse[k] = BlockUse[k]-lim
BlockUse[k][BlockUse[k] < 0] = 0
lim = v['HighBound']
BlockUseCharge[k] = BlockUse[k] * v['Value']
del BlockUse['Annual_kWh']
del BlockUseCharge['Annual_kWh']
Results[TarComp, 'EnergyCharge'] = BlockUseCharge.sum(axis=1)
if 'Discount (%)' in tariff:
Results[TarComp, 'EnergyCharge_Discounted'] = Results[TarComp, 'EnergyCharge'] * (
1 - tariff['Discount (%)'] / 100)
else:
Results[TarComp, 'EnergyCharge_Discounted'] = Results[TarComp, 'EnergyCharge']
if 'FiT' in TarCompVal:
Results[TarComp, 'Fit_Rebate'] = Results['Annual_kWh_exp'] * TarCompVal['FiT']['Value']
else:
Results[TarComp, 'Fit_Rebate'] = 0
if tariff['ProviderType'] == 'Retailer':
Results['Bill'] = Results['Retailer', 'DailyCharge'] + Results['Retailer', 'EnergyCharge_Discounted'] - \
Results['Retailer', 'Fit_Rebate']
else:
Results['Bill'] = Results['NUOS', 'DailyCharge'] + Results['NUOS', 'EnergyCharge_Discounted'] - Results[
'NUOS', 'Fit_Rebate']
return Results
def block_quarterly(load_profile, tariff):
load_profile_imp=load_profile.clip_lower(0)
load_profile_Q1 = load_profile_imp.loc[load_profile_imp.index.month.isin([1, 2, 3]), :]
load_profile_Q2 = load_profile_imp.loc[load_profile_imp.index.month.isin([4, 5, 6]), :]
load_profile_Q3 = load_profile_imp.loc[load_profile_imp.index.month.isin([7, 8, 9]), :]
load_profile_Q4 = load_profile_imp.loc[load_profile_imp.index.month.isin([10, 11, 12]), :]
f_load_profile = load_profile
imports = [np.nansum(f_load_profile[col].values[f_load_profile[col].values > 0])
for col in f_load_profile.columns if col != 'READING_DATETIME']
Results = pd.DataFrame(index=[col for col in f_load_profile.columns if col != 'READING_DATETIME'],
data=imports, columns=['Annual_kWh'])
Results['Q1_kWh'] = load_profile_Q1.sum()
Results['Q2_kWh'] = load_profile_Q2.sum()
Results['Q3_kWh'] = load_profile_Q3.sum()
Results['Q4_kWh'] = load_profile_Q4.sum()
Results['Annual_kWh_exp'] = [-1 * np.nansum(f_load_profile[col].values[f_load_profile[col].values < 0])
for col in f_load_profile.columns if col != 'READING_DATETIME']
if tariff['ProviderType'] == 'Retailer':
tariff_temp = tariff.copy()
del tariff_temp['Parameters']
tariff_temp['Parameters'] = {'Retailer': tariff['Parameters']}
tariff = tariff_temp.copy()
for TarComp, TarCompVal in tariff['Parameters'].items():
Results[TarComp, 'DailyCharge'] = len(load_profile.index.normalize().unique()) * TarCompVal['Daily'][
'Value']
for i in range(1,5):
BlockUse = Results[['Q{}_kWh'.format(i)]].copy()
BlockUseCharge = BlockUse.copy()
lim = 0
for k, v in TarCompVal['Energy'].items():
BlockUse[k] = BlockUse['Q{}_kWh'.format(i)]
BlockUse[k][BlockUse[k] > v['HighBound']] = v['HighBound']
BlockUse[k] = BlockUse[k] - lim
BlockUse[k][BlockUse[k] < 0] = 0
lim = v['HighBound']
BlockUseCharge[k] = BlockUse[k] * v['Value']
del BlockUse['Q{}_kWh'.format(i)]
del BlockUseCharge['Q{}_kWh'.format(i)]
Results[TarComp, 'EnergyCharge_Q{}'.format(i)] = BlockUseCharge.sum(axis=1)
Results[TarComp, 'EnergyCharge'] = Results[TarComp, 'EnergyCharge_Q1'] +Results[TarComp, 'EnergyCharge_Q2']\
+Results[TarComp, 'EnergyCharge_Q3']+Results[TarComp, 'EnergyCharge_Q4']
if 'Discount (%)' in tariff:
Results[TarComp, 'EnergyCharge_Discounted'] = Results[TarComp, 'EnergyCharge'] * (
1 - tariff['Discount (%)'] / 100)
else:
Results[TarComp, 'EnergyCharge_Discounted'] = Results[TarComp, 'EnergyCharge']
if 'FiT' in TarCompVal:
Results[TarComp, 'Fit_Rebate'] = Results['Annual_kWh_exp'] * TarCompVal['FiT']['Value']
else:
Results[TarComp, 'Fit_Rebate'] = 0
if tariff['ProviderType'] == 'Retailer':
Results['Bill'] = Results['Retailer', 'DailyCharge'] + Results['Retailer', 'EnergyCharge_Discounted'] - \
Results['Retailer', 'Fit_Rebate']
else:
Results['Bill'] = Results['NUOS', 'DailyCharge'] + Results['NUOS', 'EnergyCharge_Discounted'] - Results[
'NUOS', 'Fit_Rebate']
return Results
def tou_calc(load_profile, tariff):
t0 = time.time()
f_load_profile = load_profile
imports = [np.nansum(f_load_profile[col].values[f_load_profile[col].values > 0])
for col in f_load_profile.columns if col != 'READING_DATETIME']
Results = pd.DataFrame(index=[col for col in f_load_profile.columns if col != 'READING_DATETIME'],
data=imports, columns=['Annual_kWh'])
Results['Annual_kWh_exp'] = [-1 * np.nansum(f_load_profile[col].values[f_load_profile[col].values < 0])
for col in f_load_profile.columns if col != 'READING_DATETIME']
if tariff['ProviderType'] == 'Retailer':
tariff_temp = tariff.copy()
del tariff_temp['Parameters']
tariff_temp['Parameters'] = {'Retailer': tariff['Parameters']}
tariff = tariff_temp.copy()
for TarComp, TarCompVal in tariff['Parameters'].items():
Results[TarComp,'DailyCharge'] = len(load_profile.index.normalize().unique()) * TarCompVal['Daily']['Value']
time_ind = np.zeros(load_profile.shape[0])
load_profile_TI = pd.DataFrame()
load_profile_TI_Charge = pd.DataFrame()
ti = 0
for k, v in TarCompVal['Energy'].items():
this_part = v.copy()
ti += 1
for k2, v2, in this_part['TimeIntervals'].items():
start_hour = int(v2[0][0:2])
if start_hour == 24:
start_hour = 0
start_min = int(v2[0][3:5])
end_hour = int(v2[1][0:2])
if end_hour == 0:
end_hour = 24
end_min = int(v2[1][3:5])
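                    # When the window wraps past midnight (start later than end), the
                    # masks below OR the two half-windows together instead of ANDing a
                    # single start-to-end range.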
if this_part['Weekday']:
if start_hour <= end_hour:
time_ind = np.where((load_profile.index.weekday < 5) &
(load_profile.index.month.isin(this_part['Month'])) &
(((60 * load_profile.index.hour + load_profile.index.minute)
>= (60 * start_hour + start_min)) &
((60 * load_profile.index.hour + load_profile.index.minute)
< (60 * end_hour + end_min))), ti,
time_ind)
else:
time_ind = np.where((load_profile.index.weekday < 5) &
(load_profile.index.month.isin(this_part['Month'])) &
(((60 * load_profile.index.hour + load_profile.index.minute)
>= (60 * start_hour + start_min)) |
((60 * load_profile.index.hour + load_profile.index.minute)
< (60 * end_hour + end_min))), ti,
time_ind)
if this_part['Weekend']:
if start_hour <= end_hour:
time_ind = np.where((load_profile.index.weekday >= 5) &
(load_profile.index.month.isin(this_part['Month'])) &
(((60 * load_profile.index.hour + load_profile.index.minute)
>= (60 * start_hour + start_min)) &
((60 * load_profile.index.hour + load_profile.index.minute)
< (60 * end_hour + end_min))), ti,
time_ind)
else:
time_ind = np.where((load_profile.index.weekday >= 5) &
(load_profile.index.month.isin(this_part['Month'])) &
(((60 * load_profile.index.hour + load_profile.index.minute)
>= (60 * start_hour + start_min)) |
((60 * load_profile.index.hour + load_profile.index.minute)
< (60 * end_hour + end_min))), ti,
time_ind)
load_profile_TI[k] = load_profile.loc[time_ind == ti, :].sum()
load_profile_TI_Charge[k] = this_part['Value'] * load_profile_TI[k]
Results[TarComp,'EnergyCharge'] = load_profile_TI_Charge.sum(axis=1)
if 'Discount (%)' in tariff:
Results[TarComp,'EnergyCharge_Discounted'] = Results[TarComp,'EnergyCharge'] * (1 - tariff['Discount (%)'] / 100)
else:
Results[TarComp,'EnergyCharge_Discounted'] = Results[TarComp,'EnergyCharge']
if 'FiT' in TarCompVal:
Results[TarComp, 'Fit_Rebate'] = Results['Annual_kWh_exp'] * TarCompVal['FiT']['Value']
else:
Results[TarComp, 'Fit_Rebate'] = 0
if tariff['ProviderType'] == 'Retailer':
Results['Bill'] = Results['Retailer','DailyCharge'] + Results['Retailer','EnergyCharge_Discounted'] - Results['Retailer','Fit_Rebate']
else:
Results['Bill'] = Results['NUOS','DailyCharge'] + Results['NUOS','EnergyCharge_Discounted'] - Results['NUOS','Fit_Rebate']
print(time.time() - t0)
return Results
def demand_charge(load_profile, tariff):
f_load_profile = load_profile
imports = [np.nansum(f_load_profile[col].values[f_load_profile[col].values > 0])
for col in f_load_profile.columns if col != 'READING_DATETIME']
Results = pd.DataFrame(index=[col for col in f_load_profile.columns if col != 'READING_DATETIME'],
data=imports, columns=['Annual_kWh'])
Results['Annual_kWh_exp'] = [-1 * np.nansum(f_load_profile[col].values[f_load_profile[col].values < 0])
for col in f_load_profile.columns if col != 'READING_DATETIME']
if tariff['ProviderType'] == 'Retailer':
tariff_temp = tariff.copy()
del tariff_temp['Parameters']
tariff_temp['Parameters'] = {'Retailer': tariff['Parameters']}
tariff = tariff_temp.copy()
for TarComp, TarCompVal in tariff['Parameters'].items():
Results[TarComp, 'DailyCharge'] = len(load_profile.index.normalize().unique()) * TarCompVal['Daily'][
'Value']
if ('Unit', '$/kWh') in TarCompVal['Energy'].items():
Results[TarComp, 'EnergyCharge'] = Results['Annual_kWh'] * TarCompVal['Energy']['Value']
else:
load_profile_imp = load_profile.clip_lower(0)
load_profile_Q1 = load_profile_imp.loc[load_profile_imp.index.month.isin([1, 2, 3]), :]
load_profile_Q2 = load_profile_imp.loc[load_profile_imp.index.month.isin([4, 5, 6]), :]
load_profile_Q3 = load_profile_imp.loc[load_profile_imp.index.month.isin([7, 8, 9]), :]
load_profile_Q4 = load_profile_imp.loc[load_profile_imp.index.month.isin([10, 11, 12]), :]
Results['Q1_kWh'] = load_profile_Q1.sum()
Results['Q2_kWh'] = load_profile_Q2.sum()
Results['Q3_kWh'] = load_profile_Q3.sum()
Results['Q4_kWh'] = load_profile_Q4.sum()
for i in range(1, 5):
BlockUse = Results[['Q{}_kWh'.format(i)]].copy()
BlockUseCharge = BlockUse.copy()
lim = 0
for k, v in TarCompVal['Energy'].items():
BlockUse[k] = BlockUse['Q{}_kWh'.format(i)]
BlockUse[k][BlockUse[k] > v['HighBound']] = v['HighBound']
BlockUse[k] = BlockUse[k] - lim
BlockUse[k][BlockUse[k] < 0] = 0
lim = v['HighBound']
BlockUseCharge[k] = BlockUse[k] * v['Value']
del BlockUse['Q{}_kWh'.format(i)]
del BlockUseCharge['Q{}_kWh'.format(i)]
Results[TarComp, 'EnergyCharge_Q{}'.format(i)] = BlockUseCharge.sum(axis=1)
Results[TarComp, 'EnergyCharge'] = Results[TarComp, 'EnergyCharge_Q1'] + Results[TarComp, 'EnergyCharge_Q2'] \
+ Results[TarComp, 'EnergyCharge_Q3'] + Results[
TarComp, 'EnergyCharge_Q4']
if 'Discount (%)' in tariff:
Results[TarComp, 'EnergyCharge_Discounted'] = Results[TarComp, 'EnergyCharge'] * (
1 - tariff['Discount (%)'] / 100)
else:
Results[TarComp, 'EnergyCharge_Discounted'] = Results[TarComp, 'EnergyCharge']
if 'FiT' in TarCompVal:
Results[TarComp, 'Fit_Rebate'] = Results['Annual_kWh_exp'] * TarCompVal['FiT']['Value']
else:
Results[TarComp, 'Fit_Rebate'] = 0
Results[TarComp, 'Demand'] = 0
Results[TarComp, 'DemandCharge'] = 0
for DemCharComp, DemCharCompVal in TarCompVal['Demand'].items():
TSNum = DemCharCompVal['Demand Window Length'] # number of timestamp
NumofPeaks = DemCharCompVal['Number of Peaks']
if TSNum > 1:
load_profile_r = load_profile.rolling(TSNum, min_periods=1).mean()
else:
load_profile_r = load_profile
time_ind = np.zeros(load_profile.shape[0])
ti = 1
for k2, v2, in DemCharCompVal['TimeIntervals'].items():
start_hour = int(v2[0][0:2])
if start_hour == 24:
start_hour = 0
start_min = int(v2[0][3:5])
end_hour = int(v2[1][0:2])
if end_hour == 0:
end_hour = 24
end_min = int(v2[1][3:5])
if DemCharCompVal['Weekday']:
if start_hour <= end_hour:
time_ind = np.where((load_profile.index.weekday < 5) &
(load_profile.index.month.isin(DemCharCompVal['Month'])) &
(((60 * load_profile.index.hour + load_profile.index.minute)
>= (60 * start_hour + start_min)) &
((60 * load_profile.index.hour + load_profile.index.minute)
< (60 * end_hour + end_min))), ti,
time_ind)
else:
time_ind = np.where((load_profile.index.weekday < 5) &
(load_profile.index.month.isin(DemCharCompVal['Month'])) &
(((60 * load_profile.index.hour + load_profile.index.minute)
>= (60 * start_hour + start_min)) |
((60 * load_profile.index.hour + load_profile.index.minute)
< (60 * end_hour + end_min))), ti,
time_ind)
if DemCharCompVal['Weekend']:
if start_hour <= end_hour:
time_ind = np.where((load_profile.index.weekday >= 5) &
(load_profile.index.month.isin(DemCharCompVal['Month'])) &
(((60 * load_profile.index.hour + load_profile.index.minute)
>= (60 * start_hour + start_min)) &
((60 * load_profile.index.hour + load_profile.index.minute)
< (60 * end_hour + end_min))), ti,
time_ind)
else:
time_ind = np.where((load_profile.index.weekday >= 5) &
(load_profile.index.month.isin(DemCharCompVal['Month'])) &
(((60 * load_profile.index.hour + load_profile.index.minute)
>= (60 * start_hour + start_min)) |
((60 * load_profile.index.hour + load_profile.index.minute)
< (60 * end_hour + end_min))), ti,
time_ind)
load_profile_r = load_profile_r.loc[time_ind == ti, :]
load_profile_f = load_profile_r.copy()
load_profile_f = load_profile_f.reset_index()
load_profile_f = pd.melt(load_profile_f, id_vars=['READING_DATETIME'],
value_vars=[x for x in load_profile_f.columns if x != 'READING_DATETIME'])
load_profile_f = load_profile_f.rename(columns={'variable': 'HomeID', 'value': 'kWh'})
load_profile_f['Month'] = | pd.to_datetime(load_profile_f['READING_DATETIME']) | pandas.to_datetime |
# NB: You have to run main_sampling.py in order for this script to function
import numpy as np
import pandas as pd
import pickle
import datetime
from seir.sampling.model import SamplingNInfectiousModel
import matplotlib.pyplot as plt
import seaborn as sns
import scipy.stats as st
import logging
logging.basicConfig(level=logging.INFO)
if __name__ == '__main__':
sns.set(style='darkgrid')
# get data
logging.info('Loading data')
df_deaths = pd.read_csv(
'https://raw.githubusercontent.com/dsfsi/covid19za/master/data/covid19za_provincial_cumulative_timeline_deaths.csv',
parse_dates=['date'],
date_parser=lambda t: pd.to_datetime(t, format='%d-%m-%Y')
)
df_confirmed = pd.read_csv(
'https://raw.githubusercontent.com/dsfsi/covid19za/master/data/covid19za_provincial_cumulative_timeline_confirmed.csv',
parse_dates=['date'],
date_parser=lambda t: pd.to_datetime(t, format='%d-%m-%Y')
)
df_hosp_icu = pd.read_csv('data/WC_hosp_icu.csv',
parse_dates=['Date'],
date_parser=lambda t: pd.to_datetime(t, format='%d/%m/%Y'))
df_deaths = df_deaths.sort_values('date')
df_confirmed = df_confirmed.sort_values('date')
df_hosp_icu = df_hosp_icu.sort_values('Date')
logging.info('Taking intersection of dates in all dataframes')
max_date = np.min([df_deaths['date'].max(), df_confirmed['date'].max(), df_hosp_icu['Date'].max()])
logging.info(f'Maximum date at which all data sources had data: {max_date}')
df_confirmed = df_confirmed[df_confirmed['date'] < max_date]
df_deaths = df_deaths[['date', 'WC']]
df_confirmed = df_confirmed[['date', 'WC']]
logging.info('Linearly interpolating missing data')
df_confirmed = df_confirmed.interpolate(method='linear')
logging.info('Setting date of lockdown 2020-03-27 to day 0')
df_deaths['Day'] = (df_deaths['date'] - pd.to_datetime('2020-03-27')).dt.days
df_confirmed['Day'] = (df_confirmed['date'] - pd.to_datetime('2020-03-27')).dt.days
df_hosp_icu['Day'] = (df_hosp_icu['Date'] - pd.to_datetime('2020-03-27')).dt.days
logging.info('Merging data sources')
df_merge = df_confirmed.merge(df_deaths, on='Day', how='left', suffixes=('_confirmed', '_deaths'))
df_merge = df_merge.merge(df_hosp_icu, on='Day', how='left')
df_merge = df_merge.interpolate(method='linear')
df_merge = df_merge[
['date_confirmed', 'WC_confirmed', 'WC_deaths', 'Current hospitalisations', 'Current ICU', 'Day']]
df_merge = df_merge.fillna(0)
logging.info('Casting data')
df_merge['WC_confirmed'] = df_merge['WC_confirmed'].astype(int)
df_merge['WC_deaths'] = df_merge['WC_deaths'].astype(int)
df_merge['Day'] = df_merge['Day'].astype(int)
t = df_merge['Day'].to_numpy()
i_h_obs = df_merge['Current hospitalisations']
i_icu_obs = df_merge['Current ICU']
i_d_obs = df_merge['WC_confirmed']
d_icu_obs = df_merge['WC_deaths']
# load solution data
nb_runs = 5
for run in range(nb_runs):
with open(f'data/sampling-runs/run{run:02}_scalar.pkl', 'rb') as f:
scalar_vars = pickle.load(f)
with open(f'data/sampling-runs/run{run:02}_group.pkl', 'rb') as f:
group_vars = pickle.load(f)
df_resample = | pd.read_csv(f'data/sampling-runs/run{run:02}_resample.csv') | pandas.read_csv |
import os
import glob
import pandas as pd
# dict matching target to download link
source_dict = {'Deaths': 'https://raw.githubusercontent.com/CSSEGISandData/COVID-19/master/csse_covid_19_data/' \
'csse_covid_19_time_series/time_series_covid19_deaths_global.csv',
'Cases': 'https://raw.githubusercontent.com/CSSEGISandData/COVID-19/master/csse_covid_19_data/' \
'csse_covid_19_time_series/time_series_covid19_confirmed_global.csv'}
locs = pd.read_csv('viz/location_codes.csv')
# extract data for given targets
for target in ['Deaths', 'Cases']:
# contains data from all countries
df = pd.read_csv(source_dict[target])
    # keep only the countries listed in location_codes.csv
df = df[df['Country/Region'].isin(locs.location_name)]
# drop provinces etc. (e.g. Bermuda)
df = df[df['Province/State'].isnull()].copy()
# reformat
df.drop(columns=['Province/State', 'Lat', 'Long'], inplace=True)
df.rename(columns={'Country/Region':'location_name'}, inplace=True)
df = pd.melt(df, id_vars=['location_name'], var_name='date')
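    # pd.melt reshapes the wide JHU date columns into long format: columns like
    # ['location_name', '1/22/20', '1/23/20', ...] become rows with columns
    # ['location_name', 'date', 'value'].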
df.date = | pd.to_datetime(df.date) | pandas.to_datetime |
import logging
from pathlib import Path
import altair as alt
import pandas as pd
import requests
import streamlit as st
import streamlit.components.v1 as components
st.set_page_config(layout="wide")
st.title("Bundes-Notbremse Ampel")
pd.set_option('precision', 2)
def is_covid_file_up_to_date():
covid_path = Path('./covid.csv')
up_to_date = False
if covid_path.is_file():
covid_data = pd.read_csv('covid.csv', usecols=["Datenstand"], nrows=10)
datenstand = pd.to_datetime(covid_data.Datenstand.str.split(',').str[0], dayfirst=True).max()
up_to_date = datenstand == (pd.Timestamp.today().normalize())
logging.info("Found covid.csv with Datenstand: %s", datenstand)
return up_to_date
def download_covid_data():
logging.info('Downloading covid.csv')
url='https://www.arcgis.com/sharing/rest/content/items/f10774f1c63e40168479a1feb6c7ca74/data'
response = requests.get(url, stream = True)
text_file = open("covid.csv","wb")
for chunk in response.iter_content(chunk_size=1024):
text_file.write(chunk)
text_file.close()
@st.cache
def load_covid_data():
if not is_covid_file_up_to_date():
download_covid_data()
cols = ["IdLandkreis", "Meldedatum", "AnzahlFall", "NeuerFall"]
covid = pd.read_csv('covid.csv', usecols=cols, parse_dates=['Meldedatum'])
covid.Meldedatum = pd.DatetimeIndex(covid.Meldedatum.dt.date)
return covid
def color_notbremse(val):
background_color = 'green'
if val >= 165:
background_color = 'blue'
elif val >= 150:
background_color = 'red'
elif val >= 100:
background_color = 'yellow'
    return 'background-color: %s' % background_color
def show_traffic_light(color):
style = """
<style>
.css-ampel {
display: inline-block;
width: 30px;
height: 90px;
border-radius: 6px;
position: relative;
background-color: black;
zoom: 1.7;
}
.css-ampel span,
.css-ampel:before,
.css-ampel:after {
content: "";
color: white;
position: absolute;
border-radius: 15px;
width: 22px;
height: 22px;
left: 4px;
}
.css-ampel:before {
top: 6px;
background-color: red;
background-color: dimgrey;
}
.css-ampel:after {
top: 34px;
background-color: yellow;
background-color: dimgrey;
}
.css-ampel span {
top: 62px;
background-color: green;
background-color: dimgrey;
}
.ampelrot:before {
background-color: red;
box-shadow: 0 0 20px red;
}
.ampelblau:before {
background-color: blue;
box-shadow: 0 0 20px blue;
}
.ampelgelb:after {
background-color: yellow;
box-shadow: 0 0 20px yellow;
}
.ampelgruen span {
background-color: limegreen;
box-shadow: 0 0 20px limegreen;
}
</style>
"""
value = "< 100"
if color == "gelb":
value = "> 100"
elif color == "rot":
value = "> 150"
elif color == "blau":
value = "> 165"
st.write(f'### Aktuell gelten die Regeln für eine 7 Tage Inzidenz {value}.')
components.html(
style + f"""
<span class="css-ampel ampel{color}">
<span>
</span>
</span>
""",
height=170
)
covid_data = load_covid_data()
einwohner = pd.read_csv('Einwohnerzahlen.csv')
def get_inzidenz_data(landkreise):
adm_unit_ids = einwohner[einwohner.Region.isin(landkreise)].AdmUnitId
inzidenz_data = covid_data[(covid_data.NeuerFall != -1) & (covid_data.IdLandkreis.isin(adm_unit_ids))].groupby(['IdLandkreis', 'Meldedatum']).sum()
idx = pd.DatetimeIndex(pd.date_range(start=covid_data.Meldedatum.min(), end=covid_data.Meldedatum.max(), freq='1D'))
multi_idx = pd.MultiIndex.from_product([inzidenz_data.index.levels[0], idx], names=["IdLandkreis", "Meldedatum"])
inzidenz_data = inzidenz_data.reindex(multi_idx, fill_value=0)
cases_7d = []
for group, data in inzidenz_data.groupby(level=0):
cases_7d.extend(data.reset_index().set_index('Meldedatum').rolling(window="7D").sum()['AnzahlFall'].to_list())
inzidenz_data['AnzahlFall7T'] = cases_7d
inzidenz_data.reset_index(inplace=True)
inzidenz_data = inzidenz_data.merge(einwohner, left_on="IdLandkreis", right_on="AdmUnitId")
inzidenz_data['Inz7T'] = inzidenz_data.AnzahlFall7T * 100000 / inzidenz_data.EWZ
return inzidenz_data
def get_ampel_color(inz7T):
ampel = "gruen"
for i, val in enumerate(inz7T[:-1]):
if i > 2:
w3 = min(inz7T[i-3:i])
if (w3 >= 100) & (ampel == "gruen"):
ampel = "gelb"
if (w3 >= 150) & (ampel == "gelb"):
ampel = "rot"
if (w3 >= 165) & (ampel == "rot"):
ampel = "blau"
if i > 4:
w5 = max(inz7T[i-5:i])
if (w5 < 165) & (ampel == "blau"):
ampel = "rot"
if (w5 < 150) & (ampel == "rot"):
ampel = "gelb"
if (w5 < 100) & (ampel == "gelb"):
ampel = "gruen"
return ampel
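# Note the hysteresis in get_ampel_color above: the light escalates only after the
# 7-day incidence has been at or above a threshold for 3 consecutive days, and it
# de-escalates only after 5 consecutive days below the threshold. For example
# (illustrative values):
#   get_ampel_color([90, 101, 102, 103, 104, 105])  # -> 'gelb'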
st.markdown('Die Informationen der Bundesregierung zum Infektionsschutzgesetz finden die [hier](https://www.bundesregierung.de/breg-de/suche/bundesweite-notbremse-1888982).')
selected_landkreise = st.sidebar.multiselect(
'Landkreis',
einwohner.Region
)
show_verlauf = st.sidebar.checkbox("Inzidenzverlauf anzeigen")
if selected_landkreise:
inzidenz_data = get_inzidenz_data(selected_landkreise)
for landkreis in selected_landkreise:
st.markdown(f"## {landkreis}:")
color = get_ampel_color(inzidenz_data[inzidenz_data.Region == landkreis]['Inz7T'])
show_traffic_light(color)
period = pd.date_range(end=inzidenz_data.Meldedatum.max(), periods=10, freq="D")
notbremse_mask = inzidenz_data.Meldedatum.isin(period) & inzidenz_data.Region.isin(selected_landkreise)
notbremse_data = inzidenz_data[notbremse_mask]
notbremse_data = notbremse_data.pivot(index='Region', columns='Meldedatum', values='Inz7T')
notbremse_data.columns = notbremse_data.columns.astype(str)
if show_verlauf:
st.write("# Inzidenzverlauf:")
st.dataframe(notbremse_data.style.applymap(color_notbremse))
inzidenz_plot = alt.Chart(inzidenz_data[inzidenz_data.Region.isin(selected_landkreise)]).mark_line().encode(
x='Meldedatum',
y=alt.Y('Inz7T'),
color="Region",
).interactive(True)
inzidenz_plot += alt.Chart(pd.DataFrame({'y': [165]})).mark_rule(color="blue").encode(y='y').interactive(True)
inzidenz_plot += alt.Chart(pd.DataFrame({'y': [150]})).mark_rule(color="red").encode(y='y').interactive(True)
inzidenz_plot += alt.Chart( | pd.DataFrame({'y': [100]}) | pandas.DataFrame |
"""
inspiration from R Package - PerformanceAnalytics
"""
from collections import OrderedDict
import pandas as pd
import numpy as np
from tia.analysis.util import per_series
PER_YEAR_MAP = {
'BA': 1.,
'BAS': 1.,
'A': 1.,
'AS': 1.,
'BQ': 4.,
'BQS': 4.,
'Q': 4.,
'QS': 4.,
'D': 365.,
'B': 252.,
'BMS': 12.,
'BM': 12.,
'MS': 12.,
'M': 12.,
'W': 52.,
}
def guess_freq(index):
# admittedly weak way of doing this...This needs to be abolished
if isinstance(index, (pd.Series, pd.DataFrame)):
index = index.index
if hasattr(index, 'freqstr') and index.freqstr:
return index.freqstr[0]
elif len(index) < 3:
raise Exception('cannot guess frequency with less than 3 items')
else:
lb = min(7, len(index))
idx_zip = lambda: list(zip(index[-lb:-1], index[-(lb-1):]))
diff = min([t2 - t1 for t1, t2, in idx_zip()])
if diff.days <= 1:
if 5 in index.dayofweek or 6 in index.dayofweek:
return 'D'
else:
return 'B'
elif diff.days == 7:
return 'W'
else:
diff = min([t2.month - t1.month for t1, t2, in idx_zip()])
if diff == 1:
return 'M'
diff = min([t2.year - t1.year for t1, t2, in idx_zip()])
if diff == 1:
return 'A'
strs = ','.join([i.strftime('%Y-%m-%d') for i in index[-lb:]])
raise Exception('unable to determine frequency, last %s dates %s' % (lb, strs))
def periodicity(freq_or_frame):
"""
resolve the number of periods per year
"""
if hasattr(freq_or_frame, 'rule_code'):
rc = freq_or_frame.rule_code
rc = rc.split('-')[0]
factor = PER_YEAR_MAP.get(rc, None)
if factor is not None:
return factor / abs(freq_or_frame.n)
else:
raise Exception('Failed to determine periodicity. No factor mapping for %s' % freq_or_frame)
elif isinstance(freq_or_frame, str):
factor = PER_YEAR_MAP.get(freq_or_frame, None)
if factor is not None:
return factor
else:
raise Exception('Failed to determine periodicity. No factor mapping for %s' % freq_or_frame)
elif isinstance(freq_or_frame, (pd.Series, pd.DataFrame, pd.TimeSeries)):
freq = freq_or_frame.index.freq
if not freq:
freq = pd.infer_freq(freq_or_frame.index)
if freq:
return periodicity(freq)
else:
# Attempt to resolve it
import warnings
freq = guess_freq(freq_or_frame.index)
warnings.warn('frequency not set. guessed it to be %s' % freq)
return periodicity(freq)
else:
return periodicity(freq)
else:
raise ValueError("periodicity expects DataFrame, Series, or rule_code property")
periods_in_year = periodicity
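# Illustrative examples of the annualization factor resolved by periodicity(),
# using the mappings in PER_YEAR_MAP above:
#   periodicity('M')  # -> 12.0  (monthly)
#   periodicity('B')  # -> 252.0 (business-daily)
#   periodicity('W')  # -> 52.0  (weekly)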
def _resolve_periods_in_year(scale, frame):
""" Convert the scale to an annualzation factor. If scale is None then attempt to resolve from frame. If scale is a scalar then
use it. If scale is a string then use it to lookup the annual factor
"""
if scale is None:
return periodicity(frame)
elif isinstance(scale, str):
return periodicity(scale)
elif np.isscalar(scale):
return scale
else:
raise ValueError("scale must be None, scalar, or string, not %s" % type(scale))
def excess_returns(returns, bm=0):
"""
Return the excess amount of returns above the given benchmark bm
"""
return returns - bm
def returns(prices, method='simple', periods=1, fill_method='pad', limit=None, freq=None):
"""
compute the returns for the specified prices.
method: [simple,compound,log], compound is log
"""
if method not in ('simple', 'compound', 'log'):
raise ValueError("Invalid method type. Valid values are ('simple', 'compound')")
if method == 'simple':
return prices.pct_change(periods=periods, fill_method=fill_method, limit=limit, freq=freq)
else:
if freq is not None:
raise NotImplementedError("TODO: implement this logic if needed")
if isinstance(prices, pd.Series):
if fill_method is None:
data = prices
else:
data = prices.fillna(method=fill_method, limit=limit)
data = np.log(data / data.shift(periods=periods))
mask = pd.isnull(prices.values)
np.putmask(data.values, mask, np.nan)
return data
else:
return pd.DataFrame(
{name: returns(col, method, periods, fill_method, limit, freq) for name, col in prices.items()},
columns=prices.columns,
index=prices.index)
def returns_cumulative(returns, geometric=True, expanding=False):
""" return the cumulative return
Parameters
----------
returns : DataFrame or Series
geometric : bool, default is True
If True, geometrically link returns
expanding : bool default is False
If True, return expanding series/frame of returns
If False, return the final value(s)
"""
if expanding:
if geometric:
return (1. + returns).cumprod() - 1.
else:
return returns.cumsum()
else:
if geometric:
return (1. + returns).prod() - 1.
else:
return returns.sum()
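# Worked example (illustrative): for periodic returns [0.10, -0.05], geometric
# linking gives (1.10 * 0.95) - 1 = 0.045, while arithmetic summation gives
# 0.10 - 0.05 = 0.05.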
def rolling_returns_cumulative(returns, window, min_periods=1, geometric=True):
""" return the rolling cumulative returns
Parameters
----------
returns : DataFrame or Series
window : number of observations
min_periods : minimum number of observations in a window
geometric : link the returns geometrically
"""
if geometric:
rc = lambda x: (1. + x[np.isfinite(x)]).prod() - 1.
else:
rc = lambda x: (x[np.isfinite(x)]).sum()
return pd.rolling_apply(returns, window, rc, min_periods=min_periods)
def returns_annualized(returns, geometric=True, scale=None, expanding=False):
""" return the annualized cumulative returns
Parameters
----------
returns : DataFrame or Series
geometric : link the returns geometrically
scale: None or scalar or string (ie 12 for months in year),
If None, attempt to resolve from returns
If scalar, then use this as the annualization factor
If string, then pass this to periodicity function to resolve annualization factor
expanding: bool, default is False
If True, return expanding series/frames.
If False, return final result.
"""
scale = _resolve_periods_in_year(scale, returns)
if expanding:
if geometric:
n = pd.expanding_count(returns)
return ((1. + returns).cumprod() ** (scale / n)) - 1.
else:
return pd.expanding_mean(returns) * scale
else:
if geometric:
n = returns.count()
return ((1. + returns).prod() ** (scale / n)) - 1.
else:
return returns.mean() * scale
def drawdowns(returns, geometric=True):
"""
    compute the drawdown series for the periodic return series
    returns: periodic return Series or DataFrame
"""
wealth = 1. + returns_cumulative(returns, geometric=geometric, expanding=True)
values = wealth.values
if values.ndim == 2:
ncols = values.shape[-1]
values = np.vstack(([1.] * ncols, values))
maxwealth = pd.expanding_max(values)[1:]
dds = wealth / maxwealth - 1.
dds[dds > 0] = 0 # Can happen if first returns are positive
return dds
elif values.ndim == 1:
values = np.hstack(([1.], values))
maxwealth = pd.expanding_max(values)[1:]
dds = wealth / maxwealth - 1.
dds[dds > 0] = 0 # Can happen if first returns are positive
return dds
else:
raise ValueError('unable to process array with %s dimensions' % values.ndim)
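# Worked example (illustrative): returns of [0.10, -0.20] give a wealth index of
# [1.10, 0.88]; dividing by the running maximum [1.10, 1.10] and subtracting 1
# yields drawdowns of [0.0, -0.20].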
def max_drawdown(returns=None, geometric=True, dd=None, inc_date=False):
"""
compute the max draw down.
returns: period return Series or DataFrame
dd: drawdown Series or DataFrame (mutually exclusive with returns)
"""
if (returns is None and dd is None) or (returns is not None and dd is not None):
raise ValueError('returns and drawdowns are mutually exclusive')
if returns is not None:
dd = drawdowns(returns, geometric=geometric)
if isinstance(dd, pd.DataFrame):
vals = [max_drawdown(dd=dd[c], inc_date=inc_date) for c in dd.columns]
        cols = ['maxdd'] + (inc_date and ['maxdd_dt'] or [])
res = pd.DataFrame(vals, columns=cols, index=dd.columns)
return res if inc_date else res.maxdd
else:
mddidx = dd.idxmin()
# if mddidx == dd.index[0]:
# # no maxff
# return 0 if not inc_date else (0, None)
#else:
sub = dd[:mddidx]
start = sub[::-1].idxmax()
mdd = dd[mddidx]
# return start, mddidx, mdd
return mdd if not inc_date else (mdd, mddidx)
@per_series(result_is_frame=1)
def drawdown_info(returns, geometric=True):
"""Return a DataFrame containing information about ALL the drawdowns for the rets. The frame
contains the columns:
'dd start': drawdown start date
'dd end': drawdown end date
    'maxdd': maximum drawdown
    'maxdd dt': date of the maximum drawdown
    'days': duration of the drawdown in days
"""
dd = drawdowns(returns, geometric=True).to_frame()
last = dd.index[-1]
dd.columns = ['vals']
dd['nonzero'] = (dd.vals != 0).astype(int)
dd['gid'] = (dd.nonzero.shift(1) != dd.nonzero).astype(int).cumsum()
idxname = dd.index.name or 'index'
ixs = dd.reset_index().groupby(['nonzero', 'gid'])[idxname].apply(lambda x: np.array(x))
rows = []
if 1 in ixs:
for ix in ixs[1]:
sub = dd.ix[ix]
# need to get t+1 since actually draw down ends on the 0 value
end = dd.index[dd.index.get_loc(sub.index[-1]) + (last != sub.index[-1] and 1 or 0)]
rows.append([sub.index[0], end, sub.vals.min(), sub.vals.idxmin()])
f = pd.DataFrame.from_records(rows, columns=['dd start', 'dd end', 'maxdd', 'maxdd dt'])
f['days'] = (f['dd end'] - f['dd start']).astype('timedelta64[D]')
return f
def std_annualized(returns, scale=None, expanding=0):
scale = _resolve_periods_in_year(scale, returns)
if expanding:
return np.sqrt(scale) * pd.expanding_std(returns)
else:
return np.sqrt(scale) * returns.std()
def sharpe(returns, rfr=0, expanding=0):
"""
    returns: periodic return Series or DataFrame
rfr: risk free rate
expanding: bool
"""
if expanding:
excess = excess_returns(returns, rfr)
return pd.expanding_mean(excess) / pd.expanding_std(returns)
else:
return excess_returns(returns, rfr).mean() / returns.std()
def sharpe_annualized(returns, rfr_ann=0, scale=None, expanding=False, geometric=False):
scale = _resolve_periods_in_year(scale, returns)
stdann = std_annualized(returns, scale=scale, expanding=expanding)
retsann = returns_annualized(returns, scale=scale, expanding=expanding, geometric=geometric)
return (retsann - rfr_ann) / stdann
def downside_deviation(rets, mar=0, expanding=0, full=0, ann=0):
"""Compute the downside deviation for the specifed return series
:param rets: periodic return series
:param mar: minimum acceptable rate of return (MAR)
    :param full: If True, use the length of the full series. If False, use only values below the MAR
:param expanding:
:param ann: True if result should be annualized
"""
below = rets[rets < mar]
if expanding:
n = pd.expanding_count(rets)[below.index] if full else | pd.expanding_count(below) | pandas.expanding_count |
# %% Import
import numpy as np
import pandas as pd
import requests
import os
from bs4 import BeautifulSoup
"""
Takes a dictionary of relevant brands and their URLs and returns a raw csv file
"""
# %% Functions
def outlets_crawl(brand, url):
"""
    Returns a raw, unformatted DataFrame of outlets and their brand from the given URL.
"""
page = requests.get(url)
soup = BeautifulSoup(page.content, "lxml")
# ensure crawler had actual results to work with.
def _check_results(class_term, soup=soup):
results = soup.find_all(attrs={"class": class_term})
if len(results) == 0:
raise ValueError("No outlets found, check class_term or url.")
return results
try:
results = _check_results("outlet_item")
except ValueError:
results = _check_results("lease_item")
# continue
_ls = []
for result in results:
_ls.append([i for i in result.stripped_strings])
df = | pd.DataFrame(_ls) | pandas.DataFrame |
import pandas as pd
import numpy as np
import seaborn as sb
import base64
from io import BytesIO
from flask import send_file
from flask import request
from napa import player_information as pi
import matplotlib
matplotlib.use('Agg') # required to solve multithreading issues with matplotlib within flask
import matplotlib.pyplot as plt
import matplotlib.style as style
sb.set_context("talk", font_scale = 1)
style.use('seaborn-whitegrid')
#######################################################################
# napa Database structure
#######################################################################
# TABLE SCHEMA
# team_a
# team_b
# lineup
# all_perms
# perm_score
def get_team_info():
''' If valid team IDs have been entered, then import player data from the NAPA website. If a random team has been selected,
then create a random team using the create_rand_team function. If there is an error with the team IDs, or inconsistent team lengths have been chosen, then set error = True.'''
error = False
# Retrieve ids from the input forms
A = [request.form.get('player_' +str(i) +'a') for i in range(1,6)]
B = [request.form.get('player_' +str(i) +'b') for i in range(1,6)]
rand_a = request.form.get('random_a')
rand_b = request.form.get('random_b')
try:
if (rand_a == '1') & (rand_b == '1'): # Two random teams
(team_A_df, team_B_df) = pi.create_two_rand_teams(5)
elif rand_a == '1': # Team A is random, team B is real
team_A_df = pi.create_rand_team(5)
team_B = [int(x) for x in B if x]
team_B_df = pi.team_data(team_B)
if len(team_A_df) != len(team_B_df):
error = True
elif rand_b == '1': # Team B is random, team A is real
team_B_df = pi.create_rand_team(5)
team_A = [int(x) for x in A if x]
team_A_df = pi.team_data(team_A)
if len(team_A_df) != len(team_B_df):
error = True
else: # Both teams are real
team_A = [int(x) for x in A if x]
team_B = [int(x) for x in B if x]
team_A_df = pi.team_data(team_A)
team_B_df = pi.team_data(team_B)
if len(team_A_df) != len(team_B_df):
error = True
except:
error = True
return [], [], error
return team_A_df, team_B_df, error
def load_team_a(con):
''' Select all players from team_a table and rename the columns. '''
query = ''' SELECT * FROM team_a'''
team_a = pd.read_sql_query(query,con)
return team_a.rename(columns={'name': 'a_name', 'id':'a_id', 'eight_skill': 'a_skill'})
def load_team_b(con):
''' Select all players from team_b table and rename the columns. '''
query = ''' SELECT * FROM team_b'''
team_b = pd.read_sql_query(query,con)
return team_b.rename(columns={'name': 'b_name', 'id':'b_id', 'eight_skill': 'b_skill'})
def get_prev_player(con, player_id, team):
''' Select the name and the ID of the player who was chosen on the previous webpage. '''
query = '''
SELECT name, id
FROM ''' + team + '''
WHERE id = ''' + str(player_id)
player_name = pd.read_sql_query(query,con).values[0][0]
player_id = pd.read_sql_query(query,con).values[0][1]
return player_name, player_id
def update_lineup(con, player_name, player_id, cur_pos, poss):
''' Update the lineup table with the active matchups. Clear all later matchup entries to avoid webpage caching when using the back button. '''
cursor = con.cursor()
cursor.execute('''UPDATE lineup SET name = ''' + "'" + player_name + "'" + ''', id = ''' + str(player_id) + ''' WHERE pos = ''' + str(cur_pos) + ''';''')
# poss is a list of all lineup entries to be cleared
for pos in poss:
cursor.execute('''UPDATE lineup SET id = 0 WHERE pos = ''' + str(pos) + ''';''')
con.commit()
cursor.close()
def add_prev_player(con, form, team, prev_pos, poss):
''' Add the player chosen on the previous webpage to the current lineup.'''
player_id = request.form.get(form)
query = '''
SELECT name, id
FROM ''' + team + '''
WHERE id = ''' + str(player_id)
player_name = pd.read_sql_query(query,con).values[0][0]
player_id = pd.read_sql_query(query,con).values[0][1]
update_lineup(con, player_name, player_id, prev_pos, poss)
return player_name, player_id
def clear_lineup(con):
'''Set all lineup ids equal to zero. '''
cursor = con.cursor()
cursor.execute('''UPDATE lineup SET id = 0 ;''')
con.commit()
cursor.close()
def get_lineup(con):
''' Return the current lineup table as a dataframe.'''
query = ''' SELECT name, id FROM lineup ORDER BY pos '''
return pd.read_sql_query(query,con).values
def get_short_lineup(con):
''' Return the current lineup table as a dataframe, ignoring empty entries.'''
query = ''' SELECT name, id
FROM lineup
WHERE id <> 0
ORDER BY pos '''
return pd.read_sql_query(query,con).values
def load_team(con, team):
''' Return the remaining players available to be picked (i.e. those that have not been added to the lineup table).'''
query = ''' SELECT * FROM team_'''+ team + ''' WHERE id NOT IN (SELECT id FROM lineup)'''
team_df = pd.read_sql_query(query,con)
return team_df.rename(columns={'name': team + '_name', 'id': team + '_id', 'eight_skill': team + '_skill'})
def print_figure_init(pj, cj):
''' Produce a bar plot visualization of the predicted match points for all permutations. Produce a swarmplot showing predicted raw score coefficients for each permutation.'''
img = BytesIO()
fig, axs = plt.subplots(figsize=(15,5), ncols=2)
sb.despine(left=True)
sb.countplot(x='r1', data=pj, color='darkred', ax=axs[0])
axs[0].set_xlabel('Predicted winning points margin')
axs[0].set_ylabel('Permutations')
axs[0].set_xticklabels(['{:.0f}'.format(float(t.get_text())) for t in axs[0].get_xticklabels()])
axs[0].xaxis.set_tick_params(rotation=45)
g2 = sb.swarmplot(x=cj['r1'], color = 'darkred', size=10, ax=axs[1])
axs[1].legend(loc='best', fontsize='small')
axs[1].set_xlabel('Average winning probability')
plt.savefig(img, format='png', bbox_inches = "tight")
plt.close()
img.seek(0)
plot_url = base64.b64encode(img.getvalue()).decode('utf8')
return plot_url
def print_figure(pj, cj):
''' Produce a bar plot visualization of the predicted match points for the remaining permutations. Produce a swarmplot showing predicted raw score coefficients for each permutation, with remaining possible permutations highlighted.'''
img = BytesIO()
fig, axs = plt.subplots(figsize=(15,5), ncols=2)
sb.despine(left=True)
sb.countplot(x='r2', data=pj, color='darkred', ax=axs[0])
axs[0].set_xlabel('Predicted winning points margin')
axs[0].set_ylabel('Permutations')
axs[0].set_xticklabels(['{:.0f}'.format(float(t.get_text())) for t in axs[0].get_xticklabels()])
axs[0].xaxis.set_tick_params(rotation=45)
g2 = sb.swarmplot(x=cj['r1'], y=[""]*len(cj), hue=cj['round'], palette = ['lightgray', 'darkred'], size=10, ax=axs[1])
axs[1].legend(loc='best', fontsize='small')
axs[1].set_xlabel('Average winning probability')
plt.savefig(img, format='png', bbox_inches = "tight")
plt.close()
img.seek(0)
plot_url = base64.b64encode(img.getvalue()).decode('utf8')
return plot_url
def get_clause(lineup):
''' Construct the SQL clause which will filter the remaining permutations based on the currently selected matchups. '''
llen = len(lineup)
clause = ''''''
if llen >= 2: # i.e. if the lineup table contains one or more complete rounds
clause = clause + '''SELECT permutation FROM all_perms WHERE id_a = ''' + str(lineup[0][1]) + ''' AND id_b = ''' + str(lineup[1][1])
if llen >= 4: # i.e. if the lineup table contains one or more complete rounds
rnd = int(np.floor(llen/2) + 1) # current round
for i in range(2,rnd):
pos1 = 2*(i-1)
pos2 = 2*(i-1)+1
clause = clause + ''' INTERSECT SELECT permutation FROM all_perms WHERE id_a = ''' + str(lineup[pos1][1]) + ''' AND id_b = ''' + str(lineup[pos2][1])
return clause
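# Illustrative example: for a lineup [('A1', 11), ('B1', 21), ('A2', 12), ('B2', 22)]
# the clause built above is
#   SELECT permutation FROM all_perms WHERE id_a = 11 AND id_b = 21
#   INTERSECT SELECT permutation FROM all_perms WHERE id_a = 12 AND id_b = 22
# so only permutations consistent with every completed matchup survive.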
def get_pick_clause(lineup):
''' Construct the SQL clause which will filter the remaining permutations based on the currently selected matchups. '''
llen = len(lineup)
clause = ''''''
rnd = int(np.floor(llen/2) + 1) # current round
for i in range(1,rnd):
pos1 = 2*(i-1)
pos2 = 2*(i-1)+1
clause = clause + ''' INTERSECT SELECT permutation FROM all_perms WHERE id_a = ''' + str(lineup[pos1][1]) + ''' AND id_b = ''' + str(lineup[pos2][1])
return clause
def calc_stds_coef(con, team_A_df, team_B_df):
''' For each remaining possible permutation, find the average winning probability of each permutation containing each possible player matchup. The best choice for your team to put up is the player who has the lowest standard deviation across their matchups, i.e. regardless of who the opposing team chooses, the average winning probability for the subsequent remaining permutations will be approximately the same. '''
lineup = get_short_lineup(con)
clause = get_pick_clause(lineup)
query = '''
SELECT player_a, id_a, STDDEV_POP(avg_prob) as stddev_prob
FROM (
SELECT a.player_a, a.id_a, a.player_b, AVG(s.probability) as avg_prob
FROM (
SELECT permutation FROM all_perms ''' + clause + ''') as f
JOIN all_perms as a ON f.permutation = a.permutation
JOIN perm_score as s ON a.permutation = s.permutation
GROUP BY a.player_b, a.player_a, a.id_a ) as grouped_scores
GROUP BY player_a, id_a
HAVING id_a NOT IN (SELECT id FROM lineup)
ORDER BY stddev_prob'''
stds = pd.read_sql_query(query,con)
return stds
def calc_min_coef(con, team_A_df, team_B_df):
    ''' For each remaining possible permutation, find the average winning probability of each permutation containing each possible player matchup. For each player on your team, take the worst case (minimum) of these averages over the opposition's possible responses. The best choice for your team to put up is the player whose worst case is highest, i.e. whichever player the opposing team responds with, the remaining permutations keep the highest possible average winning probability. '''
lineup = get_short_lineup(con)
clause = get_pick_clause(lineup)
query = '''
SELECT player_a, id_a, MIN(avg_prob) as min_prob
FROM (
SELECT a.player_a, a.id_a, a.player_b, AVG(s.probability) as avg_prob
FROM (
SELECT permutation FROM all_perms ''' + clause + ''') as f
JOIN all_perms as a ON f.permutation = a.permutation
JOIN perm_score as s ON a.permutation = s.permutation
GROUP BY a.player_b, a.player_a, a.id_a ) as grouped_scores
GROUP BY player_a, id_a
HAVING id_a NOT IN (SELECT id FROM lineup)
ORDER BY min_prob DESC'''
mins = pd.read_sql_query(query,con)
return mins
def calc_coefs(con, team_A_df, player_b, player_b_id):
''' Find the average winning probability for all permutations containing the remaining players available on your team versus the player the opposition has chosen. The best choice for your team to put up is the player who has the highest average winning probability across all permutations where they play against the opposition's chosen player. Return the dataframe ranked in order of highest to lowest average winning probability.'''
lineup = get_short_lineup(con)
clause = get_pick_clause(lineup)
query = '''
SELECT a.id_a, a.player_a, a.player_b, AVG(s.probability) as avg_prob
FROM (
SELECT permutation FROM all_perms ''' + clause + ''') as f
JOIN all_perms as a ON f.permutation = a.permutation
JOIN perm_score as s ON a.permutation = s.permutation
WHERE a.id_b = ''' + str(player_b_id) + '''
GROUP BY a.id_a, a.player_a, a.player_b
ORDER BY avg_prob DESC '''
team_A_df = pd.read_sql_query(query,con)
return team_A_df
def agg_coefs_init(con):
''' Aggregate the winning probabilities from each permutation, returning their average values in a dataframe.'''
lineup = get_short_lineup(con)
clause = get_clause(lineup)
query = '''
SELECT permutation, probability
FROM perm_score
'''
coef = pd.read_sql_query(query,con).values
return pd.DataFrame(coef, columns = ['permutation','r1'])
def agg_scores_init(con):
''' Aggregate the match scores from each permutation, returning their total values in a dataframe.'''
lineup = get_short_lineup(con)
clause = get_clause(lineup)
query = '''
SELECT permutation, SUM(result)
FROM all_perms
GROUP BY permutation
'''
scores = pd.read_sql_query(query,con).values
return pd.DataFrame(scores, columns = ['permutation','r1'])
def agg_coefs(con):
''' Aggregate the winning probabilities from each permutation, returning their average values in a dataframe. Furthermore, perform the aggregation on the remaining active permutations and add this an extra column.'''
lineup = get_short_lineup(con)
clause = get_clause(lineup)
query = '''
SELECT r1.permutation, r1.avg_prob as r1_coef, r2.tot_score as r2_coef, CASE WHEN r2.tot_score IS NULL THEN 'All Predictions' ELSE 'Active Predictions' END as round
FROM (SELECT permutation, probability as avg_prob FROM perm_score) as r1
LEFT JOIN
(SELECT s.permutation, s.probability as tot_score
FROM (''' + clause + '''
) as p
LEFT JOIN perm_score as s ON p.permutation = s.permutation) as r2 ON r1.permutation = r2.permutation
'''
coef_joined = pd.read_sql_query(query,con).values
return pd.DataFrame(coef_joined, columns = ['permutation','r1','r2','round'])
def agg_scores(con):
''' Aggregate the match scores from each permutation, returning their total values in a dataframe. Futhermore, perform the aggregation on the remaining active permutations and add this an extra column.'''
lineup = get_short_lineup(con)
clause = get_clause(lineup)
query = '''
SELECT r1.permutation, r1.sum as r1_score, r2.tot_score as r2_score
FROM (SELECT permutation, SUM(result) as sum FROM all_perms
GROUP BY permutation) as r1
LEFT JOIN
(SELECT a.permutation, SUM(a.result) as tot_score
FROM (''' + clause + '''
) as p
LEFT JOIN all_perms as a ON p.permutation = a.permutation
GROUP BY a.permutation) as r2 ON r1.permutation = r2.permutation
'''
perms_joined = pd.read_sql_query(query,con).values
return | pd.DataFrame(perms_joined, columns = ['permutation','r1','r2']) | pandas.DataFrame |
""" Getting final clusters data
:Author: <NAME> <<EMAIL>>
:Date: 2019-09-30
:License: MIT
"""
# Import Libraries
import pandas as pd
import numpy as np
import random
from collections import Counter
def main():
    # cluster assignment for each user id
clusters_users = pd.read_csv('clusters_final.csv')
    # cluster group names
clusters_users_names = | pd.read_csv('clusters_group_names.csv') | pandas.read_csv |
# Making prediction about diagnostic labels of the subjects. Note that this file needs
# the output of 'fit/gql_ml_pred.py'.
from BD.sim.rnn_label_pred import finding_CV
from actionflow.data.data_process import DataProcess
from actionflow.qrl.gql import GQL
from actionflow.qrl.opt_ml import OptML
from actionflow.util import DLogger
from actionflow.util.helper import load_model
from BD.data.data_reader import DataReader
from BD.util.paths import Paths
import tensorflow as tf
import pandas as pd
def GQL_classify_subjects():
tf.reset_default_graph()
data = DataReader.read_BD()
ids = data['id'].unique().tolist()
dftr = | pd.DataFrame({'id': ids, 'train': 'train'}) | pandas.DataFrame |
"""The classes for specifying and compiling a declarative visualization."""
from __future__ import annotations
import io
import os
import re
import sys
import inspect
import itertools
import textwrap
from collections import abc
from collections.abc import Callable, Generator, Hashable
from typing import Any
import pandas as pd
from pandas import DataFrame, Series, Index
import matplotlib as mpl
from matplotlib.axes import Axes
from matplotlib.artist import Artist
from matplotlib.figure import Figure
from seaborn._marks.base import Mark
from seaborn._stats.base import Stat
from seaborn._core.data import PlotData
from seaborn._core.moves import Move
from seaborn._core.scales import ScaleSpec, Scale
from seaborn._core.subplots import Subplots
from seaborn._core.groupby import GroupBy
from seaborn._core.properties import PROPERTIES, Property, Coordinate
from seaborn._core.typing import DataSource, VariableSpec, OrderSpec
from seaborn._core.rules import categorical_order
from seaborn._compat import set_scale_obj
from seaborn.external.version import Version
from typing import TYPE_CHECKING
if TYPE_CHECKING:
from matplotlib.figure import SubFigure
if sys.version_info >= (3, 8):
from typing import TypedDict
else:
from typing_extensions import TypedDict
# ---- Definitions for internal specs --------------------------------- #
class Layer(TypedDict, total=False):
mark: Mark # TODO allow list?
stat: Stat | None # TODO allow list?
move: Move | list[Move] | None
data: PlotData
source: DataSource
vars: dict[str, VariableSpec]
orient: str
class FacetSpec(TypedDict, total=False):
variables: dict[str, VariableSpec]
structure: dict[str, list[str]]
wrap: int | None
class PairSpec(TypedDict, total=False):
variables: dict[str, VariableSpec]
structure: dict[str, list[str]]
cross: bool
wrap: int | None
# ---- The main interface for declarative plotting -------------------- #
def build_plot_signature(cls):
"""
Decorator function for giving Plot a useful signature.
Currently this mostly saves us some duplicated typing, but we would
like eventually to have a way of registering new semantic properties,
at which point dynamic signature generation would become more important.
"""
sig = inspect.signature(cls)
params = [
inspect.Parameter("args", inspect.Parameter.VAR_POSITIONAL),
inspect.Parameter("data", inspect.Parameter.KEYWORD_ONLY, default=None)
]
params.extend([
inspect.Parameter(name, inspect.Parameter.KEYWORD_ONLY, default=None)
for name in PROPERTIES
])
new_sig = sig.replace(parameters=params)
cls.__signature__ = new_sig
known_properties = textwrap.fill(
", ".join(PROPERTIES), 78, subsequent_indent=" " * 8,
)
if cls.__doc__ is not None: # support python -OO mode
cls.__doc__ = cls.__doc__.format(known_properties=known_properties)
return cls
@build_plot_signature
class Plot:
"""
An interface for declaratively specifying statistical graphics.
Plots are constructed by initializing this class and adding one or more
layers, comprising a `Mark` and optional `Stat` or `Move`. Additionally,
faceting variables or variable pairings may be defined to divide the space
into multiple subplots. The mappings from data values to visual properties
can be parametrized using scales, although the plot will try to infer good
defaults when scales are not explicitly defined.
The constructor accepts a data source (a :class:`pandas.DataFrame` or
dictionary with columnar values) and variable assignments. Variables can be
passed as keys to the data source or directly as data vectors. If multiple
data-containing objects are provided, they will be index-aligned.
The data source and variables defined in the constructor will be used for
all layers in the plot, unless overridden or disabled when adding a layer.
The following variables can be defined in the constructor:
{known_properties}
The `data`, `x`, and `y` variables can be passed as positional arguments or
using keywords. Whether the first positional argument is interpreted as a
data source or `x` variable depends on its type.
The methods of this class return a copy of the instance; use chaining to
build up a plot through multiple calls. Methods can be called in any order.
Most methods only add information to the plot spec; no actual processing
happens until the plot is shown or saved. It is also possible to compile
the plot without rendering it to access the lower-level representation.
"""
# TODO use TypedDict throughout?
_data: PlotData
_layers: list[Layer]
_scales: dict[str, ScaleSpec]
_subplot_spec: dict[str, Any] # TODO values type
_facet_spec: FacetSpec
_pair_spec: PairSpec
def __init__(
self,
*args: DataSource | VariableSpec,
data: DataSource = None,
**variables: VariableSpec,
):
if args:
data, variables = self._resolve_positionals(args, data, variables)
unknown = [x for x in variables if x not in PROPERTIES]
if unknown:
err = f"Plot() got unexpected keyword argument(s): {', '.join(unknown)}"
raise TypeError(err)
self._data = PlotData(data, variables)
self._layers = []
self._scales = {}
self._subplot_spec = {}
self._facet_spec = {}
self._pair_spec = {}
self._target = None
def _resolve_positionals(
self,
args: tuple[DataSource | VariableSpec, ...],
data: DataSource,
variables: dict[str, VariableSpec],
) -> tuple[DataSource, dict[str, VariableSpec]]:
"""Handle positional arguments, which may contain data / x / y."""
if len(args) > 3:
err = "Plot() accepts no more than 3 positional arguments (data, x, y)."
raise TypeError(err)
# TODO need some clearer way to differentiate data / vector here
# (There might be an abstract DataFrame class to use here?)
if isinstance(args[0], (abc.Mapping, pd.DataFrame)):
if data is not None:
raise TypeError("`data` given by both name and position.")
data, args = args[0], args[1:]
if len(args) == 2:
x, y = args
elif len(args) == 1:
x, y = *args, None
else:
x = y = None
for name, var in zip("yx", (y, x)):
if var is not None:
if name in variables:
raise TypeError(f"`{name}` given by both name and position.")
# Keep coordinates at the front of the variables dict
variables = {name: var, **variables}
return data, variables
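        # For example (illustrative): Plot(df, "a", "b") resolves to data=df and
        # variables == {"x": "a", "y": "b"}, while Plot("a", "b") treats the first
        # positional argument as `x` because it is not a mapping or DataFrame.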
def __add__(self, other):
if isinstance(other, Mark) or isinstance(other, Stat):
raise TypeError("Sorry, this isn't ggplot! Perhaps try Plot.add?")
other_type = other.__class__.__name__
raise TypeError(f"Unsupported operand type(s) for +: 'Plot' and '{other_type}")
def _repr_png_(self) -> tuple[bytes, dict[str, float]]:
return self.plot()._repr_png_()
# TODO _repr_svg_?
def _clone(self) -> Plot:
"""Generate a new object with the same information as the current spec."""
new = Plot()
# TODO any way to enforce that data does not get mutated?
new._data = self._data
new._layers.extend(self._layers)
new._scales.update(self._scales)
new._subplot_spec.update(self._subplot_spec)
new._facet_spec.update(self._facet_spec)
new._pair_spec.update(self._pair_spec)
new._target = self._target
return new
@property
def _variables(self) -> list[str]:
variables = (
list(self._data.frame)
+ list(self._pair_spec.get("variables", []))
+ list(self._facet_spec.get("variables", []))
)
for layer in self._layers:
variables.extend(c for c in layer["vars"] if c not in variables)
return variables
def on(self, target: Axes | SubFigure | Figure) -> Plot:
"""
Draw the plot into an existing Matplotlib object.
Parameters
----------
target : Axes, SubFigure, or Figure
Matplotlib object to use. Passing :class:`matplotlib.axes.Axes` will add
artists without otherwise modifying the figure. Otherwise, subplots will be
created within the space of the given :class:`matplotlib.figure.Figure` or
:class:`matplotlib.figure.SubFigure`.
"""
# TODO alternate name: target?
accepted_types: tuple # Allow tuple of various length
if hasattr(mpl.figure, "SubFigure"): # Added in mpl 3.4
accepted_types = (
mpl.axes.Axes, mpl.figure.SubFigure, mpl.figure.Figure
)
accepted_types_str = (
f"{mpl.axes.Axes}, {mpl.figure.SubFigure}, or {mpl.figure.Figure}"
)
else:
accepted_types = mpl.axes.Axes, mpl.figure.Figure
accepted_types_str = f"{mpl.axes.Axes} or {mpl.figure.Figure}"
if not isinstance(target, accepted_types):
err = (
f"The `Plot.on` target must be an instance of {accepted_types_str}. "
f"You passed an instance of {target.__class__} instead."
)
raise TypeError(err)
new = self._clone()
new._target = target
return new
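# Example (illustrative; assumes `df` with "x"/"y" columns and a `Dot` mark):
#
#   import matplotlib.pyplot as plt
#   fig = plt.figure()
#   left, right = fig.subfigures(1, 2)  # SubFigure requires matplotlib>=3.4
#   Plot(df, "x", "y").add(Dot()).on(left).plot()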
def add(
self,
mark: Mark,
stat: Stat | None = None,
move: Move | None = None, # TODO or list[Move]
*,
orient: str | None = None,
data: DataSource = None,
**variables: VariableSpec,
) -> Plot:
"""
Define a layer of the visualization.
This is the main method for specifying how the data should be visualized.
It can be called multiple times with different arguments to define
a plot with multiple layers.
Parameters
----------
mark : :class:`seaborn.objects.Mark`
The visual representation of the data to use in this layer.
stat : :class:`seaborn.objects.Stat`
A transformation applied to the data before plotting.
move : :class:`seaborn.objects.Move`
Additional transformation(s) to handle over-plotting.
orient : "x", "y", "v", or "h"
The orientation of the mark, which affects how the stat is computed.
Typically corresponds to the axis that defines groups for aggregation.
The "v" (vertical) and "h" (horizontal) options are synonyms for "x" / "y",
but may be more intuitive with some marks. When not provided, an
orientation will be inferred from characteristics of the data and scales.
data : DataFrame or dict
Data source to override the global source provided in the constructor.
variables : data vectors or identifiers
Additional layer-specific variables, including variables that will be
passed directly to the stat without scaling.
"""
if not isinstance(mark, Mark):
msg = f"mark must be a Mark instance, not {type(mark)!r}."
raise TypeError(msg)
if stat is not None and not isinstance(stat, Stat):
msg = f"stat must be a Stat instance, not {type(stat)!r}."
raise TypeError(msg)
# TODO decide how to allow Mark to have default Stat/Move
# if stat is None and hasattr(mark, "default_stat"):
# stat = mark.default_stat()
# TODO it doesn't work to supply scalars to variables, but that would be nice
# TODO accept arbitrary variables defined by the stat (/move?) here
# (but not in the Plot constructor)
# Should stat variables ever go in the constructor, or just in the add call?
new = self._clone()
new._layers.append({
"mark": mark,
"stat": stat,
"move": move,
"vars": variables,
"source": data,
"orient": {"v": "x", "h": "y"}.get(orient, orient), # type: ignore
})
return new
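# Example of layering (illustrative; `Bar`, `Dot`, `Hist`, and `Dodge` stand in
# for Mark/Stat/Move classes, and `df` / `df2` are assumed DataFrames):
#
#   (
#       Plot(df, x="flavor", color="store")
#       .add(Bar(), Hist(), Dodge())      # mark + stat + move for this layer
#       .add(Dot(), y="price", data=df2)  # second layer with its own data/vars
#   )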
def pair(
self,
x: list[Hashable] | Index[Hashable] | None = None,
y: list[Hashable] | Index[Hashable] | None = None,
wrap: int | None = None,
cross: bool = True,
# TODO other existing PairGrid things like corner?
# TODO transpose, so that e.g. multiple y axes go across the columns
) -> Plot:
"""
Produce subplots with distinct `x` and/or `y` variables.
Parameters
----------
x, y : sequence(s) of data identifiers
Variables that will define the grid of subplots.
wrap : int
Maximum height/width of the grid, with additional subplots "wrapped"
on the other dimension. Requires that only one of `x` or `y` is set here.
cross : bool
When True, define a two-dimensional grid using the Cartesian product of `x`
and `y`. Otherwise, define a one-dimensional grid by pairing `x` and `y`
entries by position.
"""
# TODO Problems to solve:
#
# - It is unclear how to handle the diagonal plots that PairGrid offers
#
# - Implementing this will require lots of downstream changes in figure setup,
# and especially the axis scaling, which will need to be pair specific
# TODO lists of vectors currently work, but I'm not sure where best to test
# Will need to update the signature typing to keep them
# TODO is it weird to call .pair() to create univariate plots?
# i.e. Plot(data).pair(x=[...]). The basic logic is fine.
# But maybe a different verb (e.g. Plot.spread) would be more clear?
# Then Plot(data).pair(x=[...]) would show the given x vars vs all.
# TODO would like to add transpose=True, which would then draw
# Plot(x=...).pair(y=[...]) across the rows
# This may also be possible by setting `wrap=1`, although currently the axes
# are shared and the interior labels are disabled (this is a bug either way)
pair_spec: PairSpec = {}
if x is None and y is None:
# Default to using all columns in the input source data, aside from
# those that were assigned to a variable in the constructor
# TODO Do we want to allow additional filtering by variable type?
# (Possibly even default to using only numeric columns)
if self._data.source_data is None:
err = "You must pass `data` in the constructor to use default pairing."
raise RuntimeError(err)
all_unused_columns = [
key for key in self._data.source_data
if key not in self._data.names.values()
]
if "x" not in self._data:
x = all_unused_columns
if "y" not in self._data:
y = all_unused_columns
axes = {"x": [] if x is None else x, "y": [] if y is None else y}
for axis, arg in axes.items():
if isinstance(arg, (str, int)):
err = f"You must pass a sequence of variable keys to `{axis}`"
raise TypeError(err)
pair_spec["variables"] = {}
pair_spec["structure"] = {}
for axis in "xy":
keys = []
for i, col in enumerate(axes[axis]):
key = f"{axis}{i}"
keys.append(key)
pair_spec["variables"][key] = col
if keys:
pair_spec["structure"][axis] = keys
# TODO raise here if cross is False and len(x) != len(y)?
pair_spec["cross"] = cross
pair_spec["wrap"] = wrap
new = self._clone()
new._pair_spec.update(pair_spec)
return new
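# Example (illustrative; column names are assumed):
#
#   Plot(df, y="outcome").pair(x=["age", "weight", "height"], wrap=2)
#   Plot(df).pair(x=["a", "b"], y=["c", "d"], cross=False)  # pair by position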
def facet(
self,
# TODO require kwargs?
col: VariableSpec = None,
row: VariableSpec = None,
order: OrderSpec | dict[str, OrderSpec] = None,
wrap: int | None = None,
) -> Plot:
"""
Produce subplots with conditional subsets of the data.
Parameters
----------
col, row : data vectors or identifiers
Variables used to define subsets along the columns and/or rows of the grid.
Can be references to the global data source passed in the constructor.
order : list of strings, or dict with dimensional keys
Define the order of the faceting variables.
wrap : int
Maximum height/width of the grid, with additional subplots "wrapped"
on the other dimension. Requires that only one of `col` or `row` is set here.
"""
variables = {}
if col is not None:
variables["col"] = col
if row is not None:
variables["row"] = row
structure = {}
if isinstance(order, dict):
for dim in ["col", "row"]:
dim_order = order.get(dim)
if dim_order is not None:
structure[dim] = list(dim_order)
elif order is not None:
if col is not None and row is not None:
err = " ".join([
"When faceting on both col= and row=, passing `order` as a list"
"is ambiguous. Use a dict with 'col' and/or 'row' keys instead."
])
raise RuntimeError(err)
elif col is not None:
structure["col"] = list(order)
elif row is not None:
structure["row"] = list(order)
spec: FacetSpec = {
"variables": variables,
"structure": structure,
"wrap": wrap,
}
new = self._clone()
new._facet_spec.update(spec)
return new
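# Example (illustrative; column names are assumed):
#
#   Plot(df, x="total_bill", y="tip").facet(col="time", row="sex")
#   Plot(df, x="total_bill").facet(col="day", order=["Thur", "Fri", "Sat", "Sun"])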
# TODO def twin()?
def scale(self, **scales: ScaleSpec) -> Plot:
"""
Control mappings from data units to visual properties.
Keywords correspond to variables defined in the plot, including coordinate
variables (`x`, `y`) and semantic variables (`color`, `pointsize`, etc.).
A number of "magic" arguments are accepted, including:
- The name of a transform (e.g., `"log"`, `"sqrt"`)
- The name of a palette (e.g., `"viridis"`, `"muted"`)
- A tuple of values, defining the output range (e.g. `(1, 5)`)
- A dict, implying a :class:`Nominal` scale (e.g. `{"a": .2, "b": .5}`)
- A list of values, implying a :class:`Nominal` scale (e.g. `["b", "r"]`)
For more explicit control, pass a scale spec object such as :class:`Continuous`
or :class:`Nominal`. Or use `None` to use an "identity" scale, which treats data
values as literally encoding visual properties.
"""
new = self._clone()
new._scales.update(**scales)
return new
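# Example of the "magic" arguments (illustrative only; the variables must be
# assigned elsewhere in the plot for these scales to take effect):
#
#   Plot(df, x="x", y="y", color="z", pointsize="w").scale(
#       x="log",            # transform by name
#       color="viridis",    # palette by name
#       pointsize=(2, 10),  # tuple -> output range
#   )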
def configure(
self,
figsize: tuple[float, float] | None = None,
sharex: bool | str | None = None,
sharey: bool | str | None = None,
) -> Plot:
"""
Control the figure size and layout.
Parameters
----------
figsize : (width, height)
Size of the resulting figure, in inches.
sharex, sharey : bool, "row", or "col"
Whether axis limits should be shared across subplots. Boolean values apply
across the entire grid, whereas `"row"` or `"col"` have a smaller scope.
Shared axes will have tick labels disabled.
"""
# TODO add an "auto" mode for figsize that roughly scales with the rcParams
# figsize (so that works), but expands to prevent subplots from being squished
# Also should we have height=, aspect=, exclusive with figsize? Or working
# with figsize when only one is defined?
new = self._clone()
# TODO this is a hack; make a proper figure spec object
new._figsize = figsize # type: ignore
if sharex is not None:
new._subplot_spec["sharex"] = sharex
if sharey is not None:
new._subplot_spec["sharey"] = sharey
return new
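# Example (illustrative; column names are assumed):
#
#   Plot(df, x="x", y="y").facet(col="group").configure(
#       figsize=(8, 4), sharey=False,
#   )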
# TODO def legend (ugh)
def theme(self) -> Plot:
"""
Control the default appearance of elements in the plot.
TODO
"""
# TODO Plot-specific themes using the seaborn theming system
raise NotImplementedError()
new = self._clone()
return new
# TODO decorate? (or similar, for various texts) alt names: label?
def save(self, fname, **kwargs) -> Plot:
"""
Render the plot and write it to a buffer or file on disk.
Parameters
----------
fname : str, path, or buffer
Location on disk to save the figure, or a buffer to write into.
Other keyword arguments are passed to :meth:`matplotlib.figure.Figure.savefig`.
"""
# TODO expose important keyword arguments in our signature?
self.plot().save(fname, **kwargs)
return self
def plot(self, pyplot=False) -> Plotter:
"""
Compile the plot and return the :class:`Plotter` engine.
"""
# TODO if we have _target object, pyplot should be determined by whether it
# is hooked into the pyplot state machine (how do we check?)
plotter = Plotter(pyplot=pyplot)
common, layers = plotter._extract_data(self)
plotter._setup_figure(self, common, layers)
plotter._transform_coords(self, common, layers)
plotter._compute_stats(self, layers)
plotter._setup_scales(self, layers)
# TODO Remove these after updating other methods
# ---- Maybe have debug= param that attaches these when True?
plotter._data = common
plotter._layers = layers
for layer in layers:
plotter._plot_layer(self, layer)
plotter._make_legend()
# TODO this should be configurable
if not plotter._figure.get_constrained_layout():
plotter._figure.set_tight_layout(True)
return plotter
def show(self, **kwargs) -> None:
"""
Render and display the plot.
"""
# TODO make pyplot configurable at the class level, and when not using,
# import IPython.display and call on self to populate cell output?
# Keep an eye on whether matplotlib implements "attaching" an existing
# figure to pyplot: https://github.com/matplotlib/matplotlib/pull/14024
self.plot(pyplot=True).show(**kwargs)
# ---- The plot compilation engine ---------------------------------------------- #
class Plotter:
"""
Engine for compiling a :class:`Plot` spec into a Matplotlib figure.
This class is not intended to be instantiated directly by users.
"""
# TODO decide if we ever want these (Plot.plot(debug=True))?
_data: PlotData
_layers: list[Layer]
_figure: Figure
def __init__(self, pyplot=False):
self.pyplot = pyplot
self._legend_contents: list[
tuple[tuple[str, str | int], list[Artist], list[str]]
] = []
self._scales: dict[str, Scale] = {}
def save(self, fname, **kwargs) -> Plotter:
kwargs.setdefault("dpi", 96)
self._figure.savefig(os.path.expanduser(fname), **kwargs)
return self
def show(self, **kwargs) -> None:
# TODO if we did not create the Plotter with pyplot, is it possible to do this?
# If not we should clearly raise.
import matplotlib.pyplot as plt
plt.show(**kwargs)
# TODO API for accessing the underlying matplotlib objects
# TODO what else is useful in the public API for this class?
def _repr_png_(self) -> tuple[bytes, dict[str, float]]:
# TODO better to do this through a Jupyter hook? e.g.
# ipy = IPython.core.formatters.get_ipython()
# fmt = ipy.display_formatter.formatters["text/html"]
# fmt.for_type(Plot, ...)
# Would like to have a svg option too, not sure how to make that flexible
# TODO use matplotlib backend directly instead of going through savefig?
# TODO perhaps have self.show() flip a switch to disable this, so that
# user does not end up with two versions of the figure in the output
# TODO use bbox_inches="tight" like the inline backend?
# pro: better results, con: (sometimes) confusing results
# Better solution would be to default (with option to change)
# to using constrained/tight layout.
# TODO need to decide what the right default behavior here is:
# - Use dpi=72 to match default InlineBackend figure size?
# - Accept a generic "scaling" somewhere and scale DPI from that,
# either with 1x -> 72 or 1x -> 96 and the default scaling be .75?
# - Listen to rcParams? InlineBackend behavior makes that so complicated :(
# - Do we ever want to *not* use retina mode at this point?
from PIL import Image
dpi = 96
buffer = io.BytesIO()
self._figure.savefig(buffer, dpi=dpi * 2, format="png", bbox_inches="tight")
data = buffer.getvalue()
scaling = .85 / 2
# w, h = self._figure.get_size_inches()
w, h = Image.open(buffer).size
metadata = {"width": w * scaling, "height": h * scaling}
return data, metadata
def _extract_data(self, p: Plot) -> tuple[PlotData, list[Layer]]:
common_data = (
p._data
.join(None, p._facet_spec.get("variables"))
.join(None, p._pair_spec.get("variables"))
)
layers: list[Layer] = []
for layer in p._layers:
spec = layer.copy()
spec["data"] = common_data.join(layer.get("source"), layer.get("vars"))
layers.append(spec)
return common_data, layers
def _setup_figure(self, p: Plot, common: PlotData, layers: list[Layer]) -> None:
# --- Parsing the faceting/pairing parameterization to specify figure grid
# TODO use context manager with theme that has been set
# TODO (maybe wrap THIS function with context manager; would be cleaner)
subplot_spec = p._subplot_spec.copy()
facet_spec = p._facet_spec.copy()
pair_spec = p._pair_spec.copy()
for dim in ["col", "row"]:
if dim in common.frame and dim not in facet_spec["structure"]:
order = categorical_order(common.frame[dim])
facet_spec["structure"][dim] = order
self._subplots = subplots = Subplots(subplot_spec, facet_spec, pair_spec)
# --- Figure initialization
figure_kws = {"figsize": getattr(p, "_figsize", None)} # TODO fix
self._figure = subplots.init_figure(
pair_spec, self.pyplot, figure_kws, p._target,
)
# --- Figure annotation
for sub in subplots:
ax = sub["ax"]
for axis in "xy":
axis_key = sub[axis]
# TODO Should we make it possible to use only one x/y label for
# all rows/columns in a faceted plot? Maybe using sub{axis}label,
# although the alignments of the labels from that method leaves
# something to be desired (in terms of how it defines 'centered').
names = [
common.names.get(axis_key),
*(layer["data"].names.get(axis_key) for layer in layers)
]
label = next((name for name in names if name is not None), None)
ax.set(**{f"{axis}label": label})
# TODO there should be some override (in Plot.configure?) so that
# tick labels can be shown on interior shared axes
axis_obj = getattr(ax, f"{axis}axis")
visible_side = {"x": "bottom", "y": "left"}.get(axis)
show_axis_label = (
sub[visible_side]
or axis in p._pair_spec and bool(p._pair_spec.get("wrap"))
or not p._pair_spec.get("cross", True)
)
axis_obj.get_label().set_visible(show_axis_label)
show_tick_labels = (
show_axis_label
or subplot_spec.get(f"share{axis}") not in (
True, "all", {"x": "col", "y": "row"}[axis]
)
)
for group in ("major", "minor"):
for t in getattr(axis_obj, f"get_{group}ticklabels")():
t.set_visible(show_tick_labels)
# TODO title template should be configurable
# ---- Also we want right-side titles for row facets in most cases?
# ---- Or wrapped? That can get annoying too.
# TODO should configure() accept a title= kwarg (for single subplot plots)?
# Let's have what we currently call "margin titles" but properly using the
# ax.set_title interface (see my gist)
title_parts = []
for dim in ["row", "col"]:
if sub[dim] is not None:
name = common.names.get(dim) # TODO None = val looks bad
title_parts.append(f"{name} = {sub[dim]}")
has_col = sub["col"] is not None
has_row = sub["row"] is not None
show_title = (
has_col and has_row
or (has_col or has_row) and p._facet_spec.get("wrap")
or (has_col and sub["top"])
# TODO or has_row and sub["right"] and <right titles>
or has_row # TODO and not <right titles>
)
if title_parts:
title = " | ".join(title_parts)
title_text = ax.set_title(title)
title_text.set_visible(show_title)
def _transform_coords(self, p: Plot, common: PlotData, layers: list[Layer]) -> None:
for var in p._variables:
# Parse name to identify variable (x, y, xmin, etc.) and axis (x/y)
# TODO should we have xmin0/xmin1 or x0min/x1min?
m = re.match(r"^(?P<prefix>(?P<axis>[x|y])\d*).*", var)
if m is None:
continue
prefix = m["prefix"]
axis = m["axis"]
share_state = self._subplots.subplot_spec[f"share{axis}"]
# Concatenate layers, using only the relevant coordinate and faceting vars,
# This is unnecessarily wasteful, as layer data will often be redundant.
# But figuring out the minimal amount we need is more complicated.
cols = [var, "col", "row"]
# TODO basically copied from _setup_scales, and very clumsy
layer_values = [common.frame.filter(cols)]
for layer in layers:
if layer["data"].frame is None:
for df in layer["data"].frames.values():
layer_values.append(df.filter(cols))
else:
layer_values.append(layer["data"].frame.filter(cols))
if layer_values:
var_df = pd.concat(layer_values, ignore_index=True)
else:
var_df = pd.DataFrame(columns=cols)
prop = Coordinate(axis)
scale_spec = self._get_scale(p, prefix, prop, var_df[var])
# Shared categorical axes are broken on matplotlib<3.4.0.
# https://github.com/matplotlib/matplotlib/pull/18308
# This only affects us when sharing *paired* axes. This is a novel/niche
# behavior, so we will raise rather than hack together a workaround.
if Version(mpl.__version__) < Version("3.4.0"):
from seaborn._core.scales import Nominal
paired_axis = axis in p._pair_spec
cat_scale = isinstance(scale_spec, Nominal)
ok_dim = {"x": "col", "y": "row"}[axis]
shared_axes = share_state not in [False, "none", ok_dim]
if paired_axis and cat_scale and shared_axes:
err = "Sharing paired categorical axes requires matplotlib>=3.4.0"
raise RuntimeError(err)
# Now loop through each subplot, deriving the relevant seed data to setup
# the scale (so that axis units / categories are initialized properly)
# And then scale the data in each layer.
subplots = [view for view in self._subplots if view[axis] == prefix]
# Setup the scale on all of the data and plug it into self._scales
# We do this because by the time we do self._setup_scales, coordinate data
# will have been converted to floats already, so scale inference fails
self._scales[var] = scale_spec.setup(var_df[var], prop)
# Set up an empty series to receive the transformed values.
# We need this to handle piecemeal transforms of categories -> floats.
transformed_data = []
for layer in layers:
index = layer["data"].frame.index
transformed_data.append(pd.Series(dtype=float, index=index, name=var))
for view in subplots:
axis_obj = getattr(view["ax"], f"{axis}axis")
if share_state in [True, "all"]:
# The all-shared case is easiest, every subplot sees all the data
seed_values = var_df[var]
else:
# Otherwise, we need to setup separate scales for different subplots
if share_state in [False, "none"]:
# Fully independent axes are also easy: use each subplot's data
idx = self._get_subplot_index(var_df, view)
elif share_state in var_df:
# Sharing within row/col is more complicated
use_rows = var_df[share_state] == view[share_state]
idx = var_df.index[use_rows]
else:
# This configuration doesn't make much sense, but it's fine
idx = var_df.index
seed_values = var_df.loc[idx, var]
scale = scale_spec.setup(seed_values, prop, axis=axis_obj)
for layer, new_series in zip(layers, transformed_data):
layer_df = layer["data"].frame
if var in layer_df:
idx = self._get_subplot_index(layer_df, view)
new_series.loc[idx] = scale(layer_df.loc[idx, var])
# TODO need decision about whether to do this or modify axis transform
set_scale_obj(view["ax"], axis, scale.matplotlib_scale)
# Now that the transformed data series are complete, update the layer data
for layer, new_series in zip(layers, transformed_data):
layer_df = layer["data"].frame
if var in layer_df:
layer_df[var] = new_series
def _compute_stats(self, spec: Plot, layers: list[Layer]) -> None:
grouping_vars = [v for v in PROPERTIES if v not in "xy"]
grouping_vars += ["col", "row", "group"]
pair_vars = spec._pair_spec.get("structure", {})
for layer in layers:
data = layer["data"]
mark = layer["mark"]
stat = layer["stat"]
if stat is None:
continue
iter_axes = itertools.product(*[
pair_vars.get(axis, [axis]) for axis in "xy"
])
old = data.frame
if pair_vars:
data.frames = {}
data.frame = data.frame.iloc[:0] # TODO to simplify typing
for coord_vars in iter_axes:
pairings = "xy", coord_vars
df = old.copy()
scales = self._scales.copy()
for axis, var in zip(*pairings):
if axis != var:
df = df.rename(columns={var: axis})
drop_cols = [x for x in df if re.match(rf"{axis}\d+", x)]
df = df.drop(drop_cols, axis=1)
scales[axis] = scales[var]
orient = layer["orient"] or mark._infer_orient(scales)
if stat.group_by_orient:
grouper = [orient, *grouping_vars]
else:
grouper = grouping_vars
groupby = GroupBy(grouper)
res = stat(df, groupby, orient, scales)
if pair_vars:
data.frames[coord_vars] = res
else:
data.frame = res
def _get_scale(
self, spec: Plot, var: str, prop: Property, values: Series
) -> ScaleSpec:
if var in spec._scales:
arg = spec._scales[var]
if arg is None or isinstance(arg, ScaleSpec):
scale = arg
else:
scale = prop.infer_scale(arg, values)
else:
scale = prop.default_scale(values)
return scale
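# Resolution order, illustrated for `Plot(df, x="x").scale(x="log")` (sketch):
#   1. an explicit ScaleSpec (or None) passed to Plot.scale is used as-is
#   2. a "magic" argument like "log" is interpreted via prop.infer_scale
#   3. with no entry in Plot._scales, prop.default_scale picks one from the data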
def _setup_scales(self, p: Plot, layers: list[Layer]) -> None:
# Identify all of the variables that will be used at some point in the plot
variables = set()
for layer in layers:
if layer["data"].frame.empty and layer["data"].frames:
for df in layer["data"].frames.values():
variables.update(df.columns)
else:
variables.update(layer["data"].frame.columns)
for var in variables:
if var in self._scales:
# Scales for coordinate variables added in _transform_coords
continue
# Get the data for all the distinct appearances of this variable.
parts = []
for layer in layers:
if layer["data"].frame.empty and layer["data"].frames:
for df in layer["data"].frames.values():
parts.append(df.get(var))
else:
parts.append(layer["data"].frame.get(var))
var_values = pd.concat(
parts, axis=0, join="inner", ignore_index=True
).rename(var)
# Determine whether this is a coordinate variable
# (i.e., x/y, paired x/y, or derivative such as xmax)
m = re.match(r"^(?P<prefix>(?P<axis>x|y)\d*).*", var)
if m is None:
axis = None
else:
var = m["prefix"]
axis = m["axis"]
prop = PROPERTIES.get(var if axis is None else axis, Property())
scale = self._get_scale(p, var, prop, var_values)
# Initialize the data-dependent parameters of the scale
# Note that this returns a copy and does not mutate the original
# This dictionary is used by the semantic mappings
if scale is None:
# TODO what is the cleanest way to implement identity scale?
# We don't really need a ScaleSpec, and Identity() will be
# overloaded anyway (but maybe a general Identity object
# that can be used as Scale/Mark/Stat/Move?)
# Note that this may not be the right spacer to use
# (but that is only relevant for coordinates where identity scale
# doesn't make sense or is poorly defined; should it mean "pixels"?)
self._scales[var] = Scale([], lambda x: x, None, "identity", None)
else:
self._scales[var] = scale.setup(var_values, prop)
def _plot_layer(self, p: Plot, layer: Layer) -> None:
data = layer["data"]
mark = layer["mark"]
move = layer["move"]
default_grouping_vars = ["col", "row", "group"] # TODO where best to define?
grouping_properties = [v for v in PROPERTIES if v not in "xy"]
pair_variables = p._pair_spec.get("structure", {})
for subplots, df, scales in self._generate_pairings(data, pair_variables):
orient = layer["orient"] or mark._infer_orient(scales)
def get_order(var):
# Ignore order for x/y: they have been scaled to numeric indices,
# so any original order is no longer valid. The default ordering rules
# (sorted unique numbers) will correctly reconstruct the intended order.
# TODO This is tricky, make sure we add some tests for this
if var not in "xy" and var in scales:
return scales[var].order
if "width" in mark._mappable_props:
width = mark._resolve(df, "width", None)
else:
width = df.get("width", 0.8) # TODO what default
if orient in df:
df["width"] = width * scales[orient].spacing(df[orient])
if "baseline" in mark._mappable_props:
# TODO what marks should have this?
# If we can set baseline with, e.g., Bar(), then the
# "other" (e.g. y for x oriented bars) parameterization
# is somewhat ambiguous.
baseline = mark._resolve(df, "baseline", None)
else:
# TODO unlike width, we might not want to add baseline to data
# if the mark doesn't use it. Practically, there is a concern about
# Mark abstraction like Area / Ribbon
baseline = df.get("baseline", 0)
df["baseline"] = baseline
if move is not None:
moves = move if isinstance(move, list) else [move]
for move in moves:
move_groupers = [
orient,
*(getattr(move, "by", None) or grouping_properties),
*default_grouping_vars,
]
order = {var: get_order(var) for var in move_groupers}
groupby = GroupBy(order)
df = move(df, groupby, orient)
df = self._unscale_coords(subplots, df, orient)
grouping_vars = mark._grouping_props + default_grouping_vars
split_generator = self._setup_split_generator(
grouping_vars, df, subplots
)
mark._plot(split_generator, scales, orient)
# TODO is this the right place for this?
for view in self._subplots:
view["ax"].autoscale_view()
self._update_legend_contents(mark, data, scales)
def _scale_coords(self, subplots: list[dict], df: DataFrame) -> DataFrame:
# TODO stricter type on subplots
coord_cols = [c for c in df if re.match(r"^[xy]\D*$", c)]
out_df = (
df
.copy(deep=False)
.drop(coord_cols, axis=1)
.reindex(df.columns, axis=1) # So unscaled columns retain their place
)
for view in subplots:
view_df = self._filter_subplot_data(df, view)
axes_df = view_df[coord_cols]
with pd.option_context("mode.use_inf_as_null", True):
import calendar
from datetime import date, datetime, time
import locale
import unicodedata
import numpy as np
import pytest
import pytz
from pandas._libs.tslibs.timezones import maybe_get_tz
from pandas.core.dtypes.common import is_integer_dtype, is_list_like
import pandas as pd
from pandas import (
DataFrame, DatetimeIndex, Index, PeriodIndex, Series, TimedeltaIndex,
bdate_range, date_range, period_range, timedelta_range)
from pandas.core.arrays import PeriodArray
import pandas.core.common as com
import pandas.util.testing as tm
from pandas.util.testing import assert_series_equal
class TestSeriesDatetimeValues:
def test_dt_namespace_accessor(self):
# GH 7207, 11128
# test .dt namespace accessor
ok_for_period = PeriodArray._datetimelike_ops
ok_for_period_methods = ['strftime', 'to_timestamp', 'asfreq']
ok_for_dt = DatetimeIndex._datetimelike_ops
ok_for_dt_methods = ['to_period', 'to_pydatetime', 'tz_localize',
'tz_convert', 'normalize', 'strftime', 'round',
'floor', 'ceil', 'day_name', 'month_name']
ok_for_td = TimedeltaIndex._datetimelike_ops
ok_for_td_methods = ['components', 'to_pytimedelta', 'total_seconds',
'round', 'floor', 'ceil']
def get_expected(s, name):
result = getattr(Index(s._values), prop)
if isinstance(result, np.ndarray):
if is_integer_dtype(result):
result = result.astype('int64')
elif not is_list_like(result):
return result
return Series(result, index=s.index, name=s.name)
def compare(s, name):
a = getattr(s.dt, prop)
b = get_expected(s, prop)
if not (is_list_like(a) and is_list_like(b)):
assert a == b
else:
tm.assert_series_equal(a, b)
# datetimeindex
cases = [Series(date_range('20130101', periods=5), name='xxx'),
Series(date_range('20130101', periods=5, freq='s'),
name='xxx'),
Series(date_range('20130101 00:00:00', periods=5, freq='ms'),
name='xxx')]
for s in cases:
for prop in ok_for_dt:
# we test freq below
if prop != 'freq':
compare(s, prop)
for prop in ok_for_dt_methods:
getattr(s.dt, prop)
result = s.dt.to_pydatetime()
assert isinstance(result, np.ndarray)
assert result.dtype == object
result = s.dt.tz_localize('US/Eastern')
exp_values = DatetimeIndex(s.values).tz_localize('US/Eastern')
expected = Series(exp_values, index=s.index, name='xxx')
tm.assert_series_equal(result, expected)
tz_result = result.dt.tz
assert str(tz_result) == 'US/Eastern'
freq_result = s.dt.freq
assert freq_result == DatetimeIndex(s.values, freq='infer').freq
# let's localize, then convert
result = s.dt.tz_localize('UTC').dt.tz_convert('US/Eastern')
exp_values = (DatetimeIndex(s.values).tz_localize('UTC')
.tz_convert('US/Eastern'))
expected = Series(exp_values, index=s.index, name='xxx')
tm.assert_series_equal(result, expected)
# datetimeindex with tz
s = Series(date_range('20130101', periods=5, tz='US/Eastern'),
name='xxx')
for prop in ok_for_dt:
# we test freq below
if prop != 'freq':
compare(s, prop)
for prop in ok_for_dt_methods:
getattr(s.dt, prop)
result = s.dt.to_pydatetime()
assert isinstance(result, np.ndarray)
assert result.dtype == object
result = s.dt.tz_convert('CET')
expected = Series(s._values.tz_convert('CET'),
index=s.index, name='xxx')
tm.assert_series_equal(result, expected)
tz_result = result.dt.tz
assert str(tz_result) == 'CET'
freq_result = s.dt.freq
assert freq_result == DatetimeIndex(s.values, freq='infer').freq
# timedelta index
cases = [Series(timedelta_range('1 day', periods=5),
index=list('abcde'), name='xxx'),
Series(timedelta_range('1 day 01:23:45', periods=5,
freq='s'), name='xxx'),
Series(timedelta_range('2 days 01:23:45.012345', periods=5,
freq='ms'), name='xxx')]
for s in cases:
for prop in ok_for_td:
# we test freq below
if prop != 'freq':
compare(s, prop)
for prop in ok_for_td_methods:
getattr(s.dt, prop)
result = s.dt.components
assert isinstance(result, DataFrame)
tm.assert_index_equal(result.index, s.index)
result = s.dt.to_pytimedelta()
assert isinstance(result, np.ndarray)
assert result.dtype == object
result = s.dt.total_seconds()
assert isinstance(result, pd.Series)
assert result.dtype == 'float64'
freq_result = s.dt.freq
assert freq_result == TimedeltaIndex(s.values, freq='infer').freq
# both
index = date_range('20130101', periods=3, freq='D')
s = Series(date_range('20140204', periods=3, freq='s'),
index=index, name='xxx')
exp = Series(np.array([2014, 2014, 2014], dtype='int64'),
index=index, name='xxx')
tm.assert_series_equal(s.dt.year, exp)
exp = Series(np.array([2, 2, 2], dtype='int64'),
index=index, name='xxx')
tm.assert_series_equal(s.dt.month, exp)
exp = Series(np.array([0, 1, 2], dtype='int64'),
index=index, name='xxx')
tm.assert_series_equal(s.dt.second, exp)
exp = pd.Series([s[0]] * 3, index=index, name='xxx')
tm.assert_series_equal(s.dt.normalize(), exp)
# periodindex
cases = [Series(period_range('20130101', periods=5, freq='D'),
name='xxx')]
for s in cases:
for prop in ok_for_period:
# we test freq below
if prop != 'freq':
compare(s, prop)
for prop in ok_for_period_methods:
getattr(s.dt, prop)
freq_result = s.dt.freq
assert freq_result == PeriodIndex(s.values).freq
# test limited display api
def get_dir(s):
results = [r for r in s.dt.__dir__() if not r.startswith('_')]
return list(sorted(set(results)))
s = Series(date_range('20130101', periods=5, freq='D'), name='xxx')
results = get_dir(s)
tm.assert_almost_equal(
results, list(sorted(set(ok_for_dt + ok_for_dt_methods))))
s = Series(period_range('20130101', periods=5,
freq='D', name='xxx').astype(object))
results = get_dir(s)
tm.assert_almost_equal(
results, list(sorted(set(ok_for_period + ok_for_period_methods))))
# 11295
# ambiguous time error on the conversions
s = Series(pd.date_range('2015-01-01', '2016-01-01',
freq='T'), name='xxx')
s = s.dt.tz_localize('UTC').dt.tz_convert('America/Chicago')
results = get_dir(s)
tm.assert_almost_equal(
results, list(sorted(set(ok_for_dt + ok_for_dt_methods))))
exp_values = pd.date_range('2015-01-01', '2016-01-01', freq='T',
tz='UTC').tz_convert('America/Chicago')
expected = Series(exp_values, name='xxx')
tm.assert_series_equal(s, expected)
# no setting allowed
s = Series(date_range('20130101', periods=5, freq='D'), name='xxx')
with pytest.raises(ValueError, match="modifications"):
s.dt.hour = 5
# trying to set a copy
with pd.option_context('chained_assignment', 'raise'):
with pytest.raises(com.SettingWithCopyError):
s.dt.hour[0] = 5
@pytest.mark.parametrize('method, dates', [
['round', ['2012-01-02', '2012-01-02', '2012-01-01']],
['floor', ['2012-01-01', '2012-01-01', '2012-01-01']],
['ceil', ['2012-01-02', '2012-01-02', '2012-01-02']]
])
def test_dt_round(self, method, dates):
# round
s = Series(pd.to_datetime(['2012-01-01 13:00:00',
'2012-01-01 12:01:00',
'2012-01-01 08:00:00']), name='xxx')
result = getattr(s.dt, method)('D')
expected = Series(pd.to_datetime(dates), name='xxx')
tm.assert_series_equal(result, expected)
def test_dt_round_tz(self):
s = Series(pd.to_datetime(['2012-01-01 13:00:00',
'2012-01-01 12:01:00',
'2012-01-01 08:00:00']), name='xxx')
result = (s.dt.tz_localize('UTC')
.dt.tz_convert('US/Eastern')
.dt.round('D'))
exp_values = pd.to_datetime(['2012-01-01', '2012-01-01',
'2012-01-01']).tz_localize('US/Eastern')
expected = Series(exp_values, name='xxx')
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize('method', ['ceil', 'round', 'floor'])
def test_dt_round_tz_ambiguous(self, method):
# GH 18946 round near "fall back" DST
df1 = pd.DataFrame([
pd.to_datetime('2017-10-29 02:00:00+02:00', utc=True),
pd.to_datetime('2017-10-29 02:00:00+01:00', utc=True),
pd.to_datetime('2017-10-29 03:00:00+01:00', utc=True)
],
columns=['date'])
df1['date'] = df1['date'].dt.tz_convert('Europe/Madrid')
# infer
result = getattr(df1.date.dt, method)('H', ambiguous='infer')
expected = df1['date']
tm.assert_series_equal(result, expected)
# bool-array
result = getattr(df1.date.dt, method)(
'H', ambiguous=[True, False, False]
)
tm.assert_series_equal(result, expected)
# NaT
result = getattr(df1.date.dt, method)('H', ambiguous='NaT')
expected = df1['date'].copy()
expected.iloc[0:2] = pd.NaT
tm.assert_series_equal(result, expected)
# raise
with pytest.raises(pytz.AmbiguousTimeError):
getattr(df1.date.dt, method)('H', ambiguous='raise')
@pytest.mark.parametrize('method, ts_str, freq', [
['ceil', '2018-03-11 01:59:00-0600', '5min'],
['round', '2018-03-11 01:59:00-0600', '5min'],
['floor', '2018-03-11 03:01:00-0500', '2H']])
def test_dt_round_tz_nonexistent(self, method, ts_str, freq):
# GH 23324 round near "spring forward" DST
s = Series([pd.Timestamp(ts_str, tz='America/Chicago')])
result = getattr(s.dt, method)(freq, nonexistent='shift_forward')
expected = Series(
[pd.Timestamp('2018-03-11 03:00:00', tz='America/Chicago')]
)
tm.assert_series_equal(result, expected)
result = getattr(s.dt, method)(freq, nonexistent='NaT')
expected = Series([pd.NaT]).dt.tz_localize(result.dt.tz)
tm.assert_series_equal(result, expected)
with pytest.raises(pytz.NonExistentTimeError,
match='2018-03-11 02:00:00'):
getattr(s.dt, method)(freq, nonexistent='raise')
def test_dt_namespace_accessor_categorical(self):
# GH 19468
dti = DatetimeIndex(['20171111', '20181212']).repeat(2)
s = Series(pd.Categorical(dti), name='foo')
result = s.dt.year
expected = Series([2017, 2017, 2018, 2018], name='foo')
tm.assert_series_equal(result, expected)
def test_dt_accessor_no_new_attributes(self):
# https://github.com/pandas-dev/pandas/issues/10673
s = Series(date_range('20130101', periods=5, freq='D'))
with pytest.raises(AttributeError,
match="You cannot add any new attribute"):
s.dt.xlabel = "a"
@pytest.mark.parametrize('time_locale', [
None] if tm.get_locales() is None else [None] + tm.get_locales())
def test_dt_accessor_datetime_name_accessors(self, time_locale):
# Test Monday -> Sunday and January -> December, in that sequence
if time_locale is None:
# If time_locale is None, day_name and month_name should
# return the English names
expected_days = ['Monday', 'Tuesday', 'Wednesday', 'Thursday',
'Friday', 'Saturday', 'Sunday']
expected_months = ['January', 'February', 'March', 'April', 'May',
'June', 'July', 'August', 'September',
'October', 'November', 'December']
else:
with tm.set_locale(time_locale, locale.LC_TIME):
expected_days = calendar.day_name[:]
expected_months = calendar.month_name[1:]
s = Series(date_range(freq='D', start=datetime(1998, 1, 1),
periods=365))
english_days = ['Monday', 'Tuesday', 'Wednesday', 'Thursday',
'Friday', 'Saturday', 'Sunday']
for day, name, eng_name in zip(range(4, 11),
expected_days,
english_days):
name = name.capitalize()
assert s.dt.weekday_name[day] == eng_name
assert s.dt.day_name(locale=time_locale)[day] == name
s = s.append(Series([pd.NaT]))
assert np.isnan(s.dt.day_name(locale=time_locale).iloc[-1])
s = Series(date_range(freq='M', start='2012', end='2013'))
result = s.dt.month_name(locale=time_locale)
expected = Series([month.capitalize() for month in expected_months])
# work around https://github.com/pandas-dev/pandas/issues/22342
result = result.str.normalize("NFD")
expected = expected.str.normalize("NFD")
tm.assert_series_equal(result, expected)
for s_date, expected in zip(s, expected_months):
result = s_date.month_name(locale=time_locale)
expected = expected.capitalize()
result = unicodedata.normalize("NFD", result)
expected = unicodedata.normalize("NFD", expected)
assert result == expected
s = s.append(Series([pd.NaT]))
assert np.isnan(s.dt.month_name(locale=time_locale).iloc[-1])
def test_strftime(self):
# GH 10086
s = Series(date_range('20130101', periods=5))
result = s.dt.strftime('%Y/%m/%d')
expected = Series(['2013/01/01', '2013/01/02', '2013/01/03',
'2013/01/04', '2013/01/05'])
tm.assert_series_equal(result, expected)
s = Series(date_range('2015-02-03 11:22:33.4567', periods=5))
result = s.dt.strftime('%Y/%m/%d %H-%M-%S')
expected = Series(['2015/02/03 11-22-33', '2015/02/04 11-22-33',
'2015/02/05 11-22-33', '2015/02/06 11-22-33',
'2015/02/07 11-22-33'])
tm.assert_series_equal(result, expected)
s = Series(period_range('20130101', periods=5))
result = s.dt.strftime('%Y/%m/%d')
expected = Series(['2013/01/01', '2013/01/02', '2013/01/03',
'2013/01/04', '2013/01/05'])
tm.assert_series_equal(result, expected)
s = Series(period_range(
'2015-02-03 11:22:33.4567', periods=5, freq='s'))
result = s.dt.strftime('%Y/%m/%d %H-%M-%S')
expected = Series(['2015/02/03 11-22-33', '2015/02/03 11-22-34',
'2015/02/03 11-22-35', '2015/02/03 11-22-36',
'2015/02/03 11-22-37'])
tm.assert_series_equal(result, expected)
s = Series(date_range('20130101', periods=5))
s.iloc[0] = pd.NaT
result = s.dt.strftime('%Y/%m/%d')
expected = Series(['NaT', '2013/01/02', '2013/01/03', '2013/01/04',
'2013/01/05'])
tm.assert_series_equal(result, expected)
datetime_index = date_range('20150301', periods=5)
result = datetime_index.strftime("%Y/%m/%d")
expected = Index(['2015/03/01', '2015/03/02', '2015/03/03',
'2015/03/04', '2015/03/05'], dtype=np.object_)
# dtype may be S10 or U10 depending on python version
tm.assert_index_equal(result, expected)
import pytest
import sys
import numpy as np
import swan_vis as swan
import networkx as nx
import math
import pandas as pd
import anndata
###########################################################################
################# Related to input/error handling #########################
###########################################################################
###########################################################################
############################## Colors ###################################
###########################################################################
class TestColors(object):
# tests set_metadata_colors
# test set_metadata_colors - vanilla
def test_set_metadata_colors_1(self):
sg = get_die_test_sg()
cmap = {'GM12878': 'red', 'K562': 'blue'}
test = sg.set_metadata_colors('sample', cmap)
assert sg.adata.uns.sample_colors == ['red', 'blue']
# test set_metadata_colors - obs_col does not exist
def test_set_metadata_colors_2(self):
sg = get_die_test_sg()
cmap = {1: 'red', 2: 'blue'}
with pytest.raises(Exception) as e:
test = sg.set_metadata_colors('stage', cmap)
assert 'Metadata column' in str(e.value)
###########################################################################
################# Related to plotting Swan Plots ##########################
###########################################################################
class TestPlotting(object):
# done: test_new_gene, calc_pos_sizes, calc_edge_curves, plot_graph,
# plot_transcript_path
# init_plot_settings tests do not check for indicate_novel / indicate settings
# init_plot_settings tests do not check for new dataset addition
# test init_plot_settings - https://github.com/mortazavilab/swan_vis/issues/8
# gene summary -> transcript path (same gene) -> gene summary (same gene)
def test_init_9(self):
sg = swan.SwanGraph()
sg.add_transcriptome('files/test_full.gtf')
sg.plot_graph('test2_gid', display=False)
sg.plot_transcript_path('test5', display=False)
sg.plot_graph('test2_gid', display=False)
# edge_df
sg.pg.edge_df.drop('curve', axis=1, inplace=True) # not checking this
data = [[9, '-', 'exon', 5, 6, 'exon', None],
[8, '-', 'intron', 4, 5, 'intron', None],
[12, '-', 'exon', 4, 5, 'exon', None],
[14, '-', 'intron', 3, 5, 'intron', None],
[7, '-', 'exon', 2, 4, 'exon', None],
[13, '-', 'exon', 2, 3, 'exon', None],
[11, '-', 'intron', 1, 4, 'intron', None],
[6, '-', 'intron', 1, 2, 'intron', None],
[5, '-', 'exon', 0, 1, 'exon', None]]
cols = ['edge_id', 'strand', 'edge_type', 'v1', 'v2', 'color', 'line']
ctrl_edge_df = pd.DataFrame(data=data, columns=cols)
ctrl_edge_df = swan.create_dupe_index(ctrl_edge_df, 'edge_id')
ctrl_edge_df = swan.set_dupe_index(ctrl_edge_df, 'edge_id')
# loc_df
data = [[0, 'chr2', 100, False, True, False, 'TSS', None, None],
[1, 'chr2', 80, True, False, False, 'internal', None, None],
[2, 'chr2', 75, True, False, False, 'internal', None, None],
[3, 'chr2', 65, True, False, False, 'internal', None, None],
[4, 'chr2', 60, True, False, False, 'internal', None, None],
[5, 'chr2', 50, True, False, True, 'TES', None, None],
[6, 'chr2', 45, False, False, True, 'TES', None, None]]
cols = ['vertex_id', 'chrom', 'coord', 'internal', 'TSS', 'TES', \
'color', 'edgecolor', 'linewidth']
ctrl_loc_df = pd.DataFrame(data=data, columns=cols)
ctrl_loc_df = swan.create_dupe_index(ctrl_loc_df, 'vertex_id')
ctrl_loc_df = swan.set_dupe_index(ctrl_loc_df, 'vertex_id')
check_dfs(sg.pg.loc_df, ctrl_loc_df, sg.pg.edge_df, ctrl_edge_df)
# test init_plot_settings - going from plotting gene summary to
# gene summary (same gene), also tests working from gene name
def test_init_8(self):
sg = swan.SwanGraph()
sg.add_transcriptome('files/test_full.gtf')
sg.plot_graph('test2_gid', display=False)
sg.plot_graph('test2_gname', display=False)
# edge_df
sg.pg.edge_df.drop('curve', axis=1, inplace=True) # not checking this
data = [[9, '-', 'exon', 5, 6, 'exon', None],
[8, '-', 'intron', 4, 5, 'intron', None],
[12, '-', 'exon', 4, 5, 'exon', None],
[14, '-', 'intron', 3, 5, 'intron', None],
[7, '-', 'exon', 2, 4, 'exon', None],
[13, '-', 'exon', 2, 3, 'exon', None],
[11, '-', 'intron', 1, 4, 'intron', None],
[6, '-', 'intron', 1, 2, 'intron', None],
[5, '-', 'exon', 0, 1, 'exon', None]]
cols = ['edge_id', 'strand', 'edge_type', 'v1', 'v2', 'color', 'line']
ctrl_edge_df = pd.DataFrame(data=data, columns=cols)
ctrl_edge_df = swan.create_dupe_index(ctrl_edge_df, 'edge_id')
ctrl_edge_df = swan.set_dupe_index(ctrl_edge_df, 'edge_id')
# loc_df
data = [[0, 'chr2', 100, False, True, False, 'TSS', None, None],
[1, 'chr2', 80, True, False, False, 'internal', None, None],
[2, 'chr2', 75, True, False, False, 'internal', None, None],
[3, 'chr2', 65, True, False, False, 'internal', None, None],
[4, 'chr2', 60, True, False, False, 'internal', None, None],
[5, 'chr2', 50, True, False, True, 'TES', None, None],
[6, 'chr2', 45, False, False, True, 'TES', None, None]]
cols = ['vertex_id', 'chrom', 'coord', 'internal', 'TSS', 'TES', \
'color', 'edgecolor', 'linewidth']
ctrl_loc_df = pd.DataFrame(data=data, columns=cols)
ctrl_loc_df = swan.create_dupe_index(ctrl_loc_df, 'vertex_id')
ctrl_loc_df = swan.set_dupe_index(ctrl_loc_df, 'vertex_id')
check_dfs(sg.pg.loc_df, ctrl_loc_df, sg.pg.edge_df, ctrl_edge_df)
# test init_plot_settings - going from plotting gene summary to
# gene summary (different gene)
def test_init_7(self):
sg = swan.SwanGraph()
sg.add_transcriptome('files/test_full.gtf')
sg.plot_graph('test4_gid', display=False)
sg.plot_graph('test2_gid', display=False)
# edge_df
sg.pg.edge_df.drop('curve', axis=1, inplace=True) # not checking this
data = [[9, '-', 'exon', 5, 6, 'exon', None],
[8, '-', 'intron', 4, 5, 'intron', None],
[12, '-', 'exon', 4, 5, 'exon', None],
[14, '-', 'intron', 3, 5, 'intron', None],
[7, '-', 'exon', 2, 4, 'exon', None],
[13, '-', 'exon', 2, 3, 'exon', None],
[11, '-', 'intron', 1, 4, 'intron', None],
[6, '-', 'intron', 1, 2, 'intron', None],
[5, '-', 'exon', 0, 1, 'exon', None]]
cols = ['edge_id', 'strand', 'edge_type', 'v1', 'v2', 'color', 'line']
ctrl_edge_df = pd.DataFrame(data=data, columns=cols)
ctrl_edge_df = swan.create_dupe_index(ctrl_edge_df, 'edge_id')
ctrl_edge_df = swan.set_dupe_index(ctrl_edge_df, 'edge_id')
# loc_df
data = [[0, 'chr2', 100, False, True, False, 'TSS', None, None],
[1, 'chr2', 80, True, False, False, 'internal', None, None],
[2, 'chr2', 75, True, False, False, 'internal', None, None],
[3, 'chr2', 65, True, False, False, 'internal', None, None],
[4, 'chr2', 60, True, False, False, 'internal', None, None],
[5, 'chr2', 50, True, False, True, 'TES', None, None],
[6, 'chr2', 45, False, False, True, 'TES', None, None]]
cols = ['vertex_id', 'chrom', 'coord', 'internal', 'TSS', 'TES', \
'color', 'edgecolor', 'linewidth']
ctrl_loc_df = pd.DataFrame(data=data, columns=cols)
ctrl_loc_df = swan.create_dupe_index(ctrl_loc_df, 'vertex_id')
ctrl_loc_df = swan.set_dupe_index(ctrl_loc_df, 'vertex_id')
check_dfs(sg.pg.loc_df, ctrl_loc_df, sg.pg.edge_df, ctrl_edge_df)
# test init_plot_settings - going from plotting transcript path
# to transcript path (same gene)
def test_init_6(self):
sg = swan.SwanGraph()
sg.add_transcriptome('files/test_full.gtf')
sg.plot_transcript_path('test3', display=False)
sg.plot_transcript_path('test2', display=False)
# edge_df
sg.pg.edge_df.drop(['curve'], axis=1, inplace=True) # not checking this
data = [[9, '-', 'exon', 5, 6, 'exon', None],
[8, '-', 'intron', 4, 5, 'intron', None],
[12, '-', 'exon', 4, 5, 'exon_gray', None],
[14, '-', 'intron', 3, 5, 'intron_gray', None],
[7, '-', 'exon', 2, 4, 'exon', None],
[13, '-', 'exon', 2, 3, 'exon_gray', None],
[11, '-', 'intron', 1, 4, 'intron_gray', None],
[6, '-', 'intron', 1, 2, 'intron', None],
[5, '-', 'exon', 0, 1, 'exon', None]]
cols = ['edge_id', 'strand', 'edge_type', 'v1', 'v2', 'color', 'line']
ctrl_edge_df = pd.DataFrame(data=data, columns=cols)
ctrl_edge_df = swan.create_dupe_index(ctrl_edge_df, 'edge_id')
ctrl_edge_df = swan.set_dupe_index(ctrl_edge_df, 'edge_id')
# loc_df
# sg.pg.loc_df.drop(['annotation'], axis=1, inplace=True)
data = [[0, 'chr2', 100, False, True, False, 'TSS', None, None],
[1, 'chr2', 80, True, False, False, 'internal', None, None],
[2, 'chr2', 75, True, False, False, 'internal', None, None],
[3, 'chr2', 65, True, False, False, 'internal_gray', None, None],
[4, 'chr2', 60, True, False, False, 'internal', None, None],
[5, 'chr2', 50, True, False, True, 'internal', None, None],
[6, 'chr2', 45, False, False, True, 'TES', None, None]]
cols = ['vertex_id', 'chrom', 'coord', 'internal', 'TSS', 'TES', \
'color', 'edgecolor', 'linewidth']
ctrl_loc_df = pd.DataFrame(data=data, columns=cols)
ctrl_loc_df = swan.create_dupe_index(ctrl_loc_df, 'vertex_id')
ctrl_loc_df = swan.set_dupe_index(ctrl_loc_df, 'vertex_id')
check_dfs(sg.pg.loc_df, ctrl_loc_df, sg.pg.edge_df, ctrl_edge_df)
# test init_plot_settings - going from plotting transcript path
# to transcript path (different gene)
def test_init_5(self):
sg = swan.SwanGraph()
sg.add_transcriptome('files/test_full.gtf')
sg.plot_transcript_path('test5', display=False)
sg.plot_transcript_path('test2', display=False)
# edge_df
sg.pg.edge_df.drop(['curve'], axis=1, inplace=True) # not checking this
data = [[9, '-', 'exon', 5, 6, 'exon', None],
[8, '-', 'intron', 4, 5, 'intron', None],
[12, '-', 'exon', 4, 5, 'exon_gray', None],
[14, '-', 'intron', 3, 5, 'intron_gray', None],
[7, '-', 'exon', 2, 4, 'exon', None],
[13, '-', 'exon', 2, 3, 'exon_gray', None],
[11, '-', 'intron', 1, 4, 'intron_gray', None],
[6, '-', 'intron', 1, 2, 'intron', None],
[5, '-', 'exon', 0, 1, 'exon', None]]
cols = ['edge_id', 'strand', 'edge_type', 'v1', 'v2', 'color', 'line']
ctrl_edge_df = pd.DataFrame(data=data, columns=cols)
ctrl_edge_df = swan.create_dupe_index(ctrl_edge_df, 'edge_id')
ctrl_edge_df = swan.set_dupe_index(ctrl_edge_df, 'edge_id')
# loc_df
# sg.pg.loc_df.drop(['annotation'], axis=1, inplace=True)
data = [[0, 'chr2', 100, False, True, False, 'TSS', None, None],
[1, 'chr2', 80, True, False, False, 'internal', None, None],
[2, 'chr2', 75, True, False, False, 'internal', None, None],
[3, 'chr2', 65, True, False, False, 'internal_gray', None, None],
[4, 'chr2', 60, True, False, False, 'internal', None, None],
[5, 'chr2', 50, True, False, True, 'internal', None, None],
[6, 'chr2', 45, False, False, True, 'TES', None, None]]
cols = ['vertex_id', 'chrom', 'coord', 'internal', 'TSS', 'TES', \
'color', 'edgecolor', 'linewidth']
ctrl_loc_df = pd.DataFrame(data=data, columns=cols)
ctrl_loc_df = swan.create_dupe_index(ctrl_loc_df, 'vertex_id')
ctrl_loc_df = swan.set_dupe_index(ctrl_loc_df, 'vertex_id')
check_dfs(sg.pg.loc_df, ctrl_loc_df, sg.pg.edge_df, ctrl_edge_df)
# test init_plot_settings - going from plotting transcript path
# to gene summary (same gene)
def test_init_4(self):
sg = swan.SwanGraph()
sg.add_transcriptome('files/test_full.gtf')
sg.plot_transcript_path('test2', display=False)
sg.plot_graph('test2_gid', display=False)
# edge_df
sg.pg.edge_df.drop('curve', axis=1, inplace=True) # not checking this
data = [[9, '-', 'exon', 5, 6, 'exon', None],
[8, '-', 'intron', 4, 5, 'intron', None],
[12, '-', 'exon', 4, 5, 'exon', None],
[14, '-', 'intron', 3, 5, 'intron', None],
[7, '-', 'exon', 2, 4, 'exon', None],
[13, '-', 'exon', 2, 3, 'exon', None],
[11, '-', 'intron', 1, 4, 'intron', None],
[6, '-', 'intron', 1, 2, 'intron', None],
[5, '-', 'exon', 0, 1, 'exon', None]]
cols = ['edge_id', 'strand', 'edge_type', 'v1', 'v2', 'color', 'line']
ctrl_edge_df = pd.DataFrame(data=data, columns=cols)
# coding: utf-8
# In[1]:
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import math
# ### Read in the data
# - Keep the October-December data
# - Convert "NR" values to 0
# In[2]:
data = pd.read_excel('hsinchu.xls')
# October-December
data = data[data['日期'].between('2017/10/01','2017/12/31 ')]
# NR->0
data.replace('NR',0, inplace=True)
# In[3]:
data.shape
# In[4]:
data.head(19) # 18 features
# ### Clean missing values and problematic entries
# In[5]:
# row 1656 column= 3~27
def f_element(x):
return type(x)==str or math.isnan(x)
def before_element(i,j):
if j-1==2:
i = i - 18
j = 26
else:
j = j-1
if f_element(data.iloc[i,j]):
return before_element(i,j)
else:
return data.iloc[i,j]
def after_element(i,j):
if j+1==27:
i = i + 18
j = 3
else:
j = j+1
if f_element(data.iloc[i,j]):
return after_element(i,j)
return data.iloc[i,j]
for i in range (1656):
for j in range(3,27):
if f_element(data.iloc[i, j]):
data.iloc[i, j] = (before_element(i,j)+after_element(i,j))/2
# ### Data splitting
# #### First use the single feature PM2.5 to predict PM2.5 values
# In[6]:
# Split into training data (Oct-Nov) and testing data (Dec)
train_data = data.iloc[:1098] # 61 day * 18 = 1098
test_data = data.iloc[1098:,] # 31 day
# Separate the hourly values (0-23) from the measurement labels, and split the values into 61 chunks (one chunk of 18 measurements per day)
train_data_values = train_data.drop(['日期','測站','測項'],axis=1).values
train_data_values = np.vsplit(train_data_values, 61) #61 days
# Keep the measurement names -> index
train_data_18 = np.array(train_data.iloc[0:18,2]).reshape(18,1) # index
# Append each day's 18 measurements after the index (rows), so the 18 indices map to hours 0-23 repeated 61 times (once per day)
for i in range(len(train_data_values)):
train_data_18 = np.concatenate((train_data_18, train_data_values[i]), axis=1)
# Convert to a DataFrame and set the index (row names)
train_data_18 = pd.DataFrame(train_data_18)
train_data_18 = train_data_18.set_index(0)
# Number of training samples that can be split out
train_num = train_data_18.shape[1] - 6 #1464-6
# Take the PM2.5 row
train_pm25= train_data_18.loc['PM2.5'].tolist()
# Use the previous six values to predict the seventh, and so on, giving train_num samples
X_train = []
y_train = []
for i in range(train_num):
X_train.append(train_pm25[i:i+6]) # previous six values
y_train.append(train_pm25[i+6]) # seventh value
# Convert the lists to np.array
X_train, y_train = np.array([X_train]), np.array([y_train])
X_train, y_train = X_train.reshape(1458, 6), y_train.reshape(1458,1)
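# Illustration of the sliding window with hypothetical values: if the PM2.5
# series started [3, 5, 7, 9, 11, 13, 15, 17, ...], the first samples would be
#   X: [3, 5, 7, 9, 11, 13]  -> y: 15
#   X: [5, 7, 9, 11, 13, 15] -> y: 17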
# In[7]:
# The testing set (December) is split the same way as the training set
test_data_values = test_data.drop(['日期','測站','測項'],axis=1).values
test_data_values = np.vsplit(test_data_values, 31)
test_data_18 = np.array(test_data.iloc[0:18,2]).reshape(18,1)
for i in range(len(test_data_values)):
test_data_18 = np.concatenate((test_data_18, test_data_values[i]), axis=1)
test_data_18 = pd.DataFrame(test_data_18)
from matplotlib import pyplot as plt
import pandas as pd
import numpy as np
import os
wd = os.chdir('/Users/larslarson/Documents/School/CU/Research/TADD/Data/2022-03-04')
filepaths = [f for f in os.listdir(wd) if f.endswith('.csv')]
df = pd.concat(map(pd.read_csv, filepaths),axis='columns')
file_names = []
data_frames = []
locations = []
for filename in filepaths:
name = os.path.splitext(filename)[0]
file_names.append(name)
df = pd.read_csv(filename, header=None)
name = name.partition("y")[2]
location = name.partition("_")[0]
location = int(location)
df.rename(columns={1: name}, inplace=True)
data_frames.append(df)
locations.append(location)
combined = pd.concat(data_frames, axis=1)
# -*- coding: utf-8 -*-
"""
Authors: <NAME>
UNESCO-IHE 2016
Contact: <EMAIL>
Repository: https://github.com/wateraccounting/wa
Module: Sheets/sheet1
"""
import os
import pandas as pd
import time
import xml.etree.ElementTree as ET
import subprocess
def create_sheet3(basin, period, units, data, output, template=False):
"""
Keyword arguments:
basin -- The name of the basin
period -- The period of analysis
units -- A list with the units of the data:
[<water consumption>, <land productivity>, <water productivity>]
data -- A csv file that contains the water data. The csv file has to
follow an specific format. A sample csv is available in the link:
https://github.com/wateraccounting/wa/tree/master/Sheets/csv
output -- A list (length 2) with the output paths of the jpg files
for the two parts of the sheet
template -- A list (length 2) of the svg files of the sheet.
Use False (default) to use the standard svg files.
Example:
from wa.Sheets import *
create_sheet3(basin='Helmand', period='2007-2011',
units=['km3/yr', 'kg/ha/yr', 'kg/m3'],
data=[r'C:\Sheets\csv\Sample_sheet3_part1.csv',
r'C:\Sheets\csv\Sample_sheet3_part2.csv'],
output=[r'C:\Sheets\sheet_3_part1.jpg',
r'C:\Sheets\sheet_3_part2.jpg'])
"""
# Read table
df1 = pd.read_csv(data[0], sep=';')
df2 = pd.read_csv(data[1], sep=';')
# Data frames
df1c = df1.loc[df1.USE == "CROP"]
df1n = df1.loc[df1.USE == "NON-CROP"]
df2c = df2.loc[df2.USE == "CROP"]
df2n = df2.loc[df2.USE == "NON-CROP"]
# Read csv file part 1
crop_r01c01 = float(df1c.loc[(df1c.TYPE == "Cereals") &
(df1c.SUBCLASS == "ET")].WATER_CONSUMPTION)
crop_r02c01 = float(df1c.loc[(df1c.TYPE == "Cereals") &
(df1c.SUBCLASS == "ET rainfall")].WATER_CONSUMPTION)
crop_r03c01 = float(df1c.loc[(df1c.TYPE == "Cereals") &
(df1c.SUBCLASS == "Incremental ET")].WATER_CONSUMPTION)
crop_r04c01 = crop_r02c01 + crop_r03c01
crop_r01c02 = float(df1c.loc[(df1c.SUBTYPE == "Root/tuber crops") &
(df1c.SUBCLASS == "ET")].WATER_CONSUMPTION)
crop_r02c02 = float(df1c.loc[(df1c.SUBTYPE == "Root/tuber crops") &
(df1c.SUBCLASS == "ET rainfall")].WATER_CONSUMPTION)
crop_r03c02 = float(df1c.loc[(df1c.SUBTYPE == "Root/tuber crops") &
(df1c.SUBCLASS == "Incremental ET")].WATER_CONSUMPTION)
crop_r04c02 = crop_r02c02 + crop_r03c02
crop_r01c03 = float(df1c.loc[(df1c.SUBTYPE == "Leguminous crops") &
(df1c.SUBCLASS == "ET")].WATER_CONSUMPTION)
crop_r02c03 = float(df1c.loc[(df1c.SUBTYPE == "Leguminous crops") &
(df1c.SUBCLASS == "ET rainfall")].WATER_CONSUMPTION)
crop_r03c03 = float(df1c.loc[(df1c.SUBTYPE == "Leguminous crops") &
(df1c.SUBCLASS == "Incremental ET")].WATER_CONSUMPTION)
crop_r04c03 = crop_r02c03 + crop_r03c03
crop_r01c04 = float(df1c.loc[(df1c.SUBTYPE == "Sugar crops") &
(df1c.SUBCLASS == "ET")].WATER_CONSUMPTION)
crop_r02c04 = float(df1c.loc[(df1c.SUBTYPE == "Sugar crops") &
(df1c.SUBCLASS == "ET rainfall")].WATER_CONSUMPTION)
crop_r03c04 = float(df1c.loc[(df1c.SUBTYPE == "Sugar crops") &
(df1c.SUBCLASS == "Incremental ET")].WATER_CONSUMPTION)
crop_r04c04 = crop_r02c04 + crop_r03c04
crop_r01c05 = float(df1c.loc[(df1c.TYPE == "Non-cereals") &
(df1c.SUBCLASS == "ET") &
(df1c.SUBTYPE == "Merged")].WATER_CONSUMPTION)
crop_r02c05 = float(df1c.loc[(df1c.TYPE == "Non-cereals") &
(df1c.SUBCLASS == "ET rainfall") &
(df1c.SUBTYPE == "Merged")].WATER_CONSUMPTION)
crop_r03c05 = float(df1c.loc[(df1c.TYPE == "Non-cereals") &
(df1c.SUBCLASS == "Incremental ET") &
(df1c.SUBTYPE == "Merged")].WATER_CONSUMPTION)
crop_r04c05 = crop_r02c05 + crop_r03c05
crop_r01c06 = float(df1c.loc[(df1c.SUBTYPE == "Vegetables & melons") &
(df1c.SUBCLASS == "ET")].WATER_CONSUMPTION)
crop_r02c06 = float(df1c.loc[(df1c.SUBTYPE == "Vegetables & melons") &
(df1c.SUBCLASS == "ET rainfall")].WATER_CONSUMPTION)
crop_r03c06 = float(df1c.loc[(df1c.SUBTYPE == "Vegetables & melons") &
(df1c.SUBCLASS == "Incremental ET")].WATER_CONSUMPTION)
crop_r04c06 = crop_r02c06 + crop_r03c06
crop_r01c07 = float(df1c.loc[(df1c.SUBTYPE == "Fruits & nuts") &
(df1c.SUBCLASS == "ET")].WATER_CONSUMPTION)
crop_r02c07 = float(df1c.loc[(df1c.SUBTYPE == "Fruits & nuts") &
(df1c.SUBCLASS == "ET rainfall")].WATER_CONSUMPTION)
crop_r03c07 = float(df1c.loc[(df1c.SUBTYPE == "Fruits & nuts") &
(df1c.SUBCLASS == "Incremental ET")].WATER_CONSUMPTION)
crop_r04c07 = crop_r02c07 + crop_r03c07
crop_r01c08 = float(df1c.loc[(df1c.TYPE == "Fruit & vegetables") &
(df1c.SUBCLASS == "ET") &
(df1c.SUBTYPE == "Merged")].WATER_CONSUMPTION)
crop_r02c08 = float(df1c.loc[(df1c.TYPE == "Fruit & vegetables") &
(df1c.SUBCLASS == "ET rainfall") &
(df1c.SUBTYPE == "Merged")].WATER_CONSUMPTION)
crop_r03c08 = float(df1c.loc[(df1c.TYPE == "Fruit & vegetables") &
(df1c.SUBCLASS == "Incremental ET") &
(df1c.SUBTYPE == "Merged")].WATER_CONSUMPTION)
crop_r04c08 = crop_r02c08 + crop_r03c08
crop_r01c09 = float(df1c.loc[(df1c.TYPE == "Oilseeds") &
(df1c.SUBCLASS == "ET")].WATER_CONSUMPTION)
crop_r02c09 = float(df1c.loc[(df1c.TYPE == "Oilseeds") &
(df1c.SUBCLASS == "ET rainfall")].WATER_CONSUMPTION)
crop_r03c09 = float(df1c.loc[(df1c.TYPE == "Oilseeds") &
(df1c.SUBCLASS == "Incremental ET")].WATER_CONSUMPTION)
crop_r04c09 = crop_r02c09 + crop_r03c09
crop_r01c10 = float(df1c.loc[(df1c.TYPE == "Feed crops") &
(df1c.SUBCLASS == "ET")].WATER_CONSUMPTION)
crop_r02c10 = float(df1c.loc[(df1c.TYPE == "Feed crops") &
(df1c.SUBCLASS == "ET rainfall")].WATER_CONSUMPTION)
crop_r03c10 = float(df1c.loc[(df1c.TYPE == "Feed crops") &
(df1c.SUBCLASS == "Incremental ET")].WATER_CONSUMPTION)
crop_r04c10 = crop_r02c10 + crop_r03c10
crop_r01c11 = float(df1c.loc[(df1c.TYPE == "Beverage crops") &
(df1c.SUBCLASS == "ET")].WATER_CONSUMPTION)
crop_r02c11 = float(df1c.loc[(df1c.TYPE == "Beverage crops") &
(df1c.SUBCLASS == "ET rainfall")].WATER_CONSUMPTION)
crop_r03c11 = float(df1c.loc[(df1c.TYPE == "Beverage crops") &
(df1c.SUBCLASS == "Incremental ET")].WATER_CONSUMPTION)
crop_r04c11 = crop_r02c11 + crop_r03c11
crop_r01c12 = float(df1c.loc[(df1c.TYPE == "Other crops") &
(df1c.SUBCLASS == "ET")].WATER_CONSUMPTION)
crop_r02c12 = float(df1c.loc[(df1c.TYPE == "Other crops") &
(df1c.SUBCLASS == "ET rainfall")].WATER_CONSUMPTION)
crop_r03c12 = float(df1c.loc[(df1c.TYPE == "Other crops") &
(df1c.SUBCLASS == "Incremental ET")].WATER_CONSUMPTION)
crop_r04c12 = crop_r02c12 + crop_r03c12
noncrop_r01c01 = float(df1n.loc[(df1n.TYPE == "Fish (Aquaculture)") &
(df1n.SUBCLASS == "ET")].WATER_CONSUMPTION)
noncrop_r02c01 = float(df1n.loc[(df1n.TYPE == "Fish (Aquaculture)") &
(df1n.SUBCLASS == "ET rainfall")].WATER_CONSUMPTION)
noncrop_r03c01 = float(df1n.loc[(df1n.TYPE == "Fish (Aquaculture)") &
(df1n.SUBCLASS == "Incremental ET")].WATER_CONSUMPTION)
noncrop_r04c01 = noncrop_r02c01 + noncrop_r03c01
noncrop_r01c02 = float(df1n.loc[(df1n.TYPE == "Timber") &
(df1n.SUBCLASS == "ET")].WATER_CONSUMPTION)
noncrop_r02c02 = float(df1n.loc[(df1n.TYPE == "Timber") &
(df1n.SUBCLASS == "ET rainfall")].WATER_CONSUMPTION)
noncrop_r03c02 = float(df1n.loc[(df1n.TYPE == "Timber") &
(df1n.SUBCLASS == "Incremental ET")].WATER_CONSUMPTION)
noncrop_r04c02 = noncrop_r02c02 + noncrop_r03c02
    crop_r01 = np.nansum([crop_r01c01, crop_r01c02, crop_r01c03,
                          crop_r01c04, crop_r01c05, crop_r01c06,
                          crop_r01c07, crop_r01c08, crop_r01c09,
                          crop_r01c10, crop_r01c11, crop_r01c12])
    crop_r02 = np.nansum([crop_r02c01, crop_r02c02, crop_r02c03,
                          crop_r02c04, crop_r02c05, crop_r02c06,
                          crop_r02c07, crop_r02c08, crop_r02c09,
                          crop_r02c10, crop_r02c11, crop_r02c12])
    crop_r03 = np.nansum([crop_r03c01, crop_r03c02, crop_r03c03,
                          crop_r03c04, crop_r03c05, crop_r03c06,
                          crop_r03c07, crop_r03c08, crop_r03c09,
                          crop_r03c10, crop_r03c11, crop_r03c12])
    crop_r04 = crop_r02 + crop_r03
    noncrop_r01 = np.nansum([noncrop_r01c01, noncrop_r01c02])
    noncrop_r02 = np.nansum([noncrop_r02c01, noncrop_r02c02])
    noncrop_r03 = np.nansum([noncrop_r03c01, noncrop_r03c02])
noncrop_r04 = noncrop_r02 + noncrop_r03
ag_water_cons = crop_r01 + crop_r04 + noncrop_r01 + noncrop_r04
# Read csv file part 2
# Land productivity
lp_r01c01 = float(df2c.loc[(df2c.TYPE == "Cereals") &
(df2c.SUBCLASS == "Yield")].LAND_PRODUCTIVITY)
lp_r02c01 = float(df2c.loc[(df2c.TYPE == "Cereals") &
(df2c.SUBCLASS == "Yield rainfall")].LAND_PRODUCTIVITY)
lp_r03c01 = float(df2c.loc[(df2c.TYPE == "Cereals") &
(df2c.SUBCLASS == "Incremental yield")].LAND_PRODUCTIVITY)
lp_r04c01 = float(df2c.loc[(df2c.TYPE == "Cereals") &
(df2c.SUBCLASS == "Total yield")].LAND_PRODUCTIVITY)
lp_r01c02 = float(df2c.loc[(df2c.SUBTYPE == "Root/tuber crops") &
(df2c.SUBCLASS == "Yield")].LAND_PRODUCTIVITY)
lp_r02c02 = float(df2c.loc[(df2c.SUBTYPE == "Root/tuber crops") &
(df2c.SUBCLASS == "Yield rainfall")].LAND_PRODUCTIVITY)
lp_r03c02 = float(df2c.loc[(df2c.SUBTYPE == "Root/tuber crops") &
(df2c.SUBCLASS == "Incremental yield")].LAND_PRODUCTIVITY)
lp_r04c02 = float(df2c.loc[(df2c.SUBTYPE == "Root/tuber crops") &
(df2c.SUBCLASS == "Total yield")].LAND_PRODUCTIVITY)
lp_r01c03 = float(df2c.loc[(df2c.SUBTYPE == "Leguminous crops") &
(df2c.SUBCLASS == "Yield")].LAND_PRODUCTIVITY)
lp_r02c03 = float(df2c.loc[(df2c.SUBTYPE == "Leguminous crops") &
(df2c.SUBCLASS == "Yield rainfall")].LAND_PRODUCTIVITY)
lp_r03c03 = float(df2c.loc[(df2c.SUBTYPE == "Leguminous crops") &
(df2c.SUBCLASS == "Incremental yield")].LAND_PRODUCTIVITY)
lp_r04c03 = float(df2c.loc[(df2c.SUBTYPE == "Leguminous crops") &
(df2c.SUBCLASS == "Total yield")].LAND_PRODUCTIVITY)
lp_r01c04 = float(df2c.loc[(df2c.SUBTYPE == "Sugar crops") &
(df2c.SUBCLASS == "Yield")].LAND_PRODUCTIVITY)
lp_r02c04 = float(df2c.loc[(df2c.SUBTYPE == "Sugar crops") &
(df2c.SUBCLASS == "Yield rainfall")].LAND_PRODUCTIVITY)
lp_r03c04 = float(df2c.loc[(df2c.SUBTYPE == "Sugar crops") &
(df2c.SUBCLASS == "Incremental yield")].LAND_PRODUCTIVITY)
lp_r04c04 = float(df2c.loc[(df2c.SUBTYPE == "Sugar crops") &
(df2c.SUBCLASS == "Total yield")].LAND_PRODUCTIVITY)
lp_r01c05 = float(df2c.loc[(df2c.TYPE == "Non-cereals") &
(df2c.SUBCLASS == "Yield") &
(df2c.SUBTYPE == "Merged")].LAND_PRODUCTIVITY)
lp_r02c05 = float(df2c.loc[(df2c.TYPE == "Non-cereals") &
(df2c.SUBCLASS == "Yield rainfall") &
(df2c.SUBTYPE == "Merged")].LAND_PRODUCTIVITY)
lp_r03c05 = float(df2c.loc[(df2c.TYPE == "Non-cereals") &
(df2c.SUBCLASS == "Incremental yield") &
(df2c.SUBTYPE == "Merged")].LAND_PRODUCTIVITY)
lp_r04c05 = float(df2c.loc[(df2c.TYPE == "Non-cereals") &
(df2c.SUBCLASS == "Total yield") &
(df2c.SUBTYPE == "Merged")].LAND_PRODUCTIVITY)
lp_r01c06 = float(df2c.loc[(df2c.SUBTYPE == "Vegetables & melons") &
(df2c.SUBCLASS == "Yield")].LAND_PRODUCTIVITY)
lp_r02c06 = float(df2c.loc[(df2c.SUBTYPE == "Vegetables & melons") &
(df2c.SUBCLASS == "Yield rainfall")].LAND_PRODUCTIVITY)
lp_r03c06 = float(df2c.loc[(df2c.SUBTYPE == "Vegetables & melons") &
(df2c.SUBCLASS == "Incremental yield")].LAND_PRODUCTIVITY)
lp_r04c06 = float(df2c.loc[(df2c.SUBTYPE == "Vegetables & melons") &
(df2c.SUBCLASS == "Total yield")].LAND_PRODUCTIVITY)
lp_r01c07 = float(df2c.loc[(df2c.SUBTYPE == "Fruits & nuts") &
(df2c.SUBCLASS == "Yield")].LAND_PRODUCTIVITY)
lp_r02c07 = float(df2c.loc[(df2c.SUBTYPE == "Fruits & nuts") &
(df2c.SUBCLASS == "Yield rainfall")].LAND_PRODUCTIVITY)
lp_r03c07 = float(df2c.loc[(df2c.SUBTYPE == "Fruits & nuts") &
(df2c.SUBCLASS == "Incremental yield")].LAND_PRODUCTIVITY)
lp_r04c07 = float(df2c.loc[(df2c.SUBTYPE == "Fruits & nuts") &
(df2c.SUBCLASS == "Total yield")].LAND_PRODUCTIVITY)
lp_r01c08 = float(df2c.loc[(df2c.TYPE == "Fruit & vegetables") &
(df2c.SUBCLASS == "Yield") &
(df2c.SUBTYPE == "Merged")].LAND_PRODUCTIVITY)
lp_r02c08 = float(df2c.loc[(df2c.TYPE == "Fruit & vegetables") &
(df2c.SUBCLASS == "Yield rainfall") &
(df2c.SUBTYPE == "Merged")].LAND_PRODUCTIVITY)
lp_r03c08 = float(df2c.loc[(df2c.TYPE == "Fruit & vegetables") &
(df2c.SUBCLASS == "Incremental yield") &
(df2c.SUBTYPE == "Merged")].LAND_PRODUCTIVITY)
lp_r04c08 = float(df2c.loc[(df2c.TYPE == "Fruit & vegetables") &
(df2c.SUBCLASS == "Total yield") &
(df2c.SUBTYPE == "Merged")].LAND_PRODUCTIVITY)
lp_r01c09 = float(df2c.loc[(df2c.TYPE == "Oilseeds") &
(df2c.SUBCLASS == "Yield")].LAND_PRODUCTIVITY)
lp_r02c09 = float(df2c.loc[(df2c.TYPE == "Oilseeds") &
(df2c.SUBCLASS == "Yield rainfall")].LAND_PRODUCTIVITY)
lp_r03c09 = float(df2c.loc[(df2c.TYPE == "Oilseeds") &
(df2c.SUBCLASS == "Incremental yield")].LAND_PRODUCTIVITY)
lp_r04c09 = float(df2c.loc[(df2c.TYPE == "Oilseeds") &
(df2c.SUBCLASS == "Total yield")].LAND_PRODUCTIVITY)
lp_r01c10 = float(df2c.loc[(df2c.TYPE == "Feed crops") &
(df2c.SUBCLASS == "Yield")].LAND_PRODUCTIVITY)
lp_r02c10 = float(df2c.loc[(df2c.TYPE == "Feed crops") &
(df2c.SUBCLASS == "Yield rainfall")].LAND_PRODUCTIVITY)
lp_r03c10 = float(df2c.loc[(df2c.TYPE == "Feed crops") &
(df2c.SUBCLASS == "Incremental yield")].LAND_PRODUCTIVITY)
lp_r04c10 = float(df2c.loc[(df2c.TYPE == "Feed crops") &
(df2c.SUBCLASS == "Total yield")].LAND_PRODUCTIVITY)
lp_r01c11 = float(df2c.loc[(df2c.TYPE == "Beverage crops") &
(df2c.SUBCLASS == "Yield")].LAND_PRODUCTIVITY)
lp_r02c11 = float(df2c.loc[(df2c.TYPE == "Beverage crops") &
(df2c.SUBCLASS == "Yield rainfall")].LAND_PRODUCTIVITY)
lp_r03c11 = float(df2c.loc[(df2c.TYPE == "Beverage crops") &
(df2c.SUBCLASS == "Incremental yield")].LAND_PRODUCTIVITY)
lp_r04c11 = float(df2c.loc[(df2c.TYPE == "Beverage crops") &
(df2c.SUBCLASS == "Total yield")].LAND_PRODUCTIVITY)
lp_r01c12 = float(df2c.loc[(df2c.TYPE == "Other crops") &
(df2c.SUBCLASS == "Yield")].LAND_PRODUCTIVITY)
lp_r02c12 = float(df2c.loc[(df2c.TYPE == "Other crops") &
(df2c.SUBCLASS == "Yield rainfall")].LAND_PRODUCTIVITY)
lp_r03c12 = float(df2c.loc[(df2c.TYPE == "Other crops") &
(df2c.SUBCLASS == "Incremental yield")].LAND_PRODUCTIVITY)
lp_r04c12 = float(df2c.loc[(df2c.TYPE == "Other crops") &
(df2c.SUBCLASS == "Total yield")].LAND_PRODUCTIVITY)
lp_r05c01 = float(df2n.loc[(df2n.SUBTYPE == "Meat") &
(df2n.SUBCLASS == "Yield")].LAND_PRODUCTIVITY)
lp_r06c01 = float(df2n.loc[(df2n.SUBTYPE == "Meat") &
(df2n.SUBCLASS == "Yield rainfall")].LAND_PRODUCTIVITY)
lp_r07c01 = float(df2n.loc[(df2n.SUBTYPE == "Meat") &
(df2n.SUBCLASS == "Incremental yield")].LAND_PRODUCTIVITY)
lp_r08c01 = float(df2n.loc[(df2n.SUBTYPE == "Meat") &
(df2n.SUBCLASS == "Total yield")].LAND_PRODUCTIVITY)
lp_r05c02 = float(df2n.loc[(df2n.SUBTYPE == "Milk") &
(df2n.SUBCLASS == "Yield")].LAND_PRODUCTIVITY)
lp_r06c02 = float(df2n.loc[(df2n.SUBTYPE == "Milk") &
(df2n.SUBCLASS == "Yield rainfall")].LAND_PRODUCTIVITY)
lp_r07c02 = float(df2n.loc[(df2n.SUBTYPE == "Milk") &
(df2n.SUBCLASS == "Incremental yield")].LAND_PRODUCTIVITY)
lp_r08c02 = float(df2n.loc[(df2n.SUBTYPE == "Milk") &
(df2n.SUBCLASS == "Total yield")].LAND_PRODUCTIVITY)
lp_r05c03 = float(df2n.loc[(df2n.TYPE == "Fish (Aquaculture)") &
(df2n.SUBCLASS == "Yield")].LAND_PRODUCTIVITY)
lp_r06c03 = float(df2n.loc[(df2n.TYPE == "Fish (Aquaculture)") &
(df2n.SUBCLASS == "Yield rainfall")].LAND_PRODUCTIVITY)
lp_r07c03 = float(df2n.loc[(df2n.TYPE == "Fish (Aquaculture)") &
(df2n.SUBCLASS == "Incremental yield")].LAND_PRODUCTIVITY)
lp_r08c03 = float(df2n.loc[(df2n.TYPE == "Fish (Aquaculture)") &
(df2n.SUBCLASS == "Total yield")].LAND_PRODUCTIVITY)
lp_r05c04 = float(df2n.loc[(df2n.TYPE == "Timber") &
(df2n.SUBCLASS == "Yield")].LAND_PRODUCTIVITY)
lp_r06c04 = float(df2n.loc[(df2n.TYPE == "Timber") &
(df2n.SUBCLASS == "Yield rainfall")].LAND_PRODUCTIVITY)
lp_r07c04 = float(df2n.loc[(df2n.TYPE == "Timber") &
(df2n.SUBCLASS == "Incremental yield")].LAND_PRODUCTIVITY)
lp_r08c04 = float(df2n.loc[(df2n.TYPE == "Timber") &
(df2n.SUBCLASS == "Total yield")].LAND_PRODUCTIVITY)
# Water productivity
wp_r01c01 = float(df2c.loc[(df2c.TYPE == "Cereals") &
(df2c.SUBCLASS == "Yield")].WATER_PRODUCTIVITY)
wp_r02c01 = float(df2c.loc[(df2c.TYPE == "Cereals") &
(df2c.SUBCLASS == "Yield rainfall")].WATER_PRODUCTIVITY)
wp_r03c01 = float(df2c.loc[(df2c.TYPE == "Cereals") &
(df2c.SUBCLASS == "Incremental yield")].WATER_PRODUCTIVITY)
wp_r04c01 = float(df2c.loc[(df2c.TYPE == "Cereals") &
(df2c.SUBCLASS == "Total yield")].WATER_PRODUCTIVITY)
wp_r01c02 = float(df2c.loc[(df2c.SUBTYPE == "Root/tuber crops") &
(df2c.SUBCLASS == "Yield")].WATER_PRODUCTIVITY)
wp_r02c02 = float(df2c.loc[(df2c.SUBTYPE == "Root/tuber crops") &
(df2c.SUBCLASS == "Yield rainfall")].WATER_PRODUCTIVITY)
wp_r03c02 = float(df2c.loc[(df2c.SUBTYPE == "Root/tuber crops") &
(df2c.SUBCLASS == "Incremental yield")].WATER_PRODUCTIVITY)
wp_r04c02 = float(df2c.loc[(df2c.SUBTYPE == "Root/tuber crops") &
(df2c.SUBCLASS == "Total yield")].WATER_PRODUCTIVITY)
wp_r01c03 = float(df2c.loc[(df2c.SUBTYPE == "Leguminous crops") &
(df2c.SUBCLASS == "Yield")].WATER_PRODUCTIVITY)
wp_r02c03 = float(df2c.loc[(df2c.SUBTYPE == "Leguminous crops") &
(df2c.SUBCLASS == "Yield rainfall")].WATER_PRODUCTIVITY)
wp_r03c03 = float(df2c.loc[(df2c.SUBTYPE == "Leguminous crops") &
(df2c.SUBCLASS == "Incremental yield")].WATER_PRODUCTIVITY)
wp_r04c03 = float(df2c.loc[(df2c.SUBTYPE == "Leguminous crops") &
(df2c.SUBCLASS == "Total yield")].WATER_PRODUCTIVITY)
wp_r01c04 = float(df2c.loc[(df2c.SUBTYPE == "Sugar crops") &
(df2c.SUBCLASS == "Yield")].WATER_PRODUCTIVITY)
wp_r02c04 = float(df2c.loc[(df2c.SUBTYPE == "Sugar crops") &
(df2c.SUBCLASS == "Yield rainfall")].WATER_PRODUCTIVITY)
wp_r03c04 = float(df2c.loc[(df2c.SUBTYPE == "Sugar crops") &
(df2c.SUBCLASS == "Incremental yield")].WATER_PRODUCTIVITY)
wp_r04c04 = float(df2c.loc[(df2c.SUBTYPE == "Sugar crops") &
(df2c.SUBCLASS == "Total yield")].WATER_PRODUCTIVITY)
wp_r01c05 = float(df2c.loc[(df2c.TYPE == "Non-cereals") &
(df2c.SUBCLASS == "Yield") &
(df2c.SUBTYPE == "Merged")].WATER_PRODUCTIVITY)
wp_r02c05 = float(df2c.loc[(df2c.TYPE == "Non-cereals") &
(df2c.SUBCLASS == "Yield rainfall") &
(df2c.SUBTYPE == "Merged")].WATER_PRODUCTIVITY)
wp_r03c05 = float(df2c.loc[(df2c.TYPE == "Non-cereals") &
(df2c.SUBCLASS == "Incremental yield") &
(df2c.SUBTYPE == "Merged")].WATER_PRODUCTIVITY)
wp_r04c05 = float(df2c.loc[(df2c.TYPE == "Non-cereals") &
(df2c.SUBCLASS == "Total yield") &
(df2c.SUBTYPE == "Merged")].WATER_PRODUCTIVITY)
wp_r01c06 = float(df2c.loc[(df2c.SUBTYPE == "Vegetables & melons") &
(df2c.SUBCLASS == "Yield")].WATER_PRODUCTIVITY)
wp_r02c06 = float(df2c.loc[(df2c.SUBTYPE == "Vegetables & melons") &
(df2c.SUBCLASS == "Yield rainfall")].WATER_PRODUCTIVITY)
wp_r03c06 = float(df2c.loc[(df2c.SUBTYPE == "Vegetables & melons") &
(df2c.SUBCLASS == "Incremental yield")].WATER_PRODUCTIVITY)
wp_r04c06 = float(df2c.loc[(df2c.SUBTYPE == "Vegetables & melons") &
(df2c.SUBCLASS == "Total yield")].WATER_PRODUCTIVITY)
wp_r01c07 = float(df2c.loc[(df2c.SUBTYPE == "Fruits & nuts") &
(df2c.SUBCLASS == "Yield")].WATER_PRODUCTIVITY)
wp_r02c07 = float(df2c.loc[(df2c.SUBTYPE == "Fruits & nuts") &
(df2c.SUBCLASS == "Yield rainfall")].WATER_PRODUCTIVITY)
wp_r03c07 = float(df2c.loc[(df2c.SUBTYPE == "Fruits & nuts") &
(df2c.SUBCLASS == "Incremental yield")].WATER_PRODUCTIVITY)
wp_r04c07 = float(df2c.loc[(df2c.SUBTYPE == "Fruits & nuts") &
(df2c.SUBCLASS == "Total yield")].WATER_PRODUCTIVITY)
wp_r01c08 = float(df2c.loc[(df2c.TYPE == "Fruit & vegetables") &
(df2c.SUBCLASS == "Yield") &
(df2c.SUBTYPE == "Merged")].WATER_PRODUCTIVITY)
wp_r02c08 = float(df2c.loc[(df2c.TYPE == "Fruit & vegetables") &
(df2c.SUBCLASS == "Yield rainfall") &
(df2c.SUBTYPE == "Merged")].WATER_PRODUCTIVITY)
wp_r03c08 = float(df2c.loc[(df2c.TYPE == "Fruit & vegetables") &
(df2c.SUBCLASS == "Incremental yield") &
(df2c.SUBTYPE == "Merged")].WATER_PRODUCTIVITY)
wp_r04c08 = float(df2c.loc[(df2c.TYPE == "Fruit & vegetables") &
(df2c.SUBCLASS == "Total yield") &
(df2c.SUBTYPE == "Merged")].WATER_PRODUCTIVITY)
wp_r01c09 = float(df2c.loc[(df2c.TYPE == "Oilseeds") &
(df2c.SUBCLASS == "Yield")].WATER_PRODUCTIVITY)
wp_r02c09 = float(df2c.loc[(df2c.TYPE == "Oilseeds") &
(df2c.SUBCLASS == "Yield rainfall")].WATER_PRODUCTIVITY)
wp_r03c09 = float(df2c.loc[(df2c.TYPE == "Oilseeds") &
(df2c.SUBCLASS == "Incremental yield")].WATER_PRODUCTIVITY)
wp_r04c09 = float(df2c.loc[(df2c.TYPE == "Oilseeds") &
(df2c.SUBCLASS == "Total yield")].WATER_PRODUCTIVITY)
wp_r01c10 = float(df2c.loc[(df2c.TYPE == "Feed crops") &
(df2c.SUBCLASS == "Yield")].WATER_PRODUCTIVITY)
wp_r02c10 = float(df2c.loc[(df2c.TYPE == "Feed crops") &
(df2c.SUBCLASS == "Yield rainfall")].WATER_PRODUCTIVITY)
wp_r03c10 = float(df2c.loc[(df2c.TYPE == "Feed crops") &
(df2c.SUBCLASS == "Incremental yield")].WATER_PRODUCTIVITY)
wp_r04c10 = float(df2c.loc[(df2c.TYPE == "Feed crops") &
(df2c.SUBCLASS == "Total yield")].WATER_PRODUCTIVITY)
wp_r01c11 = float(df2c.loc[(df2c.TYPE == "Beverage crops") &
(df2c.SUBCLASS == "Yield")].WATER_PRODUCTIVITY)
wp_r02c11 = float(df2c.loc[(df2c.TYPE == "Beverage crops") &
(df2c.SUBCLASS == "Yield rainfall")].WATER_PRODUCTIVITY)
wp_r03c11 = float(df2c.loc[(df2c.TYPE == "Beverage crops") &
(df2c.SUBCLASS == "Incremental yield")].WATER_PRODUCTIVITY)
wp_r04c11 = float(df2c.loc[(df2c.TYPE == "Beverage crops") &
(df2c.SUBCLASS == "Total yield")].WATER_PRODUCTIVITY)
wp_r01c12 = float(df2c.loc[(df2c.TYPE == "Other crops") &
(df2c.SUBCLASS == "Yield")].WATER_PRODUCTIVITY)
wp_r02c12 = float(df2c.loc[(df2c.TYPE == "Other crops") &
(df2c.SUBCLASS == "Yield rainfall")].WATER_PRODUCTIVITY)
wp_r03c12 = float(df2c.loc[(df2c.TYPE == "Other crops") &
(df2c.SUBCLASS == "Incremental yield")].WATER_PRODUCTIVITY)
wp_r04c12 = float(df2c.loc[(df2c.TYPE == "Other crops") &
(df2c.SUBCLASS == "Total yield")].WATER_PRODUCTIVITY)
wp_r05c01 = float(df2n.loc[(df2n.SUBTYPE == "Meat") &
(df2n.SUBCLASS == "Yield")].WATER_PRODUCTIVITY)
wp_r06c01 = float(df2n.loc[(df2n.SUBTYPE == "Meat") &
(df2n.SUBCLASS == "Yield rainfall")].WATER_PRODUCTIVITY)
wp_r07c01 = float(df2n.loc[(df2n.SUBTYPE == "Meat") &
(df2n.SUBCLASS == "Incremental yield")].WATER_PRODUCTIVITY)
wp_r08c01 = float(df2n.loc[(df2n.SUBTYPE == "Meat") &
(df2n.SUBCLASS == "Total yield")].WATER_PRODUCTIVITY)
wp_r05c02 = float(df2n.loc[(df2n.SUBTYPE == "Milk") &
(df2n.SUBCLASS == "Yield")].WATER_PRODUCTIVITY)
wp_r06c02 = float(df2n.loc[(df2n.SUBTYPE == "Milk") &
(df2n.SUBCLASS == "Yield rainfall")].WATER_PRODUCTIVITY)
wp_r07c02 = float(df2n.loc[(df2n.SUBTYPE == "Milk") &
(df2n.SUBCLASS == "Incremental yield")].WATER_PRODUCTIVITY)
wp_r08c02 = float(df2n.loc[(df2n.SUBTYPE == "Milk") &
(df2n.SUBCLASS == "Total yield")].WATER_PRODUCTIVITY)
wp_r05c03 = float(df2n.loc[(df2n.TYPE == "Fish (Aquaculture)") &
(df2n.SUBCLASS == "Yield")].WATER_PRODUCTIVITY)
wp_r06c03 = float(df2n.loc[(df2n.TYPE == "Fish (Aquaculture)") &
(df2n.SUBCLASS == "Yield rainfall")].WATER_PRODUCTIVITY)
wp_r07c03 = float(df2n.loc[(df2n.TYPE == "Fish (Aquaculture)") &
(df2n.SUBCLASS == "Incremental yield")].WATER_PRODUCTIVITY)
wp_r08c03 = float(df2n.loc[(df2n.TYPE == "Fish (Aquaculture)") &
(df2n.SUBCLASS == "Total yield")].WATER_PRODUCTIVITY)
wp_r05c04 = float(df2n.loc[(df2n.TYPE == "Timber") &
(df2n.SUBCLASS == "Yield")].WATER_PRODUCTIVITY)
wp_r06c04 = float(df2n.loc[(df2n.TYPE == "Timber") &
(df2n.SUBCLASS == "Yield rainfall")].WATER_PRODUCTIVITY)
wp_r07c04 = float(df2n.loc[(df2n.TYPE == "Timber") &
(df2n.SUBCLASS == "Incremental yield")].WATER_PRODUCTIVITY)
wp_r08c04 = float(df2n.loc[(df2n.TYPE == "Timber") &
(df2n.SUBCLASS == "Total yield")].WATER_PRODUCTIVITY)
# Calculations & modify svgs
if not template:
path = os.path.dirname(os.path.abspath(__file__))
svg_template_path_1 = os.path.join(path, 'svg', 'sheet_3_part1.svg')
svg_template_path_2 = os.path.join(path, 'svg', 'sheet_3_part2.svg')
else:
svg_template_path_1 = os.path.abspath(template[0])
svg_template_path_2 = os.path.abspath(template[1])
tree1 = ET.parse(svg_template_path_1)
tree2 = ET.parse(svg_template_path_2)
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
# Titles
xml_txt_box = tree1.findall('''.//*[@id='basin']''')[0]
xml_txt_box.getchildren()[0].text = 'Basin: ' + basin
xml_txt_box = tree1.findall('''.//*[@id='period']''')[0]
xml_txt_box.getchildren()[0].text = 'Period: ' + period
xml_txt_box = tree1.findall('''.//*[@id='units']''')[0]
xml_txt_box.getchildren()[0].text = 'Part 1: Agricultural water consumption (' + units[0] + ')'
xml_txt_box = tree2.findall('''.//*[@id='basin2']''')[0]
xml_txt_box.getchildren()[0].text = 'Basin: ' + basin
xml_txt_box = tree2.findall('''.//*[@id='period2']''')[0]
xml_txt_box.getchildren()[0].text = 'Period: ' + period
xml_txt_box = tree2.findall('''.//*[@id='units2']''')[0]
xml_txt_box.getchildren()[0].text = 'Part 2: Land productivity (' + units[1] + ') and water productivity (' + units[2] + ')'
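    # Every cell below is written with the same guard: format the value when it is
    # present, otherwise insert a dash. A compact helper could express that; this is an
    # illustrative sketch only (the name fill_text_box is not part of the original
    # module) and the explicit blocks below are kept as written.
    def fill_text_box(tree, element_id, value, fmt='%.2f'):
        box = tree.findall(".//*[@id='%s']" % element_id)[0]
        box[0].text = fmt % value if not pd.isnull(value) else '-'
    # e.g. fill_text_box(tree1, 'crop_r01c01', crop_r01c01)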
# Part 1
xml_txt_box = tree1.findall('''.//*[@id='crop_r01c01']''')[0]
if not pd.isnull(crop_r01c01):
xml_txt_box.getchildren()[0].text = '%.2f' % crop_r01c01
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree1.findall('''.//*[@id='crop_r01c02']''')[0]
if not pd.isnull(crop_r01c02):
xml_txt_box.getchildren()[0].text = '%.2f' % crop_r01c02
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree1.findall('''.//*[@id='crop_r01c03']''')[0]
if not pd.isnull(crop_r01c03):
xml_txt_box.getchildren()[0].text = '%.2f' % crop_r01c03
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree1.findall('''.//*[@id='crop_r01c04']''')[0]
if not pd.isnull(crop_r01c04):
xml_txt_box.getchildren()[0].text = '%.2f' % crop_r01c04
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree1.findall('''.//*[@id='crop_r01c05']''')[0]
if not pd.isnull(crop_r01c05):
xml_txt_box.getchildren()[0].text = '%.2f' % crop_r01c05
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree1.findall('''.//*[@id='crop_r01c06']''')[0]
if not pd.isnull(crop_r01c06):
xml_txt_box.getchildren()[0].text = '%.2f' % crop_r01c06
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree1.findall('''.//*[@id='crop_r01c07']''')[0]
if not pd.isnull(crop_r01c07):
xml_txt_box.getchildren()[0].text = '%.2f' % crop_r01c07
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree1.findall('''.//*[@id='crop_r01c08']''')[0]
if not pd.isnull(crop_r01c08):
xml_txt_box.getchildren()[0].text = '%.2f' % crop_r01c08
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree1.findall('''.//*[@id='crop_r01c09']''')[0]
if not pd.isnull(crop_r01c09):
xml_txt_box.getchildren()[0].text = '%.2f' % crop_r01c09
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree1.findall('''.//*[@id='crop_r01c10']''')[0]
if not pd.isnull(crop_r01c10):
xml_txt_box.getchildren()[0].text = '%.2f' % crop_r01c10
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree1.findall('''.//*[@id='crop_r01c11']''')[0]
if not pd.isnull(crop_r01c11):
xml_txt_box.getchildren()[0].text = '%.2f' % crop_r01c11
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree1.findall('''.//*[@id='crop_r01c12']''')[0]
if not pd.isnull(crop_r01c12):
xml_txt_box.getchildren()[0].text = '%.2f' % crop_r01c12
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree1.findall('''.//*[@id='crop_r01']''')[0]
if not pd.isnull(crop_r01):
xml_txt_box.getchildren()[0].text = '%.2f' % crop_r01
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree1.findall('''.//*[@id='crop_r02c01']''')[0]
if not pd.isnull(crop_r02c01):
xml_txt_box.getchildren()[0].text = '%.2f' % crop_r02c01
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree1.findall('''.//*[@id='crop_r02c02']''')[0]
if not pd.isnull(crop_r02c02):
xml_txt_box.getchildren()[0].text = '%.2f' % crop_r02c02
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree1.findall('''.//*[@id='crop_r02c03']''')[0]
if not pd.isnull(crop_r02c03):
xml_txt_box.getchildren()[0].text = '%.2f' % crop_r02c03
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree1.findall('''.//*[@id='crop_r02c04']''')[0]
if not pd.isnull(crop_r02c04):
xml_txt_box.getchildren()[0].text = '%.2f' % crop_r02c04
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree1.findall('''.//*[@id='crop_r02c05']''')[0]
if not pd.isnull(crop_r02c05):
xml_txt_box.getchildren()[0].text = '%.2f' % crop_r02c05
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree1.findall('''.//*[@id='crop_r02c06']''')[0]
if not pd.isnull(crop_r02c06):
xml_txt_box.getchildren()[0].text = '%.2f' % crop_r02c06
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree1.findall('''.//*[@id='crop_r02c07']''')[0]
if not pd.isnull(crop_r02c07):
xml_txt_box.getchildren()[0].text = '%.2f' % crop_r02c07
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree1.findall('''.//*[@id='crop_r02c08']''')[0]
if not pd.isnull(crop_r02c08):
xml_txt_box.getchildren()[0].text = '%.2f' % crop_r02c08
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree1.findall('''.//*[@id='crop_r02c09']''')[0]
if not pd.isnull(crop_r02c09):
xml_txt_box.getchildren()[0].text = '%.2f' % crop_r02c09
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree1.findall('''.//*[@id='crop_r02c10']''')[0]
if not pd.isnull(crop_r02c10):
xml_txt_box.getchildren()[0].text = '%.2f' % crop_r02c10
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree1.findall('''.//*[@id='crop_r02c11']''')[0]
if not pd.isnull(crop_r02c11):
xml_txt_box.getchildren()[0].text = '%.2f' % crop_r02c11
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree1.findall('''.//*[@id='crop_r02c12']''')[0]
if not pd.isnull(crop_r02c12):
xml_txt_box.getchildren()[0].text = '%.2f' % crop_r02c12
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree1.findall('''.//*[@id='crop_r02']''')[0]
if not pd.isnull(crop_r02):
xml_txt_box.getchildren()[0].text = '%.2f' % crop_r02
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree1.findall('''.//*[@id='crop_r03c01']''')[0]
if not pd.isnull(crop_r03c01):
xml_txt_box.getchildren()[0].text = '%.2f' % crop_r03c01
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree1.findall('''.//*[@id='crop_r03c02']''')[0]
if not pd.isnull(crop_r03c02):
xml_txt_box.getchildren()[0].text = '%.2f' % crop_r03c02
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree1.findall('''.//*[@id='crop_r03c03']''')[0]
if not pd.isnull(crop_r03c03):
xml_txt_box.getchildren()[0].text = '%.2f' % crop_r03c03
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree1.findall('''.//*[@id='crop_r03c04']''')[0]
if not pd.isnull(crop_r03c04):
xml_txt_box.getchildren()[0].text = '%.2f' % crop_r03c04
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree1.findall('''.//*[@id='crop_r03c05']''')[0]
if not pd.isnull(crop_r03c05):
xml_txt_box.getchildren()[0].text = '%.2f' % crop_r03c05
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree1.findall('''.//*[@id='crop_r03c06']''')[0]
if not pd.isnull(crop_r03c06):
xml_txt_box.getchildren()[0].text = '%.2f' % crop_r03c06
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree1.findall('''.//*[@id='crop_r03c07']''')[0]
if not pd.isnull(crop_r03c07):
xml_txt_box.getchildren()[0].text = '%.2f' % crop_r03c07
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree1.findall('''.//*[@id='crop_r03c08']''')[0]
if not pd.isnull(crop_r03c08):
xml_txt_box.getchildren()[0].text = '%.2f' % crop_r03c08
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree1.findall('''.//*[@id='crop_r03c09']''')[0]
if not pd.isnull(crop_r03c09):
xml_txt_box.getchildren()[0].text = '%.2f' % crop_r03c09
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree1.findall('''.//*[@id='crop_r03c10']''')[0]
if not pd.isnull(crop_r03c10):
xml_txt_box.getchildren()[0].text = '%.2f' % crop_r03c10
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree1.findall('''.//*[@id='crop_r03c11']''')[0]
if not pd.isnull(crop_r03c11):
xml_txt_box.getchildren()[0].text = '%.2f' % crop_r03c11
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree1.findall('''.//*[@id='crop_r03c12']''')[0]
if not pd.isnull(crop_r03c12):
xml_txt_box.getchildren()[0].text = '%.2f' % crop_r03c12
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree1.findall('''.//*[@id='crop_r03']''')[0]
if not pd.isnull(crop_r03):
xml_txt_box.getchildren()[0].text = '%.2f' % crop_r03
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree1.findall('''.//*[@id='crop_r04c01']''')[0]
if not pd.isnull(crop_r04c01):
xml_txt_box.getchildren()[0].text = '%.2f' % crop_r04c01
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree1.findall('''.//*[@id='crop_r04c02']''')[0]
if not pd.isnull(crop_r04c02):
xml_txt_box.getchildren()[0].text = '%.2f' % crop_r04c02
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree1.findall('''.//*[@id='crop_r04c03']''')[0]
if not pd.isnull(crop_r04c03):
xml_txt_box.getchildren()[0].text = '%.2f' % crop_r04c03
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree1.findall('''.//*[@id='crop_r04c04']''')[0]
if not pd.isnull(crop_r04c04):
xml_txt_box.getchildren()[0].text = '%.2f' % crop_r04c04
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree1.findall('''.//*[@id='crop_r04c05']''')[0]
if not pd.isnull(crop_r04c05):
xml_txt_box.getchildren()[0].text = '%.2f' % crop_r04c05
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree1.findall('''.//*[@id='crop_r04c06']''')[0]
if not pd.isnull(crop_r04c06):
xml_txt_box.getchildren()[0].text = '%.2f' % crop_r04c06
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree1.findall('''.//*[@id='crop_r04c07']''')[0]
if not pd.isnull(crop_r04c07):
xml_txt_box.getchildren()[0].text = '%.2f' % crop_r04c07
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree1.findall('''.//*[@id='crop_r04c08']''')[0]
if not pd.isnull(crop_r04c08):
xml_txt_box.getchildren()[0].text = '%.2f' % crop_r04c08
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree1.findall('''.//*[@id='crop_r04c09']''')[0]
if not pd.isnull(crop_r04c09):
xml_txt_box.getchildren()[0].text = '%.2f' % crop_r04c09
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree1.findall('''.//*[@id='crop_r04c10']''')[0]
if not pd.isnull(crop_r04c10):
xml_txt_box.getchildren()[0].text = '%.2f' % crop_r04c10
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree1.findall('''.//*[@id='crop_r04c11']''')[0]
if not pd.isnull(crop_r04c11):
xml_txt_box.getchildren()[0].text = '%.2f' % crop_r04c11
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree1.findall('''.//*[@id='crop_r04c12']''')[0]
if not pd.isnull(crop_r04c12):
xml_txt_box.getchildren()[0].text = '%.2f' % crop_r04c12
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree1.findall('''.//*[@id='crop_r04']''')[0]
if not pd.isnull(crop_r04):
xml_txt_box.getchildren()[0].text = '%.2f' % crop_r04
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree1.findall('''.//*[@id='noncrop_r01c01']''')[0]
if not pd.isnull(noncrop_r01c01):
xml_txt_box.getchildren()[0].text = '%.2f' % noncrop_r01c01
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree1.findall('''.//*[@id='noncrop_r01c02']''')[0]
if not pd.isnull(noncrop_r01c02):
xml_txt_box.getchildren()[0].text = '%.2f' % noncrop_r01c02
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree1.findall('''.//*[@id='noncrop_r01']''')[0]
if not pd.isnull(noncrop_r01) and noncrop_r01 > 0.001:
xml_txt_box.getchildren()[0].text = '%.2f' % noncrop_r01
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree1.findall('''.//*[@id='noncrop_r02c01']''')[0]
if not pd.isnull(noncrop_r02c01):
xml_txt_box.getchildren()[0].text = '%.2f' % noncrop_r02c01
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree1.findall('''.//*[@id='noncrop_r02c02']''')[0]
if not pd.isnull(noncrop_r02c02):
xml_txt_box.getchildren()[0].text = '%.2f' % noncrop_r02c02
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree1.findall('''.//*[@id='noncrop_r02']''')[0]
if not pd.isnull(noncrop_r02) and noncrop_r02 > 0.001:
xml_txt_box.getchildren()[0].text = '%.2f' % noncrop_r02
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree1.findall('''.//*[@id='noncrop_r03c01']''')[0]
if not pd.isnull(noncrop_r03c01):
xml_txt_box.getchildren()[0].text = '%.2f' % noncrop_r03c01
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree1.findall('''.//*[@id='noncrop_r03c02']''')[0]
if not pd.isnull(noncrop_r03c02):
xml_txt_box.getchildren()[0].text = '%.2f' % noncrop_r03c02
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree1.findall('''.//*[@id='noncrop_r03']''')[0]
if not pd.isnull(noncrop_r03) and noncrop_r03 > 0.001:
xml_txt_box.getchildren()[0].text = '%.2f' % noncrop_r03
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree1.findall('''.//*[@id='noncrop_r04c01']''')[0]
if not pd.isnull(noncrop_r04c01):
xml_txt_box.getchildren()[0].text = '%.2f' % noncrop_r04c01
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree1.findall('''.//*[@id='noncrop_r04c02']''')[0]
if not pd.isnull(noncrop_r04c02):
xml_txt_box.getchildren()[0].text = '%.2f' % noncrop_r04c02
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree1.findall('''.//*[@id='noncrop_r04']''')[0]
if not pd.isnull(noncrop_r04) and noncrop_r04 > 0.001:
xml_txt_box.getchildren()[0].text = '%.2f' % noncrop_r04
else:
xml_txt_box.getchildren()[0].text = '-'
# Part 2
xml_txt_box = tree1.findall('''.//*[@id='ag_water_cons']''')[0]
if not pd.isnull(ag_water_cons):
xml_txt_box.getchildren()[0].text = '%.2f' % ag_water_cons
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree2.findall('''.//*[@id='lp_r01c01']''')[0]
if not pd.isnull(lp_r01c01):
xml_txt_box.getchildren()[0].text = '%.0f' % lp_r01c01
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree2.findall('''.//*[@id='lp_r01c02']''')[0]
if not pd.isnull(lp_r01c02):
xml_txt_box.getchildren()[0].text = '%.0f' % lp_r01c02
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree2.findall('''.//*[@id='lp_r01c03']''')[0]
if not pd.isnull(lp_r01c03):
xml_txt_box.getchildren()[0].text = '%.0f' % lp_r01c03
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree2.findall('''.//*[@id='lp_r01c04']''')[0]
if not pd.isnull(lp_r01c04):
xml_txt_box.getchildren()[0].text = '%.0f' % lp_r01c04
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree2.findall('''.//*[@id='lp_r01c05']''')[0]
if not pd.isnull(lp_r01c05):
xml_txt_box.getchildren()[0].text = '%.0f' % lp_r01c05
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree2.findall('''.//*[@id='lp_r01c06']''')[0]
if not pd.isnull(lp_r01c06):
xml_txt_box.getchildren()[0].text = '%.0f' % lp_r01c06
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree2.findall('''.//*[@id='lp_r01c07']''')[0]
if not pd.isnull(lp_r01c07):
xml_txt_box.getchildren()[0].text = '%.0f' % lp_r01c07
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree2.findall('''.//*[@id='lp_r01c08']''')[0]
if not pd.isnull(lp_r01c08):
xml_txt_box.getchildren()[0].text = '%.0f' % lp_r01c08
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree2.findall('''.//*[@id='lp_r01c09']''')[0]
if not pd.isnull(lp_r01c09):
xml_txt_box.getchildren()[0].text = '%.0f' % lp_r01c09
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree2.findall('''.//*[@id='lp_r01c10']''')[0]
if not pd.isnull(lp_r01c10):
xml_txt_box.getchildren()[0].text = '%.0f' % lp_r01c10
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree2.findall('''.//*[@id='lp_r01c11']''')[0]
if not pd.isnull(lp_r01c11):
xml_txt_box.getchildren()[0].text = '%.0f' % lp_r01c11
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree2.findall('''.//*[@id='lp_r01c12']''')[0]
if not pd.isnull(lp_r01c12):
xml_txt_box.getchildren()[0].text = '%.0f' % lp_r01c12
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree2.findall('''.//*[@id='lp_r02c01']''')[0]
if not pd.isnull(lp_r02c01):
xml_txt_box.getchildren()[0].text = '%.0f' % lp_r02c01
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree2.findall('''.//*[@id='lp_r02c02']''')[0]
if not pd.isnull(lp_r02c02):
xml_txt_box.getchildren()[0].text = '%.0f' % lp_r02c02
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree2.findall('''.//*[@id='lp_r02c03']''')[0]
if not pd.isnull(lp_r02c03):
xml_txt_box.getchildren()[0].text = '%.0f' % lp_r02c03
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree2.findall('''.//*[@id='lp_r02c04']''')[0]
if not pd.isnull(lp_r02c04):
xml_txt_box.getchildren()[0].text = '%.0f' % lp_r02c04
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree2.findall('''.//*[@id='lp_r02c05']''')[0]
if not pd.isnull(lp_r02c05):
xml_txt_box.getchildren()[0].text = '%.0f' % lp_r02c05
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree2.findall('''.//*[@id='lp_r02c06']''')[0]
if not pd.isnull(lp_r02c06):
xml_txt_box.getchildren()[0].text = '%.0f' % lp_r02c06
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree2.findall('''.//*[@id='lp_r02c07']''')[0]
if not pd.isnull(lp_r02c07):
xml_txt_box.getchildren()[0].text = '%.0f' % lp_r02c07
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree2.findall('''.//*[@id='lp_r02c08']''')[0]
if not pd.isnull(lp_r02c08):
xml_txt_box.getchildren()[0].text = '%.0f' % lp_r02c08
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree2.findall('''.//*[@id='lp_r02c09']''')[0]
if not pd.isnull(lp_r02c09):
xml_txt_box.getchildren()[0].text = '%.0f' % lp_r02c09
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree2.findall('''.//*[@id='lp_r02c10']''')[0]
if not pd.isnull(lp_r02c10):
xml_txt_box.getchildren()[0].text = '%.0f' % lp_r02c10
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree2.findall('''.//*[@id='lp_r02c11']''')[0]
if not pd.isnull(lp_r02c11):
xml_txt_box.getchildren()[0].text = '%.0f' % lp_r02c11
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree2.findall('''.//*[@id='lp_r02c12']''')[0]
if not pd.isnull(lp_r02c12):
xml_txt_box.getchildren()[0].text = '%.0f' % lp_r02c12
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree2.findall('''.//*[@id='lp_r03c01']''')[0]
if not pd.isnull(lp_r03c01):
xml_txt_box.getchildren()[0].text = '%.0f' % lp_r03c01
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree2.findall('''.//*[@id='lp_r03c02']''')[0]
if not pd.isnull(lp_r03c02):
xml_txt_box.getchildren()[0].text = '%.0f' % lp_r03c02
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree2.findall('''.//*[@id='lp_r03c03']''')[0]
if not pd.isnull(lp_r03c03):
xml_txt_box.getchildren()[0].text = '%.0f' % lp_r03c03
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree2.findall('''.//*[@id='lp_r03c04']''')[0]
if not pd.isnull(lp_r03c04):
xml_txt_box.getchildren()[0].text = '%.0f' % lp_r03c04
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree2.findall('''.//*[@id='lp_r03c05']''')[0]
if not pd.isnull(lp_r03c05):
xml_txt_box.getchildren()[0].text = '%.0f' % lp_r03c05
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree2.findall('''.//*[@id='lp_r03c06']''')[0]
if not pd.isnull(lp_r03c06):
xml_txt_box.getchildren()[0].text = '%.0f' % lp_r03c06
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree2.findall('''.//*[@id='lp_r03c07']''')[0]
if not pd.isnull(lp_r03c07):
xml_txt_box.getchildren()[0].text = '%.0f' % lp_r03c07
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree2.findall('''.//*[@id='lp_r03c08']''')[0]
if not pd.isnull(lp_r03c08):
xml_txt_box.getchildren()[0].text = '%.0f' % lp_r03c08
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree2.findall('''.//*[@id='lp_r03c09']''')[0]
if not pd.isnull(lp_r03c09):
xml_txt_box.getchildren()[0].text = '%.0f' % lp_r03c09
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree2.findall('''.//*[@id='lp_r03c10']''')[0]
if not pd.isnull(lp_r03c10):
xml_txt_box.getchildren()[0].text = '%.0f' % lp_r03c10
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree2.findall('''.//*[@id='lp_r03c11']''')[0]
if not pd.isnull(lp_r03c11):
xml_txt_box.getchildren()[0].text = '%.0f' % lp_r03c11
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree2.findall('''.//*[@id='lp_r03c12']''')[0]
if not pd.isnull(lp_r03c12):
xml_txt_box.getchildren()[0].text = '%.0f' % lp_r03c12
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree2.findall('''.//*[@id='lp_r04c01']''')[0]
if not pd.isnull(lp_r04c01):
xml_txt_box.getchildren()[0].text = '%.0f' % lp_r04c01
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree2.findall('''.//*[@id='lp_r04c02']''')[0]
if not pd.isnull(lp_r04c02):
xml_txt_box.getchildren()[0].text = '%.0f' % lp_r04c02
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree2.findall('''.//*[@id='lp_r04c03']''')[0]
if not pd.isnull(lp_r04c03):
xml_txt_box.getchildren()[0].text = '%.0f' % lp_r04c03
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree2.findall('''.//*[@id='lp_r04c04']''')[0]
if not pd.isnull(lp_r04c04):
xml_txt_box.getchildren()[0].text = '%.0f' % lp_r04c04
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree2.findall('''.//*[@id='lp_r04c05']''')[0]
if not pd.isnull(lp_r04c05):
xml_txt_box.getchildren()[0].text = '%.0f' % lp_r04c05
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree2.findall('''.//*[@id='lp_r04c06']''')[0]
if not pd.isnull(lp_r04c06):
xml_txt_box.getchildren()[0].text = '%.0f' % lp_r04c06
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree2.findall('''.//*[@id='lp_r04c07']''')[0]
    if not pd.isnull(lp_r04c07):
        xml_txt_box.getchildren()[0].text = '%.0f' % lp_r04c07
    else:
        xml_txt_box.getchildren()[0].text = '-'
from typing import Any
import numpy as np
import numpy.testing as npt
import pandas as pd
import pytest
from sklearn.preprocessing import PowerTransformer
from etna.datasets import TSDataset
from etna.transforms.power import BoxCoxTransform
from etna.transforms.power import YeoJohnsonTransform
@pytest.fixture
def non_positive_df() -> pd.DataFrame:
df_1 = pd.DataFrame.from_dict({"timestamp": pd.date_range("2021-06-01", "2021-07-01", freq="1d")})
    df_2 = pd.DataFrame.from_dict({"timestamp": pd.date_range("2021-06-01", "2021-07-01", freq="1d")})
# Arithmetic tests for DataFrame/Series/Index/Array classes that should
# behave identically.
# Specifically for Period dtype
import operator
import numpy as np
import pytest
from pandas._libs.tslibs.period import IncompatibleFrequency
from pandas.errors import PerformanceWarning
import pandas as pd
from pandas import Period, PeriodIndex, Series, period_range
from pandas.core import ops
from pandas.core.arrays import TimedeltaArray
import pandas.util.testing as tm
from pandas.tseries.frequencies import to_offset
# ------------------------------------------------------------------
# Comparisons
class TestPeriodArrayLikeComparisons:
# Comparison tests for PeriodDtype vectors fully parametrized over
# DataFrame/Series/PeriodIndex/PeriodArray. Ideally all comparison
# tests will eventually end up here.
def test_compare_zerodim(self, box_with_array):
# GH#26689 make sure we unbox zero-dimensional arrays
xbox = box_with_array if box_with_array is not pd.Index else np.ndarray
pi = pd.period_range("2000", periods=4)
other = np.array(pi.to_numpy()[0])
        pi = tm.box_expected(pi, box_with_array)
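        # A hedged sketch of how this check typically concludes (the expected values
        # assume an elementwise <= comparison against the boxed zero-dim first period).
        result = pi <= other
        expected = np.array([True, False, False, False])
        expected = tm.box_expected(expected, xbox)
        tm.assert_equal(result, expected)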
import argparse
import json
import logging
import os
import sys
import warnings
from itertools import product
import numpy as np
import pandas as pd
import torch
from paccmann_chemistry.models import (StackGRUDecoder, StackGRUEncoder, TeacherVAE)
from paccmann_chemistry.utils import get_device
from paccmann_generator.plot_utils import (
plot_and_compare, plot_and_compare_proteins, plot_loss
)
from paccmann_generator.reinforce_sets import ReinforceMultiModalSets
from paccmann_generator.utils import disable_rdkit_logging
from paccmann_predictor.models import MODEL_FACTORY
from paccmann_sets.models.sets_autoencoder import SetsAE
from pytoda.proteins.protein_language import ProteinLanguage
from pytoda.smiles.smiles_language import SMILESLanguage, SMILESTokenizer
warnings.filterwarnings("ignore")
parser = argparse.ArgumentParser(description='PaccMann^RL training script')
parser.add_argument(
'omics_data_path',
type=str,
help='Omics data path to condition molecule generation.'
)
parser.add_argument(
'protein_data_path',
type=str,
help='Protein data path to condition molecule generation.'
)
parser.add_argument(
'test_cell_line', type=str, help='Name of testing cell line (LOOCV).'
)
parser.add_argument('encoder_model_path', type=str, help='Path to setAE model.')
parser.add_argument('mol_model_path', type=str, help='Path to chemistry model.')
parser.add_argument('ic50_model_path', type=str, help='Path to pretrained IC50 model.')
parser.add_argument(
'affinity_model_path', type=str, help='Path to pretrained affinity model.'
)
parser.add_argument('--tox21_path', help='Optional path to Tox21 model.')
parser.add_argument(
'params_path', type=str, help='Directory containing the model params JSON file.'
)
parser.add_argument(
'encoder_params_path',
type=str,
help='directory containing the encoder parameters JSON file.'
)
parser.add_argument('results_path', type=str, help='Path where results are saved.')
parser.add_argument(
'unbiased_protein_path',
type=str,
help='Path where unbiased protein predictions are saved.'
)
parser.add_argument(
'unbiased_omics_path',
type=str,
help='Path where unbiased omics predictions are saved.'
)
parser.add_argument(
'site', type=str, help='Name of the cancer site for conditioning generation.'
)
parser.add_argument('model_name', type=str, help='Name for the trained model.')
args = parser.parse_args()
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
logging.basicConfig(stream=sys.stdout, level=logging.DEBUG)
logger = logging.getLogger('train_paccmann_rl')
logger_m = logging.getLogger('matplotlib')
logger_m.setLevel(logging.WARNING)
def main(*, parser_namespace):
disable_rdkit_logging()
# read the params json
params = dict()
with open(parser_namespace.params_path) as f:
params.update(json.load(f))
with open(parser_namespace.encoder_params_path) as f:
encoder_params = json.load(f)
# results_path = params.get('results_path', parser_namespace.results_path)
mol_model_path = params.get('mol_model_path', parser_namespace.mol_model_path)
encoder_model_path = params.get(
'encoder_model_path', parser_namespace.encoder_model_path
)
ic50_model_path = params.get('ic50_model_path', parser_namespace.ic50_model_path)
omics_data_path = params.get('omics_data_path', parser_namespace.omics_data_path)
affinity_model_path = params.get(
'affinity_model_path', parser_namespace.affinity_model_path
)
protein_data_path = params.get(
'protein_data_path', parser_namespace.protein_data_path
)
model_name = params.get(
'model_name', parser_namespace.model_name
) # yapf: disable
unbiased_protein_path = params.get(
'unbiased_protein_path', parser_namespace.unbiased_protein_path
) # yapf: disable
unbiased_omics_path = params.get(
'unbiased_omics_path', parser_namespace.unbiased_omics_path
) # yapf: disable
site = params.get(
'site', parser_namespace.site
) # yapf: disable
test_cell_line = params.get('test_cell_line', parser_namespace.test_cell_line)
logger.info(f'Model with name {model_name} starts.')
# passing optional paths to params to possibly update_reward_fn
optional_reward_args = ['tox21_path', 'site']
for arg in optional_reward_args:
if parser_namespace.__dict__[arg]:
params[arg] = params.get(arg, parser_namespace.__dict__[arg])
omics_df = pd.read_csv(omics_data_path)
protein_df = pd.read_csv(protein_data_path)
protein_df.index = protein_df['entry_name']
# Restore SMILES Model
with open(os.path.join(mol_model_path, 'model_params.json')) as f:
mol_params = json.load(f)
gru_encoder = StackGRUEncoder(mol_params)
gru_decoder = StackGRUDecoder(mol_params)
generator = TeacherVAE(gru_encoder, gru_decoder)
generator.load(
os.path.join(
mol_model_path, f"weights/best_{params.get('smiles_metric', 'rec')}.pt"
),
map_location=get_device()
)
# Load languages
generator_smiles_language = SMILESTokenizer(
vocab_file=os.path.join(mol_model_path, 'vocab.json')
)
generator.smiles_language = generator_smiles_language
    # Load predictors
with open(os.path.join(ic50_model_path, 'model_params.json')) as f:
paccmann_params = json.load(f)
paccmann_predictor = MODEL_FACTORY['mca'](paccmann_params)
paccmann_predictor.load(
os.path.join(
ic50_model_path, f"weights/best_{params.get('ic50_metric', 'rmse')}_mca.pt"
),
map_location=get_device()
)
paccmann_predictor.eval()
paccmann_smiles_language = SMILESLanguage.from_pretrained(
pretrained_path=ic50_model_path
)
paccmann_predictor._associate_language(paccmann_smiles_language)
with open(os.path.join(affinity_model_path, 'model_params.json')) as f:
protein_pred_params = json.load(f)
protein_predictor = MODEL_FACTORY['bimodal_mca'](protein_pred_params)
protein_predictor.load(
os.path.join(
affinity_model_path,
f"weights/best_{params.get('p_metric', 'ROC-AUC')}_bimodal_mca.pt"
),
map_location=get_device()
)
protein_predictor.eval()
affinity_smiles_language = SMILESLanguage.from_pretrained(
pretrained_path=os.path.join(affinity_model_path, 'smiles_serial')
)
affinity_protein_language = ProteinLanguage()
protein_predictor._associate_language(affinity_smiles_language)
protein_predictor._associate_language(affinity_protein_language)
setsae = SetsAE(device, **encoder_params).to(device)
setsae.load_state_dict(torch.load(encoder_model_path, map_location=get_device()))
set_encoder = setsae.encoder
set_encoder.latent_size = set_encoder.hidden_size_encoder
#############################################
# Create a generator model that will be optimized
gru_encoder_rl = StackGRUEncoder(mol_params)
gru_decoder_rl = StackGRUDecoder(mol_params)
generator_rl = TeacherVAE(gru_encoder_rl, gru_decoder_rl)
generator_rl.load(
os.path.join(mol_model_path, f"weights/best_{params.get('metric', 'rec')}.pt"),
map_location=get_device()
)
generator_rl.smiles_language = generator_smiles_language
generator_rl.eval()
# generator
model_folder_name = test_cell_line + '_' + 'SetAE'
learner = ReinforceMultiModalSets(
generator_rl, set_encoder, protein_predictor, paccmann_predictor, protein_df,
omics_df, params, generator_smiles_language, model_folder_name, logger, True
)
train_omics = omics_df[omics_df['cell_line'] != test_cell_line]['cell_line']
train_protein = protein_df['entry_name']
# train_sets = list(product(train_omics, train_protein))
test_sets = list(product([test_cell_line], train_protein))
assert len(test_sets) == len(protein_df)
unbiased_preds_ic50 = np.array(
pd.read_csv(os.path.join(unbiased_omics_path,
test_cell_line + '.csv'))['IC50'].values
)
biased_efficacy_ratios, biased_affinity_ratios, tox_ratios = [], [], []
rewards, rl_losses = [], []
gen_mols, gen_prot, gen_cell = [], [], []
gen_affinity, gen_ic50, modes = [], [], []
proteins_tested = []
batch_size = params['batch_size']
logger.info(f'Model stored at {learner.model_path}')
# total_train = len(train_sets)
protein_name = None
for epoch in range(1, params['epochs'] + 1):
logger.info(f"Epoch {epoch:d}/{params['epochs']:d}")
for step in range(1, params['steps'] + 1):
cell_line = np.random.choice(train_omics)
protein_name = np.random.choice(train_protein)
# sample = np.random.randint(total_train)
# cell_line, protein_name = train_sets[sample]
logger.info(f'Current train cell: {cell_line}')
logger.info(f'Current train protein: {protein_name}')
rew, loss = learner.policy_gradient(
cell_line, protein_name, epoch, batch_size
)
logger.info(
f"Step {step:d}/{params['steps']:d} \t loss={loss:.2f}, mean rew={rew:.2f}"
)
rewards.append(rew.item())
rl_losses.append(loss)
# Save model
if epoch % 5 == 0:
learner.save(f'gen_{epoch}.pt', f'enc_{epoch}.pt')
# unbiased pred files are given by protein accession number, so convert entry_name
protein_accession = protein_df.loc[protein_name, 'accession_number']
train_unbiased_preds_affinity = np.array(
pd.read_csv(
os.path.join(unbiased_protein_path, protein_accession + '.csv')
)['affinity'].values
)
train_unbiased_preds_ic50 = np.array(
pd.read_csv(os.path.join(unbiased_omics_path,
cell_line + '.csv'))['IC50'].values
)
smiles, preds_affinity, preds_ic50, idx = learner.generate_compounds_and_evaluate(
epoch, params['eval_batch_size'], protein_name, cell_line
)
gs = [
s for i, s in enumerate(smiles)
if preds_ic50[i] < learner.ic50_threshold and preds_affinity[i] > 0.5
]
gp_ic50 = preds_ic50[(preds_ic50 < learner.ic50_threshold)
& (preds_affinity > 0.5)]
gp_affinity = preds_affinity[(preds_ic50 < learner.ic50_threshold)
& (preds_affinity > 0.5)]
for ic50, affinity, s in zip(gp_ic50, gp_affinity, gs):
gen_mols.append(s)
gen_cell.append(cell_line)
gen_prot.append(protein_name)
gen_affinity.append(affinity)
gen_ic50.append(ic50)
modes.append('train')
plot_and_compare_proteins(
train_unbiased_preds_affinity, preds_affinity, protein_name, epoch,
learner.model_path, 'train', params['eval_batch_size']
)
plot_and_compare(
train_unbiased_preds_ic50, preds_ic50, site, cell_line, epoch,
learner.model_path, 'train', params['eval_batch_size']
)
# test_cell_line = np.random.choice(test_omics)
# test_protein_name = np.random.choice(test_protein)
if epoch > 10 and epoch % 5 == 0:
for test_idx, test_sample in enumerate(test_sets):
test_cell_line, test_protein_name = test_sample
proteins_tested.append(test_protein_name)
logger.info(f'EVAL cell: {test_cell_line}')
logger.info(f'EVAL protein: {test_protein_name}')
test_protein_accession = protein_df.loc[test_protein_name,
'accession_number']
unbiased_preds_affinity = np.array(
pd.read_csv(
os.path.join(
unbiased_protein_path, test_protein_accession + '.csv'
)
)['affinity'].values
)
smiles, preds_affinity, preds_ic50, idx = (
learner.generate_compounds_and_evaluate(
epoch, params['eval_batch_size'], test_protein_name,
test_cell_line
)
)
gs = [
s for i, s in enumerate(smiles) if
preds_ic50[i] < learner.ic50_threshold and preds_affinity[i] > 0.5
]
gp_ic50 = preds_ic50[(preds_ic50 < learner.ic50_threshold)
& (preds_affinity > 0.5)]
gp_affinity = preds_affinity[(preds_ic50 < learner.ic50_threshold)
& (preds_affinity > 0.5)]
for ic50, affinity, s in zip(gp_ic50, gp_affinity, gs):
gen_mols.append(s)
gen_cell.append(test_cell_line)
gen_prot.append(test_protein_name)
gen_affinity.append(affinity)
gen_ic50.append(ic50)
modes.append('test')
inds = np.argsort(gp_ic50)[::-1]
for i in inds[:5]:
logger.info(
f'Epoch {epoch:d}, generated {gs[i]} against '
f'{test_protein_name} and {test_cell_line}.\n'
f'Predicted IC50 = {gp_ic50[i]}, Predicted Affinity = {gp_affinity[i]}.'
)
plot_and_compare(
unbiased_preds_ic50, preds_ic50, site, test_cell_line, epoch,
learner.model_path, f'test_{test_protein_name}',
params['eval_batch_size']
)
plot_and_compare_proteins(
unbiased_preds_affinity, preds_affinity, test_protein_name, epoch,
learner.model_path, 'test', params['eval_batch_size']
)
biased_affinity_ratios.append(
np.round(
100 * (np.sum(preds_affinity > 0.5) / len(preds_affinity)), 1
)
)
biased_efficacy_ratios.append(
np.round(
100 *
(np.sum(preds_ic50 < learner.ic50_threshold) / len(preds_ic50)),
1
)
)
all_toxes = np.array([learner.tox21(s) for s in smiles])
tox_ratios.append(
np.round(100 * (np.sum(all_toxes == 1.) / len(all_toxes)), 1)
)
logger.info(f'Percentage of non-toxic compounds {tox_ratios[-1]}')
toxes = [learner.tox21(s) for s in gen_mols]
# Save results (good molecules!) in DF
df = pd.DataFrame(
{
'protein': gen_prot,
'cell_line': gen_cell,
'SMILES': gen_mols,
'IC50': gen_ic50,
'Binding probability': gen_affinity,
'mode': modes,
'Tox21': toxes
}
)
df.to_csv(os.path.join(learner.model_path, 'results', 'generated.csv'))
# Plot loss development
loss_df = pd.DataFrame({'loss': rl_losses, 'rewards': rewards})
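# The original presumably saves and plots this dataframe next; a minimal
# matplotlib sketch (file names are illustrative, not from the source):
loss_df.to_csv(os.path.join(learner.model_path, 'results', 'loss_reward_evolution.csv'))
import matplotlib.pyplot as plt
fig, ax1 = plt.subplots()
ax1.plot(loss_df['loss'].values, color='tab:red', label='RL loss')
ax1.set_xlabel('step')
ax1.set_ylabel('loss')
ax2 = ax1.twinx()
ax2.plot(loss_df['rewards'].values, color='tab:blue', label='mean reward')
ax2.set_ylabel('reward')
fig.savefig(os.path.join(learner.model_path, 'results', 'loss_reward_evolution.png'))
plt.close(fig)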
# -*- coding: utf-8 -*-
"""
Created on Thu Nov 18 15:43:55 2021
@author: ZeitgeberH
"""
from pathlib import Path
from PyQt5 import QtGui, QtCore, QtSvg
from pyqtgraph.Qt import QtWidgets
from PyQt5.QtWidgets import QMessageBox, QTableWidgetItem
import pyqtgraph as pg
import pyqtgraph.opengl as gl
from pyqtgraph.parametertree import Parameter, ParameterTree
import gr_pars as AllMyPars
from pyqtgraph import GraphicsLayoutWidget
import sys
import os
import numpy as np
import pandas as pd
import itertools
import time as sysTime
from copy import deepcopy
import glob
from gr_viewClass import (FileView,ParameterView,PlotView,
TabView,TreeView, TableView,showdialog,getfiles,ImageView_gv)
from gr_allenFISH import GeneRenderAPI
from gr_allenFISH_utils import (
check_gene_cached,
check_gene_cached_images,
load_cached_gene,
download_and_cache,
download_and_cache_image,
queryGene_ncbi,rgb2intensity
)
from gr_sys_utils import base_dir
import pdb
from PIL import Image
Image.MAX_IMAGE_PIXELS = 1000000000 ## silence decompression bomb warning
import tqdm
import webbrowser
from skimage import filters as SKIfilters
from skimage import measure as SKImeasure
from matplotlib.cm import jet as jetcm
class MainWindow(QtWidgets.QMainWindow):
"""
Main frame.
"""
def __init__(self, app, parent = None):
super(MainWindow, self).__init__(parent)
self.app = app
self.create_mainWindow()
self.setWindowTitle("FISHViewer")
self.setWindowIcon(pg.QtGui.QIcon('data\\icons\\Fish.png'))
self.setWindowState(QtCore.Qt.WindowMaximized)
def create_mainWindow(self):
# Configure Qt GUI:
self.make_layout()
self.add_menubar()
self.add_toolbar()
self.statusBar = QtWidgets.QStatusBar()
self.setStatusBar(self.statusBar)
self.MouseMode = pg.ViewBox.RectMode
self.mainFrame = pg.QtGui.QWidget()
self.mainFrame.setLayout(self.MainLayout)
self.setCentralWidget(self.mainFrame)
self.otherInitStuff()
self.setRightPanelStrechFactor()
def make_layout(self):
## layout
self.MainLayout = pg.QtGui.QGridLayout()
# horizontal splitter
self.frame_splitter = QtWidgets.QSplitter(QtCore.Qt.Horizontal)
# vertical splitter for the right-hand panels (parameters, experiments, extra info)
self.right_panels = QtWidgets.QSplitter(QtCore.Qt.Vertical, parent = self.frame_splitter)
### left panels for main plotting and data view
self.visulization_view = TabView(self.frame_splitter)
self.plot_splitter = QtWidgets.QSplitter(QtCore.Qt.Vertical, parent = self.visulization_view)
self.ISH_view = pg.ImageView(view=pg.PlotItem()) #, SynAtlasFunc = self.SynAtlas)
self.ISH_view_vLine = pg.InfiniteLine(angle=90, movable=False, name='cursorV1')
self.ISH_view_hLine = pg.InfiniteLine(angle=0, movable=False, name='cursorH1')
self.ISH_view.addItem(self.ISH_view_vLine, ignoreBounds=True)
self.ISH_view.addItem(self.ISH_view_hLine, ignoreBounds=True)
self.ISH_view.view.hoverEvent = self.imageHoverEvent
self.ISH_view.view.scene().sigMouseClicked.connect(self.ISH_view_mouseClicked)
# self.ISH_view.ui.histogram.hide()
self.plot_splitter.addWidget(self.ISH_view)
# self.updateFishImage(self.ISH_view, 'atlas_example.jpg')
self.ISH_view.setColorMap(pg.colormap.get('CET-L1'))
self.ISH_view.ui.roiBtn.hide()
self.ISH_view.ui.menuBtn.hide()
self.ISH_view.roi.hide()
# self.ISH_view.ui.roiPlot.close()
# self.ISH_view.ui.roiPlot.hide()
self.scale1 = pg.ScaleBar(size=50, suffix='pixels')
self.scale1.setParentItem(self.ISH_view.view.getViewBox())
self.scale1.anchor((1, 1), (1, 1), offset=(-20, -20))
self.Expression_view = pg.ImageView(view=pg.PlotItem())
self.plot_splitter.addWidget(self.Expression_view)
self.Expression_view_vLine = pg.InfiniteLine(angle=90, movable=False, name='cursorV2')
self.Expression_view_hLine = pg.InfiniteLine(angle=0, movable=False, name='cursorH2')
self.Expression_view.addItem(self.Expression_view_vLine, ignoreBounds=True)
self.Expression_view.addItem(self.Expression_view_hLine, ignoreBounds=True)
self.Expression_view.view.hoverEvent = self.imageHoverEvent
# self.updateFishImage(self.Expression_view, 'atlas_example.jpg')
self.Expression_view.setColorMap(pg.colormap.get("CET_L16", source='colorcet')) #pg.colormap.get('CET-L1'))
self.Expression_view.ui.roiBtn.hide()
self.Expression_view.ui.menuBtn.hide()
self.Expression_view.roi.hide()
self.Expression_view.ui.roiPlot.getPlotItem().hideAxis('bottom')
self.scale2 = pg.ScaleBar(size=50, suffix='pixels')
self.scale2.setParentItem(self.Expression_view.view.getViewBox())
self.scale2.anchor((1, 1), (1, 1), offset=(-20, -20))
# self.Expression_view.ui.roiPlot.close()
# self.Expression_view.ui.roiPlot.hide()
self.Expression_view.view.setXLink(self.ISH_view.view)
self.Expression_view.view.setYLink(self.ISH_view.view)
self.visulization_view.addTab(self.plot_splitter, 'ABI Views')
self.frame_splitter.addWidget(self.visulization_view)
## right panels for parameters and input
self.frame_splitter.addWidget(self.right_panels)
self.files_view = TabView(self.right_panels)
## parameters for ABI FISH images
self.globalPars = Parameter.create(name='params', type='group', children=AllMyPars.download_options, readonly=False)
self.globalPars.sigTreeStateChanged.connect(self.event_parameters_stateChange) ## call back
self.parTree = ParameterTree()
self.parTree.setHeaderLabels(["Parameter ", "Value"])
self.parTree.setParameters(self.globalPars, showTop=False)
self.parTree.setWindowTitle('Global parameter')
self.files_view.addTab(self.parTree, 'ABA parameters')
self.right_panels.addWidget(self.files_view)
### Middle panel: experiments and section images
self.experiments_view = TabView(self.right_panels)
self.right_panels.addWidget(self.experiments_view)
self.experimentsInfo_Tab = TableView(self.experiments_view,editable=False, sortable=False, mdcFunc=self.loadSectionImageInfo)
self.experiments_view.addTab(self.experimentsInfo_Tab, 'Experiments')
self.experimentImage_Tab = TableView(self.experiments_view,editable=False, sortable=False) #, mdcFunc=self.render_sectionImage,kpFunc=self.render_sectionImage)
self.experimentImage_Tab.currentCellChanged.connect(self.render_sectionImage)
self.experiments_view.addTab(self.experimentImage_Tab, 'Section images')
## bottom panel: extra information
self.trees_view = TabView(self.right_panels)
self.geneInfo_Tab = TableView(self.trees_view,mdcFunc=self.querySelectedGeneInfo) ## Table to show queried gene information
self.trees_view.addTab(self.geneInfo_Tab,'Gene information' )
self.abAtlas_view = TableView(self.trees_view)
self.trees_view.addTab(self.abAtlas_view,'Allen brain atlas ontology' ) ## brain regions names
self.right_panels.addWidget(self.trees_view)
self.setRightPanelStrechFactor()
self.frame_splitter.setStretchFactor(0, 8)
self.frame_splitter.setStretchFactor(1, 1)
self.frame_splitter.setCollapsible(0, False)
self.frame_splitter.setCollapsible(1, True)
# add frame_splitter to main layout
self.MainLayout.addWidget(self.frame_splitter)
def SynAtlas(self):
print('syn in action')
def setRightPanelStrechFactor(self):
self.right_panels.setStretchFactor(0, 4)
self.right_panels.setStretchFactor(1, 5)
self.right_panels.setStretchFactor(2,0)
self.right_panels.setCollapsible(0, True)
self.right_panels.setCollapsible(1, True)
self.right_panels.setCollapsible(2, True)
self.right_panels.setSizes([360, 360, 200])
def add_menubar(self):
"""
set up the menu bar for the main window
"""
self.mbar = pg.QtGui.QMenuBar()
self.MainLayout.setMenuBar(self.mbar)
self.OptionMenu = self.mbar.addMenu('&Tools')
self.OptionAction1 = pg.QtGui.QAction("&Pickle current images")
self.OptionAction1.triggered.connect(self.pickeImage_clicked)
self.OptionMenu.addAction(self.OptionAction1)
self.OptionAction2 = pg.QtGui.QAction("&Batch downloading")
self.OptionAction2.triggered.connect(self.batchDownloading_clicked)
self.OptionMenu.addAction(self.OptionAction2)
self.HelpMenu = self.mbar.addMenu('&Help')
self.HelpAction = pg.QtGui.QAction("&LICENSE")
self.HelpAction.setStatusTip('BSD-3')
self.HelpAction.triggered.connect(self.License_clicked)
self.HelpMenu.addAction(self.HelpAction)
def imageHoverEvent(self, event):
"""Show the position, pixel, and value under the mouse cursor.
"""
if event.isExit():
self.Expression_view.view.setTitle("")
self.ISH_view.view.setTitle("")
return
# data = self.ISH_view.image
if type(self.Expression_view.image)!=type(None):
if self.ISH_view.view.sceneBoundingRect().contains(event.pos().x(), event.pos().y()):
ppos = self.ISH_view.view.mapToView(event.pos())
i, j = int(ppos.x()), int(ppos.y())
if i >=0 and j >=0 and i < self.Expression_view.image.shape[1] and j <self.Expression_view.image.shape[0]:
fishVal = self.ISH_view.image[j,i, :]
self.ISH_view.view.setTitle(f"pos:({i},{j}), rgb: {fishVal}")
if len(self.Expression_view.image.shape)==3:
expressionVal = self.Expression_view.image[j, i, :]
pixelIntensity = rgb2intensity(np.reshape(expressionVal,(1,1,3)), jetcm)[0][0]
rgb_txt = f'RGB: {expressionVal}.'
else:
rgb_txt = ''
pixelIntensity = self.Expression_view.image[j, i]
if self.atlas_ImageID!=None:
expTitle = f"pos:({i},{j})," + rgb_txt + f" Expression intensity:{pixelIntensity:.2f}. AtlasImage ID: {self.atlas_ImageID}"
else:
expTitle = f"pos:({i},{j})," + rgb_txt + f" Expression intensity:{pixelIntensity:.2f}"
self.Expression_view.view.setTitle(expTitle)
self.ISH_view_vLine.setPos(ppos.x())
self.ISH_view_hLine.setPos(ppos.y())
self.ISH_view_vLine.setZValue(1000)
self.ISH_view_hLine.setZValue(1000)
self.Expression_view_vLine.setPos(ppos.x())
self.Expression_view_hLine.setPos(ppos.y())
self.Expression_view_vLine.setZValue(1000)
self.Expression_view_hLine.setZValue(1000)
else:
self.Expression_view.view.setTitle("")
self.ISH_view.view.setTitle("")
def License_clicked(self):
with open("Data/LICENSE.txt") as f:
BSD_3 = f.readlines()
BSD_3b = ""
for l in BSD_3:
BSD_3b = BSD_3b+l
showdialog("This program is under BSD-3 license.\nCopyright (c) 2020-2021, ZeitgeberH@github. All rights reserved.", BSD_3b,False)
def batchDownloading_clicked(self):
WindowTemplate, TemplateBaseClass = pg.Qt.loadUiType('Data/uis/batchDownloadOptionsDialog.ui')
self.bd_dialog = TemplateBaseClass()
self.bd_dialog_form = WindowTemplate()
self.bd_dialog_form.setupUi(self.bd_dialog)
self.bd_dialog_form.fileButton.clicked.connect(self.batchGenesFile)
self.bd_dialog_form.buttonBox.clicked.connect(self.buttonBoxResponse)
self.bd_dialog.exec_()
def getBatchDownloadInfo(self,showDialog=False):
fullResolution =self.bd_dialog.children()[2].children()[0].children()[1].isChecked()
Downsampled =self.bd_dialog.children()[2].children()[0].children()[2].isChecked()
downSampledFactor = self.bd_dialog.children()[2].children()[0].children()[0].children()[4].value()
QFactor = self.bd_dialog.children()[2].children()[0].children()[0].children()[5].value()
for j in range(3):
if self.bd_dialog.children()[2].children()[1].children()[j].isChecked():
planeOption = j
break
userDefinedGenesFiles = self.bd_dialog_form.filePath.text()
with open(userDefinedGenesFiles,'r') as file:
geneNames = file.read().splitlines()
geneNames = [g for g in geneNames if len(g)>0]
self.BatchDownloading_FileInfo(geneNames, planeOption, fullResolution, downSampledFactor, QFactor,showDialog)
def buttonBoxResponse(self,evt):
'''
if 'OK' button pressed, parse values from the UI
Parameters
----------
evt : TYPE: buttonBox responses
DESCRIPTION.
Returns
-------
None.
'''
if evt.text() == 'OK':
if self.bd_dialog_form.filePath.text()!='':
self.getBatchDownloadInfo(True)
else:
showdialog("Press 'Locate file' button to specify the file with names of genes you would like to know!")
def BatchDownloading_FileInfo(self, geneNames, planeOption, fullResolution, downSampledFactor, QFactor, showDialog=False):
p = ['all','coronal','sagittal']
if fullResolution:
ds = f'\nDownloading {p[planeOption]} plane section images for the following genes at full resolution:'
else:
ds = f'\nDownloading {p[planeOption]} plane section images for the following genes at reduced resolution (downsampling factor: {downSampledFactor}, quality: {QFactor}): '
print(ds)
print(f'{geneNames}')
geneDict = {}
inforStr = 'Gene Experiments\n'
for g in geneNames:
geneExpID_all, geneExpData_all = self.geapi.get_gene_experiments2(g)
geneExpID = []
geneExpData = []
if geneExpID_all!=None:
if planeOption!=0:
for gid, ged in zip(geneExpID_all,geneExpData_all):
if ged['plane_of_section_id']== planeOption: ## filter specified image planes!
geneExpID.append(gid)
geneExpData.append(ged)
else:
geneExpID = geneExpID_all
geneExpData = geneExpData_all
inforStr = inforStr+ f'{g:8} {len(geneExpID)}\n'
geneDict[g]={'geneExpID': geneExpID, 'geneExpData':geneExpData}
returnValue = showdialog(ds, details= inforStr)
if returnValue == QtWidgets.QMessageBox.Ok:
self.BatchDownload(geneDict, fullResolution, downSampledFactor, QFactor)
def BatchDownload(self,geneDict, imageQuality, downSampleFactor, qualityFactor):
Expression=False
with pg.ProgressDialog("Downloading FISH images for gene..." , maximum=len(geneDict),\
busyCursor=True, nested=True) as dlg_gene:
for gene in geneDict:
expIDs = geneDict[gene]['geneExpID']
with pg.ProgressDialog("Experiments..." , maximum=len(expIDs),\
busyCursor=True, nested=True) as dlg_exp:
for expID in expIDs:
cache = check_gene_cached_images(self.geapi.fish_images_cache, gene, expID,Expression, imageQuality, downSampleFactor, qualityFactor)
#### check cache and download if not available
if not cache:
imageIDs, sectionNumbers, expMetaData = self.geapi.get_gene_experiments_imageList(expID) ## query imageIDs
with pg.ProgressDialog("... ISH & Expression" , maximum=2,\
busyCursor=True, nested=True) as dlg_eh:
for Expression in [True, False]: ## expression images
with pg.ProgressDialog("...... Images..." , maximum=len(imageIDs),\
busyCursor=True, nested=True) as dlg1:
for j, sn in zip(imageIDs, sectionNumbers):
self.geapi.download_gene_section_imageData(gene, expID, j, sn, Expression, imageQuality, downSampleFactor,qualityFactor)
dlg1 += 1
if dlg1.wasCanceled():
print("Canceled stage")
break
dlg_eh += 1
if dlg_eh.wasCanceled():
print("Canceled images stage")
break
dlg_exp += 1
if dlg_exp.wasCanceled():
print("Canceled experiment stage")
break
dlg_gene += 1
if dlg_gene.wasCanceled():
print("Canceled gene stage")
break
def batchGenesFile(self):
fileName = getfiles()
self.bd_dialog_form.filePath.setText(fileName)
# self.getBatchDownloadInfo(showDialog=True)
def add_toolbar(self):
self.toolbar = QtWidgets.QToolBar("Main toolbar")
self.addToolBar(2, self.toolbar) # https://doc.qt.io/qt-5/qt.html#ToolBarArea-enum
self.toolbar.addSeparator()
self.toolbar.addSeparator()
# self.tooglesettingsAction = pg.QtGui.QAction(pg.QtGui.QIcon("Data/icons/lsm.png"), "Open LSM images")
# self.tooglesettingsAction.triggered.connect(self.openLSM_clicked)
# self.toolbar.addAction(self.tooglesettingsAction)
def sycImageToRegion(self,expID, regionName):
##TODO
try:
imgData_meta = self.geapi.getExpTargetXY(expID, regionName)
print('current region: '+ regionName)
imgPath = os.path.join(self.currentImageCacheDir, str(imgData_meta['section_number']) + '_'+str(imgData_meta['section_image_id']))
imgPath_E = imgPath+'_E.jpg'
imgPath_H = imgPath+'_H.jpg'
self.updateFishImage(self.ISH_view, imgPath_H)
self.updateFishImage(self.Expression_view, imgPath_E, True)
row = self.currentSearchISH_imageLst.index(imgPath_H)
self.experimentImage_Tab.setCurrentCell(row, 0)
self.experimentImage_Tab.setFocus()
return True
except:
showdialog('Synchronization failed!')
return False
def event_parameters_stateChange(self, params, changes):
'''
Handle user edits in the global parameter tree (connected to
sigTreeStateChanged) and update the search/view state accordingly.
'''
pv = self.globalPars.getValues()
for param, change, data in changes:
childName = param.name()
if childName == 'Gene':
self.experiments_view.setCurrentIndex(0)
if childName=='region':
if self.currentSearchDict!=None:
self.currentSearchDict['region'] = data
if (childName == 'Sync') and (self.currentExpID!=None):
regionName = pv['Gene & Brain region & species'][1]['Region'][0]
if regionName!='':
print(f'Sync to region {regionName}')
else:
showdialog("Specifiy a rgion to syn!")
if (childName == 'Syn to atlas after double clicking'):
if not data:
self.clearSVGItems() ## turn off atlas overlay
if childName=='Expression':
if data=='color mask':
self.updateFishImage(self.Expression_view,self.Expression_mask_file, True, False)
else:
self.updateExpressionIntensity()
if childName == 'atlas section offset':
# pdb.set_trace()
if self.atlasID!=None:
idx = self.svgItem_atlasE_index
atlas_ofs_idx = pv['image-to-Atlas'][1]['atlas section offset'][0] + idx
if atlas_ofs_idx>-1 and atlas_ofs_idx<len(self.atlas_sectionID_list):
atlas_imageID = self.atlas_sectionID_list[atlas_ofs_idx]
atlas_fn = self.geapi.getAtlasBoundaryImage(atlas_imageID, self.atlasID, GraphicGroupLabel_id=28)
xP, yP = self.svgItem_atlasE.pos().x(),self.svgItem_atlasE.pos().y()
self.overlayAtlasSVG(xP+self.atlas_x,yP+self.atlas_y,self.atlas_x,self.atlas_y, atlas_fn)
self.atlas_ImageID = atlas_imageID
else:
print('Double click to sync an atlas image first!')
if childName == 'x offset':
# pdb.set_trace()
x, y = self.svgItem_atlasE_pos
y_offset = pv['image-to-Atlas'][1]['y offset'][0]
self.svgItem_atlasE.setPos(x+data, y+y_offset)
# self.svgItem_atlasH.setPos(x+data, y+y_offset)
if childName == 'y offset':
# pdb.set_trace()
x_offset = pv['image-to-Atlas'][1]['x offset'][0]
x, y = self.svgItem_atlasE_pos
self.svgItem_atlasE.setPos(x+x_offset, y+data)
# self.svgItem_atlasH.setPos(x+x_offset, y+data)
if childName == 'Expression mask opacity':
opc = pv['image-to-Atlas'][1][childName][0]
self.Expression_view.view.setOpacity(opc)
if (len(changes) == 1) and (childName == 'Sync') and (self.currentExpID!=None):
if self.geapi!=[]:
regionName = pv['Gene & Brain region & species'][1]['Region'][0]
self.sycImageToRegion(self.currentExpID, regionName.upper())
else:
showdialog("Load an experiment first!")
return
#
search_pars = {}
search_pars['gene'] = pv['Gene & Brain region & species'][1]['Gene'][0]
if search_pars['gene']=='':
return
search_pars['region'] = pv['Gene & Brain region & species'][1]['Region'][0]
search_pars['species'] = pv['Gene & Brain region & species'][1]['Species'][0]
search_pars['plane'] = pv['View options'][1]['Plane'][0]
search_pars['Syn to atlas after double clicking'] = pv['image-to-Atlas'][1]['Syn to atlas after double clicking'][0]
search_pars['maximal resolution'] = pv['View options'][1]['Maximal resolution'][0]
search_pars['downsample'] = pv['View options'][1]['Image quality at lower resolution'][1]['Downsample'][0]
search_pars['quality'] = pv['View options'][1]['Image quality at lower resolution'][1]['Downsample'][0]
self.updateGeneOfInterest(search_pars)
def resetMergeImage(self):
for item in self.lsm_bottomRight_view.view.items:
if hasattr(item, 'Name'):
if item.Name[:3]!='br_':
self.lsm_bottomRight_view.view.removeItem(item)
self.lsm_bottomRight_view.setImage(self.mergeImage, axes={'x':0,'y':1,'c':2,'t':None},autoRange=False,autoLevels=False)
def overlay_segmentation(self, bds, imageViewHandle, noMask=False):
'''
Overlay segmented ROIs to given ImageView instance
Parameters
----------
bds : TYPE: ndarray
contour boundaries of segemented results
imageViewHandle : pyqtgraph.imageView instance
imageview window to draw
Returns
-------
None.
'''
for item in imageViewHandle.view.items:
if hasattr(item, 'Name'):
if item.Name[:3]=='sgb':
imageViewHandle.view.removeItem(item)
if noMask: ## no mask
return
if len(bds.shape)==2:
imItem = pg.ImageItem(bds)
imItem.Name = 'sgb'
imageViewHandle.view.addItem(imItem)
imItem.setZValue(100)
else:
allChans = set([0,1,2])
for nMask in range(bds.shape[2]):
mask_ = np.empty_like(bds)
mask_[:,:,nMask] = bds[:,:,nMask]
otherChans = list(allChans.difference([nMask]))
for j in otherChans:
mask0 = np.empty_like(bds[:,:,0])
mask0[bds[:,:,nMask]>0] = 80 ## set the other channels' pixel intensity so the overlay renders as a color image
mask_[:,:,j] = mask0
imItem = pg.ImageItem(mask_)
imageViewHandle.view.addItem(imItem)
imItem.Name = 'sgb'+str(nMask)
imItem.setZValue(100)
def initGeneAPI(self):
try:
self.geapi = GeneRenderAPI()
self.geapi.DOWNSAMPLE_FACTOR = 4 ## downsampling factor when not requesting the full image; range 0-10, higher means lower quality
self.geapi.QUALITY_FACTOR = 50 ## quality factor when not requesting the full image; range 0-100, higher means better quality
return True
except:
self.geapi = []
print('Failed to initialize the gene query API. Please try again.')
return False
def loadSectionImageInfo(self,row, col):
'''
Load section image meta-information for the experiment selected at
(row, col) in the experiments table.
'''
expID = self.experimentsInfo_Tab.item(row, 0).value
expPlane = self.experimentsInfo_Tab.item(row, 1).value
expGene = self.experimentsInfo_Tab.item(row, 2).value
if self.geapi==[]:
r=self.initGeneAPI()
if not r:
return
## query and return section image list from this experiment
##TODO
## Check cached image files first; if not cached, request from the ABA
imageIDs, secNums, imageMeta = self.geapi.get_gene_experiments_imageList(expID)
expDF = pd.DataFrame()
import pandas as pd
import numpy as np
from datetime import date
"""
dataset split:
(date_received)
dataset3: 20160701~20160731 (113640), features3 from 20160315~20160630 (off_test)
dataset2: 20160515~20160615 (258446), features2 from 20160201~20160514
dataset1: 20160414~20160514 (138303), features1 from 20160101~20160413
1.merchant related:
sales_use_coupon. total_coupon
transfer_rate = sales_use_coupon/total_coupon.
merchant_avg_distance,merchant_min_distance,merchant_max_distance of those use coupon
total_sales. coupon_rate = sales_use_coupon/total_sales.
2.coupon related:
discount_rate. discount_man. discount_jian. is_man_jian
day_of_week,day_of_month. (date_received)
3.user related:
distance.
user_avg_distance, user_min_distance,user_max_distance.
buy_use_coupon. buy_total. coupon_received.
buy_use_coupon/coupon_received.
avg_diff_date_datereceived. min_diff_date_datereceived. max_diff_date_datereceived.
count_merchant.
4.user_merchant:
times_user_buy_merchant_before.
5. other feature:
this_month_user_receive_all_coupon_count
this_month_user_receive_same_coupon_count
this_month_user_receive_same_coupon_lastone
this_month_user_receive_same_coupon_firstone
this_day_user_receive_all_coupon_count
this_day_user_receive_same_coupon_count
day_gap_before, day_gap_after (receive the same coupon)
"""
#1754884 record,1053282 with coupon_id,9738 coupon. date_received:20160101~20160615,date:20160101~20160630, 539438 users, 8415 merchants
off_train = pd.read_csv('data/ccf_offline_stage1_train.csv',header=None)
off_train.columns = ['user_id','merchant_id','coupon_id','discount_rate','distance','date_received','date']
#2050 coupon_id. date_received:20160701~20160731, 76309 users(76307 in trainset, 35965 in online_trainset), 1559 merchants(1558 in trainset)
off_test = pd.read_csv('data/ccf_offline_stage1_test_revised.csv',header=None)
off_test.columns = ['user_id','merchant_id','coupon_id','discount_rate','distance','date_received']
#11429826 record(872357 with coupon_id),762858 user(267448 in off_train)
on_train = pd.read_csv('data/ccf_online_stage1_train.csv',header=None)
on_train.columns = ['user_id','merchant_id','action','coupon_id','discount_rate','date_received','date']
dataset3 = off_test
feature3 = off_train[((off_train.date>='20160315')&(off_train.date<='20160630'))|((off_train.date=='null')&(off_train.date_received>='20160315')&(off_train.date_received<='20160630'))]
dataset2 = off_train[(off_train.date_received>='20160515')&(off_train.date_received<='20160615')]
feature2 = off_train[(off_train.date>='20160201')&(off_train.date<='20160514')|((off_train.date=='null')&(off_train.date_received>='20160201')&(off_train.date_received<='20160514'))]
dataset1 = off_train[(off_train.date_received>='20160414')&(off_train.date_received<='20160514')]
feature1 = off_train[(off_train.date>='20160101')&(off_train.date<='20160413')|((off_train.date=='null')&(off_train.date_received>='20160101')&(off_train.date_received<='20160413'))]
############# other feature ##################3
"""
5. other feature:
this_month_user_receive_all_coupon_count
this_month_user_receive_same_coupon_count
this_month_user_receive_same_coupon_lastone
this_month_user_receive_same_coupon_firstone
this_day_user_receive_all_coupon_count
this_day_user_receive_same_coupon_count
day_gap_before, day_gap_after (receive the same coupon)
"""
#for dataset3
t = dataset3[['user_id']]
t['this_month_user_receive_all_coupon_count'] = 1
t = t.groupby('user_id').agg('sum').reset_index()
t1 = dataset3[['user_id','coupon_id']]
t1['this_month_user_receive_same_coupon_count'] = 1
t1 = t1.groupby(['user_id','coupon_id']).agg('sum').reset_index()
t2 = dataset3[['user_id','coupon_id','date_received']]
t2.date_received = t2.date_received.astype('str')
t2 = t2.groupby(['user_id','coupon_id'])['date_received'].agg(lambda x:':'.join(x)).reset_index()
t2['receive_number'] = t2.date_received.apply(lambda s:len(s.split(':')))
t2 = t2[t2.receive_number>1]
t2['max_date_received'] = t2.date_received.apply(lambda s:max([int(d) for d in s.split(':')]))
t2['min_date_received'] = t2.date_received.apply(lambda s:min([int(d) for d in s.split(':')]))
t2 = t2[['user_id','coupon_id','max_date_received','min_date_received']]
t3 = dataset3[['user_id','coupon_id','date_received']]
t3 = pd.merge(t3,t2,on=['user_id','coupon_id'],how='left')
t3['this_month_user_receive_same_coupon_lastone'] = t3.max_date_received - t3.date_received
t3['this_month_user_receive_same_coupon_firstone'] = t3.date_received - t3.min_date_received
def is_firstlastone(x):
if x==0:
return 1
elif x>0:
return 0
else:
return -1 #those only receive once
t3.this_month_user_receive_same_coupon_lastone = t3.this_month_user_receive_same_coupon_lastone.apply(is_firstlastone)
t3.this_month_user_receive_same_coupon_firstone = t3.this_month_user_receive_same_coupon_firstone.apply(is_firstlastone)
t3 = t3[['user_id','coupon_id','date_received','this_month_user_receive_same_coupon_lastone','this_month_user_receive_same_coupon_firstone']]
t4 = dataset3[['user_id','date_received']]
t4['this_day_user_receive_all_coupon_count'] = 1
t4 = t4.groupby(['user_id','date_received']).agg('sum').reset_index()
t5 = dataset3[['user_id','coupon_id','date_received']]
t5['this_day_user_receive_same_coupon_count'] = 1
t5 = t5.groupby(['user_id','coupon_id','date_received']).agg('sum').reset_index()
t6 = dataset3[['user_id','coupon_id','date_received']]
t6.date_received = t6.date_received.astype('str')
t6 = t6.groupby(['user_id','coupon_id'])['date_received'].agg(lambda x:':'.join(x)).reset_index()
t6.rename(columns={'date_received':'dates'},inplace=True)
def get_day_gap_before(s):
date_received,dates = s.split('-')
dates = dates.split(':')
gaps = []
for d in dates:
this_gap = (date(int(date_received[0:4]),int(date_received[4:6]),int(date_received[6:8]))-date(int(d[0:4]),int(d[4:6]),int(d[6:8]))).days
if this_gap>0:
gaps.append(this_gap)
if len(gaps)==0:
return -1
else:
return min(gaps)
def get_day_gap_after(s):
date_received,dates = s.split('-')
dates = dates.split(':')
gaps = []
for d in dates:
this_gap = (date(int(d[0:4]),int(d[4:6]),int(d[6:8]))-date(int(date_received[0:4]),int(date_received[4:6]),int(date_received[6:8]))).days
if this_gap>0:
gaps.append(this_gap)
if len(gaps)==0:
return -1
else:
return min(gaps)
t7 = dataset3[['user_id','coupon_id','date_received']]
t7 = pd.merge(t7,t6,on=['user_id','coupon_id'],how='left')
t7['date_received_date'] = t7.date_received.astype('str') + '-' + t7.dates
t7['day_gap_before'] = t7.date_received_date.apply(get_day_gap_before)
t7['day_gap_after'] = t7.date_received_date.apply(get_day_gap_after)
t7 = t7[['user_id','coupon_id','date_received','day_gap_before','day_gap_after']]
other_feature3 = pd.merge(t1,t,on='user_id')
other_feature3 = pd.merge(other_feature3,t3,on=['user_id','coupon_id'])
other_feature3 = pd.merge(other_feature3,t4,on=['user_id','date_received'])
other_feature3 = pd.merge(other_feature3,t5,on=['user_id','coupon_id','date_received'])
other_feature3 = pd.merge(other_feature3,t7,on=['user_id','coupon_id','date_received'])
other_feature3.to_csv('data/other_feature3.csv',index=None)
print(other_feature3.shape)
#for dataset2
t = dataset2[['user_id']]
t['this_month_user_receive_all_coupon_count'] = 1
t = t.groupby('user_id').agg('sum').reset_index()
t1 = dataset2[['user_id','coupon_id']]
t1['this_month_user_receive_same_coupon_count'] = 1
t1 = t1.groupby(['user_id','coupon_id']).agg('sum').reset_index()
t2 = dataset2[['user_id','coupon_id','date_received']]
t2.date_received = t2.date_received.astype('str')
t2 = t2.groupby(['user_id','coupon_id'])['date_received'].agg(lambda x:':'.join(x)).reset_index()
t2['receive_number'] = t2.date_received.apply(lambda s:len(s.split(':')))
t2 = t2[t2.receive_number>1]
t2['max_date_received'] = t2.date_received.apply(lambda s:max([int(d) for d in s.split(':')]))
t2['min_date_received'] = t2.date_received.apply(lambda s:min([int(d) for d in s.split(':')]))
t2 = t2[['user_id','coupon_id','max_date_received','min_date_received']]
t3 = dataset2[['user_id','coupon_id','date_received']]
t3 = pd.merge(t3,t2,on=['user_id','coupon_id'],how='left')
t3['this_month_user_receive_same_coupon_lastone'] = t3.max_date_received - t3.date_received.astype('int')
t3['this_month_user_receive_same_coupon_firstone'] = t3.date_received.astype('int') - t3.min_date_received
def is_firstlastone(x):
if x==0:
return 1
elif x>0:
return 0
else:
return -1 #those only receive once
t3.this_month_user_receive_same_coupon_lastone = t3.this_month_user_receive_same_coupon_lastone.apply(is_firstlastone)
t3.this_month_user_receive_same_coupon_firstone = t3.this_month_user_receive_same_coupon_firstone.apply(is_firstlastone)
t3 = t3[['user_id','coupon_id','date_received','this_month_user_receive_same_coupon_lastone','this_month_user_receive_same_coupon_firstone']]
t4 = dataset2[['user_id','date_received']]
t4['this_day_user_receive_all_coupon_count'] = 1
t4 = t4.groupby(['user_id','date_received']).agg('sum').reset_index()
t5 = dataset2[['user_id','coupon_id','date_received']]
t5['this_day_user_receive_same_coupon_count'] = 1
t5 = t5.groupby(['user_id','coupon_id','date_received']).agg('sum').reset_index()
t6 = dataset2[['user_id','coupon_id','date_received']]
t6.date_received = t6.date_received.astype('str')
t6 = t6.groupby(['user_id','coupon_id'])['date_received'].agg(lambda x:':'.join(x)).reset_index()
t6.rename(columns={'date_received':'dates'},inplace=True)
def get_day_gap_before(s):
date_received,dates = s.split('-')
dates = dates.split(':')
gaps = []
for d in dates:
this_gap = (date(int(date_received[0:4]),int(date_received[4:6]),int(date_received[6:8]))-date(int(d[0:4]),int(d[4:6]),int(d[6:8]))).days
if this_gap>0:
gaps.append(this_gap)
if len(gaps)==0:
return -1
else:
return min(gaps)
def get_day_gap_after(s):
date_received,dates = s.split('-')
dates = dates.split(':')
gaps = []
for d in dates:
this_gap = (date(int(d[0:4]),int(d[4:6]),int(d[6:8]))-date(int(date_received[0:4]),int(date_received[4:6]),int(date_received[6:8]))).days
if this_gap>0:
gaps.append(this_gap)
if len(gaps)==0:
return -1
else:
return min(gaps)
t7 = dataset2[['user_id','coupon_id','date_received']]
t7 = pd.merge(t7,t6,on=['user_id','coupon_id'],how='left')
t7['date_received_date'] = t7.date_received.astype('str') + '-' + t7.dates
t7['day_gap_before'] = t7.date_received_date.apply(get_day_gap_before)
t7['day_gap_after'] = t7.date_received_date.apply(get_day_gap_after)
t7 = t7[['user_id','coupon_id','date_received','day_gap_before','day_gap_after']]
other_feature2 = pd.merge(t1,t,on='user_id')
other_feature2 = pd.merge(other_feature2,t3,on=['user_id','coupon_id'])
other_feature2 = pd.merge(other_feature2,t4,on=['user_id','date_received'])
other_feature2 = pd.merge(other_feature2,t5,on=['user_id','coupon_id','date_received'])
other_feature2 = pd.merge(other_feature2,t7,on=['user_id','coupon_id','date_received'])
other_feature2.to_csv('data/other_feature2.csv',index=None)
print(other_feature2.shape)
#for dataset1
t = dataset1[['user_id']]
t['this_month_user_receive_all_coupon_count'] = 1
t = t.groupby('user_id').agg('sum').reset_index()
t1 = dataset1[['user_id','coupon_id']]
t1['this_month_user_receive_same_coupon_count'] = 1
t1 = t1.groupby(['user_id','coupon_id']).agg('sum').reset_index()
t2 = dataset1[['user_id','coupon_id','date_received']]
t2.date_received = t2.date_received.astype('str')
t2 = t2.groupby(['user_id','coupon_id'])['date_received'].agg(lambda x:':'.join(x)).reset_index()
t2['receive_number'] = t2.date_received.apply(lambda s:len(s.split(':')))
t2 = t2[t2.receive_number>1]
t2['max_date_received'] = t2.date_received.apply(lambda s:max([int(d) for d in s.split(':')]))
t2['min_date_received'] = t2.date_received.apply(lambda s:min([int(d) for d in s.split(':')]))
t2 = t2[['user_id','coupon_id','max_date_received','min_date_received']]
t3 = dataset1[['user_id','coupon_id','date_received']]
t3 = pd.merge(t3,t2,on=['user_id','coupon_id'],how='left')
t3['this_month_user_receive_same_coupon_lastone'] = t3.max_date_received - t3.date_received.astype('int')
t3['this_month_user_receive_same_coupon_firstone'] = t3.date_received.astype('int') - t3.min_date_received
def is_firstlastone(x):
if x==0:
return 1
elif x>0:
return 0
else:
return -1 #those only receive once
t3.this_month_user_receive_same_coupon_lastone = t3.this_month_user_receive_same_coupon_lastone.apply(is_firstlastone)
t3.this_month_user_receive_same_coupon_firstone = t3.this_month_user_receive_same_coupon_firstone.apply(is_firstlastone)
t3 = t3[['user_id','coupon_id','date_received','this_month_user_receive_same_coupon_lastone','this_month_user_receive_same_coupon_firstone']]
t4 = dataset1[['user_id','date_received']]
t4['this_day_user_receive_all_coupon_count'] = 1
t4 = t4.groupby(['user_id','date_received']).agg('sum').reset_index()
t5 = dataset1[['user_id','coupon_id','date_received']]
t5['this_day_user_receive_same_coupon_count'] = 1
t5 = t5.groupby(['user_id','coupon_id','date_received']).agg('sum').reset_index()
t6 = dataset1[['user_id','coupon_id','date_received']]
t6.date_received = t6.date_received.astype('str')
t6 = t6.groupby(['user_id','coupon_id'])['date_received'].agg(lambda x:':'.join(x)).reset_index()
t6.rename(columns={'date_received':'dates'},inplace=True)
def get_day_gap_before(s):
date_received,dates = s.split('-')
dates = dates.split(':')
gaps = []
for d in dates:
this_gap = (date(int(date_received[0:4]),int(date_received[4:6]),int(date_received[6:8]))-date(int(d[0:4]),int(d[4:6]),int(d[6:8]))).days
if this_gap>0:
gaps.append(this_gap)
if len(gaps)==0:
return -1
else:
return min(gaps)
def get_day_gap_after(s):
date_received,dates = s.split('-')
dates = dates.split(':')
gaps = []
for d in dates:
this_gap = (date(int(d[0:4]),int(d[4:6]),int(d[6:8]))-date(int(date_received[0:4]),int(date_received[4:6]),int(date_received[6:8]))).days
if this_gap>0:
gaps.append(this_gap)
if len(gaps)==0:
return -1
else:
return min(gaps)
t7 = dataset1[['user_id','coupon_id','date_received']]
t7 = pd.merge(t7,t6,on=['user_id','coupon_id'],how='left')
t7['date_received_date'] = t7.date_received.astype('str') + '-' + t7.dates
t7['day_gap_before'] = t7.date_received_date.apply(get_day_gap_before)
t7['day_gap_after'] = t7.date_received_date.apply(get_day_gap_after)
t7 = t7[['user_id','coupon_id','date_received','day_gap_before','day_gap_after']]
other_feature1 = pd.merge(t1,t,on='user_id')
other_feature1 = pd.merge(other_feature1,t3,on=['user_id','coupon_id'])
other_feature1 = pd.merge(other_feature1,t4,on=['user_id','date_received'])
other_feature1 = pd.merge(other_feature1,t5,on=['user_id','coupon_id','date_received'])
other_feature1 = pd.merge(other_feature1,t7,on=['user_id','coupon_id','date_received'])
other_feature1.to_csv('data/other_feature1.csv',index=None)
print(other_feature1.shape)
############# coupon related feature #############
"""
2.coupon related:
discount_rate. discount_man. discount_jian. is_man_jian
day_of_week,day_of_month. (date_received)
"""
def calc_discount_rate(s):
s =str(s)
s = s.split(':')
if len(s)==1:
return float(s[0])
else:
return 1.0-float(s[1])/float(s[0])
def get_discount_man(s):
s =str(s)
s = s.split(':')
if len(s)==1:
return 'null'
else:
return int(s[0])
def get_discount_jian(s):
s =str(s)
s = s.split(':')
if len(s)==1:
return 'null'
else:
return int(s[1])
def is_man_jian(s):
s =str(s)
s = s.split(':')
if len(s)==1:
return 0
else:
return 1
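# Example behaviour of the helpers above ('x:y' encodes a full-reduction coupon,
# i.e. "spend x, get y off"; a plain value such as '0.95' is a direct discount rate):
#   calc_discount_rate('150:20') -> 1 - 20/150 ~= 0.867
#   calc_discount_rate('0.95')   -> 0.95
#   get_discount_man('150:20')   -> 150,  get_discount_jian('150:20') -> 20
#   is_man_jian('150:20')        -> 1,    is_man_jian('0.95')         -> 0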
#dataset3
dataset3['day_of_week'] = dataset3.date_received.astype('str').apply(lambda x:date(int(x[0:4]),int(x[4:6]),int(x[6:8])).weekday()+1)
dataset3['day_of_month'] = dataset3.date_received.astype('str').apply(lambda x:int(x[6:8]))
dataset3['days_distance'] = dataset3.date_received.astype('str').apply(lambda x:(date(int(x[0:4]),int(x[4:6]),int(x[6:8]))-date(2016,6,30)).days)
dataset3['discount_man'] = dataset3.discount_rate.apply(get_discount_man)
dataset3['discount_jian'] = dataset3.discount_rate.apply(get_discount_jian)
dataset3['is_man_jian'] = dataset3.discount_rate.apply(is_man_jian)
dataset3['discount_rate'] = dataset3.discount_rate.apply(calc_discount_rate)
d = dataset3[['coupon_id']]
d['coupon_count'] = 1
d = d.groupby('coupon_id').agg('sum').reset_index()
dataset3 = pd.merge(dataset3,d,on='coupon_id',how='left')
dataset3.to_csv('data/coupon3_feature.csv',index=None)
#dataset2
dataset2['day_of_week'] = dataset2.date_received.astype('str').apply(lambda x:date(int(x[0:4]),int(x[4:6]),int(x[6:8])).weekday()+1)
dataset2['day_of_month'] = dataset2.date_received.astype('str').apply(lambda x:int(x[6:8]))
dataset2['days_distance'] = dataset2.date_received.astype('str').apply(lambda x:(date(int(x[0:4]),int(x[4:6]),int(x[6:8]))-date(2016,5,14)).days)
dataset2['discount_man'] = dataset2.discount_rate.apply(get_discount_man)
dataset2['discount_jian'] = dataset2.discount_rate.apply(get_discount_jian)
dataset2['is_man_jian'] = dataset2.discount_rate.apply(is_man_jian)
dataset2['discount_rate'] = dataset2.discount_rate.apply(calc_discount_rate)
d = dataset2[['coupon_id']]
d['coupon_count'] = 1
d = d.groupby('coupon_id').agg('sum').reset_index()
dataset2 = pd.merge(dataset2,d,on='coupon_id',how='left')
dataset2.to_csv('data/coupon2_feature.csv',index=None)
#dataset1
dataset1['day_of_week'] = dataset1.date_received.astype('str').apply(lambda x:date(int(x[0:4]),int(x[4:6]),int(x[6:8])).weekday()+1)
dataset1['day_of_month'] = dataset1.date_received.astype('str').apply(lambda x:int(x[6:8]))
dataset1['days_distance'] = dataset1.date_received.astype('str').apply(lambda x:(date(int(x[0:4]),int(x[4:6]),int(x[6:8]))-date(2016,4,13)).days)
dataset1['discount_man'] = dataset1.discount_rate.apply(get_discount_man)
dataset1['discount_jian'] = dataset1.discount_rate.apply(get_discount_jian)
dataset1['is_man_jian'] = dataset1.discount_rate.apply(is_man_jian)
dataset1['discount_rate'] = dataset1.discount_rate.apply(calc_discount_rate)
d = dataset1[['coupon_id']]
d['coupon_count'] = 1
d = d.groupby('coupon_id').agg('sum').reset_index()
dataset1 = pd.merge(dataset1,d,on='coupon_id',how='left')
dataset1.to_csv('data/coupon1_feature.csv',index=None)
############# merchant related feature #############
"""
1.merchant related:
total_sales. sales_use_coupon. total_coupon
coupon_rate = sales_use_coupon/total_sales.
transfer_rate = sales_use_coupon/total_coupon.
merchant_avg_distance,merchant_min_distance,merchant_max_distance of those use coupon
"""
#for dataset3
merchant3 = feature3[['merchant_id','coupon_id','distance','date_received','date']]
t = merchant3[['merchant_id']]
t.drop_duplicates(inplace=True)
t1 = merchant3[merchant3.date!='null'][['merchant_id']]
t1['total_sales'] = 1
t1 = t1.groupby('merchant_id').agg('sum').reset_index()
t2 = merchant3[(merchant3.date!='null')&(merchant3.coupon_id!='null')][['merchant_id']]
t2['sales_use_coupon'] = 1
t2 = t2.groupby('merchant_id').agg('sum').reset_index()
t3 = merchant3[merchant3.coupon_id!='null'][['merchant_id']]
t3['total_coupon'] = 1
t3 = t3.groupby('merchant_id').agg('sum').reset_index()
t4 = merchant3[(merchant3.date!='null')&(merchant3.coupon_id!='null')][['merchant_id','distance']]
t4.replace('null',-1,inplace=True)
t4.distance = t4.distance.astype('int')
t4.replace(-1,np.nan,inplace=True)
t5 = t4.groupby('merchant_id').agg('min').reset_index()
t5.rename(columns={'distance':'merchant_min_distance'},inplace=True)
t6 = t4.groupby('merchant_id').agg('max').reset_index()
t6.rename(columns={'distance':'merchant_max_distance'},inplace=True)
t7 = t4.groupby('merchant_id').agg('mean').reset_index()
t7.rename(columns={'distance':'merchant_mean_distance'},inplace=True)
t8 = t4.groupby('merchant_id').agg('median').reset_index()
t8.rename(columns={'distance':'merchant_median_distance'},inplace=True)
merchant3_feature = pd.merge(t,t1,on='merchant_id',how='left')
merchant3_feature = pd.merge(merchant3_feature,t2,on='merchant_id',how='left')
merchant3_feature = pd.merge(merchant3_feature,t3,on='merchant_id',how='left')
merchant3_feature = pd.merge(merchant3_feature,t5,on='merchant_id',how='left')
merchant3_feature = pd.merge(merchant3_feature,t6,on='merchant_id',how='left')
merchant3_feature = pd.merge(merchant3_feature,t7,on='merchant_id',how='left')
merchant3_feature = pd.merge(merchant3_feature,t8,on='merchant_id',how='left')
merchant3_feature.sales_use_coupon = merchant3_feature.sales_use_coupon.replace(np.nan,0) #fillna with 0
merchant3_feature['merchant_coupon_transfer_rate'] = merchant3_feature.sales_use_coupon.astype('float') / merchant3_feature.total_coupon
merchant3_feature['coupon_rate'] = merchant3_feature.sales_use_coupon.astype('float') / merchant3_feature.total_sales
merchant3_feature.total_coupon = merchant3_feature.total_coupon.replace(np.nan,0) #fillna with 0
merchant3_feature.to_csv('data/merchant3_feature.csv',index=None)
#for dataset2
merchant2 = feature2[['merchant_id','coupon_id','distance','date_received','date']]
t = merchant2[['merchant_id']]
t.drop_duplicates(inplace=True)
t1 = merchant2[merchant2.date!='null'][['merchant_id']]
t1['total_sales'] = 1
t1 = t1.groupby('merchant_id').agg('sum').reset_index()
t2 = merchant2[(merchant2.date!='null')&(merchant2.coupon_id!='null')][['merchant_id']]
t2['sales_use_coupon'] = 1
t2 = t2.groupby('merchant_id').agg('sum').reset_index()
t3 = merchant2[merchant2.coupon_id!='null'][['merchant_id']]
t3['total_coupon'] = 1
t3 = t3.groupby('merchant_id').agg('sum').reset_index()
t4 = merchant2[(merchant2.date!='null')&(merchant2.coupon_id!='null')][['merchant_id','distance']]
t4.replace('null',-1,inplace=True)
t4.distance = t4.distance.astype('int')
t4.replace(-1,np.nan,inplace=True)
t5 = t4.groupby('merchant_id').agg('min').reset_index()
t5.rename(columns={'distance':'merchant_min_distance'},inplace=True)
t6 = t4.groupby('merchant_id').agg('max').reset_index()
t6.rename(columns={'distance':'merchant_max_distance'},inplace=True)
t7 = t4.groupby('merchant_id').agg('mean').reset_index()
t7.rename(columns={'distance':'merchant_mean_distance'},inplace=True)
t8 = t4.groupby('merchant_id').agg('median').reset_index()
t8.rename(columns={'distance':'merchant_median_distance'},inplace=True)
merchant2_feature = pd.merge(t,t1,on='merchant_id',how='left')
merchant2_feature = pd.merge(merchant2_feature,t2,on='merchant_id',how='left')
merchant2_feature = pd.merge(merchant2_feature,t3,on='merchant_id',how='left')
merchant2_feature = pd.merge(merchant2_feature,t5,on='merchant_id',how='left')
merchant2_feature = pd.merge(merchant2_feature,t6,on='merchant_id',how='left')
merchant2_feature = pd.merge(merchant2_feature,t7,on='merchant_id',how='left')
merchant2_feature = pd.merge(merchant2_feature,t8,on='merchant_id',how='left')
merchant2_feature.sales_use_coupon = merchant2_feature.sales_use_coupon.replace(np.nan,0) #fillna with 0
merchant2_feature['merchant_coupon_transfer_rate'] = merchant2_feature.sales_use_coupon.astype('float') / merchant2_feature.total_coupon
merchant2_feature['coupon_rate'] = merchant2_feature.sales_use_coupon.astype('float') / merchant2_feature.total_sales
merchant2_feature.total_coupon = merchant2_feature.total_coupon.replace(np.nan,0) #fillna with 0
merchant2_feature.to_csv('data/merchant2_feature.csv',index=None)
#for dataset1
merchant1 = feature1[['merchant_id','coupon_id','distance','date_received','date']]
t = merchant1[['merchant_id']]
t.drop_duplicates(inplace=True)
t1 = merchant1[merchant1.date!='null'][['merchant_id']]
t1['total_sales'] = 1
t1 = t1.groupby('merchant_id').agg('sum').reset_index()
t2 = merchant1[(merchant1.date!='null')&(merchant1.coupon_id!='null')][['merchant_id']]
t2['sales_use_coupon'] = 1
t2 = t2.groupby('merchant_id').agg('sum').reset_index()
t3 = merchant1[merchant1.coupon_id!='null'][['merchant_id']]
t3['total_coupon'] = 1
t3 = t3.groupby('merchant_id').agg('sum').reset_index()
t4 = merchant1[(merchant1.date!='null')&(merchant1.coupon_id!='null')][['merchant_id','distance']]
t4.replace('null',-1,inplace=True)
t4.distance = t4.distance.astype('int')
t4.replace(-1,np.nan,inplace=True)
t5 = t4.groupby('merchant_id').agg('min').reset_index()
t5.rename(columns={'distance':'merchant_min_distance'},inplace=True)
t6 = t4.groupby('merchant_id').agg('max').reset_index()
t6.rename(columns={'distance':'merchant_max_distance'},inplace=True)
t7 = t4.groupby('merchant_id').agg('mean').reset_index()
t7.rename(columns={'distance':'merchant_mean_distance'},inplace=True)
t8 = t4.groupby('merchant_id').agg('median').reset_index()
t8.rename(columns={'distance':'merchant_median_distance'},inplace=True)
merchant1_feature = pd.merge(t,t1,on='merchant_id',how='left')
merchant1_feature = pd.merge(merchant1_feature,t2,on='merchant_id',how='left')
merchant1_feature = pd.merge(merchant1_feature,t3,on='merchant_id',how='left')
merchant1_feature = pd.merge(merchant1_feature,t5,on='merchant_id',how='left')
merchant1_feature = pd.merge(merchant1_feature,t6,on='merchant_id',how='left')
merchant1_feature = pd.merge(merchant1_feature,t7,on='merchant_id',how='left')
merchant1_feature = pd.merge(merchant1_feature,t8,on='merchant_id',how='left')
merchant1_feature.sales_use_coupon = merchant1_feature.sales_use_coupon.replace(np.nan,0) #fillna with 0
merchant1_feature['merchant_coupon_transfer_rate'] = merchant1_feature.sales_use_coupon.astype('float') / merchant1_feature.total_coupon
merchant1_feature['coupon_rate'] = merchant1_feature.sales_use_coupon.astype('float') / merchant1_feature.total_sales
merchant1_feature.total_coupon = merchant1_feature.total_coupon.replace(np.nan,0) #fillna with 0
merchant1_feature.to_csv('data/merchant1_feature.csv',index=None)
############# user related feature #############
"""
3.user related:
count_merchant.
user_avg_distance, user_min_distance,user_max_distance.
buy_use_coupon. buy_total. coupon_received.
buy_use_coupon/coupon_received.
buy_use_coupon/buy_total
user_date_datereceived_gap
"""
def get_user_date_datereceived_gap(s):
s = s.split(':')
return (date(int(s[0][0:4]),int(s[0][4:6]),int(s[0][6:8])) - date(int(s[1][0:4]),int(s[1][4:6]),int(s[1][6:8]))).days
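# e.g. get_user_date_datereceived_gap('20160620:20160601') -> 19
# (days between the consumption date and the date the coupon was received)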
#for dataset3
user3 = feature3[['user_id','merchant_id','coupon_id','discount_rate','distance','date_received','date']]
t = user3[['user_id']]
t.drop_duplicates(inplace=True)
t1 = user3[user3.date!='null'][['user_id','merchant_id']]
t1.drop_duplicates(inplace=True)
t1.merchant_id = 1
t1 = t1.groupby('user_id').agg('sum').reset_index()
t1.rename(columns={'merchant_id':'count_merchant'},inplace=True)
t2 = user3[(user3.date!='null')&(user3.coupon_id!='null')][['user_id','distance']]
t2.replace('null',-1,inplace=True)
t2.distance = t2.distance.astype('int')
t2.replace(-1,np.nan,inplace=True)
t3 = t2.groupby('user_id').agg('min').reset_index()
t3.rename(columns={'distance':'user_min_distance'},inplace=True)
t4 = t2.groupby('user_id').agg('max').reset_index()
t4.rename(columns={'distance':'user_max_distance'},inplace=True)
t5 = t2.groupby('user_id').agg('mean').reset_index()
t5.rename(columns={'distance':'user_mean_distance'},inplace=True)
t6 = t2.groupby('user_id').agg('median').reset_index()
t6.rename(columns={'distance':'user_median_distance'},inplace=True)
t7 = user3[(user3.date!='null')&(user3.coupon_id!='null')][['user_id']]
t7['buy_use_coupon'] = 1
t7 = t7.groupby('user_id').agg('sum').reset_index()
t8 = user3[user3.date!='null'][['user_id']]
t8['buy_total'] = 1
t8 = t8.groupby('user_id').agg('sum').reset_index()
t9 = user3[user3.coupon_id!='null'][['user_id']]
t9['coupon_received'] = 1
t9 = t9.groupby('user_id').agg('sum').reset_index()
t10 = user3[(user3.date_received!='null')&(user3.date!='null')][['user_id','date_received','date']]
t10['user_date_datereceived_gap'] = t10.date + ':' + t10.date_received
t10.user_date_datereceived_gap = t10.user_date_datereceived_gap.apply(get_user_date_datereceived_gap)
t10 = t10[['user_id','user_date_datereceived_gap']]
t11 = t10.groupby('user_id').agg('mean').reset_index()
t11.rename(columns={'user_date_datereceived_gap':'avg_user_date_datereceived_gap'},inplace=True)
t12 = t10.groupby('user_id').agg('min').reset_index()
t12.rename(columns={'user_date_datereceived_gap':'min_user_date_datereceived_gap'},inplace=True)
t13 = t10.groupby('user_id').agg('max').reset_index()
t13.rename(columns={'user_date_datereceived_gap':'max_user_date_datereceived_gap'},inplace=True)
user3_feature = pd.merge(t,t1,on='user_id',how='left')
user3_feature = pd.merge(user3_feature,t3,on='user_id',how='left')
user3_feature = pd.merge(user3_feature,t4,on='user_id',how='left')
user3_feature = pd.merge(user3_feature,t5,on='user_id',how='left')
user3_feature = pd.merge(user3_feature,t6,on='user_id',how='left')
user3_feature = pd.merge(user3_feature,t7,on='user_id',how='left')
user3_feature = pd.merge(user3_feature,t8,on='user_id',how='left')
user3_feature = pd.merge(user3_feature,t9,on='user_id',how='left')
user3_feature = pd.merge(user3_feature,t11,on='user_id',how='left')
user3_feature = pd.merge(user3_feature,t12,on='user_id',how='left')
user3_feature = pd.merge(user3_feature,t13,on='user_id',how='left')
user3_feature.count_merchant = user3_feature.count_merchant.replace(np.nan,0)
user3_feature.buy_use_coupon = user3_feature.buy_use_coupon.replace(np.nan,0)
user3_feature['buy_use_coupon_rate'] = user3_feature.buy_use_coupon.astype('float') / user3_feature.buy_total.astype('float')
user3_feature['user_coupon_transfer_rate'] = user3_feature.buy_use_coupon.astype('float') / user3_feature.coupon_received.astype('float')
user3_feature.buy_total = user3_feature.buy_total.replace(np.nan,0)
user3_feature.coupon_received = user3_feature.coupon_received.replace(np.nan,0)
user3_feature.to_csv('data/user3_feature.csv',index=None)
#for dataset2
user2 = feature2[['user_id','merchant_id','coupon_id','discount_rate','distance','date_received','date']]
t = user2[['user_id']]
t.drop_duplicates(inplace=True)
t1 = user2[user2.date!='null'][['user_id','merchant_id']]
t1.drop_duplicates(inplace=True)
t1.merchant_id = 1
t1 = t1.groupby('user_id').agg('sum').reset_index()
t1.rename(columns={'merchant_id':'count_merchant'},inplace=True)
t2 = user2[(user2.date!='null')&(user2.coupon_id!='null')][['user_id','distance']]
t2.replace('null',-1,inplace=True)
t2.distance = t2.distance.astype('int')
t2.replace(-1,np.nan,inplace=True)
t3 = t2.groupby('user_id').agg('min').reset_index()
t3.rename(columns={'distance':'user_min_distance'},inplace=True)
t4 = t2.groupby('user_id').agg('max').reset_index()
t4.rename(columns={'distance':'user_max_distance'},inplace=True)
t5 = t2.groupby('user_id').agg('mean').reset_index()
t5.rename(columns={'distance':'user_mean_distance'},inplace=True)
t6 = t2.groupby('user_id').agg('median').reset_index()
t6.rename(columns={'distance':'user_median_distance'},inplace=True)
t7 = user2[(user2.date!='null')&(user2.coupon_id!='null')][['user_id']]
t7['buy_use_coupon'] = 1
t7 = t7.groupby('user_id').agg('sum').reset_index()
t8 = user2[user2.date!='null'][['user_id']]
t8['buy_total'] = 1
t8 = t8.groupby('user_id').agg('sum').reset_index()
t9 = user2[user2.coupon_id!='null'][['user_id']]
t9['coupon_received'] = 1
t9 = t9.groupby('user_id').agg('sum').reset_index()
t10 = user2[(user2.date_received!='null')&(user2.date!='null')][['user_id','date_received','date']]
t10['user_date_datereceived_gap'] = t10.date + ':' + t10.date_received
t10.user_date_datereceived_gap = t10.user_date_datereceived_gap.apply(get_user_date_datereceived_gap)
t10 = t10[['user_id','user_date_datereceived_gap']]
t11 = t10.groupby('user_id').agg('mean').reset_index()
t11.rename(columns={'user_date_datereceived_gap':'avg_user_date_datereceived_gap'},inplace=True)
t12 = t10.groupby('user_id').agg('min').reset_index()
t12.rename(columns={'user_date_datereceived_gap':'min_user_date_datereceived_gap'},inplace=True)
t13 = t10.groupby('user_id').agg('max').reset_index()
t13.rename(columns={'user_date_datereceived_gap':'max_user_date_datereceived_gap'},inplace=True)
user2_feature = pd.merge(t,t1,on='user_id',how='left')
user2_feature = pd.merge(user2_feature,t3,on='user_id',how='left')
user2_feature = pd.merge(user2_feature,t4,on='user_id',how='left')
user2_feature = pd.merge(user2_feature,t5,on='user_id',how='left')
user2_feature = pd.merge(user2_feature,t6,on='user_id',how='left')
user2_feature = pd.merge(user2_feature,t7,on='user_id',how='left')
user2_feature = pd.merge(user2_feature,t8,on='user_id',how='left')
user2_feature = pd.merge(user2_feature,t9,on='user_id',how='left')
user2_feature = pd.merge(user2_feature,t11,on='user_id',how='left')
user2_feature = pd.merge(user2_feature,t12,on='user_id',how='left')
user2_feature = pd.merge(user2_feature,t13,on='user_id',how='left')
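# The rest of this block is assumed to mirror the user3_feature
# post-processing and export above:
user2_feature.count_merchant = user2_feature.count_merchant.replace(np.nan,0)
user2_feature.buy_use_coupon = user2_feature.buy_use_coupon.replace(np.nan,0)
user2_feature['buy_use_coupon_rate'] = user2_feature.buy_use_coupon.astype('float') / user2_feature.buy_total.astype('float')
user2_feature['user_coupon_transfer_rate'] = user2_feature.buy_use_coupon.astype('float') / user2_feature.coupon_received.astype('float')
user2_feature.buy_total = user2_feature.buy_total.replace(np.nan,0)
user2_feature.coupon_received = user2_feature.coupon_received.replace(np.nan,0)
user2_feature.to_csv('data/user2_feature.csv',index=None)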
import requests
from bs4 import BeautifulSoup
import pandas as pd
pages=list(range(0,250,25))
def request_douban(url):
htmls=[]
headers = {
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0 Win64 x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/96.0.4664.110 Safari/537.36 Edg/96.0.1054.62'
}
try:
response = requests.get(url=url, headers=headers)
if response.status_code == 200:
return response.text
except requests.RequestException:
return None
def get_htmls():
htmls=[]
for idx in pages:
htmls.append(request_douban(f"https://movie.douban.com/top250?start={idx}&filter="))
return htmls
def save_results():
results = {
'排名': [],  # rank
'名称': [],  # title
'主演': [],  # cast / description line
}
htmls=get_htmls()
for html in htmls:
soup=BeautifulSoup(html,'html.parser')
for tag in soup.find_all(attrs={'class':'item'}):
rank=tag.find('em').string
name=tag.find('span',class_='title').string
author=tag.p.get_text()
results['排名'].append(rank)
results['名称'].append(name)
results['主演'].append(author)
print(rank,' | ',name,' | ',author)
df = pd.DataFrame(results)
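# The script presumably persists the scraped table next; a minimal sketch
# (the output file name is an assumption, not from the source):
df.to_csv('douban_top250.csv', index=False, encoding='utf-8-sig')

# Entry point (assumed; the original may invoke save_results() differently)
if __name__ == '__main__':
    save_results()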
# coding: utf8
import os
import numpy as np
from tqdm import tqdm
import pandas as pd
import time
from upsetplot import plot
from matplotlib import pyplot
from scipy import stats
from intervaltree import IntervalTree
def PeakOverlap(genesfile, peaksfile,tssdistance=[0,0],peakname='null'):
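"""
Assign each peak to the gene(s)/transcript(s) it overlaps.

Both genesfile and peaksfile are whitespace-delimited BED-like files. Gene
intervals are extended by tssdistance=[before_start, after_end] before the
overlap test. Results are written to './<peakname>/':
'<peakname>PeaksInGenes' (peak-gene pairs) and '<peakname>_intergenic.bed'
(peaks overlapping no gene). Returns the two output paths.
"""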
LuckPeak, LuckGen, LuckTree, LuckBegin , Genlist = {},{},{},{},{}
####### Create an IntervalTree to hold peak intervals for the current chromosome
tree = IntervalTree()
n = 0
m = 0
intergenic = set()
intergenic_output = {}
for lines in open(peaksfile):
fields = lines.split()
namegain, chromogain, begingain, endgain = fields[3], fields[0], int(fields[1]), int(fields[2])
space4, space5 = fields[4], fields[5]
LuckPeak[namegain] = [chromogain, begingain, endgain, namegain, space4, space5]
LuckBegin[begingain] = [namegain,begingain,endgain]
intergenic = intergenic|set([namegain])
if chromogain not in LuckTree:
print('Chromosome '+chromogain+' of ' +peakname+'...')
LuckTree[chromogain] = 0
if n == 1:
for lines2 in open(genesfile):
fields2 = lines2.split()
if fields2[0] != k:
continue
else:
nameid = fields2[3]
begingen = int(fields2[1]) - tssdistance[0]
endgen = int(fields2[2]) + tssdistance[1]
chromogen = fields2[0]
strand = fields2[5]
if tree.overlap(begingen, endgen) != set():
for x in tree.overlap(begingen, endgen):
LuckGen[m] = [chromogen] + [fields2[1]] + [fields2[2]] + [nameid] + [strand] + LuckBegin[x.begin]
intergenic = intergenic - set([LuckBegin[x.begin][0]])
m+=1
else:
tree[begingain:endgain] = (begingain, endgain)
n = 1
### Reset the tree each time before starting a new chromosome
tree = IntervalTree()
tree[begingain:endgain] = (begingain, endgain)
### Fill the tree with all peaks of the current chromosome; once the next line belongs to another chromosome, compare every interval in the tree with all genes on that chromosome
else:
k = chromogain
tree[begingain:endgain] = (begingain,endgain)
for lines2 in open(genesfile):
fields2 = lines2.split()
if fields2[0] != k:
continue
else:
nameid = fields2[3]
begingen = int(fields2[1]) - tssdistance[0]
endgen = int(fields2[2]) + tssdistance[1]
chromogen = fields2[0]
strand = fields2[5]
if tree.overlap(begingen, endgen) != set():
for x in tree.overlap(begingen, endgen):
LuckGen[m] = [chromogen] + [fields2[1]] + [fields2[2]] + [nameid] + [strand] + LuckBegin[x.begin]
intergenic = intergenic - set([LuckBegin[x.begin][0]])
m += 1
for x in intergenic:
intergenic_output[x] = LuckPeak[x]
### OUTPUT
if not os.path.exists(peakname):
os.makedirs(peakname)
if len(intergenic) == 0:
print('No Intergenic peaks')
else:
        results_intergenic = pd.DataFrame(list(intergenic_output.values())).sort_values(by=[0])
        results_intergenic.to_csv('./' + peakname + '/' + peakname + '_intergenic.bed', index=None, sep='\t', header=False)
results = pd.DataFrame(list(LuckGen.values()))
results.to_csv('./' + peakname + '/' + peakname + 'PeaksInGenes', index=None, sep='\t', header= False)
return ('./' + peakname + '/' + peakname + 'PeaksInGenes',
'./' + peakname + '/' + peakname + '_intergenic.bed')
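# Hedged usage sketch: the two BED file names below are hypothetical inputs
# (chrom, start, end, name, score, strand per line), not shipped with this
# script. PeakOverlap writes its results under ./<peakname>/ and returns the
# paths of the peaks-in-genes table and the intergenic BED file.
def _demo_peak_overlap():
    peaks_in_genes, intergenic_bed = PeakOverlap(
        genesfile='genes.bed',
        peaksfile='peaks.bed',
        tssdistance=[2000, 500],   # widen every gene 2 kb upstream and 0.5 kb downstream
        peakname='H3K4me3')
    return peaks_in_genes, intergenic_bed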
def CheckExpression(peakgenes, transcribe, limit=0.5,peakname='null',TSSTTSdistance=[0,0]):
overlap_expres_trans, peak_overlap_trans = [], []
# LOAD THE TRANSCRIPT/GENES DATA
    tic = time.perf_counter()
transcript_info = {}
count_lines = 0
print('Checking Transcripts/genes overlapped by peaks and selecting the best match')
if transcribe != None:
if os.stat(transcribe).st_size == 0:
return print('Expression file is empty')
for line in open(transcribe, "r"):
fields = line.split()
if len(fields) != 2:
print(fields[0])
return print('Please input a correct expression file')
try:
FPKM = float(fields[1])
pass
except:
count_lines +=1
if count_lines != 1:
return print('Please input a correct expression file')
continue
trascr_id = fields[0]
transcript_info[trascr_id] = [FPKM]
else:
for line in open(peakgenes, "r"):
fields = line.split()
FPKM = float(0)
trascr_id = fields[3]
transcript_info[trascr_id] = [FPKM]
# LOAD THE PEAK-IN-GENE DATA
transcr_id_not_found = 0
peaks_to_transcripts = {}
total = 0
for line in open(peakgenes, "r"):
total += 1
chrom, transcr_start, transcr_end, transcr_id, strand, peak_name, peak_start, peak_end = line.split()
if peak_name not in peaks_to_transcripts:
peaks_to_transcripts[peak_name] = []
if transcr_id not in transcript_info:
transcr_id_not_found += 1
continue
transcript_data = transcript_info[transcr_id]
peaks_to_transcripts[peak_name].append(transcript_data + [chrom, transcr_start, transcr_end, transcr_id, strand, peak_start, peak_end])
print('Files Loaded')
best_transcript_under = {}
best_transcript_over = {}
nobest_transcript = {}
over_over = 0
over_only = 0
under_over = 0
under_only = 0
for peak in peaks_to_transcripts:
transcripts = peaks_to_transcripts[peak]
fpkm = [x[0] for x in transcripts]
maxfpkm = max(fpkm)
indices = [i for i, x in enumerate(fpkm) if x == maxfpkm]
        ### IF MAX OF FPKM IS LESS THAN THE LIMIT, USE THE QUANTITY OF OVERLAPPING BASES AS A SELECTIVE PROCEDURE
if maxfpkm < limit and len(fpkm) > 1:
under_over+=1
overlapbases = []
for x in transcripts:
beginpeak = int(x[6])
endpeak = int(x[7])
begingen = int(x[2]) - TSSTTSdistance[0]
endgen = int(x[3]) + TSSTTSdistance[1]
if beginpeak in range(begingen, endgen) and endpeak not in range(begingen, endgen):
overlapbases.append(endgen - beginpeak)
elif beginpeak in range(begingen, endgen) and endpeak in range(begingen, endgen):
overlapbases.append(endpeak - beginpeak)
elif beginpeak not in range(begingen, endgen) and endpeak in range(begingen, endgen):
overlapbases.append(endpeak - begingen)
else:
overlapbases.append(endgen - begingen)
best_transcript_under[peak] = transcripts[overlapbases.index(max(overlapbases))]
indices_max = [j for j, m in enumerate(overlapbases) if m == max(overlapbases)]
if len(indices_max) > 1:
for y in indices_max:
if peak not in nobest_transcript:
nobest_transcript[peak] = [transcripts[y][5]]
else:
nobest_transcript[peak] = nobest_transcript[peak] + [transcripts[y][5]]
### IF THERE IS MORE THAN 1 MAX, SELECT THE ONE THAT HAS MORE OVERLAPPING BASES
elif len(indices) > 1:
## SAVE TRANSCRIPTS THAT ARE EXPRESSED (OVER FPKM)
overlapbases = []
aux_trans = [0 for x in range(len(indices))]
n=0
peak_overlap_trans.append(peak)
overlap_expres_trans = []
for x in indices:
overlap_expres_trans = overlap_expres_trans + [transcripts[x]]
aux_trans[n] = transcripts[x]
beginpeak = int(aux_trans[n][6])
endpeak = int(aux_trans[n][7])
begingen = int(aux_trans[n][2]) - TSSTTSdistance[0]
endgen = int(aux_trans[n][3]) + TSSTTSdistance[1]
if beginpeak in range(begingen, endgen) and endpeak not in range(begingen, endgen):
overlapbases.append(endgen - beginpeak)
elif beginpeak in range(begingen, endgen) and endpeak in range(begingen, endgen):
overlapbases.append(endpeak - beginpeak)
elif beginpeak not in range(begingen, endgen) and endpeak in range(begingen, endgen):
overlapbases.append(endpeak - begingen)
else:
overlapbases.append(endgen - begingen)
n+=1
if max(overlapbases) > limit:
over_over += 1
best_transcript_over[peak] = aux_trans[overlapbases.index(max(overlapbases))]
else:
best_transcript_under[peak] = aux_trans[overlapbases.index(max(overlapbases))]
under_over += 1
else:
max_trans = max(transcripts, key=lambda x: x[0])
if max_trans[0] > limit:
best_transcript_over[peak] = max_trans
if len(fpkm) > 1:
over_over += 1
else:
over_only += 1
else:
best_transcript_under[peak] = max_trans
if len(fpkm) > 1:
under_over += 1
else:
under_only += 1
    toc = time.perf_counter()
print('Done in ' + str(round(toc - tic, 2)) + ' sec. Printing results')
### ORDER THE RESULTS FOR OUTPUT AND SAVE THEM IN A FILE
if len(best_transcript_under) > 0:
best_transcript_under = pd.DataFrame(best_transcript_under).transpose()
best_transcript_under['Peak_Name'] = best_transcript_under.index
best_transcript_under.columns=['FPKM','Chromo', 'Gen_Start','Gen_End','Gen_TransID','Strand','Peak_Start','Peak_End','Peak_Name']
best_transcript_under = best_transcript_under[['Chromo','Gen_Start','Gen_End','Gen_TransID','Strand','Peak_Name','Peak_Start','Peak_End','FPKM']]
best_transcript_under.to_csv('./' + peakname + '/NotExpressed_' + peakname, index=None, sep='\t')
if len(best_transcript_over) > 0:
best_transcript_over = pd.DataFrame(best_transcript_over).transpose()
best_transcript_over['Peak_Name'] = best_transcript_over.index
best_transcript_over.columns = ['FPKM', 'Chromo', 'Gen_Start', 'Gen_End', 'Gen_TransID', 'Strand',
'Peak_Start', 'Peak_End', 'Peak_Name']
best_transcript_over = best_transcript_over[
['Chromo', 'Gen_Start', 'Gen_End', 'Gen_TransID', 'Strand', 'Peak_Name', 'Peak_Start', 'Peak_End', 'FPKM']]
best_transcript_over.to_csv('./' + peakname + '/Expressed_' + peakname, index=None, sep='\t')
print(' Number of peaks: ' + str(len(peaks_to_transcripts)))
print(' There are ' + str(over_over) + ' peaks with overlapping genes and over the expression threshold \n There are ' + str(under_over) + ' peaks with overlapping genes and under the expression threshold'
+ '\n There are ' + str(
over_only) + ' peaks with no overlapping genes and over the expression threshold \n There are ' + str(
under_only) + ' peaks with no overlapping genes and under the expression threshold')
print('Total peaks overlapping transcripts/genes: ' + str(over_over+over_only+under_over+under_only))
return ('./' + peakname + '/Expressed_' + peakname, './' + peakname + '/NotExpressed_' + peakname)
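# Hedged sketch of chaining PeakOverlap into CheckExpression: 'fpkm.tsv' stands
# for a two-column "<transcript_id>\t<FPKM>" expression table and is a
# hypothetical file name. The call returns the paths of the Expressed_ and
# NotExpressed_ tables written under ./<peakname>/.
def _demo_check_expression():
    peaks_in_genes, _ = PeakOverlap('genes.bed', 'peaks.bed', peakname='H3K4me3')
    return CheckExpression(peakgenes=peaks_in_genes,
                           transcribe='fpkm.tsv',
                           limit=0.5,
                           peakname='H3K4me3')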
def TableCreator(peakexon,namepeak='null',lap='null',peaksingenes = 'null', warnings = False):
exonchromo,exonbegin, exonend, trans_name,exondirection,exontype,peakname,peakchromo,peakbegin, peakend,begingen, endgen=[[],[],[],[],[],[],[],[],[],[],[],[]]
score, upstream_warning_peak, upstream_warning_tran, upstream_warning_chrom, upstream_warning_peak_start, upstream_warning_peak_end, upstream_warning_FPKM, upstream_warning_direction = [],[],[],[],[],[],[],[]
downstream_warning_peak, downstream_warning_tran, downstream_warning_chrom, downstream_warning_peak_start, downstream_warning_peak_end, downstream_warning_FPKM, downstream_warning_direction = [],[],[],[],[],[],[]
data=open(peakexon)
a=1
for lines in data.readlines():
if a>1:
exonchromo.append(lines.split('\t')[0])
exonbegin.append(int(lines.split('\t')[9]))
exonend.append(int(lines.split('\t')[10]))
trans_name.append(lines.split('\t')[3])
peakend.append(int(lines.split('\t')[7]))
peakbegin.append(int(lines.split('\t')[6]))
exondirection.append(lines.split('\t')[4])
exontype.append(lines.split('\t')[11].replace('\n',''))
peakname.append(lines.split('\t')[5])
score.append(lines.split('\t')[8])
peakchromo.append(lines.split('\t')[0])
begingen.append(int(lines.split('\t')[1]))
endgen.append(int(lines.split('\t')[2]))
a+=1
data.close()
begingen,endgen=[np.array(begingen),np.array(endgen)]
exonbegin, exonend, peakbegin, peakend, exondirection = [np.array(exonbegin),np.array(exonend),np.array(peakbegin),np.array(peakend),np.array(exondirection)]
countpeakexons, countpeakintrons, countpeak5UTR, countpeak3UTR,countpeakTSS, countpeakTTS =[[],[],[],[],[],[]]
score=np.array(score)
matrixlong=-1
resnamegen, reschromo, resdirection,respeakstart,respeakends,respeakname =[[],[],[],[],[],[]]
TTSwarning, TSSwarning = [],[]
trans_name,exontype = np.array(trans_name),np.array(exontype)
peakname, exonchromo = [np.array(peakname), np.array(exonchromo)]
resscore=[]
uniquepeak=np.unique(peakname)
for peak in tqdm(range(len(uniquepeak)), desc='TableCreator'):
indices = np.where(peakname==uniquepeak[peak])
reschromo.append(exonchromo[indices][0])
resnamegen.append(trans_name[indices][0])
resdirection.append(exondirection[indices][0])
respeakstart.append(peakbegin[indices][0])
respeakends.append(peakend[indices][0])
respeakname.append(uniquepeak[peak])
resscore.append(score[indices][0])
matrixlong += 1
countpeak5UTR.append(0)
countpeak3UTR.append(0)
countpeakexons.append(0)
countpeakintrons.append(0)
countpeakTSS.append(0)
countpeakTTS.append(0)
for indice in range(len(trans_name[indices])):
if ((peakbegin[indices][indice] <= begingen[indices][indice] and exondirection[indices][indice] == '+') or (
peakend[indices][indice] >= endgen[indices][indice] and exondirection[indices][indice] == '-'))and countpeakTSS[matrixlong] != 1:
countpeakTSS[matrixlong] = 1
if ((peakbegin[indices][indice] <= begingen[indices][indice] and exondirection[indices][indice] == '-') or (
peakend[indices][indice] >= endgen[indices][indice] and exondirection[indices][indice] == '+')) and countpeakTTS[matrixlong] != 1:
countpeakTTS[matrixlong] = 1
if (exonbegin[indices][indice] <= peakbegin[indices][indice] <= exonend[indices][indice] or exonbegin[indices][indice] <= peakend[indices][indice] <= exonend[indices][indice] or (
exonbegin[indices][indice] >= peakbegin[indices][indice] and peakend[indices][indice] >= exonend[indices][indice])):
if exontype[indices][indice]=='5UTR' and countpeak5UTR[matrixlong] != 1:
countpeak5UTR[matrixlong] = 1
elif exontype[indices][indice]=='3UTR' and countpeak3UTR[matrixlong] != 1:
countpeak3UTR[matrixlong] = 1
elif countpeakexons[matrixlong] != 1:
countpeakexons[matrixlong] = 1
try:
if ((exonend[indices][indice] <= peakbegin[indices][indice] <= exonbegin[indices][indice+1] or exonend[indices][indice] <= peakend[indices][indice] <= exonbegin[indices][indice+1] or (exonend[indices][indice] >= peakbegin[indices][indice] and peakend[indices][indice] >= exonbegin[indices][indice]))) and countpeakintrons[matrixlong] != 1:
countpeakintrons[matrixlong] = 1
except:
pass
if countpeakintrons[matrixlong] == 0 and countpeak5UTR[matrixlong] == 0 and countpeak3UTR[matrixlong] == 0 and countpeakexons[matrixlong] == 0 and countpeakTSS[matrixlong] == 0 and countpeakTTS[matrixlong] == 0:
countpeakintrons[matrixlong] = 1
if countpeakexons[matrixlong] == 0 and countpeakintrons[matrixlong] == 0 and countpeak3UTR[matrixlong] == 0 and countpeak5UTR[matrixlong] == 0:
if countpeakTSS[matrixlong] == 1:
upstream_warning_peak.append(uniquepeak[peak])
upstream_warning_tran.append(trans_name[indices][0])
upstream_warning_chrom.append(exonchromo[indices][0])
upstream_warning_direction.append(exondirection[indices][0])
upstream_warning_FPKM.append(score[indices][0])
upstream_warning_peak_end.append(peakbegin[indices][0])
upstream_warning_peak_start.append(peakend[indices][0])
TSSwarning.append(True)
TTSwarning.append(False)
elif countpeakTTS[matrixlong] == 1:
downstream_warning_peak.append(uniquepeak[peak])
downstream_warning_tran.append(trans_name[indices][0])
downstream_warning_chrom.append(exonchromo[indices][0])
downstream_warning_direction.append(exondirection[indices][0])
downstream_warning_FPKM.append(score[indices][0])
downstream_warning_peak_end.append(peakbegin[indices][0])
downstream_warning_peak_start.append(peakend[indices][0])
TTSwarning.append(True)
TSSwarning.append(False)
else:
TTSwarning.append(False)
TSSwarning.append(False)
else:
TTSwarning.append(False)
TSSwarning.append(False)
##### Look peaks with antisense transcripts #####
peaks_dir, peaks_inv_dir, peaks_inv_tran, peaks_tran = {},{},{},{}
for lines in open(peaksingenes):
fields = lines.split()
if fields[5] in peaks_tran.keys():
if fields[4] not in peaks_dir[fields[5]]:
peaks_tran.update({fields[5]: peaks_tran[fields[5]]+[fields[3]]})
peaks_dir.update({fields[5]: peaks_dir[fields[5]]+[fields[4]]})
peaks_inv_tran[fields[5]] = peaks_tran[fields[5]]
peaks_inv_dir[fields[5]] = peaks_dir[fields[5]]
else:
peaks_tran.update({fields[5]: peaks_tran[fields[5]]+[fields[3]]})
peaks_dir.update({fields[5]: peaks_dir[fields[5]]+[fields[4]]})
else:
peaks_dir.update({fields[5]: [fields[4]]})
peaks_tran.update({fields[5]: [fields[3]]})
antisense_warning = []
for peak in respeakname:
if peak in peaks_inv_tran.keys():
antisense_warning.append('True')
else:
antisense_warning.append('False')
###### -~- ######
if not os.path.exists(namepeak):
os.makedirs(namepeak)
##### Create the files with the Upstream and Downstream warnings ######
if warnings == True:
results=open('./'+namepeak+'/'+namepeak+'_Upstream_Warnings','w')
results.write('Chromo'+'\t'+'Peak_Start'+'\t'+'Peak_End'+'\t'+'Peak_Name'+'\t'+'ID_transc'+'\t'+'FPKM'+'\t'+'Direction'+'\n')
for x in range(len(upstream_warning_peak)):
results.write(upstream_warning_chrom[x] + '\t' + str(upstream_warning_peak_start[x]) + '\t' + str(upstream_warning_peak_end[x]) +'\t' + str(upstream_warning_peak[x]) +'\t' + upstream_warning_tran[x] +'\t' + str(upstream_warning_FPKM[x]) +'\t' + upstream_warning_direction[x] + "\n")
results.close()
results=open('./'+namepeak+'/'+namepeak+'_Downstream_Warnings','w')
results.write('Chromo'+'\t'+'Peak_Start'+'\t'+'Peak_End'+'\t'+'Peak_Name'+'\t'+'ID_transc'+'\t'+'FPKM'+'\t'+'Direction'+'\n')
for x in range(len(downstream_warning_peak)):
results.write(downstream_warning_chrom[x] + '\t' + str(downstream_warning_peak_start[x]) + '\t' + str(downstream_warning_peak_end[x]) +'\t' + downstream_warning_peak[x] +'\t' + downstream_warning_tran[x] +'\t' + str(downstream_warning_FPKM[x]) +'\t' + downstream_warning_direction[x] + "\n")
results.close()
results=open('./'+namepeak+'/'+namepeak+'_Antisense_Warnings','w')
results.write('Peak'+'\t'+'transcripts' + '\t' + 'Strand' + '\n')
for x in peaks_inv_tran.keys():
results.write(x + '\t' + str(peaks_tran[x])[1:-1][0:] + "\t" + str(peaks_dir[x])[1:-1][0:] + "\n")
results.close()
##### -~- #####
results=open('./'+namepeak+'/'+namepeak+"_"+lap+'_Annotation.table','w')
results.write('Chromo'+'\t'+'Peak_Start'+'\t'+'Peak_End'+'\t'+'Peak_Name'+'\t'+'ID_transc'+'\t'+'FPKM'+'\t'+'Direction'+'\t'+'TSS'+'\t'+'5UTR'+'\t'+'Exons'+'\t'+'Introns'+'\t'+'3UTR'+'\t'+'TTS'+'\t'+'Warning_Upstream'+'\t'+'Warning_downstream'+'\t'+'Warning_Antisense'+'\t'+'\n')
for res in range(len(resnamegen)):
results.write(reschromo[res]+'\t'+str(respeakstart[res])+'\t'+str(respeakends[res])+'\t'+respeakname[res]+'\t'+resnamegen[res]+'\t'+str(resscore[res])+'\t'+resdirection[res]+'\t'+str(countpeakTSS[res])+'\t'+str(countpeak5UTR[res])+'\t'+str(countpeakexons[res])+'\t'+str(countpeakintrons[res])+'\t'+str(countpeak3UTR[res])+'\t'+str(countpeakTTS[res])+'\t'+str(TSSwarning[res])+'\t'+str(TTSwarning[res])+'\t'+str(antisense_warning[res])+'\t'+'\n')
results.close()
return('./'+namepeak+'/'+namepeak+"_"+lap+'_Annotation.table')
# This program takes the list of exons that are in the genes and puts them in a new list where each row is an exon with the name of the file.
# It needs a BED file for each of the 2 inputs: one is the list of genes with the exon sizes and positions, the other the clean list of gained or lost genes.
def FeatureAssign(cleanpeaklist,UTR5='5UTR.bed',UTR3='3UTR.bed',CDE='CDE.bed',peakname='null',lap='null'):
print('FeatureAssign')
    tic = time.perf_counter()
peaktable=pd.read_csv(cleanpeaklist, sep="\t")
# Getting the 5UTR data and merging with the table of peaks in genes
UTR5table = pd.read_csv(UTR5, header=None, sep="\t")
UTR5table = UTR5table.rename(columns={0:'Chromo',1: 'BeginExon',2: 'EndExon',3: 'Gen_TransID'})
del UTR5table[4],UTR5table[5],UTR5table['Chromo']
mergedUTR5peak = pd.merge(peaktable, UTR5table, left_on='Gen_TransID', right_on='Gen_TransID', how='inner')
mergedUTR5peak['Type'] = pd.Series('5UTR', index=mergedUTR5peak.index)
# Getting the 3UTR data and merging with the table of peaks in genes
UTR3table = pd.read_csv(UTR3, header=None, sep="\t")
UTR3table = UTR3table.rename(columns={0:'Chromo',1: 'BeginExon',2: 'EndExon',3: 'Gen_TransID'})
del UTR3table[4],UTR3table[5],UTR3table['Chromo']
mergedUTR3peak = | pd.merge(peaktable, UTR3table, left_on='Gen_TransID', right_on='Gen_TransID', how='inner') | pandas.merge |
import dataclasses
from collections import namedtuple
from copy import deepcopy, copy
from typing import NoReturn
import numpy as np
import pandas as pd
from numpy import datetime64
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import FunctionTransformer
from IMLearn import BaseEstimator
from challenge.common_preproc_pipe_creator import CommonPreProcPipeCreator
from challenge.period_cancellation_estimator import PeriodCancellationEstimator
Period = namedtuple("Period", ("days_until", 'length'))
def get_response_for_period(train_data: pd.DataFrame, period: Period) -> pd.Series:
return (train_data.cancellation_datetime >=
            train_data.booking_datetime + pd.DateOffset(days=period.days_until)) & \
           (train_data.cancellation_datetime <=
            train_data.booking_datetime + pd.DateOffset(days=period.days_until + period.length))
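# Hedged sketch (toy frame, hypothetical dates) of the boolean mask this helper
# returns: True when the cancellation falls inside the window that opens
# `days_until` days after booking and spans `length` further days.
def _demo_get_response_for_period():
    toy = pd.DataFrame({
        "booking_datetime": pd.to_datetime(["2018-01-01", "2018-01-01"]),
        "cancellation_datetime": pd.to_datetime(["2018-01-09", "2018-02-15"]),
    })
    return get_response_for_period(toy, Period(days_until=7, length=7))
    # -> pd.Series([True, False])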
class ModelForPeriodExistsError(BaseException):
pass
class GeneralCancellationEstimator:
def __init__(self, period_length: int, cancellation_period_start=None):
super().__init__()
self._models = {}
self.cancellation_period_start = cancellation_period_start
self.__def_days_until_cancellation = 0
self.__period_length = period_length
def _get_days_until_cancellation_period(self, data_row: pd.Series):
return (self.cancellation_period_start - data_row.booking_datetime).days \
if self.cancellation_period_start is not None else self.__def_days_until_cancellation
def add_model(self, X: np.ndarray, y: np.ndarray, pipe: Pipeline, period: Period, threshold=0.5):
if period.days_until in self._models:
raise ModelForPeriodExistsError(f'There already exists a model with {period.days_until} '
f'days until the start of the relevant cancellation period.')
assert period.length == self.__period_length, \
f'Error: estimator only deals with periods of length {self.__period_length}.'
train_X = pipe.transform(X)
model_estimator = PeriodCancellationEstimator(threshold).fit(train_X, y)
pipe.steps.append(('estimator', model_estimator))
self._models[period.days_until] = pipe
def predict(self, X: pd.DataFrame) -> pd.Series:
periods = X.apply(self._get_days_until_cancellation_period, axis='columns')
return X.groupby(periods, as_index=False) \
.apply(lambda data: pd.Series(self._models[data.name].predict(data), index=data.index)) \
.droplevel(0, axis='index').sort_index()
def test_models(self, test_data: pd.DataFrame):
for days_until_canc_period_start in self._models:
self.__def_days_until_cancellation = days_until_canc_period_start
test_data_resp = get_response_for_period(test_data,
Period(days_until_canc_period_start, self.__period_length))
model_score = self._models[days_until_canc_period_start].score(test_data, test_data_resp)
print(f'Score for model with {days_until_canc_period_start} days until start'
f' of cancellation period: {model_score:.3f}')
def loss(self, X: np.ndarray, y: np.ndarray) -> float:
raise NotImplementedError
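# Hedged sketch of the dispatch pattern predict() relies on: rows are grouped by
# their days-until-cancellation horizon, each group is scored by the model kept
# for that horizon, and the per-group results are stitched back into the
# original row order. The dict of "models" below is a stand-in for the fitted
# pipelines, not the real estimators.
def _demo_predict_dispatch():
    toy = pd.DataFrame({"x": [1.0, 2.0, 3.0]})
    horizons = pd.Series([7, 14, 7], index=toy.index)
    stand_in_models = {7: lambda df: df.x > 1.5, 14: lambda df: df.x > 2.5}
    return toy.groupby(horizons, as_index=False) \
        .apply(lambda data: pd.Series(stand_in_models[data.name](data), index=data.index)) \
        .droplevel(0, axis='index').sort_index()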
@dataclasses.dataclass
class GeneralCancellationEstimatorBuilder:
period_length: int
min_days_until_cancellation_period: int
max_days_until_cancellation_period: int
__NONE_OUTPUT_COLUMNS = ['checkin_date',
'checkout_date',
'booking_datetime',
'hotel_live_date',
'hotel_country_code',
'origin_country_code',
'cancellation_policy_code']
__CATEGORICAL_COLUMNS = ['hotel_star_rating',
'guest_nationality_country_name',
'charge_option',
'accommadation_type_name',
'language',
'is_first_booking',
'customer_nationality',
'original_payment_currency',
'is_user_logged_in',
]
__RELEVANT_COLUMNS = ['no_of_adults',
'no_of_children',
'no_of_extra_bed',
'no_of_room',
'original_selling_amount'] + __NONE_OUTPUT_COLUMNS + __CATEGORICAL_COLUMNS
__COL_TYPE_CONVERSIONS = {'checkout_date': 'datetime64',
'checkin_date': 'datetime64',
'hotel_live_date': 'datetime64',
'booking_datetime': 'datetime64'}
def build_pipeline(self, train_data: pd.DataFrame,
cancellation_period_start: np.datetime64) -> GeneralCancellationEstimator:
base_pipe = self.__create_common_preproc_pipeline()
general_estimator = GeneralCancellationEstimator(self.period_length, cancellation_period_start)
for days_until_cancellation_period in range(self.min_days_until_cancellation_period,
self.max_days_until_cancellation_period + 1):
print(f'Creating model for {days_until_cancellation_period} days until cancellation.')
preproc_pipe = deepcopy(base_pipe)
period = Period(days_until_cancellation_period, self.period_length)
train_data['cancelled_in_period'] = get_response_for_period(train_data.astype(self.__COL_TYPE_CONVERSIONS),
period)
preproc_pipe = self.__add_period_dependent_preproc_to_pipe(preproc_pipe, train_data)
general_estimator.add_model(train_data.drop('cancelled_in_period', axis='columns'),
train_data.cancelled_in_period, preproc_pipe, period)
return general_estimator
@classmethod
def __create_common_preproc_pipeline(cls) -> Pipeline:
return CommonPreProcPipeCreator.build_pipe(cls.__RELEVANT_COLUMNS)
@classmethod
def __add_period_dependent_preproc_to_pipe(cls, preproc_pipe: Pipeline, train_data: pd.DataFrame) -> Pipeline:
preproc_pipe = cls.__add_categorical_prep_to_pipe(train_data, preproc_pipe, cls.__CATEGORICAL_COLUMNS)
preproc_pipe.steps.append(('drop irrelevant columns',
FunctionTransformer(lambda df: df.drop(cls.__NONE_OUTPUT_COLUMNS, axis='columns'))))
return preproc_pipe
@classmethod
def __add_categorical_prep_to_pipe(cls, train_features: pd.DataFrame, pipeline: Pipeline, cat_vars: list,
one_hot=False, calc_probs=True) -> Pipeline:
assert one_hot ^ calc_probs, \
'Error: can only do either one-hot encoding or probability calculations, not neither/both!'
# one-hot encoding
if one_hot:
# TODO - use sklearn OneHotEncoder
pipeline.steps.append(('one-hot encoding',
FunctionTransformer(lambda df: | pd.get_dummies(df, columns=cat_vars) | pandas.get_dummies |
#! /usr/bin/env python3
"""My Podcaster."""
import datetime
import email.utils
from subprocess import call, check_output
import mimetypes
import os
import re
import shutil
import socket
import urllib.error
import urllib.request
import requests
import tqdm
import random
import signal
from Podcast import Podcast
import configparser
from prompt_toolkit import print_formatted_text, HTML
import bs4
import pandas
random.seed(os.urandom(128))
mimetypes.init()
headers = {
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; WOW64; Trident/7.0) like Gecko",
}
podconfig = configparser.ConfigParser()
podconfig.read(os.path.abspath(os.path.expanduser("~/.podcasterrc")))
PODFILE = podconfig["default"]["podfile"]
BETTERRANDOM = str(podconfig["betterrandom"]["master"]).upper()
BETTERRANDOM_HISTCOUNT = int(podconfig["betterrandom"]["histcount"])
BETTERRANDOM_HIST = os.path.abspath(
os.path.expanduser(podconfig["betterrandom"]["file"]),
)
TIMEOUT = int(podconfig["default"]["timeout"])
DOWNLOADDIR = os.path.abspath(os.path.expanduser(podconfig["default"]["downloaddir"]))
def write_history(pod, title):
"""Append history to a file."""
try:
PLAYED = pandas.read_csv(BETTERRANDOM_HIST, index_col=0)
except FileNotFoundError:
PLAYED = | pandas.DataFrame(columns=["Podcast", "Title"]) | pandas.DataFrame |
#!/usr/bin/python3
# -*- coding: utf-8 -*-
# *****************************************************************************/
# * Authors: <NAME>
# *****************************************************************************/
"""transformCSV.py
This module contains the basic functions for creating the content of a configuration file from CSV.
Args:
--inFile: Path for the configuration file where the time series data values CSV
--outFile: Path for the configuration file where the time series data values INI
--debug: Boolean flag to activate verbose printing for debug use
Example:
Default usage:
$ python transformCSV.py
Specific usage:
$ python transformCSV.py
--inFile C:\raad\src\software\time-series.csv
--outFile C:\raad\src\software\time-series.ini
--debug True
"""
import sys
import datetime
import optparse
import traceback
import pandas
import numpy
import os
import pprint
import csv
if sys.version_info.major > 2:
import configparser as cF
else:
import ConfigParser as cF
class TransformMetaData(object):
debug = False
fileName = None
fileLocation = None
columnsList = None
analysisFrameFormat = None
uniqueLists = None
analysisFrame = None
def __init__(self, inputFileName=None, debug=False, transform=False, sectionName=None, outFolder=None,
outFile='time-series-madness.ini'):
if isinstance(debug, bool):
self.debug = debug
if inputFileName is None:
return
elif os.path.exists(os.path.abspath(inputFileName)):
self.fileName = inputFileName
self.fileLocation = os.path.exists(os.path.abspath(inputFileName))
(analysisFrame, analysisFrameFormat, uniqueLists, columnNamesList) = self.CSVtoFrame(
inputFileName=self.fileName)
self.analysisFrame = analysisFrame
self.columnsList = columnNamesList
self.analysisFrameFormat = analysisFrameFormat
self.uniqueLists = uniqueLists
if transform:
passWrite = self.frameToINI(analysisFrame=analysisFrame, sectionName=sectionName, outFolder=outFolder,
outFile=outFile)
print(f"Pass Status is : {passWrite}")
return
def getColumnList(self):
return self.columnsList
def getAnalysisFrameFormat(self):
return self.analysisFrameFormat
def getuniqueLists(self):
return self.uniqueLists
def getAnalysisFrame(self):
return self.analysisFrame
@staticmethod
def getDateParser(formatString="%Y-%m-%d %H:%M:%S.%f"):
        return (lambda x: datetime.datetime.strptime(x, formatString))  # e.g. 2020-06-09 19:14:00.000
def getHeaderFromFile(self, headerFilePath=None, method=1):
if headerFilePath is None:
return (None, None)
if method == 1:
fieldnames = pandas.read_csv(headerFilePath, index_col=0, nrows=0).columns.tolist()
elif method == 2:
with open(headerFilePath, 'r') as infile:
reader = csv.DictReader(infile)
fieldnames = list(reader.fieldnames)
elif method == 3:
fieldnames = list(pandas.read_csv(headerFilePath, nrows=1).columns)
else:
fieldnames = None
fieldDict = {}
for indexName, valueName in enumerate(fieldnames):
fieldDict[valueName] = pandas.StringDtype()
return (fieldnames, fieldDict)
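    # Hedged usage sketch (hypothetical CSV path): the three methods return the
    # same header either way, they only differ in how pandas/csv reads the file.
    #
    #   meta = TransformMetaData()
    #   names, name_dtypes = meta.getHeaderFromFile('time-series.csv', method=1)
    #   # names       -> ['Serial_Number', 'LogTime0', ...]
    #   # name_dtypes -> {'Serial_Number': pandas.StringDtype(), ...}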
def CSVtoFrame(self, inputFileName=None):
if inputFileName is None:
return (None, None)
# Load File
print("Processing File: {0}...\n".format(inputFileName))
self.fileLocation = inputFileName
# Create data frame
analysisFrame = pandas.DataFrame()
analysisFrameFormat = self._getDataFormat()
inputDataFrame = pandas.read_csv(filepath_or_buffer=inputFileName,
sep='\t',
names=self._getDataFormat(),
# dtype=self._getDataFormat()
# header=None
# float_precision='round_trip'
# engine='c',
# parse_dates=['date_column'],
# date_parser=True,
# na_values=['NULL']
)
if self.debug: # Preview data.
print(inputDataFrame.head(5))
# analysisFrame.astype(dtype=analysisFrameFormat)
# Cleanup data
analysisFrame = inputDataFrame.copy(deep=True)
        analysisFrame = analysisFrame.apply(pandas.to_numeric, errors='coerce')  # Fill in bad data with Not-a-Number (NaN)
# Create lists of unique strings
uniqueLists = []
columnNamesList = []
for columnName in analysisFrame.columns:
if self.debug:
print('Column Name : ', columnName)
print('Column Contents : ', analysisFrame[columnName].values)
            if pandas.api.types.is_string_dtype(analysisFrame[columnName]):
columnUniqueList = analysisFrame[columnName].unique().tolist()
else:
columnUniqueList = None
columnNamesList.append(columnName)
uniqueLists.append([columnName, columnUniqueList])
if self.debug: # Preview data.
print(analysisFrame.head(5))
return (analysisFrame, analysisFrameFormat, uniqueLists, columnNamesList)
def frameToINI(self, analysisFrame=None, sectionName='Unknown', outFolder=None, outFile='nil.ini'):
if analysisFrame is None:
return False
try:
if outFolder is None:
outFolder = os.getcwd()
configFilePath = os.path.join(outFolder, outFile)
configINI = cF.ConfigParser()
configINI.add_section(sectionName)
            for (columnName, columnData) in analysisFrame.items():
if self.debug:
print('Column Name : ', columnName)
print('Column Contents : ', columnData.values)
print("Column Contents Length:", len(columnData.values))
print("Column Contents Type", type(columnData.values))
writeList = "["
for colIndex, colValue in enumerate(columnData):
writeList = f"{writeList}'{colValue}'"
if colIndex < len(columnData) - 1:
writeList = f"{writeList}, "
writeList = f"{writeList}]"
configINI.set(sectionName, columnName, writeList)
if not os.path.exists(configFilePath) or os.stat(configFilePath).st_size == 0:
with open(configFilePath, 'w') as configWritingFile:
configINI.write(configWritingFile)
noErrors = True
except ValueError as e:
errorString = ("ERROR in {__file__} @{framePrintNo} with {ErrorFound}".format(__file__=str(__file__),
framePrintNo=str(
sys._getframe().f_lineno),
ErrorFound=e))
print(errorString)
noErrors = False
return noErrors
@staticmethod
def _validNumericalFloat(inValue):
"""
Determines if the value is a valid numerical object.
Args:
inValue: floating-point value
Returns: Value in floating-point or Not-A-Number.
"""
try:
return numpy.float128(inValue)
except ValueError:
return numpy.nan
@staticmethod
def _calculateMean(x):
"""
        Calculates the mean with a weighted-average (multiplication) method, since a direct division can produce an infinity or NaN.
Args:
x: Input data set. We use a data frame.
Returns: Calculated mean for a vector data frame.
"""
try:
mean = numpy.float128(numpy.average(x, weights=numpy.ones_like(numpy.float128(x)) / numpy.float128(x.size)))
except ValueError:
mean = 0
pass
return mean
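    # Hedged sanity check (toy data): the weighted-average trick above matches a
    # plain numpy mean for well-behaved input.
    #
    #   x = pandas.Series([1.0, 2.0, 3.0, 4.0])
    #   TransformMetaData._calculateMean(x)   # -> 2.5, same as numpy.mean(x)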
def _calculateStd(self, data):
"""
        Calculates the standard deviation with a multiplication method, since a direct division can produce an infinity or NaN.
Args:
data: Input data set. We use a data frame.
Returns: Calculated standard deviation for a vector data frame.
"""
sd = 0
try:
n = numpy.float128(data.size)
if n <= 1:
return numpy.float128(0.0)
# Use multiplication version of mean since numpy bug causes infinity.
mean = self._calculateMean(data)
sd = numpy.float128(mean)
# Calculate standard deviation
for el in data:
diff = numpy.float128(el) - numpy.float128(mean)
sd += (diff) ** 2
points = numpy.float128(n - 1)
sd = numpy.float128(numpy.sqrt(numpy.float128(sd) / numpy.float128(points)))
except ValueError:
pass
return sd
def _determineQuickStats(self, dataAnalysisFrame, columnName=None, multiplierSigma=3.0):
"""
Determines stats based on a vector to get the data shape.
Args:
dataAnalysisFrame: Dataframe to do analysis on.
columnName: Column name of the data frame.
multiplierSigma: Sigma range for the stats.
Returns: Set of stats.
"""
meanValue = 0
sigmaValue = 0
sigmaRangeValue = 0
topValue = 0
try:
            # Clean out anomalies due to random invalid inputs.
if (columnName is not None):
meanValue = self._calculateMean(dataAnalysisFrame[columnName])
                if numpy.isnan(meanValue):
meanValue = numpy.float128(1)
sigmaValue = self._calculateStd(dataAnalysisFrame[columnName])
                if numpy.isnan(float(sigmaValue)):
sigmaValue = numpy.float128(1)
multiplier = numpy.float128(multiplierSigma) # Stats: 1 sigma = 68%, 2 sigma = 95%, 3 sigma = 99.7
sigmaRangeValue = (sigmaValue * multiplier)
                if numpy.isnan(float(sigmaRangeValue)):
sigmaRangeValue = numpy.float128(1)
topValue = numpy.float128(meanValue + sigmaRangeValue)
print("Name:{} Mean= {}, Sigma= {}, {}*Sigma= {}".format(columnName,
meanValue,
sigmaValue,
multiplier,
sigmaRangeValue))
except ValueError:
pass
return (meanValue, sigmaValue, sigmaRangeValue, topValue)
def _cleanZerosForColumnInFrame(self, dataAnalysisFrame, columnName='cycles'):
"""
Cleans the data frame with data values that are invalid. I.E. inf, NaN
Args:
dataAnalysisFrame: Dataframe to do analysis on.
columnName: Column name of the data frame.
Returns: Cleaned dataframe.
"""
dataAnalysisCleaned = None
try:
            # Clean out anomalies due to random invalid inputs.
(meanValue, sigmaValue, sigmaRangeValue, topValue) = self._determineQuickStats(
dataAnalysisFrame=dataAnalysisFrame, columnName=columnName)
# dataAnalysisCleaned = dataAnalysisFrame[dataAnalysisFrame[columnName] != 0]
# When the cycles are negative or zero we missed cleaning up a row.
# logicVector = (dataAnalysisFrame[columnName] != 0)
# dataAnalysisCleaned = dataAnalysisFrame[logicVector]
            logicVector = (dataAnalysisFrame[columnName] >= 1)
            dataAnalysisCleaned = dataAnalysisFrame[logicVector]
# These timed out mean + 2 * sd
logicVector = (dataAnalysisCleaned[columnName] < topValue) # Data range
dataAnalysisCleaned = dataAnalysisCleaned[logicVector]
except ValueError:
pass
return dataAnalysisCleaned
def _cleanFrame(self, dataAnalysisTemp, cleanColumn=False, columnName='cycles'):
"""
Args:
dataAnalysisTemp: Dataframe to do analysis on.
cleanColumn: Flag to clean the data frame.
columnName: Column name of the data frame.
Returns: cleaned dataframe
"""
try:
            replacementList = [pandas.NaT, numpy.inf, -numpy.inf, 'NaN', 'inf', '-inf', 'NULL']
if cleanColumn is True:
dataAnalysisTemp = self._cleanZerosForColumnInFrame(dataAnalysisTemp, columnName=columnName)
dataAnalysisTemp = dataAnalysisTemp.replace(to_replace=replacementList,
value=numpy.nan)
dataAnalysisTemp = dataAnalysisTemp.dropna()
except ValueError:
pass
return dataAnalysisTemp
@staticmethod
def _getDataFormat():
"""
Return the dataframe setup for the CSV file generated from server.
Returns: dictionary data format for pandas.
"""
dataFormat = {
"Serial_Number": pandas.StringDtype(),
"LogTime0": pandas.StringDtype(), # @todo force rename
"Id0": pandas.StringDtype(), # @todo force rename
"DriveId": pandas.StringDtype(),
"JobRunId": pandas.StringDtype(),
"LogTime1": pandas.StringDtype(), # @todo force rename
"Comment0": pandas.StringDtype(), # @todo force rename
"CriticalWarning": pandas.StringDtype(),
"Temperature": pandas.StringDtype(),
"AvailableSpare": pandas.StringDtype(),
"AvailableSpareThreshold": pandas.StringDtype(),
"PercentageUsed": pandas.StringDtype(),
"DataUnitsReadL": pandas.StringDtype(),
"DataUnitsReadU": pandas.StringDtype(),
"DataUnitsWrittenL": pandas.StringDtype(),
"DataUnitsWrittenU": pandas.StringDtype(),
"HostReadCommandsL": pandas.StringDtype(),
"HostReadCommandsU": pandas.StringDtype(),
"HostWriteCommandsL": pandas.StringDtype(),
"HostWriteCommandsU": pandas.StringDtype(),
"ControllerBusyTimeL": pandas.StringDtype(),
"ControllerBusyTimeU": pandas.StringDtype(),
"PowerCyclesL": pandas.StringDtype(),
"PowerCyclesU": pandas.StringDtype(),
"PowerOnHoursL": pandas.StringDtype(),
"PowerOnHoursU": pandas.StringDtype(),
"UnsafeShutdownsL": pandas.StringDtype(),
"UnsafeShutdownsU": pandas.StringDtype(),
"MediaErrorsL": pandas.StringDtype(),
"MediaErrorsU": pandas.StringDtype(),
"NumErrorInfoLogsL": pandas.StringDtype(),
"NumErrorInfoLogsU": pandas.StringDtype(),
"ProgramFailCountN": pandas.StringDtype(),
"ProgramFailCountR": pandas.StringDtype(),
"EraseFailCountN": pandas.StringDtype(),
"EraseFailCountR": pandas.StringDtype(),
"WearLevelingCountN": pandas.StringDtype(),
"WearLevelingCountR": pandas.StringDtype(),
"E2EErrorDetectCountN": pandas.StringDtype(),
"E2EErrorDetectCountR": pandas.StringDtype(),
"CRCErrorCountN": pandas.StringDtype(),
"CRCErrorCountR": pandas.StringDtype(),
"MediaWearPercentageN": pandas.StringDtype(),
"MediaWearPercentageR": pandas.StringDtype(),
"HostReadsN": pandas.StringDtype(),
"HostReadsR": pandas.StringDtype(),
"TimedWorkloadN": pandas.StringDtype(),
"TimedWorkloadR": pandas.StringDtype(),
"ThermalThrottleStatusN": pandas.StringDtype(),
"ThermalThrottleStatusR": pandas.StringDtype(),
"RetryBuffOverflowCountN": pandas.StringDtype(),
"RetryBuffOverflowCountR": pandas.StringDtype(),
"PLLLockLossCounterN": pandas.StringDtype(),
"PLLLockLossCounterR": pandas.StringDtype(),
"NandBytesWrittenN": pandas.StringDtype(),
"NandBytesWrittenR": pandas.StringDtype(),
"HostBytesWrittenN": pandas.StringDtype(),
"HostBytesWrittenR": pandas.StringDtype(),
"SystemAreaLifeRemainingN": pandas.StringDtype(),
"SystemAreaLifeRemainingR": pandas.StringDtype(),
"RelocatableSectorCountN": pandas.StringDtype(),
"RelocatableSectorCountR": pandas.StringDtype(),
"SoftECCErrorRateN": pandas.StringDtype(),
"SoftECCErrorRateR": pandas.StringDtype(),
"UnexpectedPowerLossN": pandas.StringDtype(),
"UnexpectedPowerLossR": pandas.StringDtype(),
"MediaErrorCountN": pandas.StringDtype(),
"MediaErrorCountR": pandas.StringDtype(),
"NandBytesReadN": pandas.StringDtype(),
"NandBytesReadR": pandas.StringDtype(),
"WarningCompTempTime": pandas.StringDtype(),
"CriticalCompTempTime": pandas.StringDtype(),
"TempSensor1": pandas.StringDtype(),
"TempSensor2": pandas.StringDtype(),
"TempSensor3": pandas.StringDtype(),
"TempSensor4": pandas.StringDtype(),
"TempSensor5": pandas.StringDtype(),
"TempSensor6": pandas.StringDtype(),
"TempSensor7": pandas.StringDtype(),
"TempSensor8": pandas.StringDtype(),
"ThermalManagementTemp1TransitionCount": pandas.StringDtype(),
"ThermalManagementTemp2TransitionCount": pandas.StringDtype(),
"TotalTimeForThermalManagementTemp1": pandas.StringDtype(),
"TotalTimeForThermalManagementTemp2": pandas.StringDtype(),
"Core_Num": pandas.StringDtype(),
"Id1": pandas.StringDtype(), # @todo force rename
"Job_Run_Id": pandas.StringDtype(),
"Stats_Time": pandas.StringDtype(),
"HostReads": pandas.StringDtype(),
"HostWrites": pandas.StringDtype(),
"NandReads": pandas.StringDtype(),
"NandWrites": pandas.StringDtype(),
"ProgramErrors": pandas.StringDtype(),
"EraseErrors": pandas.StringDtype(),
"ErrorCount": pandas.StringDtype(),
"BitErrorsHost1": pandas.StringDtype(),
"BitErrorsHost2": pandas.StringDtype(),
"BitErrorsHost3": pandas.StringDtype(),
"BitErrorsHost4": pandas.StringDtype(),
"BitErrorsHost5": pandas.StringDtype(),
"BitErrorsHost6": pandas.StringDtype(),
"BitErrorsHost7": pandas.StringDtype(),
"BitErrorsHost8": pandas.StringDtype(),
"BitErrorsHost9": pandas.StringDtype(),
"BitErrorsHost10": pandas.StringDtype(),
"BitErrorsHost11": pandas.StringDtype(),
"BitErrorsHost12": pandas.StringDtype(),
"BitErrorsHost13": pandas.StringDtype(),
"BitErrorsHost14": pandas.StringDtype(),
"BitErrorsHost15": pandas.StringDtype(),
"ECCFail": pandas.StringDtype(),
"GrownDefects": pandas.StringDtype(),
"FreeMemory": pandas.StringDtype(),
"WriteAllowance": pandas.StringDtype(),
"ModelString": pandas.StringDtype(),
"ValidBlocks": pandas.StringDtype(),
"TokenBlocks": pandas.StringDtype(),
"SpuriousPFCount": pandas.StringDtype(),
"SpuriousPFLocations1": pandas.StringDtype(),
"SpuriousPFLocations2": pandas.StringDtype(),
"SpuriousPFLocations3": pandas.StringDtype(),
"SpuriousPFLocations4": pandas.StringDtype(),
"SpuriousPFLocations5": pandas.StringDtype(),
"SpuriousPFLocations6": pandas.StringDtype(),
"SpuriousPFLocations7": pandas.StringDtype(),
"SpuriousPFLocations8": pandas.StringDtype(),
"BitErrorsNonHost1": pandas.StringDtype(),
"BitErrorsNonHost2": pandas.StringDtype(),
"BitErrorsNonHost3": pandas.StringDtype(),
"BitErrorsNonHost4": pandas.StringDtype(),
"BitErrorsNonHost5": pandas.StringDtype(),
"BitErrorsNonHost6": pandas.StringDtype(),
"BitErrorsNonHost7": pandas.StringDtype(),
"BitErrorsNonHost8": pandas.StringDtype(),
"BitErrorsNonHost9": pandas.StringDtype(),
"BitErrorsNonHost10": pandas.StringDtype(),
"BitErrorsNonHost11": pandas.StringDtype(),
"BitErrorsNonHost12": pandas.StringDtype(),
"BitErrorsNonHost13": pandas.StringDtype(),
"BitErrorsNonHost14": pandas.StringDtype(),
"BitErrorsNonHost15": pandas.StringDtype(),
"ECCFailNonHost": pandas.StringDtype(),
"NSversion": pandas.StringDtype(),
"numBands": pandas.StringDtype(),
"minErase": pandas.StringDtype(),
"maxErase": pandas.StringDtype(),
"avgErase": pandas.StringDtype(),
"minMVolt": pandas.StringDtype(),
"maxMVolt": pandas.StringDtype(),
"avgMVolt": pandas.StringDtype(),
"minMAmp": pandas.StringDtype(),
"maxMAmp": pandas.StringDtype(),
"avgMAmp": pandas.StringDtype(),
"comment1": pandas.StringDtype(), # @todo force rename
"minMVolt12v": pandas.StringDtype(),
"maxMVolt12v": pandas.StringDtype(),
"avgMVolt12v": pandas.StringDtype(),
"minMAmp12v": pandas.StringDtype(),
"maxMAmp12v": pandas.StringDtype(),
"avgMAmp12v": pandas.StringDtype(),
"nearMissSector": pandas.StringDtype(),
"nearMissDefect": pandas.StringDtype(),
"nearMissOverflow": pandas.StringDtype(),
"replayUNC": pandas.StringDtype(),
"Drive_Id": pandas.StringDtype(),
"indirectionMisses": pandas.StringDtype(),
"BitErrorsHost16": pandas.StringDtype(),
"BitErrorsHost17": pandas.StringDtype(),
"BitErrorsHost18": pandas.StringDtype(),
"BitErrorsHost19": pandas.StringDtype(),
"BitErrorsHost20": pandas.StringDtype(),
"BitErrorsHost21": pandas.StringDtype(),
"BitErrorsHost22": pandas.StringDtype(),
"BitErrorsHost23": pandas.StringDtype(),
"BitErrorsHost24": pandas.StringDtype(),
"BitErrorsHost25": pandas.StringDtype(),
"BitErrorsHost26": pandas.StringDtype(),
"BitErrorsHost27": pandas.StringDtype(),
"BitErrorsHost28": pandas.StringDtype(),
"BitErrorsHost29": pandas.StringDtype(),
"BitErrorsHost30": pandas.StringDtype(),
"BitErrorsHost31": pandas.StringDtype(),
"BitErrorsHost32": pandas.StringDtype(),
"BitErrorsHost33": pandas.StringDtype(),
"BitErrorsHost34": pandas.StringDtype(),
"BitErrorsHost35": pandas.StringDtype(),
"BitErrorsHost36": pandas.StringDtype(),
"BitErrorsHost37": pandas.StringDtype(),
"BitErrorsHost38": pandas.StringDtype(),
"BitErrorsHost39": pandas.StringDtype(),
"BitErrorsHost40": pandas.StringDtype(),
"XORRebuildSuccess": pandas.StringDtype(),
"XORRebuildFail": pandas.StringDtype(),
"BandReloForError": pandas.StringDtype(),
"mrrSuccess": pandas.StringDtype(),
"mrrFail": pandas.StringDtype(),
"mrrNudgeSuccess": pandas.StringDtype(),
"mrrNudgeHarmless": pandas.StringDtype(),
"mrrNudgeFail": pandas.StringDtype(),
"totalErases": pandas.StringDtype(),
"dieOfflineCount": pandas.StringDtype(),
"curtemp": pandas.StringDtype(),
"mintemp": pandas.StringDtype(),
"maxtemp": pandas.StringDtype(),
"oventemp": pandas.StringDtype(),
"allZeroSectors": pandas.StringDtype(),
"ctxRecoveryEvents": pandas.StringDtype(),
"ctxRecoveryErases": pandas.StringDtype(),
"NSversionMinor": pandas.StringDtype(),
"lifeMinTemp": pandas.StringDtype(),
"lifeMaxTemp": pandas.StringDtype(),
"powerCycles": pandas.StringDtype(),
"systemReads": pandas.StringDtype(),
"systemWrites": pandas.StringDtype(),
"readRetryOverflow": pandas.StringDtype(),
"unplannedPowerCycles": pandas.StringDtype(),
"unsafeShutdowns": pandas.StringDtype(),
"defragForcedReloCount": pandas.StringDtype(),
"bandReloForBDR": pandas.StringDtype(),
"bandReloForDieOffline": pandas.StringDtype(),
"bandReloForPFail": pandas.StringDtype(),
"bandReloForWL": pandas.StringDtype(),
"provisionalDefects": pandas.StringDtype(),
"uncorrectableProgErrors": pandas.StringDtype(),
"powerOnSeconds": pandas.StringDtype(),
"bandReloForChannelTimeout": pandas.StringDtype(),
"fwDowngradeCount": pandas.StringDtype(),
"dramCorrectablesTotal": pandas.StringDtype(),
"hb_id": pandas.StringDtype(),
"dramCorrectables1to1": pandas.StringDtype(),
"dramCorrectables4to1": pandas.StringDtype(),
"dramCorrectablesSram": pandas.StringDtype(),
"dramCorrectablesUnknown": pandas.StringDtype(),
"pliCapTestInterval": pandas.StringDtype(),
"pliCapTestCount": pandas.StringDtype(),
"pliCapTestResult": pandas.StringDtype(),
"pliCapTestTimeStamp": pandas.StringDtype(),
"channelHangSuccess": pandas.StringDtype(),
"channelHangFail": pandas.StringDtype(),
"BitErrorsHost41": pandas.StringDtype(),
"BitErrorsHost42": pandas.StringDtype(),
"BitErrorsHost43": pandas.StringDtype(),
"BitErrorsHost44": pandas.StringDtype(),
"BitErrorsHost45": pandas.StringDtype(),
"BitErrorsHost46": pandas.StringDtype(),
"BitErrorsHost47": pandas.StringDtype(),
"BitErrorsHost48": pandas.StringDtype(),
"BitErrorsHost49": pandas.StringDtype(),
"BitErrorsHost50": pandas.StringDtype(),
"BitErrorsHost51": pandas.StringDtype(),
"BitErrorsHost52": pandas.StringDtype(),
"BitErrorsHost53": pandas.StringDtype(),
"BitErrorsHost54": pandas.StringDtype(),
"BitErrorsHost55": pandas.StringDtype(),
"BitErrorsHost56": pandas.StringDtype(),
"mrrNearMiss": pandas.StringDtype(),
"mrrRereadAvg": pandas.StringDtype(),
"readDisturbEvictions": pandas.StringDtype(),
"L1L2ParityError": pandas.StringDtype(),
"pageDefects": pandas.StringDtype(),
"pageProvisionalTotal": pandas.StringDtype(),
"ASICTemp": pandas.StringDtype(),
"PMICTemp": pandas.StringDtype(),
"size": pandas.StringDtype(),
"lastWrite": pandas.StringDtype(),
"timesWritten": pandas.StringDtype(),
"maxNumContextBands": pandas.StringDtype(),
"blankCount": pandas.StringDtype(),
"cleanBands": pandas.StringDtype(),
"avgTprog": pandas.StringDtype(),
"avgEraseCount": pandas.StringDtype(),
"edtcHandledBandCnt": pandas.StringDtype(),
"bandReloForNLBA": pandas.StringDtype(),
"bandCrossingDuringPliCount": pandas.StringDtype(),
"bitErrBucketNum": pandas.StringDtype(),
"sramCorrectablesTotal": pandas.StringDtype(),
"l1SramCorrErrCnt": pandas.StringDtype(),
"l2SramCorrErrCnt": pandas.StringDtype(),
"parityErrorValue": pandas.StringDtype(),
"parityErrorType": pandas.StringDtype(),
"mrr_LutValidDataSize": pandas.StringDtype(),
"pageProvisionalDefects": pandas.StringDtype(),
"plisWithErasesInProgress": pandas.StringDtype(),
"lastReplayDebug": pandas.StringDtype(),
"externalPreReadFatals": pandas.StringDtype(),
"hostReadCmd": pandas.StringDtype(),
"hostWriteCmd": pandas.StringDtype(),
"trimmedSectors": pandas.StringDtype(),
"trimTokens": pandas.StringDtype(),
"mrrEventsInCodewords": pandas.StringDtype(),
"mrrEventsInSectors": pandas.StringDtype(),
"powerOnMicroseconds": pandas.StringDtype(),
"mrrInXorRecEvents": pandas.StringDtype(),
"mrrFailInXorRecEvents": pandas.StringDtype(),
"mrrUpperpageEvents": pandas.StringDtype(),
"mrrLowerpageEvents": pandas.StringDtype(),
"mrrSlcpageEvents": pandas.StringDtype(),
"mrrReReadTotal": pandas.StringDtype(),
"powerOnResets": pandas.StringDtype(),
"powerOnMinutes": pandas.StringDtype(),
"throttleOnMilliseconds": pandas.StringDtype(),
"ctxTailMagic": pandas.StringDtype(),
"contextDropCount": pandas.StringDtype(),
"lastCtxSequenceId": pandas.StringDtype(),
"currCtxSequenceId": pandas.StringDtype(),
"mbliEraseCount": pandas.StringDtype(),
"pageAverageProgramCount": pandas.StringDtype(),
"bandAverageEraseCount": pandas.StringDtype(),
"bandTotalEraseCount": pandas.StringDtype(),
"bandReloForXorRebuildFail": pandas.StringDtype(),
"defragSpeculativeMiss": pandas.StringDtype(),
"uncorrectableBackgroundScan": pandas.StringDtype(),
"BitErrorsHost57": pandas.StringDtype(),
"BitErrorsHost58": pandas.StringDtype(),
"BitErrorsHost59": pandas.StringDtype(),
"BitErrorsHost60": pandas.StringDtype(),
"BitErrorsHost61": pandas.StringDtype(),
"BitErrorsHost62": pandas.StringDtype(),
"BitErrorsHost63": pandas.StringDtype(),
"BitErrorsHost64": pandas.StringDtype(),
"BitErrorsHost65": pandas.StringDtype(),
"BitErrorsHost66": pandas.StringDtype(),
"BitErrorsHost67": pandas.StringDtype(),
"BitErrorsHost68": pandas.StringDtype(),
"BitErrorsHost69": pandas.StringDtype(),
"BitErrorsHost70": pandas.StringDtype(),
"BitErrorsHost71": pandas.StringDtype(),
"BitErrorsHost72": pandas.StringDtype(),
"BitErrorsHost73": pandas.StringDtype(),
"BitErrorsHost74": pandas.StringDtype(),
"BitErrorsHost75": pandas.StringDtype(),
"BitErrorsHost76": pandas.StringDtype(),
"BitErrorsHost77": pandas.StringDtype(),
"BitErrorsHost78": pandas.StringDtype(),
"BitErrorsHost79": pandas.StringDtype(),
"BitErrorsHost80": pandas.StringDtype(),
"bitErrBucketArray1": pandas.StringDtype(),
"bitErrBucketArray2": pandas.StringDtype(),
"bitErrBucketArray3": pandas.StringDtype(),
"bitErrBucketArray4": pandas.StringDtype(),
"bitErrBucketArray5": pandas.StringDtype(),
"bitErrBucketArray6": pandas.StringDtype(),
"bitErrBucketArray7": pandas.StringDtype(),
"bitErrBucketArray8": pandas.StringDtype(),
"bitErrBucketArray9": pandas.StringDtype(),
"bitErrBucketArray10": pandas.StringDtype(),
"bitErrBucketArray11": pandas.StringDtype(),
"bitErrBucketArray12": pandas.StringDtype(),
"bitErrBucketArray13": pandas.StringDtype(),
"bitErrBucketArray14": pandas.StringDtype(),
"bitErrBucketArray15": pandas.StringDtype(),
"bitErrBucketArray16": pandas.StringDtype(),
"bitErrBucketArray17": pandas.StringDtype(),
"bitErrBucketArray18": pandas.StringDtype(),
"bitErrBucketArray19": pandas.StringDtype(),
"bitErrBucketArray20": pandas.StringDtype(),
"bitErrBucketArray21": pandas.StringDtype(),
"bitErrBucketArray22": pandas.StringDtype(),
"bitErrBucketArray23": pandas.StringDtype(),
"bitErrBucketArray24": pandas.StringDtype(),
"bitErrBucketArray25": pandas.StringDtype(),
"bitErrBucketArray26": pandas.StringDtype(),
"bitErrBucketArray27": pandas.StringDtype(),
"bitErrBucketArray28": pandas.StringDtype(),
"bitErrBucketArray29": pandas.StringDtype(),
"bitErrBucketArray30": pandas.StringDtype(),
"bitErrBucketArray31": pandas.StringDtype(),
"bitErrBucketArray32": pandas.StringDtype(),
"bitErrBucketArray33": pandas.StringDtype(),
"bitErrBucketArray34": pandas.StringDtype(),
"bitErrBucketArray35": pandas.StringDtype(),
"bitErrBucketArray36": pandas.StringDtype(),
"bitErrBucketArray37": pandas.StringDtype(),
"bitErrBucketArray38": pandas.StringDtype(),
"bitErrBucketArray39": | pandas.StringDtype() | pandas.StringDtype |
import sys
import os
import traceback
from shapely.geometry import Point
import core.download as dlf
import pandas as pd
import geopandas as gpd
def err_to_parent(UDF):
def handling(connection, load, message):
try:
UDF(connection, load, message)
except Exception as e:
connection.send({"BREAK": e,
"TB": traceback.format_exc()})
handling.__name__ = UDF.__name__
return handling
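# Hedged sketch of what the decorator buys you: a wrapped worker that raises
# sends a {"BREAK": exception, "TB": traceback string} payload up the pipe
# instead of crashing the child process. The failing worker below is
# hypothetical, purely for illustration.
def _demo_err_to_parent():
    from multiprocessing import Pipe

    @err_to_parent
    def always_fails(connection, load, message):
        raise RuntimeError("boom")

    parent_end, child_end = Pipe()
    always_fails(child_end, None, {})
    return parent_end.recv()   # {'BREAK': RuntimeError('boom'), 'TB': '...'}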
@err_to_parent
def simple_groupby(connection, load, message):
"""
Return the result of a Pandas groupby method for
the load dataframe.
Inputs:
load (must be DataFrame or GeoDataFrame)
message['col_name'], the series to group by
message['groupby_method'], may be count, mean, sum, or median
Output: sends the result of the method back up the pipe
"""
if 'groupby_method' in message:
groupby_method = message['groupby_method']
else:
raise ValueError('groupby_method must be sum, count, mean, or median')
if 'col_name' not in message:
raise ValueError("message['col_name'] not defined")
if type(load).__name__ not in ['DataFrame', 'GeoDataFrame']:
raise TypeError('load must be DataFrame or GeoDataFrame, not {}'.format(
type(load).__name__))
valid_methods = ['sum', 'count', 'mean', 'median']
if groupby_method not in(valid_methods):
raise ValueError(
'groupby must be sum, count, mean, or median , not {}'.format(
groupby_method))
if groupby_method == 'sum':
grouped = load.groupby(message['col_name']).sum()
if groupby_method == 'count':
grouped = load.groupby(message['col_name']).count()
if groupby_method == 'mean':
grouped = load.groupby(message['col_name']).mean()
if groupby_method == 'median':
grouped = load.groupby(message['col_name']).median()
connection.send(grouped)
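# Hedged usage sketch for simple_groupby with an in-process Pipe and a toy
# DataFrame (column names are illustrative only).
def _demo_simple_groupby():
    from multiprocessing import Pipe
    parent_end, child_end = Pipe()
    toy = pd.DataFrame({"borough": ["BK", "BK", "QN"], "rides": [1, 2, 5]})
    simple_groupby(child_end, toy, {"col_name": "borough",
                                    "groupby_method": "sum"})
    return parent_end.recv()   # rides summed per borough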
@err_to_parent
def points_from_latlong(connection, load, message):
"""
Render a lat and long column to shapely points.
Inputs: connection, load (a DataFrame or GeoDataFrame), message (see below)
Message:
message['lat'], the key/column name for lattitude listlike
message['long'], the key/column name for the longitude listlike
message['out_column'], name of new points column to be created
Output: sends the load back up the pipe with a new points column
"""
long = message['long']
lat = message['lat']
load[message['out_column']] = [Point(x, y) for x, y in zip(
load[long], load[lat]
)]
connection.send(load)
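# Hedged usage sketch for points_from_latlong: builds shapely Points from a toy
# lat/long frame (column and output names are illustrative only).
def _demo_points_from_latlong():
    from multiprocessing import Pipe
    parent_end, child_end = Pipe()
    toy = pd.DataFrame({"lat": [40.71, 40.68], "long": [-73.99, -74.01]})
    points_from_latlong(child_end, toy, {"lat": "lat", "long": "long",
                                         "out_column": "geometry"})
    return parent_end.recv()   # same frame plus a 'geometry' column of Points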
@err_to_parent
def col_to_datetime (connection, load=None, message=None):
    # Convert each listed column of the load DataFrame to datetime
if type(load).__name__ == 'DataFrame':
if not isinstance(message['col_to_datetime'], list):
raise TypeError("message['col_to_datetime'] must be a list")
if message['col_to_datetime'] == []:
raise ValueError(
'List of columns to be converted to datetime object is empty')
for i in message['col_to_datetime']:
            load[i] = pd.to_datetime(load[i])
connection.send(load)
@err_to_parent
def data_down(connection, load, message):
#A simple way to use the scheduler for downloading data
if 'urls' not in message:
raise ValueError("Required value 'message['urls']' missing!")
for i in message['urls']:
dlf.simple_download(i)
print("Sending filepath!")
connection.send("Finished Downloading")
@err_to_parent
def pandas_frame_file(connection, load, message):
"""Simply read a file into a dataframe and send it back up the pipe.
Input: filepath, filetype, read_args in message.
Note: If message['use_chunk'] is True, then the function
takes the filepath from the scheduler's current chunk
Output: Sends a dataframe back up the pipe."""
if 'use_chunk' in message:
if message['use_chunk']:
filepath = message['chunk']
elif 'filepath' in message:
filepath = message['filepath']
try:
os.path.exists(filepath)
except Exception as e:
raise e
#Pick a reader and send back the df
df = _pd_reader_pick(message)
connection.send(df)
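# Hedged usage sketch for pandas_frame_file: 'input.csv' is a hypothetical path;
# read_args is passed straight through to the pandas reader.
def _demo_pandas_frame_file():
    from multiprocessing import Pipe
    parent_end, child_end = Pipe()
    message = {"filepath": "input.csv",
               "filetype": "csv",
               "read_args": {"nrows": 100}}
    pandas_frame_file(child_end, None, message)
    return parent_end.recv()   # the DataFrame, or a {'BREAK': ...} payload on error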
@err_to_parent
def simple_gpd_sjoin(connection, load, message):
"""
Perform an sjoin on two dataframes with GeoPandas.
If the load is not of type GeoDataFrame then try to
construct it into one.
The other GeoDataFrame is always constructed from a file.
Requires: Geopandas
Inputs: connection, load (the left side,
a DataFrame or GeoDataFrame), and message(see below)
Message:
message['path'], filepath for the right side of the join
message['sjoin_args'], required args for joining
the GeoDataFrames
message['GeoDataFrame_right_args'], required args for constructing
the GeoDataFrame from the path using the gpd.read_file command
message['GeoDataFrame_left_args'], optional args for constructing
the GeoDataFrame from the load using the gpd.read_file command
Output: send the sjoined load back up the pipe
"""
for i in ['sjoin_args', 'GeoDataFrame_right_args', 'path']:
if i not in message:
raise KeyError("{}".format(i))
if type(load).__name__ != 'GeoDataFrame':
if 'GeoDataFrame_left_args' in message:
left = gpd.GeoDataFrame(load, **message['GeoDataFrame_left_args'])
else:
raise ValueError('GeoDataFrame_left_args is required if the load'
' is not already a GeoDataFrame')
else:
left = load
    right = gpd.read_file(message['path'],
                          **message['GeoDataFrame_right_args'])
    sjoined = gpd.sjoin(left, right, **message['sjoin_args'])
    connection.send(sjoined)
@err_to_parent
def pd_simple_merge(connection, load, message):
"""
Perform a merge on two dataframes with the pandas merge.
The left DataFrame is always the load argument while the
right DataFrame is always constructed from a file.
Requires: Pandas
Inputs: connection, load (the left side,
a DataFrame), and message(see below)
Message:
message['path'], filepath for the right side of the join
message['merge_args'], required args for joining
the DataFrames
        message['DataFrame_right_args'], keyword args for reading
            the DataFrame from the path (assumed to be a CSV read with pd.read_csv)
Output: send the merged load back up the pipe
"""
if type(load).__name__ != 'DataFrame':
raise ValueError("load must be a Pandas DataFrame not {}".format(
type(load).__name__))
left = load
    # NOTE: assumes the right-hand file is a CSV readable by pd.read_csv
    right = pd.read_csv(message['path'], **message['DataFrame_right_args'])
merged = pd.merge(left, right, **message['merge_args'])
connection.send(merged)
def _pd_reader_pick(message):
    if message.get('use_chunk'):
        filepath = message['chunk']
    elif 'filepath' in message:
        filepath = message['filepath']
    else:
        raise ValueError("Must pass message['filepath'] or message['use_chunk']")
    if 'filetype' not in message:
raise ValueError("Must pass message['filetype'] \n"
"Supported types: 'csv', 'excel', 'pkl'")
#Construct dataframes (with args if neccessary)
if message['filetype'] == "csv":
if 'read_args' in message:
df = pd.read_csv(filepath, **message['read_args'])
else:
df = pd.read_csv(filepath)
if message['filetype'] == "pkl":
if 'read_args' in message:
df = | pd.read_pickle(filepath, **message['read_args']) | pandas.read_pickle |
import os
from distutils.util import strtobool
import numpy as np
import pytest
import opendp.smartnoise.core as sn
from tests import (TEST_PUMS_PATH, TEST_PUMS_NAMES)
# Used to skip showing plots, etc.
#
IS_CI_BUILD = strtobool(os.environ.get('IS_CI_BUILD', 'False'))
def test_multilayer_analysis(run=True):
with sn.Analysis() as analysis:
PUMS = sn.Dataset(path=TEST_PUMS_PATH, column_names=TEST_PUMS_NAMES)
age = sn.to_float(PUMS['age'])
sex = sn.to_bool(PUMS['sex'], true_label="TRUE")
age_clamped = sn.clamp(age, lower=0., upper=150.)
age_resized = sn.resize(age_clamped, number_rows=1000)
race = sn.to_float(PUMS['race'])
mean_age = sn.dp_mean(
data=race,
privacy_usage={'epsilon': .65},
data_lower=0.,
data_upper=100.,
data_rows=500
)
analysis.release()
sex_plus_22 = sn.add(
sn.to_float(sex),
22.,
left_rows=1000, left_lower=0., left_upper=1.)
sn.dp_mean(
age_resized / 2. + sex_plus_22,
privacy_usage={'epsilon': .1},
data_lower=mean_age - 5.2,
data_upper=102.,
data_rows=500) + 5.
sn.dp_variance(
data=sn.to_float(PUMS['educ']),
privacy_usage={'epsilon': .15},
data_rows=1000,
data_lower=0.,
data_upper=12.
)
# sn.dp_raw_moment(
# sn.to_float(PUMS['married']),
# privacy_usage={'epsilon': .15},
# data_rows=1000000,
# data_lower=0.,
# data_upper=12.,
# order=3
# )
#
# sn.dp_covariance(
# left=sn.to_float(PUMS['age']),
# right=sn.to_float(PUMS['married']),
# privacy_usage={'epsilon': .15},
# left_rows=1000,
# right_rows=1000,
# left_lower=0.,
# left_upper=1.,
# right_lower=0.,
# right_upper=1.
# )
if run:
analysis.release()
return analysis
def test_dp_linear_stats(run=True):
with sn.Analysis() as analysis:
dataset_pums = sn.Dataset(path=TEST_PUMS_PATH, column_names=TEST_PUMS_NAMES)
age = dataset_pums['age']
analysis.release()
num_records = sn.dp_count(
age,
privacy_usage={'epsilon': .5},
lower=0,
upper=10000
)
analysis.release()
print("number of records:", num_records.value)
vars = sn.to_float(dataset_pums[["age", "income"]])
covariance = sn.dp_covariance(
data=vars,
privacy_usage={'epsilon': .5},
data_lower=[0., 0.],
data_upper=[150., 150000.],
data_rows=num_records)
print("covariance released")
num_means = sn.dp_mean(
data=vars,
privacy_usage={'epsilon': .5},
data_lower=[0., 0.],
data_upper=[150., 150000.],
data_rows=num_records)
analysis.release()
print("covariance:\n", covariance.value)
print("means:\n", num_means.value)
age = sn.to_float(age)
age_variance = sn.dp_variance(
age,
privacy_usage={'epsilon': .5},
data_lower=0.,
data_upper=150.,
data_rows=num_records)
analysis.release()
print("age variance:", age_variance.value)
# If I clamp, impute, resize, then I can reuse their properties for multiple statistics
clamped_age = sn.clamp(age, lower=0., upper=100.)
imputed_age = sn.impute(clamped_age)
preprocessed_age = sn.resize(imputed_age, number_rows=num_records)
# properties necessary for mean are statically known
mean = sn.dp_mean(
preprocessed_age,
privacy_usage={'epsilon': .5}
)
# properties necessary for variance are statically known
variance = sn.dp_variance(
preprocessed_age,
privacy_usage={'epsilon': .5}
)
# sum doesn't need n, so I pass the data in before resizing
age_sum = sn.dp_sum(
imputed_age,
privacy_usage={'epsilon': .5}
)
# mean with lower, upper properties propagated up from prior bounds
transformed_mean = sn.dp_mean(
-(preprocessed_age + 2.),
privacy_usage={'epsilon': .5}
)
analysis.release()
print("age transformed mean:", transformed_mean.value)
# releases may be pieced together from combinations of smaller components
custom_mean = sn.laplace_mechanism(
sn.mean(preprocessed_age),
privacy_usage={'epsilon': .5})
custom_maximum = sn.laplace_mechanism(
sn.maximum(preprocessed_age),
privacy_usage={'epsilon': .5})
custom_maximum = sn.laplace_mechanism(
sn.maximum(preprocessed_age),
privacy_usage={'epsilon': .5})
custom_quantile = sn.laplace_mechanism(
sn.quantile(preprocessed_age, alpha=.5),
privacy_usage={'epsilon': 500})
income = sn.to_float(dataset_pums['income'])
income_max = sn.laplace_mechanism(
sn.maximum(income, data_lower=0., data_upper=1000000.),
privacy_usage={'epsilon': 10})
# releases may also be postprocessed and reused as arguments to more components
age_sum + custom_maximum * 23.
analysis.release()
print("laplace quantile:", custom_quantile.value)
age_histogram = sn.dp_histogram(
sn.to_int(age, lower=0, upper=100),
edges=list(range(0, 100, 25)),
null_value=150,
privacy_usage={'epsilon': 2.}
)
sex_histogram = sn.dp_histogram(
sn.to_bool(dataset_pums['sex'], true_label="1"),
privacy_usage={'epsilon': 2.}
)
education_histogram = sn.dp_histogram(
dataset_pums['educ'],
categories=["5", "7", "10"],
null_value="-1",
privacy_usage={'epsilon': 2.}
)
analysis.release()
print("age histogram: ", age_histogram.value)
print("sex histogram: ", sex_histogram.value)
print("education histogram: ", education_histogram.value)
if run:
analysis.release()
# get the mean computed when release() was called
print(mean.value)
print(variance.value)
return analysis
def test_dp_count(run=True):
with sn.Analysis() as analysis:
dataset_pums = sn.Dataset(path=TEST_PUMS_PATH, column_names=TEST_PUMS_NAMES)
count = sn.dp_count(
dataset_pums['sex'] == '1',
privacy_usage={'epsilon': 0.5})
if run:
analysis.release()
print(count.value)
return analysis
def test_raw_dataset(run=True):
with sn.Analysis() as analysis:
data = sn.to_float(sn.Dataset(value=[1., 2., 3., 4., 5.]))
sn.dp_mean(
data=data,
privacy_usage={'epsilon': 1},
data_lower=0.,
data_upper=10.,
data_rows=10,
data_columns=1)
if run:
analysis.release()
return analysis
def test_everything(run=True):
with sn.Analysis() as analysis:
data = sn.Dataset(path=TEST_PUMS_PATH, column_names=TEST_PUMS_NAMES)
age_int = sn.to_int(data['age'], 0, 150)
sex = sn.to_bool(data['sex'], "1")
educ = sn.to_float(data['educ'])
race = data['race']
income = sn.to_float(data['income'])
married = sn.to_bool(data['married'], "1")
numerics = sn.to_float(data[['age', 'income']])
# intentionally busted component
# print("invalid component id ", (sex + "a").component_id)
# broadcast scalar over 2d, broadcast scalar over 1d, columnar broadcasting, left and right mul
numerics * 2. + 2. * educ
# add different values for each column
numerics + [[1., 2.]]
# index into first column
age = sn.index(numerics, indices=0)
income = sn.index(numerics, mask=[False, True])
# boolean ops and broadcasting
mask = sex & married | (~married ^ False) | (age > 50.) | (age_int == 25)
# numerical clamping
sn.clamp(numerics, 0., [150., 150_000.])
sn.clamp(data['educ'], categories=[str(i) for i in range(8, 10)], null_value="-1")
sn.count(mask)
sn.covariance(age, income)
sn.digitize(educ, edges=[1., 3., 10.], null_value=-1)
# checks for safety against division by zero
income / 2.
income / sn.clamp(educ, 5., 20.)
sn.dp_count(data, privacy_usage={"epsilon": 0.5})
sn.dp_count(mask, privacy_usage={"epsilon": 0.5})
sn.dp_histogram(mask, privacy_usage={"epsilon": 0.5})
age = sn.impute(sn.clamp(age, 0., 150.))
sn.dp_maximum(age, privacy_usage={"epsilon": 0.5})
sn.dp_minimum(age, privacy_usage={"epsilon": 0.5})
sn.dp_median(age, privacy_usage={"epsilon": 0.5})
age_n = sn.resize(age, number_rows=800)
sn.dp_mean(age_n, privacy_usage={"epsilon": 0.5})
sn.dp_raw_moment(age_n, order=3, privacy_usage={"epsilon": 0.5})
sn.dp_sum(age, privacy_usage={"epsilon": 0.5})
sn.dp_variance(age_n, privacy_usage={"epsilon": 0.5})
sn.filter(income, mask)
race_histogram = sn.histogram(race, categories=["1", "2", "3"], null_value="3")
sn.histogram(income, edges=[0., 10000., 50000.], null_value=-1)
sn.dp_histogram(married, privacy_usage={"epsilon": 0.5})
sn.gaussian_mechanism(race_histogram, privacy_usage={"epsilon": 0.5, "delta": .000001})
sn.laplace_mechanism(race_histogram, privacy_usage={"epsilon": 0.5, "delta": .000001})
sn.raw_moment(educ, order=3)
sn.log(sn.clamp(educ, 0.001, 50.))
sn.maximum(educ)
sn.mean(educ)
sn.minimum(educ)
educ % 2.
educ ** 2.
sn.quantile(educ, .32)
sn.resize(educ, number_rows=1200, lower=0., upper=50.)
sn.resize(race, number_rows=1200, categories=["1", "2"], weights=[1, 2])
sn.resize(data[["age", "sex"]], 1200, categories=[["1", "2"], ["a", "b"]], weights=[1, 2])
sn.resize(
data[["age", "sex"]], 1200,
categories=[["1", "2"], ["a", "b", "c"]],
weights=[[1, 2], [3, 7, 2]])
sn.sum(educ)
sn.variance(educ)
if run:
analysis.release()
return analysis
def test_histogram():
import numpy as np
# establish data information
data = np.genfromtxt(TEST_PUMS_PATH, delimiter=',', names=True)
education_categories = ["1", "2", "3", "4", "5", "6", "7", "8", "9", "10", "11", "12", "13", "14", "15", "16", "17"]
income = list(data[:]['income'])
income_edges = list(range(0, 100_000, 10_000))
print('actual', np.histogram(income, bins=income_edges)[0])
with sn.Analysis() as analysis:
data = sn.Dataset(path=TEST_PUMS_PATH, column_names=TEST_PUMS_NAMES)
income = sn.to_int(data['income'], lower=0, upper=0)
sex = sn.to_bool(data['sex'], true_label="1")
income_histogram = sn.dp_histogram(
income,
edges=income_edges,
privacy_usage={'epsilon': 1.})
analysis.release()
print("Income histogram Geometric DP release: " + str(income_histogram.value))
def test_covariance():
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
data = np.genfromtxt(TEST_PUMS_PATH, delimiter=',', names=True)
with sn.Analysis() as analysis:
wn_data = sn.Dataset(path=TEST_PUMS_PATH, column_names=TEST_PUMS_NAMES)
# get full covariance matrix
cov = sn.dp_covariance(data=sn.to_float(wn_data['age', 'sex', 'educ', 'income', 'married']),
privacy_usage={'epsilon': 10},
data_lower=[0., 0., 1., 0., 0.],
data_upper=[100., 1., 16., 500_000., 1.],
data_rows=1000)
analysis.release()
# store DP covariance and correlation matrix
dp_cov = cov.value
print(dp_cov)
dp_corr = dp_cov / np.outer(np.sqrt(np.diag(dp_cov)), np.sqrt(np.diag(dp_cov)))
# get non-DP covariance/correlation matrices
age = list(data[:]['age'])
sex = list(data[:]['sex'])
educ = list(data[:]['educ'])
income = list(data[:]['income'])
married = list(data[:]['married'])
non_dp_cov = np.cov([age, sex, educ, income, married])
non_dp_corr = non_dp_cov / np.outer(np.sqrt(np.diag(non_dp_cov)), np.sqrt(np.diag(non_dp_cov)))
print('Non-DP Covariance Matrix:\n{0}\n\n'.format(pd.DataFrame(non_dp_cov)))
print('Non-DP Correlation Matrix:\n{0}\n\n'.format( | pd.DataFrame(non_dp_corr) | pandas.DataFrame |
#%%
# A. Importing packages and the necessary datasets, and constructing the final dataset
# i. Importing the packages
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from pandas_datareader import wb
import ipywidgets as widgets
#%%
# ii. Downloading data from the World Bank (Countries, Years and GDP Growth)
gdp = wb.download(indicator='NY.GDP.MKTP.KD.ZG', country=['all'], start=1997, end=2018)
gdp = gdp.rename(columns={'NY.GDP.MKTP.KD.ZG':'gdp growth'})
gdp = gdp.reset_index()
gdp['year'] = gdp['year'].astype(int)  # cast the year column from string to int
print(gdp)
# iii(a). The rest of the datasets were downloaded from OECD and imported manually
# iii(b). Data (Country code and Country)
country_codes = "./dataproject/data/ccode.xls"
ccode = | pd.read_excel(country_codes) | pandas.read_excel |
from collections import namedtuple
import pandas as pd
import numpy as np
Scores = namedtuple('Scores', ['Benign', 'Likely_benign', 'Uncertain_significance', 'not_provided',
'Conflicting_interpretations_of_pathogenicity',
'Likely_pathogenic', 'Pathogenic', 'missing'
])
class ClinvarLookup:
"""
Matches mutations against a file of Clinvar annotated mutations.
The Clinvar variant summary file must first be downloaded and unzipped.
https://ftp.ncbi.nlm.nih.gov/pub/clinvar/tab_delimited/variant_summary.txt.gz
Mutations are matched based on chromosome, position, reference nucleotide and mutant nucleotide.
    The scores are based on either the ClinicalSignificance or the ClinSigSimple column of the Clinvar file.
ClinSigSimple scores are used directly.
If using ClinicalSignificance, the scores can be customised.
By default, Likely_pathogenic and Pathogenic annotations are scored 1, and all
other annotations (Benign, Likely_benign, Uncertain_significance, not_provided,
Conflicting_interpretations_of_pathogenicity) are scored 0.
Where there are multiple annotations for the same mutation, the highest score will be used.
Mutations not present in the clinvar file are also given a score of 0 by default.
"""
default_scores = Scores(Benign=0, Likely_benign=0,
Uncertain_significance=0, not_provided=0,
Conflicting_interpretations_of_pathogenicity=0,
Likely_pathogenic=1, Pathogenic=1, missing=0)
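    # Illustrative usage sketch (hedged; the path and score values below are
    # hypothetical, not part of this module):
    #
    #     custom = Scores(Benign=0, Likely_benign=0, Uncertain_significance=0.5,
    #                     not_provided=0,
    #                     Conflicting_interpretations_of_pathogenicity=0.5,
    #                     Likely_pathogenic=1, Pathogenic=1, missing=0)
    #     lookup = ClinvarLookup("variant_summary.txt", "GRCh38", scores=custom)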
def __init__(self, clinvar_variant_summary_file, assembly, scores=None, clinsigsimple=False,
clinsigsimple_missing_score=0):
"""
:param clinvar_variant_summary_file: path to the Clinvar variant summary file.
:param assembly: The genome assembly to use. "GRCh37" or "GRCh38".
:param scores: The scoring system to use to convert ClinicalSignificance to numbers.
If None, will use the default scores.
:param clinsigsimple: Use the ClinSigSimple column to score the mutations instead of ClinicalSignificance.
:param clinsigsimple_missing_score: Score for missing mutations if using ClinSigSimple. Default=0.
"""
self.clinvar_data = | pd.read_csv(clinvar_variant_summary_file, sep="\t") | pandas.read_csv |
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import decimal
from datetime import datetime
from distutils.version import LooseVersion
import inspect
import sys
import unittest
from io import StringIO
from typing import List
import numpy as np
import pandas as pd
from pandas.tseries.offsets import DateOffset
from pyspark import StorageLevel
from pyspark.ml.linalg import SparseVector
from pyspark.sql.types import StructType
from pyspark import pandas as ps
from pyspark.pandas.config import option_context
from pyspark.pandas.exceptions import PandasNotImplementedError
from pyspark.pandas.frame import CachedDataFrame
from pyspark.pandas.missing.frame import _MissingPandasLikeDataFrame
from pyspark.pandas.typedef.typehints import (
extension_dtypes,
extension_dtypes_available,
extension_float_dtypes_available,
extension_object_dtypes_available,
)
from pyspark.testing.pandasutils import (
have_tabulate,
PandasOnSparkTestCase,
SPARK_CONF_ARROW_ENABLED,
tabulate_requirement_message,
)
from pyspark.testing.sqlutils import SQLTestUtils
from pyspark.pandas.utils import name_like_string
class DataFrameTest(PandasOnSparkTestCase, SQLTestUtils):
@property
def pdf(self):
return pd.DataFrame(
{"a": [1, 2, 3, 4, 5, 6, 7, 8, 9], "b": [4, 5, 6, 3, 2, 1, 0, 0, 0]},
index=np.random.rand(9),
)
@property
def psdf(self):
return ps.from_pandas(self.pdf)
@property
def df_pair(self):
pdf = self.pdf
psdf = ps.from_pandas(pdf)
return pdf, psdf
def test_dataframe(self):
pdf, psdf = self.df_pair
self.assert_eq(psdf["a"] + 1, pdf["a"] + 1)
self.assert_eq(psdf.columns, pd.Index(["a", "b"]))
self.assert_eq(psdf[psdf["b"] > 2], pdf[pdf["b"] > 2])
self.assert_eq(-psdf[psdf["b"] > 2], -pdf[pdf["b"] > 2])
self.assert_eq(psdf[["a", "b"]], pdf[["a", "b"]])
self.assert_eq(psdf.a, pdf.a)
self.assert_eq(psdf.b.mean(), pdf.b.mean())
self.assert_eq(psdf.b.var(), pdf.b.var())
self.assert_eq(psdf.b.std(), pdf.b.std())
pdf, psdf = self.df_pair
self.assert_eq(psdf[["a", "b"]], pdf[["a", "b"]])
self.assertEqual(psdf.a.notnull().rename("x").name, "x")
# check ps.DataFrame(ps.Series)
pser = pd.Series([1, 2, 3], name="x", index=np.random.rand(3))
psser = ps.from_pandas(pser)
self.assert_eq(pd.DataFrame(pser), ps.DataFrame(psser))
# check psdf[pd.Index]
pdf, psdf = self.df_pair
column_mask = pdf.columns.isin(["a", "b"])
index_cols = pdf.columns[column_mask]
self.assert_eq(psdf[index_cols], pdf[index_cols])
def _check_extension(self, psdf, pdf):
if LooseVersion("1.1") <= LooseVersion(pd.__version__) < LooseVersion("1.2.2"):
self.assert_eq(psdf, pdf, check_exact=False)
for dtype in psdf.dtypes:
self.assertTrue(isinstance(dtype, extension_dtypes))
else:
self.assert_eq(psdf, pdf)
@unittest.skipIf(not extension_dtypes_available, "pandas extension dtypes are not available")
def test_extension_dtypes(self):
pdf = pd.DataFrame(
{
"a": pd.Series([1, 2, None, 4], dtype="Int8"),
"b": pd.Series([1, None, None, 4], dtype="Int16"),
"c": pd.Series([1, 2, None, None], dtype="Int32"),
"d": pd.Series([None, 2, None, 4], dtype="Int64"),
}
)
psdf = ps.from_pandas(pdf)
self._check_extension(psdf, pdf)
self._check_extension(psdf + psdf, pdf + pdf)
@unittest.skipIf(not extension_dtypes_available, "pandas extension dtypes are not available")
def test_astype_extension_dtypes(self):
pdf = pd.DataFrame(
{
"a": [1, 2, None, 4],
"b": [1, None, None, 4],
"c": [1, 2, None, None],
"d": [None, 2, None, 4],
}
)
psdf = ps.from_pandas(pdf)
astype = {"a": "Int8", "b": "Int16", "c": "Int32", "d": "Int64"}
self._check_extension(psdf.astype(astype), pdf.astype(astype))
@unittest.skipIf(
not extension_object_dtypes_available, "pandas extension object dtypes are not available"
)
def test_extension_object_dtypes(self):
pdf = pd.DataFrame(
{
"a": pd.Series(["a", "b", None, "c"], dtype="string"),
"b": pd.Series([True, None, False, True], dtype="boolean"),
}
)
psdf = ps.from_pandas(pdf)
self._check_extension(psdf, pdf)
@unittest.skipIf(
not extension_object_dtypes_available, "pandas extension object dtypes are not available"
)
def test_astype_extension_object_dtypes(self):
pdf = pd.DataFrame({"a": ["a", "b", None, "c"], "b": [True, None, False, True]})
psdf = ps.from_pandas(pdf)
astype = {"a": "string", "b": "boolean"}
self._check_extension(psdf.astype(astype), pdf.astype(astype))
@unittest.skipIf(
not extension_float_dtypes_available, "pandas extension float dtypes are not available"
)
def test_extension_float_dtypes(self):
pdf = pd.DataFrame(
{
"a": pd.Series([1.0, 2.0, None, 4.0], dtype="Float32"),
"b": pd.Series([1.0, None, 3.0, 4.0], dtype="Float64"),
}
)
psdf = ps.from_pandas(pdf)
self._check_extension(psdf, pdf)
self._check_extension(psdf + 1, pdf + 1)
self._check_extension(psdf + psdf, pdf + pdf)
@unittest.skipIf(
not extension_float_dtypes_available, "pandas extension float dtypes are not available"
)
def test_astype_extension_float_dtypes(self):
pdf = pd.DataFrame({"a": [1.0, 2.0, None, 4.0], "b": [1.0, None, 3.0, 4.0]})
psdf = ps.from_pandas(pdf)
astype = {"a": "Float32", "b": "Float64"}
self._check_extension(psdf.astype(astype), pdf.astype(astype))
def test_insert(self):
#
# Basic DataFrame
#
pdf = pd.DataFrame([1, 2, 3])
psdf = ps.from_pandas(pdf)
psdf.insert(1, "b", 10)
pdf.insert(1, "b", 10)
self.assert_eq(psdf.sort_index(), pdf.sort_index(), almost=True)
psdf.insert(2, "c", 0.1)
pdf.insert(2, "c", 0.1)
self.assert_eq(psdf.sort_index(), pdf.sort_index(), almost=True)
psdf.insert(3, "d", psdf.b + 1)
pdf.insert(3, "d", pdf.b + 1)
self.assert_eq(psdf.sort_index(), pdf.sort_index(), almost=True)
psser = ps.Series([4, 5, 6])
self.assertRaises(ValueError, lambda: psdf.insert(0, "y", psser))
self.assertRaisesRegex(
ValueError, "cannot insert b, already exists", lambda: psdf.insert(1, "b", 10)
)
self.assertRaisesRegex(
TypeError,
'"column" should be a scalar value or tuple that contains scalar values',
lambda: psdf.insert(0, list("abc"), psser),
)
self.assertRaisesRegex(
TypeError,
"loc must be int",
lambda: psdf.insert((1,), "b", 10),
)
self.assertRaisesRegex(
NotImplementedError,
"Assigning column name as tuple is only supported for MultiIndex columns for now.",
lambda: psdf.insert(0, ("e",), 10),
)
self.assertRaises(ValueError, lambda: psdf.insert(0, "e", [7, 8, 9, 10]))
self.assertRaises(ValueError, lambda: psdf.insert(0, "f", ps.Series([7, 8])))
self.assertRaises(AssertionError, lambda: psdf.insert(100, "y", psser))
self.assertRaises(AssertionError, lambda: psdf.insert(1, "y", psser, allow_duplicates=True))
#
# DataFrame with MultiIndex as columns
#
pdf = pd.DataFrame({("x", "a", "b"): [1, 2, 3]})
psdf = ps.from_pandas(pdf)
psdf.insert(1, "b", 10)
pdf.insert(1, "b", 10)
self.assert_eq(psdf.sort_index(), pdf.sort_index(), almost=True)
psdf.insert(2, "c", 0.1)
pdf.insert(2, "c", 0.1)
self.assert_eq(psdf.sort_index(), pdf.sort_index(), almost=True)
psdf.insert(3, "d", psdf.b + 1)
pdf.insert(3, "d", pdf.b + 1)
self.assert_eq(psdf.sort_index(), pdf.sort_index(), almost=True)
self.assertRaisesRegex(
ValueError, "cannot insert d, already exists", lambda: psdf.insert(4, "d", 11)
)
self.assertRaisesRegex(
ValueError,
r"cannot insert \('x', 'a', 'b'\), already exists",
lambda: psdf.insert(4, ("x", "a", "b"), 11),
)
self.assertRaisesRegex(
ValueError,
'"column" must have length equal to number of column levels.',
lambda: psdf.insert(4, ("e",), 11),
)
def test_inplace(self):
pdf, psdf = self.df_pair
pser = pdf.a
psser = psdf.a
pdf["a"] = pdf["a"] + 10
psdf["a"] = psdf["a"] + 10
self.assert_eq(psdf, pdf)
self.assert_eq(psser, pser)
def test_assign_list(self):
pdf, psdf = self.df_pair
pser = pdf.a
psser = psdf.a
pdf["x"] = [10, 20, 30, 40, 50, 60, 70, 80, 90]
psdf["x"] = [10, 20, 30, 40, 50, 60, 70, 80, 90]
self.assert_eq(psdf.sort_index(), pdf.sort_index())
self.assert_eq(psser, pser)
with self.assertRaisesRegex(ValueError, "Length of values does not match length of index"):
psdf["z"] = [10, 20, 30, 40, 50, 60, 70, 80]
def test_dataframe_multiindex_columns(self):
pdf = pd.DataFrame(
{
("x", "a", "1"): [1, 2, 3],
("x", "b", "2"): [4, 5, 6],
("y.z", "c.d", "3"): [7, 8, 9],
("x", "b", "4"): [10, 11, 12],
},
index=np.random.rand(3),
)
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf, pdf)
self.assert_eq(psdf["x"], pdf["x"])
self.assert_eq(psdf["y.z"], pdf["y.z"])
self.assert_eq(psdf["x"]["b"], pdf["x"]["b"])
self.assert_eq(psdf["x"]["b"]["2"], pdf["x"]["b"]["2"])
self.assert_eq(psdf.x, pdf.x)
self.assert_eq(psdf.x.b, pdf.x.b)
self.assert_eq(psdf.x.b["2"], pdf.x.b["2"])
self.assertRaises(KeyError, lambda: psdf["z"])
self.assertRaises(AttributeError, lambda: psdf.z)
self.assert_eq(psdf[("x",)], pdf[("x",)])
self.assert_eq(psdf[("x", "a")], pdf[("x", "a")])
self.assert_eq(psdf[("x", "a", "1")], pdf[("x", "a", "1")])
def test_dataframe_column_level_name(self):
column = pd.Index(["A", "B", "C"], name="X")
pdf = pd.DataFrame([[1, 2, 3], [4, 5, 6]], columns=column, index=np.random.rand(2))
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf, pdf)
self.assert_eq(psdf.columns.names, pdf.columns.names)
self.assert_eq(psdf.to_pandas().columns.names, pdf.columns.names)
def test_dataframe_multiindex_names_level(self):
columns = pd.MultiIndex.from_tuples(
[("X", "A", "Z"), ("X", "B", "Z"), ("Y", "C", "Z"), ("Y", "D", "Z")],
names=["lvl_1", "lvl_2", "lv_3"],
)
pdf = pd.DataFrame(
[[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12], [13, 14, 15, 16], [17, 18, 19, 20]],
columns=columns,
index=np.random.rand(5),
)
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf.columns.names, pdf.columns.names)
self.assert_eq(psdf.to_pandas().columns.names, pdf.columns.names)
psdf1 = ps.from_pandas(pdf)
self.assert_eq(psdf1.columns.names, pdf.columns.names)
self.assertRaises(
AssertionError,
lambda: ps.DataFrame(psdf1._internal.copy(column_label_names=("level",))),
)
self.assert_eq(psdf["X"], pdf["X"])
self.assert_eq(psdf["X"].columns.names, pdf["X"].columns.names)
self.assert_eq(psdf["X"].to_pandas().columns.names, pdf["X"].columns.names)
self.assert_eq(psdf["X"]["A"], pdf["X"]["A"])
self.assert_eq(psdf["X"]["A"].columns.names, pdf["X"]["A"].columns.names)
self.assert_eq(psdf["X"]["A"].to_pandas().columns.names, pdf["X"]["A"].columns.names)
self.assert_eq(psdf[("X", "A")], pdf[("X", "A")])
self.assert_eq(psdf[("X", "A")].columns.names, pdf[("X", "A")].columns.names)
self.assert_eq(psdf[("X", "A")].to_pandas().columns.names, pdf[("X", "A")].columns.names)
self.assert_eq(psdf[("X", "A", "Z")], pdf[("X", "A", "Z")])
def test_itertuples(self):
pdf = pd.DataFrame({"num_legs": [4, 2], "num_wings": [0, 2]}, index=["dog", "hawk"])
psdf = ps.from_pandas(pdf)
for ptuple, ktuple in zip(
pdf.itertuples(index=False, name="Animal"), psdf.itertuples(index=False, name="Animal")
):
self.assert_eq(ptuple, ktuple)
for ptuple, ktuple in zip(pdf.itertuples(name=None), psdf.itertuples(name=None)):
self.assert_eq(ptuple, ktuple)
pdf.index = pd.MultiIndex.from_arrays(
[[1, 2], ["black", "brown"]], names=("count", "color")
)
psdf = ps.from_pandas(pdf)
for ptuple, ktuple in zip(pdf.itertuples(name="Animal"), psdf.itertuples(name="Animal")):
self.assert_eq(ptuple, ktuple)
pdf.columns = pd.MultiIndex.from_arrays(
[["CA", "WA"], ["age", "children"]], names=("origin", "info")
)
psdf = ps.from_pandas(pdf)
for ptuple, ktuple in zip(pdf.itertuples(name="Animal"), psdf.itertuples(name="Animal")):
self.assert_eq(ptuple, ktuple)
pdf = pd.DataFrame([1, 2, 3])
psdf = ps.from_pandas(pdf)
for ptuple, ktuple in zip(
(pdf + 1).itertuples(name="num"), (psdf + 1).itertuples(name="num")
):
self.assert_eq(ptuple, ktuple)
# DataFrames with a large number of columns (>254)
pdf = pd.DataFrame(np.random.random((1, 255)))
psdf = ps.from_pandas(pdf)
for ptuple, ktuple in zip(pdf.itertuples(name="num"), psdf.itertuples(name="num")):
self.assert_eq(ptuple, ktuple)
def test_iterrows(self):
pdf = pd.DataFrame(
{
("x", "a", "1"): [1, 2, 3],
("x", "b", "2"): [4, 5, 6],
("y.z", "c.d", "3"): [7, 8, 9],
("x", "b", "4"): [10, 11, 12],
},
index=np.random.rand(3),
)
psdf = ps.from_pandas(pdf)
for (pdf_k, pdf_v), (psdf_k, psdf_v) in zip(pdf.iterrows(), psdf.iterrows()):
self.assert_eq(pdf_k, psdf_k)
self.assert_eq(pdf_v, psdf_v)
def test_reset_index(self):
pdf = pd.DataFrame({"a": [1, 2, 3], "b": [4, 5, 6]}, index=np.random.rand(3))
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf.reset_index(), pdf.reset_index())
self.assert_eq(psdf.reset_index().index, pdf.reset_index().index)
self.assert_eq(psdf.reset_index(drop=True), pdf.reset_index(drop=True))
pdf.index.name = "a"
psdf.index.name = "a"
with self.assertRaisesRegex(ValueError, "cannot insert a, already exists"):
psdf.reset_index()
self.assert_eq(psdf.reset_index(drop=True), pdf.reset_index(drop=True))
# inplace
pser = pdf.a
psser = psdf.a
pdf.reset_index(drop=True, inplace=True)
psdf.reset_index(drop=True, inplace=True)
self.assert_eq(psdf, pdf)
self.assert_eq(psser, pser)
pdf.columns = ["index", "b"]
psdf.columns = ["index", "b"]
self.assert_eq(psdf.reset_index(), pdf.reset_index())
def test_reset_index_with_default_index_types(self):
pdf = pd.DataFrame({"a": [1, 2, 3], "b": [4, 5, 6]}, index=np.random.rand(3))
psdf = ps.from_pandas(pdf)
with ps.option_context("compute.default_index_type", "sequence"):
self.assert_eq(psdf.reset_index(), pdf.reset_index())
with ps.option_context("compute.default_index_type", "distributed-sequence"):
self.assert_eq(psdf.reset_index(), pdf.reset_index())
with ps.option_context("compute.default_index_type", "distributed"):
# the index is different.
self.assert_eq(psdf.reset_index().to_pandas().reset_index(drop=True), pdf.reset_index())
def test_reset_index_with_multiindex_columns(self):
index = pd.MultiIndex.from_tuples(
[("bird", "falcon"), ("bird", "parrot"), ("mammal", "lion"), ("mammal", "monkey")],
names=["class", "name"],
)
columns = pd.MultiIndex.from_tuples([("speed", "max"), ("species", "type")])
pdf = pd.DataFrame(
[(389.0, "fly"), (24.0, "fly"), (80.5, "run"), (np.nan, "jump")],
index=index,
columns=columns,
)
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf, pdf)
self.assert_eq(psdf.reset_index(), pdf.reset_index())
self.assert_eq(psdf.reset_index(level="class"), pdf.reset_index(level="class"))
self.assert_eq(
psdf.reset_index(level="class", col_level=1),
pdf.reset_index(level="class", col_level=1),
)
self.assert_eq(
psdf.reset_index(level="class", col_level=1, col_fill="species"),
pdf.reset_index(level="class", col_level=1, col_fill="species"),
)
self.assert_eq(
psdf.reset_index(level="class", col_level=1, col_fill="genus"),
pdf.reset_index(level="class", col_level=1, col_fill="genus"),
)
with self.assertRaisesRegex(IndexError, "Index has only 2 levels, not 3"):
psdf.reset_index(col_level=2)
pdf.index.names = [("x", "class"), ("y", "name")]
psdf.index.names = [("x", "class"), ("y", "name")]
self.assert_eq(psdf.reset_index(), pdf.reset_index())
with self.assertRaisesRegex(ValueError, "Item must have length equal to number of levels."):
psdf.reset_index(col_level=1)
def test_index_to_frame_reset_index(self):
def check(psdf, pdf):
self.assert_eq(psdf.reset_index(), pdf.reset_index())
self.assert_eq(psdf.reset_index(drop=True), pdf.reset_index(drop=True))
pdf.reset_index(drop=True, inplace=True)
psdf.reset_index(drop=True, inplace=True)
self.assert_eq(psdf, pdf)
pdf, psdf = self.df_pair
check(psdf.index.to_frame(), pdf.index.to_frame())
check(psdf.index.to_frame(index=False), pdf.index.to_frame(index=False))
check(psdf.index.to_frame(name="a"), pdf.index.to_frame(name="a"))
check(psdf.index.to_frame(index=False, name="a"), pdf.index.to_frame(index=False, name="a"))
check(psdf.index.to_frame(name=("x", "a")), pdf.index.to_frame(name=("x", "a")))
check(
psdf.index.to_frame(index=False, name=("x", "a")),
pdf.index.to_frame(index=False, name=("x", "a")),
)
def test_multiindex_column_access(self):
columns = pd.MultiIndex.from_tuples(
[
("a", "", "", "b"),
("c", "", "d", ""),
("e", "", "f", ""),
("e", "g", "", ""),
("", "", "", "h"),
("i", "", "", ""),
]
)
pdf = pd.DataFrame(
[
(1, "a", "x", 10, 100, 1000),
(2, "b", "y", 20, 200, 2000),
(3, "c", "z", 30, 300, 3000),
],
columns=columns,
index=np.random.rand(3),
)
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf, pdf)
self.assert_eq(psdf["a"], pdf["a"])
self.assert_eq(psdf["a"]["b"], pdf["a"]["b"])
self.assert_eq(psdf["c"], pdf["c"])
self.assert_eq(psdf["c"]["d"], pdf["c"]["d"])
self.assert_eq(psdf["e"], pdf["e"])
self.assert_eq(psdf["e"][""]["f"], pdf["e"][""]["f"])
self.assert_eq(psdf["e"]["g"], pdf["e"]["g"])
self.assert_eq(psdf[""], pdf[""])
self.assert_eq(psdf[""]["h"], pdf[""]["h"])
self.assert_eq(psdf["i"], pdf["i"])
self.assert_eq(psdf[["a", "e"]], pdf[["a", "e"]])
self.assert_eq(psdf[["e", "a"]], pdf[["e", "a"]])
self.assert_eq(psdf[("a",)], pdf[("a",)])
self.assert_eq(psdf[("e", "g")], pdf[("e", "g")])
# self.assert_eq(psdf[("i",)], pdf[("i",)])
self.assert_eq(psdf[("i", "")], pdf[("i", "")])
self.assertRaises(KeyError, lambda: psdf[("a", "b")])
def test_repr_cache_invalidation(self):
# If there is any cache, inplace operations should invalidate it.
df = ps.range(10)
df.__repr__()
df["a"] = df["id"]
self.assertEqual(df.__repr__(), df.to_pandas().__repr__())
def test_repr_html_cache_invalidation(self):
# If there is any cache, inplace operations should invalidate it.
df = ps.range(10)
df._repr_html_()
df["a"] = df["id"]
self.assertEqual(df._repr_html_(), df.to_pandas()._repr_html_())
def test_empty_dataframe(self):
pdf = pd.DataFrame({"a": pd.Series([], dtype="i1"), "b": pd.Series([], dtype="str")})
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf, pdf)
with self.sql_conf({SPARK_CONF_ARROW_ENABLED: False}):
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf, pdf)
def test_all_null_dataframe(self):
pdf = pd.DataFrame(
{
"a": [None, None, None, "a"],
"b": [None, None, None, 1],
"c": [None, None, None] + list(np.arange(1, 2).astype("i1")),
"d": [None, None, None, 1.0],
"e": [None, None, None, True],
"f": [None, None, None] + list(pd.date_range("20130101", periods=1)),
},
)
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf.iloc[:-1], pdf.iloc[:-1])
with self.sql_conf({SPARK_CONF_ARROW_ENABLED: False}):
self.assert_eq(psdf.iloc[:-1], pdf.iloc[:-1])
pdf = pd.DataFrame(
{
"a": pd.Series([None, None, None], dtype="float64"),
"b": pd.Series([None, None, None], dtype="str"),
},
)
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf, pdf)
with self.sql_conf({SPARK_CONF_ARROW_ENABLED: False}):
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf, pdf)
def test_nullable_object(self):
pdf = pd.DataFrame(
{
"a": list("abc") + [np.nan, None],
"b": list(range(1, 4)) + [np.nan, None],
"c": list(np.arange(3, 6).astype("i1")) + [np.nan, None],
"d": list(np.arange(4.0, 7.0, dtype="float64")) + [np.nan, None],
"e": [True, False, True, np.nan, None],
"f": list(pd.date_range("20130101", periods=3)) + [np.nan, None],
},
index=np.random.rand(5),
)
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf, pdf)
with self.sql_conf({SPARK_CONF_ARROW_ENABLED: False}):
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf, pdf)
def test_assign(self):
pdf, psdf = self.df_pair
psdf["w"] = 1.0
pdf["w"] = 1.0
self.assert_eq(psdf, pdf)
psdf.w = 10.0
pdf.w = 10.0
self.assert_eq(psdf, pdf)
psdf[1] = 1.0
pdf[1] = 1.0
self.assert_eq(psdf, pdf)
psdf = psdf.assign(a=psdf["a"] * 2)
pdf = pdf.assign(a=pdf["a"] * 2)
self.assert_eq(psdf, pdf)
# multi-index columns
columns = pd.MultiIndex.from_tuples([("x", "a"), ("x", "b"), ("y", "w"), ("y", "v")])
pdf.columns = columns
psdf.columns = columns
psdf[("a", "c")] = "def"
pdf[("a", "c")] = "def"
self.assert_eq(psdf, pdf)
psdf = psdf.assign(Z="ZZ")
pdf = pdf.assign(Z="ZZ")
self.assert_eq(psdf, pdf)
psdf["x"] = "ghi"
pdf["x"] = "ghi"
self.assert_eq(psdf, pdf)
def test_head(self):
pdf, psdf = self.df_pair
self.assert_eq(psdf.head(2), pdf.head(2))
self.assert_eq(psdf.head(3), pdf.head(3))
self.assert_eq(psdf.head(0), pdf.head(0))
self.assert_eq(psdf.head(-3), pdf.head(-3))
self.assert_eq(psdf.head(-10), pdf.head(-10))
with option_context("compute.ordered_head", True):
self.assert_eq(psdf.head(), pdf.head())
def test_attributes(self):
psdf = self.psdf
self.assertIn("a", dir(psdf))
self.assertNotIn("foo", dir(psdf))
self.assertRaises(AttributeError, lambda: psdf.foo)
psdf = ps.DataFrame({"a b c": [1, 2, 3]})
self.assertNotIn("a b c", dir(psdf))
psdf = ps.DataFrame({"a": [1, 2], 5: [1, 2]})
self.assertIn("a", dir(psdf))
self.assertNotIn(5, dir(psdf))
def test_column_names(self):
pdf, psdf = self.df_pair
self.assert_eq(psdf.columns, pdf.columns)
self.assert_eq(psdf[["b", "a"]].columns, pdf[["b", "a"]].columns)
self.assert_eq(psdf["a"].name, pdf["a"].name)
self.assert_eq((psdf["a"] + 1).name, (pdf["a"] + 1).name)
self.assert_eq((psdf.a + psdf.b).name, (pdf.a + pdf.b).name)
self.assert_eq((psdf.a + psdf.b.rename("a")).name, (pdf.a + pdf.b.rename("a")).name)
self.assert_eq((psdf.a + psdf.b.rename()).name, (pdf.a + pdf.b.rename()).name)
self.assert_eq((psdf.a.rename() + psdf.b).name, (pdf.a.rename() + pdf.b).name)
self.assert_eq(
(psdf.a.rename() + psdf.b.rename()).name, (pdf.a.rename() + pdf.b.rename()).name
)
def test_rename_columns(self):
pdf = pd.DataFrame(
{"a": [1, 2, 3, 4, 5, 6, 7], "b": [7, 6, 5, 4, 3, 2, 1]}, index=np.random.rand(7)
)
psdf = ps.from_pandas(pdf)
psdf.columns = ["x", "y"]
pdf.columns = ["x", "y"]
self.assert_eq(psdf.columns, pd.Index(["x", "y"]))
self.assert_eq(psdf, pdf)
self.assert_eq(psdf._internal.data_spark_column_names, ["x", "y"])
self.assert_eq(psdf.to_spark().columns, ["x", "y"])
self.assert_eq(psdf.to_spark(index_col="index").columns, ["index", "x", "y"])
columns = pdf.columns
columns.name = "lvl_1"
psdf.columns = columns
self.assert_eq(psdf.columns.names, ["lvl_1"])
self.assert_eq(psdf, pdf)
msg = "Length mismatch: Expected axis has 2 elements, new values have 4 elements"
with self.assertRaisesRegex(ValueError, msg):
psdf.columns = [1, 2, 3, 4]
# Multi-index columns
pdf = pd.DataFrame(
{("A", "0"): [1, 2, 2, 3], ("B", "1"): [1, 2, 3, 4]}, index=np.random.rand(4)
)
psdf = ps.from_pandas(pdf)
columns = pdf.columns
self.assert_eq(psdf.columns, columns)
self.assert_eq(psdf, pdf)
pdf.columns = ["x", "y"]
psdf.columns = ["x", "y"]
self.assert_eq(psdf.columns, pd.Index(["x", "y"]))
self.assert_eq(psdf, pdf)
self.assert_eq(psdf._internal.data_spark_column_names, ["x", "y"])
self.assert_eq(psdf.to_spark().columns, ["x", "y"])
self.assert_eq(psdf.to_spark(index_col="index").columns, ["index", "x", "y"])
pdf.columns = columns
psdf.columns = columns
self.assert_eq(psdf.columns, columns)
self.assert_eq(psdf, pdf)
self.assert_eq(psdf._internal.data_spark_column_names, ["(A, 0)", "(B, 1)"])
self.assert_eq(psdf.to_spark().columns, ["(A, 0)", "(B, 1)"])
self.assert_eq(psdf.to_spark(index_col="index").columns, ["index", "(A, 0)", "(B, 1)"])
columns.names = ["lvl_1", "lvl_2"]
psdf.columns = columns
self.assert_eq(psdf.columns.names, ["lvl_1", "lvl_2"])
self.assert_eq(psdf, pdf)
self.assert_eq(psdf._internal.data_spark_column_names, ["(A, 0)", "(B, 1)"])
self.assert_eq(psdf.to_spark().columns, ["(A, 0)", "(B, 1)"])
self.assert_eq(psdf.to_spark(index_col="index").columns, ["index", "(A, 0)", "(B, 1)"])
def test_rename_dataframe(self):
pdf1 = pd.DataFrame({"A": [1, 2, 3], "B": [4, 5, 6]})
psdf1 = ps.from_pandas(pdf1)
self.assert_eq(
psdf1.rename(columns={"A": "a", "B": "b"}), pdf1.rename(columns={"A": "a", "B": "b"})
)
result_psdf = psdf1.rename(index={1: 10, 2: 20})
result_pdf = pdf1.rename(index={1: 10, 2: 20})
self.assert_eq(result_psdf, result_pdf)
# inplace
pser = result_pdf.A
psser = result_psdf.A
result_psdf.rename(index={10: 100, 20: 200}, inplace=True)
result_pdf.rename(index={10: 100, 20: 200}, inplace=True)
self.assert_eq(result_psdf, result_pdf)
self.assert_eq(psser, pser)
def str_lower(s) -> str:
return str.lower(s)
self.assert_eq(
psdf1.rename(str_lower, axis="columns"), pdf1.rename(str_lower, axis="columns")
)
def mul10(x) -> int:
return x * 10
self.assert_eq(psdf1.rename(mul10, axis="index"), pdf1.rename(mul10, axis="index"))
self.assert_eq(
psdf1.rename(columns=str_lower, index={1: 10, 2: 20}),
pdf1.rename(columns=str_lower, index={1: 10, 2: 20}),
)
idx = pd.MultiIndex.from_tuples([("X", "A"), ("X", "B"), ("Y", "C"), ("Y", "D")])
pdf2 = pd.DataFrame([[1, 2, 3, 4], [5, 6, 7, 8]], columns=idx)
psdf2 = ps.from_pandas(pdf2)
self.assert_eq(psdf2.rename(columns=str_lower), pdf2.rename(columns=str_lower))
self.assert_eq(
psdf2.rename(columns=str_lower, level=0), pdf2.rename(columns=str_lower, level=0)
)
self.assert_eq(
psdf2.rename(columns=str_lower, level=1), pdf2.rename(columns=str_lower, level=1)
)
pdf3 = pd.DataFrame([[1, 2], [3, 4], [5, 6], [7, 8]], index=idx, columns=list("ab"))
psdf3 = ps.from_pandas(pdf3)
self.assert_eq(psdf3.rename(index=str_lower), pdf3.rename(index=str_lower))
self.assert_eq(
psdf3.rename(index=str_lower, level=0), pdf3.rename(index=str_lower, level=0)
)
self.assert_eq(
psdf3.rename(index=str_lower, level=1), pdf3.rename(index=str_lower, level=1)
)
pdf4 = pdf2 + 1
psdf4 = psdf2 + 1
self.assert_eq(psdf4.rename(columns=str_lower), pdf4.rename(columns=str_lower))
pdf5 = pdf3 + 1
psdf5 = psdf3 + 1
self.assert_eq(psdf5.rename(index=str_lower), pdf5.rename(index=str_lower))
msg = "Either `index` or `columns` should be provided."
with self.assertRaisesRegex(ValueError, msg):
psdf1.rename()
msg = "`mapper` or `index` or `columns` should be either dict-like or function type."
with self.assertRaisesRegex(ValueError, msg):
psdf1.rename(mapper=[str_lower], axis=1)
msg = "Mapper dict should have the same value type."
with self.assertRaisesRegex(ValueError, msg):
psdf1.rename({"A": "a", "B": 2}, axis=1)
msg = r"level should be an integer between \[0, column_labels_level\)"
with self.assertRaisesRegex(ValueError, msg):
psdf2.rename(columns=str_lower, level=2)
def test_rename_axis(self):
index = pd.Index(["A", "B", "C"], name="index")
columns = pd.Index(["numbers", "values"], name="cols")
pdf = pd.DataFrame([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]], index=index, columns=columns)
psdf = ps.from_pandas(pdf)
for axis in [0, "index"]:
self.assert_eq(
pdf.rename_axis("index2", axis=axis).sort_index(),
psdf.rename_axis("index2", axis=axis).sort_index(),
)
self.assert_eq(
pdf.rename_axis(["index2"], axis=axis).sort_index(),
psdf.rename_axis(["index2"], axis=axis).sort_index(),
)
for axis in [1, "columns"]:
self.assert_eq(
pdf.rename_axis("cols2", axis=axis).sort_index(),
psdf.rename_axis("cols2", axis=axis).sort_index(),
)
self.assert_eq(
pdf.rename_axis(["cols2"], axis=axis).sort_index(),
psdf.rename_axis(["cols2"], axis=axis).sort_index(),
)
pdf2 = pdf.copy()
psdf2 = psdf.copy()
pdf2.rename_axis("index2", axis="index", inplace=True)
psdf2.rename_axis("index2", axis="index", inplace=True)
self.assert_eq(pdf2.sort_index(), psdf2.sort_index())
self.assertRaises(ValueError, lambda: psdf.rename_axis(["index2", "index3"], axis=0))
self.assertRaises(ValueError, lambda: psdf.rename_axis(["cols2", "cols3"], axis=1))
self.assertRaises(TypeError, lambda: psdf.rename_axis(mapper=["index2"], index=["index3"]))
self.assert_eq(
pdf.rename_axis(index={"index": "index2"}, columns={"cols": "cols2"}).sort_index(),
psdf.rename_axis(index={"index": "index2"}, columns={"cols": "cols2"}).sort_index(),
)
self.assert_eq(
pdf.rename_axis(index={"missing": "index2"}, columns={"missing": "cols2"}).sort_index(),
psdf.rename_axis(
index={"missing": "index2"}, columns={"missing": "cols2"}
).sort_index(),
)
self.assert_eq(
pdf.rename_axis(index=str.upper, columns=str.upper).sort_index(),
psdf.rename_axis(index=str.upper, columns=str.upper).sort_index(),
)
index = pd.MultiIndex.from_tuples(
[("A", "B"), ("C", "D"), ("E", "F")], names=["index1", "index2"]
)
columns = pd.MultiIndex.from_tuples(
[("numbers", "first"), ("values", "second")], names=["cols1", "cols2"]
)
pdf = pd.DataFrame([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]], index=index, columns=columns)
psdf = ps.from_pandas(pdf)
for axis in [0, "index"]:
self.assert_eq(
pdf.rename_axis(["index3", "index4"], axis=axis).sort_index(),
psdf.rename_axis(["index3", "index4"], axis=axis).sort_index(),
)
for axis in [1, "columns"]:
self.assert_eq(
pdf.rename_axis(["cols3", "cols4"], axis=axis).sort_index(),
psdf.rename_axis(["cols3", "cols4"], axis=axis).sort_index(),
)
self.assertRaises(
ValueError, lambda: psdf.rename_axis(["index3", "index4", "index5"], axis=0)
)
self.assertRaises(ValueError, lambda: psdf.rename_axis(["cols3", "cols4", "cols5"], axis=1))
self.assert_eq(
pdf.rename_axis(index={"index1": "index3"}, columns={"cols1": "cols3"}).sort_index(),
psdf.rename_axis(index={"index1": "index3"}, columns={"cols1": "cols3"}).sort_index(),
)
self.assert_eq(
pdf.rename_axis(index={"missing": "index3"}, columns={"missing": "cols3"}).sort_index(),
psdf.rename_axis(
index={"missing": "index3"}, columns={"missing": "cols3"}
).sort_index(),
)
self.assert_eq(
pdf.rename_axis(
index={"index1": "index3", "index2": "index4"},
columns={"cols1": "cols3", "cols2": "cols4"},
).sort_index(),
psdf.rename_axis(
index={"index1": "index3", "index2": "index4"},
columns={"cols1": "cols3", "cols2": "cols4"},
).sort_index(),
)
self.assert_eq(
pdf.rename_axis(index=str.upper, columns=str.upper).sort_index(),
psdf.rename_axis(index=str.upper, columns=str.upper).sort_index(),
)
def test_dot(self):
psdf = self.psdf
with self.assertRaisesRegex(TypeError, "Unsupported type DataFrame"):
psdf.dot(psdf)
def test_dot_in_column_name(self):
self.assert_eq(
ps.DataFrame(ps.range(1)._internal.spark_frame.selectExpr("1L as `a.b`"))["a.b"],
ps.Series([1], name="a.b"),
)
def test_aggregate(self):
pdf = pd.DataFrame(
[[1, 2, 3], [4, 5, 6], [7, 8, 9], [np.nan, np.nan, np.nan]], columns=["A", "B", "C"]
)
psdf = ps.from_pandas(pdf)
self.assert_eq(
psdf.agg(["sum", "min"])[["A", "B", "C"]].sort_index(), # TODO?: fix column order
pdf.agg(["sum", "min"])[["A", "B", "C"]].sort_index(),
)
self.assert_eq(
psdf.agg({"A": ["sum", "min"], "B": ["min", "max"]})[["A", "B"]].sort_index(),
pdf.agg({"A": ["sum", "min"], "B": ["min", "max"]})[["A", "B"]].sort_index(),
)
self.assertRaises(KeyError, lambda: psdf.agg({"A": ["sum", "min"], "X": ["min", "max"]}))
# multi-index columns
columns = pd.MultiIndex.from_tuples([("X", "A"), ("X", "B"), ("Y", "C")])
pdf.columns = columns
psdf.columns = columns
self.assert_eq(
psdf.agg(["sum", "min"])[[("X", "A"), ("X", "B"), ("Y", "C")]].sort_index(),
pdf.agg(["sum", "min"])[[("X", "A"), ("X", "B"), ("Y", "C")]].sort_index(),
)
self.assert_eq(
psdf.agg({("X", "A"): ["sum", "min"], ("X", "B"): ["min", "max"]})[
[("X", "A"), ("X", "B")]
].sort_index(),
pdf.agg({("X", "A"): ["sum", "min"], ("X", "B"): ["min", "max"]})[
[("X", "A"), ("X", "B")]
].sort_index(),
)
self.assertRaises(TypeError, lambda: psdf.agg({"X": ["sum", "min"], "Y": ["min", "max"]}))
# non-string names
pdf = pd.DataFrame(
[[1, 2, 3], [4, 5, 6], [7, 8, 9], [np.nan, np.nan, np.nan]], columns=[10, 20, 30]
)
psdf = ps.from_pandas(pdf)
self.assert_eq(
psdf.agg(["sum", "min"])[[10, 20, 30]].sort_index(),
pdf.agg(["sum", "min"])[[10, 20, 30]].sort_index(),
)
self.assert_eq(
psdf.agg({10: ["sum", "min"], 20: ["min", "max"]})[[10, 20]].sort_index(),
pdf.agg({10: ["sum", "min"], 20: ["min", "max"]})[[10, 20]].sort_index(),
)
columns = pd.MultiIndex.from_tuples([("X", 10), ("X", 20), ("Y", 30)])
pdf.columns = columns
psdf.columns = columns
self.assert_eq(
psdf.agg(["sum", "min"])[[("X", 10), ("X", 20), ("Y", 30)]].sort_index(),
pdf.agg(["sum", "min"])[[("X", 10), ("X", 20), ("Y", 30)]].sort_index(),
)
self.assert_eq(
psdf.agg({("X", 10): ["sum", "min"], ("X", 20): ["min", "max"]})[
[("X", 10), ("X", 20)]
].sort_index(),
pdf.agg({("X", 10): ["sum", "min"], ("X", 20): ["min", "max"]})[
[("X", 10), ("X", 20)]
].sort_index(),
)
pdf = pd.DataFrame(
[datetime(2019, 2, 2, 0, 0, 0, 0), datetime(2019, 2, 3, 0, 0, 0, 0)],
columns=["timestamp"],
)
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf.timestamp.min(), pdf.timestamp.min())
self.assert_eq(psdf.timestamp.max(), pdf.timestamp.max())
self.assertRaises(ValueError, lambda: psdf.agg(("sum", "min")))
def test_droplevel(self):
pdf = (
pd.DataFrame([[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12]])
.set_index([0, 1])
.rename_axis(["a", "b"])
)
pdf.columns = pd.MultiIndex.from_tuples(
[("c", "e"), ("d", "f")], names=["level_1", "level_2"]
)
psdf = ps.from_pandas(pdf)
self.assertRaises(ValueError, lambda: psdf.droplevel(["a", "b"]))
self.assertRaises(ValueError, lambda: psdf.droplevel([1, 1, 1, 1, 1]))
self.assertRaises(IndexError, lambda: psdf.droplevel(2))
self.assertRaises(IndexError, lambda: psdf.droplevel(-3))
self.assertRaises(KeyError, lambda: psdf.droplevel({"a"}))
self.assertRaises(KeyError, lambda: psdf.droplevel({"a": 1}))
self.assertRaises(ValueError, lambda: psdf.droplevel(["level_1", "level_2"], axis=1))
self.assertRaises(IndexError, lambda: psdf.droplevel(2, axis=1))
self.assertRaises(IndexError, lambda: psdf.droplevel(-3, axis=1))
self.assertRaises(KeyError, lambda: psdf.droplevel({"level_1"}, axis=1))
self.assertRaises(KeyError, lambda: psdf.droplevel({"level_1": 1}, axis=1))
self.assert_eq(pdf.droplevel("a"), psdf.droplevel("a"))
self.assert_eq(pdf.droplevel(["a"]), psdf.droplevel(["a"]))
self.assert_eq(pdf.droplevel(("a",)), psdf.droplevel(("a",)))
self.assert_eq(pdf.droplevel(0), psdf.droplevel(0))
self.assert_eq(pdf.droplevel(-1), psdf.droplevel(-1))
self.assert_eq(pdf.droplevel("level_1", axis=1), psdf.droplevel("level_1", axis=1))
self.assert_eq(pdf.droplevel(["level_1"], axis=1), psdf.droplevel(["level_1"], axis=1))
self.assert_eq(pdf.droplevel(("level_1",), axis=1), psdf.droplevel(("level_1",), axis=1))
self.assert_eq(pdf.droplevel(0, axis=1), psdf.droplevel(0, axis=1))
self.assert_eq(pdf.droplevel(-1, axis=1), psdf.droplevel(-1, axis=1))
# Tupled names
pdf.columns.names = [("level", 1), ("level", 2)]
pdf.index.names = [("a", 10), ("x", 20)]
psdf = ps.from_pandas(pdf)
self.assertRaises(KeyError, lambda: psdf.droplevel("a"))
self.assertRaises(KeyError, lambda: psdf.droplevel(("a", 10)))
self.assert_eq(pdf.droplevel([("a", 10)]), psdf.droplevel([("a", 10)]))
self.assert_eq(
pdf.droplevel([("level", 1)], axis=1), psdf.droplevel([("level", 1)], axis=1)
)
# non-string names
pdf = (
pd.DataFrame([[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12]])
.set_index([0, 1])
.rename_axis([10.0, 20.0])
)
pdf.columns = pd.MultiIndex.from_tuples([("c", "e"), ("d", "f")], names=[100.0, 200.0])
psdf = ps.from_pandas(pdf)
self.assert_eq(pdf.droplevel(10.0), psdf.droplevel(10.0))
self.assert_eq(pdf.droplevel([10.0]), psdf.droplevel([10.0]))
self.assert_eq(pdf.droplevel((10.0,)), psdf.droplevel((10.0,)))
self.assert_eq(pdf.droplevel(0), psdf.droplevel(0))
self.assert_eq(pdf.droplevel(-1), psdf.droplevel(-1))
self.assert_eq(pdf.droplevel(100.0, axis=1), psdf.droplevel(100.0, axis=1))
self.assert_eq(pdf.droplevel(0, axis=1), psdf.droplevel(0, axis=1))
def test_drop(self):
pdf = pd.DataFrame({"x": [1, 2], "y": [3, 4], "z": [5, 6]}, index=np.random.rand(2))
psdf = ps.from_pandas(pdf)
# Assert 'labels' or 'columns' parameter is set
expected_error_message = "Need to specify at least one of 'labels' or 'columns'"
with self.assertRaisesRegex(ValueError, expected_error_message):
psdf.drop()
#
# Drop columns
#
# Assert using a str for 'labels' works
self.assert_eq(psdf.drop("x", axis=1), pdf.drop("x", axis=1))
self.assert_eq((psdf + 1).drop("x", axis=1), (pdf + 1).drop("x", axis=1))
# Assert using a list for 'labels' works
self.assert_eq(psdf.drop(["y", "z"], axis=1), pdf.drop(["y", "z"], axis=1))
self.assert_eq(psdf.drop(["x", "y", "z"], axis=1), pdf.drop(["x", "y", "z"], axis=1))
# Assert using 'columns' instead of 'labels' produces the same results
self.assert_eq(psdf.drop(columns="x"), pdf.drop(columns="x"))
self.assert_eq(psdf.drop(columns=["y", "z"]), pdf.drop(columns=["y", "z"]))
self.assert_eq(psdf.drop(columns=["x", "y", "z"]), pdf.drop(columns=["x", "y", "z"]))
self.assert_eq(psdf.drop(columns=[]), pdf.drop(columns=[]))
columns = pd.MultiIndex.from_tuples([(1, "x"), (1, "y"), (2, "z")])
pdf.columns = columns
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf.drop(columns=1), pdf.drop(columns=1))
self.assert_eq(psdf.drop(columns=(1, "x")), pdf.drop(columns=(1, "x")))
self.assert_eq(psdf.drop(columns=[(1, "x"), 2]), pdf.drop(columns=[(1, "x"), 2]))
self.assert_eq(
psdf.drop(columns=[(1, "x"), (1, "y"), (2, "z")]),
pdf.drop(columns=[(1, "x"), (1, "y"), (2, "z")]),
)
self.assertRaises(KeyError, lambda: psdf.drop(columns=3))
self.assertRaises(KeyError, lambda: psdf.drop(columns=(1, "z")))
pdf.index = pd.MultiIndex.from_tuples([("i", 0), ("j", 1)])
psdf = ps.from_pandas(pdf)
self.assert_eq(
psdf.drop(columns=[(1, "x"), (1, "y"), (2, "z")]),
pdf.drop(columns=[(1, "x"), (1, "y"), (2, "z")]),
)
# non-string names
pdf = pd.DataFrame({10: [1, 2], 20: [3, 4], 30: [5, 6]}, index=np.random.rand(2))
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf.drop(10, axis=1), pdf.drop(10, axis=1))
self.assert_eq(psdf.drop([20, 30], axis=1), pdf.drop([20, 30], axis=1))
#
# Drop rows
#
pdf = pd.DataFrame({"X": [1, 2, 3], "Y": [4, 5, 6], "Z": [7, 8, 9]}, index=["A", "B", "C"])
psdf = ps.from_pandas(pdf)
# Given labels (and axis = 0)
self.assert_eq(psdf.drop(labels="A", axis=0), pdf.drop(labels="A", axis=0))
self.assert_eq(psdf.drop(labels="A"), pdf.drop(labels="A"))
self.assert_eq((psdf + 1).drop(labels="A"), (pdf + 1).drop(labels="A"))
self.assert_eq(psdf.drop(labels=["A", "C"], axis=0), pdf.drop(labels=["A", "C"], axis=0))
self.assert_eq(
psdf.drop(labels=["A", "B", "C"], axis=0), pdf.drop(labels=["A", "B", "C"], axis=0)
)
with ps.option_context("compute.isin_limit", 2):
self.assert_eq(
psdf.drop(labels=["A", "B", "C"], axis=0), pdf.drop(labels=["A", "B", "C"], axis=0)
)
# Given index
self.assert_eq(psdf.drop(index="A"), pdf.drop(index="A"))
self.assert_eq(psdf.drop(index=["A", "C"]), pdf.drop(index=["A", "C"]))
self.assert_eq(psdf.drop(index=["A", "B", "C"]), pdf.drop(index=["A", "B", "C"]))
self.assert_eq(psdf.drop(index=[]), pdf.drop(index=[]))
with ps.option_context("compute.isin_limit", 2):
self.assert_eq(psdf.drop(index=["A", "B", "C"]), pdf.drop(index=["A", "B", "C"]))
# Non-string names
pdf.index = [10, 20, 30]
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf.drop(labels=10, axis=0), pdf.drop(labels=10, axis=0))
self.assert_eq(psdf.drop(labels=[10, 30], axis=0), pdf.drop(labels=[10, 30], axis=0))
self.assert_eq(
psdf.drop(labels=[10, 20, 30], axis=0), pdf.drop(labels=[10, 20, 30], axis=0)
)
with ps.option_context("compute.isin_limit", 2):
self.assert_eq(
psdf.drop(labels=[10, 20, 30], axis=0), pdf.drop(labels=[10, 20, 30], axis=0)
)
# MultiIndex
pdf.index = pd.MultiIndex.from_tuples([("a", "x"), ("b", "y"), ("c", "z")])
psdf = ps.from_pandas(pdf)
self.assertRaises(NotImplementedError, lambda: psdf.drop(labels=[("a", "x")]))
#
# Drop rows and columns
#
pdf = pd.DataFrame({"X": [1, 2, 3], "Y": [4, 5, 6], "Z": [7, 8, 9]}, index=["A", "B", "C"])
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf.drop(index="A", columns="X"), pdf.drop(index="A", columns="X"))
self.assert_eq(
psdf.drop(index=["A", "C"], columns=["X", "Z"]),
pdf.drop(index=["A", "C"], columns=["X", "Z"]),
)
self.assert_eq(
psdf.drop(index=["A", "B", "C"], columns=["X", "Z"]),
pdf.drop(index=["A", "B", "C"], columns=["X", "Z"]),
)
with ps.option_context("compute.isin_limit", 2):
self.assert_eq(
psdf.drop(index=["A", "B", "C"], columns=["X", "Z"]),
pdf.drop(index=["A", "B", "C"], columns=["X", "Z"]),
)
self.assert_eq(
psdf.drop(index=[], columns=["X", "Z"]),
pdf.drop(index=[], columns=["X", "Z"]),
)
self.assert_eq(
psdf.drop(index=["A", "B", "C"], columns=[]),
pdf.drop(index=["A", "B", "C"], columns=[]),
)
self.assert_eq(
psdf.drop(index=[], columns=[]),
pdf.drop(index=[], columns=[]),
)
self.assertRaises(
ValueError,
lambda: psdf.drop(labels="A", axis=0, columns="X"),
)
def _test_dropna(self, pdf, axis):
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf.dropna(axis=axis), pdf.dropna(axis=axis))
self.assert_eq(psdf.dropna(axis=axis, how="all"), pdf.dropna(axis=axis, how="all"))
self.assert_eq(psdf.dropna(axis=axis, subset=["x"]), pdf.dropna(axis=axis, subset=["x"]))
self.assert_eq(psdf.dropna(axis=axis, subset="x"), pdf.dropna(axis=axis, subset=["x"]))
self.assert_eq(
psdf.dropna(axis=axis, subset=["y", "z"]), pdf.dropna(axis=axis, subset=["y", "z"])
)
self.assert_eq(
psdf.dropna(axis=axis, subset=["y", "z"], how="all"),
pdf.dropna(axis=axis, subset=["y", "z"], how="all"),
)
self.assert_eq(psdf.dropna(axis=axis, thresh=2), pdf.dropna(axis=axis, thresh=2))
self.assert_eq(
psdf.dropna(axis=axis, thresh=1, subset=["y", "z"]),
pdf.dropna(axis=axis, thresh=1, subset=["y", "z"]),
)
pdf2 = pdf.copy()
psdf2 = psdf.copy()
pser = pdf2[pdf2.columns[0]]
psser = psdf2[psdf2.columns[0]]
pdf2.dropna(inplace=True, axis=axis)
psdf2.dropna(inplace=True, axis=axis)
self.assert_eq(psdf2, pdf2)
self.assert_eq(psser, pser)
# multi-index
columns = pd.MultiIndex.from_tuples([("a", "x"), ("a", "y"), ("b", "z")])
if axis == 0:
pdf.columns = columns
else:
pdf.index = columns
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf.dropna(axis=axis), pdf.dropna(axis=axis))
self.assert_eq(psdf.dropna(axis=axis, how="all"), pdf.dropna(axis=axis, how="all"))
self.assert_eq(
psdf.dropna(axis=axis, subset=[("a", "x")]), pdf.dropna(axis=axis, subset=[("a", "x")])
)
self.assert_eq(
psdf.dropna(axis=axis, subset=("a", "x")), pdf.dropna(axis=axis, subset=[("a", "x")])
)
self.assert_eq(
psdf.dropna(axis=axis, subset=[("a", "y"), ("b", "z")]),
pdf.dropna(axis=axis, subset=[("a", "y"), ("b", "z")]),
)
self.assert_eq(
psdf.dropna(axis=axis, subset=[("a", "y"), ("b", "z")], how="all"),
pdf.dropna(axis=axis, subset=[("a", "y"), ("b", "z")], how="all"),
)
self.assert_eq(psdf.dropna(axis=axis, thresh=2), pdf.dropna(axis=axis, thresh=2))
self.assert_eq(
psdf.dropna(axis=axis, thresh=1, subset=[("a", "y"), ("b", "z")]),
pdf.dropna(axis=axis, thresh=1, subset=[("a", "y"), ("b", "z")]),
)
def test_dropna_axis_index(self):
pdf = pd.DataFrame(
{
"x": [np.nan, 2, 3, 4, np.nan, 6],
"y": [1, 2, np.nan, 4, np.nan, np.nan],
"z": [1, 2, 3, 4, np.nan, np.nan],
},
index=np.random.rand(6),
)
psdf = ps.from_pandas(pdf)
self._test_dropna(pdf, axis=0)
# empty
pdf = pd.DataFrame(index=np.random.rand(6))
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf.dropna(), pdf.dropna())
self.assert_eq(psdf.dropna(how="all"), pdf.dropna(how="all"))
self.assert_eq(psdf.dropna(thresh=0), pdf.dropna(thresh=0))
self.assert_eq(psdf.dropna(thresh=1), pdf.dropna(thresh=1))
with self.assertRaisesRegex(ValueError, "No axis named foo"):
psdf.dropna(axis="foo")
self.assertRaises(KeyError, lambda: psdf.dropna(subset="1"))
with self.assertRaisesRegex(ValueError, "invalid how option: 1"):
psdf.dropna(how=1)
with self.assertRaisesRegex(TypeError, "must specify how or thresh"):
psdf.dropna(how=None)
def test_dropna_axis_column(self):
pdf = pd.DataFrame(
{
"x": [np.nan, 2, 3, 4, np.nan, 6],
"y": [1, 2, np.nan, 4, np.nan, np.nan],
"z": [1, 2, 3, 4, np.nan, np.nan],
},
index=[str(r) for r in np.random.rand(6)],
).T
self._test_dropna(pdf, axis=1)
psdf = ps.from_pandas(pdf)
with self.assertRaisesRegex(
ValueError, "The length of each subset must be the same as the index size."
):
psdf.dropna(subset=(["x", "y"]), axis=1)
# empty
pdf = pd.DataFrame({"x": [], "y": [], "z": []})
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf.dropna(axis=1), pdf.dropna(axis=1))
self.assert_eq(psdf.dropna(axis=1, how="all"), pdf.dropna(axis=1, how="all"))
self.assert_eq(psdf.dropna(axis=1, thresh=0), pdf.dropna(axis=1, thresh=0))
self.assert_eq(psdf.dropna(axis=1, thresh=1), pdf.dropna(axis=1, thresh=1))
def test_dtype(self):
pdf = pd.DataFrame(
{
"a": list("abc"),
"b": list(range(1, 4)),
"c": np.arange(3, 6).astype("i1"),
"d": np.arange(4.0, 7.0, dtype="float64"),
"e": [True, False, True],
"f": pd.date_range("20130101", periods=3),
},
index=np.random.rand(3),
)
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf, pdf)
self.assertTrue((psdf.dtypes == pdf.dtypes).all())
# multi-index columns
columns = pd.MultiIndex.from_tuples(zip(list("xxxyyz"), list("abcdef")))
pdf.columns = columns
psdf.columns = columns
self.assertTrue((psdf.dtypes == pdf.dtypes).all())
def test_fillna(self):
pdf = pd.DataFrame(
{
"x": [np.nan, 2, 3, 4, np.nan, 6],
"y": [1, 2, np.nan, 4, np.nan, np.nan],
"z": [1, 2, 3, 4, np.nan, np.nan],
},
index=np.random.rand(6),
)
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf, pdf)
self.assert_eq(psdf.fillna(-1), pdf.fillna(-1))
self.assert_eq(
psdf.fillna({"x": -1, "y": -2, "z": -5}), pdf.fillna({"x": -1, "y": -2, "z": -5})
)
self.assert_eq(pdf.fillna(method="ffill"), psdf.fillna(method="ffill"))
self.assert_eq(pdf.fillna(method="ffill", limit=2), psdf.fillna(method="ffill", limit=2))
self.assert_eq(pdf.fillna(method="bfill"), psdf.fillna(method="bfill"))
self.assert_eq(pdf.fillna(method="bfill", limit=2), psdf.fillna(method="bfill", limit=2))
pdf = pdf.set_index(["x", "y"])
psdf = ps.from_pandas(pdf)
# check multi index
self.assert_eq(psdf.fillna(-1), pdf.fillna(-1))
self.assert_eq(pdf.fillna(method="bfill"), psdf.fillna(method="bfill"))
self.assert_eq(pdf.fillna(method="ffill"), psdf.fillna(method="ffill"))
pser = pdf.z
psser = psdf.z
pdf.fillna({"x": -1, "y": -2, "z": -5}, inplace=True)
psdf.fillna({"x": -1, "y": -2, "z": -5}, inplace=True)
self.assert_eq(psdf, pdf)
self.assert_eq(psser, pser)
s_nan = pd.Series([-1, -2, -5], index=["x", "y", "z"], dtype=int)
self.assert_eq(psdf.fillna(s_nan), pdf.fillna(s_nan))
with self.assertRaisesRegex(NotImplementedError, "fillna currently only"):
psdf.fillna(-1, axis=1)
with self.assertRaisesRegex(NotImplementedError, "fillna currently only"):
psdf.fillna(-1, axis="columns")
with self.assertRaisesRegex(ValueError, "limit parameter for value is not support now"):
psdf.fillna(-1, limit=1)
with self.assertRaisesRegex(TypeError, "Unsupported.*DataFrame"):
psdf.fillna(pd.DataFrame({"x": [-1], "y": [-1], "z": [-1]}))
with self.assertRaisesRegex(TypeError, "Unsupported.*int64"):
psdf.fillna({"x": np.int64(-6), "y": np.int64(-4), "z": -5})
with self.assertRaisesRegex(ValueError, "Expecting 'pad', 'ffill', 'backfill' or 'bfill'."):
psdf.fillna(method="xxx")
with self.assertRaisesRegex(
ValueError, "Must specify a fillna 'value' or 'method' parameter."
):
psdf.fillna()
# multi-index columns
pdf = pd.DataFrame(
{
("x", "a"): [np.nan, 2, 3, 4, np.nan, 6],
("x", "b"): [1, 2, np.nan, 4, np.nan, np.nan],
("y", "c"): [1, 2, 3, 4, np.nan, np.nan],
},
index=np.random.rand(6),
)
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf.fillna(-1), pdf.fillna(-1))
self.assert_eq(
psdf.fillna({("x", "a"): -1, ("x", "b"): -2, ("y", "c"): -5}),
pdf.fillna({("x", "a"): -1, ("x", "b"): -2, ("y", "c"): -5}),
)
self.assert_eq(pdf.fillna(method="ffill"), psdf.fillna(method="ffill"))
self.assert_eq(pdf.fillna(method="ffill", limit=2), psdf.fillna(method="ffill", limit=2))
self.assert_eq(pdf.fillna(method="bfill"), psdf.fillna(method="bfill"))
self.assert_eq(pdf.fillna(method="bfill", limit=2), psdf.fillna(method="bfill", limit=2))
self.assert_eq(psdf.fillna({"x": -1}), pdf.fillna({"x": -1}))
self.assert_eq(
psdf.fillna({"x": -1, ("x", "b"): -2}), pdf.fillna({"x": -1, ("x", "b"): -2})
)
self.assert_eq(
psdf.fillna({("x", "b"): -2, "x": -1}), pdf.fillna({("x", "b"): -2, "x": -1})
)
# check multi index
pdf = pdf.set_index([("x", "a"), ("x", "b")])
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf.fillna(-1), pdf.fillna(-1))
self.assert_eq(
psdf.fillna({("x", "a"): -1, ("x", "b"): -2, ("y", "c"): -5}),
pdf.fillna({("x", "a"): -1, ("x", "b"): -2, ("y", "c"): -5}),
)
def test_isnull(self):
pdf = pd.DataFrame(
{"x": [1, 2, 3, 4, None, 6], "y": list("abdabd")}, index=np.random.rand(6)
)
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf.notnull(), pdf.notnull())
self.assert_eq(psdf.isnull(), pdf.isnull())
def test_to_datetime(self):
pdf = pd.DataFrame(
{"year": [2015, 2016], "month": [2, 3], "day": [4, 5]}, index=np.random.rand(2)
)
psdf = ps.from_pandas(pdf)
self.assert_eq(pd.to_datetime(pdf), ps.to_datetime(psdf))
def test_nunique(self):
pdf = pd.DataFrame({"A": [1, 2, 3], "B": [np.nan, 3, np.nan]}, index=np.random.rand(3))
psdf = ps.from_pandas(pdf)
# Assert NaNs are dropped by default
self.assert_eq(psdf.nunique(), pdf.nunique())
# Assert including NaN values
self.assert_eq(psdf.nunique(dropna=False), pdf.nunique(dropna=False))
# Assert approximate counts
self.assert_eq(
ps.DataFrame({"A": range(100)}).nunique(approx=True),
pd.Series([103], index=["A"]),
)
self.assert_eq(
ps.DataFrame({"A": range(100)}).nunique(approx=True, rsd=0.01),
| pd.Series([100], index=["A"]) | pandas.Series |
def NMDS_analysis(TaXon_table_xlsx, meta_data_to_test, taxonomic_level, width, height, nmds_s, max_iter_val, n_init_val, path_to_outdirs, template, font_size, color_discrete_sequence, nmds_dissimilarity):
import pandas as pd
import numpy as np
from skbio.diversity import beta_diversity
from sklearn.manifold import MDS
import plotly.graph_objects as go
import plotly.express as px
from pathlib import Path
import PySimpleGUI as sg
import os, webbrowser
from itertools import combinations
TaXon_table_xlsx = Path(TaXon_table_xlsx)
Meta_data_table_xlsx = Path(str(path_to_outdirs) + "/" + "Meta_data_table" + "/" + TaXon_table_xlsx.stem + "_metadata.xlsx")
TaXon_table_df = pd.read_excel(TaXon_table_xlsx, header=0).fillna("unidentified")
TaXon_table_samples = TaXon_table_df.columns.tolist()[10:]
Meta_data_table_df = pd.read_excel(Meta_data_table_xlsx, header=0).fillna("nan")
Meta_data_table_samples = Meta_data_table_df['Samples'].tolist()
metadata_list = Meta_data_table_df[meta_data_to_test].values.tolist()
metadata_loc = Meta_data_table_df.columns.tolist().index(meta_data_to_test)
## drop samples with metadata called nan (= empty)
drop_samples = [i[0] for i in Meta_data_table_df.values.tolist() if i[metadata_loc] == "nan"]
if drop_samples != []:
## filter the TaXon table
TaXon_table_df = TaXon_table_df.drop(drop_samples, axis=1)
TaXon_table_samples = TaXon_table_df.columns.tolist()[10:]
## also remove empty OTUs
row_filter_list = []
for row in TaXon_table_df.values.tolist():
reads = set(row[10:])
if reads != {0}:
row_filter_list.append(row)
columns = TaXon_table_df.columns.tolist()
TaXon_table_df = pd.DataFrame(row_filter_list, columns=columns)
Meta_data_table_df = pd.DataFrame([i for i in Meta_data_table_df.values.tolist() if i[0] not in drop_samples], columns=Meta_data_table_df.columns.tolist())
Meta_data_table_samples = Meta_data_table_df['Samples'].tolist()
## create a y axis title text
taxon_title = taxonomic_level.lower()
    ## adjust taxonomic level if necessary
if taxonomic_level in ["ASVs", "ESVs", "OTUs", "zOTUs"]:
taxon_title = taxonomic_level
taxonomic_level = "ID"
## create a subfolder for better sorting and overview
dirName = Path(str(path_to_outdirs) + "/" + "NMDS_plots" + "/" + TaXon_table_xlsx.stem + "/")
if not os.path.exists(dirName):
os.mkdir(dirName)
# check if the meta data differs
if len(set(Meta_data_table_df[meta_data_to_test])) == len(Meta_data_table_df['Samples'].tolist()):
sg.Popup("The meta data is unique for all samples. Please adjust the meta data table!", title=("Error"))
raise RuntimeError
if sorted(TaXon_table_samples) == sorted(Meta_data_table_samples):
samples = Meta_data_table_samples
## extract the relevant data
TaXon_table_df = TaXon_table_df[[taxonomic_level] + samples]
        ## define an aggregation function to combine multiple hits of one taxonomic level
aggregation_functions = {}
## define samples functions
for sample in samples:
## 'sum' will calculate the sum of p/a data
aggregation_functions[sample] = 'sum'
## define taxon level function
aggregation_functions[taxonomic_level] = 'first'
## create condensed dataframe
df_new = TaXon_table_df.groupby(TaXon_table_df[taxonomic_level]).aggregate(aggregation_functions)
if 'unidentified' in df_new.index:
df_new = df_new.drop('unidentified')
## collect reads
data = df_new[samples].transpose().values.tolist()
        ## calculate the distance matrix with the selected dissimilarity measure
jaccard_dm = beta_diversity(nmds_dissimilarity, data, samples)
## NMDS function
def nmds_function(matrix, dimensions):
nmds = MDS(n_components=dimensions, metric=False, dissimilarity='precomputed', max_iter=int(max_iter_val), n_init=int(n_init_val))
            nmds_results = nmds.fit(matrix[:100])
stress = round(nmds_results.stress_, 2)
nmds_array = nmds_results.embedding_
return({"stress":stress,"nmds_results":nmds_array})
answer = sg.PopupOKCancel("The NMDS calculation may take a while. Continue?")
if answer == "OK":
## test different dimensions
nmds_results_dict = {}
stress_dict = {}
for i in range(1,11):
nmds_results = nmds_function(jaccard_dm, i)
nmds_results_dict[i] = nmds_results
stress_dict[i] = nmds_results["stress"]
####################################################################################################
win2_active = True
layout2 = [[sg.Text("NMDS analysis options", size=(20,1))],
[sg.CB("Show stress plot", default=True, key="stress_plot")],
[sg.CB("Show NMDS 2D plot", default=True, key="2d_plot")],
[sg.CB("Show NMDS 3D plot", default=True, key="3d_plot")],
[sg.CB("Connect categories", default=True, key="draw_mesh")],
[sg.Text("")],
[sg.Button("Apply")]]
win2 = sg.Window('NMDS analysis', layout2, keep_on_top=False)
while True:
event2, values2 = win2.Read()
if event2 is None or event2 == 'Apply':
win2.close()
win2_active = False
break
####################################################################################################
## plot stress and dimensions
fig = go.Figure()
            fig.add_trace(go.Scatter(x=list(stress_dict.keys()), y=list(stress_dict.values()), mode='markers+lines', name="stress", marker=dict(color="Blue", size=int(10))))
fig.update_layout(showlegend=False, xaxis_title="Dimensions", yaxis_title="Stress")
fig.update_layout(height=int(600), width=int(800), template=template, showlegend=False, font_size=font_size, title_font_size=font_size)
## define output files
output_pdf = Path(str(dirName) + "/" + meta_data_to_test + "_" + taxon_title + "_stress.pdf")
output_html = Path(str(dirName) + "/" + meta_data_to_test + "_" + taxon_title + "_stress.html")
## write output files
fig.write_image(str(output_pdf))
fig.write_html(str(output_html))
## ask to show file
if values2['stress_plot'] == True:
webbrowser.open('file://' + str(output_html))
####################################################################################################
## plot 2D
stress = stress_dict[2]
if values2["draw_mesh"] == True:
## create dataframe from NMDS results
nmds_results_df = | pd.DataFrame(nmds_results_dict[2]["nmds_results"], index=[samples]) | pandas.DataFrame |
#!/usr/bin/env python
# -*- coding:utf-8 -*-
"""
Date: 2022/1/26 13:10
Desc: Shenwan (SW) indices - SW level-1, level-2 and level-3
http://www.swsindex.com/IdxMain.aspx
https://legulegu.com/stockdata/index-composition?industryCode=851921.SI
"""
import time
import json
import pandas as pd
from akshare.utils import demjson
import requests
from bs4 import BeautifulSoup
from akshare.index.cons import sw_headers, sw_payload, sw_url
def sw_index_representation_spot() -> pd.DataFrame:
"""
    SW (Shenwan) market-representation indices - real-time quotes
http://www.swsindex.com/idx0120.aspx?columnid=8831
    :return: real-time quote data of the SW market-representation indices
:rtype: pandas.DataFrame
"""
url = "http://www.swsindex.com/handler.aspx"
params = {
"tablename": "swzs",
"key": "L1",
"p": "1",
"where": "L1 in('801001','801002','801003','801005','801300','801901','801903','801905','801250','801260','801270','801280','802613')",
"orderby": "",
"fieldlist": "L1,L2,L3,L4,L5,L6,L7,L8,L11",
"pagecount": "9",
"timed": "1632300641756",
}
r = requests.get(url, params=params)
data_json = demjson.decode(r.text)
temp_df = pd.DataFrame(data_json["root"])
temp_df.columns = ["指数代码", "指数名称", "昨收盘", "今开盘", "成交额", "最高价", "最低价", "最新价", "成交量"]
temp_df["昨收盘"] = pd.to_numeric(temp_df["昨收盘"])
temp_df["今开盘"] = pd.to_numeric(temp_df["今开盘"])
temp_df["成交额"] = pd.to_numeric(temp_df["成交额"])
temp_df["最高价"] = pd.to_numeric(temp_df["最高价"])
temp_df["最低价"] = pd.to_numeric(temp_df["最低价"])
temp_df["最新价"] = pd.to_numeric(temp_df["最新价"])
temp_df["成交量"] = pd.to_numeric(temp_df["成交量"])
return temp_df
def sw_index_spot() -> pd.DataFrame:
"""
    SW (Shenwan) level-1 industry indices - real-time quotes
http://www.swsindex.com/idx0120.aspx?columnid=8832
    :return: real-time quote data of SW level-1 industry indices
:rtype: pandas.DataFrame
"""
url = "http://www.swsindex.com/handler.aspx"
result = []
for i in range(1, 3):
payload = sw_payload.copy()
payload.update({"p": i})
payload.update({"timed": int(time.time() * 1000)})
r = requests.post(url, headers=sw_headers, data=payload)
data = r.content.decode()
data = data.replace("'", '"')
data = json.loads(data)
result.extend(data["root"])
temp_df = pd.DataFrame(result)
temp_df["L2"] = temp_df["L2"].str.strip()
temp_df.columns = ["指数代码", "指数名称", "昨收盘", "今开盘", "成交额", "最高价", "最低价", "最新价", "成交量"]
temp_df["昨收盘"] = pd.to_numeric(temp_df["昨收盘"])
temp_df["今开盘"] = pd.to_numeric(temp_df["今开盘"])
temp_df["成交额"] = pd.to_numeric(temp_df["成交额"])
temp_df["最高价"] = pd.to_numeric(temp_df["最高价"])
temp_df["最低价"] = pd.to_numeric(temp_df["最低价"])
temp_df["最新价"] = pd.to_numeric(temp_df["最新价"])
temp_df["成交量"] = pd.to_numeric(temp_df["成交量"])
return temp_df
def sw_index_second_spot() -> pd.DataFrame:
"""
    SW (Shenwan) level-2 industry indices - real-time quotes
http://www.swsindex.com/idx0120.aspx?columnId=8833
    :return: real-time quote data of SW level-2 industry indices
:rtype: pandas.DataFrame
"""
result = []
for i in range(1, 6):
payload = {
"tablename": "swzs",
"key": "L1",
"p": "1",
"where": "L1 in('801011','801012','801013','801014','801015','801016','801021','801022','801023','801032','801033','801034','801035','801036','801037','801041','801051','801072','801073','801074','801075','801081','801082','801083','801084','801092','801093','801094','801101','801102','801111','801112','801123','801131','801132','801141','801142','801143','801151','801152','801153','801154','801155','801156','801161','801162','801163','801164','801171','801172','801173','801174','801175','801176','801177','801178','801181','801182','801191','801192','801193','801194','801202','801211','801212','801213','801214','801222','801223','801053','801054','801055','801076','801203','801204','801205','801711','801712','801713','801721','801722','801723','801724','801725','801731','801732','801733','801734','801741','801742','801743','801744','801751','801752','801761','801881','801017','801018')",
"orderby": "",
"fieldlist": "L1,L2,L3,L4,L5,L6,L7,L8,L11",
"pagecount": "98",
"timed": "",
}
payload.update({"p": i})
payload.update({"timed": int(time.time() * 1000)})
r = requests.post(sw_url, headers=sw_headers, data=payload)
data = r.content.decode()
data = data.replace("'", '"')
data = json.loads(data)
result.extend(data["root"])
temp_df = pd.DataFrame(result)
temp_df["L2"] = temp_df["L2"].str.strip()
temp_df.columns = ["指数代码", "指数名称", "昨收盘", "今开盘", "成交额", "最高价", "最低价", "最新价", "成交量"]
temp_df["昨收盘"] = pd.to_ | numeric(temp_df["昨收盘"]) | pandas.to_numeric |
import datareader
import dataextractor
import bandreader
import numpy as np
from _bisect import bisect
import matplotlib.pyplot as plt
import matplotlib.ticker as plticker
import pandas as pd
from scipy import stats
from sklearn import metrics
def full_signal_extract(path, ident):
"""Extract breathing and heartbeat features from one user and save features to file.
:param path: (str) main path to data, where user data is located in specific folders
:param ident: (str) user identifier
:return: Nothing. It saves features (dataframe) to a .csv file
"""
dataread = datareader.DataReader(path, ident) # initialize path to data
data = dataread.read_grc_data() # read from files
data = dataread.unwrap_grc_data() # unwrap phase. returns time and y values
samp_rate = round(len(data[1]) / max(data[0]))
dataextract = dataextractor.DataExtractor(data[0], data[1], samp_rate)
cog_res = dataread.read_cognitive_load_study(ident + '-primary-extract.txt')
    end_epoch_time = dataread.get_end_time_cognitive_load_study() # end time of the study
extracted_br_features = dataextract.raw_windowing_breathing(30, 1)
extracted_br_features['br_rate'] = np.array(extracted_br_features['br_rate'].rolling(6).mean())
extracted_br_features_roll_avg = extracted_br_features.loc[:, extracted_br_features.columns != 'times'].rolling(
6).mean()
extracted_br_features_roll_avg['times'] = extracted_br_features['times']
extracted_br_features_roll_avg['br_ok'] = extracted_br_features['br_ok']
extracted_hr_features = dataextract.raw_windowing_heartrate(10, 1)
extracted_hr_features = extracted_hr_features.drop(['hr_HRV_lf', 'hr_HRV_hf', 'hr_HRV_lf_hf'], axis=1)
extracted_hr_features_roll_avg = extracted_hr_features.loc[:, extracted_hr_features.columns != 'times'].rolling(
10).mean()
extracted_hr_features_roll_avg['times'] = extracted_hr_features['times']
extracted_hr_features_roll_avg['hr_ok'] = extracted_hr_features['hr_ok']
extracted_hr_features2 = dataextract.raw_windowing_heartrate(100, 1) # longer time to extract HRV frequency feat.
extracted_hr_features2 = extracted_hr_features2[['hr_HRV_lf', 'hr_HRV_hf', 'hr_HRV_lf_hf', 'times']]
extracted_hr_features2_roll_avg = extracted_hr_features2.loc[:, extracted_hr_features2.columns != 'times'].rolling(
10).mean()
extracted_hr_features2_roll_avg['times'] = extracted_hr_features2['times']
all_features = extracted_br_features_roll_avg
all_features = pd.merge(all_features, extracted_hr_features_roll_avg, on='times')
all_features = pd.merge(all_features, extracted_hr_features2_roll_avg, on='times')
task_timestamps = dataread.get_data_task_timestamps()
relax_timestamps = dataread.get_relax_timestamps()
bandread = bandreader.HeartRateBand(path + '_Hrates/', ident)
band_data = bandread.load()
band_data_time_start = bisect(band_data[0][:], end_epoch_time - data[0][-1] * 1000)
band_data_time_stop = bisect(band_data[0][:], end_epoch_time)
band_data = [band_data[0][band_data_time_start:band_data_time_stop],
band_data[1][band_data_time_start:band_data_time_stop]]
band_data_new__data = [(band_data[0] - band_data[0][0]) / 1000, band_data[1]]
hr_data = extracted_hr_features_roll_avg[['times', 'hr_rate']]
hr_data['times'] = hr_data['times'].astype(int)
band_data = pd.DataFrame()
band_data['times'] = band_data_new__data[0]
band_data['times'] = band_data['times'].astype(int)
band_data['band_rate'] = band_data_new__data[1]
band_data = band_data.drop_duplicates(subset=['times'])
together_data = pd.merge(hr_data, band_data, on='times')
together_data = together_data.dropna()
for i in range(len(all_features['times'])):
find_in_hr_data = bisect(together_data['times'], all_features['times'][i])
all_features.ix[i, 'band_rate'] = together_data['band_rate'][find_in_hr_data]
for i in range(len(cog_res)):
all_feat_ind_task_start = bisect(all_features['times'], task_timestamps[i][0])
all_feat_ind_task_end = bisect(all_features['times'], task_timestamps[i][1])
for j in cog_res.columns:
all_features.ix[all_feat_ind_task_start:all_feat_ind_task_end, j] = cog_res.iloc[i][j]
if cog_res.iloc[i][j] == 'GC' or cog_res.iloc[i][j] == 'PT':
all_features.ix[all_feat_ind_task_start:all_feat_ind_task_end, 'keyboard_task'] = True
elif cog_res.iloc[i][j] == 'HP' or cog_res.iloc[i][j] == 'FA' or cog_res.iloc[i][j] == 'NC' or \
cog_res.iloc[i][j] == 'SX':
all_features.ix[all_feat_ind_task_start:all_feat_ind_task_end, 'keyboard_task'] = False
for k in range(all_feat_ind_task_end - all_feat_ind_task_start + 1):
all_features.ix[k + all_feat_ind_task_start, 'on_task_or_break_index'] = k
for k in range(all_feat_ind_task_end - all_feat_ind_task_start, -1, -1):
all_features.ix[all_feat_ind_task_end - k, 'on_task_or_break_index_down'] = k
all_features.ix[all_feat_ind_task_start:all_feat_ind_task_end, 'on_task'] = True
for i in range(len(relax_timestamps)):
all_feat_ind_task_start = bisect(all_features['times'], relax_timestamps[i][0])
all_feat_ind_task_end = bisect(all_features['times'], relax_timestamps[i][1])
new_end = all_feat_ind_task_end + 30
# if i==0:
# continue
for k in range(all_feat_ind_task_end - all_feat_ind_task_start + 1):
all_features.ix[k + all_feat_ind_task_start, 'on_task_or_break_index'] = k
all_features.ix[k + all_feat_ind_task_start, 'consecutive_break'] = i
for k in range(new_end - all_feat_ind_task_start + 1):
all_features.ix[k + all_feat_ind_task_start, 'on_break_and_after_index'] = k
if k <= 15:
all_features.ix[k + all_feat_ind_task_start, 'engagement_increase'] = False
elif k <= 30:
all_features.ix[k + all_feat_ind_task_start, 'engagement_increase'] = np.nan
else:
all_features.ix[k + all_feat_ind_task_start, 'engagement_increase'] = True
for k in range(all_feat_ind_task_end - all_feat_ind_task_start, -1, -1):
all_features.ix[all_feat_ind_task_end - k, 'on_task_or_break_index_down'] = k
all_features.ix[all_feat_ind_task_start:all_feat_ind_task_end, 'on_task'] = False
all_features['person_id'] = cog_res['person_id'][0]
all_features.to_csv(path_or_buf=path + ident + '/' + ident + '-data.csv', index=False)
def extract_for_all_users_and_combine(path, idents, outfile):
for i in idents:
print(i)
full_signal_extract(path, i)
append_csv_files(path, idents, outfile)
def plot_all_full_signals(path, idents):
for i in idents:
print(i)
plot_whole_signal_and_tasks_times(path, i)
def compare_extracted_hr_and_band(path, ident):
"""Compater heart rates acquired wirelessly and with Microfost Band.
:param path: (str) main path to data, where user data is located in specific folders
:param ident: (str) user identifier
:return: MAE, MSE, CORRelation values of the aligned HR time series
"""
dataread = datareader.DataReader(path, ident) # initialize path to data
data = dataread.read_grc_data() # read from files
data = dataread.unwrap_grc_data() # unwrap phase. returns time and y values
samp_rate = round(len(data[1]) / max(data[0]))
dataextract = dataextractor.DataExtractor(data[0], data[1], samp_rate)
cog_res = dataread.read_cognitive_load_study(ident + '-primary-extract.txt')
    end_epoch_time = dataread.get_end_time_cognitive_load_study() # end time of the study
extracted_br_features = dataextract.raw_windowing_breathing(30, 1)
extracted_br_features['br_rate'] = np.array(extracted_br_features['br_rate'].rolling(6).mean())
extracted_br_features_roll_avg = extracted_br_features.loc[:, extracted_br_features.columns != 'times'].rolling(
6).mean()
extracted_br_features_roll_avg['times'] = extracted_br_features['times']
extracted_br_features_roll_avg['br_ok'] = extracted_br_features['br_ok']
extracted_hr_features = dataextract.raw_windowing_heartrate(10, 1)
extracted_hr_features = extracted_hr_features.drop(['hr_HRV_lf', 'hr_HRV_hf', 'hr_HRV_lf_hf'], axis=1)
extracted_hr_features_roll_avg = extracted_hr_features.loc[:, extracted_hr_features.columns != 'times'].rolling(
10).mean()
extracted_hr_features_roll_avg['times'] = extracted_hr_features['times']
extracted_hr_features_roll_avg['hr_ok1'] = extracted_hr_features['hr_ok']
bandread = bandreader.HeartRateBand(path + '_Hrates/', ident)
band_data = bandread.load()
band_data_time_start = bisect(band_data[0][:], end_epoch_time - data[0][-1] * 1000)
band_data_time_stop = bisect(band_data[0][:], end_epoch_time)
band_data = [band_data[0][band_data_time_start:band_data_time_stop],
band_data[1][band_data_time_start:band_data_time_stop]]
band_data_new_data = [(band_data[0] - band_data[0][0]) / 1000, band_data[1]]
plt.figure(1)
plt.clf()
plt.plot(extracted_hr_features_roll_avg['times'], extracted_hr_features_roll_avg['hr_rate'], color='orange',
label='Wi-Mind heart rate')
plt.plot(band_data_new_data[0], band_data_new_data[1], color='green', label='Microsoft Band heart rate')
plt.xlabel('time (s)')
plt.ylabel('heart rate')
plt.legend()
plt.show()
hr_data = extracted_hr_features_roll_avg[['times', 'hr_rate']]
hr_data['times'] = hr_data['times'].astype(int)
band_data = pd.DataFrame()
band_data['times'] = band_data_new_data[0]
band_data['times'] = band_data['times'].astype(int)
band_data['rate'] = band_data_new_data[1]
band_data = band_data.drop_duplicates(subset=['times'])
together_data = pd.merge(hr_data, band_data, on='times')
together_data = together_data.dropna()
# new_hr = res_ind[intersect]
# new_band = band_data_new__data[1][intersect]
mae = metrics.mean_absolute_error(together_data['rate'], together_data['hr_rate'])
mse = metrics.mean_squared_error(together_data['rate'], together_data['hr_rate'])
corr = stats.pearsonr(together_data['rate'], together_data['hr_rate'])
# print('mae amd mse: ', mae, mse)
return mae, mse, corr
def compare_hr_for_all_idents(path, idents):
compare_metrics = pd.DataFrame()
for i in idents:
print(i)
mae, mse, cor = compare_extracted_hr_and_band(path, i) # uncomment if comparing errors
df = pd.DataFrame([[i, mae, mse, cor[0]]], columns=['ID', 'MAE', 'MSE', 'COR'])
compare_metrics = compare_metrics.append(df, ignore_index=True)
print(compare_metrics)
def append_csv_files(path, idents, out_file_name_csv):
"""Goes through .csv files (all 'idents'), where all features are stored and combines them to one file."""
full_appended_frame = | pd.DataFrame() | pandas.DataFrame |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Mar 16 11:36:13 2021
@author: <NAME> (Finnish Meteorological Institute)
"""
import sys
import pandas as pd
import numpy as np
import xarray as xr
from satellitetools.biophys import SNAP_BIO_RMSE
def xr_dataset_to_timeseries(
xr_dataset,
variables,
add_uncertainty=False,
add_confidence_intervals=False,
confidence_level="95",
):
"""Compute timeseries dataframe from xr dataset.
Parameters
----------
xr_dataset : xarray dataset
variables : list
list of varbiale names as string.
add_uncertainty : bool, default False
Adds variable {variable}_uncertainty and confidence intervals to dataframe. Currently,
        uncertainty is equal to the standard error (se) or, if the variable is a biophysical
        variable from biophys_xarray, sqrt(se^2 + RMSE_mean^2), where RMSE_mean is the
        propagated uncertainty of the individual observation/pixel uncertainties.
Uncertainty for the individual pixels is considered to be the variable RMSE
from the SNAP biophysical processor developers
(see biophys_xarray.py and linked ATBD) (i.e. same for all pixels).
confidence_level : str, default "95"
Confidence level (%) for calculating the confidence interval bounds. Options "90", "95" & "99"
Returns
-------
df : pandas dataframe
Pandas dataframe with mean, std, se and percentage of NaNs inside AOI.
"""
df = pd.DataFrame({"Date": | pd.to_datetime(xr_dataset.time.values) | pandas.to_datetime |
# Module: Bachelor thesis
# Theme: Detect malicious/unusual Login Events
# Author: <NAME> <<EMAIL>>
# Status: 28.07.2021
import datetime
import pandas as pd
import re
import numpy as np
from joblib import dump
def read_features(data_path):
features = pd.read_csv(data_path, index_col=0)
return features
def get_column_names(features):
columns = features.columns.values.tolist()
return columns
def convert_time_features(features, columns):
if "hour" in columns:
hours, features = convert_hours(features)
else:
hours = None
if "day" in columns:
days, features = convert_days(features)
else:
days = None
return hours, days, features
def convert_hours(features: pd.DataFrame):
hours = features['hour']
def convert_hour(hour):
# https://www.kite.com/python/answers/how-to-convert-a-time-string-to-seconds-in-python
date_time = datetime.datetime.strptime(hour, "%H:%M:%S")
a_timedelta = date_time - datetime.datetime(1900, 1, 1)
seconds = a_timedelta.total_seconds()
return seconds
secs = map(convert_hour, features['hour'].tolist())
secs = list(secs)
features['hour'] = secs
return hours, features
def convert_days(features: pd.DataFrame):
days = features['day']
def convert_day(day):
date = datetime.datetime.strptime(day, "%Y-%m-%d")
date_delta = date - datetime.datetime(1900, 1, 1)
days_delta = date_delta.days
return days_delta
days_dif = map(convert_day, features['day'].tolist())
days_dif = list(days_dif)
features['day'] = days_dif
return days, features
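# Hedged illustration (not part of the original thesis code): the two converters above map
# "HH:MM:SS" strings to seconds since midnight and "YYYY-MM-DD" strings to days since
# 1900-01-01. The sample values below are invented for demonstration only.
def _example_time_conversion():
    df = pd.DataFrame({"hour": ["01:00:00", "00:00:30"], "day": ["1900-01-02", "1900-01-11"]})
    _, df = convert_hours(df)  # df["hour"] is now [3600.0, 30.0]
    _, df = convert_days(df)   # df["day"] is now [1, 10]
    return df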
def save_model_to_path(model, path, save_model):
if save_model:
dump(model, path + 'model/' + 'model.joblib')
def sort_features(features, ascending):
sorted_features = features.sort_values(by=['scores'], ascending=ascending)
return sorted_features
def convert_time_features_back(features, columns, hours, days):
if "hour" in columns:
features['hour'] = hours
if "day" in columns:
features['day'] = days
return features
def persist_result(features, path, anomaly_id):
features.loc[features['anomaly'] == anomaly_id].to_csv(path + 'results.csv')
def persist_rank_result(rank_method, path, features):
if rank_method == "m":
res = rank_mean(features)
elif rank_method == "v":
res = rank_with_var(features)
else:
res = rank_first(features)
res.to_csv(path + 'results.csv')
def rank_first(features: pd.DataFrame):
new_names = extract_index(features)
res = pd.DataFrame()
in_res = []
for i in range(len(new_names)):
if new_names[i] not in in_res:
res = res.append(features.iloc[i])
in_res.append(new_names[i])
return res
def extract_index(features: pd.DataFrame):
rownames = features.index.values.tolist()
new_names = []
for names in rownames:
new_names.append(re.sub('^X', "", re.sub('\\..*$', "", names)))
return new_names
def rank_mean(features: pd.DataFrame):
new_names = extract_index(features)
rownames = list(set(new_names))
new_names = np.array(new_names)
means = []
for names in rownames:
row_numbers = list(np.where(new_names == names)[0])
means.append(features["scores"].iloc[row_numbers].mean())
res = pd.DataFrame({"mean_score": means}, index=rownames)
return res
def rank_with_var(features: pd.DataFrame):
pd.options.mode.chained_assignment = None
new_names = extract_index(features)
rownames = list(set(new_names))
features['scores'] = (features['scores'] - features['scores'].min()) / (
features['scores'].max() - features['scores'].min())
features_without_scores = features.drop(['scores', 'anomaly', 'Identifier'], axis=1, errors='ignore')
users_with_vars = | pd.DataFrame(columns=features_without_scores.columns) | pandas.DataFrame |
import igraph as Graph
import pandas as pd
import os
import numpy as np
import spacy
from sklearn.cluster import KMeans
from pylab import *
import re
import time
import src.pickle_handler as ph
import src.relation_creator as rc
# the dataframe has been preprocessed by many other functions. However we only need a subset of this information to
# create a graph representation of the relations.
# distillDataframe() takes the dataframe given to it. It selects
# a) character_A
# b) character_B
# formate_bible should transform all rows, that contain >= 2 characters into multiple rows between all characters
# from [lukas, mark, maria] to [[lukas, mark],[lukas, maria], [maria, mark]]
def formate_bible(df_bible):
# Parameter
# df_bible : expects a pandas dataframe that consists of "characters" and "emotion" column
# Return
# df_bible_formate : pandas dataframe, that consists of 3 columns "character_A", "character_B", "emotion"
df_bible_formate = pd.DataFrame()
for i, row in df_bible.iterrows():
names = row["characters"]
emotion = row["emotion"]
names = names.replace("[", "")
names = names.replace("]", "")
names = names.replace(",", "|")
names = names.replace("'", "")
names = names.strip()
names = names.replace("\n", "")
names = names.split("|")
names_remove = names.copy()
if len(names) >= 2:
for name in names:
for r_name in names_remove:
if name != r_name:
new_row = {
"character_A": name.strip(),
"character_B": r_name.strip(),
"emotion": emotion,
}
df_bible_formate = df_bible_formate.append(
new_row, ignore_index=True
)
names_remove.remove(name)
print(" - Pre-processed the dataframe to run the graph generation")
return df_bible_formate
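# Hedged usage sketch (illustrative only): formate_bible expands every verse that lists
# several characters into one row per unordered character pair. The verse below is invented;
# the expected pair count follows from itertools.combinations.
def _example_formate_bible():
    from itertools import combinations
    df = pd.DataFrame({"characters": ["['Lukas', 'Mark', 'Maria']"], "emotion": [1.0]})
    expanded = formate_bible(df)
    # three unordered pairs: (Lukas, Mark), (Lukas, Maria), (Mark, Maria)
    assert len(expanded) == len(list(combinations(["Lukas", "Mark", "Maria"], 2)))
    return expanded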
# distillDataframe should turn the dataframe to distinct rows, which have been aggregated in terms of
# their emotion. The dataframe needs to be aggregated because characters may occur at multiple verses
# to not list those multiple times within an graph and to given an more "global" represenation of their
# emotional state the emotion is aggregated. If the emotion_mean > 0.75, the relation is considered to be positive
# if the emotion_mean < -0.75 the relation is considered to be negative.
# else wise it is neutral. The relation will later be used to project an color to the graph.
def distillDataframe(df_bible, load, threshold, save):
# Parameter
# df_bible : pandas dataframe of the bible
# load : determines if a csv should be loaded or if one has to be produced by the function, bool
# threshold : counts how often relations should occur before being considered reasonable
# save : if file should be saved at the end, bool
# i.e. one time mentions may not be displayed, integer
# Return
# df_distilled : pandas dataframe consistent of distinct relations
# label : unique list of all characters
    # create a list of labels (names) which have been detected in both columns character_A and character_B
file = os.path.join("src", "csv", "bibleTA_distilled" + "_" + str(threshold) + ".csv")
if load == True:
try:
df_distilled = pd.read_csv(file)
# get list of unique characters
A = df_distilled["character_A"].unique().tolist()
B = df_distilled["character_B"].unique().tolist()
label = A + B
label = list(set(label))
label.sort(reverse=False)
print(label)
try:
label.remove("")
except:
pass
print(" - Dataframe has been loaded successfully from : " + str(file))
except:
print(
" - WARNING: Could not load file, make sure to following file exists: " + str(file)
)
load = False
if load == False:
# get list of unique characters
A = df_bible["character_A"].unique().tolist()
B = df_bible["character_B"].unique().tolist()
label = A + B
label = list(set(label))
label.sort(reverse=False)
try:
label.remove("")
except:
pass
# create output dataframe to be further processed
df_distilled = pd.DataFrame()
# iterate over all labels
# only count in one direction e.g. "character_A" = "Lukas", "character_B"="Jesus" ;
# do not do a subsequent "character_b" = "Lukas", "character_a"="Jesus" search ;
# implemented by removal of labels in label_remove list
label_remove = label.copy()
for i, character_A in enumerate(label):
if (i + 1) % 10 == 0:
print(str(i + 1) + "/" + str(len(label)))
for character_B in label_remove:
if character_A != character_B:
# count emotions in both directions
subset_A = df_bible.loc[
(df_bible["character_A"] == character_A)
& (df_bible["character_B"] == character_B)
& (df_bible["emotion"].notna() == True)
]
subset_B = df_bible.loc[
(df_bible["character_A"] == character_B)
& (df_bible["character_B"] == character_A)
& (df_bible["emotion"].notna() == True)
]
# join both dataframes
frames = [subset_A, subset_B]
subset = pd.concat(frames, sort=False)
if check_empty(subset) == False and subset.shape[0] > threshold:
# calculate mean over emotions
emotion_mean = np.mean(subset["emotion"])
# round it to an absolute emotion (needed for coloring in graph)
if emotion_mean > 0.75:
emotion_mean = 1.0
elif emotion_mean < -0.75:
emotion_mean = -1.0
else:
emotion_mean = 0.0
# add new row to the distilled dataframe
# df_distilled will have distinct and aggregated emotion rows. From this we
# can create edge colors and the concrete edges of the graph
# sort names alphabetically, will later be "exploited" while shrinking the graph
new_row = {
"character_A": character_A,
"character_B": character_B,
"emotion": emotion_mean,
}
# create object from relation class like new_row
df_distilled = df_distilled.append(new_row, ignore_index=True)
label_remove.remove(character_A)
A = df_distilled["character_A"].unique().tolist()
B = df_distilled["character_B"].unique().tolist()
label = A + B
label = list(set(label))
print(" - Dataframe has been distilled successfully")
if save == True:
df_distilled.to_csv(file)
print(" - Distilled dataframe is saved to : " + str(file))
return df_distilled, label, load
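# Hedged sketch of the aggregation rule used in distillDataframe: the mean emotion between two
# characters is snapped to -1 / 0 / +1 around the +-0.75 cut-offs. Input values are invented.
def _example_emotion_rounding(emotion_scores):
    emotion_mean = np.mean(emotion_scores)
    if emotion_mean > 0.75:
        return 1.0   # clearly positive relation -> later drawn as a green edge
    elif emotion_mean < -0.75:
        return -1.0  # clearly negative relation -> red edge
    return 0.0       # everything in between counts as neutral -> black edge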
# plotGraph plots the graph based on its edges and edge colors and save it to the given path
# first it builds the graph based on its nodes and edges.
# than it tries to create the experiment folder, where it than saves the graph plot
# if no relation information is given in color_emotion all edges will be black.
def plotGraph(edges, color_emotion, label, location, exp):
# Parameter:
# edges : numerical edges which are mapped by the dict to its label, numpy array
# color_emotion : based on "emotion" column a color is chosen to represent the kind of relation, list
# black: neutral, red: bad, green: good
# label : unique list of all characters
# location : place to save the experiment to, string
# exp : name of the experiment, string
graph = Graph.Graph(n=len(label), edges=edges)
file = os.path.join(location, exp + ".png")
if color_emotion == []:
out = Graph.plot(
graph, vertex_size=10, vertex_color=["white"], vertex_label=label
)
out.save(file)
else:
out = Graph.plot(
graph,
vertex_size=10,
vertex_color=["white"],
vertex_label=label,
edge_color=color_emotion,
)
out.save(file)
print(" - Graph has been saved to : " + str(file))
# converts a distinct dataframe to numerical nodes and edges.
# has the option to include a value from {-1, 0, 1} which assigns a color to the edge
# between the neighboring nodes: {red, black, green};
# in the context of the bible evaluation this stands for {negative, neutral, positive}
def dataframe2graph(dataframe, relations, side_A, side_B, relation_value):
# Parameters:
# dataframe : pandas dataframe consistent of labels and relation between characters
# relations : should the evaluation consider a relation to be positive/neutral/negative, bool
# side_A : in which column of the dataframe will the function find neighboring node A, string
# side_B : in which column of the dataframe will the function find neighboring node B, string
# relation_value : in which column of the dataframe will the function find the color value, string
# Return
# edges : numpy array which transfers labels to numerical values
# color_emotion : returned if relations == True, to list of colors of the edges based on their relation, list
# label : unique list of labels which index matches the edges
A = dataframe[str(side_A)].unique().tolist()
B = dataframe[str(side_B)].unique().tolist()
label = A + B
label = list(set(label))
label.sort(reverse=False)
label2id = {l: i for i, l in enumerate(label)}
id2label = {i: l for i, l in enumerate(label)}
if relations == True:
# color dict for transfering the emotion score to a colored edge
color_dict = {1: "green", -1: "red", 0: "black"}
color_emotion = []
edges = []
for i, df_verse in dataframe.iterrows():
A = label2id[df_verse[str(side_A)]]
B = label2id[df_verse[str(side_B)]]
edges.append([A, B])
if relations == True:
relation = df_verse[str(relation_value)]
color_emotion.append(color_dict[relation])
print(" - Dataframe has been converted to numerical nodes and edges")
if relations == True:
return edges, color_emotion, label
else:
return edges, label
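# Hedged usage sketch for dataframe2graph; the two-row relation table is made up for illustration.
def _example_dataframe2graph():
    df = pd.DataFrame({"character_A": ["Jesus", "Jesus"],
                       "character_B": ["Lukas", "Maria"],
                       "emotion": [1.0, -1.0]})
    edges, colors, label = dataframe2graph(df, True, "character_A", "character_B", "emotion")
    # label is sorted alphabetically, edges hold index pairs into it, colors is ["green", "red"]
    return edges, colors, label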
# load csv in case a new textAnalytics output has been generated. can be set in main.
# loads the bible as csv and can differentiate between the old and the new testament
def loadCSV(testament, file):
# Parameter:
# testament : "new", "old", string
# give csv file name, string
# Return
# df_bible : pandas dataframe which contains the bible loaded from csv file
df_bible = pd.read_csv(file)
if testament == "new":
first_matthew_verse = df_bible.index[
(df_bible["book_id"] == "Matt")
& (df_bible["verse"] == 1)
& (df_bible["chapter"] == 1)
].tolist()[0]
df_bible = df_bible[first_matthew_verse:]
if testament == "old":
first_matthew_verse = df_bible.index[
(df_bible["book_id"] == "Matt")
& (df_bible["verse"] == 1)
& (df_bible["chapter"] == 1)
].tolist()[0]
df_bible = df_bible[: (first_matthew_verse - 1)]
return df_bible
# main function, which calls all the other functions and can be called from outside.
# can be given a dataframe (or it loads one from the folder)
# can also be given load, which loads the last distilled dataframe with distinct
# character_A to character_B mappings with an aggregated emotion value
def getGraph(df_bible, load, threshold, testament, location, file):
# Parameter:
# df_bible : pandas dataframe, may be given from outside
# load : load calculations from previous run
# threshold : counts the encounterments of two characters in one verse, int
# testament : "old", "new", else both testaments, string
# location : name of the experiment, string
# file : csv file name used to load the data, string
# Return:
# df_relation : pandas dataframe of the relations, dataframe consistes of ["character_A", "character_B", "emotion"]
# label_list : unique list of characters in dataframe
# load : chain of trust in load; if one load fails all subsequent are set to False, bool
# loads bible dataframe if not given one
if not isinstance(df_bible, pd.DataFrame):
df_bible = loadCSV(testament=testament, file=file)
df_bible = pd.DataFrame(data=df_bible)
df_bible = formate_bible(df_bible)
df_relation, label_list, load = distillDataframe(
df_bible, load, threshold=threshold, save=True
)
# convert distilled data to nodes and edges. Also generate colored edges
edges, color_emotion, label = dataframe2graph(df_relation, True, "character_A", "character_B", "emotion")
# make and plot graph + save to path
plotGraph(edges, color_emotion, label, location=location, exp="1_emotion_graph")
return df_relation, label_list, load
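# Hedged usage sketch: getGraph can also be called without a pre-loaded dataframe. The csv file
# name and output folder below are assumptions for illustration, not fixed by this module.
def _example_getGraph():
    df_relation, label_list, load = getGraph(
        df_bible=None,            # forces loadCSV() to read the csv itself
        load=False,               # re-distill instead of loading a cached distilled csv
        threshold=2,              # keep pairs that co-occur in more than 2 verses
        testament="new",
        location="experiment_1",  # existing folder the graph png is written to
        file="bibleTA_emotion.csv",
    )
    return df_relation, label_list, load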
# This function creates the pickle objects using the relation_creator. It needs a distilled csv to do the work.
def create_pickle_objects(df_emotion):
# parameter :
# df_emotion : pandas dataframe with relations, just distilled
rc.create_char_relation(df_emotion)
rc.create_character_keywords()
# loads the pickle objects in order to process the keywords of each person
def load_pickle_objects():
pickle_obj = ph.PickleHandler()
pickle_list = pickle_obj.load_characters()
labels = []
res = []
temp_res = []
for obj in pickle_list:
name = obj.name
labels.append(name)
most_frequent_words = obj.most_frequent_words
for word in most_frequent_words:
temp_res.append(word[0])
res.append(temp_res)
temp_res = []
return labels, res
# cluster keywords and, for each cluster, create a list of people that have enough keywords (above the threshold share) coming from that cluster
def cluster_data(num_cluster, threshold):
# Parameter:
    # num_cluster: number of cluster centroids - results in labels for keywords, int
    # threshold: minimum share of a person's keywords that must come from a cluster to add the person to that cluster, float
# return:
# df_cluster: pandas dataframe, consistent of cluster name and character
file = os.path.join("src", "csv", "clustered_keywords_" + str(num_cluster) + str(int(1/threshold)) + ".csv")
# load the pickle objects to find keyword clusters
characters, res = load_pickle_objects()
# extract distinct keywords to convert them to word-vectors and afterwards determine clusters
distinct_res = []
for keywords_res in res:
for keyword in keywords_res:
if keyword not in distinct_res:
distinct_res.append(keyword)
# load spaCy's word2vec
nlp = spacy.load("en_core_web_lg")
    # vectorize the list of distinct keywords
# 300 is vector length of spacy word representation
vectorized_distinct_res = np.empty((len(distinct_res), 300))
for i, keyword in enumerate(distinct_res):
word_vector = nlp(keyword).vector
vectorized_distinct_res[i, :] = word_vector
# cluster word vectors in "num_cluster" cluster
kmeans = KMeans(n_clusters=num_cluster)
clustered_words = kmeans.fit_predict(vectorized_distinct_res)
# dict that gives cluster to word, to then convert the keywords of character to keywords in cluster
# e.g. Jesus, keywords = ["Lord", "raised from dead", "son", "god"]
# into: Jesus, clustered_res = [1, 0, 4, 2]
keyword2cluster = {
keyword: cluster for keyword, cluster in zip(distinct_res, clustered_words)
}
clustered_res = []
temp = []
for i, keywords_res in enumerate(res):
for ii, keyword in enumerate(keywords_res):
label = keyword2cluster[keyword]
temp.append(label)
clustered_res.append(temp)
df_cluster = pd.DataFrame()
# for all cluster (e.g. 4 --> 0, 1, 2, 3)
for cluster in range(num_cluster):
        # count how often this cluster occurs in the clustered keywords of each person
for i, clustered_char in enumerate(clustered_res):
ct = clustered_char.count(cluster)
ind_threshold = threshold * len(clustered_char)
if ct > ind_threshold:
# result = np.count_nonzero(clustered_res == cluster, axis=1)
# make node for graph (may later be turned into a cloud if possible)
cluster_name = "cluster" + str(cluster)
# append to dataframe
new_row = {"person": characters[i], "cluster": cluster_name}
df_cluster = df_cluster.append(new_row, ignore_index=True)
df_cluster.to_csv(file)
print(" - Clusters have been created and will be saved to : " + str(file))
return df_cluster
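# Hedged sketch of the keyword-to-cluster decision used above: a character joins a cluster once a
# large enough share of its keywords maps to that cluster label. All names here are illustrative.
def _example_keyword_to_cluster(keywords, keyword2cluster, cluster, threshold):
    labels = [keyword2cluster[k] for k in keywords if k in keyword2cluster]
    return len(labels) > 0 and labels.count(cluster) > threshold * len(labels)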
# getCluster loads the clusters to a dataframe. Either from csv file or by calling cluster_data()
# dataframe is then prepared to be displayed as a graph and subsequent plotted
def getCluster(load, num_cluster, threshold, location):
# parameter:
# load: if true, load data from csv file, bool
    # num_cluster: number of cluster centroids - results in labels for keywords, int
    # threshold: minimum share of a person's keywords that must come from a cluster to add the person to that cluster, float
# location: name of the experiment to save the plot, string
# return:
# df_cluster = pandas dataframe, consistent of cluster name and character
# if load = True, load pre-evaluated csv file
file = os.path.join("src", "csv", "clustered_keywords_" + str(num_cluster) + str(int(1/threshold)) + ".csv")
if load == True:
try:
df_cluster = pd.read_csv(file)
print(" - Clusters have been loaded from file : " + str(file))
except:
print(" - WARNING: Could not load file: " + str(file))
print(" - run adjust_graph() to create clusters")
load = False
if load == False:
# from character with keywords to a dataframe that shows edges, where
# more keywords from one cluster have occurred than threshold says
df_cluster = cluster_data(num_cluster=num_cluster, threshold=threshold)
if check_empty(df_cluster)== False:
# convert edges to nummeric edges and prepare node labels
edges, label = dataframe2graph(df_cluster, False, "person", "cluster", "")
# plot the graph
plotGraph(edges, [], label, location=location, exp="2_cluster_graph")
else:
print(" - WARNING: could not add any cluster, lower threshold")
return df_cluster, load
# apply the found clusters in the graph to the actual dataframe
# practically difficult because one character can be in multiple clusters
# returns a new dataframe that considers the previous dataframe
###########################################################################################################
# Idea:
# from character: Jesus
# has neighbors: cluster1, cluster2, Lukas
# has clusters: cluster6, cluster4
# result: cluster6-cluster1, cluster6-cluster2, cluster6-Lukas, cluster4-cluster1, cluster4-cluster2, cluster4-lukas
# or, if character has no cluster:
# from character: Jesus
# has neighbors: cluster1, cluster2, Lukas
# has clusters: Jesus <-- has the character itself as cluster
# result: Jesus-cluster1, Jesus-cluster2, Jesus-Lukas, Jesus-cluster1, Jesus-cluster2, Jesus-lukas
###########################################################################################################
def replaceCluster(cluster_log, df_emotion):
# parameter:
# cluster_log: pandas dataframe that hold all character, that are in cluster and can be found in graph
# df_emotion: bible dataframe, has 3 rows: character_A, character_B and emotion
# return:
# df_emotion_cluster: new bible dataframe, including the clusters, also in format: character_A, character_B and emotion
# remove duplicates in log
cluster_log = cluster_log.drop_duplicates()
# get all people in some cluster
# iterate over the labels
labels = cluster_log["from"].unique().tolist()
for label in labels:
# get the clusters the label is in
charincluster = cluster_log.loc[cluster_log["from"] == label]
for i, row in charincluster.iterrows():
# get any neighbor of the current label
from_char, to_char = row.values
subset_A = df_emotion.loc[df_emotion["character_A"] == from_char]
subset_B = df_emotion.loc[df_emotion["character_B"] == from_char]
frames = [subset_A, subset_B]
neighbors = pd.concat(frames, sort=False)
# include the current cluster "to_char" to all neighbors, because the node is reduced from the graph
for ii, row_neighbor in neighbors.iterrows():
try:
_, char_A, char_B, emotion = row_neighbor.values
except:
char_A, char_B, emotion = row_neighbor.values
# character can be in "character_A" oder "character_B"...
if char_A == from_char:
entry = {"character_A": to_char, "character_B": char_B, "emotion": emotion}
else:
entry = {"character_A": to_char, "character_B": char_A, "emotion": emotion}
df_emotion = df_emotion.append(entry, ignore_index=True)
# because character is transfered to a cluster, the initial rows are dropped from the dataframe
subset_A = df_emotion.loc[df_emotion["character_A"] == label]
subset_B = df_emotion.loc[df_emotion["character_B"] == label]
frames = [subset_A, subset_B]
drop_df = pd.concat(frames, sort=False)
for ii, row in drop_df.iterrows():
df_emotion = df_emotion.drop(index=ii)
# also, there may be self relations because of a neighbor chain within the same cluster
# delete those relations, e.g. "cluster1" to "cluster1"
subset = df_emotion.loc[df_emotion["character_B"] == df_emotion["character_A"]]
for ii, row in subset.iterrows():
df_emotion = df_emotion.drop(index=ii)
return df_emotion
# recursive call to do a depth first search
# is given a person which is in cluster x and checks every relation/neighbor node if this node is also in the cluster
# if so, the previous node is added to the cluster / marked "in cluster"
def investigateNeighbor(
cluster, cluster_id, neighbor, df_cluster, df_emotion, cluster_log, found_neighbors
):
# parameter:
# cluster: current cluster the function should find neighbors in, string
# cluster_id: cluster may be found at multiple places in graph which
# do not need to overlap; therefore each root-cluster has an id
# neighbor: neighbor that should be investigated by the function, string
# df_cluster: person in cluster dataframe, that should be searched for in the graph, pandas dataframe
# df_emotion: bible dataframe, has 3 rows: character_A, character_B and emotion
# cluster_log: pandas dataframe that should be enhanced by clusters located in the graph
# found_neighbors: already investigated neighbors; should not investigate them multiple times, string array
# return:
# cluster_log: pandas dataframe that hold all character, that are in cluster and can be found in graph
# probe if the node has neighbor nodes
subset_A = df_emotion.loc[
(df_emotion["character_A"] == neighbor)
& (~df_emotion["character_B"].isin(found_neighbors))
]["character_B"]
subset_B = df_emotion.loc[
(df_emotion["character_B"] == neighbor)
& (~df_emotion["character_A"].isin(found_neighbors))
]["character_A"]
frames = [subset_A, subset_B]
new_neighbors = pd.concat(frames, sort=False).unique().tolist()
# if yes, probe if those nodes are also in the cluster
for ii, new_neighbor in enumerate(new_neighbors):
found_neighbors.append(new_neighbor)
if new_neighbor != neighbor:
check_cluster = df_cluster.loc[
(df_cluster["cluster"] == cluster)
& (df_cluster["person"] == new_neighbor)
]
if check_empty(check_cluster)== False:
# first delete the row from cluster_frame
df_cluster = df_cluster.drop(check_cluster.index)
log_entry = {"from": new_neighbor.strip(), "to": cluster_id}
cluster_log = cluster_log.append(log_entry, ignore_index=True)
# start recursive call to find other members of the current cluster
cluster_log, df_cluster = investigateNeighbor(
cluster,
cluster_id,
new_neighbor,
df_cluster,
df_emotion,
cluster_log,
found_neighbors,
)
return cluster_log, df_cluster
# main function that looks for clusters in the dataframe. Finds an initial pair and starts the recursive call
# to investigate that cluster
def adopt_clusters(df_cluster, df_emotion, min_neighbor_cluster):
# parameters:
# df_cluster: gets the dataframe, which includes the characters and their clusters; does not say anything about the
# question of if the cluster, the character is in can be found in the dataframe
# df_emotion: pandas dataframe, which includes all relations of the bible by using 3 columns:
# character_A, character_B, emotion
    # min_neighbor_cluster: minimum number of graph neighbors a character in the cluster must have
    #                       before the cluster is adopted at that location, so that tiny
    #                       (mini-) clusters are excluded, int
# return:
# df_emotion: is the ralation pandas dataframe, that has been adjusted, such that it includes the clusters
# get all clusters available in the data
clusters = df_cluster["cluster"].unique().tolist()
# add characters that have been found in the dataframe and run in the same cluster; needs at least 2
# neighboring characters which are in the same cluster to add them
cluster_log = pd.DataFrame()
for cluster in clusters:
# find the characters at the current cluster
i = 0
found_neighbors = []
while True:
characters_in_cluster = df_cluster.loc[df_cluster["cluster"] == cluster]
# either first iteration or there are rows in the dataframe left
if characters_in_cluster.shape[0] > 1 or (
i > 0 and characters_in_cluster.shape[0] > 0
):
cluster_person = characters_in_cluster.head(1)["person"].values[0]
found_neighbors.append(cluster_person)
# get all dataframe entries of the bible for the people in the current cluster
# df_emotion has 3 columns: character_A, character_B, emotion
subset_A = df_emotion.loc[df_emotion["character_A"] == cluster_person]["character_B"]
subset_B = df_emotion.loc[df_emotion["character_B"] == cluster_person]["character_A"]
frames = [subset_A, subset_B]
neighbors = pd.concat(frames, sort=False).unique().tolist()
# Since the same cluster may be found at multiple locations in the graph it has to
# get an individual name : cluster_id
cluster_id = str(cluster.strip()) + "_" + str(i)
if len(neighbors) >= min_neighbor_cluster:
for ii, new_neighbor in enumerate(neighbors):
characters_in_cluster = df_cluster.loc[(df_cluster["cluster"] == cluster) & (df_cluster["person"] == new_neighbor)]
if check_empty(characters_in_cluster) == True:
continue
else:
# initialize the set of neighbors, which have already been found in the data
found_neighbors.append(new_neighbor)
# since already on couple has been found, add them to the dataframe
log_entry = {"from": cluster_person.strip(), "to": cluster_id}
cluster_log = cluster_log.append(log_entry, ignore_index=True)
# delete entry from cluster dataframe because one character can only be once in the dataframe
check_cluster = df_cluster.loc[
(df_cluster["cluster"] == cluster)
& (df_cluster["person"] == cluster_person)
]
df_cluster = df_cluster.drop(check_cluster.index)
log_entry = {"from": new_neighbor.strip(), "to": cluster_id}
cluster_log = cluster_log.append(log_entry, ignore_index=True)
check_cluster = df_cluster.loc[
(df_cluster["cluster"] == cluster)
& (df_cluster["person"] == new_neighbor)
]
df_cluster = df_cluster.drop(check_cluster.index)
# check if further neighbors exists
check_cluster = df_cluster.loc[
(df_cluster["cluster"] == cluster)
& (df_cluster["person"] == new_neighbor)
& (~df_cluster["person"].isin(found_neighbors))
]
# investigate those neighbors
if check_empty(check_cluster) == False:
cluster_log, df_cluster = investigateNeighbor(
cluster,
cluster_id,
new_neighbor,
df_cluster,
df_emotion,
cluster_log,
found_neighbors,
)
else:
check_cluster = df_cluster.loc[
(df_cluster["cluster"] == cluster)
& (df_cluster["person"] == cluster_person)
]
df_cluster = df_cluster.drop(check_cluster.index)
i += 1
else:
break
# check if clusters could be found in the data
if check_empty(cluster_log) == True:
print("No cluster was assigned")
else:
        # apply the cluster_log to the df_emotion dataframe so that every cluster found in the data overrides the existing entries in the frame
df_emotion = replaceCluster(cluster_log, df_emotion)
return df_emotion
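# Hedged illustration (hypothetical names, not part of the original data) of the replacement above:
#   df_cluster:  person="Peter",  cluster="cluster1"   /   person="Andrew", cluster="cluster1"
#   df_emotion:  character_A="Peter", character_B="Andrew", emotion="positive"
#   cluster_log: from="Peter", to="cluster1_0"   /   from="Andrew", to="cluster1_0"
# replaceCluster() is then expected to rewrite both character columns of df_emotion to "cluster1_0".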
# concat_cluster() concatenates clusters which point to each other, i.e. rows where
# character_A = some cluster and character_B = some cluster,
# so that edges from cluster to cluster are reduced.
# The algorithm takes one of these rows per iteration
# and redirects all the other edges to the new cluster, which will be
# formed from the concatenation
def concat_cluster(df_emotion):
# parameter:
# df_emotion: pandas dataframe containing all relations
# return:
    # df_emotion: df_emotion, but with concatenated clusters - no cluster-to-cluster edges
i = 0
while True:
        # find clusters that have either been concatenated from other clusters
        # ("cluster_[0-9]+") or are "original" clusters ("cluster[0-9]+")
cluster2cluster = df_emotion.loc[
(df_emotion["character_A"].str.contains(r"cluster_?[0-9]+", regex=True))
& (df_emotion["character_B"].str.contains(r"cluster_?[0-9]+", regex=True))
]
# check if those exist
if check_empty(cluster2cluster) == True:
break
else:
            # one row per iteration: get the first row
row = cluster2cluster.head(1)
# extract the cluster names
cluster_A = row["character_A"].values[0]
cluster_B = row["character_B"].values[0]
# delete the row from the dataframe
df_emotion = df_emotion.drop(row.index)
# take new name, concatenated clusters can be detected by "_"
new_cluster_name = "cluster_" + str(i)
i += 1
            # find all characters that are in the clusters
involved_characters_A = df_emotion.loc[
df_emotion["character_A"] == cluster_A
]
involved_characters_B = df_emotion.loc[
df_emotion["character_A"] == cluster_B
]
# join both dataframes
frames = [involved_characters_A, involved_characters_B]
involved_characters = pd.concat(frames, sort=False)
if check_empty(involved_characters) == False:
                for idx, character in involved_characters.iterrows():
                    # change the cluster name to the new cluster name
                    # keep rows, as they will later be aggregated by distill_shrunken_df()
                    df_emotion.loc[idx, "character_A"] = new_cluster_name
involved_characters_A = df_emotion.loc[
df_emotion["character_B"] == cluster_A
]
involved_characters_B = df_emotion.loc[
df_emotion["character_B"] == cluster_B
]
# join both dataframes
frames = [involved_characters_A, involved_characters_B]
involved_characters = | pd.concat(frames, sort=False) | pandas.concat |
import pandas as pd
import numpy as np
import re
import math
import codecs
import csv
# estimated remaining total number of movies: 220k to 200k
data= | pd.read_csv("Website_ETL.CSV") | pandas.read_csv |
# -*- coding:utf-8 -*-
import re
import logging
import pandas as pd
from contrib.utils.DataCleanCheckTool import DataCleanCheckTool
class CorpusFromEllisQTB(object):
"""
CorpusFromEllis, question_text with blank
    The whole program is made up of a large number of functions.
    The main function is final_process; the others are dispatched from within final_process.
    Once final_process has finished, its output is a list containing all the texts.
    This list is then combined, by index, with the final make_final_list function.
    make_final_list adds that information to the data produced by final_process,
    finally yielding a complete corpus with label information.
"""
@classmethod
def read_data_from_csv(cls):
        # first, a few tables have to be read in: the blanks re-extracted from the json, tmjcxx, and exercise_package
data_tmjcxx = pd.read_csv("tmjcxx.csv")
data_packageid = pd.read_csv("exercise_package.csv")
data_stat = pd.read_csv("exercise_status.csv")
data_w_blank = pd.read_csv("data_with_blank.csv", index_col=0)
        # then merge these tables
data = pd.merge(data_w_blank, data_stat, left_on="exercise_id", right_on="ExerciseID", how="left")
data = | pd.merge(data, data_packageid, on="exercise_id", how="left") | pandas.merge |
# -*- coding: utf-8 -*-
"""Copy of rnn.ipynb
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/drive/1hw5VX0w03qnA-pD4YmOck-HAmzP9_fO8
# Recurrent Neural Network
## Part 1 - Data Preprocessing
### Importing the libraries
"""
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
"""### Importing the training set"""
dataset_train = pd.read_csv('Google_Stock_Price_Train.csv')
training_set = dataset_train.iloc[:, 1:2].values #creates numpy array
#only numpy arrays can be used as inputs to keras neural networks
"""### Feature Scaling"""
#check the difference between standardization and normalization
#in an RNN, whenever there is a sigmoid function in the output layer,
#normalization is recommended
from sklearn.preprocessing import MinMaxScaler
sc = MinMaxScaler(feature_range = (0, 1)) #creating object of the MinMaxScaler class
# with feature range
training_set_scaled = sc.fit_transform(training_set)
"""### Creating a data structure with 60 timesteps and 1 output"""
#the choice of time step is important
# a wrong timestep can lead to overfitting
# the 60 time steps correspond to the past 60 inputs at any particular time step.
# hence X_train has the 60 previous stock prices and y_train has the next day's stock price, which is what
# we want from the network, hence it's the output to be estimated.
X_train = []
y_train = []
for i in range(60, 1258): #hence the range starts from 60 and goes to end of the list
X_train.append(training_set_scaled[i-60:i, 0])
y_train.append(training_set_scaled[i, 0])
X_train, y_train = np.array(X_train), np.array(y_train)
"""### Reshaping"""
#reshaping so that the array dimensions are compatible with the input layer of the RNN
X_train = np.reshape(X_train, (X_train.shape[0], X_train.shape[1], 1)) # the 1 in the end is the number of indicators i.e. dependent vars
# new dimensions are also added to include more dependent variables.
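# with the hard-coded range(60, 1258) above, X_train ends up with shape (1198, 60, 1) and
# y_train with shape (1198,): 1198 sliding windows of 60 past prices, with a single indicator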
"""## Part 2 - Building and Training the RNN
### Importing the Keras libraries and packages
"""
# explore keras documentation on the internet
from keras.models import Sequential
from keras.layers import Dense
from keras.layers import LSTM
from keras.layers import Dropout
"""### Initialising the RNN"""
regressor = Sequential() # we are making our RNN sequential; check the documentation for the terms
"""### Adding the first LSTM layer and some Dropout regularisation"""
regressor.add(LSTM(units = 50, return_sequences = True, input_shape = (X_train.shape[1], 1)))
# return_sequences == True while further LSTM layers follow; in the last LSTM layer it is False
# units == neurons
# input_shape == last two dimensions of X_train
regressor.add(Dropout(0.2))
# this adds dropout regularization, i.e. it drops the given fraction of neurons; for more info check the internet
"""### Adding a second LSTM layer and some Dropout regularisation"""
regressor.add(LSTM(units = 50, return_sequences = True))
regressor.add(Dropout(0.2))
"""### Adding a third LSTM layer and some Dropout regularisation"""
regressor.add(LSTM(units = 50, return_sequences = True))
regressor.add(Dropout(0.2))
"""### Adding a fourth LSTM layer and some Dropout regularisation"""
regressor.add(LSTM(units = 50))
regressor.add(Dropout(0.2))
"""### Adding the output layer"""
regressor.add(Dense(units = 1))
"""### Compiling the RNN"""
regressor.compile(optimizer = 'adam', loss = 'mean_squared_error')
"""### Fitting the RNN to the Training set"""
regressor.fit(X_train, y_train, epochs = 100, batch_size = 32)
"""## Part 3 - Making the predictions and visualising the results
### Getting the real stock price of 2017
"""
dataset_test = | pd.read_csv('Google_Stock_Price_Test.csv') | pandas.read_csv |
import pandas as pd
import numpy as np
from sklearn.preprocessing import OneHotEncoder
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import MinMaxScaler
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Activation, Dense
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.metrics import categorical_crossentropy
from imblearn.over_sampling import SMOTE
import datetime
list_names = ['rain_data_aus.csv','wind_table_01.csv','wind_table_02.csv','wind_table_03.csv','wind_table_04.csv','wind_table_05.csv','wind_table_06.csv','wind_table_07.csv','wind_table_08.csv']
# Loading the data
rain_data = pd.DataFrame(pd.read_csv('data/rain_data_aus.csv'))
wind_table_01 = pd.DataFrame(pd.read_csv('data/wind_table_01.csv'))
wind_table_02 = pd.DataFrame(pd.read_csv('data/wind_table_02.csv'))
wind_table_03 = pd.DataFrame(pd.read_csv('data/wind_table_03.csv'))
wind_table_04 = pd.DataFrame(pd.read_csv('data/wind_table_04.csv'))
wind_table_05 = pd.DataFrame(pd.read_csv('data/wind_table_05.csv'))
wind_table_06 = pd.DataFrame(pd.read_csv('data/wind_table_06.csv'))
wind_table_07 = pd.DataFrame( | pd.read_csv('data/wind_table_07.csv') | pandas.read_csv |
import logging
import textwrap
import pandas
from sqlalchemy import text
from triage.database_reflection import table_exists
from triage.component.catwalk.storage import MatrixStore
class ProtectedGroupsGeneratorNoOp(object):
def generate_all_dates(self, *args, **kwargs):
logging.warning(
"No bias audit configuration is available, so protected groups will not be created"
)
def generate(self, *args, **kwargs):
logging.warning(
"No bias audit configuration is available, so protected groups will not be created"
)
def as_dataframe(self, *args, **kwargs):
return | pandas.DataFrame() | pandas.DataFrame |
import os
from typing import cast
import matplotlib.pyplot as plt
import pandas as pd
import pandera as pa
import requests
import seaborn as sns
from dagster_pandera import pandera_schema_to_dagster_type
from pandera.typing import Series
# ****************************************************************************
# ***** TYPES ****************************************************************
class StockPrices(pa.SchemaModel):
"""Open/high/low/close prices for a set of stocks by day."""
name: Series[str] = pa.Field(description="Ticker symbol of stock")
date: Series[pd.Timestamp] = pa.Field(description="Date of prices")
open: Series[float] = pa.Field(ge=0, description="Price at market open")
high: Series[float] = pa.Field(ge=0, description="Highest price of the day")
low: Series[float] = pa.Field(ge=0, description="Lowest price of the day")
close: Series[float] = pa.Field(ge=0, description="Price at market close")
volume: Series[int] = pa.Field(ge=0, description="Number of shares traded for day")
StockPricesDgType = pandera_schema_to_dagster_type(StockPrices)
class BollingerBands(pa.SchemaModel):
"""Bollinger bands for a set of stock prices."""
name: Series[str] = pa.Field(description="Ticker symbol of stock")
date: Series[pd.Timestamp] = pa.Field(description="Date of prices")
upper: Series[float] = pa.Field(ge=0, description="Upper band")
lower: Series[float] = pa.Field(description="Lower band")
BollingerBandsDgType = pandera_schema_to_dagster_type(BollingerBands)
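# Hedged sketch (added for illustration only, not used by the assets below): Bollinger bands are
# conventionally a rolling mean of the close price plus/minus k rolling standard deviations.
# The window size and k below are assumptions, not values taken from this project.
def _illustrative_bollinger(prices: pd.DataFrame, window: int = 20, k: float = 2.0) -> pd.DataFrame:
    grouped = prices.groupby("name")["close"]
    mean = grouped.transform(lambda s: s.rolling(window, min_periods=1).mean())
    std = grouped.transform(lambda s: s.rolling(window, min_periods=1).std())
    # Keep the identifying columns so the result lines up with the BollingerBands schema above.
    return pd.DataFrame(
        {"name": prices["name"], "date": prices["date"], "upper": mean + k * std, "lower": mean - k * std}
    )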
class AnomalousEvents(pa.SchemaModel):
"""Anomalous price events, defined by a day on which a stock's closing price strayed above or
below its Bollinger bands."""
date: Series[pd.Timestamp] = pa.Field(description="Date of price event")
name: Series[str] = pa.Field(description="Ticker symbol of stock")
    event: Series[pd.CategoricalDtype] = pa.Field(description="Type of event: 'high' or 'low'")
AnomalousEventsDgType = pandera_schema_to_dagster_type(AnomalousEvents)
# ****************************************************************************
# ***** FUNCTIONS ************************************************************
DATA_ROOT = os.path.abspath(os.path.join(os.path.dirname(__file__), "../data"))
SP500_CSV_URL = "https://raw.githubusercontent.com/plotly/datasets/master/all_stocks_5yr.csv"
def normalize_path(path: str) -> str:
return path if path[0] == "/" else os.path.join(DATA_ROOT, path)
def download_file(url: str, path: str):
"""Download a file from a URL to a local path. If relative path, will be resolved relative to `DATA_ROOT`."""
path = normalize_path(path)
os.makedirs(os.path.dirname(path), exist_ok=True)
with open(path, "wb") as f:
f.write(requests.get(url).content)
def load_prices_csv(path: str) -> pd.DataFrame:
"""Load a CSV file containing stock prices. CSV should conform to the schema in the
`StockPrices` pandera schema above. If relative path, will be resolved relative to
`DATA_ROOT`."""
path = normalize_path(path)
df = cast(pd.DataFrame, | pd.read_csv(path, parse_dates=["date"]) | pandas.read_csv |
import logging
import pandas as pd
from itertools import tee, izip
from copy import deepcopy
from modules.loggingFunctions import initialize_logging
from modules.amr.aro import ARO_ACCESSIONS
DF_ARO = | pd.DataFrame(ARO_ACCESSIONS) | pandas.DataFrame |
from datetime import datetime
import numpy as np
import pandas as pd
from pandas import (
Period,
Series,
date_range,
period_range,
to_datetime,
)
import pandas._testing as tm
class TestCombineFirst:
def test_combine_first_period_datetime(self):
# GH#3367
didx = date_range(start="1950-01-31", end="1950-07-31", freq="M")
pidx = period_range(start=Period("1950-1"), end=Period("1950-7"), freq="M")
# check to be consistent with DatetimeIndex
for idx in [didx, pidx]:
a = Series([1, np.nan, np.nan, 4, 5, np.nan, 7], index=idx)
b = Series([9, 9, 9, 9, 9, 9, 9], index=idx)
result = a.combine_first(b)
expected = Series([1, 9, 9, 4, 5, 9, 7], index=idx, dtype=np.float64)
tm.assert_series_equal(result, expected)
def test_combine_first_name(self, datetime_series):
result = datetime_series.combine_first(datetime_series[:5])
assert result.name == datetime_series.name
def test_combine_first(self):
values = tm.makeIntIndex(20).values.astype(float)
series = Series(values, index=tm.makeIntIndex(20))
series_copy = series * 2
series_copy[::2] = np.NaN
# nothing used from the input
combined = series.combine_first(series_copy)
tm.assert_series_equal(combined, series)
# Holes filled from input
combined = series_copy.combine_first(series)
assert np.isfinite(combined).all()
tm.assert_series_equal(combined[::2], series[::2])
tm.assert_series_equal(combined[1::2], series_copy[1::2])
# mixed types
index = tm.makeStringIndex(20)
floats = Series(np.random.randn(20), index=index)
strings = Series(tm.makeStringIndex(10), index=index[::2])
combined = strings.combine_first(floats)
tm.assert_series_equal(strings, combined.loc[index[::2]])
tm.assert_series_equal(floats[1::2].astype(object), combined.loc[index[1::2]])
# corner case
ser = Series([1.0, 2, 3], index=[0, 1, 2])
empty = Series([], index=[], dtype=object)
result = ser.combine_first(empty)
ser.index = ser.index.astype("O")
| tm.assert_series_equal(ser, result) | pandas._testing.assert_series_equal |
import pandas as pd
import numpy as np
import networkx as nx
from sklearn.preprocessing import StandardScaler, normalize, MinMaxScaler
import matplotlib.pyplot as plt
from collections import defaultdict, Counter
import urllib.request as request
import json
import os
from scipy.sparse import csr_matrix as csr_matrix
from matplotlib import cm
from tempfile import TemporaryFile
import torch
import torch_geometric
import argparse
from utils import makepath
## Replace function to rename countries for uniformity
def replace_(country_list, replace_dict):
nnn = country_list.copy()
#nnn[i] = a.replace('&','and')
for k, v in replace_dict.items():
for i,a in enumerate(nnn):
if a == k:
nnn[i] = v
return nnn
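# Hedged usage sketch (hypothetical values): replace_(["USA", "UK"], {"USA": "United States"})
# returns ["United States", "UK"]; the input list itself is left unchanged because of the copy().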
def makeplot(edge_list, countries_attributes, country_dict, output_dir):
G = nx.Graph() # Define Graph here
G = G.to_undirected()
G.add_weighted_edges_from(edge_list)
pos = nx.spring_layout(G)
A = nx.adjacency_matrix(G)
A = A.todense()
# attr_names = countries_profile.columns[2:]
attr_dict = get_node_attributes(countries_attributes, country_dict)
# attr_dict = set_node_attributes(scaled_data, attr_names)
nx.set_node_attributes(G, attr_dict)
plt.figure(figsize=(20, 12))
nx.draw(G, pos, node_size=400, with_labels=True, edge_color='#C0C0C0')
plt.savefig(output_dir + 'graph_raw.png')
plt.show()
return
# Import data between countries into tuples of countries and edges
def make_directed_edges(data, compare_dict):
data = data.copy()
edges = []
for i in range(len(data)):
c = (compare_dict[str(data.iloc[i,1])], compare_dict[str(data.iloc[i,2])],
round(data.iloc[i,3],2))
edges.append(c)
#edges = sorted(iedges)
return edges
def check_cyclic_edges(edge_list, remove_edges = False):
self_edges = []
new_edge_list = []
idx = []
for i in range(len(edge_list)):
if (edge_list[i][0] == edge_list[i][1]):
#print(edge_list[i])
self_edges.append(edge_list[i])
idx.append(i)
else:
new_edge_list.append(edge_list[i])
if remove_edges:
return new_edge_list, self_edges
else:
return edge_list, self_edges
# Function to make a dictionary of nodes and attributes
def get_node_attributes(attributes, dict_):
attr_names = attributes.columns[1:]
attr_dict = {}
for i in range(len(attributes)):
attr_dict[dict_[attributes.loc[i][0]]] = {attr_names[j]: k for j, k in enumerate(attributes.loc[i][1:])}
return attr_dict
def income_level_dict(income_grp, country_dict):
groups = income_grp.iloc[:,1]
classes = list(set(groups))
c_dict = {}
for c in classes:
l = income_grp[groups== c].iloc[:,0]
c_dict[c] = [country_dict[a] for a in l]
return c_dict
## Read data of countries import and exports with partner countries from directory
def main():
parser = argparse.ArgumentParser()
parser.add_argument("--raw_data_dir", default = '../data', type = str, required = False)
parser.add_argument("--output_dir", default = '../data/processed', type = str, required = False)
parser.add_argument("-makeplot", type = bool, default = True, help = "Plot graph")
args = parser.parse_args()
input_dir = args.raw_data_dir
comtradeurl = os.path.join(input_dir, "comtrade_data")
makepath(args.output_dir)
print("Processing data...")
replace_dict = np.load(input_dir + '/countries_rename.npy', allow_pickle=True).item() # Get dict items from npy file
frames = []
for name in os.listdir(comtradeurl):
a = pd.read_csv(os.path.join(comtradeurl, name))
a = a[['Trade Flow','Reporter','Partner','Trade Value (US$)']]
frames.append(a)
trade = pd.concat(frames, ignore_index=True)
trade = trade.dropna()
HCI_data = pd.read_csv(os.path.join(input_dir, 'HCIcountry.csv'))
c_income_group = HCI_data[['Short Name','Income Group']]
c_income_group = c_income_group.rename(columns = {'Short Name': 'country'})
inc_levels = set(c_income_group['Income Group'])
inc_levels_dict = {i:j for j,i in enumerate(inc_levels)}
countries_attributes = pd.read_csv(os.path.join(input_dir, "country_profile_variables2017.csv"))
countries_attributes = countries_attributes.replace(['~0','~0.0','-~0.0','...'],0)
countries_attributes = countries_attributes.apply(lambda x: pd.to_numeric(x, errors = 'ignore'))
# Create feature dictionary for easy selection
feature_indices_dict = {i:j for i,j in enumerate(list(countries_attributes.columns))}
countries_attributes.iloc[:,2:] = countries_attributes.iloc[:,2:].select_dtypes(exclude = 'object')
countries_attributes = countries_attributes.dropna(axis = 'columns')
countries_attributes = countries_attributes.drop(['Region'], axis = 1)
countries_attributes.head()
cols = countries_attributes.columns[1:]
scaler = StandardScaler()
scaled_data = scaler.fit_transform(countries_attributes.iloc[:,1:])
scaled_data = pd.DataFrame(scaled_data, columns = cols)
countries_attributes.iloc[:,1:] = scaled_data
countries_attributes.head()
#----------------------------------------------------------------------------------
countries_distances = pd.read_csv(os.path.join(input_dir,"countries_distances.csv"))
countries_distances = countries_distances.rename(columns = {'pays1':'country1', 'pays2':'country2'})
countries_distances = countries_distances[['country1','country2','dist']]
countries_names = list(countries_distances['country1'])
countries_distances.head()
#-----------------------------------------------------------------------------------
dat1 = list(countries_attributes['country'])
dat2 = list(c_income_group['country'])
dat3 = list(set(countries_distances['country1']))
dat3_1 = list(countries_distances['country1'])
dat3_2 = list(countries_distances['country2'])
dat1 = replace_(dat1, replace_dict)
dat2 = replace_(dat2, replace_dict)
dat3 = replace_(dat3, replace_dict)
dat3_1 = replace_(dat3_1, replace_dict)
countries_attributes['country'] = dat1
c_income_group['country'] = dat2
countries_distances['country1'] = dat3_1
countries_distances['country2'] = dat3_2
countries_attributes = countries_attributes.drop_duplicates(subset = 'country', inplace = False)
#----------------------------------------------------------------------------------------
# [print(i) for i in c_income_group['country']]
common_countries = [] # Countries found in all three lists of countries
c1_nc23 = [] # countries found in c1 but not in c2 and c3
ncm123 = []
c2_nc13 = [] # countries found in c2 but not in c1 and c3
c3_nc12 = [] # countries found in c3 but not in c1 and c2
for c in dat1:
if c in dat2 and c in dat3:
common_countries.append(c)
else:
ncm123.append(c)
for c in dat2:
if c in dat1 and c in dat3:
pass
else:
c2_nc13.append(c)
for c in dat3:
if c in dat1 and c in dat2:
pass
else:
c3_nc12.append(c)
print(len(common_countries))
#-----------------------------------------------------------------------------------------
## Make a dictionary of countries and their given codes as keys for easy reference
country_dict = {j:i for i, j in enumerate(sorted(set(common_countries)))}
#country_dict
#----------------------------------------------------------------------------------------
# Select countries with names or data appearing in each of the datasets
countries_attributes = countries_attributes[countries_attributes['country'].isin(common_countries)].reset_index(drop =True)
c_income_group = c_income_group[c_income_group['country'].isin(common_countries)]
countries_dists = countries_distances[countries_distances['country1'].isin(common_countries)]
countries_dists = countries_dists[countries_dists['country2'].isin(common_countries)]
#--------------------------------------------------------------------------
cdist = countries_dists.copy()
edge_list = []
for i in range(len(cdist)):
c = (country_dict[str(cdist.iloc[i, 0])], country_dict[str(cdist.iloc[i, 1])],
round(cdist.iloc[i, 2], 2))
edge_list.append(c)
edge_list = sorted(edge_list)
# edge_list
#------------------------------------------------------------------------------
edges_dists = | pd.DataFrame(edge_list) | pandas.DataFrame |
"""
Hold pandas dataframe of given excel sheet
Performs various read operations which all return numpy arrays
"""
import numpy as np
import pandas as pd
def clean_vector(x):
return x[~np.isnan(x)]
class DataHandler:
def __init__(self, filepath):
self._path = filepath
self._df = pd.read_excel(self._path)
def get_columns(self, column_names):
res = []
for col_name in column_names:
col = clean_vector(self._df[col_name].to_numpy())
res.append(col)
return tuple(res)
def add_column_to_excel(self, name, values):
self._df[name] = pd.Series(values)
self._df.to_excel(self._path)
self._df = pd.read_excel(self._path)
def drop_first_n_rows(self, n):
self._df = self._df.iloc[n+1:, :]
def save_df_back_to_excel(self):
self._df = | pd.read_excel(self._path) | pandas.read_excel |
'''reports details about a virtual boinc farm'''
# standard library modules
import argparse
import collections
#import contextlib
#from concurrent import futures
#import errno
import datetime
#import getpass
#import json
import logging
#import math
#import os
#import re
#import socket
#import shutil
#import signal
import socket
#import subprocess
import sys
#import threading
#import time
import urllib
#import uuid
# third-party module(s)
import dateutil.parser
import lxml
import pandas as pd
import pymongo
import requests
# neocortix module(s)
import devicePerformance
import ncs
logger = logging.getLogger(__name__)
def anyFound( a, b ):
    ''' return True iff any item from iterable a is found in iterable b '''
for x in a:
if x in b:
return True
return False
def datetimeIsAware( dt ):
if not dt: return None
return (dt.tzinfo is not None) and (dt.tzinfo.utcoffset( dt ) is not None)
def universalizeDateTime( dt ):
if not dt: return None
if datetimeIsAware( dt ):
#return dt
return dt.astimezone(datetime.timezone.utc)
return dt.replace( tzinfo=datetime.timezone.utc )
def interpretDateTimeField( field ):
if isinstance( field, datetime.datetime ):
return universalizeDateTime( field )
elif isinstance( field, str ):
return universalizeDateTime( dateutil.parser.parse( field ) )
else:
raise TypeError( 'datetime or parseable string required' )
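# Hedged examples (illustrative values only) for the two helpers above:
#   universalizeDateTime(datetime.datetime(2020, 1, 1))   -> 2020-01-01 00:00:00+00:00
#   interpretDateTimeField("2020-01-01T12:00:00+02:00")   -> 2020-01-01 10:00:00+00:00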
def isNumber( sss ):
try:
float(sss)
return True
except ValueError:
return False
def instanceDpr( inst ):
#logger.info( 'NCSC Inst details %s', inst )
# cpuarch: string like "aarch64" or "armv7l"
# cpunumcores: int
# cpuspeeds: list of floats of length cpunumcores, each representing a clock frequency in GHz
# cpufamily: list of strings of length cpunumcores
cpuarch = inst['cpu']['arch']
cpunumcores = len( inst['cpu']['cores'])
cpuspeeds = []
cpufamily = []
for core in inst['cpu']['cores']:
cpuspeeds.append( core['freq'] / 1e9)
cpufamily.append( core['family'] )
dpr = devicePerformance.devicePerformanceRating( cpuarch, cpunumcores, cpuspeeds, cpufamily )
return dpr
def getStartedInstances( db ):
collNames = db.list_collection_names( filter={ 'name': {'$regex': r'^launchedInstances_.*'} } )
#logger.info( 'launched collections: %s', collNames )
startedInstances = []
for collName in collNames:
#logger.info( 'getting instances from %s', collName )
launchedColl = db[collName]
inRecs = list( launchedColl.find( {},
{'device-id': 1, 'cpu': 1, 'instanceId': 1, 'state': 1 })
) # fully iterates the cursor, getting all records
if len(inRecs) <= 0:
logger.warn( 'no launched instances found in %s', collName )
for inRec in inRecs:
if 'instanceId' not in inRec:
logger.warning( 'no instance ID in input record')
if 'dpr' in inRec:
dpr = inRec['dpr']
if dpr < 24:
logger.info( 'low dpr %.1f %s', dpr, inRec )
else:
dpr = instanceDpr( inRec )
if dpr < 24:
logger.info( 'low dpr computed %.1f %s', dpr, inRec )
inRec['dpr'] = round( dpr )
startedInstances.extend( [inst for inst in inRecs if inst['state'] in ['started', 'stopped'] ] )
return startedInstances
def getInstallerRecs( db ):
instRecs = {}
colls = db.list_collection_names( filter={ 'name': {'$regex': r'^startBoinc_.*'} } )
colls = sorted( colls, reverse=False )
for collName in colls:
found = db[collName].find( {"instanceId": {"$ne": "<master>"} } )
for event in found:
iid = event['instanceId']
if anyFound( ['exception', 'returncode', 'timeout'], event ):
if iid in instRecs:
                    logger.warning( 'already had instRec for %s', iid )
if 'exception' in event:
event['status'] = 'exception'
event['exceptionType'] = event['exception']['type'] # redundant
elif 'returncode' in event:
event['status'] = 'failed' if event['returncode'] else 'ok'
elif 'timeout' in event:
event['status'] = 'timeout'
instRecs[iid] = event
return instRecs
def parseTaskLines( lines ):
tasks = []
curTask = {}
firstLine = True
for line in lines:
line = line.rstrip()
if not line:
continue
if firstLine and '== Tasks ==' in line:
continue
if line[0] != ' ':
#logger.info( 'task BOUNDARY %s', line )
numPart = line.split( ')' )[0]
taskNum = int(numPart)
#logger.info( 'TASK %d', taskNum )
curTask = { 'num': taskNum }
tasks.append( curTask )
continue
if ':' in line:
# extract a key:value pair from this line
stripped = line.strip()
parts = stripped.split( ':', 1 ) # only the first colon will be significant
# convert to numeric or None type, if appropriate
val = parts[1].strip()
if val is None:
pass
elif val.isnumeric():
val = int( val )
elif isNumber( val ):
val = float( val )
# store the value
curTask[ parts[0] ] = val
continue
logger.info( '> %s', line )
return tasks
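# Hedged illustration of the kind of task listing parseTaskLines() expects (example lines only):
#   == Tasks ==
#   1) -----------
#      name: example_task
#      fraction done: 0.250000
# A non-indented "N) ..." line starts a new task dict; indented "key: value" lines fill it in,
# with numeric-looking values converted to int or float.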
def collectTaskMetrics( db ):
allTasks = | pd.DataFrame() | pandas.DataFrame |
import pytest
from pandas._libs.tslibs.frequencies import INVALID_FREQ_ERR_MSG, _period_code_map
from pandas.errors import OutOfBoundsDatetime
from pandas import Period, Timestamp, offsets
class TestFreqConversion:
"""Test frequency conversion of date objects"""
@pytest.mark.parametrize("freq", ["A", "Q", "M", "W", "B", "D"])
def test_asfreq_near_zero(self, freq):
# GH#19643, GH#19650
per = Period("0001-01-01", freq=freq)
tup1 = (per.year, per.hour, per.day)
prev = per - 1
assert prev.ordinal == per.ordinal - 1
tup2 = (prev.year, prev.month, prev.day)
assert tup2 < tup1
def test_asfreq_near_zero_weekly(self):
# GH#19834
per1 = Period("0001-01-01", "D") + 6
per2 = Period("0001-01-01", "D") - 6
week1 = per1.asfreq("W")
week2 = per2.asfreq("W")
assert week1 != week2
assert week1.asfreq("D", "E") >= per1
assert week2.asfreq("D", "S") <= per2
def test_to_timestamp_out_of_bounds(self):
# GH#19643, used to incorrectly give Timestamp in 1754
per = Period("0001-01-01", freq="B")
msg = "Out of bounds nanosecond timestamp"
with pytest.raises(OutOfBoundsDatetime, match=msg):
per.to_timestamp()
def test_asfreq_corner(self):
val = Period(freq="A", year=2007)
result1 = val.asfreq("5t")
result2 = val.asfreq("t")
expected = Period("2007-12-31 23:59", freq="t")
assert result1.ordinal == expected.ordinal
assert result1.freqstr == "5T"
assert result2.ordinal == expected.ordinal
assert result2.freqstr == "T"
def test_conv_annual(self):
# frequency conversion tests: from Annual Frequency
ival_A = Period(freq="A", year=2007)
ival_AJAN = Period(freq="A-JAN", year=2007)
ival_AJUN = Period(freq="A-JUN", year=2007)
ival_ANOV = Period(freq="A-NOV", year=2007)
ival_A_to_Q_start = Period(freq="Q", year=2007, quarter=1)
ival_A_to_Q_end = Period(freq="Q", year=2007, quarter=4)
ival_A_to_M_start = Period(freq="M", year=2007, month=1)
ival_A_to_M_end = Period(freq="M", year=2007, month=12)
ival_A_to_W_start = Period(freq="W", year=2007, month=1, day=1)
ival_A_to_W_end = Period(freq="W", year=2007, month=12, day=31)
ival_A_to_B_start = Period(freq="B", year=2007, month=1, day=1)
ival_A_to_B_end = Period(freq="B", year=2007, month=12, day=31)
ival_A_to_D_start = Period(freq="D", year=2007, month=1, day=1)
ival_A_to_D_end = Period(freq="D", year=2007, month=12, day=31)
ival_A_to_H_start = Period(freq="H", year=2007, month=1, day=1, hour=0)
ival_A_to_H_end = Period(freq="H", year=2007, month=12, day=31, hour=23)
ival_A_to_T_start = Period(
freq="Min", year=2007, month=1, day=1, hour=0, minute=0
)
ival_A_to_T_end = Period(
freq="Min", year=2007, month=12, day=31, hour=23, minute=59
)
ival_A_to_S_start = Period(
freq="S", year=2007, month=1, day=1, hour=0, minute=0, second=0
)
ival_A_to_S_end = Period(
freq="S", year=2007, month=12, day=31, hour=23, minute=59, second=59
)
ival_AJAN_to_D_end = Period(freq="D", year=2007, month=1, day=31)
ival_AJAN_to_D_start = Period(freq="D", year=2006, month=2, day=1)
ival_AJUN_to_D_end = Period(freq="D", year=2007, month=6, day=30)
ival_AJUN_to_D_start = Period(freq="D", year=2006, month=7, day=1)
ival_ANOV_to_D_end = Period(freq="D", year=2007, month=11, day=30)
ival_ANOV_to_D_start = Period(freq="D", year=2006, month=12, day=1)
assert ival_A.asfreq("Q", "S") == ival_A_to_Q_start
assert ival_A.asfreq("Q", "e") == ival_A_to_Q_end
assert ival_A.asfreq("M", "s") == ival_A_to_M_start
assert ival_A.asfreq("M", "E") == ival_A_to_M_end
assert ival_A.asfreq("W", "S") == ival_A_to_W_start
assert ival_A.asfreq("W", "E") == ival_A_to_W_end
assert ival_A.asfreq("B", "S") == ival_A_to_B_start
assert ival_A.asfreq("B", "E") == ival_A_to_B_end
assert ival_A.asfreq("D", "S") == ival_A_to_D_start
assert ival_A.asfreq("D", "E") == ival_A_to_D_end
assert ival_A.asfreq("H", "S") == ival_A_to_H_start
assert ival_A.asfreq("H", "E") == ival_A_to_H_end
assert ival_A.asfreq("min", "S") == ival_A_to_T_start
assert ival_A.asfreq("min", "E") == ival_A_to_T_end
assert ival_A.asfreq("T", "S") == ival_A_to_T_start
assert ival_A.asfreq("T", "E") == ival_A_to_T_end
assert ival_A.asfreq("S", "S") == ival_A_to_S_start
assert ival_A.asfreq("S", "E") == ival_A_to_S_end
assert ival_AJAN.asfreq("D", "S") == ival_AJAN_to_D_start
assert ival_AJAN.asfreq("D", "E") == ival_AJAN_to_D_end
assert ival_AJUN.asfreq("D", "S") == ival_AJUN_to_D_start
assert ival_AJUN.asfreq("D", "E") == ival_AJUN_to_D_end
assert ival_ANOV.asfreq("D", "S") == ival_ANOV_to_D_start
assert ival_ANOV.asfreq("D", "E") == ival_ANOV_to_D_end
assert ival_A.asfreq("A") == ival_A
def test_conv_quarterly(self):
# frequency conversion tests: from Quarterly Frequency
ival_Q = Period(freq="Q", year=2007, quarter=1)
ival_Q_end_of_year = Period(freq="Q", year=2007, quarter=4)
ival_QEJAN = Period(freq="Q-JAN", year=2007, quarter=1)
ival_QEJUN = Period(freq="Q-JUN", year=2007, quarter=1)
ival_Q_to_A = Period(freq="A", year=2007)
ival_Q_to_M_start = Period(freq="M", year=2007, month=1)
ival_Q_to_M_end = Period(freq="M", year=2007, month=3)
ival_Q_to_W_start = Period(freq="W", year=2007, month=1, day=1)
ival_Q_to_W_end = Period(freq="W", year=2007, month=3, day=31)
ival_Q_to_B_start = Period(freq="B", year=2007, month=1, day=1)
ival_Q_to_B_end = Period(freq="B", year=2007, month=3, day=30)
ival_Q_to_D_start = Period(freq="D", year=2007, month=1, day=1)
ival_Q_to_D_end = Period(freq="D", year=2007, month=3, day=31)
ival_Q_to_H_start = Period(freq="H", year=2007, month=1, day=1, hour=0)
ival_Q_to_H_end = Period(freq="H", year=2007, month=3, day=31, hour=23)
ival_Q_to_T_start = Period(
freq="Min", year=2007, month=1, day=1, hour=0, minute=0
)
ival_Q_to_T_end = Period(
freq="Min", year=2007, month=3, day=31, hour=23, minute=59
)
ival_Q_to_S_start = Period(
freq="S", year=2007, month=1, day=1, hour=0, minute=0, second=0
)
ival_Q_to_S_end = Period(
freq="S", year=2007, month=3, day=31, hour=23, minute=59, second=59
)
ival_QEJAN_to_D_start = Period(freq="D", year=2006, month=2, day=1)
ival_QEJAN_to_D_end = Period(freq="D", year=2006, month=4, day=30)
ival_QEJUN_to_D_start = Period(freq="D", year=2006, month=7, day=1)
ival_QEJUN_to_D_end = Period(freq="D", year=2006, month=9, day=30)
assert ival_Q.asfreq("A") == ival_Q_to_A
assert ival_Q_end_of_year.asfreq("A") == ival_Q_to_A
assert ival_Q.asfreq("M", "S") == ival_Q_to_M_start
assert ival_Q.asfreq("M", "E") == ival_Q_to_M_end
assert ival_Q.asfreq("W", "S") == ival_Q_to_W_start
assert ival_Q.asfreq("W", "E") == ival_Q_to_W_end
assert ival_Q.asfreq("B", "S") == ival_Q_to_B_start
assert ival_Q.asfreq("B", "E") == ival_Q_to_B_end
assert ival_Q.asfreq("D", "S") == ival_Q_to_D_start
assert ival_Q.asfreq("D", "E") == ival_Q_to_D_end
assert ival_Q.asfreq("H", "S") == ival_Q_to_H_start
assert ival_Q.asfreq("H", "E") == ival_Q_to_H_end
assert ival_Q.asfreq("Min", "S") == ival_Q_to_T_start
assert ival_Q.asfreq("Min", "E") == ival_Q_to_T_end
assert ival_Q.asfreq("S", "S") == ival_Q_to_S_start
assert ival_Q.asfreq("S", "E") == ival_Q_to_S_end
assert ival_QEJAN.asfreq("D", "S") == ival_QEJAN_to_D_start
assert ival_QEJAN.asfreq("D", "E") == ival_QEJAN_to_D_end
assert ival_QEJUN.asfreq("D", "S") == ival_QEJUN_to_D_start
assert ival_QEJUN.asfreq("D", "E") == ival_QEJUN_to_D_end
assert ival_Q.asfreq("Q") == ival_Q
def test_conv_monthly(self):
# frequency conversion tests: from Monthly Frequency
ival_M = Period(freq="M", year=2007, month=1)
ival_M_end_of_year = Period(freq="M", year=2007, month=12)
ival_M_end_of_quarter = Period(freq="M", year=2007, month=3)
ival_M_to_A = Period(freq="A", year=2007)
ival_M_to_Q = Period(freq="Q", year=2007, quarter=1)
ival_M_to_W_start = Period(freq="W", year=2007, month=1, day=1)
ival_M_to_W_end = Period(freq="W", year=2007, month=1, day=31)
ival_M_to_B_start = Period(freq="B", year=2007, month=1, day=1)
ival_M_to_B_end = Period(freq="B", year=2007, month=1, day=31)
ival_M_to_D_start = Period(freq="D", year=2007, month=1, day=1)
ival_M_to_D_end = Period(freq="D", year=2007, month=1, day=31)
ival_M_to_H_start = Period(freq="H", year=2007, month=1, day=1, hour=0)
ival_M_to_H_end = Period(freq="H", year=2007, month=1, day=31, hour=23)
ival_M_to_T_start = Period(
freq="Min", year=2007, month=1, day=1, hour=0, minute=0
)
ival_M_to_T_end = Period(
freq="Min", year=2007, month=1, day=31, hour=23, minute=59
)
ival_M_to_S_start = Period(
freq="S", year=2007, month=1, day=1, hour=0, minute=0, second=0
)
ival_M_to_S_end = Period(
freq="S", year=2007, month=1, day=31, hour=23, minute=59, second=59
)
assert ival_M.asfreq("A") == ival_M_to_A
assert ival_M_end_of_year.asfreq("A") == ival_M_to_A
assert ival_M.asfreq("Q") == ival_M_to_Q
assert ival_M_end_of_quarter.asfreq("Q") == ival_M_to_Q
assert ival_M.asfreq("W", "S") == ival_M_to_W_start
assert ival_M.asfreq("W", "E") == ival_M_to_W_end
assert ival_M.asfreq("B", "S") == ival_M_to_B_start
assert ival_M.asfreq("B", "E") == ival_M_to_B_end
assert ival_M.asfreq("D", "S") == ival_M_to_D_start
assert ival_M.asfreq("D", "E") == ival_M_to_D_end
assert ival_M.asfreq("H", "S") == ival_M_to_H_start
assert ival_M.asfreq("H", "E") == ival_M_to_H_end
assert ival_M.asfreq("Min", "S") == ival_M_to_T_start
assert ival_M.asfreq("Min", "E") == ival_M_to_T_end
assert ival_M.asfreq("S", "S") == ival_M_to_S_start
assert ival_M.asfreq("S", "E") == ival_M_to_S_end
assert ival_M.asfreq("M") == ival_M
def test_conv_weekly(self):
# frequency conversion tests: from Weekly Frequency
ival_W = Period(freq="W", year=2007, month=1, day=1)
ival_WSUN = Period(freq="W", year=2007, month=1, day=7)
ival_WSAT = Period(freq="W-SAT", year=2007, month=1, day=6)
ival_WFRI = Period(freq="W-FRI", year=2007, month=1, day=5)
ival_WTHU = Period(freq="W-THU", year=2007, month=1, day=4)
ival_WWED = Period(freq="W-WED", year=2007, month=1, day=3)
ival_WTUE = Period(freq="W-TUE", year=2007, month=1, day=2)
ival_WMON = Period(freq="W-MON", year=2007, month=1, day=1)
ival_WSUN_to_D_start = Period(freq="D", year=2007, month=1, day=1)
ival_WSUN_to_D_end = Period(freq="D", year=2007, month=1, day=7)
ival_WSAT_to_D_start = Period(freq="D", year=2006, month=12, day=31)
ival_WSAT_to_D_end = Period(freq="D", year=2007, month=1, day=6)
ival_WFRI_to_D_start = Period(freq="D", year=2006, month=12, day=30)
ival_WFRI_to_D_end = Period(freq="D", year=2007, month=1, day=5)
ival_WTHU_to_D_start = Period(freq="D", year=2006, month=12, day=29)
ival_WTHU_to_D_end = Period(freq="D", year=2007, month=1, day=4)
ival_WWED_to_D_start = Period(freq="D", year=2006, month=12, day=28)
ival_WWED_to_D_end = Period(freq="D", year=2007, month=1, day=3)
ival_WTUE_to_D_start = Period(freq="D", year=2006, month=12, day=27)
ival_WTUE_to_D_end = Period(freq="D", year=2007, month=1, day=2)
ival_WMON_to_D_start = Period(freq="D", year=2006, month=12, day=26)
ival_WMON_to_D_end = Period(freq="D", year=2007, month=1, day=1)
ival_W_end_of_year = Period(freq="W", year=2007, month=12, day=31)
ival_W_end_of_quarter = Period(freq="W", year=2007, month=3, day=31)
ival_W_end_of_month = Period(freq="W", year=2007, month=1, day=31)
ival_W_to_A = Period(freq="A", year=2007)
ival_W_to_Q = Period(freq="Q", year=2007, quarter=1)
ival_W_to_M = Period(freq="M", year=2007, month=1)
if Period(freq="D", year=2007, month=12, day=31).weekday == 6:
ival_W_to_A_end_of_year = Period(freq="A", year=2007)
else:
ival_W_to_A_end_of_year = Period(freq="A", year=2008)
if Period(freq="D", year=2007, month=3, day=31).weekday == 6:
ival_W_to_Q_end_of_quarter = Period(freq="Q", year=2007, quarter=1)
else:
ival_W_to_Q_end_of_quarter = Period(freq="Q", year=2007, quarter=2)
if Period(freq="D", year=2007, month=1, day=31).weekday == 6:
ival_W_to_M_end_of_month = Period(freq="M", year=2007, month=1)
else:
ival_W_to_M_end_of_month = Period(freq="M", year=2007, month=2)
ival_W_to_B_start = Period(freq="B", year=2007, month=1, day=1)
ival_W_to_B_end = Period(freq="B", year=2007, month=1, day=5)
ival_W_to_D_start = Period(freq="D", year=2007, month=1, day=1)
ival_W_to_D_end = Period(freq="D", year=2007, month=1, day=7)
ival_W_to_H_start = Period(freq="H", year=2007, month=1, day=1, hour=0)
ival_W_to_H_end = Period(freq="H", year=2007, month=1, day=7, hour=23)
ival_W_to_T_start = Period(
freq="Min", year=2007, month=1, day=1, hour=0, minute=0
)
ival_W_to_T_end = Period(
freq="Min", year=2007, month=1, day=7, hour=23, minute=59
)
ival_W_to_S_start = Period(
freq="S", year=2007, month=1, day=1, hour=0, minute=0, second=0
)
ival_W_to_S_end = Period(
freq="S", year=2007, month=1, day=7, hour=23, minute=59, second=59
)
assert ival_W.asfreq("A") == ival_W_to_A
assert ival_W_end_of_year.asfreq("A") == ival_W_to_A_end_of_year
assert ival_W.asfreq("Q") == ival_W_to_Q
assert ival_W_end_of_quarter.asfreq("Q") == ival_W_to_Q_end_of_quarter
assert ival_W.asfreq("M") == ival_W_to_M
assert ival_W_end_of_month.asfreq("M") == ival_W_to_M_end_of_month
assert ival_W.asfreq("B", "S") == ival_W_to_B_start
assert ival_W.asfreq("B", "E") == ival_W_to_B_end
assert ival_W.asfreq("D", "S") == ival_W_to_D_start
assert ival_W.asfreq("D", "E") == ival_W_to_D_end
assert ival_WSUN.asfreq("D", "S") == ival_WSUN_to_D_start
assert ival_WSUN.asfreq("D", "E") == ival_WSUN_to_D_end
assert ival_WSAT.asfreq("D", "S") == ival_WSAT_to_D_start
assert ival_WSAT.asfreq("D", "E") == ival_WSAT_to_D_end
assert ival_WFRI.asfreq("D", "S") == ival_WFRI_to_D_start
assert ival_WFRI.asfreq("D", "E") == ival_WFRI_to_D_end
assert ival_WTHU.asfreq("D", "S") == ival_WTHU_to_D_start
assert ival_WTHU.asfreq("D", "E") == ival_WTHU_to_D_end
assert ival_WWED.asfreq("D", "S") == ival_WWED_to_D_start
assert ival_WWED.asfreq("D", "E") == ival_WWED_to_D_end
assert ival_WTUE.asfreq("D", "S") == ival_WTUE_to_D_start
assert ival_WTUE.asfreq("D", "E") == ival_WTUE_to_D_end
assert ival_WMON.asfreq("D", "S") == ival_WMON_to_D_start
assert ival_WMON.asfreq("D", "E") == ival_WMON_to_D_end
assert ival_W.asfreq("H", "S") == ival_W_to_H_start
assert ival_W.asfreq("H", "E") == ival_W_to_H_end
assert ival_W.asfreq("Min", "S") == ival_W_to_T_start
assert ival_W.asfreq("Min", "E") == ival_W_to_T_end
assert ival_W.asfreq("S", "S") == ival_W_to_S_start
assert ival_W.asfreq("S", "E") == ival_W_to_S_end
assert ival_W.asfreq("W") == ival_W
msg = INVALID_FREQ_ERR_MSG
with pytest.raises(ValueError, match=msg):
ival_W.asfreq("WK")
def test_conv_weekly_legacy(self):
# frequency conversion tests: from Weekly Frequency
msg = INVALID_FREQ_ERR_MSG
with pytest.raises(ValueError, match=msg):
Period(freq="WK", year=2007, month=1, day=1)
with pytest.raises(ValueError, match=msg):
Period(freq="WK-SAT", year=2007, month=1, day=6)
with pytest.raises(ValueError, match=msg):
Period(freq="WK-FRI", year=2007, month=1, day=5)
with pytest.raises(ValueError, match=msg):
Period(freq="WK-THU", year=2007, month=1, day=4)
with pytest.raises(ValueError, match=msg):
Period(freq="WK-WED", year=2007, month=1, day=3)
with pytest.raises(ValueError, match=msg):
Period(freq="WK-TUE", year=2007, month=1, day=2)
with pytest.raises(ValueError, match=msg):
Period(freq="WK-MON", year=2007, month=1, day=1)
def test_conv_business(self):
# frequency conversion tests: from Business Frequency"
ival_B = Period(freq="B", year=2007, month=1, day=1)
ival_B_end_of_year = Period(freq="B", year=2007, month=12, day=31)
ival_B_end_of_quarter = Period(freq="B", year=2007, month=3, day=30)
ival_B_end_of_month = Period(freq="B", year=2007, month=1, day=31)
ival_B_end_of_week = Period(freq="B", year=2007, month=1, day=5)
ival_B_to_A = Period(freq="A", year=2007)
ival_B_to_Q = Period(freq="Q", year=2007, quarter=1)
ival_B_to_M = Period(freq="M", year=2007, month=1)
ival_B_to_W = Period(freq="W", year=2007, month=1, day=7)
ival_B_to_D = Period(freq="D", year=2007, month=1, day=1)
ival_B_to_H_start = Period(freq="H", year=2007, month=1, day=1, hour=0)
ival_B_to_H_end = Period(freq="H", year=2007, month=1, day=1, hour=23)
ival_B_to_T_start = Period(
freq="Min", year=2007, month=1, day=1, hour=0, minute=0
)
ival_B_to_T_end = Period(
freq="Min", year=2007, month=1, day=1, hour=23, minute=59
)
ival_B_to_S_start = Period(
freq="S", year=2007, month=1, day=1, hour=0, minute=0, second=0
)
ival_B_to_S_end = Period(
freq="S", year=2007, month=1, day=1, hour=23, minute=59, second=59
)
assert ival_B.asfreq("A") == ival_B_to_A
assert ival_B_end_of_year.asfreq("A") == ival_B_to_A
assert ival_B.asfreq("Q") == ival_B_to_Q
assert ival_B_end_of_quarter.asfreq("Q") == ival_B_to_Q
assert ival_B.asfreq("M") == ival_B_to_M
assert ival_B_end_of_month.asfreq("M") == ival_B_to_M
assert ival_B.asfreq("W") == ival_B_to_W
assert ival_B_end_of_week.asfreq("W") == ival_B_to_W
assert ival_B.asfreq("D") == ival_B_to_D
assert ival_B.asfreq("H", "S") == ival_B_to_H_start
assert ival_B.asfreq("H", "E") == ival_B_to_H_end
assert ival_B.asfreq("Min", "S") == ival_B_to_T_start
assert ival_B.asfreq("Min", "E") == ival_B_to_T_end
assert ival_B.asfreq("S", "S") == ival_B_to_S_start
assert ival_B.asfreq("S", "E") == ival_B_to_S_end
assert ival_B.asfreq("B") == ival_B
def test_conv_daily(self):
# frequency conversion tests: from Business Frequency"
ival_D = Period(freq="D", year=2007, month=1, day=1)
ival_D_end_of_year = Period(freq="D", year=2007, month=12, day=31)
ival_D_end_of_quarter = Period(freq="D", year=2007, month=3, day=31)
ival_D_end_of_month = Period(freq="D", year=2007, month=1, day=31)
ival_D_end_of_week = Period(freq="D", year=2007, month=1, day=7)
ival_D_friday = Period(freq="D", year=2007, month=1, day=5)
ival_D_saturday = Period(freq="D", year=2007, month=1, day=6)
ival_D_sunday = Period(freq="D", year=2007, month=1, day=7)
# TODO: unused?
# ival_D_monday = Period(freq='D', year=2007, month=1, day=8)
ival_B_friday = Period(freq="B", year=2007, month=1, day=5)
ival_B_monday = Period(freq="B", year=2007, month=1, day=8)
ival_D_to_A = Period(freq="A", year=2007)
ival_Deoq_to_AJAN = | Period(freq="A-JAN", year=2008) | pandas.Period |
# Copyright (c) 2019-2020, NVIDIA CORPORATION.
import datetime as dt
import re
import cupy as cp
import numpy as np
import pandas as pd
import pyarrow as pa
import pytest
from pandas.util.testing import (
assert_frame_equal,
assert_index_equal,
assert_series_equal,
)
import cudf
from cudf.core import DataFrame, Series
from cudf.core.index import DatetimeIndex
from cudf.tests.utils import NUMERIC_TYPES, assert_eq
def data1():
return pd.date_range("20010101", "20020215", freq="400h", name="times")
def data2():
return pd.date_range("20010101", "20020215", freq="400h", name="times")
def timeseries_us_data():
return pd.date_range(
"2019-07-16 00:00:00",
"2019-07-16 00:00:01",
freq="5555us",
name="times",
)
def timestamp_ms_data():
return pd.Series(
[
"2019-07-16 00:00:00.333",
"2019-07-16 00:00:00.666",
"2019-07-16 00:00:00.888",
]
)
def timestamp_us_data():
return pd.Series(
[
"2019-07-16 00:00:00.333333",
"2019-07-16 00:00:00.666666",
"2019-07-16 00:00:00.888888",
]
)
def timestamp_ns_data():
return pd.Series(
[
"2019-07-16 00:00:00.333333333",
"2019-07-16 00:00:00.666666666",
"2019-07-16 00:00:00.888888888",
]
)
def numerical_data():
return np.arange(1, 10)
fields = ["year", "month", "day", "hour", "minute", "second", "weekday"]
@pytest.mark.parametrize("data", [data1(), data2()])
def test_series(data):
pd_data = pd.Series(data.copy())
gdf_data = Series(pd_data)
assert_eq(pd_data, gdf_data)
@pytest.mark.parametrize(
"lhs_dtype",
["datetime64[s]", "datetime64[ms]", "datetime64[us]", "datetime64[ns]"],
)
@pytest.mark.parametrize(
"rhs_dtype",
["datetime64[s]", "datetime64[ms]", "datetime64[us]", "datetime64[ns]"],
)
def test_datetime_series_binops_pandas(lhs_dtype, rhs_dtype):
pd_data_1 = pd.Series(
pd.date_range("20010101", "20020215", freq="400h", name="times")
)
pd_data_2 = pd.Series(
pd.date_range("20010101", "20020215", freq="401h", name="times")
)
gdf_data_1 = Series(pd_data_1).astype(lhs_dtype)
gdf_data_2 = Series(pd_data_2).astype(rhs_dtype)
assert_eq(pd_data_1, gdf_data_1.astype("datetime64[ns]"))
assert_eq(pd_data_2, gdf_data_2.astype("datetime64[ns]"))
assert_eq(pd_data_1 < pd_data_2, gdf_data_1 < gdf_data_2)
assert_eq(pd_data_1 > pd_data_2, gdf_data_1 > gdf_data_2)
assert_eq(pd_data_1 == pd_data_2, gdf_data_1 == gdf_data_2)
assert_eq(pd_data_1 <= pd_data_2, gdf_data_1 <= gdf_data_2)
assert_eq(pd_data_1 >= pd_data_2, gdf_data_1 >= gdf_data_2)
@pytest.mark.parametrize(
"lhs_dtype",
["datetime64[s]", "datetime64[ms]", "datetime64[us]", "datetime64[ns]"],
)
@pytest.mark.parametrize(
"rhs_dtype",
["datetime64[s]", "datetime64[ms]", "datetime64[us]", "datetime64[ns]"],
)
def test_datetime_series_binops_numpy(lhs_dtype, rhs_dtype):
pd_data_1 = pd.Series(
pd.date_range("20010101", "20020215", freq="400h", name="times")
)
pd_data_2 = pd.Series(
pd.date_range("20010101", "20020215", freq="401h", name="times")
)
gdf_data_1 = Series(pd_data_1).astype(lhs_dtype)
gdf_data_2 = Series(pd_data_2).astype(rhs_dtype)
np_data_1 = np.array(pd_data_1).astype(lhs_dtype)
np_data_2 = np.array(pd_data_2).astype(rhs_dtype)
np.testing.assert_equal(np_data_1, gdf_data_1.to_array())
np.testing.assert_equal(np_data_2, gdf_data_2.to_array())
np.testing.assert_equal(
np.less(np_data_1, np_data_2), (gdf_data_1 < gdf_data_2).to_array()
)
np.testing.assert_equal(
np.greater(np_data_1, np_data_2), (gdf_data_1 > gdf_data_2).to_array()
)
np.testing.assert_equal(
np.equal(np_data_1, np_data_2), (gdf_data_1 == gdf_data_2).to_array()
)
np.testing.assert_equal(
np.less_equal(np_data_1, np_data_2),
(gdf_data_1 <= gdf_data_2).to_array(),
)
np.testing.assert_equal(
np.greater_equal(np_data_1, np_data_2),
(gdf_data_1 >= gdf_data_2).to_array(),
)
@pytest.mark.parametrize("data", [data1(), data2()])
def test_dt_ops(data):
pd_data = pd.Series(data.copy())
gdf_data = Series(data.copy())
assert_eq(pd_data == pd_data, gdf_data == gdf_data)
assert_eq(pd_data < pd_data, gdf_data < gdf_data)
assert_eq(pd_data > pd_data, gdf_data > gdf_data)
# libgdf doesn't respect timezones
@pytest.mark.parametrize("data", [data1()])
@pytest.mark.parametrize("field", fields)
def test_dt_series(data, field):
pd_data = pd.Series(data.copy())
gdf_data = Series(pd_data)
base = getattr(pd_data.dt, field)
test = getattr(gdf_data.dt, field).to_pandas().astype("int64")
assert_series_equal(base, test)
@pytest.mark.parametrize("data", [data1()])
@pytest.mark.parametrize("field", fields)
def test_dt_index(data, field):
pd_data = data.copy()
gdf_data = DatetimeIndex(pd_data)
assert_index_equal(
getattr(gdf_data, field).to_pandas(), getattr(pd_data, field)
)
def test_setitem_datetime():
df = DataFrame()
df["date"] = pd.date_range("20010101", "20010105").values
assert np.issubdtype(df.date.dtype, np.datetime64)
def test_sort_datetime():
df = pd.DataFrame()
df["date"] = np.array(
[
np.datetime64("2016-11-20"),
np.datetime64("2020-11-20"),
np.datetime64("2019-11-20"),
np.datetime64("1918-11-20"),
np.datetime64("2118-11-20"),
]
)
df["vals"] = np.random.sample(len(df["date"]))
gdf = cudf.from_pandas(df)
s_df = df.sort_values(by="date")
s_gdf = gdf.sort_values(by="date")
assert_eq(s_df, s_gdf)
def test_issue_165():
df_pandas = pd.DataFrame()
start_date = dt.datetime.strptime("2000-10-21", "%Y-%m-%d")
data = [(start_date + dt.timedelta(days=x)) for x in range(6)]
df_pandas["dates"] = data
df_pandas["num"] = [1, 2, 3, 4, 5, 6]
df_cudf = DataFrame.from_pandas(df_pandas)
base = df_pandas.query("dates==@start_date")
test = df_cudf.query("dates==@start_date")
assert_frame_equal(base, test.to_pandas())
assert len(test) > 0
mask = df_cudf.dates == start_date
base_mask = df_pandas.dates == start_date
assert_series_equal(mask.to_pandas(), base_mask, check_names=False)
assert mask.to_pandas().sum() > 0
start_date_ts = pd.Timestamp(start_date)
test = df_cudf.query("dates==@start_date_ts")
base = df_pandas.query("dates==@start_date_ts")
assert_frame_equal(base, test.to_pandas())
assert len(test) > 0
mask = df_cudf.dates == start_date_ts
base_mask = df_pandas.dates == start_date_ts
assert_series_equal(mask.to_pandas(), base_mask, check_names=False)
assert mask.to_pandas().sum() > 0
start_date_np = np.datetime64(start_date_ts, "ns")
test = df_cudf.query("dates==@start_date_np")
base = df_pandas.query("dates==@start_date_np")
assert_frame_equal(base, test.to_pandas())
assert len(test) > 0
mask = df_cudf.dates == start_date_np
base_mask = df_pandas.dates == start_date_np
assert_series_equal(mask.to_pandas(), base_mask, check_names=False)
assert mask.to_pandas().sum() > 0
@pytest.mark.parametrize("data", [data1(), data2()])
@pytest.mark.parametrize("dtype", NUMERIC_TYPES)
def test_typecast_from_datetime(data, dtype):
pd_data = pd.Series(data.copy())
np_data = np.array(pd_data)
gdf_data = Series(pd_data)
np_casted = np_data.astype(dtype)
gdf_casted = gdf_data.astype(dtype)
np.testing.assert_equal(np_casted, gdf_casted.to_array())
@pytest.mark.parametrize("data", [data1(), data2()])
@pytest.mark.parametrize(
"dtype",
["datetime64[s]", "datetime64[ms]", "datetime64[us]", "datetime64[ns]"],
)
def test_typecast_from_datetime_to_int64_to_datetime(data, dtype):
pd_data = pd.Series(data.copy())
np_data = np.array(pd_data)
gdf_data = Series(pd_data)
np_casted = np_data.astype(np.int64).astype(dtype)
gdf_casted = gdf_data.astype(np.int64).astype(dtype)
np.testing.assert_equal(np_casted, gdf_casted.to_array())
@pytest.mark.parametrize("data", [timeseries_us_data()])
@pytest.mark.parametrize(
"dtype",
["datetime64[s]", "datetime64[ms]", "datetime64[us]", "datetime64[ns]"],
)
def test_typecast_to_different_datetime_resolutions(data, dtype):
pd_data = pd.Series(data.copy())
np_data = np.array(pd_data).astype(dtype)
gdf_series = Series(pd_data).astype(dtype)
np.testing.assert_equal(np_data, gdf_series.to_array())
@pytest.mark.parametrize(
"data", [timestamp_ms_data(), timestamp_us_data(), timestamp_ns_data()]
)
@pytest.mark.parametrize(
"dtype",
["datetime64[s]", "datetime64[ms]", "datetime64[us]", "datetime64[ns]"],
)
def test_string_timstamp_typecast_to_different_datetime_resolutions(
data, dtype
):
pd_sr = data
gdf_sr = cudf.Series.from_pandas(pd_sr)
expect = pd_sr.values.astype(dtype)
got = gdf_sr.astype(dtype).values_host
np.testing.assert_equal(expect, got)
@pytest.mark.parametrize("data", [numerical_data()])
@pytest.mark.parametrize("from_dtype", NUMERIC_TYPES)
@pytest.mark.parametrize(
"to_dtype",
["datetime64[s]", "datetime64[ms]", "datetime64[us]", "datetime64[ns]"],
)
def test_typecast_to_datetime(data, from_dtype, to_dtype):
np_data = data.astype(from_dtype)
gdf_data = Series(np_data)
np_casted = np_data.astype(to_dtype)
gdf_casted = gdf_data.astype(to_dtype)
np.testing.assert_equal(np_casted, gdf_casted.to_array())
@pytest.mark.parametrize("data", [numerical_data()])
@pytest.mark.parametrize("from_dtype", NUMERIC_TYPES)
@pytest.mark.parametrize(
"to_dtype",
["datetime64[s]", "datetime64[ms]", "datetime64[us]", "datetime64[ns]"],
)
def test_typecast_to_from_datetime(data, from_dtype, to_dtype):
np_data = data.astype(from_dtype)
gdf_data = Series(np_data)
np_casted = np_data.astype(to_dtype).astype(from_dtype)
gdf_casted = gdf_data.astype(to_dtype).astype(from_dtype)
np.testing.assert_equal(np_casted, gdf_casted.to_array())
@pytest.mark.parametrize("data", [numerical_data()])
@pytest.mark.parametrize(
"from_dtype",
["datetime64[s]", "datetime64[ms]", "datetime64[us]", "datetime64[ns]"],
)
@pytest.mark.parametrize(
"to_dtype",
["datetime64[s]", "datetime64[ms]", "datetime64[us]", "datetime64[ns]"],
)
def test_typecast_from_datetime_to_datetime(data, from_dtype, to_dtype):
np_data = data.astype(from_dtype)
gdf_col = Series(np_data)._column
np_casted = np_data.astype(to_dtype)
gdf_casted = gdf_col.astype(to_dtype)
np.testing.assert_equal(np_casted, gdf_casted.to_array())
@pytest.mark.parametrize("data", [numerical_data()])
@pytest.mark.parametrize("nulls", ["some", "all"])
def test_to_from_pandas_nulls(data, nulls):
pd_data = pd.Series(data.copy().astype("datetime64[ns]"))
if nulls == "some":
# Fill half the values with NaT
pd_data[list(range(0, len(pd_data), 2))] = np.datetime64("nat", "ns")
elif nulls == "all":
# Fill all the values with NaT
pd_data[:] = np.datetime64("nat", "ns")
gdf_data = Series.from_pandas(pd_data)
expect = pd_data
got = gdf_data.to_pandas()
assert_eq(expect, got)
@pytest.mark.parametrize(
"dtype",
["datetime64[s]", "datetime64[ms]", "datetime64[us]", "datetime64[ns]"],
)
def test_datetime_to_arrow(dtype):
timestamp = (
cudf.datasets.timeseries(
start="2000-01-01", end="2000-01-02", freq="3600s", dtypes={}
)
.reset_index()["timestamp"]
.reset_index(drop=True)
)
gdf = DataFrame({"timestamp": timestamp.astype(dtype)})
assert_eq(gdf, DataFrame.from_arrow(gdf.to_arrow(preserve_index=False)))
@pytest.mark.parametrize(
"data",
[
[],
pd.Series(pd.date_range("2010-01-01", "2010-02-01")),
pd.Series([None, None], dtype="datetime64[ns]"),
],
)
@pytest.mark.parametrize(
"nulls", ["none", pytest.param("some", marks=pytest.mark.xfail)]
)
def test_datetime_unique(data, nulls):
psr = pd.Series(data)
print(data)
print(nulls)
if len(data) > 0:
if nulls == "some":
p = np.random.randint(0, len(data), 2)
psr[p] = None
gsr = cudf.from_pandas(psr)
expected = psr.unique()
got = gsr.unique()
assert_eq(pd.Series(expected), got.to_pandas())
@pytest.mark.parametrize(
"data",
[
[],
pd.Series(pd.date_range("2010-01-01", "2010-02-01")),
pd.Series([None, None], dtype="datetime64[ns]"),
],
)
@pytest.mark.parametrize("nulls", ["none", "some"])
def test_datetime_nunique(data, nulls):
psr = pd.Series(data)
if len(data) > 0:
if nulls == "some":
p = np.random.randint(0, len(data), 2)
psr[p] = None
gsr = cudf.from_pandas(psr)
expected = psr.nunique()
got = gsr.nunique()
assert_eq(got, expected)
testdata = [
(
Series(
["2018-01-01", None, "2019-01-31", None, "2018-01-01"],
dtype="datetime64[ms]",
),
True,
),
(
Series(
[
"2018-01-01",
"2018-01-02",
"2019-01-31",
"2018-03-01",
"2018-01-01",
],
dtype="datetime64[ms]",
),
False,
),
(
Series(
np.array(
["2018-01-01", None, "2019-12-30"], dtype="datetime64[ms]"
)
),
True,
),
]
@pytest.mark.parametrize("data, expected", testdata)
def test_datetime_has_null_test(data, expected):
pd_data = data.to_pandas()
count = pd_data.notna().value_counts()
expected_count = 0
if False in count.keys():
expected_count = count[False]
assert_eq(expected, data.has_nulls)
assert_eq(expected_count, data.null_count)
def test_datetime_has_null_test_pyarrow():
data = Series(
pa.array(
[0, np.iinfo("int64").min, np.iinfo("int64").max, None],
type=pa.timestamp("ns"),
)
)
expected = True
expected_count = 1
assert_eq(expected, data.has_nulls)
assert_eq(expected_count, data.null_count)
def test_datetime_dataframe():
data = {
"timearray": np.array(
[0, 1, None, 2, 20, None, 897], dtype="datetime64[ms]"
)
}
gdf = cudf.DataFrame(data)
pdf = pd.DataFrame(data)
assert_eq(pdf, gdf)
assert_eq(pdf.dropna(), gdf.dropna())
assert_eq(pdf.isnull(), gdf.isnull())
data = np.array([0, 1, None, 2, 20, None, 897], dtype="datetime64[ms]")
gs = cudf.Series(data)
ps = pd.Series(data)
assert_eq(ps, gs)
assert_eq(ps.dropna(), gs.dropna())
assert_eq(ps.isnull(), gs.isnull())
@pytest.mark.parametrize(
"data",
[
None,
[],
pd.Series([]),
pd.Index([]),
pd.Series([1, 2, 3]),
pd.Series([0, 1, -1]),
pd.Series([0, 1, -1, 100.3, 200, 47637289]),
pd.Series(["2012-10-11", "2010-01-01", "2016-07-07", "2014-02-02"]),
[1, 2, 3, 100, -123, -1, 0, 1000000000000679367],
pd.DataFrame({"year": [2015, 2016], "month": [2, 3], "day": [4, 5]}),
pd.DataFrame(
{"year": ["2015", "2016"], "month": ["2", "3"], "day": [4, 5]}
),
pd.DataFrame(
{
"year": [2015, 2016],
"month": [2, 3],
"day": [4, 5],
"minute": [1, 100],
"second": [90, 10],
"hour": [1, 0.5],
},
index=["a", "b"],
),
pd.DataFrame(
{
"year": [],
"month": [],
"day": [],
"minute": [],
"second": [],
"hour": [],
},
),
["2012-10-11", "2010-01-01", "2016-07-07", "2014-02-02"],
pd.Index([1, 2, 3, 4]),
pd.DatetimeIndex(
["1970-01-01 00:00:00.000000001", "1970-01-01 00:00:00.000000002"],
dtype="datetime64[ns]",
freq=None,
),
pd.DatetimeIndex([], dtype="datetime64[ns]", freq=None,),
pd.Series([1, 2, 3]).astype("datetime64[ns]"),
pd.Series([1, 2, 3]).astype("datetime64[us]"),
pd.Series([1, 2, 3]).astype("datetime64[ms]"),
pd.Series([1, 2, 3]).astype("datetime64[s]"),
pd.Series([1, 2, 3]).astype("datetime64[D]"),
1,
100,
17,
53.638435454,
np.array([1, 10, 15, 478925, 2327623467]),
np.array([0.3474673, -10, 15, 478925.34345, 2327623467]),
],
)
@pytest.mark.parametrize("dayfirst", [True, False])
@pytest.mark.parametrize("infer_datetime_format", [True, False])
def test_cudf_to_datetime(data, dayfirst, infer_datetime_format):
pd_data = data
if isinstance(pd_data, (pd.Series, pd.DataFrame, pd.Index)):
gd_data = cudf.from_pandas(pd_data)
else:
if type(pd_data).__module__ == np.__name__:
gd_data = cp.array(pd_data)
else:
gd_data = pd_data
expected = pd.to_datetime(
pd_data, dayfirst=dayfirst, infer_datetime_format=infer_datetime_format
)
actual = cudf.to_datetime(
gd_data, dayfirst=dayfirst, infer_datetime_format=infer_datetime_format
)
assert_eq(actual, expected)
@pytest.mark.parametrize(
"data",
[
"2",
["1", "2", "3"],
["1/1/1", "2/2/2", "1"],
pd.DataFrame(
{
"year": [2015, 2016],
"month": [2, 3],
"day": [4, 5],
"minute": [1, 100],
"second": [90, 10],
"hour": [1, 0],
"blablacol": [1, 1],
}
),
pd.DataFrame(
{
"month": [2, 3],
"day": [4, 5],
"minute": [1, 100],
"second": [90, 10],
"hour": [1, 0],
}
),
],
)
def test_to_datetime_errors(data):
pd_data = data
if isinstance(pd_data, (pd.Series, pd.DataFrame, pd.Index)):
gd_data = cudf.from_pandas(pd_data)
else:
gd_data = pd_data
try:
pd.to_datetime(pd_data)
except Exception as e:
with pytest.raises(type(e), match=re.escape(str(e))):
cudf.to_datetime(gd_data)
else:
raise AssertionError("Was expecting `pd.to_datetime` to fail")
def test_to_datetime_not_implemented():
with pytest.raises(NotImplementedError):
cudf.to_datetime([], exact=False)
with pytest.raises(NotImplementedError):
cudf.to_datetime([], origin="julian")
with pytest.raises(NotImplementedError):
cudf.to_datetime([], yearfirst=True)
@pytest.mark.parametrize(
"data",
[
1,
[],
pd.Series([]),
pd.Index([]),
pd.Series([1, 2, 3]),
pd.Series([1, 2.4, 3]),
pd.Series([0, 1, -1]),
pd.Series([0, 1, -1, 100, 200, 47637]),
[10, 12, 1200, 15003],
pd.DatetimeIndex([], dtype="datetime64[ns]", freq=None,),
| pd.Index([1, 2, 3, 4]) | pandas.Index |
# -*- coding: utf-8 -*-
"""
Created on Wed Sep 25 16:14:12 2019
@author: <NAME>
"""
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
#import graphviz
import os
import seaborn as sns
from scipy.stats import chi2_contingency
os.chdir(r"E:\PYTHON NOTES\projects\cab fare prediction")
dataset_train=pd.read_csv("train_cab.csv")
dataset_test=pd.read_csv("test.csv")
dataset_train.describe()
# dimension of data
dataset_train.shape
# Number of rows
dataset_train.shape[0]
# number of columns
dataset_train.shape[1]
# name of columns
list(dataset_train)
# data details
dataset_train.info()
dataset_train.isnull().sum()
dataset_test.isnull().sum()
sns.heatmap(dataset_train.isnull(),yticklabels=False,cbar=False, cmap='coolwarm')
# convert pickup_datetime into the required datetime format
data=[dataset_train,dataset_test]
for i in data:
i["pickup_datetime"]=pd.to_datetime(i["pickup_datetime"],errors="coerce")
dataset_train.info()
dataset_test.info()
dataset_train.isnull().sum()
dataset_test.isna().sum()
dataset_train=dataset_train.dropna(subset=["pickup_datetime"],how="all")
dataset_train["fare_amount"]=dataset_train["fare_amount"].astype(float)
np.where(dataset_train["fare_amount"]=="430-")
dataset_train["fare_amount"].loc[1123]=430
dataset_train["fare_amount"]=dataset_train["fare_amount"].astype(float)
# we will convert passenger_count into a categorical variable, because passenger count is not a continuous variable
dataset_obj=["passenger_count"]
dataset_int=["fare_amount","pickup_longitude","pickup_latitude","dropoff_longitude","dropoff_latitude"]
# data visualization
import seaborn as sns
import matplotlib.pyplot as plt
# setting up the seaborn style for plots
sns.set(style="darkgrid",palette="Set1")
# some histogram plots from the seaborn library
plt.figure(figsize=(20,20))
plt.subplot(321)
_=sns.distplot(dataset_train["fare_amount"],bins=50)
plt.subplot(322)
_=sns.distplot(dataset_train["pickup_longitude"],bins=50)
plt.subplot(323)
_=sns.distplot(dataset_train["pickup_latitude"],bins=50)
plt.subplot(324)
_ = sns.distplot(dataset_train['dropoff_longitude'],bins=50)
plt.subplot(325)
_ = sns.distplot(dataset_train['dropoff_latitude'],bins=50)
plt.savefig('hist.png')
plt.show()
import scipy.stats as stats
#Some Bee Swarmplots
# plt.title('Cab Fare w.r.t passenger_count')
plt.figure(figsize=(25,25))
#_=sns.swarmplot(x="passenger_count",y="fare_amount",data=dataset_train)
#Jointplots for Bivariate Analysis.
#Here the scatter plot has a regression line between the 2 variables, along with separate bar plots of both variables.
#Also it's annotated with the Pearson correlation coefficient and p-value.
_=sns.jointplot(x="fare_amount",y="pickup_longitude",data=dataset_train,kind="reg")
_.annotate(stats.pearsonr)
#plt.savefig("jointfplo.png")
plt.show()
_=sns.jointplot(x="fare_amount",y="pickup_latitude",data=dataset_train,kind="reg")
_.annotate(stats.pearsonr)
_=sns.jointplot(x="fare_amount",y="dropoff_longitude",data=dataset_train,kind="reg")
_.annotate(stats.pearsonr)
_=sns.jointplot(x="fare_amount",y="dropoff_latitude",data=dataset_train,kind="reg")
_.annotate(stats.pearsonr)
# some violin plots to see the spread of each variable
plt.figure(figsize=(20,20))
plt.subplot(321)
_=sns.violinplot(y="fare_amount",data=dataset_train)
plt.subplot(322)
_=sns.violinplot(y="pickup_longitude",data=dataset_train)
plt.subplot(323)
_ = sns.violinplot(y='pickup_latitude',data=dataset_train)
plt.subplot(324)
_ = sns.violinplot(y='dropoff_longitude',data=dataset_train)
plt.subplot(325)
_ = sns.violinplot(y='dropoff_latitude',data=dataset_train)
plt.savefig("violine.png")
plt.show()
#pairplot for all numeric variables
_=sns.pairplot(dataset_train.loc[:,dataset_int],kind="scatter",dropna=True)
_.fig.suptitle("pairwise plot of all numeric variables")
#plt.savefig("pairwise.png")
plt.show()
#removing outlier values which are not within the desired range, based on a basic understanding of the dataset
#1.Fare amount has negative values, which don't make sense. A fare amount cannot be negative and also cannot be 0. So we will remove these rows.
sum(dataset_train["fare_amount"]<1)
dataset_train[dataset_train["fare_amount"]<1]
dataset_train=dataset_train.drop(dataset_train[dataset_train["fare_amount"]<1].index,axis=0)
#dataset_train.loc[dataset_train["fare_amount"]<1,"fare_amount"]=np.nan
#2. passenger_count variable: passenger count cannot be more than 6
sum(dataset_train["passenger_count"]>6)
for i in range (4,11):
print("passanger_count_above"+ str(i)+ "={}".format(sum(dataset_train["passenger_count"]>i)))
# so 20 observations of passenger_count are consistently above the 6,7,8,9,10 passenger_count values, let's check them.
dataset_train[dataset_train["passenger_count"]>6]
#Also we need to see if there are any passenger_count<1
dataset_train[dataset_train["passenger_count"]<1]
len(dataset_train[dataset_train["passenger_count"]<1])
dataset_test["passenger_count"].unique()
# We will remove 20 observation which are above 6 value because a cab cannot hold these number of passengers.
dataset_train=dataset_train.drop(dataset_train[dataset_train["passenger_count"]<1].index,axis=0)
dataset_train=dataset_train.drop(dataset_train[dataset_train["passenger_count"]>6].index,axis=0)
#dataset_train.loc[dataset_train["passenger_count"]<1,"passenger_count"]=np.nan
#dataset_train.loc[dataset_train["passenger_count"]>6,"passenger_count"]=np.nan
sum(dataset_train["passenger_count"]<1)
#3.Latitudes range from -90 to 90. Longitudes range from -180 to 180. Removing values which do not satisfy these ranges
print("pickup_longitude above 180 ={}".format(sum(dataset_train["pickup_longitude"]>180)))
print("pickup_longitude above -180 = {}".format(sum(dataset_train["pickup_longitude"]<-180)))
print("pickup_latitude above 90 ={}".format(sum(dataset_train["pickup_latitude"]>90)))
print("pickup_latitude above -90 ={}".format(sum(dataset_train["pickup_latitude"]<-90)))
print('dropoff_longitude above 180={}'.format(sum(dataset_train['dropoff_longitude']>180)))
print('dropoff_longitude below -180={}'.format(sum(dataset_train['dropoff_longitude']<-180)))
print('dropoff_latitude below -90={}'.format(sum(dataset_train['dropoff_latitude']<-90)))
print('dropoff_latitude above 90={}'.format(sum(dataset_train['dropoff_latitude']>90)))
#for test data
print("pickup_longitude above 180 ={}".format(sum(dataset_test["pickup_longitude"]>180)))
print("pickup_longitude above -180 = {}".format(sum(dataset_test["pickup_longitude"]<-180)))
print("pickup_latitude above 90 ={}".format(sum(dataset_test["pickup_latitude"]>90)))
print("pickup_latitude above -90 ={}".format(sum(dataset_test["pickup_latitude"]<-90)))
print('dropoff_longitude above 180={}'.format(sum(dataset_test['dropoff_longitude']>180)))
print('dropoff_longitude below -180={}'.format(sum(dataset_test['dropoff_longitude']<-180)))
print('dropoff_latitude below -90={}'.format(sum(dataset_test['dropoff_latitude']<-90)))
print('dropoff_latitude above 90={}'.format(sum(dataset_test['dropoff_latitude']>90)))
#There's only one outlier, which is in variable pickup_latitude. So we will remove that row.
#Also we will see if there are any values equal to 0.
for i in ["pickup_longitude","pickup_latitude","dropoff_longitude","dropoff_latitude"]:
print(i,"equal to 0={}".format(sum(dataset_train[i]==0)))
#for test data
for i in ["pickup_longitude","pickup_latitude","dropoff_longitude","dropoff_latitude"]:
print(i,"equal to 0={}".format(sum(dataset_test[i]==0)))
#there are values which are equal to 0. we will remove them.
# There's only one outlier, which is in variable pickup_latitude. So we will remove that row
dataset_train=dataset_train.drop(dataset_train[dataset_train["pickup_latitude"]>90].index,axis=0)
#there are values which are equal to 0. we will remove them.
for i in ["pickup_longitude","pickup_latitude","dropoff_longitude","dropoff_latitude"]:
dataset_train=dataset_train.drop(dataset_train[dataset_train[i]==0].index,axis=0)
# for i in ['pickup_longitude','pickup_latitude','dropoff_longitude','dropoff_latitude']:
# train.loc[train[i]==0,i] = np.nan
# train.loc[train['pickup_latitude']>90,'pickup_latitude'] = np.nan
dataset_train.shape
#Missing Value Analysis
missing_value=dataset_train.isnull().sum()
missing_value = missing_value.reset_index()
missing_value = missing_value.rename(columns = {'index': 'Variables', 0: 'Missing_percentage'})
missing_value
#find out percentage of null value
missing_value['Missing_percentage'] = (missing_value['Missing_percentage']/len(dataset_train))*100
missing_value = missing_value.sort_values('Missing_percentage', ascending = False).reset_index(drop = True)
dataset_train.info()
dataset_train["fare_amount"]=dataset_train["fare_amount"].fillna(dataset_train["fare_amount"].median())
dataset_train["passenger_count"]=dataset_train["passenger_count"].fillna(dataset_train["passenger_count"].mode()[0])
dataset_train.isnull().sum()
dataset_train["passenger_count"]=dataset_train["passenger_count"].round().astype(object)
dataset_train["passenger_count"].unique()
#outliers analysis by box plot
plt.figure(figsize=(20,5))
plt.xlim(0,100)
sns.boxplot(x=dataset_train["fare_amount"],data=dataset_train,orient="h")
# sum(dataset_train['fare_amount']<22.5)/len(dataset_train['fare_amount'])*100
#Bivariate Boxplots: Boxplot for Numerical Variable Vs Categorical Variable.
plt.figure(figsize=(20,10))
plt.xlim(0,100)
_=sns.boxplot(x=dataset_train["fare_amount"],y=dataset_train["passenger_count"],data=dataset_train,orient="h")
def outlier_detect(df):
for i in df.describe().columns:
q1=df.describe().at["25%",i]
q3=df.describe().at["75%",i]
IQR=(q3-q1)
ltv=(q1-1.5*IQR)
utv=(q3+1.5*IQR)
x=np.array(df[i])
p=[]
for j in x:
if j<ltv:
p.append(ltv)
elif j>utv:
p.append(utv)
else:
p.append(j)
df[i]=p
return (df)
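# Added sketch (not part of the original script): the same IQR capping as
# outlier_detect, using pandas' vectorized clip instead of the explicit loop.
def outlier_detect_clip(df):
    for col in df.describe().columns:
        q1 = df[col].quantile(0.25)
        q3 = df[col].quantile(0.75)
        iqr = q3 - q1
        # cap values outside [q1 - 1.5*IQR, q3 + 1.5*IQR]
        df[col] = df[col].clip(lower=q1 - 1.5 * iqr, upper=q3 + 1.5 * iqr)
    return df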
dataset_int1=outlier_detect(dataset_train.loc[:,dataset_int])
dataset_test_obj=["passenger_count"]
dataset_test_int=["pickup_longitude","pickup_latitude","dropoff_longitude","dropoff_latitude"]
dataset_test1=outlier_detect(dataset_test.loc[:,dataset_test_int])
dataset_test1=pd.concat([dataset_test1,dataset_test["passenger_count"]],axis=1)
dataset_test=pd.concat([dataset_test1,dataset_test["pickup_datetime"]],axis=1)
#determine corr
corr=dataset_int1.corr()
f,ax=plt.subplots(figsize=(7,5))
sns.heatmap(corr,mask=np.zeros_like(corr,dtype=bool),cmap=sns.diverging_palette(220,10,as_cmap=True),square=True,ax=ax)
# """feature engineering"""
#1.we will derive new features from pickup_datetime variable
#new features will be year,month,day_of_week,hour
dataset_train1=pd.concat([dataset_int1,dataset_train["passenger_count"]],axis=1)
dataset_train2=pd.concat([dataset_train1,dataset_train["pickup_datetime"]],axis=1)
#dataset_train2.isna().sum()
data=[dataset_train2,dataset_test]
for i in data:
i["year"]=i["pickup_datetime"].apply(lambda row:row.year)
i["month"]=i["pickup_datetime"].apply(lambda row:row.month)
i["day_of_week"] = i["pickup_datetime"].apply(lambda row: row.dayofweek)
i["hour"] = i["pickup_datetime"].apply(lambda row: row.hour)
# train2_nodummies=dataset_train2.copy()
# dataset_train2=train2_nodummies.copy()
plt.figure(figsize=(20,10))
sns.countplot(dataset_train2["year"])
# plt.savefig('year.png')
plt.figure(figsize=(20,10))
sns.countplot(dataset_train2['month'])
# plt.savefig('month.png')
plt.figure(figsize=(20,10))
sns.countplot(dataset_train2['day_of_week'])
# plt.savefig('day_of_week.png')
plt.figure(figsize=(20,10))
sns.countplot(dataset_train2['hour'])
# plt.savefig('hour.png')
plt.show()
#Now we will use month,day_of_week,hour to derive new features like sessions in a day,seasons in a year,week:weekend/weekday
# for sessions in a day using hour columns
def f(x):
if(x>=5) and (x<=11):
return "morning"
elif (x>=12) and (x<=16):
return "afternoon"
elif (x>=17) and (x<=20):
return "evening"
elif (x>=21) and (x<=23):
return "night pm"
elif (x>=0) and (x<=4):
return "night am"
dataset_train2["sessions"]=dataset_train2["hour"].apply(f)
dataset_test['session'] = dataset_test['hour'].apply(f)
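# Added note (sketch, not in the original script): pd.cut can express the same
# hour bucketing in a single call, e.g.
# pd.cut(dataset_train2["hour"], bins=[-1, 4, 11, 16, 20, 23],
#        labels=["night am", "morning", "afternoon", "evening", "night pm"])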
#for seasons in a year using month column
def g(x):
if (x>=3) and (x<=5):
return "spring"
elif (x>=6) and (x<=8):
return "summer"
elif (x>=9) and (x<=11):
return "fall"
else :
return "winter"
dataset_train2['seasons'] = dataset_train2['month'].apply(g)
dataset_test['seasons'] = dataset_test['month'].apply(g)
#for week / weekend in a day of week columns
def h(x):
if (x>=0) and (x<=4):
return "weekday"
elif (x>=5) and (x<=6):
return "weekend"
dataset_train2['week'] = dataset_train2['day_of_week'].apply(h)
dataset_test['week'] = dataset_test['day_of_week'].apply(h)
dataset_train2['passenger_count'].describe()
dataset_train2.isnull().sum()
dataset_test.isna().sum()
#creating dummy variables
temp=pd.get_dummies(dataset_train2["passenger_count"],prefix="passenger_count")
dataset_train2=dataset_train2.join(temp)
temp = pd.get_dummies(dataset_test['passenger_count'], prefix = 'passenger_count')
dataset_test = dataset_test.join(temp)
temp = pd.get_dummies(dataset_test['seasons'], prefix = 'seasons')
dataset_test = dataset_test.join(temp)
temp=pd.get_dummies(dataset_train2["seasons"],prefix = "season" )
dataset_train2= | pd.concat([dataset_train2,temp],axis=1) | pandas.concat |
import os
from typing import List
try:
from typing import Literal
except ImportError:
from typing_extensions import Literal # type: ignore
from typing import Optional
import numpy as np
import pandas as pd
import scanpy as sc
from anndata import AnnData
from rich import print
WORKING_DIRECTORY = os.path.dirname(__file__)
def generate_expression_table(
adata,
cluster: str = "all",
subset_by: str = "cell_type",
xlabel: str = "days",
hue: str = None,
use_raw: bool = None,
):
"""
Args:
adata: Anndata object
cluster: Which label of the subsets to generate the table for. Use 'all' if for all subsets.
subset_by: Which label to subset the clusters by
xlabel: x-axis
hue: Value to color by
use_raw: Whether to use adata.raw.X for the calculations
Returns:
Gene expression table
"""
if cluster == "all":
cells = adata.obs_names
else:
cells = [True if val in cluster else False for val in adata.obs[subset_by]]
if use_raw:
gen_expression_table = pd.DataFrame(
adata[cells].raw.X.todense(), index=adata[cells].obs_names, columns=adata[cells].raw.var_names
)
else:
gen_expression_table = pd.DataFrame(
adata[cells].X, index=adata[cells].obs_names, columns=adata[cells].var_names
)
gen_expression_table["identifier"] = adata[cells].obs["identifier"]
gen_expression_table[xlabel] = adata[cells].obs[xlabel]
if hue:
# For multiple cluster, split internally per condition
if isinstance(cluster, list) and len(cluster) > 1 and subset_by != hue:
gen_expression_table[hue] = [f"{t}_{c}" for t, c in zip(adata[cells].obs[hue], adata[cells].obs[subset_by])]
else:
gen_expression_table[hue] = adata[cells].obs[hue]
return gen_expression_table
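# Added usage sketch (assumes an AnnData object with the obs columns referenced above;
# column names here are illustrative):
# expr_table = generate_expression_table(
#     adata, cluster="all", subset_by="cell_type", xlabel="days", hue="batch", use_raw=True
# )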
def relative_frequencies(adata, group_by: str = "cell_type", xlabel: str = "days", condition: str = "batch"):
"""
Calculates the relative frequencies of conditions grouped by an observation.
Args:
adata: AnnData object containing the data
group_by: Observation label to group the cells by
xlabel: x-axis label
condition: Condition column to add to the output
Returns:
Relative frequencies in a Pandas DataFrame
"""
freqs = adata.obs.groupby(["identifier", group_by]).size()
samples = np.unique(adata.obs["identifier"])
ind = adata.obs[group_by].cat.categories
relative_frequencies = [freqs[ident] / sum(freqs[ident]) for ident in samples]
relative_frequencies = pd.DataFrame(relative_frequencies, columns=ind, index=samples).fillna(0)
# relFreqs[xlabel] = grouping.loc[samples, xlabel] ## when using Grouping Table
cell_types = {}
combis = adata.obs.groupby(["identifier", xlabel]).groups.keys()
for c in combis:
cell_types[c[0]] = c[1]
relative_frequencies[xlabel] = [cell_types[label] for label in relative_frequencies.index] # type: ignore
# Todo, add for condition
if condition:
combis = adata.obs.groupby(["identifier", condition]).groups.keys()
for c in combis:
cell_types[c[0]] = c[1]
relative_frequencies[condition] = [cell_types[label] for label in relative_frequencies.index] # type: ignore
return relative_frequencies
def relative_frequency_per_cluster(adata, group_by: str = "cell_type", xlabel: str = "days", condition=None):
"""
Calculates relative frequencies per cluster
Args:
adata: AnnData object containing the data
group_by: The label to group by for the clusters
xlabel: x-axis label
condition: condition to combine by
Returns:
Pandas DataFrame of relative frequencies
"""
frequencies = adata.obs.groupby([group_by, xlabel]).size()
celltypes = np.unique(adata.obs[group_by])
ind = adata.obs[xlabel].cat.categories
relative_frequencies = [frequencies[ident] / sum(frequencies[ident]) for ident in celltypes]
relative_frequencies = pd.DataFrame(relative_frequencies, columns=ind, index=celltypes).fillna(0)
cell_types = {}
combinations = adata.obs.groupby([group_by, xlabel]).groups.keys()
for combination in combinations:
cell_types[combination[0]] = combination[1]
relative_frequencies[group_by] = relative_frequencies.index # type: ignore
# Todo, add for condition
if condition:
combinations = adata.obs.groupby([group_by, condition]).groups.keys()
for combination in combinations:
cell_types[combination[0]] = combination[1]
relative_frequencies[condition] = [cell_types[label] for label in relative_frequencies.index] # type: ignore
return relative_frequencies
def correlate_to_signature(
adata,
marker: pd.DataFrame,
log_fc_threshold: float = 0.7,
cell_type: str = "AT2 cells",
cell_type_label: str = "cell_type",
log_fc_label: str = "logfoldchange",
gene_label: str = "gene",
use_raw: bool = True,
):
"""
Correlation score (based on cell type signature (logFC)) - an alternative to sc.tl.score_genes
Args:
adata: AnnData object containing the data
marker: Pandas DataFrame containing marker genes
log_fc_threshold: Log fold change threshold for selecting marker genes
cell_type: Cell type to calculate the correlation for
cell_type_label: Column of the marker table containing the cell types
log_fc_label: Column of the marker table containing the log fold changes
gene_label: Column of the marker table containing the gene names
use_raw: Whether to use adata.raw.X
Returns:
List of correlations
"""
from scipy.sparse import issparse
topmarker = marker[marker.loc[:, cell_type_label] == cell_type]
topmarker = topmarker.loc[topmarker.loc[:, log_fc_label] > log_fc_threshold, [gene_label, log_fc_label]]
gene_names = list(np.intersect1d(adata.var_names, topmarker.loc[:, gene_label].astype(str)))
topmarker = topmarker[topmarker.loc[:, gene_label].isin(gene_names)]
print(f"[bold blue]{len(gene_names)} genes used for correlation score to {cell_type}")
if use_raw:
if issparse(adata.raw.X):
gene_expression = adata.raw[:, gene_names].X.todense()
else:
gene_expression = adata.raw[:, gene_names].X
else:
if issparse(adata.X):
gene_expression = adata[:, gene_names].X.todense()
else:
gene_expression = adata[:, gene_names].X
gene_expression = pd.DataFrame(gene_expression.T, index=gene_names)
# For each cell separately
gene_expression = pd.DataFrame.fillna(gene_expression, value=0)
res = [
np.correlate(topmarker.loc[:, log_fc_label], gene_expression.iloc[:, c])[0]
for c in range(gene_expression.shape[1])
]
return res
def remove_outliers(cords, eps: int = 1, min_samples: int = 2):
"""
Remove outlying cells based on UMAP embeddings with DBScan (density based clustering)
Call as: sub.obs["d_cluster"] = remove_outliers(sub.obsm["X_umap"], min_samples = 10)
Args:
cords: adata UMAP coordinates, typically adata.obsm["X_umap"]
eps: Maximum distance between two clusters to still be considered neighbors
min_samples: Minimum samples of a cluster
Returns:
Pandas DataFrame of clusters
"""
from natsort import natsorted
from sklearn.cluster import DBSCAN
clustering = DBSCAN(eps=eps, min_samples=min_samples).fit(cords)
cluster = clustering.labels_.astype("U")
return pd.Categorical(cluster, categories=natsorted(np.unique(cluster)))
def add_percentages(adata, table, ids, group_by: str, threshold: int = 0, gene_label: str = "gene"):
"""
Add columns to existing diffxpy table specifying percentage of expressing cells
Args:
adata: AnnData object containing the data
table: Table as generated by diffxpy
ids:
group_by: Label to group by
threshold:
gene_label: Label of the genes
Returns:
Table containing percentage of expressing cells
"""
for ident in ids:
cells = adata.obs_names[adata.obs[group_by] == ident]
data_temp = pd.DataFrame(
((adata[cells].layers["counts"] > threshold).sum(0) / adata[cells].layers["counts"].shape[0]).T,
index=adata.var_names,
)
if gene_label == "index":
table[f"pct.{ident}s"] = data_temp.reindex(table.index.values).values
else:
table[f"pct.{ident}s"] = data_temp.reindex(table.loc[:, gene_label]).values
return table
def ranksums_between_groups(
table, id1: str = "bystander", id2: str = "infected", xlabel: str = "condition", cells=None, score: str = "Axin2"
):
"""
Perform Wilcoxon Rank-sum test between two groups.
Args:
table:
id1:
id2:
xlabel: x-axis label
cells:
score:
Returns:
Pandas DataFrame containing test statistic and p-value
"""
from scipy import stats
if cells is not None:
table = table.loc[cells].copy()
group1 = table[table.loc[:, xlabel] == id1].copy()
group2 = table[table.loc[:, xlabel] == id2].copy()
t, p = stats.ranksums(group1.loc[:, score], group2.loc[:, score])
result = pd.DataFrame(columns=["wilcoxon_ranksum", "pval"])
result.loc[0] = [t, p]
return result
def generate_count_object(
adata,
hue: str = "disease",
cell_type_label: str = "cell_type",
cell_type: List[str] = None,
min_samples: int = 2,
min_cells: int = 5,
ref: str = "healthy",
subset: List[str] = None,
layer: str = "counts",
outliers_removal: bool = False,
):
"""
@Meshal what is this really supposed to do?
Args:
adata: AnnData object
hue: Value to color by
cell_type_label: Label containing cell types
cell_type: Cells type to generate counts for
min_samples: Minimum samples for outlier removal with DBScan
min_cells: Minimal number of cells
ref:
subset:
layer:
outliers_removal: Whether to remove outliers or not
Returns:
AnnData object containing counts
Example Call:
subset = ['3d PI-KO', '3d PI-WT']
raw_counts = generate_count_object(adata,
condition = "grouping",
cell_type_label = "celltype_refined", cell_type = ["AT2"],
ref = "3d PI-WT",
subset = subset)
"""
adata_subset = adata[adata.obs.grouping.isin(subset)]
cells = [
True if (adata_subset.obs[cell_type_label][i] in cell_type) else False for i in range(adata_subset.n_obs) # type: ignore
]
# Raw count data for diffxpy
obs = adata_subset[cells].obs.copy()
var = adata_subset.var_names.copy()
adata_raw = sc.AnnData(X=adata_subset[cells].layers[layer].copy())
adata_raw.obs = obs
adata_raw.var.index = var
adata_raw.obsm = adata_subset[cells].obsm.copy()
# Also automate tidy up with DBScan :)
if outliers_removal:
adata_raw.obs["dcluster"] = remove_outliers(adata_raw.obsm["X_umap"], min_samples=min_samples)
sc.pl.umap(adata_raw, color=[hue, "dcluster"])
adata_raw = adata_raw[adata_raw.obs.dcluster == "0"].copy()
sc.pp.filter_genes(adata_raw, min_cells=min_cells)
# Set reference as first column
adata_raw.obs.loc[:, hue].cat.reorder_categories([ref, np.setdiff1d(subset, ref)[0]], inplace=True)
pal = adata_subset.uns[f"{hue}_colors"]
sc.pl.umap(adata_raw, color=[hue], palette=list(pal))
return adata_raw
def tidy_de_table(de_test, adata, cells, ids=None, qval_thresh: float = 0.9, group_by: str = "treatment", cols=None):
"""
Sorts diffxpy de table and adds percentages of expression per group
Args:
de_test: diffxpy de test
adata: AnnData object
cells:
ids:
qval_thresh:
group_by:
cols:
Returns:
Pandas Dataframe of diffxpy table with percentages
"""
result = de_test.summary().sort_values(by=["qval"], ascending=True)
result = result[result.qval < qval_thresh].loc[:, cols].copy()
# Add percentages
result = add_percentages(adata[cells], result, ids=ids, group_by=group_by)
return result
def correlate_means_to_gene(means: pd.DataFrame, corr_gene: str = "EOMES"):
"""
Calculate gene to gene correlation based on a mean expression table
Args:
means:
corr_gene:
Returns:
Pandas DataFrame of correlations
"""
import scipy.stats
genes = means.columns.values
cors = pd.DataFrame(index=genes, columns=["spearman_corr", "pvalue"])
# tab = sc.get.obs_df(sub, keys = [corr_gene], layer = None, use_raw = True)
table = means.loc[:, [corr_gene]].values
# Loop over all genes.
for gene in genes:
tmp = scipy.stats.spearmanr(table, means.loc[:, [gene]]) # Spearman's rho
cors.loc[gene, :] = tmp[0:2]
cors.dropna(axis=0, inplace=True)
cors.sort_values("spearman_corr", ascending=False, inplace=True)
return cors
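# Added usage sketch (assumes `means` is a groups x genes table of mean expression;
# the gene name is illustrative):
# cors = correlate_means_to_gene(means, corr_gene="EOMES")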
def extended_marker_table(
adata: AnnData,
qval_thresh: float = 0.05,
cell_type_label: str = "cell_type",
gene_ranks_key: str = "rank_genes_groups",
):
"""
Generates an extended marker table with cell types and percentages of expressed cell types per cluster.
Run scanpy.tl.rank_genes_groups before using this function.
Args:
adata: AnnData object containing ranked genes
qval_thresh: Threshold to filter the log fold change for
cell_type_label: Label containing all cell types
gene_ranks_key: Key for the ranked gene groups (generated by sc.tl.rank_genes_groups)
Returns:
A Pandas DataFrame
"""
result = adata.uns[gene_ranks_key]
all_markers = []
for cluster in result["names"].dtype.names:
current = pd.DataFrame(
{
"gene": result["names"][cluster],
"score": result["scores"][cluster],
"log_FC": result["logfoldchanges"][cluster],
"pval": result["pvals"][cluster],
"pval_adj": result["pvals_adj"][cluster],
"cell_type": cluster,
}
)
# Add percentage expressed per cell type
adata.obs["group"] = ["within" if ct == cluster else "outside" for ct in adata.obs.loc[:, cell_type_label]]
current = add_percentages(adata, table=current, group_by="group", gene_label="gene", ids=["within", "outside"])
all_markers.append(current)
all_markers_df = | pd.concat(all_markers) | pandas.concat |
'''
Author: <NAME>
Date: May 1, 2019
Course: ISTA355
Final Project
This file contains all the functions used for my ISTA355 Final Project.
The purpose of the file is to incorporate question answering features into a
classifier in order to replicate an open-ended question answering model or
search engine. For more information, please read the write-up.
'''
import os
import pandas as pd
import numpy as np
import nltk
import math
import progressbar
import string
from collections import Counter
from operator import itemgetter
import gc
import random
import pickle
def WikiData_to_DF():
'''
This function loads the Wikipedia DataFrame and dictionary with words as its keys
mapped to the number of pages they appear on from pickle files created beforehand
and returns these two objects.
Parameter: None
Returns: a Wikipedia DataFrame containing the titles, text, and weighted vectors
of wikipedia pages, and a dictionary with words as its keys mapped to the number
of pages they appear on
'''
with open('total_dict.pickle', 'rb') as handle:
total_dict = pickle.load(handle)
with open('Wiki_DF.pickle', 'rb') as handle:
output = pickle.load(handle)
return output, total_dict
def create_wikiData(txt_data_dir):
'''
This function reads through a given directory, its subdirectories and files,
reading each file and parsing the file into a large Wikipedia DataFrame, with
the page titles as its index, and having columns for the textual content of the
page along with its weighed (TF-IDF) vectors of that textual content. A dictionary
with keys that are words mapped to the number of pages the words appear on is also
created to help with the TF-IDF weighing. The dataframe and dictionary are saved into
pickle files.
Parameters:
txt_data_dir - a string representing the file path to the parsed
Wikipedia page dump directory
Returns: None
'''
if (os.path.isdir(txt_data_dir)) == False:
print("Error: parsed data file not found or incorrect filename")
print("*Extracting WikiPages into a DataFrame")
wiki_file_dirs = []
for walk in os.walk(txt_data_dir): # os.walk() returns (dir, subdirs, filenames)
for filename in walk[2]:
wiki_file_dirs.append(walk[0]+'/'+filename)
wiki_file_dirs.pop(0) #remove DS_store directory
index = []
cols = ["text_content"]
data = []
for file in wiki_file_dirs:
in_file = open(file, 'r')
for line in in_file:
if line[:4] == "<doc":
doc_info = line.split('\"')
#url = doc_info[3]
title = doc_info[5]
index.append(title)
temp = [""]
elif line[:5] == "</doc":
data.append(temp)
else:
temp[0] += line
in_file.close()
output = | pd.DataFrame(data, index=index, columns=cols) | pandas.DataFrame |
import pandas as pd
import os
CUR_PATH = os.path.abspath(os.path.dirname(__file__))
SYR = 2011 # calendar year used to normalize factors
BEN_SYR = 2014 # calendar year used just for the benefit start year
EYR = 2030 # last calendar year we have data for
SOI_YR = 2014 # most recently available SOI estimates
# define constants for the age code that refers to the total population,
# the dependent age upper limit, and the senior age lower limit
TOTES = 999
DEP = 19
SENIOR = 65
# Import Census projection on population:
# - Projection from 2014
# <http://www.census.gov/population/projections/data/national/2014/downloadablefiles.html>
# - Historical estimates from 2010 to 2014
# <http://www.census.gov/popest/data/datasets.html>
# - Historical estimates from 2000 to 2010
# <http://www.census.gov/popest/data/intercensal/national/nat2010.html>
# projection for 2014+
pop_projection = pd.read_csv(os.path.join(CUR_PATH, "NP2014_D1.csv"), index_col="year")
pop_projection = pop_projection[
(pop_projection.sex == 0)
& (pop_projection.race == 0)
& (pop_projection.origin == 0)
]
pop_projection = pop_projection.drop(["sex", "race", "origin"], axis=1)
# Keep only the rows for years 2014 through EYR in the pop_projection DF (drop later years)
num_drop = EYR + 1 - 2014
pop_projection = pop_projection.drop(pop_projection.index[num_drop:], axis=0)
pop_projection = pop_projection.drop(pop_projection.index[:1], axis=0)
# data for 2010-2014
historical1 = pd.read_csv(os.path.join(CUR_PATH, "NC-EST2014-AGESEX-RES.csv"))
historical1 = historical1[historical1.SEX == 0]
historical1 = historical1.drop(["SEX", "CENSUS2010POP", "ESTIMATESBASE2010"], axis=1)
pop_dep1 = historical1[historical1.AGE <= DEP].sum()
pop_dep1 = pop_dep1.drop(["AGE"], axis=0)
pop_snr1 = historical1[(historical1.AGE >= SENIOR) & (historical1.AGE < TOTES)].sum()
pop_snr1 = pop_snr1.drop(["AGE"], axis=0)
total_pop1 = historical1[historical1.AGE == TOTES]
total_pop1 = total_pop1.drop(["AGE"], axis=1)
# data for 2008-2009
historical2 = pd.read_csv(os.path.join(CUR_PATH, "US-EST00INT-ALLDATA.csv"))
historical2 = historical2[
(historical2.MONTH == 7) & (historical2.YEAR >= 2008) & (historical2.YEAR < 2010)
]
historical2 = historical2.drop(historical2.columns[4:], axis=1)
historical2 = historical2.drop(historical2.columns[0], axis=1)
year08under19 = (historical2.YEAR == 2008) & (historical2.AGE <= DEP)
year09under19 = (historical2.YEAR == 2009) & (historical2.AGE <= DEP)
pop_dep2 = []
pop_dep2.append(historical2.TOT_POP[year08under19].sum())
pop_dep2.append(historical2.TOT_POP[year09under19].sum())
year08over65 = (
(historical2.YEAR == 2008) & (historical2.AGE >= SENIOR) & (historical2.AGE < TOTES)
)
year09over65 = (
(historical2.YEAR == 2009) & (historical2.AGE >= SENIOR) & (historical2.AGE < TOTES)
)
pop_snr2 = []
pop_snr2.append(historical2.TOT_POP[year08over65].sum())
pop_snr2.append(historical2.TOT_POP[year09over65].sum())
year08total = (historical2.YEAR == 2008) & (historical2.AGE == TOTES)
year09total = (historical2.YEAR == 2009) & (historical2.AGE == TOTES)
total_pop2 = []
total_pop2.append(historical2.TOT_POP[year08total].sum())
total_pop2.append(historical2.TOT_POP[year09total].sum())
# combine data for 2008-2014 with projection data
popdf = pd.DataFrame(pop_projection[pop_projection.columns[1:21]].sum(axis=1))
POP_DEP = pd.concat([pd.DataFrame(pop_dep2), pd.DataFrame(pop_dep1), popdf])
popdf = pd.DataFrame(pop_projection[pop_projection.columns[66:]].sum(axis=1))
POP_SNR = pd.concat([pd.DataFrame(pop_snr2), pd.DataFrame(pop_snr1), popdf])
TOTAL_POP = pd.concat(
[
| pd.DataFrame(total_pop2) | pandas.DataFrame |
#!/usr/bin/env python3.7
# -*- coding: utf-8 -*-
"""
Created on Mon May 10 15:36:24 2021
@author: reideej1
:DESCRIPTION:
Rolls up situational statistics for individual player stats contained in
CFBStats/teamXXX/individual folders.
Totals will be generated for each player on a yearly and a career basis.
:REQUIRES:
:TODO:
"""
#==============================================================================
# Package Import
#==============================================================================
import glob
import numpy as np
import os
import pandas as pd
import pathlib
import tqdm
from itertools import groupby # used for grouping similar substrings
#==============================================================================
# Reference Variable Declaration
#==============================================================================
#==============================================================================
# Function Definitions
#==============================================================================
def directoryCheck(team_name):
'''
Purpose: Run a check of the /data/raw/CFBStats/ folder to see if a folder
exists for the specified team and category. If it doesn't, create it.
Input:
(1) team_name (string): Name of the school being scraped
Output:
- NONE
'''
# Check for the team folder
pathlib.Path('data/raw/CFBStats/'+team_name).mkdir(parents=True, exist_ok=True)
# Checking for required sub-folders
for category in ['games', 'players', 'records',
'rosters', 'schedules', 'situations', 'splits']:
pathlib.Path('data/raw/CFBStats/', team_name, category).mkdir(
parents=True, exist_ok=True)
def roll_up_by_team(path_team):
'''
Purpose: Process the individual stats for each team's players. For example,
there are currently situational and splits stats for every QB a team
has had for each year they played. This script will merge the yearly,
stat-specific .csv files such that each player simply has a file called
team_QB_playername_year.csv.
Inputs
------
path_team : pathlib Path
file path of team directory
Outputs
-------
none
'''
# grab a list of all files in the directory
path_files = os.listdir(path_team)
# extract team name
player_team = path_files[0].split('_')[0]
# create a list of variables that are consistent across table types
list_col_default_sit = ['situation', 'season', 'team', 'name', 'name_first',
'name_last', 'class', 'position', 'height', 'weight',
'home_town', 'home_state']
list_col_default_split = ['split', 'season', 'team', 'name', 'name_first',
'name_last', 'class', 'position', 'height', 'weight',
'home_town', 'home_state']
# create dataframes for storing all player stats for a team
df_sit_team = pd.DataFrame()
df_split_team = pd.DataFrame()
# group stats by player
for player_files in tqdm.tqdm([list(playerIdx) for j, playerIdx in groupby(
path_files, lambda a: a.split('QB_')[1].split('_')[0])]):
# process PASSING situational stats
df_sit_pass = pd.DataFrame()
for file_sit_pass in [x for x in player_files if 'passing_situational.csv' in x]:
df_sit_pass = df_sit_pass.append(pd.read_csv(path_team.joinpath(file_sit_pass)))
df_sit_pass.columns = ['pass_' + x if x not in list_col_default_sit else x for x in df_sit_pass.columns]
# process RUSHING situational stats
df_sit_rush = pd.DataFrame()
for file_sit_rush in [x for x in player_files if 'rushing_situational.csv' in x]:
df_sit_rush = df_sit_rush.append(pd.read_csv(path_team.joinpath(file_sit_rush)))
df_sit_rush.columns = ['rush_' + x if x not in list_col_default_sit else x for x in df_sit_rush.columns]
# process PASSING split stats
df_split_pass = pd.DataFrame()
for file_split_pass in [x for x in player_files if 'passing_split.csv' in x]:
df_split_pass = df_split_pass.append(pd.read_csv(path_team.joinpath(file_split_pass)))
df_split_pass.columns = ['pass_' + x if x not in list_col_default_split else x for x in df_split_pass.columns]
# process RUSHING split stats
df_split_rush = pd.DataFrame()
for file_split_rush in [x for x in player_files if 'rushing_split.csv' in x]:
df_split_rush = df_split_rush.append(pd.read_csv(path_team.joinpath(file_split_rush)))
df_split_rush.columns = ['rush_' + x if x not in list_col_default_split else x for x in df_split_rush.columns]
# merge SITUATIONAL stats together
#--- handle empty dataframes by creating NaNs for all variables
if len(df_sit_pass) == 0:
df_sit_pass = create_empty_stat_dataframe('sit_pass')
df_sit = pd.merge(df_sit_rush, df_sit_pass, how = 'left', on = 'situation')
elif len(df_sit_rush) == 0:
df_sit_rush = create_empty_stat_dataframe('sit_rush')
df_sit = pd.merge(df_sit_pass, df_sit_rush, how = 'left', on = 'situation')
else:
df_sit = pd.merge(df_sit_pass, df_sit_rush, how = 'inner', on = list_col_default_sit)
df_sit = df_sit[['season', 'team', 'position', 'name_last', 'name_first',
'name', 'class', 'situation', 'pass_g', 'pass_att',
'pass_comp', 'pass_pct.', 'pass_yards', 'pass_td',
'pass_int', 'pass_rating', 'pass_long', 'pass_1st',
'pass_15+', 'pass_25+', 'rush_g', 'rush_att',
'rush_yards', 'rush_avg.', 'rush_td', 'rush_long',
'rush_1st', 'rush_10+', 'rush_20+',
'height', 'weight', 'home_town', 'home_state']]
# merge SPLIT stats together
#--- handle empty dataframes by creating NaNs for all variables
if len(df_split_pass) == 0:
df_split_pass = create_empty_stat_dataframe('split_pass')
df_split = pd.merge(df_split_rush, df_split_pass, how = 'left', on = 'split')
elif len(df_split_rush) == 0:
df_split_rush = create_empty_stat_dataframe('split_rush')
df_split = pd.merge(df_split_pass, df_split_rush, how = 'left', on = 'split')
else:
df_split = pd.merge(df_split_pass, df_split_rush, how = 'inner', on = list_col_default_split)
df_split = df_split[['season', 'team', 'position', 'name_last', 'name_first',
'name', 'class', 'split', 'pass_g', 'pass_att',
'pass_comp', 'pass_pct.', 'pass_yards', 'pass_td',
'pass_int', 'pass_att/g', 'pass_yards/g',
'pass_yards/att', 'pass_rating',
'rush_g', 'rush_att', 'rush_yards', 'rush_avg.',
'rush_td', 'rush_att/g', 'rush_yards/g',
'height', 'weight', 'home_town', 'home_state']]
# create career totals for each player
df_sit, df_split = create_player_career_stats(df_sit, df_split)
# add player stats to team dataframe
if len(df_sit_team) == 0:
df_sit_team = df_sit
else:
df_sit_team = df_sit_team.append(df_sit)
if len(df_split_team) == 0:
df_split_team = df_split
else:
df_split_team = df_split_team.append(df_split)
# save SITUATIONAL team file to disk
fname_sit = f'{player_team}_situational.csv'
outdir_sit = pathlib.Path('data/processed/CFBStats/individual/situational')
if not os.path.exists(outdir_sit):
os.mkdir(outdir_sit)
df_sit_team.to_csv(outdir_sit.joinpath(fname_sit), index = False)
# save SPLIT team file to disk
fname_split = f'{player_team}_split.csv'
outdir_split = pathlib.Path('data/processed/CFBStats/individual/split')
if not os.path.exists(outdir_split):
os.mkdir(outdir_split)
df_split_team.to_csv(outdir_split.joinpath(fname_split), index = False)
print(f'Done with: {player_team}')
return
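# Added usage sketch (the path below is an assumption about the directory layout
# described in the docstring):
# roll_up_by_team(pathlib.Path('data/raw/CFBStats/team001/individual'))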
def create_empty_stat_dataframe(df_type):
'''
Purpose: Create a placeholder "situational" or "split" stats dataframe
that can be used as a stand-in if a player is lacking stats for a
specific year (i.e. no rushing stats for freshman year, but QB has
passing stats)
Inputs
------
df_type : string
specifies which type of dataframe to make:
- "sit_pass"
- "sit_rush"
- "split_pass"
- "split_rush"
Outputs
-------
df_empty: pandas DataFrame
stats dataframe with all values initialized to NaN for all categories
'''
list_situations = ['1st Half', '2nd Half/OT', '1st Quarter', '2nd Quarter',
'3rd Quarter', '4th Quarter', 'Overtime', '1st Down',
'2nd Down', '3rd Down', '3rd Down, 1-3 To Go',
'3rd Down, 4-6 To Go', '3rd Down, 7-9 To Go',
'3rd Down, 10+ To Go', '4th Down', 'Own 1 To 20 Yd Ln',
'Own 21 To 39 Yd Ln', 'Own 40 To Opp 40 Yd Ln',
'Opp 39 To 21 Yd Ln', 'Opp 20 To 1 Yd Ln (RZ)',
'Winning By 15+ Pts', 'Winning By 8-14 Pts',
'Winning By 1-7 Pts', 'Tied', 'Losing By 1-7 Pts',
'Losing By 8-14 Pts', 'Losing By 15+ Pts']
list_sit_pass = ['pass_g', 'pass_att', 'pass_comp', 'pass_pct.', 'pass_yards',
'pass_td', 'pass_int', 'pass_rating', 'pass_long', 'pass_1st',
'pass_15+', 'pass_25+']
list_sit_rush = ['rush_g', 'rush_att', 'rush_yards', 'rush_avg.', 'rush_td',
'rush_long', 'rush_1st', 'rush_10+', 'rush_20+']
list_split = ['All Games', 'at Home', 'on Road/Neutral Site', 'in Wins',
'in Losses', 'vs. Conference', 'vs. Non-Conference',
'vs. Ranked (AP)', 'vs. Unranked (AP)', 'vs. FBS (I-A)',
'vs. FCS (I-AA)', 'vs. FBS Winning', 'vs. FBS Non-Winning',
'vs. BCS AQ', 'vs. BCS non-AQ', 'in August/September',
'in October', 'in November', 'in December/January']
list_split_pass = ['pass_g', 'pass_att', 'pass_comp', 'pass_pct.',
'pass_yards', 'pass_td', 'pass_int', 'pass_att/g',
'pass_yards/g', 'pass_yards/att', 'pass_rating']
list_split_rush = ['rush_g', 'rush_att', 'rush_yards', 'rush_avg.',
'rush_td', 'rush_att/g', 'rush_yards/g']
df_empty = pd.DataFrame()
# Handle SITUATIONAL - PASSING
if df_type == "sit_pass":
nrow = len(list_situations)
ncol = len(list_sit_pass)
df_empty = pd.DataFrame(np.zeros([nrow, ncol])*np.nan, columns=list_sit_pass)
df_empty['situation'] = list_situations
# Handle SITUATIONAL - RUSHING
elif df_type == "sit_rush":
nrow = len(list_situations)
ncol = len(list_sit_rush)
df_empty = pd.DataFrame(np.zeros([nrow, ncol])*np.nan, columns=list_sit_rush)
df_empty['situation'] = list_situations
# Handle SPLIT - PASSING
elif df_type == "split_pass":
nrow = len(list_split)
ncol = len(list_split_pass)
df_empty = pd.DataFrame(np.zeros([nrow, ncol])*np.nan, columns=list_split_pass)
df_empty['split'] = list_split
# Handle SPLIT - PASSING
elif df_type == "split_rush":
nrow = len(list_split)
ncol = len(list_split_rush)
df_empty = pd.DataFrame(np.zeros([nrow, ncol])*np.nan, columns=list_split_rush)
df_empty['split'] = list_split
else:
print("Error detected. Wrong stats type presented to 'create_empty_stat_dataframe'")
return
return df_empty
def create_player_career_stats(df_sit, df_split):
'''
Purpose: Given a player's statistics broken out by year (both situational
and split), calculate the player's career numbers across all categories.
Inputs
------
df_sit : pandas DataFrame
contains the player's situational stats for all available years
df_split : pandas DataFrame
contains the player's split stats for all available years
Outputs
-------
df_sit_career : pandas DataFrame
the player's situational stats updated with career totals
df_split_career : pandas DataFrame
the player's split stats updated with career totals
'''
list_cols_unchanged = ['team', 'position', 'name_last', 'name_first', 'name',
'height', 'weight', 'home_town', 'home_state']
#----- Process SITUATIONAL Stats
# group all situational stats by statistical category
groups_sit = df_sit.groupby(['situation'])
df_sit_career = df_sit.copy(deep = True)
# iterate over every category and "sum" up one by one
for grp in groups_sit:
career_sit = grp[1].sum()
for col in list_cols_unchanged:
career_sit[col] = grp[1].iloc[-1][col]
career_sit['season'] = np.nan
career_sit['class'] = 'Career'
career_sit['situation'] = grp[0]
# add category to player's career list
df_sit_career = df_sit_career.append(career_sit, ignore_index = True)
# fix career situational stats for the player
#--- 1. Pass Rating
''' NCAA passer rating is calculated by:
((8.4 x Passing Yards) + (330 x Touchdown Passes) +
(100 x Number of Completions) – (200 x Interceptions))
÷ Passing Attempts
Sourced from: https://captaincalculator.com/sports/football/ncaa-passer-rating-calculator/
'''
df_sit_career['pass_rating'] = (((8.4 * df_sit_career['pass_yards']) +
(330 * df_sit_career['pass_td']) +
(100 * df_sit_career['pass_comp']) -
(200 * df_sit_career['pass_int'])) /
df_sit_career['pass_att'])
#--- 2. Pass Completion %
df_sit_career['pass_pct.'] = df_sit_career['pass_comp']/df_sit_career['pass_att']
#--- 3. Rush Avg (Yds/Att.)
df_sit_career['rush_avg.'] = df_sit_career['rush_yards']/df_sit_career['rush_att']
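# Added worked check of the passer-rating formula in #--- 1 above (made-up numbers, not real data):
# 2000 yards, 15 TD, 150 completions and 5 INT on 250 attempts gives
# ((8.4*2000) + (330*15) + (100*150) - (200*5)) / 250 = 143.0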
#----- Process SPLIT Stats
# group all split stats by statistical category
groups_split = df_split.groupby(['split'])
df_split_career = df_split.copy(deep = True)
# iterate over every category and "sum" up one by one
for grp in groups_split:
career_split = grp[1].sum()
for col in list_cols_unchanged:
career_split[col] = grp[1].iloc[-1][col]
career_split['season'] = np.nan
career_split['class'] = 'Career'
career_split['split'] = grp[0]
# add category to player's career list
df_split_career = df_split_career.append(career_split, ignore_index = True)
# fix career situational stats for the player
#--- 1. Pass Rating
''' NCAA passer rating is calculated by:
((8.4 x Passing Yards) + (330 x Touchdown Passes) +
(100 x Number of Completions) – (200 x Interceptions))
÷ Passing Attempts
Sourced from: https://captaincalculator.com/sports/football/ncaa-passer-rating-calculator/
'''
df_split_career['pass_rating'] = (((8.4 * df_split_career['pass_yards']) +
(330 * df_split_career['pass_td']) +
(100 * df_split_career['pass_comp']) -
(200 * df_split_career['pass_int'])) /
df_split_career['pass_att'])
#--- 2. Pass Completion %
df_split_career['pass_pct.'] = (df_split_career['pass_comp']/
df_split_career['pass_att'])
#--- 3. Pass Att/Game
df_split_career['pass_att/g'] = (df_split_career['pass_att']/
df_split_career['pass_g'])
#--- 4. Pass Yards/Game
df_split_career['pass_yards/g'] = (df_split_career['pass_yards']/
df_split_career['pass_g'])
#--- 5. Rush Avg (Yds/Att.)
df_split_career['rush_avg.'] = (df_split_career['rush_yards']/
df_split_career['rush_att'])
#--- 6. Rush Att/Game
df_split_career['rush_att/g'] = (df_split_career['rush_att']/
df_split_career['rush_g'])
#--- 7. Rush Yards/Game
df_split_career['rush_yards/g'] = (df_split_career['rush_yards']/
df_split_career['rush_g'])
return df_sit_career, df_split_career
def create_master_file(path_project):
'''
Purpose: Loop through all team files and make a unique file that includes
all player data across all years/careers
Input:
- NONE
Output:
- NONE
'''
#------------- Handle "Split" stats --------------------------------------
# grab a list of all files in the directory for "split" stats
path_split = pathlib.Path(os.path.abspath(os.curdir),
'data', 'processed', 'CFBStats', 'individual', 'split')
files_split = os.listdir(path_split)
# merge all files into a single file
df_split_all = | pd.DataFrame() | pandas.DataFrame |
import pandas as pd
import numpy as np
import tensorflow as tf
from tensorflow import keras
tf.random.set_seed(2021)
from models import DNMC, NMC, NSurv, MLP, train_model, evaluate_model
FILL_VALUES = {
'alb': 3.5,
'pafi': 333.3,
'bili': 1.01,
'crea': 1.01,
'bun': 6.51,
'wblc': 9.,
'urine': 2502.
}
TO_DROP = ['aps', 'sps', 'surv2m', 'surv6m', 'prg2m', 'prg6m', 'dnr', 'dnrday']
TO_DROP = TO_DROP + ['sfdm2', 'hospdead']
# load, drop columns, fill using specified fill values
df = | pd.read_csv('../datasets/support2.csv') | pandas.read_csv |
# -*- coding: utf-8 -*-
"""
Created on Wed May 24 16:15:24 2017
Sponsors Club messaging functions
@author: tkc
"""
import pandas as pd
import smtplib
import numpy as np
import datetime
import tkinter as tk
import glob
import re
import math
import textwrap
from tkinter import filedialog
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
from pkg.SC_signup_functions import findcards
from openpyxl import load_workbook
import pkg.SC_config as cnf
def emailparent_tk(teams, season, year):
''' Interface for non-billing email messages to parents (non-generic)
Message types include:
recruit - specific inquiry about player from last year not yet signed up; needs signupfile w/ recruits tab
assign - notify of team assignment, optional recruit for short team, CYC card notify; teams/cards/mastersignups
missinguni - ask about missing uniforms; missingunifile
unireturn - generic instructions for uniform return; mastersignups w/ unis issued
askforcards - check for CYC card on file and ask
other -- Generic single all team+coaches message (can have $SCHOOL, $GRADERANGE,$COACHINFO, $SPORT, $PLAYERLIST)
8/9/17 works for team assignments
TODO test recruit, missing unis, unireturn
args:
teams - df w/ active teams
season -'Winter', 'Fall' or 'Spring'
year - starting sport year i.e. 2019 for 2019-20 school year
'''
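# Added sketch (assumption, not the original implementation) of how the
# $PLACEHOLDER fields in the message templates above could be filled in;
# the variable names are illustrative only:
# body = open(messfile.get()).read()
# body = body.replace('$FIRST', first).replace('$SPORT', sport).replace('$SCHOOL', school)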
#%%
# first print out existing info in various lines
root = tk.Tk()
root.title('Send e-mail to parents')
messageframe=tk.LabelFrame(root, text='Message options')
unifilename=tk.StringVar()
try:
unifiles=glob.glob('missingunilist*') # find most recent uniform file name
if len(unifiles)>1:
unifile=findrecentfile(unifiles) # return single most recent file
else:
unifile=unifiles[0]
# find most recent missing uni file name
unifilename.set(unifile)
except: # handle path error
unifilename.set('missingunilist.csv')
recruitbool=tk.BooleanVar() # optional recruiting for short teams
emailtitle=tk.StringVar() # e-mail title
mtype=tk.StringVar() # coach message type
messfile=tk.StringVar() # text of e-mail message
transmessfile=tk.StringVar() # text of e-mail message for transfers
extravar=tk.StringVar() # use depends on message type... normally filename
extraname=tk.StringVar() # name for additional text entry box (various uses mostly filenames)
extraname.set('Extra_file_name.txt') # default starting choice
choice=tk.StringVar() # test or send -mail
def chooseFile(txtmess, ftypes):
''' tkinter file chooser (passes message string for window and expected
file types as a tuple, e.g. ('TXT','*.txt'))
'''
root=tk.Tk() # creates pop-up window
root.update() # necessary to close tk dialog after askopenfilename is finished
# tk dialog asks for a single station file
full_path = tk.filedialog.askopenfilename(title = txtmess, filetypes=[ ftypes] )
root.destroy() # closes pop up window
return full_path
def choose_message():
# choose existing message (.txt file)
root=tk.Tk() # creates pop-up window
root.update() # necessary to close tk dialog after askopenfilename is finished
# tk dialog asks for a single station file
full_path = tk.filedialog.askopenfilename(title = 'Choose message file', filetypes=[ ('TXT','*.txt')] )
root.destroy() # closes pop up window
return full_path
# Functions to enable/disable relevant checkboxes depending on radiobutton choice
def Assignopts():
''' Display relevant choices for team assignment notification/cyc card/ short team recruiting '''
recruitcheck.config(state=tk.NORMAL)
extraentry.config(state=tk.DISABLED)
extraname.set('n/a')
messfile.set('parent_team_assignment.txt')
transmessfile.set('parent_team_transfer.txt')
emailtitle.set('Fall $SPORT for $FIRST')
def Recruitopts():
''' Display relevant choices for specific player recruiting'''
recruitcheck.config(state=tk.NORMAL)
extraentry.config(state=tk.DISABLED)
messfile.set('player_recruiting.txt')
transmessfile.set('n/a')
extraname.set('n/a')
emailtitle.set('Cabrini-Soulard sports for $FIRST this fall?')
def Missingopts():
''' Display relevant choices for ask parent for missing uniforms '''
recruitcheck.config(state=tk.DISABLED)
extraentry.config(state=tk.NORMAL)
messfile.set('finish_me.txt')
transmessfile.set('n/a')
extraname.set('Missing uni file name')
extravar.set('missing_uni.csv')
# TODO look up most recent uni file?
emailtitle.set("Please return $FIRST's $SPORT uniform!")
def Schedopts():
''' Display relevant choices for sending schedules (game and practice) to parents '''
recruitcheck.config(state=tk.DISABLED)
# Used here for name of master file schedule
extraentry.config(state=tk.NORMAL)
messfile.set('parent_game_schedule.txt')
transmessfile.set('n/a')
extraname.set('Game schedule file')
extravar.set('Cabrini_2017_schedule.csv')
emailtitle.set("Game schedule for Cabrini $GRADERANGE $GENDER $SPORT")
def Cardopts():
''' Display relevant choices for asking parent for missing CYC cards '''
recruitcheck.config(state=tk.DISABLED)
# Used here for name of master file schedule
extraentry.config(state=tk.DISABLED)
messfile.set('CYCcard_needed.txt')
transmessfile.set('n/a')
extraname.set('')
extravar.set('')
emailtitle.set("CYC card needed for $FIRST")
def Otheropts():
''' Display relevant choices for other generic message to parents '''
recruitcheck.config(state=tk.DISABLED)
# Used here for name of master file schedule
extraentry.config(state=tk.NORMAL)
messfile.set('temp_message.txt')
transmessfile.set('n/a')
extraname.set('')
extravar.set('')
emailtitle.set("Message from Cabrini Sponsors Club")
def Allopts():
''' Display relevant choices for generic message to all sports parents '''
recruitcheck.config(state=tk.DISABLED)
extraentry.config(state=tk.NORMAL)
messfile.set('temp_message.txt')
transmessfile.set('n/a')
extraname.set('')
extravar.set('')
emailtitle.set("Message from Cabrini Sponsors Club")
# E-mail title and message file name
rownum=0
tk.Label(messageframe, text='Title for e-mail').grid(row=rownum, column=0)
titleentry=tk.Entry(messageframe, textvariable=emailtitle)
titleentry.config(width=50)
titleentry.grid(row=rownum, column=1)
rownum+=1
tk.Label(messageframe, text='messagefile').grid(row=rownum, column=0)
messentry=tk.Entry(messageframe, textvariable=messfile)
messentry.config(width=50)
messentry.grid(row=rownum, column=1)
rownum+=1
tk.Label(messageframe, text='Transfer messagefile').grid(row=rownum, column=0)
transmessentry=tk.Entry(messageframe, textvariable=transmessfile)
transmessentry.config(width=50)
transmessentry.grid(row=rownum, column=1)
rownum+=1
# Radio buttons to choose the message type
tk.Radiobutton(messageframe, text='Team assignment', value='Assign', variable = mtype, command=Assignopts).grid(row=rownum, column=0)
tk.Radiobutton(messageframe, text='Recruit missing', value='Recruit', variable = mtype, command=Recruitopts).grid(row=rownum, column=1)
tk.Radiobutton(messageframe, text='Missing uni', value='Missing', variable = mtype, command=Missingopts).grid(row=rownum, column=2)
tk.Radiobutton(messageframe, text='Send schedule', value='Schedule', variable = mtype, command=Schedopts).grid(row=rownum, column=3)
rownum+=1
tk.Radiobutton(messageframe, text='Ask for cards', value='Cards', variable = mtype, command=Cardopts).grid(row=rownum, column=1)
tk.Radiobutton(messageframe, text='Other team message', value='Other', variable = mtype, command=Otheropts).grid(row=rownum, column=1)
tk.Radiobutton(messageframe, text='All sport parents', value='All', variable = mtype, command=Allopts).grid(row=rownum, column=2)
rownum+=1
tk.Label(messageframe, text=extraname.get()).grid(row=rownum, column=0)
extraentry=tk.Entry(messageframe, textvariable=extravar)
extraentry.grid(row=rownum, column=1)
# Extra file chooser button
# button arg includes file type extension .. taken from the extra file name entry
try:
ft = extraname.get().split('.')[-1]
ftypes =("%s" %ft.upper(), "*.%s" %ft)
except:
ftypes =("CSV" , "*.*") # default to all files
# open the chooser on click (needs a callback, not an immediate call) and store the selection
d=tk.Button(messageframe, text='Choose file', command=lambda: extravar.set(chooseFile('Choose extra file', ftypes)) )
d.grid(row=rownum, column=2)
recruitcheck=tk.Checkbutton(messageframe, variable=recruitbool, text='Recruit more players for short teams?')
recruitcheck.grid(row=rownum, column=3) # can't do immediate grid or nonetype is returned
rownum+=1
messageframe.grid(row=0, column=0)
# Specific team selector section using checkboxes
teamframe=tk.LabelFrame(root, text='Team selector')
teamdict=shortnamedict(teams)
teamlist=[] # list of tk bools for each team
# Make set of bool/int variables for each team
for i, val in enumerate(teamdict):
teamlist.append(tk.IntVar())
if '#' not in val:
teamlist[i].set(1) # Cabrini teams checked by default
else:
teamlist[i].set(0) # transfer team
# make checkbuttons for each team
for i, val in enumerate(teamdict):
thisrow=i%5+1+rownum # five checkboxes per column
thiscol=i//5
thisname=teamdict.get(val,'')
tk.Checkbutton(teamframe, text=thisname, variable=teamlist[i]).grid(row=thisrow, column=thiscol)
rownum+=math.ceil(len(teamlist)/5)+2
# Decision buttons bottom row
def chooseall(event):
''' Select all teams '''
for i, val in enumerate(teamdict):
teamlist[i].set(1)
def clearall(event):
''' deselect all teams '''
for i, val in enumerate(teamdict):
teamlist[i].set(0)
def abort(event):
choice.set('abort')
root.destroy()
def test(event):
choice.set('test')
root.destroy()
def KCtest(event):
choice.set('KCtest')
root.destroy()
def send(event):
choice.set('send')
root.destroy()
rownum+=1
d=tk.Button(teamframe, text='All teams')
d.bind('<Button-1>', chooseall)
d.grid(row=rownum, column=0)
d=tk.Button(teamframe, text='Clear teams')
d.bind('<Button-1>', clearall)
d.grid(row=rownum, column=1)
teamframe.grid(row=1, column=0)
choiceframe=tk.LabelFrame(root)
d=tk.Button(choiceframe, text='Abort')
d.bind('<Button-1>', abort)
d.grid(row=rownum, column=2)
d=tk.Button(choiceframe, text='Test')
d.bind('<Button-1>', test)
d.grid(row=rownum, column=3)
d=tk.Button(choiceframe, text='KCtest')
d.bind('<Button-1>', KCtest)
d.grid(row=rownum, column=4)
d=tk.Button(choiceframe, text='Send')
d.bind('<Button-1>', send)
d.grid(row=rownum, column=5)
choiceframe.grid(row=2, column=0)
root.mainloop()
#%%
mychoice=choice.get()
if mychoice!='abort':
kwargs={}
if mychoice=='KCtest':
# this is a true send test but only to me
kwargs.update({'KCtest':True})
mychoice='send'
kwargs.update({'choice':mychoice}) # test or send
emailtitle=emailtitle.get()
messagefile='messages\\'+messfile.get()
# Handle selection of team subsets
selteams=[]
for i, val in enumerate(teamdict):
if teamlist[i].get()==1:
selteams.append(val)
# Filter teams based on checkbox input
teams=teams[teams['Team'].isin(selteams)]
# drop duplicates in case of co-ed team (m and f entries)
teams=teams.drop_duplicates(['Team','Sport'])
# Now deal with the different types of messages
#%%
if mtype.get()=='Schedule':
# Send practice and game schedules
try:
sched=pd.read_csv(extravar.get())
except:
print('Problem opening schedule and other required files for sending game schedules')
fname=filedialog.askopenfilename(title='Select schedule file.')
sched=pd.read_csv(fname)
# fields=pd.read_excel(cnf._INPUT_DIR+'\\Teams_coaches.xlsx', sheetname='Fields')
fields=pd.read_csv(cnf._INPUT_DIR+'\\fields.csv')
Mastersignups = pd.read_csv(cnf._INPUT_DIR+'\\master_signups.csv', encoding='cp437')
#coaches=pd.read_excel('Teams_coaches.xlsx', sheetname='Coaches')
coaches=pd.read_csv(cnf._INPUT_DIR+'\\coaches.csv')
# INTERNAL TESTING
# Mastersignups=Mastersignups[Mastersignups['Last']=='Croat']
famcontact= pd.read_csv(cnf._INPUT_DIR+'\\family_contact.csv', encoding='cp437')
with open(messagefile, 'r') as file:
blankmess=file.read()
# open and send master CYC schedule
sendschedule(teams, sched, fields, Mastersignups, coaches, year, famcontact, emailtitle, blankmess, **kwargs)
if mtype.get()=='Recruit':
try:
famcontact= pd.read_csv(cnf._INPUT_DIR+'\\family_contact.csv', encoding='cp437')
except:
print('Problem loading family contacts')
try: # Recruits stored in CSV
Recruits=pd.read_csv(cnf._OUTPUT_DIR+'\\%s%s_recruits.csv' %(season, year))
print('Loaded possible recruits from csv file')
except:
fname=filedialog.askopenfilename(title='Select recruits file.')
if fname.endswith('.csv'): # final move is query for file
Recruits=pd.read_csv(fname)
else:
print('Recruits file needed in csv format.')
return
emailrecruits(Recruits, famcontact, emailtitle, messagefile, **kwargs)
if mtype.get()=='Assign':
# Notify parents needs teams, mastersignups, famcontacts
if recruitbool.get():
kwargs.update({'recruit':True})
try:
Mastersignups = pd.read_csv(cnf._INPUT_DIR+'\\master_signups.csv', encoding='cp437')
#coaches=pd.read_excel(cnf._INPUT_DIR+'\\Teams_coaches.xlsx', sheetname='Coaches')
coaches=pd.read_csv(cnf._INPUT_DIR+'\\coaches.csv', encoding='cp437')
# INTERNAL TESTING
# Mastersignups=Mastersignups[Mastersignups['Last']=='Croat']
famcontact= pd.read_csv(cnf._INPUT_DIR+'\\family_contact.csv', encoding='cp437')
with open(messagefile, 'r') as file:
blankmess=file.read()
tranmessagefile='messages\\'+transmessfile.get()
with open(tranmessagefile, 'r') as file:
blanktransmess=file.read()
except:
print('Problem loading mastersignups, famcontacts')
return
notifyfamilies(teams, Mastersignups, coaches, year, famcontact, emailtitle, blankmess, blanktransmess, **kwargs)
if mtype.get()=='Missing': # radiobutton value for the missing-uniform option is 'Missing'
try:
# the missing-uniform list is the extra file (e.g. missing_uni.csv); messfile holds the e-mail text
missing=pd.read_csv(extravar.get(), encoding='cp437')
oldteams=pd.read_excel(cnf._INPUT_DIR+'\\Teams_coaches.xlsx', sheet_name='Oldteams') # loads all old teams in list
Mastersignups = pd.read_csv(cnf._INPUT_DIR+'\\master_signups.csv', encoding='cp437')
famcontact= pd.read_csv(cnf._INPUT_DIR+'\\family_contact.csv', encoding='cp437')
with open(messagefile, 'r') as file:
blankmess=file.read()
kwargs.update({'oldteams':oldteams,'missing':missing})
except:
print('Problem loading missing uniform list, oldteams, signups or contacts')
return
# TODO Finish ask for missing uniforms script
askforunis(teams, Mastersignups, year, famcontact, emailtitle, blankmess, **kwargs)
if mtype.get()=='Cards':
try:
famcontact= pd.read_csv(cnf._INPUT_DIR+'\\family_contact.csv', encoding='cp437')
Mastersignups = pd.read_csv(cnf._INPUT_DIR+'\\master_signups.csv', encoding='cp437')
import boto3
import json
import os
import requests
import pandas as pd
import warnings
from pandas import json_normalize
from github import Github
warnings.filterwarnings('ignore')
bucket = 'wmwaredata'
fileName = 'gw_releases.json'
s3 = boto3.client('s3')
git_token = os.getenv('GIT_TOKEN')
git_headers = {'Authorization': f'token {git_token}'}
g = Github(os.getenv('GIT_TOKEN'))
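# Module-level setup: the S3 target (bucket/fileName) and the GitHub client are built from
# the environment, so GIT_TOKEN must be set before this module is imported.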
class GetRelease():
def releases(self):
# Listing repos
org = g.get_organization("k8-proxy")
all_repos = org.get_repos()
repos = []
for repo in all_repos:
myrepo = repo.id, repo.name, repo.html_url
repos.append(myrepo)
df1 = pd.DataFrame(repos)
df1.columns = ['id', 'name', 'repo_url']
# Getting release data
url = f'https://api.github.com/repos/k8-proxy/GW-Releases/contents'
res = requests.get(url, headers=git_headers).json()
data1 = json_normalize(res, max_level=1)
dft = pd.DataFrame(data1)
dft = dft[['path','sha', 'html_url']]
dft['repo_url'] = dft.path.map(df1.set_index('name')['repo_url'])
dft = dft.dropna()
dft.reset_index(drop = True, inplace=True)
dft = dft.rename({'path':'repo_name'}, axis=1)
dft = dft.rename({'sha':'hash'}, axis=1)
dft = dft.rename({'html_url':'commit_url'}, axis=1)
dft['hash_short'] = dft['hash'].str[:7]
myrepos = dft['repo_name']
nurepo = dft[['repo_name']]
repos = myrepos.tolist()
rdata = []
for repo in repos:
rurl = f'https://api.github.com/repos/k8-proxy/{repo}/releases'
resn = requests.get(rurl, headers=git_headers).json()
data2 = json_normalize(resn, max_level=1)
temp_dfs = pd.DataFrame(data2)
rdata.append(temp_dfs )
df3 = pd.concat(rdata, ignore_index=True)
df3 = df3[['html_url','tag_name', 'published_at','body', 'zipball_url']]
df1['join'] = 1
df3['join'] = 1
df = df1.merge(df3, on='join').drop('join', axis=1)
df3.drop('join', axis=1, inplace=True)
df['match'] = [x[0] in x[1] for x in zip(df['name'], df['html_url'])]
df = df.loc[df['match'] == True]
df.reset_index(drop=True, inplace=True)
df = df[['name','repo_url','body','tag_name', 'published_at','zipball_url']]
df = df.rename({'name':'repo_name'}, axis=1)
df = df.rename({'body':'release_name'}, axis=1)
df = df.rename({'tag_name':'release_tag'}, axis=1)
df = df.rename({'published_at':'release_date'}, axis=1)
# tags
tdata = []
for repo in repos:
tag_url = f'https://api.github.com/repos/k8-proxy/{repo}/tags'
t_url = requests.get(tag_url, headers=git_headers).json()
t_data = json_normalize(t_url, max_level=1)
temp_tdf = pd.DataFrame(t_data)
tdata.append(temp_tdf )
tdf = pd.concat(tdata, ignore_index=True)
tdf = tdf[['name','commit.sha','zipball_url']]
df['hash'] = df.zipball_url.map(tdf.set_index('zipball_url')['commit.sha'])
df = df.dropna()
df.reset_index(drop=True, inplace=True)
df = df.drop('zipball_url', axis=1)
dft = dft[['hash','hash_short', 'commit_url']]
nudf = pd.merge(df, dft, on='hash')
nudf = nudf.drop_duplicates(subset='repo_name')
data = []
for repo in repos:
myurl = f'https://api.github.com/repos/k8-proxy/{repo}/contents'
req = requests.get(myurl, headers=git_headers).json()
data1 = json_normalize(req, max_level=1)
temp_df = pd.DataFrame(data1)
data.append(temp_df )
dfg = pd.concat(data, ignore_index=True)
import sys
# sys.path.append("..")
# sys.path.append("../..")
import os, errno
import core_models.parser_arguments as parser_arguments
import warnings
import numpy as np
import pandas as pd
import core_models.utils as utils
import operator
from sklearn.metrics import roc_auc_score as auc_compute
from sklearn.metrics import average_precision_score as avpr_compute
from core_models.utils import get_auc_metrics, get_avpr_metrics
from sklearn import mixture
def is_number(val):
# some outliers will be cast to NaN
# when reading from file (eg. csv)
# since they do not conform with data types
# checks if numerical
try:
float(val)
# check if nan
if np.isnan(float(val)):
return False
return True
except ValueError:
return False
def get_prob_matrix(df_dataset, cat_columns, z_mtx=None, dict_densities=None, n_comp_max=6):
'''
Computes:
-> Marginal Histograms (Categorical feature)
and
-> GMM (Gaussian Mixture Model) Density with BIC selection (Continuous feature)
'''
# z_mtx is matrix that indicates whether some cell in dataset is clean (=1) or dirty (=0)
if z_mtx is None:
z_mtx = np.ones(df_dataset.shape, dtype=bool)
# obtain vectorized version of is_nan
is_number_vec = np.vectorize(is_number, otypes=[bool])
# get indexes for columns
col_idx_map = dict((name, index) for index, name in enumerate(df_dataset.columns))
# get continuous features’ names
cont_columns = [col for col in df_dataset.columns if col not in cat_columns]
# GMM model selected dictionary (which GMM has been selected in terms of components, for each column)
gmm_selected = dict.fromkeys(cont_columns)
# dict of dicts with density/probability values for the domain of each feature
if dict_densities is None:
# initialize dictionary for each number of components, for each GMM
dict_densities = dict()
for col_name in cont_columns:
dict_densities[col_name] = dict()
for n_components in range(1, n_comp_max+1,2):
dict_densities[col_name][n_components] = \
mixture.GaussianMixture(n_components=n_components, warm_start=True)
# density/probability matrix (to be returned for the dataset)
prob_mat = np.empty(df_dataset.shape)
repair_mat = np.empty(df_dataset.shape)
# calculate histogram values for categorical features
for col_name in cat_columns:
# build density for discrete variable
dict_densities[col_name] = df_dataset[col_name][z_mtx[:,col_idx_map[col_name]]].value_counts(normalize=True).to_dict()
# insert normalized histogram for feature
lf = lambda cell: dict_densities[col_name][cell] if cell in dict_densities[col_name] else 0.0
prob_mat[:, col_idx_map[col_name]] = np.array([*map(lf, df_dataset[col_name].values)])
# calculate GMM values for continuous features
for col_name in cont_columns:
# the feature data
col_data = df_dataset[col_name].values.reshape(-1,1)
col_data = (col_data - np.mean(col_data))/np.std(col_data)
# select indexes of number cells (not nan)
idx_bool = is_number_vec(col_data).flatten()
# select clean data as defined by the z variables, and that is number (not nan)
aux_idxs = np.logical_and(z_mtx[:,col_idx_map[col_name]], idx_bool)
col_data_clean = col_data[aux_idxs]
# select best number of components for GMM
best_bic = np.inf
best_GMM = None
for n_components in range(1, n_comp_max+1,2):
gmm_mdl = dict_densities[col_name][n_components]
gmm_mdl.fit(col_data_clean)
bic_val = gmm_mdl.bic(col_data_clean)
if best_bic > bic_val:
best_bic = bic_val
best_GMM = gmm_mdl
# for output
gmm_selected[col_name] = best_GMM
# obtain density values for feature’s cells, using best current GMM model
prob_mat[:, col_idx_map[col_name]][idx_bool] = np.exp(best_GMM.score_samples(col_data[idx_bool]))
prob_mat[:, col_idx_map[col_name]][np.logical_not(idx_bool)] = 0.0
#Select closest mean for each numeric cell (use col_data[idx_bool] so the index length matches the assignment below)
index = np.argmin((col_data[idx_bool]-best_GMM.means_.T)**2,1)
repair_mat[:, col_idx_map[col_name]][idx_bool] = best_GMM.means_[index].squeeze()
repair_mat[:, col_idx_map[col_name]][np.logical_not(idx_bool)] = np.mean(best_GMM.means_)
return prob_mat, dict_densities, gmm_selected, repair_mat
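# Illustrative usage (variable names are hypothetical): for a dataframe df with
# categorical columns cats,
#   prob_mat, dens, gmms, repair = get_prob_matrix(df, cats, n_comp_max=10)
# gives a per-cell density/probability in prob_mat (low values flag likely outliers);
# repair holds nearest-GMM-mean candidates for the continuous columns only.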
def error_computation(dataset_obj, X_true, X_hat, dict_densities, mask):
cursor_feat = 0
feature_errors_arr = []
for feat_select, (feat_name, col_type, feat_size) in enumerate(dataset_obj.feat_info):
select_cell_pos = np.argwhere(mask[:,cursor_feat]==1)
if select_cell_pos.sum() == 0:
feature_errors_arr.append(-1.)
else:
# Brier Score (score ranges between 0-1)
if col_type == 'categ':
true_feature_one_hot = np.zeros((X_true.shape[0], feat_size))
true_index = [int(elem) for elem in X_true[:,cursor_feat]]
true_feature_one_hot[np.arange(X_true.shape[0]), true_index] = 1.
mean_est_probs = [dict_densities[feat_name][key] for key in dataset_obj.cat_to_idx[feat_name].keys()]
error_brier = np.sum((mean_est_probs - true_feature_one_hot[select_cell_pos].squeeze())**2) / (2*float(len(select_cell_pos)))
feature_errors_arr.append(error_brier)
# Standardized Mean Square Error (SMSE)
# SMSE (score ranges betweem 0,1)
elif col_type == 'real':
true_feature = X_true[:,cursor_feat]
reconstructed_feature = X_hat[:,cursor_feat]
smse_error = np.sum((true_feature[select_cell_pos] - reconstructed_feature[select_cell_pos])**2)
sse_div = np.sum(true_feature[select_cell_pos]**2) # (y_ti - avg(y_i))**2, avg(y_i)=0. due to data standardization
smse_error = smse_error / sse_div # SMSE does not need to div 1/N_mask, since it cancels out.
feature_errors_arr.append(smse_error.item())
cursor_feat +=1
# Global error (adding all the errors and dividing by number of features)
mean_error = np.array([val for val in feature_errors_arr if val >= 0.]).mean() # / torch.sum(mask.type(dtype_float))
return mean_error, feature_errors_arr
def main(args):
# Load datasets
train_loader, X_train, target_errors_train, dataset_obj_train, attributes = utils.load_data(args.data_folder, args.batch_size,
is_train=True)
train_loader_clean, X_train_clean, _, dataset_obj_clean, _ = utils.load_data(args.data_folder, args.batch_size,
is_train=True, is_clean=True, stdize_dirty=True)
dataset_obj = dataset_obj_train
df_data_train = dataset_obj_train.df_dataset_instance
with warnings.catch_warnings():
warnings.simplefilter("ignore")
p_mat_train, dict_densities, _, repair_mat = get_prob_matrix(df_data_train, dataset_obj.cat_cols, n_comp_max=40)
mean_error_dirty, features_errors_dirty = error_computation(dataset_obj_clean, X_train_clean.detach().numpy(),
repair_mat, dict_densities, target_errors_train.detach().numpy())
mean_error_clean, features_errors_clean = error_computation(dataset_obj_clean, X_train_clean.detach().numpy(),
repair_mat, dict_densities, (1-target_errors_train).detach().numpy())
#print(features_errors)
logp_mat_train = np.log(p_mat_train + 1e-9)
target_row_train = (target_errors_train.sum(dim=1)>0).numpy()
# Uses the NLL score as outlier score (just like VAE outlier score)
outlier_score_cell_train = -logp_mat_train
outlier_score_row_train = -logp_mat_train.sum(axis=1)
## Cell metrics
auc_cell_train, auc_feats = get_auc_metrics(target_errors_train, outlier_score_cell_train)
avpr_cell_train, avpr_feats = get_avpr_metrics(target_errors_train, outlier_score_cell_train)
print("AVPR per feature")
print(avpr_feats)
print("AUC per feature")
print(auc_feats)
## Row metrics
auc_row_train = auc_compute(target_row_train, outlier_score_row_train)
avpr_row_train = avpr_compute(target_row_train, outlier_score_row_train)
print('Marginals Prob. Train - Cell AUC: {}, Cell AVPR: {}, Row AUC: {}, Row AVPR: {}'.format(
auc_cell_train, avpr_cell_train, auc_row_train, avpr_row_train))
#Save results into csv
if args.save_on:
# create folder for saving experiment data (if necessary)
folder_output = args.output_folder + "/" + args.outlier_model
try:
os.makedirs(folder_output)
except OSError as e:
if e.errno != errno.EEXIST:
raise
columns = ['AUC row','AVPR row','AUC cell','AVPR cell','Error repair on dirty pos', 'Error repair on clean pos']
results = {'AUC row': [auc_row_train], 'AVPR row': [avpr_row_train],
'AUC cell': [auc_cell_train], 'AVPR cell': [avpr_cell_train],
'Error repair on dirty pos': [mean_error_dirty], 'Error repair on clean pos': [mean_error_clean]}
#Dataframe
df_out = pd.DataFrame(data=results, columns=columns)
df_out.index.name = "Epochs"
df_out.to_csv(folder_output + "/train_epochs_data.csv")
# store AVPR for features (cell only)
df_avpr_feat_cell = pd.DataFrame([], index=['AVPR'], columns=attributes)
df_avpr_feat_cell.loc['AVPR'] = avpr_feats
df_avpr_feat_cell.to_csv(folder_output + "/train_avpr_features.csv")
# store AUC for features (cell only)
df_auc_feat_cell = pd.DataFrame([], index=['AUC'], columns=attributes)
df_auc_feat_cell.loc['AUC'] = auc_feats
df_auc_feat_cell.to_csv(folder_output + "/train_auc_features.csv")
df_errors_repair = pd.DataFrame([], index=['error_repair_dirtycells','error_repair_cleancells'], columns=attributes)
from typing import List
import pytest
import numpy as np
import pandas as pd
from obp.dataset import (
linear_reward_function,
logistic_reward_function,
linear_behavior_policy_logit,
SyntheticSlateBanditDataset,
)
from obp.types import BanditFeedback
# n_unique_action, len_list, dim_context, reward_type, reward_structure, click_model, random_state, description
invalid_input_of_init = [
(
"4",
3,
2,
"binary",
"independent",
"pbm",
1,
"n_unique_action must be an integer larger than 1",
),
(
1,
3,
2,
"binary",
"independent",
"pbm",
1,
"n_unique_action must be an integer larger than 1",
),
(
5,
"4",
2,
"binary",
"independent",
"pbm",
1,
"len_list must be an integer such that",
),
(
5,
-1,
2,
"binary",
"independent",
"pbm",
1,
"len_list must be an integer such that",
),
(
5,
10,
2,
"binary",
"independent",
"pbm",
1,
"len_list must be an integer such that",
),
(
5,
3,
0,
"binary",
"independent",
"pbm",
1,
"dim_context must be a positive integer",
),
(
5,
3,
"2",
"binary",
"independent",
"pbm",
1,
"dim_context must be a positive integer",
),
(5, 3, 2, "aaa", "independent", "pbm", 1, "reward_type must be either"),
(5, 3, 2, "binary", "aaa", "pbm", 1, "reward_structure must be one of"),
(5, 3, 2, "binary", "independent", "aaa", 1, "click_model must be one of"),
(5, 3, 2, "binary", "independent", "pbm", "x", "random_state must be an integer"),
(5, 3, 2, "binary", "independent", "pbm", None, "random_state must be an integer"),
]
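# Each tuple above is one invalid constructor configuration plus the substring expected in
# the ValueError raised by SyntheticSlateBanditDataset.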
@pytest.mark.parametrize(
"n_unique_action, len_list, dim_context, reward_type, reward_structure, click_model, random_state, description",
invalid_input_of_init,
)
def test_synthetic_slate_init_using_invalid_inputs(
n_unique_action,
len_list,
dim_context,
reward_type,
reward_structure,
click_model,
random_state,
description,
):
with pytest.raises(ValueError, match=f"{description}*"):
_ = SyntheticSlateBanditDataset(
n_unique_action=n_unique_action,
len_list=len_list,
dim_context=dim_context,
reward_type=reward_type,
reward_structure=reward_structure,
click_model=click_model,
random_state=random_state,
)
def check_slate_bandit_feedback(bandit_feedback: BanditFeedback):
# check pscore columns
pscore_columns: List[str] = []
pscore_candidate_columns = [
"pscore_cascade",
"pscore",
"pscore_item_position",
]
for column in pscore_candidate_columns:
if column in bandit_feedback and bandit_feedback[column] is not None:
pscore_columns.append(column)
assert (
len(pscore_columns) > 0
), f"bandit feedback must contain at least one of the following pscore columns: {pscore_candidate_columns}"
bandit_feedback_df = pd.DataFrame()
for column in ["slate_id", "position", "action"] + pscore_columns:
bandit_feedback_df[column] = bandit_feedback[column]
# sort dataframe
bandit_feedback_df = (
bandit_feedback_df.sort_values(["slate_id", "position"])
.reset_index(drop=True)
.copy()
)
# check uniqueness
assert (
bandit_feedback_df.duplicated(["slate_id", "position"]).sum() == 0
), "position must not be duplicated in each slate"
assert (
bandit_feedback_df.duplicated(["slate_id", "action"]).sum() == 0
), "action must not be duplicated in each slate"
# check pscores
for column in pscore_columns:
invalid_pscore_flgs = (bandit_feedback_df[column] < 0) | (
bandit_feedback_df[column] > 1
)
assert invalid_pscore_flgs.sum() == 0, "the range of pscores must be [0, 1]"
if "pscore_cascade" in pscore_columns and "pscore" in pscore_columns:
assert (
bandit_feedback_df["pscore_cascade"] < bandit_feedback_df["pscore"]
).sum() == 0, "pscore must be smaller than or equal to pscore_cascade"
if "pscore_item_position" in pscore_columns and "pscore" in pscore_columns:
assert (
bandit_feedback_df["pscore_item_position"] < bandit_feedback_df["pscore"]
).sum() == 0, "pscore must be smaller than or equal to pscore_item_position"
if "pscore_item_position" in pscore_columns and "pscore_cascade" in pscore_columns:
assert (
bandit_feedback_df["pscore_item_position"]
< bandit_feedback_df["pscore_cascade"]
).sum() == 0, (
"pscore_cascade must be smaller than or equal to pscore_item_position"
)
if "pscore_cascade" in pscore_columns:
previous_minimum_pscore_cascade = (
bandit_feedback_df.groupby("slate_id")["pscore_cascade"]
.expanding()
.min()
.values
)
assert (
previous_minimum_pscore_cascade < bandit_feedback_df["pscore_cascade"]
).sum() == 0, "pscore_cascade must be non-decresing sequence in each slate"
if "pscore" in pscore_columns:
count_pscore_in_expression = bandit_feedback_df.groupby("slate_id").apply(
lambda x: x["pscore"].unique().shape[0]
)
assert (
count_pscore_in_expression != 1
).sum() == 0, "pscore must be unique in each slate"
if "pscore" in pscore_columns and "pscore_cascade" in pscore_columns:
last_slot_feedback_df = bandit_feedback_df.drop_duplicates(
"slate_id", keep="last"
)
assert (
last_slot_feedback_df["pscore"] != last_slot_feedback_df["pscore_cascade"]
).sum() == 0, "pscore must be the same as pscore_cascade in the last slot"
def test_synthetic_slate_obtain_batch_bandit_feedback_using_uniform_random_behavior_policy():
# set parameters
n_unique_action = 10
len_list = 3
dim_context = 2
reward_type = "binary"
random_state = 12345
n_rounds = 100
dataset = SyntheticSlateBanditDataset(
n_unique_action=n_unique_action,
len_list=len_list,
dim_context=dim_context,
reward_type=reward_type,
random_state=random_state,
)
# obtain feedback
bandit_feedback = dataset.obtain_batch_bandit_feedback(n_rounds=n_rounds)
# check slate bandit feedback (common test)
check_slate_bandit_feedback(bandit_feedback=bandit_feedback)
pscore_columns = [
"pscore_cascade",
"pscore",
"pscore_item_position",
]
bandit_feedback_df = pd.DataFrame()
for column in ["slate_id", "position", "action"] + pscore_columns:
bandit_feedback_df[column] = bandit_feedback[column]
# check pscore marginal
pscore_item_position = 1 / n_unique_action
assert np.allclose(
bandit_feedback_df["pscore_item_position"].unique(), pscore_item_position
), f"pscore_item_position must be [{pscore_item_position}], but {bandit_feedback_df['pscore_item_position'].unique()}"
# check pscore joint
pscore_cascade = []
pscore_above = 1.0
for position_ in np.arange(len_list):
pscore_above = pscore_above * 1.0 / (n_unique_action - position_)
pscore_cascade.append(pscore_above)
assert np.allclose(
bandit_feedback_df["pscore_cascade"], np.tile(pscore_cascade, n_rounds)
), f"pscore_cascade must be {pscore_cascade} for all impresessions"
assert np.allclose(
bandit_feedback_df["pscore"].unique(), [pscore_above]
), f"pscore must be {pscore_above} for all slates"
def test_synthetic_slate_obtain_batch_bandit_feedback_using_uniform_random_behavior_policy_largescale():
# set parameters
n_unique_action = 100
len_list = 10
dim_context = 2
reward_type = "binary"
random_state = 12345
n_rounds = 10000
dataset = SyntheticSlateBanditDataset(
n_unique_action=n_unique_action,
len_list=len_list,
dim_context=dim_context,
reward_type=reward_type,
random_state=random_state,
)
# obtain feedback
bandit_feedback = dataset.obtain_batch_bandit_feedback(n_rounds=n_rounds)
# check slate bandit feedback (common test)
check_slate_bandit_feedback(bandit_feedback=bandit_feedback)
# check pscore marginal
pscore_item_position = 1 / n_unique_action
assert np.allclose(
np.unique(bandit_feedback["pscore_item_position"]), pscore_item_position
), f"pscore_item_position must be [{pscore_item_position}], but {np.unique(bandit_feedback['pscore_item_position'])}"
def test_synthetic_slate_obtain_batch_bandit_feedback_using_linear_behavior_policy():
# set parameters
n_unique_action = 10
len_list = 3
dim_context = 2
reward_type = "binary"
random_state = 12345
n_rounds = 100
dataset = SyntheticSlateBanditDataset(
n_unique_action=n_unique_action,
len_list=len_list,
dim_context=dim_context,
reward_type=reward_type,
random_state=random_state,
behavior_policy_function=linear_behavior_policy_logit,
)
with pytest.raises(ValueError):
_ = dataset.obtain_batch_bandit_feedback(n_rounds=-1)
with pytest.raises(ValueError):
_ = dataset.obtain_batch_bandit_feedback(n_rounds="a")
# obtain feedback
bandit_feedback = dataset.obtain_batch_bandit_feedback(n_rounds=n_rounds)
# check slate bandit feedback (common test)
check_slate_bandit_feedback(bandit_feedback=bandit_feedback)
# print reward
pscore_columns = [
"pscore_cascade",
"pscore",
"pscore_item_position",
]
bandit_feedback_df = pd.DataFrame()
for column in ["slate_id", "position", "action", "reward"] + pscore_columns:
bandit_feedback_df[column] = bandit_feedback[column]
print(bandit_feedback_df.groupby("position")["reward"].describe())
if reward_type == "binary":
assert set(np.unique(bandit_feedback["reward"])) == set([0, 1])
def test_synthetic_slate_obtain_batch_bandit_feedback_using_linear_behavior_policy_without_pscore_item_position():
# set parameters
n_unique_action = 80
len_list = 3
dim_context = 2
reward_type = "binary"
random_state = 12345
n_rounds = 100
dataset = SyntheticSlateBanditDataset(
n_unique_action=n_unique_action,
len_list=len_list,
dim_context=dim_context,
reward_type=reward_type,
random_state=random_state,
behavior_policy_function=linear_behavior_policy_logit,
)
# obtain feedback
bandit_feedback = dataset.obtain_batch_bandit_feedback(
n_rounds=n_rounds, return_pscore_item_position=False
)
# check slate bandit feedback (common test)
check_slate_bandit_feedback(bandit_feedback=bandit_feedback)
assert (
bandit_feedback["pscore_item_position"] is None
), f"pscore marginal must be None, but {bandit_feedback['pscore_item_position']}"
# random seed should be fixed
dataset2 = SyntheticSlateBanditDataset(
n_unique_action=n_unique_action,
len_list=len_list,
dim_context=dim_context,
reward_type=reward_type,
random_state=random_state,
behavior_policy_function=linear_behavior_policy_logit,
)
# obtain feedback
bandit_feedback2 = dataset2.obtain_batch_bandit_feedback(
n_rounds=n_rounds, return_pscore_item_position=False
)
# check slate bandit feedback (common test)
check_slate_bandit_feedback(bandit_feedback=bandit_feedback2)
# check random seed effect
assert np.allclose(
bandit_feedback["expected_reward_factual"],
bandit_feedback2["expected_reward_factual"],
)
if reward_type == "binary":
assert set(np.unique(bandit_feedback["reward"])) == set([0, 1])
# n_unique_action, len_list, dim_context, reward_type, random_state, n_rounds, reward_structure, click_model, behavior_policy_function, reward_function, return_pscore_item_position, description
valid_input_of_obtain_batch_bandit_feedback = [
(
10,
3,
2,
"binary",
123,
1000,
"standard_additive",
None,
linear_behavior_policy_logit,
logistic_reward_function,
False,
"standard_additive",
),
(
10,
3,
2,
"binary",
123,
1000,
"independent",
None,
linear_behavior_policy_logit,
logistic_reward_function,
False,
"independent",
),
(
10,
3,
2,
"binary",
123,
1000,
"cascade_additive",
None,
linear_behavior_policy_logit,
logistic_reward_function,
False,
"cascade_additive",
),
(
10,
3,
2,
"continuous",
123,
1000,
"standard_additive",
None,
linear_behavior_policy_logit,
linear_reward_function,
False,
"standard_additive continuous",
),
(
10,
3,
2,
"continuous",
123,
1000,
"independent",
None,
linear_behavior_policy_logit,
linear_reward_function,
False,
"independent continuous",
),
(
10,
3,
2,
"continuous",
123,
1000,
"cascade_additive",
None,
linear_behavior_policy_logit,
linear_reward_function,
False,
"cascade_additive continuous",
),
(
10,
3,
2,
"continuous",
123,
1000,
"cascade_additive",
None,
None,
None,
False,
"Random policy and reward function (continuous reward)",
),
(
10,
3,
2,
"binary",
123,
1000,
"cascade_exponential",
None,
linear_behavior_policy_logit,
logistic_reward_function,
False,
"cascade_exponential (binary reward)",
),
(
10,
3,
2,
"continuous",
123,
1000,
"cascade_exponential",
None,
linear_behavior_policy_logit,
linear_reward_function,
False,
"cascade_exponential (continuous reward)",
),
(
10,
3,
2,
"binary",
123,
1000,
"standard_exponential",
None,
linear_behavior_policy_logit,
logistic_reward_function,
False,
"standard_exponential (binary reward)",
),
(
10,
3,
2,
"continuous",
123,
1000,
"standard_exponential",
None,
linear_behavior_policy_logit,
linear_reward_function,
False,
"standard_exponential (continuous reward)",
),
(
10,
3,
2,
"binary",
123,
1000,
"cascade_additive",
"cascade",
linear_behavior_policy_logit,
logistic_reward_function,
False,
"cascade_additive, cascade click model (binary reward)",
),
(
10,
3,
2,
"binary",
123,
1000,
"cascade_exponential",
"cascade",
linear_behavior_policy_logit,
logistic_reward_function,
False,
"cascade_exponential, cascade click model (binary reward)",
),
(
10,
3,
2,
"binary",
123,
1000,
"standard_additive",
"cascade",
linear_behavior_policy_logit,
logistic_reward_function,
False,
"standard_additive, cascade click model (binary reward)",
),
(
10,
3,
2,
"binary",
123,
1000,
"standard_exponential",
"cascade",
linear_behavior_policy_logit,
logistic_reward_function,
False,
"standard_exponential, cascade click model (binary reward)",
),
(
10,
3,
2,
"binary",
123,
1000,
"independent",
"cascade",
linear_behavior_policy_logit,
logistic_reward_function,
False,
"independent, cascade click model (binary reward)",
),
(
10,
3,
2,
"binary",
123,
1000,
"cascade_additive",
"pbm",
linear_behavior_policy_logit,
logistic_reward_function,
False,
"cascade_additive, pbm click model (binary reward)",
),
(
10,
3,
2,
"binary",
123,
1000,
"cascade_exponential",
"pbm",
linear_behavior_policy_logit,
logistic_reward_function,
False,
"cascade_exponential, pbm click model (binary reward)",
),
(
10,
3,
2,
"binary",
123,
1000,
"standard_additive",
"pbm",
linear_behavior_policy_logit,
logistic_reward_function,
False,
"standard_additive, pbm click model (binary reward)",
),
(
10,
3,
2,
"binary",
123,
1000,
"standard_exponential",
"pbm",
linear_behavior_policy_logit,
logistic_reward_function,
False,
"standard_exponential, pbm click model (binary reward)",
),
(
10,
3,
2,
"binary",
123,
1000,
"independent",
"pbm",
linear_behavior_policy_logit,
logistic_reward_function,
False,
"independent, pbm click model (binary reward)",
),
]
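# Each tuple above is one valid dataset configuration (reward type/structure, click model,
# behavior policy and reward function) exercised by the parametrized test below.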
@pytest.mark.parametrize(
"n_unique_action, len_list, dim_context, reward_type, random_state, n_rounds, reward_structure, click_model, behavior_policy_function, reward_function, return_pscore_item_position, description",
valid_input_of_obtain_batch_bandit_feedback,
)
def test_synthetic_slate_using_valid_inputs(
n_unique_action,
len_list,
dim_context,
reward_type,
random_state,
n_rounds,
reward_structure,
click_model,
behavior_policy_function,
reward_function,
return_pscore_item_position,
description,
):
dataset = SyntheticSlateBanditDataset(
n_unique_action=n_unique_action,
len_list=len_list,
dim_context=dim_context,
reward_type=reward_type,
reward_structure=reward_structure,
click_model=click_model,
random_state=random_state,
behavior_policy_function=behavior_policy_function,
base_reward_function=reward_function,
)
# obtain feedback
bandit_feedback = dataset.obtain_batch_bandit_feedback(
n_rounds=n_rounds, return_pscore_item_position=return_pscore_item_position
)
# check slate bandit feedback (common test)
check_slate_bandit_feedback(bandit_feedback=bandit_feedback)
pscore_columns = [
"pscore_cascade",
"pscore",
"pscore_item_position",
]
bandit_feedback_df = pd.DataFrame()
import itertools
from sklearn.model_selection import train_test_split
from challenge.agoda_cancellation_estimator import AgodaCancellationEstimator
import matplotlib.pyplot as plt
from sklearn import metrics
import numpy as np
import pandas as pd
import re
PATTERN = re.compile(r"((?P<days1>[1-9]\d*)D(?P<amount1>[1-9]\d*[NP])_)?((?P<days2>[1-9]\d*)D(?P<amount2>[1-9]\d*[NP])_)?(?P<noshow>[1-9]\d*[NP])?")
def cancel_parser(policy: str, nights_num):
if nights_num <= 0:
nights_num = 1
match = PATTERN.match(policy)
if match is None:
return policy
else:
noshow = match.group("noshow")
noshow = 1 if noshow is None else int(noshow[:-1])/100 if noshow[-1] == 'P' else int(noshow[:-1]) / nights_num
days1 = match.group("days1")
if days1 is None:
days1 = 0
amount1 = noshow
else:
days1 = int(days1)
amount1 = match.group("amount1")
amount1 = int(amount1[:-1])/100 if amount1[-1] == 'P' else int(amount1[:-1])/nights_num
days2 = match.group("days2")
if days2 is None:
days2 = 0
amount2 = amount1
else:
days2 = int(days2)
amount2 = match.group("amount2")
amount2 = int(amount2[:-1])/100 if amount2[-1] == 'P' else int(amount2[:-1])/nights_num
return days1, amount1, days2, amount2, noshow
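# Worked example (illustrative): cancel_parser('30D1N_14D50P_100P', nights_num=2) returns
# (30, 0.5, 14, 0.5, 1.0): days1=30 with amount1=0.5 (1 night / 2 nights), days2=14 with
# amount2=0.5 (50%), and a no-show charge of 1.0 (100%).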
def agoda_preprocessor(full_data: np.ndarray):
# fill cancellation datetime which doesn't exist as 0
full_data.loc[full_data["cancellation_datetime"].isnull(), "cancellation_datetime"] = full_data["checkin_date"]
full_data['cancellation_datetime'] = pd.to_datetime(full_data["cancellation_datetime"])
features = data_preprocessor(full_data)
full_data["cancel_warning_days"] = (full_data['checkin_date'] - full_data['cancellation_datetime']).dt.days
full_data["days_cancelled_after_booking"] = (full_data["cancellation_datetime"] - full_data["booking_datetime"]).dt.days
labels = (7 <= full_data["days_cancelled_after_booking"]) & (full_data["days_cancelled_after_booking"] <= 43)
return features, np.asarray(labels).astype(int)
def load_agoda_dataset():
"""
Load Agoda booking cancellation dataset
Returns
-------
Design matrix and response vector in the following format:
- Tuple of ndarray of shape (n_samples, n_features) and ndarray of shape (n_samples,)
"""
# clean data for unrealistic shit
full_data = pd.read_csv("../datasets/agoda_cancellation_train.csv").drop_duplicates()
features, labels = agoda_preprocessor(full_data)
return features, labels
def data_preprocessor(full_data):
# starting with the numerical and boolean columns
features = full_data[["hotel_star_rating",
"guest_is_not_the_customer",
"original_selling_amount",
"is_user_logged_in",
"is_first_booking",
"cancellation_policy_code",
]].fillna(0)
# how much the customer cares about his order, sums all it's requests
features["num_requests"] = (full_data["request_nonesmoke"].fillna(0) +
full_data["request_latecheckin"].fillna(0) +
full_data["request_highfloor"].fillna(0) +
full_data["request_largebed"].fillna(0) +
full_data["request_twinbeds"].fillna(0) +
full_data["request_airport"].fillna(0) +
full_data["request_earlycheckin"].fillna(0))
features["charge_option"] = full_data["charge_option"].apply(lambda x: 1 if x == "Pay Later" else 0)
# accom = {"":}
# features["accommadation_type_name"] = full_data["accommadation_type_name"].apply(lambda x: accom[x])
full_data['booking_datetime'] = pd.to_datetime(full_data['booking_datetime'])
full_data['checkin_date'] = pd.to_datetime(full_data['checkin_date'])
full_data['checkout_date'] = pd.to_datetime(full_data['checkout_date'])
# add date connected numerical columns
features["days_to_checkin"] = (full_data["checkin_date"] - full_data["booking_datetime"]).dt.days
features["num_nights"] = (full_data['checkout_date'] - full_data['checkin_date']).dt.days - 1
# deal with cancellation policy code
features['parsed_cancellation'] = features.apply(lambda x: cancel_parser(x['cancellation_policy_code'], x['num_nights']), axis=1)
features[['cd1', 'cp1', 'cd2', 'cp2', 'ns']] = pd.DataFrame(features['parsed_cancellation'].tolist(), index=features.index)
del features["cancellation_policy_code"]
del features['parsed_cancellation']
return features
def cross_validate(estimator, X: np.ndarray, y: np.ndarray, cv):
"""
Evaluate metric by cross-validation for given estimator
Parameters
----------
estimator: BaseEstimator
Initialized estimator to use for fitting the data
X: ndarray of shape (n_samples, n_features)
Input data to fit
y: ndarray of shape (n_samples, )
Responses of input data to fit to
cv: int
Specify the number of folds.
Returns
-------
validation_score: float
Average validation score over folds
"""
validation_scores = []
split_X, split_y = np.array_split(X, cv), np.array_split(y, cv)
for i in range(cv):
# create S\Si & Si
train_x, train_y = np.concatenate(np.delete(split_X, i, axis=0)), np.concatenate(np.delete(split_y, i, axis=0))
test_x, test_y = split_X[i], split_y[i]
# fit the estimator to the current folds
A = estimator.fit(train_x, train_y)
# predict over the validation fold and over the hole train set
validation_scores.append(metrics.f1_score(A.predict(test_x), test_y, average='macro'))
return np.array(validation_scores).mean()
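# Illustrative: cross_validate(AgodaCancellationEstimator(0.6, 0.07), X, y, cv=6) returns the
# mean macro-F1 across the six validation folds (mirrors the call pattern used below).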
def training_playground(X, y):
"""
Evaluate current model performances over previous weeks datasets.
Parameters
----------
X: the previous weeks unite dataset
y: the previous weeks unite labels
"""
# f1_scores = []
# for true, false in itertools.product(list(np.arange(0.6, 1, 0.05)), list(np.arange(0.03, 0.1, 0.01))):
# print(true, false)
# estimator = AgodaCancellationEstimator(true, false)
# f1_scores.append(cross_validate(estimator, X, y, cv=6))
#
# print(f1_scores)
# define train & test sets.
train_X, test_X, train_y, test_y = train_test_split(X.to_numpy(), y.to_numpy(), test_size=1/6)
# Fit model over data
prev_estimator = AgodaCancellationEstimator(0.6, 0.07).fit(train_X, train_y)
# Predict for test_X
y_pred = pd.DataFrame(prev_estimator.predict(test_X), columns=["predicted_values"])
# confusion matrix
cm = metrics.ConfusionMatrixDisplay(metrics.confusion_matrix(test_y, y_pred))
cm.plot()
plt.show()
# Performances:
print("Area Under Curve: ", metrics.roc_auc_score(test_y, y_pred))
print("Accuracy: ", metrics.accuracy_score(test_y, y_pred))
print("Recall: ", metrics.recall_score(test_y, y_pred))
print("Precision: ", metrics.precision_score(test_y, y_pred))
print("F1 Macro Score: ", metrics.f1_score(test_y, y_pred, average='macro'))
def evaluate_and_export(X, y, test_csv_filename):
"""
Export to specified file the prediction results of given estimator on given testset.
File saved is in csv format with a single column named 'predicted_values' and n_samples rows containing
predicted values.
Parameters
----------
X: the previous weeks unite dataset
y: the previous weeks unite labels
test_csv_filename: path to the current week test-set csv file
"""
f1_scores = []
range_of_weights = list(itertools.product(list(np.arange(0.6, 1, 0.05)), list(np.arange(0.03, 0.1, 0.01))))
for true, false in range_of_weights:
estimator = AgodaCancellationEstimator(true, false)
f1_scores.append(cross_validate(estimator, X, y, cv=6))
print(np.max(f1_scores))
true_weight, false_weight = range_of_weights[np.argmax(f1_scores)]
# Fit model over data
prev_estimator = AgodaCancellationEstimator(true_weight, false_weight).fit(X, y)
# Store model predictions over test set
test_set = pd.read_csv(test_csv_filename).drop_duplicates()
# predict over current-week test-set
X = data_preprocessor(test_set)
y_pred = pd.DataFrame(prev_estimator.predict(X), columns=["predicted_values"])
# export the current-week predicted labels
pd.DataFrame(y_pred, columns=["predicted_values"]).to_csv("342473642_206200552_316457340.csv", index=False)
def load_previous():
"""
Load Previous-weeks test-sets and labels
Returns
-------
Design matrix and response vector in the following format:
- Tuple of ndarray of shape (n_samples, n_features) and ndarray of shape (n_samples,)
"""
data_set = pd.read_csv(f'testsets//t1.csv')
data_set['label'] = pd.read_csv(f'labels//l1.csv')["cancel"]
for i in range(2, 7):
ti = pd.read_csv(f'testsets//t{i}.csv')
li = pd.read_csv(f'labels//l{i}.csv')
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from linearmodels import PanelOLS
import statsmodels.api as sm
import econtools as econ
import econtools.metrics as mt
import math
from statsmodels.stats.outliers_influence import variance_inflation_factor
from auxiliary.prepare import *
from auxiliary.table2 import *
from auxiliary.table3 import *
from auxiliary.table4 import *
from auxiliary.table5 import *
from auxiliary.table6 import *
from auxiliary.table7 import *
from auxiliary.extension import *
from auxiliary.table_formula import *
def calc_vif(X):
# Calculating VIF
vif = pd.DataFrame()
vif["variables"] = X.columns
vif["VIF"] = [variance_inflation_factor(X.values, i) for i in range(X.shape[1])]
return(vif)
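# Illustrative (column names are placeholders): calc_vif(df[['reserve_price', 'fiscal_efficiency', 'municipality']])
# returns one VIF per regressor; values well above ~10 are commonly read as problematic collinearity.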
def table5_setting(data):
df = data
df = df[((df['turin_co_sample']==1) | (df['turin_pr_sample']==1)) & ((df['post_experience']>=5)|(df['post_experience'].isnull()==True)) & ((df['pre_experience']>=5)|(df['pre_experience'].isnull()==True))& (df['missing']==0)]
df = df[(df['ctrl_pop_turin_co_sample']==1) | (df['ctrl_pop_turin_pr_sample']==1) | (df['ctrl_exp_turin_co_sample']==1) | (df['ctrl_exp_turin_pr_sample']==1) | (df['ctrl_pop_exp_turin_co_sample']==1) | (df['ctrl_pop_exp_turin_pr_sample']==1)]
df = df.reset_index()
#re-construct trend-pa: setting
id_auth_remained = df['id_auth'].unique()
id_auth_remained_df = pd.DataFrame({'id_auth': [], 'group_num': []})
for i in range(len(id_auth_remained)):
id_auth_remained_df.loc[i,'id_auth'] = id_auth_remained[i]
id_auth_remained_df.loc[i,'group_num'] = i+1
for i in range(len(df)):
for j in range(len(id_auth_remained_df)):
if df.loc[i, 'id_auth'] == id_auth_remained_df.loc[j, 'id_auth']:
df.loc[i, 'id_auth_remained'] = j+1
id_auth_remained_dum = pd.get_dummies(df['id_auth_remained']).rename(columns=lambda x: 'id_auth_remained' + str(x))
df = pd.concat([df, id_auth_remained_dum],axis = 1)
#re-construct trend-pa
for i in range(len(id_auth_remained_dum.columns)):
df['trend_pa_remained_'+str(i+1)] = 0
for j in range(len(df)):
if df.loc[j, id_auth_remained_dum.columns[i]]==1 and df.loc[j, 'authority_code']!=3090272 and df.loc[j, 'authority_code']!=3070001:
df.loc[j,'trend_pa_remained_'+str(i+1)] = 1
df.drop([id_auth_remained_dum.columns[i]],axis = 1)
return(df)
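# table5_setting keeps the Turin county/province samples with at least five years of
# pre/post experience and rebuilds the per-authority trend dummies (trend_pa_remained_*)
# that some of the Table 5 regressions below include as controls.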
def table5_PanelA_odd(data):
outcomes = ['discount','delay_ratio','overrun_ratio','days_to_award']
t = 'turin_co_sample'
g = 'ctrl_exp'
c_outcomes=1
i = 5
df1 = data
df1_tmp = df1[(df1[t]==1)& (df1[g +'_' + t]==1) & (df1['post_experience']>=i) & (df1['pre_experience']>=i)& (df1['post_experience'].isnull()==False) & (df1['pre_experience'].isnull()==False) & (df1['missing']==0) & (df1['fiscal_efficiency'].isnull()==False) & (df1['reserve_price'].isnull()==False)&(df1['municipality'].isnull()==False)]
for o in outcomes:
df1 = df1_tmp[df1_tmp[o].isnull()==False]
df1 = df1.reset_index()
df1 = df1.sort_values(by = 'authority_code', ascending = True)
df1['ind'] = np.nan
for i in range(len(df1)):
if i == 0:
df1.loc[i, 'ind'] = 1
else:
if df1.loc[i, 'authority_code'] != df1.loc[i-1, 'authority_code']:
df1.loc[i, 'ind'] = 1
#create dummies for administration-year pairs
all_years = df1['year'].unique()
all_authorities = df1['authority_code'].unique()
auth_year_reg_col = []
for auth in all_authorities:
for yr in all_years:
df1['auth_year_' + str(auth)+'_' + str(yr)] = 0
auth_year_reg_col.append('auth_year_' + str(auth)+'_' + str(yr))
df1.loc[(df1['year']==yr) & (df1['authority_code']==auth), 'auth_year_' + str(auth)+'_' + str(yr) ] = 1
##regression for first stage
#create dummies for work category
all_categories = df1['work_category'].unique()
for cat in all_categories:
df1['cat_'+cat] = 0
df1.loc[df1['work_category']==cat, 'cat_'+cat] =1
### Regression first stage
#setting
work_dum = pd.get_dummies(df1['work_category']).rename(columns=lambda x: 'work_dum_' + str(x))
year_dum = pd.get_dummies(df1['year']).rename(columns=lambda x: 'year_dum_' + str(x))
auth_dum = pd.get_dummies(df1['authority_code']).rename(columns=lambda x: 'auth_dum_' + str(x))
dum_df = pd.concat([work_dum, year_dum, auth_dum],axis = 1)
df1 = pd.concat([df1,dum_df],axis = 1)
work_list = list(work_dum.columns)
year_list = list(year_dum.columns)
auth_list = list(auth_dum.columns)
reg_col = []
for i in work_list:
reg_col.append(i)
for j in year_list:
reg_col.append(j)
for k in auth_list:
reg_col.append(k)
exog_var = ['fpsb_auction','reserve_price','municipality','fiscal_efficiency']
exog = exog_var + reg_col
exog.remove('year_dum_2000.0')
exog.remove('work_dum_OG01')
exog.remove('auth_dum_3.0')
exog.remove('auth_dum_1708.0')
#1. reg
fe_reg_1 = mt.reg(df1, o, exog, cluster = 'auth_anno', addcons= True, check_colinear = True)
#2. reg
fe_reg_2 = mt.reg(df1, o, exog, cluster = 'authority_code',addcons= True, check_colinear = True)
ci_1 = fe_reg_1.summary.loc['fpsb_auction',['CI_low', 'CI_high']].round()
ci_2 = fe_reg_2.summary.loc['fpsb_auction',['CI_low', 'CI_high']].round()
if o == 'discount':
ci_discount = pd.DataFrame((ci_1,ci_2))
elif o == 'delay_ratio':
ci_delay_ratio = pd.DataFrame((ci_1,ci_2))
elif o == 'overrun_ratio':
ci_overrun_ratio = pd.DataFrame((ci_1,ci_2))
else:
ci_days_to_award = pd.DataFrame((ci_1,ci_2))
ci = pd.concat([ci_discount,ci_delay_ratio,ci_overrun_ratio,ci_days_to_award],axis=1).reset_index()
del ci['index']
return(ci)
def table5_PanelA_even(data):
outcomes = ['discount','delay_ratio','overrun_ratio','days_to_award']
t = 'turin_co_sample'
g = 'ctrl_exp'
c_outcomes=1
i = 5
df1 = data
df1_tmp = df1[(df1[t]==1)& (df1[g +'_' + t]==1) & (df1['post_experience']>=i) & (df1['pre_experience']>=i)& (df1['post_experience'].isnull()==False) & (df1['pre_experience'].isnull()==False) & (df1['missing']==0) & (df1['fiscal_efficiency'].isnull()==False) & (df1['reserve_price'].isnull()==False)&(df1['municipality'].isnull()==False)]
for o in outcomes:
df1 = df1_tmp[df1_tmp[o].isnull()==False]
df1 = df1.reset_index()
df1 = df1.sort_values(by = 'authority_code', ascending = True)
df1['ind'] = np.nan
for i in range(len(df1)):
if i == 0:
df1.loc[i, 'ind'] = 1
else:
if df1.loc[i, 'authority_code'] != df1.loc[i-1, 'authority_code']:
df1.loc[i, 'ind'] = 1
#create dummies for administration-year pairs
all_years = df1['year'].unique()
all_authorities = df1['authority_code'].unique()
auth_year_reg_col = []
for auth in all_authorities:
for yr in all_years:
df1['auth_year_' + str(auth)+'_' + str(yr)] = 0
auth_year_reg_col.append('auth_year_' + str(auth)+'_' + str(yr))
df1.loc[(df1['year']==yr) & (df1['authority_code']==auth), 'auth_year_' + str(auth)+'_' + str(yr) ] = 1
##regression for first stage
#create dummies for work category
all_categories = df1['work_category'].unique()
for cat in all_categories:
df1['cat_'+cat] = 0
df1.loc[df1['work_category']==cat, 'cat_'+cat] =1
### Regression first stage
#setting
work_dum = pd.get_dummies(df1['work_category']).rename(columns=lambda x: 'work_dum_' + str(x))
year_dum = pd.get_dummies(df1['year']).rename(columns=lambda x: 'year_dum_' + str(x))
auth_dum = pd.get_dummies(df1['authority_code']).rename(columns=lambda x: 'auth_dum_' + str(x))
dum_df = pd.concat([work_dum, year_dum, auth_dum],axis = 1)
df1 = pd.concat([df1,dum_df],axis = 1)
work_list = list(work_dum.columns)
year_list = list(year_dum.columns)
auth_list = list(auth_dum.columns)
reg_col = []
for i in work_list:
reg_col.append(i)
for j in year_list:
reg_col.append(j)
exog_var = ['fpsb_auction','reserve_price','municipality','fiscal_efficiency','trend','trend_treat']
for i in range(1,36):
exog_var.append('trend_pa_remained_'+str(i))
exog = exog_var + reg_col
exog.remove('year_dum_2000.0')
exog.remove('work_dum_OG01')
for i in [2,4,6,7,9,11,12,13,15,16,17,18,20,21,22,23,24,25,26,28,34,35]:
exog.remove('trend_pa_remained_'+str(i))
#1. reg
fe_reg_1 = mt.reg(df1, o, exog, cluster = 'auth_anno', check_colinear = True)
#2. reg
fe_reg_2 = mt.reg(df1, o, exog, cluster = 'authority_code', check_colinear = True)
ci_1 = fe_reg_1.summary.loc['fpsb_auction',['CI_low', 'CI_high']].round()
ci_2 = fe_reg_2.summary.loc['fpsb_auction',['CI_low', 'CI_high']].round()
if o == 'discount':
ci_discount = pd.DataFrame((ci_1,ci_2))
elif o == 'delay_ratio':
ci_delay_ratio = pd.DataFrame((ci_1,ci_2))
elif o == 'overrun_ratio':
ci_overrun_ratio = pd.DataFrame((ci_1,ci_2))
else:
ci_days_to_award = pd.DataFrame((ci_1,ci_2))
ci = pd.concat([ci_discount,ci_delay_ratio,ci_overrun_ratio,ci_days_to_award],axis=1).reset_index()
del ci['index']
return(ci)
def table5_PanelB_odd(data):
outcomes = ['discount','delay_ratio','overrun_ratio','days_to_award']
t = 'turin_pr_sample'
g = 'ctrl_exp'
c_outcomes=1
i = 5
df1 = data
df1_tmp = df1[(df1[t]==1)& (df1[g +'_' + t]==1) & (df1['post_experience']>=i) & (df1['pre_experience']>=i)& (df1['post_experience'].isnull()==False) & (df1['pre_experience'].isnull()==False) & (df1['missing']==0) & (df1['fiscal_efficiency'].isnull()==False) & (df1['reserve_price'].isnull()==False)&(df1['municipality'].isnull()==False)]
for o in outcomes:
df1 = df1_tmp[df1_tmp[o].isnull()==False]
df1 = df1.reset_index()
df1 = df1.sort_values(by = 'authority_code', ascending = True)
df1['ind'] = np.nan
for i in range(len(df1)):
if i == 0:
df1.loc[i, 'ind'] = 1
else:
if df1.loc[i, 'authority_code'] != df1.loc[i-1, 'authority_code']:
df1.loc[i, 'ind'] = 1
#create dummies for administration-year pairs
all_years = df1['year'].unique()
all_authorities = df1['authority_code'].unique()
auth_year_reg_col = []
for auth in all_authorities:
for yr in all_years:
df1['auth_year_' + str(auth)+'_' + str(yr)] = 0
auth_year_reg_col.append('auth_year_' + str(auth)+'_' + str(yr))
df1.loc[(df1['year']==yr) & (df1['authority_code']==auth), 'auth_year_' + str(auth)+'_' + str(yr) ] = 1
##regression for first stage
#create dummies for work category
all_categories = df1['work_category'].unique()
for cat in all_categories:
df1['cat_'+cat] = 0
df1.loc[df1['work_category']==cat, 'cat_'+cat] =1
### Regression first stage
#setting
work_dum = pd.get_dummies(df1['work_category']).rename(columns=lambda x: 'work_dum_' + str(x))
year_dum = pd.get_dummies(df1['year']).rename(columns=lambda x: 'year_dum_' + str(x))
auth_dum = pd.get_dummies(df1['authority_code']).rename(columns=lambda x: 'auth_dum_' + str(x))
dum_df = pd.concat([work_dum, year_dum, auth_dum],axis = 1)
df1 = pd.concat([df1,dum_df],axis = 1)
work_list = list(work_dum.columns)
year_list = list(year_dum.columns)
auth_list = list(auth_dum.columns)
reg_col = []
for i in work_list:
reg_col.append(i)
for j in year_list:
reg_col.append(j)
for k in auth_list:
reg_col.append(k)
exog_var = ['fpsb_auction','reserve_price','municipality','fiscal_efficiency']
exog = exog_var + reg_col
exog.remove('year_dum_2000.0')
exog.remove('work_dum_OG01')
exog.remove('auth_dum_3.0')
exog.remove('auth_dum_1708.0')
#1. reg
fe_reg_1 = mt.reg(df1, o, exog, cluster = 'auth_anno', addcons= True, check_colinear = True)
#2. reg
fe_reg_2 = mt.reg(df1, o, exog, cluster = 'authority_code',addcons= True, check_colinear = True)
ci_1 = fe_reg_1.summary.loc['fpsb_auction',['CI_low', 'CI_high']].round()
ci_2 = fe_reg_2.summary.loc['fpsb_auction',['CI_low', 'CI_high']].round()
if o == 'discount':
ci_discount = pd.DataFrame((ci_1,ci_2))
elif o == 'delay_ratio':
ci_delay_ratio = pd.DataFrame((ci_1,ci_2))
elif o == 'overrun_ratio':
ci_overrun_ratio = pd.DataFrame((ci_1,ci_2))
else:
ci_days_to_award = pd.DataFrame((ci_1,ci_2))
ci = pd.concat([ci_discount,ci_delay_ratio,ci_overrun_ratio,ci_days_to_award],axis=1).reset_index()
del ci['index']
return(ci)
def table5_PanelB_even(data):
outcomes = ['discount','delay_ratio','overrun_ratio','days_to_award']
t = 'turin_pr_sample'
g = 'ctrl_exp'
c_outcomes=1
i = 5
df1 = data
df1_tmp = df1[(df1[t]==1)& (df1[g +'_' + t]==1) & (df1['post_experience']>=i) & (df1['pre_experience']>=i)& (df1['post_experience'].isnull()==False) & (df1['pre_experience'].isnull()==False) & (df1['missing']==0) & (df1['fiscal_efficiency'].isnull()==False) & (df1['reserve_price'].isnull()==False)&(df1['municipality'].isnull()==False)]
for o in outcomes:
df1 = df1_tmp[df1_tmp[o].isnull()==False]
df1 = df1.reset_index()
df1 = df1.sort_values(by = 'authority_code', ascending = True)
df1['ind'] = np.nan
for i in range(len(df1)):
if i == 0:
df1.loc[i, 'ind'] = 1
else:
if df1.loc[i, 'authority_code'] != df1.loc[i-1, 'authority_code']:
df1.loc[i, 'ind'] = 1
#create dummies for administration-year pairs
all_years = df1['year'].unique()
all_authorities = df1['authority_code'].unique()
auth_year_reg_col = []
for auth in all_authorities:
for yr in all_years:
df1['auth_year_' + str(auth)+'_' + str(yr)] = 0
auth_year_reg_col.append('auth_year_' + str(auth)+'_' + str(yr))
df1.loc[(df1['year']==yr) & (df1['authority_code']==auth), 'auth_year_' + str(auth)+'_' + str(yr) ] = 1
##regression for first stage
#create dummies for work category
all_categories = df1['work_category'].unique()
for cat in all_categories:
df1['cat_'+cat] = 0
df1.loc[df1['work_category']==cat, 'cat_'+cat] =1
### Regression first stage
#setting
work_dum = pd.get_dummies(df1['work_category']).rename(columns=lambda x: 'work_dum_' + str(x))
year_dum = pd.get_dummies(df1['year']).rename(columns=lambda x: 'year_dum_' + str(x))
auth_dum = pd.get_dummies(df1['authority_code']).rename(columns=lambda x: 'auth_dum_' + str(x))
dum_df = pd.concat([work_dum, year_dum, auth_dum],axis = 1)
df1 = pd.concat([df1,dum_df],axis = 1)
work_list = list(work_dum.columns)
year_list = list(year_dum.columns)
auth_list = list(auth_dum.columns)
reg_col = []
for i in work_list:
reg_col.append(i)
for j in year_list:
reg_col.append(j)
exog_var = ['fpsb_auction','reserve_price','municipality','fiscal_efficiency','trend','trend_treat']
for i in range(1,36):
exog_var.append('trend_pa_remained_'+str(i))
exog = exog_var + reg_col
exog.remove('year_dum_2000.0')
exog.remove('year_dum_2006.0')
exog.remove('work_dum_OG01')
for i in [2,4,6,7,9,11,12,13,15,16,17,18,20,21,22,23,24,25,26,28,34,35]:
exog.remove('trend_pa_remained_'+str(i))
        #1. regression with standard errors clustered by authority-year pair (auth_anno)
        fe_reg_1 = mt.reg(df1, o, exog, cluster = 'auth_anno', check_colinear = True)
        #2. regression with standard errors clustered by authority
        fe_reg_2 = mt.reg(df1, o, exog, cluster = 'authority_code', check_colinear = True)
ci_1 = fe_reg_1.summary.loc['fpsb_auction',['CI_low', 'CI_high']].round()
ci_2 = fe_reg_2.summary.loc['fpsb_auction',['CI_low', 'CI_high']].round()
if o == 'discount':
ci_discount = pd.DataFrame((ci_1,ci_2))
elif o == 'delay_ratio':
ci_delay_ratio = pd.DataFrame((ci_1,ci_2))
elif o == 'overrun_ratio':
ci_overrun_ratio = pd.DataFrame((ci_1,ci_2))
else:
ci_days_to_award = pd.DataFrame((ci_1,ci_2))
ci = pd.concat([ci_discount,ci_delay_ratio,ci_overrun_ratio,ci_days_to_award],axis=1).reset_index()
del ci['index']
return(ci)
def table5_PanelA_odd_row3(data):
    outcomes = ['discount','delay_ratio','overrun_ratio'] # days_to_award yields no value for Panel A odd columns, so it is omitted
t = 'turin_co_sample'
g = 'ctrl_exp'
c_outcomes=1
i = 5
df1 = data
df1_tmp = df1[(df1[t]==1)& (df1[g +'_' + t]==1) & (df1['post_experience']>=i) & (df1['pre_experience']>=i)& (df1['post_experience'].isnull()==False) & (df1['pre_experience'].isnull()==False) & (df1['missing']==0) & (df1['fiscal_efficiency'].isnull()==False) & (df1['reserve_price'].isnull()==False)&(df1['municipality'].isnull()==False)]
for o in outcomes:
df1 = df1_tmp[df1_tmp[o].isnull()==False]
df1 = df1.reset_index()
df1 = df1.sort_values(by = 'authority_code', ascending = True)
df1['ind'] = np.nan
for i in range(len(df1)):
if i == 0:
df1.loc[i, 'ind'] = 1
else:
if df1.loc[i, 'authority_code'] != df1.loc[i-1, 'authority_code']:
df1.loc[i, 'ind'] = 1
#create dummies for administration-year pairs
all_years = df1['year'].unique()
all_authorities = df1['authority_code'].unique()
auth_year_reg_col = []
for auth in all_authorities:
for yr in all_years:
df1['auth_year_' + str(auth)+'_' + str(yr)] = 0
auth_year_reg_col.append('auth_year_' + str(auth)+'_' + str(yr))
df1.loc[(df1['year']==yr) & (df1['authority_code']==auth), 'auth_year_' + str(auth)+'_' + str(yr) ] = 1
##regression for first stage
#create dummies for work category
all_categories = df1['work_category'].unique()
for cat in all_categories:
df1['cat_'+cat] = 0
df1.loc[df1['work_category']==cat, 'cat_'+cat] =1
### Regression first stage
#setting
work_dum = pd.get_dummies(df1['work_category']).rename(columns=lambda x: 'work_dum_' + str(x))
year_dum = pd.get_dummies(df1['year']).rename(columns=lambda x: 'year_dum_' + str(x))
auth_dum = pd.get_dummies(df1['authority_code']).rename(columns=lambda x: 'auth_dum_' + str(x))
dum_df = pd.concat([work_dum, year_dum, auth_dum],axis = 1)
df1 = pd.concat([df1,dum_df],axis = 1)
work_list = list(work_dum.columns)
year_list = list(year_dum.columns)
auth_list = list(auth_dum.columns)
reg_col = []
for i in work_list:
reg_col.append(i)
for j in year_list:
reg_col.append(j)
for k in auth_list:
reg_col.append(k)
exog_var = ['fpsb_auction','reserve_price','municipality','fiscal_efficiency']
exog = exog_var + reg_col
exog.remove('year_dum_2000.0')
exog.remove('work_dum_OG01')
exog.remove('auth_dum_3.0')
exog.remove('auth_dum_1708.0')
        #1. regression with standard errors clustered by authority-year pair (auth_anno)
        fe_reg_1 = mt.reg(df1, o, exog, cluster = 'auth_anno', addcons= True, check_colinear = True)
        #2. regression with standard errors clustered by authority
        fe_reg_2 = mt.reg(df1, o, exog, cluster = 'authority_code',addcons= True, check_colinear = True)
ci_1 = fe_reg_1.summary.loc['fpsb_auction',['CI_low', 'CI_high']].round()
ci_2 = fe_reg_2.summary.loc['fpsb_auction',['CI_low', 'CI_high']].round()
if o == 'discount':
ci_discount = pd.DataFrame((ci_1,ci_2))
elif o == 'delay_ratio':
ci_delay_ratio = pd.DataFrame((ci_1,ci_2))
elif o == 'overrun_ratio':
            ci_overrun_ratio = pd.DataFrame((ci_1,ci_2))
    # days_to_award is not estimated in this panel, so only three outcomes are combined
    ci = pd.concat([ci_discount,ci_delay_ratio,ci_overrun_ratio],axis=1).reset_index()
    del ci['index']
    return(ci)
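# Illustrative usage sketch (not part of the original replication script).
# Assumptions: the prepared contract-level dataset lives in a CSV whose path is
# hypothetical here, and `mt` used above is econtools.metrics (an assumption);
# the functions above only require a DataFrame with the columns they reference.
def _example_table5_usage(data_path='table5_input.csv'):
    """Hedged sketch: load the prepared data and collect Table 5 confidence intervals."""
    import pandas as pd  # local import so the sketch stays self-contained
    data = pd.read_csv(data_path)  # hypothetical file name
    ci_panelA_odd3 = table5_PanelA_odd_row3(data)  # Panel A, odd columns, row 3 (3 outcomes)
    ci_panelB_even = table5_PanelB_even(data)      # Panel B, even columns (4 outcomes)
    return ci_panelA_odd3, ci_panelB_even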