| prompt (string, 19–1.03M chars) | completion (string, 4–2.12k chars) | api (string, 8–90 chars) |
|---|---|---|
import csv, os
import numpy as np
import pandas as pd
import argparse
if __name__ == "__main__":
# Parse arguments for the script
parser = argparse.ArgumentParser(description='Run the conformal map with options.')
parser.add_argument("-i", "--input", help="input folder that stores log files")
parser.add_argument("-o", "--output", help="output folder to write combined data")
parser.add_argument("-n", "--name", help="name of the combined output file")
parser.add_argument("--use_mpf", action="store_true", help="True for multiprecision logs")
parser.add_argument("--max_itr", type=int, help="number of iterations for the combined logs", default=500)
args = parser.parse_args()
# Get list of log files
logs = os.listdir(args.input)
if args.use_mpf:
logs = [f for f in logs if f.endswith("_mpf.csv")]
else:
logs = [f for f in logs if f.endswith("_float.csv")]
max_grads = []
for f in logs:
max_grads.append([])
log_path = os.path.join(args.input, f)
with open(log_path, newline='') as csvfile:
grad_reader = csv.DictReader(csvfile, delimiter=',', quotechar='|')
# Get the max error values (the first column is the iteration number)
for row in grad_reader:
max_grads[-1].append(row[' max error'])
# Ensure each log is the requested number of iterations
if(len(max_grads[-1])>=args.max_itr):
break
while(len(max_grads[-1])<args.max_itr):
max_grads[-1].append(max_grads[-1][-1])
# Save the max_grads as a csv file
max_grads = np.array(max_grads,dtype=float).T
max_grads_df = | pd.DataFrame(max_grads) | pandas.DataFrame |
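# Illustration only, not part of the original script above: the pad-or-truncate
# rule applied to each " max error" column, shown on a toy list. The value of
# max_itr here is an arbitrary example, not taken from the script's arguments.
example_grads = [0.9, 0.5, 0.2]
max_itr = 5
example_grads = example_grads[:max_itr]        # truncate if longer than max_itr
while len(example_grads) < max_itr:
    example_grads.append(example_grads[-1])    # pad by repeating the last value
assert example_grads == [0.9, 0.5, 0.2, 0.2, 0.2]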
# pylint: disable=E1101,E1103,W0232
import operator
from datetime import datetime, date
import numpy as np
import pandas.tseries.offsets as offsets
from pandas.tseries.frequencies import (get_freq_code as _gfc,
_month_numbers, FreqGroup)
from pandas.tseries.index import DatetimeIndex, Int64Index, Index
from pandas.tseries.tools import parse_time_string
import pandas.tseries.frequencies as _freq_mod
import pandas.core.common as com
from pandas.lib import Timestamp
import pandas.lib as lib
import pandas.tslib as tslib
import pandas.algos as _algos
#---------------
# Period logic
def _period_field_accessor(name, alias):
def f(self):
base, mult = _gfc(self.freq)
return tslib.get_period_field(alias, self.ordinal, base)
f.__name__ = name
return property(f)
def _field_accessor(name, alias):
def f(self):
base, mult = _gfc(self.freq)
return tslib.get_period_field_arr(alias, self.values, base)
f.__name__ = name
return property(f)
class Period(object):
__slots__ = ['freq', 'ordinal']
def __init__(self, value=None, freq=None, ordinal=None,
year=None, month=1, quarter=None, day=1,
hour=0, minute=0, second=0):
"""
        Represents a period of time
Parameters
----------
value : Period or basestring, default None
The time period represented (e.g., '4Q2005')
freq : str, default None
e.g., 'B' for businessday, ('T', 5) or '5T' for 5 minutes
year : int, default None
month : int, default 1
quarter : int, default None
day : int, default 1
hour : int, default 0
minute : int, default 0
second : int, default 0
"""
# freq points to a tuple (base, mult); base is one of the defined
# periods such as A, Q, etc. Every five minutes would be, e.g.,
# ('T', 5) but may be passed in as a string like '5T'
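        # Illustrative constructions, based on the docstring above (the freq
        # strings shown are assumptions about typical usage, not exhaustive):
        #   Period('4Q2005')                        # from a period string
        #   Period('2005-01-03', freq='B')          # business-day period
        #   Period(year=2005, quarter=2, freq='Q')  # built from fields
        # Note that a multiplied freq such as '5T' raises below, since only
        # mult == 1 is supported by this class.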
self.freq = None
# ordinal is the period offset from the gregorian proleptic epoch
self.ordinal = None
if ordinal is not None and value is not None:
raise ValueError(("Only value or ordinal but not both should be "
"given but not both"))
elif ordinal is not None:
if not com.is_integer(ordinal):
raise ValueError("Ordinal must be an integer")
if freq is None:
raise ValueError('Must supply freq for ordinal value')
self.ordinal = ordinal
elif value is None:
if freq is None:
raise ValueError("If value is None, freq cannot be None")
self.ordinal = _ordinal_from_fields(year, month, quarter, day,
hour, minute, second, freq)
elif isinstance(value, Period):
other = value
if freq is None or _gfc(freq) == _gfc(other.freq):
self.ordinal = other.ordinal
freq = other.freq
else:
converted = other.asfreq(freq)
self.ordinal = converted.ordinal
elif isinstance(value, basestring) or com.is_integer(value):
if com.is_integer(value):
value = str(value)
dt, freq = _get_date_and_freq(value, freq)
elif isinstance(value, datetime):
dt = value
if freq is None:
raise ValueError('Must supply freq for datetime value')
elif isinstance(value, date):
dt = datetime(year=value.year, month=value.month, day=value.day)
if freq is None:
raise ValueError('Must supply freq for datetime value')
else:
msg = "Value must be Period, string, integer, or datetime"
raise ValueError(msg)
base, mult = _gfc(freq)
if mult != 1:
raise ValueError('Only mult == 1 supported')
if self.ordinal is None:
self.ordinal = tslib.period_ordinal(dt.year, dt.month, dt.day,
dt.hour, dt.minute, dt.second,
base)
self.freq = _freq_mod._get_freq_str(base)
def __eq__(self, other):
if isinstance(other, Period):
return (self.ordinal == other.ordinal
and | _gfc(self.freq) | pandas.tseries.frequencies.get_freq_code |
import os
import warnings
import numpy as np
import pandas as pd
from gisutils import df2shp, project
from shapely.geometry import Point
from mfsetup.fileio import append_csv, check_source_files
from mfsetup.grid import get_ij
from mfsetup.sourcedata import TransientTabularSourceData
from mfsetup.tmr import Tmr
from mfsetup.wateruse import get_mean_pumping_rates, resample_pumping_rates
def setup_wel_data(model, for_external_files=True):
"""Performs the part of well package setup that is independent of
MODFLOW version. Returns a DataFrame with the information
needed to set up stress_period_data.
"""
# default options for distributing fluxes vertically
vfd_defaults = {'across_layers': False,
'distribute_by': 'transmissivity',
'screen_top_col': 'screen_top',
'screen_botm_col': 'screen_botm',
'minimum_layer_thickness': model.cfg['wel'].get('minimum_layer_thickness', 2.)
}
# master dataframe for stress period data
columns = ['per', 'k', 'i', 'j', 'q', 'boundname']
df = pd.DataFrame(columns=columns)
# check for source data
datasets = model.cfg['wel'].get('source_data')
# delete the dropped wells file if it exists, to avoid confusion
dropped_wells_file = model.cfg['wel']['output_files']['dropped_wells_file'].format(model.name)
if os.path.exists(dropped_wells_file):
os.remove(dropped_wells_file)
# get well package input from source (parent) model in lieu of source data
# todo: fetching correct well package from mf6 parent model
if datasets is None and model.cfg['parent'].get('default_source_data') \
and hasattr(model.parent, 'wel'):
# get well stress period data from mfnwt or mf6 model
parent = model.parent
spd = get_package_stress_period_data(parent, package_name='wel')
# map the parent stress period data to inset stress periods
periods = spd.groupby('per')
dfs = []
for inset_per, parent_per in model.parent_stress_periods.items():
if parent_per in periods.groups:
period = periods.get_group(parent_per)
if len(dfs) > 0 and period.drop('per', axis=1).equals(dfs[-1].drop('per', axis=1)):
continue
else:
dfs.append(period)
spd = pd.concat(dfs)
parent_well_i = spd.i.copy()
parent_well_j = spd.j.copy()
parent_well_k = spd.k.copy()
# set boundnames based on well locations in parent model
parent_name = parent.name
spd['boundname'] = ['{}_({},{},{})'.format(parent_name, pk, pi, pj)
for pk, pi, pj in zip(parent_well_k, parent_well_i, parent_well_j)]
parent_well_x = parent.modelgrid.xcellcenters[parent_well_i, parent_well_j]
parent_well_y = parent.modelgrid.ycellcenters[parent_well_i, parent_well_j]
coords = project((parent_well_x, parent_well_y),
model.modelgrid.proj_str,
parent.modelgrid.proj_str)
geoms = [Point(x, y) for x, y in zip(*coords)]
bounds = model.modelgrid.bbox
within = [g.within(bounds) for g in geoms]
i, j = get_ij(model.modelgrid,
parent_well_x[within],
parent_well_y[within])
spd = spd.loc[within].copy()
spd['i'] = i
spd['j'] = j
df = df.append(spd)
# read source data and map onto model space and time discretization
# multiple types of source data can be submitted
elif datasets is not None:
for k, v in datasets.items():
# determine the format
if 'csvfile' in k.lower(): # generic csv
# read csv file and aggregate flow rates to model stress periods
# sum well fluxes co-located in a cell
sd = TransientTabularSourceData.from_config(v,
resolve_duplicates_with='sum',
dest_model=model)
csvdata = sd.get_data()
csvdata.rename(columns={v['data_column']: 'q',
v['id_column']: 'boundname'}, inplace=True)
if 'k' not in csvdata.columns:
if model.nlay > 1:
vfd = vfd_defaults.copy()
vfd.update(v.get('vertical_flux_distribution', {}))
csvdata = assign_layers_from_screen_top_botm(csvdata,
model,
**vfd)
else:
csvdata['k'] = 0
df = df.append(csvdata[columns])
elif k.lower() == 'wells': # generic dict
added_wells = {k: v for k, v in v.items() if v is not None}
if len(added_wells) > 0:
aw = | pd.DataFrame(added_wells) | pandas.DataFrame |
import gc
import os
import os.path as osp
from typing import Sequence, Union, List, Generator, Optional, Tuple
import math
import numpy as np
from tqdm import tqdm, trange
import torch
import torch.cuda.amp as amp
from torch import nn
from torch import Tensor
import matplotlib.pyplot as plt
import pandas as pd
from ._misc import text_styles, TrivialContext
class DefaultConfig:
def __init__(self):
self.checkpoint_dir = ''
self.run_name = f'test'
# training
self.train_batch_size = 32
self.val_batch_size = 32
self.start_epoch = 0
self.end_epoch = 1
self.optimizers = torch.optim.AdamW
self.optimizer_params = {'lr': 3e-4, 'weight_decay': 1e-3}
self.schedulers = []
self.scheduler_params = []
# means we step the scheduler at each training iteration
# the other option is 'epoch'
self.scheduler_interval = 'step'
# whether or not the scheduler requires the step or epoch as an input
# argument
self.scheduler_interval_eval = '[]'
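        # e.g. with scheduler_interval_eval = '[]' the train loop calls
        # scheduler.step() with no arguments; a hypothetical value such as
        # '[self.train_step]' would be eval'd inside fit() and passed as a
        # positional argument to scheduler.step().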
self.criteria = [torch.nn.BCEWithLogitsLoss()]
self.clip_grad_norm = -1
# automatic mixed precision https://pytorch.org/docs/stable/notes/amp_examples.html
self.use_amp = False
# whether we are aiming to maximize or minimize score
self.score_objective = 'max'
def merge_config(custom_config):
config = DefaultConfig()
for prop, value in vars(custom_config).items():
if prop.startswith('_'):
continue
setattr(config, prop, value)
# for backwards compatibility
try:
config.schedulers = config.scheduler
except AttributeError:
pass
try:
config.optimizers = config.optimizer
except AttributeError:
pass
try:
config.criteria = config.criterion
except AttributeError:
pass
return config
class Fitter:
"""
May need to override:
- prepare_inputs_and_targets
- compute_losses
- compute_scores
- collate_targets_and_outputs (for validation)
- forward_train (if you have more than one model)
- forward_validate (if you have more than one model)
- on_start_epoch
- on_validate_end
"""
def __init__(self, models, data_loaders, device, config=None, n_val_iters=0,
id_key='', load=''):
"""
a list of models may be provided, but then a list of optimizers
and a list of schedulers of the same lengths are also expected
`n_val_iters` lets us specify how often to do validation in intervals
of train iterations
`id_key` is used during validation with inspect=True. The idea is that
one of the keys in the dataset __getitem__ return value corresponds to
        a datapoint id. This key is then returned alongside a list of validation
results. It lets us inspect a particular data point. See `validate`
method for more info
"""
self.models = models
if not isinstance(self.models, Sequence):
self.models = [self.models]
[model.to(device) for model in self.models]
self.data_loaders = data_loaders
self.device = device
# Keep original config in case we want to reset it after lr sweep
self.original_config = config
if config is not None:
self.config = merge_config(config)
else:
self.config = DefaultConfig()
self.reset_fitter_state()
self._n_train_iters = len(self.data_loaders.train_loader)
if n_val_iters > 0 and n_val_iters <= len(self.data_loaders.train_loader):
self.n_val_iters = n_val_iters
else:
if n_val_iters > len(self.data_loaders.train_loader):
print("Warning: Clipping n_val_iters to max train iters")
self.n_val_iters = len(self.data_loaders.train_loader)
if load:
self.load(load)
# For validation debug
self.id_key = id_key
# Automatic mixed precision
if self.config.use_amp:
self.scaler = amp.GradScaler()
self.train_forward_context = amp.autocast
else:
self.train_forward_context = TrivialContext
# Check score objective
assert self.config.score_objective in ['min', 'max'], \
"config.score_objective must be either 'min' or 'max'"
# Handle multiple losses
if not isinstance(self.config.criteria, Sequence):
self.config.criteria = [self.config.criteria]
# Make sure that there is a weighting for the losses in case it wasn't
# provided in the config
if hasattr(self.config, 'criteria_weights'):
assert (len(self.config.criteria_weights) \
== len(self.config.criteria)), ("config.criteria_weights "
"must be same length as config.criteria")
self.loss_weights = self.config.criteria_weights
else:
self.loss_weights = [1.] * len(self.config.criteria)
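        # e.g. criteria = [nn.BCEWithLogitsLoss(), nn.MSELoss()] together with
        # criteria_weights = [1.0, 0.5] makes train_iteration() minimize
        # 1.0 * bce + 0.5 * mse.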
def reset_fitter_state(self):
"""
        NOTE: I haven't done a thorough check to make sure I'm not missing anything
"""
self.reset_history()
self.reset_optimizers()
self.reset_schedulers()
        self.best_running_val_loss = float('inf')
        if self.config.score_objective == 'max':
            self.best_running_val_score = -float('inf')
        elif self.config.score_objective == 'min':
            self.best_running_val_score = float('inf')
self.epoch = self.config.start_epoch
def reset_optimizers(self):
optimizers = self.config.optimizers
if not isinstance(optimizers, Sequence):
optimizers = [optimizers]
assert len(optimizers) == len(self.models), \
"Must provide as many optimizers as models"
optimizer_params = self.config.optimizer_params
if not isinstance(optimizer_params, Sequence):
optimizer_params = [optimizer_params]
self.optimizers = []
for opt, ops, m in zip(optimizers, optimizer_params, self.models):
self.optimizers.append(opt(m.parameters(), **ops))
def set_lrs(self, lrs: Union[Sequence[float], float]):
""" manually set the lrs of the optimizers
"""
if not isinstance(lrs, Sequence):
lrs = [lrs]
assert len(lrs) == len(self.optimizers), \
"Must provide as many lrs as there are optimizers"
for lr, optim in zip(lrs, self.optimizers):
for g in optim.param_groups:
g['lr'] = lr
def reset_schedulers(self):
schedulers = self.config.schedulers
if not isinstance(schedulers, Sequence):
schedulers = [schedulers]
if len(schedulers) > 0:
assert len(schedulers) == len(self.models), \
"Must provide as many schedulers as models"
else:
self.schedulers = []
return
scheduler_params = self.config.scheduler_params
if not isinstance(scheduler_params, Sequence):
scheduler_params = [scheduler_params]
assert len(scheduler_params) == len(schedulers), \
"Must provide as many sets of scheduler_params as schedulers"
self.schedulers = []
for sched, sps, opt in zip(schedulers, scheduler_params, self.optimizers):
self.schedulers.append(sched(opt, **sps))
def reset_history(self):
self.history = {}
optimizers = self.config.optimizers
if not isinstance(optimizers, Sequence):
optimizers = [optimizers]
for i in range(len(optimizers)):
self.history[f'lr_{i}'] = []
self.history[f'grad_norm_{i}'] = []
def forward_train(self, inputs, targets):
"""
        A single forward pass for training. Usually this is straightforward,
        but one might want to be more specific when there are multiple models.
"""
return self.models[0](*inputs)
def train_iteration(self, inputs, targets):
"""
what needs to happen during one train loop iteration
"""
for optimizer in self.optimizers:
optimizer.zero_grad()
with self.train_forward_context():
outputs = self.forward_train(inputs, targets)
losses = self.compute_losses(targets, outputs, mode='train')
if not isinstance(losses, Sequence):
# backwards compatibility
losses = [losses]
loss = sum(
[l * w for l, w in zip(losses, self.loss_weights)])
if self.config.use_amp:
self.scaler.scale(loss).backward()
# this next line makes sure getting/clipping grad norm works
for optimizer in self.optimizers:
self.scaler.unscale_(optimizer)
else:
loss.backward()
grad_norms = []
for model in self.models:
if self.config.clip_grad_norm > 0:
grad_norms.append(nn.utils.clip_grad_norm_(model.parameters(),
self.config.clip_grad_norm))
else:
# NOTE assumes l2 norm
grad_norms.append(torch.norm(torch.stack([torch.norm(
p.grad.detach(), 2) for p in model.parameters() \
if p.grad is not None]), 2))
if self.config.use_amp:
# scaler.step knows that I previously used scaler.unscale_
for optimizer in self.optimizers:
self.scaler.step(optimizer)
self.scaler.update()
else:
for optimizer in self.optimizers:
optimizer.step()
return losses, grad_norms
def fit(self, overfit=False, skip_to_step=0, save=True,
bail=float('inf'), verbose=0):
"""
`skip_to_step` cycles through the train loader without training
until that step is reached. Useful for diagnosing a bug appearing
at a specific location in the train cycle
"""
overfit_data = None # in case we want to try overfitting to one batch
epoch_bar = trange(self.epoch, self.config.end_epoch, disable=(verbose != 1))
for epoch in epoch_bar:
# hook for tasks to do when starting epoch
self.on_start_epoch()
# train
[model.train() for model in self.models]
total_train_losses = []
train_preds = 0
train_bar = tqdm(self.data_loaders.train_loader, disable=(verbose != 2))
train_bar.set_description(f'Epoch {epoch:03d}')
self.train_step = 0
for data in train_bar:
if skip_to_step > 0 and self.train_step < skip_to_step:
self.train_step += 1
continue
if overfit:
if overfit_data is None:
overfit_data = data
else:
data = overfit_data
# get inputs and targets
inputs, targets = self.prepare_inputs_and_targets(data, mode='train')
# train step
losses, grad_norms = self.train_iteration(inputs, targets)
# scheduler
# first keep track of lr to report on it
lrs = []
for optimizer in self.optimizers:
lrs.append(optimizer.param_groups[0]['lr'])
for scheduler in self.schedulers:
if self.config.scheduler_interval == 'step':
args = eval(self.config.scheduler_interval_eval)
scheduler.step(*args)
# logging
if isinstance(inputs[0], Sequence):
# Handle first input is a list instead of a Tensor
batch_size = inputs[0][0].shape[0]
else:
batch_size = inputs[0].shape[0]
with torch.no_grad():
# train_losses is just the item() version of losses for reporting
train_losses = [l.item() for l in losses]
for i, tl in enumerate(train_losses):
if len(total_train_losses) == 0:
# make train losses the right length
total_train_losses = [0.] * len(train_losses)
while len(total_train_losses) < i+1:
total_train_losses.append(0.)
total_train_losses[i] += tl * batch_size # unaverage
train_preds += batch_size
kwargs = {f'lr_{i}': f'{lr:.2E}' for i, lr in enumerate(lrs)}
kwargs.update({f'grad_norm_{i}': f'{grad_norm:.3f}' \
for i, grad_norm in enumerate(grad_norms)})
kwargs.update({f'train_loss_{i}': f'{l:.3f}' \
for i, l in enumerate(train_losses)})
train_bar.set_postfix(**kwargs)
for i, l in enumerate(train_losses):
if not f'train_loss_{i}' in self.history:
self.history[f'train_loss_{i}'] = []
self.history[f'train_loss_{i}'].append(l)
for i, (lr, grad_norm) in enumerate(zip(lrs, grad_norms)):
self.history[f'lr_{i}'].append(lr)
self.history[f'grad_norm_{i}'].append(grad_norm.item())
# bail out if loss gets too high
for i, l in enumerate(train_losses):
if l > bail:
print(f"loss_{i} blew up. Bailed training.")
return
if math.isnan(l):
msg = f"WARNING: NaN loss_{i}"
if self.config.use_amp:
msg += " Heads up: this may be a side effect of using AMP."
msg += " If so, the gradient scaler should skip this step."
print(msg)
# if bail is set to a specific number, bail
if bail < float('inf'):
return
# validate every val_itrs
if (self.train_step + 1) % self.n_val_iters == 0:
if self.data_loaders.val_loader is not None:
val_losses, val_scores = \
self.validate(verbose=(verbose == 2))
else:
val_losses, val_scores = [], []
for i, l in enumerate(val_losses):
if not f'val_loss_{i}' in self.history:
self.history[f'val_loss_{i}'] = []
self.history[f'val_loss_{i}'].append(l.item())
for i, s in enumerate(val_scores):
if not f'val_score_{i}' in self.history:
self.history[f'val_score_{i}'] = []
self.history[f'val_score_{i}'].append(s)
# best loss is based on sum of losses
# TODO decide if I want to use running avg
# and if so, make it possible to decide on length
running_val_loss = 0
for i in range(len(val_losses)):
running_val_loss += np.mean(self.history[f'val_loss_{i}'][-1:])
if save and running_val_loss <= self.best_running_val_loss:
self.best_running_val_loss = running_val_loss
self.save(f'{self.config.run_name}_best_loss.pt')
if 'val_score_0' in self.history:
running_val_score = np.mean(self.history['val_score_0'][-1:])
sign = 1 if self.config.score_objective == 'min' else -1
if save and sign*running_val_score <= sign*self.best_running_val_score:
self.best_running_val_score = running_val_score
self.save(f'{self.config.run_name}_best_score.pt')
[model.train() for model in self.models]
self.train_step += 1
# step scheduler on epoch
if self.config.scheduler_interval == 'epoch':
for scheduler in self.schedulers:
args = eval(self.config.scheduler_interval_eval)
scheduler.step(*args)
if verbose == 1:
kwargs = {f'val_loss_{i}': f"{l.item():.3f}" for i, l \
in enumerate(val_losses)}
kwargs.update({f'val_score_{i}': f"{s:.3f}" for i, s \
in enumerate(val_scores)})
kwargs.update({f'train_loss_{i}': f"{l/train_preds:.3f}" \
for i, l in enumerate(total_train_losses)})
kwargs.update({f'lr_{i}': f"{lr:.2E}" for i, lr in enumerate(lrs)})
epoch_bar.set_postfix(**kwargs)
if verbose == 2:
for i, (tl, vl) in enumerate(zip(total_train_losses, val_losses)):
msg = f"\nAverage train / val loss {i} "
msg += f"{text_styles.BOLD}{(tl/train_preds):.5f}{text_styles.ENDC}"
msg += f" / {text_styles.BOLD}{(vl):.5f}{text_styles.ENDC}. "
print(msg + '\n', flush=True)
self.epoch = epoch + 1
if save and self.config.run_name:
self.save(f'{self.config.run_name}_last.pt')
if save and self.config.run_name:
self.save(f'{self.config.run_name}_epoch{self.epoch:02d}.pt')
def on_start_epoch(self):
""" tasks to do when starting in epoch, overwrite me
"""
pass
def prepare_inputs_and_targets(self, data, mode='val'):
"""
        This method probably needs to be overridden
Return a list of batches of inputs (each index in the list will be
treated as a positional argument for the model's forward), and a
single batch of targets. Multiple targets may be returned in any
format you desire, but then you'll have to modify `compute_losses`,
`compute_scores`, and `collate_targets_and_outputs` to handle it.
Don't forget to move whatever is necessary to `self.device`!
"""
assert mode in ['train', 'val'], "`mode` must be either 'train' or 'val'"
return [data['inp'].to(self.device)], data['target'].to(self.device)
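    # An illustrative override for a model taking two inputs (the keys 'img' and
    # 'meta' are hypothetical; use whatever your dataset's __getitem__ returns):
    #   def prepare_inputs_and_targets(self, data, mode='val'):
    #       inputs = [data['img'].to(self.device), data['meta'].to(self.device)]
    #       return inputs, data['target'].to(self.device)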
def forward_validate(self, inputs):
"""
        A single forward pass for validation. Usually this is straightforward,
        but one might want to be more specific when there are multiple models.
"""
return self.models[0](*inputs)
def validate(self, inspect=False, loader=None, use_train_loader=False,
verbose=True) -> Tuple[float, List[float]]:
"""
        `inspect` lets us retrieve more information than just the validation score and loss.
        If `loader` is not provided, `self.data_loaders.val_loader` is used by default;
        if `use_train_loader` is set, `self.data_loaders.train_loader` is used instead.
"""
[model.eval() for model in self.models]
ls_outputs = []
ls_targets = []
if inspect and self.id_key != '':
ids = [] # for debugging purposes
if loader is None:
if not use_train_loader:
loader = self.data_loaders.val_loader
else:
loader = self.data_loaders.train_loader
elif use_train_loader:
print("Warning: You have provided a loader but also set `use_train_loader` to true")
val_bar = tqdm(loader, disable=(not verbose))
for data in val_bar:
inputs, targets = self.prepare_inputs_and_targets(data, mode='val')
with torch.no_grad():
outputs = self.forward_validate(inputs)
ls_targets.append(targets)
ls_outputs.append(outputs)
if inspect and self.id_key != '':
ids += list(data[self.id_key])
targets, outputs = self.collate_targets_and_outputs(ls_targets, ls_outputs)
with torch.no_grad():
losses = self.compute_losses(targets, outputs, mode='val')
if not isinstance(losses, Sequence):
# backwards compatibility
losses = [losses]
scores = self.compute_scores(targets, outputs, mode='val')
if not isinstance(scores, Sequence):
# backwards compatibility
scores = [scores]
if verbose:
print(''.join([f', val_loss_{i}: {s:.3f}' \
for i, s in enumerate(losses)]) + \
''.join([f', val_score_{i}: {s:.3f}' \
for i, s in enumerate(scores)]),
flush=True)
if inspect:
ret = {f'loss_{i}': l for i, l in enumerate(losses)}
ret.update({'targets': targets, 'outputs': outputs})
ret.update({f'score_{i}': s for i, s in enumerate(scores)})
if self.id_key != '':
ret['id'] = ids
return ret
self.on_validate_end(targets, outputs)
return losses, scores
def on_validate_end(self, targets, outputs):
"""
post-validation hook, might be useful for showing something like an
example
"""
pass
def collate_targets_and_outputs(self, ls_targets, ls_outputs):
"""
During validation, targets and outputs are concatenated into a list
        Depending on the nature of these, we might want to override the way
        that they are collated prior to computing loss and score.
        By default, we have the naive implementation where `ls_targets` and
        `ls_outputs` simply need to be concatenated.
        Note that we send targets and outputs to cpu.
TODO: Make this handle inputs as well so that we can visualize images
"""
targets = torch.cat([t.cpu() for t in ls_targets], axis=0)
outputs = torch.cat([o.cpu() for o in ls_outputs], axis=0)
return targets, outputs
def compute_loss(self, targets, outputs, mode='train'):
"""
Here for backwards compatibility. `compute_losses` is the one that's
used in other places, but that just calls this.
"""
return [c(outputs, targets) for c in self.config.criteria]
def compute_losses(self, targets, outputs, mode='train'):
"""
Compute a loss (or multiple losses if the config specifies a sequence
of criteria)
NOTE: This points to compute_loss for backwards compatibility. This is
the "official" one called in other places in the code
NOTE: If you need to handle multiple targets AND multiple losses
you will have to override this.
"""
return self.compute_loss(targets, outputs, mode=mode)
def compute_score(self, targets, outputs, mode='val') -> Union[Sequence[float], float]:
"""
Backwards compatibility via compute_scores.
        This method MUST be overridden if you want to compute scores
"""
return []
def compute_scores(self, targets, outputs, mode='val') -> Union[Sequence[float], float]:
"""
Return a list of scores based on the targets
Note that the order of scores affects two things:
1. For the purpose of saving on best score, the first of the scores is
used.
        2. For the purposes of plotting scores, the first of the scores is used.
NOTE: This points to compute_score for backwards compatibility. This
is the "official" one called in other places in the code
"""
return self.compute_score(targets, outputs, mode='val')
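    # Example override (sketch, assuming a binary-classification setup):
    #   def compute_score(self, targets, outputs, mode='val'):
    #       preds = (torch.sigmoid(outputs) > 0.5).float()
    #       return [(preds == targets).float().mean().item()]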
def test(self, loader=None, verbose=True) -> Generator[
Tuple[Tensor, Optional[List[str]]], None, None]:
"""
Similar to `validate` method, but not expecting targets. Returns
a generator of outputs and ids if self.id_key is specified
Note that this uses `forward_validate` method
"""
[model.eval() for model in self.models]
if self.id_key == '':
print("Warning: You have not set an id_key. Results won't have ids.")
if loader is None:
print("Warning: No loader provided. Default to val_loader")
loader = self.data_loaders.val_loader
# test_bar = tqdm(loader, disable=(not verbose))
for data in loader:
inputs = self.prepare_inputs_and_targets(data, mode='test')
with torch.no_grad():
outputs = self.forward_validate(inputs)
if self.id_key != '':
yield outputs, list(data[self.id_key])
else:
yield outputs
def plot_history(self, plot_from=0, sma_period=5):
fig, ax = plt.subplots(2, 2, figsize=(20,10))
ax = ax.flatten()
# train loss
max_losses = 3 # maximum number of loss traces to plot
x_axis = np.arange(1, len(self.history[f'train_loss_0'])+1)/self._n_train_iters
for i in range(max_losses+1):
if f'train_loss_{i}' not in self.history:
break
if i == max_losses:
print(f"Warning: Max number of loss traces ({max_losses}) " \
+ "exceeded. Not all losses are plotted")
break
train_loss = pd.Series(self.history[f'train_loss_{i}'][plot_from:])
ax[0].plot(x_axis[plot_from:], train_loss, alpha=0.5, label=f'train_loss_{i}')
ax[0].plot(x_axis[plot_from:][sma_period-1:],
train_loss.rolling(window=sma_period).mean().iloc[sma_period-1:].values,
label=f'train_loss_{i}_smoothed')
# val loss
vals_per_epoch = self._n_train_iters//self.n_val_iters
x_axis = np.arange(1, len(self.history['val_loss_0']) + 1)/vals_per_epoch
        for i in range(max_losses+1):
if f'val_loss_{i}' not in self.history:
break
ax[0].plot(x_axis[(vals_per_epoch * plot_from)//self._n_train_iters:],
self.history[f'val_loss_{i}'][(vals_per_epoch * plot_from)//self._n_train_iters:],
label=f'val_loss_{i}')
ax[0].legend()
ax[0].set_xlabel('epoch')
ax[0].set_ylabel('loss')
ax[0].grid()
title = f"Best train_loss_0: {min(self.history['train_loss_0']):0.3f}"
if len(self.history['val_loss_0']):
title += f". Best val_loss_0: {min(self.history['val_loss_0']):0.3f}"
ax[0].set_title(title)
# val metrics
if 'val_score_0' in self.history and len(self.history['val_score_0']):
ax[1].plot(x_axis[(vals_per_epoch * plot_from)//self._n_train_iters:],
self.history['val_score_0'][(vals_per_epoch * plot_from)//self._n_train_iters:])
ax[1].set_xlabel('epoch')
ax[1].set_ylabel('score')
ax[1].grid()
if self.config.score_objective == 'max':
title = f"Best val_score_0: {max(self.history['val_score_0']):0.3f}"
elif self.config.score_objective == 'min':
title = f"Best val_score_0: {min(self.history['val_score_0']):0.3f}"
ax[1].set_title(title)
# lrs
x_axis = np.arange(1, len(self.history['train_loss_0'])+1)/self._n_train_iters
legend = []
for i in range(len(self.optimizers)):
ax[2].plot(x_axis[plot_from:], self.history[f'lr_{i}'][plot_from:])
legend.append(f'lr_{i}')
ax[2].set_xlabel('epoch')
ax[2].set_ylabel('lr')
if len(legend):
ax[2].legend(legend)
ax[2].grid()
# grad norms
x_axis = np.arange(1, len(self.history['train_loss_0'])+1)/self._n_train_iters
legend = []
for i in range(len(self.optimizers)):
ax[3].plot(x_axis[plot_from:], self.history[f'grad_norm_{i}'][plot_from:])
legend.append(f'grad_norm_{i}')
ax[3].set_xlabel('epoch')
ax[3].set_ylabel('grad_norm')
if len(legend):
ax[3].legend(legend)
ax[3].grid()
return fig, ax
    def lr_sweep(self, start_lrs: Sequence[float], gamma: float, bail=float('inf')):
"""
Run an lr sweep starting from `start_lrs` (provide as many lrs as there
are optimizers) and with exponential growth rate `gamma` (> 1)
`bail` is the loss at which training stops
"""
if not isinstance(start_lrs, Sequence):
start_lrs = [start_lrs]
assert len(start_lrs) == len(self.optimizers), \
f"Must provide as many starting lrs as there are optimizers: {len(self.optimizers)}"
if len(start_lrs) > 1:
for i, (lr, _) in enumerate(zip(start_lrs, self.config.optimizer_params)):
self.config.optimizer_params[i]['lr'] = lr
else:
self.config.optimizer_params['lr'] = start_lrs[0]
self.config.schedulers = [torch.optim.lr_scheduler.ExponentialLR]*len(start_lrs)
self.config.scheduler_params = [{'gamma': gamma}]*len(start_lrs)
self.config.scheduler_interval_eval = '[]'
self.config.scheduler_interval = 'step'
self.reset_fitter_state()
self.fit(verbose=2, save=False, bail=bail)
self.plot_lr_sweep()
# now clean up
if self.original_config is not None:
self.config = merge_config(self.original_config)
else:
self.config = DefaultConfig()
self.reset_fitter_state()
print("LR sweep done and fitter state has been reset")
def plot_lr_sweep(self, sma_period=5):
num_lrs = len(self.optimizers)
fig, ax = plt.subplots(1, num_lrs, figsize=(10*num_lrs,5))
if num_lrs > 1:
ax = ax.flatten()
else:
ax = [ax]
max_losses = 3 # maximum number of loss traces to plot
for i in range(num_lrs):
for j in range(max_losses+1):
if f'train_loss_{j}' not in self.history:
break
                if j == max_losses:
print(f"Warning: Max number of loss traces ({max_losses}) " \
+ "exceeded. Not all losses are plotted")
break
loss = | pd.Series(self.history[f'train_loss_{j}']) | pandas.Series |
import os
import shutil
import time
import pickle
from datetime import datetime
from filelock import Timeout, FileLock
import zarr
from numcodecs import Blosc
import pandas as pd
import numpy as np
import scipy as sp
import scanpy as sc
import anndata as ad
from anndata._io.zarr import read_dataframe, read_attribute, write_attribute
from status.status_functions import *
from plotting.multi_color_scale import MultiColorScale
from tasks.tasks import write_dense
save_analysis_path = "/srv/www/MiCV/cache/"
selected_datasets_path = "/srv/www/MiCV/selected_datasets/"
user_dataset_path = "/srv/www/MiCV/user_datasets/"
lock_timeout = 60
use_zarr = True
### the actual helper functions
### TODO: break all of this up into specific modules
def generate_adata_from_10X(session_ID, data_type="10X_mtx"):
data_dir = save_analysis_path + str(session_ID) + "/raw_data/"
if (data_type == "10X_mtx"):
adata = sc.read_10x_mtx(data_dir, cache=False)
elif (data_type == "10X_h5"):
adata = sc.read_10x_h5(data_dir + "data.h5ad")
else:
print("[ERROR] data type not recognized - returning None")
return None
cache_adata(session_ID, adata)
return adata
def load_selected_dataset(session_ID, dataset_key):
dataset_dict = {
"00001": "Michki2020",
"00002": "Cocanougher2020",
"00003": "Davie2018",
"00004": "10X5KPBMC",
"00005": "Sharma2020",
"00006": "Zeisel2018"
}
filename = dataset_dict[dataset_key]
if (filename is None):
return None
else:
filename = selected_datasets_path + filename
adata = sc.read_h5ad(filename + ".h5ad")
state = {"filename": str(dataset_dict[dataset_key]),
"# cells/obs": len(adata.obs.index),
"# genes/var": len(adata.var.index),
"# counts": int(np.sum(adata.obs["total_counts"]))}
cache_state(session_ID, state)
adata = cache_adata(session_ID, adata)
return adata
def cache_adata(session_ID, adata=None, group=None,
store_dir=None, store_name=None):
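    # Typical calls in this module (see e.g. load_selected_dataset and
    # get_cell_intersection below):
    #   cache_adata(session_ID, adata)              # write the whole object
    #   adata = cache_adata(session_ID)             # read the whole object back
    #   obs = cache_adata(session_ID, group="obs")  # read a single attribute group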
if ((store_dir is None) or (store_name is None)):
save_dir = save_analysis_path + str(session_ID) + "/"
filename = save_dir + "adata_cache"
chunk_factors = [150, 3] #faster, hot storage
else:
save_dir = store_dir
filename = save_dir + store_name
chunk_factors = [3, 3] #slower, cold storage
if not (os.path.isdir(save_dir)):
try:
print("[DEBUG] making directory:" + str(save_dir))
os.mkdir(save_dir)
except:
return None
lock_filename = (save_analysis_path + str(session_ID)
+ "/" + "adata.lock")
lock = FileLock(lock_filename, timeout=lock_timeout)
compressor = Blosc(cname='blosclz', clevel=3,
shuffle=Blosc.SHUFFLE)
zarr_cache_dir = filename + ".zarr"
attribute_groups = ["obs", "var", "obsm", "varm", "obsp", "varp", "layers", "X", "uns", "raw"]
extra_attribute_groups = ["X_dense", "layers_dense"]
if (adata is None): # then -> read it from the store
if (os.path.exists(zarr_cache_dir) is True):
store_store = zarr.DirectoryStore(zarr_cache_dir)
store = zarr.open_group(store=store_store, mode='r')
if (group in attribute_groups): # then -> return only that part of the object (fast)
group_exists = adata_cache_group_exists(session_ID, group, store=store)
if (group_exists is True):
ret = read_attribute(store[group])
else:
ret = None
#store_store.close()
return ret
elif (group is None): # then -> return the whole adata object (slow)
#adata = ad.read_zarr(zarr_cache_dir)
d = {}
for g in attribute_groups:
if (g in store.keys()):
if (adata_cache_group_exists(session_ID, g, store=store)):
if (g in ["obs", "var"]):
d[g] = read_dataframe(store[g])
else:
d[g] = read_attribute(store[g])
#store_store.close()
adata = ad.AnnData(**d)
if not (adata is None):
return adata
else:
print("[ERROR] adata object not saved at: " + str(filename))
return None
else: # then -> update the state dictionary and write adata to the store
if (group is None):
cache_state(session_ID, key="# cells/obs", val=len(adata.obs.index))
cache_state(session_ID, key="# genes/var", val=len(adata.var.index))
if ("total_counts" in adata.obs):
cache_state(session_ID, key="# counts", val=int(np.sum(adata.obs["total_counts"])))
else:
cache_state(session_ID, key="# counts", val=int(np.sum(adata.X)))
elif (group == "obs"):
cache_state(session_ID, key="# cells/obs", val=len(adata.index))
elif (group == "var"):
cache_state(session_ID, key="# genes/var", val=len(adata.index))
with lock:
store_store = zarr.DirectoryStore(zarr_cache_dir)
store = zarr.open_group(store=store_store, mode='a')
if (group in attribute_groups): # then -> write only that part of the object (fast)
if (group == "var"):
if (np.nan in adata.var.index):
adata.var.index = pd.Series(adata.var.index).replace(np.nan, 'nanchung')
adata.var["gene_ID"] = pd.Series(adata.var["gene_ID"]).replace(np.nan, 'nanchung')
adata.var["gene_ids"] = pd.Series(adata.var["gene_ids"]).replace(np.nan, 'nanchung')
write_attribute(store, group, adata) # here "adata" is actually just a subset of adata
# write dense copies of X or layers if they're what was passed
if (group == "X"):
dense_name = "X_dense"
write_dense.delay(zarr_cache_dir, "X",
dense_name, chunk_factors)
if (group == "layers"):
for l in list(adata.keys()): #layers was passed with parameter name "adata"
dense_name = "layers_dense/" + str(l)
write_dense.delay(zarr_cache_dir, "layers/" + l,
dense_name, chunk_factors)
#store_store.flush()
#store_store.close()
lock.release()
else:
# check that necessary fields are present in adata object
if not ("leiden_n" in adata.obs):
if ("leiden" in adata.obs):
adata.obs["leiden_n"] = pd.to_numeric(adata.obs["leiden"])
if not ("cell_ID" in adata.obs):
adata.obs["cell_ID"] = adata.obs.index
if not ("cell_numeric_index" in adata.obs):
adata.obs["cell_numeric_index"] = pd.to_numeric(list(range(0,len(adata.obs.index))))
for i in ["user_" + str(j) for j in range(0, 6)]:
if not (i in adata.obs.columns):
adata.obs[i] = ["0" for k in adata.obs.index.to_list()]
if not ("gene_ID" in adata.var):
adata.var["gene_ID"] = adata.var.index
# make sure that there are no "nan" genes in the var index
if (np.nan in adata.var.index):
adata.var.index = pd.Series(adata.var.index).replace(np.nan, 'nanchung')
adata.var["gene_ID"] = pd.Series(adata.var["gene_ID"]).replace(np.nan, 'nanchung')
adata.var["gene_ids"] = pd.Series(adata.var["gene_ids"]).replace(np.nan, 'nanchung')
# save it all to the cache, but make dense copies of X and layers
write_attribute(store, "obs", adata.obs)
write_attribute(store, "var", adata.var)
write_attribute(store, "obsm", adata.obsm)
write_attribute(store, "varm", adata.varm)
write_attribute(store, "obsp", adata.obsp)
write_attribute(store, "varp", adata.varp)
write_attribute(store, "uns", adata.uns)
write_attribute(store, "raw", adata.raw)
write_attribute(store, "X", adata.X)
write_attribute(store, "layers", adata.layers)
# making dense copies of X and layers (compressed to save disk space)
dense_name = "X_dense"
write_dense.delay(zarr_cache_dir, "X",
dense_name, chunk_factors)
for l in list(adata.layers.keys()):
dense_name = "layers_dense/" + str(l)
write_dense.delay(zarr_cache_dir, "layers/" + l,
dense_name, chunk_factors)
#store_store.flush()
#store_store.close()
lock.release()
# set the file mod and access times to current time
# then return adata as usual
os.utime(zarr_cache_dir)
return adata
def adata_cache_exists(session_ID):
save_dir = save_analysis_path + str(session_ID) + "/"
filename = save_dir + "adata_cache"
zarr_cache_dir = filename + ".zarr"
if (os.path.exists(zarr_cache_dir) is True):
return True
return False
def adata_cache_group_exists(session_ID, group, store=None):
save_dir = save_analysis_path + str(session_ID) + "/"
filename = save_dir + "adata_cache"
zarr_cache_dir = filename + ".zarr"
if (store is None):
try:
store_store = zarr.DirectoryStore(zarr_cache_dir)
store = zarr.open_group(store=store_store, mode='r')
keys = list(store.group_keys())
#store_store.close()
except:
return False
else:
keys = list(store.group_keys())
if (group in keys):
return True
else:
return False
def cache_gene_list(session_ID, gene_list=None):
filename = save_analysis_path + str(session_ID) + "/gene_list_cache.pickle"
lock_filename = filename + ".lock"
lock = FileLock(lock_filename, timeout=20)
if (gene_list is None):
if (os.path.isfile(filename) is True):
with open(filename, "rb") as f:
gene_list = pickle.load(f)
else:
print("[ERROR] gene list cache does not exist at: " + str(filename))
gene_list = None
return gene_list
else:
gene_list.sort(key=str.lower)
with lock:
with open(filename, "wb") as f:
pickle.dump(gene_list, f)
return gene_list
# returns a list of cell_IDs
# expects a list of lists of datapoint dictionaries
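# each selection is expected to look like a Plotly selectedData payload, e.g.
# {"points": [{"text": "... <cell_ID>", "curveNumber": 0, ...}, ...]}
# (the cell ID is recovered from the last whitespace-separated token of "text")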
def get_cell_intersection(session_ID, list_of_selections,
pt_min=0, pt_max=1):
obs = cache_adata(session_ID, group="obs")
cell_intersection = set(obs.index.to_list())
for cell_list in list_of_selections:
if (cell_list in ["", 0, None, []]):
continue
cell_set = set()
for cell in cell_list["points"]:
cell_ID = (cell["text"]).rsplit(" ", 1)[-1]
cell_set.add(cell_ID)
cell_intersection &= cell_set
if ("pseudotime" in obs):
if ((pt_min > 0) or (pt_max < 1)):
for cell in list(cell_intersection):
if ((obs.loc[cell, "pseudotime"] < pt_min)
or (obs.loc[cell, "pseudotime"] > pt_max)):
cell_intersection.remove(cell)
return cell_intersection
# returns dictionary of points that are in all violin selections
# across all genes that could be selected on
def get_violin_intersection(session_ID, violin_selected):
if (violin_selected is None):
return None
# test which traces (genes) cells were selected from
curves = set()
for cell in violin_selected["points"]:
curves.add(cell["curveNumber"])
if (len(curves) == 0):
# no cells selected - return None
return None
# get cells selected in each of these curves
cells_in_curves = [set() for curve in curves]
if (len(cells_in_curves) == 1):
return violin_selected
    # map each selected curve number to an index into cells_in_curves
    # (curve numbers need not start at 0 or be contiguous)
    curve_index = {c: i for i, c in enumerate(sorted(curves))}
    for cell in violin_selected["points"]:
        n = cell["curveNumber"]
        if (n in curves):
            cell_ID = (cell["text"]).rsplit(" ", 1)[-1]
            (cells_in_curves[curve_index[n]]).add(cell_ID)
# do the intersection
cell_intersection = cells_in_curves[0]
for cell_set in cells_in_curves:
cell_intersection &= cell_set
# get the final list of points in dict format
points = []
for cell in violin_selected["points"]:
cell_ID = (cell["text"]).rsplit(" ", 1)[-1]
if (cell_ID in cell_intersection):
points.append(cell)
return {"points": points}
def get_pseudotime_min_max(session_ID, selected):
if (selected in ["", 0, None, []]):
return [0,1]
x_vals = []
for point in selected["points"]:
x_vals.append(point["x"])
if (len(x_vals) == 0):
return [0,1]
x_min = np.min(x_vals)
x_max = np.max(x_vals)
return [x_min, x_max]
def get_ortholog_data(session_ID, selected_gene):
filename = (save_analysis_path
+ "dmel_human_orthologs_disease_fb_2019_05.csv")
ortholog_data = | pd.read_csv(filename, sep="\t") | pandas.read_csv |
# -*- coding: utf-8 -*-
"""
Created on Sat Feb 13 13:59:04 2021
@author: saidsa
"""
from copy import deepcopy
import pandas as pd
import numpy as np
from numpy.linalg import inv
from Stock import Stock
from scipy.stats import norm
def normalize (x):
#Scaling positive values so that they sum up to 1
x[x>0.001] = x[x>0.001] / x[x>0.001].sum()
#Scaling negative values so that they sum up to -1
x[x<-0.001] = x[x<-0.001] / - x[x<-0.001].sum()
return x
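# e.g. normalize(pd.Series([0.4, 0.6, -0.3, -0.9])) -> [0.4, 0.6, -0.25, -0.75]:
# the positive entries already sum to 1, the negative entries are rescaled to sum to -1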
def stock_obj_arr_to_return_mat(stock_obj_arr):
output_dict = {}
for s in stock_obj_arr:
output_dict[s.ticker] = s['PriceClose']
price_df = pd.DataFrame(output_dict)
returns_df = price_df.pct_change(1)
return returns_df.dropna()
def stock_obj_arr_to_signal_mat(stock_obj_arr, signal_func):
output_dict = {}
for s in stock_obj_arr:
output_dict[s.ticker] = signal_func(s)
signal_df = pd.DataFrame(output_dict)
return signal_df.dropna()
def return_mat_to_rolling_var_covar_dict(returns_mat, window=126,
shrinkage_factor=0.8):
var_covar_ts = {}
for i in range(len(returns_mat)-window):
dt = returns_mat.index[i+window]
ret_mat = returns_mat.iloc[i:i+window, :]
var_cov = ret_mat.cov() * 252
# Apply shrinkage factor
# Reduce the off_diagonal terms => cov but not var
# Easier to generate inv matrix
var_cov = var_cov * shrinkage_factor + \
(1.0-shrinkage_factor)*np.diag(np.diag(var_cov))
var_covar_ts[dt] = var_cov
return var_covar_ts
def invert_var_covar_dict(var_covar_ts_dict):
inv_var_covar_ts = deepcopy(var_covar_ts_dict)
for dt, var_cov_mat in var_covar_ts_dict.items():
#Updating the values inside inv_var_covar_ts to preserve the structure
#when doing inverse
inv_var_covar_ts[dt].loc[:, :] = inv(var_cov_mat)
return inv_var_covar_ts
def MVOpt_LS_Fixed_risk(r, Sig, s, Sig_inv = None):
# r is the returns vector for a given day
# Sig is the var_covar mat for a given day
# s is a fixed level of risk for the portfolio
# Given a returns vector, a var_covar matrix, and a specified level of risk,
# we want to construct a Long Short Portfolio that maximizes returns with respect
# to weights such that the sum of weights is = 0 (constraint 1)
    # and the variance of the portfolio is equal to s (constraint 2)
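    # Closed form via the Lagrangian L(w, lam_1, lam_2) = w'r - lam_1 (w'Sig w - s) - lam_2 (w'1):
    # setting dL/dw = 0 gives w = Sig^{-1} (r - lam_2 * 1) / (2 lam_1), where lam_2 is
    # chosen so that the weights sum to zero and lam_1 so that the variance equals s.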
if Sig_inv is None:
Sig_inv = inv(Sig)
o = np.ones_like(r)
lam_2_num = o.T.dot(Sig_inv).dot(r)
lam_2_den = o.T.dot(Sig_inv).dot(o)
lam_2 = lam_2_num / lam_2_den
r_lam2_1 = r - lam_2 * o
lam_1_mat_prod = r_lam2_1.T.dot(Sig_inv).dot(r_lam2_1)
lam_1 = np.sqrt(lam_1_mat_prod/(4 * s))
w = (1/(2*lam_1)) * Sig_inv.dot(r_lam2_1)
return w
def MVOpt_LS_Fixed_risk_beta(r, Sig, s, beta, Sig_inv = None):
# adding 3rd constraint: weights * beta = 0 => hedging portfolio
# beta vector same shape as returns, for a given day, we have a vector of betas
# and a vector of returns
if Sig_inv is None:
Sig_inv = inv(Sig)
o = np.ones_like(r)
o_sig_o = o.T.dot(Sig_inv).dot(o)
o_sig_b = o.T.dot(Sig_inv).dot(beta)
b_sig_o = beta.T.dot(Sig_inv).dot(o)
b_sig_b = beta.T.dot(Sig_inv).dot(beta)
o_sig_r = o.T.dot(Sig_inv).dot(r)
b_sig_r = beta.T.dot(Sig_inv).dot(r)
lam_2, lam_3 = inv([[o_sig_o, o_sig_b], [b_sig_o, b_sig_b]]).dot([o_sig_r, b_sig_r])
r_lam_2_lam_3 = r - (lam_2 * o) - (lam_3 * beta)
r_sig_r = r_lam_2_lam_3.T.dot(Sig_inv).dot(r_lam_2_lam_3)
lam_1 = np.sqrt(r_sig_r / (4 * s))
w = (1/ (2 * lam_1)) * Sig_inv.dot(r_lam_2_lam_3)
return w
def MVOpt_L_Min_Var(Sig, Sig_inv = None):
if Sig_inv is None:
Sig_inv = inv(Sig)
o = np.ones(Sig.shape[0])
lam_1 = 1/ (o.T.dot(Sig_inv).dot(o))
w = lam_1 * Sig_inv.dot(o)
return w
class MeanVarianceOptimization(object):
def __init__(self, stock_arr, s = 0.35, shrinkage_factor=0.80):
self.stock_arr = [Stock(s) if isinstance(s, str) else s for s in stock_arr]
self.s = s
self.shrinkage_factor = shrinkage_factor
self.returns_df = stock_obj_arr_to_return_mat(self.stock_arr)
self.returns_shifted_df = self.returns_df.shift(1)
self.var_covar_ts = return_mat_to_rolling_var_covar_dict(self.returns_df,
window=126,
shrinkage_factor=self.shrinkage_factor)
self.inv_var_covar_ts = invert_var_covar_dict(var_covar_ts_dict=self.var_covar_ts)
self.expected_returns_df = self.returns_df.rolling(window = 126).mean().shift(1).dropna()*252
self.weights_df = self.build_weights()
def build_weights(self):
weights_dict = {}
for dt, Sig_inv in self.inv_var_covar_ts.items():
            # for a given day, construct the weights of your portfolio
r = self.expected_returns_df.loc[dt,:]
Sig = self.var_covar_ts[dt]
w = MVOpt_LS_Fixed_risk(r = r, Sig = Sig, s = self.s, Sig_inv = Sig_inv)
weights_dict[dt] = w
weights_df = pd.DataFrame(weights_dict).T
return weights_df
class MinVarianceOptimization(object):
def __init__(self, stock_arr, shrinkage_factor=0.80, window=126):
self.stock_arr = [Stock(s) if isinstance(s, str) else s for s in stock_arr]
self.shrinkage_factor = shrinkage_factor
self.window = window
self.returns_df = stock_obj_arr_to_return_mat(self.stock_arr)
self.returns_shifted_df = self.returns_df.shift(1)
self.var_covar_ts = return_mat_to_rolling_var_covar_dict(self.returns_df,
window=self.window,
shrinkage_factor=self.shrinkage_factor)
self.inv_var_covar_ts = invert_var_covar_dict(var_covar_ts_dict=self.var_covar_ts)
self.weights_df = self.build_weights()
def build_weights(self):
weights_dict = {}
for dt, Sig_inv in self.inv_var_covar_ts.items():
            # for a given day, construct the weights of your portfolio
Sig = self.var_covar_ts[dt]
w = MVOpt_L_Min_Var(Sig = Sig, Sig_inv = Sig_inv)
weights_dict[dt] = w
weights_df = pd.DataFrame(weights_dict).T
return weights_df
class SimpleBlackLitterman(object):
def __init__(self, stock_arr, signal_func_arr, signal_view_ret_arr,
A=1.0, tau=1.0, shrinkage_factor=0.80):
self.stock_arr = [Stock(s) if isinstance(s, str) else s for s in stock_arr]
self.signal_func_arr = signal_func_arr
self.signal_view_ret_arr = signal_view_ret_arr
self.A = A
self.tau = tau
self.shrinkage_factor = shrinkage_factor
self.returns_df = stock_obj_arr_to_return_mat(self.stock_arr)
self.returns_shifted_df = self.returns_df.shift(1)
self.weights_df = self.build_weights()
self.weights_shifted_df = self.weights_df.shift(1)
self.var_covar_ts = return_mat_to_rolling_var_covar_dict(self.returns_df,
window=126,
shrinkage_factor=self.shrinkage_factor)
self.inv_var_covar_ts = invert_var_covar_dict(var_covar_ts_dict=self.var_covar_ts)
self.implied_returns_df = self.generate_implied_returns()
self.signal_df_dict = {'signal_'+str(i):self.build_signal_df(sf).dropna() \
for i, sf in enumerate(self.signal_func_arr)}
self.signal_ts_dict = self.generate_signal_ts_dict()
self.link_mat_ts = self.generate_link_mats()
self.view_var_covar_ts = self.generate_view_var_covar_mats()
self.view_inv_var_covar_ts = self.generate_view_inv_var_covar_mats()
self.black_litterman_weights_df = self.generate_black_litterman_weights()
def build_weights(self):
output_dict = {}
for s in self.stock_arr:
output_dict[s.ticker] = s['PriceClose'] * s['ShareIssued']
marketcap_df = pd.DataFrame(output_dict).dropna()
weights_df = marketcap_df.apply(lambda x: normalize(x), axis = 1)
return weights_df
def generate_implied_returns(self):
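        # Reverse optimization: given risk-aversion A and market-cap weights w,
        # the implied (prior) returns are pi = A * Sigma * w, as in Black-Litterman.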
implied_returns_dict = {}
for dt, var_cov_mat in self.var_covar_ts.items():
if dt in self.weights_shifted_df.index:
weigts_arr = self.weights_shifted_df.loc[dt, :]
implied_returns_arr = self.A*var_cov_mat.dot(weigts_arr)
implied_returns_dict[dt] = implied_returns_arr
implied_returns_df = pd.DataFrame(implied_returns_dict).T.dropna()
return implied_returns_df
def build_signal_df(self, signal_func):
signal_dict = {}
for stock_obj in self.stock_arr:
stock_signal_ts = signal_func(stock_obj)
stock_ticker = stock_obj.ticker
signal_dict[stock_ticker] = stock_signal_ts
signal_df = pd.DataFrame(signal_dict)
return signal_df
def generate_signal_ts_dict(self):
dts = None
for signal_label, signal_df in self.signal_df_dict.items():
if dts is None:
dts = signal_df.index
else:
dts = dts.intersection(signal_df.index)
output_dict = {}
for dt in dts:
date_dict = {}
for signal_label, signal_df in self.signal_df_dict.items():
date_dict[signal_label] = signal_df.loc[dt, :]
date_df = pd.DataFrame(date_dict).T
output_dict[dt] = date_df
return output_dict
def generate_link_mats(self):
link_mat_ts = {}
for dt, signal_raw in self.signal_ts_dict.items():
link_mat_ts[dt] = signal_raw.apply( lambda x: 2*((x.rank() - 1) / ( np.sum(~np.isnan(x)) - 1)) - 1, axis=1).fillna(0)
return link_mat_ts
def generate_view_var_covar_mats(self):
view_var_covar_ts = {}
for dt, var_cov_mat in self.var_covar_ts.items():
if dt in self.link_mat_ts:
P = self.link_mat_ts[dt]
Sigma = self.var_covar_ts[dt]
Omega = self.tau*P.dot(Sigma).dot(P.T)
# Apply shrinkage factor
Omega = Omega * self.shrinkage_factor + \
(1.0-self.shrinkage_factor)*np.diag(np.diag(Omega))
view_var_covar_ts[dt] = Omega
return view_var_covar_ts
def generate_view_inv_var_covar_mats(self):
view_inv_var_covar_ts = deepcopy(self.view_var_covar_ts)
for dt, view_var_cov_mat in self.view_var_covar_ts.items():
view_inv_var_covar_ts[dt].loc[:, :] = inv(view_var_cov_mat)
return view_inv_var_covar_ts
def generate_black_litterman_weights(self):
black_litterman_weights_dict = {}
for dt, view_inv_var_cov_mat in self.view_inv_var_covar_ts.items():
if dt in self.implied_returns_df.index:
black_litterman_weights_dict[dt] = self.tau*(self.inv_var_covar_ts[dt].dot(self.implied_returns_df.loc[dt])) \
+ self.link_mat_ts[dt].T.dot(view_inv_var_cov_mat).dot(self.signal_view_ret_arr)
black_litterman_weights_df = pd.DataFrame(black_litterman_weights_dict).T
return black_litterman_weights_df
class PairTradingPortfolio(object):
def __init__(self, stock_obj1 , stock_obj2, signal_func, flip_signal=False):
self.stock_obj1 = stock_obj1
self.stock_obj2 = stock_obj2
self.signal_func = signal_func
self.flip_signal = flip_signal
self.stock_obj1_signal_ts = signal_func(stock_obj1)
self.stock_obj2_signal_ts = signal_func(stock_obj2)
self.relative_signal_ts = None
self.stock_obj1_wght_ts = None
self.stock_obj2_wght_ts = None
self.stock_obj1_return = self.stock_obj1['PriceClose'].pct_change(1)
self.stock_obj2_return = self.stock_obj2['PriceClose'].pct_change(1)
def relative_scaling(self, window = 90):
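        # Rolling z-score of the signal ratio: compare the window's last value to
        # the mean/std of the preceding values, squash through the normal CDF to a
        # weight in (-1, 1) for stock 1, and take the opposite weight for stock 2.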
self.relative_signal_ts = (self.stock_obj1_signal_ts / self.stock_obj2_signal_ts).rolling(window = window).apply(lambda x: (x[-1] - x[0:-1].mean())/(x[0:-1].std()))
if self.flip_signal:
self.relative_signal_ts = -1 * self.relative_signal_ts
self.stock_obj1_wght_ts = self.relative_signal_ts.apply(lambda x: (norm.cdf(x) * 2) - 1)
self.stock_obj2_wght_ts = -1 * self.stock_obj1_wght_ts
self.portfolio_return_ts = self.stock_obj1_wght_ts.shift(1) * self.stock_obj1_return + \
self.stock_obj2_wght_ts.shift(1) * self.stock_obj2_return
def relative_differencing(self, window = 90):
self.relative_signal_ts = (self.stock_obj1_signal_ts - self.stock_obj2_signal_ts).rolling(window = window).apply(lambda x: (x[-1] - x[0:-1].mean())/(x[0:-1].std()))
if self.flip_signal:
self.relative_signal_ts = -1 * self.relative_signal_ts
self.stock_obj1_wght_ts = self.relative_signal_ts.apply(lambda x: (norm.cdf(x) * 2) - 1)
self.stock_obj2_wght_ts = -1 * self.stock_obj1_wght_ts
self.portfolio_return_ts = self.stock_obj1_wght_ts.shift(1) * self.stock_obj1_return + \
self.stock_obj2_wght_ts.shift(1) * self.stock_obj2_return
def get_returns(self):
self.portfolio_return_ts = self.stock_obj1_wght_ts.shift(1) * self.stock_obj1_return + \
self.stock_obj2_wght_ts.shift(1) * self.stock_obj2_return
return self.portfolio_return_ts
class SingleSignalPortfolio(object):
def __init__(self, stock_obj_arr, signal_func):
self.stock_obj_arr = stock_obj_arr
self.signal_func = signal_func
self.n_stocks = len(stock_obj_arr)
self.signal_df = None
returns_dict = {}
for stock_obj in self.stock_obj_arr:
returns_dict[stock_obj.ticker] = stock_obj['PriceClose'].pct_change(1)
self.returns_df = pd.DataFrame(returns_dict).dropna()
def relative_ranking(self):
signal_dict = {}
for stock_obj in self.stock_obj_arr:
stock_signal_ts = self.signal_func(stock_obj)
stock_ticker = stock_obj.ticker
signal_dict[stock_ticker] = stock_signal_ts
self.signal_df = | pd.DataFrame(signal_dict) | pandas.DataFrame |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Oct 15 16:41:37 2018
@author: krzysztof
This module contains utilities useful when performing data analysis and drug sensitivity prediction with
Genomics of Drug Sensitivity in Cancer (GDSC) database.
Main utilities are Drug classes and Experiment class. All classes beginning with the word "Drug" represent the compound
coming from GDSC. There is a separate class for every corresponding experiment setup and genomic feature space. All Drug
classes contain methods for extraction and storage of proper input data. Available data types include: gene expression, binary copy number and coding variants, and cell line tissue type. The set of considered genes is represented as "targets"
attribute of Drug classes.
The Experiment class is dedicated for storage and analysis of results coming from machine learning experiments. Actual
machine learning is done outside of a class. The Experiment class has methods for storage, analysis and visualisation
of results.
Classes:
Drug: Basic class representing a compound from GDSC.
DrugWithDrugBank: Inherits from Drug, accounts for target genes from DrugBank database.
    DrugGenomeWide: Inherits from Drug, designed for using genome-wide gene expression as input data.
DrugDirectReactome: Inherits from DrugWithDrugBank, uses only input data related to target genes resulting
from direct compound-pathway matching from Reactome.
DrugWithGenesInSamePathways: Inherits from DrugWithDrugBank, uses only input data related to genes that belong in
the same pathways as target genes.
Experiment: Designed to store and analyze results coming from machine learning experiments.
"""
# Imports
import collections
import multiprocessing
import pickle
import sys
import time
import warnings
import dill
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from scipy.stats import pearsonr
# Sklearn imports
from sklearn import feature_selection, metrics, model_selection, preprocessing
from sklearn.base import clone
from sklearn.dummy import DummyRegressor
from sklearn.ensemble import RandomForestRegressor
from sklearn.linear_model import ElasticNet, Lasso
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import StandardScaler
from sklearn.svm import SVR
from stability_selection import StabilitySelection
#################################################################################################################
# Drug class
#################################################################################################################
class Drug(object):
"""Class representing compound from GDSC database.
This is the most basic, parent class. Different experimental settings will use more specific,
children classes. Main function of the class is to create and store input data corresponding to a given
drug. Five types of data are considered: gene expression, copy number variants, coding variants, gene expression
signatures, and tumor tissue type. Class instances are initialized with four basic drug properties: ID, name, gene
targets and target pathway. Data attributes are stored as pandas DataFrames and are filled using data files
from GDSC via corresponding methods.
Attributes:
gdsc_id (int): ID from GDSC website.
name (string): Drug name.
targets (list of strings): Drug's target gene names (HGNC).
target_pathway (string): Drug's target pathway as provided in GDSC annotations.
        ensembl_targets (list of strings): Drug's target genes ensembl IDs. Can have different length
than "targets" because some gene names may not be matched during mapping. Ensembl IDs are
needed for gene expression data.
map_from_hgnc_to_ensembl (dictionary): Dictionary mapping from gene names to ensembl IDs. Created
after calling the "load_mappings" method.
map_from_ensembl_to_hgnc (dictionary): Dictionary mapping from ensembl IDs to gene names. Created
after calling the "load_mappings" method.
total_no_samples_screened (int): Number of cell lines screened for that drug. Created after
calling the "extract_drug_response_data" method.
response_data (DataFrame): DataFrame with screened cell lines for that drug and corresponding AUC or
IC50 values. Created after calling the "extract_drug_response_data" method.
screened_cell_lines (list of ints): list containing COSMIC IDs representing cell lines screened for
that drug. Created after calling the "extract_screened_cell_lines" method.
gene_expression_data (DataFrame): DataFrame with gene expression data, considering only
target genes. Created after calling the "extract_gene_expression" method
mutation_data (DataFrame): DataFrame with binary calls for coding variants, considering only
target genes. Created after calling the "extract_mutation_data" method.
        cnv_data (DataFrame): DataFrame with binary calls for copy number variants, considering only
target genes. Created after calling the "extract_cnv_data" method.
tissue_data (DataFrame): DataFrame with dummy encoded tumor tissue types in screened cell lines.
Dummy encoding results in 13 binary features. Created after calling the
"extract_tissue_data" method.
full_data (DataFrame): DataFrame with combined data coming from given set of genetic data
classes.
Methods:
Instance methods:
__init__: Initialize a Drug instance.
__repr__: Return string representation of an instance, as a command which can be used to create
this instance.
__str__: Return string representation of an instance.
extract_drug_response_data: Generate a DataFrame with drug-response data.
extract_screened_cell_lines: Generate a list of COSMIC IDs representing cell lines screened for that
drug.
extract_gene_expression: Generate a DataFrame with gene expression data for drug's screened cell lines
extract_mutation_data: Generate a DataFrame with binary calls for coding variants.
extract_cnv_data: Generate a DataFrame with binary calls for copy number variants.
extract_cnv_data_faster: Generate a DataFrame with binary calls for copy number variants.
extract_tissue_data: Generate a DataFrame with dummy encoded tissue types.
extract_merck_signatures_data: Generate a DataFrame with gene expression signatures provided by Merck.
concatenate_data: Generate a DataFrame containing all desired genetic data classes. Available data
classes are: gene expression, coding variants, cnv variants and tissue type.
create_full_data: Combines above data extraction methods in order to create desired input data
for the drug with one method call. Returns the full data and saves it in corresponding instance's
field.
return_full_data: Combines above data extraction methods in order to create desired input data
for the drug with one method call. Returns the full data but does not save it.
Class methods:
load_mappings: Load appropriate dictionaries mapping between ensembl and HGNC.
Static methods:
            create_drugs: Create a dictionary of Drug class objects, each referenced by its ID
(keys are drug GDSC ID's)
load_data: Load all needed data files as DataFrames with one function call.
"""
# Class variables
map_from_hgnc_to_ensembl = None
map_from_ensembl_to_hgnc = None
# Instance methods
def __init__(self, gdsc_id, name, targets, target_pathway):
"""Intiliaze the class instance with four basic attributes. "Targets" are gene names
and get mapped into Ensembl IDs using class mapping variable."""
self.gdsc_id = gdsc_id
self.name = name
self.targets = targets
self.target_pathway = target_pathway
self.ensembl_targets = []
for x in self.targets:
try:
self.ensembl_targets.append(self.map_from_hgnc_to_ensembl[x])
except KeyError:
pass
def extract_drug_response_data(self, sensitivity_profiles_df, metric="AUC"):
"""Generate a DataFrame containing reponses for every cell line screened for that drug.
Arguments:
sensitivity_profiles_df (DataFrame): DataFrame of drug response data from GDSC.
metric (string): Which statistic to use as a response metric (default "AUC").
Returns:
None
"""
df = sensitivity_profiles_df[sensitivity_profiles_df.DRUG_ID == self.gdsc_id][
["COSMIC_ID", metric]]
df.columns = ["cell_line_id", metric] # Insert column with samples ID
self.total_no_samples_screened = df.shape[0] # Record how many screened cell lines for drug
self.response_data = df # Put DataFrame into corresponding field
def extract_screened_cell_lines(self, sensitivity_profiles_df):
"""Generate set of cell lines screened for that drug.
Arguments:
sensitivity_profiles_df (DataFrame): DataFrame of drug response data from GDSC.
Returns:
None
"""
self.screened_cell_lines = list(
sensitivity_profiles_df[sensitivity_profiles_df.DRUG_ID == self.gdsc_id]["COSMIC_ID"])
def extract_gene_expression(self, gene_expression_df):
"""Generate DataFrame of gene expression data for cell lines screened for this drug, only
considering drug's target genes.
Arguments:
gene_expression_df (DataFrame): Original GDSC gene expression DataFrame.
Returns:
None
"""
        cell_lines_str = [] # Gene expression DF column names are strings
for x in self.screened_cell_lines:
cell_lines_str.append(str(x))
cl_to_extract = []
for x in cell_lines_str:
if x in list(gene_expression_df.columns):
cl_to_extract.append(x) # Extract only cell lines contained in gene expression data
gene_expr = gene_expression_df[
gene_expression_df.ensembl_gene.isin(self.ensembl_targets)][["ensembl_gene"] + cl_to_extract]
gene_expr_t = gene_expr.transpose()
columns = list(gene_expr_t.loc["ensembl_gene"])
gene_expr_t.columns = columns
gene_expr_t = gene_expr_t.drop(["ensembl_gene"])
rows = list(gene_expr_t.index)
gene_expr_t.insert(0, "cell_line_id", rows) # Insert columns with cell line IDs
gene_expr_t.reset_index(drop=True, inplace=True)
gene_expr_t["cell_line_id"] = pd.to_numeric(gene_expr_t["cell_line_id"])
self.gene_expression_data = gene_expr_t # Put DataFrame into corresponding field
def extract_mutation_data(self, mutation_df):
"""Generate a DataFrame with binary mutation calls for screened cell lines and target genes.
Arguments:
mutation_df: DataFrame with original mutation calls from GDSC.
Returns:
None
"""
targets = [x + "_mut" for x in self.targets]
df = mutation_df.copy()[
mutation_df.cosmic_sample_id.isin(self.screened_cell_lines)]
df = df[df.genetic_feature.isin(targets)][["cosmic_sample_id", "genetic_feature", "is_mutated"]]
cosmic_ids = []
genetic_features = {}
for feature in df.genetic_feature.unique():
genetic_features[feature] = []
for cl_id in df.cosmic_sample_id.unique():
cosmic_ids.append(cl_id)
df_cl = df[df.cosmic_sample_id == cl_id]
for feature in genetic_features:
mutation_status = df_cl[
df_cl.genetic_feature == feature]["is_mutated"].iloc[0]
genetic_features[feature].append(mutation_status)
df1 = pd.DataFrame()
df1.insert(0, "cell_line_id", cosmic_ids) # Insert column with samples IDs
for feature in genetic_features:
df1[feature] = genetic_features[feature]
self.mutation_data = df1 # Put DataFrame into corresponding field
def extract_cnv_data(self, cnv_binary_df):
"""Generate data containing binary CNV calls for cell lines screened for the drug.
Arguments:
cnv_binary_df: DataFrame from GDSC download tool with CNV data.
Returns:
None
"""
df = cnv_binary_df[cnv_binary_df.cosmic_sample_id.isin(self.screened_cell_lines)]
features_to_extract = [] # Map drug's targets to CNV features (segments)
for row in cnv_binary_df.drop_duplicates(subset="genetic_feature").itertuples():
feature_name = getattr(row, "genetic_feature")
genes_in_segment = getattr(row, "genes_in_segment").split(",")
for target in self.targets:
if target in genes_in_segment:
features_to_extract.append(feature_name) # If target is in any segment, add it to the list
features_to_extract = list(set(features_to_extract))
df = df[df.genetic_feature.isin(features_to_extract)]
cosmic_ids = []
feature_dict = {} # Separate lists for every column in final DataFrame
for feature in df.genetic_feature.unique():
feature_dict[feature] = []
for cl_id in df.cosmic_sample_id.unique():
cosmic_ids.append(cl_id)
for feature in feature_dict:
status = df[
(df.cosmic_sample_id == cl_id) & (df.genetic_feature == feature)]["is_mutated"].iloc[0]
feature_dict[feature].append(status)
new_df = pd.DataFrame()
for feature in feature_dict:
new_df[feature] = feature_dict[feature]
new_df.insert(0, "cell_line_id", cosmic_ids)
self.cnv_data = new_df
def extract_cnv_data_faster(self, cnv_binary_df, map_cl_id_and_feature_to_status):
"""Generate data containing binary CNV calls for cell lines screened for the drug.
Faster implementation than original "extract_cnv_data" by using mapping between genes and
genomic segments.
Arguments:
            cnv_binary_df: DataFrame from GDSC download tool with CNV data.
            map_cl_id_and_feature_to_status: Dictionary mapping (COSMIC ID, CNV feature) pairs to binary status.
Returns:
None
"""
df = cnv_binary_df[cnv_binary_df.cosmic_sample_id.isin(self.screened_cell_lines)]
features_to_extract = [] # Map drug's targets to CNV features (segments)
for row in cnv_binary_df.drop_duplicates(subset="genetic_feature").itertuples():
feature_name = getattr(row, "genetic_feature")
genes_in_segment = getattr(row, "genes_in_segment").split(",")
for target in self.targets:
if target in genes_in_segment:
features_to_extract.append(feature_name) # If target is in any segment, add it to the list
features_to_extract = list(set(features_to_extract))
df = df[df.genetic_feature.isin(features_to_extract)]
cosmic_ids = []
feature_dict = {} # Separate lists for every column in final DataFrame
for feature in features_to_extract:
feature_dict[feature] = []
for cl_id in df.cosmic_sample_id.unique():
cosmic_ids.append(cl_id)
for feature in feature_dict:
status = map_cl_id_and_feature_to_status[(cl_id, feature)]
feature_dict[feature].append(status)
new_df = pd.DataFrame()
for feature in feature_dict:
new_df[feature] = feature_dict[feature]
new_df.insert(0, "cell_line_id", cosmic_ids)
self.cnv_data = new_df
def extract_tissue_data(self, cell_line_list):
"""Generate (dummy encoded) data with cell line tissue type.
Arguments:
cell_line_list (DataFrame): Cell line list from GDSC.
Returns:
None
"""
df = cell_line_list[
cell_line_list["COSMIC_ID"].isin(self.screened_cell_lines)][["COSMIC_ID", "Tissue"]]
df.rename(columns={"COSMIC_ID": "cell_line_id"}, inplace=True)
self.tissue_data = pd.get_dummies(df, columns = ["Tissue"])
def extract_merck_signatures_data(self, signatures_df):
"""Generate data with gene expression signature scores for GDSC cell lines, provided by Merck.
Arguments:
signatures_df (DataFrame): DataFrame with gene signatures for cell lines.
Returns:
None
"""
# Compute list of screened cell lines as strings with prefix "X" in order to match
# signatures DataFrame columns
cell_lines_str = ["X" + str(cl) for cl in self.screened_cell_lines]
# Compute list of cell lines that are contained in signatures data
cls_to_extract = [cl for cl in cell_lines_str
if cl in list(signatures_df.columns)]
# Extract desired subset of signatures data
signatures_of_interest = signatures_df[cls_to_extract]
# Transpose the DataFrame
signatures_t = signatures_of_interest.transpose()
# Create a list of cell line IDs whose format matches rest of the data
cl_ids = pd.Series(signatures_t.index).apply(lambda x: int(x[1:]))
# Insert proper cell line IDs as a new column
signatures_t.insert(0, "cell_line_id", list(cl_ids))
# Drop the index and put computed DataFrame in an instance field
self.merck_signatures = signatures_t.reset_index(drop=True)
def concatenate_data(self, data_combination):
"""Generate data containing chosen combination of genetic data classes.
Arguments:
data_combination: List of strings containing data classes to be included. Available options are:
"mutation", "expression", "CNV", "tissue", "merck signatures".
Returns:
None
"""
# Create a list of DataFrames to include
objects = [self.response_data]
if "mutation" in data_combination and self.mutation_data.shape[0] > 0:
objects.append(self.mutation_data)
if "expression" in data_combination and self.gene_expression_data.shape[0] > 0:
objects.append(self.gene_expression_data)
if "CNV" in data_combination and self.cnv_data.shape[0] > 0:
objects.append(self.cnv_data)
if "tissue" in data_combination and self.tissue_data.shape[0] > 0:
objects.append(self.tissue_data)
if "merck signatures" in data_combination and self.merck_signatures.shape[0] > 0:
objects.append(self.merck_signatures)
# Find intersection in cell lines for all desirable DataFrames
cl_intersection = set(list(self.response_data["cell_line_id"]))
for obj in objects:
cl_intersection = cl_intersection.intersection(set(list(obj["cell_line_id"])))
objects_common = []
for obj in objects:
objects_common.append(obj[obj["cell_line_id"].isin(cl_intersection)])
# Check if all DataFrames have the same number of samples
no_samples = objects_common[0].shape[0]
for obj in objects_common:
assert obj.shape[0] == no_samples
obj.sort_values("cell_line_id", inplace=True)
obj.reset_index(drop=True, inplace=True)
cl_ids = objects_common[0]["cell_line_id"]
df_concatenated = pd.concat(objects_common, axis=1, ignore_index=False)
metric = self.response_data.columns[-1] # Extract the name of metric which was used for sensitivity
sensitivities = df_concatenated[metric]
df_concatenated = df_concatenated.drop(["cell_line_id", metric], axis=1)
df_concatenated.insert(0, "cell_line_id", cl_ids)
df_concatenated.insert(df_concatenated.shape[1], metric, sensitivities)
self.full_data = df_concatenated
def create_full_data(self, sensitivity_profiles_df, gene_expression_df=None, cnv_binary_df=None,
map_cl_id_and_feature_to_status=None,
cell_line_list=None, mutation_df=None, merck_signatures_df=None,
data_combination=None, metric="AUC"):
"""Combine extraction methods in one to generate a DataFrame with desired data.
When calling a function, original DataFrames parsed should match strings in
data_combination argument. If any of the "_df" arguments is None (default value),
the corresponding data is not included in the output DataFrame.
Arguments:
sensitivity_profiles_df (DataFrame): DataFrame of drug response data from GDSC.
gene_expression_df (DataFrame): Original GDSC gene expression DataFrame.
cnv_binary_df (DataFrame): DataFrame from GDSC download tool with CNV data.
cell_line_list (DataFrame): Cell line list from GDSC.
mutation_df (DataFrame): DataFrame with original mutation calls from GDSC.
data_combination (list): list of strings containing data classes to be included. Available
options are: "mutation", "expression", "CNV, "tissue", "merck signatures".
metric (string): Which statistic to use as a response metric (default "AUC").
Returns:
DataFrame containing desired data for the drug
"""
# Call separate methods for distinct data types
self.extract_screened_cell_lines(sensitivity_profiles_df)
self.extract_drug_response_data(sensitivity_profiles_df, metric)
if type(gene_expression_df) == type(pd.DataFrame()):
self.extract_gene_expression(gene_expression_df)
if type(cnv_binary_df) == type(pd.DataFrame()):
self.extract_cnv_data_faster(cnv_binary_df, map_cl_id_and_feature_to_status)
if type(cell_line_list) == type(pd.DataFrame()):
self.extract_tissue_data(cell_line_list)
if type(mutation_df) == type(pd.DataFrame()):
self.extract_mutation_data(mutation_df)
if type(merck_signatures_df) == type(pd.DataFrame()):
self.extract_merck_signatures_data(merck_signatures_df)
self.concatenate_data(data_combination)
return self.full_data
def return_full_data(self, sensitivity_profiles_df, gene_expression_df=None, cnv_binary_df=None,
map_cl_id_and_feature_to_status=None,
cell_line_list=None, mutation_df=None, merck_signatures_df=None,
data_combination=None, metric="AUC"):
"""Compute full data with desired data classes and return it, but after that delete data from
instance's data fields in order to save memory.
When calling a function, original DataFrames parsed should match strings in
data_combination argument. If any of the "_df" arguments is None (default value),
the corresponding data is not included in the output DataFrame.
Arguments:
sensitivity_profiles_df (DataFrame): DataFrame of drug response data from GDSC.
gene_expression_df (DataFrame): Original GDSC gene expression DataFrame.
cnv_binary_df (DataFrame): DataFrame from GDSC download tool with CNV data.
cell_line_list (DataFrame): Cell line list from GDSC.
mutation_df (DataFrame): DataFrame with original mutation calls from GDSC.
data_combination (list): list of strings containing data classes to be included. Available
options are: "mutation", "expression", "CNV, "tissue", "merck signatures".
metric (string): Which statistic to use as a response metric (default "AUC").
Returns:
DataFrame containing desired data for the drug
"""
full_df = self.create_full_data(sensitivity_profiles_df, gene_expression_df, cnv_binary_df,
map_cl_id_and_feature_to_status,
cell_line_list, mutation_df, merck_signatures_df,
data_combination, metric)
if type(gene_expression_df) == type(pd.DataFrame()):
self.gene_expression_data = None
if type(cnv_binary_df) == type(pd.DataFrame()):
self.cnv_data = None
if type(cell_line_list) == type(pd.DataFrame()):
self.tissue_data = None
if type(mutation_df) == type(pd.DataFrame()):
self.mutation_data = None
if type(merck_signatures_df) == type(pd.DataFrame()):
self.merck_signatures = None
self.full_data = None
return full_df
def __repr__(self):
"""Return string representation of an object, which can be used to create it."""
return 'Drug({}, "{}", {}, "{}")'.format(self.gdsc_id, self.name, self.targets, self.target_pathway)
def __str__(self):
"""Return string representation of an object"""
return "{} -- {}".format(self.name, self.gdsc_id)
# Class methods
@classmethod
def load_mappings(cls, filepath_hgnc_to_ensembl, filepath_ensembl_to_hgnc):
"""Load dictonaries with gene mappings between HGNC and Ensembl (from pickle files) and assign it
to corresponding class variables. Ensembl IDs are needed for gene expression data.
This method should be called on a Drug class before any other actions with the class.
Arguments:
filepath_hgnc_to_ensembl: file with accurate mapping
filepath_ensembl_to_hgnc: file with accurate mapping
Returns:
None
"""
cls.map_from_hgnc_to_ensembl = pickle.load(open(filepath_hgnc_to_ensembl, "rb"))
cls.map_from_ensembl_to_hgnc = pickle.load(open(filepath_ensembl_to_hgnc, "rb"))
# Static methods
@staticmethod
def create_drugs(drug_annotations_df):
"""Create a dictionary of Drug class objects, each referenced by it's ID (keys are drug GDSC ID's).
Arguments:
drug_annotations_df (DataFrame): DataFrame of drug annotations from GDSC website
Returns:
Dictionary of Drug objects as values and their ID's as keys
"""
drugs = {}
for row in drug_annotations_df.itertuples(index=True, name="Pandas"):
gdsc_id = getattr(row, "DRUG_ID")
name = getattr(row, "DRUG_NAME")
targets = getattr(row, "TARGET").split(", ")
target_pathway = getattr(row, "TARGET_PATHWAY")
drugs[gdsc_id] = Drug(gdsc_id, name, targets, target_pathway)
return drugs
@staticmethod
def load_data(drug_annotations, cell_line_list, gene_expr, cnv1, cnv2,
coding_variants, drug_response):
"""Load all needed files by calling one function and return data as tuple of DataFrames. All
        arguments are filepaths to corresponding files."""
# Drug annotations
drug_annotations_df = pd.read_excel(drug_annotations)
# Cell line annotations
col_names = ["Name", "COSMIC_ID", "TCGA classification", "Tissue", "Tissue_subtype", "Count"]
cell_lines_list_df = pd.read_csv(cell_line_list, usecols=[1, 2, 3, 4, 5, 6], header=0, names=col_names)
# Gene expression
gene_expression_df = pd.read_table(gene_expr)
# CNV
d1 = pd.read_csv(cnv1)
d2 = pd.read_table(cnv2)
d2.columns = ["genes_in_segment"]
def f(s):
return s.strip(",")
cnv_binary_df = d1.copy()
cnv_binary_df["genes_in_segment"] = d2["genes_in_segment"].apply(f)
# Coding variants
coding_variants_df = pd.read_csv(coding_variants)
# Drug-response
drug_response_df = pd.read_excel(drug_response)
return (drug_annotations_df, cell_lines_list_df, gene_expression_df, cnv_binary_df, coding_variants_df,
drug_response_df)
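#################################################################################################################
# Illustrative usage sketch (not part of the original module)
#################################################################################################################
# A minimal sketch of the workflow described in the module and Drug docstrings above. Every file path below is a
# hypothetical placeholder, and this helper itself is an added example; only methods defined in this module are
# called.
def _example_drug_workflow():
    Drug.load_mappings("hgnc_to_ensembl.p", "ensembl_to_hgnc.p")
    (drug_annotations_df, cell_lines_list_df, gene_expression_df, cnv_binary_df,
     coding_variants_df, drug_response_df) = Drug.load_data(
        "drug_annotations.xlsx", "cell_line_list.csv", "gene_expression.tsv",
        "cnv_calls.csv", "cnv_genes_in_segments.tsv", "coding_variants.csv", "drug_response.xlsx")
    drugs = Drug.create_drugs(drug_annotations_df)
    # Build the full input matrix for one drug, combining expression, mutation and tissue data
    some_drug = next(iter(drugs.values()))
    full_df = some_drug.create_full_data(
        drug_response_df,
        gene_expression_df=gene_expression_df,
        cell_line_list=cell_lines_list_df,
        mutation_df=coding_variants_df,
        data_combination=["expression", "mutation", "tissue"],
        metric="AUC")
    return full_df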
#################################################################################################################
# DrugWithDrugBank class
##################################################################################################################
class DrugWithDrugBank(Drug):
"""Class representing drug from GDSC database.
Contrary to the parent class Drug, this class also incorporates data related to targets
derived from DrugBank, not only those from GDSC. Main function of the class is to create and store input data
corresponding to a given drug. Four types of data are considered: gene expression, copy number variants,
coding variants and tumor tissue type. Class instances are initialized with four basic drug properties:
ID, name, gene targets and target pathway. Data attributes are stored as pandas DataFrames and are filled
using data files from GDSC via corresponding methods.
In general, all utilities are the same as in parent Drug class, with an exception of "create_drugs"
method, which is overloaded in order to account for target genes data coming from DrugBank.
Attributes:
gdsc_id (int): ID from GDSC website.
name (string): Drug name.
targets (list of strings): Drug's target gene names (HGNC).
target_pathway (string): Drug's target pathway as provided in GDSC annotations.
        ensembl_targets (list of strings): Drug's target genes ensembl IDs. Can have different length
than "targets" because some gene names may not be matched during mapping. Ensembl IDs are
needed for gene expression data.
map_from_hgnc_to_ensembl (dictionary): Dictionary mapping from gene names to ensembl IDs. Created
after calling the "load_mappings" method.
map_from_ensembl_to_hgnc (dictionary): Dictionary mapping from ensembl IDs to gene names. Created
after calling the "load_mappings" method.
total_no_samples_screened (int): Number of cell lines screened for that drug. Created after
calling the "extract_drug_response_data" method.
response_data (DataFrame): DataFrame with screened cell lines for that drug and corresponding AUC or
IC50 values. Created after calling the "extract_drug_response_data" method.
screened_cell_lines (list of ints): list containing COSMIC IDs representing cell lines screened for
that drug. Created after calling the "extract_screened_cell_lines" method.
gene_expression_data (DataFrame): DataFrame with gene expression data, considering only
target genes. Created after calling the "extract_gene_expression" method
mutation_data (DataFrame): DataFrame with binary calls for coding variants, considering only
target genes. Created after calling the "extract_mutation_data" method.
        cnv_data (DataFrame): DataFrame with binary calls for copy number variants, considering only
target genes. Created after calling the "extract_cnv_data" method.
tissue_data (DataFrame): DataFrame with dummy encoded tumor tissue types in screened cell lines.
Dummy encoding results in 13 binary features. Created after calling the
"extract_tissue_data" method.
full_data (DataFrame): DataFrame with combined data coming from given set of genetic data
classes.
Methods:
Instance methods:
__init__: Initialize a Drug instance.
__repr__: Return string representation of an instance, as a command which can be used to create
this instance.
__str__: Return string representation of an instance.
extract_drug_response_data: Generate a DataFrame with drug-response data.
extract_screened_cell_lines: Generate a list of COSMIC IDs representing cell lines screened for that
drug.
extract_gene_expression: Generate a DataFrame with gene expression data for drug's screened cell lines
extract_mutation_data: Generate a DataFrame with binary calls for coding variants.
extract_cnv_data: Generate a DataFrame with binary calls for copy number variants.
extract_tissue_data: Generate a DataFrame with dummy encoded tissue types.
concatenate_data: Generate a DataFrame containing all desired genetic data classes. Available data
classes are: gene expression, coding variants, cnv variants and tissue type.
create_full_data: Combines above data extraction methods in order to create desired input data
for the drug with one method call. Returns the full data.
Class methods:
load_mappings: Load appropriate dictionaries mapping between ensembl and HGNC.
Static methods:
            create_drugs: Create a dictionary of DrugWithDrugBank class objects, each referenced by its ID
(keys are drug GDSC ID's). Includes also target data coming from DrugBank.
load_data: Load all needed data files as DataFrames with one function call.
"""
    @staticmethod
    def create_drugs(drug_annotations_df, drugbank_targets_mapping):
"""Create a dictionary of DrugWithDrugBank class objects, each referenced by it's ID. Add
also target data coming from DrugBank.
Arguments:
drug_annotations_df (DataFrame): DataFrame of drug annotations from GDSC website.
drugbank_targets_mapping (dictionary): Dictionary with mapping from drug ID to it's
targets from drugbank database.
Return:
Dictionary of DrugWithDrugBank objects as values and their ID's as keys.
"""
drugs = {}
for row in drug_annotations_df.itertuples(index=True, name="Pandas"):
name = getattr(row, "DRUG_NAME")
gdsc_id = getattr(row, "DRUG_ID")
targets = getattr(row, "TARGET").split(", ")
# Add targets from DrugBank (if drug is matched) and take a sum
if gdsc_id in drugbank_targets_mapping:
targets = list(set(targets + drugbank_targets_mapping[gdsc_id]))
target_pathway = getattr(row, "TARGET_PATHWAY")
# Create DrugWithDrugBank instance and put it into output dictionary
drugs[gdsc_id] = DrugWithDrugBank(gdsc_id, name, targets, target_pathway)
return drugs
    @staticmethod
    def load_data(drug_annotations, cell_line_list, gene_expr, cnv1, cnv2,
coding_variants, drug_response, drugbank_targets):
"""Load all needed files by calling one function. All argumenst are filepaths to corrresponding files."""
# Drug annotations
drug_annotations_df = pd.read_excel(drug_annotations)
# Cell line annotations
col_names = ["Name", "COSMIC_ID", "TCGA classification", "Tissue", "Tissue_subtype", "Count"]
cell_lines_list_df = pd.read_csv(cell_line_list, usecols=[1, 2, 3, 4, 5, 6], header=0, names=col_names)
# Gene expression
gene_expression_df = pd.read_table(gene_expr)
# CNV
d1 = pd.read_csv(cnv1)
d2 = pd.read_table(cnv2)
d2.columns = ["genes_in_segment"]
def f(s):
return s.strip(",")
cnv_binary_df = d1.copy()
cnv_binary_df["genes_in_segment"] = d2["genes_in_segment"].apply(f)
# Coding variants
        coding_variants_df = pd.read_csv(coding_variants)
import itertools
import operator
from os.path import dirname, join
import numpy as np
import pandas as pd
import pytest
from pandas.core import ops
from pandas.tests.extension import base
from pandas.tests.extension.conftest import ( # noqa: F401
as_array,
as_frame,
as_series,
fillna_method,
groupby_apply_op,
use_numpy,
)
from pint.errors import DimensionalityError
from pint.testsuite import QuantityTestCase, helpers
import pint_pandas as ppi
from pint_pandas import PintArray
ureg = ppi.PintType.ureg
@pytest.fixture(params=[True, False])
def box_in_series(request):
"""Whether to box the data in a Series"""
return request.param
@pytest.fixture
def dtype():
return ppi.PintType("pint[meter]")
@pytest.fixture
def data():
return ppi.PintArray.from_1darray_quantity(
np.arange(start=1.0, stop=101.0) * ureg.nm
)
@pytest.fixture
def data_missing():
return ppi.PintArray.from_1darray_quantity([np.nan, 1] * ureg.meter)
@pytest.fixture
def data_for_twos():
x = [
2.0,
] * 100
return ppi.PintArray.from_1darray_quantity(x * ureg.meter)
@pytest.fixture(params=["data", "data_missing"])
def all_data(request, data, data_missing):
if request.param == "data":
return data
elif request.param == "data_missing":
return data_missing
@pytest.fixture
def data_repeated(data):
"""Return different versions of data for count times"""
# no idea what I'm meant to put here, try just copying from https://github.com/pandas-dev/pandas/blob/master/pandas/tests/extension/integer/test_integer.py
def gen(count):
for _ in range(count):
yield data
yield gen
@pytest.fixture(params=[None, lambda x: x])
def sort_by_key(request):
"""
Simple fixture for testing keys in sorting methods.
Tests None (no key) and the identity key.
"""
return request.param
@pytest.fixture
def data_for_sorting():
return ppi.PintArray.from_1darray_quantity([0.3, 10, -50] * ureg.centimeter)
# should probably get more sophisticated and do something like
# [1 * ureg.meter, 3 * ureg.meter, 10 * ureg.centimeter]
@pytest.fixture
def data_missing_for_sorting():
return ppi.PintArray.from_1darray_quantity([4, np.nan, -5] * ureg.centimeter)
# should probably get more sophisticated and do something like
# [4 * ureg.meter, np.nan, 10 * ureg.centimeter]
@pytest.fixture
def na_cmp():
"""Binary operator for comparing NA values."""
return lambda x, y: bool(np.isnan(x.magnitude)) & bool(np.isnan(y.magnitude))
@pytest.fixture
def na_value():
return ppi.PintType("meter").na_value
@pytest.fixture
def data_for_grouping():
# should probably get more sophisticated here and use units on all these
# quantities
a = 1.0
b = 2.0 ** 32 + 1
c = 2.0 ** 32 + 10
return ppi.PintArray.from_1darray_quantity(
[b, b, np.nan, np.nan, a, a, b, c] * ureg.m
)
# === missing from pandas extension docs about what has to be included in tests ===
# copied from pandas/pandas/conftest.py
_all_arithmetic_operators = [
"__add__",
"__radd__",
"__sub__",
"__rsub__",
"__mul__",
"__rmul__",
"__floordiv__",
"__rfloordiv__",
"__truediv__",
"__rtruediv__",
"__pow__",
"__rpow__",
"__mod__",
"__rmod__",
]
@pytest.fixture(params=_all_arithmetic_operators)
def all_arithmetic_operators(request):
"""
Fixture for dunder names for common arithmetic operations
"""
return request.param
@pytest.fixture(params=["__eq__", "__ne__", "__le__", "__lt__", "__ge__", "__gt__"])
def all_compare_operators(request):
"""
Fixture for dunder names for common compare operations
* >=
* >
* ==
* !=
* <
* <=
"""
return request.param
# commented functions aren't implemented
_all_numeric_reductions = [
"sum",
"max",
"min",
"mean",
# "prod",
# "std",
# "var",
"median",
# "kurt",
# "skew",
]
@pytest.fixture(params=_all_numeric_reductions)
def all_numeric_reductions(request):
"""
Fixture for numeric reduction names.
"""
return request.param
_all_boolean_reductions = ["all", "any"]
@pytest.fixture(params=_all_boolean_reductions)
def all_boolean_reductions(request):
"""
Fixture for boolean reduction names.
"""
return request.param
# =================================================================
class TestCasting(base.BaseCastingTests):
pass
class TestConstructors(base.BaseConstructorsTests):
@pytest.mark.xfail(run=True, reason="__iter__ / __len__ issue")
def test_series_constructor_no_data_with_index(self, dtype, na_value):
result = pd.Series(index=[1, 2, 3], dtype=dtype)
expected = pd.Series([na_value] * 3, index=[1, 2, 3], dtype=dtype)
self.assert_series_equal(result, expected)
# GH 33559 - empty index
result = pd.Series(index=[], dtype=dtype)
expected = pd.Series([], index=pd.Index([], dtype="object"), dtype=dtype)
self.assert_series_equal(result, expected)
@pytest.mark.xfail(run=True, reason="__iter__ / __len__ issue")
def test_series_constructor_scalar_na_with_index(self, dtype, na_value):
        result = pd.Series(na_value, index=[1, 2, 3], dtype=dtype)
import networkx as nx
import numpy as np
import pandas as pd
from tmp.utils import cols_attr, cols_glove, cols_empath
# Gets mean and median between tweets
tweets = pd.read_csv("../data/preprocessing/tweets.csv")
tweets.sort_values(by=["user_id", "tweet_creation"], ascending=True, inplace=True)
tweets["time_diff"] = tweets.groupby("user_id", sort=False).tweet_creation.diff()
time_diff_series_mean = tweets.groupby("user_id", sort=False).time_diff.mean()
time_diff_series_median = tweets.groupby("user_id", sort=False).time_diff.median()
time_diff = time_diff_series_mean.to_frame()
time_diff["time_diff_median"] = time_diff_series_median
time_diff.to_csv("../data/features/time_diff.csv")
users_attributes = pd.read_csv("../data/features/users_attributes.csv")
users_content = pd.read_csv("../data/features/users_content.csv")
users_content2 = pd.read_csv("../data/features/users_content2.csv")
users_time = pd.read_csv("../data/features/time_diff.csv")
users_deleted = pd.read_csv("../data/extra/deleted_account_before_guideline.csv")
users_deleted_after_guideline = pd.read_csv("../data/extra/deleted_account_after_guideline.csv")
users_date = pd.read_csv("../data/extra/created_at.csv")
# -*- coding: utf-8 -*-
"""
Created on Thu Nov 08 13:41:45 2018
@author: behzad
"""
import numpy as np
import pandas as pd
A1=np.array([2,5.2,1.8,5])
S1 = pd.Series([2,5.2,1.8,5],["a","b","c","d"])
S2 = pd.Series([2,5.2,1.8,5],index= ["a","b","c","d"])
S2["c"]
Q_heating = pd.Series([1150,1240,120],index=["wall","ceiling","door"])
# Where you will probably make mistakes!
# remember that Series starts with a capital S
# remember the first argument is a list; you shouldn't write it like this: pd.Series(1,3,3,2)
Q_heating["door"]
# of course we can repeat the same procedure we did with arrays
Opaque_item_list = ["wall", "ceiling","door"]
Opaque_U_list = [0.438,0.25,1.78]
opaque_U_Series = pd.Series(Opaque_U_list, index=Opaque_item_list)
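# Quick illustrative check (added example): label-based indexing works the same way as with S2 above
opaque_U_Series["ceiling"]   # -> 0.25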
import numpy as np
import pytest
import pandas as pd
from pandas import (
DataFrame,
MultiIndex,
)
import pandas._testing as tm
def test_to_numpy(idx):
result = idx.to_numpy()
exp = idx.values
tm.assert_numpy_array_equal(result, exp)
def test_to_frame():
tuples = [(1, "one"), (1, "two"), (2, "one"), (2, "two")]
    index = MultiIndex.from_tuples(tuples)
# -*- coding: utf-8 -*-
import warnings
import numpy as np
import pandas as pd
from scipy.stats import mannwhitneyu, ttest_ind, chisquare, wilcoxon, fisher_exact
from statsmodels.multivariate.manova import MANOVA
from statsmodels.sandbox.stats.multicomp import multipletests
from .utils import construct_formula, _categorical_table, _non_present_values_to_zero, \
_test_if_all_vals_equal, _create_result_table, _transform_p_dict, conf_interval, calc_mean_diff, calc_prop_diff
def test_num_feats(zipper, feat_subset=None, method=None):
"""
    Performs a hypothesis test to check if the value distributions of numerical features deviate significantly between
    the datasets. Supported tests are the t-test (parametric), the Mann-Whitney U-test (non-parametric) and the
    Wilcoxon signed-rank test.
:param zipper: Dictionary storing the feature values of the datasets in a list. Feature name is used as the key.
:param feat_subset: A list containing feature names. If given, analysis will only be performed for the contained \
features. If not given all features will be considered.
:param method: Specify which statistical test should be used. "u" for Mann-Whitney-U-test, "t" for t-test and \
"wilcoxon" for a Wilcoxon signed rank test.
:return: dictionary storing the p_values of the analysis. Feature names are used as keys.
"""
# if no method is specified used Mann-Whitney-U-test as standard
if method is None:
method = "u"
# initialize dictionary which stores the p_values
p_values = dict()
if feat_subset is None:
feat_subset = zipper.keys()
for feat in feat_subset: # run through all variables
# initiate dict in dict for d1 vs d2, d2 vs d3 etc. per feature
p_values[feat] = dict()
for i in range(len(zipper[feat]) - 1): # select dataset1
for j in range(i + 1, len(zipper[feat])): # select dataset2
# handle the case that all values of current feature are equal across current datasets
if _test_if_all_vals_equal(zipper[feat][i], zipper[feat][j]):
warnings.warn(
"Values of \"{}\" are the identical across the two datasets. It will be skipped.".format(feat),
UserWarning)
# delete already created dict for i, j in p_values and continue with next feature
del p_values[feat]
continue
# only calculate score if there are values in each dataset
if zipper[feat][i] and zipper[feat][j]:
# calculate u-test and return p-value
if method == "u":
stat_test_result = mannwhitneyu(zipper[feat][i], zipper[feat][j], alternative="two-sided")
# calculate t-test and return p-value
elif method == "t":
stat_test_result = ttest_ind(zipper[feat][i], zipper[feat][j])
elif method == "wilcoxon":
stat_test_result = wilcoxon(zipper[feat][i], zipper[feat][j])
p_values[feat][i + 1, j + 1] = stat_test_result.pvalue
# if one or both sets are empty
else:
p_values[feat][i + 1, j + 1] = np.nan
return p_values
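# Illustrative sketch (added example, not part of the original module): the expected "zipper" layout is one list
# of values per dataset, keyed by feature name. The feature values below are made up for demonstration only.
def _example_test_num_feats():
    zipper = {
        "age": [[54, 61, 47, 58], [49, 66, 71, 60]],                   # dataset 1 vs. dataset 2
        "bmi": [[22.1, 27.4, 30.0, 25.2], [24.8, 26.1, 29.5, 23.9]],
    }
    # Mann-Whitney U-test between the two datasets for every feature in the subset
    return test_num_feats(zipper, feat_subset=["age", "bmi"], method="u")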
def calc_conf_inv(zipper, feat_subset, df_names):
"""
Calculates the confidence intervals of means for numerical features.
:param zipper: Zipper created from a DataCollection.
:param feat_subset: An iterable of features for which the confidence intervals shall be calculated.
:param df_names: Names of the dataframes in the DataCollection
:return:
"""
confs = dict()
for key in feat_subset:
# turns zipper values into lists storing the number of entries of the respective features per dataset
confs[key] = [np.round(conf_interval(z), 2) for z in zipper[key]]
counts = pd.DataFrame(confs).transpose()
counts.index.name = "features"
counts.columns = [name+"conf." for name in df_names]
return counts
def calc_mean_diff_conf(zipper, feat_subset):
"""
    Calculates the confidence intervals of mean differences between datasets for numerical features.
:param zipper: Zipper created from a DataCollection.
:param feat_subset: An iterable of features for which the confidence intervals shall be calculated.
:return:
"""
# initialize dictionary which stores the conf_invs
conf_invs = dict()
for feat in feat_subset: # run through all variables
# initiate dict in dict for d1 vs d2, d2 vs d3 etc. per feature
conf_invs[feat] = dict()
for i in range(len(zipper[feat]) - 1): # select dataset1
for j in range(i + 1, len(zipper[feat])): # select dataset2
# only calculate score if there are values in each dataset
if zipper[feat][i] and zipper[feat][j]:
interval = np.round(calc_mean_diff(zipper[feat][i], zipper[feat][j]), 2)
# indicator = True if 0 is not in the interval
if interval[0] >= 0 or interval[1] <= 0:
flag = True
else:
flag = False
conf_invs[feat][i + 1, j + 1] = (interval, flag)
# if one or both sets are empty
else:
conf_invs[feat][i + 1, j + 1] = (np.nan, np.nan)
return conf_invs
def calc_prop_diff_conf(zipper, feat_subset):
"""
    Calculates the confidence intervals of proportion differences between datasets for categorical features.
:param zipper: Zipper created from a DataCollection.
:param feat_subset: An iterable of features for which the confidence intervals shall be calculated.
:return:
"""
# initialize dictionary which stores the conf_invs
conf_invs = dict()
for feat in feat_subset: # run through all variables
# initiate dict in dict for d1 vs d2, d2 vs d3 etc. per feature
conf_invs[feat] = dict()
for i in range(len(zipper[feat]) - 1): # select dataset1
for j in range(i + 1, len(zipper[feat])): # select dataset2
# only calculate score if there are values in each dataset
if zipper[feat][i] and zipper[feat][j]:
invs = calc_prop_diff(zipper[feat][i], zipper[feat][j], feat)
# check for each factor of the categorical feature if 0 is in the CI or not and append it to dict
for factor in invs:
inv = invs[factor]
# TODO: pass the counts over to the next function such that in can be included into the result table
#count1 = invs[factor][0]
#count2 = invs[factor][1]
# indicator = True if 0 is not in the interval
flag = inv[0] >= 0.00 or inv[1] <= 0.00
# save interval in dict
try:
conf_invs[factor][i + 1, j + 1] = (inv, flag)
except KeyError:
conf_invs[factor] = dict()
conf_invs[factor][i + 1, j + 1] = (inv, flag)
# if one or both sets are empty
else:
conf_invs[feat][i + 1, j + 1] = (np.nan, np.nan)
return conf_invs
def test_cat_feats(zipper, feat_subset=None, method=None, print_data=False):
"""
    Performs hypothesis testing to identify significantly deviating categorical features. A chi-squared test (default) or Fisher's exact test is used.
:param zipper: Dictionary storing the feature values of the datasets in a list. Feature name is used as the key.
:param feat_subset: A list containing feature names. If given, analysis will only be performed for the contained \
features. If not given all features will be considered.
:return:
"""
p_values = dict()
# consider all features if no feature subset was specified
if feat_subset is None:
feat_subset = zipper.keys()
# set default method to chi-square test
if method is None:
method = "chi"
for feat in feat_subset:
# initiate dict in dict for dataset1 vs dataset2, d1 vs d3 etc. per feature
p_values[feat] = dict()
for i in range(len(zipper[feat]) - 1): # select dataset1
for j in range(i + 1, len(zipper[feat])): # select dataset2
                # count occurrences of categorical features like in a confusion matrix for Chi2 tests
test_data = [_categorical_table(zipper[feat][i]), _categorical_table(zipper[feat][j])]
# fill missing keys in test data:
test_data = _non_present_values_to_zero(test_data)
# sort testing data by index(categories) to align the counts for the categories
test_data = [data.sort_index() for data in test_data]
# print testing data if specified
if print_data:
print(feat)
print(pd.DataFrame(test_data))
print()
if method == "chi":
# skip feature if number of events per group is smaller than 5
if (test_data[0] < 5).any() or (test_data[1] < 5).any():
warnings.warn(feat + " has under 5 observations in one or more groups.", UserWarning)
# calculate u statistic and return p-value
p_val = chisquare(*test_data).pvalue
elif method == "fisher":
p_val = fisher_exact(test_data)[1]
p_values[feat][i + 1, j + 1] = p_val
return p_values
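# Illustrative sketch (added example, not part of the original module): for categorical features the zipper holds
# the raw category labels observed in each dataset; per-category counts are built internally via _categorical_table.
def _example_test_cat_feats():
    zipper = {
        "smoker": [["yes"] * 30 + ["no"] * 25, ["yes"] * 20 + ["no"] * 35],
    }
    # Chi-squared test on the category counts of the two datasets
    return test_cat_feats(zipper, feat_subset=["smoker"], method="chi")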
def p_correction(p_values):
"""
Corrects p_values for multiple testing.
:param p_values: Dictionary storing p_values with corresponding feature names as keys.
:return: DataFrame which shows the results of the analysis; p-value, corrected p-value and boolean indicating \
significance.
"""
p_trans = _transform_p_dict(p_values)
# get and drop features which are NaN to skip them in multitest correction
nan_features = p_trans[pd.isnull(p_trans[0])]
p_trans = p_trans.dropna(axis=0, subset=[0])
# extract p_value column to pass into multiple testing correction
p_val_col = p_trans[0].sort_values()
# add NaN features back to p_trans to include them into result table later on
p_trans = pd.concat([p_trans, nan_features])
    # raise Error if no p_values were calculated that can be passed into multiple test correction
if p_val_col.values.size == 0:
# unpack the p_values which are stored in 2 layer nested dicts.
nested_values = []
for value in p_values.values():
nested_values.append(*value.values())
# if all p_values are nan, return an all nan result table
        if pd.isnull(nested_values).all():
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
#import sklearn
from scipy import linalg, polyfit
#import pulp
import pickle
from sklearn import linear_model
from customClass import *
from misc_utility.plot_utility import *
from misc_utility.save_utility import result2csv
from misc_utility.solvers import *
from misc_utility.load_data import *
INPUT_DIR = "/home/samuel/Documents/IGE/BdD/BdD_PO/"
OUTPUT_DIR= "/home/samuel/Documents/IGE/inversionPO/figures/inversionLARS/"
SAVE_DIR = "/home/samuel/Documents/IGE/inversionPO/results/inversionLARS/"
list_station= ["Nice","Frenes","Passy","Chamonix", "Marnaz"]
# list_station= ["Passy"]
list_POtype = ["DTTv","AAv"]
# plt.interactive(True)
OrdinaryLeastSquare = False
GeneralizedLeastSquare = False
MachineLearning = True
fromSource = True
saveFig = True
plotTS = True
plotBar = True
saveResult = True
sum_sources = True
plotAll = True
if fromSource:
name_File="_ContributionsMass_positive.csv"
else:
name_File="CHEM_conc.csv"
# sort list in order to always have the same order
list_station.sort()
list_POtype.sort()
# initialize stuff
sto = dict()
saveCoeff = dict()
saveCovm = dict()
pvalues = dict()
for POtype in list_POtype:
sto[POtype]=dict()
print("=============="+POtype+"====================")
s = pd.Series()
cov_all = pd.Series()
    pie = pd.Series()
# Copyright 1999-2020 Alibaba Group Holding Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import pandas as pd
import mars.dataframe as md
import mars.tensor as mt
from mars.tensor.core import TENSOR_CHUNK_TYPE, Tensor
from mars.tests.core import TestBase
from mars.dataframe.core import SERIES_CHUNK_TYPE, Series, DataFrame, DATAFRAME_CHUNK_TYPE
from mars.dataframe.indexing.iloc import DataFrameIlocGetItem, DataFrameIlocSetItem, \
IndexingError, HeadTailOptimizedOperandMixin
from mars.dataframe.indexing.loc import DataFrameLocGetItem
class Test(TestBase):
def testSetIndex(self):
df1 = pd.DataFrame([[1, 3, 3], [4, 2, 6], [7, 8, 9]],
index=['a1', 'a2', 'a3'], columns=['x', 'y', 'z'])
df2 = md.DataFrame(df1, chunk_size=2)
df3 = df2.set_index('y', drop=True)
df3 = df3.tiles()
self.assertEqual(df3.chunk_shape, (2, 2))
pd.testing.assert_index_equal(df3.chunks[0].columns_value.to_pandas(), pd.Index(['x']))
pd.testing.assert_index_equal(df3.chunks[1].columns_value.to_pandas(), pd.Index(['z']))
df4 = df2.set_index('y', drop=False)
df4 = df4.tiles()
self.assertEqual(df4.chunk_shape, (2, 2))
pd.testing.assert_index_equal(df4.chunks[0].columns_value.to_pandas(), pd.Index(['x', 'y']))
pd.testing.assert_index_equal(df4.chunks[1].columns_value.to_pandas(), pd.Index(['z']))
def testILocGetItem(self):
df1 = pd.DataFrame([[1, 3, 3], [4, 2, 6], [7, 8, 9]],
index=['a1', 'a2', 'a3'], columns=['x', 'y', 'z'])
df2 = md.DataFrame(df1, chunk_size=2)
with self.assertRaises(IndexingError):
_ = df2.iloc[1, 1, 1]
# index cannot be tuple
with self.assertRaises(IndexingError):
_ = df2.iloc[(1,), ]
# index wrong type
with self.assertRaises(TypeError):
_ = df2.iloc['a1':]
with self.assertRaises(NotImplementedError):
_ = df2.iloc[0, md.Series(['a2', 'a3'])]
# fancy index should be 1-d
with self.assertRaises(ValueError):
_ = df2.iloc[[[0, 1], [1, 2]]]
with self.assertRaises(ValueError):
_ = df2.iloc[1, ...]
with self.assertRaises(IndexError):
_ = df2.iloc[-4]
with self.assertRaises(IndexError):
_ = df2.iloc[3]
# plain index
df3 = df2.iloc[1]
df3 = df3.tiles()
self.assertIsInstance(df3, Series)
self.assertIsInstance(df3.op, DataFrameIlocGetItem)
self.assertEqual(df3.shape, (3,))
self.assertEqual(df3.chunk_shape, (2,))
self.assertEqual(df3.chunks[0].shape, (2,))
self.assertEqual(df3.chunks[1].shape, (1,))
self.assertEqual(df3.chunks[0].op.indexes, (1, slice(None, None, None)))
self.assertEqual(df3.chunks[1].op.indexes, (1, slice(None, None, None)))
self.assertEqual(df3.chunks[0].inputs[0].index, (0, 0))
self.assertEqual(df3.chunks[0].inputs[0].shape, (2, 2))
self.assertEqual(df3.chunks[1].inputs[0].index, (0, 1))
self.assertEqual(df3.chunks[1].inputs[0].shape, (2, 1))
# slice index
df4 = df2.iloc[:, 2:4]
df4 = df4.tiles()
self.assertIsInstance(df4, DataFrame)
self.assertIsInstance(df4.op, DataFrameIlocGetItem)
self.assertEqual(df4.shape, (3, 1))
self.assertEqual(df4.chunk_shape, (2, 1))
self.assertEqual(df4.chunks[0].shape, (2, 1))
pd.testing.assert_index_equal(df4.chunks[0].columns_value.to_pandas(), df1.columns[2:3])
pd.testing.assert_series_equal(df4.chunks[0].dtypes, df1.dtypes[2:3])
self.assertIsInstance(df4.chunks[0].index_value.to_pandas(), type(df1.index))
self.assertEqual(df4.chunks[1].shape, (1, 1))
pd.testing.assert_index_equal(df4.chunks[1].columns_value.to_pandas(), df1.columns[2:3])
pd.testing.assert_series_equal(df4.chunks[1].dtypes, df1.dtypes[2:3])
self.assertNotEqual(df4.chunks[0].index_value.key, df4.chunks[1].index_value.key)
self.assertIsInstance(df4.chunks[1].index_value.to_pandas(), type(df1.index))
self.assertEqual(df4.chunks[0].op.indexes, (slice(None, None, None), slice(None, None, None)))
self.assertEqual(df4.chunks[1].op.indexes, (slice(None, None, None), slice(None, None, None)))
self.assertEqual(df4.chunks[0].inputs[0].index, (0, 1))
self.assertEqual(df4.chunks[0].inputs[0].shape, (2, 1))
self.assertEqual(df4.chunks[1].inputs[0].index, (1, 1))
self.assertEqual(df4.chunks[1].inputs[0].shape, (1, 1))
# plain fancy index
df5 = df2.iloc[[0], [0, 1, 2]]
df5 = df5.tiles()
self.assertIsInstance(df5, DataFrame)
self.assertIsInstance(df5.op, DataFrameIlocGetItem)
self.assertEqual(df5.shape, (1, 3))
self.assertEqual(df5.chunk_shape, (1, 2))
self.assertEqual(df5.chunks[0].shape, (1, 2))
pd.testing.assert_index_equal(df5.chunks[0].columns_value.to_pandas(), df1.columns[:2])
pd.testing.assert_series_equal(df5.chunks[0].dtypes, df1.dtypes[:2])
self.assertIsInstance(df5.chunks[0].index_value.to_pandas(), type(df1.index))
self.assertEqual(df5.chunks[1].shape, (1, 1))
pd.testing.assert_index_equal(df5.chunks[1].columns_value.to_pandas(), df1.columns[2:])
pd.testing.assert_series_equal(df5.chunks[1].dtypes, df1.dtypes[2:])
self.assertIsInstance(df5.chunks[1].index_value.to_pandas(), type(df1.index))
np.testing.assert_array_equal(df5.chunks[0].op.indexes[0], [0])
np.testing.assert_array_equal(df5.chunks[0].op.indexes[1], [0, 1])
np.testing.assert_array_equal(df5.chunks[1].op.indexes[0], [0])
np.testing.assert_array_equal(df5.chunks[1].op.indexes[1], [0])
self.assertEqual(df5.chunks[0].inputs[0].index, (0, 0))
self.assertEqual(df5.chunks[0].inputs[0].shape, (2, 2))
self.assertEqual(df5.chunks[1].inputs[0].index, (0, 1))
self.assertEqual(df5.chunks[1].inputs[0].shape, (2, 1))
# fancy index
df6 = df2.iloc[[1, 2], [0, 1, 2]]
df6 = df6.tiles()
self.assertIsInstance(df6, DataFrame)
self.assertIsInstance(df6.op, DataFrameIlocGetItem)
self.assertEqual(df6.shape, (2, 3))
self.assertEqual(df6.chunk_shape, (2, 2))
self.assertEqual(df6.chunks[0].shape, (1, 2))
self.assertEqual(df6.chunks[1].shape, (1, 1))
self.assertEqual(df6.chunks[2].shape, (1, 2))
self.assertEqual(df6.chunks[3].shape, (1, 1))
np.testing.assert_array_equal(df6.chunks[0].op.indexes[0], [1])
np.testing.assert_array_equal(df6.chunks[0].op.indexes[1], [0, 1])
np.testing.assert_array_equal(df6.chunks[1].op.indexes[0], [1])
np.testing.assert_array_equal(df6.chunks[1].op.indexes[1], [0])
np.testing.assert_array_equal(df6.chunks[2].op.indexes[0], [0])
np.testing.assert_array_equal(df6.chunks[2].op.indexes[1], [0, 1])
np.testing.assert_array_equal(df6.chunks[3].op.indexes[0], [0])
np.testing.assert_array_equal(df6.chunks[3].op.indexes[1], [0])
self.assertEqual(df6.chunks[0].inputs[0].index, (0, 0))
self.assertEqual(df6.chunks[0].inputs[0].shape, (2, 2))
self.assertEqual(df6.chunks[1].inputs[0].index, (0, 1))
self.assertEqual(df6.chunks[1].inputs[0].shape, (2, 1))
self.assertEqual(df6.chunks[2].inputs[0].index, (1, 0))
self.assertEqual(df6.chunks[2].inputs[0].shape, (1, 2))
self.assertEqual(df6.chunks[3].inputs[0].index, (1, 1))
self.assertEqual(df6.chunks[3].inputs[0].shape, (1, 1))
# plain index
df7 = df2.iloc[1, 2]
df7 = df7.tiles()
self.assertIsInstance(df7, Tensor) # scalar
self.assertIsInstance(df7.op, DataFrameIlocGetItem)
self.assertEqual(df7.shape, ())
self.assertEqual(df7.chunk_shape, ())
self.assertEqual(df7.chunks[0].dtype, df7.dtype)
self.assertEqual(df7.chunks[0].shape, ())
self.assertEqual(df7.chunks[0].op.indexes, (1, 0))
self.assertEqual(df7.chunks[0].inputs[0].index, (0, 1))
self.assertEqual(df7.chunks[0].inputs[0].shape, (2, 1))
# test Series iloc getitem
# slice
series = md.Series(pd.Series(np.arange(10)), chunk_size=3).iloc[4:8]
series = series.tiles()
self.assertEqual(series.shape, (4,))
self.assertEqual(len(series.chunks), 2)
self.assertEqual(series.chunks[0].shape, (2,))
self.assertEqual(series.chunks[0].index, (0,))
self.assertEqual(series.chunks[0].op.indexes, (slice(1, 3, 1),))
self.assertEqual(series.chunks[1].shape, (2,))
self.assertEqual(series.chunks[1].op.indexes, (slice(0, 2, 1),))
self.assertEqual(series.chunks[1].index, (1,))
# fancy index
series = md.Series(pd.Series(np.arange(10)), chunk_size=3).iloc[[2, 4, 8]]
series = series.tiles()
self.assertEqual(series.shape, (3,))
self.assertEqual(len(series.chunks), 3)
self.assertEqual(series.chunks[0].shape, (1,))
self.assertEqual(series.chunks[0].index, (0,))
self.assertEqual(series.chunks[0].op.indexes[0], [2])
self.assertEqual(series.chunks[1].shape, (1,))
self.assertEqual(series.chunks[1].op.indexes[0], [1])
self.assertEqual(series.chunks[1].index, (1,))
self.assertEqual(series.chunks[2].shape, (1,))
self.assertEqual(series.chunks[2].op.indexes[0], [2])
self.assertEqual(series.chunks[2].index, (2,))
def testILocSetItem(self):
df1 = pd.DataFrame([[1, 3, 3], [4, 2, 6], [7, 8, 9]],
index=['a1', 'a2', 'a3'], columns=['x', 'y', 'z'])
df2 = md.DataFrame(df1, chunk_size=2)
df2 = df2.tiles()
# plain index
df3 = md.DataFrame(df1, chunk_size=2)
df3.iloc[1] = 100
df3 = df3.tiles()
self.assertIsInstance(df3.op, DataFrameIlocSetItem)
self.assertEqual(df3.chunk_shape, df2.chunk_shape)
pd.testing.assert_index_equal(df2.index_value.to_pandas(), df3.index_value.to_pandas())
pd.testing.assert_index_equal(df2.columns_value.to_pandas(), df3.columns_value.to_pandas())
for c1, c2 in zip(df2.chunks, df3.chunks):
self.assertEqual(c1.shape, c2.shape)
pd.testing.assert_index_equal(c1.index_value.to_pandas(), c2.index_value.to_pandas())
pd.testing.assert_index_equal(c1.columns_value.to_pandas(), c2.columns_value.to_pandas())
if isinstance(c2.op, DataFrameIlocSetItem):
self.assertEqual(c1.key, c2.inputs[0].key)
else:
self.assertEqual(c1.key, c2.key)
self.assertEqual(df3.chunks[0].op.indexes, (1, slice(None, None, None)))
self.assertEqual(df3.chunks[1].op.indexes, (1, slice(None, None, None)))
        # slice index
df4 = md.DataFrame(df1, chunk_size=2)
df4.iloc[:, 2:4] = 1111
df4 = df4.tiles()
self.assertIsInstance(df4.op, DataFrameIlocSetItem)
self.assertEqual(df4.chunk_shape, df2.chunk_shape)
pd.testing.assert_index_equal(df2.index_value.to_pandas(), df4.index_value.to_pandas())
pd.testing.assert_index_equal(df2.columns_value.to_pandas(), df4.columns_value.to_pandas())
for c1, c2 in zip(df2.chunks, df4.chunks):
self.assertEqual(c1.shape, c2.shape)
pd.testing.assert_index_equal(c1.index_value.to_pandas(), c2.index_value.to_pandas())
pd.testing.assert_index_equal(c1.columns_value.to_pandas(), c2.columns_value.to_pandas())
if isinstance(c2.op, DataFrameIlocSetItem):
self.assertEqual(c1.key, c2.inputs[0].key)
else:
self.assertEqual(c1.key, c2.key)
self.assertEqual(df4.chunks[1].op.indexes, (slice(None, None, None), slice(None, None, None)))
self.assertEqual(df4.chunks[3].op.indexes, (slice(None, None, None), slice(None, None, None)))
# plain fancy index
df5 = md.DataFrame(df1, chunk_size=2)
df5.iloc[[0], [0, 1, 2]] = 2222
df5 = df5.tiles()
self.assertIsInstance(df5.op, DataFrameIlocSetItem)
self.assertEqual(df5.chunk_shape, df2.chunk_shape)
pd.testing.assert_index_equal(df2.index_value.to_pandas(), df5.index_value.to_pandas())
pd.testing.assert_index_equal(df2.columns_value.to_pandas(), df5.columns_value.to_pandas())
for c1, c2 in zip(df2.chunks, df5.chunks):
self.assertEqual(c1.shape, c2.shape)
pd.testing.assert_index_equal(c1.index_value.to_pandas(), c2.index_value.to_pandas())
pd.testing.assert_index_equal(c1.columns_value.to_pandas(), c2.columns_value.to_pandas())
if isinstance(c2.op, DataFrameIlocSetItem):
self.assertEqual(c1.key, c2.inputs[0].key)
else:
self.assertEqual(c1.key, c2.key)
np.testing.assert_array_equal(df5.chunks[0].op.indexes[0], [0])
np.testing.assert_array_equal(df5.chunks[0].op.indexes[1], [0, 1])
np.testing.assert_array_equal(df5.chunks[1].op.indexes[0], [0])
np.testing.assert_array_equal(df5.chunks[1].op.indexes[1], [0])
# fancy index
df6 = md.DataFrame(df1, chunk_size=2)
df6.iloc[[1, 2], [0, 1, 2]] = 3333
df6 = df6.tiles()
self.assertIsInstance(df6.op, DataFrameIlocSetItem)
self.assertEqual(df6.chunk_shape, df2.chunk_shape)
pd.testing.assert_index_equal(df2.index_value.to_pandas(), df6.index_value.to_pandas())
pd.testing.assert_index_equal(df2.columns_value.to_pandas(), df6.columns_value.to_pandas())
for c1, c2 in zip(df2.chunks, df6.chunks):
self.assertEqual(c1.shape, c2.shape)
pd.testing.assert_index_equal(c1.index_value.to_pandas(), c2.index_value.to_pandas())
pd.testing.assert_index_equal(c1.columns_value.to_pandas(), c2.columns_value.to_pandas())
if isinstance(c2.op, DataFrameIlocSetItem):
self.assertEqual(c1.key, c2.inputs[0].key)
else:
self.assertEqual(c1.key, c2.key)
np.testing.assert_array_equal(df6.chunks[0].op.indexes[0], [1])
np.testing.assert_array_equal(df6.chunks[0].op.indexes[1], [0, 1])
np.testing.assert_array_equal(df6.chunks[1].op.indexes[0], [1])
np.testing.assert_array_equal(df6.chunks[1].op.indexes[1], [0])
np.testing.assert_array_equal(df6.chunks[2].op.indexes[0], [0])
np.testing.assert_array_equal(df6.chunks[2].op.indexes[1], [0, 1])
np.testing.assert_array_equal(df6.chunks[3].op.indexes[0], [0])
np.testing.assert_array_equal(df6.chunks[3].op.indexes[1], [0])
# plain index
df7 = md.DataFrame(df1, chunk_size=2)
df7.iloc[1, 2] = 4444
df7 = df7.tiles()
self.assertIsInstance(df7.op, DataFrameIlocSetItem)
self.assertEqual(df7.chunk_shape, df2.chunk_shape)
pd.testing.assert_index_equal(df2.index_value.to_pandas(), df7.index_value.to_pandas())
pd.testing.assert_index_equal(df2.columns_value.to_pandas(), df7.columns_value.to_pandas())
for c1, c2 in zip(df2.chunks, df7.chunks):
self.assertEqual(c1.shape, c2.shape)
pd.testing.assert_index_equal(c1.index_value.to_pandas(), c2.index_value.to_pandas())
pd.testing.assert_index_equal(c1.columns_value.to_pandas(), c2.columns_value.to_pandas())
if isinstance(c2.op, DataFrameIlocSetItem):
self.assertEqual(c1.key, c2.inputs[0].key)
else:
self.assertEqual(c1.key, c2.key)
self.assertEqual(df7.chunks[1].op.indexes, (1, 0))
# test Series
# slice
series = md.Series(pd.Series(np.arange(10)), chunk_size=3)
series.iloc[:4] = 2
series = series.tiles()
self.assertEqual(series.shape, (10,))
self.assertEqual(len(series.chunks), 4)
self.assertEqual(series.chunks[0].op.indexes, [slice(None, None, None), ])
self.assertEqual(series.chunks[0].op.value, 2)
self.assertEqual(series.chunks[1].op.indexes, [slice(0, 1, 1), ])
self.assertEqual(series.chunks[1].op.value, 2)
# fancy index
series = md.Series(pd.Series(np.arange(10)), chunk_size=3)
series.iloc[[2, 4, 9]] = 3
series = series.tiles()
self.assertEqual(series.shape, (10,))
self.assertEqual(len(series.chunks), 4)
self.assertEqual(series.chunks[0].index, (0,))
self.assertEqual(series.chunks[0].op.indexes[0].tolist(), [2])
self.assertEqual(series.chunks[0].op.value, 3)
self.assertEqual(series.chunks[1].index, (1,))
self.assertEqual(series.chunks[1].op.indexes[0].tolist(), [1])
self.assertEqual(series.chunks[1].op.value, 3)
self.assertEqual(series.chunks[3].index, (3,))
self.assertEqual(series.chunks[3].op.indexes[0].tolist(), [0])
self.assertEqual(series.chunks[3].op.value, 3)
def testDataFrameLoc(self):
raw = pd.DataFrame([[1, 3, 3], [4, 2, 6], [7, 8, 9]],
index=['a1', 'a2', 'a3'], columns=['x', 'y', 'z'])
df = md.DataFrame(raw, chunk_size=2)
raw2 = raw.copy()
raw2.reset_index(inplace=True, drop=True)
df3 = md.DataFrame(raw2, chunk_size=2)
s = pd.Series([1, 3, 5], index=['a1', 'a2', 'a3'])
series = md.Series(s, chunk_size=2)
# test return scalar
df2 = df.loc['a1', 'z']
self.assertIsInstance(df2, Tensor)
self.assertEqual(df2.shape, ())
self.assertEqual(df2.dtype, raw['z'].dtype)
df2 = df2.tiles()
self.assertEqual(len(df2.chunks), 1)
self.assertIsInstance(df2.chunks[0], TENSOR_CHUNK_TYPE)
# test return series for index axis
df2 = df.loc[:, 'y']
self.assertIsInstance(df2, Series)
self.assertEqual(df2.shape, (3,))
pd.testing.assert_index_equal(df2.index_value.to_pandas(), df.index_value.to_pandas())
self.assertEqual(df2.name, 'y')
df2 = df2.tiles()
self.assertEqual(len(df2.chunks), 2)
for c in df2.chunks:
self.assertIsInstance(c, SERIES_CHUNK_TYPE)
self.assertIsInstance(c.index_value.to_pandas(), type(raw.index))
self.assertEqual(c.name, 'y')
self.assertEqual(c.dtype, raw['y'].dtype)
# test return series for column axis
df2 = df.loc['a2', :]
self.assertIsInstance(df2, Series)
self.assertEqual(df2.shape, (3,))
pd.testing.assert_index_equal(df2.index_value.to_pandas(), df.columns_value.to_pandas())
self.assertEqual(df2.name, 'a2')
df2 = df2.tiles()
self.assertEqual(len(df2.chunks), 2)
for c in df2.chunks:
self.assertIsInstance(c, SERIES_CHUNK_TYPE)
self.assertIsInstance(c.index_value.to_pandas(), type(raw.columns))
self.assertEqual(c.name, 'a2')
self.assertEqual(c.dtype, raw.loc['a2'].dtype)
# test slice
df2 = df.loc['a2': 'a3', 'y': 'z']
self.assertIsInstance(df2, DataFrame)
self.assertEqual(df2.shape, (np.nan, 2))
pd.testing.assert_index_equal(df2.index_value.to_pandas(), df.index_value.to_pandas())
self.assertNotEqual(df2.index_value.key, df.index_value.key)
pd.testing.assert_index_equal(df2.columns_value.to_pandas(), raw.loc[:, 'y': 'z'].columns)
pd.testing.assert_series_equal(df2.dtypes, raw.loc[:, 'y': 'z'].dtypes)
# test fancy index on index axis
df2 = df.loc[['a3', 'a2'], [True, False, True]]
self.assertIsInstance(df2, DataFrame)
self.assertEqual(df2.shape, (2, 2))
pd.testing.assert_index_equal(df2.index_value.to_pandas(), df.index_value.to_pandas())
self.assertNotEqual(df2.index_value.key, df.index_value.key)
pd.testing.assert_index_equal(df2.columns_value.to_pandas(),
raw.loc[:, [True, False, True]].columns)
pd.testing.assert_series_equal(df2.dtypes, raw.loc[:, [True, False, True]].dtypes)
# test fancy index which is md.Series on index axis
df2 = df.loc[md.Series(['a3', 'a2']), [True, False, True]]
self.assertIsInstance(df2, DataFrame)
self.assertEqual(df2.shape, (2, 2))
pd.testing.assert_index_equal(df2.index_value.to_pandas(), df.index_value.to_pandas())
self.assertNotEqual(df2.index_value.key, df.index_value.key)
pd.testing.assert_index_equal(df2.columns_value.to_pandas(),
raw.loc[:, [True, False, True]].columns)
pd.testing.assert_series_equal(df2.dtypes, raw.loc[:, [True, False, True]].dtypes)
# test fancy index on columns axis
df2 = df.loc[[True, False, True], ['z', 'x', 'y']]
self.assertIsInstance(df2, DataFrame)
self.assertEqual(df2.shape, (2, 3))
pd.testing.assert_index_equal(df2.index_value.to_pandas(), df.index_value.to_pandas())
self.assertNotEqual(df2.index_value.key, df.index_value.key)
pd.testing.assert_index_equal(df2.columns_value.to_pandas(),
raw.loc[:, ['z', 'x', 'y']].columns)
pd.testing.assert_series_equal(df2.dtypes, raw.loc[:, ['z', 'x', 'y']].dtypes)
df2 = df2.tiles()
self.assertEqual(len(df2.chunks), 2)
for c in df2.chunks:
self.assertIsInstance(c, DATAFRAME_CHUNK_TYPE)
pd.testing.assert_index_equal(c.index_value.to_pandas(), df.index_value.to_pandas())
self.assertNotEqual(c.index_value.key, df.index_value.key)
pd.testing.assert_index_equal(c.columns_value.to_pandas(),
raw.loc[:, ['z', 'x', 'y']].columns)
pd.testing.assert_series_equal(c.dtypes, raw.loc[:, ['z', 'x', 'y']].dtypes)
df2 = df.loc[md.Series([True, False, True])]
self.assertIsInstance(df2, DataFrame)
self.assertEqual(df2.shape, (np.nan, 3))
pd.testing.assert_index_equal(df2.index_value.to_pandas(), df.index_value.to_pandas())
self.assertNotEqual(df2.index_value.key, df.index_value.key)
pd.testing.assert_index_equal(df2.columns_value.to_pandas(), raw.columns)
pd.testing.assert_series_equal(df2.dtypes, raw.dtypes)
df2 = df3.loc[md.Series([True, False, True])]
self.assertIsInstance(df2, DataFrame)
self.assertEqual(df2.shape, (np.nan, 3))
self.assertIsInstance(df2.index_value.to_pandas(), type(raw.loc[[True, False, True]].index))
self.assertNotEqual(df2.index_value.key, df3.index_value.key)
pd.testing.assert_index_equal(df2.columns_value.to_pandas(), raw.columns)
pd.testing.assert_series_equal(df2.dtypes, raw.dtypes)
df2 = df3.loc[md.Series([2, 1])]
self.assertIsInstance(df2, DataFrame)
self.assertEqual(df2.shape, (2, 3))
self.assertIsInstance(df2.index_value.to_pandas(), type(raw2.loc[[2, 1]].index))
self.assertNotEqual(df2.index_value.key, df3.index_value.key)
pd.testing.assert_index_equal(df2.columns_value.to_pandas(), raw.columns)
pd.testing.assert_series_equal(df2.dtypes, raw.dtypes)
series2 = series.loc['a2']
self.assertIsInstance(series2, Tensor)
self.assertEqual(series2.shape, ())
self.assertEqual(series2.dtype, s.dtype)
series2 = series.loc[['a2', 'a3']]
self.assertIsInstance(series2, Series)
self.assertEqual(series2.shape, (2,))
self.assertEqual(series2.dtype, s.dtype)
self.assertEqual(series2.name, s.name)
with self.assertRaises(IndexingError):
_ = df.loc['a1', 'z', ...]
with self.assertRaises(NotImplementedError):
_ = df.loc[:, md.Series([True, False, True])]
with self.assertRaises(KeyError):
_ = df.loc[:, ['non_exist']]
def testLocUseIloc(self):
raw = pd.DataFrame([[1, 3, 3], [4, 2, 6], [7, 8, 9]],
columns=['x', 'y', 'z'])
df = md.DataFrame(raw, chunk_size=2)
self.assertIsInstance(df.loc[:3].op, DataFrameIlocGetItem)
self.assertIsInstance(df.loc[1:3].op, DataFrameIlocGetItem)
self.assertIsInstance(df.loc[1].op, DataFrameIlocGetItem)
# negative
self.assertIsInstance(df.loc[:-3].op, DataFrameLocGetItem)
with self.assertRaises(KeyError):
_ = df.loc[-3]
        # second indexer given (not None)
        self.assertIsInstance(df.loc[:3, :'y'].op, DataFrameLocGetItem)
        # second indexer is not a slice
        self.assertIsInstance(df.loc[:3, [True, False, True]].op, DataFrameLocGetItem)
self.assertIsInstance(df.loc[[True, False, True]].op, DataFrameLocGetItem)
raw2 = raw.copy()
raw2.index = pd.RangeIndex(1, 4)
df2 = md.DataFrame(raw2, chunk_size=2)
self.assertIsInstance(df2.loc[:3].op, DataFrameLocGetItem)
self.assertIsInstance(df2.loc['a3':].op, DataFrameLocGetItem)
raw2 = raw.copy()
raw2.index = [f'a{i}' for i in range(3)]
df2 = md.DataFrame(raw2, chunk_size=2)
self.assertIsInstance(df2.loc[:3].op, DataFrameLocGetItem)
def testDataFrameGetitem(self):
data = pd.DataFrame(np.random.rand(10, 5), columns=['c1', 'c2', 'c3', 'c4', 'c5'])
df = md.DataFrame(data, chunk_size=2)
series = df['c3']
self.assertIsInstance(series, Series)
self.assertEqual(series.shape, (10,))
self.assertEqual(series.name, 'c3')
self.assertEqual(series.dtype, data['c3'].dtype)
self.assertEqual(series.index_value, df.index_value)
series = series.tiles()
self.assertEqual(series.nsplits, ((2, 2, 2, 2, 2),))
self.assertEqual(len(series.chunks), 5)
for i, c in enumerate(series.chunks):
self.assertIsInstance(c, SERIES_CHUNK_TYPE)
self.assertEqual(c.index, (i,))
self.assertEqual(c.shape, (2,))
df1 = df[['c1', 'c2', 'c3']]
self.assertIsInstance(df1, DataFrame)
self.assertEqual(df1.shape, (10, 3))
self.assertEqual(df1.index_value, df.index_value)
pd.testing.assert_index_equal(df1.columns_value.to_pandas(), data[['c1', 'c2', 'c3']].columns)
pd.testing.assert_series_equal(df1.dtypes, data[['c1', 'c2', 'c3']].dtypes)
df1 = df1.tiles()
self.assertEqual(df1.nsplits, ((2, 2, 2, 2, 2), (2, 1)))
self.assertEqual(len(df1.chunks), 10)
for i, c in enumerate(df1.chunks[slice(0, 10, 2)]):
self.assertIsInstance(c, DATAFRAME_CHUNK_TYPE)
self.assertEqual(c.index, (i, 0))
self.assertEqual(c.shape, (2, 2))
for i, c in enumerate(df1.chunks[slice(1, 10, 2)]):
self.assertIsInstance(c, DATAFRAME_CHUNK_TYPE)
self.assertEqual(c.index, (i, 1))
self.assertEqual(c.shape, (2, 1))
def testDataFrameGetitemBool(self):
data = pd.DataFrame(np.random.rand(10, 5), columns=['c1', 'c2', 'c3', 'c4', 'c5'])
df = md.DataFrame(data, chunk_size=2)
mask_data1 = data.c1 > 0.5
mask_data2 = data.c1 < 0.5
mask1 = md.Series(mask_data1, chunk_size=2)
mask2 = md.Series(mask_data2, chunk_size=2)
r1 = df[mask1]
r2 = df[mask2]
r3 = df[mask1]
self.assertNotEqual(r1.index_value.key, df.index_value.key)
self.assertNotEqual(r1.index_value.key, mask1.index_value.key)
self.assertEqual(r1.columns_value.key, df.columns_value.key)
self.assertIs(r1.columns_value, df.columns_value)
self.assertNotEqual(r1.index_value.key, r2.index_value.key)
self.assertEqual(r1.columns_value.key, r2.columns_value.key)
self.assertIs(r1.columns_value, r2.columns_value)
self.assertEqual(r1.index_value.key, r3.index_value.key)
self.assertEqual(r1.columns_value.key, r3.columns_value.key)
self.assertIs(r1.columns_value, r3.columns_value)
def testSeriesGetitem(self):
data = pd.Series(np.random.rand(10, ), name='a')
series = md.Series(data, chunk_size=3)
result1 = series[2]
self.assertEqual(result1.shape, ())
result1 = result1.tiles()
self.assertEqual(result1.nsplits, ())
self.assertEqual(len(result1.chunks), 1)
self.assertIsInstance(result1.chunks[0], TENSOR_CHUNK_TYPE)
self.assertEqual(result1.chunks[0].shape, ())
self.assertEqual(result1.chunks[0].dtype, data.dtype)
result2 = series[[4, 5, 1, 2, 3]]
self.assertEqual(result2.shape, (5,))
result2 = result2.tiles()
self.assertEqual(result2.nsplits, ((2, 2, 1),))
self.assertEqual(len(result2.chunks), 3)
self.assertEqual(result2.chunks[0].op.labels, [4, 5])
self.assertEqual(result2.chunks[1].op.labels, [1, 2])
self.assertEqual(result2.chunks[2].op.labels, [3])
data = pd.Series(np.random.rand(10), index=['i' + str(i) for i in range(10)])
series = md.Series(data, chunk_size=3)
result1 = series['i2']
self.assertEqual(result1.shape, ())
result1 = result1.tiles()
self.assertEqual(result1.nsplits, ())
self.assertEqual(result1.chunks[0].dtype, data.dtype)
self.assertTrue(result1.chunks[0].op.labels, ['i2'])
result2 = series[['i2', 'i4']]
self.assertEqual(result2.shape, (2,))
result2 = result2.tiles()
self.assertEqual(result2.nsplits, ((2,),))
self.assertEqual(result2.chunks[0].dtype, data.dtype)
self.assertTrue(result2.chunks[0].op.labels, [['i2', 'i4']])
def testSetitem(self):
data = pd.DataFrame(np.random.rand(10, 2), columns=['c1', 'c2'])
df = md.DataFrame(data, chunk_size=4)
df['new'] = 1
self.assertEqual(df.shape, (10, 3))
pd.testing.assert_series_equal(df.inputs[0].dtypes, data.dtypes)
tiled = df.tiles()
self.assertEqual(tiled.chunks[0].shape, (4, 3))
pd.testing.assert_series_equal(tiled.inputs[0].dtypes, data.dtypes)
self.assertEqual(tiled.chunks[1].shape, (4, 3))
| pd.testing.assert_series_equal(tiled.inputs[0].dtypes, data.dtypes) | pandas.testing.assert_series_equal |
from collections import OrderedDict
import datetime
from datetime import timedelta
from io import StringIO
import json
import os
import numpy as np
import pytest
from pandas.compat import is_platform_32bit, is_platform_windows
import pandas.util._test_decorators as td
import pandas as pd
from pandas import DataFrame, DatetimeIndex, Series, Timestamp, read_json
import pandas._testing as tm
_seriesd = tm.getSeriesData()
_tsd = tm.getTimeSeriesData()
_frame = DataFrame(_seriesd)
_intframe = DataFrame({k: v.astype(np.int64) for k, v in _seriesd.items()})
_tsframe = DataFrame(_tsd)
_cat_frame = _frame.copy()
cat = ["bah"] * 5 + ["bar"] * 5 + ["baz"] * 5 + ["foo"] * (len(_cat_frame) - 15)
_cat_frame.index = pd.CategoricalIndex(cat, name="E")
_cat_frame["E"] = list(reversed(cat))
_cat_frame["sort"] = np.arange(len(_cat_frame), dtype="int64")
_mixed_frame = _frame.copy()
def assert_json_roundtrip_equal(result, expected, orient):
if orient == "records" or orient == "values":
expected = expected.reset_index(drop=True)
if orient == "values":
expected.columns = range(len(expected.columns))
tm.assert_frame_equal(result, expected)
@pytest.mark.filterwarnings("ignore:the 'numpy' keyword is deprecated:FutureWarning")
class TestPandasContainer:
@pytest.fixture(autouse=True)
def setup(self):
self.intframe = _intframe.copy()
self.tsframe = _tsframe.copy()
self.mixed_frame = _mixed_frame.copy()
self.categorical = _cat_frame.copy()
yield
del self.intframe
del self.tsframe
del self.mixed_frame
def test_frame_double_encoded_labels(self, orient):
df = DataFrame(
[["a", "b"], ["c", "d"]],
index=['index " 1', "index / 2"],
columns=["a \\ b", "y / z"],
)
result = read_json(df.to_json(orient=orient), orient=orient)
expected = df.copy()
assert_json_roundtrip_equal(result, expected, orient)
@pytest.mark.parametrize("orient", ["split", "records", "values"])
def test_frame_non_unique_index(self, orient):
df = DataFrame([["a", "b"], ["c", "d"]], index=[1, 1], columns=["x", "y"])
result = read_json(df.to_json(orient=orient), orient=orient)
expected = df.copy()
assert_json_roundtrip_equal(result, expected, orient)
@pytest.mark.parametrize("orient", ["index", "columns"])
def test_frame_non_unique_index_raises(self, orient):
df = DataFrame([["a", "b"], ["c", "d"]], index=[1, 1], columns=["x", "y"])
msg = f"DataFrame index must be unique for orient='{orient}'"
with pytest.raises(ValueError, match=msg):
df.to_json(orient=orient)
@pytest.mark.parametrize("orient", ["split", "values"])
@pytest.mark.parametrize(
"data",
[
[["a", "b"], ["c", "d"]],
[[1.5, 2.5], [3.5, 4.5]],
[[1, 2.5], [3, 4.5]],
[[Timestamp("20130101"), 3.5], [Timestamp("20130102"), 4.5]],
],
)
def test_frame_non_unique_columns(self, orient, data):
df = DataFrame(data, index=[1, 2], columns=["x", "x"])
result = read_json(
df.to_json(orient=orient), orient=orient, convert_dates=["x"]
)
if orient == "values":
expected = pd.DataFrame(data)
if expected.iloc[:, 0].dtype == "datetime64[ns]":
# orient == "values" by default will write Timestamp objects out
# in milliseconds; these are internally stored in nanosecond,
# so divide to get where we need
# TODO: a to_epoch method would also solve; see GH 14772
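                # e.g. Timestamp("20130101").value == 1356998400000000000 (ns),
                # which serialises to 1356998400000 (ms) in the JSON output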
expected.iloc[:, 0] = expected.iloc[:, 0].astype(np.int64) // 1000000
elif orient == "split":
expected = df
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("orient", ["index", "columns", "records"])
def test_frame_non_unique_columns_raises(self, orient):
df = DataFrame([["a", "b"], ["c", "d"]], index=[1, 2], columns=["x", "x"])
msg = f"DataFrame columns must be unique for orient='{orient}'"
with pytest.raises(ValueError, match=msg):
df.to_json(orient=orient)
def test_frame_default_orient(self, float_frame):
assert float_frame.to_json() == float_frame.to_json(orient="columns")
@pytest.mark.parametrize("dtype", [False, float])
@pytest.mark.parametrize("convert_axes", [True, False])
@pytest.mark.parametrize("numpy", [True, False])
def test_roundtrip_simple(self, orient, convert_axes, numpy, dtype, float_frame):
data = float_frame.to_json(orient=orient)
result = pd.read_json(
data, orient=orient, convert_axes=convert_axes, numpy=numpy, dtype=dtype
)
expected = float_frame
assert_json_roundtrip_equal(result, expected, orient)
@pytest.mark.parametrize("dtype", [False, np.int64])
@pytest.mark.parametrize("convert_axes", [True, False])
@pytest.mark.parametrize("numpy", [True, False])
def test_roundtrip_intframe(self, orient, convert_axes, numpy, dtype):
data = self.intframe.to_json(orient=orient)
result = pd.read_json(
data, orient=orient, convert_axes=convert_axes, numpy=numpy, dtype=dtype
)
expected = self.intframe.copy()
if (
numpy
and (is_platform_32bit() or is_platform_windows())
and not dtype
and orient != "split"
):
# TODO: see what is causing roundtrip dtype loss
expected = expected.astype(np.int32)
assert_json_roundtrip_equal(result, expected, orient)
@pytest.mark.parametrize("dtype", [None, np.float64, np.int, "U3"])
@pytest.mark.parametrize("convert_axes", [True, False])
@pytest.mark.parametrize("numpy", [True, False])
def test_roundtrip_str_axes(self, orient, convert_axes, numpy, dtype):
df = DataFrame(
np.zeros((200, 4)),
columns=[str(i) for i in range(4)],
index=[str(i) for i in range(200)],
dtype=dtype,
)
# TODO: do we even need to support U3 dtypes?
if numpy and dtype == "U3" and orient != "split":
pytest.xfail("Can't decode directly to array")
data = df.to_json(orient=orient)
result = pd.read_json(
data, orient=orient, convert_axes=convert_axes, numpy=numpy, dtype=dtype
)
expected = df.copy()
if not dtype:
expected = expected.astype(np.int64)
# index columns, and records orients cannot fully preserve the string
# dtype for axes as the index and column labels are used as keys in
# JSON objects. JSON keys are by definition strings, so there's no way
# to disambiguate whether those keys actually were strings or numeric
# beforehand and numeric wins out.
# TODO: Split should be able to support this
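        # e.g. orient="columns" emits '{"0":{"0":0.0,...}}', so the string key
        # "0" is read back as the integer label 0 once axes are converted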
if convert_axes and (orient in ("split", "index", "columns")):
expected.columns = expected.columns.astype(np.int64)
expected.index = expected.index.astype(np.int64)
elif orient == "records" and convert_axes:
expected.columns = expected.columns.astype(np.int64)
assert_json_roundtrip_equal(result, expected, orient)
@pytest.mark.parametrize("convert_axes", [True, False])
@pytest.mark.parametrize("numpy", [True, False])
def test_roundtrip_categorical(self, orient, convert_axes, numpy):
# TODO: create a better frame to test with and improve coverage
if orient in ("index", "columns"):
pytest.xfail(f"Can't have duplicate index values for orient '{orient}')")
data = self.categorical.to_json(orient=orient)
if numpy and orient in ("records", "values"):
pytest.xfail(f"Orient {orient} is broken with numpy=True")
result = pd.read_json(
data, orient=orient, convert_axes=convert_axes, numpy=numpy
)
expected = self.categorical.copy()
expected.index = expected.index.astype(str) # Categorical not preserved
expected.index.name = None # index names aren't preserved in JSON
if not numpy and orient == "index":
expected = expected.sort_index()
assert_json_roundtrip_equal(result, expected, orient)
@pytest.mark.parametrize("convert_axes", [True, False])
@pytest.mark.parametrize("numpy", [True, False])
def test_roundtrip_empty(self, orient, convert_axes, numpy, empty_frame):
data = empty_frame.to_json(orient=orient)
result = pd.read_json(
data, orient=orient, convert_axes=convert_axes, numpy=numpy
)
expected = empty_frame.copy()
# TODO: both conditions below are probably bugs
if convert_axes:
expected.index = expected.index.astype(float)
expected.columns = expected.columns.astype(float)
if numpy and orient == "values":
expected = expected.reindex([0], axis=1).reset_index(drop=True)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("convert_axes", [True, False])
@pytest.mark.parametrize("numpy", [True, False])
def test_roundtrip_timestamp(self, orient, convert_axes, numpy):
# TODO: improve coverage with date_format parameter
data = self.tsframe.to_json(orient=orient)
result = pd.read_json(
data, orient=orient, convert_axes=convert_axes, numpy=numpy
)
expected = self.tsframe.copy()
if not convert_axes: # one off for ts handling
# DTI gets converted to epoch values
idx = expected.index.astype(np.int64) // 1000000
if orient != "split": # TODO: handle consistently across orients
idx = idx.astype(str)
expected.index = idx
assert_json_roundtrip_equal(result, expected, orient)
@pytest.mark.parametrize("convert_axes", [True, False])
@pytest.mark.parametrize("numpy", [True, False])
def test_roundtrip_mixed(self, orient, convert_axes, numpy):
if numpy and orient != "split":
pytest.xfail("Can't decode directly to array")
index = pd.Index(["a", "b", "c", "d", "e"])
values = {
"A": [0.0, 1.0, 2.0, 3.0, 4.0],
"B": [0.0, 1.0, 0.0, 1.0, 0.0],
"C": ["foo1", "foo2", "foo3", "foo4", "foo5"],
"D": [True, False, True, False, True],
}
df = DataFrame(data=values, index=index)
data = df.to_json(orient=orient)
result = pd.read_json(
data, orient=orient, convert_axes=convert_axes, numpy=numpy
)
expected = df.copy()
expected = expected.assign(**expected.select_dtypes("number").astype(np.int64))
if not numpy and orient == "index":
expected = expected.sort_index()
assert_json_roundtrip_equal(result, expected, orient)
@pytest.mark.parametrize(
"data,msg,orient",
[
('{"key":b:a:d}', "Expected object or value", "columns"),
# too few indices
(
'{"columns":["A","B"],'
'"index":["2","3"],'
'"data":[[1.0,"1"],[2.0,"2"],[null,"3"]]}',
r"Shape of passed values is \(3, 2\), indices imply \(2, 2\)",
"split",
),
# too many columns
(
'{"columns":["A","B","C"],'
'"index":["1","2","3"],'
'"data":[[1.0,"1"],[2.0,"2"],[null,"3"]]}',
"3 columns passed, passed data had 2 columns",
"split",
),
# bad key
(
'{"badkey":["A","B"],'
'"index":["2","3"],'
'"data":[[1.0,"1"],[2.0,"2"],[null,"3"]]}',
r"unexpected key\(s\): badkey",
"split",
),
],
)
def test_frame_from_json_bad_data_raises(self, data, msg, orient):
with pytest.raises(ValueError, match=msg):
read_json(StringIO(data), orient=orient)
@pytest.mark.parametrize("dtype", [True, False])
@pytest.mark.parametrize("convert_axes", [True, False])
@pytest.mark.parametrize("numpy", [True, False])
def test_frame_from_json_missing_data(self, orient, convert_axes, numpy, dtype):
num_df = DataFrame([[1, 2], [4, 5, 6]])
result = read_json(
num_df.to_json(orient=orient),
orient=orient,
convert_axes=convert_axes,
dtype=dtype,
)
assert np.isnan(result.iloc[0, 2])
obj_df = DataFrame([["1", "2"], ["4", "5", "6"]])
result = read_json(
obj_df.to_json(orient=orient),
orient=orient,
convert_axes=convert_axes,
dtype=dtype,
)
if not dtype: # TODO: Special case for object data; maybe a bug?
assert result.iloc[0, 2] is None
else:
assert np.isnan(result.iloc[0, 2])
@pytest.mark.parametrize("inf", [np.inf, np.NINF])
@pytest.mark.parametrize("dtype", [True, False])
def test_frame_infinity(self, orient, inf, dtype):
# infinities get mapped to nulls which get mapped to NaNs during
# deserialisation
df = DataFrame([[1, 2], [4, 5, 6]])
df.loc[0, 2] = inf
result = read_json(df.to_json(), dtype=dtype)
assert np.isnan(result.iloc[0, 2])
@pytest.mark.skipif(
is_platform_32bit(), reason="not compliant on 32-bit, xref #15865"
)
@pytest.mark.parametrize(
"value,precision,expected_val",
[
(0.95, 1, 1.0),
(1.95, 1, 2.0),
(-1.95, 1, -2.0),
(0.995, 2, 1.0),
(0.9995, 3, 1.0),
(0.99999999999999944, 15, 1.0),
],
)
def test_frame_to_json_float_precision(self, value, precision, expected_val):
df = pd.DataFrame([dict(a_float=value)])
encoded = df.to_json(double_precision=precision)
assert encoded == f'{{"a_float":{{"0":{expected_val}}}}}'
def test_frame_to_json_except(self):
df = DataFrame([1, 2, 3])
msg = "Invalid value 'garbage' for option 'orient'"
with pytest.raises(ValueError, match=msg):
df.to_json(orient="garbage")
def test_frame_empty(self):
df = DataFrame(columns=["jim", "joe"])
assert not df._is_mixed_type
tm.assert_frame_equal(
read_json(df.to_json(), dtype=dict(df.dtypes)), df, check_index_type=False
)
# GH 7445
result = pd.DataFrame({"test": []}, index=[]).to_json(orient="columns")
expected = '{"test":{}}'
assert result == expected
def test_frame_empty_mixedtype(self):
# mixed type
df = DataFrame(columns=["jim", "joe"])
df["joe"] = df["joe"].astype("i8")
assert df._is_mixed_type
tm.assert_frame_equal(
read_json(df.to_json(), dtype=dict(df.dtypes)), df, check_index_type=False
)
def test_frame_mixedtype_orient(self): # GH10289
vals = [
[10, 1, "foo", 0.1, 0.01],
[20, 2, "bar", 0.2, 0.02],
[30, 3, "baz", 0.3, 0.03],
[40, 4, "qux", 0.4, 0.04],
]
df = DataFrame(
vals, index=list("abcd"), columns=["1st", "2nd", "3rd", "4th", "5th"]
)
assert df._is_mixed_type
right = df.copy()
for orient in ["split", "index", "columns"]:
inp = df.to_json(orient=orient)
left = read_json(inp, orient=orient, convert_axes=False)
tm.assert_frame_equal(left, right)
right.index = np.arange(len(df))
inp = df.to_json(orient="records")
left = read_json(inp, orient="records", convert_axes=False)
tm.assert_frame_equal(left, right)
right.columns = np.arange(df.shape[1])
inp = df.to_json(orient="values")
left = read_json(inp, orient="values", convert_axes=False)
tm.assert_frame_equal(left, right)
def test_v12_compat(self, datapath):
df = DataFrame(
[
[1.56808523, 0.65727391, 1.81021139, -0.17251653],
[-0.2550111, -0.08072427, -0.03202878, -0.17581665],
[1.51493992, 0.11805825, 1.629455, -1.31506612],
[-0.02765498, 0.44679743, 0.33192641, -0.27885413],
[0.05951614, -2.69652057, 1.28163262, 0.34703478],
],
columns=["A", "B", "C", "D"],
index=pd.date_range("2000-01-03", "2000-01-07"),
)
df["date"] = pd.Timestamp("19920106 18:21:32.12")
df.iloc[3, df.columns.get_loc("date")] = pd.Timestamp("20130101")
df["modified"] = df["date"]
df.iloc[1, df.columns.get_loc("modified")] = pd.NaT
dirpath = datapath("io", "json", "data")
v12_json = os.path.join(dirpath, "tsframe_v012.json")
df_unser = pd.read_json(v12_json)
tm.assert_frame_equal(df, df_unser)
df_iso = df.drop(["modified"], axis=1)
v12_iso_json = os.path.join(dirpath, "tsframe_iso_v012.json")
df_unser_iso = pd.read_json(v12_iso_json)
tm.assert_frame_equal(df_iso, df_unser_iso)
def test_blocks_compat_GH9037(self):
index = pd.date_range("20000101", periods=10, freq="H")
df_mixed = DataFrame(
OrderedDict(
float_1=[
-0.92077639,
0.77434435,
1.25234727,
0.61485564,
-0.60316077,
0.24653374,
0.28668979,
-2.51969012,
0.95748401,
-1.02970536,
],
int_1=[
19680418,
75337055,
99973684,
65103179,
79373900,
40314334,
21290235,
4991321,
41903419,
16008365,
],
str_1=[
"78c608f1",
"64a99743",
"13d2ff52",
"ca7f4af2",
"97236474",
"bde7e214",
"1a6bde47",
"b1190be5",
"7a669144",
"8d64d068",
],
float_2=[
-0.0428278,
-1.80872357,
3.36042349,
-0.7573685,
-0.48217572,
0.86229683,
1.08935819,
0.93898739,
-0.03030452,
1.43366348,
],
str_2=[
"14f04af9",
"d085da90",
"4bcfac83",
"81504caf",
"2ffef4a9",
"08e2f5c4",
"07e1af03",
"addbd4a7",
"1f6a09ba",
"4bfc4d87",
],
int_2=[
86967717,
98098830,
51927505,
20372254,
12601730,
20884027,
34193846,
10561746,
24867120,
76131025,
],
),
index=index,
)
# JSON deserialisation always creates unicode strings
df_mixed.columns = df_mixed.columns.astype("unicode")
df_roundtrip = pd.read_json(df_mixed.to_json(orient="split"), orient="split")
tm.assert_frame_equal(
df_mixed,
df_roundtrip,
check_index_type=True,
check_column_type=True,
by_blocks=True,
check_exact=True,
)
def test_frame_nonprintable_bytes(self):
# GH14256: failing column caused segfaults, if it is not the last one
class BinaryThing:
def __init__(self, hexed):
self.hexed = hexed
self.binary = bytes.fromhex(hexed)
def __str__(self) -> str:
return self.hexed
hexed = "574b4454ba8c5eb4f98a8f45"
binthing = BinaryThing(hexed)
# verify the proper conversion of printable content
df_printable = DataFrame({"A": [binthing.hexed]})
assert df_printable.to_json() == f'{{"A":{{"0":"{hexed}"}}}}'
# check if non-printable content throws appropriate Exception
df_nonprintable = DataFrame({"A": [binthing]})
msg = "Unsupported UTF-8 sequence length when encoding string"
with pytest.raises(OverflowError, match=msg):
df_nonprintable.to_json()
# the same with multiple columns threw segfaults
df_mixed = DataFrame({"A": [binthing], "B": [1]}, columns=["A", "B"])
with pytest.raises(OverflowError):
df_mixed.to_json()
# default_handler should resolve exceptions for non-string types
result = df_nonprintable.to_json(default_handler=str)
expected = f'{{"A":{{"0":"{hexed}"}}}}'
assert result == expected
assert (
df_mixed.to_json(default_handler=str)
== f'{{"A":{{"0":"{hexed}"}},"B":{{"0":1}}}}'
)
def test_label_overflow(self):
# GH14256: buffer length not checked when writing label
result = pd.DataFrame({"bar" * 100000: [1], "foo": [1337]}).to_json()
expected = f'{{"{"bar" * 100000}":{{"0":1}},"foo":{{"0":1337}}}}'
assert result == expected
def test_series_non_unique_index(self):
s = Series(["a", "b"], index=[1, 1])
msg = "Series index must be unique for orient='index'"
with pytest.raises(ValueError, match=msg):
s.to_json(orient="index")
tm.assert_series_equal(
s, read_json(s.to_json(orient="split"), orient="split", typ="series")
)
unser = read_json(s.to_json(orient="records"), orient="records", typ="series")
tm.assert_numpy_array_equal(s.values, unser.values)
def test_series_default_orient(self, string_series):
assert string_series.to_json() == string_series.to_json(orient="index")
@pytest.mark.parametrize("numpy", [True, False])
def test_series_roundtrip_simple(self, orient, numpy, string_series):
data = string_series.to_json(orient=orient)
result = pd.read_json(data, typ="series", orient=orient, numpy=numpy)
expected = string_series
if orient in ("values", "records"):
expected = expected.reset_index(drop=True)
if orient != "split":
expected.name = None
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize("dtype", [False, None])
@pytest.mark.parametrize("numpy", [True, False])
def test_series_roundtrip_object(self, orient, numpy, dtype, object_series):
data = object_series.to_json(orient=orient)
result = pd.read_json(
data, typ="series", orient=orient, numpy=numpy, dtype=dtype
)
expected = object_series
if orient in ("values", "records"):
expected = expected.reset_index(drop=True)
if orient != "split":
expected.name = None
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize("numpy", [True, False])
def test_series_roundtrip_empty(self, orient, numpy, empty_series):
data = empty_series.to_json(orient=orient)
result = pd.read_json(data, typ="series", orient=orient, numpy=numpy)
expected = empty_series
if orient in ("values", "records"):
expected = expected.reset_index(drop=True)
else:
expected.index = expected.index.astype(float)
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize("numpy", [True, False])
def test_series_roundtrip_timeseries(self, orient, numpy, datetime_series):
data = datetime_series.to_json(orient=orient)
result = pd.read_json(data, typ="series", orient=orient, numpy=numpy)
expected = datetime_series
if orient in ("values", "records"):
expected = expected.reset_index(drop=True)
if orient != "split":
expected.name = None
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize("dtype", [np.float64, np.int])
@pytest.mark.parametrize("numpy", [True, False])
def test_series_roundtrip_numeric(self, orient, numpy, dtype):
s = Series(range(6), index=["a", "b", "c", "d", "e", "f"])
data = s.to_json(orient=orient)
result = pd.read_json(data, typ="series", orient=orient, numpy=numpy)
expected = s.copy()
if orient in ("values", "records"):
expected = expected.reset_index(drop=True)
tm.assert_series_equal(result, expected)
def test_series_to_json_except(self):
s = Series([1, 2, 3])
msg = "Invalid value 'garbage' for option 'orient'"
with pytest.raises(ValueError, match=msg):
s.to_json(orient="garbage")
def test_series_from_json_precise_float(self):
s = Series([4.56, 4.56, 4.56])
result = read_json(s.to_json(), typ="series", precise_float=True)
tm.assert_series_equal(result, s, check_index_type=False)
def test_series_with_dtype(self):
# GH 21986
s = Series([4.56, 4.56, 4.56])
result = read_json(s.to_json(), typ="series", dtype=np.int64)
expected = Series([4] * 3)
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize(
"dtype,expected",
[
(True, Series(["2000-01-01"], dtype="datetime64[ns]")),
(False, Series([946684800000])),
],
)
def test_series_with_dtype_datetime(self, dtype, expected):
s = Series(["2000-01-01"], dtype="datetime64[ns]")
data = s.to_json()
result = pd.read_json(data, typ="series", dtype=dtype)
tm.assert_series_equal(result, expected)
def test_frame_from_json_precise_float(self):
df = DataFrame([[4.56, 4.56, 4.56], [4.56, 4.56, 4.56]])
result = read_json(df.to_json(), precise_float=True)
tm.assert_frame_equal(
result, df, check_index_type=False, check_column_type=False
)
def test_typ(self):
s = Series(range(6), index=["a", "b", "c", "d", "e", "f"], dtype="int64")
result = read_json(s.to_json(), typ=None)
tm.assert_series_equal(result, s)
def test_reconstruction_index(self):
df = DataFrame([[1, 2, 3], [4, 5, 6]])
result = read_json(df.to_json())
tm.assert_frame_equal(result, df)
df = DataFrame({"a": [1, 2, 3], "b": [4, 5, 6]}, index=["A", "B", "C"])
result = read_json(df.to_json())
tm.assert_frame_equal(result, df)
def test_path(self, float_frame):
with tm.ensure_clean("test.json") as path:
for df in [
float_frame,
self.intframe,
self.tsframe,
self.mixed_frame,
]:
df.to_json(path)
read_json(path)
def test_axis_dates(self, datetime_series):
# frame
json = self.tsframe.to_json()
result = read_json(json)
tm.assert_frame_equal(result, self.tsframe)
# series
json = datetime_series.to_json()
result = read_json(json, typ="series")
tm.assert_series_equal(result, datetime_series, check_names=False)
assert result.name is None
def test_convert_dates(self, datetime_series):
# frame
df = self.tsframe.copy()
df["date"] = Timestamp("20130101")
json = df.to_json()
result = read_json(json)
tm.assert_frame_equal(result, df)
df["foo"] = 1.0
json = df.to_json(date_unit="ns")
result = read_json(json, convert_dates=False)
expected = df.copy()
expected["date"] = expected["date"].values.view("i8")
expected["foo"] = expected["foo"].astype("int64")
tm.assert_frame_equal(result, expected)
# series
ts = Series(Timestamp("20130101"), index=datetime_series.index)
json = ts.to_json()
result = read_json(json, typ="series")
tm.assert_series_equal(result, ts)
@pytest.mark.parametrize("date_format", ["epoch", "iso"])
@pytest.mark.parametrize("as_object", [True, False])
@pytest.mark.parametrize(
"date_typ", [datetime.date, datetime.datetime, pd.Timestamp]
)
def test_date_index_and_values(self, date_format, as_object, date_typ):
data = [date_typ(year=2020, month=1, day=1), pd.NaT]
if as_object:
data.append("a")
ser = pd.Series(data, index=data)
result = ser.to_json(date_format=date_format)
if date_format == "epoch":
expected = '{"1577836800000":1577836800000,"null":null}'
else:
expected = (
'{"2020-01-01T00:00:00.000Z":"2020-01-01T00:00:00.000Z","null":null}'
)
if as_object:
expected = expected.replace("}", ',"a":"a"}')
assert result == expected
@pytest.mark.parametrize(
"infer_word",
[
"trade_time",
"date",
"datetime",
"sold_at",
"modified",
"timestamp",
"timestamps",
],
)
def test_convert_dates_infer(self, infer_word):
# GH10747
from pandas.io.json import dumps
data = [{"id": 1, infer_word: 1036713600000}, {"id": 2}]
expected = DataFrame(
[[1, Timestamp("2002-11-08")], [2, pd.NaT]], columns=["id", infer_word]
)
result = read_json(dumps(data))[["id", infer_word]]
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize(
"date,date_unit",
[
("20130101 20:43:42.123", None),
("20130101 20:43:42", "s"),
("20130101 20:43:42.123", "ms"),
("20130101 20:43:42.123456", "us"),
("20130101 20:43:42.123456789", "ns"),
],
)
def test_date_format_frame(self, date, date_unit):
df = self.tsframe.copy()
df["date"] = Timestamp(date)
df.iloc[1, df.columns.get_loc("date")] = pd.NaT
df.iloc[5, df.columns.get_loc("date")] = pd.NaT
if date_unit:
json = df.to_json(date_format="iso", date_unit=date_unit)
else:
json = df.to_json(date_format="iso")
result = read_json(json)
expected = df.copy()
expected.index = expected.index.tz_localize("UTC")
expected["date"] = expected["date"].dt.tz_localize("UTC")
tm.assert_frame_equal(result, expected)
def test_date_format_frame_raises(self):
df = self.tsframe.copy()
msg = "Invalid value 'foo' for option 'date_unit'"
with pytest.raises(ValueError, match=msg):
df.to_json(date_format="iso", date_unit="foo")
@pytest.mark.parametrize(
"date,date_unit",
[
("20130101 20:43:42.123", None),
("20130101 20:43:42", "s"),
("20130101 20:43:42.123", "ms"),
("20130101 20:43:42.123456", "us"),
("20130101 20:43:42.123456789", "ns"),
],
)
def test_date_format_series(self, date, date_unit, datetime_series):
ts = Series(Timestamp(date), index=datetime_series.index)
ts.iloc[1] = pd.NaT
ts.iloc[5] = pd.NaT
if date_unit:
json = ts.to_json(date_format="iso", date_unit=date_unit)
else:
json = ts.to_json(date_format="iso")
result = read_json(json, typ="series")
expected = ts.copy()
expected.index = expected.index.tz_localize("UTC")
expected = expected.dt.tz_localize("UTC")
tm.assert_series_equal(result, expected)
def test_date_format_series_raises(self, datetime_series):
ts = Series(Timestamp("20130101 20:43:42.123"), index=datetime_series.index)
msg = "Invalid value 'foo' for option 'date_unit'"
with pytest.raises(ValueError, match=msg):
ts.to_json(date_format="iso", date_unit="foo")
@pytest.mark.parametrize("unit", ["s", "ms", "us", "ns"])
def test_date_unit(self, unit):
df = self.tsframe.copy()
df["date"] = Timestamp("20130101 20:43:42")
dl = df.columns.get_loc("date")
df.iloc[1, dl] = Timestamp("19710101 20:43:42")
df.iloc[2, dl] = Timestamp("21460101 20:43:42")
df.iloc[4, dl] = pd.NaT
json = df.to_json(date_format="epoch", date_unit=unit)
# force date unit
result = read_json(json, date_unit=unit)
tm.assert_frame_equal(result, df)
# detect date unit
result = read_json(json, date_unit=None)
tm.assert_frame_equal(result, df)
def test_weird_nested_json(self):
# this used to core dump the parser
s = r"""{
"status": "success",
"data": {
"posts": [
{
"id": 1,
"title": "A blog post",
"body": "Some useful content"
},
{
"id": 2,
"title": "Another blog post",
"body": "More content"
}
]
}
}"""
read_json(s)
def test_doc_example(self):
dfj2 = DataFrame(np.random.randn(5, 2), columns=list("AB"))
dfj2["date"] = Timestamp("20130101")
dfj2["ints"] = range(5)
dfj2["bools"] = True
dfj2.index = pd.date_range("20130101", periods=5)
json = dfj2.to_json()
result = read_json(json, dtype={"ints": np.int64, "bools": np.bool_})
tm.assert_frame_equal(result, result)
def test_misc_example(self):
# parsing unordered input fails
result = read_json('[{"a": 1, "b": 2}, {"b":2, "a" :1}]', numpy=True)
expected = DataFrame([[1, 2], [1, 2]], columns=["a", "b"])
error_msg = """DataFrame\\.index are different
DataFrame\\.index values are different \\(100\\.0 %\\)
\\[left\\]: Index\\(\\['a', 'b'\\], dtype='object'\\)
\\[right\\]: RangeIndex\\(start=0, stop=2, step=1\\)"""
with pytest.raises(AssertionError, match=error_msg):
tm.assert_frame_equal(result, expected, check_index_type=False)
result = read_json('[{"a": 1, "b": 2}, {"b":2, "a" :1}]')
expected = DataFrame([[1, 2], [1, 2]], columns=["a", "b"])
tm.assert_frame_equal(result, expected)
@tm.network
@pytest.mark.single
def test_round_trip_exception_(self):
# GH 3867
csv = "https://raw.github.com/hayd/lahman2012/master/csvs/Teams.csv"
df = pd.read_csv(csv)
s = df.to_json()
result = pd.read_json(s)
tm.assert_frame_equal(result.reindex(index=df.index, columns=df.columns), df)
@tm.network
@pytest.mark.single
@pytest.mark.parametrize(
"field,dtype",
[
["created_at", pd.DatetimeTZDtype(tz="UTC")],
["closed_at", "datetime64[ns]"],
["updated_at", pd.DatetimeTZDtype(tz="UTC")],
],
)
def test_url(self, field, dtype):
url = "https://api.github.com/repos/pandas-dev/pandas/issues?per_page=5" # noqa
result = read_json(url, convert_dates=True)
assert result[field].dtype == dtype
def test_timedelta(self):
converter = lambda x: pd.to_timedelta(x, unit="ms")
s = Series([timedelta(23), timedelta(seconds=5)])
assert s.dtype == "timedelta64[ns]"
result = pd.read_json(s.to_json(), typ="series").apply(converter)
tm.assert_series_equal(result, s)
s = Series([timedelta(23), timedelta(seconds=5)], index=pd.Index([0, 1]))
assert s.dtype == "timedelta64[ns]"
result = pd.read_json(s.to_json(), typ="series").apply(converter)
tm.assert_series_equal(result, s)
frame = DataFrame([timedelta(23), timedelta(seconds=5)])
assert frame[0].dtype == "timedelta64[ns]"
tm.assert_frame_equal(frame, pd.read_json(frame.to_json()).apply(converter))
frame = DataFrame(
{
"a": [timedelta(days=23), timedelta(seconds=5)],
"b": [1, 2],
"c": pd.date_range(start="20130101", periods=2),
}
)
result = pd.read_json(frame.to_json(date_unit="ns"))
result["a"] = pd.to_timedelta(result.a, unit="ns")
result["c"] = pd.to_datetime(result.c)
tm.assert_frame_equal(frame, result)
def test_mixed_timedelta_datetime(self):
frame = DataFrame(
{"a": [timedelta(23), pd.Timestamp("20130101")]}, dtype=object
)
expected = DataFrame(
{"a": [pd.Timedelta(frame.a[0]).value, pd.Timestamp(frame.a[1]).value]}
)
result = pd.read_json(frame.to_json(date_unit="ns"), dtype={"a": "int64"})
tm.assert_frame_equal(result, expected, check_index_type=False)
@pytest.mark.parametrize("as_object", [True, False])
@pytest.mark.parametrize("date_format", ["iso", "epoch"])
@pytest.mark.parametrize("timedelta_typ", [pd.Timedelta, timedelta])
def test_timedelta_to_json(self, as_object, date_format, timedelta_typ):
# GH28156: to_json not correctly formatting Timedelta
data = [timedelta_typ(days=1), timedelta_typ(days=2), pd.NaT]
if as_object:
data.append("a")
ser = pd.Series(data, index=data)
if date_format == "iso":
expected = (
'{"P1DT0H0M0S":"P1DT0H0M0S","P2DT0H0M0S":"P2DT0H0M0S","null":null}'
)
else:
expected = '{"86400000":86400000,"172800000":172800000,"null":null}'
if as_object:
expected = expected.replace("}", ',"a":"a"}')
result = ser.to_json(date_format=date_format)
assert result == expected
def test_default_handler(self):
value = object()
frame = DataFrame({"a": [7, value]})
expected = DataFrame({"a": [7, str(value)]})
result = pd.read_json(frame.to_json(default_handler=str))
tm.assert_frame_equal(expected, result, check_index_type=False)
def test_default_handler_indirect(self):
from pandas.io.json import dumps
def default(obj):
if isinstance(obj, complex):
return [("mathjs", "Complex"), ("re", obj.real), ("im", obj.imag)]
return str(obj)
df_list = [
9,
DataFrame(
{"a": [1, "STR", complex(4, -5)], "b": [float("nan"), None, "N/A"]},
columns=["a", "b"],
),
]
expected = (
'[9,[[1,null],["STR",null],[[["mathjs","Complex"],'
'["re",4.0],["im",-5.0]],"N\\/A"]]]'
)
assert dumps(df_list, default_handler=default, orient="values") == expected
def test_default_handler_numpy_unsupported_dtype(self):
# GH12554 to_json raises 'Unhandled numpy dtype 15'
df = DataFrame(
{"a": [1, 2.3, complex(4, -5)], "b": [float("nan"), None, complex(1.2, 0)]},
columns=["a", "b"],
)
expected = (
'[["(1+0j)","(nan+0j)"],'
'["(2.3+0j)","(nan+0j)"],'
'["(4-5j)","(1.2+0j)"]]'
)
assert df.to_json(default_handler=str, orient="values") == expected
def test_default_handler_raises(self):
msg = "raisin"
def my_handler_raises(obj):
raise TypeError(msg)
with pytest.raises(TypeError, match=msg):
DataFrame({"a": [1, 2, object()]}).to_json(
default_handler=my_handler_raises
)
with pytest.raises(TypeError, match=msg):
DataFrame({"a": [1, 2, complex(4, -5)]}).to_json(
default_handler=my_handler_raises
)
def test_categorical(self):
# GH4377 df.to_json segfaults with non-ndarray blocks
df = DataFrame({"A": ["a", "b", "c", "a", "b", "b", "a"]})
df["B"] = df["A"]
expected = df.to_json()
df["B"] = df["A"].astype("category")
assert expected == df.to_json()
s = df["A"]
sc = df["B"]
assert s.to_json() == sc.to_json()
def test_datetime_tz(self):
# GH4377 df.to_json segfaults with non-ndarray blocks
tz_range = pd.date_range("20130101", periods=3, tz="US/Eastern")
tz_naive = tz_range.tz_convert("utc").tz_localize(None)
df = DataFrame({"A": tz_range, "B": pd.date_range("20130101", periods=3)})
df_naive = df.copy()
df_naive["A"] = tz_naive
expected = df_naive.to_json()
assert expected == df.to_json()
stz = Series(tz_range)
s_naive = Series(tz_naive)
assert stz.to_json() == s_naive.to_json()
def test_sparse(self):
# GH4377 df.to_json segfaults with non-ndarray blocks
df = pd.DataFrame(np.random.randn(10, 4))
df.loc[:8] = np.nan
sdf = df.astype("Sparse")
expected = df.to_json()
assert expected == sdf.to_json()
s = pd.Series(np.random.randn(10))
s.loc[:8] = np.nan
ss = s.astype("Sparse")
expected = s.to_json()
assert expected == ss.to_json()
@pytest.mark.parametrize(
"ts",
[
Timestamp("2013-01-10 05:00:00Z"),
Timestamp("2013-01-10 00:00:00", tz="US/Eastern"),
Timestamp("2013-01-10 00:00:00-0500"),
],
)
def test_tz_is_utc(self, ts):
from pandas.io.json import dumps
exp = '"2013-01-10T05:00:00.000Z"'
assert dumps(ts, iso_dates=True) == exp
dt = ts.to_pydatetime()
assert dumps(dt, iso_dates=True) == exp
@pytest.mark.parametrize(
"tz_range",
[
pd.date_range("2013-01-01 05:00:00Z", periods=2),
pd.date_range("2013-01-01 00:00:00", periods=2, tz="US/Eastern"),
pd.date_range("2013-01-01 00:00:00-0500", periods=2),
],
)
def test_tz_range_is_utc(self, tz_range):
from pandas.io.json import dumps
exp = '["2013-01-01T05:00:00.000Z","2013-01-02T05:00:00.000Z"]'
dfexp = (
'{"DT":{'
'"0":"2013-01-01T05:00:00.000Z",'
'"1":"2013-01-02T05:00:00.000Z"}}'
)
assert dumps(tz_range, iso_dates=True) == exp
dti = pd.DatetimeIndex(tz_range)
assert dumps(dti, iso_dates=True) == exp
df = DataFrame({"DT": dti})
result = dumps(df, iso_dates=True)
assert result == dfexp
def test_read_inline_jsonl(self):
# GH9180
result = read_json('{"a": 1, "b": 2}\n{"b":2, "a" :1}\n', lines=True)
expected = DataFrame([[1, 2], [1, 2]], columns=["a", "b"])
tm.assert_frame_equal(result, expected)
@td.skip_if_not_us_locale
def test_read_s3_jsonl(self, s3_resource):
# GH17200
result = read_json("s3n://pandas-test/items.jsonl", lines=True)
expected = DataFrame([[1, 2], [1, 2]], columns=["a", "b"])
tm.assert_frame_equal(result, expected)
def test_read_local_jsonl(self):
# GH17200
with tm.ensure_clean("tmp_items.json") as path:
with open(path, "w") as infile:
infile.write('{"a": 1, "b": 2}\n{"b":2, "a" :1}\n')
result = read_json(path, lines=True)
expected = DataFrame([[1, 2], [1, 2]], columns=["a", "b"])
tm.assert_frame_equal(result, expected)
def test_read_jsonl_unicode_chars(self):
# GH15132: non-ascii unicode characters
# \u201d == RIGHT DOUBLE QUOTATION MARK
# simulate file handle
json = '{"a": "foo”", "b": "bar"}\n{"a": "foo", "b": "bar"}\n'
json = StringIO(json)
result = read_json(json, lines=True)
expected = DataFrame([["foo\u201d", "bar"], ["foo", "bar"]], columns=["a", "b"])
tm.assert_frame_equal(result, expected)
# simulate string
json = '{"a": "foo”", "b": "bar"}\n{"a": "foo", "b": "bar"}\n'
result = read_json(json, lines=True)
expected = DataFrame([["foo\u201d", "bar"], ["foo", "bar"]], columns=["a", "b"])
| tm.assert_frame_equal(result, expected) | pandas._testing.assert_frame_equal |
# =========================================================================== #
# INDEPENDENCE MODULE #
# =========================================================================== #
'''Module for analyzing independence between variables.'''
# %%
# --------------------------------------------------------------------------- #
# LIBRARIES #
# --------------------------------------------------------------------------- #
import collections
from collections import OrderedDict
import itertools
from itertools import combinations
from itertools import product
import math
import textwrap
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import scipy
from scipy import stats
import scikit_posthocs as sp
import statsmodels.api as sm
import statsmodels.formula.api as smf
from statsmodels.formula.api import ols
from statsmodels.stats.anova import anova_lm
# %%
# ---------------------------------------------------------------------------- #
# CORRELATION #
# ---------------------------------------------------------------------------- #
class Correlation:
'''Class that computes the pairwise correlation between numeric variables
and renders a heatmap
'''
def __init__(self):
        self._corr = None
def test(self, df, method='pearson'):
self._corr = df.corr(method)
return(self._corr)
def pairwise(self, df, x, y, method='pearson', threshold=None):
r_tests = pd.DataFrame()
for xs, ys in zip(x,y):
r = df[xs].corr(df[ys])
df_r = pd.DataFrame({'x':xs, 'y':ys}, index=[0])
df_r['r'] = r
df_r['r_abs'] = np.absolute(r)
df_r['strength'] = np.where(df_r.r_abs<0.2, 'Very Weak',
np.where(df_r.r_abs<0.4, 'Weak',
np.where(df_r.r_abs<0.6, "Moderate",
np.where(df_r.r_abs<0.8, "Strong", "Very Strong"))))
df_r['direction'] = np.where(df_r.r <0, "Negative", "Positive")
r_tests = pd.concat([r_tests, df_r], axis=0)
r_tests = r_tests.sort_values(by='r_abs', ascending=False)
if threshold:
r_tests = r_tests[r_tests.r_abs > threshold]
return(r_tests)
def corrtable(self, threshold=None):
r_tests = pd.DataFrame()
cols = self._corr.columns.tolist()
for i in range(len(cols)):
for j in range(len(cols)):
if i != j:
                    df_r = pd.DataFrame({'x': cols[i], 'y': cols[j], 'r': self._corr.iloc[i, j],
                                         'r_abs': np.absolute(self._corr.iloc[i, j])}, index=[0])
df_r['strength'] = np.where(df_r.r_abs<0.2, 'Very Weak',
np.where(df_r.r_abs<0.4, 'Weak',
np.where(df_r.r_abs<0.6, "Moderate",
np.where(df_r.r_abs<0.8, "Strong", "Very Strong"))))
df_r['direction'] = np.where(df_r.r <0, "Negative", "Positive")
r_tests = pd.concat([r_tests, df_r], axis=0)
r_tests = r_tests.sort_values(by='r_abs', ascending=False)
if threshold:
r_tests = r_tests[r_tests.r_abs > threshold]
return(r_tests)
def corrplot(self):
sns.heatmap(self._corr, xticklabels=self._corr.columns,
yticklabels=self._corr.columns)
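# Usage sketch for Correlation (illustrative only; `df` and the column names
# 'price' and 'area' below are hypothetical, not part of this module):
#
#   corr = Correlation()
#   corr.test(df)                                # correlation matrix via df.corr
#   corr.pairwise(df, x=['price'], y=['area'])   # labelled pairwise r values
#   corr.corrtable(threshold=0.5)                # pairs with |r| above 0.5
#   corr.corrplot()                              # seaborn heatmap of the matrix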
# ---------------------------------------------------------------------------- #
# INDEPENDENCE #
# ---------------------------------------------------------------------------- #
class Independence:
"Class that performs a test of independence"
def __init__(self):
self._sig = 0.05
self._x2 = 0
self._p = 0
self._df = 0
self._obs = []
self._exp = []
def summary(self):
print("\n*", "=" * 78, "*")
print('{:^80}'.format("Pearson's Chi-squared Test of Independence"))
print('{:^80}'.format('Data'))
print('{:^80}'.format("x = " + self._xvar + " y = " + self._yvar + "\n"))
print('{:^80}'.format('Observed Frequencies'))
visual.print_df(self._obs)
print("\n", '{:^80}'.format('Expected Frequencies'))
visual.print_df(self._exp)
results = ("Pearson's chi-squared statistic = " + str(round(self._x2, 3)) + ", Df = " +
str(self._df) + ", p-value = " + '{0:1.2e}'.format(round(self._p, 3)))
print("\n", '{:^80}'.format(results))
print("\n*", "=" * 78, "*")
def post_hoc(self, rowwise=True, verbose=False):
dfs = []
if rowwise:
rows = range(0, len(self._obs))
for pair in list(combinations(rows, 2)):
ct = self._obs.iloc[[pair[0], pair[1]], ]
levels = ct.index.values
x2, p, dof, exp = stats.chi2_contingency(ct)
df = pd.DataFrame({'level_1': levels[0],
'level_2': levels[1],
'x2': x2,
'N': ct.values.sum(),
'p_value': p}, index=[0])
dfs.append(df)
self._post_hoc_tests = pd.concat(dfs)
else:
cols = range(0, len(self._obs.columns.values))
for pair in list(combinations(cols, 2)):
ct = self._obs.iloc[:, [pair[0], pair[1]]]
levels = ct.columns.values
x2, p, dof, exp = stats.chi2_contingency(ct)
df = pd.DataFrame({'level_1': levels[0],
'level_2': levels[1],
'x2': x2,
'N': ct.values.sum(),
'p_value': p}, index=[0])
dfs.append(df)
self._post_hoc_tests = pd.concat(dfs)
if (verbose):
visual.print_df(self._post_hoc_tests)
return(self._post_hoc_tests)
def test(self, x, y, sig=0.05):
self._x = x
self._y = y
self._xvar = x.name
self._yvar = y.name
self._n = x.shape[0]
self._sig = sig
ct = pd.crosstab(x, y)
x2, p, dof, exp = stats.chi2_contingency(ct)
self._x2 = x2
self._p = p
self._df = dof
self._obs = ct
self._exp = pd.DataFrame(exp).set_index(ct.index)
self._exp.columns = ct.columns
if p < sig:
self._result = 'significant'
self._hypothesis = 'reject'
else:
self._result = 'not significant'
self._hypothesis = 'fail to reject'
return x2, p, dof, exp
def report(self, verbose=False):
"Returns or prints results in APA format"
tup = ("A Chi-square test of independence was conducted to "
"examine the relation between " + self._xvar + " and " + self._yvar + ". "
"The relation between the variables was " + self._result + ", "
"X2(" + str(self._df) + ", N = ", str(self._n) + ") = " +
str(round(self._x2, 2)) + ", p = " + '{0:1.2e}'.format(round(self._p, 3)))
self._report = ''.join(tup)
wrapper = textwrap.TextWrapper(width=80)
lines = wrapper.wrap(text=self._report)
if verbose:
for line in lines:
print(line)
return(self._report)
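# Usage sketch for Independence (illustrative only; `df` and the categorical
# columns 'gender' and 'segment' below are hypothetical):
#
#   ind = Independence()
#   ind.test(df['gender'], df['segment'])   # Pearson's chi-squared test
#   ind.summary()                           # observed/expected tables and X2
#   ind.post_hoc(rowwise=True)              # pairwise chi-squared comparisons
#   ind.report(verbose=True)                # APA-style summary sentence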
# ---------------------------------------------------------------------------- #
# ANOVA #
# ---------------------------------------------------------------------------- #
#%%
class Anova:
'''
    Computes analysis of variance (ANOVA) tests.
'''
def __init__(self):
pass
def aov_test(self, df, x, y, type=2, test='F', sig=0.05):
df2 = pd.DataFrame({'x': df[x], 'y': df[y]})
df2 = df2.dropna()
model = smf.ols('y~x', data=df2).fit()
aov = sm.stats.anova_lm(model, typ=type, test=test)
tbl = pd.DataFrame({
'Test': 'Anova',
'Dependent': y, 'Independent': x, 'Statistic': 'F Statistic',
'Statistic Value': aov['F'][0], 'p-Value': aov['PR(>F)'][0]
}, index=[0])
tbl['H0'] = np.where(tbl['p-Value']<sig, 'Reject', 'Fail to Reject')
return(tbl)
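    # Example usage of aov_test (a sketch; the dataframe and the column names
    # 'species' and 'petal_length' are hypothetical):
    #
    #   aov = Anova()
    #   tbl = aov.aov_test(df, x='species', y='petal_length', type=2, test='F', sig=0.05)
    #   # tbl holds the F statistic, its p-value and an H0 'Reject'/'Fail to Reject' flag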
def aov_table(self, df, x=None, y=None, type=2, test='F', threshold=0):
tests = pd.DataFrame()
if x and y:
df2 = pd.DataFrame({'x': df[x], 'y': df[y]})
df2 = df2.dropna()
model = smf.ols('y~x', data=df2).fit()
aov = sm.stats.anova_lm(model, typ=type, test=test)
tbl = pd.DataFrame({
'Dependent': y, 'Independent': x, 'Sum Sq Model': aov['sum_sq'][0],
'Sum Sq Residuals': aov['sum_sq'][1], 'df Model': aov['df'][0],
'df Residuals': aov['df'][1], 'F': aov['F'][0],
'PR(>F)': aov['PR(>F)'][0]
}, index=[0])
tbl['Eta Squared'] = tbl['Sum Sq Model'] / (tbl['Sum Sq Model'] + tbl['Sum Sq Residuals'])
tests = tests.append(tbl)
elif x:
dfy = df.select_dtypes(include='object')
ys = dfy.columns
for y in ys:
df2 = pd.DataFrame({'x': df[x], 'y': df[y]})
df2 = df2.dropna()
model = smf.ols('x~y', data=df2).fit()
aov = sm.stats.anova_lm(model, typ=type, test=test)
tbl = pd.DataFrame({
'Dependent': y, 'Independent': x, 'Sum Sq Model': aov['sum_sq'][0],
'Sum Sq Residuals': aov['sum_sq'][1], 'df Model': aov['df'][0],
'df Residuals': aov['df'][1], 'F': aov['F'][0],
'PR(>F)': aov['PR(>F)'][0]
}, index=[0])
tbl['Eta Squared'] = tbl['Sum Sq Model'] / (tbl['Sum Sq Model'] + tbl['Sum Sq Residuals'])
tests = tests.append(tbl)
elif y:
dfx = df.select_dtypes(include=[np.number])
xs = dfx.columns
for x in xs:
df2 = pd.DataFrame({'x': df[x], 'y': df[y]})
df2 = df2.dropna()
model = smf.ols('x~y', data=df2).fit()
aov = sm.stats.anova_lm(model, typ=type, test=test)
tbl = pd.DataFrame({
'Dependent': y, 'Independent': x, 'Sum Sq Model': aov['sum_sq'][0],
'Sum Sq Residuals': aov['sum_sq'][1], 'df Model': aov['df'][0],
'df Residuals': aov['df'][1], 'F': aov['F'][0],
'PR(>F)': aov['PR(>F)'][0]
}, index=[0])
tbl['Eta Squared'] = tbl['Sum Sq Model'] / (tbl['Sum Sq Model'] + tbl['Sum Sq Residuals'])
tests = tests.append(tbl)
else:
dfx = df.select_dtypes(include=[np.number])
dfy = df.select_dtypes(include='object')
xs = dfx.columns
ys = dfy.columns
for pair in list(itertools.product(xs,ys)):
df2 = df[[pair[0], pair[1]]].dropna()
df2 = pd.DataFrame({'x': df2[pair[0]], 'y': df2[pair[1]]})
model = smf.ols('x~y', data=df2).fit()
aov = sm.stats.anova_lm(model, typ=type, test=test)
tbl = pd.DataFrame({
                    'Dependent': pair[1], 'Independent': pair[0], 'Sum Sq Model': aov['sum_sq'][0],
'Sum Sq Residuals': aov['sum_sq'][1], 'df Model': aov['df'][0],
'df Residuals': aov['df'][1], 'F': aov['F'][0],
'PR(>F)': aov['PR(>F)'][0]
}, index=[0])
tbl['Eta Squared'] = tbl['Sum Sq Model'] / (tbl['Sum Sq Model'] + tbl['Sum Sq Residuals'])
tests = tests.append(tbl)
tests = tests.loc[tests['Eta Squared'] > threshold]
tests = tests.sort_values(by='Eta Squared', ascending=False)
return(tests)
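    # Example usage of aov_table (a sketch; assumes a dataframe that mixes numeric
    # and object/categorical columns, column names are hypothetical):
    #
    #   aov = Anova()
    #   results = aov.aov_table(df, threshold=0.05)           # every numeric/categorical pair
    #   one = aov.aov_table(df, x='price', y='neighborhood')  # a single pair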
# ---------------------------------------------------------------------------- #
# KRUSKAL #
# ---------------------------------------------------------------------------- #
#%%
class Kruskal:
'''
Class provides non-parametric methods for testing independence
'''
def __init__(self):
pass
def kruskal_test(self, df, x, y, sig=0.05):
        '''Computes the Kruskal-Wallis H-test
Args:
df (pd.DataFrame): Dataframe containing data
x (str): The name of the categorical independent variable
y (str): The name of the numerical dependent variable
Returns:
DataFrame containing statistic and p-value
'''
df = df[[x,y]].dropna()
groups = {}
for grp in df[x].unique():
groups[grp] = df[y][df[x]==grp].values
args = groups.values()
k = stats.kruskal(*args)
columns = ['Test', 'Dependent', 'Independent', 'Statistic', 'Statistic Value', 'p-Value']
data = [['Kruskal', y, x, 'H-Statistic', k[0], k[1]]]
r = pd.DataFrame(data, columns = columns)
r['H0'] = np.where(r['p-Value']<sig, 'Reject', 'Fail to Reject')
return(r)
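    # Example usage of kruskal_test (a sketch; 'group' and 'score' are hypothetical
    # column names for a categorical grouping variable and a numeric response):
    #
    #   k = Kruskal()
    #   r = k.kruskal_test(df, x='group', y='score', sig=0.05)
    #   # r holds the H statistic, its p-value and the H0 decision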
def kruskal_table(self, df, x=None, y=None, sig=0.05, sort=False):
tests = pd.DataFrame()
if x and y:
test = self.kruskal_test(df, x, y)
tests = tests.append(test)
elif x:
dfy = df.select_dtypes(include=[np.number])
ys = dfy.columns.tolist()
for y in ys:
df2 = df[[x,y]].dropna()
test = self.kruskal_test(df2, x, y)
tests = tests.append(test)
elif y:
dfx = df.select_dtypes(include='object')
xs = dfx.columns.tolist()
for x in xs:
df2 = df[[x,y]].dropna()
test = self.kruskal_test(df2, x, y)
tests = tests.append(test)
else:
dfx = df.select_dtypes(include='object')
dfy = df.select_dtypes(include=[np.number])
xs = dfx.columns.tolist()
ys = dfy.columns.tolist()
for pair in list(itertools.product(xs,ys)):
df2 = df[[pair[0], pair[1]]].dropna()
test = self.kruskal_test(df2, pair[0], pair[1])
tests = tests.append(test)
if sort:
tests = tests.sort_values(by=['Independent','Statistic Value'], ascending=False)
return(tests)
def posthoc(self, df, x, y):
df = df[[x,y]].dropna()
p = sp.posthoc_conover(df, val_col=y, group_col=x, p_adjust = 'fdr_bh')
return(p)
def sign_plot(self, df, x, y):
p = self.posthoc(df, x, y)
heatmap_args = {'linewidths': 0.25, 'linecolor': '0.5', 'clip_on': False,
'square': True, 'cbar_ax_bbox': [0.80, 0.35, 0.04, 0.3]}
sp.sign_plot(p, **heatmap_args)
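    # Example post-hoc workflow for the Kruskal class (a sketch; assumes
    # scikit-posthocs is available as the `sp` alias used above, and the column
    # names are hypothetical):
    #
    #   k = Kruskal()
    #   k.kruskal_table(df)                      # omnibus tests for all pairs
    #   p = k.posthoc(df, x='group', y='score')  # Conover pairwise p-values (FDR-corrected)
    #   k.sign_plot(df, x='group', y='score')    # heatmap of significant pairwise differences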
# %%
# ---------------------------------------------------------------------------- #
# CORRELATION #
# ---------------------------------------------------------------------------- #
def correlation(df, x, y):
'''
Computes the correlation between two quantitative variables x and y.
Args:
df (pd.DataFrame): Dataframe containing numeric variables
x (str): The column name for the x variable
y (str): The column name for the y variable
Returns:
Data frame containing the results of the correlation tests
'''
df = df.dropna()
r = stats.pearsonr(df[x], df[y])
test = pd.DataFrame({'x': x, 'y': y, "Correlation": r[0], "p-value": r[1]},
index=[0])
test['AbsCorr'] = test['Correlation'].abs()
test['Strength'] = np.where(test["AbsCorr"] < .1, 'Extremely Weak Correlation',
np.where(test["AbsCorr"] < .30, 'Small Correlation',
np.where(test["AbsCorr"] < .5, 'Moderate Correlation',
'Strong Correlation')))
return(test)
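# Example usage of correlation() (a sketch; 'sqft' and 'price' are hypothetical
# column names):
#
#   test = correlation(df, x='sqft', y='price')
#   # one-row dataframe with Pearson's r, its p-value, |r| and a strength label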
# ---------------------------------------------------------------------------- #
# CORR_TABLE #
# ---------------------------------------------------------------------------- #
def corr_table(df, x=None, y=None, target=None, threshold=0, sig=None):
'''For a dataframe containing numeric variables, this function
    computes pairwise Pearson's R tests of correlation.
Args:
df (pd.DataFrame): Data frame containing numeric variables
x(str): Name of independent variable column (optional)
y(str): Name of dependent variable column (optional)
        target(str): Name of a target column; if given, correlations are computed
            between each numeric column and the target (optional)
threshold (float): Threshold above which correlations should be
reported.
Returns:
Data frame containing the results of the pairwise tests of correlation.
'''
tests = []
if x is not None:
for pair in list(itertools.product(x, y)):
df2 = df[[pair[0], pair[1]]].dropna()
x = df2[pair[0]]
y = df2[pair[1]]
r = stats.pearsonr(x, y)
tests.append(OrderedDict(
{'x': pair[0], 'y': pair[1], "Correlation": r[0], "p-value": r[1]}))
        tests = pd.DataFrame(tests)
tests['AbsCorr'] = tests['Correlation'].abs()
tests['Strength'] = np.where(tests["AbsCorr"] < .1, 'Extremely Weak Correlation',
np.where(tests["AbsCorr"] < .30, 'Small Correlation',
np.where(tests["AbsCorr"] < .5, 'Moderate Correlation',
'Strong Correlation')))
else:
df2 = df.select_dtypes(include=['int', 'float64'])
terms = df2.columns
if target:
if target not in df2.columns:
df2 = df2.join(df[target])
for term in terms:
df2 = df2.dropna()
x = df2[term]
y = df2[target]
r = stats.pearsonr(x, y)
tests.append(OrderedDict(
{'x': term, 'y': target, "Correlation": r[0], "p-value": r[1]}))
tests = pd.DataFrame(tests)
tests['AbsCorr'] = tests['Correlation'].abs()
tests['Strength'] = np.where(tests["AbsCorr"] < .1, 'Extremely Weak Correlation',
np.where(tests["AbsCorr"] < .30, 'Small Correlation',
np.where(tests["AbsCorr"] < .5, 'Moderate Correlation',
'Strong Correlation')))
else:
for pair in list(combinations(terms, 2)):
df2 = df[[pair[0], pair[1]]].dropna()
x = df2[pair[0]]
y = df2[pair[1]]
r = stats.pearsonr(x, y)
tests.append(OrderedDict(
{'x': pair[0], 'y': pair[1], "Correlation": r[0], "p-value": r[1]}))
tests = pd.DataFrame(tests)
tests['AbsCorr'] = tests['Correlation'].abs()
tests['Strength'] = np.where(tests["AbsCorr"] < .1, 'Extremely Weak Correlation',
np.where(tests["AbsCorr"] < .30, 'Small Correlation',
np.where(tests["AbsCorr"] < .5, 'Moderate Correlation',
'Strong Correlation')))
top = tests.loc[tests['AbsCorr'] > threshold]
if sig is not None:
        top = top.loc[top['p-value'] < sig]
top = top.sort_values(by='AbsCorr', ascending=False)
return top
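# Example usage of corr_table() (a sketch; assumes a dataframe with several
# numeric columns and a hypothetical 'price' target):
#
#   top = corr_table(df, threshold=0.3)                   # all pairs with |r| > 0.3
#   vs_target = corr_table(df, target='price', sig=0.05)  # each numeric column vs. the target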
# ---------------------------------------------------------------------------- #
# CRAMER'S V (Corrected) #
# ---------------------------------------------------------------------------- #
def cramers(contingency_table, correction=False):
""" calculate Cramers V statistic for categorical-categorical association.
If correction is True, it uses correction from Bergsma and Wicher,
Journal of the Korean Statistical Society 42 (2013): 323-328
Args:
contingency_table (pd.DataFrame): Contingency table containing
counts for the two variables
being analyzed
correction (bool): If True, use Bergsma's correction
    Returns:
        tuple: (p-value from the chi-squared test, Cramer's V measure of association)
"""
chi2, p = stats.chi2_contingency(contingency_table)[0:2]
n = contingency_table.sum().sum()
phi = np.sqrt(chi2/n)
r, c = contingency_table.shape
if correction:
phi2corr = max(0, phi**2 - ((c-1)*(r-1))/(n-1))
rcorr = r - ((r-1)**2)/(n-1)
ccorr = c - ((c-1)**2)/(n-1)
V = np.sqrt(phi2corr / min((ccorr-1), (rcorr-1)))
else:
        V = np.sqrt(phi**2 / min(r - 1, c - 1))
return p, V
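# Example usage of cramers() (a sketch; the two categorical columns are hypothetical):
#
#   ct = pd.crosstab(df['gender'], df['preference'])
#   p, v = cramers(ct, correction=True)  # chi-squared p-value and bias-corrected Cramer's V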
# %%
# ---------------------------------------------------------------------------- #
# ASSOCIATION #
# ---------------------------------------------------------------------------- #
def association(df, x, y, z=None, sig=0.05):
'''
Computes the association between two or three categorical variables.
Args:
df (pd.DataFrame): Dataframe containing categorical variables
x (str): The column name for the x variable
y (str): The column name for the y variable
z (str): Optional column containing the z variable
Returns:
Data frame containing the results of the correlation tests
'''
if z:
df = df[[x,y,z]].dropna()
ct = | pd.crosstab(df[z], [df[x], df[y]], rownames=[z], colnames=[x, y]) | pandas.crosstab |
import os
from pathlib import Path
import time
from packaging import version
import warnings
import yaml
import numpy as np
import pandas as pd
import rasterio
from rasterstats import zonal_stats
from shapely.geometry import LineString
from gisutils import df2shp, get_authority_crs
from sfrmaker.routing import find_path, renumber_segments
from sfrmaker.checks import valid_rnos, valid_nsegs, rno_nseg_routing_consistent
from sfrmaker.elevations import smooth_elevations
from sfrmaker.flows import add_to_perioddata, add_to_segment_data
from sfrmaker.gis import export_reach_data, project
from sfrmaker.observations import write_gage_package, write_mf6_sfr_obsfile, add_observations
from sfrmaker.units import convert_length_units, itmuni_values, lenuni_values
from sfrmaker.utils import get_sfr_package_format, get_input_arguments, assign_layers, update
import sfrmaker
from sfrmaker.base import DataPackage
from sfrmaker.mf5to6 import segment_data_to_period_data
from sfrmaker.reaches import consolidate_reach_conductances
from sfrmaker.rivdata import RivData
try:
import flopy
fm = flopy.modflow
mf6 = flopy.mf6
except:
flopy = False
class SFRData(DataPackage):
"""Class for working with a streamflow routing (SFR) dataset,
where the stream network is discretized into reaches contained
within individual model cells. Reaches may be grouped into segments,
with routing between segments specified, and routing between reaches
within segments based on consecutive numbering (as in MODFLOW-2005).
In this case, unique identifier numbers will be assigned to each reach
(in the rno column of the reach_data table), and routing connections
between rnos will be computed. Alternatively, reaches and their
routing connections can be specified directly, as in MODFLOW-6. In this
case, MODFLOW-2005 input will be written with one reach per segment.
Parameters
----------
reach_data : DataFrame
Table containing information on the SFR reaches.
segment_data : DataFrame
Table containing information on the segments (optional).
grid : sfrmaker.grid class instance
model_length_units : str
'meters' or 'feet'
model_time_units : str
's': seconds
        'm': minutes
'h': hours
'd': days
'y': years
enforce_increasing_nsegs : bool
If True, segment numbering is checked to ensure
        that it only increases downstream, and reset if it doesn't.
package_name : str
Base name for writing sfr output.
kwargs : keyword arguments
Optional values to assign globally to SFR variables. For example
icalc=1 would assign all segments an icalc value of 1. For default
values see the sfrdata.defaults dictionary. Default values can be
assigned using MODFLOW-2005 or MODFLOW-6 terminology.
"""
# conversions to MODFLOW6 variable names
mf6names = {'rno': 'rno',
# 'node': 'cellid',
'rchlen': 'rlen',
'width': 'rwid',
'slope': 'rgrd',
'strtop': 'rtp',
'strthick': 'rbth',
'strhc1': 'rhk',
'roughch': 'man',
'flow': 'inflow',
'pptsw': 'rainfall',
'etsw': 'evaporation',
'runoff': 'runoff',
'depth1': 'depth1',
'depth2': 'depth2'}
mf5names = {v: k for k, v in mf6names.items()}
# order for columns in reach_data
rdcols = ['rno', 'node', 'k', 'i', 'j',
'iseg', 'ireach', 'rchlen', 'width', 'slope',
'strtop', 'strthick', 'strhc1',
'thts', 'thti', 'eps', 'uhc',
'outreach', 'outseg', 'asum', 'line_id', 'name',
'geometry']
# order for columns in segment_data
sdcols = ['per', 'nseg', 'icalc', 'outseg', 'iupseg',
'iprior', 'nstrpts',
'flow', 'runoff', 'etsw', 'pptsw',
'roughch', 'roughbk', 'cdpth', 'fdpth',
'awdth', 'bwdth',
'hcond1', 'thickm1', 'elevup', 'width1', 'depth1',
'thts1', 'thti1', 'eps1', 'uhc1',
'hcond2', 'thickm2', 'elevdn', 'width2', 'depth2',
'thts2', 'thti2', 'eps2', 'uhc2']
dtypes = {'rno': int, 'node': int, 'k': int, 'i': int, 'j': int,
'iseg': int, 'ireach': int, 'outreach': int, 'line_id': int,
'per': int, 'nseg': int, 'icalc': int, 'outseg': int,
'iupseg': int, 'iprior': int, 'nstrpts': int,
'name': object, 'geometry': object}
# LENUNI = {"u": 0, "f": 1, "m": 2, "c": 3}
len_const = {0: 1.0, 1: 1.486, 2: 1.0, 3: 100.}
# {"u": 0, "s": 1, "m": 2, "h": 3, "d": 4, "y": 5}
time_const = {1: 1., 2: 60., 3: 3600., 4: 86400., 5: 31557600.}
# default values
defaults = {'icalc': 1,
'roughch': 0.037,
'strthick': 1,
'strhc1': 1,
'istcb2': 223,
'gage_starting_unit_number': 250
}
package_type = 'sfr'
def __init__(self, reach_data=None,
segment_data=None, grid=None,
model=None,
isfr=None,
model_length_units="undefined", model_time_units='days',
enforce_increasing_nsegs=True,
default_slope=0.001, minimum_slope=0.0001,
maximum_slope=1.,
package_name='model',
**kwargs):
DataPackage.__init__(self, grid=grid, model=model, isfr=isfr,
model_length_units=model_length_units,
model_time_units=model_time_units,
package_name=package_name)
# attributes
self._period_data = None
self._observations = None
self._observations_filename = None
# convert any modflow6 kwargs to modflow5
kwargs = {SFRData.mf5names[k] if k in SFRData.mf6names else k:
v for k, v in kwargs.items()}
# update default values (used in segment and reach data setup)
self.defaults.update(kwargs)
self.reach_data = self._setup_reach_data(reach_data)
self.segment_data = self._setup_segment_data(segment_data)
self.isfropt0_to_1() # distribute any isfropt=0 segment data to reaches
# routing
self._segment_routing = None # dictionary of routing connections
self._rno_routing = None # dictionary of rno routing connections
self._paths = None # routing sequence from each segment to outlet
self._reach_paths = None # routing sequence from each reach number to outlet
if not self._valid_nsegs(increasing=enforce_increasing_nsegs):
self.reset_segments()
# establish rno routing
# set_outreaches checks for valid rnos and resets them if not
# resets out reaches either way using segment data
# not the ideal logic for MODFLOW 6 case where only
# rno and connections are supplied
self.set_outreaches()
self.get_slopes(default_slope=default_slope, minimum_slope=minimum_slope,
maximum_slope=maximum_slope)
# have to set the model last, because it also sets up a flopy sfr package instance
self.model = model # attached flopy model instance
# self._ModflowSfr2 = None # attached instance of flopy modflow_sfr2 package object
# MODFLOW-2005 gages will be assigned sequential unit numbers
# starting at gage_starting_unit_number
self.gage_starting_unit_number = self.defaults['gage_starting_unit_number']
@property
def const(self):
const = self.len_const[self._lenuni] * \
self.time_const[self._itmuni]
return const
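    # Worked example (not part of the original class): for a model in meters and
    # days, const = len_const[2] * time_const[4] = 1.0 * 86400. = 86400., the
    # Manning's-equation unit constant written to the SFR input for those units.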
@property
def _itmuni(self):
"""MODFLOW time units code"""
return itmuni_values[self.model_time_units]
@property
def _lenuni(self):
"""MODFLOW length units code"""
return lenuni_values[self.model_length_units]
@property
def structured(self):
if self.grid is not None:
return self.grid._structured
else:
return True
@property
def model(self):
return self._model
@model.setter
def model(self, model):
self._model = model
# update the sfr package object as well
self.create_modflow_sfr2(model)
@property
def package_name(self):
if self._package_name is None:
if self.model is not None:
self._package_name = self.model.name
else:
self._package_name = 'model'
return self._package_name
@package_name.setter
def package_name(self, package_name):
self._package_name = package_name
@property
def crs(self):
if self._crs is None:
self._crs = self.grid.crs
return self._crs
@property
def crs_units(self):
"""Length units of the coordinate reference system"""
return self.crs.length_units
@property
def segment_routing(self):
if self._segment_routing is None or self._routing_changed():
sd = self.segment_data.groupby('per').get_group(0)
graph = dict(
zip(sd.nseg, sd.outseg))
outlets = set(graph.values()).difference(
set(graph.keys())) # including lakes
graph.update({o: 0 for o in outlets})
            self._segment_routing = graph
        return self._segment_routing
@property
def rno_routing(self):
if self._rno_routing is None or self._routing_changed():
# enforce valid rnos; and create outreach connections
# from segment data and sequential reach numbering
# (ireach values also checked and fixed if necesseary)
self.set_outreaches()
rd = self.reach_data
graph = dict(zip(rd.rno, rd.outreach))
outlets = set(graph.values()).difference(
set(graph.keys())) # including lakes
graph.update({o: 0 for o in outlets})
self._rno_routing = graph
return self._rno_routing
@property
def modflow_sfr2(self):
"""A `flopy.modflow.mfsfr2.ModflowSfr2` represenation of the sfr dataset."""
if self._ModflowSfr2 is None:
self.create_modflow_sfr2()
return self._ModflowSfr2
@classmethod
def get_empty_reach_data(cls, nreaches=0, default_value=0):
rd = fm.ModflowSfr2.get_empty_reach_data(nreaches,
default_value=default_value)
df = pd.DataFrame(rd)
for c in SFRData.rdcols:
if c not in df.columns:
df[c] = default_value
elif c != 'geometry':
df[c] = df[c].astype(cls.dtypes.get(c, np.float32))
return df[cls.rdcols]
def _setup_reach_data(self, reach_data):
rd = SFRData.get_empty_reach_data(len(reach_data))
reach_data.index = range(len(reach_data))
for c in reach_data.columns:
rd[c] = reach_data[c].astype(SFRData.dtypes.get(c, np.float32))
assert rd[c].dtype == SFRData.dtypes.get(c, np.float32)
# assign kwargs to reach data
for k, v in self.defaults.items():
if k in self.rdcols and k not in reach_data.columns:
rd[k] = v
return rd
@classmethod
def get_empty_segment_data(cls, nsegments=0, default_value=0):
sd = fm.ModflowSfr2.get_empty_segment_data(nsegments,
default_value=default_value)
sd = pd.DataFrame(sd)
sd['per'] = 0
for c in sd.columns:
sd[c] = sd[c].astype(cls.dtypes.get(c, np.float32))
return sd[cls.sdcols]
def _setup_segment_data(self, segment_data):
# if no segment_data was provided
if segment_data is None:
# create segment_data from iseg and ireach columns in reach data
# if
if valid_nsegs(self.reach_data.iseg, increasing=False) and \
self.reach_data.outseg.sum() > 0:
self.reach_data.sort_values(by=['iseg', 'ireach'], inplace=True)
nss = self.reach_data.iseg.max()
sd = SFRData.get_empty_segment_data(nss)
routing = dict(zip(self.reach_data.iseg, self.reach_data.outseg))
sd['nseg'] = range(len(sd))
sd['outseg'] = [routing[s] for s in sd.nseg]
# create segment_data from reach routing (one reach per segment)
else:
has_rno_routing = self._check_reach_routing()
assert has_rno_routing, \
"Reach data must contain rno column with unique, " \
"consecutive reach numbers starting at 1. If no " \
"segment_data are supplied, segment routing must be" \
"included in iseg and outseg columns, or reach data " \
"must contain outreach column with routing connections."
sd = SFRData.get_empty_segment_data(len(self.reach_data))
sd['nseg'] = self.reach_data.rno
sd['outseg'] = self.reach_data.outreach
# transfer supplied segment data to default template
else:
sd = SFRData.get_empty_segment_data(len(segment_data))
if 'per' not in segment_data.columns:
segment_data['per'] = 0
segment_data.sort_values(by=['per', 'nseg'], inplace=True)
segment_data.index = range(len(segment_data))
for c in segment_data.columns:
values = segment_data[c].astype(SFRData.dtypes.get(c, np.float32))
# fill any nan values with 0 (same as empty segment_data;
# for example elevation if it wasn't specified and
# will be sampled from the DEM)
values.fillna(0, inplace=True)
sd[c] = values
# assign defaults to segment data
for k, v in self.defaults.items():
if k in self.sdcols and k not in segment_data.columns:
sd[k] = v
# add outsegs to reach_data
routing = dict(zip(sd.nseg, sd.outseg))
self.reach_data['outseg'] = [routing[s] for s in self.reach_data.iseg]
return sd
@property
def observations(self):
if self._observations is None:
self._observations = pd.DataFrame(columns=['obsname', 'obstype', 'rno', 'iseg', 'ireach'])
return self._observations
@property
def observations_file(self):
if self._observations_filename is None:
if self.model is not None and self.model.version == 'mf6':
self._observations_filename = self.package_name + '.sfr.obs'
else:
self._observations_filename = self.package_name + '.gage'
return self._observations_filename
@observations_file.setter
def observations_file(self, observations_filename):
self._observations_filename = observations_filename
@property
def period_data(self):
if self._period_data is None:
self._period_data = self._get_period_data()
# index by period and rno
if not self._period_data.index.names == ['per', 'rno']:
self._period_data.set_index(['per', 'rno'], inplace=True)
return self._period_data
def _get_period_data(self):
print('converting segment data to period data...')
return segment_data_to_period_data(self.segment_data, self.reach_data)
def add_to_perioddata(self, data, flowline_routing=None,
variable='inflow',
line_id_column=None,
rno_column=None,
period_column='per',
data_column='Q_avg',
one_inflow_per_path=False, distribute_flows_to_reaches=False):
return add_to_perioddata(self, data, flowline_routing=flowline_routing,
variable=variable,
line_id_column=line_id_column,
rno_column=rno_column,
period_column=period_column,
data_column=data_column,
one_inflow_per_path=one_inflow_per_path,
distribute_flows_to_reaches=distribute_flows_to_reaches)
def add_to_segment_data(self, data, flowline_routing,
variable='flow',
line_id_column=None,
segment_column='segment',
period_column='per',
data_column='Q_avg'):
return add_to_segment_data(self, data, flowline_routing,
variable=variable,
line_id_column=line_id_column,
segment_column=segment_column,
period_column=period_column,
data_column=data_column)
@property
def paths(self):
"""Dict listing routing sequence for each segment
in SFR network."""
if self._paths is None:
self._set_paths()
return self._paths
if self._routing_changed():
self._reset_routing()
return self._paths
@property
def reach_paths(self):
"""Dict listing routing sequence for each segment
in SFR network."""
if self._paths is None:
self._set_reach_paths()
return self._reach_paths
if self._routing_changed():
self._reset_routing()
return self._reach_paths
def _set_paths(self):
routing = self.segment_routing
self._paths = {seg: find_path(routing, seg) for seg in routing.keys()}
def _set_reach_paths(self):
routing = self.rno_routing
self._reach_paths = {rno: find_path(routing, rno) for rno in routing.keys()}
def _reset_routing(self):
self.reset_reaches()
self.reset_segments()
self._segment_routing = None
self._set_paths()
self._rno_routing = None
self._set_reach_paths()
def _routing_changed(self):
sd = self.segment_data.groupby('per').get_group(0)
rd = self.reach_data
# check if segment routing in dataframe is consistent with routing dict
segment_routing = dict(zip(sd.nseg, sd.outseg))
segment_routing_changed = segment_routing != self._segment_routing
# check if reach routing in dataframe is consistent with routing dict
reach_routing = dict(zip(rd.rno, rd.outreach))
reach_routing_changed = reach_routing != self._rno_routing
# check if segment and reach routing in dataframe are consistent
consistent = rno_nseg_routing_consistent(sd.nseg, sd.outseg,
rd.iseg, rd.ireach,
rd.rno, rd.outreach)
# return True if the dataframes changed,
# or are inconsistent between segments and reach numbers
return segment_routing_changed & reach_routing_changed & ~consistent
def repair_outsegs(self):
"""Set any outsegs that are not nsegs or lakes to 0 (outlet status)"""
isasegment = np.in1d(self.segment_data.outseg,
self.segment_data.nseg)
isasegment = isasegment | (self.segment_data.outseg < 0)
self.segment_data.loc[~isasegment, 'outseg'] = 0
def reset_segments(self):
"""Reset the segment numbering so that is consecutive,
starts at 1 and only increases downstream."""
r = renumber_segments(self.segment_data.nseg,
self.segment_data.outseg)
self.segment_data['nseg'] = [r[s] for s in self.segment_data.nseg]
self.segment_data['outseg'] = [r[s] for s in self.segment_data.outseg]
self.reach_data['iseg'] = [r[s] for s in self.reach_data.iseg]
self.reach_data['outseg'] = [r[s] for s in self.reach_data.outseg]
self.segment_data.sort_values(by=['per', 'nseg'], inplace=True)
self.segment_data.index = np.arange(len(self.segment_data))
assert np.array_equal(self.segment_data.loc[self.segment_data.per == 0, 'nseg'].values,
self.segment_data.loc[self.segment_data.per == 0].index.values + 1)
self.reach_data.sort_values(by=['iseg', 'ireach'], inplace=True)
def reset_reaches(self):
"""Ensure that the reaches in each segment are numbered
consecutively starting at 1."""
self.reach_data.sort_values(by=['iseg', 'ireach'], inplace=True)
reach_data = self.reach_data
segment_data = self.segment_data.groupby('per').get_group(0)
reach_counts = np.bincount(reach_data.iseg)[1:]
reach_counts = dict(zip(range(1, len(reach_counts) + 1),
reach_counts))
ireach = [list(range(1, reach_counts[s] + 1))
for s in segment_data.nseg]
ireach = np.concatenate(ireach)
        try:
            self.reach_data['ireach'] = ireach
        except ValueError:
            # length mismatch between the recomputed ireach numbering and reach_data;
            # leave the existing ireach values in place
            warnings.warn('reset_reaches: could not renumber ireach values; '
                          'length of computed values does not match reach_data.')
def set_outreaches(self):
"""Determine the outreach for each SFR reach (requires a rno column in reach_data).
Uses the segment routing specified for the first stress period to route reaches between segments.
"""
self.reach_data.sort_values(by=['iseg', 'ireach'], inplace=True)
self.segment_data.sort_values(by=['per', 'nseg'], inplace=True)
if not self._valid_rnos():
self.reach_data['rno'] = np.arange(1, len(self.reach_data) + 1)
self.reset_reaches() # ensure that each segment starts with reach 1
self.repair_outsegs() # ensure that all outsegs are segments, outlets, or negative (lakes)
rd = self.reach_data
outseg = self.segment_routing
reach1IDs = dict(zip(rd[rd.ireach == 1].iseg,
rd[rd.ireach == 1].rno))
ireach = rd.ireach.values
iseg = rd.iseg.values
rno = rd.rno.values
outreach = []
for i in range(len(rd)):
# if at the end of reach data or current segment
if i + 1 == len(rd) or ireach[i + 1] == 1:
nextseg = outseg[iseg[i]] # get next segment
if nextseg > 0: # current reach is not an outlet
nextrchid = reach1IDs[nextseg] # get reach 1 of next segment
else:
nextrchid = 0
else: # otherwise, it's the next rno
nextrchid = rno[i + 1]
outreach.append(nextrchid)
self.reach_data['outreach'] = outreach
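    # Minimal illustration of the routing produced above (hypothetical numbering):
    # for two segments routed 1 -> 2, each with two reaches,
    #
    #   iseg:   1 1 2 2     rno:      1 2 3 4
    #   ireach: 1 2 1 2     outreach: 2 3 4 0
    #
    # i.e. the last reach of segment 1 routes to reach 1 of segment 2, and the
    # last reach of the outlet segment gets outreach 0.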
def _valid_rnos(self):
incols = 'rno' in self.reach_data.columns
arevalid = valid_rnos(self.reach_data.rno.tolist())
return incols & arevalid
def _check_reach_routing(self):
"""Cursory check of reach routing."""
valid_rnos = self._valid_rnos()
        non_zero_outreaches = 'outreach' in self.reach_data.columns and \
                              self.reach_data.outreach.sum() > 0
return valid_rnos & non_zero_outreaches
def _valid_nsegs(self, increasing=True):
sd0 = self.segment_data.loc[self.segment_data.per == 0]
return valid_nsegs(sd0.nseg,
sd0.outseg,
increasing=increasing)
def assign_layers(self, adjusted_botm_output_path='.'):
"""Assign model layers to SFR reaches, using the discretzation
package in the attached model. New botm elevations for the model
will be written to a text array file, if any streambed bottoms
are below the model bottom.
Parameters
----------
adjusted_botm_output_path : str
Path for writing the text array of adjusted model bottom
elevations, by default, '.'
"""
if self.model is not None and hasattr(self.model, 'dis'):
botm = self.model.dis.botm.array.copy()
idomain = None
if self.model.version == 'mf6':
idomain = self.model.dis.idomain.array.copy()
else:
bas6 = getattr(self.model, 'bas6')
if bas6 is not None:
idomain = bas6.ibound.array
nlay = botm.shape[0] + 1
layers, new_botm = assign_layers(self.reach_data, botm_array=botm, idomain=idomain)
self.reach_data['k'] = layers
if new_botm is not None:
outfiles = []
if len(new_botm.shape) == 2:
write_layers = [nlay - 1]
else:
write_layers = list(range(nlay))
outpath = Path(adjusted_botm_output_path)
for k in write_layers:
outfile = outpath / f'{self.package_name}_layer_{nlay}_new_botm_elevations.dat'
np.savetxt(outfile, new_botm[-1], fmt='%.2f')
outfiles.append(outfile)
                msg = ('Sfrmaker pushed some model botm elevations downward '
                       'to accommodate streambed bottoms. New botm elevations '
                       'written to:\n')
for f in outfiles:
msg += f'{f}\n'
print(msg)
else:
print('Need a model instance with a discretization package to assign layers. '
'A model can be assigned to the SFRData.model attribute.')
def create_modflow_sfr2(self, model=None, const=None,
isfropt=1, # override flopy default of 0
unit_number=None,
ipakcb=None, istcb2=None,
**kwargs
):
if const is None:
const = self.const
if model is not None and model.version != 'mf6':
m = model
# create an flopy mf2005 model instance attached to modflow_sfr2 object
# this is a parallel model instance to self.model, that is only
# accessible through modflow_sfr2.parent. As long as this method is
# called by the @model.setter; this instance should have the same
# dis and ibound (idomain) as self.model.
# The modflow_sfr2 instance is used as basis for writing packages,
# because it includes many features, like checking and exporting,
# that ModflowGwfsfr doesn't have)
# ibound in BAS package is used by mf5to6 converter
# to fill in required "None" values when writing mf6 input
elif model is None or model.version == 'mf6':
model_ws = '.' if model is None else model.model_ws
m = fm.Modflow(modelname=self.package_name, model_ws=model_ws,
structured=self.structured)
if model is not None:
nper = 1
if 'tdis' in model.simulation.package_key_dict.keys():
tdis = model.simulation.package_key_dict['tdis']
nper = tdis.nper.array
if 'dis' in model.package_dict.keys():
dis = model.dis
botm = dis.botm.array.copy()
idomain = dis.idomain.array.copy()
nlay, nrow, ncol = botm.shape
dis = fm.ModflowDis(m, nlay=nlay, nrow=nrow, ncol=ncol, nper=nper,
top=dis.top.array.copy(),
botm=botm)
bas = fm.ModflowBas(m, ibound=idomain)
# translate segment data
# populate MODFLOW 2005 segment variables from reach data if they weren't entered
if self.segment_data[['width1', 'width2']].sum().sum() == 0:
raise NotImplementedError('Double check indexing below before using this option.')
width1 = self.reach_data.groupby('iseg')['width'].min().to_dict()
width2 = self.reach_data.groupby('iseg')['width'].max().to_dict()
self.segment_data['width1'] = [width1[s] for s in self.segment_data.nseg]
self.segment_data['width2'] = [width2[s] for s in self.segment_data.nseg]
assert not np.any(np.isnan(self.segment_data))
# create record array for each stress period
sd = self.segment_data.groupby('per')
sd = {per: sd.get_group(per).drop('per', axis=1).to_records(index=False)
for per in self.segment_data.per.unique()}
# translate reach data
flopy_cols = fm.ModflowSfr2. \
get_default_reach_dtype(structured=self.structured).names
columns_not_in_flopy = set(self.reach_data.columns).difference(set(flopy_cols))
rd = self.reach_data.drop(columns_not_in_flopy, axis=1).copy()
rd = rd.to_records(index=False)
nstrm = -len(rd)
self._ModflowSfr2 = fm.ModflowSfr2(model=m, nstrm=nstrm, const=const,
reach_data=rd, segment_data=sd,
isfropt=isfropt, unit_number=unit_number,
ipakcb=ipakcb, istcb2=istcb2,
**kwargs)
# if model.version == 'mf6':
# self._ModflowSfr2.parent = model
return self._ModflowSfr2
def create_mf6sfr(self, model=None, unit_conversion=None,
stage_filerecord=None,
budget_filerecord=None,
flopy_rno_input_is_zero_based=True,
**kwargs
):
if unit_conversion is None:
unit_conversion = self.const
if stage_filerecord is None:
stage_filerecord = '{}.sfr.stage.bin'.format(self.package_name)
if budget_filerecord is None:
budget_filerecord = '{}.sfr.cbc'.format(self.package_name)
if model is not None and model.version == 'mf6':
m = model
if 'tdis' in model.simulation.package_key_dict.keys():
tdis = model.simulation.package_key_dict['tdis']
nper = tdis.nper.array
# create an flopy mf2005 model instance attached to modflow_sfr2 object
# this is a parallel model instance to self.model, that is only
# accessible through modflow_sfr2.parent. As long as this method is
# called by the @model.setter; this instance should have the same
# dis and ibound (idomain) as self.model.
# The modflow_sfr2 instance is used as basis for writing packages,
# because it includes many features, like checking and exporting,
# that ModflowGwfsfr doesn't have)
# ibound in BAS package is used by mf5to6 converter
# to fill in required "None" values when writing mf6 input
elif model is None or model.version != 'mf6':
# create simulation
sim = flopy.mf6.MFSimulation(version='mf6', exe_name='mf6',
sim_ws='')
m = flopy.mf6.ModflowGwf(sim)
# create an sfrmaker.Mf6SFR instance
from .mf5to6 import Mf6SFR
sfr6 = Mf6SFR(self.modflow_sfr2)
# package data
# An error occurred when storing data "packagedata" in a recarray.
# packagedata data is a one or two dimensional list containing the variables
# "<rno> <cellid> <rlen> <rwid> <rgrd> <rtp> <rbth> <rhk> <man> <ncon> <ustrf> <ndv>"
# (some variables may be optional, see MF6 documentation)
packagedata = sfr6.packagedata.copy()
if self.structured:
columns = packagedata.drop(['k', 'i', 'j', 'idomain'], axis=1).columns.tolist()
packagedata['cellid'] = list(zip(packagedata.k,
packagedata.i,
packagedata.j))
columns.insert(1, 'cellid')
connectiondata = [(rno, *sfr6.connections[rno])
for rno in sfr6.packagedata.rno if rno in sfr6.connections]
# as of 9/12/2019, flopy.mf6.modflow.ModflowGwfsfr requires zero-based input for rno
if flopy_rno_input_is_zero_based and self.modflow_sfr2.reach_data['reachID'].min() == 1:
packagedata['rno'] -= 1
zero_based_connectiondata = []
for record in connectiondata:
zero_based_record = []
for rno in record:
if rno is not None:
if rno > 0:
rno -= 1
elif rno < 0:
rno += 1
zero_based_record.append(rno)
#zero_based_record = tuple(i - 1 if i > 0 else i + 1 for i in record)
if len(zero_based_record) == 1:
zero_based_record.append(None)
zero_based_connectiondata.append(zero_based_record)
connectiondata = zero_based_connectiondata
assert packagedata['rno'].min() == 0
#assert np.min(list(map(np.min, map(np.abs, connectiondata)))) < 1
# set cellids to None for unconnected reaches or where idomain == 0
# can only do this with flopy versions 3.3.1 and later, otherwise flopy will bomb
if version.parse(flopy.__version__) > version.parse('3.3.0'):
unconnected = ~packagedata['rno'].isin(np.array(list(sfr6.connections.keys())) - 1).values
inactive = m.dis.idomain.array[packagedata.k.values,
packagedata.i.values,
packagedata.j.values] != 1
packagedata.loc[unconnected | inactive, 'cellid'] = 'none'
packagedata = packagedata[columns].values.tolist()
period_data = None
if sfr6.period_data is not None:
# TODO: add method to convert period_data df to MF6 input
# raise NotImplemented("Support for mf6 period_data input not implemented yet. "
# "Use sfrdata.write_package(version='mf6') instead.")
pass
mf6sfr = mf6.ModflowGwfsfr(model=m, unit_conversion=unit_conversion,
stage_filerecord=stage_filerecord,
budget_filerecord=budget_filerecord,
nreaches=len(self.reach_data),
packagedata=packagedata,
connectiondata=connectiondata,
diversions=None, # TODO: add support for diversions
perioddata=period_data, # TODO: add support for creating mf6 perioddata input
**kwargs)
return mf6sfr
def add_observations(self, data, flowline_routing=None,
obstype=None, sfrlines_shapefile=None,
x_location_column=None,
y_location_column=None,
line_id_column=None,
rno_column=None,
obstype_column=None,
obsname_column='site_no',
gage_starting_unit_number=250):
self.gage_starting_unit_number = gage_starting_unit_number
added_obs = add_observations(self, data, flowline_routing=flowline_routing,
obstype=obstype, sfrlines_shapefile=sfrlines_shapefile,
x_location_column=x_location_column,
y_location_column=y_location_column,
line_id_column=line_id_column,
rno_column=rno_column,
obstype_column=obstype_column,
obsname_column=obsname_column)
# replace any observations that area already in the observations table
if isinstance(self._observations, pd.DataFrame):
existing_obs = set(zip(self._observations['obsname'], self._observations['obstype']))
new_obs = set(zip(added_obs['obsname'], added_obs['obstype']))
exists_already = {obs[0] for obs in existing_obs.intersection(new_obs)}
exists_already = self._observations['obsname'].isin(exists_already)
self._observations = self._observations.loc[~exists_already]
self._observations = self.observations.append(added_obs).reset_index(drop=True)
for df in self._observations, added_obs:
# enforce dtypes (pandas doesn't allow an empty dataframe
# to be initialized with more than one specified dtype)
for col in ['rno', 'iseg', 'ireach']:
df[col] = df[col].astype(int)
return added_obs
def interpolate_to_reaches(self, segvar1, segvar2, per=0):
"""Interpolate values in datasets 6b and 6c to each reach in stream segment
Parameters
----------
segvar1 : str
Column/variable name in segment_data array for representing start of segment
(e.g. hcond1 for hydraulic conductivity)
For segments with icalc=2 (specified channel geometry); if width1 is given,
            the eighth distance point (XCPT8) from dataset 6d will be used as the stream width.
            For icalc=3, an arbitrary width of 5 is assigned.
For icalc=4, the mean value for width given in item 6e is used.
segvar2 : str
            Column/variable name in segment_data array representing the end of the segment
(e.g. hcond2 for hydraulic conductivity)
per : int
Stress period with segment data to interpolate
Returns
-------
reach_values : 1D array
            One-dimensional array of interpolated values of same length as the reach_data array.
For example, hcond1 and hcond2 could be entered as inputs to get values for the
strhc1 (hydraulic conductivity) column in reach_data.
"""
from sfrmaker.reaches import interpolate_to_reaches
reach_data = self.reach_data
segment_data = self.segment_data.groupby('per').get_group(per)
segment_data.sort_values(by='nseg', inplace=True)
reach_data.sort_values(by=['iseg', 'ireach'], inplace=True)
return interpolate_to_reaches(reach_data, segment_data,
segvar1, segvar2,
reach_data_group_col='iseg',
segment_data_group_col='nseg'
)
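    # Example usage (a sketch): linearly interpolate streambed hydraulic conductivity
    # from the segment end values to every reach for stress period 0:
    #
    #   strhc1 = sfrdata.interpolate_to_reaches('hcond1', 'hcond2', per=0)
    #   sfrdata.reach_data['strhc1'] = strhc1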
def isfropt0_to_1(self):
"""transfer isfropt=0 segment properties to reaches,
using linear interpolation.
"""
snames = {'strtop': ('elevup', 'elevdn'),
'strthick': ('thickm1', 'thickm2'),
'strhc1': ('hcond1', 'hcond2'),
'thts': ('thts1', 'thts2'),
'thti': ('thti1', 'thti2'),
'eps': ('eps1', 'eps2'),
'uhc': ('uhc1', 'uhc2'),
}
sd = self.segment_data.loc[self.segment_data.per == 0]
for col, sdcols in snames.items():
if self.reach_data[col].sum() == 0 and \
sd[[*sdcols]].values.sum(axis=(0, 1)) != 0.:
self.reach_data[col] = self.interpolate_to_reaches(*sdcols)
def sample_reach_elevations(self, dem,
method='buffers',
buffer_distance=100,
smooth=True
):
"""Computes zonal statistics on a raster for SFR reaches, using
either buffer polygons around the reach LineStrings, or the model
grid cell polygons containing each reach.
Parameters
----------
dem : path to valid raster dataset
Must be in same Coordinate Reference System as model grid.
method : str; 'buffers' or 'cell polygons'
If 'buffers', buffers (with flat caps; cap_style=2 in LineString.buffer())
will be created around the reach LineStrings (geometry column in reach_data).
buffer_distance : float
Buffer distance to apply around reach LineStrings, in crs_units.
smooth : bool
Run sfrmaker.elevations.smooth_elevations on sampled elevations
to ensure that they decrease monotonically in the downstream direction
(default=True).
Returns
-------
elevs : dict of sampled elevations keyed by reach number
"""
# get the CRS and pixel size for the DEM
with rasterio.open(dem) as src:
raster_crs = get_authority_crs(src.crs)
# make sure buffer is large enough for DEM pixel size
buffer_distance = np.max([np.sqrt(src.res[0] *
src.res[1]) * 1.01,
buffer_distance])
if method == 'buffers':
assert isinstance(self.reach_data.geometry[0], LineString), \
"Need LineString geometries in reach_data.geometry column to use buffer option."
features = [g.buffer(buffer_distance) for g in self.reach_data.geometry]
txt = 'buffered LineStrings'
elif method == 'cell polygons':
assert self.grid is not None, \
"Need an attached sfrmaker.Grid instance to use cell polygons option."
features = self.grid.df.loc[self.reach_data.node, 'geometry'].tolist()
txt = method
# to_crs features if they're not in the same crs
if raster_crs != self.crs:
features = project(features,
self.crs,
raster_crs)
print('running rasterstats.zonal_stats on {}...'.format(txt))
t0 = time.time()
results = zonal_stats(features,
dem,
stats='min')
elevs = [r['min'] for r in results]
print("finished in {:.2f}s\n".format(time.time() - t0))
if all(v is None for v in elevs):
raise Exception('No {} intersected with {}. Check projections.'.format(txt, dem))
if any(v is None for v in elevs):
            raise Exception('Some {} did not intersect with {}. '
                            'Check that the DEM covers the area of the '
                            'stream network.'.format(txt, dem))
if smooth:
elevs = smooth_elevations(self.reach_data.rno.tolist(),
self.reach_data.outreach.tolist(),
elevs)
else:
elevs = dict(zip(self.reach_data.rno, elevs))
return elevs
def set_streambed_top_elevations_from_dem(self, filename, elevation_units=None,
dem=None, dem_z_units=None,
method='buffers',
**kwargs):
"""Set streambed top elevations from a DEM raster.
Runs sfrdata.sample_reach_elevations
Parameters
----------
filename : path to valid raster dataset
Must be in same Coordinate Reference System as model grid.
elevation_units : str
Elevation units for DEM ('feet' or 'meters'). If None, units
are assumed to be same as model (default).
method : str; 'buffers' or 'cell polygons'
If 'buffers', buffers (with flat caps; cap_style=2 in LineString.buffer())
will be created around the reach LineStrings (geometry column in reach_data).
kwargs : keyword arguments to sfrdata.sample_reach_elevations
Returns
-------
updates strtop column of sfrdata.reach_data
"""
if dem is not None:
warnings.warn('set_streambed_top_elevations_from_dem: dem argument is deprecated. '
'Use filename instead.', DeprecationWarning)
filename = dem
if dem_z_units is not None:
warnings.warn('set_streambed_top_elevations_from_dem: dem_z_units argument is deprecated. '
'Use elevation_units instead.', DeprecationWarning)
elevation_units = dem_z_units
sampled_elevs = self.sample_reach_elevations(dem=filename, method=method,
**kwargs)
if elevation_units is None:
elevation_units = self.model_length_units
mult = convert_length_units(elevation_units, self.model_length_units)
self.reach_data['strtop'] = [sampled_elevs[rno]
for rno in self.reach_data['rno'].values]
self.reach_data['strtop'] *= mult
def get_slopes(self, default_slope=0.001, minimum_slope=0.0001,
maximum_slope=1.):
"""Compute slopes by reach using values in strtop (streambed top) and rchlen (reach length)
columns of reach_data. The slope for a reach n is computed as strtop(n+1) - strtop(n) / rchlen(n).
Slopes for outlet reaches are set equal to a default value (default_slope).
Populates the slope column in reach_data.
Parameters
----------
default_slope : float
Slope value applied to outlet reaches (where water leaves the model).
Default value is 0.001
minimum_slope : float
Assigned to reaches with computed slopes less than this value.
This ensures that the Manning's equation won't produce unreasonable values of stage
(in other words, that stage is consistent with assumption that
            streamflow is primarily driven by the streambed gradient).
Default value is 0.0001.
maximum_slope : float
Assigned to reaches with computed slopes more than this value.
Default value is 1.
"""
rd = self.reach_data
assert rd.outreach.sum() > 0, "requires reach routing, must be called after set_outreaches()"
elev = dict(zip(rd.rno, rd.strtop))
dist = dict(zip(rd.rno, rd.rchlen))
dnelev = {rid: elev[rd.outreach.values[i]] if rd.outreach.values[i] != 0
else -9999 for i, rid in enumerate(rd.rno)}
slopes = np.array(
[(elev[i] - dnelev[i]) / dist[i] if dnelev[i] != -9999 and dist[i] > 0
else default_slope for i in rd.rno])
slopes[slopes < minimum_slope] = minimum_slope
slopes[slopes > maximum_slope] = maximum_slope
self.reach_data['slope'] = slopes
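    # Worked example for the slope computation above (hypothetical numbers): a reach
    # with strtop 100.0 and rchlen 250. routed to a reach with strtop 99.5 gets
    # slope = (100.0 - 99.5) / 250. = 0.002; outlet reaches get default_slope, and
    # all values are then clipped to [minimum_slope, maximum_slope].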
@classmethod
def from_package(cls, sfrpackagefile, grid, namefile=None,
sim_name=None, model_ws='.',
version=None, model_name='model', package_name=None,
linework=None):
"""Read SFR package file
Parameters
----------
sfrpackagefile : file path
Modflow-2005 or MODFLOW6 SFR package
grid : sfrmaker.grid instance
linework : shapefile path or DataFrame
Contains linestrings for each reach; must have
segment and reach, or reach number (rno in MODFLOW 6)
information.
Returns
-------
sfrdata : sfrmaker.sfrdata instance
"""
# todo: finish SFRData.from_package
raise NotImplementedError('SFRData.from_package not implemented yet.')
if version is None:
version = get_sfr_package_format(sfrpackagefile)
if package_name is None:
package_name, _ = os.path.splitext(sfrpackagefile)
# load the model and SFR package
if namefile is not None:
model_ws, namefile = os.path.split(namefile)
m = fm.Modflow.load(namefile, model_ws=model_ws, load_only=['SFR'])
elif sim_name is not None:
sim = flopy.mf6.MFSimulation.load(sim_name, 'mf6', 'mf6', model_ws)
m = sim.get_model(model_name)
else:
if version != 'mf6':
m = fm.Modflow(model_ws=model_ws)
sfr = fm.ModflowSfr2.load(sfrpackagefile, model=m)
else:
sim = flopy.mf6.MFSimulation(sim_ws=model_ws)
m = flopy.mf6.ModflowGwf(sim, modelname=model_name,
model_nam_file='{}.nam'.format(model_name))
sfr = mf6.ModflowGwfsfr.load(sfrpackagefile, model=m)
if m.version != 'mf6':
reach_data = pd.DataFrame(m.sfr.reach_data)
perioddata_list = []
for per, spd in m.sfr.segment_data.items():
if spd is not None:
spd = spd.copy()
spd['per'] = per
perioddata_list.append(spd)
segment_data = pd.concat(perioddata_list)
else:
pass
return cls(reach_data=reach_data, segment_data=segment_data,
model=m,
grid=grid)
@classmethod
def from_tables(cls, reach_data, segment_data,
grid=None, isfr=None):
reach_data = pd.read_csv(reach_data)
segment_data = | pd.read_csv(segment_data) | pandas.read_csv |
import warnings
from pathlib import Path
from typing import Union
warnings.simplefilter(action='ignore', category=FutureWarning)
import pandas as pd
import os
import numpy as np
import matplotlib.pyplot as plt
from scipy.stats import linregress
from thoipapy.utils import normalise_0_1, make_sure_path_exists
warnings.filterwarnings("ignore")
def save_BO_linegraph_and_barchart(s, bocurve_data_xlsx, BO_linechart_png, BO_barchart_png, namedict, logging, AUC_ser, plot_o_over_r=False):
df_o_minus_r = pd.read_excel(bocurve_data_xlsx, sheet_name="df_o_minus_r", index_col=0)
BO_scatter_png = str(BO_barchart_png)[:-12] + "scatter.png"
#######################################################################################################
# #
# Create a dataframe with AUBOC and AUC for individual protein (df_valid_indiv) #
# #
#######################################################################################################
# load AUBOC values as a series
mean_o_minus_r_by_sample_ser = pd.read_excel(bocurve_data_xlsx, sheet_name="mean_o_minus_r_by_sample", index_col=0)["mean_o_minus_r_by_sample"]
# select sample sizes 5 and 10
df_valid_indiv = df_o_minus_r.loc[[5, 10], :].T.copy()
df_valid_indiv["AUBOC"] = mean_o_minus_r_by_sample_ser
df_valid_indiv["ROC AUC"] = AUC_ser
df_valid_indiv.sort_values("AUBOC", axis=0, ascending=False, inplace=True)
""" df_valid_indiv should now have the results from BO curve and ROC for each protein
AUBOC sample size 5 sample size 10 ROC AUC
3ij4_A-crystal 17.456522 1.913043 1.652174 0.714286
4wit_A-crystal 16.620000 2.000000 2.000000 0.622807
Q08345-ETRA 16.571429 2.809524 2.238095 0.842593
P04626-ETRA 16.456522 1.913043 1.652174 0.916667
P25189-ETRA 14.634615 2.038462 2.153846 0.812500
"""
#######################################################################################################
# #
# plot correlation between AUBOC and ROC #
# #
#######################################################################################################
# BO_barchart_png
plt.close("all")
# plt.rcParams.update({'font.size': 8})
figsize = np.array([3.42, 3.42]) * 2 # DOUBLE the real size, due to problems on Bo computer with fontsizes
fig, ax = plt.subplots(figsize=figsize)
# df_valid_indiv_scatter = df_valid_indiv[["AUBOC", "ROC AUC"]]
df_valid_indiv.plot(kind="scatter", ax=ax, x="AUBOC", y="ROC AUC", alpha=0.7)
# calculate linear regression for fitted line
slope, intercept, r_value, p_value, std_err = linregress(df_valid_indiv["AUBOC"], df_valid_indiv["ROC AUC"])
# fit_fn = np.poly1d(linear_regression)
# slope, intercept, r_value, p_value, std_err = scipy.stats.linregress(x, y)
x_first_last_dp = np.array([df_valid_indiv["AUBOC"].min(), df_valid_indiv["AUBOC"].max()])
y_fitted = x_first_last_dp * slope + intercept
ax.plot(x_first_last_dp, y_fitted, label="$R^2$ : {:.2f}".format(r_value ** 2))
ax.set_xlabel("AUBOC")
ax.set_ylabel("ROC AUC")
ax.legend()
fig.tight_layout()
ax.grid(False)
# BO_barchart_png = os.path.join(BO_curve_folder, "AUBOC_barchart.png")
fig.savefig(BO_scatter_png, dpi=240)
# simply normalise all between 0 and 1
for col in df_valid_indiv.columns:
df_valid_indiv[col] = normalise_0_1(df_valid_indiv[col])[0] + 0.01
bocurve_data_xlsx: Union[Path, str] = Path(s["data_dir"]) / f"results/{s['setname']}/crossvalidation/data/{s['setname']}_thoipa_loo_bo_curve_data.xlsx"
BO_data_valid_indiv_csv: Union[Path, str] = Path(s["data_dir"]) / f"results/{s['setname']}/crossvalidation/data/{s['setname']}_BO_curve_data_valid_indiv.csv"
make_sure_path_exists(bocurve_data_xlsx, isfile=True)
df_valid_indiv = df_valid_indiv.reindex(columns=["AUBOC", 5, 10, "ROC AUC"])
df_valid_indiv.columns = ["AUBOC", "sample size 5", "sample size 10", "ROC AUC"]
df_valid_indiv.to_csv(BO_data_valid_indiv_csv)
""" df_valid_indiv is now normalised within each column, and sorted by AUBOC
AUBOC sample size 5 sample size 10 ROC AUC
3ij4_A-crystal 1.010000 0.789166 0.727758 0.724139
4wit_A-crystal 0.980317 0.810587 0.793133 0.594927
DDR1 [Q08345-ETRA] 0.978593 1.010000 0.837883 0.905371
ErbB2 [P04626-ETRA] 0.974516 0.789166 0.727758 1.010000
MPZ [P25189-ETRA] 0.909867 0.820061 0.822048 0.862866
"""
#######################################################################################################
# #
# plot barchart #
# #
#######################################################################################################
# BO_barchart_png
plt.close("all")
# plt.rcParams.update({'font.size': 8})
figsize = np.array([3.42, 3.42]) * 2 # DOUBLE the real size, due to problems on Bo computer with fontsizes
fig, ax = plt.subplots(figsize=figsize)
# replace the protein names
df_valid_indiv.index = pd.Series(df_valid_indiv.index).replace(namedict)
df_valid_indiv.plot(kind="bar", ax=ax, alpha=0.7)
ax.set_ylabel("performance value\n(observed overlap - random overlap)")
ax.legend() # (["sample size = 5", "sample size = 10"])
fig.tight_layout()
ax.grid(False)
fig.savefig(BO_barchart_png, dpi=240)
#######################################################################################################
# #
# plot linechart (combined data all proteins #
# #
#######################################################################################################
if plot_o_over_r:
df_o_over_r = | pd.read_excel(bocurve_data_xlsx, sheet_name="df_o_over_r", index_col=0) | pandas.read_excel |
r"""
:mod:`util.time` -- Time utilities
==================================
Common time methods.
"""
# Mandatory imports
from pandas import to_datetime as pandas_to_datetime
from pandas.tseries.offsets import DateOffset
from dateparser import parse
__all__ = ['to_datetime', 'set_time_range']
def to_datetime(time):
r"""Extends :meth:`pandas.to_datetime` with some more date time format
conversions.
Parameters
----------
time : mixed
A string or various datetime object.
Returns
-------
time : :class:`pandas.Timestamp`
Pandas datetime object.
"""
if time is None:
return
if isinstance(time, object) and hasattr(time, 'datetime'):
time = time.datetime
elif isinstance(time, str):
time = parse(time)
return pandas_to_datetime(time)
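# Example usage (a sketch): each of the following returns a :class:`pandas.Timestamp`,
# assuming dateparser can interpret the string and obspy is installed for the last case.
#
#   to_datetime('2021-01-02T03:04:05')
#   to_datetime('yesterday')             # free-form strings go through dateparser
#   to_datetime(obspy.UTCDateTime(0))    # objects exposing a .datetime attribute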
def set_time_range(start, end, implicit: bool = True, next_day: bool = True):
"""Set a time range based on various types.
Parameters:
-----------
start : various
Set the start time. The type should be any of: `str`,
:class:`datetime.datetime`, :class:`pandas.Timestamp`,
:class:`numpy.datetime64` or :class:`obspy.UTCDateTime`.
end : various
Set the end time. Same formats as for ``start`` can be used to
set the explicit time.
If ``implicit`` is `True` the type can also be `int` or `float`
such that ``end`` defines the duration.
implicit : `bool`, optional
        Allow the time to be set implicitly when `True` (default):
- Use current time when start time is `None`.
- Set start time to midnight and the end time to the next day when
end time is `None` and ``next_day`` is `True`. If ``next_day`` is
`False` end time becomes the last second of the day.
- Use ``end`` as duration when type is either `int` or `float`.
Negative duration will make ``start`` the end time.
next_day : `bool`, optional
If `True` (default), set the end time to the begin of the next day if
``implicit`` is `True` and end time is `None`. If `False`, end time
becomes the last second of the day.
Returns
-------
period : tuple
A tuple with (start, end), both of type :class:`obspy.UTCDateTime`.
"""
if not implicit and (start is None or end is None):
raise ValueError('Start and end time should be defined!')
start = to_datetime(start) or to_datetime('now')
if implicit:
if not end:
start = start + DateOffset(days=0, normalize=True)
end = start + DateOffset(days=1)
if not next_day:
end = end - | DateOffset(seconds=1) | pandas.tseries.offsets.DateOffset |
# -*- coding: utf-8 -*-
# This file as well as the whole tsfresh package are licenced under the MIT licence (see the LICENCE.txt)
# <NAME> (<EMAIL>), Blue Yonder Gmbh, 2016
import pandas as pd
import numpy as np
from tests.fixtures import DataTestCase
import mock
from tsfresh.transformers.relevant_feature_augmenter import RelevantFeatureAugmenter
class RelevantFeatureAugmenterTestCase(DataTestCase):
def setUp(self):
self.test_df = self.create_test_data_sample()
fc_parameters = {"length": None}
self.kind_to_fc_parameters = {"a": fc_parameters.copy(),
"b": fc_parameters.copy()}
def test_not_fitted(self):
augmenter = RelevantFeatureAugmenter()
X = pd.DataFrame()
self.assertRaises(RuntimeError, augmenter.transform, X)
def test_no_timeseries(self):
augmenter = RelevantFeatureAugmenter()
X = pd.DataFrame()
        y = pd.Series(dtype=float)
self.assertRaises(RuntimeError, augmenter.fit, X, y)
def test_nothing_relevant(self):
augmenter = RelevantFeatureAugmenter(kind_to_fc_parameters=self.kind_to_fc_parameters,
column_value="val", column_id="id", column_sort="sort",
column_kind="kind")
y = pd.Series({10: 1, 500: 0})
X = pd.DataFrame(index=[10, 500])
augmenter.set_timeseries_container(self.test_df)
augmenter.fit(X, y)
transformed_X = augmenter.transform(X.copy())
self.assertEqual(list(transformed_X.columns), [])
self.assertEqual(list(transformed_X.index), list(X.index))
def test_evaluate_only_added_features_true(self):
"""
The boolean flag `filter_only_tsfresh_features` makes sure that only the time-series-based features
are filtered. This unit test checks that behaviour.
"""
augmenter = RelevantFeatureAugmenter(kind_to_fc_parameters=self.kind_to_fc_parameters,
filter_only_tsfresh_features=True,
column_value="val", column_id="id", column_sort="sort", column_kind="kind")
y = pd.Series({10: 1, 500: 0})
X = | pd.DataFrame(index=[10, 500]) | pandas.DataFrame |
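# A short, self-contained usage sketch mirroring the call sequence exercised
# in the tests above (set_timeseries_container -> fit -> transform). The toy
# column names and data below are illustrative only; with such a tiny sample
# the relevance filter will typically keep no features, as in
# test_nothing_relevant.
import numpy as np
import pandas as pd
from tsfresh.transformers.relevant_feature_augmenter import RelevantFeatureAugmenter


def _augmenter_usage_sketch():
    ids = [1, 2, 3, 4]
    # long-format container: one kind 'a', ten observations per id
    timeseries = pd.DataFrame({
        "id": np.repeat(ids, 10),
        "sort": np.tile(np.arange(10), len(ids)),
        "kind": "a",
        "val": np.random.rand(40),
    })
    X = pd.DataFrame(index=ids)               # design matrix indexed by id
    y = pd.Series([0, 1, 0, 1], index=ids)    # binary target
    augmenter = RelevantFeatureAugmenter(
        kind_to_fc_parameters={"a": {"length": None}},
        column_id="id", column_sort="sort",
        column_kind="kind", column_value="val")
    augmenter.set_timeseries_container(timeseries)
    augmenter.fit(X, y)                        # extract + filter against y
    return augmenter.transform(X.copy())       # adds only relevant columns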
import click
import cooler
from hicmatrix import HiCMatrix as hm
import pandas as pd
from hicprediction.createTrainingSet import maskFunc
import numpy as np
from hicprediction.predict import getCorrelation
from hicprediction.utilities import getResultFileColumnNames
import sklearn.metrics as metrics
@click.option('-i1','--infile1',required=True,
type=click.Path(exists=True,dir_okay=False,readable=True),
help="Cooler matrix 1, 'Prediction', should be adjusted to only one chromosome")
@click.option('-i2','--infile2',required=True,
type=click.Path(exists=True,dir_okay=False,readable=True),
help="Cooler matrix 2, 'Target', should be adjusted to only one chromsome")
@click.option('-ws','--windowsize', type=click.IntRange(min=1), default=1e6,
required=False, help="window size in basepairs")
@click.option('-o','--outfile',required=True,
type=click.Path(file_okay=True,writable=True),
help="path and filename for outfile")
@click.option('-pct','--predictionCellType',type=str, default="unknown", help="Cell type for prediction")
@click.option('-mct','--modelCellType', type=str, default="unknown", help="Cell type(s) of model")
@click.option('-mchr', '--modelChromosome', type=str, default="unknown", help="Chromosome used for training, e.g. chr1")
@click.command()
def computeMetrics(infile1,infile2,windowsize,outfile, predictioncelltype, modelcelltype, modelchromosome):
#try loading HiC matrices
try:
hicMatrix1 = hm.hiCMatrix(infile1)
hicMatrix2 = hm.hiCMatrix(infile2)
except Exception as e:
print(e)
msg = "Could not load matrices, probably no cooler format"
raise SystemExit(msg)
#check bin sizes, must be equal / same matrix resolution
binSize1 = hicMatrix1.getBinSize()
binSize2 = hicMatrix2.getBinSize()
if binSize1 != binSize2:
msg = "Aborting. Bin sizes not equal.\n"
msg += "Bin size 1: {0:d}, bin size 2: {0:d}"
msg = msg.format(binSize1, binSize2)
raise SystemExit(msg)
numberOfDiagonals = int(np.round(windowsize/binSize1))
if numberOfDiagonals < 1:
msg = "Window size must be larger than bin size of matrices.\n"
msg += "Remember to specify window in basepairs, not bins."
raise SystemExit(msg)
#check chromosomes
chromList1 = hicMatrix1.getChrNames()
chromList2 = hicMatrix2.getChrNames()
if chromList1 and chromList2:
chrom1Str = str(chromList1[0])
chrom2Str = str(chromList2[0])
if chrom1Str != chrom2Str:
msg = "Aborting, chromosomes are not the same: {:s} vs. {:s}"
msg = msg.format(chrom1Str, chrom2Str)
raise SystemExit(msg)
if len(chromList1) != 1 or len(chromList2) != 1:
msg = "Warning, more than one chromosome in the matrix\n"
msg += "Consider using e.g. hicAdjustMatrix with --keep on the desired chromosome.\n"
msg += "Only taking the first chrom, {:s}"
msg = msg.format(chrom1Str)
print(msg)
else:
msg = "Aborting, no chromosomes found in matrix"
raise SystemExit(msg)
sparseMatrix1 = hicMatrix1.matrix
sparseMatrix2 = hicMatrix2.matrix
shape1 = sparseMatrix1.shape
shape2 = sparseMatrix2.shape
if shape1 != shape2:
msg = "Aborting. Shapes of matrices are not equal.\n"
msg += "Shape 1: ({:d},{:d}); Shape 2: ({:d},{:d})"
msg = msg.format(shape1[0],shape1[1],shape2[0],shape2[1])
raise SystemExit(msg)
if numberOfDiagonals > shape1[0]-1:
msg = "Aborting. Window size {0:d} larger than matrix size {:d}"
msg = msg.format(numberOfDiagonals, shape1[0]-1)
raise SystemExit(msg)
trapezIndices = np.mask_indices(shape1[0],maskFunc,k=numberOfDiagonals)
reads1 = np.array(sparseMatrix1[trapezIndices])[0]
reads2 = np.array(sparseMatrix2[trapezIndices])[0]
matrixDf = pd.DataFrame(columns=['first','second','distance','reads1','reads2'])
matrixDf['first'] = np.uint32(trapezIndices[0])
matrixDf['second'] = np.uint32(trapezIndices[1])
matrixDf['distance'] = np.uint32(matrixDf['second'] - matrixDf['first'])
matrixDf['reads1'] = np.float32(reads1)
matrixDf['reads2'] = np.float32(reads2)
matrixDf.fillna(0, inplace=True)
pearsonAucIndices, pearsonAucValues = getCorrelation(matrixDf,'distance', 'reads1', 'reads2', 'pearson')
pearsonAucScore = metrics.auc(pearsonAucIndices, pearsonAucValues)
spearmanAucIndices, spearmanAucValues = getCorrelation(matrixDf,'distance', 'reads1', 'reads2', 'spearman')
spearmanAucScore = metrics.auc(spearmanAucIndices, spearmanAucValues)
corrScoreOPredicted_Pearson = matrixDf[['reads1','reads2']].corr(method= \
'pearson').iloc[0::2,-1].values[0]
corrScoreOPredicted_Spearman= matrixDf[['reads1', 'reads2']].corr(method= \
'spearman').iloc[0::2,-1].values[0]
print("PearsonAUC", pearsonAucScore)
print("SpearmanAUC", spearmanAucScore)
columns = getResultFileColumnNames(sorted(list(matrixDf.distance.unique())))
resultsDf = | pd.DataFrame(columns=columns) | pandas.DataFrame |
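# Standalone sketch of the idea behind the AUC metric computed above: take a
# Pearson correlation per genomic distance and integrate it over the
# normalized distance axis. The exact normalization used by hicprediction's
# getCorrelation / sklearn's auc may differ; this only illustrates the concept.
import numpy as np
import pandas as pd


def distance_stratified_pearson_auc(df, dist_col="distance",
                                    col1="reads1", col2="reads2"):
    corr_per_distance = (
        df.groupby(dist_col)
          .apply(lambda g: g[col1].corr(g[col2], method="pearson"))
          .dropna()          # distances with a single pair yield NaN
          .sort_index()
    )
    if len(corr_per_distance) < 2:
        return float("nan")
    # normalize distances to [0, 1] so areas are comparable across windows
    x = corr_per_distance.index.to_numpy(dtype=float)
    x = (x - x.min()) / (x.max() - x.min())
    return float(np.trapz(corr_per_distance.to_numpy(), x))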
# -*- coding: utf-8 -*-
import csv
import os
import platform
import codecs
import re
import sys
from datetime import datetime
import pytest
import numpy as np
from pandas._libs.lib import Timestamp
import pandas as pd
import pandas.util.testing as tm
from pandas import DataFrame, Series, Index, MultiIndex
from pandas import compat
from pandas.compat import (StringIO, BytesIO, PY3,
range, lrange, u)
from pandas.errors import DtypeWarning, EmptyDataError, ParserError
from pandas.io.common import URLError
from pandas.io.parsers import TextFileReader, TextParser
class ParserTests(object):
"""
Want to be able to test either C+Cython or Python+Cython parsers
"""
data1 = """index,A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
qux,12,13,14,15
foo2,12,13,14,15
bar2,12,13,14,15
"""
def test_empty_decimal_marker(self):
data = """A|B|C
1|2,334|5
10|13|10.
"""
# Parsers support only length-1 decimals
msg = 'Only length-1 decimal markers supported'
with tm.assert_raises_regex(ValueError, msg):
self.read_csv(StringIO(data), decimal='')
def test_bad_stream_exception(self):
# Issue 13652:
# This test validates that both python engine
# and C engine will raise UnicodeDecodeError instead of
# c engine raising ParserError and swallowing exception
# that caused read to fail.
handle = open(self.csv_shiftjs, "rb")
codec = codecs.lookup("utf-8")
utf8 = codecs.lookup('utf-8')
# stream must be binary UTF8
stream = codecs.StreamRecoder(
handle, utf8.encode, utf8.decode, codec.streamreader,
codec.streamwriter)
if compat.PY3:
msg = "'utf-8' codec can't decode byte"
else:
msg = "'utf8' codec can't decode byte"
with tm.assert_raises_regex(UnicodeDecodeError, msg):
self.read_csv(stream)
stream.close()
def test_read_csv(self):
if not compat.PY3:
if compat.is_platform_windows():
prefix = u("file:///")
else:
prefix = u("file://")
fname = prefix + compat.text_type(self.csv1)
self.read_csv(fname, index_col=0, parse_dates=True)
def test_1000_sep(self):
data = """A|B|C
1|2,334|5
10|13|10.
"""
expected = DataFrame({
'A': [1, 10],
'B': [2334, 13],
'C': [5, 10.]
})
df = self.read_csv(StringIO(data), sep='|', thousands=',')
tm.assert_frame_equal(df, expected)
df = self.read_table(StringIO(data), sep='|', thousands=',')
tm.assert_frame_equal(df, expected)
def test_squeeze(self):
data = """\
a,1
b,2
c,3
"""
idx = Index(['a', 'b', 'c'], name=0)
expected = Series([1, 2, 3], name=1, index=idx)
result = self.read_table(StringIO(data), sep=',', index_col=0,
header=None, squeeze=True)
assert isinstance(result, Series)
tm.assert_series_equal(result, expected)
def test_squeeze_no_view(self):
# see gh-8217
# Series should not be a view
data = """time,data\n0,10\n1,11\n2,12\n4,14\n5,15\n3,13"""
result = self.read_csv(StringIO(data), index_col='time', squeeze=True)
assert not result._is_view
def test_malformed(self):
# see gh-6607
# all
data = """ignore
A,B,C
1,2,3 # comment
1,2,3,4,5
2,3,4
"""
msg = 'Expected 3 fields in line 4, saw 5'
with tm.assert_raises_regex(Exception, msg):
self.read_table(StringIO(data), sep=',',
header=1, comment='#')
# first chunk
data = """ignore
A,B,C
skip
1,2,3
3,5,10 # comment
1,2,3,4,5
2,3,4
"""
msg = 'Expected 3 fields in line 6, saw 5'
with tm.assert_raises_regex(Exception, msg):
it = self.read_table(StringIO(data), sep=',',
header=1, comment='#',
iterator=True, chunksize=1,
skiprows=[2])
it.read(5)
# middle chunk
data = """ignore
A,B,C
skip
1,2,3
3,5,10 # comment
1,2,3,4,5
2,3,4
"""
msg = 'Expected 3 fields in line 6, saw 5'
with tm.assert_raises_regex(Exception, msg):
it = self.read_table(StringIO(data), sep=',', header=1,
comment='#', iterator=True, chunksize=1,
skiprows=[2])
it.read(3)
# last chunk
data = """ignore
A,B,C
skip
1,2,3
3,5,10 # comment
1,2,3,4,5
2,3,4
"""
msg = 'Expected 3 fields in line 6, saw 5'
with tm.assert_raises_regex(Exception, msg):
it = self.read_table(StringIO(data), sep=',', header=1,
comment='#', iterator=True, chunksize=1,
skiprows=[2])
it.read()
# skipfooter is not supported with the C parser yet
if self.engine == 'python':
# skipfooter
data = """ignore
A,B,C
1,2,3 # comment
1,2,3,4,5
2,3,4
footer
"""
msg = 'Expected 3 fields in line 4, saw 5'
with tm.assert_raises_regex(Exception, msg):
self.read_table(StringIO(data), sep=',',
header=1, comment='#',
skipfooter=1)
def test_quoting(self):
bad_line_small = """printer\tresult\tvariant_name
Klosterdruckerei\tKlosterdruckerei <Salem> (1611-1804)\tMuller, Jacob
Klosterdruckerei\tKlosterdruckerei <Salem> (1611-1804)\tMuller, Jakob
Klosterdruckerei\tKlosterdruckerei <Kempten> (1609-1805)\t"Furststiftische Hofdruckerei, <Kempten""
Klosterdruckerei\tKlosterdruckerei <Kempten> (1609-1805)\tGaller, Alois
Klosterdruckerei\tKlosterdruckerei <Kempten> (1609-1805)\tHochfurstliche Buchhandlung <Kempten>""" # noqa
pytest.raises(Exception, self.read_table, StringIO(bad_line_small),
sep='\t')
good_line_small = bad_line_small + '"'
df = self.read_table(StringIO(good_line_small), sep='\t')
assert len(df) == 3
def test_unnamed_columns(self):
data = """A,B,C,,
1,2,3,4,5
6,7,8,9,10
11,12,13,14,15
"""
expected = np.array([[1, 2, 3, 4, 5],
[6, 7, 8, 9, 10],
[11, 12, 13, 14, 15]], dtype=np.int64)
df = self.read_table(StringIO(data), sep=',')
tm.assert_almost_equal(df.values, expected)
tm.assert_index_equal(df.columns,
Index(['A', 'B', 'C', 'Unnamed: 3',
'Unnamed: 4']))
def test_csv_mixed_type(self):
data = """A,B,C
a,1,2
b,3,4
c,4,5
"""
expected = DataFrame({'A': ['a', 'b', 'c'],
'B': [1, 3, 4],
'C': [2, 4, 5]})
out = self.read_csv(StringIO(data))
tm.assert_frame_equal(out, expected)
def test_read_csv_dataframe(self):
df = self.read_csv(self.csv1, index_col=0, parse_dates=True)
df2 = self.read_table(self.csv1, sep=',', index_col=0,
parse_dates=True)
tm.assert_index_equal(df.columns, pd.Index(['A', 'B', 'C', 'D']))
assert df.index.name == 'index'
assert isinstance(
df.index[0], (datetime, np.datetime64, Timestamp))
assert df.values.dtype == np.float64
tm.assert_frame_equal(df, df2)
def test_read_csv_no_index_name(self):
df = self.read_csv(self.csv2, index_col=0, parse_dates=True)
df2 = self.read_table(self.csv2, sep=',', index_col=0,
parse_dates=True)
tm.assert_index_equal(df.columns,
pd.Index(['A', 'B', 'C', 'D', 'E']))
assert isinstance(df.index[0], (datetime, np.datetime64, Timestamp))
assert df.loc[:, ['A', 'B', 'C', 'D']].values.dtype == np.float64
tm.assert_frame_equal(df, df2)
def test_read_table_unicode(self):
fin = BytesIO(u('\u0141aski, Jan;1').encode('utf-8'))
df1 = self.read_table(fin, sep=";", encoding="utf-8", header=None)
assert isinstance(df1[0].values[0], compat.text_type)
def test_read_table_wrong_num_columns(self):
# too few!
data = """A,B,C,D,E,F
1,2,3,4,5,6
6,7,8,9,10,11,12
11,12,13,14,15,16
"""
pytest.raises(ValueError, self.read_csv, StringIO(data))
def test_read_duplicate_index_explicit(self):
data = """index,A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
qux,12,13,14,15
foo,12,13,14,15
bar,12,13,14,15
"""
result = self.read_csv(StringIO(data), index_col=0)
expected = self.read_csv(StringIO(data)).set_index(
'index', verify_integrity=False)
tm.assert_frame_equal(result, expected)
result = self.read_table(StringIO(data), sep=',', index_col=0)
expected = self.read_table(StringIO(data), sep=',', ).set_index(
'index', verify_integrity=False)
tm.assert_frame_equal(result, expected)
def test_read_duplicate_index_implicit(self):
data = """A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
qux,12,13,14,15
foo,12,13,14,15
bar,12,13,14,15
"""
# make sure an error isn't thrown
self.read_csv(StringIO(data))
self.read_table(StringIO(data), sep=',')
def test_parse_bools(self):
data = """A,B
True,1
False,2
True,3
"""
data = self.read_csv(StringIO(data))
assert data['A'].dtype == np.bool_
data = """A,B
YES,1
no,2
yes,3
No,3
Yes,3
"""
data = self.read_csv(StringIO(data),
true_values=['yes', 'Yes', 'YES'],
false_values=['no', 'NO', 'No'])
assert data['A'].dtype == np.bool_
data = """A,B
TRUE,1
FALSE,2
TRUE,3
"""
data = self.read_csv(StringIO(data))
assert data['A'].dtype == np.bool_
data = """A,B
foo,bar
bar,foo"""
result = self.read_csv(StringIO(data), true_values=['foo'],
false_values=['bar'])
expected = DataFrame({'A': [True, False], 'B': [False, True]})
tm.assert_frame_equal(result, expected)
def test_int_conversion(self):
data = """A,B
1.0,1
2.0,2
3.0,3
"""
data = self.read_csv(StringIO(data))
assert data['A'].dtype == np.float64
assert data['B'].dtype == np.int64
def test_read_nrows(self):
expected = self.read_csv(StringIO(self.data1))[:3]
df = self.read_csv(StringIO(self.data1), nrows=3)
tm.assert_frame_equal(df, expected)
# see gh-10476
df = self.read_csv(StringIO(self.data1), nrows=3.0)
tm.assert_frame_equal(df, expected)
msg = r"'nrows' must be an integer >=0"
with tm.assert_raises_regex(ValueError, msg):
self.read_csv(StringIO(self.data1), nrows=1.2)
with tm.assert_raises_regex(ValueError, msg):
self.read_csv(StringIO(self.data1), nrows='foo')
with tm.assert_raises_regex(ValueError, msg):
self.read_csv(StringIO(self.data1), nrows=-1)
def test_read_chunksize(self):
reader = self.read_csv(StringIO(self.data1), index_col=0, chunksize=2)
df = self.read_csv(StringIO(self.data1), index_col=0)
chunks = list(reader)
tm.assert_frame_equal(chunks[0], df[:2])
tm.assert_frame_equal(chunks[1], df[2:4])
tm.assert_frame_equal(chunks[2], df[4:])
# with invalid chunksize value:
msg = r"'chunksize' must be an integer >=1"
with tm.assert_raises_regex(ValueError, msg):
self.read_csv(StringIO(self.data1), chunksize=1.3)
with tm.assert_raises_regex(ValueError, msg):
self.read_csv(StringIO(self.data1), chunksize='foo')
with tm.assert_raises_regex(ValueError, msg):
self.read_csv(StringIO(self.data1), chunksize=0)
def test_read_chunksize_and_nrows(self):
# gh-15755
# With nrows
reader = self.read_csv(StringIO(self.data1), index_col=0,
chunksize=2, nrows=5)
df = self.read_csv(StringIO(self.data1), index_col=0, nrows=5)
tm.assert_frame_equal(pd.concat(reader), df)
# chunksize > nrows
reader = self.read_csv(StringIO(self.data1), index_col=0,
chunksize=8, nrows=5)
df = self.read_csv(StringIO(self.data1), index_col=0, nrows=5)
tm.assert_frame_equal(pd.concat(reader), df)
# with changing "size":
reader = self.read_csv(StringIO(self.data1), index_col=0,
chunksize=8, nrows=5)
df = self.read_csv(StringIO(self.data1), index_col=0, nrows=5)
tm.assert_frame_equal(reader.get_chunk(size=2), df.iloc[:2])
tm.assert_frame_equal(reader.get_chunk(size=4), df.iloc[2:5])
with pytest.raises(StopIteration):
reader.get_chunk(size=3)
def test_read_chunksize_named(self):
reader = self.read_csv(
StringIO(self.data1), index_col='index', chunksize=2)
df = self.read_csv(StringIO(self.data1), index_col='index')
chunks = list(reader)
tm.assert_frame_equal(chunks[0], df[:2])
tm.assert_frame_equal(chunks[1], df[2:4])
tm.assert_frame_equal(chunks[2], df[4:])
def test_get_chunk_passed_chunksize(self):
data = """A,B,C
1,2,3
4,5,6
7,8,9
1,2,3"""
result = self.read_csv(StringIO(data), chunksize=2)
piece = result.get_chunk()
assert len(piece) == 2
def test_read_chunksize_generated_index(self):
# GH 12185
reader = self.read_csv(StringIO(self.data1), chunksize=2)
df = self.read_csv(StringIO(self.data1))
tm.assert_frame_equal(pd.concat(reader), df)
reader = self.read_csv(StringIO(self.data1), chunksize=2, index_col=0)
df = self.read_csv(StringIO(self.data1), index_col=0)
tm.assert_frame_equal(pd.concat(reader), df)
def test_read_text_list(self):
data = """A,B,C\nfoo,1,2,3\nbar,4,5,6"""
as_list = [['A', 'B', 'C'], ['foo', '1', '2', '3'], ['bar',
'4', '5', '6']]
df = self.read_csv(StringIO(data), index_col=0)
parser = TextParser(as_list, index_col=0, chunksize=2)
chunk = parser.read(None)
tm.assert_frame_equal(chunk, df)
def test_iterator(self):
# See gh-6607
reader = self.read_csv(StringIO(self.data1), index_col=0,
iterator=True)
df = self.read_csv(StringIO(self.data1), index_col=0)
chunk = reader.read(3)
tm.assert_frame_equal(chunk, df[:3])
last_chunk = reader.read(5)
tm.assert_frame_equal(last_chunk, df[3:])
# pass list
lines = list(csv.reader(StringIO(self.data1)))
parser = TextParser(lines, index_col=0, chunksize=2)
df = self.read_csv(StringIO(self.data1), index_col=0)
chunks = list(parser)
tm.assert_frame_equal(chunks[0], df[:2])
tm.assert_frame_equal(chunks[1], df[2:4])
tm.assert_frame_equal(chunks[2], df[4:])
# pass skiprows
parser = TextParser(lines, index_col=0, chunksize=2, skiprows=[1])
chunks = list(parser)
tm.assert_frame_equal(chunks[0], df[1:3])
treader = self.read_table(StringIO(self.data1), sep=',', index_col=0,
iterator=True)
assert isinstance(treader, TextFileReader)
# gh-3967: stopping iteration when chunksize is specified
data = """A,B,C
foo,1,2,3
bar,4,5,6
baz,7,8,9
"""
reader = self.read_csv(StringIO(data), iterator=True)
result = list(reader)
expected = DataFrame(dict(A=[1, 4, 7], B=[2, 5, 8], C=[
3, 6, 9]), index=['foo', 'bar', 'baz'])
tm.assert_frame_equal(result[0], expected)
# chunksize = 1
reader = self.read_csv(StringIO(data), chunksize=1)
result = list(reader)
expected = DataFrame(dict(A=[1, 4, 7], B=[2, 5, 8], C=[
3, 6, 9]), index=['foo', 'bar', 'baz'])
assert len(result) == 3
tm.assert_frame_equal(pd.concat(result), expected)
# skipfooter is not supported with the C parser yet
if self.engine == 'python':
# test bad parameter (skipfooter)
reader = self.read_csv(StringIO(self.data1), index_col=0,
iterator=True, skipfooter=1)
pytest.raises(ValueError, reader.read, 3)
def test_pass_names_with_index(self):
lines = self.data1.split('\n')
no_header = '\n'.join(lines[1:])
# regular index
names = ['index', 'A', 'B', 'C', 'D']
df = self.read_csv(StringIO(no_header), index_col=0, names=names)
expected = self.read_csv(StringIO(self.data1), index_col=0)
tm.assert_frame_equal(df, expected)
# multi index
data = """index1,index2,A,B,C,D
foo,one,2,3,4,5
foo,two,7,8,9,10
foo,three,12,13,14,15
bar,one,12,13,14,15
bar,two,12,13,14,15
"""
lines = data.split('\n')
no_header = '\n'.join(lines[1:])
names = ['index1', 'index2', 'A', 'B', 'C', 'D']
df = self.read_csv(StringIO(no_header), index_col=[0, 1],
names=names)
expected = self.read_csv(StringIO(data), index_col=[0, 1])
tm.assert_frame_equal(df, expected)
df = self.read_csv(StringIO(data), index_col=['index1', 'index2'])
tm.assert_frame_equal(df, expected)
def test_multi_index_no_level_names(self):
data = """index1,index2,A,B,C,D
foo,one,2,3,4,5
foo,two,7,8,9,10
foo,three,12,13,14,15
bar,one,12,13,14,15
bar,two,12,13,14,15
"""
data2 = """A,B,C,D
foo,one,2,3,4,5
foo,two,7,8,9,10
foo,three,12,13,14,15
bar,one,12,13,14,15
bar,two,12,13,14,15
"""
lines = data.split('\n')
no_header = '\n'.join(lines[1:])
names = ['A', 'B', 'C', 'D']
df = self.read_csv(StringIO(no_header), index_col=[0, 1],
header=None, names=names)
expected = self.read_csv(StringIO(data), index_col=[0, 1])
tm.assert_frame_equal(df, expected, check_names=False)
# 2 implicit first cols
df2 = self.read_csv(StringIO(data2))
tm.assert_frame_equal(df2, df)
# reverse order of index
df = self.read_csv(StringIO(no_header), index_col=[1, 0], names=names,
header=None)
expected = self.read_csv(StringIO(data), index_col=[1, 0])
tm.assert_frame_equal(df, expected, check_names=False)
def test_multi_index_blank_df(self):
# GH 14545
data = """a,b
"""
df = self.read_csv(StringIO(data), header=[0])
expected = DataFrame(columns=['a', 'b'])
tm.assert_frame_equal(df, expected)
round_trip = self.read_csv(StringIO(
expected.to_csv(index=False)), header=[0])
tm.assert_frame_equal(round_trip, expected)
data_multiline = """a,b
c,d
"""
df2 = self.read_csv(StringIO(data_multiline), header=[0, 1])
cols = MultiIndex.from_tuples([('a', 'c'), ('b', 'd')])
expected2 = DataFrame(columns=cols)
tm.assert_frame_equal(df2, expected2)
round_trip = self.read_csv(StringIO(
expected2.to_csv(index=False)), header=[0, 1])
tm.assert_frame_equal(round_trip, expected2)
def test_no_unnamed_index(self):
data = """ id c0 c1 c2
0 1 0 a b
1 2 0 c d
2 2 2 e f
"""
df = self.read_table(StringIO(data), sep=' ')
assert df.index.name is None
def test_read_csv_parse_simple_list(self):
text = """foo
bar baz
qux foo
foo
bar"""
df = self.read_csv(StringIO(text), header=None)
expected = DataFrame({0: ['foo', 'bar baz', 'qux foo',
'foo', 'bar']})
tm.assert_frame_equal(df, expected)
@tm.network
def test_url(self):
# HTTP(S)
url = ('https://raw.github.com/pandas-dev/pandas/master/'
'pandas/tests/io/parser/data/salaries.csv')
url_table = self.read_table(url)
dirpath = tm.get_data_path()
localtable = os.path.join(dirpath, 'salaries.csv')
local_table = self.read_table(localtable)
tm.assert_frame_equal(url_table, local_table)
# TODO: ftp testing
@pytest.mark.slow
def test_file(self):
dirpath = tm.get_data_path()
localtable = os.path.join(dirpath, 'salaries.csv')
local_table = self.read_table(localtable)
try:
url_table = self.read_table('file://localhost/' + localtable)
except URLError:
# fails on some systems
pytest.skip("failing on %s" %
' '.join(platform.uname()).strip())
tm.assert_frame_equal(url_table, local_table)
def test_path_pathlib(self):
df = tm.makeDataFrame()
result = tm.round_trip_pathlib(df.to_csv,
lambda p: self.read_csv(p, index_col=0))
tm.assert_frame_equal(df, result)
def test_path_localpath(self):
df = tm.makeDataFrame()
result = tm.round_trip_localpath(
df.to_csv,
lambda p: self.read_csv(p, index_col=0))
tm.assert_frame_equal(df, result)
def test_nonexistent_path(self):
# gh-2428: pls no segfault
# gh-14086: raise more helpful FileNotFoundError
path = '%s.csv' % tm.rands(10)
pytest.raises(compat.FileNotFoundError, self.read_csv, path)
def test_missing_trailing_delimiters(self):
data = """A,B,C,D
1,2,3,4
1,3,3,
1,4,5"""
result = self.read_csv(StringIO(data))
assert result['D'].isna()[1:].all()
def test_skipinitialspace(self):
s = ('"09-Apr-2012", "01:10:18.300", 2456026.548822908, 12849, '
'1.00361, 1.12551, 330.65659, 0355626618.16711, 73.48821, '
'314.11625, 1917.09447, 179.71425, 80.000, 240.000, -350, '
'70.06056, 344.98370, 1, 1, -0.689265, -0.692787, '
'0.212036, 14.7674, 41.605, -9999.0, -9999.0, '
'-9999.0, -9999.0, -9999.0, -9999.0, 000, 012, 128')
sfile = StringIO(s)
# it's 33 columns
result = self.read_csv(sfile, names=lrange(33), na_values=['-9999.0'],
header=None, skipinitialspace=True)
assert pd.isna(result.iloc[0, 29])
def test_utf16_bom_skiprows(self):
# #2298
data = u("""skip this
skip this too
A\tB\tC
1\t2\t3
4\t5\t6""")
data2 = u("""skip this
skip this too
A,B,C
1,2,3
4,5,6""")
path = '__%s__.csv' % tm.rands(10)
with tm.ensure_clean(path) as path:
for sep, dat in [('\t', data), (',', data2)]:
for enc in ['utf-16', 'utf-16le', 'utf-16be']:
bytes = dat.encode(enc)
with open(path, 'wb') as f:
f.write(bytes)
s = BytesIO(dat.encode('utf-8'))
if compat.PY3:
# somewhat False since the code never sees bytes
from io import TextIOWrapper
s = TextIOWrapper(s, encoding='utf-8')
result = self.read_csv(path, encoding=enc, skiprows=2,
sep=sep)
expected = self.read_csv(s, encoding='utf-8', skiprows=2,
sep=sep)
s.close()
tm.assert_frame_equal(result, expected)
def test_utf16_example(self):
path = tm.get_data_path('utf16_ex.txt')
# it works! and is the right length
result = self.read_table(path, encoding='utf-16')
assert len(result) == 50
if not compat.PY3:
buf = BytesIO(open(path, 'rb').read())
result = self.read_table(buf, encoding='utf-16')
assert len(result) == 50
def test_unicode_encoding(self):
pth = tm.get_data_path('unicode_series.csv')
result = self.read_csv(pth, header=None, encoding='latin-1')
result = result.set_index(0)
got = result[1][1632]
expected = u('\xc1 k\xf6ldum klaka (Cold Fever) (1994)')
assert got == expected
def test_trailing_delimiters(self):
# #2442. grumble grumble
data = """A,B,C
1,2,3,
4,5,6,
7,8,9,"""
result = self.read_csv(StringIO(data), index_col=False)
expected = DataFrame({'A': [1, 4, 7], 'B': [2, 5, 8],
'C': [3, 6, 9]})
tm.assert_frame_equal(result, expected)
def test_escapechar(self):
# http://stackoverflow.com/questions/13824840/feature-request-for-
# pandas-read-csv
data = '''SEARCH_TERM,ACTUAL_URL
"bra tv bord","http://www.ikea.com/se/sv/catalog/categories/departments/living_room/10475/?se%7cps%7cnonbranded%7cvardagsrum%7cgoogle%7ctv_bord"
"tv p\xc3\xa5 hjul","http://www.ikea.com/se/sv/catalog/categories/departments/living_room/10475/?se%7cps%7cnonbranded%7cvardagsrum%7cgoogle%7ctv_bord"
"SLAGBORD, \\"Bergslagen\\", IKEA:s 1700-tals serie","http://www.ikea.com/se/sv/catalog/categories/departments/living_room/10475/?se%7cps%7cnonbranded%7cvardagsrum%7cgoogle%7ctv_bord"''' # noqa
result = self.read_csv(StringIO(data), escapechar='\\',
quotechar='"', encoding='utf-8')
assert result['SEARCH_TERM'][2] == ('SLAGBORD, "Bergslagen", '
'IKEA:s 1700-tals serie')
tm.assert_index_equal(result.columns,
Index(['SEARCH_TERM', 'ACTUAL_URL']))
def test_int64_min_issues(self):
# #2599
data = 'A,B\n0,0\n0,'
result = self.read_csv(StringIO(data))
expected = DataFrame({'A': [0, 0], 'B': [0, np.nan]})
tm.assert_frame_equal(result, expected)
def test_parse_integers_above_fp_precision(self):
data = """Numbers
17007000002000191
17007000002000191
17007000002000191
17007000002000191
17007000002000192
17007000002000192
17007000002000192
17007000002000192
17007000002000192
17007000002000194"""
result = self.read_csv(StringIO(data))
expected = DataFrame({'Numbers': [17007000002000191,
17007000002000191,
17007000002000191,
17007000002000191,
17007000002000192,
17007000002000192,
17007000002000192,
17007000002000192,
17007000002000192,
17007000002000194]})
tm.assert_series_equal(result['Numbers'], expected['Numbers'])
def test_chunks_have_consistent_numerical_type(self):
integers = [str(i) for i in range(499999)]
data = "a\n" + "\n".join(integers + ["1.0", "2.0"] + integers)
with tm.assert_produces_warning(False):
df = self.read_csv(StringIO(data))
# Assert that types were coerced.
assert type(df.a[0]) is np.float64
assert df.a.dtype == np.float
def test_warn_if_chunks_have_mismatched_type(self):
warning_type = False
integers = [str(i) for i in range(499999)]
data = "a\n" + "\n".join(integers + ['a', 'b'] + integers)
# see gh-3866: if chunks are different types and can't
# be coerced using numerical types, then issue warning.
if self.engine == 'c' and self.low_memory:
warning_type = DtypeWarning
with tm.assert_produces_warning(warning_type):
df = self.read_csv(StringIO(data))
assert df.a.dtype == np.object
def test_integer_overflow_bug(self):
# see gh-2601
data = "65248E10 11\n55555E55 22\n"
result = self.read_csv(StringIO(data), header=None, sep=' ')
assert result[0].dtype == np.float64
result = self.read_csv(StringIO(data), header=None, sep=r'\s+')
assert result[0].dtype == np.float64
def test_catch_too_many_names(self):
# see gh-5156
data = """\
1,2,3
4,,6
7,8,9
10,11,12\n"""
pytest.raises(ValueError, self.read_csv, StringIO(data),
header=0, names=['a', 'b', 'c', 'd'])
def test_ignore_leading_whitespace(self):
# see gh-3374, gh-6607
data = ' a b c\n 1 2 3\n 4 5 6\n 7 8 9'
result = self.read_table(StringIO(data), sep=r'\s+')
expected = DataFrame({'a': [1, 4, 7], 'b': [2, 5, 8], 'c': [3, 6, 9]})
tm.assert_frame_equal(result, expected)
def test_chunk_begins_with_newline_whitespace(self):
# see gh-10022
data = '\n hello\nworld\n'
result = self.read_csv(StringIO(data), header=None)
assert len(result) == 2
# see gh-9735: this issue is C parser-specific (bug when
# parsing whitespace and characters at chunk boundary)
if self.engine == 'c':
chunk1 = 'a' * (1024 * 256 - 2) + '\na'
chunk2 = '\n a'
result = self.read_csv(StringIO(chunk1 + chunk2), header=None)
expected = DataFrame(['a' * (1024 * 256 - 2), 'a', ' a'])
tm.assert_frame_equal(result, expected)
def test_empty_with_index(self):
# see gh-10184
data = 'x,y'
result = self.read_csv(StringIO(data), index_col=0)
expected = DataFrame([], columns=['y'], index=Index([], name='x'))
tm.assert_frame_equal(result, expected)
def test_empty_with_multiindex(self):
# see gh-10467
data = 'x,y,z'
result = self.read_csv(StringIO(data), index_col=['x', 'y'])
expected = DataFrame([], columns=['z'],
index=MultiIndex.from_arrays(
[[]] * 2, names=['x', 'y']))
tm.assert_frame_equal(result, expected, check_index_type=False)
def test_empty_with_reversed_multiindex(self):
data = 'x,y,z'
result = self.read_csv(StringIO(data), index_col=[1, 0])
expected = DataFrame([], columns=['z'],
index=MultiIndex.from_arrays(
[[]] * 2, names=['y', 'x']))
tm.assert_frame_equal(result, expected, check_index_type=False)
def test_float_parser(self):
# see gh-9565
data = '45e-1,4.5,45.,inf,-inf'
result = self.read_csv(StringIO(data), header=None)
expected = DataFrame([[float(s) for s in data.split(',')]])
tm.assert_frame_equal(result, expected)
def test_scientific_no_exponent(self):
# see gh-12215
df = DataFrame.from_items([('w', ['2e']), ('x', ['3E']),
('y', ['42e']), ('z', ['632E'])])
data = df.to_csv(index=False)
for prec in self.float_precision_choices:
df_roundtrip = self.read_csv(
StringIO(data), float_precision=prec)
tm.assert_frame_equal(df_roundtrip, df)
def test_int64_overflow(self):
data = """ID
00013007854817840016671868
00013007854817840016749251
00013007854817840016754630
00013007854817840016781876
00013007854817840017028824
00013007854817840017963235
00013007854817840018860166"""
# 13007854817840016671868 > UINT64_MAX, so this
# will overflow and return object as the dtype.
result = self.read_csv(StringIO(data))
assert result['ID'].dtype == object
# 13007854817840016671868 > UINT64_MAX, so attempts
# to cast to either int64 or uint64 will result in
# an OverflowError being raised.
for conv in (np.int64, np.uint64):
pytest.raises(OverflowError, self.read_csv,
StringIO(data), converters={'ID': conv})
# These numbers fall right inside the int64-uint64 range,
# so they should be parsed as numbers, not strings.
ui_max = np.iinfo(np.uint64).max
i_max = np.iinfo(np.int64).max
i_min = np.iinfo(np.int64).min
for x in [i_max, i_min, ui_max]:
result = self.read_csv(StringIO(str(x)), header=None)
expected = DataFrame([x])
tm.assert_frame_equal(result, expected)
# These numbers fall just outside the int64-uint64 range,
# so they should be parsed as string.
too_big = ui_max + 1
too_small = i_min - 1
for x in [too_big, too_small]:
result = self.read_csv(StringIO(str(x)), header=None)
expected = DataFrame([str(x)])
tm.assert_frame_equal(result, expected)
# No numerical dtype can hold both negative and uint64 values,
# so they should be cast as string.
data = '-1\n' + str(2**63)
expected = DataFrame([str(-1), str(2**63)])
result = self.read_csv(StringIO(data), header=None)
tm.assert_frame_equal(result, expected)
data = str(2**63) + '\n-1'
expected = DataFrame([str(2**63), str(-1)])
result = self.read_csv(StringIO(data), header=None)
tm.assert_frame_equal(result, expected)
def test_empty_with_nrows_chunksize(self):
# see gh-9535
expected = DataFrame([], columns=['foo', 'bar'])
result = self.read_csv(StringIO('foo,bar\n'), nrows=10)
tm.assert_frame_equal(result, expected)
result = next(iter(self.read_csv(
StringIO('foo,bar\n'), chunksize=10)))
tm.assert_frame_equal(result, expected)
with tm.assert_produces_warning(
FutureWarning, check_stacklevel=False):
result = self.read_csv(StringIO('foo,bar\n'),
nrows=10, as_recarray=True)
result = DataFrame(result[2], columns=result[1],
index=result[0])
tm.assert_frame_equal(DataFrame.from_records(
result), expected, check_index_type=False)
with tm.assert_produces_warning(
FutureWarning, check_stacklevel=False):
result = next(iter(self.read_csv(StringIO('foo,bar\n'),
chunksize=10, as_recarray=True)))
result = DataFrame(result[2], columns=result[1], index=result[0])
tm.assert_frame_equal(DataFrame.from_records(result), expected,
check_index_type=False)
def test_eof_states(self):
# see gh-10728, gh-10548
# With skip_blank_lines = True
expected = DataFrame([[4, 5, 6]], columns=['a', 'b', 'c'])
# gh-10728: WHITESPACE_LINE
data = 'a,b,c\n4,5,6\n '
result = self.read_csv(StringIO(data))
tm.assert_frame_equal(result, expected)
# gh-10548: EAT_LINE_COMMENT
data = 'a,b,c\n4,5,6\n#comment'
result = self.read_csv(StringIO(data), comment='#')
tm.assert_frame_equal(result, expected)
# EAT_CRNL_NOP
data = 'a,b,c\n4,5,6\n\r'
result = self.read_csv(StringIO(data))
tm.assert_frame_equal(result, expected)
# EAT_COMMENT
data = 'a,b,c\n4,5,6#comment'
result = self.read_csv(StringIO(data), comment='#')
tm.assert_frame_equal(result, expected)
# SKIP_LINE
data = 'a,b,c\n4,5,6\nskipme'
result = self.read_csv(StringIO(data), skiprows=[2])
tm.assert_frame_equal(result, expected)
# With skip_blank_lines = False
# EAT_LINE_COMMENT
data = 'a,b,c\n4,5,6\n#comment'
result = self.read_csv(
StringIO(data), comment='#', skip_blank_lines=False)
expected = DataFrame([[4, 5, 6]], columns=['a', 'b', 'c'])
tm.assert_frame_equal(result, expected)
# IN_FIELD
data = 'a,b,c\n4,5,6\n '
result = self.read_csv(StringIO(data), skip_blank_lines=False)
expected = DataFrame(
[['4', 5, 6], [' ', None, None]], columns=['a', 'b', 'c'])
tm.assert_frame_equal(result, expected)
# EAT_CRNL
data = 'a,b,c\n4,5,6\n\r'
result = self.read_csv(StringIO(data), skip_blank_lines=False)
expected = DataFrame(
[[4, 5, 6], [None, None, None]], columns=['a', 'b', 'c'])
tm.assert_frame_equal(result, expected)
# Should produce exceptions
# ESCAPED_CHAR
data = "a,b,c\n4,5,6\n\\"
pytest.raises(Exception, self.read_csv,
StringIO(data), escapechar='\\')
# ESCAPE_IN_QUOTED_FIELD
data = 'a,b,c\n4,5,6\n"\\'
pytest.raises(Exception, self.read_csv,
StringIO(data), escapechar='\\')
# IN_QUOTED_FIELD
data = 'a,b,c\n4,5,6\n"'
pytest.raises(Exception, self.read_csv,
StringIO(data), escapechar='\\')
def test_uneven_lines_with_usecols(self):
# See gh-12203
csv = r"""a,b,c
0,1,2
3,4,5,6,7
8,9,10
"""
# make sure that an error is still thrown
# when the 'usecols' parameter is not provided
msg = r"Expected \d+ fields in line \d+, saw \d+"
with tm.assert_raises_regex(ValueError, msg):
df = self.read_csv(StringIO(csv))
expected = DataFrame({
'a': [0, 3, 8],
'b': [1, 4, 9]
})
usecols = [0, 1]
df = self.read_csv(StringIO(csv), usecols=usecols)
tm.assert_frame_equal(df, expected)
usecols = ['a', 'b']
df = self.read_csv(StringIO(csv), usecols=usecols)
tm.assert_frame_equal(df, expected)
def test_read_empty_with_usecols(self):
# See gh-12493
names = ['Dummy', 'X', 'Dummy_2']
usecols = names[1:2] # ['X']
# first, check to see that the response of
# parser when faced with no provided columns
# throws the correct error, with or without usecols
errmsg = "No columns to parse from file"
with tm.assert_raises_regex(EmptyDataError, errmsg):
self.read_csv(StringIO(''))
with tm.assert_raises_regex(EmptyDataError, errmsg):
self.read_csv(StringIO(''), usecols=usecols)
expected = DataFrame(columns=usecols, index=[0], dtype=np.float64)
df = self.read_csv(StringIO(',,'), names=names, usecols=usecols)
tm.assert_frame_equal(df, expected)
expected = DataFrame(columns=usecols)
df = self.read_csv(StringIO(''), names=names, usecols=usecols)
tm.assert_frame_equal(df, expected)
def test_trailing_spaces(self):
data = "A B C \nrandom line with trailing spaces \nskip\n1,2,3\n1,2.,4.\nrandom line with trailing tabs\t\t\t\n \n5.1,NaN,10.0\n" # noqa
expected = DataFrame([[1., 2., 4.],
[5.1, np.nan, 10.]])
# gh-8661, gh-8679: this should ignore six lines including
# lines with trailing whitespace and blank lines
df = self.read_csv(StringIO(data.replace(',', ' ')),
header=None, delim_whitespace=True,
skiprows=[0, 1, 2, 3, 5, 6], skip_blank_lines=True)
tm.assert_frame_equal(df, expected)
df = self.read_table(StringIO(data.replace(',', ' ')),
header=None, delim_whitespace=True,
skiprows=[0, 1, 2, 3, 5, 6],
skip_blank_lines=True)
tm.assert_frame_equal(df, expected)
# gh-8983: test skipping set of rows after a row with trailing spaces
expected = DataFrame({"A": [1., 5.1], "B": [2., np.nan],
"C": [4., 10]})
df = self.read_table(StringIO(data.replace(',', ' ')),
delim_whitespace=True,
skiprows=[1, 2, 3, 5, 6], skip_blank_lines=True)
tm.assert_frame_equal(df, expected)
def test_raise_on_sep_with_delim_whitespace(self):
# see gh-6607
data = 'a b c\n1 2 3'
with tm.assert_raises_regex(ValueError,
'you can only specify one'):
self.read_table(StringIO(data), sep=r'\s', delim_whitespace=True)
def test_single_char_leading_whitespace(self):
# see gh-9710
data = """\
MyColumn
a
b
a
b\n"""
expected = DataFrame({'MyColumn': list('abab')})
result = self.read_csv(StringIO(data), delim_whitespace=True,
skipinitialspace=True)
tm.assert_frame_equal(result, expected)
result = self.read_csv(StringIO(data), skipinitialspace=True)
tm.assert_frame_equal(result, expected)
def test_empty_lines(self):
data = """\
A,B,C
1,2.,4.
5.,NaN,10.0
-70,.4,1
"""
expected = np.array([[1., 2., 4.],
[5., np.nan, 10.],
[-70., .4, 1.]])
df = self.read_csv(StringIO(data))
tm.assert_numpy_array_equal(df.values, expected)
df = self.read_csv(StringIO(data.replace(',', ' ')), sep=r'\s+')
tm.assert_numpy_array_equal(df.values, expected)
expected = np.array([[1., 2., 4.],
[np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan],
[5., np.nan, 10.],
[np.nan, np.nan, np.nan],
[-70., .4, 1.]])
df = self.read_csv(StringIO(data), skip_blank_lines=False)
tm.assert_numpy_array_equal(df.values, expected)
def test_whitespace_lines(self):
data = """
\t \t\t
\t
A,B,C
\t 1,2.,4.
5.,NaN,10.0
"""
expected = np.array([[1, 2., 4.],
[5., np.nan, 10.]])
df = self.read_csv(StringIO(data))
tm.assert_numpy_array_equal(df.values, expected)
def test_regex_separator(self):
# see gh-6607
data = """ A B C D
a 1 2 3 4
b 1 2 3 4
c 1 2 3 4
"""
df = self.read_table(StringIO(data), sep=r'\s+')
expected = self.read_csv(StringIO(re.sub('[ ]+', ',', data)),
index_col=0)
assert expected.index.name is None
tm.assert_frame_equal(df, expected)
data = ' a b c\n1 2 3 \n4 5 6\n 7 8 9'
result = self.read_table(StringIO(data), sep=r'\s+')
expected = DataFrame([[1, 2, 3], [4, 5, 6], [7, 8, 9]],
columns=['a', 'b', 'c'])
tm.assert_frame_equal(result, expected)
@tm.capture_stdout
def test_verbose_import(self):
text = """a,b,c,d
one,1,2,3
one,1,2,3
,1,2,3
one,1,2,3
,1,2,3
,1,2,3
one,1,2,3
two,1,2,3"""
# Engines are verbose in different ways.
self.read_csv(StringIO(text), verbose=True)
output = sys.stdout.getvalue()
if self.engine == 'c':
assert 'Tokenization took:' in output
assert 'Parser memory cleanup took:' in output
else: # Python engine
assert output == 'Filled 3 NA values in column a\n'
# Reset the stdout buffer.
sys.stdout = StringIO()
text = """a,b,c,d
one,1,2,3
two,1,2,3
three,1,2,3
four,1,2,3
five,1,2,3
,1,2,3
seven,1,2,3
eight,1,2,3"""
self.read_csv(StringIO(text), verbose=True, index_col=0)
output = sys.stdout.getvalue()
# Engines are verbose in different ways.
if self.engine == 'c':
assert 'Tokenization took:' in output
assert 'Parser memory cleanup took:' in output
else: # Python engine
assert output == 'Filled 1 NA values in column a\n'
def test_iteration_open_handle(self):
if PY3:
pytest.skip(
"won't work in Python 3 {0}".format(sys.version_info))
with tm.ensure_clean() as path:
with open(path, 'wb') as f:
f.write('AAA\nBBB\nCCC\nDDD\nEEE\nFFF\nGGG')
with open(path, 'rb') as f:
for line in f:
if 'CCC' in line:
break
if self.engine == 'c':
pytest.raises(Exception, self.read_table,
f, squeeze=True, header=None)
else:
result = self.read_table(f, squeeze=True, header=None)
expected = Series(['DDD', 'EEE', 'FFF', 'GGG'], name=0)
tm.assert_series_equal(result, expected)
def test_1000_sep_with_decimal(self):
data = """A|B|C
1|2,334.01|5
10|13|10.
"""
expected = DataFrame({
'A': [1, 10],
'B': [2334.01, 13],
'C': [5, 10.]
})
assert expected.A.dtype == 'int64'
assert expected.B.dtype == 'float'
assert expected.C.dtype == 'float'
df = self.read_csv(StringIO(data), sep='|', thousands=',', decimal='.')
tm.assert_frame_equal(df, expected)
df = self.read_table(StringIO(data), sep='|',
thousands=',', decimal='.')
tm.assert_frame_equal(df, expected)
data_with_odd_sep = """A|B|C
1|2.334,01|5
10|13|10,
"""
df = self.read_csv(StringIO(data_with_odd_sep),
sep='|', thousands='.', decimal=',')
tm.assert_frame_equal(df, expected)
df = self.read_table(StringIO(data_with_odd_sep),
sep='|', thousands='.', decimal=',')
tm.assert_frame_equal(df, expected)
def test_euro_decimal_format(self):
data = """Id;Number1;Number2;Text1;Text2;Number3
1;1521,1541;187101,9543;ABC;poi;4,738797819
2;121,12;14897,76;DEF;uyt;0,377320872
3;878,158;108013,434;GHI;rez;2,735694704"""
df2 = self.read_csv(StringIO(data), sep=';', decimal=',')
assert df2['Number1'].dtype == float
assert df2['Number2'].dtype == float
assert df2['Number3'].dtype == float
def test_inf_parsing(self):
data = """\
,A
a,inf
b,-inf
c,+Inf
d,-Inf
e,INF
f,-INF
g,+INf
h,-INf
i,inF
j,-inF"""
inf = float('inf')
expected = Series([inf, -inf] * 5)
df = self.read_csv(StringIO(data), index_col=0)
tm.assert_almost_equal(df['A'].values, expected.values)
df = self.read_csv(StringIO(data), index_col=0, na_filter=False)
tm.assert_almost_equal(df['A'].values, expected.values)
def test_raise_on_no_columns(self):
# single newline
data = "\n"
pytest.raises(EmptyDataError, self.read_csv, StringIO(data))
# test with more than a single newline
data = "\n\n\n"
pytest.raises(EmptyDataError, self.read_csv, StringIO(data))
def test_compact_ints_use_unsigned(self):
# see gh-13323
data = 'a,b,c\n1,9,258'
# sanity check
expected = DataFrame({
'a': np.array([1], dtype=np.int64),
'b': np.array([9], dtype=np.int64),
'c': np.array([258], dtype=np.int64),
})
out = self.read_csv(StringIO(data))
tm.assert_frame_equal(out, expected)
expected = DataFrame({
'a': np.array([1], dtype=np.int8),
'b': np.array([9], dtype=np.int8),
'c': np.array([258], dtype=np.int16),
})
# default behaviour for 'use_unsigned'
with tm.assert_produces_warning(
FutureWarning, check_stacklevel=False):
out = self.read_csv(StringIO(data), compact_ints=True)
tm.assert_frame_equal(out, expected)
with tm.assert_produces_warning(
FutureWarning, check_stacklevel=False):
out = self.read_csv(StringIO(data), compact_ints=True,
use_unsigned=False)
tm.assert_frame_equal(out, expected)
expected = DataFrame({
'a': np.array([1], dtype=np.uint8),
'b': np.array([9], dtype=np.uint8),
'c': np.array([258], dtype=np.uint16),
})
with tm.assert_produces_warning(
FutureWarning, check_stacklevel=False):
out = self.read_csv(StringIO(data), compact_ints=True,
use_unsigned=True)
tm.assert_frame_equal(out, expected)
def test_compact_ints_as_recarray(self):
data = ('0,1,0,0\n'
'1,1,0,0\n'
'0,1,0,1')
with tm.assert_produces_warning(
FutureWarning, check_stacklevel=False):
result = self.read_csv(StringIO(data), delimiter=',', header=None,
compact_ints=True, as_recarray=True)
ex_dtype = np.dtype([(str(i), 'i1') for i in range(4)])
assert result.dtype == ex_dtype
with tm.assert_produces_warning(
FutureWarning, check_stacklevel=False):
result = self.read_csv(StringIO(data), delimiter=',', header=None,
as_recarray=True, compact_ints=True,
use_unsigned=True)
ex_dtype = np.dtype([(str(i), 'u1') for i in range(4)])
assert result.dtype == ex_dtype
def test_as_recarray(self):
# basic test
with tm.assert_produces_warning(
FutureWarning, check_stacklevel=False):
data = 'a,b\n1,a\n2,b'
expected = np.array([(1, 'a'), (2, 'b')],
dtype=[('a', '=i8'), ('b', 'O')])
out = self.read_csv(StringIO(data), as_recarray=True)
tm.assert_numpy_array_equal(out, expected)
# index_col ignored
with tm.assert_produces_warning(
FutureWarning, check_stacklevel=False):
data = 'a,b\n1,a\n2,b'
expected = np.array([(1, 'a'), (2, 'b')],
dtype=[('a', '=i8'), ('b', 'O')])
out = self.read_csv(StringIO(data), as_recarray=True, index_col=0)
tm.assert_numpy_array_equal(out, expected)
# respects names
with tm.assert_produces_warning(
FutureWarning, check_stacklevel=False):
data = '1,a\n2,b'
expected = np.array([(1, 'a'), (2, 'b')],
dtype=[('a', '=i8'), ('b', 'O')])
out = self.read_csv(StringIO(data), names=['a', 'b'],
header=None, as_recarray=True)
tm.assert_numpy_array_equal(out, expected)
# header order is respected even though it conflicts
# with the natural ordering of the column names
with tm.assert_produces_warning(
FutureWarning, check_stacklevel=False):
data = 'b,a\n1,a\n2,b'
expected = np.array([(1, 'a'), (2, 'b')],
dtype=[('b', '=i8'), ('a', 'O')])
out = self.read_csv(StringIO(data), as_recarray=True)
tm.assert_numpy_array_equal(out, expected)
# overrides the squeeze parameter
with tm.assert_produces_warning(
FutureWarning, check_stacklevel=False):
data = 'a\n1'
expected = np.array([(1,)], dtype=[('a', '=i8')])
out = self.read_csv(StringIO(data), as_recarray=True, squeeze=True)
tm.assert_numpy_array_equal(out, expected)
# does data conversions before doing recarray conversion
with tm.assert_produces_warning(
FutureWarning, check_stacklevel=False):
data = 'a,b\n1,a\n2,b'
conv = lambda x: int(x) + 1
expected = np.array([(2, 'a'), (3, 'b')],
dtype=[('a', '=i8'), ('b', 'O')])
out = self.read_csv(StringIO(data), as_recarray=True,
converters={'a': conv})
tm.assert_numpy_array_equal(out, expected)
# filters by usecols before doing recarray conversion
with tm.assert_produces_warning(
FutureWarning, check_stacklevel=False):
data = 'a,b\n1,a\n2,b'
expected = np.array([(1,), (2,)], dtype=[('a', '=i8')])
out = self.read_csv(StringIO(data), as_recarray=True,
usecols=['a'])
tm.assert_numpy_array_equal(out, expected)
def test_memory_map(self):
mmap_file = os.path.join(self.dirpath, 'test_mmap.csv')
expected = DataFrame({
'a': [1, 2, 3],
'b': ['one', 'two', 'three'],
'c': ['I', 'II', 'III']
})
out = self.read_csv(mmap_file, memory_map=True)
tm.assert_frame_equal(out, expected)
def test_null_byte_char(self):
# see gh-2741
data = '\x00,foo'
cols = ['a', 'b']
expected = DataFrame([[np.nan, 'foo']],
columns=cols)
if self.engine == 'c':
out = self.read_csv(StringIO(data), names=cols)
tm.assert_frame_equal(out, expected)
else:
msg = "NULL byte detected"
with | tm.assert_raises_regex(ParserError, msg) | pandas.util.testing.assert_raises_regex |
from contextlib import nullcontext as does_not_raise
from functools import partial
import pandas as pd
from pandas.testing import assert_series_equal
from solarforecastarbiter import datamodel
from solarforecastarbiter.reference_forecasts import persistence
from solarforecastarbiter.conftest import default_observation
import pytest
def load_data_base(data, observation, data_start, data_end):
# slice doesn't care about closed or interval label
# so here we manually adjust start and end times
if 'instant' in observation.interval_label:
pass
elif observation.interval_label == 'ending':
data_start += pd.Timedelta('1s')
elif observation.interval_label == 'beginning':
data_end -= pd.Timedelta('1s')
return data[data_start:data_end]
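# A tiny illustration (not used by the tests) of the slicing convention that
# load_data_base emulates: nudging an endpoint by one second drops the
# observation sitting exactly on the open side of the interval.
def _interval_label_slicing_sketch():
    idx = pd.date_range('20190404 1200', periods=13, freq='5min', tz='UTC')
    data = pd.Series(range(13), index=idx)
    start = pd.Timestamp('20190404 1200', tz='UTC')
    end = pd.Timestamp('20190404 1300', tz='UTC')
    beginning = data[start:end - pd.Timedelta('1s')]   # keeps 12:00, drops 13:00
    ending = data[start + pd.Timedelta('1s'):end]      # drops 12:00, keeps 13:00
    return beginning, ending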
@pytest.fixture
def powerplant_metadata():
"""1:1 AC:DC"""
modeling_params = datamodel.FixedTiltModelingParameters(
ac_capacity=200, dc_capacity=200, temperature_coefficient=-0.3,
dc_loss_factor=3, ac_loss_factor=0,
surface_tilt=30, surface_azimuth=180)
metadata = datamodel.SolarPowerPlant(
name='Albuquerque Baseline', latitude=35.05, longitude=-106.54,
elevation=1657.0, timezone='America/Denver',
modeling_parameters=modeling_params)
return metadata
@pytest.mark.parametrize('interval_label,closed,end', [
('beginning', 'left', '20190404 1400'),
('ending', 'right', '20190404 1400'),
('instant', None, '20190404 1359')
])
def test_persistence_scalar(site_metadata, interval_label, closed, end):
# interval beginning obs
observation = default_observation(
site_metadata, interval_length='5min', interval_label=interval_label)
tz = 'America/Phoenix'
data_index = pd.date_range(
start='20190404', end='20190406', freq='5min', tz=tz)
data = pd.Series(100., index=data_index)
data_start = pd.Timestamp('20190404 1200', tz=tz)
data_end = pd.Timestamp('20190404 1300', tz=tz)
forecast_start = pd.Timestamp('20190404 1300', tz=tz)
forecast_end = pd.Timestamp(end, tz=tz)
interval_length = pd.Timedelta('5min')
load_data = partial(load_data_base, data)
fx = persistence.persistence_scalar(
observation, data_start, data_end, forecast_start, forecast_end,
interval_length, interval_label, load_data=load_data)
expected_index = pd.date_range(
start='20190404 1300', end=end, freq='5min', tz=tz,
closed=closed)
expected = pd.Series(100., index=expected_index)
assert_series_equal(fx, expected)
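# Hedged sketch of what the scalar-persistence test above exercises: with a
# constant observation over the data window, the forecast simply carries one
# scalar forward onto a forecast index built with the same closed/label
# convention. This mirrors the expectation constructed in the test; the
# library's actual implementation may differ in detail.
def _scalar_persistence_sketch(obs, forecast_start, forecast_end,
                               interval_length, closed):
    value = obs.mean()                       # one scalar from the obs window
    fx_index = pd.date_range(start=forecast_start, end=forecast_end,
                             freq=interval_length, closed=closed)
    return pd.Series(value, index=fx_index)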
@pytest.mark.parametrize('obs_interval_label', ('beginning', 'ending',
'instant'))
@pytest.mark.parametrize('interval_label,closed,end', [
('beginning', 'left', '20190406 0000'),
('ending', 'right', '20190406 0000'),
('instant', None, '20190405 2359')
])
def test_persistence_interval(site_metadata, obs_interval_label,
interval_label, closed, end):
# interval beginning obs
observation = default_observation(
site_metadata, interval_length='5min',
interval_label=obs_interval_label)
tz = 'America/Phoenix'
data_index = pd.date_range(
start='20190404', end='20190406', freq='5min', tz=tz)
# each element of data is equal to the hour value of its label
data = pd.Series(data_index.hour, index=data_index, dtype=float)
if obs_interval_label == 'ending':
# e.g. timestamp 12:00:00 should be equal to 11
data = data.shift(1).fillna(0)
data_start = pd.Timestamp('20190404 0000', tz=tz)
data_end = pd.Timestamp(end, tz=tz) - pd.Timedelta('1d')
forecast_start = pd.Timestamp('20190405 0000', tz=tz)
interval_length = pd.Timedelta('60min')
load_data = partial(load_data_base, data)
expected_index = pd.date_range(
start='20190405 0000', end=end, freq='60min', tz=tz, closed=closed)
expected_vals = list(range(0, 24))
expected = pd.Series(expected_vals, index=expected_index, dtype=float)
# handle permutations of parameters that should fail
if data_end.minute == 59 and obs_interval_label != 'instant':
expectation = pytest.raises(ValueError)
elif data_end.minute == 0 and obs_interval_label == 'instant':
expectation = pytest.raises(ValueError)
else:
expectation = does_not_raise()
with expectation:
fx = persistence.persistence_interval(
observation, data_start, data_end, forecast_start,
interval_length, interval_label, load_data)
assert_series_equal(fx, expected)
def test_persistence_interval_missing_data(site_metadata):
# interval beginning obs
observation = default_observation(
site_metadata, interval_length='5min',
interval_label='ending')
tz = 'America/Phoenix'
data_index = pd.date_range(
start='20190404T1200', end='20190406', freq='5min', tz=tz)
# each element of data is equal to the hour value of its label
end = '20190406 0000'
data = pd.Series(data_index.hour, index=data_index, dtype=float)
data = data.shift(1)
data_start = pd.Timestamp('20190404 0000', tz=tz)
data_end = pd.Timestamp(end, tz=tz) - pd.Timedelta('1d')
forecast_start = pd.Timestamp('20190405 0000', tz=tz)
interval_length = pd.Timedelta('60min')
load_data = partial(load_data_base, data)
expected_index = pd.date_range(
start='20190405 0000', end=end, freq='60min', tz=tz, closed='right')
expected_vals = [None] * 12 + list(range(12, 24))
expected = pd.Series(expected_vals, index=expected_index, dtype=float)
fx = persistence.persistence_interval(
observation, data_start, data_end, forecast_start,
interval_length, 'ending', load_data)
assert_series_equal(fx, expected)
@pytest.fixture
def uniform_data():
tz = 'America/Phoenix'
data_index = pd.date_range(
start='20190404', end='20190406', freq='5min', tz=tz)
data = pd.Series(100., index=data_index)
return data
@pytest.mark.parametrize(
'interval_label,expected_index,expected_ghi,expected_ac,obsscale', (
('beginning',
['20190404 1300', '20190404 1330'],
[96.41150694741889, 91.6991546408236],
[96.60171202566896, 92.074796727846],
1),
('ending',
['20190404 1330', '20190404 1400'],
[96.2818141290749, 91.5132934827808],
[96.47816752344607, 91.89460837042301],
1),
# test clipped at 2x clearsky
('beginning',
['20190404 1300', '20190404 1330'],
[1926.5828549018618, 1832.4163238767312],
[383.1524464326973, 365.19729186262526],
50)
)
)
def test_persistence_scalar_index(
powerplant_metadata, uniform_data, interval_label,
expected_index, expected_ghi, expected_ac, obsscale):
# ac_capacity is 200 from above
observation = default_observation(
powerplant_metadata, interval_length='5min',
interval_label='beginning')
observation_ac = default_observation(
powerplant_metadata, interval_length='5min',
interval_label='beginning', variable='ac_power')
data = uniform_data * obsscale
tz = data.index.tzinfo
data_start = pd.Timestamp('20190404 1200', tz=tz)
data_end = pd.Timestamp('20190404 1300', tz=tz)
forecast_start = pd.Timestamp('20190404 1300', tz=tz)
forecast_end = pd.Timestamp('20190404 1400', tz=tz)
interval_length = pd.Timedelta('30min')
load_data = partial(load_data_base, data)
fx = persistence.persistence_scalar_index(
observation, data_start, data_end, forecast_start, forecast_end,
interval_length, interval_label, load_data)
expected_index = pd.DatetimeIndex(
expected_index, tz=tz, freq=interval_length)
expected = pd.Series(expected_ghi, index=expected_index)
assert_series_equal(fx, expected, check_names=False)
fx = persistence.persistence_scalar_index(
observation_ac, data_start, data_end, forecast_start, forecast_end,
interval_length, interval_label, load_data)
expected = pd.Series(expected_ac, index=expected_index)
assert_series_equal(fx, expected, check_names=False)
def test_persistence_scalar_index_instant_obs_fx(
site_metadata, powerplant_metadata, uniform_data):
# instantaneous obs and fx
interval_length = pd.Timedelta('30min')
interval_label = 'instant'
observation = default_observation(
site_metadata, interval_length='5min', interval_label=interval_label)
observation_ac = default_observation(
powerplant_metadata, interval_length='5min',
interval_label=interval_label, variable='ac_power')
data = uniform_data
tz = data.index.tzinfo
load_data = partial(load_data_base, data)
data_start = pd.Timestamp('20190404 1200', tz=tz)
data_end = pd.Timestamp('20190404 1259', tz=tz)
forecast_start = pd.Timestamp('20190404 1300', tz=tz)
forecast_end = pd.Timestamp('20190404 1359', tz=tz)
fx = persistence.persistence_scalar_index(
observation, data_start, data_end, forecast_start, forecast_end,
interval_length, interval_label, load_data)
expected_index = pd.DatetimeIndex(
['20190404 1300', '20190404 1330'], tz=tz, freq=interval_length)
expected_values = [96.59022431746838, 91.99405501672328]
expected = pd.Series(expected_values, index=expected_index)
assert_series_equal(fx, expected, check_names=False)
fx = persistence.persistence_scalar_index(
observation_ac, data_start, data_end, forecast_start, forecast_end,
interval_length, interval_label, load_data)
expected_values = [96.77231379880752, 92.36198028963426]
expected = pd.Series(expected_values, index=expected_index)
assert_series_equal(fx, expected, check_names=False)
# instant obs and fx, but with offset added to starts instead of ends
data_start = pd.Timestamp('20190404 1201', tz=tz)
data_end = pd.Timestamp('20190404 1300', tz=tz)
forecast_start = pd.Timestamp('20190404 1301', tz=tz)
forecast_end = pd.Timestamp('20190404 1400', tz=tz)
fx = persistence.persistence_scalar_index(
observation, data_start, data_end, forecast_start, forecast_end,
interval_length, interval_label, load_data)
expected_index = pd.DatetimeIndex(
['20190404 1300', '20190404 1330'], tz=tz, freq=interval_length)
expected_values = [96.55340033645147, 91.89662922267517]
expected = pd.Series(expected_values, index=expected_index)
assert_series_equal(fx, expected, check_names=False)
def test_persistence_scalar_index_invalid_times_instant(site_metadata):
data = pd.Series(100., index=[0])
load_data = partial(load_data_base, data)
tz = 'America/Phoenix'
interval_label = 'instant'
observation = default_observation(
site_metadata, interval_length='5min', interval_label=interval_label)
# instant obs that cover the whole interval - not allowed!
data_start = pd.Timestamp('20190404 1200', tz=tz)
data_end = pd.Timestamp('20190404 1300', tz=tz)
forecast_start = pd.Timestamp('20190404 1300', tz=tz)
forecast_end = pd.Timestamp('20190404 1400', tz=tz)
interval_length = pd.Timedelta('30min')
with pytest.raises(ValueError):
persistence.persistence_scalar_index(
observation, data_start, data_end, forecast_start, forecast_end,
interval_length, interval_label, load_data)
@pytest.mark.parametrize('interval_label', ['beginning', 'ending'])
@pytest.mark.parametrize('data_start,data_end,forecast_start,forecast_end', (
('20190404 1201', '20190404 1300', '20190404 1300', '20190404 1400'),
('20190404 1200', '20190404 1259', '20190404 1300', '20190404 1400'),
('20190404 1200', '20190404 1300', '20190404 1301', '20190404 1400'),
('20190404 1200', '20190404 1300', '20190404 1300', '20190404 1359'),
))
def test_persistence_scalar_index_invalid_times_interval(
site_metadata, interval_label, data_start, data_end, forecast_start,
forecast_end):
data = pd.Series(100., index=[0])
load_data = partial(load_data_base, data)
tz = 'America/Phoenix'
interval_length = pd.Timedelta('30min')
# base times to mess with
data_start = pd.Timestamp(data_start, tz=tz)
data_end = pd.Timestamp(data_end, tz=tz)
forecast_start = pd.Timestamp(forecast_start, tz=tz)
forecast_end = pd.Timestamp(forecast_end, tz=tz)
# interval average obs with invalid starts/ends
observation = default_observation(
site_metadata, interval_length='5min', interval_label=interval_label)
errtext = "with interval_label beginning or ending"
with pytest.raises(ValueError) as excinfo:
persistence.persistence_scalar_index(
observation, data_start, data_end, forecast_start, forecast_end,
interval_length, interval_label, load_data)
assert errtext in str(excinfo.value)
def test_persistence_scalar_index_invalid_times_invalid_label(site_metadata):
data = pd.Series(100., index=[0])
load_data = partial(load_data_base, data)
tz = 'America/Phoenix'
interval_length = pd.Timedelta('30min')
interval_label = 'invalid'
observation = default_observation(
site_metadata, interval_length='5min')
object.__setattr__(observation, 'interval_label', interval_label)
data_start = pd.Timestamp('20190404 1200', tz=tz)
data_end = pd.Timestamp('20190404 1300', tz=tz)
forecast_start = pd.Timestamp('20190404 1300', tz=tz)
forecast_end = pd.Timestamp('20190404 1400', tz=tz)
with pytest.raises(ValueError) as excinfo:
persistence.persistence_scalar_index(
observation, data_start, data_end, forecast_start, forecast_end,
interval_length, interval_label, load_data)
assert "invalid interval_label" in str(excinfo.value)
def test_persistence_scalar_index_low_solar_elevation(
site_metadata, powerplant_metadata):
interval_label = 'beginning'
observation = default_observation(
site_metadata, interval_length='5min', interval_label=interval_label)
observation_ac = default_observation(
powerplant_metadata, interval_length='5min',
interval_label=interval_label, variable='ac_power')
# at ABQ Baseline, solar apparent zenith for these points is
# 2019-05-13 12:00:00+00:00 91.62
# 2019-05-13 12:05:00+00:00 90.09
# 2019-05-13 12:10:00+00:00 89.29
# 2019-05-13 12:15:00+00:00 88.45
# 2019-05-13 12:20:00+00:00 87.57
# 2019-05-13 12:25:00+00:00 86.66
tz = 'UTC'
data_start = pd.Timestamp('20190513 1200', tz=tz)
data_end = pd.Timestamp('20190513 1230', tz=tz)
index = pd.date_range(start=data_start, end=data_end,
freq='5min', closed='left')
# clear sky 5 min avg (from 1 min avg) GHI is
# [0., 0.10932908, 1.29732454, 4.67585122, 10.86548521, 19.83487399]
# create data series that could produce obs / clear of
# 0/0, 1/0.1, -1/1.3, 5/5, 10/10, 20/20
# average without limits is (10 - 1 + 1 + 1 + 1) / 5 = 2.4
# average with element limits of [0, 2] = (2 + 0 + 1 + 1 + 1) / 5 = 1
data = pd.Series([0, 1, -1, 5, 10, 20.], index=index)
forecast_start = pd.Timestamp('20190513 1230', tz=tz)
forecast_end = pd.Timestamp('20190513 1300', tz=tz)
interval_length = pd.Timedelta('5min')
load_data = partial(load_data_base, data)
expected_index = pd.date_range(
start=forecast_start, end=forecast_end, freq='5min', closed='left')
# clear sky 5 min avg GHI is
# [31.2, 44.5, 59.4, 75.4, 92.4, 110.1]
expected_vals = [31.2, 44.5, 59.4, 75.4, 92.4, 110.1]
expected = pd.Series(expected_vals, index=expected_index)
fx = persistence.persistence_scalar_index(
observation, data_start, data_end, forecast_start, forecast_end,
interval_length, interval_label, load_data)
| assert_series_equal(fx, expected, check_less_precise=1, check_names=False) | pandas.testing.assert_series_equal |
# 3rd party
import numpy as np
import pandas as pd
from pandas.api.extensions import (
ExtensionDtype,
ExtensionArray,
register_extension_dtype,
register_series_accessor,
register_dataframe_accessor,
)
from uncertainties.core import AffineScalarFunc, Variable
from uncertainties.unumpy import uarray
# own project imports
from .uarray import UArray
@register_extension_dtype
class UfloatDtype(ExtensionDtype):
"""A custom data type, to be paired with an ExtensionArray"""
type = Variable # type: ignore
# linter wants a property, but pandas validates the type of name
# as a string via assertion, therefore: AssertionError if written
# as a property
name = "ufloat" # type: ignore
_is_numeric = True
_is_boolean = True
@classmethod
def construct_array_type(cls):
"""
Return the array type associated with this dtype
Return
-------
type
"""
return UfloatArray
class UfloatArray(ExtensionArray):
"""
The interface includes the following abstract methods that must be
implemented by subclasses:
* _from_sequence
* _from_factorized
* __getitem__
* __len__
* __eq__
* dtype
* nbytes
* isna
* take
* copy
* _concat_same_type
"""
def __init__(self, variables, dtype=None, copy=False):
self._data = UArray(variables)
@classmethod
def _from_sequence(cls, scalars, dtype=None, copy=False):
"""Construct a new ExtensionArray from a sequence of scalars."""
return cls(scalars, dtype=dtype, copy=copy)
@classmethod
def _from_factorized(cls, values, original):
"""Reconstruct an ExtensionArray after factorization."""
return cls(values)
def __getitem__(self, item):
"""Select a subset of self."""
return self._data[item]
def __len__(self) -> int:
"""Length of this array."""
return len(self._data)
@property
def _itemsize(self):
from sys import getsizeof as sizeof
return sizeof(Variable)
@property
def nbytes(self):
"""The byte size of the data."""
return self._itemsize * len(self)
@property
def dtype(self):
"""An instance of 'ExtensionDtype'."""
return UfloatDtype()
def isna(self):
"""A 1-D array indicating if each value is missing."""
return np.array([isinstance(x, Variable) for x in self._data], dtype=bool)
def take(self, indexer, allow_fill=False, fill_value=None):
"""Take elements from an array.
Relies on the take method defined in pandas:
https://github.com/pandas-dev/pandas/blob/e246c3b05924ac1fe083565a765ce847fcad3d91/pandas/core/algorithms.py#L1483
"""
from pandas.api.extensions import take
data = self._data
if allow_fill and fill_value is None:
fill_value = self.dtype.na_value
result = take(data, indexer, fill_value=fill_value, allow_fill=allow_fill)
return self._from_sequence(result)
def copy(self):
"""Return a copy of the array."""
return type(self)(self._data.copy())
@classmethod
def _concat_same_type(cls, to_concat):
"""Concatenate multiple arrays."""
return cls(np.concatenate([x._data for x in to_concat]))
@register_series_accessor("u")
class UfloatSeriesAccessor:
def __init__(self, series):
#self._validate(series)
self._obj = series
self._asuarray = UArray(series)
# @staticmethod
# def _validate(obj):
# if obj.dtype != "ufloat":
# raise TypeError("Dtype has to be 'ufloat' in order to use this accessor!")
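# expose the nominal values (.n) and standard deviations (.s) of the wrapped uncertainties objects as plain float Series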
@property
def n(self):
return pd.Series(self._asuarray.n, index=self._obj.index, name=self._obj.name)
@property
def s(self):
return pd.Series(self._asuarray.s, index=self._obj.index, name=self._obj.name)
@register_dataframe_accessor("u")
class UfloatDataFrameAccessor:
def __init__(self, dataframe):
#self._validate(dataframe)
self._obj = dataframe
self._asuarray = UArray(dataframe)
# @staticmethod
# def _validate(obj):
# #! TODO
# try:
# UArray(obj)
# except Exception as e:
# raise e
@property
def n(self):
return pd.DataFrame(self._asuarray.n, index=self._obj.index, columns=self._obj.columns)
@property
def s(self):
return pd.DataFrame(self._asuarray.s, index=self._obj.index, columns=self._obj.columns)
@property
def sep(self):
df = | pd.DataFrame(data=None, index=self._obj.index) | pandas.DataFrame |
# Fundamental libraries
import os
import re
import sys
import time
import glob
import random
import datetime
import warnings
import itertools
import numpy as np
import pandas as pd
import pickle as cp
import seaborn as sns
import multiprocessing
from scipy import stats
from pathlib import Path
from ast import literal_eval
import matplotlib.pyplot as plt
from collections import Counter
from argparse import ArgumentParser
os.environ['CUDA_LAUNCH_BLOCKING'] = "1"
warnings.filterwarnings(action="ignore")
# PyTorch, PyTorch.Text, and Lightning-PyTorch methods
import torch
from torch import nn, optim, Tensor
import torch.nn.functional as F
from torch.utils.data import Dataset, DataLoader
from torchtext.vocab import Vocab
import pytorch_lightning as pl
from pytorch_lightning.callbacks import ModelCheckpoint
from pytorch_lightning.callbacks.early_stopping import EarlyStopping
# SciKit-Learn methods
from sklearn.metrics import confusion_matrix, accuracy_score, roc_auc_score
from sklearn.preprocessing import LabelEncoder, KBinsDiscretizer, OneHotEncoder, StandardScaler
from sklearn.model_selection import StratifiedShuffleSplit
from sklearn.utils import resample
from sklearn.utils.class_weight import compute_class_weight
# TQDM for progress tracking
from tqdm import tqdm
# Custom methods
from classes.datasets import CONCISE_PREDICTOR_SET
from models.CPM import CPM_deep
# Train CPM_deep
def train_CPM_deep(TRAINING_SET,
VAL_SET,
TESTING_SET,
TUNE_IDX,
REPEAT,
FOLD,
OUTPUT_DIR,
BATCH_SIZE,
LEARNING_RATE,
LAYERS,
NEURONS,
DROPOUT,
ES_PATIENCE,
EPOCHS,
CLASS_WEIGHTS,
OUTPUT_ACTIVATION):
"""
Args:
TRAINING_SET (pd.DataFrame)
VAL_SET (pd.DataFrame)
TESTING_SET (pd.DataFrame)
TUNE_IDX (str)
REPEAT (int)
FOLD (int)
OUTPUT_DIR (str): directory to save model outputs
BATCH_SIZE (int): size of minibatches during training
LEARNING_RATE (float): Learning rate for ADAM optimizer
LAYERS (int): number of hidden layers in feed forward neural network
NEURONS (list of length layers): the number of neurons in each layer
DROPOUT (float): the proportion of each dense layer dropped out during training
ES_PATIENCE (int): patience during early stopping
EPOCHS (int): maximum epochs during training
CLASS_WEIGHTS (boolean): identifies whether loss should be weighted against class frequency
OUTPUT_ACTIVATION (string): 'softmax' for DeepMN or 'sigmoid' for DeepOR
"""
# Create a directory within current repeat/fold combination to store outputs of current tuning configuration
tune_model_dir = os.path.join(OUTPUT_DIR,'tune_'+TUNE_IDX)
os.makedirs(tune_model_dir,exist_ok = True)
# Create PyTorch Dataset objects
train_Dataset = CONCISE_PREDICTOR_SET(TRAINING_SET,OUTPUT_ACTIVATION)
val_Dataset = CONCISE_PREDICTOR_SET(VAL_SET,OUTPUT_ACTIVATION)
test_Dataset = CONCISE_PREDICTOR_SET(TESTING_SET,OUTPUT_ACTIVATION)
# Create PyTorch DataLoader objects
curr_train_DL = DataLoader(train_Dataset,
batch_size=int(BATCH_SIZE),
shuffle=True)
curr_val_DL = DataLoader(val_Dataset,
batch_size=len(val_Dataset),
shuffle=False)
curr_test_DL = DataLoader(test_Dataset,
batch_size=len(test_Dataset),
shuffle=False)
# Initialize current model class based on hyperparameter selections
model = CPM_deep(train_Dataset.X.shape[1],
LAYERS,
NEURONS,
DROPOUT,
OUTPUT_ACTIVATION,
LEARNING_RATE,
CLASS_WEIGHTS,
train_Dataset.y)
early_stop_callback = EarlyStopping(
monitor='val_AUROC',
patience=ES_PATIENCE,
mode='max'
)
checkpoint_callback = ModelCheckpoint(
monitor='val_AUROC',
dirpath=tune_model_dir,
filename='{epoch:02d}-{val_AUROC:.2f}',
save_top_k=1,
mode='max'
)
csv_logger = pl.loggers.CSVLogger(save_dir=OUTPUT_DIR,name='tune_'+TUNE_IDX)
trainer = pl.Trainer(logger = csv_logger,
max_epochs = EPOCHS,
enable_progress_bar = False,
enable_model_summary = False,
callbacks=[early_stop_callback,checkpoint_callback])
trainer.fit(model,curr_train_DL,curr_val_DL)
best_model = CPM_deep.load_from_checkpoint(checkpoint_callback.best_model_path)
best_model.eval()
# Save validation set probabilities
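# curr_val_DL was built with batch_size=len(val_Dataset), so this loop yields a single full batch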
for i, (x,y) in enumerate(curr_val_DL):
yhat = best_model(x)
val_true_y = y.cpu().numpy()
if OUTPUT_ACTIVATION == 'softmax':
curr_val_probs = F.softmax(yhat.detach()).cpu().numpy()
curr_val_preds = pd.DataFrame(curr_val_probs,columns=['Pr(GOSE=1)','Pr(GOSE=2/3)','Pr(GOSE=4)','Pr(GOSE=5)','Pr(GOSE=6)','Pr(GOSE=7)','Pr(GOSE=8)'])
curr_val_preds['TrueLabel'] = val_true_y
elif OUTPUT_ACTIVATION == 'sigmoid':
curr_val_probs = F.sigmoid(yhat.detach()).cpu().numpy()
curr_val_probs = pd.DataFrame(curr_val_probs,columns=['Pr(GOSE>1)','Pr(GOSE>3)','Pr(GOSE>4)','Pr(GOSE>5)','Pr(GOSE>6)','Pr(GOSE>7)'])
curr_val_labels = pd.DataFrame(val_true_y,columns=['GOSE>1','GOSE>3','GOSE>4','GOSE>5','GOSE>6','GOSE>7'])
curr_val_preds = pd.concat([curr_val_probs,curr_val_labels],axis = 1)
else:
raise ValueError("Invalid output layer type. Must be 'softmax' or 'sigmoid'")
curr_val_preds.insert(loc=0, column='GUPI', value=VAL_SET.GUPI.values)
curr_val_preds['TUNE_IDX'] = TUNE_IDX
curr_val_preds.to_csv(os.path.join(tune_model_dir,'val_predictions.csv'),index=False)
best_model.eval()
# Save testing set probabilities
for i, (x,y) in enumerate(curr_test_DL):
yhat = best_model(x)
test_true_y = y.cpu().numpy()
if OUTPUT_ACTIVATION == 'softmax':
curr_test_probs = F.softmax(yhat.detach()).cpu().numpy()
curr_test_preds = pd.DataFrame(curr_test_probs,columns=['Pr(GOSE=1)','Pr(GOSE=2/3)','Pr(GOSE=4)','Pr(GOSE=5)','Pr(GOSE=6)','Pr(GOSE=7)','Pr(GOSE=8)'])
curr_test_preds['TrueLabel'] = test_true_y
elif OUTPUT_ACTIVATION == 'sigmoid':
curr_test_probs = F.sigmoid(yhat.detach()).cpu().numpy()
curr_test_probs = | pd.DataFrame(curr_test_probs,columns=['Pr(GOSE>1)','Pr(GOSE>3)','Pr(GOSE>4)','Pr(GOSE>5)','Pr(GOSE>6)','Pr(GOSE>7)']) | pandas.DataFrame |
# -*- coding: utf-8 -*-
"""
Created on Wed Aug 12 18:24:12 2020
@author: omar.elfarouk
"""
import pandas
import numpy
import seaborn
import scipy
import matplotlib.pyplot as plt
data = pandas.read_csv('gapminder.csv', low_memory=False)
#setting variables you will be working with to numeric
data['internetuserate'] = pandas.to_numeric(data['internetuserate'], errors='coerce')
data['urbanrate'] = | pandas.to_numeric(data['urbanrate'], errors='coerce') | pandas.to_numeric |
#!/usr/bin/env python
# coding: utf-8
# version 0.1 2020/01/08 -- <NAME>
import argparse
from pathlib import Path
import cartopy.crs as ccrs
import cartopy.feature as cfeature
import cmocean.cm as cmo
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import xarray as xr
# functions #
def write_bc_for_pli(bc_fn, gcm, pli, quantity, method, depth_avg):
"""
append or write 3D boundary conditions for quantities
bc_fn = path to write or append boundary condition data
gcm = general circulation model output which contains boundary points (xr.DataArray)
pli = table of boundary support points (pd.DataFrame)
quantity = variable to output to the BC files (salinity or water_temp)
depth_avg = flag to enable depth averaging
"""
with open(bc_fn, "a") as f:
gcm_refd, ref_date = assign_seconds_from_refdate(gcm)
for _, (x_pli, y_pli, pli_point_name) in pli.iterrows():
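# shift the pli longitude to the GCM's 0-360 degrees-east convention (assumes western-hemisphere, i.e. negative, longitudes)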
x_pli_east = x_pli + 360
if (quantity == "salinity") or (quantity == "water_temp"):
bc_data = interpolate_to_pli_point(
gcm_refd, quantity, x_pli_east, y_pli, pli_point_name, method
)
# check valid depth point
if bc_data is None:
continue
if depth_avg:
write_ts_record(f, bc_data, pli_point_name, quantity, ref_date)
else:
write_t3d_record(f, bc_data, pli_point_name, quantity, ref_date)
# in the case of velocity both components are interpolated
elif quantity == "velocity":
bc_data = interpolate_to_pli_point(
gcm_refd,
["water_u", "water_v"],
x_pli_east,
y_pli,
pli_point_name,
method,
)
# check valid depth point
if bc_data is None:
continue
write_vector_t3d_record(f, bc_data, pli_point_name, quantity, ref_date)
# in case of surf_el
elif quantity == "surf_el":
bc_data = interpolate_to_pli_point(
gcm_refd, quantity, x_pli_east, y_pli, pli_point_name, method
)
# check valid depth point
if bc_data is None:
continue
write_ts_record(f, bc_data, pli_point_name, quantity, ref_date)
def write_ts_record(f, bc_data, pli_point_name, quantity, ref_date):
"""
append or write time series boundary conditions for depth averaged quantities
f = file descriptor for writing boundary condition output
bc_data = data at with percent bed coords (xr.DataFrame)
pli_point_name = name for entry
quantity = variable to output to the BC files
ref_date = date used to calculate offset of the record in seconds
"""
# get units for quantity
if quantity == "salinity":
quantbnd = "salinitybnd"
units = "ppt"
elif quantity == "water_temp":
quantbnd = "temperaturebnd"
units = "°C"
elif quantity == "surf_el":
quantbnd = "waterlevelbnd"
units = "m"
else:
print('quantity needs to be either "salinity", "water_temp" or "surf_el"\n')
raise ValueError
# write a record
f.write("[forcing]\n")
f.write(f"Name = {pli_point_name}\n")
f.write(f"Function = t3d\n")
f.write(f"Time-interpolation = linear\n")
f.write(f"Vertical position type = single\n")
f.write(f"Vertical interpolation = linear\n")
f.write(f"Quantity = time\n")
f.write(f"Unit = seconds since {ref_date}\n")
f.write(f"Quantity = {quantbnd}\n")
f.write(f"Unit = {units}\n")
f.write(f"Vertical position = 1\n")
if quantity == "surf_el":
for td, value in bc_data.to_dataframe()[quantity].iteritems():
value = f"{value:.05f}"
f.write(f"{td} {value}\n")
else:
# write data after converting to dataframe and iterating over the rows
for td, values in bc_data.to_dataframe()[quantity].unstack(level=-1).iterrows():
# take mean of values to get depth averaged
value = values.mean()
# see results of interpolation
if value > 100.0:
print(
f"Problem with {quantity} exceeding maximum allowed value: {values.max():.03f} ppt."
)
elif value < 0.0:
print(
f"Problem with {quantity} becoming negative: {values.max():.03f} ppt."
)
print(f"Negative value for {quantity} has been set to 0.01 {units}.")
value = 0.01
value = f"{value:.05f}"
f.write(f"{td} {value}\n")
f.write("\n")
def write_vector_t3d_record(f, bc_data, pli_point_name, quantity, ref_date):
"""
append or write 3D boundary conditions for quantities
f = file descriptor for writing boundary condition output
bc_data = data at with percent bed coords (xr.DataFrame)
pli_point_name = name for entry
quantity = variable to output to the BC files
ref_date = date used to calculate offset of the record in seconds
"""
if quantity == "velocity":
vector = "uxuyadvectionvelocitybnd:ux,uy"
quantbndx = "ux"
quantbndy = "uy"
x_comp = "water_u"
y_comp = "water_v"
units = "-" # no units for velocity in example provided by Kees
else:
print('quantity should be "velocity"\n')
raise ValueError
# convert percent from bed into formated string
pos_spec = [f"{perc:.02f}" for perc in bc_data.perc_from_bed.data]
pos_spec_str = " ".join(pos_spec[::-1]) # reverse order for D3D
# write a record
f.write("[forcing]\n")
f.write(f"Name = {pli_point_name}\n")
f.write(f"Function = t3d\n")
f.write(f"Time-interpolation = linear\n")
f.write(f"Vertical position type = percentage from bed\n")
f.write(f"Vertical position specification = {pos_spec_str}\n")
f.write(f"Vertical interpolation = linear\n")
f.write(f"Quantity = time\n")
f.write(f"Unit = seconds since {ref_date}\n")
f.write(f"Vector = {vector}\n")
# loop over number of vertical positions
for vert_pos in range(1, len(pos_spec) + 1):
f.write(f"Quantity = {quantbndx}\n")
f.write(f"Unit = {units}\n")
f.write(f"Vertical position = {vert_pos}\n")
f.write(f"Quantity = {quantbndy}\n")
f.write(f"Unit = {units}\n")
f.write(f"Vertical position = {vert_pos}\n")
# write data after converting to dataframe and iterating over the rows
for td, values in (
bc_data.to_dataframe()[[x_comp, y_comp]].unstack(level=0).iterrows()
):
# get componets as array in order to format for d3d input
x_comp_vals = values[x_comp].values[::-1] # reverse order for D3D
y_comp_vals = values[y_comp].values[::-1] # reverse order for D3D
values = [
f"{x_comp_val:.03f} {y_comp_val:.03f}"
for x_comp_val, y_comp_val in zip(x_comp_vals, y_comp_vals)
]
values_str = " ".join(values)
f.write(f"{td} {values_str}\n")
f.write("\n")
def write_t3d_record(f, bc_data, pli_point_name, quantity, ref_date):
"""
append or write 3D boundary conditions for quantities
f = file descriptor for writing boundary condition output
bc_data = data at with percent bed coords (xr.DataFrame)
pli_point_name = name for entry
quantity = variable to output to the BC files
ref_date = date used to calculate offset of the record in seconds
"""
# get units for quantity
if quantity == "salinity":
quantbnd = "salinitybnd"
units = "ppt"
elif quantity == "water_temp":
quantbnd = "temperaturebnd"
units = "°C"
else:
print('quantity needs to be either "salinity" or "water_temp"\n')
raise ValueError
# convert percent from bed into formated string
pos_spec = [f"{perc:.02f}" for perc in bc_data.perc_from_bed.data]
pos_spec_str = " ".join(pos_spec[::-1]) # reverse order for D3D
# write a record
f.write("[forcing]\n")
f.write(f"Name = {pli_point_name}\n")
f.write(f"Function = t3d\n")
f.write(f"Time-interpolation = linear\n")
f.write(f"Vertical position type = percentage from bed\n")
f.write(f"Vertical position specification = {pos_spec_str}\n")
f.write(f"Vertical interpolation = linear\n")
f.write(f"Quantity = time\n")
f.write(f"Unit = seconds since {ref_date}\n")
# loop over number of vertical positions
for vert_pos in range(1, len(pos_spec) + 1):
f.write(f"Quantity = {quantbnd}\n")
f.write(f"Unit = {units}\n")
f.write(f"Vertical position = {vert_pos}\n")
# write data after converting to dataframe and iterating over the rows
for td, values in bc_data.to_dataframe()[quantity].unstack(level=-1).iterrows():
# see results of interpolation
if values.max() > 100.0:
print(
f"problem with {quantity} exceeding maximum allowed value: {values.max():.03f} ppt"
)
elif values.min() < 0.0:
print(f"problem with {quantity} becoming negative: {values.max():.03f} ppt")
print(f"Negative values for {quantity} has been set to 0.01 {units}.")
values.where(values > 0.01, 0.01, inplace=True)
values = [f"{value:.05f}" for value in values]
values_str = " ".join(values[::-1]) # reverse order for D3D
f.write(f"{td} {values_str}\n")
f.write("\n")
def assign_seconds_from_refdate(gcm):
"""
This function assigns seconds from a user-specified reference date as coordinates.
This is how D3D interpolates the boundary conditions in time.
gcm = model output to add coords to
"""
ref_date = gcm.time.data[0]
ref_dt = pd.to_datetime(ref_date)
ref_date_str = ref_dt.strftime("%Y-%m-%d %H:%M:%S")
timedeltas = pd.to_datetime(gcm.time.data) - ref_dt
seconds = timedeltas.days * 24 * 60 * 60 + timedeltas.seconds
gcm = gcm.assign_coords(coords={"seconds_from_ref": ("time", seconds)})
return gcm.swap_dims({"time": "seconds_from_ref"}), ref_date_str
def interpolate_to_pli_point(
gcm_refd, quantity, x_pli_east, y_pli, pli_point_name, method
):
"""interpolates the quanitites to the sigma depths and pli coords
gcm_refd = gcm with new time coordinates
quantity = variable to output to the BC files (salinity or water_temp)
x_pli_east = longitude of pli point in degrees east from meridian (GCM convention)
y_pli = latitude
"""
if quantity == "surf_el":
# interpolate to pli point and drop data below bed level at nearest gcm_refd point
bc_data = gcm_refd[quantity].interp(lon=x_pli_east, lat=y_pli, method=method)
return bc_data
else:
# interpolate to pli point and drop data below bed level at nearest gcm_refd point
bc_data = (
gcm_refd[quantity]
.interp(lon=x_pli_east, lat=y_pli, method=method)
.dropna(dim="depth")
.squeeze()
)
# add coordinate for percent from bed. D3D uses this in its bc file format
try:
gcm_refd_zb = bc_data.depth[-1] # get bed level of gcm_refd point
except IndexError:
print(
f"Depth invalid for {pli_point_name} at: {x_pli_east}, {y_pli}. Omitting point..."
)
return None
perc_from_bed = 100 * (-1 * bc_data.depth + gcm_refd_zb) / gcm_refd_zb
bc_data = bc_data.assign_coords(
coords={"perc_from_bed": ("depth", perc_from_bed)}
)
return bc_data
### main loop ###
if __name__ == "__main__":
### arguments ###
parser = argparse.ArgumentParser()
parser.add_argument(
"nc",
help="GCM NetCDF output containing boundary support points and duration of Delft3D simulation",
)
parser.add_argument(
"quantity",
help='GCM variable. Must be "salinity", "water_temp", "velocity", or "surf_el"',
)
parser.add_argument(
"--pli-list",
nargs="*",
type=str,
help="list of boundary support point polyline filenames",
required=True,
dest="pli_list",
)
parser.add_argument(
"--bc-filename",
help="Optional filename for Delft3D boundary condition filename",
type=str,
dest="bc_filename",
)
parser.add_argument(
"--depth-avg",
help="flag to enable depth averaged output",
default=False,
action="store_true",
dest="depth_avg",
)
parser.add_argument(
"--interp-method",
help="flag to enable depth averaged output",
default="linear",
type=str,
dest="method",
)
args = parser.parse_args()
gcm = args.nc
quantity = args.quantity
pli_list = args.pli_list
depth_avg = args.depth_avg
method = args.method
# validate arguments
if (
(quantity != "salinity")
and (quantity != "water_temp")
and (quantity != "velocity")
and (quantity != "surf_el")
):
print(
f'<quantity> was specified as {quantity}, but should be "salinity", "water_temp", "velocity", or "surf_el".'
)
raise ValueError
# open gcm NetCDF output as Xarray dataset
try:
gcm = xr.open_dataset(Path(gcm), drop_variables="tau")
except FileNotFoundError as e:
print("<GCM output> should be path to GCM NetCDF output")
raise e
# set default boundary condition filename depending on quantity
bc_fn = args.bc_filename
if bc_fn is None:
if quantity == "salinity":
bc_fn = Path("Salinity.bc")
elif quantity == "water_temp":
bc_fn = Path("Temperature.bc")
elif quantity == "velocity":
bc_fn = Path("Velocity.bc")
elif quantity == "surf_el":
bc_fn = Path("WaterLevel.bc")
# pli files opened as Pandas DataFrames
pli_points = []
for pli_fn in pli_list:
print(f"Reading in file: {pli_fn}")
pli = pd.read_csv(
pli_fn, sep="\s+", skiprows=2, header=None, names=["x", "y", "point_id"]
)
write_bc_for_pli(bc_fn, gcm, pli, quantity, method=method, depth_avg=depth_avg)
# add points to list for visualization
pli_points.append(pli)
# concat pli points
pli_points = | pd.concat(pli_points) | pandas.concat |
import numpy as np
import pandas as pd
from scipy import stats
import sys, os, time, json
from pathlib import Path
import pickle as pkl
sys.path.append('../PreProcessing/')
sys.path.append('../Lib/')
sys.path.append('../Analyses/')
import sklearn.linear_model as lm
from sklearn.model_selection import RepeatedStratifiedKFold
from sklearn.metrics import balanced_accuracy_score as bac
from joblib import Parallel, delayed
import matplotlib as mpl
import matplotlib.pyplot as plt
import matplotlib.ticker as ticker
from matplotlib.text import Text
import seaborn as sns
import analyses_table as AT
import TreeMazeFunctions as TMF
sns.set(style="whitegrid",font_scale=1,rc={
'axes.spines.bottom': False,
'axes.spines.left': False,
'axes.spines.right': False,
'axes.spines.top': False,
'axes.edgecolor':'0.5'})
def main(sePaths, doPlots=False, overwrite = False):
try:
dat = AT.loadSessionData(sePaths)
nUnits = dat['fitTable2'].shape[0]
# univariate analyses.
fn = sePaths['CueDesc_SegUniRes']
if ( (not fn.exists()) or overwrite):
CueDescFR_Dat, all_dat_spl = CueDesc_SegUniAnalysis(dat)
CueDescFR_Dat.to_csv(sePaths['CueDesc_SegUniRes'])
if doPlots:
plotCueVDes(CueDescFR_Dat,sePaths)
plotUnitRvL(CueDescFR_Dat,all_dat_spl,sePaths)
else:
CueDescFR_Dat = pd.read_csv(fn)
# decoder analyses
fn = sePaths['CueDesc_SegDecRes']
if ((not fn.exists()) or overwrite):
singCellDec,singCellDecSummary, popDec = CueDesc_SegDecAnalysis(dat)
singCellDec['se'] = sePaths['session']
singCellDecSummary['se'] = sePaths['session']
popDec['se'] = sePaths['session']
singCellDec.to_csv(fn)
singCellDecSummary.to_csv(sePaths['CueDesc_SegDecSumRes'])
popDec.to_csv(sePaths['PopCueDesc_SegDecSumRes'])
if doPlots:
f,_ = plotMultipleDecoderResults(singCellDecSummary)
fn = sePaths['CueDescPlots'] / ('DecResByUnit.jpeg')
f.savefig(str(fn),dpi=150, bbox_inches='tight',pad_inches=0.2)
plt.close(f)
f,_ = plotMultipleDecoderResults(popDec)
fn = sePaths['CueDescPlots'] / ('PopDecRes.jpeg')
f.savefig(str(fn),dpi=150, bbox_inches='tight',pad_inches=0.2)
plt.close(f)
for unit in np.arange(nUnits):
f,_ = plotMultipleDecoderResults(singCellDec[(singCellDec['unit']==unit)])
fn = sePaths['CueDescPlots'] / ('DecRes_UnitID-{}.jpeg'.format(unit) )
f.savefig(str(fn),dpi=150, bbox_inches='tight',pad_inches=0.2)
plt.close(f)
else:
singCellDec = pd.read_csv(fn)
singCellDecSummary = pd.read_csv(sePaths['CueDesc_SegDecSumRes'])
popDec = pd.read_csv(sePaths['PopCueDesc_SegDecSumRes'])
return CueDescFR_Dat, singCellDec,singCellDecSummary, popDec
except:
print ("Error", sys.exc_info()[0],sys.exc_info()[1],sys.exc_info()[2].tb_lineno)
return [],[],[],[]
def CueDesc_SegUniAnalysis(dat):
trDat = dat['TrialLongMat']
trConds = dat['TrialConds']
nCells = len(dat['ids']['cells'])
nMua = len(dat['ids']['muas'])
nUnits = nCells+nMua
# fixed variables (don't change with cell)
locs = TMF.ZonesNames
Trials = trConds[trConds['Good']].index.values
nTrials = len(Trials)
FeatIDs = {'A':[1],'Stem':[0,1,2],'Arm': [3,4]}
Segs = FeatIDs.keys()
HA = ['Home','SegA']
Stem = ['Home','SegA','Center']
L_Arm = ['SegE', 'I2', 'SegF', 'G3', 'SegG', 'G4']
R_Arm = ['SegB', 'I1', 'SegC', 'G1', 'SegD', 'G2']
# variable to be stored
#uni_LvR_Analyses = {'Stats':{'Cue':{},'Desc':{},'Cue_Desc':{}},'Mean':{'Cue':{},'Desc':{},'Cue_Desc':{}},'SD':{'Cue':{},'Desc':{},'Cue_Desc':{}} }
uni_LvR_Analyses = {'Cue':{'Stats':{},'Mean':{},'SD':{}},'Desc':{'Stats':{},'Mean':{},'SD':{}},'Cue_Desc':{'Stats':{},'Mean':{},'SD':{}}}
Conds = ['Cue','Desc','Cue_Desc']
dat_meas = ['Stats','Mean','SD']
all_dat_spl = {} # only used for plotting as it has overlapping data points; not necessary to store it.
for unitNum in np.arange(nUnits):
# splits of data per cell
dat_splits = {}
for k in ['Cue','Desc']:
dat_splits[k] = {}
for kk in FeatIDs.keys():
dat_splits[k][kk] = {}
dat_splits['Cue_Desc'] = {'Co_Arm':{},'L_Arm':{},'R_Arm':{}}
if unitNum==0:
for k in Conds:
for ii in dat_meas:
if ii=='Stats':
for jj in ['T','P','S']:
uni_LvR_Analyses[k][ii][jj] = pd.DataFrame(np.zeros((nUnits,3)),columns=dat_splits[k].keys())
else:
for jj in ['L','R']:
uni_LvR_Analyses[k][ii][jj] = pd.DataFrame(np.zeros((nUnits,3)),columns=dat_splits[k].keys())
if unitNum<nCells:
tt = dat['ids']['cells'][str(unitNum)][0]
cl = dat['ids']['cells'][str(unitNum)][1]
fr = dat['TrialFRLongMat']['cell_'+str(unitNum)]
#tR2 = dat['TrialModelFits']['testR2'][unitNum]
#selMod = dat['TrialModelFits']['selMod'][unitNum]
tR2 = dat['fitTable2']['testR2'][unitNum]
selMod = dat['fitTable2']['selMod'][unitNum]
else:
muaID = unitNum-nCells
tt = dat['ids']['muas'][str(muaID)][0]
cl = dat['ids']['muas'][str(muaID)][1]
fr = dat['TrialFRLongMat']['mua_'+str(muaID)]
tR2 = dat['fitTable2']['testR2'][unitNum]
selMod = dat['fitTable2']['selMod'][unitNum]
# get mean fr per trial per partition
mPartFRDat = pd.DataFrame(np.zeros((nTrials,3)),columns=FeatIDs)
cue = trConds.loc[Trials,'Cues'].values
desc = trConds.loc[Trials,'Desc'].values
cnt =0
for tr in Trials:
subset = (trDat['trID']==tr) & (trDat['IO']=='Out')
for k,v in FeatIDs.items():
mPartFRDat.loc[cnt,k]=np.nanmean(fr[subset].values[v])
cnt+=1
# univariate cue and decision tests by maze part
LvR = {}
l = {}
r = {}
# First & Second analyses: Cue/Desc
k = 'Cue'
l[k] = cue=='L'
r[k] = cue=='R'
k = 'Desc'
l[k]=desc=='L'
r[k]=desc=='R'
for k in ['Cue','Desc']:
LvR[k] = pd.DataFrame(np.zeros((3,3)),index=Segs,columns=['T','P','S'])
for kk in Segs:
lfr = mPartFRDat[kk][l[k]]
rfr = mPartFRDat[kk][r[k]]
temp = stats.ttest_ind(lfr,rfr)
LvR[k].loc[kk,'T'] = temp[0]
LvR[k].loc[kk,'P'] = temp[1]
dat_splits[k][kk]['l'] = lfr.values
dat_splits[k][kk]['r'] = rfr.values
LvR[k]['S'] = getSigLevel(LvR[k]['P'])
# third analysis: Correct v Incorrect by L/R arm
k = 'Cue_Desc'
LvR[k] = pd.DataFrame(np.zeros((3,3)),index=['Co_Arm','L_Arm','R_Arm'],columns=['T','P','S'])
l = {}
r = {}
kk = 'Co_Arm'
l[kk] = mPartFRDat['Arm'][(cue=='L')&(desc=='L')]
r[kk] = mPartFRDat['Arm'][(cue=='R')&(desc=='R')]
kk = 'L_Arm'
l[kk]=mPartFRDat['Arm'][(desc=='L')&(cue=='L')]
r[kk]=mPartFRDat['Arm'][(desc=='L')&(cue=='R')]
kk = 'R_Arm'
l[kk]=mPartFRDat['Arm'][(desc=='R')&(cue=='L')]
r[kk]=mPartFRDat['Arm'][(desc=='R')&(cue=='R')]
for kk in ['Co_Arm','L_Arm','R_Arm']:
temp = stats.ttest_ind(l[kk],r[kk])
LvR[k].loc[kk,'T'] = temp[0]
LvR[k].loc[kk,'P'] = temp[1]
dat_splits[k][kk]['l'] = l[kk].values
dat_splits[k][kk]['r'] = r[kk].values
LvR[k]['S'] = getSigLevel(LvR[k]['P'])
# aggregate results.
mlr = {}
slr = {}
for k,v in dat_splits.items():
mlr[k] = pd.DataFrame(np.zeros((3,2)),index=v.keys(),columns=['L','R'])
slr[k] = pd.DataFrame(np.zeros((3,2)),index=v.keys(),columns=['L','R'])
cnt = 0
for kk,vv in v.items():
l = vv['l']
r = vv['r']
mlr[k].loc[kk] = [np.mean(l),np.mean(r)]
slr[k].loc[kk] = [stats.sem(l),stats.sem(r)]
cnt+=1
for k in Conds: # keys : Cue, Desc, Cue_Desc
for ii in dat_meas:
if ii=='Stats':
for jj in ['T','P','S']:
if unitNum == 0:
uni_LvR_Analyses[k][ii][jj] = pd.DataFrame(np.zeros((nUnits,3)),columns=LvR[k].index.values)
uni_LvR_Analyses[k]['Stats'][jj].loc[unitNum] = LvR[k][jj]
else:
for jj in ['L','R']:
if unitNum == 0:
uni_LvR_Analyses[k][ii][jj] = pd.DataFrame(np.zeros((nUnits,3)),columns=LvR[k].index.values)
uni_LvR_Analyses[k]['Mean'][jj].loc[unitNum] = mlr[k][jj]
uni_LvR_Analyses[k]['SD'][jj].loc[unitNum] = slr[k][jj]
all_dat_spl[unitNum] = dat_splits
# reorganize LvR into a pandas data frame with all the units
CueDescFR_Dat = pd.DataFrame()
for k in Conds:
cnt = 0
for kk in ['Mean','SD']:
for kkk in ['L','R']:
if kk=='Mean':
valName = 'MzFR_'+ kkk
elif kk == 'SD':
valName = 'SzFR_' + kkk
if cnt==0:
y = uni_LvR_Analyses[k][kk][kkk].copy()
y = y.reset_index()
y = y.melt(value_vars = uni_LvR_Analyses[k][kk][kkk].columns,id_vars='index',var_name='Seg',value_name= valName)
y['Cond'] = k
else:
z = uni_LvR_Analyses[k][kk][kkk].copy()
z = z.reset_index()
z = z.melt(value_vars = uni_LvR_Analyses[k][kk][kkk].columns,id_vars='index',value_name= valName)
y[valName] = z[valName].copy()
cnt+=1
for jj in ['T','P','S']:
z = uni_LvR_Analyses[k]['Stats'][jj].copy()
z = z.reset_index()
z = z.melt(value_vars = uni_LvR_Analyses[k]['Stats'][jj].columns ,id_vars='index', var_name = 'Seg', value_name = jj)
y[jj] = z[jj]
CueDescFR_Dat = pd.concat((CueDescFR_Dat,y))
CueDescFR_Dat['Sig'] = CueDescFR_Dat['P']<0.05
CueDescFR_Dat.rename(columns={'index':'unit'},inplace=True)
return CueDescFR_Dat, all_dat_spl
def CueDesc_SegDecAnalysis(dat):
nPe = 100
nRepeats = 10
nSh = 50
njobs = 20
trConds = dat['TrialConds']
trDat = dat['TrialLongMat']
nUnits = dat['fitTable2'].shape[0]
gTrialsIDs = trConds['Good']
Trials = trConds[gTrialsIDs].index.values
nTrials = len(Trials)
allZoneFR,unitIDs = reformatFRDat(dat,Trials)
CoTrials = trConds[gTrialsIDs & (trConds['Co']=='Co')].index.values
InCoTrials = trConds[gTrialsIDs & (trConds['Co']=='InCo')].index.values
nInCo = len(InCoTrials)
TrSets = {}
TrSets['all'] = np.arange(nTrials)
_,idx,_=np.intersect1d(np.array(Trials),np.array(CoTrials),return_indices=True)
TrSets['co'] = idx
_,idx,_=np.intersect1d(np.array(Trials),np.array(InCoTrials),return_indices=True)
TrSets['inco'] = idx
cueVec = trConds.loc[gTrialsIDs]['Cues'].values
descVec = trConds.loc[gTrialsIDs]['Desc'].values
predVec = {'Cue':cueVec, 'Desc':descVec}
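# cumulative zone-feature index sets used as decoder inputs: each key adds the next maze segment(s), from home ('h') out to the goals ('goal')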
nFeatures = {'h':np.arange(1),'a':np.arange(2),'center':np.arange(3),'be':np.arange(4),'int':np.arange(5),'cdfg':np.arange(6),'goal':np.arange(7)}
def correctTrials_Decoder(train,test):
res = pd.DataFrame(np.zeros((3,4)),columns=['Test','BAc','P','Z'])
temp = mod.fit(X_train[train],y_train[train])
res.loc[0,'Test'] = 'Model'
y_hat = temp.predict(X_train[test])
res.loc[0,'BAc'] = bac(y_train[test],y_hat)*100
# shuffle for held out train set
mod_sh = np.zeros(nSh)
for sh in np.arange(nSh):
y_perm_hat = np.random.permutation(y_hat)
mod_sh[sh] = bac(y_train[test],y_perm_hat)*100
res.loc[0,'Z'] = getPerm_Z(mod_sh, res.loc[0,'BAc'] )
res.loc[0,'P'] = getPerm_Pval(mod_sh, res.loc[0,'BAc'] )
# predictions on x test
y_hat = temp.predict(X_test)
res.loc[1,'Test'] = 'Cue'
res.loc[1,'BAc'] = bac(y_test_cue,y_hat)*100
res.loc[2,'Test'] = 'Desc'
res.loc[2,'BAc'] = bac(y_test_desc,y_hat)*100
# shuffles for ytest cue/desc
cue_sh = np.zeros(nSh)
desc_sh = np.zeros(nSh)
for sh in np.arange(nSh):
y_perm_hat = np.random.permutation(y_hat)
cue_sh[sh] = bac(y_test_cue,y_perm_hat)*100
desc_sh[sh] = bac(y_test_desc,y_perm_hat)*100
res.loc[1,'Z'] = getPerm_Z(cue_sh, res.loc[1,'BAc'] )
res.loc[1,'P'] = getPerm_Pval(cue_sh, res.loc[1,'BAc'] )
res.loc[2,'Z'] = getPerm_Z(desc_sh, res.loc[2,'BAc'] )
res.loc[2,'P'] = getPerm_Pval(desc_sh, res.loc[2,'BAc'] )
res['nSeUnits'] = nUnits
return res
def balancedCoIncoTrial_Decoder(pe,feats):
res = pd.DataFrame(np.zeros((2,4)),columns=['Test','BAc','P','Z'])
# sample correct trials to match the number of incorrect trials.
samp_co_trials = np.random.choice(TrSets['co'],nInCo,replace=False)
train = np.concatenate( (TrSets['inco'], samp_co_trials ))
test = np.setdiff1d(TrSets['co'], samp_co_trials)
X_train = allZoneFR.loc[train,feats].values
X_test = allZoneFR.loc[test,feats].values
Y_cue_train = predVec['Cue'][train]
Y_desc_train = predVec['Desc'][train]
Y_test = predVec['Cue'][test] # cue and desc labels are identical on the held-out correct trials
# model trained on the cue
res.loc[0,'Test'] = 'Cue'
cue_mod = mod.fit(X_train,Y_cue_train)
y_cue_hat = cue_mod.predict(X_test)
res.loc[0,'BAc'] = bac(Y_test,y_cue_hat)*100
cue_sh = np.zeros(nSh)
for sh in np.arange(nSh):
y_perm = np.random.permutation(Y_test)
cue_sh[sh] = bac(y_perm,y_cue_hat)*100
res.loc[0,'Z'] = getPerm_Z(cue_sh, res.loc[0,'BAc'] )
res.loc[0,'P'] = getPerm_Pval(cue_sh, res.loc[0,'BAc'] )
# model trained on the desc
res.loc[1,'Test'] = 'Desc'
desc_mod = mod.fit(X_train,Y_desc_train)
y_desc_hat = desc_mod.predict(X_test)
res.loc[1,'BAc'] = bac(Y_test,y_desc_hat)*100
desc_sh = np.zeros(nSh)
for sh in np.arange(nSh):
y_perm = np.random.permutation(Y_test)
desc_sh[sh] = bac(y_perm,y_desc_hat)*100
res.loc[1,'Z'] = getPerm_Z(desc_sh, res.loc[1,'BAc'] )
res.loc[1,'P'] = getPerm_Pval(desc_sh, res.loc[1,'BAc'] )
return res
def IncoTrial_Decoder(train,test):
res = pd.DataFrame(np.zeros((3,4)),columns=['Test','BAc','P','Z'])
temp = mod.fit(X_train[train],y_train[train])
res.loc[0,'Test'] = 'Model'
y_hat = temp.predict(X_train[test])
res.loc[0,'BAc'] = bac(y_train[test],y_hat)*100
# shuffle for held out train set
mod_sh = np.zeros(nSh)
for sh in np.arange(nSh):
y_perm_hat = np.random.permutation(y_hat)
mod_sh[sh] = bac(y_train[test],y_perm_hat)*100
res.loc[0,'Z'] = getPerm_Z(mod_sh, res.loc[0,'BAc'] )
res.loc[0,'P'] = getPerm_Pval(mod_sh, res.loc[0,'BAc'] )
# predictions on x test
y_hat = temp.predict(X_test)
res.loc[1,'Test'] = 'Cue'
res.loc[1,'BAc'] = bac(y_test_cue,y_hat)*100
res.loc[2,'Test'] = 'Desc'
res.loc[2,'BAc'] = 100-res.loc[1,'BAc']
# shuffles for ytest cue/desc
cue_sh = np.zeros(nSh)
for sh in np.arange(nSh):
y_perm_hat = np.random.permutation(y_hat)
cue_sh[sh] = bac(y_test_cue,y_perm_hat)*100
res.loc[1,'Z'] = getPerm_Z(cue_sh, res.loc[1,'BAc'] )
res.loc[1,'P'] = getPerm_Pval(cue_sh, res.loc[1,'BAc'] )
res.loc[2,'Z'] = getPerm_Z(100-cue_sh, res.loc[2,'BAc'] )
res.loc[2,'P'] = getPerm_Pval(100-cue_sh, res.loc[2,'BAc'] )
return res
with Parallel(n_jobs=njobs) as parallel:
# correct trials Model:
coModsDec = pd.DataFrame()
popCoModsDec = pd.DataFrame()
try:
nFolds = 10
y_train = predVec['Cue'][TrSets['co']]
y_test_cue = predVec['Cue'][TrSets['inco']]
y_test_desc = predVec['Desc'][TrSets['inco']]
rskf = RepeatedStratifiedKFold(n_splits=nFolds,n_repeats=nRepeats, random_state=0)
t0=time.time()
for unitNum in np.arange(nUnits):
for p,nF in nFeatures.items():
feats = unitIDs[unitNum][nF]
mod = lm.LogisticRegression(class_weight='balanced',C=1/np.sqrt(len(feats)))
X_train = allZoneFR.loc[TrSets['co'], feats ].values
X_test = allZoneFR.loc[TrSets['inco'], feats ].values
cnt=0
r = parallel(delayed(correctTrials_Decoder)(train,test) for train,test in rskf.split(X_train,y_train))
t1=time.time()
res = pd.DataFrame()
for jj in r:
res = pd.concat((jj,res))
res['Loc'] = p
res['-log(P)'] = -np.log(res['P'])
res['unit'] = unitNum
coModsDec = pd.concat((coModsDec,res))
print(end='.')
coModsDec['Decoder'] = 'Correct'
# population decoder: pool the selected zone features across all recorded units
for p,nF in nFeatures.items():
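# assuming allZoneFR columns are ordered unit by unit with 7 zone features each, np.arange(f, nUnits*7, 7) gathers zone f from every unit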
feats=np.array([])
for f in nF:
feats=np.concatenate((feats,np.arange(f,nUnits*7,7)))
feats=feats.astype(int)
mod = lm.LogisticRegression(class_weight='balanced',C=1/np.sqrt(len(feats)))
X_train = allZoneFR.loc[TrSets['co'], feats ].values
X_test = allZoneFR.loc[TrSets['inco'], feats ].values
cnt=0
r = parallel(delayed(correctTrials_Decoder)(train,test) for train,test in rskf.split(X_train,y_train))
res = pd.DataFrame()
for jj in r:
res = pd.concat((jj,res))
res['Loc'] = p
res['-log(P)'] = -np.log(res['P'])
popCoModsDec = pd.concat((popCoModsDec,res))
print(end='.')
print('\nDecoding Correct Model Completed. Time = {0:.2f}s \n'.format(time.time()-t0))
popCoModsDec['Decoder'] = 'Correct'
except:
print('CorrectTrials Model Failed.')
print ("Error", sys.exc_info()[0],sys.exc_info()[1],sys.exc_info()[2].tb_lineno)
# balanced correct/inco model:
baModsDec = pd.DataFrame()
popBaModsDec = pd.DataFrame()
try:
t0=time.time()
for unitNum in np.arange(nUnits):
for p,nF in nFeatures.items():
feats = unitIDs[unitNum][nF]
mod = lm.LogisticRegression(class_weight='balanced',C=1/np.sqrt(len(feats)))
r = parallel(delayed(balancedCoIncoTrial_Decoder)(pe, feats) for pe in np.arange(nPe))
res = pd.DataFrame()
for jj in r:
res = pd.concat((jj,res))
res['Loc'] = p
res['-log(P)'] = -np.log(res['P'])
res['unit'] = unitNum
baModsDec = | pd.concat((baModsDec,res)) | pandas.concat |
"""
Provide data encoding methods; supports label encoding, one-hot encoding, and data discretization.
author: Xiaoqi
date: 2019.06.24
"""
import pandas as pd
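# Example usage (sketch, assuming `df` is a DataFrame with mixed numeric and categorical columns):
#   encoder = DataEncoder(df)
#   encoded = encoder.label_encoding()   # categorical columns replaced by integer codes
#   mapping = encoder.get_labels()       # {column: {code: original label}}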
class DataEncoder(object):
def __init__(self, df_data):
self.df_data = df_data
def label_encoding(self):
try:
data = self.df_data.copy()
cat_cols = data.select_dtypes(['category']).columns
if len(cat_cols) == 0:
cat_cols = data.select_dtypes(exclude=['number']).columns
data[cat_cols] = data[cat_cols].astype('category')
data[cat_cols] = data[cat_cols].apply(lambda x: x.cat.codes)
return data
except:
raise Exception('Label encoding error')
def get_labels(self):
label_dic = {}
data = self.df_data.copy()
cat_cols = data.select_dtypes(exclude=['number']).columns
for col in cat_cols:
label_dic[col] = dict(enumerate(data[col].astype('category').cat.categories))
return label_dic
def onehot_encoding(self):
data = self.df_data.copy()
return | pd.get_dummies(data, drop_first=True) | pandas.get_dummies |
"""
NOMIS
=====
Collect official data from the NOMIS API, using configuration files.
"""
import requests
from collections import Counter
from io import StringIO
import pandas as pd
from collections import defaultdict
import re
import logging
import datetime
import configparser
NOMIS = "http://www.nomisweb.co.uk/api/v01/dataset/{}"
NOMIS_DEF = NOMIS.format("{}def.sdmx.json")
REGEX = re.compile(r"(\w+)_code$")
def get_config(conf_path):
config = configparser.ConfigParser()
config.read(conf_path)
return dict(config['nomis'])
def get_base(x):
"""Get the NOMIS column name base entity, for columns named '{something}_code'.
Args:
x (str): The NOMIS column name, of the form {something}_code
Returns:
base (str): The NOMIS column name base entity
"""
return REGEX.findall(x)[0]
def get_code_pairs(columns):
"""Pair NOMIS key-value column names.
Args:
columns (list): A list of NOMIS column names.
Returns:
cols (dict): Mapping of key-value column names.
"""
cols = {c: f"{get_base(c)}_name"
for c in columns if c.endswith("_code")
if f"{get_base(c)}_name" in columns}
return cols
def reformat_nomis_columns(data):
"""Reformat columns from default NOMIS data.
Args:
df (:obj:`pd.DataFrame`): Dataframe containing NOMIS data.
Returns:
tables (:obj:`list` of :obj:`dict`): Reformatted rows of data.
"""
tables = defaultdict(list)
# Generate the NOMIS key-value column pairs, and recursively
# replace them
for name, df in data.items():
_df = df.copy() # Don't mess with the original data
pairs = get_code_pairs(_df.columns)
for _code, _name in pairs.items():
base = f"{get_base(_code)}_lookup"
df_codes = _df[[_name,_code]].copy()
df_codes.columns = ["name","code"]
tables[base] += df_codes.to_dict(orient="records")
_df.drop(_name, axis=1, inplace=True)
tables[name] = _df.to_dict(orient="records")
for row in tables[name]:
row['date'] = row['date'].to_pydatetime()
# Append this pair of values
return tables
def batch_request(config, dataset_id, geographies, date_format,
record_offset=0, max_api_calls=10):
"""Fetch a NOMIS dataset from the API, in batches,
based on a configuration object.
Args:
config (dict): Configuration object, from which a get
request is formed.
dataset_id (str): NOMIS dataset ID
geographies (list): Return object from :obj:`discovery_iter`.
date_format (str): Formatting string for dates in the dataset
record_offset (int): Record to start from
max_api_calls (int): Number of requests allowed
Returns:
dfs (:obj:`list` of :obj:`pd.DataFrame`): Batch return results.
"""
config["geography"] = ",".join(str(row["nomis_id"])
for row in geographies)
config["RecordOffset"] = record_offset
date_parser = lambda x: pd.datetime.strptime(x, date_format)
# Build a list of dfs in chunks from the NOMIS API
dfs = []
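# the NOMIS API returns at most ~25,000 records per request, so page through with RecordOffset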
offset = 25000
icalls = 0
done = False
while (not done) and icalls < max_api_calls:
#logging.debug(f"\t\t {offset}")
# Build the request payload
params = "&".join(f"{k}={v}" for k,v in config.items())
# Hit the API
r = requests.get(NOMIS.format(f"{dataset_id}.data.csv"), params=params)
# Read the data
with StringIO(r.text) as sio:
_df = pd.read_csv(sio, parse_dates=["DATE"], date_parser=date_parser)
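# a partial page (fewer rows than the page size) marks the final chunk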
done = len(_df) < offset
# Increment the offset
config["RecordOffset"] += offset
# Ignore empty fields
dfs.append(_df.loc[_df.OBS_VALUE > 0])
icalls += 1
# Combine and return
df = | pd.concat(dfs) | pandas.concat |
import time
start=time.time()
from tkinter import *
from tkinter import ttk
from Classes import *
win=Tk()
win_gui=window(win, 'Syncing')
def syncing():
global end
import pandas as pd
import gspread
from oauth2client.service_account import ServiceAccountCredentials
import pandas as pd
prog_bar['value']=10
win.update_idletasks()
scope = ['https://spreadsheets.google.com/feeds',
'https://www.googleapis.com/auth/drive']
credentials = ServiceAccountCredentials.from_json_keyfile_name(
'GSpread-2ecbd68261be.json', scope)
gc = gspread.authorize(credentials)
prog_bar['value']=15
win.update_idletasks()
wks = gc.open('Students_Records').sheet1
data = wks.get_all_values()
headers = data.pop(0)
reg_sheets_table = pd.DataFrame(data, columns=headers)
prog_bar['value']=20
win.update_idletasks()
print('\nPrinting registration table (without setting index) from google sheets: \n')
print(reg_sheets_table.head())
print('\nPrinting registration table with names column as its index: \n')
reg_sheets_table['Name'] = reg_sheets_table['Name'].str.title()
reg_sheets_table['School']=reg_sheets_table['School'].str.title()
reg_sheets_table['Remarks']=reg_sheets_table['Remarks'].str.capitalize()
reg_sheets_table['Deposit Pattern'] = reg_sheets_table['Deposit Pattern'].str.title()
reg_sheets_table['Gender']=reg_sheets_table['Gender'].str.title()
reg_sheets_table['Joining Date'] = pd.to_datetime(reg_sheets_table['Joining Date'])
reg_sheets_table['Joining Date'] = reg_sheets_table['Joining Date'].dt.strftime('%d/%m/%Y')
reg_sheets_table['Class'] = reg_sheets_table['Class'].astype('str')
reg_sheets_table['Class'] = reg_sheets_table['Class'].str.title()
reg_sheets_table.set_index('Name', inplace=True)
print(reg_sheets_table.head())
prog_bar['value']=25
win.update_idletasks()
reg_excel_table=pd.read_excel('Students_Records.xlsx')
print('\nPrinting registration excel data (Without setting index):\n')
print(reg_excel_table.head())
reg_excel_table['Name'] = reg_excel_table['Name'].str.title()
reg_excel_table['School']=reg_excel_table['School'].str.title()
reg_excel_table['Remarks']=reg_excel_table['Remarks'].str.capitalize()
reg_excel_table['Deposit Pattern'] = reg_excel_table['Deposit Pattern'].str.title()
reg_excel_table['Gender']=reg_excel_table['Gender'].str.title()
reg_excel_table['Joining Date'] = | pd.to_datetime(reg_excel_table['Joining Date']) | pandas.to_datetime |
from data_science_layer.reporting.abstract_report import AbstractReport
from data_science_layer.pipeline.abstract_pipline import AbstractPipeline
import pkg_resources
import numpy as np
import pandas as pd
from sklearn import metrics
class RegressorReport(AbstractReport):
sub_folder = 'reports'
def report(self, pipeline: AbstractPipeline):
# Set Directory path
folder = ''
path = pkg_resources.resource_filename('crcdal', 'cache/' + folder + '/' + self.sub_folder + '/')
pkg_resources.ensure_directory(path)
model_train_metrics = {}
model_test_metrics = {}
for i, model in enumerate(pipeline.get_models()):
name = model.short_name
preds_y_train, _ = model.predict(pipeline.train)
preds_y_test, _ = model.predict(pipeline.test)
preds_y_train = pd.DataFrame(preds_y_train)
preds_y_test = pd.DataFrame(preds_y_test)
train_y = pd.DataFrame(pipeline.train_y)
test_y = pd.DataFrame(pipeline.test_y)
# Account for multiple y values.
k = 0
for j in range(pipeline.test_y.shape[1]):
model_train_metrics[str(name) + str(j)] = self._stats(train_y.iloc[:, j], preds_y_train.iloc[:, j])
model_test_metrics[str(name) + str(j)] = self._stats(test_y.iloc[:, j], preds_y_test.iloc[:, j])
k += 2
# Feature Metrics
df_model_train_metrics = | pd.DataFrame(model_train_metrics) | pandas.DataFrame |
# coding=utf-8
# pylint: disable-msg=E1101,W0612
from datetime import datetime, timedelta
import operator
from itertools import product, starmap
from numpy import nan, inf
import numpy as np
import pandas as pd
from pandas import (Index, Series, DataFrame, isnull, bdate_range,
NaT, date_range, timedelta_range,
_np_version_under1p8)
from pandas.tseries.index import Timestamp
from pandas.tseries.tdi import Timedelta
import pandas.core.nanops as nanops
from pandas.compat import range, zip
from pandas import compat
from pandas.util.testing import assert_series_equal, assert_almost_equal
import pandas.util.testing as tm
from .common import TestData
class TestSeriesOperators(TestData, tm.TestCase):
_multiprocess_can_split_ = True
def test_comparisons(self):
left = np.random.randn(10)
right = np.random.randn(10)
left[:3] = np.nan
result = nanops.nangt(left, right)
with np.errstate(invalid='ignore'):
expected = (left > right).astype('O')
expected[:3] = np.nan
assert_almost_equal(result, expected)
s = Series(['a', 'b', 'c'])
s2 = Series([False, True, False])
# it works!
exp = Series([False, False, False])
tm.assert_series_equal(s == s2, exp)
tm.assert_series_equal(s2 == s, exp)
def test_op_method(self):
def check(series, other, check_reverse=False):
simple_ops = ['add', 'sub', 'mul', 'floordiv', 'truediv', 'pow']
if not compat.PY3:
simple_ops.append('div')
for opname in simple_ops:
op = getattr(Series, opname)
if opname == 'div':
alt = operator.truediv
else:
alt = getattr(operator, opname)
result = op(series, other)
expected = alt(series, other)
tm.assert_almost_equal(result, expected)
if check_reverse:
rop = getattr(Series, "r" + opname)
result = rop(series, other)
expected = alt(other, series)
tm.assert_almost_equal(result, expected)
check(self.ts, self.ts * 2)
check(self.ts, self.ts[::2])
check(self.ts, 5, check_reverse=True)
check(tm.makeFloatSeries(), tm.makeFloatSeries(), check_reverse=True)
def test_neg(self):
assert_series_equal(-self.series, -1 * self.series)
def test_invert(self):
assert_series_equal(-(self.series < 0), ~(self.series < 0))
def test_div(self):
with np.errstate(all='ignore'):
# no longer do integer div for any ops, but deal with the 0's
p = DataFrame({'first': [3, 4, 5, 8], 'second': [0, 0, 0, 3]})
result = p['first'] / p['second']
expected = Series(
p['first'].values.astype(float) / p['second'].values,
dtype='float64')
expected.iloc[0:3] = np.inf
assert_series_equal(result, expected)
result = p['first'] / 0
expected = Series(np.inf, index=p.index, name='first')
assert_series_equal(result, expected)
p = p.astype('float64')
result = p['first'] / p['second']
expected = Series(p['first'].values / p['second'].values)
assert_series_equal(result, expected)
p = DataFrame({'first': [3, 4, 5, 8], 'second': [1, 1, 1, 1]})
result = p['first'] / p['second']
assert_series_equal(result, p['first'].astype('float64'),
check_names=False)
self.assertTrue(result.name is None)
self.assertFalse(np.array_equal(result, p['second'] / p['first']))
# inf signing
s = Series([np.nan, 1., -1.])
result = s / 0
expected = Series([np.nan, np.inf, -np.inf])
assert_series_equal(result, expected)
# float/integer issue
# GH 7785
p = DataFrame({'first': (1, 0), 'second': (-0.01, -0.02)})
expected = Series([-0.01, -np.inf])
result = p['second'].div(p['first'])
assert_series_equal(result, expected, check_names=False)
result = p['second'] / p['first']
assert_series_equal(result, expected)
# GH 9144
s = Series([-1, 0, 1])
result = 0 / s
expected = Series([0.0, nan, 0.0])
assert_series_equal(result, expected)
result = s / 0
expected = Series([-inf, nan, inf])
assert_series_equal(result, expected)
result = s // 0
expected = Series([-inf, nan, inf])
assert_series_equal(result, expected)
def test_operators(self):
def _check_op(series, other, op, pos_only=False,
check_dtype=True):
left = np.abs(series) if pos_only else series
right = np.abs(other) if pos_only else other
cython_or_numpy = op(left, right)
python = left.combine(right, op)
tm.assert_series_equal(cython_or_numpy, python,
check_dtype=check_dtype)
def check(series, other):
simple_ops = ['add', 'sub', 'mul', 'truediv', 'floordiv', 'mod']
for opname in simple_ops:
_check_op(series, other, getattr(operator, opname))
_check_op(series, other, operator.pow, pos_only=True)
_check_op(series, other, lambda x, y: operator.add(y, x))
_check_op(series, other, lambda x, y: operator.sub(y, x))
_check_op(series, other, lambda x, y: operator.truediv(y, x))
_check_op(series, other, lambda x, y: operator.floordiv(y, x))
_check_op(series, other, lambda x, y: operator.mul(y, x))
_check_op(series, other, lambda x, y: operator.pow(y, x),
pos_only=True)
_check_op(series, other, lambda x, y: operator.mod(y, x))
check(self.ts, self.ts * 2)
check(self.ts, self.ts * 0)
check(self.ts, self.ts[::2])
check(self.ts, 5)
def check_comparators(series, other, check_dtype=True):
_check_op(series, other, operator.gt, check_dtype=check_dtype)
_check_op(series, other, operator.ge, check_dtype=check_dtype)
_check_op(series, other, operator.eq, check_dtype=check_dtype)
_check_op(series, other, operator.lt, check_dtype=check_dtype)
_check_op(series, other, operator.le, check_dtype=check_dtype)
check_comparators(self.ts, 5)
check_comparators(self.ts, self.ts + 1, check_dtype=False)
def test_operators_empty_int_corner(self):
s1 = Series([], [], dtype=np.int32)
s2 = Series({'x': 0.})
tm.assert_series_equal(s1 * s2, Series([np.nan], index=['x']))
def test_operators_timedelta64(self):
# invalid ops
self.assertRaises(Exception, self.objSeries.__add__, 1)
self.assertRaises(Exception, self.objSeries.__add__,
np.array(1, dtype=np.int64))
self.assertRaises(Exception, self.objSeries.__sub__, 1)
self.assertRaises(Exception, self.objSeries.__sub__,
np.array(1, dtype=np.int64))
# series ops
v1 = date_range('2012-1-1', periods=3, freq='D')
v2 = date_range('2012-1-2', periods=3, freq='D')
rs = Series(v2) - Series(v1)
xp = Series(1e9 * 3600 * 24,
rs.index).astype('int64').astype('timedelta64[ns]')
assert_series_equal(rs, xp)
self.assertEqual(rs.dtype, 'timedelta64[ns]')
df = DataFrame(dict(A=v1))
td = Series([timedelta(days=i) for i in range(3)])
self.assertEqual(td.dtype, 'timedelta64[ns]')
# series on the rhs
result = df['A'] - df['A'].shift()
self.assertEqual(result.dtype, 'timedelta64[ns]')
result = df['A'] + td
self.assertEqual(result.dtype, 'M8[ns]')
# scalar Timestamp on rhs
maxa = df['A'].max()
tm.assertIsInstance(maxa, Timestamp)
resultb = df['A'] - df['A'].max()
self.assertEqual(resultb.dtype, 'timedelta64[ns]')
# timestamp on lhs
result = resultb + df['A']
values = [Timestamp('20111230'), Timestamp('20120101'),
Timestamp('20120103')]
expected = Series(values, name='A')
assert_series_equal(result, expected)
# datetimes on rhs
result = df['A'] - datetime(2001, 1, 1)
expected = Series(
[timedelta(days=4017 + i) for i in range(3)], name='A')
assert_series_equal(result, expected)
self.assertEqual(result.dtype, 'm8[ns]')
d = datetime(2001, 1, 1, 3, 4)
resulta = df['A'] - d
self.assertEqual(resulta.dtype, 'm8[ns]')
# roundtrip
resultb = resulta + d
assert_series_equal(df['A'], resultb)
# timedeltas on rhs
td = timedelta(days=1)
resulta = df['A'] + td
resultb = resulta - td
assert_series_equal(resultb, df['A'])
self.assertEqual(resultb.dtype, 'M8[ns]')
# roundtrip
td = timedelta(minutes=5, seconds=3)
resulta = df['A'] + td
resultb = resulta - td
assert_series_equal(df['A'], resultb)
self.assertEqual(resultb.dtype, 'M8[ns]')
# inplace
value = rs[2] + np.timedelta64(timedelta(minutes=5, seconds=1))
rs[2] += np.timedelta64(timedelta(minutes=5, seconds=1))
self.assertEqual(rs[2], value)
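# Note: the dtype rules exercised above boil down to (informal summary):
#   datetime64 - datetime64      -> timedelta64[ns] (m8[ns])
#   datetime64 +/- timedelta64   -> datetime64[ns] (M8[ns])
#   datetime64 - datetime scalar -> timedelta64[ns]
# while the invalid-op assertions at the top check that bare integers cannot
# be added to or subtracted from the object-dtype series.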
def test_operator_series_comparison_zerorank(self):
# GH 13006
result = np.float64(0) > pd.Series([1, 2, 3])
expected = 0.0 > pd.Series([1, 2, 3])
self.assert_series_equal(result, expected)
result = pd.Series([1, 2, 3]) < np.float64(0)
expected = pd.Series([1, 2, 3]) < 0.0
self.assert_series_equal(result, expected)
result = np.array([0, 1, 2])[0] > pd.Series([0, 1, 2])
expected = 0.0 > pd.Series([0, 1, 2])
self.assert_series_equal(result, expected)
def test_timedeltas_with_DateOffset(self):
# GH 4532
# operate with pd.offsets
s = Series([Timestamp('20130101 9:01'), Timestamp('20130101 9:02')])
result = s + pd.offsets.Second(5)
result2 = pd.offsets.Second(5) + s
expected = Series([Timestamp('20130101 9:01:05'), Timestamp(
'20130101 9:02:05')])
assert_series_equal(result, expected)
assert_series_equal(result2, expected)
result = s - pd.offsets.Second(5)
result2 = -pd.offsets.Second(5) + s
expected = Series([Timestamp('20130101 9:00:55'), Timestamp(
'20130101 9:01:55')])
assert_series_equal(result, expected)
assert_series_equal(result2, expected)
result = s + pd.offsets.Milli(5)
result2 = pd.offsets.Milli(5) + s
expected = Series([Timestamp('20130101 9:01:00.005'), Timestamp(
'20130101 9:02:00.005')])
assert_series_equal(result, expected)
assert_series_equal(result2, expected)
result = s + pd.offsets.Minute(5) + pd.offsets.Milli(5)
expected = Series([Timestamp('20130101 9:06:00.005'), Timestamp(
'20130101 9:07:00.005')])
assert_series_equal(result, expected)
# operate with np.timedelta64 correctly
result = s + np.timedelta64(1, 's')
result2 = np.timedelta64(1, 's') + s
expected = Series([Timestamp('20130101 9:01:01'), Timestamp(
'20130101 9:02:01')])
assert_series_equal(result, expected)
assert_series_equal(result2, expected)
result = s + np.timedelta64(5, 'ms')
result2 = np.timedelta64(5, 'ms') + s
expected = Series([Timestamp('20130101 9:01:00.005'), Timestamp(
'20130101 9:02:00.005')])
assert_series_equal(result, expected)
assert_series_equal(result2, expected)
# valid DateOffsets
for do in ['Hour', 'Minute', 'Second', 'Day', 'Micro', 'Milli',
'Nano']:
op = getattr(pd.offsets, do)
s + op(5)
op(5) + s
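# Note: pd.offsets objects (Second, Milli, Minute, ...) support both
# left- and right-addition with a datetime64 Series, so, as a sketch,
# s + pd.offsets.Second(5) and pd.offsets.Second(5) + s are expected to
# produce the same shifted timestamps.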
def test_timedelta_series_ops(self):
# GH11925
s = Series(timedelta_range('1 day', periods=3))
ts = Timestamp('2012-01-01')
expected = Series(date_range('2012-01-02', periods=3))
assert_series_equal(ts + s, expected)
assert_series_equal(s + ts, expected)
expected2 = Series(date_range('2011-12-31', periods=3, freq='-1D'))
assert_series_equal(ts - s, expected2)
assert_series_equal(ts + (-s), expected2)
def test_timedelta64_operations_with_DateOffset(self):
# GH 10699
td = Series([timedelta(minutes=5, seconds=3)] * 3)
result = td + pd.offsets.Minute(1)
expected = Series([timedelta(minutes=6, seconds=3)] * 3)
assert_series_equal(result, expected)
result = td - pd.offsets.Minute(1)
expected = Series([timedelta(minutes=4, seconds=3)] * 3)
assert_series_equal(result, expected)
result = td + Series([pd.offsets.Minute(1), pd.offsets.Second(3),
pd.offsets.Hour(2)])
expected = Series([timedelta(minutes=6, seconds=3), timedelta(
minutes=5, seconds=6), timedelta(hours=2, minutes=5, seconds=3)])
assert_series_equal(result, expected)
result = td + pd.offsets.Minute(1) + pd.offsets.Second(12)
expected = Series([timedelta(minutes=6, seconds=15)] * 3)
assert_series_equal(result, expected)
# valid DateOffsets
for do in ['Hour', 'Minute', 'Second', 'Day', 'Micro', 'Milli',
'Nano']:
op = getattr(pd.offsets, do)
td + op(5)
op(5) + td
td - op(5)
op(5) - td
def test_timedelta64_operations_with_timedeltas(self):
# td operate with td
td1 = Series([timedelta(minutes=5, seconds=3)] * 3)
td2 = timedelta(minutes=5, seconds=4)
result = td1 - td2
expected = Series([timedelta(seconds=0)] * 3) - Series([timedelta(
seconds=1)] * 3)
self.assertEqual(result.dtype, 'm8[ns]')
assert_series_equal(result, expected)
result2 = td2 - td1
expected = (Series([timedelta(seconds=1)] * 3) - Series([timedelta(
seconds=0)] * 3))
assert_series_equal(result2, expected)
# roundtrip
assert_series_equal(result + td2, td1)
# Now again, using pd.to_timedelta, which should build
# a Series or a scalar, depending on input.
td1 = Series(pd.to_timedelta(['00:05:03'] * 3))
td2 = pd.to_timedelta('00:05:04')
result = td1 - td2
expected = Series([timedelta(seconds=0)] * 3) - Series([timedelta(
seconds=1)] * 3)
self.assertEqual(result.dtype, 'm8[ns]')
assert_series_equal(result, expected)
result2 = td2 - td1
expected = (Series([timedelta(seconds=1)] * 3) - Series([timedelta(
seconds=0)] * 3))
assert_series_equal(result2, expected)
# roundtrip
assert_series_equal(result + td2, td1)
def test_timedelta64_operations_with_integers(self):
# GH 4521
# divide/multiply by integers
startdate = Series(date_range('2013-01-01', '2013-01-03'))
enddate = Series(date_range('2013-03-01', '2013-03-03'))
s1 = enddate - startdate
s1[2] = np.nan
s2 = Series([2, 3, 4])
expected = Series(s1.values.astype(np.int64) / s2, dtype='m8[ns]')
expected[2] = np.nan
result = s1 / s2
assert_series_equal(result, expected)
s2 = Series([20, 30, 40])
expected = Series(s1.values.astype(np.int64) / s2, dtype='m8[ns]')
expected[2] = np.nan
result = s1 / s2
assert_series_equal(result, expected)
result = s1 / 2
expected = Series(s1.values.astype(np.int64) / 2, dtype='m8[ns]')
expected[2] = np.nan
assert_series_equal(result, expected)
s2 = Series([20, 30, 40])
expected = Series(s1.values.astype(np.int64) * s2, dtype='m8[ns]')
expected[2] = np.nan
result = s1 * s2
assert_series_equal(result, expected)
for dtype in ['int32', 'int16', 'uint32', 'uint64', 'uint16',
'uint8']:
s2 = Series([20, 30, 40], dtype=dtype)
expected = Series(
s1.values.astype(np.int64) * s2.astype(np.int64),
dtype='m8[ns]')
expected[2] = np.nan
result = s1 * s2
assert_series_equal(result, expected)
result = s1 * 2
expected = Series(s1.values.astype(np.int64) * 2, dtype='m8[ns]')
expected[2] = np.nan
assert_series_equal(result, expected)
result = s1 * -1
expected = Series(s1.values.astype(np.int64) * -1, dtype='m8[ns]')
expected[2] = np.nan
assert_series_equal(result, expected)
# invalid ops
assert_series_equal(s1 / s2.astype(float),
Series([Timedelta('2 days 22:48:00'), Timedelta(
'1 days 23:12:00'), Timedelta('NaT')]))
assert_series_equal(s1 / 2.0,
Series([Timedelta('29 days 12:00:00'), Timedelta(
'29 days 12:00:00'), Timedelta('NaT')]))
for op in ['__add__', '__sub__']:
sop = getattr(s1, op, None)
if sop is not None:
self.assertRaises(TypeError, sop, 1)
self.assertRaises(TypeError, sop, s2.values)
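# Note: multiplying or dividing a timedelta64 Series by integers scales the
# underlying nanosecond values, which is why the expected results above are
# built by casting to int64 first, e.g. (sketch):
#   Series(s1.values.astype(np.int64) * 2, dtype='m8[ns]')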
def test_timedelta64_conversions(self):
startdate = Series(date_range('2013-01-01', '2013-01-03'))
enddate = Series(date_range('2013-03-01', '2013-03-03'))
s1 = enddate - startdate
s1[2] = np.nan
for m in [1, 3, 10]:
for unit in ['D', 'h', 'm', 's', 'ms', 'us', 'ns']:
# op
expected = s1.apply(lambda x: x / np.timedelta64(m, unit))
result = s1 / np.timedelta64(m, unit)
assert_series_equal(result, expected)
if m == 1 and unit != 'ns':
# astype
result = s1.astype("timedelta64[{0}]".format(unit))
assert_series_equal(result, expected)
# reverse op
expected = s1.apply(
lambda x: Timedelta(np.timedelta64(m, unit)) / x)
result = np.timedelta64(m, unit) / s1
# astype
s = Series(date_range('20130101', periods=3))
result = s.astype(object)
self.assertIsInstance(result.iloc[0], datetime)
self.assertTrue(result.dtype == np.object_)
result = s1.astype(object)
self.assertIsInstance(result.iloc[0], timedelta)
self.assertTrue(result.dtype == np.object_)
def test_timedelta64_equal_timedelta_supported_ops(self):
ser = Series([Timestamp('20130301'), Timestamp('20130228 23:00:00'),
Timestamp('20130228 22:00:00'), Timestamp(
'20130228 21:00:00')])
intervals = 'D', 'h', 'm', 's', 'us'
# TODO: unused
# npy16_mappings = {'D': 24 * 60 * 60 * 1000000,
# 'h': 60 * 60 * 1000000,
# 'm': 60 * 1000000,
# 's': 1000000,
# 'us': 1}
def timedelta64(*args):
return sum(starmap(np.timedelta64, zip(args, intervals)))
for op, d, h, m, s, us in product([operator.add, operator.sub],
*([range(2)] * 5)):
nptd = timedelta64(d, h, m, s, us)
pytd = timedelta(days=d, hours=h, minutes=m, seconds=s,
microseconds=us)
lhs = op(ser, nptd)
rhs = op(ser, pytd)
try:
assert_series_equal(lhs, rhs)
except AssertionError:
raise AssertionError(
"invalid comparsion [op->{0},d->{1},h->{2},m->{3},"
"s->{4},us->{5}]\n{6}\n{7}\n".format(op, d, h, m, s,
us, lhs, rhs))
def test_operators_datetimelike(self):
def run_ops(ops, get_ser, test_ser):
# check that we are getting a TypeError
# with 'operate' (from core/ops.py) for the ops that are not
# defined
for op_str in ops:
op = getattr(get_ser, op_str, None)
with tm.assertRaisesRegexp(TypeError, 'operate'):
op(test_ser)
# ## timedelta64 ###
td1 = Series([timedelta(minutes=5, seconds=3)] * 3)
td1.iloc[2] = np.nan
td2 = timedelta(minutes=5, seconds=4)
ops = ['__mul__', '__floordiv__', '__pow__', '__rmul__',
'__rfloordiv__', '__rpow__']
run_ops(ops, td1, td2)
td1 + td2
td2 + td1
td1 - td2
td2 - td1
td1 / td2
td2 / td1
# ## datetime64 ###
dt1 = Series([Timestamp('20111230'), Timestamp('20120101'),
Timestamp('20120103')])
dt1.iloc[2] = np.nan
dt2 = Series([Timestamp('20111231'), Timestamp('20120102'),
Timestamp('20120104')])
ops = ['__add__', '__mul__', '__floordiv__', '__truediv__', '__div__',
'__pow__', '__radd__', '__rmul__', '__rfloordiv__',
'__rtruediv__', '__rdiv__', '__rpow__']
run_ops(ops, dt1, dt2)
dt1 - dt2
dt2 - dt1
# ## datetime64 with timedelta ###
ops = ['__mul__', '__floordiv__', '__truediv__', '__div__', '__pow__',
'__rmul__', '__rfloordiv__', '__rtruediv__', '__rdiv__',
'__rpow__']
run_ops(ops, dt1, td1)
dt1 + td1
td1 + dt1
dt1 - td1
# TODO: Decide if this ought to work.
# td1 - dt1
# ## timedelta with datetime64 ###
ops = ['__sub__', '__mul__', '__floordiv__', '__truediv__', '__div__',
'__pow__', '__rmul__', '__rfloordiv__', '__rtruediv__',
'__rdiv__', '__rpow__']
run_ops(ops, td1, dt1)
td1 + dt1
dt1 + td1
# 8260, 10763
# datetime64 with tz
ops = ['__mul__', '__floordiv__', '__truediv__', '__div__', '__pow__',
'__rmul__', '__rfloordiv__', '__rtruediv__', '__rdiv__',
'__rpow__']
tz = 'US/Eastern'
dt1 = Series(date_range('2000-01-01 09:00:00', periods=5,
tz=tz), name='foo')
dt2 = dt1.copy()
dt2.iloc[2] = np.nan
td1 = Series(timedelta_range('1 days 1 min', periods=5, freq='H'))
td2 = td1.copy()
td2.iloc[1] = np.nan
run_ops(ops, dt1, td1)
result = dt1 + td1[0]
exp = (dt1.dt.tz_localize(None) + td1[0]).dt.tz_localize(tz)
assert_series_equal(result, exp)
result = dt2 + td2[0]
exp = (dt2.dt.tz_localize(None) + td2[0]).dt.tz_localize(tz)
assert_series_equal(result, exp)
# odd numpy behavior with scalar timedeltas
if not _np_version_under1p8:
result = td1[0] + dt1
exp = (dt1.dt.tz_localize(None) + td1[0]).dt.tz_localize(tz)
assert_series_equal(result, exp)
result = td2[0] + dt2
exp = (dt2.dt.tz_localize(None) + td2[0]).dt.tz_localize(tz)
assert_series_equal(result, exp)
result = dt1 - td1[0]
exp = (dt1.dt.tz_localize(None) - td1[0]).dt.tz_localize(tz)
assert_series_equal(result, exp)
self.assertRaises(TypeError, lambda: td1[0] - dt1)
result = dt2 - td2[0]
exp = (dt2.dt.tz_localize(None) - td2[0]).dt.tz_localize(tz)
assert_series_equal(result, exp)
self.assertRaises(TypeError, lambda: td2[0] - dt2)
result = dt1 + td1
exp = (dt1.dt.tz_localize(None) + td1).dt.tz_localize(tz)
assert_series_equal(result, exp)
result = dt2 + td2
exp = (dt2.dt.tz_localize(None) + td2).dt.tz_localize(tz)
assert_series_equal(result, exp)
result = dt1 - td1
exp = (dt1.dt.tz_localize(None) - td1).dt.tz_localize(tz)
assert_series_equal(result, exp)
result = dt2 - td2
exp = (dt2.dt.tz_localize(None) - td2).dt.tz_localize(tz)
assert_series_equal(result, exp)
self.assertRaises(TypeError, lambda: td1 - dt1)
self.assertRaises(TypeError, lambda: td2 - dt2)
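# Note: for tz-aware datetimes the expected values above are derived by
# stripping the timezone, doing naive arithmetic, and re-localizing, e.g.
# (sketch): (dt1.dt.tz_localize(None) + td1).dt.tz_localize(tz), which is
# the wall-clock behaviour the assertions pin down.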
def test_sub_single_tz(self):
# GH12290
s1 = Series([pd.Timestamp('2016-02-10', tz='America/Sao_Paulo')])
s2 = Series([pd.Timestamp('2016-02-08', tz='America/Sao_Paulo')])
result = s1 - s2
expected = Series([Timedelta('2days')])
assert_series_equal(result, expected)
result = s2 - s1
expected = Series([Timedelta('-2days')])
assert_series_equal(result, expected)
def test_ops_nat(self):
# GH 11349
timedelta_series = Series([NaT, Timedelta('1s')])
datetime_series = Series([NaT, Timestamp('19900315')])
nat_series_dtype_timedelta = Series(
[NaT, NaT], dtype='timedelta64[ns]')
nat_series_dtype_timestamp = Series([NaT, NaT], dtype='datetime64[ns]')
single_nat_dtype_datetime = Series([NaT], dtype='datetime64[ns]')
single_nat_dtype_timedelta = Series([NaT], dtype='timedelta64[ns]')
# subtraction
assert_series_equal(timedelta_series - NaT, nat_series_dtype_timedelta)
assert_series_equal(-NaT + timedelta_series,
nat_series_dtype_timedelta)
assert_series_equal(timedelta_series - single_nat_dtype_timedelta,
nat_series_dtype_timedelta)
assert_series_equal(-single_nat_dtype_timedelta + timedelta_series,
nat_series_dtype_timedelta)
assert_series_equal(datetime_series - NaT, nat_series_dtype_timestamp)
assert_series_equal(-NaT + datetime_series, nat_series_dtype_timestamp)
assert_series_equal(datetime_series - single_nat_dtype_datetime,
nat_series_dtype_timedelta)
with tm.assertRaises(TypeError):
-single_nat_dtype_datetime + datetime_series
assert_series_equal(datetime_series - single_nat_dtype_timedelta,
nat_series_dtype_timestamp)
assert_series_equal(-single_nat_dtype_timedelta + datetime_series,
nat_series_dtype_timestamp)
# without a Series wrapping the NaT, it is ambiguous
# whether it is a datetime64 or timedelta64
# defaults to interpreting it as timedelta64
assert_series_equal(nat_series_dtype_timestamp - NaT,
nat_series_dtype_timestamp)
assert_series_equal(-NaT + nat_series_dtype_timestamp,
nat_series_dtype_timestamp)
assert_series_equal(nat_series_dtype_timestamp -
single_nat_dtype_datetime,
nat_series_dtype_timedelta)
with tm.assertRaises(TypeError):
-single_nat_dtype_datetime + nat_series_dtype_timestamp
assert_series_equal(nat_series_dtype_timestamp -
single_nat_dtype_timedelta,
nat_series_dtype_timestamp)
assert_series_equal(-single_nat_dtype_timedelta +
nat_series_dtype_timestamp,
nat_series_dtype_timestamp)
with tm.assertRaises(TypeError):
timedelta_series - single_nat_dtype_datetime
# addition
assert_series_equal(nat_series_dtype_timestamp + NaT,
nat_series_dtype_timestamp)
assert_series_equal(NaT + nat_series_dtype_timestamp,
nat_series_dtype_timestamp)
assert_series_equal(nat_series_dtype_timestamp +
single_nat_dtype_timedelta,
nat_series_dtype_timestamp)
assert_series_equal(single_nat_dtype_timedelta +
nat_series_dtype_timestamp,
nat_series_dtype_timestamp)
assert_series_equal(nat_series_dtype_timedelta + NaT,
nat_series_dtype_timedelta)
assert_series_equal(NaT + nat_series_dtype_timedelta,
nat_series_dtype_timedelta)
assert_series_equal(nat_series_dtype_timedelta +
single_nat_dtype_timedelta,
nat_series_dtype_timedelta)
assert_series_equal(single_nat_dtype_timedelta +
nat_series_dtype_timedelta,
nat_series_dtype_timedelta)
assert_series_equal(timedelta_series + NaT, nat_series_dtype_timedelta)
assert_series_equal(NaT + timedelta_series, nat_series_dtype_timedelta)
assert_series_equal(timedelta_series + single_nat_dtype_timedelta,
nat_series_dtype_timedelta)
assert_series_equal(single_nat_dtype_timedelta + timedelta_series,
nat_series_dtype_timedelta)
assert_series_equal(nat_series_dtype_timestamp + NaT,
nat_series_dtype_timestamp)
assert_series_equal(NaT + nat_series_dtype_timestamp,
nat_series_dtype_timestamp)
assert_series_equal(nat_series_dtype_timestamp +
single_nat_dtype_timedelta,
nat_series_dtype_timestamp)
assert_series_equal(single_nat_dtype_timedelta +
nat_series_dtype_timestamp,
nat_series_dtype_timestamp)
assert_series_equal(nat_series_dtype_timedelta + NaT,
nat_series_dtype_timedelta)
assert_series_equal(NaT + nat_series_dtype_timedelta,
nat_series_dtype_timedelta)
assert_series_equal(nat_series_dtype_timedelta +
single_nat_dtype_timedelta,
nat_series_dtype_timedelta)
assert_series_equal(single_nat_dtype_timedelta +
nat_series_dtype_timedelta,
nat_series_dtype_timedelta)
assert_series_equal(nat_series_dtype_timedelta +
single_nat_dtype_datetime,
nat_series_dtype_timestamp)
assert_series_equal(single_nat_dtype_datetime +
nat_series_dtype_timedelta,
nat_series_dtype_timestamp)
# multiplication
assert_series_equal(nat_series_dtype_timedelta * 1.0,
nat_series_dtype_timedelta)
assert_series_equal(1.0 * nat_series_dtype_timedelta,
nat_series_dtype_timedelta)
assert_series_equal(timedelta_series * 1, timedelta_series)
assert_series_equal(1 * timedelta_series, timedelta_series)
assert_series_equal(timedelta_series * 1.5,
Series([NaT, Timedelta('1.5s')]))
assert_series_equal(1.5 * timedelta_series,
Series([NaT, Timedelta('1.5s')]))
assert_series_equal(timedelta_series * nan, nat_series_dtype_timedelta)
assert_series_equal(nan * timedelta_series, nat_series_dtype_timedelta)
with tm.assertRaises(TypeError):
datetime_series * 1
with tm.assertRaises(TypeError):
nat_series_dtype_timestamp * 1
with tm.assertRaises(TypeError):
datetime_series * 1.0
with tm.assertRaises(TypeError):
nat_series_dtype_timestamp * 1.0
# division
assert_series_equal(timedelta_series / 2,
Series([NaT, Timedelta('0.5s')]))
assert_series_equal(timedelta_series / 2.0,
Series([NaT, Timedelta('0.5s')]))
assert_series_equal(timedelta_series / nan, nat_series_dtype_timedelta)
with tm.assertRaises(TypeError):
nat_series_dtype_timestamp / 1.0
with tm.assertRaises(TypeError):
nat_series_dtype_timestamp / 1
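# Note: NaT propagates through datetime/timedelta arithmetic much like NaN
# does for floats: any +, -, * or / involving NaT yields NaT of the
# appropriate dtype, while multiplying or dividing a datetime64 Series is
# always a TypeError, as asserted above.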
def test_ops_datetimelike_align(self):
# GH 7500
# datetimelike ops need to align
dt = Series(date_range('2012-1-1', periods=3, freq='D'))
dt.iloc[2] = np.nan
dt2 = dt[::-1]
expected = Series([timedelta(0), timedelta(0), pd.NaT])
# name is reset
result = dt2 - dt
assert_series_equal(result, expected)
expected = Series(expected, name=0)
result = (dt2.to_frame() - dt.to_frame())[0]
assert_series_equal(result, expected)
def test_object_comparisons(self):
s = Series(['a', 'b', np.nan, 'c', 'a'])
result = s == 'a'
expected = Series([True, False, False, False, True])
assert_series_equal(result, expected)
result = s < 'a'
expected = Series([False, False, False, False, False])
assert_series_equal(result, expected)
result = s != 'a'
expected = -(s == 'a')
assert_series_equal(result, expected)
def test_comparison_tuples(self):
# GH11339
# comparisons vs tuple
s = Series([(1, 1), (1, 2)])
result = s == (1, 2)
expected = Series([False, True])
assert_series_equal(result, expected)
result = s != (1, 2)
expected = Series([True, False])
assert_series_equal(result, expected)
result = s == (0, 0)
expected = Series([False, False])
assert_series_equal(result, expected)
result = s != (0, 0)
expected = Series([True, True])
assert_series_equal(result, expected)
s = Series([(1, 1), (1, 1)])
result = s == (1, 1)
expected = Series([True, True])
assert_series_equal(result, expected)
result = s != (1, 1)
expected = Series([False, False])
assert_series_equal(result, expected)
s = Series([frozenset([1]), frozenset([1, 2])])
result = s == frozenset([1])
expected = Series([True, False])
assert_series_equal(result, expected)
def test_comparison_operators_with_nas(self):
s = Series(bdate_range('1/1/2000', periods=10), dtype=object)
s[::2] = np.nan
# test that comparisons work
ops = ['lt', 'le', 'gt', 'ge', 'eq', 'ne']
for op in ops:
val = s[5]
f = getattr(operator, op)
result = f(s, val)
expected = f(s.dropna(), val).reindex(s.index)
if op == 'ne':
expected = expected.fillna(True).astype(bool)
else:
expected = expected.fillna(False).astype(bool)
assert_series_equal(result, expected)
# TODO: the reversed (scalar on the left) comparisons below do not yet
# behave symmetrically, so they stay disabled:
# result = f(val, s)
# expected = f(val, s.dropna()).reindex(s.index)
# assert_series_equal(result, expected)
# boolean &, |, ^ should work with object arrays and propagate NAs
ops = ['and_', 'or_', 'xor']
mask = s.isnull()
for bool_op in ops:
f = getattr(operator, bool_op)
filled = s.fillna(s[0])
result = f(s < s[9], s > s[3])
expected = f(filled < filled[9], filled > filled[3])
expected[mask] = False
assert_series_equal(result, expected)
def test_comparison_object_numeric_nas(self):
s = Series(np.random.randn(10), dtype=object)
shifted = s.shift(2)
ops = ['lt', 'le', 'gt', 'ge', 'eq', 'ne']
for op in ops:
f = getattr(operator, op)
result = f(s, shifted)
expected = f(s.astype(float), shifted.astype(float))
assert_series_equal(result, expected)
def test_comparison_invalid(self):
# GH4968
# invalid date/int comparisons
s = Series(range(5))
s2 = Series(date_range('20010101', periods=5))
for (x, y) in [(s, s2), (s2, s)]:
self.assertRaises(TypeError, lambda: x == y)
self.assertRaises(TypeError, lambda: x != y)
self.assertRaises(TypeError, lambda: x >= y)
self.assertRaises(TypeError, lambda: x > y)
self.assertRaises(TypeError, lambda: x < y)
self.assertRaises(TypeError, lambda: x <= y)
def test_more_na_comparisons(self):
for dtype in [None, object]:
left = Series(['a', np.nan, 'c'], dtype=dtype)
right = Series(['a', np.nan, 'd'], dtype=dtype)
result = left == right
expected = Series([True, False, False])
assert_series_equal(result, expected)
result = left != right
expected = Series([False, True, True])
assert_series_equal(result, expected)
result = left == np.nan
expected = Series([False, False, False])
assert_series_equal(result, expected)
result = left != np.nan
expected = Series([True, True, True])
assert_series_equal(result, expected)
def test_nat_comparisons(self):
data = [([pd.Timestamp('2011-01-01'), pd.NaT,
pd.Timestamp('2011-01-03')],
[pd.NaT, pd.NaT, pd.Timestamp('2011-01-03')]),
([pd.Timedelta('1 days'), pd.NaT,
pd.Timedelta('3 days')],
[pd.NaT, pd.NaT, pd.Timedelta('3 days')]),
([pd.Period('2011-01', freq='M'), pd.NaT,
pd.Period('2011-03', freq='M')],
[pd.NaT, pd.NaT, pd.Period('2011-03', freq='M')])]
# add lhs / rhs switched data
data = data + [(r, l) for l, r in data]
for l, r in data:
for dtype in [None, object]:
left = Series(l, dtype=dtype)
# Series, Index
for right in [Series(r, dtype=dtype), Index(r, dtype=dtype)]:
expected = Series([False, False, True])
assert_series_equal(left == right, expected)
expected = Series([True, True, False])
assert_series_equal(left != right, expected)
expected = Series([False, False, False])
assert_series_equal(left < right, expected)
expected = Series([False, False, False])
assert_series_equal(left > right, expected)
expected = Series([False, False, True])
assert_series_equal(left >= right, expected)
expected = Series([False, False, True])
assert_series_equal(left <= right, expected)
def test_nat_comparisons_scalar(self):
data = [[pd.Timestamp('2011-01-01'), pd.NaT,
pd.Timestamp('2011-01-03')],
[pd.Timedelta('1 days'), pd.NaT, pd.Timedelta('3 days')],
[pd.Period('2011-01', freq='M'), pd.NaT,
pd.Period('2011-03', freq='M')]]
for l in data:
for dtype in [None, object]:
left = Series(l, dtype=dtype)
expected = Series([False, False, False])
assert_series_equal(left == pd.NaT, expected)
assert_series_equal(pd.NaT == left, expected)
expected = Series([True, True, True])
assert_series_equal(left != pd.NaT, expected)
assert_series_equal(pd.NaT != left, expected)
expected = Series([False, False, False])
assert_series_equal(left < pd.NaT, expected)
assert_series_equal(pd.NaT > left, expected)
assert_series_equal(left <= pd.NaT, expected)
assert_series_equal(pd.NaT >= left, expected)
assert_series_equal(left > pd.NaT, expected)
assert_series_equal(pd.NaT < left, expected)
assert_series_equal(left >= pd.NaT, expected)
assert_series_equal(pd.NaT <= left, expected)
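# Note: comparisons against NaT mirror NaN semantics: ==, <, <=, > and >=
# all return False and only != returns True, regardless of which side NaT
# appears on.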
def test_comparison_different_length(self):
a = Series(['a', 'b', 'c'])
b = Series(['b', 'a'])
self.assertRaises(ValueError, a.__lt__, b)
a = Series([1, 2])
b = Series([2, 3, 4])
self.assertRaises(ValueError, a.__eq__, b)
def test_comparison_label_based(self):
# GH 4947
# comparisons should be label based
a = Series([True, False, True], list('bca'))
b = Series([False, True, False], list('abc'))
expected = Series([False, True, False], list('abc'))
result = a & b
assert_series_equal(result, expected)
expected = Series([True, True, False], list('abc'))
result = a | b
assert_series_equal(result, expected)
expected = Series([True, False, False], list('abc'))
result = a ^ b
assert_series_equal(result, expected)
# rhs is bigger
a = Series([True, False, True], list('bca'))
b = Series([False, True, False, True], list('abcd'))
expected = Series([False, True, False, False], list('abcd'))
result = a & b
assert_series_equal(result, expected)
expected = Series([True, True, False, False], list('abcd'))
result = a | b
assert_series_equal(result, expected)
# filling
# vs empty
result = a & Series([])
expected = Series([False, False, False], list('bca'))
assert_series_equal(result, expected)
result = a | Series([])
expected = Series([True, False, True], list('bca'))
assert_series_equal(result, expected)
# vs non-matching
result = a & Series([1], ['z'])
expected = Series([False, False, False, False], list('abcz'))
assert_series_equal(result, expected)
result = a | Series([1], ['z'])
expected = Series([True, True, False, False], list('abcz'))
assert_series_equal(result, expected)
# identity
# we would like s[s|e] == s to hold for any e, whether empty or not
for e in [Series([]), Series([1], ['z']),
Series(np.nan, b.index), Series(np.nan, a.index)]:
result = a[a | e]
assert_series_equal(result, a[a])
for e in [Series(['z'])]:
if compat.PY3:
with tm.assert_produces_warning(RuntimeWarning):
result = a[a | e]
else:
result = a[a | e]
assert_series_equal(result, a[a])
# vs scalars
index = list('bca')
t = Series([True, False, True])
for v in [True, 1, 2]:
result = Series([True, False, True], index=index) | v
expected = Series([True, True, True], index=index)
assert_series_equal(result, expected)
for v in [np.nan, 'foo']:
self.assertRaises(TypeError, lambda: t | v)
for v in [False, 0]:
result = Series([True, False, True], index=index) | v
expected = Series([True, False, True], index=index)
assert_series_equal(result, expected)
for v in [True, 1]:
result = Series([True, False, True], index=index) & v
expected = Series([True, False, True], index=index)
assert_series_equal(result, expected)
for v in [False, 0]:
result = Series([True, False, True], index=index) & v
expected = Series([False, False, False], index=index)
assert_series_equal(result, expected)
for v in [np.nan]:
self.assertRaises(TypeError, lambda: t & v)
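# Note: boolean ops between Series align on labels first; the result index is
# the union of both indexes and non-overlapping positions are filled with
# False before &, | or ^ is applied. Sketch:
#   Series([True], ['a']) & Series([True], ['b']) -> [False, False] on ['a', 'b']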
def test_comparison_flex_basic(self):
left = pd.Series(np.random.randn(10))
right = pd.Series(np.random.randn(10))
tm.assert_series_equal(left.eq(right), left == right)
tm.assert_series_equal(left.ne(right), left != right)
tm.assert_series_equal(left.le(right), left <= right)
tm.assert_series_equal(left.lt(right), left < right)
tm.assert_series_equal(left.gt(right), left > right)
tm.assert_series_equal(left.ge(right), left >= right)
# axis
for axis in [0, None, 'index']:
tm.assert_series_equal(left.eq(right, axis=axis), left == right)
tm.assert_series_equal(left.ne(right, axis=axis), left != right)
tm.assert_series_equal(left.le(right, axis=axis), left <= right)
tm.assert_series_equal(left.lt(right, axis=axis), left < right)
tm.assert_series_equal(left.gt(right, axis=axis), left > right)
tm.assert_series_equal(left.ge(right, axis=axis), left >= right)
# invalid axis
msg = 'No axis named 1 for object type'
for op in ['eq', 'ne', 'le', 'lt', 'gt', 'ge']:
with tm.assertRaisesRegexp(ValueError, msg):
getattr(left, op)(right, axis=1)
def test_comparison_flex_alignment(self):
left = Series([1, 3, 2], index=list('abc'))
right = Series([2, 2, 2], index=list('bcd'))
exp = pd.Series([False, False, True, False], index=list('abcd'))
tm.assert_series_equal(left.eq(right), exp)
exp = pd.Series([True, True, False, True], index=list('abcd'))
tm.assert_series_equal(left.ne(right), exp)
exp = pd.Series([False, False, True, False], index=list('abcd'))
tm.assert_series_equal(left.le(right), exp)
exp = pd.Series([False, False, False, False], index=list('abcd'))
tm.assert_series_equal(left.lt(right), exp)
exp = pd.Series([False, True, True, False], index=list('abcd'))
tm.assert_series_equal(left.ge(right), exp)
exp = pd.Series([False, True, False, False], index=list('abcd'))
tm.assert_series_equal(left.gt(right), exp)
def test_comparison_flex_alignment_fill(self):
left = Series([1, 3, 2], index=list('abc'))
right = Series([2, 2, 2], index=list('bcd'))
exp = pd.Series([False, False, True, True], index=list('abcd'))
tm.assert_series_equal(left.eq(right, fill_value=2), exp)
exp = pd.Series([True, True, False, False], index=list('abcd'))
tm.assert_series_equal(left.ne(right, fill_value=2), exp)
exp = pd.Series([False, False, True, True], index=list('abcd'))
tm.assert_series_equal(left.le(right, fill_value=0), exp)
exp = pd.Series([False, False, False, True], index=list('abcd'))
tm.assert_series_equal(left.lt(right, fill_value=0), exp)
exp = pd.Series([True, True, True, False], index=list('abcd'))
tm.assert_series_equal(left.ge(right, fill_value=0), exp)
exp = pd.Series([True, True, False, False], index=list('abcd'))
tm.assert_series_equal(left.gt(right, fill_value=0), exp)
def test_operators_bitwise(self):
# GH 9016: support bitwise op for integer types
index = list('bca')
s_tft = Series([True, False, True], index=index)
s_fff = Series([False, False, False], index=index)
s_tff = Series([True, False, False], index=index)
s_empty = Series([])
# TODO: unused
# s_0101 = Series([0, 1, 0, 1])
s_0123 = Series(range(4), dtype='int64')
s_3333 = Series([3] * 4)
s_4444 = Series([4] * 4)
res = s_tft & s_empty
expected = s_fff
assert_series_equal(res, expected)
res = s_tft | s_empty
expected = s_tft
assert_series_equal(res, expected)
res = s_0123 & s_3333
expected = Series(range(4), dtype='int64')
assert_series_equal(res, expected)
res = s_0123 | s_4444
expected = Series(range(4, 8), dtype='int64')
assert_series_equal(res, expected)
s_a0b1c0 = Series([1], list('b'))
res = s_tft & s_a0b1c0
expected = s_tff.reindex(list('abc'))
assert_series_equal(res, expected)
res = s_tft | s_a0b1c0
expected = s_tft.reindex(list('abc'))
assert_series_equal(res, expected)
n0 = 0
res = s_tft & n0
expected = s_fff
assert_series_equal(res, expected)
res = s_0123 & n0
expected = Series([0] * 4)
assert_series_equal(res, expected)
n1 = 1
res = s_tft & n1
expected = s_tft
assert_series_equal(res, expected)
res = s_0123 & n1
expected = Series([0, 1, 0, 1])
assert_series_equal(res, expected)
s_1111 = Series([1] * 4, dtype='int8')
res = s_0123 & s_1111
expected = Series([0, 1, 0, 1], dtype='int64')
assert_series_equal(res, expected)
res = s_0123.astype(np.int16) | s_1111.astype(np.int32)
expected = Series([1, 1, 3, 3], dtype='int32')
assert_series_equal(res, expected)
self.assertRaises(TypeError, lambda: s_1111 & 'a')
self.assertRaises(TypeError, lambda: s_1111 & ['a', 'b', 'c', 'd'])
self.assertRaises(TypeError, lambda: s_0123 & np.NaN)
self.assertRaises(TypeError, lambda: s_0123 & 3.14)
self.assertRaises(TypeError, lambda: s_0123 & [0.1, 4, 3.14, 2])
# s_0123 will be all false now because of reindexing like s_tft
if compat.PY3:
# unable to sort incompatible object via .union.
exp = Series([False] * 7, index=['b', 'c', 'a', 0, 1, 2, 3])
with tm.assert_produces_warning(RuntimeWarning):
assert_series_equal(s_tft & s_0123, exp)
else:
exp = Series([False] * 7, index=[0, 1, 2, 3, 'a', 'b', 'c'])
assert_series_equal(s_tft & s_0123, exp)
# s_tft will be all false now because of reindexing like s_0123
if compat.PY3:
# unable to sort incompatible object via .union.
exp = Series([False] * 7, index=[0, 1, 2, 3, 'b', 'c', 'a'])
with tm.assert_produces_warning(RuntimeWarning):
assert_series_equal(s_0123 & s_tft, exp)
else:
exp = Series([False] * 7, index=[0, 1, 2, 3, 'a', 'b', 'c'])
assert_series_equal(s_0123 & s_tft, exp)
assert_series_equal(s_0123 & False, Series([False] * 4))
assert_series_equal(s_0123 ^ False, Series([False, True, True, True]))
assert_series_equal(s_0123 & [False], Series([False] * 4))
assert_series_equal(s_0123 & (False), Series([False] * 4))
assert_series_equal(s_0123 & Series([False, np.NaN, False, False]),
Series([False] * 4))
s_ftft = Series([False, True, False, True])
assert_series_equal(s_0123 & Series([0.1, 4, -3.14, 2]), s_ftft)
s_abNd = Series(['a', 'b', np.NaN, 'd'])
res = s_0123 & s_abNd
expected = s_ftft
assert_series_equal(res, expected)
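# Note: the bitwise tests above pin down a mix of behaviours: int64 & int64
# operates on the underlying integers (GH 9016), bool Series align and fill
# with False, and several scalar/list operand combinations are required to
# raise TypeError rather than silently coerce.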
def test_scalar_na_cmp_corners(self):
s = Series([2, 3, 4, 5, 6, 7, 8, 9, 10])
import unittest
from unittest.mock import patch, Mock
from random import randint, choice
from copy import deepcopy
import numpy as np
import pandas as pd
import simplejson as json
from datetime import datetime
import threading
import queue
import time
import logging
from pyvvo import equipment, utils
from pyvvo.sparql import REG_MEAS_MEAS_MRID_COL, REG_MEAS_REG_MRID_COL,\
CAP_MEAS_MEAS_MRID_COL, CAP_MEAS_CAP_MRID_COL, SWITCH_MEAS_MEAS_MRID_COL,\
SWITCH_MEAS_SWITCH_MRID_COL
import tests.data_files as _df
class EquipmentManagerRegulatorTestCase(unittest.TestCase):
"""Test EquipmentManager with regulator data."""
# noinspection PyPep8Naming
@classmethod
def setUpClass(cls):
cls.reg_meas = _df.read_pickle(_df.REG_MEAS_9500)
with open(_df.REG_MEAS_MSG_9500, 'r') as f:
cls.reg_meas_msg = json.load(f)
# Do some bad, fragile stuff: loop over all the measurements
# and increment the value by one. This way, we ensure the
# regulator actually changes state (I'm pretty sure the original
# message has the regulators in their original state).
for d in cls.reg_meas_msg:
d['value'] += 1
# Just create a bogus datetime.
cls.sim_dt = datetime(2019, 9, 2, 17, 8)
cls.reg_df = _df.read_pickle(_df.REGULATORS_9500)
# noinspection PyPep8Naming
def setUp(self):
# Gotta be careful with these mutable types... Get fresh
# instances each time. It won't be that slow, I promise.
self.reg_dict = \
equipment.initialize_regulators(self.reg_df)
self.reg_mgr = \
equipment.EquipmentManager(
eq_dict=self.reg_dict, eq_meas=self.reg_meas,
meas_mrid_col=REG_MEAS_MEAS_MRID_COL,
eq_mrid_col=REG_MEAS_REG_MRID_COL
)
@staticmethod
def random_update(reg_in, list_in):
"""Helper to randomly update tap steps. Use this with the
loop_helper.
"""
new_step = reg_in.state
while new_step == reg_in.state:
new_step = randint(reg_in.low_step, reg_in.high_step)
reg_in.state = new_step
list_in.append(new_step)
def test_reg_dict_attribute(self):
self.assertIs(self.reg_dict, self.reg_mgr.eq_dict)
def test_missing_meas(self):
"""Ensure we get an exception if missing an input."""
meas = self.reg_meas.copy(deep=True)
meas = meas.drop(index=meas.index[-1])
s = 'The eq_meas input is missing equipment'
with self.assertRaisesRegex(ValueError, s):
_ = \
equipment.EquipmentManager(
eq_dict=self.reg_dict, eq_meas=meas,
meas_mrid_col=REG_MEAS_MEAS_MRID_COL,
eq_mrid_col=REG_MEAS_REG_MRID_COL
)
def test_duplicate_meas(self):
"""Ensure we get an exception if inputs are not consistent.
"""
meas = self.reg_meas.copy(deep=True)
# Create a duplicate entry.
meas = meas.append(meas.iloc[0])
s = 'Received 2 measurements for equipment with mrid'
with self.assertRaisesRegex(ValueError, s):
_ = \
equipment.EquipmentManager(
eq_dict=self.reg_dict, eq_meas=meas,
meas_mrid_col=REG_MEAS_MEAS_MRID_COL,
eq_mrid_col=REG_MEAS_REG_MRID_COL
)
def test_all_measurements_mapped(self):
"""Ensure all measurements are in the map."""
for meas_mrid in self.reg_meas['pos_meas_mrid'].values:
with self.subTest():
self.assertIn(meas_mrid, self.reg_mgr.meas_eq_map.keys())
def test_no_meas_for_reg(self):
"""Remove measurements for given regulator, ensure we get
proper exception.
"""
meas_view = self.reg_meas[
~(self.reg_meas['tap_changer_mrid']
== self.reg_meas['tap_changer_mrid'][0])
]
with self.assertRaisesRegex(ValueError, 'The eq_meas input is miss'):
_ = \
equipment.EquipmentManager(
eq_dict=self.reg_dict, eq_meas=meas_view,
meas_mrid_col=REG_MEAS_MEAS_MRID_COL,
eq_mrid_col=REG_MEAS_REG_MRID_COL
)
def test_bad_reg_dict_type(self):
with self.assertRaisesRegex(TypeError,
'eq_dict must be a dictionary'):
_ = \
equipment.EquipmentManager(
eq_dict=10, eq_meas=self.reg_meas,
meas_mrid_col=REG_MEAS_MEAS_MRID_COL,
eq_mrid_col=REG_MEAS_REG_MRID_COL
)
def test_bad_reg_meas_type(self):
with self.assertRaisesRegex(TypeError,
'eq_meas must be a Pandas'):
_ = equipment.EquipmentManager(
eq_dict=self.reg_dict, eq_meas=pd.Series(),
meas_mrid_col=REG_MEAS_MEAS_MRID_COL,
eq_mrid_col=REG_MEAS_REG_MRID_COL
)
def test_no_meas_for_single_phase_reg(self):
meas_view = self.reg_meas.drop(0, axis=0)
with self.assertRaisesRegex(ValueError, 'The eq_meas input is miss'):
_ = \
equipment.EquipmentManager(
eq_dict=self.reg_dict, eq_meas=meas_view,
meas_mrid_col=REG_MEAS_MEAS_MRID_COL,
eq_mrid_col=REG_MEAS_REG_MRID_COL
)
def test_two_meas_for_single_phase_reg(self):
reg_meas = self.reg_meas.append(self.reg_meas.iloc[0])
with self.assertRaisesRegex(ValueError, 'Received 2 measurements for'):
_ = equipment.EquipmentManager(
eq_dict=self.reg_dict, eq_meas=reg_meas,
meas_mrid_col=REG_MEAS_MEAS_MRID_COL,
eq_mrid_col=REG_MEAS_REG_MRID_COL
)
def test_update_state_simple(self):
"""Just ensure it runs without error."""
# At the time of writing, the debug line is the last one in the
# function, so ensuring it gets hit is adequate.
with self.assertLogs(logger=self.reg_mgr.log, level='DEBUG'):
self.reg_mgr.update_state(self.reg_meas_msg, sim_dt=self.sim_dt)
def test_update_state_warns_if_not_enough_meas(self):
with self.assertLogs(logger=self.reg_mgr.log, level='WARNING'):
self.reg_mgr.update_state([], sim_dt=self.sim_dt)
def test_update_state_changes_taps(self):
"""Ensure our taps changed appropriately. We'll hard-code
this for simplicity.
"""
self.reg_mgr.update_state(self.reg_meas_msg, sim_dt=self.sim_dt)
# Loop over the message.
for msg_dict in self.reg_meas_msg:
# Grab the MRID.
meas_mrid = msg_dict['measurement_mrid']
meas_value = msg_dict['value']
# Look up the measurement mrid.
row = self.reg_meas[
self.reg_meas[REG_MEAS_MEAS_MRID_COL] == meas_mrid]
self.assertGreater(row.shape[0], 0)
# Grab regulator mrid and phase.
reg_mrid = row[REG_MEAS_REG_MRID_COL].values[0]
# Ensure this regulator got updated.
with self.subTest(meas_mrid=meas_mrid):
# noinspection PyUnboundLocalVariable
self.assertEqual(self.reg_dict[reg_mrid].state, meas_value)
def test_update_state_bad_mrid(self):
reg_meas_msg = deepcopy(self.reg_meas_msg)
reg_meas_msg.append({'measurement_mrid': '1234', 'value': 12})
with self.assertLogs(level='WARNING'):
self.reg_mgr.update_state(reg_meas_msg, sim_dt=self.sim_dt)
def test_update_state_bad_entry_1(self):
reg_meas_msg = deepcopy(self.reg_meas_msg)
reg_meas_msg.append({'measurement_mrId': '1234', 'value': 12})
with self.assertRaisesRegex(KeyError, 'measurement_mrid'):
self.reg_mgr.update_state(reg_meas_msg, sim_dt=self.sim_dt)
def test_update_state_bad_entry_2(self):
reg_meas_msg = deepcopy(self.reg_meas_msg)
reg_meas_msg.append({'measurement_mrid': '1234', 'valu3': 12})
with self.assertRaisesRegex(KeyError, 'value'):
self.reg_mgr.update_state(reg_meas_msg, sim_dt=self.sim_dt)
def test_update_state_bad_type(self):
with self.assertRaisesRegex(TypeError, 'msg must be a list'):
# noinspection PyTypeChecker
self.reg_mgr.update_state(msg='hello there', sim_dt=self.sim_dt)
def test_update_state_bad_type_2(self):
with self.assertRaisesRegex(TypeError, 'string indices must be'):
# noinspection PyTypeChecker
self.reg_mgr.update_state(msg=['hello there'], sim_dt=self.sim_dt)
def test_build_equipment_commands_no_op(self):
"""Ensure that if equipment has the same state, no update
command comes out.
"""
# Just use the same dictionary to make a "do nothing" command.
out = self.reg_mgr.build_equipment_commands(
eq_dict_forward=self.reg_dict)
for v in out.values():
self.assertEqual(0, len(v))
def test_build_equipment_commands(self):
"""One stop big function which probably should be spun off into
its own test case.
NOTE: This test is fragile as it currently relies on how the
looping is performed in build_equipment_commands.
"""
reg_dict_forward = deepcopy(self.reg_dict)
# For some reason the new eq_dict won't pickle?
# reg_dict_forward = \
# equipment.initialize_regulators(
# _df.read_pickle(_df.REGULATORS_9500))
# Initialize list to hold regulator positions.
forward_vals = []
# Randomly update all regulators.
equipment.loop_helper(eq_dict=reg_dict_forward,
func=self.random_update,
list_in=forward_vals)
# Get reverse values.
reverse_vals = []
def get_state(reg_in):
reverse_vals.append(reg_in.state)
# Get reverse values.
equipment.loop_helper(eq_dict=self.reg_dict, func=get_state)
# Command the regulators.
out = self.reg_mgr.build_equipment_commands(
eq_dict_forward=reg_dict_forward)
# Ensure we're getting the fields we need.
self.assertIn('object_ids', out)
self.assertIn('attributes', out)
self.assertIn('forward_values', out)
self.assertIn('reverse_values', out)
# Ensure our forward values match. WARNING: this is quite
# fragile as it depends on looping order.
self.assertListEqual(forward_vals, out['forward_values'])
# Ensure reverse values match (also fragile).
self.assertListEqual(reverse_vals, out['reverse_values'])
# Ensure the given MRID's all correspond to tap changers.
tap_mrids = self.reg_df['tap_changer_mrid']
self.assertTrue(tap_mrids.isin(out['object_ids']).values.all())
# Ensure the lengths are equal to all our single phases.
# I'm just going to hard-code the fact that the 9500 node model
# has 6 3-phase regs.
for v in out.values():
self.assertIsInstance(v, list)
self.assertEqual(len(v), 18)
# All our regulators should have had their expected_state
# updated.
expected_state = []
def get_expected_state(reg_in):
expected_state.append(reg_in.expected_state)
equipment.loop_helper(eq_dict=self.reg_dict, func=get_expected_state)
self.assertListEqual(forward_vals, expected_state)
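# Note: the "fragile" warnings above refer to the fact that forward/reverse
# value ordering depends on the iteration order used by loop_helper; if that
# ordering ever changes, the list comparisons in this test will need to be
# revisited.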
def test_build_equipment_commands_mismatch(self):
"""Send mismatched reg dicts in."""
reg_dict_forward = deepcopy(self.reg_dict)
reg_dict_forward['blah'] = \
reg_dict_forward.pop(list(reg_dict_forward.keys())[0])
with self.assertRaisesRegex(ValueError, 'not matching up with'):
self.reg_mgr.build_equipment_commands(reg_dict_forward)
def test_build_equipment_commands_json_serializable(self):
"""If any Numpy data types leak in, we've got a problem in that
we can't serialize the data into json. Inject numpy types and
attempt to serialize the result.
"""
# Make a copy of the message
msg = deepcopy(self.reg_meas_msg)
# Change all the data types.
for d in msg:
d['value'] = np.int64(d['value'])
def assert_64(eq):
assert isinstance(eq.state, np.int64)
def assert_32(eq):
assert isinstance(eq.state, np.int32)
# Update the regulators.
# noinspection PyTypeChecker
self.reg_mgr.update_state(msg=msg, sim_dt='high noon')
# Ensure the equipment actually has int64 states.
equipment.loop_helper(self.reg_dict, assert_64)
# Similarly, update all the equipment in the dictionary.
# Helper for casting to int32.
def to_int32(eq):
eq.state = np.int32(eq.state)
# Get a copy of the regulators and randomly change their states.
reg_dict_copy = deepcopy(self.reg_dict)
equipment.loop_helper(eq_dict=reg_dict_copy, func=self.random_update,
list_in=[])
# Now cast the now randomized states to int32.
equipment.loop_helper(eq_dict=reg_dict_copy, func=to_int32)
equipment.loop_helper(reg_dict_copy, assert_32)
# Build the equipment commands. This returns a dictionary.
cmd = self.reg_mgr.build_equipment_commands(reg_dict_copy)
# Ensure we actually get commands out, otherwise we're giving
# ourselves false confidence in this test.
for v in cmd.values():
self.assertGreater(len(v), 0)
# Attempt to serialize cmd.
j_str = json.dumps(cmd)
# Put in an assert just for good measure.
self.assertIsInstance(j_str, str)
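# Note: numpy scalars (np.int64/np.int32) are not serializable by json.dumps
# out of the box, so the successful dump above suggests that
# build_equipment_commands casts equipment states back to native Python ints
# before assembling the command payload.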
def test_lookup_locked(self):
"""Ensure lookup_eq_by_mrid_and_phase uses the lock."""
with patch.object(self.reg_mgr, '_lock') as p_lock:
# Unfortunately we cannot patch the method here, because
# that also patches the wrapping. So, call the method.
with self.assertRaises(KeyError):
self.reg_mgr.lookup_eq_by_mrid_and_phase('abc')
# Ensure that acquire and release were called.
self.assertEqual('acquire', p_lock.method_calls[0][0])
self.assertEqual('release', p_lock.method_calls[1][0])
def test_update_state_locked(self):
"""Ensure update_state uses the lock."""
with patch.object(self.reg_mgr, '_lock') as p_lock:
# Unfortunately we cannot patch the method here, because
# that also patches the wrapping. So, call the method.
with self.assertRaisesRegex(TypeError, 'msg must be a list'):
# noinspection PyTypeChecker
self.reg_mgr.update_state('abc', 'def')
# Ensure that acquire and release were called.
self.assertEqual('acquire', p_lock.method_calls[0][0])
self.assertEqual('release', p_lock.method_calls[1][0])
def test_build_equipment_commands_locked(self):
"""Ensure build_equipment_commands uses the lock."""
with patch.object(self.reg_mgr, '_lock') as p_lock:
# Unfortunately we cannot patch the method here, because
# that also patches the wrapping. So, call the method.
with self.assertRaises(AttributeError):
self.reg_mgr.build_equipment_commands('abc')
# Ensure that acquire and release were called.
self.assertEqual('acquire', p_lock.method_calls[0][0])
self.assertEqual('release', p_lock.method_calls[1][0])
def test_expected_not_equal_to_actual(self):
"""Test helper function _expected_not_equal_to_actual."""
eq = Mock(spec=equipment.RegulatorSinglePhase)
eq.expected_state = 7
eq.state = None
# Ensure with a state of None we get a False return.
self.assertFalse(equipment._expected_not_equal_to_actual(eq))
# Flop 'em, and should still get None.
eq.expected_state = None
eq.state = 5
self.assertFalse(equipment._expected_not_equal_to_actual(eq))
# With different settings, we should get True.
eq.expected_state = 6
self.assertTrue(equipment._expected_not_equal_to_actual(eq))
# With the same settings, we should get False.
eq.state = 6
self.assertFalse(equipment._expected_not_equal_to_actual(eq))
def test_wait_and_get_delta(self):
"""Test _wait_and_get_delta."""
# Create times which are 60 seconds apart.
old_t = datetime(2019, 11, 4, 9, 0)
self.reg_mgr.last_time = datetime(2019, 11, 4, 9, 1)
# We should get a timeout error if the event isn't toggled.
with self.assertRaisesRegex(TimeoutError, 'The update_state method '):
self.reg_mgr._wait_and_get_delta(old_t=old_t, timeout=0.01)
# Now, spin up a thread to get the time delta.
def get_delta(mgr, dt, q):
delta = mgr._wait_and_get_delta(old_t=dt, timeout=1)
q.put(delta)
mq = queue.Queue()
t = threading.Thread(target=get_delta,
args=(self.reg_mgr, old_t, mq))
t.start()
self.reg_mgr._toggle_update_state_event()
delta_out = mq.get(timeout=1)
# Hard code 60 second difference.
self.assertEqual(delta_out, 60)
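# Note: the thread + queue pattern used here (and in the tests below) is how
# blocking calls are exercised: the blocking method runs in a worker thread,
# the main thread toggles the event it waits on, and the result comes back
# through a queue.Queue with a timeout so a regression cannot hang the suite.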
def test_verify_command(self):
"""Test the verify_command method."""
# We should get a ValueError if last_time is not set (which at
# this point, it shouldn't be).
with self.assertRaisesRegex(ValueError, 'verify_command has been cal'):
self.reg_mgr.verify_command(wait_duration=0.1, timeout=0.1)
# Grab the first piece of equipment and put into a dictionary.
mrid = list(self.reg_mgr.eq_dict.keys())[0]
eq_or_dict = self.reg_mgr.eq_dict[mrid]
if isinstance(eq_or_dict, dict):
phase = list(self.reg_mgr.eq_dict[mrid].keys())[0]
eq = self.reg_mgr.eq_dict[mrid][phase]
single_eq_dict = {mrid: {phase: eq}}
else:
eq = eq_or_dict
single_eq_dict = {mrid: eq_or_dict}
# Set the last_time, and get a time 60 seconds later.
dt = datetime(2019, 11, 4, 9, 15)
dt2 = datetime(2019, 11, 4, 9, 16)
self.reg_mgr.last_time = dt
# We should get a timeout error if the update_state_event never
# gets toggled.
with self.assertRaisesRegex(TimeoutError, 'The update_state method'):
self.reg_mgr.verify_command(wait_duration=0.01, timeout=0.01)
# If no equipment has a mismatch between their expected_state
# and their state (excluding Nones), we should get a None
# return.
mq = queue.Queue()
def put_result_in_queue(mgr, q, wait_duration, timeout):
result = mgr.verify_command(wait_duration=wait_duration,
timeout=timeout)
q.put(result)
t = threading.Thread(target=put_result_in_queue,
args=(self.reg_mgr, mq, 60, 1))
t.start()
# Wait a tiny bit for the thread to kick off properly.
time.sleep(0.05)
# Update the last_time and toggle the event to simulate a
# message coming in.
self.reg_mgr.last_time = dt2
self.reg_mgr._toggle_update_state_event()
# Grab element out of the queue.
output = mq.get(timeout=1)
# No equipment has a mismatch between expected_state and state,
# so this result should be None.
self.assertIsNone(output)
# Reset the time.
self.reg_mgr.last_time = dt
# Tweak the expected_state for our equipment to ensure it's
# different from the state.
eq.expected_state = eq.state + 1
# Get a time which is thirty seconds after the first. Hard
# coding for the win.
dt3 = datetime(2019, 11, 4, 9, 15, 30)
# Fire up another thread to run verify_command again.
mq = queue.Queue()
t = threading.Thread(target=put_result_in_queue,
args=(self.reg_mgr, mq, 60, 1))
t.start()
time.sleep(0.05)
# Update time and toggle event. However, note this time is
# less than the wait_duration.
self.reg_mgr.last_time = dt3
self.reg_mgr._toggle_update_state_event()
# We shouldn't have gotten a return value yet.
with self.assertRaises(queue.Empty):
mq.get(timeout=0.1)
# Update time and toggle event, but this time we should get a
# return.
time.sleep(0.05)
self.reg_mgr.last_time = dt2
self.reg_mgr._toggle_update_state_event()
# Extract output.
actual_dict = mq.get(timeout=1)
# Output should match our single_eq_dict.
self.assertDictEqual(single_eq_dict, actual_dict)
# The equipment should be inoperable.
def is_inoperable(eq_in):
self.assertFalse(eq_in.operable)
equipment.loop_helper(eq_dict=actual_dict, func=is_inoperable)
def test_eq_count(self):
"""Ensure our eq_count matches the number of equipment."""
c = 0
def count(eq):
nonlocal c
c += 1
equipment.loop_helper(eq_dict=self.reg_mgr.eq_dict,
func=count)
self.assertEqual(c, self.reg_mgr.eq_count)
def test_update_equipment_log_level(self):
"""Test update_equipment_log_level."""
self.reg_mgr.update_equipment_log_level(level='ERROR')
call_count = 0
def _check_level(eq):
nonlocal call_count
call_count += 1
self.assertEqual(eq.log.getEffectiveLevel(), logging.ERROR)
equipment.loop_helper(eq_dict=self.reg_mgr.eq_dict, func=_check_level)
self.assertEqual(call_count, self.reg_mgr.eq_count)
class EquipmentManagerCapacitorTestCase(unittest.TestCase):
"""Test EquipmentManager with capacitor data."""
# noinspection PyPep8Naming
@classmethod
def setUpClass(cls):
cls.cap_meas = _df.read_pickle(_df.CAP_MEAS_9500)
with open(_df.CAP_MEAS_MSG_9500, 'r') as f:
cls.cap_meas_msg = json.load(f)
# Just create a bogus datetime.
cls.sim_dt = datetime(2019, 9, 2, 17, 8)
# noinspection PyPep8Naming
def setUp(self):
# Gotta be careful with these mutable types... Get fresh
# instances each time. It won't be that slow, I promise.
self.cap_dict = \
equipment.initialize_capacitors(
_df.read_pickle(_df.CAPACITORS_9500))
self.cap_mgr = \
equipment.EquipmentManager(
eq_dict=self.cap_dict, eq_meas=self.cap_meas,
meas_mrid_col=CAP_MEAS_MEAS_MRID_COL,
eq_mrid_col=CAP_MEAS_CAP_MRID_COL
)
def test_cap_dict_attribute(self):
self.assertIs(self.cap_dict, self.cap_mgr.eq_dict)
def test_inconsistent_inputs(self):
"""Ensure we get an exception if inputs are not consistent.
"""
meas = self.cap_meas.copy(deep=True)
# Create a duplicate entry.
meas = meas.append(meas.iloc[0])
s = 'The number of measurements for equipment with mrid'
with self.assertRaisesRegex(ValueError, s):
_ = \
equipment.EquipmentManager(
eq_dict=self.cap_dict, eq_meas=meas,
meas_mrid_col=CAP_MEAS_MEAS_MRID_COL,
eq_mrid_col=CAP_MEAS_CAP_MRID_COL
)
def test_all_measurements_mapped(self):
"""Ensure all measurements are in the map."""
for meas_mrid in self.cap_meas[CAP_MEAS_MEAS_MRID_COL].values:
with self.subTest():
self.assertIn(meas_mrid, self.cap_mgr.meas_eq_map.keys())
def test_no_meas_for_cap(self):
"""Remove measurements for given capacitor, ensure we get
proper exception.
"""
meas_view = self.cap_meas[
~(self.cap_meas[CAP_MEAS_CAP_MRID_COL]
== self.cap_meas[CAP_MEAS_CAP_MRID_COL].iloc[-1])
]
with self.assertRaisesRegex(ValueError, 'The eq_meas input is miss'):
_ = \
equipment.EquipmentManager(
eq_dict=self.cap_dict, eq_meas=meas_view,
meas_mrid_col=CAP_MEAS_MEAS_MRID_COL,
eq_mrid_col=CAP_MEAS_CAP_MRID_COL
)
def test_bad_cap_dict_type(self):
with self.assertRaisesRegex(TypeError,
'eq_dict must be a dictionary'):
_ = \
equipment.EquipmentManager(
eq_dict=10, eq_meas=self.cap_meas,
meas_mrid_col=CAP_MEAS_MEAS_MRID_COL,
eq_mrid_col=CAP_MEAS_CAP_MRID_COL
)
def test_bad_cap_meas_type(self):
with self.assertRaisesRegex(TypeError,
'eq_meas must be a Pandas'):
_ = equipment.EquipmentManager(
eq_dict=self.cap_dict, eq_meas=pd.Series(),
meas_mrid_col=CAP_MEAS_MEAS_MRID_COL,
eq_mrid_col=CAP_MEAS_CAP_MRID_COL
)
def test_update_state_simple(self):
"""Just ensure it runs without error."""
# At the time of writing, the debug line is the last one in the
# function, so ensuring it gets hit is adequate.
with self.assertLogs(level='DEBUG'):
self.cap_mgr.update_state(self.cap_meas_msg, sim_dt=self.sim_dt)
def test_update_state_changes_state(self):
"""Ensure our states changed appropriately. We'll hard-code
this for simplicity.
"""
self.cap_mgr.update_state(self.cap_meas_msg, sim_dt=self.sim_dt)
# Loop over the message.
for msg_dict in self.cap_meas_msg:
# Grab the MRID.
meas_mrid = msg_dict['measurement_mrid']
meas_value = msg_dict['value']
# Look up the measurement mrid.
row = self.cap_meas[
self.cap_meas[CAP_MEAS_MEAS_MRID_COL] == meas_mrid]
self.assertGreater(row.shape[0], 0)
# Grab capacitor mrid and phase.
cap_mrid = row[CAP_MEAS_CAP_MRID_COL].values[0]
cap_phase = row['phase'].values[0]
# Ensure this capacitor got updated.
with self.subTest(meas_mrid=meas_mrid):
# Lookup the object.
eq = self.cap_mgr.lookup_eq_by_mrid_and_phase(mrid=cap_mrid,
phase=cap_phase)
# noinspection PyUnboundLocalVariable
self.assertEqual(eq.state, meas_value)
def test_update_state_bad_mrid(self):
cap_meas_msg = deepcopy(self.cap_meas_msg)
cap_meas_msg.append({'measurement_mrid': '1234', 'value': 12})
with self.assertLogs(level='WARNING'):
self.cap_mgr.update_state(cap_meas_msg, sim_dt=self.sim_dt)
def test_update_state_bad_entry_1(self):
cap_meas_msg = deepcopy(self.cap_meas_msg)
cap_meas_msg.append({'measurement_mrId': '1234', 'value': 12})
with self.assertRaisesRegex(KeyError, 'measurement_mrid'):
self.cap_mgr.update_state(cap_meas_msg, sim_dt=self.sim_dt)
def test_update_state_bad_entry_2(self):
cap_meas_msg = deepcopy(self.cap_meas_msg)
cap_meas_msg.append({'measurement_mrid': '1234', 'valu3': 12})
with self.assertRaisesRegex(KeyError, 'value'):
self.cap_mgr.update_state(cap_meas_msg, sim_dt=self.sim_dt)
def test_update_state_bad_type(self):
with self.assertRaisesRegex(TypeError, 'msg must be a list'):
# noinspection PyTypeChecker
self.cap_mgr.update_state(msg='hello there', sim_dt=self.sim_dt)
def test_update_state_bad_type_2(self):
with self.assertRaisesRegex(TypeError, 'string indices must'):
# noinspection PyTypeChecker
self.cap_mgr.update_state(msg=['hello there'], sim_dt=self.sim_dt)
def test_build_equipment_commands(self):
"""One stop big function which probably should be spun off into
its own test case.
NOTE: This test is fragile as it currently relies on how the
looping is performed in build_equipment_commands.
"""
cap_dict_forward = deepcopy(self.cap_mgr.eq_dict)
forward_vals = []
def update_state(cap):
"""Nested helper function."""
if cap.controllable:
new_state = choice(equipment.CapacitorSinglePhase.STATES)
cap.state = new_state
forward_vals.append(new_state)
# Randomly update steps.
equipment.loop_helper(eq_dict=cap_dict_forward, func=update_state)
# Grab reverse values.
reverse_vals = []
def get_state(cap):
if cap.controllable:
reverse_vals.append(cap.state)
equipment.loop_helper(eq_dict=self.cap_mgr.eq_dict, func=get_state)
# Build the equipment commands.
out = self.cap_mgr.build_equipment_commands(
eq_dict_forward=cap_dict_forward)
# Ensure we're getting the fields we need.
self.assertIn('object_ids', out)
self.assertIn('attributes', out)
self.assertIn('forward_values', out)
self.assertIn('reverse_values', out)
# Ensure our forward values match. WARNING: this is quite
# fragile as it depends on looping order.
self.assertListEqual(forward_vals, out['forward_values'])
# Ensure reverse values match (also fragile).
self.assertListEqual(reverse_vals, out['reverse_values'])
# Ensure the lengths are equal to all our controllable
# capacitors. Hard-code the fact there are 9.
for v in out.values():
self.assertIsInstance(v, list)
self.assertEqual(len(v), 9)
# Ensure our expected_state matches the state of our forward
# items.
expected_state = []
def get_expected_state(eq):
if eq.controllable:
expected_state.append(eq.expected_state)
equipment.loop_helper(eq_dict=self.cap_mgr.eq_dict,
func=get_expected_state)
self.assertListEqual(expected_state, forward_vals)
def test_build_equipment_commands_mismatch(self):
"""Send mismatched cap dicts in."""
cap = deepcopy(self.cap_dict)
cap['blah'] = \
cap.pop(list(cap.keys())[0])
with self.assertRaisesRegex(ValueError, 'not matching up with'):
self.cap_mgr.build_equipment_commands(cap)
class EquipmentManagerSwitchTestCase(unittest.TestCase):
"""Test EquipmentManager with switch data. Since the "Regulator"
and "Capacitor" versions of this test go pretty in-depth, we'll
keep this one light and simple.
"""
@classmethod
def setUpClass(cls):
cls.switch_meas = _df.read_pickle(_df.SWITCH_MEAS_9500)
with open(_df.SWITCH_MEAS_MSG_9500, 'r') as f:
cls.switch_meas_msg = json.load(f)
# Just create a bogus datetime.
cls.sim_dt = datetime(2019, 9, 2, 17, 8)
# noinspection PyPep8Naming
def setUp(self):
# Gotta be careful with these mutable types... Get fresh
# instances each time. It won't be that slow, I promise.
self.switch_dict = \
equipment.initialize_switches(
_df.read_pickle(_df.SWITCHES_9500))
self.switch_mgr = \
equipment.EquipmentManager(
eq_dict=self.switch_dict, eq_meas=self.switch_meas,
meas_mrid_col=SWITCH_MEAS_MEAS_MRID_COL,
eq_mrid_col=SWITCH_MEAS_SWITCH_MRID_COL
)
def state_none(self, switch):
"""Helper to ensure a switch state is None."""
self.assertIsNone(switch.state)
def state_valid(self, switch):
"""Helper to ensure a switch state is valid."""
self.assertIn(switch.state, equipment.SwitchSinglePhase.STATES)
def test_update(self):
"""Send in an update message and ensure that state changed and
callbacks are called.
"""
# Add a callback.
m = Mock()
self.switch_mgr.add_callback(m)
# Start by ensuring all switches start with a status of None.
equipment.loop_helper(eq_dict=self.switch_mgr.eq_dict,
func=self.state_none)
# The update_state_event should not be set.
self.assertFalse(self.switch_mgr.update_state_event.is_set())
# Before receiving an update message, last_time should be None.
self.assertIsNone(self.switch_mgr.last_time)
# Start up a thread which will flip a variable when the
# update_state_event is set.
event_queue = queue.Queue()
def toggle_state_event_set(q):
result = self.switch_mgr.update_state_event.wait(timeout=0.5)
if result:
q.put(True)
else:
q.put(False)
t = threading.Thread(target=toggle_state_event_set,
args=(event_queue,))
t.start()
        # Now that we've ensured all switches start with None status,
# update them all.
self.switch_mgr.update_state(self.switch_meas_msg, sim_dt=self.sim_dt)
# Ensure the last_time attribute has been updated.
self.assertEqual(self.switch_mgr.last_time, self.sim_dt)
# Loop again and ensure the states are now not None and are
# valid.
equipment.loop_helper(eq_dict=self.switch_mgr.eq_dict,
func=self.state_valid)
# The callback should have been called.
m.assert_called_once()
m.assert_called_with(self.sim_dt)
# Ensure our state_event_set got toggled.
self.assertTrue(event_queue.get(timeout=0.5))
def test_add_and_call_callbacks(self):
"""Test add_callback and _call_callbacks."""
# Ensure callbacks start empty.
self.assertEqual(len(self.switch_mgr._callbacks), 0)
# Add a callback.
m = Mock()
self.switch_mgr.add_callback(m)
# Callbacks should have a length of 1.
self.assertEqual(len(self.switch_mgr._callbacks), 1)
# Call the callbacks.
self.switch_mgr._call_callbacks('bananas')
# Our mock should have been called once.
m.assert_called_once()
m.assert_called_with('bananas')
# Add another callback.
m2 = Mock()
self.switch_mgr.add_callback(m2)
self.assertEqual(len(self.switch_mgr._callbacks), 2)
self.switch_mgr._call_callbacks('oranges')
self.assertEqual(m.call_count, 2)
m2.assert_called_once()
m2.assert_called_with('oranges')
def test_callback_not_called(self):
"""Ensure that for no state changes, callbacks are not called.
"""
# Update the states.
self.switch_mgr.update_state(self.switch_meas_msg, sim_dt=self.sim_dt)
# Add a callback.
m = Mock()
self.switch_mgr.add_callback(m)
# Update the states again with the same message. So, no states
# should change.
self.switch_mgr.update_state(self.switch_meas_msg, sim_dt=self.sim_dt)
# The callback should not have been called.
self.assertEqual(0, m.call_count)
def test_callback_dies(self):
"""Ensure our callbacks are held as weak references that die
when the method reference is deleted.
"""
m = Mock()
self.switch_mgr.add_callback(m)
self.assertEqual(len(self.switch_mgr._callbacks), 1)
# Delete the object and force garbage collection.
del m
import gc
gc.collect()
self.assertEqual(len(self.switch_mgr._callbacks), 0)
class EquipmentManagerBuildEquipmentCommandsInvertTestCase(unittest.TestCase):
"""Ensure build_equipment_commands acts appropriately depending on
the equipment's INVERT_STATES_FOR_COMMANDS attribute.
"""
def helper(self, invert):
"""Create equipment manager and equipment dictionaries."""
# Create dictionary with a single piece of equipment.
eq_dict = {
'mrid1': equipment.SwitchSinglePhase(
name='switch1', mrid='mrid1', phase='A', controllable=True,
state=1)
}
# Create DataFrame with measurement information.
eq_meas = | pd.DataFrame([['mrid1', 'meas1']], columns=['eq', 'meas']) | pandas.DataFrame |
import logging
import numpy as np
import pandas as pd
def feature_position(hdim1_indices,hdim2_indeces,region,track_data,threshold_i,position_threshold, target):
'''
function to determine feature position
Input:
hdim1_indices: list
hdim2_indeces: list
    region: numpy.ndarray
           2D boolean array masking the pixels of the detected region
track_data: numpy.ndarray
2D numpy array containing the data
threshold_i: float
position_threshold: str
target: str
Output:
hdim1_index: float
feature position along 1st horizontal dimension
hdim2_index: float
feature position along 2nd horizontal dimension
'''
if position_threshold=='center':
# get position as geometrical centre of identified region:
hdim1_index=np.mean(hdim1_indices)
hdim2_index=np.mean(hdim2_indeces)
elif position_threshold=='extreme':
#get position as max/min position inside the identified region:
if target == 'maximum':
index=np.argmax(track_data[region])
hdim1_index=hdim1_indices[index]
hdim2_index=hdim2_indeces[index]
if target == 'minimum':
index=np.argmin(track_data[region])
hdim1_index=hdim1_indices[index]
hdim2_index=hdim2_indeces[index]
elif position_threshold=='weighted_diff':
# get position as centre of identified region, weighted by difference from the threshold:
weights=abs(track_data[region]-threshold_i)
if sum(weights)==0:
weights=None
hdim1_index=np.average(hdim1_indices,weights=weights)
hdim2_index=np.average(hdim2_indeces,weights=weights)
elif position_threshold=='weighted_abs':
# get position as centre of identified region, weighted by absolute values if the field:
weights=abs(track_data[region])
if sum(weights)==0:
weights=None
hdim1_index=np.average(hdim1_indices,weights=weights)
hdim2_index=np.average(hdim2_indeces,weights=weights)
else:
raise ValueError('position_threshold must be center,extreme,weighted_diff or weighted_abs')
return hdim1_index,hdim2_index
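# Hedged usage sketch for feature_position (the array values and threshold below
# are made up, not part of the original module):
#
# field = np.array([[0., 1., 3.],
#                   [0., 2., 5.],
#                   [0., 0., 1.]])
# region = field >= 1.                          # boolean mask of one detected region
# h1, h2 = np.nonzero(region)                   # pixel indices of that region
# feature_position(h1, h2, region, field, 1., 'weighted_diff', 'maximum')
# # -> (hdim1_index, hdim2_index) giving the weighted centre of the feature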
def test_overlap(region_inner,region_outer):
'''
function to test for overlap between two regions (probably scope for further speedup here)
Input:
    region_inner: list
              list of 2-element tuples defining the indices of all cells in the region
    region_outer: list
              list of 2-element tuples defining the indices of all cells in the region
Output:
overlap: bool
True if there are any shared points between the two regions
'''
overlap=frozenset(region_outer).isdisjoint(region_inner)
return not overlap
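# For example (made-up coordinate tuples):
# test_overlap([(0, 0), (0, 1)], [(0, 1), (5, 5)])   # -> True, they share (0, 1)
# test_overlap([(0, 0)], [(5, 5)])                   # -> False, no shared cells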
def remove_parents(features_thresholds,regions_i,regions_old):
'''
function to remove features whose regions surround newly detected feature regions
Input:
features_thresholds: pandas.DataFrame
Dataframe containing detected features
regions_i: dict
dictionary containing the regions above/below threshold for the newly detected feature (feature ids as keys)
regions_old: dict
dictionary containing the regions above/below threshold from previous threshold (feature ids as keys)
Output:
features_thresholds pandas.DataFrame
Dataframe containing detected features excluding those that are superseded by newly detected ones
'''
list_remove=[]
for idx_i,region_i in regions_i.items():
for idx_old,region_old in regions_old.items():
if test_overlap(regions_old[idx_old],regions_i[idx_i]):
list_remove.append(idx_old)
list_remove=list(set(list_remove))
# remove parent regions:
if features_thresholds is not None:
features_thresholds=features_thresholds[~features_thresholds['idx'].isin(list_remove)]
return features_thresholds
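# Hedged illustration of remove_parents (ids and coordinates are assumptions):
# feature 1 (from the looser threshold) encloses the newly found feature 3,
# so it is dropped; feature 2 does not overlap and is kept.
#
# feats = pd.DataFrame({'idx': [1, 2, 3], 'threshold_value': [10, 10, 20]})
# regions_old = {1: [(0, 0), (0, 1), (1, 0), (1, 1)], 2: [(5, 5)]}
# regions_new = {3: [(0, 0)]}
# remove_parents(feats, regions_new, regions_old)    # keeps idx 2 and 3 only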
def feature_detection_threshold(data_i,i_time,
threshold=None,
min_num=0,
target='maximum',
position_threshold='center',
sigma_threshold=0.5,
n_erosion_threshold=0,
n_min_threshold=0,
min_distance=0,
idx_start=0):
'''
function to find features based on individual threshold value:
Input:
data_i: iris.cube.Cube
2D field to perform the feature detection (single timestep)
i_time: int
number of the current timestep
threshold: float
threshold value used to select target regions to track
target: str ('minimum' or 'maximum')
        flag to determine if tracking is targeting minima or maxima in the data
position_threshold: str('extreme', 'weighted_diff', 'weighted_abs' or 'center')
flag choosing method used for the position of the tracked feature
sigma_threshold: float
        standard deviation for initial filtering step
n_erosion_threshold: int
number of pixel by which to erode the identified features
    n_min_threshold: int
        minimum number of pixels a region must contain to be kept as a feature
min_distance: float
minimum distance between detected features (m)
idx_start: int
feature id to start with
Output:
features_threshold: pandas DataFrame
detected features for individual threshold
regions: dict
dictionary containing the regions above/below threshold used for each feature (feature ids as keys)
'''
from skimage.measure import label
from skimage.morphology import binary_erosion
    # if looking for maxima, keep only the values at or above the threshold:
if target == 'maximum':
mask=1*(data_i >= threshold)
    # if looking for minima, keep only the values at or below the threshold:
elif target == 'minimum':
mask=1*(data_i <= threshold)
# only include values greater than threshold
# erode selected regions by n pixels
if n_erosion_threshold>0:
selem=np.ones((n_erosion_threshold,n_erosion_threshold))
mask=binary_erosion(mask,selem).astype(np.int64)
# detect individual regions, label and count the number of pixels included:
labels = label(mask, background=0)
values, count = np.unique(labels[:,:].ravel(), return_counts=True)
values_counts=dict(zip(values, count))
# Filter out regions that have less pixels than n_min_threshold
values_counts={k:v for k, v in values_counts.items() if v>n_min_threshold}
#check if not entire domain filled as one feature
if 0 in values_counts:
#Remove background counts:
values_counts.pop(0)
#create empty list to store individual features for this threshold
list_features_threshold=[]
#create empty dict to store regions for individual features for this threshold
regions=dict()
    #create empty list of features to remove from parent threshold value
#loop over individual regions:
for cur_idx,count in values_counts.items():
region=labels[:,:] == cur_idx
[hdim1_indices,hdim2_indeces]= np.nonzero(region)
#write region for individual threshold and feature to dict
region_i=list(zip(hdim1_indices,hdim2_indeces))
regions[cur_idx+idx_start]=region_i
# Determine feature position for region by one of the following methods:
hdim1_index,hdim2_index=feature_position(hdim1_indices,hdim2_indeces,region,data_i,threshold,position_threshold,target)
#create individual DataFrame row in tracky format for identified feature
list_features_threshold.append({'frame': int(i_time),
'idx':cur_idx+idx_start,
'hdim_1': hdim1_index,
'hdim_2':hdim2_index,
'num':count,
'threshold_value':threshold})
features_threshold=pd.DataFrame(list_features_threshold)
else:
features_threshold=pd.DataFrame()
regions=dict()
return features_threshold, regions
def feature_detection_multithreshold_timestep(data_i,i_time,
threshold=None,
min_num=0,
target='maximum',
position_threshold='center',
sigma_threshold=0.5,
n_erosion_threshold=0,
n_min_threshold=0,
min_distance=0,
feature_number_start=1
):
'''
function to find features in each timestep based on iteratively finding regions above/below a set of thresholds
Input:
data_i: iris.cube.Cube
2D field to perform the feature detection (single timestep)
i_time: int
number of the current timestep
threshold: list of floats
threshold values used to select target regions to track
dxy: float
grid spacing of the input data (m)
target: str ('minimum' or 'maximum')
        flag to determine if tracking is targeting minima or maxima in the data
position_threshold: str('extreme', 'weighted_diff', 'weighted_abs' or 'center')
flag choosing method used for the position of the tracked feature
sigma_threshold: float
standard deviation for intial filtering step
n_erosion_threshold: int
number of pixel by which to erode the identified features
    n_min_threshold: int
        minimum number of pixels a region must contain to be kept as a feature
min_distance: float
minimum distance between detected features (m)
feature_number_start: int
feature number to start with
Output:
features_threshold: pandas DataFrame
detected features for individual timestep
'''
from scipy.ndimage.filters import gaussian_filter
track_data = data_i.core_data()
track_data=gaussian_filter(track_data, sigma=sigma_threshold) #smooth data slightly to create rounded, continuous field
# create empty lists to store regions and features for individual timestep
features_thresholds=pd.DataFrame()
for i_threshold,threshold_i in enumerate(threshold):
if (i_threshold>0 and not features_thresholds.empty):
idx_start=features_thresholds['idx'].max()+1
else:
idx_start=0
features_threshold_i,regions_i=feature_detection_threshold(track_data,i_time,
threshold=threshold_i,
sigma_threshold=sigma_threshold,
min_num=min_num,
target=target,
position_threshold=position_threshold,
n_erosion_threshold=n_erosion_threshold,
n_min_threshold=n_min_threshold,
min_distance=min_distance,
idx_start=idx_start
)
if any([x is not None for x in features_threshold_i]):
features_thresholds=features_thresholds.append(features_threshold_i)
# For multiple threshold, and features found both in the current and previous step, remove "parent" features from Dataframe
if (i_threshold>0 and not features_thresholds.empty and regions_old):
# for each threshold value: check if newly found features are surrounded by feature based on less restrictive threshold
features_thresholds=remove_parents(features_thresholds,regions_i,regions_old)
regions_old=regions_i
logging.debug('Finished feature detection for threshold '+str(i_threshold) + ' : ' + str(threshold_i) )
return features_thresholds
def feature_detection_multithreshold(field_in,
dxy,
threshold=None,
min_num=0,
target='maximum',
position_threshold='center',
sigma_threshold=0.5,
n_erosion_threshold=0,
n_min_threshold=0,
min_distance=0,
feature_number_start=1
):
''' Function to perform feature detection based on contiguous regions above/below a threshold
Input:
field_in: iris.cube.Cube
2D field to perform the tracking on (needs to have coordinate 'time' along one of its dimensions)
thresholds: list of floats
threshold values used to select target regions to track
dxy: float
grid spacing of the input data (m)
target: str ('minimum' or 'maximum')
        flag to determine if tracking is targeting minima or maxima in the data
position_threshold: str('extreme', 'weighted_diff', 'weighted_abs' or 'center')
flag choosing method used for the position of the tracked feature
sigma_threshold: float
standard deviation for intial filtering step
n_erosion_threshold: int
number of pixel by which to erode the identified features
    n_min_threshold: int
        minimum number of pixels a region must contain to be kept as a feature
min_distance: float
minimum distance between detected features (m)
Output:
features: pandas DataFrame
detected features
'''
from .utils import add_coordinates
logging.debug('start feature detection based on thresholds')
# create empty list to store features for all timesteps
list_features_timesteps=[]
# loop over timesteps for feature identification:
data_time=field_in.slices_over('time')
# if single threshold is put in as a single value, turn it into a list
if type(threshold) in [int,float]:
threshold=[threshold]
for i_time,data_i in enumerate(data_time):
time_i=data_i.coord('time').units.num2date(data_i.coord('time').points[0])
features_thresholds=feature_detection_multithreshold_timestep(data_i,i_time,
threshold=threshold,
sigma_threshold=sigma_threshold,
min_num=min_num,
target=target,
position_threshold=position_threshold,
n_erosion_threshold=n_erosion_threshold,
n_min_threshold=n_min_threshold,
min_distance=min_distance,
feature_number_start=feature_number_start
)
#check if list of features is not empty, then merge features from different threshold values
#into one DataFrame and append to list for individual timesteps:
if not features_thresholds.empty:
#Loop over DataFrame to remove features that are closer than distance_min to each other:
if (min_distance > 0):
features_thresholds=filter_min_distance(features_thresholds,dxy,min_distance)
list_features_timesteps.append(features_thresholds)
logging.debug('Finished feature detection for ' + time_i.strftime('%Y-%m-%d_%H:%M:%S'))
logging.debug('feature detection: merging DataFrames')
# Check if features are detected and then concatenate features from different timesteps into one pandas DataFrame
# If no features are detected raise error
if any([not x.empty for x in list_features_timesteps]):
features= | pd.concat(list_features_timesteps, ignore_index=True) | pandas.concat |
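# Hedged end-to-end sketch (the file name, dxy and threshold values below are
# assumptions, not part of the original module):
#
# import iris
# field = iris.load_cube('olr_test_data.nc')        # 2D field with a 'time' coordinate
# features = feature_detection_multithreshold(field, dxy=4000.,
#                                             threshold=[230, 220, 210],
#                                             target='minimum',
#                                             position_threshold='weighted_diff',
#                                             n_min_threshold=4)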
import pandas as pd
import numpy as np
from numpy import mean
from numpy import std
from numpy import NaN
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.datasets import make_regression
from sklearn.model_selection import cross_val_score
from sklearn.model_selection import RepeatedKFold
from sklearn.model_selection import train_test_split
from sklearn.model_selection import GridSearchCV
from sklearn.metrics import mean_squared_error
from sklearn.metrics import mean_squared_log_error
import math
from xgboost import XGBRFRegressor
import xgboost as xgb
print(xgb.__version__)
# https://www.kaggle.com/shreyagopal/suicide-rate-prediction-with-machine-learning
#from sklearn.linear_model import LinearRegression
dat = "C:/Users/LIUM3478/OneDrive Corp/OneDrive - Atkins Ltd/Work_Atkins/Docker/hjulanalys/wheel_prediction_data.csv"
df = pd.read_csv(dat, encoding = 'ISO 8859-1', sep = ";", decimal=",")
df.head()
df.groupby(['Littera','VehicleOperatorName']).size().reset_index().rename(columns={0:'count'})
y = df[['km_till_OMS']].values
X = df[["LeftWheelDiameter", "Littera", "VehicleOperatorName",
"TotalPerformanceSnapshot", "maxTotalPerformanceSnapshot"]]
# X["Littera_Operator"] = X.Littera + " " + X.VehicleOperatorName
# X.drop(["Littera", "VehicleOperatorName"], axis = 1, inplace=True)
def feature_generator (data, train = False):
features_data = data
# Create dummy variables with prefix 'Littera'
features_data = pd.concat([features_data,
pd.get_dummies(features_data['Littera'], prefix = 'L')],
axis=1)
# VehicleOperatorName dummy
features_data = pd.concat([features_data,
pd.get_dummies(features_data['VehicleOperatorName'],
prefix = 'V')], axis=1)
# delete variables we are not going to use anymore
del features_data['VehicleOperatorName']
del features_data['Littera']
return features_data
# Generate features from training dataset
X = feature_generator(X)
# correlation of X
plt.figure(figsize=(12,10))
cor = X.corr()
sns.heatmap(cor)
# Training and Testing Sets
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.2, random_state = 1234)
# set model (use the XGBRFRegressor imported above)
xgb_model = XGBRFRegressor()
# set GridSearchCV parameters (the grid values and regression scoring below are illustrative assumptions)
optimization_dict = {'max_depth': [3, 5, 7],
                     'n_estimators': [100, 200]}
model = GridSearchCV(xgb_model, optimization_dict,
    scoring='neg_mean_squared_error', verbose = 1, n_jobs = -1, cv = 5)
# use training data
model.fit(X_train, y_train)
y_pred = model.predict(X_test)
y_pred = | pd.DataFrame(y_pred) | pandas.DataFrame |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Jul 23 11:06:22 2021
@author: madeline
"""
'''
This script converts VCF files that have been annotated by snpEFF into GVF files, including the functional annotation.
Note that the strain is obtained by parsing the file name, expected to contain the substring "/strainnamehere_ids".
Required user input is either a single VCF file or a directory containing VCF files.
Eg:
python vcf2gvf.py --vcfdir ./22_07_2021/
To also output tsvs of the unmatched mutation names:
python vcf2gvf.py --vcfdir ./22_07_2021/ --names
'''
import argparse
import pandas as pd
import re
import glob
import os
import numpy as np
from cyvcf2 import VCF, Writer
def parse_args():
parser = argparse.ArgumentParser(
description='Converts snpEFF-annotated VCF files to GVF files with functional annotation')
#make --file or --directory options mutually exclusive
group = parser.add_mutually_exclusive_group(required=True)
group.add_argument('--vcfdir', type=str, default=None,
help='Path to folder containing snpEFF-annotated VCF files')
group.add_argument('--vcffile', type=str, default=None,
help='Path to a snpEFF-annotated VCF file')
#filepath can be absolute (~/Desktop/test/22_07_2021/) or relative (./22_07_2021/)
parser.add_argument('--pokay', type=str, default='functional_annotation_V.0.2.tsv',
help='Anoosha\'s parsed pokay .tsv file')
parser.add_argument('--clades', type=str, default='clade_defining_mutations.tsv',
help='.tsv of clade-defining mutations')
parser.add_argument('--outdir', type=str, default='./gvf_files/',
help='Output directory for finished GVF files: folder will be created if it doesn\'t already exist')
parser.add_argument("--names", help="Save unmatched mutation names to .tsvs for troubleshooting naming formats", action="store_true")
return parser.parse_args()
gvf_columns = ['#seqid','#source','#type','#start','#end','#score','#strand','#phase','#attributes']
vcf_colnames = ['#CHROM', 'POS', 'ID', 'REF', 'ALT', 'QUAL', 'FILTER', 'INFO', 'FORMAT', 'unknown']
def vcftogvf(var_data, strain):
df = pd.read_csv(var_data, sep='\t', names=vcf_colnames)
df = df[~df['#CHROM'].str.contains("#")] #remove pragmas
df = df.reset_index(drop=True) #restart index from 0
new_df = pd.DataFrame(index=range(0,len(df)),columns=gvf_columns)
#parse EFF column
eff_info = df['INFO'].str.findall('\((.*?)\)') #series: extract everything between parentheses as elements of a list
eff_info = eff_info.apply(pd.Series)[0] #take first element of list
eff_info = eff_info.str.split(pat='|').apply(pd.Series) #split at pipe, form dataframe
#hgvs names
hgvs = eff_info[3].str.rsplit(pat='c.').apply(pd.Series)
hgvs_protein = hgvs[0].str[:-1]
    hgvs_protein = hgvs_protein.replace(r'^\s+$', np.nan, regex=True)
hgvs_nucleotide = 'c.' + hgvs[1]
new_df['#attributes'] = new_df['#attributes'].astype(str) + 'Name=' + hgvs_protein + ';'
new_df['#attributes'] = new_df['#attributes'].astype(str) + 'nt_name=' + hgvs_nucleotide + ';'
new_df['#attributes'] = new_df['#attributes'].astype(str) + 'gene=' + eff_info[5] + ';' #gene names
new_df['#attributes'] = new_df['#attributes'].astype(str) + 'mutation_type=' + eff_info[1] + ';' #mutation type
#columns copied straight from Zohaib's file
for column in ['REF','ALT']:
key = column.lower()
if key=='ref':
key = 'Reference_seq'
elif key=='alt':
key = 'Variant_seq'
new_df['#attributes'] = new_df['#attributes'].astype(str) + key + '=' + df[column].astype(str) + ';'
#add ao, dp, ro
info = df['INFO'].str.split(pat=';').apply(pd.Series) #split at ;, form dataframe
new_df['#attributes'] = new_df['#attributes'] + info[5].str.lower() + ';' #ao
new_df['#attributes'] = new_df['#attributes'] + info[7].str.lower() + ';' #dp
new_df['#attributes'] = new_df['#attributes'] + info[28].str.lower() + ';' #ro
#add strain name
new_df['#attributes'] = new_df['#attributes'] + 'viral_lineage=' + strain + ';'
#add WHO strain name
alt_strain_names = {'B.1.1.7': 'Alpha', 'B.1.351': 'Beta', 'P.1': 'Gamma', 'B.1.617.2': 'Delta', 'B.1.427': 'Epsilon', 'B.1.429': 'Epsilon', 'P.2': 'Zeta', 'B.1.525': 'Eta', 'P.3': 'Theta', 'B.1.526': 'Iota', 'B.1.617.1': 'Kappa'}
new_df['#attributes'] = new_df['#attributes'] + 'who_label=' + alt_strain_names.get(strain) + ';'
#add VOC/VOI designation
    if alt_strain_names.get(strain) in {'Alpha', 'Beta', 'Gamma', 'Delta'}:
new_df['#attributes'] = new_df['#attributes'] + 'status=VOC;'
else:
new_df['#attributes'] = new_df['#attributes'] + 'status=VOI;'
#remove starting NaN; leave trailing ';'
new_df['#attributes'] = new_df['#attributes'].str[3:]
#fill in other GVF columns
new_df['#seqid'] = df['#CHROM']
new_df['#source'] = '.'
new_df['#type'] = info[40].str.split(pat='=').apply(pd.Series)[1]
new_df['#start'] = df['POS']
new_df['#end'] = (df['POS'].astype(int) + df['ALT'].str.len() - 1).astype(str) #this needs fixing
new_df['#score'] = '.'
new_df['#strand'] = '+'
new_df['#phase'] = '.'
new_df = new_df[gvf_columns] #only keep the columns needed for a gvf file
return new_df
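# Hedged usage sketch (the file name is an assumption; it just needs to follow the
# "strainname_ids" convention described in the module docstring):
#
# gvf_body = vcftogvf('B.1.1.7_ids.annotated.vcf', 'B.1.1.7')
# gvf_body.to_csv('B.1.1.7.gvf', sep='\t', index=False)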
#takes 3 arguments: an output file of vcftogvf.py, Anoosha's annotation file from Pokay, and the clade defining mutations tsv.
def add_functions(gvf, annotation_file, clade_file, strain):
#load files into Pandas dataframes
df = pd.read_csv(annotation_file, sep='\t', header=0) #load functional annotations spreadsheet
clades = pd.read_csv(clade_file, sep='\t', header=0, usecols=['strain', 'mutation']) #load clade-defining mutations file
clades = clades.loc[clades.strain == strain] #only look at the relevant part of that file
attributes = gvf["#attributes"].str.split(pat=';').apply(pd.Series)
hgvs_protein = attributes[0].str.split(pat='=').apply(pd.Series)[1]
hgvs_nucleotide = attributes[1].str.split(pat='=').apply(pd.Series)[1]
gvf["mutation"] = hgvs_protein.str[2:] #drop the prefix
#merge annotated vcf and functional annotation files by 'mutation' column in the gvf
for column in df.columns:
df[column] = df[column].str.lstrip()
merged_df = pd.merge(df, gvf, on=['mutation'], how='right') #add functional annotations
merged_df = pd.merge(clades, merged_df, on=['mutation'], how='right') #add clade-defining mutations
#collect all mutation groups (including reference mutation) in a column, sorted alphabetically
    #this is more roundabout than it needs to be; streamline with groupby() later
merged_df["mutation_group"] = merged_df["comb_mutation"].astype(str) + ", '" + merged_df["mutation"].astype(str) + "'"
mutation_groups = merged_df["mutation_group"].str.split(pat=',').apply(pd.Series)
mutation_groups = mutation_groups.apply(lambda s:s.str.replace("'", ""))
mutation_groups = mutation_groups.apply(lambda s:s.str.replace(" ", ""))
mutation_groups = mutation_groups.transpose()
sorted_df = mutation_groups
for column in mutation_groups.columns:
sorted_df[column] = mutation_groups.sort_values(by=column, ignore_index=True)[column]
sorted_df = sorted_df.transpose()
#since they're sorted, put everything back into a single cell, don't care about dropna
df3 = sorted_df.apply(lambda x :','.join(x.astype(str)),axis=1)
unique_groups = df3.drop_duplicates()
unique_groups_multicol = sorted_df.drop_duplicates()
merged_df["mutation_group_labeller"] = df3 #for sanity checking
#make a unique id for mutation groups that have all members represented in the vcf
#for groups with missing members, delete those functional annotations
merged_df["id"] = 'NaN'
id_num = 0
for row in range(unique_groups.shape[0]):
group_mutation_set = set(unique_groups_multicol.iloc[row])
group_mutation_set = {x for x in group_mutation_set if (x==x and x!='nan')} #remove nan and 'nan' from set
gvf_all_mutations = set(gvf['mutation'].unique())
indices = merged_df[merged_df.mutation_group_labeller == unique_groups.iloc[row]].index.tolist()
if group_mutation_set.issubset(gvf_all_mutations): #if all mutations in the group are in the vcf file, include those rows and give them an id
merged_df.loc[merged_df.mutation_group_labeller == unique_groups.iloc[row], "id"] = "ID_" + str(id_num)
id_num += 1
else:
merged_df = merged_df.drop(indices) #if not, drop group rows, leaving the remaining indices unchanged
#change semicolons in function descriptions to colons
merged_df['function_description'] = merged_df['function_description'].str.replace(';',':')
#add key-value pairs to attributes column
for column in ['function_category', 'source', 'citation', 'comb_mutation', 'function_description']:
key = column.lower()
merged_df[column] = merged_df[column].fillna('') #replace NaNs with empty string
if column in ['function_category', 'citation', 'function_description']:
merged_df["#attributes"] = merged_df["#attributes"].astype(str) + key + '=' + '"' + merged_df[column].astype(str) + '"' + ';'
else:
merged_df["#attributes"] = merged_df["#attributes"].astype(str) + key + '=' + merged_df[column].astype(str) + ';'
#change clade-defining attribute to True/False depending on content of 'strain' column
merged_df.loc[merged_df.strain == strain, "#attributes"] = merged_df.loc[merged_df.strain == strain, "#attributes"].astype(str) + "clade_defining=True;"
merged_df.loc[merged_df.strain != strain, "#attributes"] = merged_df.loc[merged_df.strain != strain, "#attributes"].astype(str) + "clade_defining=False;"
#add ID to attributes
merged_df["#attributes"] = 'ID=' + merged_df['id'].astype(str) + ';' + merged_df["#attributes"].astype(str)
if args.names:
#get list of names in tsv but not in functional annotations, and vice versa, saved as a .tsv
tsv_names = gvf["mutation"].unique()
pokay_names = df["mutation"].unique()
print(str(np.setdiff1d(tsv_names, pokay_names).shape[0]) + "/" + str(tsv_names.shape[0]) + " mutation names were not found in pokay")
in_pokay_only = pd.DataFrame({'in_pokay_only':np.setdiff1d(pokay_names, tsv_names)})
in_tsv_only = pd.DataFrame({'in_tsv_only':np.setdiff1d(tsv_names, pokay_names)})
leftover_names = in_tsv_only
leftover_names["strain"] = strain
clade_names = clades["mutation"].unique()
leftover_clade_names = pd.DataFrame({'unmatched_clade_names':np.setdiff1d(clade_names, tsv_names)})
leftover_clade_names["strain"] = strain
return merged_df[gvf_columns], leftover_names, gvf["mutation"].tolist(), leftover_clade_names
else:
return merged_df[gvf_columns]
if __name__ == '__main__':
args = parse_args()
annotation_file = args.pokay
clade_file = args.clades
outdir = args.outdir
if not os.path.exists(outdir):
os.makedirs(outdir)
#make empty list in which to store mutation names from all strains in the folder together
all_strains_mutations = []
leftover_df = pd.DataFrame() #empty dataframe to hold unmatched names
unmatched_clade_names = pd.DataFrame() #empty dataframe to hold unmatched clade-defining mutation names
pragmas = | pd.DataFrame([['##gff-version 3'], ['##gvf-version 1.10'], ['##species NCBI_Taxonomy_URI=http://www.ncbi.nlm.nih.gov/Taxonomy/Browser/wwwtax.cgi?id=2697049']]) | pandas.DataFrame |
# --------------
# Import packages
import numpy as np
import pandas as pd
from scipy.stats import mode
# code starts here
bank = pd.DataFrame(pd.read_csv(path))
categorical_var = bank.select_dtypes(include = 'object')
print(categorical_var.head(2))
numerical_var = bank.select_dtypes(include = 'number')
print(numerical_var.head(2))
# code ends here
# --------------
# code starts here
bank.drop(['Loan_ID'],inplace=True,axis=1)
banks = | pd.DataFrame(bank) | pandas.DataFrame |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Dec 2 16:38:26 2019
@author: bruce
"""
import pandas as pd
import numpy as np
from scipy import fftpack
from scipy import signal
import matplotlib.pyplot as plt
def correlation_matrix(corr_mx, cm_title):
from matplotlib import pyplot as plt
from matplotlib import cm as cm
fig = plt.figure()
ax1 = fig.add_subplot(111)
#cmap = cm.get_cmap('jet', 30)
cax = ax1.matshow(corr_mx, cmap='gray')
#cax = ax1.imshow(df.corr(), interpolation="nearest", cmap=cmap)
fig.colorbar(cax)
ax1.grid(False)
plt.title(cm_title)
#plt.title('cross correlation of test and retest')
ylabels=['T1','T2','T3','T4','T6','T7','T8','T9', 'T11', 'T12', 'T13', 'T14', 'T15', 'T16', 'T17', 'T18', 'T19', 'T20', 'T21', 'T22', 'T23', 'T25']
xlabels=['R1','R2','R3','R4','R6','R7','R8','R9', 'R11', 'R12', 'R13', 'R14', 'R15', 'R16', 'R17', 'R18', 'R19', 'R20', 'R21', 'R22', 'R23', 'R25']
ax1.set_xticks(np.arange(len(xlabels)))
ax1.set_yticks(np.arange(len(ylabels)))
ax1.set_xticklabels(xlabels,fontsize=6)
ax1.set_yticklabels(ylabels,fontsize=6)
# Add colorbar, make sure to specify tick locations to match desired ticklabels
#fig.colorbar(cax, ticks=[.75,.8,.85,.90,.95,1])
plt.show()
def correlation_matrix_01(corr_mx, cm_title):
# find the maximum in each row
# input corr_mx is a dataframe
# need to convert it into a array first
#otherwise it is not working
temp = np.asarray(corr_mx)
output = (temp == temp.max(axis=1)[:,None]) # along rows
fig = plt.figure()
ax1 = fig.add_subplot(111)
#cmap = cm.get_cmap('jet', 30)
cax = ax1.matshow(output, cmap='gray')
#cax = ax1.imshow(df.corr(), interpolation="nearest", cmap=cmap)
fig.colorbar(cax)
ax1.grid(False)
plt.title(cm_title)
ylabels=['T1','T2','T3','T4','T6','T7','T8','T9', 'T11', 'T12', 'T13', 'T14', 'T15', 'T16', 'T17', 'T18', 'T19', 'T20', 'T21', 'T22', 'T23', 'T25']
xlabels=['R1','R2','R3','R4','R6','R7','R8','R9', 'R11', 'R12', 'R13', 'R14', 'R15', 'R16', 'R17', 'R18', 'R19', 'R20', 'R21', 'R22', 'R23', 'R25']
ax1.set_xticks(np.arange(len(xlabels)))
ax1.set_yticks(np.arange(len(ylabels)))
ax1.set_xticklabels(xlabels,fontsize=6)
ax1.set_yticklabels(ylabels,fontsize=6)
plt.show()
f_dB = lambda x : 20 * np.log10(np.abs(x))
# import the pkl file
#pkl_file=pd.read_pickle('/Users/bruce/Documents/uOttawa/Project/audio_brainstem_response/Data_BruceSunMaster_Studies/study2/study2DataFrame.pkl')
df_FFR=pd.read_pickle('/home/bruce/Dropbox/Project/4.Code for Linux/df_FFR.pkl')
# remove DC offset
df_FFR_detrend = pd.DataFrame()
for i in range(1408):
# combine next two rows later
df_FFR_detrend_data = pd.DataFrame(signal.detrend(df_FFR.iloc[i: i+1, 0:1024], type='constant').reshape(1,1024))
df_FFR_label_temp = pd.DataFrame(df_FFR.iloc[i, 1024:1031].values.reshape(1,7))
df_FFR_detrend = df_FFR_detrend.append(pd.concat([df_FFR_detrend_data, df_FFR_label_temp], axis=1, ignore_index=True))
# set the title of columns
df_FFR_detrend.columns = np.append(np.arange(1024), ["Subject", "Sex", "Condition", "Vowel", "Sound Level", "Num", "EFR/FFR"])
df_FFR_detrend = df_FFR_detrend.reset_index(drop=True)
df_FFR = df_FFR_detrend
# Time domain
# Define window function
win_kaiser = signal.kaiser(1024, beta=14)
win_hamming = signal.hamming(1024)
# average the df_FFR
df_FFR_avg = pd.DataFrame()
df_FFR_avg_win = pd.DataFrame()
# average test1 and test2
for i in range(704):
# combine next two rows later
df_FFR_avg_t = pd.DataFrame(df_FFR.iloc[2*i: 2*i+2, 0:1024].mean(axis=0).values.reshape(1,1024)) # average those two rows
# implement the window function
df_FFR_avg_t_win = pd.DataFrame((df_FFR_avg_t.iloc[0,:] * win_hamming).values.reshape(1,1024))
df_FFR_label = pd.DataFrame(df_FFR.iloc[2*i, 1024:1031].values.reshape(1,7))
df_FFR_avg = df_FFR_avg.append(pd.concat([df_FFR_avg_t, df_FFR_label], axis=1, ignore_index=True))
    df_FFR_avg_win = df_FFR_avg_win.append(pd.concat([df_FFR_avg_t_win, df_FFR_label], axis=1, ignore_index=True))
# set the title of columns
df_FFR_avg.columns = np.append(np.arange(1024), ["Subject", "Sex", "Condition", "Vowel", "Sound Level", "Num", "EFR/FFR"])
df_FFR_avg = df_FFR_avg.sort_values(by=["Condition", "Subject"])
df_FFR_avg = df_FFR_avg.reset_index(drop=True)
df_FFR_avg_win.columns = np.append(np.arange(1024), ["Subject", "Sex", "Condition", "Vowel", "Sound Level", "Num", "EFR/FFR"])
df_FFR_avg_win = df_FFR_avg_win.sort_values(by=["Condition", "Subject"])
df_FFR_avg_win = df_FFR_avg_win.reset_index(drop=True)
# average all the subjects , test and retest and keep one sound levels
# filter by 'a vowel and 85Db'
df_FFR_avg_sorted = df_FFR_avg.sort_values(by=["Sound Level", "Vowel", "Subject", "Condition"])
df_FFR_avg_sorted = df_FFR_avg_sorted.reset_index(drop=True)
df_FFR_avg_win_sorted = df_FFR_avg_win.sort_values(by=["Sound Level", "Vowel", "Subject", "Condition"])
df_FFR_avg_win_sorted = df_FFR_avg_win_sorted.reset_index(drop=True)
# filter55 65 75 sound levels and keep 85dB
# keep vowel condition and subject
df_FFR_avg_85 = pd.DataFrame(df_FFR_avg_sorted.iloc[528:, :])
df_FFR_avg_85 = df_FFR_avg_85.reset_index(drop=True)
df_FFR_avg_win_85 = pd.DataFrame(df_FFR_avg_win_sorted.iloc[528:, :])
df_FFR_avg_win_85 = df_FFR_avg_win_85.reset_index(drop=True)
# average subjects, conditions
df_FFR_avg_85_aenu = pd.DataFrame()
df_FFR_avg_win_85_aenu = pd.DataFrame()
for i in range(4):
# combine next two rows later
df_FFR_avg_t = pd.DataFrame(df_FFR_avg_85.iloc[44*i: 44*i+44, 0:1024].mean(axis=0).values.reshape(1,1024)) # average those two rows
df_FFR_avg_label = pd.DataFrame(df_FFR_avg_85.iloc[44*i, 1024:1031].values.reshape(1,7))
temp = | pd.concat([df_FFR_avg_t, df_FFR_avg_label], axis=1, ignore_index=True) | pandas.concat |
"""Unit tests for the :mod:`pudl.helpers` module."""
import pandas as pd
from pandas.testing import assert_frame_equal
import pudl
def test_convert_to_date():
"""Test automated cleanup of EIA date columns."""
data = [
(2019, 3, 14),
("2019", "03", "14"),
]
in_df = pd.DataFrame.from_records(
data, columns=["report_year", "report_month", "report_day"]
)
expected_df = pd.DataFrame({
"report_date": pd.to_datetime([
"2019-03-14",
"2019-03-14",
]),
})
out_df = pudl.helpers.convert_to_date(in_df)
assert_frame_equal(out_df, expected_df)
def test_fix_eia_na():
"""Test cleanup of bad EIA spreadsheet NA values."""
in_df = pd.DataFrame({
"vals": [
"",
" ",
"\t",
".",
".0", # Should only replace naked decimals
"..", # Only single naked decimals?
" ", # 2 spaces -- we only replace single whitespace chars?
"\t\t", # 2 tabs -- we only replace single whitespace chars?
]
})
expected_df = pd.DataFrame({
"vals": [
pd.NA,
pd.NA,
pd.NA,
pd.NA,
".0",
"..",
" ",
"\t\t",
]
})
out_df = pudl.helpers.fix_eia_na(in_df)
assert_frame_equal(out_df, expected_df)
def test_fix_leading_zero_gen_ids():
"""Test removal of leading zeroes from EIA generator IDs."""
in_df = pd.DataFrame({
"generator_id": [
"0001", # Leading zeroes, all numeric string.
"26", # An appropriate numeric string w/o leading zeroes.
100, # Integer, should get stringified.
100.0, # What happens if it's a float?
"01-A", # Leading zeroes, alphanumeric. Should not change.
"HRSG-01", # Alphanumeric, should be no change.
]
})
expected_df = pd.DataFrame({
"generator_id": [
"1",
"26",
"100",
"100.0",
"01-A",
"HRSG-01",
]
})
out_df = pudl.helpers.fix_leading_zero_gen_ids(in_df)
assert_frame_equal(out_df, expected_df)
def test_convert_df_to_excel_file():
"""Test converting a dataframe into a pandas ExcelFile."""
in_df = pd.DataFrame([[1, 2], [1, 2]])
expected_df = pd.DataFrame([[1, 2], [1, 2]])
out_excel_file = pudl.helpers.convert_df_to_excel_file(in_df, index=False)
out_df = pd.read_excel(out_excel_file)
| assert_frame_equal(out_df, expected_df) | pandas.testing.assert_frame_equal |
import pandas as pd
def generate_demand_csv(input_fn: str, user_data_dir: str):
# Demand
demand = pd.read_excel(input_fn, sheet_name='2.3 EUD', index_col=0, header=1, usecols=range(5))
demand.columns = [x.strip() for x in demand.columns]
demand.index = [x.strip() for x in demand.index]
# Add additional information
demand_aux = pd.read_csv(f"{user_data_dir}/aux_demand.csv", index_col=0)
demand = pd.merge(demand, demand_aux, left_index=True, right_index=True)
# Rename and reorder columns
demand.index.name = 'parameter name'
demand = demand.reset_index()
demand = demand[['Category', 'Subcategory', 'parameter name', 'HOUSEHOLDS',
'SERVICES', 'INDUSTRY', 'TRANSPORTATION', 'Units']]
demand.to_csv(f"{user_data_dir}/Demand.csv", sep=',', index=False)
def generate_resources_csv(input_fn: str, user_data_dir: str):
# Resources
resources = pd.read_excel(input_fn, sheet_name='2.1 RESOURCES', index_col=0, header=1,
usecols=range(5))
resources.index = [x.strip() for x in resources.index]
resources.columns = [x.split(" ")[0] for x in resources.columns]
# Add additional information
resources_aux = pd.read_csv(f"{user_data_dir}/aux_resources.csv", index_col=0)
resources = pd.merge(resources, resources_aux, left_index=True, right_index=True)
# Rename and reorder columns
resources.index.name = 'parameter name'
resources = resources.reset_index()
resources = resources[['Category', 'Subcategory', 'parameter name', 'avail', 'gwp_op', 'c_op', 'einv_op']]
# resources.columns = ['Category', 'Subcategory', 'parameter name', 'Availability', 'Direct and indirect emissions',
# 'Price', 'Direct emissions']
# Add a line with units
units = pd.Series(['', '', 'units', '[GWh/y]', '[ktCO2-eq./GWh]', '[Meuro/GWh]', '[GWh/y]'],
index=resources.columns)
resources = pd.concat((units.to_frame().T, resources), axis=0)
resources.to_csv(f"{user_data_dir}/Resources.csv", sep=',', index=False)
def generate_technologies_csv(input_fn: str, user_data_dir: str):
# Technologies
technologies = pd.read_excel(input_fn, sheet_name='3.2 TECH', index_col=1)
technologies = technologies.drop(technologies.columns[[0]], axis=1)
technologies.index = [x.strip() for x in technologies.index]
# Add additional information
technologies_aux = pd.read_csv(f"{user_data_dir}/aux_technologies.csv", index_col=0)
technologies = pd.merge(technologies, technologies_aux, left_index=True, right_index=True)
# Rename and reorder columns
technologies.index.name = 'parameter name'
technologies = technologies.reset_index()
technologies = technologies[['Category', 'Subcategory', 'Technologies name', 'parameter name', 'c_inv', 'c_maint',
'gwp_constr', 'einv_constr', 'lifetime', 'c_p', 'fmin_perc', 'fmax_perc',
'f_min', 'f_max']]
# Add a line with units
units = pd.Series(['', '', 'Name (simplified)', 'Name (in model and documents)',
'[Meuro/GW],[Meuro/GWh],[Meuro/(Mkmpass/h)],[Meuro/(Mtonkm/h)]',
'[Meuro/GW],[Meuro/GWh],[Meuro/(Mkmpass/h)],[Meuro/(Mtonkm/h)]',
'[ktonCO2_eq/GW],[ktonCO2_eq/GWh],[ktonCO2_eq/(Mkmpass/h)],[ktonCO2_eq/(Mtonkm/h)]',
'[GWh/y]', '[years]', '[]', '[]', '[]', '[GW]', '[GW]'],
index=technologies.columns)
technologies = pd.concat((units.to_frame().T, technologies), axis=0)
technologies.to_csv(f"{user_data_dir}/Technologies.csv", sep=',', index=False)
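# Hedged usage sketch: the workbook name and directory below are assumptions and
# only need to match the aux_*.csv files expected by each function.
#
# input_fn = 'DATA.xlsx'
# user_data_dir = 'Data/User_data'
# generate_demand_csv(input_fn, user_data_dir)
# generate_resources_csv(input_fn, user_data_dir)
# generate_technologies_csv(input_fn, user_data_dir)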
def generate_layers_csv(input_fn: str, dev_data_dir: str):
# Layers in-out
layers = pd.read_excel(input_fn, sheet_name='3.1 layers_in_out', index_col=1)
layers = layers.drop(layers.columns[0], axis=1)
layers.columns = [x.strip() for x in layers.columns]
layers.to_csv(f"{dev_data_dir}/Layers_in_out.csv", sep=',')
def generate_storage_csv(input_fn: str, dev_data_dir: str):
# Storage eff in
storage_eff_in = pd.read_excel(input_fn, sheet_name='3.3 STO', header=2, nrows=25, index_col=0)
storage_eff_in.index = [x.strip() for x in storage_eff_in.index]
storage_eff_in.to_csv(f"{dev_data_dir}/Storage_eff_in.csv", sep=',')
# Storage eff out
storage_eff_out = pd.read_excel(input_fn, sheet_name='3.3 STO', header=30, nrows=25, index_col=0)
storage_eff_out.index = [x.strip() for x in storage_eff_out.index]
storage_eff_out.to_csv(f"{dev_data_dir}/Storage_eff_out.csv", sep=',')
# Storage characteristics
storage_c = | pd.read_excel(input_fn, sheet_name='3.3 STO', header=58, nrows=25, index_col=0) | pandas.read_excel |
""" Libraries """
import matplotlib.pyplot as plt
import seaborn as sns
import pandas as pd
import numpy as np
import sqlite3
sns.set()
""" Read in the Northwoods League Data from the sqlite3 Database """
northwoods_db = sqlite3.connect("Northwoods.db")
cur = northwoods_db.cursor()
""" import data without outliers """
bat = pd.read_sql_query("SELECT * FROM ALL_NORTHWOODS_DATA WHERE PA_X >= 50 AND PA_Y >= 50 AND OPS_y >= .4", northwoods_db)
# Clean it up
bat = bat.apply(pd.to_numeric, errors='coerce').combine_first(bat)
bat = bat.dropna(how = "all", axis = "columns")
bat = bat.fillna(0)
bat = bat.drop_duplicates(subset = ["AgeDif_x", "Age_x", "TB_y", "SLG_y"], keep = "first")
# Exploratory Data Analysis
def scatter(x, y, xlabel, ylabel):
plt.scatter(x, y, marker= "o", alpha = .5)
plt.xlabel(xlabel)
plt.ylabel(ylabel)
plt.show()
def normality(x):
plt.hist(x, bins = 50)
plt.show()
normality(bat["TB_y"])
scatter(bat["TB_x"], bat["TB_y"], "Spring TB", "Summer TB")
# Bootstrapping data & creating dummy variables
def btstrap(df):
np.random.seed(3)
btstr_data = pd.DataFrame(columns=df.columns)
for data in range(df.shape[0]):
selected_num = np.random.choice(range(df.shape[0]))
btstr_data = btstr_data.append(df[selected_num : selected_num + 1])
return btstr_data
def draw_bs_data(dataframe, num_times_strapped):
data = [btstrap(dataframe) for i in range(num_times_strapped)]
return pd.concat(data)
bs_data = draw_bs_data(bat, 2)
dummies = | pd.get_dummies(bs_data) | pandas.get_dummies |
# -*- coding: utf-8 -*-
import random
import numpy as np
import time
import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
from torch.autograd import Variable
import datetime
from dateutil import parser
import os
import csv
import matplotlib.pyplot as plt
import pandas as pd
def data_preprocess(dir_path):
dir_list = os.listdir(dir_path)
total_data = []
for dir_csv in dir_list:
total_path = dir_path+'/'+dir_csv+'/prices.csv'
# print(total_path)
file = open(total_path,'r')
rdr = csv.reader(file)
# for d in rdr:
# if 'FAX' in d[0]:
# total_data.append(d)
# break
[total_data.append(d) for d in rdr if 'FAX' in d[0]]
# total_data = list(set(total_data))
# print(total_data)
return total_data
def data_pre_pro_walk(dir_path, key):
total_data = []
for (paths, dirs, files) in os.walk(dir_path):
for fs in files:
if fs == 'prices.csv':
# print(paths,fs)
with open(paths+'/'+fs,'r') as file:
rdr = csv.reader(file)
# [total_data.append(d) for d in rdr if key in d[0]]
for da in [d for d in rdr if key in d[0]]:
da.extend([parser.parse(da[1]).weekday()])
total_data.append(da)
# print(da)
np_sdata = np.array(total_data)
#np_sdata[:,1] is means the date
# following command applies unique to the date!
# unique is always sorted
uni_np, indic = np.unique(np_sdata[:,1],return_index=True)
# print(np_sdata[indic])
# print(uni_np)
#sdata_sorted = sorted(sdata,key=lambda x: time.mktime(time.strptime(x[1],"%Y-%m-%d")))
return np_sdata[indic]
#data = data_preprocess('2017data')
#sdata = sorted(data, key=lambda x: time.mktime(time.strptime(x[1],"%Y-%m-%d")))
def data_pre_pro_walk_pandas(dir_path, key):
total_data = []
for (paths, dirs, files) in os.walk(dir_path):
for fs in files:
if fs == 'prices.csv':
# print(paths,fs)
with open(paths+'/'+fs,'r') as file:
rdr = csv.reader(file)
# [total_data.append(d) for d in rdr if key in d[0]]
for da in [d for d in rdr if key in d[0]]:
da.extend([parser.parse(da[1]).weekday()])
total_data.append(da)
# print(da)
np_sdata = np.array(total_data)
#np_sdata[:,1] is means the date
# following command applies unique to the date!
# unique is always sorted
uni_np, indic = np.unique(np_sdata[:,1],return_index=True)
udata = np_sdata[indic]
dates = | pd.DatetimeIndex(udata[:,1]) | pandas.DatetimeIndex |
import pandas as pd
import os
import matplotlib.pyplot as plt
import sys
#This script will process two input sequencing files for mRNA
#and DNA into a data set for all genes
# The four inline arguments passed to this script are
# 1: file name of mRNA sequencing .fastq file.
# 2: file name of DNA sequencing .fastq file.
# 3: output name prefix. The output name for each gene file will be given
# as gene_name + this_input + 'dataset'
# 4 group number. Genes are separated into 18 groups, labeled 101 to 118.
# Only those genes in the given group number will have their datasets generated
# by this script. The associated group number to gene association is given
# by the genetogroupnum file.
name = sys.argv[1]
nameplas = sys.argv[2]
barcode_length = 20
trailing_sequence_length = 21
#define needed functions
def comb_tag(s):
'''function to combine mutated sequence with barcode'''
return s['seq'] + s['tag']
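# comb_tag is intended to be applied row-wise (axis=1) to a frame that carries
# both a 'seq' and a 'tag' column; a hedged sketch with an assumed frame name:
#
# combined = seq_and_tag_df.apply(comb_tag, axis=1)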
#set no maximum length on output column size.
pd.set_option('max_colwidth',int(1e8))
#load in dataframe version of mRNA sequences.
df = pd.io.parsers.read_csv(name,header=None)
#extract the sequences from the fastq format.
df = df.loc[1::4]
#we will select out the barcodes from each sequence. They will be located
#from -41 to -21 bases from the end of the sequence.
tags = df[0].str.slice(-trailing_sequence_length - barcode_length,-trailing_sequence_length)
#we will get the numbers of each barcode.
tagcounts = tags.value_counts()
#We will now preform an identical procedure for the DNA sequences.
dfplas = pd.io.parsers.read_csv(nameplas,header=None)
dfplas = dfplas.loc[1::4]
tagsplas = dfplas[0].str.slice(-trailing_sequence_length - barcode_length,-trailing_sequence_length)
tagcountsplas = tagsplas.value_counts()
#we will get the genes for the associated group number. This is generally 6
#genes.
#load in key for group number for each gene
genecodes = pd.io.parsers.read_csv('../data/test_data/genetogroupnum')
#use group code to find the genes we need to make datasets for.
gene = list(sys.argv[5])
#load in the file that relates barcode to mutated sequence.
tagkeyname = sys.argv[3]
tagkey = pd.io.parsers.read_csv(tagkeyname,delim_whitespace=True)
#reset the barcode to be the pandas index.
tagkey = tagkey.set_index('tag')
#make a dataframe that has the number of counts for the associated barcode
#for the mRNA sequencing. After this we will do the same for the DNA plasmid
#sequencing then combine them.
#make dataframe with mutated sequence and barcode
tempdf = tagkey.reindex(tagcounts.copy().index)
#assign sequencing counts based on mRNA sequencing file.
tempdf['ct_1'] = tagcounts.copy()
tempdf = tempdf.dropna()
#we now do the same thing for the DNA plasmid sequencing.
c = tagkey.reindex(tagcountsplas.copy().index)
c['ct_0'] = tagcountsplas.copy()
c = c.dropna()
#combine the dataframes to get sequencing counts for mRNA and DNA
outdf = | pd.concat([tempdf,c],axis=0,sort=True) | pandas.concat |
import kfp.dsl as dsl
import kfp.components as comp
from collections import OrderedDict
from kubernetes import client as k8s_client
def loaddata(vol_shared_volume_kale_volumes: str):
import os
import shutil
from kale.utils import pod_utils
from kale.marshal import resource_save as _kale_resource_save
from kale.marshal import resource_load as _kale_resource_load
_kale_data_directory = "/shared_volume/datasets_for_notebooks/titanic/"
if not os.path.isdir(_kale_data_directory):
os.makedirs(_kale_data_directory, exist_ok=True)
import numpy as np
import pandas as pd
import seaborn as sns
from matplotlib import pyplot as plt
from matplotlib import style
from sklearn import linear_model
from sklearn.linear_model import LogisticRegression
from sklearn.ensemble import RandomForestClassifier
from sklearn.linear_model import Perceptron
from sklearn.linear_model import SGDClassifier
from sklearn.tree import DecisionTreeClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.svm import SVC
from sklearn.naive_bayes import GaussianNB
path = "/shared_volume/datasets_for_notebooks/titanic/"
PREDICTION_LABEL = 'Survived'
test_df = pd.read_csv(path + "test.csv")
train_df = | pd.read_csv(path + "train.csv") | pandas.read_csv |
# This program loads the HILT data and parses it into a nice format
import argparse
import pathlib
import zipfile
import re
from datetime import datetime, date
import pandas as pd
import numpy as np
from sampex_microburst_widths import config
class Load_SAMPEX_HILT:
def __init__(self, load_date, extract=False,
time_index=True, verbose=False):
"""
        Load the HILT data given a date. This class will look for
a file with the "hhrrYYYYDOY*" filename pattern and open the
found csv file. If the file is zipped, it will first be unzipped.
If you want to extract the file as well, set extract=True.
time_index=True sets the time index of self.hilt to datetime objects
otherwise the index is just an enumerated list.
"""
self.load_date = load_date
self.verbose = verbose
# If date is in string format, convert to a pd.Timestamp object
if isinstance(self.load_date, str):
self.load_date = | pd.to_datetime(self.load_date) | pandas.to_datetime |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import random
from logging import getLogger, basicConfig
import pandas as pd
import streamlit as st
from wordleaisql import __version__ as wordleaisql_version
from wordleaisql.approx import WordleAIApprox
from wordleaisql.utils import default_wordle_vocab, wordle_judge, decode_judgement, read_vocabfile
# Log message will be printed on the console
logger = getLogger(__file__)
# constants
APP_VERSION = "0.0.9"
WORD_PAIR_LIMIT = 500000
CANDIDATE_SAMPLE_SIZE = 500
CSS = """
td.letter {
width: 60px;
height: 30px;
text-align: center;
border: 5px white solid;
border-bottom: 8px white solid;
border-top: 8px white solid;
font-weight: 700;
font-size: 30px;
color: #eaeaea;
}
td.exact {
background-color: green;
}
td.partial {
background-color: orange;
}
td.nomatch {
background-color: #666666;
}
"""
def wordle_judge_html(judges: list):
rows = []
_match_to_class = {"2": "exact", "1": "partial", "0": "nomatch"}
for word, judge in judges:
judge = str(judge).zfill(len(word))
cells = ["""<td class="letter {}">{}</td>""".format(_match_to_class[match], " " if letter==" " else letter) for letter, match in zip(word, judge)]
rows.append("<tr>{}</tr>".format(" ".join(cells)))
html = "<table>{}</table>".format(" ".join(rows))
#print(html)
return html
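# Quick illustration (words and judge strings are made up): each (word, judge)
# pair becomes one row of coloured cells, 2=green, 1=orange, 0=grey.
#
# wordle_judge_html([("weary", "02110"), ("pills", "00200")])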
def _thousand_sep(x: int, sep: str=",")-> str:
return "{:,}".format(x).replace(",", sep)
def _init_state_if_not_exist(key: str, value):
if key not in st.session_state:
st.session_state[key] = value
# Want to cache this somehow, but gets error due to 'sqlite3.Connection object is unhashable'
# both with st.cache and st.experimental_singleton
# Since making an AI object is trivial for typical vocabs with 10k words,
# I let the AI is generated again at every rerun.
# @st.cache(allow_output_mutation=True) # <-- this works but shows 'Running make_ai(...)' for a sec
def make_ai(words: list or dict, word_pair_limit: int=500000, candidate_samplesize: int=500, strength: float=6):
logger.info("Generating AI")
ai = WordleAIApprox(vocabname="wordle", words=words, inmemory=True, strength=strength,
word_pair_limit=word_pair_limit, candidate_samplesize=candidate_samplesize)
return ai
def wordle_vocabfile(level: int):
return os.path.join(os.path.dirname(__file__), "wordle-level{}.txt".format(level))
def main():
st.set_page_config(
page_title="Wordle AI SQL"
)
st.markdown("""<style> {} </style>""".format(CSS), unsafe_allow_html=True)
with st.sidebar:
select_mode = st.selectbox("", ("Solver", "Challenge"), index=0)
#select_word_pair_limit = st.selectbox("Word pair limit", (50000, 100000, 500000, 1000000), index=2)
#select_candidate_sample = st.selectbox("Candidate sample size", (250, 500, 1000, 2000), index=1)
if select_mode == "Challenge":
ai_strength = st.selectbox("AI level", tuple(range(11)), index=6)
visible = st.checkbox("Opponent words are visible", value=False)
alternate = st.checkbox("Choose a word in turns", value=False)
same_answer = st.checkbox("Same answer word", value=False)
ai_first = st.checkbox("AI plays first", value=True)
answer_difficulty = st.selectbox(
"Answer word difficulty", (1, 2, 3, 4, 5), index=1,
format_func=lambda a: "1 (basic)" if a==1 else "5 (unlimited)" if a==5 else str(a),
help=("Change the possible answer set from 1 (basic) to 5 (unlimited). "
"Adjust this option to reduce the chance that you do not know the answer word. "
"This does not change the words that you can input."))
st.markdown("App ver {appver} / [wordleaisql ver {libver}](https://github.com/kota7/wordleai-sql)".format(libver=wordleaisql_version, appver=APP_VERSION))
if select_mode == "Solver":
ai = make_ai(default_wordle_vocab(), word_pair_limit=WORD_PAIR_LIMIT, candidate_samplesize=CANDIDATE_SAMPLE_SIZE)
words_set = set(ai.words)
for w in words_set:
wordlen = len(w)
break
_init_state_if_not_exist("solverHistory", [])
def _solver_history():
return st.session_state["solverHistory"]
def _show_info(column=None):
(st if column is None else column).markdown(wordle_judge_html(_solver_history()), unsafe_allow_html=True)
st.markdown("""
<font size="+6"><b>Wordle Solver</b></font> <i>with SQL backend</i>
""", unsafe_allow_html=True)
word_sample = []
for i, w in enumerate(words_set):
word_sample.append(w)
if i >= 6:
break
word_sample = ", ".join(word_sample)
if len(words_set) > len(word_sample):
word_sample += ", ..."
st.write("%s words: [ %s ]" % (_thousand_sep(len(words_set)), word_sample))
_show_info()
if len(_solver_history()) > 0:
cols = st.columns(5) # make larger column to limit the space between buttons
if cols[0].button("Clear info"):
_solver_history().clear()
st.experimental_rerun()
if cols[1].button("Delete one line"):
_solver_history().pop()
st.experimental_rerun()
cols = st.columns(3)
input_word_solver = cols[0].text_input("Word", max_chars=wordlen, placeholder="weary")
input_judge = cols[1].text_input("Judge", max_chars=wordlen, placeholder="02110",
help=("Express the judge on the word by a sequence of {0,1,2}, where "
"'2' is the match with correct place, "
"'1' is the math with incorrect place, "
"and '0' is no match."))
# workaround to locate the ENTER button to the bottom
for _ in range(3):
cols[2].write(" ")
enter_button = cols[2].button("Enter")
if enter_button:
def _validate_input():
if input_word_solver == "":
return False
if not input_word_solver in words_set:
st.info("'%s' is not in the vocab" % input_word_solver)
return False
if not all(l in "012" for l in input_judge):
st.error("Judge must be a sequence of {0,1,2}, but '%s'" % input_judge)
return False
if len(input_judge) != len(input_word_solver):
st.error("Judge must have the same length as the word, but '%s'" % input_judge)
return False
return True
if _validate_input():
_solver_history().append((input_word_solver, input_judge.zfill(wordlen)))
st.experimental_rerun()
eval_button = st.button("Ask AI")
def _eval():
ai.clear_info()
for w, r in _solver_history():
ai.update(w, r)
# report remaining candidates
candidates = ai.candidates
n_candidates = len(candidates)
if n_candidates == 0:
st.write("No answer word consistent with this information")
return
if n_candidates == 1:
st.markdown("'**{}**' should be the answer!".format(candidates[0]))
return
candidate_sample = ", ".join(candidates[:6])
if n_candidates > 6:
candidate_sample += ", ..."
st.write("%s answer candidates remaining: [ %s ]" % (_thousand_sep(n_candidates), candidate_sample))
with st.spinner("AI is thinking..."):
res = ai.evaluate(top_k=15)
if len(res) > 0:
df = | pd.DataFrame.from_records(res, columns=res[0]._fields) | pandas.DataFrame.from_records |
from requests import Session
from bs4 import BeautifulSoup
import pandas as pd
import numpy as np
from datetime import datetime
HEADERS = {'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) '\
'AppleWebKit/537.36 (KHTML, like Gecko) '\
'Chrome/75.0.3770.80 Safari/537.36'}
def search_symbol(symbol):
"""
Search for symbol's link in Simply Wall Street
"""
# Create Session
s = Session()
# Add headers
s.headers.update(HEADERS)
# JSON Key Field
url = f'https://api.simplywall.st/api/search/{symbol}'
# Request and transform response in json
screener = s.get(url)
json = screener.json()
if len(json) != 0:
# Stock URL
stock_url = json[0]['url']
else:
stock_url = 'not found'
return stock_url
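# Hedged usage sketch (the symbol and the returned path below are illustrative,
# not real data):
#
#     url = search_symbol("AAPL")
#     # -> something like "/stocks/us/tech/nasdaq-aapl/apple",
#     #    or 'not found' when the search endpoint returns an empty JSON list.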
def extract_all_urls(stocks_path='../docs/my_stocks.feather'):
"""
Create file with urls to call api
"""
# Read csv with stocks
my_stocks_df = pd.read_feather(stocks_path)
# Create List with stocks
my_stocks_list = list(my_stocks_df['symbol'].unique())
# Find all urls and store in a dataframe
results = []
for stock in my_stocks_list:
print(stock)
url = search_symbol(stock)
results.append([stock, url])
# Convert into a dataframe
results_df = pd.DataFrame(results, columns=['symbol', 'url'])
# Export to csv
results_df.to_csv('../docs/simplywallurls.csv', index=0)
return results_df
def symbol_data(stock_url):
"""
    Extract data from Simply Wall Street
"""
# Create Session
s = Session()
# Add headers
s.headers.update(HEADERS)
# JSON Key Field
metrics_url = f'https://api.simplywall.st/api/company{stock_url}?include=info%2Cscore%2Cscore.snowflake%2Canalysis.extended.raw_data%2Canalysis.extended.raw_data.insider_transactions&version=2.0'
# Request and transform response in json
screener = s.get(metrics_url)
# check status
if screener.status_code == 200:
json = screener.json()
else:
json = 'not found'
return json
def extract_values(json_response, symbol):
"""
Extract important values from json_response for each symbol
"""
# Define important fields
fields_dictionary = {'total_assets': 'total_assets',
'total_current_assets': 'total_ca',
'cash_st_investments': 'cash_st_invest',
'total_receivables': 'total_receiv',
'inventory': 'inventory',
'net_property_plant_equip': 'nppe',
'short_term_debt': 'current_port_capital_leases',
'total_current_liabilities': 'total_cl',
'long_term_debt': 'lt_debt',
'total_liabilities': 'total_liabilities',
'total_equity': 'total_equity',
'accounts_payable': 'ap',
'total_revenue_ttm': 'total_rev',
'ebt_ttm':'ebt',
'ebitda_ttm': 'ebitda',
'ebit_ttm': 'ebit',
'pre_tax_income': 'earning_co',
'gross_profit_ttm': 'gross_profit',
'net_income_ttm': 'ni',
'g_a_expense_ttm': 'g_a_expense',
'income_tax_ttm': 'income_tax',
'interest_exp_ttm': 'interest_exp',
'basic_eps_ttm': 'basic_eps',
'net_oper_cf_ttm': 'cash_oper',
'net_investing_cf_ttm': 'cash_f_investing',
'net_financing_cf_ttm': 'cash_f_financing',
'levered_fcf_ttm': 'levered_fcf',
'capex_ttm': 'capex',
'beta_5yr': 'beta_5yr'}
# Check response code
if json_response != 'not found':
# Get to fields that really matter
assets = json_response['data']['analysis']['data']['extended']['data']['raw_data']['data']['past']
# check if there's data
if len(assets) > 0:
# Extract Available dates
dates = assets.keys()
# Create empty list to store results
results = []
# Create first row with headers
headers = []
headers.append('date')
headers.append('symbol')
[headers.append(row) for row in list(fields_dictionary.keys())]
results.append(headers)
# For each date in dates
for date in dates:
# Create Temporary list to append results for each date
temp_results = []
temp_results.append(date)
temp_results.append(symbol)
# See available keys - not all fields are available all the time
available_keys = assets[date].keys()
# For field in list of fields to pull
for field in fields_dictionary.values():
# if field is available
if field in available_keys:
# create value and append that
value = assets[date][field]['value']
temp_results.append(value)
# if field doesn't exist then append NaN
else:
temp_results.append(np.nan)
# Append to results
results.append(temp_results)
return results
else:
return 'not found'
def extract_fundamentals(update_urls=False, urls_path='../docs/simplywallurls.csv'):
"""
Function to extract all fundamentals for all stocks
"""
# Check if we need to update list of urls
if update_urls == False:
# Read csv with stocks
urls_df = pd.read_csv(urls_path, header=0)
else:
urls_df = extract_all_urls()
# Create variable with total number of stocks so we can track progress
length = len(urls_df)
# create list to store results
results = []
# Loop through symbols
for index, row in urls_df.iterrows():
# Extract values
stock_url = row['url']
symbol = row['symbol']
# Print progress
print( str( round((((index + 1) / length) * 100), 2)) + '% Complete', symbol)
# If url is different than 'not found'
if row['url'] != 'not found':
# Extract json with values
stock_json_response = symbol_data(stock_url)
# Check if there's data
if stock_json_response != 'not found':
                # Keep only relevant values
stock_numbers = extract_values(stock_json_response, symbol)
# Add that to results list
results.append(stock_numbers)
# Transform results into a dataframe, first create a list where every row is one record for each stock
to_df_list = [i for stock in results for i in stock]
# Convert it to a dataframe - dropping duplicates for headers (not the best solution)
df = pd.DataFrame(to_df_list, columns=to_df_list[0]).drop_duplicates()
# Remove first row with headers
df = df[1:]
# Export that
df.to_csv('../docs/my_stocks_fundamentals.csv', index=0)
return df
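# Example invocation (a sketch; the CSV/feather paths are the module defaults):
#
#     df_fundamentals = extract_fundamentals(update_urls=False)
#     # or, to re-scrape the symbol -> URL mapping first:
#     # df_fundamentals = extract_fundamentals(update_urls=True)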
def update_fundamental_dates():
"""
Function to update fundamental data from Simply Wall Street
"""
# Import Fundamental Data and Earnings
df_fund = pd.read_csv('../docs/my_stocks_fundamentals.csv')
df_earnings = pd.read_csv('../docs/earnings.csv')
# Remove duplicates from df_earnings
df_earnings['earnings_date'] = pd.to_datetime(df_earnings['earnings_date']).dt.date
df_earnings = df_earnings.drop_duplicates(keep='first', subset=['symbol', 'earnings_date'])
# Create columns with previous Qs numbers
# First we need to define the relevant columns
relevant_columns = list(set(list(df_fund.columns)) - set(['date', 'symbol']))
relevant_columns = ['basic_eps_ttm', 'net_income_ttm', 'net_oper_cf_ttm', 'total_revenue_ttm']
# Loop through columns and create a new column with previous numbers
for column in relevant_columns:
for i in range(1,17):
number = i * -1
df_fund[f'{column}_{i}Q'] = df_fund.groupby('symbol')[column].shift(number)
# Now we need to pull data from earnings, because we need to tell exactly when all the data was available
# Transform dataframes
df_fund['date_str'] = df_fund['date'].astype(str).str[:-3]
df_fund['earnings_quarter'] = pd.to_datetime(df_fund['date_str'], unit='s')
# Figure out the correct dates in which earnings was released
df_earnings['key'] = 0
df_fund['key'] = 0
# Merge all together, looking at all possibilities
clean_df = pd.merge(df_earnings, df_fund, on=['symbol', 'key'])
clean_df['earnings_quarter'] = | pd.to_datetime(clean_df['earnings_quarter']) | pandas.to_datetime |
import pandas as pd
import requests
import os
import time
from requests.exceptions import HTTPError
class ForecastException(Exception):
pass
class Forecaster(object):
"""Class for interacting with the DarkSky forecast API.
For details on the API see
"""
def __init__(
self, API_key, latitude = 57.6568, longitude = -3.5818, tz='Europe/London'
):
"""Instantiate class with API key and lat/long (if used somewhere other
than Findhorn)
Arguments:
API_key {string} -- active API key for communicating with DarkSky
latitude {float or string} -- latitude
longitude {float or string} -- longitude
"""
self._API_key = API_key
self.tz = tz
if latitude:
self.latitude = latitude
        if longitude:
self.longitude = longitude
def get_forecast(self, sim_start_time: pd.Timestamp = None) -> pd.DataFrame:
"""Get 48 hour forecast
Combine API calls to DarkSky to make one DataFrame with
meteorological data starting at the start of today and ending 48 hours
time. If a start_time is supplied works from the start of that day
to 48 hours after start_time
Arguments:
sim_start_time {pd.Timestamp} -- simulation start time; if not
supplied start time is the current hour.
"""
# First up - check we haven't already pulled one this hour
start_time = sim_start_time or pd.Timestamp.now(tz=self.tz)
# We can't get a forecast for a date in the future
if (start_time>pd.Timestamp.now(tz=self.tz)):
raise ForecastException('Cannot get forecast for future date')
start_time = start_time.replace(minute=0, second=0)
filename = ('forecasts/forecast-'
+ start_time.strftime('%Y-%m-%d-%H%M')
+ '.csv')
if os.path.exists(filename):
forecast = pd.read_csv(
filename,
index_col='datetime',
parse_dates=['datetime'],
dayfirst = True
            )
            # Return the cached forecast instead of calling the API again
            return forecast
# First call - start of today until end of tomorrow
unixtime = int(time.mktime(start_time.timetuple()))
try:
json_response = self._call_darksky(str(unixtime))
except Exception as err:
raise ForecastException(f'Communication error occurred: {err}')
past_data = pd.DataFrame.from_dict(json_response['hourly']['data'])
past_data['datetime'] = pd.to_datetime(
past_data['time'],
unit = 's'
)
past_data.set_index(
'datetime',
inplace = True
)
past_data.index = past_data.index.tz_localize('UTC').tz_convert(self.tz)
# Second call - might be another historical one...
if not sim_start_time:
# No, this is the standard forecast
try:
json_response = self._call_darksky()
except Exception as err:
raise ForecastException(f'Communication error occurred: {err}')
# We have to do this differently for historic data as the DarkSky
# API doesn't appear to be returning 2 day forecasts for historical
# data as the docs indicate it should.
future_data = pd.DataFrame.from_dict(json_response['hourly']['data'])
future_data['datetime'] = pd.to_datetime(
future_data['time'],
unit = 's'
)
future_data.set_index(
'datetime',
inplace = True
)
else:
# We need to do two more calls then.
# We'll trim it later.
second_day = (sim_start_time+ | pd.Timedelta(days=1) | pandas.Timedelta |
import sys
import numpy as np
import pandas as pd
from sys import argv, __stdout__
from datetime import datetime, timedelta
import os
### This program makes a timing dataframe from output logfiles generated by graphB.
### It can take multiple files as command line arguments manually, in which it will generate
### one dataframe with the results of each log as its own row.
### This file is run automatically in the postprocessing step of graphB. See README in
### graphB for description of the different timing metrics generated.
def convert_timedelta(convert_dict):
new_dict = {}
for key in convert_dict:
if key != "FILENAME":
try:
for val in convert_dict[key]:
if val != "None":
time = val.split(":")
if len(list(time[0])) > 1:
d = int(time[0].split("day,")[0])
h = (24 * d) + int(time[0].split("day,")[1])
else:
h = int(time[0])
m = int(time[1])
if len(time[2].split(".")) > 1:
s = int(time[2].split(".")[0])
ms = int(time[2].split(".")[1])
else:
s = int(time[2])
ms = 0
val = timedelta(hours=h, minutes=m, seconds=s, microseconds=ms)
new_dict.setdefault(key, []).append(val)
else:
val = timedelta(hours=0, minutes=0, seconds=0, microseconds=0)
new_dict.setdefault(key, []).append(val)
except Exception as error:
print(
"ERROR IN CONVERT TIMEDELTA FUNCTION, key is: ",
key,
" File is: ",
convert_dict["FILENAME"],
" Exception: ",
error,
)
return new_dict
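# Minimal illustration of the expected input/output shape (values are made up):
#
#     convert_timedelta({"TREE_TIME": ["0:01:30.500000", "None"], "FILENAME": "log.txt"})
#     # -> {"TREE_TIME": [timedelta(minutes=1, seconds=30, microseconds=500000),
#     #                   timedelta(0)]}
#
# "FILENAME" is only used for error reporting and is never converted.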
def create_avg_or_sum_labels(avg_series, sum_series, new_series):
if not avg_series.empty:
for index_to_change in avg_series.index:
if index_to_change != "FILENAME":
new_series["AVG_" + index_to_change] = avg_series[index_to_change]
new_series["SUM_" + index_to_change] = sum_series[index_to_change]
else:
keywords = [
"TOTAL_BALANCE_TIME",
"BALANCE_TIME",
"COMPONENT_LIST_GEN_TIME",
"COMPONENT_STATS_TIME",
"TREE_TIME",
]
for word in keywords:
new_series["AVG_" + word] = timedelta(
hours=0, minutes=0, seconds=0, microseconds=0
)
new_series["SUM_" + word] = timedelta(
hours=0, minutes=0, seconds=0, microseconds=0
)
return new_series
def change_to_seconds(timedelta_series):
timedelta_series = timedelta_series.total_seconds()
return timedelta_series
def create_write_filename(outfiles):
outfile = os.path.normpath(outfiles[0])
split_dir = os.path.dirname(outfile).split(os.sep)
write_dir = (
os.sep.join(split_dir[:-2]) + "/Timing/" + split_dir[-1] + "/"
)
os.makedirs(write_dir, exist_ok=True)
write_file = (
write_dir
+ "_".join(os.path.basename(outfile).split("_")[0:3])
+ "_timing_results"
)
return write_file
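# Path-transformation sketch (the POSIX path below is hypothetical):
#
#     create_write_filename(["/data/run1/Output/logs/job_A_current_001.out"])
#     # -> "/data/run1/Timing/logs/job_A_current_timing_results"
#
# i.e. the last two directory components are dropped, "Timing/" plus the original
# leaf folder are appended, and the first three underscore-separated tokens of the
# log's basename name the results file.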
def create_timing_results(output_files, write_filename):
FINAL_COLUMNS = [
"AVG_COMPONENT_LIST_GEN_TIME",
"AVG_COMPONENT_STATS_TIME",
"SUM_COMPONENT_STATS_TIME",
"SUM_COMPONENT_LIST_GEN_TIME",
"SUM_TREE_TIME",
"AVG_TREE_TIME",
"AVG_TOTAL_BALANCE_TIME",
"AVG_BALANCE_TIME",
"SUM_TOTAL_BALANCE_TIME",
"SUM_BALANCE_TIME",
"TOTAL_TIME",
"VERTEX_DF_TIME",
"MATRIX_CREATE_TIME",
"SYM_MATRIX_CREATE_TIME",
"CALC_STATUS_TIME",
"TOTAL_PREPROCESS_TIME",
"TOTAL_PROCESS_TIME",
"TOTAL_POSTPROCESS_TIME",
"COMPUTE_TIME_NO_IO",
]
total_df_datetime = pd.DataFrame(columns=FINAL_COLUMNS)
total_df_seconds = pd.DataFrame(columns=FINAL_COLUMNS)
for outfile in output_files:
outfile_source = os.path.basename(outfile).split("_")[2]
tree_keywords = {
"COMPONENT_LIST_GEN_TIME": [],
"COMPONENT_STATS_TIME": [],
"TREE_TIME": [],
"BALANCE_TIME": [],
"TOTAL_BALANCE_TIME": [],
"FILENAME": "",
}
global_keywords = {
"TOTAL_PREPROCESS_TIME": [],
"TOTAL_PROCESS_TIME": [],
"TOTAL_POSTPROCESS_TIME": [],
"TOTAL_TIME": [],
"VERTEX_DF_TIME": [],
"CALC_STATUS_TIME": [],
"MATRIX_CREATE_TIME": [],
"SYM_MATRIX_CREATE_TIME": [],
"FILENAME": "",
}
with open(outfile, "r") as outfile:
global_keywords["FILENAME"] = outfile
tree_keywords["FILENAME"] = outfile
for line in outfile:
if outfile_source == "LEAP":
keyword = line.split(":")[0]
elif outfile_source == "current":
keyword = line.split(":")[2]
if keyword in tree_keywords:
tree_keywords.setdefault(keyword, []).append(
line.split(")")[1].replace("\n", "").replace(" ", "")
)
if keyword in global_keywords:
if not global_keywords[
keyword
]: # only want one entry in case there were multiple input h5s created.
global_keywords[keyword].append(
line.split(")")[1].replace("\n", "").replace(" ", "")
)
tree_keywords = convert_timedelta(tree_keywords)
global_keywords = convert_timedelta(global_keywords)
global_keywords["TOTAL_TIME"] = (
global_keywords["TOTAL_PREPROCESS_TIME"][0]
+ global_keywords["TOTAL_PROCESS_TIME"][0]
+ global_keywords["TOTAL_POSTPROCESS_TIME"][0]
)
        ### These two for loops were added because Spark doesn't consistently write all the print output.
### This resulted in the tree time having one less entry than the other times and the mean on
### line 55 would not compute. Solution was to compute the mean of all the other entries and
### add in another entry equal to the mean for that column so the length of all columns would
### match while still not affecting the overall average.
max_length = 0
for key in tree_keywords:
if len(tree_keywords[key]) > max_length:
max_length = len(tree_keywords[key])
for key in tree_keywords:
mean = sum(tree_keywords[key], timedelta()) / len(tree_keywords[key])
if len(tree_keywords[key]) < max_length:
tree_keywords.setdefault(key, []).append(mean)
tree_sums = pd.DataFrame(tree_keywords).sum()
tree_series = | pd.DataFrame(tree_keywords) | pandas.DataFrame |
from datetime import datetime
import numpy as np
import pytest
import pytz
from pandas.core.dtypes.common import (
is_categorical_dtype,
is_interval_dtype,
is_object_dtype,
)
from pandas import (
DataFrame,
DatetimeIndex,
Index,
IntervalIndex,
Series,
Timestamp,
cut,
date_range,
)
import pandas._testing as tm
class TestDataFrameAlterAxes:
@pytest.fixture
def idx_expected(self):
idx = DatetimeIndex(["2013-1-1 13:00", "2013-1-2 14:00"], name="B").tz_localize(
"US/Pacific"
)
expected = Series(
np.array(
[
Timestamp("2013-01-01 13:00:00-0800", tz="US/Pacific"),
Timestamp("2013-01-02 14:00:00-0800", tz="US/Pacific"),
],
dtype="object",
),
name="B",
)
assert expected.dtype == idx.dtype
return idx, expected
def test_to_series_keep_tz_deprecated_true(self, idx_expected):
# convert to series while keeping the timezone
idx, expected = idx_expected
msg = "stop passing 'keep_tz'"
with tm.assert_produces_warning(FutureWarning) as m:
result = idx.to_series(keep_tz=True, index=[0, 1])
assert msg in str(m[0].message)
tm.assert_series_equal(result, expected)
def test_to_series_keep_tz_deprecated_false(self, idx_expected):
idx, expected = idx_expected
with tm.assert_produces_warning(FutureWarning) as m:
result = idx.to_series(keep_tz=False, index=[0, 1])
tm.assert_series_equal(result, expected.dt.tz_convert(None))
msg = "do 'idx.tz_convert(None)' before calling"
assert msg in str(m[0].message)
def test_setitem_dt64series(self, idx_expected):
# convert to utc
idx, expected = idx_expected
df = DataFrame(np.random.randn(2, 1), columns=["A"])
df["B"] = idx
with tm.assert_produces_warning(FutureWarning) as m:
df["B"] = idx.to_series(keep_tz=False, index=[0, 1])
msg = "do 'idx.tz_convert(None)' before calling"
assert msg in str(m[0].message)
result = df["B"]
comp = Series(idx.tz_convert("UTC").tz_localize(None), name="B")
tm.assert_series_equal(result, comp)
def test_setitem_datetimeindex(self, idx_expected):
# setting a DataFrame column with a tzaware DTI retains the dtype
idx, expected = idx_expected
df = DataFrame(np.random.randn(2, 1), columns=["A"])
# assign to frame
df["B"] = idx
result = df["B"]
| tm.assert_series_equal(result, expected) | pandas._testing.assert_series_equal |
from abc import ABC, abstractmethod
import re
import pandas as pd
from pprint import pprint
from collections import abc
from negation.structure2 import batch, constants, factory, modObject, patientObject
# Master Class
class varObject(abc.Collection):
def __init__(self):
self.objects = []
# self.objects_tag = []
# self.objects_mod = []
def _addTargetTag(self, tagObject):
"""Adds tagobject, and dictionary of mods,
as dictionary to self.objects list:
self.objects = [
{
"instance" : tagObject,
"mods" :
{
"negation" : negMod,
"date" : dateMod,
etc
}
},
{
"instance" : tagObject,
"mods" :
{
"negation" : negMod,
"date" : dateMod,
etc
}
}
]
"""
# """Adds tagObject to end of self.objects list.
# Also adds modObjects of each type to self.objects_mod
# """
# self.objects_tag.append(tagObject)
# self.objects_mod.append(fact.createModObject())
# Put everything in
self.objects.append({
"instance" : tagObject,
"mods" : fact.createModObject()})
def _addModifiers(self, mods):
for mod in mods:
cat = mod["category"]
# Translate cat into modifier type:
found = False
# Lookup in which modObject subclass this cat belongs
for key, list in constants.mod_type_dict.items():
if cat in list:
type = key
# select last dict from mod object list:
self.objects[-1]["mods"][type]._addModifierTag(mod)
found = True
# Can skip remainder of loop
break
# If type for this mod not found:
if not found:
raise Exception("categoryString of mod was not recognized")
def __str__(self):
result = []
for i in self.objects:
result.append(i["instance"])
return(str(result))
def isEmpty(self):
if self.objects: return False
else: return True
def __contains__(self, x):
for i in self.objects:
if x in i["instance"].values():
return(True)
# if x[1] == i["instance"][x[0]]:
# return(True)
return(False)
def __iter__(self):
"""Iterates over self.objects list and returns:
(var, varObject)
"""
for i in self.objects:
yield (i["instance"], i["mods"])
# def __next__(self):
# if self._n <= len(self.objects):
# result = self.objects[1]
# self.n += 1
# return result
# else:
# raise StopIteration
def __len__(self):
"""Returns the number of instances
"""
return(len(self.objects))
def _getType(self):
return(str(type(self).__name__))
def getDataframe(self):
"""Returns dataframe of varObject"""
# If no findings, return empty dataframe
if len(self) == 0:
return(pd.DataFrame())
ls = []
for index, i in enumerate(self.objects):
# Per instance, gather var information
data = i["instance"]
# var_index = str(self._getType()+str(index))
var_index = str(i["instance"]["var"]+str(index))
data.update({"index" : var_index})
serie = pd.Series(data)
df_var = pd.DataFrame([serie])
df_var = df_var.add_prefix("var_")
# Determine number of mod combinations, so number of rows
n_mod_comb = sum([len(mod) for mod in i["mods"].values()])
# If there are no mods, return current df
if n_mod_comb == 0:
ls.append(df_var)
# Paste mod info to dataframe as new columns
else:
# Gather all modifier information
df_mods = []
for mod in i["mods"].values():
if not mod.isEmpty():
df_mods.append(mod.getDataframe())
df_mods = pd.concat(df_mods, axis=0, ignore_index=True)
# Combine dataframes:
# Only if mods are present
if len(df_mods) == 0:
raise Exception(
"df_mod contains no rows")
# Multiply rows of vars to match number of mods
df_var = | pd.concat([df_var]*n_mod_comb, ignore_index=True) | pandas.concat |
import click, re, os
from baseq.snv import cli
import pandas as pd
import numpy as np
@cli.command(short_help="Stats The VCF File")
@click.option('--vcf', '-i', default = '', help = 'VCF path')
@click.option('--vcf_lists', '-l', default = '', help = 'VCF path List, sample name and path')
@click.option('--depth', '-d', default = 100, help = 'Filter Depth')
@click.option('--name', '-n', default = 'sample', help = 'Name of process')
def vcf_stats(vcf, vcf_lists, depth, name):
from baseq.snv.vcf.GATK import vcf_stats
#build VCF lists
if vcf and os.path.exists(vcf):
        vcfs = [[name, vcf]]
elif vcf_lists and os.path.exists(vcf_lists):
with open(vcf_lists, 'r') as infile:
lines = infile.readlines()
infos = [re.split("\s+", x) for x in lines]
vcfs = [x for x in infos if x[0] and os.path.exists(x[1])]
else:
        vcfs = []
results = []
import multiprocessing as mp
pool = mp.Pool(processes=20)
for vcf in vcfs:
results.append(pool.apply_async(vcf_stats, (vcf[0], vcf[1], int(depth),)))
pool.close()
pool.join()
results = [x.get() for x in results]
MAF = [[x['sample']] + x["MAF"].tolist() for x in results]
writer = pd.ExcelWriter('VCF_stats.xlsx', engine='xlsxwriter', options={'font_name':'arial'})
# pd.DataFrame(results, columns=["sample", "counts", "mean_depth", "GT_01", "GT_02"]).to_excel("VCF.xls")
# pd.DataFrame(MAF, columns=["sample"]+[str(round(x/50, 2)) for x in range(50)]).to_excel("MAF.xls")
| pd.DataFrame(results, columns=["sample", "counts", "mean_depth", "GT_01", "GT_11"]) | pandas.DataFrame |
"""
Utility functions for training and validating models.
"""
import time
import torch
import pandas as pd
import numpy as np
import torch.nn as nn
from tqdm import tqdm
from vaa.utils import correct_predictions
def train(model,
dataloader,
optimizer,
criterion,
epoch_number,
max_gradient_norm):
"""
Train a model for one epoch on some input data with a given optimizer and
criterion.
Args:
model: A torch module that must be trained on some input data.
dataloader: A DataLoader object to iterate over the training data.
optimizer: A torch optimizer to use for training on the input model.
criterion: A loss criterion to use for training.
epoch_number: The number of the epoch for which training is performed.
max_gradient_norm: Max. norm for gradient norm clipping.
Returns:
epoch_time: The total time necessary to train the epoch.
epoch_loss: The training loss computed for the epoch.
epoch_accuracy: The accuracy computed for the epoch.
"""
# Switch the model to train mode.
model.train()
device = model.device
epoch_start = time.time()
batch_time_avg = 0.0
running_loss = 0.0
correct_preds = 0
tqdm_batch_iterator = tqdm(dataloader)
for batch_index, batch in enumerate(tqdm_batch_iterator):
batch_start = time.time()
# Move input and output data to the GPU if it is used.
premises = batch["premise"].to(device)
premises_lengths = batch["premise_length"].to(device)
hypotheses = batch["hypothesis"].to(device)
hypotheses_lengths = batch["hypothesis_length"].to(device)
labels = batch["label"].to(device)
optimizer.zero_grad()
logits, probs, _, _ = model(premises,
premises_lengths,
hypotheses,
hypotheses_lengths)
loss = criterion(logits, labels)
loss.backward()
nn.utils.clip_grad_norm_(model.parameters(), max_gradient_norm)
optimizer.step()
batch_time_avg += time.time() - batch_start
running_loss += loss.item()
correct_preds += correct_predictions(probs, labels)
description = "Avg. batch proc. time: {:.4f}s, loss: {:.4f}"\
.format(batch_time_avg/(batch_index+1),
running_loss/(batch_index+1))
tqdm_batch_iterator.set_description(description)
epoch_time = time.time() - epoch_start
epoch_loss = running_loss / len(dataloader)
epoch_accuracy = correct_preds / len(dataloader.dataset)
return epoch_time, epoch_loss, epoch_accuracy
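# Sketch of a typical call site (the optimizer/criterion choices are assumptions,
# not taken from this file):
#
#     # optimizer = torch.optim.Adam(model.parameters(), lr=4e-4)
#     # criterion = nn.CrossEntropyLoss()
#     # t, loss, acc = train(model, train_loader, optimizer, criterion,
#     #                      epoch_number=1, max_gradient_norm=10.0)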
def validate(model, dataloader, criterion):
"""
Compute the loss and accuracy of a model on some validation dataset.
Args:
model: A torch module for which the loss and accuracy must be
computed.
dataloader: A DataLoader object to iterate over the validation data.
criterion: A loss criterion to use for computing the loss.
    Returns:
epoch_time: The total time to compute the loss and accuracy on the
entire validation set.
epoch_loss: The loss computed on the entire validation set.
epoch_accuracy: The accuracy computed on the entire validation set.
"""
# Switch to evaluate mode.
model.eval()
device = model.device
epoch_start = time.time()
running_loss = 0.0
running_accuracy = 0.0
# Deactivate autograd for evaluation.
with torch.no_grad():
for batch in dataloader:
# Move input and output data to the GPU if one is used.
premises = batch["premise"].to(device)
premises_lengths = batch["premise_length"].to(device)
hypotheses = batch["hypothesis"].to(device)
hypotheses_lengths = batch["hypothesis_length"].to(device)
labels = batch["label"].to(device)
logits, probs, _, _ = model(premises,
premises_lengths,
hypotheses,
hypotheses_lengths)
loss = criterion(logits, labels)
running_loss += loss.item()
running_accuracy += correct_predictions(probs, labels)
epoch_time = time.time() - epoch_start
epoch_loss = running_loss / len(dataloader)
epoch_accuracy = running_accuracy / (len(dataloader.dataset))
return epoch_time, epoch_loss, epoch_accuracy
def test(model, dataloader):
"""
    Predict labels for a test dataset with a trained model.
    Args:
        model: A torch module used to predict labels for the input data.
        dataloader: A DataLoader object to iterate over the test data.
    Returns:
        data: A pandas DataFrame with one row per example, containing the
            'pairID' and the predicted 'gold_label'
            ('entailment', 'neutral' or 'contradiction').
"""
# Switch to evaluate mode.
model.eval()
device = model.device
data = None
# Deactivate autograd for evaluation.
with torch.no_grad():
for batch in dataloader:
# Move input and output data to the GPU if one is used.
premises = batch["premise"].to(device)
premises_lengths = batch["premise_length"].to(device)
hypotheses = batch["hypothesis"].to(device)
hypotheses_lengths = batch["hypothesis_length"].to(device)
logits, probs, _, _ = model(premises,
premises_lengths,
hypotheses,
hypotheses_lengths)
pred = torch.argmax(probs, dim=1).cpu().numpy()
gold_labels = []
# label_names = ['entailment', 'neutral', 'contradiction']
for x in pred:
if x == 0:
gold_labels.append('entailment')
elif x == 1:
gold_labels.append('neutral')
else:
gold_labels.append('contradiction')
temp = pd.DataFrame()
temp['pairID'] = batch["id"]
temp['gold_label'] = gold_labels
if data is None:
data = temp
else:
data = | pd.concat((data, temp)) | pandas.concat |
import pandas as pd
import allel
import numpy as np
import logging
logger = logging.getLogger(__name__)
# FUNCTIONS
def load_hdf5_data(hdf5_fn, chrom, s1, s2, gdistkey=None):
    import h5py
samples1 = get_sample_ids(s1)
samples2 = get_sample_ids(s2)
samples_x = h5py.File(hdf5_fn)[chrom]["samples"][:]
sample_name = [sid.decode() for sid in samples_x.tolist()]
idx1 = np.array([sample_name.index(sid) for sid in samples1])
idx2 = np.array([sample_name.index(sid) for sid in samples2])
h5fh = h5py.File(hdf5_fn, mode="r")[chrom]
g = allel.GenotypeChunkedArray.from_hdf5(h5fh["calldata"]["genotype"])
pos = allel.SortedIndex(h5fh["variants"]["POS"][:])
if gdistkey is not None:
gdist = h5fh["variants"][gdistkey][:]
else:
gdist = None
return g.take(idx1, axis=1), g.take(idx2, axis=1), pos, gdist
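# Hedged usage sketch (file name, chromosome, sample IDs and gdistkey are
# placeholders):
#
#     g1, g2, pos, gdist = load_hdf5_data(
#         "calls.h5", "2L", "sampleA1,sampleA2", "popB_ids.txt", gdistkey="cM")
#
# `s1`/`s2` may be either comma-separated IDs or a path to a file with one ID per
# line (see get_sample_ids below).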
def load_zarr_data(zarr_fn, chrom, s1, s2, gdistkey=None):
import zarr
samples1 = get_sample_ids(s1)
samples2 = get_sample_ids(s2)
zfh = zarr.open_group(zarr_fn, mode="r")[chrom]
samples_x = zfh["samples"][:]
sample_name = [sid.decode() for sid in samples_x.tolist()]
idx1 = np.array([sample_name.index(sid) for sid in samples1])
idx2 = np.array([sample_name.index(sid) for sid in samples2])
g = allel.GenotypeChunkedArray(zfh["calldata"]["genotype"])
pos = allel.SortedIndex(zfh["variants"]["POS"][:])
if gdistkey is not None:
        gdist = zfh["variants"][gdistkey][:]
else:
gdist = None
return g.take(idx1, axis=1), g.take(idx2, axis=1), pos, gdist
def load_text_format_data(mapfn, pop_a_fn, pop_b_fn):
tbl = pd.read_csv(mapfn, sep="\t", header=None, engine="c")
try:
tbl.columns = ["ID", "CHROM", "GDist", "POS", "REF", "ALT"]
except ValueError:
logger.info("File not tab delimited as expected- trying with spaces")
tbl = pd.read_csv(
mapfn, sep=" ", header=None, engine="c", names=["ID", "CHROM", "GDist", "POS", "REF", "ALT"])
try:
vartbl = allel.VariantChunkedTable(tbl.to_records(), index="POS")
except ValueError:
tbl = tbl.sort_values(["CHROM", "POS"])
logger.warning("Possible SNPs file is not sorted. Attempting to sort. This is likely to be inefficient")
vartbl = allel.VariantChunkedTable(tbl.to_records(), index="POS")
d1 = np.loadtxt(pop_a_fn, dtype="int8")
geno1 = allel.GenotypeChunkedArray(d1.reshape((d1.shape[0], -1, 2)))
d2 = np.loadtxt(pop_b_fn, dtype="int8")
geno2 = allel.GenotypeChunkedArray(d2.reshape((d2.shape[0], -1, 2)))
pos = allel.SortedIndex(vartbl.POS[:])
assert np.isnan(pos).sum() == 0, "nans values are not supported"
return geno1, geno2, allel.SortedIndex(vartbl.POS[:]), vartbl.GDist[:]
# function that either splits a string or reads a file
def get_sample_ids(sample_input):
if "," in sample_input:
# assume split and return
logger.debug("Assuming sample IDs given as comma-separated strings.")
samples = sample_input.split(",")
else:
logger.debug("Assuming sample IDs provided in a file.")
with open(sample_input, "r") as reader:
samples = [x.strip() for x in reader.readlines()]
return samples
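# Example of the two accepted input forms (sample IDs are illustrative):
#
#     get_sample_ids("AB0001,AB0002")     # -> ["AB0001", "AB0002"]
#     get_sample_ids("samples_popA.txt")  # -> one ID per stripped line of the file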
def load_vcf_wrapper(path, seqid, samples, samples_path):
callset = allel.read_vcf(
path,
region=seqid,
fields=['variants/POS', 'calldata/GT', 'samples'],
tabix="tabix",
samples=samples)
assert "samples" in callset.keys(), "None of the samples provided in {0!r} are found in {1!r}".format(
samples_path, path)
p = allel.SortedIndex(callset["variants/POS"])
g = allel.GenotypeArray(callset['calldata/GT'])
return p, g
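# Minimal sketch of a direct call (paths and IDs are hypothetical); the region
# query implies the VCF should be bgzipped and tabix-indexed:
#
#     pos, geno = load_vcf_wrapper("calls.vcf.gz", "2L",
#                                  ["AB0001", "AB0002"], "popA_ids.txt")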
def load_vcf_format_data(vcf_fn, chrom, s1, s2, gdistkey=None):
# geno1, geno2, pos = q, q, q
samples1 = get_sample_ids(s1)
samples2 = get_sample_ids(s2)
pos1, geno1 = load_vcf_wrapper(vcf_fn, chrom, samples1, s1)
pos2, geno2 = load_vcf_wrapper(vcf_fn, chrom, samples2, s2)
assert np.array_equal(pos1, pos2), "POS fields not the same"
assert geno1.shape[0] == pos1.shape[0], "For samples 1, genotypes do not match positions"
assert geno2.shape[0] == pos2.shape[0], "For samples 2, genotypes do not match positions"
assert geno1.shape[1] == len(samples1)
assert geno2.shape[1] == len(samples2)
return geno1, geno2, pos1, None
def tabulate_results(chrom, model_li, null_li, selectionc,
counts, count_avail, windows, edges):
lidf = pd.DataFrame(np.vstack((model_li, null_li, selectionc, counts, count_avail)).T,
columns=["modelL", "nullL", "sel_coef", "nSNPs", "nSNPs_avail"])
# these are the nominal windows
winf = pd.DataFrame(windows, columns=["start", "stop"])
# these are the "real" windows. Gives a guide to how close we are.
realf = | pd.DataFrame(edges, columns=["pos_start", "pos_stop"]) | pandas.DataFrame |
from sklearn.metrics.pairwise import cosine_similarity
from sklearn.manifold import TSNE
from sklearn.cluster import AgglomerativeClustering, KMeans
from sklearn.preprocessing import StandardScaler, MinMaxScaler
from sklearn.preprocessing import MaxAbsScaler
from scipy.special import softmax
import matplotlib.pyplot as plt
import matplotlib
import seaborn as sns
import pandas
import matplotlib.cm as cm
import umap
import tqdm
import scanpy as sc
import matplotlib.gridspec as gridspec
import networkx as nx
import matplotlib as mpl
import numpy
import operator
import random
import pickle
import collections
import sys
import os
class GeneEmbedding(object):
def __init__(self, embedding_file, compass_dataset, vector="1"):
if vector not in ("1","2","average"):
raise ValueError("Select the weight vector from: ('1','2','average')")
if vector == "average":
print("Loading average of 1st and 2nd weights.")
avg_embedding = embedding_file.replace(".vec","_avg.vec")
secondary_weights = embedding_file.replace(".vec","2.vec")
GeneEmbedding.average_vector_results(embedding_file,secondary_weights,avg_embedding)
self.embeddings = self.read_embedding(avg_embedding)
elif vector == "1":
print("Loading first weights.")
self.embeddings = self.read_embedding(embedding_file)
elif vector == "2":
print("Loading second weights.")
secondary_weights = embedding_file.replace(".vec","2.vec")
self.embeddings = self.read_embedding(secondary_weights)
self.vector = []
self.context = compass_dataset.data
self.embedding_file = embedding_file
self.vector = []
self.genes = []
for gene in tqdm.tqdm(self.embeddings.keys()):
# if gene in self.embeddings:
self.vector.append(self.embeddings[gene])
self.genes.append(gene)
def read_embedding(self, filename):
embedding = dict()
lines = open(filename,"r").read().splitlines()[1:]
for line in lines:
vector = line.split()
gene = vector.pop(0)
embedding[gene] = [float(x) for x in vector]
return embedding
def compute_similarities(self, gene, subset=None, feature_type=None):
if gene not in self.embeddings:
return None
if feature_type:
subset = []
            for g in list(self.embeddings.keys()):
                if feature_type == self.context.feature_types[g]:
                    subset.append(g)
embedding = self.embeddings[gene]
distances = dict()
if subset:
targets = set(list(self.embeddings.keys())).intersection(set(subset))
else:
targets = list(self.embeddings.keys())
for target in targets:
if target not in self.embeddings:
continue
v = self.embeddings[target]
distance = float(cosine_similarity(numpy.array(embedding).reshape(1, -1),numpy.array(v).reshape(1, -1))[0])
distances[target] = distance
sorted_distances = list(reversed(sorted(distances.items(), key=operator.itemgetter(1))))
genes = [x[0] for x in sorted_distances]
distance = [x[1] for x in sorted_distances]
df = pandas.DataFrame.from_dict({"Gene":genes, "Similarity":distance})
return df
def get_similar_genes(self, vector):
distances = dict()
targets = list(self.embeddings.keys())
for target in targets:
if target not in self.embeddings:
continue
v = self.embeddings[target]
distance = float(cosine_similarity(numpy.array(vector).reshape(1, -1),numpy.array(v).reshape(1, -1))[0])
distances[target] = distance
sorted_distances = list(reversed(sorted(distances.items(), key=operator.itemgetter(1))))
genes = [x[0] for x in sorted_distances]
distance = [x[1] for x in sorted_distances]
df = pandas.DataFrame.from_dict({"Gene":genes, "Similarity":distance})
return df
def cluster(self, threshold=0.75, lower_bound=1):
cluster_definitions = collections.defaultdict(list)
        G = self.generate_network(threshold=threshold)
        G.remove_edges_from(nx.selfloop_edges(G))
        for i, connected_component in enumerate(nx.connected_components(G)):
subg = G.subgraph(connected_component)
if len(subg.nodes()) > lower_bound:
# if len(subg.nodes()) == 2:
# cluster_definitions[str(i+j+100)] += list(subg.nodes())
# continue
                clique_tree = nx.tree.junction_tree(subg)
                clique_tree.remove_nodes_from(list(nx.isolates(clique_tree)))
for j, cc in enumerate(nx.connected_components(clique_tree)):
for clique_cc in cc:
cluster_definitions[str(i+j)] += list(set(clique_cc))
self.cluster_definitions = cluster_definitions
return self.cluster_definitions
def clusters(self, clusters):
average_vector = dict()
gene_to_cluster = collections.defaultdict(list)
matrix = collections.defaultdict(list)
total_average_vector = []
for gene, cluster in zip(self.context.expressed_genes, clusters):
if gene in self.embeddings:
matrix[cluster].append(self.embeddings[gene])
gene_to_cluster[cluster].append(gene)
total_average_vector.append(self.embeddings[gene])
self.total_average_vector = list(numpy.average(total_average_vector, axis=0))
for cluster, vectors in matrix.items():
xvec = list(numpy.average(vectors, axis=0))
average_vector[cluster] = numpy.subtract(xvec,self.total_average_vector)
return average_vector, gene_to_cluster
def generate_vector(self, genes):
vector = []
for gene, vec in zip(self.genes, self.vector):
if gene in genes:
vector.append(vec)
assert len(vector) != 0, genes
return list(numpy.median(vector, axis=0))
def cluster_definitions_as_df(self, top_n=20):
similarities = self.cluster_definitions
clusters = []
symbols = []
for key, genes in similarities.items():
clusters.append(key)
symbols.append(", ".join(genes[:top_n]))
df = pandas.DataFrame.from_dict({"Cluster Name":clusters, "Top Genes":symbols})
return df
def plot(self, png=None, method="TSNE", labels=[], pcs=None, remove=[]):
plt.figure(figsize = (8, 8))
ax = plt.subplot(1,1,1)
pcs = self.plot_reduction(self.cluster_labels, ax, labels=labels, method=method, pcs=pcs, remove=remove)
# if png:
# plt.savefig(png)
# plt.close()
# else:
plt.show()
return pcs
def marker_labels(self,top_n=5):
markers = []
cluster_definitions = self.cluster_definitions
marker_labels = dict()
for gclust, genes in cluster_definitions.items():
print(gclust, ",".join(genes[:5]))
markers += genes[:top_n]
for gene in genes[:top_n]:
marker_labels[gene] = gclust
return markers, marker_labels
def plot_reduction(self, clusters, ax, method="TSNE", labels=[], pcs=None, remove=[]):
if type(pcs) != numpy.ndarray:
if method == "TSNE":
print("Running t-SNE")
pca = TSNE(n_components=2, n_jobs=-1, metric="cosine")
pcs = pca.fit_transform(self.vector)
pcs = numpy.transpose(pcs)
print("Finished.")
else:
print("Running UMAP")
trans = umap.UMAP(random_state=42,metric='cosine').fit(self.vector)
x = trans.embedding_[:, 0]
y = trans.embedding_[:, 1]
pcs = [x,y]
print("Finished.")
if len(remove) != 0:
_pcsx = []
_pcsy = []
_clusters = []
for x, y, c in zip(pcs[0],pcs[1],clusters):
if c not in remove:
_pcsx.append(x)
_pcsy.append(y)
_clusters.append(c)
pcs = []
pcs.append(_pcsx)
pcs.append(_pcsy)
clusters = _clusters
data = {"x":pcs[0],"y":pcs[1], "Cluster":clusters}
df = pandas.DataFrame.from_dict(data)
sns.scatterplot(data=df,x="x", y="y",hue="Cluster", ax=ax)
plt.xlabel("{}-1".format(method))
plt.ylabel("{}-2".format(method))
ax.set_xticks([])
ax.set_yticks([])
if len(labels):
for x, y, gene in zip(pcs[0], pcs[1], self.context.expressed_genes):
if gene in labels:
ax.text(x+.02, y, str(gene), fontsize=8)
return pcs
def subtract_vector(self, vector):
for gene, vec in self.embeddings.items():
            vec = numpy.subtract(vec, vector)
self.embeddings[gene] = vec
def relabel_clusters(self, clusters, annotations):
_clusters = []
for cluster in clusters:
if cluster in annotations:
_clusters.append(annotations[cluster])
else:
_clusters.append(cluster)
self.cluster_labels = _clusters
return _clusters
def plot_similarity_matrix(self, top_n=5, png=None):
markers, marker_labels = self.marker_labels(top_n=top_n)
cmap = matplotlib.cm.tab20
node_color = {}
type_color = {}
ctypes = []
for marker in markers:
if marker_labels:
ctypes = []
for value in marker_labels.values():
ctypes.append(value)
ctypes = list(set(ctypes))
node_color[marker] = cmap(ctypes.index(marker_labels[marker]))
type_color[marker_labels[marker]] = cmap(ctypes.index(marker_labels[marker]))
mm = pandas.DataFrame(markers, index=markers)
mm["Gene Cluster"] = mm[0]
row_colors = mm["Gene Cluster"].map(node_color)
similarity_matrix = []
markers = set(list(self.embeddings.keys())).intersection(set(markers))
markers = list(markers)
for marker in markers:
row = []
res = self.compute_similarities(marker, subset=markers)
resdict = dict(zip(res["Gene"],res["Similarity"]))
for gene in markers:
row.append(resdict[gene])
similarity_matrix.append(row)
from matplotlib.patches import Patch
plt.figure()
matrix = numpy.array(similarity_matrix)
df = | pandas.DataFrame(matrix,index=markers,columns=markers) | pandas.DataFrame |
"""Utilities for working with pandas & JS datetimes."""
import re
from typing import Union, Set
import pandas as pd
from dateutil.tz import tzlocal
__all__ = ["compute_timeunit"]
Date = Union[pd.Series, pd.DatetimeIndex, pd.Timestamp]
def compute_timeunit(date: Date, timeunit: str) -> Date:
"""Evaluate a timeUnit transform.
Parameters
----------
date : pd.DatetimeIndex, pd.Series, or pd.Timestamp
The date to be converted
timeunit : string
The Altair timeUnit identifier.
Returns
-------
date_tu : pd.DatetimeIndex, pd.Series, or pd.Timestamp
The converted date, of the same type as the input.
"""
# Convert to either UTC or localtime as appropriate.
def dt(date):
return date.dt if isinstance(date, pd.Series) else date
if dt(date).tz is None:
date = dt(date).tz_localize(tzlocal())
date = dt(date).tz_convert("UTC" if timeunit.startswith("utc") else tzlocal())
if isinstance(date, pd.Series):
return pd.Series(_compute_timeunit(timeunit, date.dt))
elif isinstance(date, pd.Timestamp):
return _compute_timeunit(timeunit, | pd.DatetimeIndex([date]) | pandas.DatetimeIndex |
from sklearn.pipeline import make_pipeline
import category_encoders as ce
from sklearn.impute import SimpleImputer
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import accuracy_score, precision_score, recall_score, f1_score
from sklearn.model_selection import train_test_split
import pandas as pd
def basic_classifier(df, columns, target):
'''
    This function is an elementary RandomForest classifier that cleans, splits,
    and predicts on a validation set. Returns the metrics accuracy, precision (lab_class = 1),
    recall, and f1.
    Takes three arguments: raw dataframe, list of column headers for that dataframe (df.columns),
    and the binary target column name as a string.
Fills NaNs with string value 'missing', applies OrdinalEncoder, and applies
SimpleImputer with strategy 'mean'.
'''
# This section defines input as 'dataframe' and fills nan values with
# string value, 'missing'
df = pd.DataFrame(df)
df = df.fillna('missing')
# This section defines our basic pipeline
cleaner = make_pipeline(
ce.OrdinalEncoder(),
SimpleImputer(strategy='mean')
)
# This section applies pipeline to our dataframe
df_clean = cleaner.fit_transform(df)
df_clean = | pd.DataFrame(df_clean) | pandas.DataFrame |
import pandas as pd
from unittest import TestCase, mock
from unittest.mock import MagicMock, PropertyMock
from gtfs_kit.feed import Feed
from representation.gtfs_metadata import GtfsMetadata
from representation.gtfs_representation import GtfsRepresentation
from usecase.process_routes_count_by_type_for_gtfs_metadata import (
process_routes_count_by_type_for_gtfs_metadata,
ROUTE_TYPE,
TRAM_CODE,
SUBWAY_CODE,
RAIL_CODE,
BUS_CODE,
FERRY_CODE,
CABLE_TRAM_CODE,
AERIAL_LIFT_CODE,
FUNICULAR_CODE,
TROLLEY_BUS_CODE,
MONORAIL_CODE,
)
class TestProcessRoutesCountByTypeForGtfsMetadata(TestCase):
def test_process_routes_count_with_none_gtfs_representation(
self,
):
self.assertRaises(
TypeError, process_routes_count_by_type_for_gtfs_metadata, None
)
def test_process_routes_count_with_invalid_gtfs_representation(
self,
):
mock_gtfs_representation = MagicMock()
mock_gtfs_representation.__class__ = str
self.assertRaises(
TypeError,
process_routes_count_by_type_for_gtfs_metadata,
mock_gtfs_representation,
)
@mock.patch("usecase.process_routes_count_by_type_for_gtfs_metadata.os.environ")
def test_process_routes_count_with_valid_gtfs_representation_should_return_instance(
self, mock_env
):
test_env = {
TRAM_CODE: "test_tram_code",
SUBWAY_CODE: "test_subway_code",
RAIL_CODE: "test_rail_code",
BUS_CODE: "test_bus_code",
FERRY_CODE: "test_ferry_code",
CABLE_TRAM_CODE: "test_cable_tram_code",
AERIAL_LIFT_CODE: "test_aerial_lift_code",
FUNICULAR_CODE: "test_funicular_code",
TROLLEY_BUS_CODE: "test_trolley_bus_code",
MONORAIL_CODE: "test_monorail_code",
}
mock_env.__getitem__.side_effect = test_env.__getitem__
mock_gtfs_representation = MagicMock()
mock_gtfs_representation.__class__ = GtfsRepresentation
under_test = process_routes_count_by_type_for_gtfs_metadata(
mock_gtfs_representation
)
self.assertIsInstance(under_test, GtfsRepresentation)
@mock.patch("usecase.process_routes_count_by_type_for_gtfs_metadata.os.environ")
def test_process_routes_count_with_missing_files(self, mock_env):
mock_dataset = MagicMock()
mock_dataset.__class__ = Feed
mock_metadata = MagicMock()
mock_metadata.__class__ = GtfsMetadata
mock_gtfs_representation = MagicMock()
mock_gtfs_representation.__class__ = GtfsRepresentation
type(mock_gtfs_representation).dataset = mock_dataset
type(mock_gtfs_representation).metadata = mock_metadata
under_test = process_routes_count_by_type_for_gtfs_metadata(
mock_gtfs_representation
)
self.assertIsInstance(under_test, GtfsRepresentation)
mock_env.assert_not_called()
mock_metadata.routes_count_by_type.assert_not_called()
@mock.patch("usecase.process_routes_count_by_type_for_gtfs_metadata.os.environ")
def test_process_routes_count_with_missing_fields(self, mock_env):
mock_routes = PropertyMock(return_value= | pd.DataFrame({}) | pandas.DataFrame |
from unittest import TestCase
import pandas as pd
from cellphonedb.utils import dataframe_functions
class TestDataframeSameData(TestCase):
def test_compare_empty(self):
self.assertTrue(dataframe_functions.dataframes_has_same_data(pd.DataFrame(), pd.DataFrame()))
def test_equal(self):
dataframe1 = pd.DataFrame({'col1': [1, 2], 'col2': [3, 4]})
dataframe2 = | pd.DataFrame({'col1': [1, 2], 'col2': [3, 4]}) | pandas.DataFrame |
"""
generate_gumbel_return_periods.py
Author: <NAME>, <NAME>
Copyright May 2020
License: BSD 3 Clause
Updated: July 2020
Script to compute Gumbel return periods (2-, 5-, 10-, 25-, 50-, and 100-yr) based on historical streamflow simulation.
The historical simulation needs to be in daily increments.
To run the script, give 3 additional arguments:
1. path to master directory containing complete historical simulation netCDF's (with subfolders for each region)
2. path to the directory of log files
3. ending year to process (eg: 2020)
python generate_gumbel_return_periods.py /path/to/era-5/results/dir /path/to/logs/dir end_year_of_simulation
"""
import os
import sys
import netCDF4
import logging
import statistics
import math
import datetime
import pandas
from datetime import datetime
def solve_gumbel_flow(std, xbar, rp):
"""
    Inverts the Gumbel Type I CDF, F(b) = exp(-exp(-b)),
    where b is the reduced variate, to return the flow for return period rp
"""
return -math.log(-math.log(1 - (1 / rp))) * std * .7797 + xbar - (.45 * std)
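# Worked example (numbers are illustrative, in the same units as the input flows):
# with xbar = 100 and std = 20, the 100-year flow is
#
#     solve_gumbel_flow(20, 100, 100)
#     # = -ln(-ln(1 - 1/100)) * 20 * 0.7797 + 100 - 0.45 * 20
#     # ~= 4.600 * 15.594 + 91.0 ~= 162.7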
def daily_to_yearly_max_flow(daily_flow_list, start_yr, end_yr):
yearly_max_flows = []
dates = pandas.Series(pandas.date_range(str(start_yr) + '-1-1 00:00:00', periods=len(daily_flow_list), freq='D'))
df = | pandas.DataFrame(daily_flow_list, columns=['simulated_flow'], index=dates) | pandas.DataFrame |
# Importing Flask modules:
from flask import Blueprint, make_response, render_template, flash, redirect
from flask import current_app as app
# Importing Flask REST API modules:
from flask_restful import Resource, reqparse, Api
# Importing 3rd party packages:
import ast
import json
import datetime
import networkx as nx
import plotly
import plotly.graph_objects as go
import pandas as pd
# Importing internal packages:
from .models import MicroServiceLog, Microservice, db
from .forms import MicroserviceCreationForm
# Blueprint Configuration:
microservice_bp = Blueprint(
"microservice_bp", __name__,
template_folder = "templates",
static_folder = "static"
)
# Creating API:
api = Api(microservice_bp)
# Creating the request parser object for all python logging field:
log_parser = reqparse.RequestParser()
log_parser.add_argument("name")
log_parser.add_argument("msg")
log_parser.add_argument("args")
log_parser.add_argument("levelname")
log_parser.add_argument("created")
log_parser.add_argument("lineno")
log_parser.add_argument("funcName")
log_parser.add_argument("msecs")
log_parser.add_argument("relativeCreated")
log_parser.add_argument("thread")
log_parser.add_argument("threadName")
log_parser.add_argument("processName")
log_parser.add_argument("process")
class MicroServiceLogs(Resource):
"""The REST API functions for handeling python logs sent to the server from
velokzz microservices.
GET - Display the data based on the query params.
POST - Ingest Log information.
PUT - N/A
DELETE - N/A
"""
def get(self):
"""Querying all avalible microservice logs that conform to the url query parameters.
TODO: Add URL query param support.
"""
# Querying all logs made from the database:
logs = MicroServiceLog.query.all()
        # Unpacking the SQLAlchemy objects into serialized JSON:
logs = [
{
"name":log.name,
"msg": log.msg,
"app_name":log.app_name,
"process_type":log.process_type,
"status_code": log.status_code,
"levelname":log.levelname,
"created":log.created.strftime("%m/%d/%Y, %H:%M:%S"),
"lineno":log.lineno,
"funcName":log.funcName,
"msecs":log.msecs,
"relativeCreated":log.relativeCreated.strftime("%m/%d/%Y, %H:%M:%S"),
"thread":log.thread,
"threadName":log.threadName,
"processName":log.processName,
"process":log.process
}
for log in logs
]
return logs
def post(self):
"""Handeling POST requests made to the server containing logs.
The method error checks each post request to ensure that it conforms
to a specific structure. If the request body contains the correct params
the method performs type conversion and unpacks all params to create log
SQLA objects that are written to the database.
"""
# Extracting all log params:
args = log_parser.parse_args()
if {
'args', 'created', 'lineno', 'msecs', 'relativeCreated',
'thread', 'name', 'msg', 'levelname', 'funcName', 'threadName',
'processName', 'process'
} <= set(args):
# Converting the string tuple to actual tuple and unpacking:
app_name, process_type, status_code = ast.literal_eval(args["args"])
# Converting the arguments to the correct data types:
status_code = int(status_code)
created_obj = datetime.datetime.fromtimestamp(float(args["created"]))
lineno = int(args["lineno"])
msecs = float(args["msecs"])
relativeCreated = datetime.datetime.fromtimestamp(float(args["relativeCreated"]))
            thread_id = int(args["thread"])
# Creating Log w/ ORM object:
new_log = MicroServiceLog(
name = args["name"],
msg = args["msg"],
app_name = app_name,
process_type = process_type,
status_code = status_code,
levelname = args["levelname"],
created = created_obj,
lineno = lineno,
funcName = args["funcName"],
msecs = msecs,
relativeCreated = relativeCreated,
                thread = thread_id,
threadName = args["threadName"],
processName = args["processName"],
process = args["process"]
)
# Commiting a Log Object to the database:
db.session.add(new_log)
db.session.commit()
return make_response(f"Log {app_name}{process_type}{created_obj} Successfully")
else:
raise Warning("Log not Written to the database. Placeholder for error catching.")
pass
# Registering Microservice Log Routes:
api.add_resource(MicroServiceLogs, "/api/")
# Non REST API Routes:
@microservice_bp.route("/", methods=["GET"])
def microservice_log_home():
# Querying the Microservice objects:
microservices = Microservice.query.all()
# Creating the graph plot from all the microservices:
microservice_G = nx.Graph()
microservice_G.add_node("Velkozz_REST_API") # Central node for the graph, the velkozz REST API.
# Adding Microservice objects as nodes to the graph:
microservice_G.add_nodes_from(microservices)
for microservice in microservices:
microservice_G.add_edge(microservice, "Velkozz_REST_API")
# Generating the positions for each node in the graph:
pos = nx.spring_layout(microservice_G)
# formatting the nodes and edges of the graph:
for n, p in pos.items():
microservice_G.nodes[n]["pos"] = p
# Creating the scatter plot for the edges:
edge_trace = go.Scatter(
x = [],
y = [],
line = dict(width=0.5, color="#888"),
hoverinfo="none",
mode="lines"
)
# Populating the edge trace with x and y values:
for edge in microservice_G.edges():
x0, y0 = microservice_G.nodes[edge[0]]['pos']
x1, y1 = microservice_G.nodes[edge[1]]['pos']
edge_trace["x"] += tuple([x0, x1, None])
edge_trace["y"] += tuple([y0, y1, None])
# Creating the scatter plot for graph nodes:
node_trace = go.Scatter(
x=[],
y=[],
text=[],
mode="markers",
hoverinfo="text",
marker=dict(
showscale=True,
colorscale='RdBu',
reversescale=True,
color=[],
size=15,
colorbar=dict(
thickness=10,
title='Node Connections',
xanchor='left',
titleside='right'),
line=dict(width=0)))
# Populating the node scatter trace with x and y values:
for node in microservice_G.nodes():
x, y = microservice_G.nodes[node]['pos']
node_trace["x"] += tuple([x])
node_trace["y"] += tuple([y])
# Labeling the nodes text and color:
for node in microservice_G.nodes():
# Determining if the node is a microservice or the REST API:
if type(node) == str:
# Labeling the node:
node_trace["text"] += tuple([node])
# Setting REST API node color:
node_trace["marker"]["color"] += tuple(["#FF00FF"]) # This overwrites the scatterplot params w/ config values.
else:
# Labeling the node:
node_trace["text"] += tuple([f"{node.microservice_name} Microservice"])
# Setting the node color for Microservices:
node_trace["marker"]['color'] += tuple(["#0000FF"])
# Creating the total graph figure:
fig = go.Figure(
data=[edge_trace, node_trace],
layout = go.Layout(
titlefont=dict(size=16),
paper_bgcolor="rgba(0,0,0,0)",
plot_bgcolor="rgba(0,0,0,0)",
font=dict(color="#b2becd"),
showlegend=False,
hovermode="closest",
margin=dict(b=20,l=5,r=5,t=40),
annotations=[dict(
text="No. of connections",
showarrow=False,
xref="paper", yref="paper")],
xaxis=dict(showgrid=False, zeroline=False, showticklabels=False),
yaxis=dict(showgrid=False, zeroline=False, showticklabels=False)))
# Converting the plotly graph to a JSON object to be passed to the frontend:
graphJSON = json.dumps(fig, cls=plotly.utils.PlotlyJSONEncoder)
# TODO: Filter the logs sent by microservices.
# Create line graphs of all of the microservices sent per microservice.
# Querying all microservice logs:
# Creating the previous week timeframe that is used to filter the microservice logs:
prev_week = datetime.datetime.today() - datetime.timedelta(days=7)
microservice_logs = MicroServiceLog.query.filter(MicroServiceLog.created >= prev_week).all()
# If there are no microservice logs, render the template without applying further logic:
if len(microservice_logs) <= 0:
return render_template("microservice_home.html", microservices=microservices, graphJSON=graphJSON)
# Converting the microservice query object to a list of dictionary:
microservice_log_dicts = [
{
"name":microservice_log.name,
"msg":microservice_log.msg,
"app_name":microservice_log.app_name,
"process_type":microservice_log.process_type,
"status_code":microservice_log.status_code,
"levelname":microservice_log.levelname,
"created":microservice_log.created,
"lineno":microservice_log.lineno,
"funcName":microservice_log.funcName,
"msecs":microservice_log.msecs,
"relativeCreated":microservice_log.relativeCreated,
"thread":microservice_log.thread,
"threadName":microservice_log.threadName,
"processName":microservice_log.processName,
"process":microservice_log.process
} for microservice_log in microservice_logs
]
microservice_df = pd.DataFrame.from_dict(microservice_log_dicts)
microservice_df["_counter"] = 1
# Slicing dataframes based on specific microservices:
microservice_slices = {}
for microservice in microservices:
# Slicing dataframe:
df_slice = microservice_df.loc[microservice_df["app_name"] == microservice.microservice_name]
# Transforming data to create daily frequency counts:
df_slice.set_index("created", inplace=True)
def add_microservice_log_data(microservice_slice_dict, microservice_name, microservice_df):
"""Method tries to extract the number of daily occurances of the specific
log level from the dataframe.
It extracts this data, seralizes it and adds the seralized data to the main
'microservice_slices' dict.
"""
# List of log level to process:
levels = ["INFO", "WARN", "ERR.", "CRITICAL", "WARNING", "ERROR"]
# Iterating through the dataframe slicing and resampling data to get counts of logs:
level_data_dict = {}
for level in levels:
try:
level_slice = microservice_df.loc[microservice_df["levelname"] == level, "_counter"].squeeze().resample("D").sum()
except Exception:
level_slice = None
if level_slice is not None:
# Building nested dict for the specific log level data:
level_data_dict[level] = {
"Date_Index":level_slice.index,
"Data": list(level_slice.values)
}
else:
pass
# Add the level_data_dict onto the main microservice slice dict:
microservice_slice_dict[microservice_name] = level_data_dict
add_microservice_log_data(microservice_slices, microservice, df_slice)
# TODO: use Microservice Slice Dict to create plotly timeseries and pass them to the front-end.
# Iterating through the microservice dict to create a plotly timeseries for each microservice:
levels = ["INFO", "WARN", "ERR.", "CRITICAL", "WARNING", "ERROR"]
log_scatterplots = {}
for microservice in microservice_slices:
# Splitting the microservice description to make it formattable:
microservice_desc_lst = microservice.microservice_description.split(" ")
if len(microservice_desc_lst) > 9:
# Inserting line breaks:
microservice_desc_lst.insert(10, "<br>")
# Re-converting the list of strings to a single string and adding it back to the microservice object:
microservice.microservice_description = " ".join(microservice_desc_lst)
microservice_fig = go.Figure(
layout=go.Layout(
title=dict(
text=microservice.microservice_description,
y=0.9,
x=0.5,
xanchor="center",
yanchor="top"
),
paper_bgcolor="rgba(0,0,0,0)",
plot_bgcolor="rgba(0,0,0,0)",
font=dict(color="#b2becd"),
xaxis=dict(
title="Local Time",
gridcolor="#b2becd",
linecolor="#b2becd",
linewidth= 1,
mirror= True,
showgrid=False),
yaxis=dict(
title="Log Frequency",
gridcolor= "#b2becd",
linecolor= "#b2becd",
linewidth= 2,
mirror= True,
showgrid= False)
)
)
# Iterating over the logging levels to add traces to the main figure:
for level in levels:
try:
microservice_fig.add_trace(go.Scatter(
name=f"{level}",
mode="markers+lines",
x=microservice_slices[microservice][level]["Date_Index"],
y=microservice_slices[microservice][level]["Data"]
))
except KeyError:
# Skip logging levels with no data for this microservice:
pass
# Adding the built figure to the scatterplot dict:
log_scatterplots[microservice] = json.dumps(microservice_fig, cls=plotly.utils.PlotlyJSONEncoder)
# Now that all scatterplots have been built adding the searlized data to the microservice query objects:
for microservice in microservices:
microservice.timeseries = log_scatterplots[microservice]
return render_template("microservice_home.html", microservices=microservices, graphJSON=graphJSON)
# Route to delete microservice object:
@microservice_bp.route("/remove/<microservice>")
def remove_microservice(microservice):
# TODO: Write deletion function after writing adding and editing function.
# Querying to ensure that the microservice exists:
microservice = Microservice.query.filter_by(microservice_name=microservice).first()
if microservice is not None:
# Deleting Microservice:
msg_text = f"Microservice {microservice.microservice_name} successfully removed"
db.session.delete(microservice)
db.session.commit()
flash(msg_text)
return redirect("/")
else:
return redirect("/")
# Route for Microservice creation:
@microservice_bp.route("/add/", methods=["GET", "POST"])
def microservice_creation_form():
# Creating Microservice creation form:
form = MicroserviceCreationForm()
# Processing Form info and adding microservice to database:
if form.validate_on_submit():
# TODO: Add logic that allows you to create OR update an existing object via this endpoint.
# Querying the microservice object to see if it already exists:
existing_microservice = Microservice.query.filter_by(microservice_name=form.microservice_name.data).first()
if existing_microservice is not None:
# Updating an existing microservice fields:
existing_microservice.microservice_description = form.microservice_description.data
existing_microservice.date_added = datetime.datetime.now()
# Adding updated object to the database:
db.session.commit()
else:
# Creating a Microservice Object:
new_microservice = Microservice(
microservice_name=form.microservice_name.data,
microservice_description=form.microservice_description.data,
date_added=datetime.datetime.now()
)
# Committing a Microservice object to the database:
db.session.add(new_microservice)
db.session.commit()
return redirect("/microservices/")
return render_template("microservice_creation_form.html", form=form)
# Route for a specific Microservice:
@microservice_bp.route("/dashboard/<microservice>/", methods=["GET"])
def specific_microservice_dashboard(microservice):
"""
Method extracts the data to construct a specific Microservice dashboard and
passes this data into the HTML template.
"""
levels = ["INFO", "WARN", "ERR.", "CRITICAL", "WARNING", "ERROR"]
# Creating the previous week timeframe that is used to filter the microservice logs:
prev_week = datetime.datetime.today() - datetime.timedelta(days=7)
# Querying the single microservice from the Database:
microservice = Microservice.query.filter_by(microservice_name=microservice).first()
# TODO: Add logic to redirect the route if the microservice does not exist.
if microservice is not None:
# Querying the logs from a specific microservice:
microservice_logs = MicroServiceLog.query.filter_by(
app_name=microservice.microservice_name).filter(MicroServiceLog.created >= prev_week).order_by(
MicroServiceLog.created.desc()).all()
# Render the template without graphs and other display elements if there are no logs:
if len(microservice_logs) <= 0:
return render_template("microservice_dashboard.html", microservice=microservice, microservice_logs=microservice_logs)
# Converting the Microservice Logs to a dataframe:
microservice_log_dicts = [
{
"name":microservice_log.name,
"msg":microservice_log.msg,
"app_name":microservice_log.app_name,
"process_type":microservice_log.process_type,
"status_code":microservice_log.status_code,
"levelname":microservice_log.levelname,
"created":microservice_log.created,
"lineno":microservice_log.lineno,
"funcName":microservice_log.funcName,
"msecs":microservice_log.msecs,
"relativeCreated":microservice_log.relativeCreated,
"thread":microservice_log.thread,
"threadName":microservice_log.threadName,
"processName":microservice_log.processName,
"process":microservice_log.process
} for microservice_log in microservice_logs
]
# Creating and refactoring the dataframe into a daily count of log frequency:
microservice_df = | pd.DataFrame.from_dict(microservice_log_dicts) | pandas.DataFrame.from_dict |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sun Dec 20 09:34:57 2020
@author: <NAME>
"""
import pandas as pd
import argparse
import sys
import os
from comprehensive_tcga_survival import StageGradeParser
import biomarker_survival as surv
class Multivariate():
def __init__(self, tcga_cdr, stage_key, grade_key):
self.tcga_cdr = tcga_cdr
self.stage = StageGradeParser(stage_key, tcga_cdr)
self.grade = StageGradeParser(grade_key, tcga_cdr)
def prep_ctype_multivar(self, ctype):
gender_age = self.tcga_cdr.cancer_type_data(ctype,
extra_cols=['gender', 'age_at_initial_pathologic_diagnosis'])
gender_age = gender_age[['gender', 'age_at_initial_pathologic_diagnosis']]
gender_age = gender_age.replace({'FEMALE': 0, 'MALE': 1})
stage_cols = self.stage.parse_for_ctype(ctype)
grade_cols = self.grade.parse_for_ctype(ctype)
multivar_cols = gender_age
if not stage_cols.empty:
multivar_cols = multivar_cols.join(stage_cols, how='inner')
if not grade_cols.empty:
multivar_cols = multivar_cols.join(grade_cols, how='inner')
return multivar_cols
# returns a dictionary of age, gender, and appropriate stage/grade features,
# keyed by cancer type
def prep_all_multivar(self):
multivars = {}
for c in self.tcga_cdr.cancer_types():
multivars[c] = self.prep_ctype_multivar(c)
return multivars
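# Hypothetical usage sketch (assumes the TCGA CDR clinical file and stage/grade key
# files exist locally; file names and the 'BRCA' label are placeholders):
#   tcga_cdr = surv.TCGA_CDR_util('TCGA-CDR.xlsx')
#   m = Multivariate(tcga_cdr, 'stage_key.csv', 'grade_key.csv')
#   multivar_by_ctype = m.prep_all_multivar()
#   brca_covariates = multivar_by_ctype.get('BRCA')  # age, gender, stage/grade columns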
#%%%
def get_options(argv):
parser = argparse.ArgumentParser(description='Get TCGA CDR clinical file, stage/grade key files, optional output dir')
parser.add_argument('-c', action='store', dest='tcga_cdr')
parser.add_argument('-s', action='store', dest='stage_key')
parser.add_argument('-g', action='store', dest='grade_key')
parser.add_argument('-o', action='store', dest='output_directory', default='.')
ns = parser.parse_args()
return ns.tcga_cdr, ns.stage_key, ns.grade_key, ns.output_directory
def multivar_main(argv=None):
clinical, stage_key, grade_key, outdir = get_options(argv)
tcga_cdr = surv.TCGA_CDR_util(clinical)
m = Multivariate(tcga_cdr, stage_key, grade_key)
cox_dicts = {}
for ctype in tcga_cdr.cancer_types():
print(ctype)
data_cols = m.prep_ctype_multivar(ctype)
time_censor = tcga_cdr.cancer_type_data(ctype)
ctype_patient_count = time_censor.shape[0]
df = time_censor.join(data_cols, how='inner').dropna(how='any')
cox_dict = surv.do_multivariate_cox(df['time'], df['censor'], df['gender'], df.drop(['time', 'censor', 'gender'], axis=1))
cox_dict['total patient count'] = ctype_patient_count
cox_dicts[ctype] = | pd.Series(cox_dict) | pandas.Series |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Jan 26 15:39:02 2018
@author: joyce
"""
import pandas as pd
import numpy as np
from numpy.matlib import repmat
from stats import get_stockdata_from_sql,get_tradedate,Corr,Delta,Rank,Cross_max,\
Cross_min,Delay,Sum,Mean,STD,TsRank,TsMax,TsMin,DecayLinear,Count,SMA,Cov,DTM,DBM,\
Highday,Lowday,HD,LD,RegBeta,RegResi,SUMIF,get_indexdata_from_sql,timer,get_fama
class stAlpha(object):
def __init__(self,begin,end):
self.begin = begin
self.end = end
self.close = get_stockdata_from_sql(1,self.begin,self.end,'Close')
self.open = get_stockdata_from_sql(1,self.begin,self.end,'Open')
self.high = get_stockdata_from_sql(1,self.begin,self.end,'High')
self.low = get_stockdata_from_sql(1,self.begin,self.end,'Low')
self.volume = get_stockdata_from_sql(1,self.begin,self.end,'Vol')
self.amt = get_stockdata_from_sql(1,self.begin,self.end,'Amount')
self.vwap = get_stockdata_from_sql(1,self.begin,self.end,'Vwap')
self.ret = get_stockdata_from_sql(1,begin,end,'Pctchg')
self.close_index = get_indexdata_from_sql(1,begin,end,'close','000001.SH')
self.open_index = get_indexdata_from_sql(1,begin,end,'open','000001.SH')
# self.mkt = get_fama_from_sql()
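# Hypothetical usage sketch (assumes the price/volume SQL tables behind the
# get_*_from_sql helpers are populated; the date range is a placeholder):
#   st = stAlpha('2017-01-01', '2017-12-31')
#   a1 = st.alpha1()    # DataFrame with a single 'alpha1' column
#   a14 = st.alpha14()  # each alphaN method returns one factor column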
@timer
def alpha1(self):
volume = self.volume
ln_volume = np.log(volume)
ln_volume_delta = Delta(ln_volume,1)
close = self.close
Open = self.open
price_temp = pd.concat([close,Open],axis = 1,join = 'outer')
price_temp['ret'] = (price_temp['Close'] - price_temp['Open'])/price_temp['Open']
del price_temp['Close'],price_temp['Open']
r_ln_volume_delta = Rank(ln_volume_delta)
r_ret = Rank(price_temp)
rank = pd.concat([r_ln_volume_delta,r_ret],axis = 1,join = 'inner')
rank.columns = ['r1','r2']
corr = Corr(rank,6)
alpha = corr
alpha.columns = ['alpha1']
return alpha
@timer
def alpha2(self):
close = self.close
low = self.low
high = self.high
temp = pd.concat([close,low,high],axis = 1,join = 'outer')
temp['alpha'] = (2 * temp['Close'] - temp['Low'] - temp['High']) \
/ (temp['High'] - temp['Low'])
del temp['Close'],temp['Low'],temp['High']
alpha = -1 * Delta(temp,1)
alpha.columns = ['alpha2']
return alpha
@timer
def alpha3(self):
close = self.close
low = self.low
high = self.high
temp = pd.concat([close,low,high],axis = 1,join = 'outer')
close_delay = Delay(pd.DataFrame(temp['Close']),1)
close_delay.columns = ['close_delay']
temp = pd.concat([temp,close_delay],axis = 1,join = 'inner')
temp['min'] = Cross_max(pd.DataFrame(temp['close_delay']),pd.DataFrame(temp['Low']))
temp['max'] = Cross_min(pd.DataFrame(temp['close_delay']),pd.DataFrame(temp['High']))
temp['alpha_temp'] = 0
temp['alpha_temp'][temp['Close'] > temp['close_delay']] = temp['Close'] - temp['min']
temp['alpha_temp'][temp['Close'] < temp['close_delay']] = temp['Close'] - temp['max']
alpha = Sum(pd.DataFrame(temp['alpha_temp']),6)
alpha.columns = ['alpha3']
return alpha
@timer
def alpha4(self):
close = self.close
volume = self.volume
close_mean_2 = Mean(close,2)
close_mean_8 = Mean(close,8)
close_std = STD(close,8)
volume_mean_20 = Mean(volume,20)
data = pd.concat([close_mean_2,close_mean_8,close_std,volume_mean_20,volume],axis = 1,join = 'inner')
data.columns = ['close_mean_2','close_mean_8','close_std','volume_mean_20','volume']
data['alpha'] = -1
data['alpha'][data['close_mean_2'] < data['close_mean_8'] - data['close_std']] = 1
data['alpha'][data['volume']/data['volume_mean_20'] >= 1] = 1
alpha = pd.DataFrame(data['alpha'])
alpha.columns = ['alpha4']
return alpha
@timer
def alpha5(self):
volume = self.volume
high = self.high
r1 = TsRank(volume,5)
r2 = TsRank(high,5)
rank = pd.concat([r1,r2],axis = 1,join = 'inner')
rank.columns = ['r1','r2']
corr = Corr(rank,5)
alpha = -1 * TsMax(corr,5)
alpha.columns = ['alpha5']
return alpha
@timer
def alpha6(self):
Open = self.open
high = self.high
df = pd.concat([Open,high],axis = 1,join = 'inner')
df['price'] = df['Open'] * 0.85 + df['High'] * 0.15
df_delta = Delta(pd.DataFrame(df['price']),1)
alpha = Rank(np.sign(df_delta))
alpha.columns = ['alpha6']
return alpha
@timer
def alpha7(self):
close = self.close
vwap = self.vwap
volume = self.volume
volume_delta = Delta(volume,3)
data = pd.concat([close,vwap],axis = 1,join = 'inner')
data['diff'] = data['Vwap'] - data['Close']
r1 = Rank(TsMax(pd.DataFrame(data['diff']),3))
r2 = Rank(TsMin(pd.DataFrame(data['diff']),3))
r3 = Rank(volume_delta)
rank = pd.concat([r1,r2,r3],axis = 1,join = 'inner')
rank.columns = ['r1','r2','r3']
alpha = (rank['r1'] + rank['r2'])* rank['r3']
alpha = pd.DataFrame(alpha)
alpha.columns = ['alpha7']
return alpha
@timer
def alpha8(self):
high = self.high
low = self.low
vwap = self.vwap
data = pd.concat([high,low,vwap],axis = 1,join = 'inner')
data_price = (data['High'] + data['Low'])/2 * 0.2 + data['Vwap'] * 0.2
data_price_delta = Delta(pd.DataFrame(data_price),4) * -1
alpha = Rank(data_price_delta)
alpha.columns = ['alpha8']
return alpha
@timer
def alpha9(self):
high = self.high
low = self.low
volume = self.volume
data = pd.concat([high,low,volume],axis = 1,join = 'inner')
data['price']= (data['High'] + data['Low'])/2
data['price_delay'] = Delay(pd.DataFrame(data['price']),1)
alpha_temp = (data['price'] - data['price_delay']) * (data['High'] - data['Low'])/data['Vol']
alpha_temp_unstack = alpha_temp.unstack(level = 'ID')
alpha = alpha_temp_unstack.ewm(span = 7, ignore_na = True, min_periods = 7).mean()
alpha_final = alpha.stack()
alpha = pd.DataFrame(alpha_final)
alpha.columns = ['alpha9']
return alpha
@timer
def alpha10(self):
ret = self.ret
close = self.close
ret_std = STD(pd.DataFrame(ret),20)
ret_std.columns = ['ret_std']
data = pd.concat([ret,close,ret_std],axis = 1, join = 'inner')
temp1 = pd.DataFrame(data['ret_std'][data['Pctchg'] < 0])
temp2 = pd.DataFrame(data['Close'][data['Pctchg'] >= 0])
temp1.columns = ['temp']
temp2.columns = ['temp']
temp = pd.concat([temp1,temp2],axis = 0,join = 'outer')
temp_order = pd.concat([data,temp],axis = 1)
temp_square = pd.DataFrame(np.power(temp_order['temp'],2))
alpha_temp = TsMax(temp_square,5)
alpha = Rank(alpha_temp)
alpha.columns = ['alpha10']
return alpha
@timer
def alpha11(self):
high = self.high
low = self.low
close = self.close
volume = self.volume
data = pd.concat([high,low,close,volume],axis = 1,join = 'inner')
data_temp = ((data['Close'] - data['Low']) - (data['High'] - data['Close'])) \
/ (data['High'] - data['Low']) * data['Vol']
alpha = Sum(pd.DataFrame(data_temp),6)
alpha.columns = ['alpha11']
return alpha
@timer
def alpha12(self):
Open = self.open
vwap = self.vwap
close = self.close
data = pd.concat([Open,vwap,close],axis = 1, join = 'inner')
data['p1'] = data['Open'] - Mean(data['Open'],10)
data['p2'] = data['Close'] - data['Vwap']
r1 = Rank(pd.DataFrame(data['p1']))
r2 = Rank(pd.DataFrame(np.abs(data['p2'])))
rank = pd.concat([r1,r2],axis = 1,join = 'inner')
rank.columns = ['r1','r2']
alpha = rank['r1'] - rank['r2']
alpha = pd.DataFrame(alpha)
alpha.columns = ['alpha12']
return alpha
@timer
def alpha13(self):
high = self.high
low = self.low
vwap = self.vwap
data = pd.concat([high,low,vwap],axis = 1,join = 'inner')
alpha = (data['High'] + data['Low'])/2 - data['Vwap']
alpha = pd.DataFrame(alpha)
alpha.columns = ['alpha13']
return alpha
@timer
def alpha14(self):
close = self.close
close_delay = Delay(close,5)
close_delay.columns = ['close_delay']
data = pd.concat([close,close_delay],axis = 1, join = 'inner')
alpha = data['Close'] - data['close_delay']
alpha = pd.DataFrame(alpha)
alpha.columns = ['alpha14']
return alpha
@timer
def alpha15(self):
Open = self.open
close = self.close
close_delay = Delay(close,1)
close_delay.columns = ['close_delay']
data = pd.concat([Open,close_delay],axis = 1,join = 'inner')
alpha = data['Open']/data['close_delay'] - 1
alpha = pd.DataFrame(alpha)
alpha.columns = ['alpha15']
return alpha
@timer
def alpha16(self):
vwap = self.vwap
volume = self.volume
data = pd.concat([vwap,volume],axis = 1, join = 'inner')
r1 = Rank(pd.DataFrame(data['Vol']))
r2 = Rank(pd.DataFrame(data['Vwap']))
rank = pd.concat([r1,r2],axis = 1, join = 'inner')
rank.columns = ['r1','r2']
corr = Corr(rank,5)
alpha = -1 * TsMax(Rank(corr),5)
alpha.columns = ['alpha16']
return alpha
@timer
def alpha17(self):
vwap = self.vwap
close = self.close
data = pd.concat([vwap,close],axis = 1, join = 'inner')
data['vwap_max15'] = TsMax(data['Vwap'],15)
data['close_delta5'] = Delta(data['Close'],5)
temp = np.power(data['vwap_max15'],data['close_delta5'])
alpha = Rank(pd.DataFrame(temp))
alpha.columns = ['alpha17']
return alpha
@timer
def alpha18(self):
"""
This one is similar to alpha14.
"""
close = self.close
close_delay = Delay(close,5)
close_delay.columns = ['close_delay']
data = pd.concat([close,close_delay],axis = 1, join = 'inner')
alpha = data['Close']/data['close_delay']
alpha = pd.DataFrame(alpha)
alpha.columns = ['alpha18']
return alpha
@timer
def alpha19(self):
close = self.close
close_delay = Delay(close,5)
close_delay.columns = ['close_delay']
data = pd.concat([close,close_delay],axis = 1, join = 'inner')
data['temp1'] = (data['Close'] - data['close_delay'])/data['close_delay']
data['temp2'] = (data['Close'] - data['close_delay'])/data['Close']
temp1 = pd.DataFrame(data['temp1'][data['Close'] < data['close_delay']])
temp2 = pd.DataFrame(data['temp2'][data['Close'] >= data['close_delay']])
temp1.columns = ['temp']
temp2.columns = ['temp']
temp = pd.concat([temp1,temp2],axis = 0)
data = pd.concat([data,temp],axis = 1,join = 'outer')
alpha = pd.DataFrame(data['temp'])
alpha.columns = ['alpha19']
return alpha
@timer
def alpha20(self):
close = self.close
close_delay = Delay(close,6)
close_delay.columns = ['close_delay']
data = pd.concat([close,close_delay],axis = 1, join = 'inner')
alpha = (data['Close'] - data['close_delay'])/data['close_delay']
alpha = pd.DataFrame(alpha)
alpha.columns = ['alpha20']
return alpha
@timer
def alpha21(self):
close = self.close
close_mean = Mean(close,6)
alpha = RegBeta(0,close_mean,None,6)
alpha.columns = ['alpha21']
return alpha
@timer
def alpha22(self):
close = self.close
close_mean = Mean(close,6)
data = pd.concat([close,close_mean],axis = 1,join = 'inner')
data.columns = ['close','close_mean']
temp = pd.DataFrame((data['close'] - data['close_mean'])/data['close_mean'])
temp_delay = Delay(temp,3)
data_temp = pd.concat([temp,temp_delay],axis = 1,join = 'inner')
data_temp.columns = ['temp','temp_delay']
temp2 = pd.DataFrame(data_temp['temp'] - data_temp['temp_delay'])
alpha = SMA(temp2,12,1)
alpha.columns = ['alpha22']
return alpha
@timer
def alpha23(self):
close = self.close
close_std = STD(close,20)
close_delay = Delay(close,1)
data = pd.concat([close,close_std,close_delay],axis = 1, join = 'inner')
data.columns = ['Close','close_std','close_delay']
data['temp'] = data['close_std']
data['temp'][data['Close'] <= data['close_delay']] = 0
temp = | pd.DataFrame(data['temp']) | pandas.DataFrame |
#!/usr/bin/env python3
import numpy as np
import pandas as pd
import pysal
import cv2
import scipy
import scipy.spatial as spatial
class NeighborhoodMatrixComputation(object):
def compute_neighborhood_matrix(self,
neighborhood_matrix_type,
neighborhood_p0,
neighborhood_p1,
**kwargs):
''' Computes the neighborhood matrix from the label image as a pysal object.
Stores it in the dataframe neighborhood_matrix_df.
:neighborhood_matrix_type: str
should be 'k', 'radius', or 'network'
:neighborhood_p0: int or float
minimum bound for the neighborhood.
should be int for 'k' or 'network'. Can be int or float for 'radius'
:neighborhood_p1: int or float
maximum bound for the neighborhood.
should be int for 'k' or 'network'. Can be int or float for 'radius'
:iterations: int. Optional
Only used for 'network' neighborhood computation mode.
Number of iterations of dilation to select the object neighbors from the label image.
Default value: 1
:kd_tree_approx: boolean. Optional
Used for 'radius' and 'k'.
If set to True, then use a kd-tree to find kNN.
Else compute all pair distances.
:save_neighbors: bool
if True, will add a column to feature_table with the ids of neighbors
if False, do not keep the ids of neighbors
'''
if self._debug:
print("\ncompute_neighborhood_matrix", neighborhood_matrix_type,
neighborhood_p0,
neighborhood_p1,
kwargs)
neighborhood_p0, neighborhood_p1, iterations = self._check_neighborhood_matrix_parameters(neighborhood_matrix_type, neighborhood_p0, neighborhood_p1, **kwargs)
try:
w = self.get_neighborhood_matrix(neighborhood_matrix_type, neighborhood_p0, neighborhood_p1, **kwargs)
## already computed
return
except ValueError:
suffix = self.get_suffix(neighborhood_matrix_type, neighborhood_p0, neighborhood_p1, **kwargs)
if neighborhood_matrix_type == 'k':
neighbor_dict = self._compute_matrix_k(suffix,
neighborhood_p0,
neighborhood_p1,
**kwargs)
elif neighborhood_matrix_type == 'radius':
neighbor_dict = self._compute_matrix_radius(suffix,
neighborhood_p0,
neighborhood_p1,
**kwargs)
elif neighborhood_matrix_type == 'network':
neighbor_dict = self._compute_matrix_network(suffix,
neighborhood_p0,
neighborhood_p1,
**kwargs)
neighborhood_matrix = pysal.weights.weights.W(neighbor_dict)
nb_pairs = np.sum(neighborhood_matrix.full()[0])/2
self.neighborhood_matrix_df.loc[self.neighborhood_matrix_df.shape[0],:] = [neighborhood_matrix_type, neighborhood_p0, neighborhood_p1, iterations, \
neighborhood_matrix, \
nb_pairs]
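# Hypothetical usage sketch (assumes `analysis` is an instance of a class that mixes
# in NeighborhoodMatrixComputation with a populated feature_table; values are placeholders):
#   analysis.compute_neighborhood_matrix('k', 0, 5, save_neighbors=True)   # 5 nearest neighbours
#   analysis.compute_neighborhood_matrix('radius', 0, 50.0)                # neighbours within 50 px
#   analysis.compute_neighborhood_matrix('network', 0, 1, iterations=1)    # touching objects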
def _compute_matrix_k(self,
suffix,
neighborhood_p0,
neighborhood_p1,
save_neighbors=True,
kd_tree_approx=False,
**kwargs):
if self.NN is None:
self._compute_NN(kd_tree_approx)
NN = self.NN[:,neighborhood_p0:neighborhood_p1+1]
NN = [[self.feature_table.iloc[l][self._column_objectnumber] for l in line] for line in NN]
self.feature_table.loc[:, 'DistanceLastNeighbor_{}'.format(suffix)] = self.NN_distances[:,neighborhood_p1]
# if save_neighbors:
# self.feature_table['neighbors_{}'.format(suffix)] = np.empty((self.n, 0)).tolist()
if neighborhood_p0 == 0:
neighbor_dict = {line[0]: line[1:] for line in NN if len(line) > 1}
NN_for_df = [line[1:] for line in NN]
if save_neighbors:
self.feature_table['neighbors_{}'.format(suffix)] = np.empty((self.n, 0)).tolist()
self.feature_table.loc[:, 'neighbors_{}'.format(suffix)] = pd.Series(NN_for_df)
else:
if save_neighbors:
self.feature_table.loc[:, 'neighbors_{}'.format(suffix)] = pd.Series(NN)
ObjNum = self.feature_table[self._column_objectnumber].values
neighbor_dict = {obj_num: neighbors for (obj_num, neighbors) in zip(ObjNum, NN) if len(neighbors)>0}
return neighbor_dict
def _compute_matrix_radius(self,
suffix,
neighborhood_p0,
neighborhood_p1,
save_neighbors=True,
kd_tree_approx=False,
**kwargs):
## keep neighborhood_p0 <= r <= neighborhood_p1
if self.NN is None:
self._compute_NN(kd_tree_approx)
mask = (self.NN_distances > neighborhood_p1)+(self.NN_distances < neighborhood_p0)
mask[:,0] = True
NN = np.array(self.NN)
NN[mask] = -1
NN = [[l for l in line if l != -1] for line in NN]
NN = [[self.feature_table.iloc[l][self._column_objectnumber] for l in line if l != -1] for line in NN]
NN_distances = [[l for l in line if (l < neighborhood_p1) and (l > neighborhood_p0)] for line in self.NN_distances]
self.feature_table.loc[:, 'NumberNeighbors_{}'.format(suffix)] = np.sum(~mask, axis=1)
if save_neighbors:
self.feature_table['neighbors_{}'.format(suffix)] = np.empty((self.n, 0)).tolist()
self.feature_table.loc[:, 'neighbors_{}'.format(suffix)] = pd.Series(NN)
ObjNum = self.feature_table[self._column_objectnumber].values
neighbor_dict = {obj_num: neighbors for (obj_num, neighbors) in zip(ObjNum, NN) if len(neighbors)>0}
# print(neighbor_dict)
return neighbor_dict
def _compute_matrix_network(self,
suffix,
neighborhood_p0,
neighborhood_p1,
save_neighbors=True,
iterations=1,
**kwargs):
def custom_pow(firstM, lastM, n):
if n > 1:
mylist = custom_pow(firstM, lastM, n-1)
mylist.append(np.dot(mylist[-1], firstM))
return mylist
else:
return [np.dot(firstM,lastM)]
def matrix_treatment(M):
np.fill_diagonal(M,0)
M[M>1] = 1
return M
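# Worked example (illustrative): for a 3-object chain A-B-C with
#   adj = [[0,1,0],[1,0,1],[0,1,0]]
# custom_pow(adj, adj, 1) returns [adj @ adj]; after matrix_treatment the diagonal is
# zeroed and entries are clipped to 1, so the k-th matrix flags pairs of objects
# reachable within k hops (here the A-C pair, reachable in two hops).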
## actual neighbors
## labels start at 1
if iterations not in self.adjacency_matrix:
self._get_neighbors_from_label_image(self.get_suffix('network', 0, 1, iterations=iterations),
iterations=iterations, save_neighbors=save_neighbors,
**kwargs)
## compute the needed power matrices
if len(self.adjacency_matrix[iterations]) < neighborhood_p1:
last_adj_mat = self.adjacency_matrix[iterations][-1]
first_adj_mat = self.adjacency_matrix[iterations][0]
new_power_matrices = custom_pow(first_adj_mat, \
last_adj_mat,
neighborhood_p1 - len(self.adjacency_matrix[iterations]))
new_power_matrices[:] = map(matrix_treatment, new_power_matrices)
self.adjacency_matrix[iterations].extend(new_power_matrices)
obj_nums = self.feature_table[self._column_objectnumber].values
### real network
if neighborhood_p1 == 1:
list_where = np.where(self.adjacency_matrix[iterations][0])
### extended network
else:
cumsum_mat = self.adjacency_matrix[iterations][:neighborhood_p0]
cumsum_mat = np.sum(cumsum_mat, axis=0)
cumsum_mat2 = self.adjacency_matrix[iterations][neighborhood_p0:neighborhood_p1]
cumsum_mat2 = np.sum(cumsum_mat2, axis=0)
w = cumsum_mat2 - cumsum_mat
w[w<0] = 0
list_where = np.where(w)
neighbor_dict = {}
# print(list_where)
for key, value in zip(list_where[0], list_where[1]):
neighbor_dict.setdefault(obj_nums[key], []).append(obj_nums[value])
if neighborhood_p1 != 1:
## neighborhood_p1 == 1 done by _get_neighbors_from_label_image
# index_for_series = self.feature_table.index[self.feature_table[self._column_objectnumber].isin(neighbor_dict.keys())]
# correct_keys = self.feature_table[self._column_objectnumber].isin(neighbor_dict.keys())
# print(len(neighbor_dict.keys()), np.sum(correct_keys), w.shape)
self.feature_table.loc[:, 'NumberNeighbors_{}'.format(suffix)] = np.sum(w, axis=0)
if save_neighbors:
self.feature_table.loc[:, 'neighbors_{}'.format(suffix)] = pd.Series([obj_nums[list_where[1][list_where[0] == index]] for index in range(self.n)])
return neighbor_dict
def _compute_NN(self, kd_tree_approx=False):
coordinates = self.feature_table.loc[:,self._column_x_y].values
if kd_tree_approx:
tree = spatial.KDTree(coordinates)
NN_distances, NN = tree.query(coordinates, self.n)
else:
## no approximation, compute all distances
distance_bw_points = spatial.distance.cdist(coordinates, coordinates)
NN = np.argsort(distance_bw_points, axis=1)
xs = np.tile(np.arange(self.n), self.n).reshape((self.n, self.n)).T
NN_distances = distance_bw_points[xs,NN]
self.NN = NN
self.NN_distances = NN_distances
def _get_neighbors_from_label_image(self,
suffix,
iterations=1,
save_neighbors=True,
**kwargs):
''' From the image_label, find all_neighbors
Assumes that the labels on the image are the same as in the feature table.
on the label image, 0 is bg
objectNumbers start at 1
'''
labels = np.unique(self.feature_table[self._column_objectnumber].values)
#np.sort(list(set(np.unique(self.image_label[self.image_label > 0].flatten())).intersection(np.unique(self.feature_table[self._column_objectnumber]))))
if self._debug:
print("_get_neighbors_from_label_image\n#labels = {}; #objects = {}, starting label id = {}, iterations={}".format(len(labels), self.n, min(labels), iterations))
# self.feature_table['neighbors_{}'.format(suffix)] = np.empty((self.n, 0)).tolist()
# self.feature_table['NumberNeighbors_{}'.format(suffix)] = 0
sum_neighbors = []
if save_neighbors:
list_neighbors = []
adj_mat = np.zeros((self.n, self.n))
if self._debug:
print("_get_neighbors_from_label_image", adj_mat.shape)
for index_l, l in enumerate(labels):
list_neighbor, sum_neighbor = self._get_label_neighbors(l, iterations)
sum_neighbors.append(sum_neighbor)
if save_neighbors:
list_neighbors.append(list_neighbor)
if len(list_neighbor) > 0:
adj_mat[index_l, np.isin(labels, list_neighbor)] = 1
self.adjacency_matrix[iterations] = [adj_mat]
if self._debug:
print("_get_neighbors_from_label_image", suffix)
self.feature_table.loc[self.feature_table[self._column_objectnumber].isin(labels), 'NumberNeighbors_{}'.format(suffix)] = sum_neighbors
if save_neighbors:
index_for_series = self.feature_table.index[self.feature_table[self._column_objectnumber].isin(labels)]
self.feature_table.loc[self.feature_table[self._column_objectnumber].isin(labels), 'neighbors_{}'.format(suffix)] = | pd.Series(list_neighbors, index=index_for_series) | pandas.Series |
# -*- coding: utf-8 -*-
import numpy as np
import pandas as pd
import json
import matplotlib.pyplot as plt
from datetime import datetime
from sys import stdout
from sklearn.preprocessing import scale
from sklearn.gaussian_process import GaussianProcessRegressor
from sklearn.gaussian_process.kernels import RBF, ConstantKernel, WhiteKernel
from sklearn.utils.validation import indexable
from sklearn.model_selection import check_cv
from sklearn.metrics.scorer import check_scoring
from sklearn.model_selection._validation import _fit_and_score
from sklearn.externals.joblib import Parallel, delayed
def compute_features():
# Load json data
with open('json_file.json') as data_file:
patients = json.load(data_file)
print("JSON file loaded")
# Features computation
print("Features computation launched")
visits = []
for patient in patients.values():
for i in range(1, len(patient['visits']) + 1):
visits.append(patient['visits'][str(i)])
n_visits = len(visits)
print("n_visits = %s" % n_visits)
# Features DataFrame with encounter_nums index
encounter_nums = [int(visit.get('encounter_num')) for visit in visits]
X = pd.DataFrame(index=encounter_nums)
# Time vector & censoring indicator
print("Adding labels...", end="")
next_visit = [visit.get('next_visit') for visit in visits]
T = np.array([1e10 if str(t) == 'none' else t for t in next_visit]).astype(
int)
end_dates = pd.to_datetime([visit.get('end_date') for visit in visits])
C = pd.to_datetime('2016-01-15 00:00:00') - end_dates
days, seconds = C.days, C.seconds
C = days * 24 + seconds // 3600 # in hours (discrete)
delta = (T <= C).astype(int)
Y = T
Y[delta == 0] = C[delta == 0]
labels = pd.DataFrame({'Y': Y, 'delta': delta}, index=encounter_nums)
X = pd.concat([X, labels], axis=1)
print(" done")
# Basic features
print("Adding basic features...", end="")
# Add also patient_num & encounter_num for future random choice
patient_num, encounter_num = [], []
sex, baseline_HB, genotype_SS, age, transfu_count = [], [], [], [], []
LS_ALONE, LS_INACTIVE, MH_ACS, MH_AVN, MH_DIALISIS = [], [], [], [], []
MH_HEART_FAILURE, MH_ISCHEMIC_STROKE, MH_LEG_ULCER = [], [], []
MH_NEPHROPATHY, MH_PHTN, MH_PRIAPISM, MH_RETINOPATHY = [], [], [], []
OPIOID_TO_DISCHARGE, ORAL_OPIOID, USED_MORPHINE = [], [], []
USED_OXYCODONE, duration, previous_visit, rea = [], [], [], []
for patient in patients.values():
for _ in range(1, len(patient['visits']) + 1):
patient_num.append(patient['patient_num'])
sex.append(1 if int(patient['sex']) == 1 else 0)
baseline_HB.append(patient['baseline_HB'])
genotype_SS.append(patient['genotype_SS'])
for visit in visits:
encounter_num.append(visit.get('encounter_num'))
age.append(visit.get('age'))
rea.append(visit.get('rea'))
LS_ALONE.append(visit.get('LS_ALONE'))
LS_INACTIVE.append(visit.get('LS_INACTIVE'))
MH_ACS.append(visit.get('MH_ACS'))
MH_AVN.append(visit.get('MH_AVN'))
MH_DIALISIS.append(visit.get('MH_DIALISIS'))
MH_HEART_FAILURE.append(visit.get('MH_HEART_FAILURE'))
MH_ISCHEMIC_STROKE.append(visit.get('MH_ISCHEMIC_STROKE'))
MH_LEG_ULCER.append(visit.get('MH_LEG_ULCER'))
MH_NEPHROPATHY.append(visit.get('MH_NEPHROPATHY'))
MH_PHTN.append(visit.get('MH_PHTN'))
MH_PRIAPISM.append(visit.get('MH_PRIAPISM'))
MH_RETINOPATHY.append(visit.get('MH_RETINOPATHY'))
ORAL_OPIOID.append(visit.get('ORAL_OPIOID'))
USED_MORPHINE.append(visit.get('USED_MORPHINE'))
USED_OXYCODONE.append(visit.get('USED_OXYCODONE'))
duration.append(visit.get('duration'))
previous_visit.append(visit.get('previous_visit'))
transfu_count.append(visit.get('transfu_count'))
threshold = 24 * 30 * 18 # 18 months
previous_visit = [0 if (t == 'none' or t > threshold) else 1 for t in
previous_visit]
MH_ACS = [1 if int(x) == 2 else x for x in MH_ACS]
MH_AVN = [1 if int(x) == 2 else x for x in MH_AVN]
MH_DIALISIS = [1 if int(x) == 2 else x for x in MH_DIALISIS]
MH_HEART_FAILURE = [1 if int(x) == 2 else x for x in MH_HEART_FAILURE]
MH_ISCHEMIC_STROKE = [1 if int(x) == 2 else x for x in MH_ISCHEMIC_STROKE]
MH_LEG_ULCER = [1 if int(x) == 2 else x for x in MH_LEG_ULCER]
MH_NEPHROPATHY = [1 if int(x) == 2 else x for x in MH_NEPHROPATHY]
MH_PHTN = [1 if int(x) == 2 else x for x in MH_PHTN]
MH_PRIAPISM = [1 if int(x) == 2 else x for x in MH_PRIAPISM]
MH_RETINOPATHY = [1 if int(x) == 2 else x for x in MH_RETINOPATHY]
X_basic = pd.DataFrame(
{'patient_num': patient_num, 'encounter_num': encounter_num, 'sex': sex,
'genotype_SS': genotype_SS, 'age': age, 'rea': rea,
'LS_INACTIVE': LS_INACTIVE, 'MH_ACS': MH_ACS, 'MH_AVN': MH_AVN,
'MH_DIALISIS': MH_DIALISIS, 'MH_HEART_FAILURE': MH_HEART_FAILURE,
'MH_ISCHEMIC_STROKE': MH_ISCHEMIC_STROKE,
'MH_LEG_ULCER': MH_LEG_ULCER, 'LS_ALONE': LS_ALONE,
'MH_NEPHROPATHY': MH_NEPHROPATHY, 'MH_PHTN': MH_PHTN,
'MH_PRIAPISM': MH_PRIAPISM, 'MH_RETINOPATHY': MH_RETINOPATHY,
'ORAL_OPIOID': ORAL_OPIOID, 'baseline_HB': baseline_HB,
'USED_MORPHINE': USED_MORPHINE, 'USED_OXYCODONE': USED_OXYCODONE,
'duration': duration, 'previous_visit': previous_visit,
'transfu_count': transfu_count},
index=encounter_nums)
X = pd.concat([X, X_basic], axis=1)
print(" done")
# Bio data
print("Adding bio features...", end="")
bio_data, bio_names = pd.DataFrame(), []
for visit in visits:
encounter_num = int(visit.get('encounter_num'))
tmp = pd.DataFrame(index=[encounter_num])
end_date = pd.to_datetime(visit.get('end_date'))
for bio_name, bio_values in visit.get('bio').items():
# keep last value
bio_names.append(bio_name)
values = [val['nval_num'] for val in bio_values.values()]
tmp[bio_name] = values[-1]
# only keep last 48h values
offset = end_date - pd.DateOffset(hours=48)
values, index = [], []
for dic in bio_values.values():
val_time = pd.to_datetime(dic['date_bio'])
if val_time > offset:
values.append(float(dic['nval_num']))
index.append(float(
(val_time - offset) / pd.Timedelta(
'1 hour')))
# if at least 2 pts, add slope
if len(values) > 1:
x, y = index, values
# least-squares
A = np.vstack([np.array(x), np.ones(len(x))]).T
slope, _ = np.linalg.lstsq(A, y)[0]
else:
slope = np.nan
bio_names.append(bio_name + ' slope')
tmp[bio_name + ' slope'] = slope
bio_data = bio_data.append(tmp)
bio_names_count = pd.Series(
bio_names).value_counts() * 100 / n_visits
bio_percentage = 35
bio_param_kept = bio_names_count[bio_names_count > bio_percentage]
bio_data = bio_data[bio_param_kept.index]
print(" done")
X = pd.concat([X, bio_data], axis=1)
# Vital parameters data
print("\nAdding vital parameters features...")
param_no_gp = ['Poids [kg]', 'Taille [cm]',
'Débit O2 [L/min]']
param_gp = ['Fréquence cardiaque [bpm]',
'Fréquence respiratoire [mvt/min]', 'PA max [mmHg]',
'PA min [mmHg]', 'Température [°C]',
'Saturation en oxygène [%]']
plot_curves_for_visits = np.random.randint(1, n_visits + 1, 3)
print("\nPlot Gaussian Processes learned for a few random sampled visits")
vital_parameter_data = pd.DataFrame()
count = 1
for nb_visit, visit in enumerate(visits):
stdout.write(
"\rVisit %s / %s" % (count, n_visits))
stdout.flush()
end_date = pd.to_datetime(visit.get('end_date'))
encounter_num = int(visit.get('encounter_num'))
tmp = pd.DataFrame(index=[encounter_num])
for vital_name, vital_values in visit.get(
'vital_parameters').items():
if vital_name in param_gp:
# only keep last 48h values
offset = end_date - pd.DateOffset(hours=48)
values, index = [], []
for dic in vital_values.values():
val_time = pd.to_datetime(dic['start_date'])
if val_time > offset:
values.append(float(dic['nval_num']))
index.append(float(
(val_time - offset) / pd.Timedelta(
'1 hour')))
if len(values) > 0:
x, y = index, values
# least-squares
A = np.vstack([np.array(x), np.ones(len(x))]).T
slope, intercept = np.linalg.lstsq(A, y)[0]
vals = np.array([slope, np.mean(values)])
kernel = ConstantKernel(1.0, (1e-3, 1e3)) * RBF(2, (
1, 5)) + WhiteKernel()
gp = GaussianProcessRegressor(kernel=kernel,
n_restarts_optimizer=10)
gp.fit(np.atleast_2d(x).T, np.array(y) - np.mean(y))
vals = np.append(vals, gp.kernel_.theta)
if count in plot_curves_for_visits:
nb_points = 100
x_new = np.linspace(0, 48, nb_points)
y_ = gp.predict(np.atleast_2d(x_new).T)
y_ += np.mean(y)
plt.figure()
plt.plot(x, y, 'r.', markersize=10,
label=u'Observations')
plt.plot(x_new, y_, 'b-', label=u'Prediction')
plt.xlabel('last 48h')
plt.ylabel('values')
plt.title(vital_name + ", encounter_num = %s"
% encounter_num)
plt.legend()
plt.show()
else:
vals = np.array([np.nan] * 5)
columns = ["%s slope" % vital_name,
"%s mean" % vital_name,
"%s cst_kernel" % vital_name,
"%s length_scale_RBF" % vital_name,
"%s noise_level" % vital_name]
vals = pd.DataFrame(np.atleast_2d(vals), columns=columns,
index=[encounter_num])
tmp = pd.concat([tmp, vals], axis=1)
if vital_name in param_no_gp:
if vital_name in ['Poids [kg]', 'Taille [cm]']:
values = []
for dic in vital_values.values():
values.append(float(dic['nval_num']))
columns = ["%s mean" % vital_name]
vals = pd.DataFrame(np.atleast_2d(np.mean(values)),
columns=columns,
index=[encounter_num])
if vital_name == 'Débit O2 [L/min]':
values, index = [], []
for dic in vital_values.values():
val_time = pd.to_datetime(dic['start_date'])
values.append(float(dic['nval_num']))
index.append(val_time)
if len(values) > 0:
idx_pos = [idx if is_positive else -1 for
idx, is_positive
in enumerate(np.array(values) > 0)]
delay = (end_date - index[
np.max(idx_pos)]) / pd.Timedelta('1 hour')
else:
delay = (end_date - start_date) / pd.Timedelta('1 hour')
columns = ["Débit O2 [L/min] delay"]
if delay < 0:
delay = 0
vals = pd.DataFrame(np.atleast_2d(delay),
columns=columns,
index=[encounter_num])
tmp = pd.concat([tmp, vals], axis=1)
vital_parameter_data = vital_parameter_data.append(tmp)
count += 1
# add BMI
bmi = vital_parameter_data["Poids [kg] mean"] / vital_parameter_data[
"Taille [cm] mean"] ** 2
bmi *= 1e4
bmi = pd.DataFrame(bmi, columns=['BMI'])
vital_parameter_data = pd.concat([vital_parameter_data, bmi], axis=1)
X = pd.concat([X, vital_parameter_data], axis=1)
# Syringes data
print("\nAdding syringes features...")
syringes_data = | pd.DataFrame() | pandas.DataFrame |
import sys
import numpy as np
import pandas as pd
from pandas.api.types import is_categorical_dtype, is_numeric_dtype, is_string_dtype
from formulae.transforms import TRANSFORMS, Proportion, Offset
from formulae.terms.call_utils import CallVarsExtractor
class Call:
"""Representation of a call in a model Term.
This class and ``Variable`` are the atomic components of a model term.
This object supports stateful transformations defined in ``formulae.transforms``.
A transformation of this type defines its parameters the first time it is called,
and then can be used to recompute the transformation with memorized parameter values.
This behavior is useful when implementing a predict method and using transformations such
as ``center(x)`` or ``scale(x)``. ``center(x)`` memorizes the value of the mean, and
``scale(x)`` memorizes both the mean and the standard deviation.
Parameters
----------
call: formulae.terms.call_resolver.LazyCall
The call expression returned by the parser.
is_response: bool
Indicates whether this call represents a response. Defaults to ``False``.
"""
def __init__(self, call, is_response=False):
self.data = None
self.env = None
self._intermediate_data = None
self._type = None
self.is_response = is_response
self.call = call
self.name = str(self.call)
def __hash__(self):
return hash(self.call)
def __eq__(self, other):
if not isinstance(other, type(self)):
return False
return self.call == other.call
def __repr__(self):
return self.__str__()
def __str__(self):
return f"{self.__class__.__name__}({self.name})"
def accept(self, visitor):
"""Accept method called by a visitor.
Visitors are those available in call_utils.py, and are used to work with call terms.
"""
return visitor.visitCallTerm(self)
@property
def var_names(self):
"""Returns the names of the variables involved in the call, not including the callee.
This is used to determine which variables of the data set being used are actually used in
the model. This allows us to subset the original data set and only raise errors regarding
missing values when the missingness happens in variables used in the model.
Uses a visitor of class ``CallVarsExtractor`` that walks through the components of the call
and returns a list with the name of the variables in the call.
Returns
----------
result: list
A list of strings with the names of the variables in the call, not
including the name of the callee.
"""
return set(CallVarsExtractor(self).get())
def set_type(self, data_mask, env):
"""Evaluates function and determines the type of the result of the call.
Evaluates the function call and sets the ``._type`` property to ``"numeric"`` or
``"categoric"`` depending on the type of the result. It also stores the intermediate result
of the evaluation in ``._intermediate_data`` to prevent us from computing the same thing
more than once.
Parameters
----------
data_mask: pd.DataFrame
The data frame where variables are taken from
env: Environment
The environment where values and functions are taken from.
"""
self.env = env.with_outer_namespace(TRANSFORMS)
x = self.call.eval(data_mask, self.env)
if is_numeric_dtype(x):
self._type = "numeric"
elif is_string_dtype(x) or is_categorical_dtype(x):
self._type = "categoric"
elif isinstance(x, Proportion):
self._type = "proportion"
elif isinstance(x, Offset):
self._type = "offset"
x.set_size(len(data_mask.index))
else:
raise ValueError(f"Call result is of an unrecognized type ({type(x)}).")
self._intermediate_data = x
def set_data(self, encoding=False):
"""Finishes the evaluation of the call according to its type.
Evaluates the call according to its type and stores the result in ``.data``. It does not
support multi-level categoric responses yet. If ``self.is_response`` is ``True`` and the
variable is of a categoric type, this method returns a 1d array of 0-1 instead of a matrix.
In practice, it just completes the evaluation that started with ``self.set_type()``.
Parameters
----------
encoding: bool
Indicates if it uses full or reduced encoding when the type of the call is
categoric. Omitted when the result of the call is numeric.
"""
try:
if self._type is None:
raise ValueError("Call result type is not set.")
if self._type not in ["numeric", "categoric", "proportion", "offset"]:
raise ValueError(f"Call result is of an unrecognized type ({self._type}).")
if self._type == "numeric":
self.data = self._eval_numeric(self._intermediate_data)
elif self._type == "categoric":
self.data = self._eval_categoric(self._intermediate_data, encoding)
elif self._type == "proportion":
self.data = self._eval_proportion(self._intermediate_data)
elif self._type == "offset":
self.data = self._eval_offset(self._intermediate_data)
except:
print("Unexpected error while trying to evaluate a Call:", sys.exc_info()[0])
raise
def _eval_numeric(self, x):
"""Finishes evaluation of a numeric call.
Converts the intermediate values of the call into a numpy array of shape ``(n, 1)``,
where ``n`` is the number of observations. This method is used both in ``self.set_data``
and in ``self.eval_new_data``.
Parameters
----------
x: np.ndarray or pd.Series
The intermediate values resulting from the call.
Returns
----------
result: dict
A dictionary with keys ``"value"`` and ``"type"``. The first contains the result of the
evaluation, and the latter is equal to ``"numeric"``.
"""
if isinstance(x, np.ndarray):
if x.ndim == 1:
value = x[:, np.newaxis]
else:
value = x
elif isinstance(x, pd.Series):
value = x.to_numpy()[:, np.newaxis]
else:
raise ValueError(f"Call result is of an unrecognized type ({type(x)}).")
return {"value": value, "type": "numeric"}
def _eval_categoric(self, x, encoding):
"""Finishes evaluation of categoric call.
First, it checks whether the intermediate evaluation returned is ordered. If not, it
creates a category where the levels are the observed in the variable. They are sorted
according to ``sorted()`` rules.
Then, it determines the reference level as well as all the other levels. If the variable
is a response, the value returned is a dummy with 1s for the reference level and 0s
elsewhere. If it is not a response variable, it determines the matrix of dummies according
to the levels and the encoding passed.
Parameters
----------
x: np.ndarray or pd.Series
The intermediate values of the variable.
encoding: bool
Indicates if it uses full or reduced encoding.
Returns
----------
result: dict
A dictionary with keys ``"value"``, ``"type"``, ``"levels"``, ``"reference"``, and
``"encoding"``. They represent the result of the evaluation, the type, which is
``"categoric"``, the levels observed in the variable, the level used as reference when
using reduced encoding, and whether the encoding is ``"full"`` or ``"reduced"``.
"""
if not hasattr(x.dtype, "ordered") or not x.dtype.ordered:
categories = sorted(x.unique().tolist())
cat_type = pd.api.types.CategoricalDtype(categories=categories, ordered=True)
x = x.astype(cat_type)
reference = x.min()
levels = x.cat.categories.tolist()
if self.is_response:
value = np.atleast_2d(np.where(x == reference, 1, 0)).T
encoding = None
else:
if isinstance(encoding, list):
encoding = encoding[0]
if isinstance(encoding, dict):
encoding = encoding[self.name]
if encoding:
value = pd.get_dummies(x).to_numpy()
encoding = "full"
else:
value = pd.get_dummies(x, drop_first=True).to_numpy()
encoding = "reduced"
return {
"value": value,
"type": "categoric",
"levels": levels,
"reference": reference,
"encoding": encoding,
}
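# Illustrative example (assumed data): for a factor with observed levels ["a", "b", "c"],
# "full" encoding keeps all pd.get_dummies(x) columns, while "reduced" drops the
# reference level ("a"), e.g. for x = ["a", "b", "c"]:
#   full    -> [[1,0,0],[0,1,0],[0,0,1]]
#   reduced -> [[0,0],[1,0],[0,1]]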
def _eval_proportion(self, proportion):
if not self.is_response:
raise ValueError("'prop()' can only be used in the context of a response term.")
return {"value": proportion.eval(), "type": "proportion"}
def _eval_offset(self, offset):
if self.is_response:
raise ValueError("'offset() cannot be used in the context of a response term.")
return {"value": offset.eval(), "type": "offset"}
def eval_new_data(self, data_mask): # pylint: disable = inconsistent-return-statements
"""Evaluates the function call with new data.
This method evaluates the function call within a new data mask. If the transformation
applied is a stateful transformation, it uses the proper object that remembers all
parameters or settings that may have been set in a first pass.
Parameters
----------
data_mask: pd.DataFrame
The data frame where variables are taken from
Returns
----------
result: np.array
The rules for the shape of this array are the rules for ``self._eval_numeric()`` and
``self._eval_categoric()``. The first applies for numeric calls, the second for
categoric ones.
"""
if self._type in ["numeric", "categoric"]:
x = self.call.eval(data_mask, self.env)
if self._type == "numeric":
return self._eval_numeric(x)["value"]
else:
return self._eval_new_data_categoric(x)
elif self._type == "proportion":
if self._intermediate_data.trials_type == "constant":
# Return value passed in the second component
return np.ones((len(data_mask.index), 1)) * self.call.args[1].value
else:
# Extract name of the second component
name = self.call.args[1].name
values = data_mask[name]
if isinstance(values, pd.Series):
values = values.values[:, np.newaxis]
return values
elif self._type == "offset":
if self._intermediate_data.type == "constant":
# Return value passed as the argument
return np.ones((len(data_mask.index), 1)) * self.call.args[0].value
else:
# Extract name of the argument
name = self.call.args[0].name
values = data_mask[name]
if isinstance(values, pd.Series):
values = values.values[:, np.newaxis]
return values
def _eval_new_data_categoric(self, x):
"""Evaluates the call with new data when the result of the call is categoric.
This method also checks the levels observed in the new data frame are included within the
set of the levels of the result of the original call If not, an error is raised.
x: np.ndarray or pd.Series
The intermediate values of the variable.
Returns
----------
result: np.array
Numeric numpy array ``(n, p)``, where ``n`` is the number of observations and ``p`` the
number of dummy variables used in the numeric representation of the categorical
variable.
"""
# Raise error if passing a level that was not observed.
new_data_levels = pd.Categorical(x).dtype.categories.tolist()
if set(new_data_levels).issubset(set(self.data["levels"])):
series = | pd.Categorical(x, categories=self.data["levels"]) | pandas.Categorical |
import pandas as pd
import numpy as np
from pandas import DataFrame, get_dummies
import keras
from keras.layers import Dense, Dropout
from keras.models import Sequential
from keras.utils import to_categorical
from keras.callbacks import EarlyStopping
from keras.constraints import max_norm
from sklearn.preprocessing import MinMaxScaler
from sklearn.model_selection import train_test_split
import matplotlib.pyplot as plt
f = pd.read_csv('presidents-data-words-january-3-2018.csv')
df = DataFrame(f)
df = df.dropna(subset=['dagalb','nseg','nsyll','nstress','mean'])
early_stop = EarlyStopping(patience=5)
X_cols = ['widx','lexstress','nseg','nsyll','nstress','pos','dep','doc.freq','d.inform.3','corpus.freq','c.inform.3','category']
X = df[X_cols]
y = np.array(to_categorical(df.dagalb))
cat_cols = ['lexstress','pos','dep','category']
scale_cols = ['widx','nseg','nsyll','nstress','doc.freq','d.inform.3','corpus.freq','c.inform.3']
for c in cat_cols:
dum = | pd.get_dummies(X[c], columns=[c], prefix=c) | pandas.get_dummies |
from typing import Any, Dict, List, Optional
from copy import copy
import os
from lunchbox.enforce import Enforce, EnforceError
from pandas import DataFrame, DatetimeIndex
from schematics.exceptions import DataError
import dash
import dash_core_components as dcc
import dash_html_components as html
import dash_table
import flask
import lunchbox.tools as lbt
import rolling_pin.blob_etl as rpb
import shekels.core.config as cfg
import shekels.core.data_tools as sdt
# ------------------------------------------------------------------------------
# TODO: refactor components tests to use selenium and be less brittle
# TODO: add JSON editor component for config
# APP---------------------------------------------------------------------------
def get_dash_app(server, storage_type='memory'):
# type: (flask.Flask, str) -> dash.Dash
'''
Generate Dash Flask app instance.
Args:
server (Flask): Flask instance.
storage_type (str): Storage type (used for testing). Default: memory.
Returns:
Dash: Dash app instance.
'''
store = dcc.Store(id='store', storage_type=storage_type)
icon = html.Img(id='icon', src='/assets/icon.svg')
tabs = dcc.Tabs(
id='tabs',
className='tabs',
value='plots',
children=[
dcc.Tab(className='tab', label='plots', value='plots'),
dcc.Tab(className='tab', label='data', value='data'),
dcc.Tab(className='tab', label='config', value='config'),
dcc.Tab(className='tab', label='api', value='api'),
dcc.Tab(className='tab', label='docs', value='docs'),
dcc.Tab(className='tab', label='monitor', value='monitor'),
],
)
tabs = html.Div(id='tabs-container', children=[icon, tabs])
content = dcc.Loading(
id="content",
className='content',
type="dot",
fullscreen=True,
)
# path to resources inside pip package
assets = lbt.relative_path(__file__, "../resources")
# path to resources inside repo
if 'REPO_ENV' in os.environ.keys():
assets = lbt.relative_path(__file__, "../../../resources")
app = dash.Dash(
name='Shekels',
title='Shekels',
server=server,
external_stylesheets=['/static/style.css'],
assets_folder=assets,
)
app.layout = html.Div(id='layout', children=[store, tabs, content])
app.config['suppress_callback_exceptions'] = True
return app
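# Hypothetical usage sketch (names are assumptions):
#   server = flask.Flask(__name__)
#   app = get_dash_app(server)
#   app.run_server(debug=True)  # serves the tabs defined below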
# TABS--------------------------------------------------------------------------
def get_data_tab(query=None):
# type: (Optional[str]) -> List
'''
Get tab element for Shekels data.
Args:
query (str, optional): Query string. Default: None.
Return:
list: List of elements for data tab.
'''
# dummies must go first for element props behavior to work
content = html.Div(id='lower-content', children=[
html.Div(id='data-content', className='col', children=[])
])
return [*get_dummy_elements(), get_searchbar(query), content]
def get_plots_tab(query=None):
# type: (Optional[str]) -> List
'''
Get tab element for Shekels plots.
Args:
query (str, optional): Query string. Default: None.
Return:
list: List of elements for plots tab.
'''
# dummies must go first for element props behavior to work
content = html.Div(id='lower-content', children=[
html.Div(id='plots-content', className='col', children=[
dcc.Loading(id="progress-bar", type="circle")
])
])
return [*get_dummy_elements(), get_searchbar(query), content]
def get_config_tab(config):
# type: (Dict) -> List
'''
Get tab element for Shekels config.
Args:
config (dict): Configuration to be displayed.
Return:
list: List of elements for config tab.
'''
# dummies must go first for element props behavior to work
content = html.Div(id='lower-content', children=[
html.Div(id='config-content', className='col', children=[
get_key_value_table(
config, id_='config', header='config', editable=True
)
])
])
return [*get_dummy_elements(), get_configbar(config), content]
# MENUBARS----------------------------------------------------------------------
def get_searchbar(query=None):
# type: (Optional[str]) -> html.Div
'''
Get a row of elements used for querying Shekels data.
Args:
query (str, optional): Query string. Default: None.
Returns:
Div: Div with query field and buttons.
'''
if query is None:
query = 'select * from data'
spacer = html.Div(className='col spacer')
query = dcc.Input(
id='query',
className='col query',
value=query,
placeholder='SQL query that uses "FROM data"',
type='text',
autoFocus=True,
debounce=True
)
search = get_button('search')
init = get_button('init')
update = get_button('update')
row = html.Div(
className='row',
children=[query, spacer, search, spacer, init, spacer, update],
)
searchbar = html.Div(id='searchbar', className='menubar', children=[row])
return searchbar
def get_dummy_elements():
# type: () -> List
'''
    Returns a list of placeholder elements for every callback target so that
    the client does not throw errors on tabs that omit those elements.
Returns:
list: List of html elements.
'''
return [
dcc.Input(className='dummy', id='config-query', value=None),
html.Div(className='dummy', children=[dash_table.DataTable(id='config-table')]),
dcc.Input(className='dummy', id='query', value=None),
html.Div(className='dummy', id='config-search-button', n_clicks=None),
html.Div(className='dummy', id='search-button', n_clicks=None),
html.Div(className='dummy', id='init-button', n_clicks=None),
html.Div(className='dummy', id='update-button', n_clicks=None),
dcc.Upload(className='dummy', id='upload', contents=None),
html.Div(className='dummy', id='save-button', n_clicks=None),
]
def get_configbar(config, query='select * from config'):
# type: (Dict, Optional[str]) -> html.Div
'''
Get a row of elements used for configuring Shekels.
Args:
config (dict): Configuration to be displayed.
        query (str, optional): Query string. Default: 'select * from config'.
Returns:
Div: Div with buttons and JSON editor.
'''
spacer = html.Div(className='col spacer')
query = dcc.Input(
id='config-query',
className='col query',
value=query,
placeholder='SQL query that uses "FROM config"',
type='text',
autoFocus=True,
debounce=True
)
search = get_button('search')
search.id = 'config-search-button'
init = get_button('init')
upload = dcc.Upload(
id='upload',
children=[get_button('upload')]
)
save = get_button('save')
row = html.Div(
className='row',
children=[
query, spacer, search, spacer, init, spacer, upload, spacer, save
],
)
configbar = html.Div(id='configbar', className='menubar', children=[row])
return configbar
# ELEMENTS----------------------------------------------------------------------
def get_button(title):
# type: (str) -> html.Button
'''
Get a html button with a given title.
Args:
title (str): Title of button.
Raises:
TypeError: If title is not a string.
Returns:
Button: Button element.
'''
if not isinstance(title, str):
msg = f'{title} is not a string.'
raise TypeError(msg)
return html.Button(id=f'{title}-button', children=[title], n_clicks=0)
def get_key_value_table(
data, id_='key-value', header='', editable=False, key_order=None
):
    # type: (Dict, str, str, bool, Optional[List[str]]) -> html.Div
'''
Gets a Dash DataTable element representing given dictionary.
Args:
data (dict): Dictionary.
id_ (str, optional): CSS id. Default: 'key-value'.
header (str, optional): Table header title. Default: ''.
editable (bool, optional): Whether table is editable. Default: False.
key_order (list[str], optional): Order in which keys will be displayed.
Default: None.
Returns:
        Div: Tabular representation of given dictionary.
'''
data = rpb.BlobETL(data).to_flat_dict()
# determine keys
keys = sorted(list(data.keys()))
if key_order is not None:
diff = set(key_order).difference(keys)
if len(diff) > 0:
diff = list(sorted(diff))
msg = f'Invalid key order. Keys not found in data: {diff}.'
raise KeyError(msg)
keys = set(keys).difference(key_order)
keys = sorted(list(keys))
keys = key_order + keys
# transform data
data = [dict(key=k, value=data[k]) for k in keys]
cols = [] # type: Any
if len(data) > 0:
cols = data[0].keys()
cols = [{'name': x, 'id': x} for x in cols]
table = dash_table.DataTable(
data=data,
data_previous=data,
columns=cols,
id=f'{id_}-table',
sort_action='native',
sort_mode='multi',
page_action='none',
cell_selectable=True,
editable=editable,
)
head = html.Div(className='key-value-table-header', children=header)
return html.Div(
id=id_, className='key-value-table-container', children=[head, table]
)
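# Illustrative usage sketch (not wired into the app): the nested dict below is
# hypothetical; its keys are flattened by rpb.BlobETL before being rendered.
def _example_key_value_table():
    example_config = {'color_scheme': {'bg': '#181818', 'fg': '#A4A4A4'}}
    return get_key_value_table(
        example_config, id_='example', header='example config', editable=False
    )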
def get_datatable(data, color_scheme=cfg.COLOR_SCHEME, editable=False):
# type: (List[Dict], Dict[str, str], bool) -> dash_table.DataTable
'''
Gets a Dash DataTable element using given data.
    Assumes each dict element has all columns of the table as keys.
Args:
data (list[dict]): List of dicts.
color_scheme (dict, optional): Color scheme dictionary.
Default: COLOR_SCHEME.
editable (bool, optional): Whether table is editable. Default: False.
Returns:
DataTable: Table of data.
'''
cs = copy(cfg.COLOR_SCHEME)
cs.update(color_scheme)
cols = [] # type: Any
if len(data) > 0:
cols = data[0].keys()
cols = [{'name': x, 'id': x} for x in cols]
return dash_table.DataTable(
data=data,
columns=cols,
id='datatable',
fixed_rows=dict(headers=True),
sort_action='native',
sort_mode='multi',
cell_selectable=editable,
editable=editable,
)
def get_plots(data, plots):
# type: (List[dict], List[dict]) -> List[dcc.Graph]
'''
    Gets Dash plots using the given dicts.
    Assumes each dict element has all columns of the table as keys.
Args:
data (list[dict]): List of dicts defining data.
plots (list[dict]): List of dicts defining plots.
Raises:
EnforceError: If data is not a list of dicts.
EnforceError: If plots is not a list of dicts.
Returns:
list[dcc.Graph]: Plots.
'''
msg = 'Data must be a list of dictionaries. Given value: {a}.'
Enforce(data, 'instance of', list, message=msg)
for item in data:
Enforce(item, 'instance of', dict, message=msg)
msg = 'Plots must be a list of dictionaries. Given value: {a}.'
Enforce(plots, 'instance of', list, message=msg)
for item in plots:
Enforce(item, 'instance of', dict, message=msg)
# --------------------------------------------------------------------------
data_ = | DataFrame(data) | pandas.DataFrame |
import os
import os.path
import random
from operator import add
from datetime import datetime, date, timedelta
import numpy as np
import pandas as pd
from matplotlib import pyplot as plt
import seaborn as sns
import shutil
import ema_workbench
import time
## Step 2: Function for initiating the main dictionary of climate stations
def create_dic(a):
'''Function: creating a dictionary for each climate station'''
a = {}
keys = ['fM', 'iPot', 'rSnow', 'dSnow', 'cPrec', 'dP', 'elev', 'lat', 'long', 'fileName']
a = {key: None for key in keys}
return a
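# Minimal illustration: the argument is only a placeholder and is immediately
# replaced, so any hypothetical name yields the same empty station dict.
def _example_station_dict():
    station = create_dic('hypothetical_station')
    # station == {'fM': None, 'iPot': None, ..., 'fileName': None}
    return station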
def initialize_input_dict (mainFolderSki):
''' This function returns a dictionary , and addresses of 4 folders'''
'''Step 1'''
rootFolder = mainFolderSki
inputFolder = os.path.join(rootFolder,'input')
ablationFolder = os.path.join(inputFolder, 'Ablation')
accumulationFolder = os.path.join(inputFolder, 'Accumulation')
climate_ref_Folder = os.path.join(inputFolder, 'Climate_ref')
climate_Ref_Folder_org = os.path.join(inputFolder, 'Climate_ref_no_randomness_0')
climate_ref_Folder_rand_1 = os.path.join(inputFolder, 'Climate_ref_randomness_1')
climate_ref_Folder_rand_2 = os.path.join(inputFolder, 'Climate_ref_randomness_2')
    '''Step 2: Reading all file names inside the Ablation, Accumulation, and Climate folders'''
ablationFiles = []
for filename in os.walk(ablationFolder):
ablationFiles = filename[2]
accumulationFiles = list()
for filename in os.walk(accumulationFolder):
accumulationFiles = filename[2]
climate_ref_Files = list()
for filename in os.walk(climate_ref_Folder):
climate_ref_Files = filename[2]
'''Step 3: Reading files inside ablation folder '''
os.chdir(ablationFolder)
with open(ablationFiles[0], 'r') as file:
FM1 = file.read()
with open(ablationFiles[1], 'r') as file:
Ipot1 = file.read()
with open(ablationFiles[2], 'r') as file:
Rsnow1 = file.read()
'''Step 4: Reading the lines of files inside ablation folder'''
FM1 = FM1.replace('\n', '\t')
FM1 = FM1.split('\t')
Ipot1 = Ipot1.replace('\n', '\t').split('\t')
Rsnow1 = Rsnow1.replace('\n', '\t').split('\t')
'''Step 5: Reading the lines of files inside accumulation folder'''
os.chdir(accumulationFolder)
with open(accumulationFiles[0], 'r') as file:
cPrec = file.read()
with open(accumulationFiles[1], 'r') as file:
dSnow1 = file.read()
cPrec = cPrec.replace('\n', '\t')
cPrec = cPrec.split('\t')
dSnow1 = dSnow1.replace('\n', '\t').split('\t')
'''Step 6: Reading the lines of files inside climate folder'''
os.chdir(climate_ref_Folder)
with open('pcp.txt', 'r') as file:
pcpData = file.read()
with open('tmp.txt', 'r') as file:
tmpData = file.read()
pcpData = pcpData.split('\n')
for i in range(len(pcpData)):
pcpData[i] = pcpData[i].split(',')
    '''Step 7: Initializing the input dictionaries of climate stations, which hold the accumulation,
    ablation, and related information of the stations'''
nameStn = []
for file in climate_ref_Files:
if 'p.csv' in file:
#nameStn.append('n_' + file[-25: -5])
nameStn.append(file[-25: -5])
stnDicts = []
for i in range(len(nameStn)):
stnDicts.append(create_dic(nameStn[i]))
'''Step 8: Assigning the file names to the dictionary'''
for i in range (len(nameStn)):
stnDicts[i]['fileName'] = nameStn[i]
'''Step 9: Assigning the accumulation and ablation values'''
for stnDict in stnDicts:
for i, element in enumerate(FM1):
if element == stnDict['fileName'][:]:
#if element == stnDict['fileName'][2:]:
stnDict['fM'] = FM1[i+1]
for i, element in enumerate(Ipot1):
if element == stnDict['fileName'][:]:
#if element == stnDict['fileName'][2:]:
stnDict['iPot'] = Ipot1[i+1]
for i, element in enumerate(Rsnow1):
if element == stnDict['fileName'][:]:
#if element == stnDict['fileName'][2:]:
stnDict['rSnow'] = Rsnow1[i+1]
for i, element in enumerate(dSnow1):
if element == stnDict['fileName'][:]:
#if element == stnDict['fileName'][2:]:
stnDict['dSnow'] = dSnow1[i+1]
for i, element in enumerate(cPrec):
stnDict['cPrec'] = cPrec[1]
stnDict['dP'] = cPrec[3]
'''Step 10: Assigning the elevation, Lat and long to the dictionaries'''
for i in range(len(stnDicts)):
for j in range(1, len(pcpData)):
#if pcpData[j][1][2:-1] == stnDicts[i]['fileName'][2:]:
if pcpData[j][1][:-1] == stnDicts[i]['fileName'][:]:
stnDicts[i]['lat']= pcpData[j][2]
stnDicts[i]['long']= pcpData[j][3]
stnDicts[i]['elev']= pcpData[j][4]
return stnDicts, inputFolder, ablationFolder, accumulationFolder, climate_ref_Folder, climate_Ref_Folder_org, \
climate_ref_Folder_rand_1, climate_ref_Folder_rand_2
# Step 3 Snow Model
## S3.1 Initializing the main dictionary for a case study
caseStudyStns = {}
inputFolder = ''
ablationFolder = ''
accumulationFolder = ''
climateFolder = ''
climateFolder_org = ''
climateFolder1 = ''
climateFolder2 = ''
#root = r'C:\Saeid\Prj100\SA_2\snowModelUZH\case1_sattel-hochstuckli'
#root = r'C:\Saeid\Prj100\SA_2\snowModelUZH\case2_Atzmaening'
root = r'C:\Saeid\Prj100\SA_2\snowModelUZH\case3_hoch-ybrig\setup1'
#root = r'C:\Saeid\Prj100\SA_2\snowModelUZH\case4_villars-diablerets_elevations_b1339'
#root = r'C:\Saeid\Prj100\SA_2\snowModelUZH\case4_villars-diablerets_elevations_b1822'
#root = r'C:\Saeid\Prj100\SA_2\snowModelUZH\case4_villars-diablerets_elevations_b2000'
#root = r'C:\Saeid\Prj100\SA_2\snowModelUZH\case4_villars-diablerets_elevations_b2500'
#root = r'C:\Saeid\Prj100\SA_2\snowModelUZH\case5_champex'
#root = r'C:\Saeid\Prj100\SA_2\snowModelUZH\case6_davos_elevations_b1564'
#root = r'C:\Saeid\Prj100\SA_2\snowModelUZH\case6_davos_elevations_b2141'
#root = r'C:\Saeid\Prj100\SA_2\snowModelUZH\case6_davos_elevations_b2584'
## calling the function with multiple return values
caseStudyStns, inputFolder, ablationFolder, accumulationFolder, climateFolder, climateFolder_org, \
climateFolder1, climateFolder2 = initialize_input_dict(root)
def copytree(src, dst, symlinks=False, ignore=None):
for item in os.listdir(src):
s = os.path.join(src, item)
d = os.path.join(dst, item)
if os.path.isdir(s):
shutil.copytree(s, d, symlinks, ignore)
else:
shutil.copy2(s, d)
## 1st column as index: making dates from 01 01 1981 to 2099 12 31
from datetime import timedelta, date
def daterange(start_date, end_date):
for n in range(int ((end_date - start_date ).days + 1)):
yield start_date + timedelta(n)
### OR Let's make this function in a more OOP way:
class Policy_Ski:
def __init__(self, x1SnowThershold):
self.x1SnowThershold = x1SnowThershold
def policy_release2(self):
return(self.x1SnowThershold)
def policy_release3(self):
        '''This function should build an evaluation matrix for the condition of 100 days at minimum snow conditions'''
pass
class Economic_Model_Ski:
def __init__(self, xCostDay, xRevenueDay):
self.costDayFixed = xCostDay
self.revenueDayFixed = xRevenueDay
def economic_costDay(self):
return(self.costDayFixed)
def economic_revenueDay(self):
return(self.revenueDayFixed)
class RCP_Model:
def __init__(self, xRCP, xClimateModel):
self.input1 = round(xRCP)
#self.input1 = xRCP
self.input2 = xClimateModel
def rcpGenerator(self):
if self.input1 == 1:
RCP = str(2.6)
rcpInt = 1
if self.input1 == 2:
RCP = str(4.5)
rcpInt = 2
if self.input1 == 3:
RCP = str(8.5)
rcpInt = 3
return(RCP, rcpInt)
def climateModel(self):
a, b = RCP_Model.rcpGenerator(self)
if b == 1:
climateModel = round(self.input2*11)
elif b == 2:
climateModel = 11 + max(1,round(self.input2*25))
else:
climateModel = 36 + max(1, round(self.input2*31))
return (int(climateModel))
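# Illustrative sketch of the scenario mapping (values are hypothetical): xRCP is
# rounded to 1, 2 or 3 to pick RCP 2.6/4.5/8.5, and xClimateModel in [0, 1] is
# scaled to a climate-model index within that band.
def _example_rcp_mapping():
    rcp = RCP_Model(xRCP=2.2, xClimateModel=0.4)
    scenario, band = rcp.rcpGenerator()   # ('4.5', 2)
    model_index = rcp.climateModel()      # 11 + max(1, round(0.4 * 25)) = 21
    return scenario, band, model_index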
def tipping_points_freq(df, xGoodDays):
"""
This function, calculates the frequency of tipping points for each individual resort
"""
dfColumns= df.columns
scenarios_length= len(dfColumns)
simulations_Length = len(df[dfColumns[1]])
tipping_freq = np.zeros(scenarios_length)
for i in range (1, scenarios_length, 1):
m = 0
for j in range (1 , simulations_Length, 1):
if float(df[dfColumns[i]].iloc[j]) < xGoodDays:
m += 1
if m == 3:
tipping_freq[i] += 1
m = 0
else:
m = 0
continue
#break
return tipping_freq
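# Illustrative sketch: a tipping point is counted whenever a scenario column has
# 3 consecutive values below xGoodDays (the first column and first row are
# skipped, matching the layout of the simulation output). Data are hypothetical.
def _example_tipping_points():
    df_example = pd.DataFrame({
        'year': [2020, 2021, 2022, 2023, 2024],
        'scenario_1': [120, 80, 70, 60, 110],
    })
    return tipping_points_freq(df_example, xGoodDays=100)  # array([0., 1.])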
# XLR Framework
def snow_Model (xRCP=None, xClimateModel=None, Xfactor1 = None, X2fM = None, X3iPot = None, X4rSnow = None,
X5temp = None, X6tempArt = None, xCostDay = None, xRevenueDay = None, x1SnowThershold = None,
xGoodDays = None):
    '''This function controls the Ski resort model in an XLR framework'''
    ''' VERY IMPORTANT --- Controlling the randomness --- VERY IMPORTANT'''
xClimateRandomness = round(Xfactor1)
if (xClimateRandomness == 1):
os.chdir(climateFolder_org)
src = os.getcwd()
os.chdir(climateFolder)
dst = os.getcwd()
#copytree(src, dst)
print('Original CH2018 is being used')
elif (xClimateRandomness == 2) :
os.chdir(climateFolder1)
src = os.getcwd()
os.chdir(climateFolder)
dst = os.getcwd()
#copytree(src, dst)
print('Random Climate realization version 1 is being used')
else:
os.chdir(climateFolder2)
src = os.getcwd()
os.chdir(climateFolder)
dst = os.getcwd()
#copytree(src, dst)
print('Random Climate realization version 2 is being used')
os.chdir(climateFolder)
fnames = os.listdir()
#randomness_pcp_tmp(fnames, Xfactor1)
    print('Snow_Model: Matching the station names with CSV files!')
    '''Matching the station names in the dictionary of stations with the CSV files in the Climate folder of the case study'''
pcpCaseStudy = []
tmpCaseStudy = []
if (xClimateRandomness == 1):
for i in range(len(caseStudyStns)):
pcpCaseStudy.append(os.path.join(climateFolder, caseStudyStns[i]['fileName'] + 'p.csv'))
tmpCaseStudy.append(os.path.join(climateFolder, caseStudyStns[i]['fileName'] + 't.csv'))
elif (xClimateRandomness == 2) :
for i in range(len(caseStudyStns)):
pcpCaseStudy.append(os.path.join(climateFolder1, caseStudyStns[i]['fileName'] + 'p.csv'))
tmpCaseStudy.append(os.path.join(climateFolder1, caseStudyStns[i]['fileName'] + 't.csv'))
else:
for i in range(len(caseStudyStns)):
pcpCaseStudy.append(os.path.join(climateFolder2, caseStudyStns[i]['fileName'] + 'p.csv'))
tmpCaseStudy.append(os.path.join(climateFolder2, caseStudyStns[i]['fileName'] + 't.csv'))
print('Snow_Model: Building a database for each csv file (tmp and pcp)!')
'''Step 6: building a database for each precipitation and temperature file in Climate folder and saving them in a list'''
'''6.1 reading the csv files as databases'''
dfpcp = [None for _ in range(len(pcpCaseStudy))]
dftmp = [None for _ in range(len(tmpCaseStudy))]
for i in range(len(pcpCaseStudy)):
dfpcp[i] = pd.read_csv(pcpCaseStudy[i])
dftmp[i] = | pd.read_csv(tmpCaseStudy[i]) | pandas.read_csv |
import logging
import os
import ast
import pandas as pd
from pandas.io.json import json_normalize
import sys as sys
import json
import numpy as np
def main(argv=None):
"""
    Utilize the Pandas library to read in the Yelp academic dataset (business
    JSON plus business, user, and review .csv files), extract selected business
    attributes (attire and noise level), and write out small random samples of
    the merged business, user, and review data to .csv files for inspection.
"""
if argv is None:
argv = sys.argv
msg = [
'Source file read {0}',
'UNSD M49 regions written to file {0}',
'UNSD M49 sub-regions written to file {0}',
'UNSD M49 intermediate regions written to file {0}',
'UNSD M49 countries and areas written to file {0}',
'UNSD M49 development status written to file {0}',
'UNESCO heritage site countries/areas written to file {0}',
'UNESCO heritage site categories written to file {0}',
'UNESCO heritage site regions written to file {0}',
'UNESCO heritage site transboundary values written to file {0}'
]
# Creating small sample of data:
business_json = './input/json/yelp_academic_dataset_business.json'
business_df = pd.read_json(business_json, lines=True, encoding='utf8')
## Creating full clean business csv
attributes_df = business_df[['business_id','attributes']]
attire = []
noise = []
for val in attributes_df['attributes']:
try:
attire.append(val['RestaurantsAttire'])
except:
attire.append("")
try:
noise.append(val['NoiseLevel'])
except:
noise.append("")
attributes_df['Attire'] = pd.Series(attire)
attributes_df['Noise'] = pd.Series(noise)
attributes_df = attributes_df.drop('attributes', axis=1)
businesses = pd.read_csv('./input/csv/yelp_business.csv')
full_data = pd.merge(businesses, attributes_df, how='inner', on='business_id')
for col in full_data.columns:
full_data[col] = full_data[col].fillna('Null').astype(str)
full_data[col] = full_data[col].apply(lambda x: x.strip('""""'))
full_data_small = full_data.sample(2000)
full_data_small.to_csv('SMALL_yelp_full_businesses.csv', quotechar='"', index=False)
user_df = pd.read_csv('input/csv/yelp_user.csv')
user_df_small = user_df.sample(2000)
user_df_small.to_csv('SMALL_yelp_user.csv', quotechar='"', index=False)
review_df = pd.read_csv('input/csv/yelp_review.csv', converters={'text':lambda x:x.replace('/n/n','')})
review_df = review_df.replace('\n','', regex=True)
review_with_user_and_business = review_df[(review_df['user_id'].isin(user_df_small['user_id']) == True) & (review_df['business_id'].isin(full_data_small['business_id']) == True)]
review_with_user_and_business.to_csv('SMALL_yelp_review.csv', quotechar='"', index=False)
# Setting logging format and default level
logging.basicConfig(format='%(levelname)s: %(message)s', level=logging.DEBUG)
# Read in United Nations Statistical Division (UNSD) M49 Standard data set (tabbed separator)
# # logging.info(msg[0].format(os.path.abspath(business_df)))
# # # Write categories to a .csv file.
# categories = []
# for x in business_df['categories']:
# if x != None:
# cat = x.split(',')
# for each in cat:
# if each.strip() not in categories:
# categories.append(each.strip())
# # business_cat = extract_filtered_series(df, 'categories')
# business_cat_csv = './output/business_categories.csv'
# write_series_to_csv(pd.Series(categories), business_cat_csv, '\t', False)
# # # Write cities to a .csv file.
# cities = []
# for x in business_df['city']:
# if x != None:
# if x.strip() not in cities:
# cities.append(x.strip())
# cities_csv = './output/cities.csv'
# write_series_to_csv(pd.Series(cities), cities_csv, '\t', False)
# # logging.info(msg[2].format(os.path.abspath(unsd_sub_region_csv)))
# # # Write states to a .csv file.
# states = []
# for x in business_df['state']:
# if x != None:
# if x.strip() not in states:
# states.append(x.strip())
# states_csv = './output/states.csv'
# write_series_to_csv(pd.Series(states), states_csv, '\t', False)
# # # Write attire to a .csv file.
# attires = []
# for x in df['RestaurantsAttire']:
# if x != None:
# if x not in attires:
# attires.append(x)
# attires_csv = './output/attires.csv'
# write_series_to_csv(pd.Series(attires), attires_csv, '\t', False)
# # # Write noise status to a .csv file.
# noise = []
# for x in df['NoiseLevel']:
# if x != None:
# if x not in noise:
# noise.append(x)
# noise_csv = './output/noise.csv'
# write_series_to_csv(pd.Series(noise), noise_csv, '\t', False)
def extract_filtered_series(data_frame, column_name):
"""
Returns a filtered Panda Series one-dimensional ndarray from a targeted column.
Duplicate values and NaN or blank values are dropped from the result set which is
returned sorted (ascending).
:param data_frame: Pandas DataFrame
:param column_name: column name string
:return: Panda Series one-dimensional ndarray
"""
return data_frame[column_name].drop_duplicates().dropna().sort_values()
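# Illustrative sketch using a hypothetical frame: duplicates and NaN values are
# dropped and the remaining values are returned sorted ascending.
def _example_extract_filtered_series():
    df_example = pd.DataFrame({'city': ['Toronto', 'Phoenix', 'Toronto', None]})
    return extract_filtered_series(df_example, 'city')  # Phoenix, Toronto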
def pandas_(s, lookup):
df = | pd.DataFrame() | pandas.DataFrame |
import numpy as np
import pandas as pd
from snsynth.preprocessors import GeneralTransformer
from snsynth.pytorch import PytorchDPSynthesizer
from snsynth.pytorch.nn import DPCTGAN, PATECTGAN
size = 100
batch_size = 10
eps = 1.0
np_data_xy = np.array([
np.arange(0, size) % 3,
(np.arange(0, size) % 3) * 10,
]).astype(np.int16).T
class TestDPGANInputChecks:
def test_train_dpctgan_continuous(self):
dpgan = DPCTGAN(epsilon=eps, batch_size=batch_size)
try:
dpgan.train(np_data_xy, categorical_columns=[0])
except ValueError:
return
raise AssertionError('DPCTGAN should have raised a ValueError')
def test_train_patectgan_continuous(self):
dpgan = PATECTGAN(epsilon=eps, batch_size=batch_size)
try:
dpgan.train(np_data_xy, categorical_columns=[0])
except ValueError:
return
raise AssertionError('PATECTGAN should have raised a ValueError')
def test_fit_pytorchgan_continuous(self):
dpgan = PytorchDPSynthesizer(eps, PATECTGAN(epsilon=eps, batch_size=batch_size), GeneralTransformer())
pd_data_xy = pd.DataFrame(np_data_xy, columns=["x", "y"])
try:
dpgan.fit(pd_data_xy, categorical_columns=[0])
except ValueError:
return
raise AssertionError('PATECTGAN should have raised a ValueError')
def test_fit_pytorchgan_continuous_no_transfromer(self):
dpgan = PytorchDPSynthesizer(eps, PATECTGAN(epsilon=eps, batch_size=batch_size), None)
pd_data_xy = | pd.DataFrame(np_data_xy, columns=["x", "y"]) | pandas.DataFrame |
import pandas as pd
from sqlalchemy.sql import text
def get_user_id(engine, email, password):
"""
Get user ID associated with the given user's email and password
:param engine: SQLAlchemy engine
:param email: email address of user
:param password: <PASSWORD>
    :return: user ID if the credentials match, otherwise None
"""
sql_query = """select user_id from users where email = :email and password = :password limit 1"""
# execute the query
sql_query_2 = text(sql_query).bindparams(email=email, password=password)
df = pd.read_sql_query(sql_query_2, engine)
if len(df) > 0:
return df.values[0][0]
else:
return None
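# Illustrative sketch: any SQLAlchemy engine pointing at the labeling database
# can be passed in; the email and password below are hypothetical.
def _example_get_user_id(engine):
    # e.g. engine = sqlalchemy.create_engine('postgresql://user:pass@localhost/label_db')
    return get_user_id(engine, 'user@example.com', 'password')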
def get_all_input_data_items(engine, label_task_id):
"""
Get list of all input data items (labeled + unlabeled) for a given label task
:param engine: SQLAlchemy engine
:param label_task_id:
:return:
"""
sql_query = """select distinct on (input_data_id) * from
(
select input_data_id, dataset_group_id, dataset_id from input_data_per_label_task
where label_task_id = %(label_task_id)s
) as all_input_data_items"""
df = pd.read_sql_query(sql_query, engine, params={'label_task_id': label_task_id})
return df
def get_all_datasets(engine):
"""
Get list of all datasets
:param engine: SQLAlchemy engine
:return:
"""
sql_query = """select * from datasets"""
df = pd.read_sql_query(sql_query, engine)
return df
def count_input_data_items_per_user_per_label_task(engine, label_task_id=None, user_id=None):
"""
Count the number of labeled, unlabeled and in progress input data items per label task for the user
:param engine: SQLAlchemy engine
:param label_task_id:
:param user_id:
:return:
"""
if label_task_id is not None and user_id is not None:
where_clause = 'where label_task_id = %(label_task_id)s and user_id = %(user_id)s'
elif label_task_id is not None:
where_clause = 'where label_task_id = %(label_task_id)s'
elif user_id is not None:
where_clause = 'where user_id = %(user_id)s'
else:
where_clause = ''
sql_query = """select * from item_counts {} order by user_id, label_task_id""".format(where_clause)
df = pd.read_sql_query(sql_query, engine, params={'label_task_id': label_task_id, 'user_id': user_id})
return df
def get_next_unlabeled_input_data_item(engine, label_task_id, shuffle=True, n=1):
"""
Get the highest priority input data item for the specified label task that has not yet been labeled and is not
currently being labeled by another user
:param engine:
:param label_task_id:
:param shuffle: if True, shuffle the data before sorting by priority
:param n: number of items to return. If None, return all
:return:
"""
if shuffle:
order_by = 'order by random(), priority desc'
else:
order_by = 'order by priority desc, input_data_id'
if n is None:
max_limit = ''
else:
max_limit = 'limit {}'.format(int(n))
sql_query = """
with unlabeled_items as (
select * from labels_per_input_data_item
where label_task_id = %(label_task_id)s and label_id isnull and not input_data_id isnull
)
select * from unlabeled_items
{order_by}
{max_limit}""".format(order_by=order_by, max_limit=max_limit)
df = pd.read_sql_query(sql_query, engine, params={'label_task_id': label_task_id})
if len(df) > 0:
return df
else:
return None
def get_all_user_input_data(engine, user_id, label_task_id, n):
"""
Get all input data that the user has viewed (whether they have actually labeled any of it or not)
:param engine:
:param user_id:
:param label_task_id:
:param n: number of items to return (set to None if no limit)
:return:
"""
# choose whether to return some or all of the items
if n is None:
n = 'ALL'
else:
n = int(n)
fields = 'label_id, input_data_id, label_task_id, label_history_id, user_id, user_complete, needs_improvement, ' \
'admin_complete, paid, include_in_test_set, user_comment, admin_comment, timestamp_edit'
sql_query = """
SELECT {fields} FROM latest_label_history WHERE user_id=%(user_id)s AND label_task_id=%(label_task_id)s
ORDER BY label_id DESC LIMIT {n}""".format(fields=fields, n=n)
df = pd.read_sql_query(sql_query, engine, params={'user_id': user_id,
'label_task_id': label_task_id})
return df
def get_all_user_input_data_filtered(engine, user_id, label_task_id, label_filter):
"""
    Get all input_data items that match the filter.
:param engine:
:param user_id:
:param label_task_id:
:param label_filter: filter indicating user_complete or user_incomplete
:return:
"""
# Apply the filter
if label_filter == "filter_complete":
complete = True
else:
complete = False
fields = 'label_id, input_data_id, label_task_id, label_history_id, user_id, user_complete, needs_improvement, ' \
'admin_complete, paid, include_in_test_set, user_comment, admin_comment, timestamp_edit'
sql_query = """
SELECT {fields} FROM latest_label_history a WHERE user_id=%(user_id)s AND label_task_id=%(label_task_id)s AND user_complete={complete}
AND label_history_id > 0 ORDER BY label_id ASC""".format(fields=fields, complete=complete)
df = pd.read_sql_query(sql_query, engine, params={'user_id': user_id,
'label_task_id': label_task_id})
return df
def get_first_user_input_data(engine, user_id, label_task_id, label_filter):
"""
Get the first input_data item that matches the filter.
:param engine:
:param user_id:
:param label_task_id:
:param label_filter: filter indicating user_complete or user_incomplete
:return:
"""
df = get_all_user_input_data_filtered(engine, user_id, label_task_id, label_filter)
entry = df.iloc[0:1, :]
return entry
def get_last_user_input_data(engine, user_id, label_task_id, label_filter):
"""
    Get the last input_data item that matches the filter.
:param engine:
:param user_id:
:param label_task_id:
:param label_filter: filter indicating user_complete or user_incomplete
:return:
"""
df = get_all_user_input_data_filtered(engine, user_id, label_task_id, label_filter)
count = len(df.index)
entry = df.iloc[count-1:count, :]
return entry
def get_preceding_user_data_item(engine, user_id, label_task_id, current_input_data_id):
"""
Get preceding input data that the user has viewed (whether they have actually labeled any of it or not)
:param engine:
:param user_id:
:param label_task_id:
:param current_input_data_id: current input data ID (we want to find the item before this in the list)
:return:
"""
# retrieve all data from database for that user and label task
df = get_all_user_input_data(engine, user_id, label_task_id, n=None)
    # get the preceding input data item (the list is in descending order of label ID, so the preceding item is at the next index)
matching_indices = df.index[df['input_data_id'] == current_input_data_id].tolist()
if len(matching_indices) >= 1:
idx = matching_indices[0]
return df.iloc[idx + 1:idx + 2, :]
else:
return pd.DataFrame(columns=df.columns)
def get_next_user_data_item(engine, user_id, label_task_id, current_input_data_id):
"""
Get next input data that the user has viewed (whether they have actually labeled any of it or not)
:param engine:
:param user_id:
:param label_task_id:
:param current_input_data_id: current input data ID (we want to find the item before this in the list)
:return:
"""
# retrieve all data from database for that user and label task
df = get_all_user_input_data(engine, user_id, label_task_id, n=None)
    # get the next input data item (the list is in descending order of label ID, so the next item is at the previous index)
matching_indices = df.index[df['input_data_id'] == current_input_data_id].tolist()
if len(matching_indices) >= 1:
idx = matching_indices[0]
return df.iloc[idx - 1:idx, :]
else:
return pd.DataFrame(columns=df.columns)
def get_preceding_user_data_item_filtered(engine, user_id, label_task_id, current_label_id, label_filter):
"""
Get preceding input data that the user has viewed (whether they have actually labeled any of it or not)
:param engine:
:param user_id:
:param label_task_id:
:param current_label_id: current label (we want to find the item before this in the list)
    :param label_filter: filter indicating user_complete or user_incomplete
:return:
"""
# retrieve all data from database for that user and label task
df = get_all_user_input_data_filtered(engine, user_id, label_task_id, label_filter)
# here the list is in ascending order.
matching_indices = df.index[df['label_id'] < current_label_id].tolist()
if len(matching_indices) >= 1:
idx = matching_indices[len(matching_indices)-1]
return df.iloc[idx:idx+1,:]
else:
return | pd.DataFrame(columns=df.columns) | pandas.DataFrame |
"""
Parse through files with Divvy raw data.
Group trips by day in order to average trip time.
"""
import csv
import pandas as pd
from statistics import mean
# create place to store ride durations for each day
sun_times = []
mon_times = []
tues_times = []
wed_times = []
thurs_times = []
fri_times = []
sat_times = []
# dictionary of dates and lists so that
# loop below can classify ride duration by day
date_dict = {
'8/12/2017': sat_times,
'8/11/2017': fri_times,
'8/10/2017': thurs_times,
'8/9/2017': wed_times,
'8/8/2017': tues_times,
'8/7/2017': mon_times,
'8/6/2017': sun_times
}
# open CSV containing all trip data from August 6-12, 2017
with open('Divvy_Trips_2017_Aug0612.csv') as csvfile:
divvy_week = csv.DictReader(csvfile)
for row in divvy_week:
# grab date of ride from start time
ride_date = dict(row)['start_time'].split()[0]
# grab length of ride and turn into integer
ride_length = int(dict(row)['tripduration'])
# use ride date to fetch appropriate list from dict, append ride length
date_dict[ride_date].append(ride_length)
day_lists = [sun_times, mon_times, tues_times, wed_times,
thurs_times, fri_times, sat_times]
days = ['Sunday', 'Monday', 'Tuesday', 'Wednesday',
'Thursday', 'Friday', 'Saturday']
avg_ride_times = []
for day in day_lists:
daily_average = round(mean(day) / 60, 1)
avg_ride_times.append(daily_average)
d = {'day': days, 'ride_length': avg_ride_times}
df = | pd.DataFrame(data=d) | pandas.DataFrame |
# coding: utf-8
# In[1]:
import numpy as np
import pandas as pd
from sklearn.feature_extraction.text import TfidfVectorizer
import seaborn as snb
import nltk
from gensim.models import Word2Vec, Phrases
from sklearn.utils import shuffle
import matplotlib.pyplot as plt
import re
import string
import gensim
from sklearn.externals import joblib
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LogisticRegression
# In[2]:
dir_name = 'input_not_lowercase/'
train_bio = pd.read_csv('%s/biology.csv'%dir_name,encoding='utf-8')
train_cooking = pd.read_csv("%s/cooking.csv"%dir_name,encoding='utf-8')
train_crypto = | pd.read_csv("%s/crypto.csv"%dir_name,encoding='utf-8') | pandas.read_csv |
"""
Run the procedure for COMPASS
"""
from __future__ import print_function, division, absolute_import
import pandas as pd
from tqdm import tqdm
from random import shuffle
import logging
import os
import sys
import time
import timeit
import numpy as np
from .. import utils
from .. import models
from . import cache
from ..globals import BETA, EXCHANGE_LIMIT
import cplex
logger = logging.getLogger("compass")
__all__ = ['singleSampleCompass']
def singleSampleCompass(data, model, media, directory, sample_index, args):
"""
Run Compass on a single column of data
Parameters
==========
data : list
Full path to data file(s)
model : str
Name of metabolic model to use
media : str or None
Name of media to use
directory : str
Where to store results and log info. Is created if it doesn't exist.
sample_index : int
Which sample to run on
args : dict
More keyword arguments
- lambda, num_neighbors, symmetric_kernel, species,
and_function, test_mode, detailed_perf
"""
if not os.path.isdir(directory) and directory != '/dev/null':
os.makedirs(directory)
if os.path.exists(os.path.join(directory, 'success_token')):
logger.info('success_token detected, results already calculated.')
logger.info('COMPASS Completed Successfully')
return
if args['save_argmaxes']:
args['save_argmaxes_dir'] = directory
else:
args['save_argmaxes_dir'] = None
model = models.init_model(model=args['model'], species=args['species'],
exchange_limit=EXCHANGE_LIMIT, media=args['media'],
isoform_summing=args['isoform_summing'])
logger.info("Running COMPASS on model: %s", model.name)
perf_log = None
if args['detailed_perf']:
cols = ['order','max rxn time', 'max rxn method', 'cached', 'min penalty time',
'min penalty method', 'min penalty sensitvivity', 'kappa']
perf_log = {c:{} for c in cols}
if args['generate_cache']:
cache.clear(model) #TBD add media specifier here too
# Build model into cplex problem
problem = initialize_cplex_problem(model, args['num_threads'], args['lpmethod'], args['advance'])
# Only read this to get the number of samples and the sample name
# Use nrows=1 so this is fast
samples = utils.read_sample_names(data)
if samples is None:
sample_name = 'sample_'+str(sample_index)
logger.info("Processing Sample %i: %s", sample_index, sample_name)
else:
sample_name = str(samples[sample_index])
logger.info("Processing Sample %i/%i: %s", sample_index,
len(samples), sample_name)
# Run core compass algorithm
# Evaluate reaction penalties
penalty_start = timeit.default_timer() #
logger.info("Evaluating Reaction Penalties...")
reaction_penalties = pd.read_csv(
args['penalties_file'], sep="\t", header=0,
usecols=[0, sample_index + 1]) #0 is the Reaction column,
reaction_penalties = reaction_penalties.set_index("Reaction").iloc[:, 0]
penalty_elapsed = timeit.default_timer() - penalty_start
react_start = timeit.default_timer()
if not args['no_reactions']:
logger.info("Evaluating Reaction Scores...")
reaction_scores = compass_reactions(
model, problem, reaction_penalties,
perf_log=perf_log, args=args)
react_elapsed = timeit.default_timer() - react_start
#if user wants to calc reaction scores, but doesn't want to calc metabolite scores, calc only the exchange reactions
logger.info("Evaluating Exchange/Secretion/Uptake Scores...")
exchange_start = timeit.default_timer()
uptake_scores, secretion_scores, exchange_rxns = compass_exchange(
model, problem, reaction_penalties,
only_exchange=(not args['no_reactions']) and not args['calc_metabolites'],
perf_log=perf_log, args=args)
exchange_elapsed = timeit.default_timer() - exchange_start
# Copy valid uptake/secretion reaction fluxes from uptake/secretion
# results into reaction results
if (not args['no_reactions']) or args['calc_metabolites']:
for r_id in exchange_rxns:
assert r_id in model.reactions
assert r_id not in reaction_scores
reaction_scores[r_id] = exchange_rxns[r_id]
# Output results to file
logger.info("Writing output files...")
if not args['no_reactions']:
reaction_scores = pd.Series(reaction_scores, name=sample_name).sort_index()
reaction_scores.to_csv(os.path.join(directory, 'reactions.txt'),
sep="\t", header=True)
if args['calc_metabolites']:
uptake_scores = | pd.Series(uptake_scores, name=sample_name) | pandas.Series |
#!/usr/bin/env python
"""Joining two or more dataframes together."""
import pandas as pd
# Use concat/append when to stack dfs vertically or horizontally
# Use merge when joining data between columns in two DataFrames
d1 = {'Column_A': ['a', 'a', 'a', 'c', 'c', 'd'],
'Column_B': [1, 2, 3, 4, 5, 6]}
df1 = | pd.DataFrame(d1) | pandas.DataFrame |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import numpy as np
import pandas as pd
import sys, os, platform
from tqdm import tqdm
import warnings
import argparse
#sklearn
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import train_test_split, LeaveOneOut, KFold
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import auc, roc_curve, f1_score, recall_score, precision_score
from sklearn.ensemble import AdaBoostClassifier
from sklearn.tree import DecisionTreeClassifier
from sklearn.utils import shuffle
from imblearn.over_sampling import RandomOverSampler
#matplotlib
import matplotlib
from matplotlib import rcParams
rcParams['font.family'] = 'Arial'
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import matplotlib.patches as mpatches
import matplotlib.cm as mplcm
import matplotlib.colors as colors
import matplotlib.ticker as ticker
workdir = ''# change to working directory
def perf_measure(y_actual, y_hat):
tp = fp = tn = fn = 0
for i in range(len(y_hat)):
if y_actual[i]==y_hat[i]==1: tp += 1
if y_hat[i]==1 and y_actual[i]!=y_hat[i]: fp += 1
if y_actual[i]==y_hat[i]==0: tn += 1
if y_hat[i]==0 and y_actual[i]!=y_hat[i]: fn += 1
if (tp+fn) == 0: sensitivity = np.nan
else: sensitivity = tp/(tp+fn) # recall
if (tn+fp) == 0: specificity = np.nan
else: specificity = tn/(tn+fp)
if (tp+fp) == 0: ppv = np.nan
else: ppv = tp/(tp+fp) # precision or positive predictive value (PPV)
if (tn+fn) == 0: npv = np.nan
else: npv = tn/(tn+fn) # negative predictive value (NPV)
if (tp+tn+fp+fn) == 0: hitrate = np.nan
else: hitrate = (tp+tn)/(tp+tn+fp+fn) # accuracy (ACC)
return sensitivity, specificity, ppv, npv, hitrate # (tp, fp, tn, fn)
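# Illustrative sketch with hypothetical labels: 2 TP, 1 FP, 2 TN and 1 FN give
# sensitivity 2/3, specificity 2/3, PPV 2/3, NPV 2/3 and accuracy 4/6.
def _example_perf_measure():
    y_actual = [1, 1, 1, 0, 0, 0]
    y_hat = [1, 1, 0, 1, 0, 0]
    return perf_measure(y_actual, y_hat)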
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument('--model', default='DT', help='LR_L1, LR_L2, Adaboost, DT')
parser.add_argument('--label', default='Suspected cases', help='Suspected cases, Infection cases')
parser.add_argument('--upsampling', default=True, action='store_true')
parser.add_argument('--test_ratio', default=0.2, help='Held-out testing set')
return parser.parse_args()
if __name__ == '__main__':
args = parse_args()
# =============================================================================
# Read data
# =============================================================================
df = pd.read_excel(workdir + 'Developement_datasets_final.xls', sheet_name = '疑似')
y = df[args.label]
y_sus = df['Suspected cases']
X = df[[c for c in df.columns if c not in ['Suspected cases', 'Infection cases']]]
colnames = X.columns.astype(str)
X = X.values
# =============================================================================
# Cross validation
# =============================================================================
np.random.seed(0)
test_ratio = args.test_ratio
rand_test_idx = np.arange(len(X))
np.random.shuffle(rand_test_idx)
rand_test_idx = rand_test_idx[0:int(test_ratio * len(X))]
rand_train_val_idx = [i for i in np.arange(len(X)) if i not in rand_test_idx]
X_test, y_test = X[rand_test_idx], y.values[rand_test_idx]
y_sus_test = y_sus.values[rand_test_idx]
X_trainval, y_trainval = X[rand_train_val_idx], y.values[rand_train_val_idx]
print('standardize input.')
stdscaler = StandardScaler().fit(X_trainval)
X_trainval = stdscaler.transform(X_trainval)
X_test = stdscaler.transform(X_test)
stdscaler_df = pd.DataFrame(np.stack((stdscaler.mean_, stdscaler.scale_)).T, index = colnames, columns = ['mean_','scale_'])
stdscaler_df.to_csv(workdir + 'stdscaler.csv')
# =============================================================================
# Handling the imbalanced dataset
# =============================================================================
if args.upsampling:
print('Handling the imbalanced dataset by RandomOverSampler ...')
ros = RandomOverSampler(random_state=0)
X_trainval, y_trainval = ros.fit_resample(X_trainval, y_trainval)
X_trainval, y_trainval = shuffle(X_trainval, y_trainval, random_state=0)
# class_weight = None
# class_weight = 'balanced'
    class_weight = {0: 1/np.sum(y_trainval == 0), 1: 1/np.sum(y_trainval == 1)}  # class_weight: dict or 'balanced', default=None
nfolds = 10
if args.model == 'LR_L1':
C_range = np.array([1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30])*0.25
if args.model == 'LR_L2':
C_range = np.array([1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30])*0.25
if args.model == 'DT':
C_range = np.array([1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16])
elif args.model == 'Adaboost':
C_range = [2**4, 2**5, 2**6, 2**7, 2**8]
metrics = pd.DataFrame(index = C_range, columns = ['AUC', 'F1', 'Precision', 'Recall', 'sensitivity', 'specificity', 'ppv', 'npv', 'hitrate'], dtype=float)
for C in C_range:
AUC = f1 = precision = recall = 0
sensitivity = specificity = ppv = npv = hitrate = 0
for train_index, val_index in KFold(n_splits=nfolds, shuffle=True, random_state=10).split(X_trainval):
X_train, y_train = X_trainval[train_index], y_trainval[train_index]
X_val, y_val = X_trainval[val_index], y_trainval[val_index]
if args.model == 'LR_L1':
model = LogisticRegression(penalty='l1', C = C, solver = 'saga', random_state=0, class_weight = class_weight)
if args.model == 'LR_L2':
model = LogisticRegression(penalty='l2', C = C, solver = 'saga', random_state=0, class_weight = class_weight)
if args.model == 'DT':
model = DecisionTreeClassifier(max_depth = C, class_weight = class_weight)
elif args.model == 'Adaboost':
model = AdaBoostClassifier(base_estimator = DecisionTreeClassifier(max_depth=1, class_weight = class_weight), n_estimators = C)
with warnings.catch_warnings():
warnings.simplefilter("ignore")
model.fit(X_train, y_train)
y_val_pred, y_val_pred_proba = model.predict(X_val), model.predict_proba(X_val)[:,1]
fpr, tpr, thresholds = roc_curve(y_val, y_val_pred_proba)
AUC += auc(fpr, tpr)/nfolds
f1 += f1_score(y_val_pred, y_val, average = 'binary')/nfolds
precision += precision_score(y_val, y_val_pred, average = 'binary')/nfolds
recall += recall_score(y_val, y_val_pred, average = 'binary')/nfolds
sensitivity_tmp, specificity_tmp, ppv_tmp, npv_tmp, hitrate_tmp = perf_measure(y_val, y_val_pred)
sensitivity += sensitivity_tmp/nfolds
specificity += specificity_tmp/nfolds
ppv += ppv_tmp/nfolds
npv += npv_tmp/nfolds
hitrate += hitrate_tmp/nfolds
print('C: %f, AUC: %.4f, F1: %.4f, P: %.4f, R: %4f, Sen: %.4f, Spe: %.4f' % (C, AUC, f1, precision, recall, sensitivity, specificity))
metrics.loc[C, :] = AUC, f1, precision, recall, sensitivity, specificity, ppv, npv, hitrate
best_C = metrics.idxmax().F1
print('Best penalty C: %.4f' % best_C)
if args.model == 'LR_L1':
model_fin = LogisticRegression(penalty='l1', C = best_C, solver = 'saga', random_state=0, class_weight = class_weight)
if args.model == 'LR_L2':
model_fin = LogisticRegression(penalty='l2', C = best_C, solver = 'saga', random_state=0, class_weight = class_weight)
if args.model == 'DT':
model_fin = DecisionTreeClassifier(max_depth = best_C, class_weight = class_weight)
elif args.model == 'Adaboost':
model_fin = AdaBoostClassifier(base_estimator = DecisionTreeClassifier(max_depth=1, class_weight = class_weight), n_estimators = best_C)
model_fin.fit(X_trainval, y_trainval)
y_test_pred, y_test_pred_proba = model_fin.predict(X_test), model_fin.predict_proba(X_test)[:,1]
fpr, tpr, thresholds = roc_curve(y_test, y_test_pred_proba)
AUC = auc(fpr, tpr)
f1 = f1_score(y_test_pred, y_test, average = 'binary')
precision = precision_score(y_test, y_test_pred, average = 'binary')
recall = recall_score(y_test, y_test_pred, average = 'binary')
sensitivity, specificity, ppv, npv, hitrate = perf_measure(y_test, y_test_pred)
print('[Final report] Best C: %f, AUC: %.4f, F1: %.4f, P: %.4f, R: %4f, Sen: %.4f, Spe: %.4f' % (best_C, AUC, f1, precision, recall, sensitivity, specificity))
# =============================================================================
# Feature importance
# =============================================================================
feature_name = [c for c in df.columns.values.astype(str) if c not in ['Suspected cases', 'Infection cases']]
if args.model in ['LR_L1', 'LR_L2']:
importance = model_fin.coef_.reshape(-1)
elif args.model == 'DT':
importance = model_fin.feature_importances_
elif args.model == 'Adaboost':
importance = model_fin.feature_importances_
coefficients = pd.DataFrame(importance, index = feature_name, columns = ['Weight'])
coefficients.sort_values(by='Weight', ascending=False, inplace=True)
if args.model in ['LR_L1', 'LR_L2']:
coefficients.loc['intercept_','Weight'] = model_fin.intercept_
coefficients.to_csv(workdir + 'feature_importance_%s.csv' % args.model)
# =============================================================================
# External validation
# =============================================================================
df_ev = | pd.read_excel(workdir + 'validation_datasets_final.xls', sheet_name = '疑似', dtype = float) | pandas.read_excel |
from datetime import timezone
import pandas as pd
import numpy as np
import datetime
import netCDF4
import time
def _validate_date(date_text):
'''
Checks date format to ensure YYYY-MM-DD format and return date in
datetime format.
Parameters
----------
date_text: string
Date string format to check
Returns
-------
dt: datetime
'''
    assert isinstance(date_text, str), ('date_text must be '
        'of type string')
try:
dt = datetime.datetime.strptime(date_text, '%Y-%m-%d')
except ValueError:
raise ValueError("Incorrect data format, should be YYYY-MM-DD")
else:
dt = dt.replace(tzinfo=timezone.utc)
return dt
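# Illustrative sketch: a well formed date string is returned as a UTC-aware
# datetime; a malformed one raises ValueError.
def _example_validate_date():
    return _validate_date('2012-04-01')  # datetime(2012, 4, 1, tzinfo=timezone.utc)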
def _start_and_end_of_year(year):
'''
Returns a datetime start and end for a given year
Parameters
----------
year: int
Year to get start and end dates
Returns
-------
start_year: datetime object
start of the year
end_year: datetime object
end of the year
'''
    assert isinstance(year, (type(None), int, list)), 'year must be of type int or list of ints'
try:
year = str(year)
start_year = datetime.datetime.strptime(year, '%Y')
except ValueError:
raise ValueError("Incorrect years format, should be YYYY")
else:
next_year = datetime.datetime.strptime(f'{int(year)+1}', '%Y')
end_year = next_year - datetime.timedelta(days=1)
return start_year, end_year
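# Illustrative sketch: the helper returns January 1st of the requested year and
# December 31st of the same year (one day before the next January 1st).
def _example_year_bounds():
    start, end = _start_and_end_of_year(2011)
    return start, end  # (datetime(2011, 1, 1, 0, 0), datetime(2011, 12, 31, 0, 0))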
def _dates_to_timestamp(nc, start_date=None, end_date=None):
'''
Returns timestamps from dates.
Parameters
----------
nc: netCDF Object
netCDF data for the given station number and data type
start_date: string
Start date in YYYY-MM-DD, e.g. '2012-04-01'
end_date: string
End date in YYYY-MM-DD, e.g. '2012-04-30'
Returns
-------
start_stamp: float
seconds since the Epoch to start_date
end_stamp: float
seconds since the Epoch to end_date
'''
    assert isinstance(start_date, (str, type(None))), ('start_date '
        'must be of type str')
    assert isinstance(end_date, (str, type(None))), ('end_date must be '
        'of type str')
time_all = nc.variables['waveTime'][:].compressed()
time_range_all = [datetime.datetime.fromtimestamp(time_all[0]).replace(tzinfo=timezone.utc),
datetime.datetime.fromtimestamp(time_all[-1]).replace(tzinfo=timezone.utc)]
if start_date:
start_datetime = _validate_date(start_date)
if end_date:
end_datetime = _validate_date(end_date)
if start_datetime > end_datetime:
            raise Exception(f'start_date ({start_datetime}) must be '
                            f'before end_date ({end_datetime})')
        elif start_datetime == end_datetime:
            raise Exception(f'start_date ({start_datetime}) cannot be '
                            f'the same as end_date ({end_datetime})')
if start_date:
if start_datetime > time_range_all[0] and start_datetime < time_range_all[1]:
start_stamp = start_datetime.replace(tzinfo=timezone.utc).timestamp()
else:
print(f'WARNING: Provided start_date ({start_datetime}) is '
f'not in the returned data range {time_range_all} \n'
f'Setting start_date to the earliest date in range '
f'{time_range_all[0]}')
start_stamp = pd.to_datetime(time_range_all[0]).replace(tzinfo=timezone.utc).timestamp()
if end_date:
if end_datetime > time_range_all[0] and end_datetime < time_range_all[1]:
end_stamp = end_datetime.replace(tzinfo=timezone.utc).timestamp()
else:
print(f'WARNING: Provided end_date ({end_datetime}) is '
f'not in the returned data range {time_range_all} \n'
f'Setting end_date to the latest date in range '
f'{time_range_all[1]}')
end_stamp = pd.to_datetime(time_range_all[1]).replace(tzinfo=timezone.utc).timestamp()
if start_date and not end_date:
end_stamp = pd.to_datetime(time_range_all[1]).replace(tzinfo=timezone.utc).timestamp()
elif end_date and not start_date:
start_stamp = pd.to_datetime(time_range_all[0]).replace(tzinfo=timezone.utc).timestamp()
if not start_date:
start_stamp = pd.to_datetime(time_range_all[0]).replace(tzinfo=timezone.utc).timestamp()
if not end_date:
end_stamp = pd.to_datetime(time_range_all[1]).replace(tzinfo=timezone.utc).timestamp()
return start_stamp, end_stamp
def request_netCDF(station_number, data_type):
'''
Returns historic or realtime data from CDIP THREDDS server
Parameters
----------
station_number: string
CDIP station number of interest
data_type: string
'historic' or 'realtime'
Returns
-------
nc: netCDF Object
netCDF data for the given station number and data type
'''
assert isinstance(station_number, str), (f'station_number must be ' +
f'of type string. Got: {station_number}')
    assert isinstance(data_type, str), ('data_type must be '
        'of type string')
assert data_type in ['historic', 'realtime'], ('data_type must be'\
f' "historic" or "realtime". Got: {data_type}')
if data_type == 'historic':
cdip_archive= 'http://thredds.cdip.ucsd.edu/thredds/dodsC/cdip/archive'
data_url = f'{cdip_archive}/{station_number}p1/{station_number}p1_historic.nc'
elif data_type == 'realtime':
cdip_realtime = 'http://thredds.cdip.ucsd.edu/thredds/dodsC/cdip/realtime'
data_url = f'{cdip_realtime}/{station_number}p1_rt.nc'
nc = netCDF4.Dataset(data_url)
return nc
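# Illustrative sketch (requires network access to the CDIP THREDDS server;
# station '100' is an arbitrary example id):
def _example_request_netcdf():
    nc = request_netCDF('100', 'historic')
    return nc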
def request_parse_workflow(nc=None, station_number=None, parameters=None,
years=None, start_date=None, end_date=None,
data_type='historic', all_2D_variables=False):
'''
    Parses a passed CDIP netCDF file or requests and parses data for a station
    number from http://cdip.ucsd.edu/. This function can return specific
    parameters if passed. Years may be non-consecutive e.g. [2001, 2010].
    Time may be sliced by dates (start_date or end_date in YYYY-MM-DD).
    data_type defaults to 'historic' but may also be set to 'realtime'.
    By default 2D variables are not parsed; set all_2D_variables=True if all
    2D variables are needed. See the MHKiT CDIP example Jupyter notebook for
    information on available parameters.
Parameters
----------
nc: netCDF Object
netCDF data for the given station number and data type. Can be the output of
request_netCDF
station_number: string
Station number of CDIP wave buoy
    parameters: string or list of strings
        Parameters to return. If None will return all variables except
2D-variables.
years: int or list of int
Year date, e.g. 2001 or [2001, 2010]
start_date: string
Start date in YYYY-MM-DD, e.g. '2012-04-01'
end_date: string
End date in YYYY-MM-DD, e.g. '2012-04-30'
data_type: string
Either 'historic' or 'realtime'
all_2D_variables: boolean
Will return all 2D data. Enabling this will add significant
processing time. If all 2D variables are not needed it is
        recommended to pass 2D parameters of interest using the
'parameters' keyword and leave this set to False. Default False.
Returns
-------
data: dictionary
'vars1D': DataFrame
1D variables indexed by time
'metadata': dictionary
Anything not of length time
'vars2D': dictionary of DataFrames, optional
            If 2D-vars are passed in the 'parameters' key or if run
            with all_2D_variables=True, then this key will appear
            with a dictionary of DataFrames of 2D variables.
'''
assert isinstance(station_number, (str, type(None))), (f'station_number must be '+
'of type string')
    assert isinstance(parameters, (str, type(None), list)), ('parameters '
        'must be of type str or list of strings')
    assert isinstance(start_date, (str, type(None))), ('start_date '
        'must be of type str')
    assert isinstance(end_date, (str, type(None))), ('end_date must be '
        'of type str')
    assert isinstance(years, (type(None), int, list)), ('years must be of '
        'type int or list of ints')
    assert isinstance(data_type, str), ('data_type must be '
        'of type string')
assert data_type in ['historic', 'realtime'], 'data_type must be'\
f' "historic" or "realtime". Got: {data_type}'
if not any([nc, station_number]):
raise Exception('Must provide either a CDIP netCDF file or a station '+
'number')
if not nc:
nc = request_netCDF(station_number, data_type)
buoy_name = nc.variables['metaStationName'][:].compressed().tobytes().decode("utf-8")
multiyear=False
if years:
if isinstance(years,int):
start_date = f'{years}-01-01'
end_date = f'{years}-12-31'
elif isinstance(years,list):
if len(years)==1:
start_date = f'{years[0]}-01-01'
end_date = f'{years[0]}-12-31'
else:
multiyear=True
if not multiyear:
data = get_netcdf_variables(nc,
start_date=start_date, end_date=end_date,
parameters=parameters,
all_2D_variables=all_2D_variables)
elif multiyear:
data={'data':{},'metadata':{}}
multiyear_data={}
multiyear_data_2D={}
for year in years:
start_date = f'{year}-01-01'
end_date = f'{year}-12-31'
year_data = get_netcdf_variables(nc,
start_date=start_date, end_date=end_date,
parameters=parameters,
all_2D_variables=all_2D_variables)
multiyear_data[year] = year_data['data']
for data_key in year_data['data'].keys():
if data_key.endswith('2D'):
data['data'][data_key]={}
for data_key2D in year_data['data'][data_key].keys():
data_list=[]
for year in years:
data2D = multiyear_data[year][data_key][data_key2D]
data_list.append(data2D)
data['data'][data_key][data_key2D]=pd.concat(data_list)
else:
data_list = [multiyear_data[year][data_key] for year in years]
data['data'][data_key] = pd.concat(data_list)
data['metadata'] = year_data['metadata']
data['metadata']['name'] = buoy_name
return data
def get_netcdf_variables(nc, start_date=None, end_date=None,
parameters=None, all_2D_variables=False):
'''
    Iterates over and extracts variables from CDIP buoy data. See
    the MHKiT CDIP example Jupyter notebook for information on available
parameters.
Parameters
----------
nc: netCDF Object
netCDF data for the given station number and data type
    start_date: string
        Start date in YYYY-MM-DD, e.g. '2012-04-01'
    end_date: string
        End date in YYYY-MM-DD, e.g. '2012-04-30'
    parameters: string or list of strings
        Parameters to return. If None will return all variables except
2D-variables. Default None.
all_2D_variables: boolean
Will return all 2D data. Enabling this will add significant
processing time. If all 2D variables are not needed it is
        recommended to pass 2D parameters of interest using the
'parameters' keyword and leave this set to False. Default False.
Returns
-------
results: dictionary
'vars1D': DataFrame
1D variables indexed by time
'metadata': dictionary
Anything not of length time
'vars2D': dictionary of DataFrames, optional
            If 2D-vars are passed in the 'parameters' key or if run
            with all_2D_variables=True, then this key will appear
            with a dictionary of DataFrames of 2D variables.
'''
assert isinstance(nc, netCDF4.Dataset), 'nc must be netCDF4 dataset'
    assert isinstance(start_date, (str, type(None))), ('start_date '
        'must be of type str')
    assert isinstance(end_date, (str, type(None))), ('end_date must be '
        'of type str')
    assert isinstance(parameters, (str, type(None), list)), ('parameters '
        'must be of type str or list of strings')
    assert isinstance(all_2D_variables, bool), ('all_2D_variables '
        'must be a boolean')
if parameters:
if isinstance(parameters,str):
parameters = [parameters]
        assert all([isinstance(param, str) for param in parameters]), ('All '
            'elements of parameters must be strings')
buoy_name = nc.variables['metaStationName'][:].compressed().tobytes().decode("utf-8")
allVariables = [var for var in nc.variables]
include_2D_variables=False
twoDimensionalVars = [ 'waveEnergyDensity', 'waveMeanDirection',
'waveA1Value', 'waveB1Value', 'waveA2Value',
'waveB2Value', 'waveCheckFactor', 'waveSpread',
'waveM2Value', 'waveN2Value']
if parameters:
params = set(parameters)
include_params = params.intersection(set(allVariables))
if params != include_params:
not_found = params.difference(include_params)
print(f'WARNING: {not_found} was not found in data.\n' \
f'Possible parameters are:\n {allVariables}')
include_params_2D = include_params.intersection(
set(twoDimensionalVars))
include_params = include_params.difference(include_params_2D)
if include_params_2D:
include_2D_variables=True
include_params.add('waveFrequency')
include_2D_vars = sorted(include_params_2D)
include_vars = sorted(include_params)
else:
include_vars = allVariables
for var in twoDimensionalVars:
include_vars.remove(var)
if all_2D_variables:
include_2D_variables=True
include_2D_vars = twoDimensionalVars
start_stamp, end_stamp =_dates_to_timestamp(nc, start_date=start_date,
end_date=end_date)
variables_by_type={}
prefixs = ['wave', 'sst', 'gps', 'dwr', 'meta']
remainingVariables = set(include_vars)
for prefix in prefixs:
variables_by_type[prefix] = [var for var in include_vars
if var.startswith(prefix)]
remainingVariables -= set(variables_by_type[prefix])
if not variables_by_type[prefix]:
del variables_by_type[prefix]
results={'data':{}, 'metadata':{}}
for prefix in variables_by_type:
var_results={}
time_variables={}
metadata={}
if prefix != 'meta':
prefixTime = nc.variables[f'{prefix}Time'][:]
masked_time = np.ma.masked_outside(prefixTime, start_stamp,
end_stamp)
mask = masked_time.mask
var_time = masked_time.compressed()
N_time = masked_time.size
else:
N_time= np.nan
for var in variables_by_type[prefix]:
variable = np.ma.filled(nc.variables[var])
if variable.size == N_time:
variable = np.ma.masked_array(variable, mask).astype(float)
time_variables[var] = variable.compressed()
else:
metadata[var] = nc.variables[var][:].compressed()
time_slice = pd.to_datetime(var_time, unit='s')
data = | pd.DataFrame(time_variables, index=time_slice) | pandas.DataFrame |
import os
import uuid
import lasio
from dlisio import dlis
from flask import render_template, Flask, session, request
from werkzeug.utils import redirect, secure_filename
import pandas as pd
import datetime
from classes import CSVprocessing, IndexType, APISupplementary, DLISprocessing, Visualization, XmlGeneration, \
Configuration, CheckFunctions, InputXMLprocessing, LASprocessing
from forms import UploadForm, VisualizeCsvForm, DLISForm, Credentials, Credentials1, TestForm
# application set up
app = Flask('__name__')
# configure upload folder
app.config['UPLOAD_PATH'] = 'uploads'
SECRET_KEY = os.urandom(32)
app.config['SECRET_KEY'] = SECRET_KEY
# clear folders
for root, dirs, files in os.walk(app.config['UPLOAD_PATH']):
for file in files:
os.remove(os.path.join(root, file))
for root, dirs, files in os.walk('errorlog'):
for file in files:
os.remove(os.path.join(root, file))
for root, dirs, files in os.walk('generatedXML'):
for file in files:
os.remove(os.path.join(root, file))
@app.route('/', methods=['GET', 'POST'])
# upload page
def upload():
form1 = UploadForm()
if form1.validate_on_submit():
# filename = secure_filename(form1.file.data.filename)
# form1.file.data.save(os.path.join(app.config['UPLOAD_PATH'], filename))
# return render_template('uploaded.html')
filename = secure_filename(form1.filename.data)
type1 = form1.filetype.data
# servicecompany = form1.servicecompany.data
# BU = form1.BU.data
# asset = form1.asset.data
# wellname = form1.wellname.data
# wellborename = form1.wellborename.data
# how to represent values at constant depth
# repr = form1.represent.data
# filename = form1.file.raw_data[0].filename
form1.file.data.save(os.path.join(app.config['UPLOAD_PATH'], filename))
session['filename'] = filename
session['type1'] = type1
# session['servicecompany'] = servicecompany
# session['BU'] = BU
# session['asset'] = asset
# session['wellname'] = wellname
# session['wellborename'] = wellborename
# session['type1'] = type1
# session['repr'] = repr
if type1 == 'csv':
# format csv
return redirect('/csv')
else:
return redirect('/uploaded')
return render_template('upload.html', form=form1)
@app.route('/csv', methods=['GET', 'POST'])
# formatting CSV because header position is not fixed
def csvhandling():
filename = session.get('filename', None)
data = pd.read_csv(os.path.join(app.config['UPLOAD_PATH'], filename))
df0 = pd.DataFrame(data)
df1 = CSVprocessing().csvpreprocess(df0)
# create a row number column
df1.insert(loc=0, column='Row number', value=df1.index)
form = VisualizeCsvForm()
if form.validate_on_submit():
# row with column headings
columnHeadingsRow = form.columns.data
# row with measurement units
unitsRow = form.measure.data
# row where data starts
dataStartRow = form.start.data
session['columnHeadingsRow'] = columnHeadingsRow
session['unitsRow'] = unitsRow
session['dataStartRow'] = dataStartRow
return redirect('/uploaded')
return render_template('csvhandle.html', column_names=df1.columns,
row_data=list(df1.iloc[:20].values), zip=zip, form=form)
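# The route above asks the user to point out the header row because raw
# logging CSVs often carry preamble lines before the column headings.  The
# helper below is only a generic pandas sketch of how such a header row could
# be guessed automatically; it is not the CSVprocessing implementation
# imported above, and the heuristic is an assumption, not project code.
def _example_guess_header_row(path, probe_rows=20):
    probe = pd.read_csv(path, header=None, nrows=probe_rows, dtype=str)
    for idx, row in probe.iterrows():
        cells = row.dropna()
        # first fully populated row in which no cell looks like a number
        if len(cells) == len(row) and not cells.str.match(r'^[-+]?\d*\.?\d+$').any():
            return idx
    return 0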
@app.route('/uploaded', methods=['GET', 'POST'])
def uploaded():
# print(1)
form3 = TestForm()
filename = session.get('filename')
type1 = session.get('type1')
wellname = ''
wellborename = ''
servicecompany = ''
runNumber = ''
creationDate = ''
indexType = ''
startDateTimeIndex = ''
endDateTimeIndex = ''
indexCurve = ''
nullValue = ''
startIndex = ''
endIndex = ''
direction = ''
dataSize = ''
asset = ''
BU = ''
if type1 == 'las':
# Read las file
lf = lasio.read(os.path.join(app.config['UPLOAD_PATH'], filename), null_policy='all')
# find index type for visualization templates
indextype, index1, index2 = IndexType().findindex(lf, type1)
dataSize = lf.data.shape[0]
for well in lf.well:
if well.descr.lower().find('null') != -1:
nullValue = well.value
elif well.descr.lower().find('company') != -1:
BU = well.value
elif well.descr.lower().find('well') != -1:
wellname = well.value
elif well.descr.lower().find('field') != -1:
asset = well.value
elif well.descr.lower().find('wellbore') != -1:
wellborename = well.value
elif well.descr.lower().find('service company') != -1:
servicecompany = well.value
elif well.descr.lower().find('run number') != -1:
runNumber = well.value
elif well.descr.lower().find('date') != -1 or well.descr.lower().find('date1') != -1:
creationDate = well.value
if lf.curves[0].descr.lower().find('dept') != -1:
indexCurve = lf.curves[0].mnemonic
indexType = 'measured depth'
startIndex = lf.curves[0].data[0]
endIndex = lf.curves[0].data[len(lf.curves[0]) - 1]
if (float(startIndex) - float(endIndex)) > 0:
direction = 'decreasing'
elif (float(startIndex) - float(endIndex)) < 0:
direction = 'increasing'
else:
direction = 'not included'
elif lf.curves[0].descr.lower().find('time') != -1:
indexCurve = lf.curves[0].mnemonic
indexType = 'date time'
startDateTimeIndex = lf.curves[0].data[0]
endDateTimeIndex = lf.curves[0].data[len(lf.curves[0]) - 1]
timedelta = (datetime.datetime.strptime(startDateTimeIndex, "%Y-%m-%dT%H:%M:%S.%f-05:00") -\
datetime.datetime.strptime(endDateTimeIndex, "%Y-%m-%dT%H:%M:%S.%f-05:00"))
if timedelta.days < 0 or timedelta.seconds < 0:
direction = 'increasing'
elif timedelta.days > 0 and timedelta.seconds > 0:
direction = 'decreasing'
else:
direction = 'not included'
if str(lf.curves[0].mnemonic).lower().find(r'tim') != -1:
indexmain = 'Time'
elif str(lf.curves[0].mnemonic).lower().find(r'dep') != -1:
indexmain = 'Depth'
if index1 is None or index2 is None:
operation = 'Impossible to detect'
else:
RIH, POOH = LASprocessing().splitlogs(lf, repr)
if len(RIH) != 0 and len(POOH) == 0:
operation = 'RIH'
elif len(RIH) == 0 and len(POOH) != 0:
operation = 'POOH'
else:
operation = 'RIH and POOH'
session['operation'] = operation
# General Information
data = [['File Name', filename], ['File Type', type1], ['Index Type', indexmain],
['Number of Curves', len(lf.curves)], ['Operation', operation], ['Data nodes', dataSize]]
df = pd.DataFrame(data=data, columns=['Parameter', 'Populated'])
# File Information
data1 = [['Well name', wellname], ['Wellbore name', wellborename], ['Business Unit', BU],
['Field', asset], ['Service Company', servicecompany], ['Run Number', runNumber],
['Creation Date', creationDate], ['Null Value', nullValue], ['Direction', direction]]
df1 = pd.DataFrame(data=data1, columns=['Parameter', 'Populated'])
# which template to show
template = APISupplementary().uploadedpage(index1, index2)
if request.method == 'POST':
df2 = pd.DataFrame(form3.data['images'])
if df2.values[0][0] == '':
session['wellname'] = df1.values[0][1]
else:
session['wellname'] = df2.values[0][0]
if df2.values[1][0] == '':
session['wellborename'] = df1.values[1][1]
else:
session['wellborename'] = df2.values[1][0]
if df2.values[2][0] == '':
session['Business Unit'] = df1.values[2][1]
else:
session['Business Unit'] = df2.values[2][0]
if df2.values[3][0] == '':
session['Field'] = df1.values[3][1]
else:
session['Field'] = df2.values[3][0]
if df2.values[4][0] == '':
session['Service Company'] = df1.values[4][1]
else:
session['Service Company'] = df2.values[4][0]
if df2.values[5][0] == '':
session['Run Number'] = df1.values[5][1]
else:
session['Run Number'] = df2.values[5][0]
if df2.values[6][0] == '':
session['Creation Date'] = df1.values[6][1]
else:
session['Creation Date'] = df2.values[6][0]
if df2.values[7][0] == '':
session['Null Value'] = df1.values[7][1]
else:
session['Null Value'] = df2.values[7][0]
if df2.values[8][0] == '':
session['Direction'] = df1.values[8][1]
else:
session['Direction'] = df2.values[8][0]
session['indexType'] = indexType
session['startDateTimeIndex'] = startDateTimeIndex
session['endDateTimeIndex'] = endDateTimeIndex
session['indexCurve'] = indexCurve
session['nullValue'] = nullValue
session['startIndex'] = startIndex
session['endIndex'] = endIndex
session['dataSize'] = dataSize
return redirect('/export')
return render_template(template, column_names=df.columns,
row_data=list(df.values), zip=zip, column_names1=df1.columns,
row_data1=list(df1.values), zip1=zip, form=form3)
elif type1 == 'csv':
data = pd.read_csv(os.path.join(app.config['UPLOAD_PATH'], filename))
df0 = pd.DataFrame(data)
columnHeadingsRow = session.get('columnHeadingsRow', None)
unitsRow = ''
dataStartRow = int(session.get('dataStartRow'))
df2 = CSVprocessing().csvcolumns(df0, columnHeadingsRow, unitsRow, dataStartRow)
dataSize = df2.shape[0]
# index type
indextype, index1, index2 = IndexType().findindex(df2, type1)
for col in df2.columns:
if col.lower().find('dept') != -1:
indexCurve = col
indexType = 'measured depth'
startIndex = str(df2[col].iloc[0])
endIndex = str(df2[col].iloc[len(df2) - 1])
break
elif col.lower().find('time') != -1:
indexCurve = col
indexType = 'date time'
startDateTimeIndex = str(df2[col].iloc[0])
endDateTimeIndex = str(df2[col].iloc[len(df2) - 1])
break
if df2.columns[0].lower().find('dept') != -1:
indexCurve = df2.columns[0]
indexType = 'measured depth'
startIndex = df2[df2.columns[0]].iloc[0]
endIndex = df2[df2.columns[0]].iloc[-1]
if (float(startIndex) - float(endIndex)) > 0:
direction = 'decreasing'
elif (float(startIndex) - float(endIndex)) < 0:
direction = 'increasing'
else:
direction = 'not included'
elif df2.columns[0].lower().find('time') != -1:
indexCurve = df2.columns[0]
indexType = 'date time'
startDateTimeIndex = df2[df2.columns[0]].iloc[0]
endDateTimeIndex = df2[df2.columns[0]].iloc[-1]
timedelta = (datetime.datetime.strptime(startDateTimeIndex, "%Y-%m-%dT%H:%M:%S.%f-05:00") -\
datetime.datetime.strptime(endDateTimeIndex, "%Y-%m-%dT%H:%M:%S.%f-05:00"))
if timedelta.days < 0 or timedelta.seconds < 0:
direction = 'increasing'
elif timedelta.days > 0 and timedelta.seconds > 0:
direction = 'decreasing'
else:
direction = 'not included'
# determine operation type - RIH/POOH
operation = CSVprocessing().operationDefine(index1, index2, df2)
session['operation'] = operation
# dataframe for file information
data = [['File Name', filename], ['File Type', type1], ['Index Type', indextype],
['Number of Curves', len(df2.columns)],
['Operation', operation], ['Data nodes', dataSize]]
df = pd.DataFrame(data=data, columns=['Parameter', 'Value'])
data1 = [['Well name', wellname], ['Wellbore name', wellborename], ['Business Unit', BU],
['Field', asset], ['Service Company', servicecompany], ['Run Number', runNumber],
['Creation Date', creationDate], ['Null Value', nullValue], ['Direction', direction]]
df1 = pd.DataFrame(data=data1, columns=['Parameter', 'Populated'])
template = APISupplementary().uploadedpage(index1, index2)
if request.method == 'POST':
df3 = | pd.DataFrame(form3.data['images']) | pandas.DataFrame |
from unittest.case import TestCase
from pandas import Series
from probability.utils import series_is_binary
class TestUtils(TestCase):
def setUp(self) -> None:
self.int_binary = Series(data=[0] * 4 + [1] * 6)
self.float_binary = self.int_binary.astype(float)
self.bool_binary = self.int_binary.astype(bool)
self.int_binary_zeros = | Series(data=[0] * 10) | pandas.Series |
"""
.. module:: trend
:synopsis: Trend Indicators.
.. moduleauthor:: <NAME> (Bukosabino)
"""
import numpy as np
import pandas as pd
from ta.utils import IndicatorMixin, ema, get_min_max
class AroonIndicator(IndicatorMixin):
"""Aroon Indicator
Identify when trends are likely to change direction.
Aroon Up = ((N - Days Since N-day High) / N) x 100
Aroon Down = ((N - Days Since N-day Low) / N) x 100
Aroon Indicator = Aroon Up - Aroon Down
https://www.investopedia.com/terms/a/aroon.asp
Args:
close(pandas.Series): dataset 'Close' column.
n(int): n period.
fillna(bool): if True, fill nan values.
"""
def __init__(self, close: pd.Series, n: int = 25, fillna: bool = False):
self._close = close
self._n = n
self._fillna = fillna
self._run()
def _run(self):
rolling_close = self._close.rolling(self._n, min_periods=0)
self._aroon_up = rolling_close.apply(
lambda x: float(np.argmax(x) + 1) / self._n * 100, raw=True)
self._aroon_down = rolling_close.apply(
lambda x: float(np.argmin(x) + 1) / self._n * 100, raw=True)
def aroon_up(self) -> pd.Series:
"""Aroon Up Channel
Returns:
pandas.Series: New feature generated.
"""
aroon_up = self._check_fillna(self._aroon_up, value=0)
return pd.Series(aroon_up, name=f'aroon_up_{self._n}')
def aroon_down(self) -> pd.Series:
"""Aroon Down Channel
Returns:
pandas.Series: New feature generated.
"""
aroon_down = self._check_fillna(self._aroon_down, value=0)
return pd.Series(aroon_down, name=f'aroon_down_{self._n}')
def aroon_indicator(self) -> pd.Series:
"""Aroon Indicator
Returns:
pandas.Series: New feature generated.
"""
aroon_diff = self._aroon_up - self._aroon_down
aroon_diff = self._check_fillna(aroon_diff, value=0)
return pd.Series(aroon_diff, name=f'aroon_ind_{self._n}')
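# Usage sketch on a tiny synthetic series (not part of the library's tests):
# with n=5 and a strictly increasing close, the newest bar is always the
# 5-day high once the window is full, so aroon_up() sits at 100 while
# aroon_down() stays low.
def _example_aroon_usage():
    close = pd.Series([1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0])
    indicator = AroonIndicator(close=close, n=5)
    return indicator.aroon_up(), indicator.aroon_down(), indicator.aroon_indicator()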
class MACD(IndicatorMixin):
"""Moving Average Convergence Divergence (MACD)
Is a trend-following momentum indicator that shows the relationship between
two moving averages of prices.
https://school.stockcharts.com/doku.php?id=technical_indicators:moving_average_convergence_divergence_macd
Args:
close(pandas.Series): dataset 'Close' column.
n_fast(int): n period short-term.
n_slow(int): n period long-term.
n_sign(int): n period to signal.
fillna(bool): if True, fill nan values.
"""
def __init__(self,
close: pd.Series,
n_slow: int = 26,
n_fast: int = 12,
n_sign: int = 9,
fillna: bool = False):
self._close = close
self._n_slow = n_slow
self._n_fast = n_fast
self._n_sign = n_sign
self._fillna = fillna
self._run()
def _run(self):
self._emafast = ema(self._close, self._n_fast, self._fillna)
self._emaslow = ema(self._close, self._n_slow, self._fillna)
self._macd = self._emafast - self._emaslow
self._macd_signal = ema(self._macd, self._n_sign, self._fillna)
self._macd_diff = self._macd - self._macd_signal
def macd(self) -> pd.Series:
"""MACD Line
Returns:
pandas.Series: New feature generated.
"""
macd = self._check_fillna(self._macd, value=0)
return pd.Series(macd, name=f'MACD_{self._n_fast}_{self._n_slow}')
def macd_signal(self) -> pd.Series:
"""Signal Line
Returns:
pandas.Series: New feature generated.
"""
macd_signal = self._check_fillna(self._macd_signal, value=0)
return pd.Series(macd_signal, name=f'MACD_sign_{self._n_fast}_{self._n_slow}')
def macd_diff(self) -> pd.Series:
"""MACD Histogram
Returns:
pandas.Series: New feature generated.
"""
macd_diff = self._check_fillna(self._macd_diff, value=0)
return pd.Series(macd_diff, name=f'MACD_diff_{self._n_fast}_{self._n_slow}')
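# Usage sketch with synthetic prices (not from the library's test suite): the
# histogram returned by macd_diff() is the gap between macd() and
# macd_signal(), and a sign change in that gap is the usual crossover signal.
def _example_macd_usage():
    close = pd.Series(np.linspace(100.0, 120.0, 60))
    indicator = MACD(close=close, n_slow=26, n_fast=12, n_sign=9)
    return indicator.macd(), indicator.macd_signal(), indicator.macd_diff()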
class EMAIndicator(IndicatorMixin):
"""EMA - Exponential Moving Average
Args:
close(pandas.Series): dataset 'Close' column.
n(int): n period.
fillna(bool): if True, fill nan values.
"""
def __init__(self, close: pd.Series, n: int = 14, fillna: bool = False):
self._close = close
self._n = n
self._fillna = fillna
def ema_indicator(self) -> pd.Series:
"""Exponential Moving Average (EMA)
Returns:
pandas.Series: New feature generated.
"""
ema_ = ema(self._close, self._n, self._fillna)
return pd.Series(ema_, name=f'ema_{self._n}')
class TRIXIndicator(IndicatorMixin):
"""Trix (TRIX)
Shows the percent rate of change of a triple exponentially smoothed moving
average.
http://stockcharts.com/school/doku.php?id=chart_school:technical_indicators:trix
Args:
close(pandas.Series): dataset 'Close' column.
n(int): n period.
fillna(bool): if True, fill nan values.
"""
def __init__(self, close: pd.Series, n: int = 15, fillna: bool = False):
self._close = close
self._n = n
self._fillna = fillna
self._run()
def _run(self):
ema1 = ema(self._close, self._n, self._fillna)
ema2 = ema(ema1, self._n, self._fillna)
ema3 = ema(ema2, self._n, self._fillna)
self._trix = (ema3 - ema3.shift(1, fill_value=ema3.mean())) / ema3.shift(1, fill_value=ema3.mean())
self._trix *= 100
def trix(self) -> pd.Series:
"""Trix (TRIX)
Returns:
pandas.Series: New feature generated.
"""
trix = self._check_fillna(self._trix, value=0)
return pd.Series(trix, name=f'trix_{self._n}')
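# Usage sketch: TRIX is the one-bar percent change of a triple-smoothed EMA,
# scaled by 100, so for a synthetic series growing a constant 1% per bar the
# indicator should settle near 1.0 once the smoothing has converged.
def _example_trix_usage():
    close = pd.Series(100.0 * 1.01 ** np.arange(120))
    return TRIXIndicator(close=close, n=15).trix()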
class MassIndex(IndicatorMixin):
"""Mass Index (MI)
It uses the high-low range to identify trend reversals based on range
expansions. It identifies range bulges that can foreshadow a reversal of
the current trend.
http://stockcharts.com/school/doku.php?id=chart_school:technical_indicators:mass_index
Args:
high(pandas.Series): dataset 'High' column.
low(pandas.Series): dataset 'Low' column.
n(int): n low period.
n2(int): n high period.
fillna(bool): if True, fill nan values.
"""
def __init__(self, high: pd.Series, low: pd.Series, n: int = 9, n2: int = 25, fillna: bool = False):
self._high = high
self._low = low
self._n = n
self._n2 = n2
self._fillna = fillna
self._run()
def _run(self):
amplitude = self._high - self._low
ema1 = ema(amplitude, self._n, self._fillna)
ema2 = ema(ema1, self._n, self._fillna)
mass = ema1 / ema2
self._mass = mass.rolling(self._n2, min_periods=0).sum()
def mass_index(self) -> pd.Series:
"""Mass Index (MI)
Returns:
pandas.Series: New feature generated.
"""
mass = self._check_fillna(self._mass, value=0)
return pd.Series(mass, name=f'mass_index_{self._n}_{self._n2}')
class IchimokuIndicator(IndicatorMixin):
"""Ichimoku Kinkō Hyō (Ichimoku)
http://stockcharts.com/school/doku.php?id=chart_school:technical_indicators:ichimoku_cloud
Args:
high(pandas.Series): dataset 'High' column.
low(pandas.Series): dataset 'Low' column.
n1(int): n1 low period.
n2(int): n2 medium period.
n3(int): n3 high period.
visual(bool): if True, shift n2 values.
fillna(bool): if True, fill nan values.
"""
def __init__(self, high: pd.Series, low: pd.Series, n1: int = 9, n2: int = 26, n3: int = 52,
visual: bool = False, fillna: bool = False):
self._high = high
self._low = low
self._n1 = n1
self._n2 = n2
self._n3 = n3
self._visual = visual
self._fillna = fillna
def ichimoku_a(self) -> pd.Series:
"""Senkou Span A (Leading Span A)
Returns:
pandas.Series: New feature generated.
"""
conv = 0.5 * (self._high.rolling(self._n1, min_periods=0).max()
+ self._low.rolling(self._n1, min_periods=0).min())
base = 0.5 * (self._high.rolling(self._n2, min_periods=0).max()
+ self._low.rolling(self._n2, min_periods=0).min())
spana = 0.5 * (conv + base)
spana = spana.shift(self._n2, fill_value=spana.mean()) if self._visual else spana
spana = self._check_fillna(spana, value=-1)
return pd.Series(spana, name=f'ichimoku_a_{self._n1}_{self._n2}')
def ichimoku_b(self) -> pd.Series:
"""Senkou Span B (Leading Span B)
Returns:
pandas.Series: New feature generated.
"""
spanb = 0.5 * (self._high.rolling(self._n3, min_periods=0).max()
+ self._low.rolling(self._n3, min_periods=0).min())
spanb = spanb.shift(self._n2, fill_value=spanb.mean()) if self._visual else spanb
spanb = self._check_fillna(spanb, value=-1)
return pd.Series(spanb, name=f'ichimoku_b_{self._n1}_{self._n2}')
class KSTIndicator(IndicatorMixin):
"""KST Oscillator (KST Signal)
It is useful to identify major stock market cycle junctures because its
formula is weighed to be more greatly influenced by the longer and more
dominant time spans, in order to better reflect the primary swings of stock
market cycle.
http://stockcharts.com/school/doku.php?id=chart_school:technical_indicators:know_sure_thing_kst
Args:
close(pandas.Series): dataset 'Close' column.
r1(int): r1 period.
r2(int): r2 period.
r3(int): r3 period.
r4(int): r4 period.
n1(int): n1 smoothed period.
n2(int): n2 smoothed period.
n3(int): n3 smoothed period.
n4(int): n4 smoothed period.
nsig(int): n period to signal.
fillna(bool): if True, fill nan values.
"""
def __init__(self, close: pd.Series, r1: int = 10, r2: int = 15, r3: int = 20, r4: int = 30,
n1: int = 10, n2: int = 10, n3: int = 10, n4: int = 15, nsig: int = 9,
fillna: bool = False):
self._close = close
self._r1 = r1
self._r2 = r2
self._r3 = r3
self._r4 = r4
self._n1 = n1
self._n2 = n2
self._n3 = n3
self._n4 = n4
self._nsig = nsig
self._fillna = fillna
self._run()
def _run(self):
rocma1 = ((self._close - self._close.shift(self._r1, fill_value=self._close.mean()))
/ self._close.shift(self._r1, fill_value=self._close.mean())).rolling(self._n1, min_periods=0).mean()
rocma2 = ((self._close - self._close.shift(self._r2, fill_value=self._close.mean()))
/ self._close.shift(self._r2, fill_value=self._close.mean())).rolling(self._n2, min_periods=0).mean()
rocma3 = ((self._close - self._close.shift(self._r3, fill_value=self._close.mean()))
/ self._close.shift(self._r3, fill_value=self._close.mean())).rolling(self._n3, min_periods=0).mean()
rocma4 = ((self._close - self._close.shift(self._r4, fill_value=self._close.mean()))
/ self._close.shift(self._r4, fill_value=self._close.mean())).rolling(self._n4, min_periods=0).mean()
self._kst = 100 * (rocma1 + 2 * rocma2 + 3 * rocma3 + 4 * rocma4)
self._kst_sig = self._kst.rolling(self._nsig, min_periods=0).mean()
def kst(self) -> pd.Series:
"""Know Sure Thing (KST)
Returns:
pandas.Series: New feature generated.
"""
kst = self._check_fillna(self._kst, value=0)
return pd.Series(kst, name='kst')
def kst_sig(self) -> pd.Series:
"""Signal Line Know Sure Thing (KST)
nsig-period SMA of KST
Returns:
pandas.Series: New feature generated.
"""
kst_sig = self._check_fillna(self._kst_sig, value=0)
return pd.Series(kst_sig, name='kst_sig')
def kst_diff(self) -> pd.Series:
"""Diff Know Sure Thing (KST)
KST - Signal_KST
Returns:
pandas.Series: New feature generated.
"""
kst_diff = self._kst - self._kst_sig
kst_diff = self._check_fillna(kst_diff, value=0)
return pd.Series(kst_diff, name='kst_diff')
class DPOIndicator(IndicatorMixin):
"""Detrended Price Oscillator (DPO)
Is an indicator designed to remove trend from price and make it easier to
identify cycles.
http://stockcharts.com/school/doku.php?id=chart_school:technical_indicators:detrended_price_osci
Args:
close(pandas.Series): dataset 'Close' column.
n(int): n period.
fillna(bool): if True, fill nan values.
"""
def __init__(self, close: pd.Series, n: int = 20, fillna: bool = False):
self._close = close
self._n = n
self._fillna = fillna
self._run()
def _run(self):
self._dpo = (self._close.shift(int((0.5 * self._n) + 1), fill_value=self._close.mean())
- self._close.rolling(self._n, min_periods=0).mean())
def dpo(self) -> pd.Series:
"""Detrended Price Oscillator (DPO)
Returns:
pandas.Series: New feature generated.
"""
dpo = self._check_fillna(self._dpo, value=0)
return pd.Series(dpo, name='dpo_'+str(self._n))
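# Usage sketch: dpo() subtracts the n-bar simple moving average from the
# close observed int(0.5*n)+1 bars earlier, which strips the slow drift from
# the synthetic sine-plus-trend series below.
def _example_dpo_usage():
    close = pd.Series(np.sin(np.linspace(0.0, 12.0, 200)) + np.linspace(0.0, 5.0, 200))
    return DPOIndicator(close=close, n=20).dpo()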
class CCIIndicator(IndicatorMixin):
"""Commodity Channel Index (CCI)
CCI measures the difference between a security's price change and its
average price change. High positive readings indicate that prices are well
above their average, which is a show of strength. Low negative readings
indicate that prices are well below their average, which is a show of
weakness.
http://stockcharts.com/school/doku.php?id=chart_school:technical_indicators:commodity_channel_index_cci
Args:
high(pandas.Series): dataset 'High' column.
low(pandas.Series): dataset 'Low' column.
close(pandas.Series): dataset 'Close' column.
n(int): n period.
c(int): constant.
fillna(bool): if True, fill nan values.
"""
def __init__(self,
high: pd.Series,
low: pd.Series,
close: pd.Series,
n: int = 20,
c: float = 0.015,
fillna: bool = False):
self._high = high
self._low = low
self._close = close
self._n = n
self._c = c
self._fillna = fillna
self._run()
def _run(self):
def _mad(x):
return np.mean(np.abs(x-np.mean(x)))
pp = (self._high + self._low + self._close) / 3.0
self._cci = ((pp - pp.rolling(self._n, min_periods=0).mean())
/ (self._c * pp.rolling(self._n, min_periods=0).apply(_mad, True)))
def cci(self) -> pd.Series:
"""Commodity Channel Index (CCI)
Returns:
pandas.Series: New feature generated.
"""
cci = self._check_fillna(self._cci, value=0)
return pd.Series(cci, name='cci')
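# Usage sketch on synthetic OHLC-like data; the private _mad helper above is
# the mean absolute deviation of a rolling window around its own mean, which
# is the denominator of the classic CCI formula.
def _example_cci_usage():
    high = pd.Series(np.linspace(10.0, 20.0, 60)) + 0.5
    low = high - 1.0
    close = high - 0.25
    return CCIIndicator(high=high, low=low, close=close, n=20, c=0.015).cci()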
class ADXIndicator(IndicatorMixin):
"""Average Directional Movement Index (ADX)
The Plus Directional Indicator (+DI) and Minus Directional Indicator (-DI)
are derived from smoothed averages of these differences, and measure trend
direction over time. These two indicators are often referred to
collectively as the Directional Movement Indicator (DMI).
The Average Directional Index (ADX) is in turn derived from the smoothed
averages of the difference between +DI and -DI, and measures the strength
of the trend (regardless of direction) over time.
Using these three indicators together, chartists can determine both the
direction and strength of the trend.
http://stockcharts.com/school/doku.php?id=chart_school:technical_indicators:average_directional_index_adx
Args:
high(pandas.Series): dataset 'High' column.
low(pandas.Series): dataset 'Low' column.
close(pandas.Series): dataset 'Close' column.
n(int): n period.
fillna(bool): if True, fill nan values.
"""
def __init__(self, high: pd.Series, low: pd.Series, close: pd.Series, n: int = 14, fillna: bool = False):
self._high = high
self._low = low
self._close = close
self._n = n
self._fillna = fillna
self._run()
def _run(self):
        assert self._n != 0, "N may not be 0 and is %r" % self._n
cs = self._close.shift(1)
pdm = get_min_max(self._high, cs, 'max')
pdn = get_min_max(self._low, cs, 'min')
tr = pdm - pdn
self._trs_initial = np.zeros(self._n-1)
self._trs = np.zeros(len(self._close) - (self._n - 1))
self._trs[0] = tr.dropna()[0:self._n].sum()
tr = tr.reset_index(drop=True)
for i in range(1, len(self._trs)-1):
self._trs[i] = self._trs[i-1] - (self._trs[i-1]/float(self._n)) + tr[self._n+i]
up = self._high - self._high.shift(1)
dn = self._low.shift(1) - self._low
pos = abs(((up > dn) & (up > 0)) * up)
neg = abs(((dn > up) & (dn > 0)) * dn)
self._dip = np.zeros(len(self._close) - (self._n - 1))
self._dip[0] = pos.dropna()[0:self._n].sum()
pos = pos.reset_index(drop=True)
for i in range(1, len(self._dip)-1):
self._dip[i] = self._dip[i-1] - (self._dip[i-1]/float(self._n)) + pos[self._n+i]
self._din = np.zeros(len(self._close) - (self._n - 1))
self._din[0] = neg.dropna()[0:self._n].sum()
neg = neg.reset_index(drop=True)
for i in range(1, len(self._din)-1):
self._din[i] = self._din[i-1] - (self._din[i-1]/float(self._n)) + neg[self._n+i]
def adx(self) -> pd.Series:
"""Average Directional Index (ADX)
Returns:
pandas.Series: New feature generated.
"""
dip = np.zeros(len(self._trs))
for i in range(len(self._trs)):
dip[i] = 100 * (self._dip[i]/self._trs[i])
din = np.zeros(len(self._trs))
for i in range(len(self._trs)):
din[i] = 100 * (self._din[i]/self._trs[i])
dx = 100 * np.abs((dip - din) / (dip + din))
adx = np.zeros(len(self._trs))
adx[self._n] = dx[0:self._n].mean()
for i in range(self._n+1, len(adx)):
adx[i] = ((adx[i-1] * (self._n - 1)) + dx[i-1]) / float(self._n)
adx = np.concatenate((self._trs_initial, adx), axis=0)
self._adx = pd.Series(data=adx, index=self._close.index)
adx = self._check_fillna(self._adx, value=20)
return pd.Series(adx, name='adx')
def adx_pos(self) -> pd.Series:
"""Plus Directional Indicator (+DI)
Returns:
pandas.Series: New feature generated.
"""
dip = np.zeros(len(self._close))
for i in range(1, len(self._trs)-1):
dip[i+self._n] = 100 * (self._dip[i]/self._trs[i])
adx_pos = self._check_fillna(pd.Series(dip, index=self._close.index), value=20)
return pd.Series(adx_pos, name='adx_pos')
def adx_neg(self) -> pd.Series:
"""Minus Directional Indicator (-DI)
Returns:
pandas.Series: New feature generated.
"""
din = np.zeros(len(self._close))
for i in range(1, len(self._trs)-1):
din[i+self._n] = 100 * (self._din[i]/self._trs[i])
adx_neg = self._check_fillna( | pd.Series(din, index=self._close.index) | pandas.Series |
from flask import Flask, render_template, jsonify, request
from flask_pymongo import PyMongo
from flask_cors import CORS, cross_origin
import json
import requests  # used by catch_all() below when proxying to the dev server
import collections
import numpy as np
import re
from numpy import array
from statistics import mode
import pandas as pd
import warnings
import copy
from joblib import Memory
from itertools import chain
import ast
import timeit
from sklearn.neighbors import KNeighborsClassifier # 1 neighbors
from sklearn.svm import SVC # 1 svm
from sklearn.naive_bayes import GaussianNB # 1 naive bayes
from sklearn.neural_network import MLPClassifier # 1 neural network
from sklearn.linear_model import LogisticRegression # 1 linear model
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis, QuadraticDiscriminantAnalysis # 2 discriminant analysis
from sklearn.ensemble import RandomForestClassifier, ExtraTreesClassifier, AdaBoostClassifier, GradientBoostingClassifier # 4 ensemble models
from joblib import Parallel, delayed
import multiprocessing
from sklearn.pipeline import make_pipeline
from sklearn import model_selection
from sklearn.manifold import MDS
from sklearn.manifold import TSNE
from sklearn.metrics import matthews_corrcoef
from sklearn.metrics import log_loss
from sklearn.metrics import fbeta_score
from sklearn.metrics import accuracy_score
from sklearn.metrics import precision_score
from sklearn.metrics import recall_score
from sklearn.metrics import f1_score
from imblearn.metrics import geometric_mean_score
import umap
from sklearn.metrics import classification_report
from sklearn.preprocessing import scale
import eli5
from eli5.sklearn import PermutationImportance
from sklearn.feature_selection import SelectKBest
from sklearn.feature_selection import chi2
from sklearn.feature_selection import RFE
from sklearn.decomposition import PCA
from mlxtend.classifier import StackingCVClassifier
from mlxtend.feature_selection import ColumnSelector
from sklearn.model_selection import GridSearchCV
from sklearn.model_selection import ShuffleSplit
from scipy.spatial import procrustes
# This block of code is for the connection between the server, the database, and the client (plus routing).
# Access MongoDB
app = Flask(__name__)
app.config["MONGO_URI"] = "mongodb://localhost:27017/mydb"
mongo = PyMongo(app)
cors = CORS(app, resources={r"/data/*": {"origins": "*"}})
# Retrieve data from client
@cross_origin(origin='localhost',headers=['Content-Type','Authorization'])
@app.route('/data/Reset', methods=["GET", "POST"])
def Reset():
global DataRawLength
global DataResultsRaw
global previousState
previousState = []
global filterActionFinal
filterActionFinal = ''
global keySpecInternal
keySpecInternal = 1
global dataSpacePointsIDs
dataSpacePointsIDs = []
global previousStateActive
previousStateActive = []
global StanceTest
StanceTest = False
global status
status = True
global factors
factors = [1,0,0,1,0,0,1,0,0,1,0,0,0,0,0,1,0,0,0,1,1,1]
global KNNModelsCount
global SVCModelsCount
global GausNBModelsCount
global MLPModelsCount
global LRModelsCount
global LDAModelsCount
global QDAModelsCount
global RFModelsCount
global ExtraTModelsCount
global AdaBModelsCount
global GradBModelsCount
global keyData
keyData = 0
KNNModelsCount = 0
SVCModelsCount = 576
GausNBModelsCount = 736
MLPModelsCount = 1236
LRModelsCount = 1356
LDAModelsCount = 1996
QDAModelsCount = 2196
RFModelsCount = 2446
ExtraTModelsCount = 2606
AdaBModelsCount = 2766
GradBModelsCount = 2926
global XData
XData = []
global yData
yData = []
global XDataStored
XDataStored = []
global yDataStored
yDataStored = []
global detailsParams
detailsParams = []
global algorithmList
algorithmList = []
global ClassifierIDsList
ClassifierIDsList = ''
# Initializing models
global resultsList
resultsList = []
global RetrieveModelsList
RetrieveModelsList = []
global allParametersPerformancePerModel
allParametersPerformancePerModel = []
global all_classifiers
all_classifiers = []
global crossValidation
crossValidation = 5
# models
global KNNModels
KNNModels = []
global RFModels
RFModels = []
global scoring
scoring = {'accuracy': 'accuracy', 'precision_micro': 'precision_micro', 'precision_macro': 'precision_macro', 'precision_weighted': 'precision_weighted', 'recall_micro': 'recall_micro', 'recall_macro': 'recall_macro', 'recall_weighted': 'recall_weighted', 'roc_auc_ovo_weighted': 'roc_auc_ovo_weighted'}
global loopFeatures
loopFeatures = 2
global results
results = []
global resultsMetrics
resultsMetrics = []
global parametersSelData
parametersSelData = []
global target_names
target_names = []
global target_namesLoc
target_namesLoc = []
return 'The reset was done!'
# Retrieve data from client and select the correct data set
@cross_origin(origin='localhost',headers=['Content-Type','Authorization'])
@app.route('/data/ServerRequest', methods=["GET", "POST"])
def RetrieveFileName():
global DataRawLength
global DataResultsRaw
global DataResultsRawTest
global DataRawLengthTest
fileName = request.get_data().decode('utf8').replace("'", '"')
global keySpecInternal
keySpecInternal = 1
global filterActionFinal
filterActionFinal = ''
global dataSpacePointsIDs
dataSpacePointsIDs = []
global RANDOM_SEED
RANDOM_SEED = 42
global keyData
keyData = 0
global XData
XData = []
global previousState
previousState = []
global previousStateActive
previousStateActive = []
global status
status = True
global yData
yData = []
global XDataStored
XDataStored = []
global yDataStored
yDataStored = []
global filterDataFinal
filterDataFinal = 'mean'
global ClassifierIDsList
ClassifierIDsList = ''
global algorithmList
algorithmList = []
global detailsParams
detailsParams = []
# Initializing models
global RetrieveModelsList
RetrieveModelsList = []
global resultsList
resultsList = []
global allParametersPerformancePerModel
allParametersPerformancePerModel = []
global all_classifiers
all_classifiers = []
global scoring
scoring = {'accuracy': 'accuracy', 'precision_micro': 'precision_micro', 'precision_macro': 'precision_macro', 'precision_weighted': 'precision_weighted', 'recall_micro': 'recall_micro', 'recall_macro': 'recall_macro', 'recall_weighted': 'recall_weighted', 'roc_auc_ovo_weighted': 'roc_auc_ovo_weighted'}
global loopFeatures
loopFeatures = 2
# models
global KNNModels
global SVCModels
global GausNBModels
global MLPModels
global LRModels
global LDAModels
global QDAModels
global RFModels
global ExtraTModels
global AdaBModels
global GradBModels
KNNModels = []
SVCModels = []
GausNBModels = []
MLPModels = []
LRModels = []
LDAModels = []
QDAModels = []
RFModels = []
ExtraTModels = []
AdaBModels = []
GradBModels = []
global results
results = []
global resultsMetrics
resultsMetrics = []
global parametersSelData
parametersSelData = []
global StanceTest
StanceTest = False
global target_names
target_names = []
global target_namesLoc
target_namesLoc = []
DataRawLength = -1
DataRawLengthTest = -1
data = json.loads(fileName)
if data['fileName'] == 'HeartC':
CollectionDB = mongo.db.HeartC.find()
elif data['fileName'] == 'StanceC':
StanceTest = True
CollectionDB = mongo.db.StanceC.find()
CollectionDBTest = mongo.db.StanceCTest.find()
elif data['fileName'] == 'DiabetesC':
CollectionDB = mongo.db.diabetesC.find()
elif data['fileName'] == 'BreastC':
CollectionDB = mongo.db.breastC.find()
elif data['fileName'] == 'WineC':
CollectionDB = mongo.db.WineC.find()
elif data['fileName'] == 'ContraceptiveC':
CollectionDB = mongo.db.ContraceptiveC.find()
elif data['fileName'] == 'VehicleC':
CollectionDB = mongo.db.VehicleC.find()
elif data['fileName'] == 'BiodegC':
StanceTest = True
CollectionDB = mongo.db.biodegC.find()
CollectionDBTest = mongo.db.biodegCTest.find()
else:
CollectionDB = mongo.db.IrisC.find()
DataResultsRaw = []
for index, item in enumerate(CollectionDB):
item['_id'] = str(item['_id'])
item['InstanceID'] = index
DataResultsRaw.append(item)
DataRawLength = len(DataResultsRaw)
DataResultsRawTest = []
if (StanceTest):
for index, item in enumerate(CollectionDBTest):
item['_id'] = str(item['_id'])
item['InstanceID'] = index
DataResultsRawTest.append(item)
DataRawLengthTest = len(DataResultsRawTest)
DataSetSelection()
return 'Everything is okay'
def Convert(lst):
it = iter(lst)
res_dct = dict(zip(it, it))
return res_dct
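# Worked example: Convert(['a', 1, 'b', 2]) returns {'a': 1, 'b': 2}; with an
# odd-length list the trailing element is silently dropped by zip().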
# Retrieve data set from client
@cross_origin(origin='localhost',headers=['Content-Type','Authorization'])
@app.route('/data/SendtoSeverDataSet', methods=["GET", "POST"])
def SendToServerData():
uploadedData = request.get_data().decode('utf8').replace("'", '"')
uploadedDataParsed = json.loads(uploadedData)
DataResultsRaw = uploadedDataParsed['uploadedData']
DataResults = copy.deepcopy(DataResultsRaw)
for dictionary in DataResultsRaw:
for key in dictionary.keys():
if (key.find('*') != -1):
target = key
continue
continue
DataResultsRaw.sort(key=lambda x: x[target], reverse=True)
DataResults.sort(key=lambda x: x[target], reverse=True)
for dictionary in DataResults:
del dictionary[target]
global AllTargets
global target_names
global target_namesLoc
AllTargets = [o[target] for o in DataResultsRaw]
AllTargetsFloatValues = []
previous = None
Class = 0
for i, value in enumerate(AllTargets):
if (i == 0):
previous = value
target_names.append(value)
if (value == previous):
AllTargetsFloatValues.append(Class)
else:
Class = Class + 1
target_names.append(value)
AllTargetsFloatValues.append(Class)
previous = value
ArrayDataResults = pd.DataFrame.from_dict(DataResults)
global XData, yData, RANDOM_SEED
XData, yData = ArrayDataResults, AllTargetsFloatValues
global XDataStored, yDataStored
XDataStored = XData.copy()
yDataStored = yData.copy()
return 'Processed uploaded data set'
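# The loop above is a hand-rolled label encoder: because DataResultsRaw was
# sorted on the target column, equal labels are adjacent and each new label
# gets the next integer id.  A minimal equivalent sketch with pandas (not
# what this app actually calls) would be:
#
#     codes, uniques = pd.factorize(pd.Series(AllTargets))
#     # codes -> integer class ids, uniques -> the original label names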
# Send data to the client
@app.route('/data/ClientRequest', methods=["GET", "POST"])
def CollectionData():
json.dumps(DataResultsRaw)
response = {
'Collection': DataResultsRaw
}
return jsonify(response)
def DataSetSelection():
global XDataTest, yDataTest
XDataTest = pd.DataFrame()
global StanceTest
global AllTargets
global target_names
target_namesLoc = []
if (StanceTest):
DataResultsTest = copy.deepcopy(DataResultsRawTest)
for dictionary in DataResultsRawTest:
for key in dictionary.keys():
if (key.find('*') != -1):
target = key
continue
continue
DataResultsRawTest.sort(key=lambda x: x[target], reverse=True)
DataResultsTest.sort(key=lambda x: x[target], reverse=True)
for dictionary in DataResultsTest:
del dictionary['_id']
del dictionary['InstanceID']
del dictionary[target]
AllTargetsTest = [o[target] for o in DataResultsRawTest]
AllTargetsFloatValuesTest = []
previous = None
Class = 0
for i, value in enumerate(AllTargetsTest):
if (i == 0):
previous = value
target_namesLoc.append(value)
if (value == previous):
AllTargetsFloatValuesTest.append(Class)
else:
Class = Class + 1
target_namesLoc.append(value)
AllTargetsFloatValuesTest.append(Class)
previous = value
ArrayDataResultsTest = pd.DataFrame.from_dict(DataResultsTest)
XDataTest, yDataTest = ArrayDataResultsTest, AllTargetsFloatValuesTest
DataResults = copy.deepcopy(DataResultsRaw)
for dictionary in DataResultsRaw:
for key in dictionary.keys():
if (key.find('*') != -1):
target = key
continue
continue
DataResultsRaw.sort(key=lambda x: x[target], reverse=True)
DataResults.sort(key=lambda x: x[target], reverse=True)
for dictionary in DataResults:
del dictionary['_id']
del dictionary['InstanceID']
del dictionary[target]
AllTargets = [o[target] for o in DataResultsRaw]
AllTargetsFloatValues = []
previous = None
Class = 0
for i, value in enumerate(AllTargets):
if (i == 0):
previous = value
target_names.append(value)
if (value == previous):
AllTargetsFloatValues.append(Class)
else:
Class = Class + 1
target_names.append(value)
AllTargetsFloatValues.append(Class)
previous = value
ArrayDataResults = pd.DataFrame.from_dict(DataResults)
global XData, yData, RANDOM_SEED
XData, yData = ArrayDataResults, AllTargetsFloatValues
global XDataStored, yDataStored
XDataStored = XData.copy()
yDataStored = yData.copy()
warnings.simplefilter('ignore')
return 'Everything is okay'
def callPreResults():
global XData
global yData
global target_names
global impDataInst
DataSpaceResMDS = FunMDS(XData)
DataSpaceResTSNE = FunTsne(XData)
DataSpaceResTSNE = DataSpaceResTSNE.tolist()
DataSpaceUMAP = FunUMAP(XData)
XDataJSONEntireSetRes = XData.to_json(orient='records')
global preResults
preResults = []
preResults.append(json.dumps(target_names)) # Position: 0
preResults.append(json.dumps(DataSpaceResMDS)) # Position: 1
preResults.append(json.dumps(XDataJSONEntireSetRes)) # Position: 2
preResults.append(json.dumps(yData)) # Position: 3
preResults.append(json.dumps(AllTargets)) # Position: 4
preResults.append(json.dumps(DataSpaceResTSNE)) # Position: 5
preResults.append(json.dumps(DataSpaceUMAP)) # Position: 6
preResults.append(json.dumps(impDataInst)) # Position: 7
# Sending each model's results to frontend
@app.route('/data/requestDataSpaceResults', methods=["GET", "POST"])
def SendDataSpaceResults():
global preResults
callPreResults()
response = {
'preDataResults': preResults,
}
return jsonify(response)
# Main function
if __name__ == '__main__':
app.run()
# Debugging and mirroring client
@app.route('/', defaults={'path': ''})
@app.route('/<path:path>')
def catch_all(path):
if app.debug:
return requests.get('http://localhost:8080/{}'.format(path)).text
return render_template("index.html")
# This block of code is for server computations
def column_index(df, query_cols):
cols = df.columns.values
sidx = np.argsort(cols)
return sidx[np.searchsorted(cols,query_cols,sorter=sidx)].tolist()
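# Worked example: for a DataFrame with columns ['b', 'a', 'c'],
# column_index(df, ['c', 'a']) returns [2, 1] -- argsort orders the column
# names and searchsorted then maps each query back to its original position.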
def class_feature_importance(X, Y, feature_importances):
N, M = X.shape
X = scale(X)
out = {}
for c in set(Y):
out[c] = dict(
zip(range(N), np.mean(X[Y==c, :], axis=0)*feature_importances)
)
return out
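# Usage sketch with synthetic inputs (not part of the application flow):
# each class gets a dict that weights the per-class mean of the scaled
# features by a global feature-importance vector.
def _example_class_feature_importance():
    rng = np.random.RandomState(0)
    X = rng.rand(30, 4)
    Y = np.array([0, 1, 2] * 10)
    importances = np.array([0.4, 0.3, 0.2, 0.1])
    return class_feature_importance(X, Y, importances)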
@cross_origin(origin='localhost',headers=['Content-Type','Authorization'])
@app.route('/data/EnsembleMode', methods=["GET", "POST"])
def EnsembleMethod():
global crossValidation
global RANDOM_SEED
global XData
RANDOM_SEED = 42
RetrievedStatus = request.get_data().decode('utf8').replace("'", '"')
RetrievedStatus = json.loads(RetrievedStatus)
modeMethod = RetrievedStatus['defaultModeMain']
if (modeMethod == 'blend'):
crossValidation = ShuffleSplit(n_splits=1, test_size=.20, random_state=RANDOM_SEED)
else:
crossValidation = 5
return 'Okay'
# Initialize every model for each algorithm
@cross_origin(origin='localhost',headers=['Content-Type','Authorization'])
@app.route('/data/ServerRequestSelParameters', methods=["GET", "POST"])
def RetrieveModel():
# get the models from the frontend
RetrievedModel = request.get_data().decode('utf8').replace("'", '"')
RetrievedModel = json.loads(RetrievedModel)
global algorithms
algorithms = RetrievedModel['Algorithms']
toggle = RetrievedModel['Toggle']
global crossValidation
global XData
global yData
global SVCModelsCount
global GausNBModelsCount
global MLPModelsCount
global LRModelsCount
global LDAModelsCount
global QDAModelsCount
global RFModelsCount
global ExtraTModelsCount
global AdaBModelsCount
global GradBModelsCount
# loop through the algorithms
global allParametersPerformancePerModel
start = timeit.default_timer()
print('CVorTT', crossValidation)
for eachAlgor in algorithms:
if (eachAlgor) == 'KNN':
clf = KNeighborsClassifier()
params = {'n_neighbors': list(range(1, 25)), 'metric': ['chebyshev', 'manhattan', 'euclidean', 'minkowski'], 'algorithm': ['brute', 'kd_tree', 'ball_tree'], 'weights': ['uniform', 'distance']}
AlgorithmsIDsEnd = 0
elif (eachAlgor) == 'SVC':
clf = SVC(probability=True,random_state=RANDOM_SEED)
params = {'C': list(np.arange(0.1,4.43,0.11)), 'kernel': ['rbf','linear', 'poly', 'sigmoid']}
AlgorithmsIDsEnd = SVCModelsCount
elif (eachAlgor) == 'GauNB':
clf = GaussianNB()
params = {'var_smoothing': list(np.arange(0.00000000001,0.0000001,0.0000000002))}
AlgorithmsIDsEnd = GausNBModelsCount
elif (eachAlgor) == 'MLP':
clf = MLPClassifier(random_state=RANDOM_SEED)
params = {'alpha': list(np.arange(0.00001,0.001,0.0002)), 'tol': list(np.arange(0.00001,0.001,0.0004)), 'max_iter': list(np.arange(100,200,100)), 'activation': ['relu', 'identity', 'logistic', 'tanh'], 'solver' : ['adam', 'sgd']}
AlgorithmsIDsEnd = MLPModelsCount
elif (eachAlgor) == 'LR':
clf = LogisticRegression(random_state=RANDOM_SEED)
params = {'C': list(np.arange(0.5,2,0.075)), 'max_iter': list(np.arange(50,250,50)), 'solver': ['lbfgs', 'newton-cg', 'sag', 'saga'], 'penalty': ['l2', 'none']}
AlgorithmsIDsEnd = LRModelsCount
elif (eachAlgor) == 'LDA':
clf = LinearDiscriminantAnalysis()
params = {'shrinkage': list(np.arange(0,1,0.01)), 'solver': ['lsqr', 'eigen']}
AlgorithmsIDsEnd = LDAModelsCount
elif (eachAlgor) == 'QDA':
clf = QuadraticDiscriminantAnalysis()
params = {'reg_param': list(np.arange(0,1,0.02)), 'tol': list(np.arange(0.00001,0.001,0.0002))}
AlgorithmsIDsEnd = QDAModelsCount
elif (eachAlgor) == 'RF':
clf = RandomForestClassifier(random_state=RANDOM_SEED)
params = {'n_estimators': list(range(60, 140)), 'criterion': ['gini', 'entropy']}
AlgorithmsIDsEnd = RFModelsCount
elif (eachAlgor) == 'ExtraT':
clf = ExtraTreesClassifier(random_state=RANDOM_SEED)
params = {'n_estimators': list(range(60, 140)), 'criterion': ['gini', 'entropy']}
AlgorithmsIDsEnd = ExtraTModelsCount
elif (eachAlgor) == 'AdaB':
clf = AdaBoostClassifier(random_state=RANDOM_SEED)
params = {'n_estimators': list(range(40, 80)), 'learning_rate': list(np.arange(0.1,2.3,1.1)), 'algorithm': ['SAMME.R', 'SAMME']}
AlgorithmsIDsEnd = AdaBModelsCount
else:
clf = GradientBoostingClassifier(random_state=RANDOM_SEED)
params = {'n_estimators': list(range(85, 115)), 'learning_rate': list(np.arange(0.01,0.23,0.11)), 'criterion': ['friedman_mse', 'mse', 'mae']}
AlgorithmsIDsEnd = GradBModelsCount
allParametersPerformancePerModel = GridSearchForModels(XData, yData, clf, params, eachAlgor, AlgorithmsIDsEnd, toggle, crossValidation)
# New visualization - model space
# header = "model_id,algorithm_id,mean_test_accuracy,mean_test_precision_micro,mean_test_precision_macro,mean_test_precision_weighted,mean_test_recall_micro,mean_test_recall_macro,mean_test_recall_weighted,mean_test_roc_auc_ovo_weighted,geometric_mean_score_micro,geometric_mean_score_macro,geometric_mean_score_weighted,matthews_corrcoef,f5_micro,f5_macro,f5_weighted,f1_micro,f1_macro,f1_weighted,f2_micro,f2_macro,f2_weighted,log_loss\n"
# dataReceived = []
# counter = 0
# for indx, el in enumerate(allParametersPerformancePerModel):
# dictFR = json.loads(el)
# frame = pd.DataFrame.from_dict(dictFR)
# for ind, elInside in frame.iterrows():
# counter = counter + 1
# dataReceived.append(str(counter))
# dataReceived.append(',')
# dataReceived.append(str(indx+1))
# dataReceived.append(',')
# dataReceived.append(str(elInside['mean_test_accuracy']))
# dataReceived.append(',')
# dataReceived.append(str(elInside['mean_test_precision_micro']))
# dataReceived.append(',')
# dataReceived.append(str(elInside['mean_test_precision_macro']))
# dataReceived.append(',')
# dataReceived.append(str(elInside['mean_test_precision_weighted']))
# dataReceived.append(',')
# dataReceived.append(str(elInside['mean_test_recall_micro']))
# dataReceived.append(',')
# dataReceived.append(str(elInside['mean_test_recall_macro']))
# dataReceived.append(',')
# dataReceived.append(str(elInside['mean_test_recall_weighted']))
# dataReceived.append(',')
# dataReceived.append(str(elInside['mean_test_roc_auc_ovo_weighted']))
# dataReceived.append(',')
# dataReceived.append(str(elInside['geometric_mean_score_micro']))
# dataReceived.append(',')
# dataReceived.append(str(elInside['geometric_mean_score_macro']))
# dataReceived.append(',')
# dataReceived.append(str(elInside['geometric_mean_score_weighted']))
# dataReceived.append(',')
# dataReceived.append(str(elInside['matthews_corrcoef']))
# dataReceived.append(',')
# dataReceived.append(str(elInside['f5_micro']))
# dataReceived.append(',')
# dataReceived.append(str(elInside['f5_macro']))
# dataReceived.append(',')
# dataReceived.append(str(elInside['f5_weighted']))
# dataReceived.append(',')
# dataReceived.append(str(elInside['f1_micro']))
# dataReceived.append(',')
# dataReceived.append(str(elInside['f1_macro']))
# dataReceived.append(',')
# dataReceived.append(str(elInside['f1_weighted']))
# dataReceived.append(',')
# dataReceived.append(str(elInside['f2_micro']))
# dataReceived.append(',')
# dataReceived.append(str(elInside['f2_macro']))
# dataReceived.append(',')
# dataReceived.append(str(elInside['f2_weighted']))
# dataReceived.append(',')
# dataReceived.append(str(elInside['log_loss']))
# dataReceived.append("\n")
# dataReceivedItems = ''.join(dataReceived)
# csvString = header + dataReceivedItems
# fw = open ("modelSpace.csv","w+",encoding="utf-8")
# fw.write(csvString)
# fw.close()
# call the function that sends the results to the frontend
stop = timeit.default_timer()
print('Time GridSearch: ', stop - start)
SendEachClassifiersPerformanceToVisualize()
return 'Everything Okay'
location = './cachedir'
memory = Memory(location, verbose=0)
# calculating for all algorithms and models the performance and other results
@memory.cache
def GridSearchForModels(XData, yData, clf, params, eachAlgor, AlgorithmsIDsEnd, toggle, crossVal):
print('loop')
# this is the grid we use to train the models
grid = GridSearchCV(
estimator=clf, param_grid=params,
cv=crossVal, refit='accuracy', scoring=scoring,
verbose=0, n_jobs=-1)
# fit and extract the probabilities
grid.fit(XData, yData)
# process the results
cv_results = []
cv_results.append(grid.cv_results_)
df_cv_results = pd.DataFrame.from_dict(cv_results)
# number of models stored
number_of_models = len(df_cv_results.iloc[0][0])
# initialize results per row
df_cv_results_per_row = []
# loop through number of models
modelsIDs = []
for i in range(number_of_models):
modelsIDs.append(AlgorithmsIDsEnd+i)
# initialize results per item
df_cv_results_per_item = []
for column in df_cv_results.iloc[0]:
df_cv_results_per_item.append(column[i])
df_cv_results_per_row.append(df_cv_results_per_item)
# store the results into a pandas dataframe
df_cv_results_classifiers = pd.DataFrame(data = df_cv_results_per_row, columns= df_cv_results.columns)
# copy and filter in order to get only the metrics
metrics = df_cv_results_classifiers.copy()
metrics = metrics.filter(['mean_test_accuracy','mean_test_precision_micro','mean_test_precision_macro','mean_test_precision_weighted','mean_test_recall_micro','mean_test_recall_macro','mean_test_recall_weighted','mean_test_roc_auc_ovo_weighted'])
# concat parameters and performance
parametersPerformancePerModel = pd.DataFrame(df_cv_results_classifiers['params'])
parametersPerformancePerModel = parametersPerformancePerModel.to_json()
parametersLocal = json.loads(parametersPerformancePerModel)['params'].copy()
Models = []
for index, items in enumerate(parametersLocal):
Models.append(str(index))
parametersLocalNew = [ parametersLocal[your_key] for your_key in Models ]
permList = []
PerFeatureAccuracy = []
PerFeatureAccuracyAll = []
PerClassMetric = []
perModelProb = []
perModelPrediction = []
resultsMicro = []
resultsMacro = []
resultsWeighted = []
resultsCorrCoef = []
resultsMicroBeta5 = []
resultsMacroBeta5 = []
resultsWeightedBeta5 = []
resultsMicroBeta1 = []
resultsMacroBeta1 = []
resultsWeightedBeta1 = []
resultsMicroBeta2 = []
resultsMacroBeta2 = []
resultsWeightedBeta2 = []
resultsLogLoss = []
resultsLogLossFinal = []
loop = 8
# influence calculation for all the instances
inputs = range(len(XData))
num_cores = multiprocessing.cpu_count()
#impDataInst = Parallel(n_jobs=num_cores)(delayed(processInput)(i,XData,yData,crossValidation,clf) for i in inputs)
for eachModelParameters in parametersLocalNew:
clf.set_params(**eachModelParameters)
if (toggle == 1):
perm = PermutationImportance(clf, cv = None, refit = True, n_iter = 25).fit(XData, yData)
permList.append(perm.feature_importances_)
n_feats = XData.shape[1]
PerFeatureAccuracy = []
for i in range(n_feats):
scores = model_selection.cross_val_score(clf, XData.values[:, i].reshape(-1, 1), yData, cv=5)
PerFeatureAccuracy.append(scores.mean())
PerFeatureAccuracyAll.append(PerFeatureAccuracy)
else:
permList.append(0)
PerFeatureAccuracyAll.append(0)
clf.fit(XData, yData)
yPredict = clf.predict(XData)
yPredict = np.nan_to_num(yPredict)
perModelPrediction.append(yPredict)
# retrieve target names (class names)
PerClassMetric.append(classification_report(yData, yPredict, target_names=target_names, digits=2, output_dict=True))
yPredictProb = clf.predict_proba(XData)
yPredictProb = np.nan_to_num(yPredictProb)
perModelProb.append(yPredictProb.tolist())
resultsMicro.append(geometric_mean_score(yData, yPredict, average='micro'))
resultsMacro.append(geometric_mean_score(yData, yPredict, average='macro'))
resultsWeighted.append(geometric_mean_score(yData, yPredict, average='weighted'))
resultsCorrCoef.append(matthews_corrcoef(yData, yPredict))
resultsMicroBeta5.append(fbeta_score(yData, yPredict, average='micro', beta=0.5))
resultsMacroBeta5.append(fbeta_score(yData, yPredict, average='macro', beta=0.5))
resultsWeightedBeta5.append(fbeta_score(yData, yPredict, average='weighted', beta=0.5))
resultsMicroBeta1.append(fbeta_score(yData, yPredict, average='micro', beta=1))
resultsMacroBeta1.append(fbeta_score(yData, yPredict, average='macro', beta=1))
resultsWeightedBeta1.append(fbeta_score(yData, yPredict, average='weighted', beta=1))
resultsMicroBeta2.append(fbeta_score(yData, yPredict, average='micro', beta=2))
resultsMacroBeta2.append(fbeta_score(yData, yPredict, average='macro', beta=2))
resultsWeightedBeta2.append(fbeta_score(yData, yPredict, average='weighted', beta=2))
resultsLogLoss.append(log_loss(yData, yPredictProb, normalize=True))
maxLog = max(resultsLogLoss)
minLog = min(resultsLogLoss)
for each in resultsLogLoss:
resultsLogLossFinal.append((each-minLog)/(maxLog-minLog))
metrics.insert(loop,'geometric_mean_score_micro',resultsMicro)
metrics.insert(loop+1,'geometric_mean_score_macro',resultsMacro)
metrics.insert(loop+2,'geometric_mean_score_weighted',resultsWeighted)
metrics.insert(loop+3,'matthews_corrcoef',resultsCorrCoef)
metrics.insert(loop+4,'f5_micro',resultsMicroBeta5)
metrics.insert(loop+5,'f5_macro',resultsMacroBeta5)
metrics.insert(loop+6,'f5_weighted',resultsWeightedBeta5)
metrics.insert(loop+7,'f1_micro',resultsMicroBeta1)
metrics.insert(loop+8,'f1_macro',resultsMacroBeta1)
metrics.insert(loop+9,'f1_weighted',resultsWeightedBeta1)
metrics.insert(loop+10,'f2_micro',resultsMicroBeta2)
metrics.insert(loop+11,'f2_macro',resultsMacroBeta2)
metrics.insert(loop+12,'f2_weighted',resultsWeightedBeta2)
metrics.insert(loop+13,'log_loss',resultsLogLossFinal)
perModelPredPandas = pd.DataFrame(perModelPrediction)
perModelPredPandas = perModelPredPandas.to_json()
perModelProbPandas = pd.DataFrame(perModelProb)
perModelProbPandas = perModelProbPandas.to_json()
PerClassMetricPandas = pd.DataFrame(PerClassMetric)
del PerClassMetricPandas['accuracy']
del PerClassMetricPandas['macro avg']
del PerClassMetricPandas['weighted avg']
PerClassMetricPandas = PerClassMetricPandas.to_json()
perm_imp_eli5PD = pd.DataFrame(permList)
perm_imp_eli5PD = perm_imp_eli5PD.to_json()
PerFeatureAccuracyPandas = pd.DataFrame(PerFeatureAccuracyAll)
PerFeatureAccuracyPandas = PerFeatureAccuracyPandas.to_json()
bestfeatures = SelectKBest(score_func=chi2, k='all')
fit = bestfeatures.fit(XData,yData)
dfscores = pd.DataFrame(fit.scores_)
dfcolumns = pd.DataFrame(XData.columns)
featureScores = pd.concat([dfcolumns,dfscores],axis=1)
featureScores.columns = ['Specs','Score'] #naming the dataframe columns
featureScores = featureScores.to_json()
# gather the results and send them back
results.append(modelsIDs) # Position: 0 and so on
results.append(parametersPerformancePerModel) # Position: 1 and so on
results.append(PerClassMetricPandas) # Position: 2 and so on
results.append(PerFeatureAccuracyPandas) # Position: 3 and so on
results.append(perm_imp_eli5PD) # Position: 4 and so on
results.append(featureScores) # Position: 5 and so on
metrics = metrics.to_json()
results.append(metrics) # Position: 6 and so on
results.append(perModelProbPandas) # Position: 7 and so on
results.append(json.dumps(perModelPredPandas)) # Position: 8 and so on
return results
# Sending each model's results to frontend
@app.route('/data/PerformanceForEachModel', methods=["GET", "POST"])
def SendEachClassifiersPerformanceToVisualize():
response = {
'PerformancePerModel': allParametersPerformancePerModel,
}
return jsonify(response)
def Remove(duplicate):
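    # Helper: remove duplicates from a list while preserving order; NaN floats are dropped entirely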
final_list = []
for num in duplicate:
if num not in final_list:
if (isinstance(num, float)):
if np.isnan(num):
pass
else:
final_list.append(float(num))
else:
final_list.append(num)
return final_list
# Retrieve data from client
@cross_origin(origin='localhost',headers=['Content-Type','Authorization'])
@app.route('/data/SendBrushedParam', methods=["GET", "POST"])
def RetrieveModelsParam():
RetrieveModelsPar = request.get_data().decode('utf8').replace("'", '"')
RetrieveModelsPar = json.loads(RetrieveModelsPar)
counterKNN = 0
counterSVC = 0
counterGausNB = 0
counterMLP = 0
counterLR = 0
counterLDA = 0
counterQDA = 0
counterRF = 0
counterExtraT = 0
counterAdaB = 0
counterGradB = 0
global KNNModels
global SVCModels
global GausNBModels
global MLPModels
global LRModels
global LDAModels
global QDAModels
global RFModels
global ExtraTModels
global AdaBModels
global GradBModels
global algorithmsList
algorithmsList = RetrieveModelsPar['algorithms']
for index, items in enumerate(algorithmsList):
if (items == 'KNN'):
counterKNN += 1
KNNModels.append(int(RetrieveModelsPar['models'][index]))
elif (items == 'SVC'):
counterSVC += 1
SVCModels.append(int(RetrieveModelsPar['models'][index]))
elif (items == 'GauNB'):
counterGausNB += 1
GausNBModels.append(int(RetrieveModelsPar['models'][index]))
elif (items == 'MLP'):
counterMLP += 1
MLPModels.append(int(RetrieveModelsPar['models'][index]))
elif (items == 'LR'):
counterLR += 1
LRModels.append(int(RetrieveModelsPar['models'][index]))
elif (items == 'LDA'):
counterLDA += 1
LDAModels.append(int(RetrieveModelsPar['models'][index]))
elif (items == 'QDA'):
counterQDA += 1
QDAModels.append(int(RetrieveModelsPar['models'][index]))
elif (items == 'RF'):
counterRF += 1
RFModels.append(int(RetrieveModelsPar['models'][index]))
elif (items == 'ExtraT'):
counterExtraT += 1
ExtraTModels.append(int(RetrieveModelsPar['models'][index]))
elif (items == 'AdaB'):
counterAdaB += 1
AdaBModels.append(int(RetrieveModelsPar['models'][index]))
else:
counterGradB += 1
GradBModels.append(int(RetrieveModelsPar['models'][index]))
return 'Everything Okay'
# Retrieve data from client
@cross_origin(origin='localhost',headers=['Content-Type','Authorization'])
@app.route('/data/factors', methods=["GET", "POST"])
def RetrieveFactors():
global factors
global allParametersPerformancePerModel
Factors = request.get_data().decode('utf8').replace("'", '"')
FactorsInt = json.loads(Factors)
factors = FactorsInt['Factors']
# this is if we want to change the factors before running the search
#if (len(allParametersPerformancePerModel) == 0):
# pass
#else:
global sumPerClassifierSel
global ModelSpaceMDSNew
global ModelSpaceTSNENew
global metricsPerModel
sumPerClassifierSel = []
sumPerClassifierSel = preProcsumPerMetric(factors)
ModelSpaceMDSNew = []
ModelSpaceTSNENew = []
loopThroughMetrics = PreprocessingMetrics()
loopThroughMetrics = loopThroughMetrics.fillna(0)
metricsPerModel = preProcMetricsAllAndSel()
flagLocal = 0
countRemovals = 0
for l,el in enumerate(factors):
if el == 0:
loopThroughMetrics.drop(loopThroughMetrics.columns[[l-countRemovals]], axis=1, inplace=True)
countRemovals = countRemovals + 1
flagLocal = 1
if flagLocal == 1:
ModelSpaceMDSNew = FunMDS(loopThroughMetrics)
ModelSpaceTSNENew = FunTsne(loopThroughMetrics)
ModelSpaceTSNENew = ModelSpaceTSNENew.tolist()
return 'Everything Okay'
@app.route('/data/UpdateOverv', methods=["GET", "POST"])
def UpdateOverview():
ResultsUpdateOverview = []
ResultsUpdateOverview.append(sumPerClassifierSel)
ResultsUpdateOverview.append(ModelSpaceMDSNew)
ResultsUpdateOverview.append(ModelSpaceTSNENew)
ResultsUpdateOverview.append(metricsPerModel)
response = {
'Results': ResultsUpdateOverview
}
return jsonify(response)
def PreprocessingMetrics():
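    # Rebuild one metrics table for the currently selected models: load each algorithm's stored
    # metric results from allParametersPerformancePerModel, offset the row indices so every model
    # gets a unique global ID, keep only the brushed models, and concatenate into a single frame.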
dicKNN = json.loads(allParametersPerformancePerModel[6])
dicSVC = json.loads(allParametersPerformancePerModel[15])
dicGausNB = json.loads(allParametersPerformancePerModel[24])
dicMLP = json.loads(allParametersPerformancePerModel[33])
dicLR = json.loads(allParametersPerformancePerModel[42])
dicLDA = json.loads(allParametersPerformancePerModel[51])
dicQDA = json.loads(allParametersPerformancePerModel[60])
dicRF = json.loads(allParametersPerformancePerModel[69])
dicExtraT = json.loads(allParametersPerformancePerModel[78])
dicAdaB = json.loads(allParametersPerformancePerModel[87])
dicGradB = json.loads(allParametersPerformancePerModel[96])
dfKNN = pd.DataFrame.from_dict(dicKNN)
dfSVC = pd.DataFrame.from_dict(dicSVC)
dfGausNB = pd.DataFrame.from_dict(dicGausNB)
dfMLP = pd.DataFrame.from_dict(dicMLP)
dfLR = pd.DataFrame.from_dict(dicLR)
dfLDA = pd.DataFrame.from_dict(dicLDA)
dfQDA = pd.DataFrame.from_dict(dicQDA)
dfRF = pd.DataFrame.from_dict(dicRF)
dfExtraT = pd.DataFrame.from_dict(dicExtraT)
dfAdaB = pd.DataFrame.from_dict(dicAdaB)
dfGradB = pd.DataFrame.from_dict(dicGradB)
dfKNN.index = dfKNN.index.astype(int)
dfSVC.index = dfSVC.index.astype(int) + SVCModelsCount
dfGausNB.index = dfGausNB.index.astype(int) + GausNBModelsCount
dfMLP.index = dfMLP.index.astype(int) + MLPModelsCount
dfLR.index = dfLR.index.astype(int) + LRModelsCount
dfLDA.index = dfLDA.index.astype(int) + LDAModelsCount
dfQDA.index = dfQDA.index.astype(int) + QDAModelsCount
dfRF.index = dfRF.index.astype(int) + RFModelsCount
dfExtraT.index = dfExtraT.index.astype(int) + ExtraTModelsCount
dfAdaB.index = dfAdaB.index.astype(int) + AdaBModelsCount
dfGradB.index = dfGradB.index.astype(int) + GradBModelsCount
dfKNNFiltered = dfKNN.loc[KNNModels, :]
dfSVCFiltered = dfSVC.loc[SVCModels, :]
dfGausNBFiltered = dfGausNB.loc[GausNBModels, :]
dfMLPFiltered = dfMLP.loc[MLPModels, :]
dfLRFiltered = dfLR.loc[LRModels, :]
dfLDAFiltered = dfLDA.loc[LDAModels, :]
dfQDAFiltered = dfQDA.loc[QDAModels, :]
dfRFFiltered = dfRF.loc[RFModels, :]
dfExtraTFiltered = dfExtraT.loc[ExtraTModels, :]
dfAdaBFiltered = dfAdaB.loc[AdaBModels, :]
dfGradBFiltered = dfGradB.loc[GradBModels, :]
df_concatMetrics = pd.concat([dfKNNFiltered, dfSVCFiltered, dfGausNBFiltered, dfMLPFiltered, dfLRFiltered, dfLDAFiltered, dfQDAFiltered, dfRFFiltered, dfExtraTFiltered, dfAdaBFiltered, dfGradBFiltered])
return df_concatMetrics
def PreprocessingPred():
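    # Collect the stored class-probability tables of the selected models and return, for every
    # data instance, the class probabilities averaged over those models.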
dicKNN = json.loads(allParametersPerformancePerModel[7])
dicSVC = json.loads(allParametersPerformancePerModel[16])
dicGausNB = json.loads(allParametersPerformancePerModel[25])
dicMLP = json.loads(allParametersPerformancePerModel[34])
dicLR = json.loads(allParametersPerformancePerModel[43])
dicLDA = json.loads(allParametersPerformancePerModel[52])
dicQDA = json.loads(allParametersPerformancePerModel[61])
dicRF = json.loads(allParametersPerformancePerModel[70])
dicExtraT = json.loads(allParametersPerformancePerModel[79])
dicAdaB = json.loads(allParametersPerformancePerModel[88])
dicGradB = json.loads(allParametersPerformancePerModel[97])
dfKNN = pd.DataFrame.from_dict(dicKNN)
dfSVC = pd.DataFrame.from_dict(dicSVC)
dfGausNB = pd.DataFrame.from_dict(dicGausNB)
dfMLP = pd.DataFrame.from_dict(dicMLP)
dfLR = pd.DataFrame.from_dict(dicLR)
dfLDA = pd.DataFrame.from_dict(dicLDA)
dfQDA = pd.DataFrame.from_dict(dicQDA)
dfRF = pd.DataFrame.from_dict(dicRF)
dfExtraT = pd.DataFrame.from_dict(dicExtraT)
dfAdaB = pd.DataFrame.from_dict(dicAdaB)
dfGradB = pd.DataFrame.from_dict(dicGradB)
dfKNN.index = dfKNN.index.astype(int)
dfSVC.index = dfSVC.index.astype(int) + SVCModelsCount
dfGausNB.index = dfGausNB.index.astype(int) + GausNBModelsCount
dfMLP.index = dfMLP.index.astype(int) + MLPModelsCount
dfLR.index = dfLR.index.astype(int) + LRModelsCount
dfLDA.index = dfLDA.index.astype(int) + LDAModelsCount
dfQDA.index = dfQDA.index.astype(int) + QDAModelsCount
dfRF.index = dfRF.index.astype(int) + RFModelsCount
dfExtraT.index = dfExtraT.index.astype(int) + ExtraTModelsCount
dfAdaB.index = dfAdaB.index.astype(int) + AdaBModelsCount
dfGradB.index = dfGradB.index.astype(int) + GradBModelsCount
dfKNNFiltered = dfKNN.loc[KNNModels, :]
dfSVCFiltered = dfSVC.loc[SVCModels, :]
dfGausNBFiltered = dfGausNB.loc[GausNBModels, :]
dfMLPFiltered = dfMLP.loc[MLPModels, :]
dfLRFiltered = dfLR.loc[LRModels, :]
dfLDAFiltered = dfLDA.loc[LDAModels, :]
dfQDAFiltered = dfQDA.loc[QDAModels, :]
dfRFFiltered = dfRF.loc[RFModels, :]
dfExtraTFiltered = dfExtraT.loc[ExtraTModels, :]
dfAdaBFiltered = dfAdaB.loc[AdaBModels, :]
dfGradBFiltered = dfGradB.loc[GradBModels, :]
df_concatProbs = pd.concat([dfKNNFiltered, dfSVCFiltered, dfGausNBFiltered, dfMLPFiltered, dfLRFiltered, dfLDAFiltered, dfQDAFiltered, dfRFFiltered, dfExtraTFiltered, dfAdaBFiltered, dfGradBFiltered])
predictions = []
for column, content in df_concatProbs.items():
el = [sum(x)/len(x) for x in zip(*content)]
predictions.append(el)
return predictions
def PreprocessingPredUpdate(Models):
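    # Remove the models listed in the request from the probability table, project the averaged
    # predictions of the full set and of the remaining selection to 2-D with MDS, and align the
    # two embeddings with a Procrustes transform; returns the aligned coordinates and the IDs
    # of the models that remain.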
Models = json.loads(Models)
ModelsList= []
for loop in Models['ClassifiersList']:
ModelsList.append(loop)
dicKNN = json.loads(allParametersPerformancePerModel[7])
dicSVC = json.loads(allParametersPerformancePerModel[16])
dicGausNB = json.loads(allParametersPerformancePerModel[25])
dicMLP = json.loads(allParametersPerformancePerModel[34])
dicLR = json.loads(allParametersPerformancePerModel[43])
dicLDA = json.loads(allParametersPerformancePerModel[52])
dicQDA = json.loads(allParametersPerformancePerModel[61])
dicRF = json.loads(allParametersPerformancePerModel[70])
dicExtraT = json.loads(allParametersPerformancePerModel[79])
dicAdaB = json.loads(allParametersPerformancePerModel[88])
dicGradB = json.loads(allParametersPerformancePerModel[97])
dfKNN = pd.DataFrame.from_dict(dicKNN)
dfSVC = pd.DataFrame.from_dict(dicSVC)
dfGausNB = pd.DataFrame.from_dict(dicGausNB)
dfMLP = pd.DataFrame.from_dict(dicMLP)
dfLR = pd.DataFrame.from_dict(dicLR)
dfLDA = pd.DataFrame.from_dict(dicLDA)
dfQDA = pd.DataFrame.from_dict(dicQDA)
dfRF = pd.DataFrame.from_dict(dicRF)
dfExtraT = pd.DataFrame.from_dict(dicExtraT)
dfAdaB = pd.DataFrame.from_dict(dicAdaB)
dfGradB = pd.DataFrame.from_dict(dicGradB)
dfKNN.index = dfKNN.index.astype(int)
dfSVC.index = dfSVC.index.astype(int) + SVCModelsCount
dfGausNB.index = dfGausNB.index.astype(int) + GausNBModelsCount
dfMLP.index = dfMLP.index.astype(int) + MLPModelsCount
dfLR.index = dfLR.index.astype(int) + LRModelsCount
dfLDA.index = dfLDA.index.astype(int) + LDAModelsCount
dfQDA.index = dfQDA.index.astype(int) + QDAModelsCount
dfRF.index = dfRF.index.astype(int) + RFModelsCount
dfExtraT.index = dfExtraT.index.astype(int) + ExtraTModelsCount
dfAdaB.index = dfAdaB.index.astype(int) + AdaBModelsCount
dfGradB.index = dfGradB.index.astype(int) + GradBModelsCount
dfKNNFiltered = dfKNN.loc[KNNModels, :]
dfSVCFiltered = dfSVC.loc[SVCModels, :]
dfGausNBFiltered = dfGausNB.loc[GausNBModels, :]
dfMLPFiltered = dfMLP.loc[MLPModels, :]
dfLRFiltered = dfLR.loc[LRModels, :]
dfLDAFiltered = dfLDA.loc[LDAModels, :]
dfQDAFiltered = dfQDA.loc[QDAModels, :]
dfRFFiltered = dfRF.loc[RFModels, :]
dfExtraTFiltered = dfExtraT.loc[ExtraTModels, :]
dfAdaBFiltered = dfAdaB.loc[AdaBModels, :]
dfGradBFiltered = dfGradB.loc[GradBModels, :]
df_concatProbs = pd.concat([dfKNNFiltered, dfSVCFiltered, dfGausNBFiltered, dfMLPFiltered, dfLRFiltered, dfLDAFiltered, dfQDAFiltered, dfRFFiltered, dfExtraTFiltered, dfAdaBFiltered, dfGradBFiltered])
listProbs = df_concatProbs.index.values.tolist()
deletedElements = 0
for index, element in enumerate(listProbs):
if element in ModelsList:
index = index - deletedElements
df_concatProbs = df_concatProbs.drop(df_concatProbs.index[index])
deletedElements = deletedElements + 1
df_concatProbsCleared = df_concatProbs
listIDsRemoved = df_concatProbsCleared.index.values.tolist()
predictionsAll = PreprocessingPred()
PredictionSpaceAll = FunMDS(predictionsAll)
PredictionSpaceAllComb = [list(a) for a in zip(PredictionSpaceAll[0], PredictionSpaceAll[1])]
predictionsSel = []
for column, content in df_concatProbsCleared.items():
el = [sum(x)/len(x) for x in zip(*content)]
predictionsSel.append(el)
PredictionSpaceSel = FunMDS(predictionsSel)
PredictionSpaceSelComb = [list(a) for a in zip(PredictionSpaceSel[0], PredictionSpaceSel[1])]
mtx2PredFinal = []
    _, mtx2Pred, disparityPred = procrustes(PredictionSpaceAllComb, PredictionSpaceSelComb)
a1, b1 = zip(*mtx2Pred)
mtx2PredFinal.append(a1)
mtx2PredFinal.append(b1)
return [mtx2PredFinal,listIDsRemoved]
def PreprocessingParam():
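    # Assemble the hyperparameter settings of all selected models into one table (one row per
    # model), using the same index-offset scheme as the metric tables.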
dicKNN = json.loads(allParametersPerformancePerModel[1])
dicSVC = json.loads(allParametersPerformancePerModel[10])
dicGausNB = json.loads(allParametersPerformancePerModel[19])
dicMLP = json.loads(allParametersPerformancePerModel[28])
dicLR = json.loads(allParametersPerformancePerModel[37])
dicLDA = json.loads(allParametersPerformancePerModel[46])
dicQDA = json.loads(allParametersPerformancePerModel[55])
dicRF = json.loads(allParametersPerformancePerModel[64])
dicExtraT = json.loads(allParametersPerformancePerModel[73])
dicAdaB = json.loads(allParametersPerformancePerModel[82])
dicGradB = json.loads(allParametersPerformancePerModel[91])
dicKNN = dicKNN['params']
dicSVC = dicSVC['params']
dicGausNB = dicGausNB['params']
dicMLP = dicMLP['params']
dicLR = dicLR['params']
dicLDA = dicLDA['params']
dicQDA = dicQDA['params']
dicRF = dicRF['params']
dicExtraT = dicExtraT['params']
dicAdaB = dicAdaB['params']
dicGradB = dicGradB['params']
dicKNN = {int(k):v for k,v in dicKNN.items()}
dicSVC = {int(k):v for k,v in dicSVC.items()}
dicGausNB = {int(k):v for k,v in dicGausNB.items()}
dicMLP = {int(k):v for k,v in dicMLP.items()}
dicLR = {int(k):v for k,v in dicLR.items()}
dicLDA = {int(k):v for k,v in dicLDA.items()}
dicQDA = {int(k):v for k,v in dicQDA.items()}
dicRF = {int(k):v for k,v in dicRF.items()}
dicExtraT = {int(k):v for k,v in dicExtraT.items()}
dicAdaB = {int(k):v for k,v in dicAdaB.items()}
dicGradB = {int(k):v for k,v in dicGradB.items()}
dfKNN = pd.DataFrame.from_dict(dicKNN)
dfSVC = pd.DataFrame.from_dict(dicSVC)
dfGausNB = pd.DataFrame.from_dict(dicGausNB)
dfMLP = pd.DataFrame.from_dict(dicMLP)
dfLR = pd.DataFrame.from_dict(dicLR)
dfLDA = pd.DataFrame.from_dict(dicLDA)
dfQDA = pd.DataFrame.from_dict(dicQDA)
dfRF = pd.DataFrame.from_dict(dicRF)
dfExtraT = pd.DataFrame.from_dict(dicExtraT)
dfAdaB = pd.DataFrame.from_dict(dicAdaB)
dfGradB = pd.DataFrame.from_dict(dicGradB)
dfKNN = dfKNN.T
dfSVC = dfSVC.T
dfGausNB = dfGausNB.T
dfMLP = dfMLP.T
dfLR = dfLR.T
dfLDA = dfLDA.T
dfQDA = dfQDA.T
dfRF = dfRF.T
dfExtraT = dfExtraT.T
dfAdaB = dfAdaB.T
dfGradB = dfGradB.T
dfKNN.index = dfKNN.index.astype(int)
dfSVC.index = dfSVC.index.astype(int) + SVCModelsCount
dfGausNB.index = dfGausNB.index.astype(int) + GausNBModelsCount
dfMLP.index = dfMLP.index.astype(int) + MLPModelsCount
dfLR.index = dfLR.index.astype(int) + LRModelsCount
dfLDA.index = dfLDA.index.astype(int) + LDAModelsCount
dfQDA.index = dfQDA.index.astype(int) + QDAModelsCount
dfRF.index = dfRF.index.astype(int) + RFModelsCount
dfExtraT.index = dfExtraT.index.astype(int) + ExtraTModelsCount
dfAdaB.index = dfAdaB.index.astype(int) + AdaBModelsCount
dfGradB.index = dfGradB.index.astype(int) + GradBModelsCount
dfKNNFiltered = dfKNN.loc[KNNModels, :]
dfSVCFiltered = dfSVC.loc[SVCModels, :]
dfGausNBFiltered = dfGausNB.loc[GausNBModels, :]
dfMLPFiltered = dfMLP.loc[MLPModels, :]
dfLRFiltered = dfLR.loc[LRModels, :]
dfLDAFiltered = dfLDA.loc[LDAModels, :]
dfQDAFiltered = dfQDA.loc[QDAModels, :]
dfRFFiltered = dfRF.loc[RFModels, :]
dfExtraTFiltered = dfExtraT.loc[ExtraTModels, :]
dfAdaBFiltered = dfAdaB.loc[AdaBModels, :]
dfGradBFiltered = dfGradB.loc[GradBModels, :]
df_params = pd.concat([dfKNNFiltered, dfSVCFiltered, dfGausNBFiltered, dfMLPFiltered, dfLRFiltered, dfLDAFiltered, dfQDAFiltered, dfRFFiltered, dfExtraTFiltered, dfAdaBFiltered, dfGradBFiltered])
return df_params
def PreprocessingParamSep():
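    # Same as PreprocessingParam, but return the filtered per-algorithm parameter frames
    # separately instead of concatenating them.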
dicKNN = json.loads(allParametersPerformancePerModel[1])
dicSVC = json.loads(allParametersPerformancePerModel[10])
dicGausNB = json.loads(allParametersPerformancePerModel[19])
dicMLP = json.loads(allParametersPerformancePerModel[28])
dicLR = json.loads(allParametersPerformancePerModel[37])
dicLDA = json.loads(allParametersPerformancePerModel[46])
dicQDA = json.loads(allParametersPerformancePerModel[55])
dicRF = json.loads(allParametersPerformancePerModel[64])
dicExtraT = json.loads(allParametersPerformancePerModel[73])
dicAdaB = json.loads(allParametersPerformancePerModel[82])
dicGradB = json.loads(allParametersPerformancePerModel[91])
dicKNN = dicKNN['params']
dicSVC = dicSVC['params']
dicGausNB = dicGausNB['params']
dicMLP = dicMLP['params']
dicLR = dicLR['params']
dicLDA = dicLDA['params']
dicQDA = dicQDA['params']
dicRF = dicRF['params']
dicExtraT = dicExtraT['params']
dicAdaB = dicAdaB['params']
dicGradB = dicGradB['params']
dicKNN = {int(k):v for k,v in dicKNN.items()}
dicSVC = {int(k):v for k,v in dicSVC.items()}
dicGausNB = {int(k):v for k,v in dicGausNB.items()}
dicMLP = {int(k):v for k,v in dicMLP.items()}
dicLR = {int(k):v for k,v in dicLR.items()}
dicLDA = {int(k):v for k,v in dicLDA.items()}
dicQDA = {int(k):v for k,v in dicQDA.items()}
dicRF = {int(k):v for k,v in dicRF.items()}
dicExtraT = {int(k):v for k,v in dicExtraT.items()}
dicAdaB = {int(k):v for k,v in dicAdaB.items()}
dicGradB = {int(k):v for k,v in dicGradB.items()}
dfKNN = pd.DataFrame.from_dict(dicKNN)
dfSVC = pd.DataFrame.from_dict(dicSVC)
dfGausNB = pd.DataFrame.from_dict(dicGausNB)
dfMLP = pd.DataFrame.from_dict(dicMLP)
dfLR = pd.DataFrame.from_dict(dicLR)
dfLDA = pd.DataFrame.from_dict(dicLDA)
dfQDA = pd.DataFrame.from_dict(dicQDA)
dfRF = pd.DataFrame.from_dict(dicRF)
dfExtraT = pd.DataFrame.from_dict(dicExtraT)
dfAdaB = pd.DataFrame.from_dict(dicAdaB)
dfGradB = pd.DataFrame.from_dict(dicGradB)
dfKNN = dfKNN.T
dfSVC = dfSVC.T
dfGausNB = dfGausNB.T
dfMLP = dfMLP.T
dfLR = dfLR.T
dfLDA = dfLDA.T
dfQDA = dfQDA.T
dfRF = dfRF.T
dfExtraT = dfExtraT.T
dfAdaB = dfAdaB.T
dfGradB = dfGradB.T
dfKNN.index = dfKNN.index.astype(int)
dfSVC.index = dfSVC.index.astype(int) + SVCModelsCount
dfGausNB.index = dfGausNB.index.astype(int) + GausNBModelsCount
dfMLP.index = dfMLP.index.astype(int) + MLPModelsCount
dfLR.index = dfLR.index.astype(int) + LRModelsCount
dfLDA.index = dfLDA.index.astype(int) + LDAModelsCount
dfQDA.index = dfQDA.index.astype(int) + QDAModelsCount
dfRF.index = dfRF.index.astype(int) + RFModelsCount
dfExtraT.index = dfExtraT.index.astype(int) + ExtraTModelsCount
dfAdaB.index = dfAdaB.index.astype(int) + AdaBModelsCount
dfGradB.index = dfGradB.index.astype(int) + GradBModelsCount
dfKNNFiltered = dfKNN.loc[KNNModels, :]
dfSVCFiltered = dfSVC.loc[SVCModels, :]
dfGausNBFiltered = dfGausNB.loc[GausNBModels, :]
dfMLPFiltered = dfMLP.loc[MLPModels, :]
dfLRFiltered = dfLR.loc[LRModels, :]
dfLDAFiltered = dfLDA.loc[LDAModels, :]
dfQDAFiltered = dfQDA.loc[QDAModels, :]
dfRFFiltered = dfRF.loc[RFModels, :]
dfExtraTFiltered = dfExtraT.loc[ExtraTModels, :]
dfAdaBFiltered = dfAdaB.loc[AdaBModels, :]
dfGradBFiltered = dfGradB.loc[GradBModels, :]
return [dfKNNFiltered, dfSVCFiltered, dfGausNBFiltered, dfMLPFiltered, dfLRFiltered, dfLDAFiltered, dfQDAFiltered, dfRFFiltered, dfExtraTFiltered, dfAdaBFiltered, dfGradBFiltered]
def preProcessPerClassM():
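    # Gather the per-class classification-report metrics of the selected models into one frame.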
dicKNN = json.loads(allParametersPerformancePerModel[2])
dicSVC = json.loads(allParametersPerformancePerModel[11])
dicGausNB = json.loads(allParametersPerformancePerModel[20])
dicMLP = json.loads(allParametersPerformancePerModel[29])
dicLR = json.loads(allParametersPerformancePerModel[38])
dicLDA = json.loads(allParametersPerformancePerModel[47])
dicQDA = json.loads(allParametersPerformancePerModel[56])
dicRF = json.loads(allParametersPerformancePerModel[65])
dicExtraT = json.loads(allParametersPerformancePerModel[74])
dicAdaB = json.loads(allParametersPerformancePerModel[83])
dicGradB = json.loads(allParametersPerformancePerModel[92])
dfKNN = pd.DataFrame.from_dict(dicKNN)
dfSVC = pd.DataFrame.from_dict(dicSVC)
dfGausNB = pd.DataFrame.from_dict(dicGausNB)
dfMLP = pd.DataFrame.from_dict(dicMLP)
dfLR = pd.DataFrame.from_dict(dicLR)
dfLDA = pd.DataFrame.from_dict(dicLDA)
dfQDA = pd.DataFrame.from_dict(dicQDA)
dfRF = pd.DataFrame.from_dict(dicRF)
dfExtraT = pd.DataFrame.from_dict(dicExtraT)
dfAdaB = pd.DataFrame.from_dict(dicAdaB)
dfGradB = pd.DataFrame.from_dict(dicGradB)
dfKNN.index = dfKNN.index.astype(int)
dfSVC.index = dfSVC.index.astype(int) + SVCModelsCount
dfGausNB.index = dfGausNB.index.astype(int) + GausNBModelsCount
dfMLP.index = dfMLP.index.astype(int) + MLPModelsCount
dfLR.index = dfLR.index.astype(int) + LRModelsCount
dfLDA.index = dfLDA.index.astype(int) + LDAModelsCount
dfQDA.index = dfQDA.index.astype(int) + QDAModelsCount
dfRF.index = dfRF.index.astype(int) + RFModelsCount
dfExtraT.index = dfExtraT.index.astype(int) + ExtraTModelsCount
dfAdaB.index = dfAdaB.index.astype(int) + AdaBModelsCount
dfGradB.index = dfGradB.index.astype(int) + GradBModelsCount
dfKNNFiltered = dfKNN.loc[KNNModels, :]
dfSVCFiltered = dfSVC.loc[SVCModels, :]
dfGausNBFiltered = dfGausNB.loc[GausNBModels, :]
dfMLPFiltered = dfMLP.loc[MLPModels, :]
dfLRFiltered = dfLR.loc[LRModels, :]
dfLDAFiltered = dfLDA.loc[LDAModels, :]
dfQDAFiltered = dfQDA.loc[QDAModels, :]
dfRFFiltered = dfRF.loc[RFModels, :]
dfExtraTFiltered = dfExtraT.loc[ExtraTModels, :]
dfAdaBFiltered = dfAdaB.loc[AdaBModels, :]
dfGradBFiltered = dfGradB.loc[GradBModels, :]
df_concatParams = pd.concat([dfKNNFiltered, dfSVCFiltered, dfGausNBFiltered, dfMLPFiltered, dfLRFiltered, dfLDAFiltered, dfQDAFiltered, dfRFFiltered, dfExtraTFiltered, dfAdaBFiltered, dfGradBFiltered])
return df_concatParams
def preProcessFeatAcc():
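    # Gather the per-feature accuracy results of the selected models into one frame.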
dicKNN = json.loads(allParametersPerformancePerModel[3])
dicSVC = json.loads(allParametersPerformancePerModel[12])
dicGausNB = json.loads(allParametersPerformancePerModel[21])
dicMLP = json.loads(allParametersPerformancePerModel[30])
dicLR = json.loads(allParametersPerformancePerModel[39])
dicLDA = json.loads(allParametersPerformancePerModel[48])
dicQDA = json.loads(allParametersPerformancePerModel[57])
dicRF = json.loads(allParametersPerformancePerModel[66])
dicExtraT = json.loads(allParametersPerformancePerModel[75])
dicAdaB = json.loads(allParametersPerformancePerModel[84])
dicGradB = json.loads(allParametersPerformancePerModel[93])
dfKNN = pd.DataFrame.from_dict(dicKNN)
dfSVC = pd.DataFrame.from_dict(dicSVC)
dfGausNB = pd.DataFrame.from_dict(dicGausNB)
dfMLP = pd.DataFrame.from_dict(dicMLP)
dfLR = pd.DataFrame.from_dict(dicLR)
dfLDA = pd.DataFrame.from_dict(dicLDA)
dfQDA = pd.DataFrame.from_dict(dicQDA)
dfRF = pd.DataFrame.from_dict(dicRF)
dfExtraT = pd.DataFrame.from_dict(dicExtraT)
dfAdaB = pd.DataFrame.from_dict(dicAdaB)
dfGradB = pd.DataFrame.from_dict(dicGradB)
dfKNN.index = dfKNN.index.astype(int)
dfSVC.index = dfSVC.index.astype(int) + SVCModelsCount
dfGausNB.index = dfGausNB.index.astype(int) + GausNBModelsCount
dfMLP.index = dfMLP.index.astype(int) + MLPModelsCount
dfLR.index = dfLR.index.astype(int) + LRModelsCount
dfLDA.index = dfLDA.index.astype(int) + LDAModelsCount
dfQDA.index = dfQDA.index.astype(int) + QDAModelsCount
dfRF.index = dfRF.index.astype(int) + RFModelsCount
dfExtraT.index = dfExtraT.index.astype(int) + ExtraTModelsCount
dfAdaB.index = dfAdaB.index.astype(int) + AdaBModelsCount
dfGradB.index = dfGradB.index.astype(int) + GradBModelsCount
dfKNNFiltered = dfKNN.loc[KNNModels, :]
dfSVCFiltered = dfSVC.loc[SVCModels, :]
dfGausNBFiltered = dfGausNB.loc[GausNBModels, :]
dfMLPFiltered = dfMLP.loc[MLPModels, :]
dfLRFiltered = dfLR.loc[LRModels, :]
dfLDAFiltered = dfLDA.loc[LDAModels, :]
dfQDAFiltered = dfQDA.loc[QDAModels, :]
dfRFFiltered = dfRF.loc[RFModels, :]
dfExtraTFiltered = dfExtraT.loc[ExtraTModels, :]
dfAdaBFiltered = dfAdaB.loc[AdaBModels, :]
dfGradBFiltered = dfGradB.loc[GradBModels, :]
df_featAcc = pd.concat([dfKNNFiltered, dfSVCFiltered, dfGausNBFiltered, dfMLPFiltered, dfLRFiltered, dfLDAFiltered, dfQDAFiltered, dfRFFiltered, dfExtraTFiltered, dfAdaBFiltered, dfGradBFiltered])
return df_featAcc
def preProcessPerm():
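    # Gather the permutation-importance results of the selected models into one frame.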
dicKNN = json.loads(allParametersPerformancePerModel[4])
dicSVC = json.loads(allParametersPerformancePerModel[13])
dicGausNB = json.loads(allParametersPerformancePerModel[22])
dicMLP = json.loads(allParametersPerformancePerModel[31])
dicLR = json.loads(allParametersPerformancePerModel[40])
dicLDA = json.loads(allParametersPerformancePerModel[49])
dicQDA = json.loads(allParametersPerformancePerModel[58])
dicRF = json.loads(allParametersPerformancePerModel[67])
dicExtraT = json.loads(allParametersPerformancePerModel[76])
dicAdaB = json.loads(allParametersPerformancePerModel[85])
dicGradB = json.loads(allParametersPerformancePerModel[94])
dfKNN = pd.DataFrame.from_dict(dicKNN)
dfSVC = pd.DataFrame.from_dict(dicSVC)
dfGausNB = pd.DataFrame.from_dict(dicGausNB)
dfMLP = pd.DataFrame.from_dict(dicMLP)
dfLR = pd.DataFrame.from_dict(dicLR)
dfLDA = pd.DataFrame.from_dict(dicLDA)
dfQDA = pd.DataFrame.from_dict(dicQDA)
dfRF = pd.DataFrame.from_dict(dicRF)
dfExtraT = pd.DataFrame.from_dict(dicExtraT)
dfAdaB = pd.DataFrame.from_dict(dicAdaB)
dfGradB = pd.DataFrame.from_dict(dicGradB)
dfKNN.index = dfKNN.index.astype(int)
dfSVC.index = dfSVC.index.astype(int) + SVCModelsCount
dfGausNB.index = dfGausNB.index.astype(int) + GausNBModelsCount
dfMLP.index = dfMLP.index.astype(int) + MLPModelsCount
dfLR.index = dfLR.index.astype(int) + LRModelsCount
dfLDA.index = dfLDA.index.astype(int) + LDAModelsCount
dfQDA.index = dfQDA.index.astype(int) + QDAModelsCount
dfRF.index = dfRF.index.astype(int) + RFModelsCount
dfExtraT.index = dfExtraT.index.astype(int) + ExtraTModelsCount
dfAdaB.index = dfAdaB.index.astype(int) + AdaBModelsCount
dfGradB.index = dfGradB.index.astype(int) + GradBModelsCount
dfKNNFiltered = dfKNN.loc[KNNModels, :]
dfSVCFiltered = dfSVC.loc[SVCModels, :]
dfGausNBFiltered = dfGausNB.loc[GausNBModels, :]
dfMLPFiltered = dfMLP.loc[MLPModels, :]
dfLRFiltered = dfLR.loc[LRModels, :]
dfLDAFiltered = dfLDA.loc[LDAModels, :]
dfQDAFiltered = dfQDA.loc[QDAModels, :]
dfRFFiltered = dfRF.loc[RFModels, :]
dfExtraTFiltered = dfExtraT.loc[ExtraTModels, :]
dfAdaBFiltered = dfAdaB.loc[AdaBModels, :]
dfGradBFiltered = dfGradB.loc[GradBModels, :]
df_perm = pd.concat([dfKNNFiltered, dfSVCFiltered, dfGausNBFiltered, dfMLPFiltered, dfLRFiltered, dfLDAFiltered, dfQDAFiltered, dfRFFiltered, dfExtraTFiltered, dfAdaBFiltered, dfGradBFiltered])
return df_perm
def preProcessFeatSc():
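    # The chi-squared feature scores are computed from the data alone, so the copy stored with
    # the KNN results is representative for every algorithm.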
dicKNN = json.loads(allParametersPerformancePerModel[5])
dfKNN = pd.DataFrame.from_dict(dicKNN)
return dfKNN
# TODO: this helper may no longer be needed; consider removing it
def preProcsumPerMetric(factors):
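    # Weighted overall score per model: invert log loss so that higher is better, weight every
    # metric by its user-defined factor, and average to a 0-100 scale (0 if all factors are 0).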
sumPerClassifier = []
loopThroughMetrics = PreprocessingMetrics()
loopThroughMetrics = loopThroughMetrics.fillna(0)
loopThroughMetrics.loc[:, 'log_loss'] = 1 - loopThroughMetrics.loc[:, 'log_loss']
for row in loopThroughMetrics.iterrows():
rowSum = 0
name, values = row
for loop, elements in enumerate(values):
rowSum = elements*factors[loop] + rowSum
if sum(factors) == 0:
sumPerClassifier = 0
else:
sumPerClassifier.append(rowSum/sum(factors) * 100)
return sumPerClassifier
def preProcMetricsAllAndSel():
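    # Collect the individual metric columns in the fixed order the frontend expects, clip negative
    # Matthews correlation values to zero, invert log loss, apply the user weights, scale to
    # 0-100, and serialize each column to JSON.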
loopThroughMetrics = PreprocessingMetrics()
loopThroughMetrics = loopThroughMetrics.fillna(0)
global factors
metricsPerModelColl = []
metricsPerModelColl.append(loopThroughMetrics['mean_test_accuracy'])
metricsPerModelColl.append(loopThroughMetrics['geometric_mean_score_micro'])
metricsPerModelColl.append(loopThroughMetrics['geometric_mean_score_macro'])
metricsPerModelColl.append(loopThroughMetrics['geometric_mean_score_weighted'])
metricsPerModelColl.append(loopThroughMetrics['mean_test_precision_micro'])
metricsPerModelColl.append(loopThroughMetrics['mean_test_precision_macro'])
metricsPerModelColl.append(loopThroughMetrics['mean_test_precision_weighted'])
metricsPerModelColl.append(loopThroughMetrics['mean_test_recall_micro'])
metricsPerModelColl.append(loopThroughMetrics['mean_test_recall_macro'])
metricsPerModelColl.append(loopThroughMetrics['mean_test_recall_weighted'])
metricsPerModelColl.append(loopThroughMetrics['f5_micro'])
metricsPerModelColl.append(loopThroughMetrics['f5_macro'])
metricsPerModelColl.append(loopThroughMetrics['f5_weighted'])
metricsPerModelColl.append(loopThroughMetrics['f1_micro'])
metricsPerModelColl.append(loopThroughMetrics['f1_macro'])
metricsPerModelColl.append(loopThroughMetrics['f1_weighted'])
metricsPerModelColl.append(loopThroughMetrics['f2_micro'])
metricsPerModelColl.append(loopThroughMetrics['f2_macro'])
metricsPerModelColl.append(loopThroughMetrics['f2_weighted'])
metricsPerModelColl.append(loopThroughMetrics['matthews_corrcoef'])
metricsPerModelColl.append(loopThroughMetrics['mean_test_roc_auc_ovo_weighted'])
metricsPerModelColl.append(loopThroughMetrics['log_loss'])
f=lambda a: (abs(a)+a)/2
for index, metric in enumerate(metricsPerModelColl):
if (index == 19):
metricsPerModelColl[index] = ((f(metric))*factors[index]) * 100
elif (index == 21):
metricsPerModelColl[index] = ((1 - metric)*factors[index] ) * 100
else:
metricsPerModelColl[index] = (metric*factors[index]) * 100
metricsPerModelColl[index] = metricsPerModelColl[index].to_json()
return metricsPerModelColl
def preProceModels():
models = KNNModels + SVCModels + GausNBModels + MLPModels + LRModels + LDAModels + QDAModels + RFModels + ExtraTModels + AdaBModels + GradBModels
return models
def FunMDS (data):
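    # Project the rows of data to 2-D with metric MDS; returns [x coordinates, y coordinates].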
mds = MDS(n_components=2, random_state=RANDOM_SEED)
XTransformed = mds.fit_transform(data).T
XTransformed = XTransformed.tolist()
return XTransformed
def FunTsne (data):
    tsne = TSNE(n_components=2, random_state=RANDOM_SEED).fit_transform(data)
return tsne
def FunUMAP (data):
trans = umap.UMAP(n_neighbors=15, random_state=RANDOM_SEED).fit(data)
Xpos = trans.embedding_[:, 0].tolist()
Ypos = trans.embedding_[:, 1].tolist()
return [Xpos,Ypos]
def InitializeEnsemble():
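    # Build the model-space and prediction-space projections (MDS, t-SNE, UMAP), derive the
    # per-instance statistics, fit the ensemble over the selected models, and pass everything
    # to ReturnResults for the frontend.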
XModels = PreprocessingMetrics()
global ModelSpaceMDS
global ModelSpaceTSNE
global allParametersPerformancePerModel
global impDataInst
XModels = XModels.fillna(0)
ModelSpaceMDS = FunMDS(XModels)
ModelSpaceTSNE = FunTsne(XModels)
ModelSpaceTSNE = ModelSpaceTSNE.tolist()
ModelSpaceUMAP = FunUMAP(XModels)
PredictionProbSel = PreprocessingPred()
PredictionSpaceMDS = FunMDS(PredictionProbSel)
PredictionSpaceTSNE = FunTsne(PredictionProbSel)
PredictionSpaceTSNE = PredictionSpaceTSNE.tolist()
PredictionSpaceUMAP = FunUMAP(PredictionProbSel)
ModelsIDs = preProceModels()
impDataInst = processDataInstance(ModelsIDs,allParametersPerformancePerModel)
callPreResults()
key = 0
EnsembleModel(ModelsIDs, key)
ReturnResults(ModelSpaceMDS,ModelSpaceTSNE,ModelSpaceUMAP,PredictionSpaceMDS,PredictionSpaceTSNE,PredictionSpaceUMAP)
def processDataInstance(ModelsIDs, allParametersPerformancePerModel):
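    # For every data instance, return the fraction of selected models that misclassify it; when
    # composing, a majority-vote column over the selected data points is appended first.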
dicKNN = json.loads(allParametersPerformancePerModel[8])
dicKNN = json.loads(dicKNN)
dicSVC = json.loads(allParametersPerformancePerModel[17])
dicSVC = json.loads(dicSVC)
dicGausNB = json.loads(allParametersPerformancePerModel[26])
dicGausNB = json.loads(dicGausNB)
dicMLP = json.loads(allParametersPerformancePerModel[35])
dicMLP = json.loads(dicMLP)
dicLR = json.loads(allParametersPerformancePerModel[44])
dicLR = json.loads(dicLR)
dicLDA = json.loads(allParametersPerformancePerModel[53])
dicLDA = json.loads(dicLDA)
dicQDA = json.loads(allParametersPerformancePerModel[62])
dicQDA = json.loads(dicQDA)
dicRF = json.loads(allParametersPerformancePerModel[71])
dicRF = json.loads(dicRF)
dicExtraT = json.loads(allParametersPerformancePerModel[80])
dicExtraT = json.loads(dicExtraT)
dicAdaB = json.loads(allParametersPerformancePerModel[89])
dicAdaB = json.loads(dicAdaB)
dicGradB = json.loads(allParametersPerformancePerModel[98])
dicGradB = json.loads(dicGradB)
dfKNN = pd.DataFrame.from_dict(dicKNN)
dfSVC = pd.DataFrame.from_dict(dicSVC)
dfGausNB = pd.DataFrame.from_dict(dicGausNB)
dfMLP = pd.DataFrame.from_dict(dicMLP)
dfLR = pd.DataFrame.from_dict(dicLR)
dfLDA = pd.DataFrame.from_dict(dicLDA)
dfQDA = pd.DataFrame.from_dict(dicQDA)
dfRF = pd.DataFrame.from_dict(dicRF)
dfExtraT = pd.DataFrame.from_dict(dicExtraT)
dfAdaB = pd.DataFrame.from_dict(dicAdaB)
dfGradB = pd.DataFrame.from_dict(dicGradB)
dfKNN.index = dfKNN.index.astype(int)
dfSVC.index = dfSVC.index.astype(int) + SVCModelsCount
dfGausNB.index = dfGausNB.index.astype(int) + GausNBModelsCount
dfMLP.index = dfMLP.index.astype(int) + MLPModelsCount
dfLR.index = dfLR.index.astype(int) + LRModelsCount
dfLDA.index = dfLDA.index.astype(int) + LDAModelsCount
dfQDA.index = dfQDA.index.astype(int) + QDAModelsCount
dfRF.index = dfRF.index.astype(int) + RFModelsCount
dfExtraT.index = dfExtraT.index.astype(int) + ExtraTModelsCount
dfAdaB.index = dfAdaB.index.astype(int) + AdaBModelsCount
dfGradB.index = dfGradB.index.astype(int) + GradBModelsCount
dfKNNFiltered = dfKNN.loc[KNNModels, :]
dfSVCFiltered = dfSVC.loc[SVCModels, :]
dfGausNBFiltered = dfGausNB.loc[GausNBModels, :]
dfMLPFiltered = dfMLP.loc[MLPModels, :]
dfLRFiltered = dfLR.loc[LRModels, :]
dfLDAFiltered = dfLDA.loc[LDAModels, :]
dfQDAFiltered = dfQDA.loc[QDAModels, :]
dfRFFiltered = dfRF.loc[RFModels, :]
dfExtraTFiltered = dfExtraT.loc[ExtraTModels, :]
dfAdaBFiltered = dfAdaB.loc[AdaBModels, :]
dfGradBFiltered = dfGradB.loc[GradBModels, :]
df_connect = pd.concat([dfKNNFiltered, dfSVCFiltered, dfGausNBFiltered, dfMLPFiltered, dfLRFiltered, dfLDAFiltered, dfQDAFiltered, dfRFFiltered, dfExtraTFiltered, dfAdaBFiltered, dfGradBFiltered])
global yData
global filterActionFinal
global dataSpacePointsIDs
lengthDF = len(df_connect.columns)
if (filterActionFinal == 'compose'):
getList = []
for index, row in df_connect.iterrows():
yDataSelected = []
for column in row[dataSpacePointsIDs]:
yDataSelected.append(column)
storeMode = mode(yDataSelected)
getList.append(storeMode)
df_connect[str(lengthDF)] = getList
countCorrect = []
length = len(df_connect.index)
for index, element in enumerate(yData):
countTemp = 0
dfPart = df_connect[[str(index)]]
for indexdf, row in dfPart.iterrows():
if (int(row.values[0]) == int(element)):
countTemp += 1
countCorrect.append(1 - (countTemp/length))
return countCorrect
def ReturnResults(ModelSpaceMDS,ModelSpaceTSNE,ModelSpaceUMAP,PredictionSpaceMDS,PredictionSpaceTSNE,PredictionSpaceUMAP):
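    # Serialize every overview artifact (scores, projections, parameters, metrics, feature
    # information, data and targets) into the global Results list at the fixed positions the
    # frontend expects.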
global Results
global AllTargets
Results = []
parametersGen = PreprocessingParam()
PerClassMetrics = preProcessPerClassM()
FeatureAccuracy = preProcessFeatAcc()
perm_imp_eli5PDCon = preProcessPerm()
featureScoresCon = preProcessFeatSc()
metricsPerModel = preProcMetricsAllAndSel()
sumPerClassifier = preProcsumPerMetric(factors)
ModelsIDs = preProceModels()
parametersGenPD = parametersGen.to_json(orient='records')
PerClassMetrics = PerClassMetrics.to_json(orient='records')
FeatureAccuracy = FeatureAccuracy.to_json(orient='records')
perm_imp_eli5PDCon = perm_imp_eli5PDCon.to_json(orient='records')
featureScoresCon = featureScoresCon.to_json(orient='records')
XDataJSONEntireSet = XData.to_json(orient='records')
XDataJSON = XData.columns.tolist()
Results.append(json.dumps(sumPerClassifier)) # Position: 0
Results.append(json.dumps(ModelSpaceMDS)) # Position: 1
Results.append(json.dumps(parametersGenPD)) # Position: 2
Results.append(PerClassMetrics) # Position: 3
Results.append(json.dumps(target_names)) # Position: 4
Results.append(FeatureAccuracy) # Position: 5
Results.append(json.dumps(XDataJSON)) # Position: 6
Results.append(0) # Position: 7
Results.append(json.dumps(PredictionSpaceMDS)) # Position: 8
Results.append(json.dumps(metricsPerModel)) # Position: 9
Results.append(perm_imp_eli5PDCon) # Position: 10
Results.append(featureScoresCon) # Position: 11
Results.append(json.dumps(ModelSpaceTSNE)) # Position: 12
Results.append(json.dumps(ModelsIDs)) # Position: 13
Results.append(json.dumps(XDataJSONEntireSet)) # Position: 14
Results.append(json.dumps(yData)) # Position: 15
Results.append(json.dumps(AllTargets)) # Position: 16
Results.append(json.dumps(ModelSpaceUMAP)) # Position: 17
Results.append(json.dumps(PredictionSpaceTSNE)) # Position: 18
Results.append(json.dumps(PredictionSpaceUMAP)) # Position: 19
return Results
# Sending the overview classifiers' results to be visualized as a scatterplot
@app.route('/data/PlotClassifiers', methods=["GET", "POST"])
def SendToPlot():
while (len(DataResultsRaw) != DataRawLength):
pass
InitializeEnsemble()
response = {
'OverviewResults': Results
}
return jsonify(response)
# Retrieve data from client
@cross_origin(origin='localhost',headers=['Content-Type','Authorization'])
@app.route('/data/ServerRemoveFromStack', methods=["GET", "POST"])
def RetrieveSelClassifiersIDandRemoveFromStack():
ClassifierIDsList = request.get_data().decode('utf8').replace("'", '"')
PredictionProbSelUpdate = PreprocessingPredUpdate(ClassifierIDsList)
global resultsUpdatePredictionSpace
resultsUpdatePredictionSpace = []
resultsUpdatePredictionSpace.append(json.dumps(PredictionProbSelUpdate[0])) # Position: 0
resultsUpdatePredictionSpace.append(json.dumps(PredictionProbSelUpdate[1]))
key = 3
EnsembleModel(ClassifierIDsList, key)
return 'Everything Okay'
# Sending the updated prediction-space projection (and the remaining model IDs) back to the frontend
@app.route('/data/UpdatePredictionsSpace', methods=["GET", "POST"])
def SendPredBacktobeUpdated():
response = {
'UpdatePredictions': resultsUpdatePredictionSpace
}
return jsonify(response)
# Retrieve data from client
@cross_origin(origin='localhost',headers=['Content-Type','Authorization'])
@app.route('/data/ServerRequestSelPoin', methods=["GET", "POST"])
def RetrieveSelClassifiersID():
ClassifierIDsList = request.get_data().decode('utf8').replace("'", '"')
#ComputeMetricsForSel(ClassifierIDsList)
ClassifierIDCleaned = json.loads(ClassifierIDsList)
global keySpecInternal
keySpecInternal = 1
keySpecInternal = ClassifierIDCleaned['keyNow']
EnsembleModel(ClassifierIDsList, 1)
return 'Everything Okay'
# Retrieve data from client
@cross_origin(origin='localhost',headers=['Content-Type','Authorization'])
@app.route('/data/ServerRequestSelPoinLocally', methods=["GET", "POST"])
def RetrieveSelClassifiersIDLocally():
ClassifierIDsList = request.get_data().decode('utf8').replace("'", '"')
ComputeMetricsForSel(ClassifierIDsList)
return 'Everything Okay'
def ComputeMetricsForSel(Models):
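    # Recompute the weighted metric values for just the brushed models and cache them (as JSON)
    # in metricsPerModelCollSel for the bar chart view.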
Models = json.loads(Models)
MetricsAlltoSel = PreprocessingMetrics()
listofModels = []
for loop in Models['ClassifiersList']:
listofModels.append(loop)
MetricsAlltoSel = MetricsAlltoSel.loc[listofModels,:]
global metricsPerModelCollSel
global factors
metricsPerModelCollSel = []
metricsPerModelCollSel.append(MetricsAlltoSel['mean_test_accuracy'])
metricsPerModelCollSel.append(MetricsAlltoSel['geometric_mean_score_micro'])
metricsPerModelCollSel.append(MetricsAlltoSel['geometric_mean_score_macro'])
metricsPerModelCollSel.append(MetricsAlltoSel['geometric_mean_score_weighted'])
metricsPerModelCollSel.append(MetricsAlltoSel['mean_test_precision_micro'])
metricsPerModelCollSel.append(MetricsAlltoSel['mean_test_precision_macro'])
metricsPerModelCollSel.append(MetricsAlltoSel['mean_test_precision_weighted'])
metricsPerModelCollSel.append(MetricsAlltoSel['mean_test_recall_micro'])
metricsPerModelCollSel.append(MetricsAlltoSel['mean_test_recall_macro'])
metricsPerModelCollSel.append(MetricsAlltoSel['mean_test_recall_weighted'])
metricsPerModelCollSel.append(MetricsAlltoSel['f5_micro'])
metricsPerModelCollSel.append(MetricsAlltoSel['f5_macro'])
metricsPerModelCollSel.append(MetricsAlltoSel['f5_weighted'])
metricsPerModelCollSel.append(MetricsAlltoSel['f1_micro'])
metricsPerModelCollSel.append(MetricsAlltoSel['f1_macro'])
metricsPerModelCollSel.append(MetricsAlltoSel['f1_weighted'])
metricsPerModelCollSel.append(MetricsAlltoSel['f2_micro'])
metricsPerModelCollSel.append(MetricsAlltoSel['f2_macro'])
metricsPerModelCollSel.append(MetricsAlltoSel['f2_weighted'])
metricsPerModelCollSel.append(MetricsAlltoSel['matthews_corrcoef'])
metricsPerModelCollSel.append(MetricsAlltoSel['mean_test_roc_auc_ovo_weighted'])
metricsPerModelCollSel.append(MetricsAlltoSel['log_loss'])
f=lambda a: (abs(a)+a)/2
for index, metric in enumerate(metricsPerModelCollSel):
if (index == 19):
metricsPerModelCollSel[index] = ((f(metric))*factors[index]) * 100
elif (index == 21):
metricsPerModelCollSel[index] = (1 - metric)*factors[index] * 100
else:
metricsPerModelCollSel[index] = metric*factors[index] * 100
metricsPerModelCollSel[index] = metricsPerModelCollSel[index].to_json()
return 'okay'
# function to get unique values
def unique(list1):
    # initialize an empty list
unique_list = []
# traverse for all elements
for x in list1:
# check if exists in unique_list or not
if x not in unique_list:
unique_list.append(x)
return unique_list
# Sending the recomputed metrics of the selected models to update the bar chart
@app.route('/data/BarChartSelectedModels', methods=["GET", "POST"])
def SendToUpdateBarChart():
response = {
'SelectedMetricsForModels': metricsPerModelCollSel
}
return jsonify(response)
# Retrieve data from client
@cross_origin(origin='localhost',headers=['Content-Type','Authorization'])
@app.route('/data/ServerRequestDataPoint', methods=["GET", "POST"])
def RetrieveSelDataPoints():
DataPointsSel = request.get_data().decode('utf8').replace("'", '"')
DataPointsSelClear = json.loads(DataPointsSel)
listofDataPoints = []
for loop in DataPointsSelClear['DataPointsSel']:
temp = [int(s) for s in re.findall(r'\b\d+\b', loop)]
listofDataPoints.append(temp[0])
global algorithmsList
global resultsMetrics
resultsMetrics = []
df_concatMetrics = []
metricsSelList = []
paramsListSepPD = []
paramsListSepPD = PreprocessingParamSep()
paramsListSeptoDicKNN = paramsListSepPD[0].to_dict(orient='list')
paramsListSeptoDicSVC = paramsListSepPD[1].to_dict(orient='list')
paramsListSeptoDicGausNB = paramsListSepPD[2].to_dict(orient='list')
paramsListSeptoDicMLP = paramsListSepPD[3].to_dict(orient='list')
paramsListSeptoDicLR = paramsListSepPD[4].to_dict(orient='list')
paramsListSeptoDicLDA = paramsListSepPD[5].to_dict(orient='list')
paramsListSeptoDicQDA = paramsListSepPD[6].to_dict(orient='list')
paramsListSeptoDicRF = paramsListSepPD[7].to_dict(orient='list')
paramsListSeptoDicExtraT = paramsListSepPD[8].to_dict(orient='list')
paramsListSeptoDicAdaB = paramsListSepPD[9].to_dict(orient='list')
paramsListSeptoDicGradB = paramsListSepPD[10].to_dict(orient='list')
RetrieveParamsCleared = {}
RetrieveParamsClearedListKNN = []
for key, value in paramsListSeptoDicKNN.items():
withoutDuplicates = Remove(value)
RetrieveParamsCleared[key] = withoutDuplicates
RetrieveParamsClearedListKNN.append(RetrieveParamsCleared)
RetrieveParamsCleared = {}
RetrieveParamsClearedListSVC = []
for key, value in paramsListSeptoDicSVC.items():
withoutDuplicates = Remove(value)
RetrieveParamsCleared[key] = withoutDuplicates
RetrieveParamsClearedListSVC.append(RetrieveParamsCleared)
RetrieveParamsCleared = {}
RetrieveParamsClearedListGausNB = []
for key, value in paramsListSeptoDicGausNB.items():
withoutDuplicates = Remove(value)
RetrieveParamsCleared[key] = withoutDuplicates
RetrieveParamsClearedListGausNB.append(RetrieveParamsCleared)
RetrieveParamsCleared = {}
RetrieveParamsClearedListMLP = []
for key, value in paramsListSeptoDicMLP.items():
withoutDuplicates = Remove(value)
RetrieveParamsCleared[key] = withoutDuplicates
RetrieveParamsClearedListMLP.append(RetrieveParamsCleared)
RetrieveParamsCleared = {}
RetrieveParamsClearedListLR = []
for key, value in paramsListSeptoDicLR.items():
withoutDuplicates = Remove(value)
RetrieveParamsCleared[key] = withoutDuplicates
RetrieveParamsClearedListLR.append(RetrieveParamsCleared)
RetrieveParamsCleared = {}
RetrieveParamsClearedListLDA = []
for key, value in paramsListSeptoDicLDA.items():
withoutDuplicates = Remove(value)
RetrieveParamsCleared[key] = withoutDuplicates
RetrieveParamsClearedListLDA.append(RetrieveParamsCleared)
RetrieveParamsCleared = {}
RetrieveParamsClearedListQDA = []
for key, value in paramsListSeptoDicQDA.items():
withoutDuplicates = Remove(value)
RetrieveParamsCleared[key] = withoutDuplicates
RetrieveParamsClearedListQDA.append(RetrieveParamsCleared)
RetrieveParamsCleared = {}
RetrieveParamsClearedListRF = []
for key, value in paramsListSeptoDicRF.items():
withoutDuplicates = Remove(value)
RetrieveParamsCleared[key] = withoutDuplicates
RetrieveParamsClearedListRF.append(RetrieveParamsCleared)
RetrieveParamsCleared = {}
RetrieveParamsClearedListExtraT = []
for key, value in paramsListSeptoDicExtraT.items():
withoutDuplicates = Remove(value)
RetrieveParamsCleared[key] = withoutDuplicates
RetrieveParamsClearedListExtraT.append(RetrieveParamsCleared)
RetrieveParamsCleared = {}
RetrieveParamsClearedListAdaB = []
for key, value in paramsListSeptoDicAdaB.items():
withoutDuplicates = Remove(value)
RetrieveParamsCleared[key] = withoutDuplicates
RetrieveParamsClearedListAdaB.append(RetrieveParamsCleared)
RetrieveParamsCleared = {}
RetrieveParamsClearedListGradB = []
for key, value in paramsListSeptoDicGradB.items():
withoutDuplicates = Remove(value)
RetrieveParamsCleared[key] = withoutDuplicates
RetrieveParamsClearedListGradB.append(RetrieveParamsCleared)
if (len(paramsListSeptoDicKNN['n_neighbors']) == 0):
RetrieveParamsClearedListKNN = []
if (len(paramsListSeptoDicSVC['C']) == 0):
RetrieveParamsClearedListSVC = []
if (len(paramsListSeptoDicGausNB['var_smoothing']) == 0):
RetrieveParamsClearedListGausNB = []
if (len(paramsListSeptoDicMLP['alpha']) == 0):
RetrieveParamsClearedListMLP = []
if (len(paramsListSeptoDicLR['C']) == 0):
RetrieveParamsClearedListLR = []
if (len(paramsListSeptoDicLDA['shrinkage']) == 0):
RetrieveParamsClearedListLDA = []
if (len(paramsListSeptoDicQDA['reg_param']) == 0):
RetrieveParamsClearedListQDA = []
if (len(paramsListSeptoDicRF['n_estimators']) == 0):
RetrieveParamsClearedListRF = []
if (len(paramsListSeptoDicExtraT['n_estimators']) == 0):
RetrieveParamsClearedListExtraT = []
if (len(paramsListSeptoDicAdaB['n_estimators']) == 0):
RetrieveParamsClearedListAdaB = []
if (len(paramsListSeptoDicGradB['n_estimators']) == 0):
RetrieveParamsClearedListGradB = []
for eachAlgor in algorithms:
if (eachAlgor) == 'KNN':
clf = KNeighborsClassifier()
params = RetrieveParamsClearedListKNN
AlgorithmsIDsEnd = 0
elif (eachAlgor) == 'SVC':
clf = SVC(probability=True,random_state=RANDOM_SEED)
params = RetrieveParamsClearedListSVC
AlgorithmsIDsEnd = SVCModelsCount
elif (eachAlgor) == 'GauNB':
clf = GaussianNB()
params = RetrieveParamsClearedListGausNB
AlgorithmsIDsEnd = GausNBModelsCount
elif (eachAlgor) == 'MLP':
clf = MLPClassifier(random_state=RANDOM_SEED)
params = RetrieveParamsClearedListMLP
AlgorithmsIDsEnd = MLPModelsCount
elif (eachAlgor) == 'LR':
clf = LogisticRegression(random_state=RANDOM_SEED)
params = RetrieveParamsClearedListLR
AlgorithmsIDsEnd = LRModelsCount
elif (eachAlgor) == 'LDA':
clf = LinearDiscriminantAnalysis()
params = RetrieveParamsClearedListLDA
AlgorithmsIDsEnd = LDAModelsCount
elif (eachAlgor) == 'QDA':
clf = QuadraticDiscriminantAnalysis()
params = RetrieveParamsClearedListQDA
AlgorithmsIDsEnd = QDAModelsCount
elif (eachAlgor) == 'RF':
clf = RandomForestClassifier(random_state=RANDOM_SEED)
params = RetrieveParamsClearedListRF
AlgorithmsIDsEnd = RFModelsCount
elif (eachAlgor) == 'ExtraT':
clf = ExtraTreesClassifier(random_state=RANDOM_SEED)
params = RetrieveParamsClearedListExtraT
AlgorithmsIDsEnd = ExtraTModelsCount
elif (eachAlgor) == 'AdaB':
clf = AdaBoostClassifier(random_state=RANDOM_SEED)
params = RetrieveParamsClearedListAdaB
AlgorithmsIDsEnd = AdaBModelsCount
else:
clf = GradientBoostingClassifier(random_state=RANDOM_SEED)
params = RetrieveParamsClearedListGradB
AlgorithmsIDsEnd = GradBModelsCount
metricsSelList = GridSearchSel(clf, params, factors, AlgorithmsIDsEnd, listofDataPoints, crossValidation)
if (len(metricsSelList[0]) != 0 and len(metricsSelList[1]) != 0 and len(metricsSelList[2]) != 0 and len(metricsSelList[3]) != 0 and len(metricsSelList[4]) != 0 and len(metricsSelList[5]) != 0 and len(metricsSelList[6]) != 0 and len(metricsSelList[7]) != 0 and len(metricsSelList[8]) != 0 and len(metricsSelList[9]) != 0 and len(metricsSelList[10]) != 0):
dicKNN = json.loads(metricsSelList[0])
dfKNN = pd.DataFrame.from_dict(dicKNN)
parametersSelDataPD = parametersSelData[0].apply(pd.Series)
set_diff_df = pd.concat([parametersSelDataPD, paramsListSepPD[0], paramsListSepPD[0]]).drop_duplicates(keep=False)
set_diff_df = set_diff_df.index.tolist()
if (len(set_diff_df) == 0):
dfKNNCleared = dfKNN
else:
dfKNNCleared = dfKNN.drop(dfKNN.index[set_diff_df])
dicSVC = json.loads(metricsSelList[1])
dfSVC = pd.DataFrame.from_dict(dicSVC)
parametersSelDataPD = parametersSelData[1].apply(pd.Series)
set_diff_df = pd.concat([parametersSelDataPD, paramsListSepPD[1], paramsListSepPD[1]]).drop_duplicates(keep=False)
set_diff_df = set_diff_df.index.tolist()
if (len(set_diff_df) == 0):
dfSVCCleared = dfSVC
else:
dfSVCCleared = dfSVC.drop(dfSVC.index[set_diff_df])
dicGausNB = json.loads(metricsSelList[2])
dfGausNB = pd.DataFrame.from_dict(dicGausNB)
parametersSelDataPD = parametersSelData[2].apply(pd.Series)
set_diff_df = pd.concat([parametersSelDataPD, paramsListSepPD[2], paramsListSepPD[2]]).drop_duplicates(keep=False)
set_diff_df = set_diff_df.index.tolist()
if (len(set_diff_df) == 0):
dfGausNBCleared = dfGausNB
else:
dfGausNBCleared = dfGausNB.drop(dfGausNB.index[set_diff_df])
dicMLP = json.loads(metricsSelList[3])
dfMLP = pd.DataFrame.from_dict(dicMLP)
parametersSelDataPD = parametersSelData[3].apply(pd.Series)
set_diff_df = pd.concat([parametersSelDataPD, paramsListSepPD[3], paramsListSepPD[3]]).drop_duplicates(keep=False)
set_diff_df = set_diff_df.index.tolist()
if (len(set_diff_df) == 0):
dfMLPCleared = dfMLP
else:
dfMLPCleared = dfMLP.drop(dfMLP.index[set_diff_df])
dicLR = json.loads(metricsSelList[4])
dfLR = pd.DataFrame.from_dict(dicLR)
parametersSelDataPD = parametersSelData[4].apply(pd.Series)
set_diff_df = pd.concat([parametersSelDataPD, paramsListSepPD[4], paramsListSepPD[4]]).drop_duplicates(keep=False)
set_diff_df = set_diff_df.index.tolist()
if (len(set_diff_df) == 0):
dfLRCleared = dfLR
else:
dfLRCleared = dfLR.drop(dfLR.index[set_diff_df])
dicLDA = json.loads(metricsSelList[5])
dfLDA = pd.DataFrame.from_dict(dicLDA)
parametersSelDataPD = parametersSelData[5].apply(pd.Series)
set_diff_df = pd.concat([parametersSelDataPD, paramsListSepPD[5], paramsListSepPD[5]]).drop_duplicates(keep=False)
set_diff_df = set_diff_df.index.tolist()
if (len(set_diff_df) == 0):
dfLDACleared = dfLDA
else:
dfLDACleared = dfLDA.drop(dfLDA.index[set_diff_df])
dicQDA = json.loads(metricsSelList[6])
dfQDA = pd.DataFrame.from_dict(dicQDA)
parametersSelDataPD = parametersSelData[6].apply(pd.Series)
set_diff_df = | pd.concat([parametersSelDataPD, paramsListSepPD[6], paramsListSepPD[6]]) | pandas.concat |
import time
import re
import pyautogui
import requests
import sys
import random
import pandas as pd
import imagehash
from selenium import webdriver
from bs4 import BeautifulSoup
from datetime import datetime
from PIL import Image
class MyLikes:
def __init__(self, url, driver_path, records_path) -> None:
self.url = url # URL for selenium
self.incrementer = 0 # Variable to replace a count within a for loop for `main`
self.card_identifier = dict() # Unique identifier for a profile card
self.picture_count = 0 # This helps to identify the profile card we're on and is also used in the filenames
self.records = list() # Storing the data to be written to an Excel workbook
self.records_path = records_path # Path to save the Excel workbook
self.now = datetime.utcnow() # Store the start time, in GMT, of the script in a variable
self.url_regEx = re.compile(pattern=r'url\(\"(https://.+\.jpg)\"')
self.card_identifier_regEx = re.compile(pattern=r'https://images-ssl.gotinder.com/(.+)/\d{3}x')
self.options = webdriver.ChromeOptions() # Standard for using Chrome with selenium
self.options.add_experimental_option('debuggerAddress', 'localhost:9222') # Running Chrome on localhost
self.driver = webdriver.Chrome(executable_path=driver_path, options=self.options) # Standard for using Chrome with selenium
self.headers = { # Headers for our requests. These are very important. Without them, we can get timed out or banned.
'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/96.0.4664.45 Safari/537.36',
'accept-language': 'en-US,en;q=0.9',
'referer': 'https://tinder.com/',
'dnt': '1'
}
def __repr__(self) -> str:
return 'This script is intended to download all of the pictures and videos from the profiles on the "Likes Sent" section of Tinder.'
def log_in(self) -> None:
# Open the URL in Chrome
self.driver.get(url=self.url)
time.sleep(4)
# Click the Likes Sent button
self.driver.find_element_by_xpath(xpath='//a[@href="/app/my-likes"]').click() # Selecting by the href of an anchor (i.e., 'a') tag
time.sleep(3)
def main(self) -> None:
# while 1:
for _ in range(1):
time.sleep(3)
# Get the current page's HTML
final_html = self.driver.page_source
# Create a soup object
self.soup = BeautifulSoup(final_html, 'html.parser')
# Find all profile cards within the current HTML
cards = self.soup.find_all('div', {'aria-label': re.compile(pattern=r'.*'), 'class': 'Bdrs(8px) Bgz(cv) Bgp(c) StretchedBox'})
# Find the div's id for the div that holds the profile cards. This is important because Tinder frequently changes this id
div_id = self.soup.find('div', {'class': 'likesYou__scroller Sb(s) D(f) Jc(c) Fxd(c) Animtf(l) Animfm(f) Animdur(.75s) Ovy(s) Ovsb(n) Ovs(touch)'})['id']
# Iterate over the profile cards
for card in cards:
card_identifier = re.search(pattern=self.card_identifier_regEx, string=str(card)).group(1)
if self.card_identifier.get(card_identifier) is not None:
continue # Since the profile card ID is in the dictionary, skip this card and go to the next card
else: # Since we haven't gathered the profile data, gather now
# Click in the background
pyautogui.moveTo(x=1850, y=350, duration=0.1)
pyautogui.click()
# Add the card ID to the dictionary
self.card_identifier.setdefault(card_identifier, 0)
# Increment the picture count
self.picture_count += 1
# Click the relevant profile card
if self.driver.find_element_by_xpath(xpath=f'//*[@id="{div_id}"]/div[2]/div[{self.picture_count}]/div/div/span/div') is not None: # Tinder may change the div the xpath relates to. I can probably write a regular expression to account for this, but I manually updated this one.
try:
self.driver.find_element_by_xpath(xpath=f'//*[@id="{div_id}"]/div[2]/div[{self.picture_count}]/div/div/span/div').click()
except Exception as e:
self.driver.find_element_by_xpath(xpath=f'//*[@id="{div_id}"]/div[2]/div[{self.picture_count}]/div/div/span/div[2]/video').click()
elif self.driver.find_element_by_xpath(xpath=f'//*[@id="{div_id}"]/div[2]/div[{self.picture_count}]/div/div') is not None:
self.driver.find_element_by_xpath(xpath=f'//*[@id="{div_id}"]/div[2]/div[{self.picture_count}]/div/div').click()
else:
# Finish the script by writing the data to a dataframe then an Excel workbook. Finally, call `sys.exit()`
sys.exit('The script is complete. There are no more profile cards to go through.')
time.sleep(1)
# Get HTML of the profile card
profile_html = self.driver.page_source
second_soup = BeautifulSoup(profile_html, 'html.parser')
name = second_soup.find('h1', {'class': 'Fz($xl) Fw($bold) Fxs(1) Fxw(w) Pend(8px) M(0) D(i)'}).text.title()
# Get the total number of pages in the profile card
try:
number_of_pages = int(second_soup.find('button', {'class': 'bullet D(ib) Va(m) Cnt($blank)::a D(b)::a Cur(p) bullet--active H(4px)::a W(100%)::a Py(4px) Px(2px) W(100%) Bdrs(100px)::a Bgc(#fff)::a focus-background-style'}).text.split('/')[1])
except Exception:
number_of_pages = 1 # If there's only one page, there won't be a button.
# Iterate over the number of pages
for i in range(0, number_of_pages, 1):
time.sleep(1)
page_html = self.driver.page_source
page_soup = BeautifulSoup(page_html, 'html.parser')
current_card = page_soup.find('span', {'class': 'keen-slider__slide Wc($transform) Fxg(1)', 'aria-hidden': 'false', 'style': re.compile(pattern=r'.+')})
vid = current_card.find('video', {'class': 'W(100%)'})
# Find appropriate URL
if vid:
vid = vid['src']
download_url = vid
else:
download_url = re.search(pattern=self.url_regEx, string=str(current_card)).group(1)
# Send GET request
r = requests.get(url=download_url, headers=self.headers)
# Content Type (i.e., image or video) and Last-Modified
content_type, res_last_mod = r.headers['Content-Type'], r.headers['Last-Modified']
res_last_mod = self.to_datetime_obj(date_str=res_last_mod)
time_diff = ':'.join(str(self.now - res_last_mod).split(':')[:2])
# Write picture/video to disk
with open(file=f'./tinder_pics/{self.picture_count}_{name}_{i+1}.{download_url[-3:]}', mode='wb') as file:
file.write(r.content)
                        # If the content is an image, create a perceptual hash; otherwise keep an empty string
                        img_hash = ''
                        if download_url[-3:] == 'jpg':
                            img_hash = imagehash.average_hash(image=Image.open(fp=f'./tinder_pics/{self.picture_count}_{name}_{i+1}.{download_url[-3:]}'))
                        # Append data to list
                        self.records.append((name, card_identifier, content_type, res_last_mod, self.now, time_diff, img_hash))
# Check if we need to click to go to the next page
if i != (number_of_pages - 1):
pyautogui.moveTo(x=1250, y=400, duration=0.1)
pyautogui.click()
time.sleep(1)
else:
continue
# Click off the profile card
pyautogui.moveTo(x=1850, y=350, duration=0.1)
pyautogui.click()
time.sleep(1)
# Move down the webpage
if self.incrementer == 0:
pyautogui.moveTo(x=1850, y=350, duration=0.5)
time.sleep(1)
print(f'Run number: {self.incrementer} | {pyautogui.position()}')
pyautogui.scroll(clicks=-2000)
time.sleep(2.5)
pyautogui.scroll(clicks=-280)
time.sleep(1)
self.incrementer += 1
else:
print(f'Run number: {self.incrementer} | {pyautogui.position()}')
time.sleep(random.randint(2, 3))
pyautogui.scroll(clicks=-755)
time.sleep(random.randint(2, 3))
self.incrementer += 1
    def to_datetime_obj(self, date_str) -> datetime:
return datetime.strptime(date_str, '%a, %d %b %Y %H:%M:%S %Z')
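    # Hedged example (illustrative value, not from the original run):
    # to_datetime_obj('Fri, 01 Oct 2021 12:30:45 GMT') -> datetime(2021, 10, 1, 12, 30, 45)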
def pandas_to_excel(self) -> None:
self.df = | pd.DataFrame(data=self.records, columns=['Name', 'Card_ID', 'Type', 'Res_Last_Mod', 'Current_Date', 'Time_Diff', 'Hash']) | pandas.DataFrame |
# Copyright 2020 IBM Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import logging
import argparse
import itertools
import numpy as np
import pandas as pd
import xgboost as xgb
import matplotlib.pyplot as plt
import sklearn.metrics as metrics
from sklearn.model_selection import GridSearchCV, train_test_split
logging.basicConfig(level=os.environ.get("LOGLEVEL", "INFO"))
logger = logging.getLogger(__name__)
def eval_models(models, data):
"""Calculates the root mean squared error (RMSE) and the coefficient of
determination (R^2) for each of the models.
:param models: Dictionary of the error model for each state vector
component
:type models: {str: xgboost.XGBRegressor}
:param data: Dictionary containing the training and test datasets
:type data: {str: numpy.array}
:return: Returns a DataFrame containing the evaluation metric results
:rtype: pandas.DataFrame
"""
evals = []
for target_col, reg in models.items():
y_hat = reg.predict(data['X_test'])
y = data['y_test'][target_col]
rmse = metrics.mean_squared_error(y, y_hat, squared=False)
r2 = metrics.r2_score(y, y_hat)
eval_dict = {'Error': target_col, 'RMSE': rmse, 'R^2': r2}
evals.append(eval_dict)
return | pd.DataFrame(evals) | pandas.DataFrame |
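# Hedged usage sketch (names below are assumptions, not part of the original module): `models` maps
# each state-vector error column to a fitted xgboost.XGBRegressor and `data` holds 'X_test' plus a
# 'y_test' DataFrame with one column per error term, mirroring the docstring above.
# models = {'err_x': xgb.XGBRegressor().fit(data['X_train'], data['y_train']['err_x'])}
# print(eval_models(models, data))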
from util.utils import load_classes, prep_image, display
from net import DarkNet
import argparse
import os
import time
import random
import pandas as pd
import torch
import cv2 as cv
import pickle as pkl
def parse():
p = argparse.ArgumentParser(description = "YOLOv3 Detection")
p.add_argument("--images", dest = "images", help = "Directory containing images for detection",
default = "images", type = str)
p.add_argument("--output", dest = "output", help = "Output directory",
default = "output", type = str)
p.add_argument("--bs", dest = "bs", help = "Batch size", default = 1)
p.add_argument("--conf", dest = "conf", help = "Confidence, to help filter prediction", default = 0.5)
p.add_argument("--nms", dest = "nms", help = "NMS Threshold", default = 0.4)
p.add_argument("--cfg", dest = "cfg", help = "Config file path for model",
default = "cfgs/yolov3.cfg", type = str)
p.add_argument("--w", dest = "w", help = "Weights file path for model",
default = "weights/yolov3.weights", type = str)
return p.parse_args()
args = parse()
print("in")
images = args.images
bs = int(args.bs)
confidence = float(args.conf)
nms_threshold = float(args.nms)
start = 0
num_classes = 80
classes = load_classes("data/coco.names")
print("Loading network")
model = DarkNet(args.cfg)
print("Model initiated\n")
model.load_weights(args.w)
print("Weights loaded\n")
model.eval()
try:
im = [os.path.join(os.path.realpath('.'), images, img) for img in os.listdir(images)]
except NotADirectoryError:
im = []
im.append(os.path.join(os.path.realpath('.'), images))
except FileNotFoundError:
print("No such directory with the name {images}")
exit()
if not os.path.exists(args.output):
os.mkdir(args.output)
load_images = [cv.imread(img) for img in im]
im_batches = list(map(prep_image, load_images, [608 for x in range(len(im))]))
im_dim_list = [(x.shape[1], x.shape[0]) for x in load_images]
im_dim_list = torch.FloatTensor(im_dim_list).repeat(1, 2)
leftover = 0
if (len(im_dim_list) % bs):
leftover = 1
if bs != 1:
num_batches = len(im) // bs + leftover
im_batches = [torch.cat((im_batches[i* bs : min((i + 1)* bs, \
len(im_batches))])) for i in range(num_batches)]
write = 0
for idx, batch in enumerate(im_batches):
start = time.time()
with torch.no_grad():
pred = model(torch.Tensor(batch))
pred = display(pred, confidence, num_classes, nms_threshold)
end = time.time()
if type(pred) == int:
        for im_num, image in enumerate(im[idx * bs: min((idx + 1)* bs, len(im))]):
im_id = idx * bs + im_num
print("{0:20s} predicted in {1:6.3f} seconds".format(image.split("/")[-1], (end - start)/ bs))
print("{0:20s} {1:s}".format("Objects Detected:", ""))
print("----------------------------------------------------------")
continue
pred[:,0] += idx * bs
if not write:
output = pred
write = 1
else:
output = torch.cat((output, pred))
for im_num, image in enumerate(im[idx * bs: min((idx + 1)* bs, len(im))]):
im_id = idx * bs + im_num
objs = [classes[int(x[-1])] for x in output if int(x[0]) == im_id]
# print("{0:20s} predicted in {1:6.3f} seconds".format(image.split("/")[-1], (end - start)/bs))
# print("{0:20s} {1:s}".format("Objects Detected:", " ".join(objs)))
print("----------------------------------------------------------")
# try:
# output
# except NameError:
# print ("No detections were made")
# exit()
im_dim_list = torch.index_select(im_dim_list, 0, output[:,0].long())
scaling_factor = torch.min(608/im_dim_list,1)[0].view(-1,1)
output[:,[1,3]] -= (608 - scaling_factor*im_dim_list[:,0].view(-1,1))/2
output[:,[2,4]] -= (608 - scaling_factor*im_dim_list[:,1].view(-1,1))/2
output[:,1:5] /= scaling_factor
for i in range(output.shape[0]):
output[i, [1,3]] = torch.clamp(output[i, [1,3]], 0.0, im_dim_list[i,0])
output[i, [2,4]] = torch.clamp(output[i, [2,4]], 0.0, im_dim_list[i,1])
colors = pkl.load(open("pallete", "rb"))
def out(x, results):
c1 = tuple(x[1:3].int())
c2 = tuple(x[3:5].int())
img = results[int(x[0])]
cls = int(x[-1])
color = random.choice(colors)
label = "{0}".format(classes[cls])
cv.rectangle(img, c1, c2,color, 1)
t_size = cv.getTextSize(label, cv.FONT_HERSHEY_PLAIN, 1 , 1)[0]
c2 = c1[0] + t_size[0] + 3, c1[1] + t_size[1] + 4
cv.rectangle(img, c1, c2,color, -1)
cv.putText(img, label, (c1[0], c1[1] + t_size[1] + 4), cv.FONT_HERSHEY_PLAIN, 1, [225,255,255], 1)
return img
list(map(lambda x: out(x, load_images), output))
det_names = | pd.Series(im) | pandas.Series |
# -*- coding: utf-8 -*-
"""
Created on Thu Feb 1 11:34:39 2018
@author: MaggieYC_Pang
"""
import sys
sys.path.append("../")
from mongodb_api import mongodb_api
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
def moving_func(ip_list, step, func=np.mean, arg=None):
op_list=[]
i=0
for data in ip_list[step:]:
op_list.append(func(ip_list[i:i+step], arg))
i=i+1
return op_list
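# Hedged usage sketch (illustrative values): a 3-sample moving average over a short list.
# moving_func([1, 2, 3, 4, 5], step=3, func=np.mean)  # -> [2.0, 3.0]; arg=None is passed through as np.mean's axis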
class wifi_diag_api:
def __init__(self):
label_list = []
label_index_dict = {}
topicdata_dict = {}
# ============================== ML OUTPUT ===============================
label_list.append({"Name":"Delay", "Topic":"Ping", "MLType":"Out", "Process":[np.mean, np.std, len]})
label_list.append({"Name":"Tput", "Topic":"Iperf", "MLType":"Out", "Process":[np.mean]})
label_list.append({"Name":"Jitter", "Topic":"Iperf", "MLType":"Out", "Process":[np.mean]})
label_list.append({"Name":"Loss", "Topic":"Iperf", "MLType":"Out", "Process":[np.mean]})
label_list.append({"Name":"Tx_bitrate", "Topic":"Stationinfo", "MLType":"Out"})
label_list.append({"Name":"Rx_bitrate", "Topic":"Stationinfo", "MLType":"Out"})
label_list.append({"Name":"Signal", "Topic":"Stationinfo", "MLType":"Out"})
label_list.append({"Name":"FER", "Topic":"Stationinfo", "MLType":"Out"})
# ============================== ML INPUT ===============================
label_list.append({"Name":"SS_Sigval", "Topic":"Spectralscan", "MLType":"In", "Process":[np.array]})
label_list.append({"Name":"SS_Sigval_Std", "Topic":"Spectralscan", "MLType":"In", "Process":[np.array]})
label_list.append({"Name":"SS_Portion", "Topic":"Spectralscan", "MLType":"In", "Process":[np.array]})
label_list.append({"Name":"SS_Count", "Topic":"Spectralscan", "MLType":"In", "Process":[np.sum]})
label_list.append({"Name":"SS_Rssi", "Topic":"Spectralscan", "MLType":"In", "Process":[np.mean]})
label_list.append({"Name":"SS_Noise", "Topic":"Spectralscan", "MLType":"In", "Process":[np.mean]})
label_list.append({"Name":"Busy", "Topic":"Survey", "MLType":"In"})
label_list.append({"Name":"Noise", "Topic":"Survey", "MLType":"In"})
label_list.append({"Name":"Rcv", "Topic":"Survey", "MLType":"In"})
label_list.append({"Name":"Tx", "Topic":"Survey", "MLType":"In"})
label_list.append({"Name":"FCSError", "Topic":"Statistics", "MLType":"In"})
ERR_list = ["CRC-ERR", "LENGTH-ERR", "PHY-ERR", "SPECTRAL"] # USEFUL
# ERR_list = ["CRC-ERR", "DECRYPT-BUSY-ERR", "DECRYPT-CRC-ERR", "LENGTH-ERR", "MIC-ERR", "OOM-ERR", "PHY-ERR", "POST-DELIM-CRC-ERR", "PRE-DELIM-CRC-ERR", "RATE-ERR", "SPECTRAL"]
for data in ERR_list:
label_list.append({"Name":data, "Topic":"ath9kERR", "MLType":"In"})
# ERR_list = ["chan_idle_dur", "chan_idle_dur_valid", "dcu_arb_state", "dcu_complete_state", "dcu_fp_state",
# "qcu_complete_state", "qcu_fetch_state", "qcu_stitch_state",
# "txfifo_dcu_num_0", "txfifo_dcu_num_1", "txfifo_valid_0", "txfifo_valid_1"]
ERR_list = ["chan_idle_dur", "chan_idle_dur_valid"] #USEFUL
for data in ERR_list:
label_list.append({"Name":data, "Topic":"ath9kDMA", "MLType":"In"})
# ERR_list = ["ANI_RESET", "CCK_ERRORS", "CCK_LEVEL", "FIR-STEP_DOWN", "FIR-STEP_UP", "INV_LISTENTIME", "MRC-CCK_OFF", "MRC-CCK_ON",
# "OFDM_ERRORS", "OFDM_LEVEL", "OFDM_WS-DET_OFF", "OFDM_WS-DET_ON", "SPUR_DOWN", "SPUR_UP"]
ERR_list = ["CCK_ERRORS", "OFDM_ERRORS", "SPUR_DOWN", "SPUR_UP"] #USEFUL
for data in ERR_list:
label_list.append({"Name":data, "Topic":"ath9kANI", "MLType":"In"})
# ============================== END ===============================
for labeldata in label_list:
label_index_dict[labeldata["Name"]] = label_list.index(labeldata)
label_index_dict[label_list.index(labeldata)] = labeldata["Name"]
if(labeldata["Topic"] not in topicdata_dict):
topicdata_dict[labeldata["Topic"]]=[]
if("Process" not in labeldata):
topicdata_dict[labeldata["Topic"]].append([labeldata["Name"], "single"])
else:
topicdata_dict[labeldata["Topic"]].append([labeldata["Name"], "list"])
# =========================================================================================================
process_name_dict={}
process_name_dict[np.mean] = "mean"
process_name_dict[np.std] = "std"
process_name_dict[np.sum] = "sum"
process_name_dict[np.array] = "array"
process_name_dict[len] = "len"
self.label_list = label_list
self.process_name_dict = process_name_dict
self.label_index_dict = label_index_dict
self.topicdata_dict = topicdata_dict
def GetDataList(self, dev, found_data, name, proc):
retlist = []
for data in found_data:
target = data[dev]
if(name not in target):
retlist.append(-1)
else:
if(proc==None):
retlist.append(target[name])
else:
retlist.append(proc(target[name]))
return retlist
def plot_all(self, mdb):
print("collection = " + mdb.get_full_name())
found_data = mdb.find(key_value = {}, ftype='many')
print("len(found_data) = " + str(len(found_data)))
ML_data_AP = {}
ML_data_STA = {}
for labeldata in self.label_list:
if(labeldata["Name"] not in found_data[0]["AP"]):
continue
if("Process" not in labeldata):
ML_data_AP[labeldata["Name"]] = self.GetDataList("AP", found_data, labeldata["Name"], None)
else:
for proc in labeldata["Process"]:
ML_data_AP[labeldata["Name"] + '_' + self.process_name_dict[proc]] = self.GetDataList("AP", found_data, labeldata["Name"], proc)
if("Process" not in labeldata):
ML_data_STA[labeldata["Name"]] = self.GetDataList("STA", found_data, labeldata["Name"], None)
else:
for proc in labeldata["Process"]:
ML_data_STA[labeldata["Name"] + '_' + self.process_name_dict[proc]] = self.GetDataList("STA", found_data, labeldata["Name"], proc)
for pkey in ML_data_AP:
if("array" in pkey):
continue
plt.plot(moving_func(ML_data_AP[pkey],10), 'b.')
plt.plot(moving_func(ML_data_STA[pkey],10), 'g.')
plt.show()
print("pkey: " + pkey)
APdf = | pd.DataFrame(ML_data_AP) | pandas.DataFrame |
import pandas as pd
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import train_test_split
from code.models.dataset import Dataset
def apply_specific_dataset_processing(dataset: Dataset):
dataset.create_raw_transformed_dataset()
def train_test_split_and_save(dataset: Dataset):
raw_dataset = dataset.get_raw_transformed_dataset()
X = raw_dataset.loc[:, raw_dataset.columns != dataset.target_class.name]
y = raw_dataset[dataset.target_class.name]
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=dataset.test_size, random_state=dataset.seed)
raw_train_dataset = pd.concat((X_train, pd.Series(y_train).rename(dataset.target_class.name)), axis=1)
raw_test_dataset = pd.concat((X_test, pd.Series(y_test).rename(dataset.target_class.name)), axis=1)
save_dataset(raw_train_dataset, dataset.get_raw_train_dataset_filename())
save_dataset(raw_test_dataset, dataset.get_raw_test_dataset_filename())
return X_train, X_test, y_train, y_test
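# Hedged usage sketch (assumes `dataset` is an already-configured Dataset with target_class,
# test_size and seed set; the call order below is an assumption, not from the original module):
# apply_specific_dataset_processing(dataset)
# X_train, X_test, y_train, y_test = train_test_split_and_save(dataset)
# create_preprocessed_train_and_test_datasets(dataset, X_train, X_test, y_train, y_test)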
def create_preprocessed_train_and_test_datasets(dataset: Dataset,
raw_train_dataset_X,
raw_test_dataset_X,
raw_train_dataset_y,
raw_test_dataset_y):
features = dataset.features
train_dataset = pd.DataFrame.copy(raw_train_dataset_X, deep=True)
train_dataset = pd.concat((train_dataset, pd.Series(raw_train_dataset_y).rename(dataset.target_class.name)), axis=1)
test_dataset = pd.DataFrame.copy(raw_test_dataset_X, deep=True)
test_dataset = pd.concat((test_dataset, pd.Series(raw_test_dataset_y).rename(dataset.target_class.name)), axis=1)
# Reset Encoding Mapping
dataset.reset_encoding_mapping()
for feature in features:
feature_name = feature.name
feature_values_raw_train = train_dataset[feature_name]
feature_values_raw_test = test_dataset[feature_name]
# Encode and Save Encoding Mapping
feature_values_train, feature_values_test = \
feature.feature_type.encode(dataset, feature_name, feature_values_raw_train, feature_values_raw_test)
train_dataset[feature_name] = feature_values_train
test_dataset[feature_name] = feature_values_test
# Standardize
features_to_standardize = [f.name for f in features if f.should_standardize]
train_dataset[features_to_standardize], test_dataset[features_to_standardize] \
= standardize(train_dataset[features_to_standardize], test_dataset[features_to_standardize])
save_dataset(train_dataset, dataset.get_train_dataset_filename())
save_dataset(test_dataset, dataset.get_test_dataset_filename())
def standardize(train_dataset: pd.DataFrame,
test_dataset: pd.DataFrame) -> [pd.DataFrame, pd.DataFrame]:
scaler = StandardScaler().fit(train_dataset)
train_dataset_scaled = scaler.transform(train_dataset)
test_dataset_scaled = scaler.transform(test_dataset)
train_dataset_temp = | pd.DataFrame(train_dataset_scaled, columns=train_dataset.columns, index=train_dataset.index) | pandas.DataFrame |
# Originally created: 02nd October, 2021
# <NAME>
# Last amended: 09th Oct, 2021
# Myfolder: 1/home/ashok/Documents/churnapp
# VM: lubuntu_healthcare
# Ref: https://builtin.com/machine-learning/streamlit-tutorial
#
# Objective:
# Deploy an ML model on web
#
########################
# Notes:
# 1. Run this app in its folder, as:
#     cd /home/ashok/Documents/churnapp
#     streamlit run churn-app.py
# 2. Accompanying file to experiment with is
# expt.py
########################
# 1.0 Call libraries
# Install as: pip install streamlit
# Better create a separate conda environment for it
import streamlit as st
import pandas as pd
import numpy as np
import pickle
import base64
#import seaborn as sns
#import matplotlib.pyplot as plt
# 1.1 Set pandas options. None means no truncation
pd.set_option('display.max_columns', None)
pd.set_option('display.max_rows', None)
# Write some body-text on the Web-Page:
st.write("""
# Churn Prediction App
Customer churn is defined as the loss of customers after a certain period of time.
Companies are interested in targeting customers who are likely to churn. They can
target these customers with special deals and promotions to influence them to stay
with the company.
This app predicts the probability of a customer churning using Telco Customer data. Here
customer churn means the customer does not make another purchase after a period of time.
""")
# 2.0 Read data from current folder
# Default folder is where streamlit
# is being run. So this file
# should be in /home/ashok/Documents/churnapp
# Else, home folder is the default.
df_selected = pd.read_csv("telco_churn.csv")
# 2.1 We will select only a few columns
# for our model:
cols = ['gender', 'Partner', 'Dependents', 'PhoneService', 'tenure', 'MonthlyCharges', 'Churn']
df_selected_all = df_selected[cols].copy()
# 3.0 We will create a file download link
# in our webapp
# What is base64?
# See: https://levelup.gitconnected.com/what-is-base64-encoding-4b5ed1eb58a4
def filedownload(df):
csv = df.to_csv(index=False) # csv is now a string
csv = csv.encode() # csv is b' string or bytes
b64 = base64.b64encode(csv) # b64 is base64 encoded binary
b64 = b64.decode() # b64 is decoded to one of 64 characters
# 3.1 Create an html link to download datafile
# See here: https://stackoverflow.com/a/14011075
href = f'<a href="data:file/csv;base64,{b64}" download="churn_data.csv">Download CSV File</a>'
# 3.2 Return href object
return href
#st.set_option('deprecation.showPyplotGlobalUse', False)
# 3.3 Finally display the href link
href = filedownload(df_selected_all)
st.markdown(href, unsafe_allow_html=True)
# 4.0 Create a component to upload data file in the sidebar.
# 'uploaded_file' is a pandas dataframe
uploaded_file = st.sidebar.file_uploader(
"Upload your input CSV file",
type=["csv"]
)
# 4.1 Read data from file. Else, read from widgets
if uploaded_file is not None:
# 4.2 Read the uploaded file
input_df = pd.read_csv(uploaded_file)
else:
# 4.3 Define a function to create data from widgets
def user_input_features():
# 4.4 Create four widgets
gender = st.sidebar.selectbox('gender',('Male','Female'))
PaymentMethod = st.sidebar.selectbox('PaymentMethod',('Bank transfer (automatic)', 'Credit card (automatic)', 'Mailed check', 'Electronic check'))
MonthlyCharges = st.sidebar.slider('Monthly Charges', 18.0,118.0, 18.0)
tenure = st.sidebar.slider('tenure', 0.0,72.0, 0.0)
# 4.5 Collect widget output in a dictionary
data = {
'gender': [gender], # Should be a list data structure
'PaymentMethod': [PaymentMethod],
'MonthlyCharges':[MonthlyCharges],
'tenure': [tenure]
}
# 4,6 Transform data to DataFrame
features = pd.DataFrame(data)
# 4.7 Return dataframe of features
return features
# 4.8 Call the function and get a 1-row DataFrame
input_df = user_input_features()
# 5.0 To fill NA values, we may import
# our original DataFrame
churn_raw = | pd.read_csv('telco_churn.csv') | pandas.read_csv |
# -*- coding: utf-8 -*-
"""
Created on Fri Mar 5 09:08:54 2021
Based on research_field_0304v2.csv, take the intersection of the research
fields of all companies in the industry that a given company belongs to.
"""
import csv
import pandas as pd
# First merge each company's research fields across all years
def get_field_list(orgpath,outpath):
orgdf = | pd.read_csv(orgpath,dtype=str) | pandas.read_csv |
# USDA_CoA_Cropland.py (flowsa)
# !/usr/bin/env python3
# coding=utf-8
import json
import numpy as np
import pandas as pd
from flowsa.common import *
def CoA_Cropland_URL_helper(build_url, config, args):
"""This helper function uses the "build_url" input from flowbyactivity.py, which is a base url for coa cropland data
that requires parts of the url text string to be replaced with info specific to the usda nass quickstats API.
This function does not parse the data, only modifies the urls from which data is obtained. """
# initiate url list for coa cropland data
urls = []
# call on state acronyms from common.py (and remove entry for DC)
state_abbrevs = abbrev_us_state
state_abbrevs = {k: v for (k, v) in state_abbrevs.items() if k != "DC"}
# replace "__aggLevel__" in build_url to create three urls
for x in config['agg_levels']:
for y in config['sector_levels']:
# at national level, remove the text string calling for state acronyms
if x == 'NATIONAL':
url = build_url
url = url.replace("__aggLevel__", x)
url = url.replace("__secLevel__", y)
url = url.replace("&state_alpha=__stateAlpha__", "")
if y == "ECONOMICS":
url = url.replace(
"AREA HARVESTED&statisticcat_desc=AREA IN PRODUCTION&statisticcat_desc=TOTAL&statisticcat_desc=AREA BEARING %26 NON-BEARING",
"AREA")
else:
url = url.replace("&commmodity_desc=AG LAND", "")
url = url.replace(" ", "%20")
urls.append(url)
else:
# substitute in state acronyms for state and county url calls
for z in state_abbrevs:
url = build_url
url = url.replace("__aggLevel__", x)
url = url.replace("__secLevel__", y)
url = url.replace("__stateAlpha__", z)
if y == "ECONOMICS":
url = url.replace(
"AREA HARVESTED&statisticcat_desc=AREA IN PRODUCTION&statisticcat_desc=TOTAL&statisticcat_desc=AREA BEARING %26 NON-BEARING",
"AREA")
else:
url = url.replace("&commmodity_desc=AG LAND", "")
url = url.replace(" ", "%20")
urls.append(url)
return urls
def coa_cropland_call(url, coa_response, args):
cropland_json = json.loads(coa_response.text)
df_cropland = | pd.DataFrame(data=cropland_json["data"]) | pandas.DataFrame |
#!/usr/bin/python3
# import the module
import os
import glob
import pandas as pd
import csv
from sqlalchemy import create_engine
import psycopg2
import config #you need to create this config.py file and update the variables with your database, username and password
import subprocess
import sys
#Note: you need to indicate which directory (e.g. path/to/pur1997) in argv[1]
# Get a database connection
conn_string = "host="+config.HOST+" dbname="+config.DB+" user="+config.username+" password="+config.password
# Get a database connection
conn = psycopg2.connect(conn_string)
# Create a cursor object. Allows us to execute the SQL query
cursor = conn.cursor()
def load_data(schema, table):
sql_command = "SELECT * FROM {}.{};".format(str(schema), str(table))
# Load the data
data = pd.read_sql(sql_command, conn)
return (data)
# Download data that is already uploaded (find where you left off)
chem_df = load_data(config.dpr_schema, config.use_data)
# make lists for filtering the data
already_in = list(chem_df['use_no'])
chem_list = list(pd.read_csv("/home/bmain/pesticide/chem_com.csv")["chem_code"])
prod = list(pd.read_csv("/home/bmain/pesticide/product_prodno.csv")["prodno"])
# set up new DB connection for alchemy
engine = create_engine('postgresql://{user}:{pw}@{site}:5432/{db}'.format(user=config.username, pw=config.password, site=config.HOST, db=config.DB))
# make error file
error = open("error_rows_prodo.csv", 'w') # these typically occur when there is misformated data
weird_comtrs = open("comtrs_error_rows.csv", "w")
def upload_by_row(pur_directory):
files = glob.glob("%s/udc*" % (pur_directory))
for udc in files:
df = | pd.read_csv(udc,low_memory=False, dtype={'range': str, 'section':str, 'township':str, 'comtrs':str}) | pandas.read_csv |
import os
from io import StringIO
import pandas as pd
import requests
import tqdm
from cellphonedb.src.app.app_logger import app_logger
from cellphonedb.src.app.cellphonedb_app import data_dir
from cellphonedb.tools.tools_helper import add_to_meta
def call(genes: pd.DataFrame, downloads_path: str, fetch: bool, save_backup: bool = True) -> pd.DataFrame:
proteins = genes['uniprot'].tolist()
sources = [
{
'name': 'InnateDB',
'base_url': 'https://psicquic.curated.innatedb.com/webservices/current/search/query/species:human',
'query_parameters': False,
},
{
'name': 'InnateDB-All',
'base_url': 'https://psicquic.all.innatedb.com/webservices/current/search/query/species:human',
'query_parameters': False,
},
{
'name': 'IMEx',
'base_url': 'http://www.ebi.ac.uk/Tools/webservices/psicquic/imex/webservices/current/search/query/{}',
'query_parameters': True,
},
{
'name': 'IntAct',
'base_url': 'http://www.ebi.ac.uk/Tools/webservices/psicquic/intact/webservices/current/search/query/{}',
'query_parameters': True,
},
{
'name': 'bhf-ucl',
'base_url': 'http://www.ebi.ac.uk/Tools/webservices/psicquic/bhf-ucl/webservices/current/search/query/{}',
'query_parameters': True,
},
{
'name': 'MatrixDB',
'base_url': 'http://matrixdb.univ-lyon1.fr:8080/psicquic/webservices/current/search/query/{}',
'query_parameters': True,
},
{
'name': 'MINT',
'base_url': 'http://www.ebi.ac.uk/Tools/webservices/psicquic/mint/webservices/current/search/query/{}',
'query_parameters': True,
},
{
'name': 'I2D',
'base_url': 'http://ophid.utoronto.ca/psicquic-ws/webservices/current/search/query/{}',
'query_parameters': True,
},
{
'name': 'UniProt',
'base_url': 'http://www.ebi.ac.uk/Tools/webservices/psicquic/uniprot/webservices/current/search/query/{}',
'query_parameters': True,
},
{
'name': 'MBInfo',
'base_url': 'http://www.ebi.ac.uk/Tools/webservices/psicquic/mbinfo/webservices/current/search/query/{}',
'query_parameters': True,
},
# 'DIP': 'http://imex.mbi.ucla.edu/xpsq-dip-all/service/rest/current/search/query/{}',
]
significant_columns = ['A', 'B', 'altA', 'altB', 'provider']
carry = | pd.DataFrame(columns=significant_columns) | pandas.DataFrame |
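    # Hedged aside (not the package's real download logic; names are illustrative): each PSICQUIC
    # endpoint above answers a MIQL query over REST and returns tab-separated MITAB text that pandas
    # can parse directly, e.g.:
    # example_url = sources[3]['base_url'].format('id:P01308')   # IntAct, one UniProt accession
    # mitab = pd.read_csv(StringIO(requests.get(example_url).text), sep='\t', header=None)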
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Aug 1 18:53:50 2018
@author: kazuki.onodera
"""
import gc, os
from tqdm import tqdm
import pandas as pd
import numpy as np
import sys
sys.path.append(f'/home/{os.environ.get("USER")}/PythonLibrary')
import lgbextension as ex
import lightgbm as lgb
from multiprocessing import cpu_count, Pool
from glob import glob
#import count
import utils, utils_best
#utils.start(__file__)
#==============================================================================
SEED = 71
#new_feature = 'f110'
param = {
'objective': 'binary',
'metric': 'auc',
'learning_rate': 0.01,
'max_depth': 6,
'num_leaves': 63,
'max_bin': 255,
'min_child_weight': 10,
'min_data_in_leaf': 150,
'reg_lambda': 0.5, # L2 regularization term on weights.
'reg_alpha': 0.5, # L1 regularization term on weights.
'colsample_bytree': 0.9,
'subsample': 0.9,
# 'nthread': 32,
'nthread': cpu_count(),
'bagging_freq': 1,
'verbose':-1,
'seed': SEED
}
loader805 = utils_best.Loader('CV805_LB803')
loader804 = utils_best.Loader('LB804')
# =============================================================================
# load
# =============================================================================
X_805 = loader805.train()
X_804 = loader804.train()
col = X_804.columns.difference(X_805.columns)
X = | pd.concat([X_805, X_804[col]], axis=1) | pandas.concat |
#!/usr/bin/env python
import numpy as np
import shutil
import urllib
import urlparse
import os
from core import *
import util
from pprint import pprint
import pandas as pd
class PaperDownload(XmlClass):
def __init__(self, xe=None):
XmlClass.__init__(self,xe=xe)
self.dest = xe.attrib['dest']
self.s_file_obo = os.path.join(SyncDB.DOWNLOAD_DIR(),'hp.obo')
self.s_file_gene2hpo = os.path.join(SyncDB.DOWNLOAD_DIR(),'genes_to_phenotype.txt')
self.fn_hpo_ann = os.path.join(SyncDB.DOWNLOAD_DIR(),'hpo_ann.csv')
self.fn_trrust_rawdata_human = os.path.join(SyncDB.DOWNLOAD_DIR(),'trrust_rawdata.human.tsv')
self.fn_trrust_rawdata_mouse = os.path.join(SyncDB.DOWNLOAD_DIR(),'trrust_rawdata.mouse.tsv')
self.fn_DisGeNET_source = os.path.join(SyncDB.DOWNLOAD_DIR(), 'curated_gene_disease_associations.tsv')
self.fn_DisGeNET_ann = os.path.join(SyncDB.DOWNLOAD_DIR(), 'disgenet_ann.csv')
self.fn_trrust_human_term = os.path.join(SyncDB.DOWNLOAD_DIR(),'trrust_human.csv')
self.fn_trrust_mouse_term = os.path.join(SyncDB.DOWNLOAD_DIR(),'trrust_mouse.csv')
self.fn_symbol = os.path.join(SyncDB.UPLOAD_DIR(),'gid2source_id','symbol.csv')
self.fn_synonym = os.path.join(SyncDB.UPLOAD_DIR(),'gid2source_id','gene_synonym.csv')
self.fn_description = os.path.join(SyncDB.UPLOAD_DIR(),'annotation','gene_description.csv')
self.inputs=['ds:paper',self.fn_symbol,self.fn_synonym, self.fn_description]
self.fn_trrust_term = os.path.join(SyncDB.DOWNLOAD_DIR(), 'trrust_term.csv')
self.fn_trrust_term_pair = os.path.join(SyncDB.DOWNLOAD_DIR(), 'trrust_term_pair.csv')
self.fn_disgenet_term = os.path.join(SyncDB.DOWNLOAD_DIR(), 'disgenet_term.csv')
self.fn_disgenet_term_pair = os.path.join(SyncDB.DOWNLOAD_DIR(), 'disgenet_term_pair.csv')
self.fn_PaGenBase_term = os.path.join(SyncDB.DOWNLOAD_DIR(), 'PaGenBase_term.csv')
self.fn_PaGenBase_term_pair = os.path.join(SyncDB.DOWNLOAD_DIR(), 'PaGenBase_term_pair.csv')
def get_fn_dest(self):
return os.path.join(SyncDB.DOWNLOAD_DIR(),self.dest)
def populate_more(self,root):
XmlClass.populate_more(self,root)
self.outputs.extend([self.fn_hpo_ann,
self.fn_trrust_term,
self.fn_trrust_term_pair,
self.fn_disgenet_term,
self.fn_disgenet_term_pair,
self.fn_DisGeNET_ann,
self.fn_PaGenBase_term,
self.fn_PaGenBase_term_pair,
])
def do_update(self):
self.get_parse_PaGenBase()
self.parse_disgenet()
t_term_human, t_term_pair_human = self.parse_trrust(self.fn_trrust_rawdata_human, 9606, start_id=1)
t_term_mouse, t_term_pair_mouse = self.parse_trrust(self.fn_trrust_rawdata_mouse, 10090,start_id=len(t_term_human)+1)
t_term = pd.concat([t_term_human,t_term_mouse])
t_term_pair = pd.concat([t_term_pair_human,t_term_pair_mouse])
t_term.to_csv(self.fn_trrust_term, index=False)
t_term_pair.to_csv(self.fn_trrust_term_pair, index=False)
parent_child = self.parse_hp(self.s_file_obo)
# print(parent_child['HP:0000001'])
pheno_level = self.get_level(parent_child)
# print(pheno_level['HP:0012823'], pheno_level['HP:0000001'])
self.parse_gp(self.s_file_gene2hpo, pheno_level)
def get_parse_PaGenBase(self):
count_start = 0
S_term = []
S_pair = []
S_file = [
('hotisp.txt', 'Tissue-specific', 9606),
('hocesp.txt', 'Cell-specific', 9606),
('mutisp.txt', 'Tissue-specific', 10090),
('mucesp.txt', 'Cell-specific', 10090),
('ratisp.txt', 'Tissue-specific', 10116),
('drtisp.txt', 'Tissue-specific', 7227)
]
for fn in S_file:
fn = fn[0]
urllib.urlretrieve('http://bioinf.xmu.edu.cn/PaGenBase/browse/{0}'.format(fn),
os.path.join(SyncDB.DOWNLOAD_DIR(), fn))
for (s_file, s_ann, tax_id) in S_file:
s_file = os.path.join(SyncDB.DOWNLOAD_DIR(), s_file)
t_term, t_pair, count_start = self.parse_PaGenBase(s_file, s_ann, tax_id, count_start)
S_term.append(t_term)
S_pair.append(t_pair)
t_term = pd.concat(S_term, ignore_index=True)
t_pair = pd.concat(S_pair, ignore_index=True)
t_term.to_csv(self.fn_PaGenBase_term, index=False)
t_pair.to_csv(self.fn_PaGenBase_term_pair, index=False)
pass
def parse_PaGenBase(self, s_file, s_ann, tax_id, count_start=0):
t = pd.read_table(s_file, skiprows=7)
t.rename2({'Gene Symbol': 'Symbol'})
t = t[['Symbol', 'Sample']].copy()
S_term = util.unique(t.Sample)
data = []
c_id = {}
for x in S_term:
count_start += 1
term_id = 'PGB:%05d' % count_start
term_name = s_ann + ': ' + x
data.append({'term_id': term_id, 'term_name': term_name, 'description': term_name})
c_id[x] = term_id
t_term = pd.DataFrame(data)
t_pair = t[['Symbol', 'Sample']].copy()
t_pair.rename2({'Sample': 'term_name'})
t_pair['term_id'] = t_pair.term_name.apply(lambda x: c_id[x])
t_pair['term_name'] = t_pair.term_name.apply(lambda x: s_ann + ': ' + x)
t_pair['tax_id'] = tax_id
t_pair['type_name'] = 'PaGenBase'
t_pair.drop_duplicates(['term_id', 'Symbol'], inplace=True)
#convert symbol to gid
dt = pd.read_csv(self.fn_symbol)
dt = dt[dt['tax_id']==tax_id]
symbol2gene_id = dict(zip(dt.source_id, dt.gid.astype(str)))
dt = pd.read_csv(self.fn_synonym)
dt = dt[dt['tax_id']==tax_id]
symbol2gene_id.update(dict(zip(dt.source_id, dt.gid.astype(str))))
t_pair['gid'] = t['Symbol'].apply(lambda x: symbol2gene_id.get(x, ''))
t_pair = t_pair[t_pair.gid != ''].copy()
return (t_term, t_pair, count_start)
def parse_trrust(self, s_file, tax_id, start_id):
dt = pd.read_csv(self.fn_symbol)
dt = dt[dt['tax_id']==tax_id]
symbol2gene_id = dict(zip(dt.source_id, dt.gid.astype(str)))
dt = pd.read_csv(self.fn_synonym)
dt = dt[dt['tax_id']==tax_id]
symbol2gene_id.update(dict(zip(dt.source_id, dt.gid.astype(str))))
dt = pd.read_csv(self.fn_description)
dt = dt[dt['tax_id']==tax_id]
gene_id2description = dict(zip(dt.gid.astype(str), dt.content))
t = pd.read_table(s_file, header=None, names=['TF', 'Target', 'Drection', 'PMID'])
t['gid_TF'] = t['TF'].apply(lambda x: symbol2gene_id.get(x, ''))
t['gid_Target'] = t['Target'].apply(lambda x: symbol2gene_id.get(x, ''))
t = t[(t.gid_TF != '') & (t.gid_Target != '')].copy()
t['term_name'] = t['TF'].apply(lambda x: 'Regulated by: ' + x)
util.rename2(t,{'gid_Target':'gid'})
all_tf = {}
term_ids = []
for i, row in t.iterrows():
tf = row['TF']
if tf not in all_tf:
all_tf[tf] = 'TRR{0:05d}'.format(start_id+len(all_tf))
term_ids.append(all_tf[tf])
t['term_id'] = term_ids
t['tax_id'] = str(tax_id)
t['type_name'] = 'TRRUST'
t_term_pair = t[['gid','tax_id','term_id','term_name','type_name']]
t = t[['gid_TF','term_id','term_name']]
t = t.drop_duplicates()
t['description'] = [x['term_name']
+ '; '
+ gene_id2description.get(x['gid_TF'],'')[:60]
for i, x in t[['gid_TF','term_name']].iterrows()]
t_term = t[['term_id','term_name','description']]
return (t_term,t_term_pair)
def parse_disgenet(self):
t = | pd.read_table(self.fn_DisGeNET_source) | pandas.read_table |
#------------------------------------------------------------------------------
NROWS_TRAIN=184903891 #dimmension of the train set
NCHUNK_TRAIN=75000000 #length of chunk of data used for training, from total train set
MAX_TRAIN=75000000 #max length of train data (substract from NROWS_TRAIN to get the start position for training set)
NROWS_VALIDATION=2500000 #size of the validation set
ENV_RUN='local' #environment where the kernel is run
PRESET_D = 2 ** 26
PRESET_DM = 3000000000
if ENV_RUN=='local':
inpath = '../input/'
suffix = ''
outpath = ''
savepath = ''
elif ENV_RUN=='aws':
inpath = '../input/'
suffix = '.zip'
outpath = '../sub/'
savepath = '../data/'
#------------------------------------------------------------------------------
import pandas as pd
import time
import numpy as np
from sklearn.model_selection import train_test_split
import lightgbm as lgb
import gc
import matplotlib.pyplot as plt
import os
#------------------------------------------------------------------------------
#------------------------------------------------------------------------------
def show_max_clean(df,gp,agg_name,agg_type,show_max):
#------------------------------------------------------------------------------
del gp
if show_max:
print( agg_name + " max value = ", df[agg_name].max() )
df[agg_name] = df[agg_name].astype(agg_type)
gc.collect()
return( df )
#------------------------------------------------------------------------------
def perform_count( df, group_cols, agg_name, agg_type='uint32', show_max=False, show_agg=True ):
#------------------------------------------------------------------------------
if show_agg:
print( "Aggregating by ", group_cols , '...' )
gp = df[group_cols][group_cols].groupby(group_cols).size().rename(agg_name).to_frame().reset_index()
df = df.merge(gp, on=group_cols, how='left')
return (show_max_clean(df,gp,agg_name,agg_type,show_max))
#------------------------------------------------------------------------------
def perform_countuniq( df, group_cols, counted, agg_name, agg_type='uint32', show_max=False, show_agg=True ):
#------------------------------------------------------------------------------
if show_agg:
print( "Counting unique ", counted, " by ", group_cols , '...' )
gp = df[group_cols+[counted]].groupby(group_cols)[counted].nunique().reset_index().rename(columns={counted:agg_name})
df = df.merge(gp, on=group_cols, how='left')
return (show_max_clean(df,gp,agg_name,agg_type,show_max))
#------------------------------------------------------------------------------
def perform_cumcount( df, group_cols, counted, agg_name, agg_type='uint32', show_max=False, show_agg=True ):
#------------------------------------------------------------------------------
if show_agg:
print( "Cumulative count by ", group_cols , '...' )
gp = df[group_cols+[counted]].groupby(group_cols)[counted].cumcount()
df[agg_name]=gp.values
return (show_max_clean(df,gp,agg_name,agg_type,show_max))
#------------------------------------------------------------------------------
def perform_mean( df, group_cols, counted, agg_name, agg_type='float32', show_max=False, show_agg=True ):
#------------------------------------------------------------------------------
if show_agg:
print( "Calculating mean of ", counted, " by ", group_cols , '...' )
gp = df[group_cols+[counted]].groupby(group_cols)[counted].mean().reset_index().rename(columns={counted:agg_name})
df = df.merge(gp, on=group_cols, how='left')
return (show_max_clean(df,gp,agg_name,agg_type,show_max))
#------------------------------------------------------------------------------
def perform_var( df, group_cols, counted, agg_name, agg_type='float32', show_max=False, show_agg=True ):
#------------------------------------------------------------------------------
if show_agg:
print( "Calculating variance of ", counted, " by ", group_cols , '...' )
gp = df[group_cols+[counted]].groupby(group_cols)[counted].var().reset_index().rename(columns={counted:agg_name})
df = df.merge(gp, on=group_cols, how='left')
return (show_max_clean(df,gp,agg_name,agg_type,show_max))
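# Hedged usage sketch (toy data; column names assumed to mirror the real click log):
# toy = pd.DataFrame({'ip': [1, 1, 2], 'app': [3, 3, 4], 'os': [13, 13, 19], 'hour': [7, 8, 7], 'day': [6, 6, 7]})
# toy = perform_count(toy, ['ip', 'app'], 'ip_app_count')             # clicks per (ip, app) pair
# toy = perform_countuniq(toy, ['ip'], 'app', 'X3', 'uint8')          # unique apps seen per ip
# toy = perform_mean(toy, ['ip', 'app'], 'hour', 'ip_app_mean_hour')  # mean click hour per (ip, app)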
debug=0
if debug:
print('*** debug parameter set: this is a test run for debugging purposes ***')
#------------------------------------------------------------------------------
def lgb_modelfit_nocv(params, dtrain, dvalid, predictors, target='target', objective='binary', metrics='auc',
feval=None, early_stopping_rounds=20, num_boost_round=3000, verbose_eval=10, categorical_features=None):
#------------------------------------------------------------------------------
lgb_params = {
'boosting_type': 'gbdt',
'objective': objective,
'metric':metrics,
'learning_rate': 0.2,
#'is_unbalance': 'true', #because training data is unbalance (replaced with scale_pos_weight)
'num_leaves': 31, # we should let it be smaller than 2^(max_depth)
'max_depth': -1, # -1 means no limit
'min_child_samples': 20, # Minimum number of data need in a child(min_data_in_leaf)
'max_bin': 255, # Number of bucketed bin for feature values
'subsample': 0.6, # Subsample ratio of the training instance.
'subsample_freq': 0, # frequence of subsample, <=0 means no enable
'colsample_bytree': 0.3, # Subsample ratio of columns when constructing each tree.
'min_child_weight': 5, # Minimum sum of instance weight(hessian) needed in a child(leaf)
'subsample_for_bin': 200000, # Number of samples for constructing bin
'min_split_gain': 0, # lambda_l1, lambda_l2 and min_gain_to_split to regularization
'reg_alpha': 0, # L1 regularization term on weights
'reg_lambda': 0, # L2 regularization term on weights
'nthread': 4,
'verbose': 0,
'metric':metrics
}
lgb_params.update(params)
print("preparing validation datasets")
xgtrain = lgb.Dataset(dtrain[predictors].values, label=dtrain[target].values,
feature_name=predictors,
categorical_feature=categorical_features
)
xgvalid = lgb.Dataset(dvalid[predictors].values, label=dvalid[target].values,
feature_name=predictors,
categorical_feature=categorical_features
)
evals_results = {}
bst1 = lgb.train(lgb_params,
xgtrain,
valid_sets=[xgtrain, xgvalid],
valid_names=['train','valid'],
evals_result=evals_results,
num_boost_round=num_boost_round,
early_stopping_rounds=early_stopping_rounds,
verbose_eval=10,
feval=feval)
print("\nModel Report")
print("bst1.best_iteration: ", bst1.best_iteration)
print(metrics+":", evals_results['valid'][metrics][bst1.best_iteration-1])
return (bst1,bst1.best_iteration)
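# Hedged usage sketch (hyperparameter values are assumptions; train_df/val_df must contain the
# `predictors` columns plus the target):
# params = {'learning_rate': 0.2, 'num_leaves': 7, 'max_depth': 3, 'scale_pos_weight': 200}
# bst, best_iter = lgb_modelfit_nocv(params, train_df, val_df, predictors, target='is_attributed',
#                                    objective='binary', metrics='auc', early_stopping_rounds=30,
#                                    num_boost_round=1000, categorical_features=categorical)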
#------------------------------------------------------------------------------
def perform_analysis(idx_from,idx_to,fileno):
#------------------------------------------------------------------------------
dtypes = {
'ip' : 'uint32',
'app' : 'uint16',
'device' : 'uint16',
'os' : 'uint16',
'channel' : 'uint16',
'is_attributed' : 'uint8',
'click_id' : 'uint32',
}
print('loading train data...',idx_from,idx_to)
train_df = pd.read_csv(inpath+"train.csv", parse_dates=['click_time'],
skiprows=range(1,idx_from), nrows=idx_to-idx_from, dtype=dtypes,
usecols=['ip','app','device','os', 'channel', 'click_time', 'is_attributed'])
print('loading test data...')
if debug:
test_df = pd.read_csv(inpath+"test.csv", nrows=100000,
parse_dates=['click_time'], dtype=dtypes,
usecols=['ip','app','device','os', 'channel', 'click_time', 'click_id'])
else:
test_df = pd.read_csv(inpath+"test.csv", parse_dates=['click_time'],
dtype=dtypes, usecols=['ip','app','device','os', 'channel', 'click_time', 'click_id'])
len_train = len(train_df)
train_df=train_df.append(test_df)
del test_df
gc.collect()
print('Extracting new features...')
train_df['hour'] = pd.to_datetime(train_df.click_time).dt.hour.astype('uint8')
train_df['day'] = pd.to_datetime(train_df.click_time).dt.day.astype('uint8')
gc.collect()
train_df = perform_countuniq( train_df, ['ip'], 'channel', 'X0', 'uint8', show_max=True ); gc.collect()
train_df = perform_cumcount( train_df, ['ip', 'device', 'os'], 'app', 'X1', show_max=True ); gc.collect()
train_df = perform_countuniq( train_df, ['ip', 'day'], 'hour', 'X2', 'uint8', show_max=True ); gc.collect()
train_df = perform_countuniq( train_df, ['ip'], 'app', 'X3', 'uint8', show_max=True ); gc.collect()
train_df = perform_countuniq( train_df, ['ip', 'app'], 'os', 'X4', 'uint8', show_max=True ); gc.collect()
train_df = perform_countuniq( train_df, ['ip'], 'device', 'X5', 'uint16', show_max=True ); gc.collect()
train_df = perform_countuniq( train_df, ['app'], 'channel', 'X6', show_max=True ); gc.collect()
train_df = perform_cumcount( train_df, ['ip'], 'os', 'X7', show_max=True ); gc.collect()
train_df = perform_countuniq( train_df, ['ip', 'device', 'os'], 'app', 'X8', show_max=True ); gc.collect()
train_df = perform_count( train_df, ['ip', 'day', 'hour'], 'ip_tcount', show_max=True ); gc.collect()
train_df = perform_count( train_df, ['ip', 'app'], 'ip_app_count', show_max=True ); gc.collect()
train_df = perform_count( train_df, ['ip', 'app', 'os'], 'ip_app_os_count', 'uint16', show_max=True ); gc.collect()
train_df = perform_var( train_df, ['ip', 'day', 'channel'], 'hour', 'ip_tchan_count', show_max=True ); gc.collect()
train_df = perform_var( train_df, ['ip', 'app', 'os'], 'hour', 'ip_app_os_var', show_max=True ); gc.collect()
train_df = perform_var( train_df, ['ip', 'app', 'channel'], 'day', 'ip_app_channel_var_day', show_max=True ); gc.collect()
train_df = perform_mean( train_df, ['ip', 'app', 'channel'], 'hour', 'ip_app_channel_mean_hour', show_max=True ); gc.collect()
print('doing nextClick')
predictors=[]
new_feature = 'nextClick'
filename='nextClick_%d_%d.csv'%(idx_from,idx_to)
#if os.path.exists(filename):
if (0):
print('loading from save file')
QQ=pd.read_csv(filename).values
else:
D=PRESET_D
train_df['category'] = (train_df['ip'].astype(str) + "_" + train_df['app'].astype(str) + "_" + train_df['device'].astype(str) \
+ "_" + train_df['os'].astype(str)).apply(hash) % D
click_buffer= np.full(D, PRESET_DM, dtype=np.uint32)
train_df['epochtime']= train_df['click_time'].astype(np.int64) // 10 ** 9
next_clicks= []
for category, t in zip(reversed(train_df['category'].values), reversed(train_df['epochtime'].values)):
next_clicks.append(click_buffer[category]-t)
click_buffer[category]= t
del(click_buffer)
QQ= list(reversed(next_clicks))
if not debug:
print('saving')
pd.DataFrame(QQ).to_csv(filename,index=False)
train_df.drop(['epochtime','category','click_time'], axis=1, inplace=True)
#additional drop columns(lee importance)
train_df.drop(['day','ip_tchan_count','X7'],axis=1, inplace=True)
train_df[new_feature] = pd.Series(QQ).astype('float32')
predictors.append(new_feature)
train_df[new_feature+'_shift'] = train_df[new_feature].shift(+1).values
predictors.append(new_feature+'_shift')
del QQ
gc.collect()
print("vars and data type: ")
train_df.info()
train_df['ip_tcount'] = train_df['ip_tcount'].astype('uint16')
train_df['ip_app_count'] = train_df['ip_app_count'].astype('uint16')
train_df['ip_app_os_count'] = train_df['ip_app_os_count'].astype('uint16')
target = 'is_attributed'
# =============================================================================
# predictors.extend(['app','device','os', 'channel', 'hour', 'day',
# 'ip_tcount', 'ip_tchan_count', 'ip_app_count',
# 'ip_app_os_count', 'ip_app_os_var',
# 'ip_app_channel_var_day','ip_app_channel_mean_hour',
# 'X0', 'X1', 'X2', 'X3', 'X4', 'X5', 'X6', 'X7', 'X8'])
# categorical = ['app', 'device', 'os', 'channel', 'hour', 'day']
#
# =============================================================================
predictors.extend(['app','device','os', 'channel', 'hour',
'ip_tcount', 'ip_app_count',
'ip_app_os_count', 'ip_app_os_var',
'ip_app_channel_var_day','ip_app_channel_mean_hour',
'X0', 'X1', 'X2', 'X3', 'X4', 'X5', 'X6', 'X8'])
categorical = ['app', 'device', 'os', 'channel', 'hour']
print('predictors',predictors)
test_df = train_df[len_train:]
val_df = train_df[(len_train-val_size):len_train]
train_df = train_df[:(len_train-val_size)]
print("train size: ", len(train_df))
print("valid size: ", len(val_df))
print("test size : ", len(test_df))
sub = | pd.DataFrame() | pandas.DataFrame |
# Copyright 1999-2020 Alibaba Group Holding Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from collections import OrderedDict
import numpy as np
import pandas as pd
from ... import opcodes as OperandDef
from ...core import Base, Entity
from ...serialize import KeyField, AnyField, StringField, DataTypeField, \
BoolField, Int32Field
from ...tensor.core import TENSOR_TYPE
from ...tensor.datasource import empty, tensor as astensor, \
from_series as tensor_from_series, from_dataframe as tensor_from_dataframe
from ...tensor.statistics.quantile import quantile as tensor_quantile
from ...tensor.utils import recursive_tile
from ..operands import DataFrameOperand, DataFrameOperandMixin, ObjectType
from ..core import DATAFRAME_TYPE
from ..datasource.from_tensor import series_from_tensor, dataframe_from_tensor
from ..initializer import DataFrame as create_df
from ..utils import parse_index, build_empty_df, find_common_type, validate_axis
class DataFrameQuantile(DataFrameOperand, DataFrameOperandMixin):
_op_type_ = OperandDef.QUANTILE
_input = KeyField('input')
_q = AnyField('q')
_axis = Int32Field('axis')
_numeric_only = BoolField('numeric_only')
_interpolation = StringField('interpolation')
_dtype = DataTypeField('dtype')
def __init__(self, q=None, interpolation=None, axis=None, numeric_only=None,
dtype=None, gpu=None, object_type=None, **kw):
super().__init__(_q=q, _interpolation=interpolation, _axis=axis,
_numeric_only=numeric_only, _dtype=dtype, _gpu=gpu,
_object_type=object_type, **kw)
@property
def input(self):
return self._input
@property
def q(self):
return self._q
@property
def interpolation(self):
return self._interpolation
@property
def axis(self):
return self._axis
@property
def numeric_only(self):
return self._numeric_only
def _set_inputs(self, inputs):
super()._set_inputs(inputs)
self._input = self._inputs[0]
if isinstance(self._q, TENSOR_TYPE):
self._q = self._inputs[-1]
def _calc_dtype_on_axis_1(self, a, dtypes):
quantile_dtypes = []
for name in dtypes.index:
dt = tensor_quantile(tensor_from_series(a[name]), self._q,
interpolation=self._interpolation,
handle_non_numeric=not self._numeric_only).dtype
quantile_dtypes.append(dt)
return find_common_type(quantile_dtypes)
def _call_dataframe(self, a, inputs):
if self._numeric_only:
empty_df = build_empty_df(a.dtypes)
dtypes = empty_df._get_numeric_data().dtypes
else:
dtypes = a.dtypes
if isinstance(self._q, TENSOR_TYPE):
q_val = self._q
pd_index = pd.Index([], dtype=q_val.dtype)
name = None
store_index_value = False
else:
q_val = np.asanyarray(self._q)
pd_index = pd.Index(q_val)
name = self._q if q_val.size == 1 else None
store_index_value = True
tokenize_objects = (a, q_val, self._interpolation, type(self).__name__)
if q_val.ndim == 0 and self._axis == 0:
self._object_type = ObjectType.series
index_value = parse_index(dtypes.index, store_data=store_index_value)
shape = (len(dtypes),)
# calc dtype
dtype = self._calc_dtype_on_axis_1(a, dtypes)
return self.new_series(inputs, shape=shape, dtype=dtype,
index_value=index_value, name=name or dtypes.index.name)
elif q_val.ndim == 0 and self._axis == 1:
self._object_type = ObjectType.series
index_value = a.index_value
shape = (len(a),)
# calc dtype
dt = tensor_quantile(empty(a.shape[1], dtype=find_common_type(dtypes)),
self._q, interpolation=self._interpolation,
handle_non_numeric=not self._numeric_only).dtype
return self.new_series(inputs, shape=shape, dtype=dt,
index_value=index_value, name=name or index_value.name)
elif q_val.ndim == 1 and self._axis == 0:
self._object_type = ObjectType.dataframe
shape = (len(q_val), len(dtypes))
index_value = parse_index(pd_index, *tokenize_objects, store_data=store_index_value)
dtype_list = []
for name in dtypes.index:
dtype_list.append(
tensor_quantile(tensor_from_series(a[name]), self._q,
interpolation=self._interpolation,
handle_non_numeric=not self._numeric_only).dtype)
dtypes = pd.Series(dtype_list, index=dtypes.index)
return self.new_dataframe(inputs, shape=shape, dtypes=dtypes,
index_value=index_value,
columns_value=parse_index(dtypes.index, store_data=True))
else:
assert q_val.ndim == 1 and self._axis == 1
self._object_type = ObjectType.dataframe
shape = (len(q_val), a.shape[0])
index_value = parse_index(pd_index, *tokenize_objects, store_data=store_index_value)
pd_columns = a.index_value.to_pandas()
dtype_list = np.full(len(pd_columns), self._calc_dtype_on_axis_1(a, dtypes))
dtypes = pd.Series(dtype_list, index=pd_columns)
return self.new_dataframe(inputs, shape=shape,
dtypes=dtypes,
index_value=index_value,
columns_value=parse_index(dtypes.index, store_data=True,
key=a.index_value.key))
def _call_series(self, a, inputs):
if isinstance(self._q, TENSOR_TYPE):
q_val = self._q
index_val = | pd.Index([], dtype=q_val.dtype) | pandas.Index |
import pandas as pd
pd.options.mode.chained_assignment = None # default='warn'
import os
from os.path import expanduser
home = expanduser("~")
from sklearn.externals import joblib
import numpy as np
import sys
from bs4 import BeautifulSoup
import sys
import argparse
import re
import os.path
from os import makedirs
from pprint import pprint
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_extraction.text import HashingVectorizer
from numpy import log
from scipy.special import rel_entr
from nltk.corpus import stopwords
from nltk.stem.wordnet import WordNetLemmatizer
sys.path.append(os.path.join(home, "code/research_code/Spring_2018/TextModules/"))
from TextProcessor import TextProcessor
# SemEval Pre-processing scripts
def preprocess_semeval(dataset='english_restaurants', subtask='sentence_level'):
import semeval_help_functions
print("\n\n\n\t\t\tDOMAIN={}".format(dataset))
print('Collecting training data...')
savefolder = datafolder + "/semeval/preprocessed/"
method = 'train'
language = dataset.split('_')[0]
embeddings_dimension = 300
pretrained_word_embedding_matrix = os.path.join(datafolder, 'semeval/embeddings/word2emb_{}.pkl'.format(dataset))
tp = TextProcessor(word_embeddings='custom', embedding_dim=embeddings_dimension,
limit=500000, stemming=False, tokenization_method='ruder',
tokenizer_language=language,
pretrained_word_embedding_matrix_path=pretrained_word_embedding_matrix)
vocab_size = len(tp.vocab_emb)
word2id = tp.word2ind_emb
id2word = {word2id[w]: w for w in word2id}
joblib.dump(word2id, savefolder + "{}_word2id.pkl".format(dataset))
# Save word embedding matrix
print('Collecting word embeddings...')
word2emb = tp.word2emb
word_emb_matrix = np.array([word2emb[id2word[i]] for i in range(len(word2id))])
joblib.dump(word_emb_matrix, savefolder + '{}_word2emb.pkl'.format(dataset))
review_df, sentence_df, opinion_df = semeval_help_functions.get_reviews_df(dataset, subtask, method)
aspects = sorted(set(opinion_df['category']))
aspect2id = {a: i for i, a in enumerate(aspects)}
joblib.dump(aspects, savefolder + "{}_aspect_names.pkl".format(dataset))
sentence_df['categories'] = sentence_df['id'].map(
lambda x: opinion_df[opinion_df['sentence_id'] == x]['category'].tolist())
sentence_df['label'] = sentence_df['categories'].map(lambda x: [aspect2id[a] for a in x])
sentence_df['word_ids'] = sentence_df['text'].map(lambda x: tp.get_word_indices(x))
sentence_df = sentence_df[sentence_df['numofopinions'] > 0]
joblib.dump(sentence_df, savefolder + "{}_TRAIN.pkl".format(dataset))
print("Collecting DEV data...")
test_review_df, test_sentence_df, test_opinion_df = semeval_help_functions.get_reviews_df(dataset, subtask, 'dev')
test_sentence_df['categories'] = test_sentence_df['id'].map(
lambda x: test_opinion_df[test_opinion_df['sentence_id'] == x]['category'].tolist())
test_sentence_df['label'] = test_sentence_df['categories'].map(
lambda x: [aspect2id[a] if a in aspect2id else -1 for a in x])
test_sentence_df['word_ids'] = test_sentence_df['text'].map(lambda x: tp.get_word_indices(x))
test_sentence_df = test_sentence_df[test_sentence_df['numofopinions'] > 0]
joblib.dump(test_sentence_df, savefolder + "{}_DEV.pkl".format(dataset))
print("Collecting TEST data...")
test_review_df, test_sentence_df, test_opinion_df = semeval_help_functions.get_reviews_df(dataset, subtask, 'test')
test_sentence_df['categories'] = test_sentence_df['id'].map(
lambda x: test_opinion_df[test_opinion_df['sentence_id'] == x]['category'].tolist())
test_sentence_df['label'] = test_sentence_df['categories'].map(
lambda x: [aspect2id[a] if a in aspect2id else -1 for a in x])
test_sentence_df['word_ids'] = test_sentence_df['text'].map(lambda x: tp.get_word_indices(x))
test_sentence_df = test_sentence_df[test_sentence_df['numofopinions'] > 0]
joblib.dump(test_sentence_df, savefolder + "{}_TEST.pkl".format(dataset))
return
def find_seed_words(dataset='english_restaurants', subtask='sentence_level', method='train', remove_stopwords=True):
import semeval_help_functions
    # Use a local variable rather than rebinding the module-level `datafolder`
    # (an augmented assignment here would raise UnboundLocalError).
    semeval_folder = datafolder + '/semeval/'
    semeval_help_functions.find_seed_words(dataset=dataset, method=method, subtask=subtask,
                                           savefolder=semeval_folder + 'seed_words/', remove_stopwords=remove_stopwords)
def preprocess_script():
all_datasets = ['english_restaurants', 'spanish_restaurants', 'french_restaurants', 'russian_restaurants',
'dutch_restaurants', 'turkish_restaurants', 'arabic_hotels', 'english_laptops']
for dataset in all_datasets:
preprocess_semeval(dataset)
find_seed_words(dataset)
def read_semeval_file(fpath):
# Used for reading SemEval 2016 Task 5 Dataset (ABSA)
xml_data = open(fpath).read() # Loading the raw XML data
soup = BeautifulSoup(xml_data, "lxml")
reviews = soup.find_all('review')
return reviews
def xml2dict_sentence_level(xml_reviews):
# Used for reading SemEval 2016 Task 5 Dataset (ABSA)
restaurant_reviews = []
for r in xml_reviews:
review_dict = {}
review_dict['rid'] = r['rid']
# print(r['rid'])
review_dict['text'] = r.getText().strip().replace('\n\n\n\n\n\n', ' ')
sentences = r.find_all('sentences')
if len(sentences) > 1:
print('[WARNING] More than 1 sentences')
sentences = sentences[0]
review_dict['sentences'] = []
for sentence in sentences.find_all('sentence'):
sentence_dict = {}
sentence_dict['id'] = sentence['id']
sentence_dict['text'] = sentence.getText().strip()
opinions = sentence.find('opinions')
sentence_dict['opinions'] = []
if opinions is not None:
for opinion in opinions.find_all('opinion'):
opinion_dict = {}
opinion_dict['category'] = opinion['category']
opinion_dict['polarity'] = opinion['polarity']
try:
opinion_dict['from'] = int(opinion['from'])
opinion_dict['to'] = int(opinion['to'])
opinion_dict['target'] = opinion['target']
except:
pass
sentence_dict['opinions'].append(opinion_dict)
review_dict['sentences'].append(sentence_dict)
restaurant_reviews.append(review_dict)
return restaurant_reviews
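# Hedged usage sketch (assumed local file path): parse one SemEval-2016 ABSA XML file end to end.
# reviews_xml = read_semeval_file('ABSA16_Restaurants_Train_SB1_v2.xml')
# reviews = xml2dict_sentence_level(reviews_xml)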
def dict2df_sentence_level(review_list):
# Used for the analysis of SemEval 2016 Task 5 Dataset (ABSA)
review_df = pd.DataFrame()
opinions_df = pd.DataFrame()
sentence_df = | pd.DataFrame() | pandas.DataFrame |
# -*- coding: utf-8 -*-
"""
Created on Mon Dec 6 21:53:13 2021
@author: Alex
"""
import os #operating system
import pandas as pd #manage dataframes
import numpy as np #numeric python (vectors, matrices, ...)
import matplotlib.pyplot as plt #plots
import scipy.stats as stats #statistical tests
from pandas.api.types import CategoricalDtype
#Change to the directory where the dataset is located
os.chdir('C:/Programacion Estadistica PEP/ejercicio comparacion medias')
os.getcwd()
wbr = | pd.read_csv('USA_cars_datasets.csv', sep=',', decimal='.') | pandas.read_csv |
#!/usr/bin/env python
'''
This script compares the breakdown voltage measurements of the LSU MPPC-PCB from LSU, UTotyo and Hamamatsu.
'''
import argparse
from pandas.io import parsers
import data_interface
import os
import pandas as pd
import seaborn as sns
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('-d', '--data_path', type=str, default='data/20210422_first_meeting')
args = parser.parse_args()
# load data files
utokyo_meas = data_interface.utokyo_data(f'{args.data_path}/Utokyo_PCB_Measurement_For crschk_ .xlsx')
lsu_meas = data_interface.lsu_data(f'{args.data_path}/mppc_summary_utokyo_pcb_lsu_measurements.csv')
# join into a single table
df_join = | pd.merge(left=utokyo_meas.df_data, right=lsu_meas.df_data, left_on='channel', right_on='channel') | pandas.merge |
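    # Hedged follow-up sketch (column names are assumptions; the real measurement columns live in
    # data_interface): once merged, per-channel breakdown voltages could be compared, e.g.
    # df_join['vbr_diff'] = df_join['vbr_lsu'] - df_join['vbr_utokyo']
    # sns.scatterplot(data=df_join, x='vbr_utokyo', y='vbr_lsu')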